[r-cran-learnbayes] 10/15: New upstream version 2.15
Andreas Tille
tille@debian.org
Thu Nov 9 15:55:37 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-learnbayes.
commit e15d89308351e753964870f6cad9c0dbfb4681b9
Author: Andreas Tille <tille@debian.org>
Date: Thu Nov 9 16:50:28 2017 +0100
New upstream version 2.15
---
DESCRIPTION | 14 +++
MD5 | 261 +++++++++++++++++++++++++++++++++++++++
NAMESPACE | 1 +
R/bayes.influence.R | 20 +++
R/bayes.model.selection.R | 51 ++++++++
R/bayes.probit.R | 38 ++++++
R/bayesresiduals.R | 14 +++
R/beta.select.R | 32 +++++
R/betabinexch.R | 16 +++
R/betabinexch0.R | 16 +++
R/bfexch.R | 13 ++
R/bfindep.R | 46 +++++++
R/binomial.beta.mix.R | 13 ++
R/blinreg.R | 32 +++++
R/blinregexpected.R | 24 ++++
R/blinregpred.R | 24 ++++
R/bprobit.probs.R | 24 ++++
R/bradley.terry.post.R | 12 ++
R/careertraj.setup.R | 25 ++++
R/cauchyerrorpost.R | 7 ++
R/ctable.R | 29 +++++
R/discint.R | 27 ++++
R/discrete.bayes.2.R | 26 ++++
R/discrete.bayes.R | 13 ++
R/dmnorm.R | 18 +++
R/dmt.R | 22 ++++
R/gibbs.R | 28 +++++
R/groupeddatapost.R | 8 ++
R/hiergibbs.R | 59 +++++++++
R/histprior.R | 11 ++
R/howardprior.R | 14 +++
R/impsampling.R | 17 +++
R/indepmetrop.R | 30 +++++
R/laplace.R | 15 +++
R/lbinorm.R | 11 ++
R/logctablepost.R | 14 +++
R/logisticpost.R | 17 +++
R/logpoissgamma.R | 9 ++
R/logpoissnormal.R | 9 ++
R/mnormt.onesided.R | 28 +++++
R/mnormt.twosided.R | 14 +++
R/mycontour.R | 23 ++++
R/normal.normal.mix.R | 14 +++
R/normal.select.R | 12 ++
R/normchi2post.R | 12 ++
R/normnormexch.R | 11 ++
R/normpostpred.R | 12 ++
R/normpostsim.R | 40 ++++++
R/ordergibbs.R | 75 +++++++++++
R/pbetap.R | 23 ++++
R/pbetat.R | 27 ++++
R/pdisc.R | 27 ++++
R/pdiscp.R | 25 ++++
R/plot.bayes.R | 2 +
R/plot.bayes2.R | 6 +
R/poissgamexch.R | 17 +++
R/poisson.gamma.mix.R | 18 +++
R/predplot.R | 10 ++
R/print.bayes.R | 2 +
R/prior.two.parameters.R | 7 ++
R/rdirichlet.R | 14 +++
R/reg.gprior.post.R | 10 ++
R/regroup.R | 19 +++
R/rejectsampling.R | 17 +++
R/rigamma.R | 3 +
R/rmnorm.R | 9 ++
R/rmt.R | 13 ++
R/robustt.R | 24 ++++
R/rtruncated.R | 2 +
R/rwmetrop.R | 26 ++++
R/simcontour.R | 31 +++++
R/sir.R | 17 +++
R/summary.bayes.R | 22 ++++
R/transplantpost.R | 32 +++++
R/triplot.R | 21 ++++
R/weibullregpost.R | 21 ++++
build/vignette.rds | Bin 0 -> 383 bytes
data/achievement.txt.gz | Bin 0 -> 896 bytes
data/baseball.1964.txt.gz | Bin 0 -> 206 bytes
data/bermuda.grass.txt.gz | Bin 0 -> 281 bytes
data/birdextinct.txt.gz | Bin 0 -> 830 bytes
data/birthweight.txt.gz | Bin 0 -> 149 bytes
data/breastcancer.txt.gz | Bin 0 -> 151 bytes
data/calculus.grades.txt.gz | Bin 0 -> 212 bytes
data/cancermortality.txt.gz | Bin 0 -> 97 bytes
data/chemotherapy.txt.gz | Bin 0 -> 218 bytes
data/darwin.txt.gz | Bin 0 -> 72 bytes
data/donner.txt.gz | Bin 0 -> 151 bytes
data/election.2008.txt.gz | Bin 0 -> 574 bytes
data/election.txt.gz | Bin 0 -> 1167 bytes
data/footballscores.txt.gz | Bin 0 -> 5295 bytes
data/hearttransplants.txt.gz | Bin 0 -> 330 bytes
data/iowagpa.txt.gz | Bin 0 -> 265 bytes
data/jeter2004.txt.gz | Bin 0 -> 815 bytes
data/marathontimes.txt.gz | Bin 0 -> 76 bytes
data/puffin.txt.gz | Bin 0 -> 334 bytes
data/schmidt.txt.gz | Bin 0 -> 438 bytes
data/sluggerdata.txt.gz | Bin 0 -> 3723 bytes
data/soccergoals.txt.gz | Bin 0 -> 53 bytes
data/stanfordheart.txt.gz | Bin 0 -> 337 bytes
data/strikeout.txt.gz | Bin 0 -> 2594 bytes
data/studentdata.txt.gz | Bin 0 -> 7396 bytes
debian/README.test | 8 --
debian/changelog | 34 -----
debian/compat | 1 -
debian/control | 26 ----
debian/copyright | 30 -----
debian/docs | 2 -
debian/examples | 1 -
debian/rules | 4 -
debian/source/format | 1 -
debian/tests/control | 3 -
debian/tests/run-unit-test | 18 ---
debian/watch | 2 -
demo/00Index | 45 +++++++
demo/Chapter.1.2.R | 46 +++++++
demo/Chapter.1.3.R | 55 +++++++++
demo/Chapter.10.2.R | 23 ++++
demo/Chapter.10.3.R | 57 +++++++++
demo/Chapter.10.4.R | 84 +++++++++++++
demo/Chapter.2.3.R | 27 ++++
demo/Chapter.2.4.R | 33 +++++
demo/Chapter.2.5.R | 33 +++++
demo/Chapter.2.6.R | 33 +++++
demo/Chapter.3.2.R | 17 +++
demo/Chapter.3.3.R | 31 +++++
demo/Chapter.3.4.R | 58 +++++++++
demo/Chapter.3.5.R | 25 ++++
demo/Chapter.3.6.R | 42 +++++++
demo/Chapter.4.2.R | 21 ++++
demo/Chapter.4.3.R | 39 ++++++
demo/Chapter.4.4.R | 62 ++++++++++
demo/Chapter.4.5.R | 31 +++++
demo/Chapter.5.10.R | 18 +++
demo/Chapter.5.4.R | 16 +++
demo/Chapter.5.6.R | 19 +++
demo/Chapter.5.7.R | 8 ++
demo/Chapter.5.8.R | 33 +++++
demo/Chapter.5.9.R | 48 +++++++
demo/Chapter.6.10.R | 40 ++++++
demo/Chapter.6.2.R | 22 ++++
demo/Chapter.6.7.R | 38 ++++++
demo/Chapter.6.8.R | 50 ++++++++
demo/Chapter.6.9.R | 58 +++++++++
demo/Chapter.7.10.R | 38 ++++++
demo/Chapter.7.2.R | 28 +++++
demo/Chapter.7.3.R | 10 ++
demo/Chapter.7.4.R | 30 +++++
demo/Chapter.7.5.R | 18 +++
demo/Chapter.7.7.R | 51 ++++++++
demo/Chapter.7.8.R | 53 ++++++++
demo/Chapter.7.9.R | 54 ++++++++
demo/Chapter.8.3.R | 39 ++++++
demo/Chapter.8.4.R | 10 ++
demo/Chapter.8.6.R | 22 ++++
demo/Chapter.8.7.R | 19 +++
demo/Chapter.8.8.R | 23 ++++
demo/Chapter.9.2.R | 111 +++++++++++++++++
demo/Chapter.9.3.R | 38 ++++++
demo/Chapter.9.4.R | 28 +++++
inst/doc/BayesFactors.R | 63 ++++++++++
inst/doc/BayesFactors.Rnw | 107 ++++++++++++++++
inst/doc/BayesFactors.pdf | Bin 0 -> 138575 bytes
inst/doc/BinomialInference.R | 40 ++++++
inst/doc/BinomialInference.Rnw | 70 +++++++++++
inst/doc/BinomialInference.pdf | Bin 0 -> 110608 bytes
inst/doc/DiscreteBayes.R | 91 ++++++++++++++
inst/doc/DiscreteBayes.Rnw | 101 +++++++++++++++
inst/doc/DiscreteBayes.pdf | Bin 0 -> 109329 bytes
inst/doc/MCMCintro.R | 68 ++++++++++
inst/doc/MCMCintro.Rnw | 114 +++++++++++++++++
inst/doc/MCMCintro.pdf | Bin 0 -> 284718 bytes
inst/doc/MultilevelModeling.R | 74 +++++++++++
inst/doc/MultilevelModeling.Rnw | 118 ++++++++++++++++++
inst/doc/MultilevelModeling.pdf | Bin 0 -> 271497 bytes
man/achievement.Rd | 28 +++++
man/baseball.1964.Rd | 27 ++++
man/bayes.influence.Rd | 32 +++++
man/bayes.model.selection.Rd | 32 +++++
man/bayes.probit.Rd | 35 ++++++
man/bayesresiduals.Rd | 34 +++++
man/bermuda.grass.Rd | 26 ++++
man/beta.select.Rd | 32 +++++
man/betabinexch.Rd | 29 +++++
man/betabinexch0.Rd | 28 +++++
man/bfexch.Rd | 32 +++++
man/bfindep.Rd | 30 +++++
man/binomial.beta.mix.Rd | 32 +++++
man/birdextinct.Rd | 28 +++++
man/birthweight.Rd | 24 ++++
man/blinreg.Rd | 33 +++++
man/blinregexpected.Rd | 35 ++++++
man/blinregpred.Rd | 35 ++++++
man/bprobit.probs.Rd | 34 +++++
man/bradley.terry.post.Rd | 30 +++++
man/breastcancer.Rd | 24 ++++
man/calculus.grades.Rd | 21 ++++
man/cancermortality.Rd | 23 ++++
man/careertraj.setup.Rd | 32 +++++
man/cauchyerrorpost.Rd | 29 +++++
man/chemotherapy.Rd | 29 +++++
man/ctable.Rd | 27 ++++
man/darwin.Rd | 20 +++
man/discint.Rd | 31 +++++
man/discrete.bayes.2.Rd | 41 ++++++
man/discrete.bayes.Rd | 39 ++++++
man/dmnorm.Rd | 30 +++++
man/dmt.Rd | 32 +++++
man/donner.Rd | 23 ++++
man/election.2008.Rd | 22 ++++
man/election.Rd | 24 ++++
man/footballscores.Rd | 27 ++++
man/gibbs.Rd | 34 +++++
man/groupeddatapost.Rd | 32 +++++
man/hearttransplants.Rd | 25 ++++
man/hiergibbs.Rd | 30 +++++
man/histprior.Rd | 30 +++++
man/howardprior.Rd | 28 +++++
man/impsampling.Rd | 36 ++++++
man/indepmetrop.Rd | 31 +++++
man/iowagpa.Rd | 25 ++++
man/jeter2004.Rd | 27 ++++
man/laplace.Rd | 38 ++++++
man/lbinorm.Rd | 29 +++++
man/logctablepost.Rd | 30 +++++
man/logisticpost.Rd | 32 +++++
man/logpoissgamma.Rd | 31 +++++
man/logpoissnormal.Rd | 31 +++++
man/marathontimes.Rd | 19 +++
man/mnormt.onesided.Rd | 34 +++++
man/mnormt.twosided.Rd | 40 ++++++
man/mycontour.Rd | 32 +++++
man/normal.normal.mix.Rd | 32 +++++
man/normal.select.Rd | 33 +++++
man/normchi2post.Rd | 25 ++++
man/normnormexch.Rd | 29 +++++
man/normpostpred.Rd | 30 +++++
man/normpostsim.Rd | 32 +++++
man/ordergibbs.Rd | 28 +++++
man/pbetap.Rd | 31 +++++
man/pbetat.Rd | 34 +++++
man/pdisc.Rd | 30 +++++
man/pdiscp.Rd | 33 +++++
man/poissgamexch.Rd | 31 +++++
man/poisson.gamma.mix.Rd | 32 +++++
man/predplot.Rd | 26 ++++
man/prior.two.parameters.Rd | 25 ++++
man/puffin.Rd | 24 ++++
man/rdirichlet.Rd | 27 ++++
man/reg.gprior.post.Rd | 28 +++++
man/regroup.Rd | 25 ++++
man/rejectsampling.Rd | 32 +++++
man/rigamma.Rd | 30 +++++
man/rmnorm.Rd | 28 +++++
man/rmt.Rd | 30 +++++
man/robustt.Rd | 26 ++++
man/rtruncated.Rd | 36 ++++++
man/rwmetrop.Rd | 35 ++++++
man/schmidt.Rd | 32 +++++
man/simcontour.Rd | 32 +++++
man/sir.Rd | 31 +++++
man/sluggerdata.Rd | 31 +++++
man/soccergoals.Rd | 19 +++
man/stanfordheart.Rd | 23 ++++
man/strikeout.Rd | 23 ++++
man/studentdata.Rd | 30 +++++
man/transplantpost.Rd | 29 +++++
man/triplot.Rd | 26 ++++
man/weibullregpost.Rd | 30 +++++
vignettes/BayesFactors.Rnw | 107 ++++++++++++++++
vignettes/BinomialInference.Rnw | 70 +++++++++++
vignettes/DiscreteBayes.Rnw | 101 +++++++++++++++
vignettes/MCMCintro.Rnw | 114 +++++++++++++++++
vignettes/MultilevelModeling.Rnw | 118 ++++++++++++++++++
274 files changed, 7568 insertions(+), 130 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100644
index 0000000..cb5a393
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,14 @@
+Package: LearnBayes
+Type: Package
+Title: Functions for Learning Bayesian Inference
+Version: 2.15
+Date: 2014-05-28
+Author: Jim Albert
+Maintainer: Jim Albert <albert@bgsu.edu>
+LazyData: yes
+Description: LearnBayes contains a collection of functions helpful in learning the basic tenets of Bayesian statistical inference. It contains functions for summarizing basic one and two parameter posterior distributions and predictive distributions. It contains MCMC algorithms for summarizing posterior distributions defined by the user. It also contains functions for regression models, hierarchical models, Bayesian tests, and illustrations of Gibbs sampling.
+License: GPL (>= 2)
+Packaged: 2014-05-29 11:59:53 UTC; albert
+NeedsCompilation: no
+Repository: CRAN
+Date/Publication: 2014-05-29 17:33:08
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..73e1203
--- /dev/null
+++ b/MD5
@@ -0,0 +1,261 @@
+f74ae770852743a7a06ffe65fedffd37 *DESCRIPTION
+8b54e5a89fbda3af5e077053d40bec76 *NAMESPACE
+898a08431c9004861b3ec4fd0c784ee2 *R/bayes.influence.R
+22f4cccb4f46de787dee9c74af2b283a *R/bayes.model.selection.R
+769f1b08a58479b8fab604840b802e9b *R/bayes.probit.R
+4e19cd38ae7078975ed2165f77fc3040 *R/bayesresiduals.R
+fbc11b8fa09801928c3ed7c9efb15668 *R/beta.select.R
+37d2b3d5e6d3e24fd3b4eefba77a8f89 *R/betabinexch.R
+9bd12f9a020b5cf54c5aaf3096b6e91f *R/betabinexch0.R
+4958278f73f422c9899897535b6b4287 *R/bfexch.R
+68e83353fece11a03dd1c9b53b7e8b4c *R/bfindep.R
+c61aec9c9d03a038839b0e7d9a04ce29 *R/binomial.beta.mix.R
+1df421262c60c62bd22311234c4fd522 *R/blinreg.R
+c6a241d361109cc9d4cbef2acddb4322 *R/blinregexpected.R
+db4a3a1c3cc0756ef1c79a7899fccffe *R/blinregpred.R
+8b257a4a960b5618ac63f81c2a3d97bc *R/bprobit.probs.R
+0eb84201d5263abc1c194138f3475380 *R/bradley.terry.post.R
+7330118420406e7b2dfa73c0c326c1c3 *R/careertraj.setup.R
+a5c932c9ee4c5755a125bf751b4e3d89 *R/cauchyerrorpost.R
+ba39f7917a9819227db08abf9f18f014 *R/ctable.R
+1c2d150f4671a2c330f5f87d3235e1ae *R/discint.R
+69f8a00b75039785300678374ac3e7ff *R/discrete.bayes.2.R
+13ceffa9b1a3047e1d27698d40a6ea00 *R/discrete.bayes.R
+4c62518d20d05153f64f3a60ceda3e87 *R/dmnorm.R
+1187a62ddfeb65fac2497cc6d61dcc87 *R/dmt.R
+8487380778cb5029b6b3a07782687b9a *R/gibbs.R
+360bae448399478ed53098ab3538f0a8 *R/groupeddatapost.R
+8dada9a4e777465fed0d0112cd869c74 *R/hiergibbs.R
+202886ea8cee2743acb0fcc27f9f05fd *R/histprior.R
+66c159127429ae9278fc87581941e58e *R/howardprior.R
+b667cf56872d63cf0d2fe32aaad53ab0 *R/impsampling.R
+afadb48b693ac0bb3bf52812f1da6fbd *R/indepmetrop.R
+0a49148264cb3e57fc27beedc8834f55 *R/laplace.R
+12d087bbd997a4dd66b832c43a9a1983 *R/lbinorm.R
+77c24b74c7cb733f542f2110248c8d99 *R/logctablepost.R
+8fc3a440a61bce56dfc60c0136d48a15 *R/logisticpost.R
+26f2f3f0b1335373d0f0e61f9ba28433 *R/logpoissgamma.R
+7b383299fc96727f8bfaac619e8488a7 *R/logpoissnormal.R
+69222b200e3af366c5cdbc076b0a0469 *R/mnormt.onesided.R
+09316f78dda69ea4db95723d448da40a *R/mnormt.twosided.R
+7818233996c26664b1684cd0858ff942 *R/mycontour.R
+ee8df8184ae863699b3d9c700f046266 *R/normal.normal.mix.R
+a50662b72b533fe8e37ec0c98ae1d7b1 *R/normal.select.R
+72fb2ad1a086b3db169d0a6324f56d21 *R/normchi2post.R
+f3613aa12d0f5112d2142172bbdef027 *R/normnormexch.R
+ae1d5003ab4c845c560cea5a62e85e56 *R/normpostpred.R
+8c569f83114b21bc24b83a5f98864dd1 *R/normpostsim.R
+2f8683119467677ee79b74c7d41e57ba *R/ordergibbs.R
+629e3e65d519c624320c3da35dbc51a6 *R/pbetap.R
+d0baeea9ff76fcf41bffe1128c053249 *R/pbetat.R
+ed659f82aba03b8e9e64ed760aca61f0 *R/pdisc.R
+53454d5197757d1b3b71818b4b29d449 *R/pdiscp.R
+ab0f8196b33f21e1f41447f826853812 *R/plot.bayes.R
+b090ab0453262867f9dd7733c7e64fd8 *R/plot.bayes2.R
+320137b38e3f249370c5c5a13820f9fa *R/poissgamexch.R
+927a92665be2638fbf369f4cd34585da *R/poisson.gamma.mix.R
+9d114c0dc1d717f2a3f6637a6587db03 *R/predplot.R
+5d68f5d63f78f3d31d00d34ab9a953c3 *R/print.bayes.R
+cb923b308f55e338ce5b50d0c762e31e *R/prior.two.parameters.R
+dd910cbe995190125fadde36e4bdbf4e *R/rdirichlet.R
+8015d9c359902eebfb3b3fb05827c5cc *R/reg.gprior.post.R
+e8f2ca865f26af92260e44a4d9e26cd7 *R/regroup.R
+20ba84113c853873e64b2f78cce20b3d *R/rejectsampling.R
+6dcf1d2901dcc73465c8aeacc400df8f *R/rigamma.R
+cb95a0325b358a40ee2206f2cff3486f *R/rmnorm.R
+d007a2e0bbd971dc268afdb44a085841 *R/rmt.R
+bd565d4dcb403a65a9c3639b1fe863cd *R/robustt.R
+efb473edadcd9ab19305dce1ac8bfa56 *R/rtruncated.R
+9e88e94a5dfb3fceb39befcd40e92db4 *R/rwmetrop.R
+c4ad0aa3cfdcd196b08034f344b84fb7 *R/simcontour.R
+34c83d8c4a3dcabdacd57e0b5f936cce *R/sir.R
+e6f232d48ec2323225260572cd78e55c *R/summary.bayes.R
+09abbfa80415d442dfff7f70866c97f1 *R/transplantpost.R
+99b374549ebefd2e320a87a2b514de13 *R/triplot.R
+1a4f7381d7238c0bd334ff8ae0e8c125 *R/weibullregpost.R
+fefabaed69c0572e1243e3d7c7261ea9 *build/vignette.rds
+03e85ca351f4c9ddd739fd4ebba0fdc9 *data/achievement.txt.gz
+397f7acbef0dbe7d497172ddffd069b9 *data/baseball.1964.txt.gz
+01b069c569d7270028e4e039fb3c7f11 *data/bermuda.grass.txt.gz
+da4c72c95d096ff8182c78c1813e0683 *data/birdextinct.txt.gz
+04c19d25f28741873e7ff3f6d55af704 *data/birthweight.txt.gz
+f52f4af286141448fbfeee66d489c05d *data/breastcancer.txt.gz
+3159909f2ed2db685aef5a2bd3f0e09b *data/calculus.grades.txt.gz
+3fbd4ba3efe9a9c2a621ddf77079f492 *data/cancermortality.txt.gz
+3c07b363db7ba156cd5425017c6cdecd *data/chemotherapy.txt.gz
+e3509a545460413642c6b55da3d4b40e *data/darwin.txt.gz
+b75585797a9a25a1994e7cfb2aea669b *data/donner.txt.gz
+a7f573296ae72639a93f441d6f98ee44 *data/election.2008.txt.gz
+340a6618cb7a123053907cdbb383306c *data/election.txt.gz
+8b6c5c4f1f48bfbfc8236e50015104a2 *data/footballscores.txt.gz
+56b42f2aeb3dff64c48559f7deafcdfc *data/hearttransplants.txt.gz
+5b8faecaa3ec02e380c7640661f543c3 *data/iowagpa.txt.gz
+f83f19fb994c033e6fc92c72eab6a9af *data/jeter2004.txt.gz
+5f2777ec75736a7acc319f88ac307bd1 *data/marathontimes.txt.gz
+8b1cadf9eb03b07746d6511ee4b7b649 *data/puffin.txt.gz
+538ac18bf98694b07990ee857ba89f80 *data/schmidt.txt.gz
+3421e3363d09bc5dc3d4ec96dcb9cf8e *data/sluggerdata.txt.gz
+54d7b55c6ef0ae512968b6bd11acc21d *data/soccergoals.txt.gz
+b7a6b9d621299747de34d8894dc62404 *data/stanfordheart.txt.gz
+3a5a54c3e799a85241f32a035b436065 *data/strikeout.txt.gz
+1131a94152473cb62f4018609bd22d0e *data/studentdata.txt.gz
+8964769af3c007efe3388f7150afa18f *demo/00Index
+41eef652b4c507344095b094bcbbbd70 *demo/Chapter.1.2.R
+085c057894eab1afe21f32c737cd9658 *demo/Chapter.1.3.R
+887005bff54db71ad4e038e596ec50cf *demo/Chapter.10.2.R
+d49a732adac45dfc343a7f908157723c *demo/Chapter.10.3.R
+a1e5943bb450cbad4da9b66d21be270d *demo/Chapter.10.4.R
+e8442fe89cbc08381a3e9bbc93a322d6 *demo/Chapter.2.3.R
+eba7052a0873e262a7c6356439ed72cc *demo/Chapter.2.4.R
+58d8b9355778389266fa757e21b5871c *demo/Chapter.2.5.R
+985f7269f4c4765bb2b9fd9e9f77708f *demo/Chapter.2.6.R
+3fe8bdd117d9ba42df686ef2cc7b25f7 *demo/Chapter.3.2.R
+cd46a69f84e542347291361292cbee00 *demo/Chapter.3.3.R
+7f172bb861494224c1b1b19dd4ced0b9 *demo/Chapter.3.4.R
+a6abe7d2d30fab8e630779305fcc8582 *demo/Chapter.3.5.R
+7dfa9eb93191f06d9679a81526b6c05b *demo/Chapter.3.6.R
+8cc49e3fb8274a30a016ff6779c9f7b9 *demo/Chapter.4.2.R
+745fefe52f7d1123427bfefacafcbb22 *demo/Chapter.4.3.R
+767f801f4cf7dbac88df2819c54065cb *demo/Chapter.4.4.R
+130bde1f4d4502a46f1eb2d190e8f8ad *demo/Chapter.4.5.R
+3407ef2ec2f42fc6171ed9022fbe2916 *demo/Chapter.5.10.R
+fc64167d90d9fb0542f5bb1a370f470c *demo/Chapter.5.4.R
+27fb82c18d8f992e54fc81558f8146ea *demo/Chapter.5.6.R
+933f240e1ee1f07cc3ff3fa08f593476 *demo/Chapter.5.7.R
+e382600f42a043f2dce385bf0dd2a765 *demo/Chapter.5.8.R
+52b64e4cbd0837556648496a1311a942 *demo/Chapter.5.9.R
+5cbb407aed67b723666ba07b7a184dda *demo/Chapter.6.10.R
+ccc509dba6548c32e1acba678fdb1450 *demo/Chapter.6.2.R
+82da96357929c77af1789c1146610771 *demo/Chapter.6.7.R
+e8149cb2755546c48b0763136bcf1e1e *demo/Chapter.6.8.R
+9afbd5663661af27ce51b612fe56781e *demo/Chapter.6.9.R
+378778c171febc8f95fcdac32b97c11c *demo/Chapter.7.10.R
+453cd09c38d1ea9b0c2a8ffa15b82602 *demo/Chapter.7.2.R
+f7df016019a9eec55565e607bff7016c *demo/Chapter.7.3.R
+5c903d7080da9ef59888ccd7b6b32f78 *demo/Chapter.7.4.R
+75392421a2b26b4ff1b281dfb82433e4 *demo/Chapter.7.5.R
+0e93d8d49711f040a54117e0b795a269 *demo/Chapter.7.7.R
+b43effd2d996b7a3b5aa889cc7bca4bb *demo/Chapter.7.8.R
+31dcbcbe3904e943e54ad0f36175e2b0 *demo/Chapter.7.9.R
+b91b0c60f6d0f18c126cca5eeaa33755 *demo/Chapter.8.3.R
+14650d2e78e92fa14d5e9c57876106a8 *demo/Chapter.8.4.R
+2a6d44204a9cb7092a93def46acf8bd1 *demo/Chapter.8.6.R
+35b4cc3b7e1292cd2492d706a7dcce16 *demo/Chapter.8.7.R
+c7362984ed3c55ac7decf7b82e7bf657 *demo/Chapter.8.8.R
+b4b61c3395225b800163fd5c13e5c21f *demo/Chapter.9.2.R
+1f2cb6054b44bda76aa50571fcdefad7 *demo/Chapter.9.3.R
+13b18788be48dcc1a7a4b45595d28515 *demo/Chapter.9.4.R
+284c5b240acb761ff644ad36c1cf5222 *inst/doc/BayesFactors.R
+545756224a285ec8e218d653ea4fce94 *inst/doc/BayesFactors.Rnw
+30c91a87cc025913cf7e35e3e0b2f583 *inst/doc/BayesFactors.pdf
+b589491575830a533e4bc38170bc228a *inst/doc/BinomialInference.R
+88e5197769d40875d240982440e7b893 *inst/doc/BinomialInference.Rnw
+826d3651105baffdbfc9902d30e63615 *inst/doc/BinomialInference.pdf
+384a51b16586df45a759d0c38a88c18e *inst/doc/DiscreteBayes.R
+e9bdefa42b343cd82b586f8f8e02b860 *inst/doc/DiscreteBayes.Rnw
+f5811b75b65290dee7c348c2388e1101 *inst/doc/DiscreteBayes.pdf
+b61baffe5bc484f46bf6a5aff90159a0 *inst/doc/MCMCintro.R
+a54e51e3b4ebddad37767eec2781aa43 *inst/doc/MCMCintro.Rnw
+1665ebdad2a822a929b067e94e40c582 *inst/doc/MCMCintro.pdf
+86418aeedc5bcb3fa4b6b17b3c21a9a9 *inst/doc/MultilevelModeling.R
+0f1c304dc6d3d2b8530da418aebcd5b6 *inst/doc/MultilevelModeling.Rnw
+10eb761ac67724d1f9d0b8176ca3fa10 *inst/doc/MultilevelModeling.pdf
+8f7fb93228418983045dd52f2d358452 *man/achievement.Rd
+a6c623a82aa85c17856dd2666d3d0736 *man/baseball.1964.Rd
+95404fedc27369a2624fce0a3f71261d *man/bayes.influence.Rd
+47457c19b86ef33554d6dcc6637761b8 *man/bayes.model.selection.Rd
+0f9827e1f11fc86e62e15826a8670211 *man/bayes.probit.Rd
+d1dae82d2eebf269c03257df9c0b3c00 *man/bayesresiduals.Rd
+ac894f9536885f5311b2fcb539100d4f *man/bermuda.grass.Rd
+9939455bf0b22f9339e57091c7029406 *man/beta.select.Rd
+acb8ecb3365e955a62ab6c6366c41ec2 *man/betabinexch.Rd
+7328387324a39f86c1cf840a15195be3 *man/betabinexch0.Rd
+bd04485c6a7d9831b14e0aa4e45b91ec *man/bfexch.Rd
+741ebb63d7f8206473d5d5052b7ca2bf *man/bfindep.Rd
+7c79a51906a9e6020f7e764deafb7344 *man/binomial.beta.mix.Rd
+112df017dc69367fa8c85866b67e580d *man/birdextinct.Rd
+2dc8ca2ed1cabc661b68e6ba340357ba *man/birthweight.Rd
+c288c219ca4a2423eadd5a21ed76380d *man/blinreg.Rd
+521efdd428f3c76e3d2289b7defe7cad *man/blinregexpected.Rd
+eeed8acad6dc33cf5046fe0c93863697 *man/blinregpred.Rd
+8a198bd73d7aca180f74abee802086f9 *man/bprobit.probs.Rd
+436fe7daa7b6ba88a37e4e28d894447c *man/bradley.terry.post.Rd
+70d6bc53c7f4650728cfeb4c8252d2db *man/breastcancer.Rd
+7968b7675430c1d95a9d223e5128537c *man/calculus.grades.Rd
+bab6566d9e07fc2a7b526cf9a4be6b7b *man/cancermortality.Rd
+7591df0a4b9313dcb8859c8534d05ce1 *man/careertraj.setup.Rd
+cf2644b51f4325e9781f31ff9a7708ee *man/cauchyerrorpost.Rd
+d7f42007c59fe4341b52cdd83ae0b507 *man/chemotherapy.Rd
+987d38ca3c3ecf2c61e981daf912282e *man/ctable.Rd
+9856b5ef87bd99e7f1b77902d586cca8 *man/darwin.Rd
+35edfd64ff3a9b050264329e89474550 *man/discint.Rd
+e5adfd73c938b6b36e5c671a2a444f7b *man/discrete.bayes.2.Rd
+4da611d3037a641cb306216844fbf403 *man/discrete.bayes.Rd
+e60cd961203908c5bfac89227584c695 *man/dmnorm.Rd
+59032c9e6f6e1b05aca2851d1cbfefa1 *man/dmt.Rd
+db5de545003143bb92477b8a463a9dea *man/donner.Rd
+e791548e19fd5f227fcf9a46b1849a6a *man/election.2008.Rd
+7e8e16dafa2760a47e16a324adb8b3cd *man/election.Rd
+a3f7da4354c2cbe909ae46afef1973d4 *man/footballscores.Rd
+260c965b30d96a3cd167821d0468114f *man/gibbs.Rd
+daea7b077164d43ac6af387767f28f2f *man/groupeddatapost.Rd
+76286c46ddc90ad304a378361df2dbf6 *man/hearttransplants.Rd
+5b17b55eb34da6d51536b23add1a345d *man/hiergibbs.Rd
+77633de22a306277d5e0dc6baa027753 *man/histprior.Rd
+7db9b1ad64fa746febee974383270dc9 *man/howardprior.Rd
+45a22f1553ba3a3f264af941fc053bbd *man/impsampling.Rd
+b4aa00eed3f17258cfa7a82d52e32156 *man/indepmetrop.Rd
+cb6db1fff92b9a5547add81c4f585635 *man/iowagpa.Rd
+477897cc0eb616e22c66bf161940a018 *man/jeter2004.Rd
+25b156b6e144792413224a0da87c0d51 *man/laplace.Rd
+f31ed993b7e2ccc77224226050445267 *man/lbinorm.Rd
+8ef0980f0ea876dd39bef7da79d7453b *man/logctablepost.Rd
+4810914878cf996404def78f3690325c *man/logisticpost.Rd
+b8ed19c915bbf93a925bdf12c58760f5 *man/logpoissgamma.Rd
+389efd0a7cb34997b4039ced1a2ac66d *man/logpoissnormal.Rd
+ddcdac3a05b4a1310543ffe12723b703 *man/marathontimes.Rd
+db26556d6026b632ec8e17bea1ddb3a6 *man/mnormt.onesided.Rd
+a1ef4e15664322b76d7dd634cb85ba63 *man/mnormt.twosided.Rd
+56b4b4620882216db02cb9501d9ece21 *man/mycontour.Rd
+b52d6517db4f0411db77ad2de7ca1501 *man/normal.normal.mix.Rd
+4b50e97c7714d0c7237518d4ebdbde00 *man/normal.select.Rd
+28b1e9e6c1f1d692de6834ed1ecd1a1e *man/normchi2post.Rd
+f5eacf996b7bc78875fb8e93967e5930 *man/normnormexch.Rd
+f7b96af216164e1d5e7db18712c8a7cc *man/normpostpred.Rd
+366dbf76a09c16f9dfe9ce7dfaf6b3e9 *man/normpostsim.Rd
+33d1f0c75316daf91bf2cb8fe5f1a52a *man/ordergibbs.Rd
+cb82cc97aeeaf3eaca0f31f55287699c *man/pbetap.Rd
+4f045a30c98c061b70e6a98e747da395 *man/pbetat.Rd
+be1df2c91571ffd3f3844c85e6b76799 *man/pdisc.Rd
+9d51d36a0a4d2b0610ccee15a7ac51b4 *man/pdiscp.Rd
+1dfa3f121e491c34a83a334182241780 *man/poissgamexch.Rd
+5c59c405ca01d486cc70c9f65ad6db3d *man/poisson.gamma.mix.Rd
+5873bd04f763532aabd029180b753beb *man/predplot.Rd
+26a5b070f21e29d1ec8924989a587c57 *man/prior.two.parameters.Rd
+2d66fca7f0d417bb681b06c99699bfb1 *man/puffin.Rd
+9ba868961666d59e79f72bc7611f834f *man/rdirichlet.Rd
+8803efdf9ffbb95e128368c78ff45f6c *man/reg.gprior.post.Rd
+7fdf23b6e09335b8546c836bc83400c7 *man/regroup.Rd
+8a4d2dacd2a66a4d96b00cba6e1912a8 *man/rejectsampling.Rd
+198a4a8738b78ada09d0b6e7ee9b82fb *man/rigamma.Rd
+6f3a3ce86cb20b6a976a139c3c7efaea *man/rmnorm.Rd
+1b2d7ca8a682d564d7570e8641936870 *man/rmt.Rd
+a9af2e2e3dacb4cfd04516cf56a94659 *man/robustt.Rd
+baf44118494f8d9d2e3132bf0afeb76c *man/rtruncated.Rd
+5618d9a3663deabcd834e8c8545f15ed *man/rwmetrop.Rd
+6e0fdd4fd186bc8a74dc9a4cb8e37ca4 *man/schmidt.Rd
+8e4c28a99edffdad4d3a9205c066ba59 *man/simcontour.Rd
+a0efa6c49708f3dec10e0843937f5fee *man/sir.Rd
+a41662fd0a81804580c5614e90b77a83 *man/sluggerdata.Rd
+cf66ae7ee464b5d005d8167ad973630e *man/soccergoals.Rd
+638da9adf334fdf0ab868c6a8de0648f *man/stanfordheart.Rd
+1ce84b40ed14d4b31f3ed6fd10cc2189 *man/strikeout.Rd
+64a94f93cc063d58396b17be9b7aa832 *man/studentdata.Rd
+1389ec48ca8815dd817b6cd7c2deafe0 *man/transplantpost.Rd
+9d8fd893772589b7975ea9b1dbe890bd *man/triplot.Rd
+c9a68349e3c5473b245389b6b89a3a9d *man/weibullregpost.Rd
+545756224a285ec8e218d653ea4fce94 *vignettes/BayesFactors.Rnw
+88e5197769d40875d240982440e7b893 *vignettes/BinomialInference.Rnw
+e9bdefa42b343cd82b586f8f8e02b860 *vignettes/DiscreteBayes.Rnw
+a54e51e3b4ebddad37767eec2781aa43 *vignettes/MCMCintro.Rnw
+0f1c304dc6d3d2b8530da418aebcd5b6 *vignettes/MultilevelModeling.Rnw
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100644
index 0000000..d75f824
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1 @@
+exportPattern("^[[:alpha:]]+")
diff --git a/R/bayes.influence.R b/R/bayes.influence.R
new file mode 100644
index 0000000..9d56cc1
--- /dev/null
+++ b/R/bayes.influence.R
@@ -0,0 +1,20 @@
+bayes.influence=function(theta,data)
+{
+y=data[,1]; n=data[,2]
+N=length(y)
+summary=quantile(theta[,2],c(.05,.5,.95))
+summary.obs=array(0,c(N,3))
+K=exp(theta[,2])
+eta=exp(theta[,1])/(1+exp(theta[,1]))
+m=length(K)
+
+for (i in 1:N)
+{
+ weight=exp(lbeta(K*eta,K*(1-eta))-lbeta(K*eta+y[i],K*(1-eta)+n[i]-y[i]))
+ probs=weight/sum(weight)
+ indices=sample(1:m,size=m,prob=probs,replace=TRUE)
+ theta.s=theta[indices,]
+ summary.obs[i,]=quantile(theta.s[,2],c(.05,.5,.95))
+}
+return(list(summary=summary,summary.obs=summary.obs))
+}
diff --git a/R/bayes.model.selection.R b/R/bayes.model.selection.R
new file mode 100644
index 0000000..023d843
--- /dev/null
+++ b/R/bayes.model.selection.R
@@ -0,0 +1,51 @@
+bayes.model.selection=function (y, X, c, constant = TRUE)
+{
+ base2 = function(s, k) {
+ r = rep(0, k)
+ for (j in seq(k, 1, by = -1)) {
+ r[j] = floor(s/(2^(j - 1)))
+ s = s - r[j] * (2^(j - 1))
+ }
+ return(r)
+ }
+ regpost.mod = function(theta, stuff) {
+ y = stuff$y
+ X = stuff$X
+ c = stuff$c
+ beta = theta[-length(theta)]
+ sigma = exp(theta[length(theta)])
+ if (length(beta) > 1)
+ loglike = sum(dnorm(y, mean = X %*% as.vector(beta),
+ sd = sigma, log = TRUE))
+ else loglike = sum(dnorm(y, mean = X * beta, sd = sigma,
+ log = TRUE))
+ logprior = dmnorm(beta, mean = 0 * beta, varcov = c *
+ sigma^2 * solve(t(X) %*% X), log = TRUE)
+ return(loglike + logprior)
+ }
+ require(LearnBayes)
+ X = as.matrix(X)
+ if (constant == FALSE)
+ X = cbind(1, X)
+ p = dim(X)[2] - 1
+ GAM = array(TRUE, c(2^p, p + 1))
+ for (k in 1:(2^p)) GAM[k, ] = as.logical(c(1, base2(k - 1,
+ p)))
+ gof = rep(0, 2^p)
+ converge = rep(TRUE, 2^p)
+ for (j in 1:2^p) {
+ X0 = X[, GAM[j, ]]
+ fit = lm(y ~ 0 + X0)
+ beta = fit$coef
+ s = sqrt(sum(fit$residuals^2)/fit$df.residual)
+ theta = c(beta, log(s))
+ S = list(X = X0, y = y, c = c)
+ fit = laplace(regpost.mod, theta, S)
+ gof[j] = fit$int
+ converge[j] = fit$converge
+ }
+ Prob=exp(gof-max(gof))/sum(exp(gof-max(gof)))
+ mod.prob=data.frame(GAM[, -1], round(gof,2), round(Prob,5))
+ names(mod.prob)=c(dimnames(X)[[2]][-1],"log.m","Prob")
+ return(list(mod.prob=mod.prob, converge = converge))
+}
diff --git a/R/bayes.probit.R b/R/bayes.probit.R
new file mode 100644
index 0000000..39aa65c
--- /dev/null
+++ b/R/bayes.probit.R
@@ -0,0 +1,38 @@
+bayes.probit=function (y, X, m, prior = list(beta = 0, P = 0))
+{
+ rtruncated = function(n, lo, hi, pf, qf, ...) qf(pf(lo, ...) +
+ runif(n) * (pf(hi, ...) - pf(lo, ...)), ...)
+ if (sum(prior$P)==0) log.marg=NULL
+ beta0 = prior$beta
+ BI = prior$P
+ N = length(y)
+ fit = glm(y ~ X - 1, family = binomial(link = probit))
+ beta.s = fit$coef
+ p = length(beta.s)
+ beta = array(beta.s, c(p, 1))
+ beta0 = array(beta0, c(p, 1))
+ BI = array(BI, c(p, p))
+ Mb = array(0, dim = c(m, p))
+ lo = c(-Inf, 0)
+ hi = c(0, Inf)
+ LO = lo[y + 1]
+ HI = hi[y + 1]
+ postvar=solve(BI + t(X) %*% X)
+ aa = chol(postvar)
+ BIbeta0 = BI %*% beta0
+ post.ord=0
+ for (i in 1:m) {
+ z = rtruncated(N, LO, HI, pnorm, qnorm, X %*% beta, 1)
+ mn = solve(BI + t(X) %*% X, BIbeta0 + t(X) %*% z)
+ beta = t(aa) %*% array(rnorm(p), c(p, 1)) + mn
+ post.ord=post.ord+dmnorm(beta.s,mn,postvar)
+ Mb[i, ] = beta
+ }
+ if (sum(BI)>0)
+ {
+ log.f=sum(y*log(fit$fitted)+(1-y)*log(1-fit$fitted))
+ log.g=dmnorm(beta.s,beta0,solve(BI),log=TRUE)
+ log.marg=log.f+log.g-log(post.ord/m)
+ }
+ return(list(beta=Mb,log.marg=log.marg))
+}
diff --git a/R/bayesresiduals.R b/R/bayesresiduals.R
new file mode 100644
index 0000000..3673c7c
--- /dev/null
+++ b/R/bayesresiduals.R
@@ -0,0 +1,14 @@
+bayesresiduals=function(lmfit,post,k)
+{
+ehat=lmfit$residuals
+h=hat(model.matrix(lmfit))
+
+prob=0*ehat
+for (i in 1:length(prob))
+{
+z1=(k-ehat[i]/post$sigma)/sqrt(h[i])
+z2=(-k-ehat[i]/post$sigma)/sqrt(h[i])
+prob[i]=mean(1-pnorm(z1)+pnorm(z2))
+}
+return(prob)
+}
\ No newline at end of file
diff --git a/R/beta.select.R b/R/beta.select.R
new file mode 100644
index 0000000..812628e
--- /dev/null
+++ b/R/beta.select.R
@@ -0,0 +1,32 @@
+beta.select=function(quantile1,quantile2)
+{
+betaprior1=function(K,x,p)
+# suppose one is given a beta(K*m, K*(1-m)) prior
+# where the pth quantile is given by x
+# function outputs the prior mean m
+{
+m.lo=0; m.hi=1; flag=0
+while(flag==0)
+{
+m0=(m.lo+m.hi)/2
+p0=pbeta(x,K*m0,K*(1-m0))
+if(p0<p) m.hi=m0 else m.lo=m0
+if(abs(p0-p)<.0001) flag=1
+}
+return(m0)
+}
+
+p1=quantile1$p; x1=quantile1$x
+p2=quantile2$p; x2=quantile2$x
+
+logK=seq(-3,8,length=100); K=exp(logK)
+m=sapply(K,betaprior1,x1,p1)
+
+prob2=pbeta(x2,K*m, K*(1-m))
+ind=((prob2>0)&(prob2<1))
+app=approx(prob2[ind],logK[ind],p2)
+K0=exp(app$y)
+m0=betaprior1(K0,x1,p1)
+
+return(round(K0*c(m0,(1-m0)),2))
+}
\ No newline at end of file
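A minimal usage sketch for beta.select with illustrative quantiles: find the beta prior whose median is 0.30 and whose 90th percentile is 0.50.

library(LearnBayes)
beta.select(list(p=.5, x=.30), list(p=.9, x=.50))   # rounded shape parameters c(a, b)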
diff --git a/R/betabinexch.R b/R/betabinexch.R
new file mode 100644
index 0000000..8c4eab6
--- /dev/null
+++ b/R/betabinexch.R
@@ -0,0 +1,16 @@
+betabinexch=function (theta, data)
+{
+ eta = exp(theta[1])/(1 + exp(theta[1]))
+ K = exp(theta[2])
+ y = data[, 1]
+ n = data[, 2]
+ N = length(y)
+
+ logf=function(y,n,K,eta)
+ lbeta(K * eta + y, K * (1 - eta) + n - y)-lbeta(K * eta, K * (1 - eta))
+
+ val=sum(logf(y,n,K,eta))
+
+ val = val + theta[2] - 2 * log(1 + exp(theta[2]))
+ return(val)
+}
diff --git a/R/betabinexch0.R b/R/betabinexch0.R
new file mode 100644
index 0000000..bc536e7
--- /dev/null
+++ b/R/betabinexch0.R
@@ -0,0 +1,16 @@
+betabinexch0=function (theta, data)
+{
+ eta = theta[1]
+ K = theta[2]
+ y = data[, 1]
+ n = data[, 2]
+ N = length(y)
+
+ logf=function(y,n,K,eta)
+ lbeta(K * eta + y, K * (1 - eta) + n - y)-lbeta(K * eta, K * (1 - eta))
+
+ val=sum(logf(y,n,K,eta))
+
+ val = val - 2 * log(1 + K) - log(eta) - log(1 - eta)
+ return(val)
+}
diff --git a/R/bfexch.R b/R/bfexch.R
new file mode 100644
index 0000000..d32a087
--- /dev/null
+++ b/R/bfexch.R
@@ -0,0 +1,13 @@
+bfexch=function (theta, datapar)
+{
+ y = datapar$data[, 1]
+ n = datapar$data[, 2]
+ K = datapar$K
+ eta = exp(theta)/(1 + exp(theta))
+
+ logf=function(K,eta,y,n)
+ lbeta(K*eta+y, K*(1-eta)+n-y)-lbeta(K*eta, K*(1-eta))
+
+ sum(logf(K,eta,y,n)) + log(eta * (1 - eta))-
+ lbeta(sum(y) + 1, sum(n - y) + 1)
+}
diff --git a/R/bfindep.R b/R/bfindep.R
new file mode 100644
index 0000000..9085429
--- /dev/null
+++ b/R/bfindep.R
@@ -0,0 +1,46 @@
+bfindep=function(y,K,m)
+{
+# compute Bayes factor against independence
+# using Albert and Gupta independence priors
+# ymat - I x J matrix
+# K - Dirichlet precision parameter
+# m - number of iterations
+
+rdirichlet=function (n, alpha)
+{
+ l <- length(alpha)
+ x <- matrix(rgamma(l * n, alpha), ncol = l, byrow = TRUE)
+ sm <- x %*% rep(1, l)
+ return(x/as.vector(sm))
+}
+
+ldirichlet=function(alpha)
+{
+# log dirichlet function
+# for multiple values stored in matrix alpha
+return(rowSums(lgamma(alpha))-lgamma(rowSums(alpha)))
+}
+
+yc=colSums(y); yr=rowSums(y); n=sum(yc)
+d=dim(y); I=d[1]; J=d[2]
+
+etaA=rdirichlet(m,yr+1)
+etaB=rdirichlet(m,yc+1)
+
+Keta=c(); KetaY=c()
+for (i in 1:I)
+{
+for (j in 1:J)
+{
+Keta=cbind(Keta,K*etaA[,i]*etaB[,j])
+KetaY=cbind(KetaY,K*etaA[,i]*etaB[,j]+y[i,j])
+}}
+
+logint=ldirichlet(KetaY)-ldirichlet(Keta)
+for (i in 1:I) logint=logint-yr[i]*log(etaA[,i])
+for (j in 1:J) logint=logint-yc[j]*log(etaB[,j])
+
+int=exp(logint)
+
+return(list(bf=mean(int),nse=sd(int)/sqrt(m)))
+}
diff --git a/R/binomial.beta.mix.R b/R/binomial.beta.mix.R
new file mode 100644
index 0000000..31868c8
--- /dev/null
+++ b/R/binomial.beta.mix.R
@@ -0,0 +1,13 @@
+binomial.beta.mix=function(probs,betapar,data)
+{
+N=length(probs)
+s=data[1]; f=data[2]
+post.betapar=betapar+outer(rep(1,N),data)
+p=post.betapar[,1]/(post.betapar[,1]+post.betapar[,2])
+m.prob=exp(dbinom(s,size=s+f,prob=p,log=TRUE)+
+ dbeta(p,betapar[,1],betapar[,2],log=TRUE) -
+ dbeta(p,post.betapar[,1],post.betapar[,2],log=TRUE))
+
+post.probs=probs*m.prob/sum(probs*m.prob)
+return(list(probs=post.probs,betapar=post.betapar))
+}
\ No newline at end of file
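A usage sketch with an assumed two-component mixture prior: update the mixing weights and beta parameters on 12 successes and 8 failures.

library(LearnBayes)
probs   = c(.5, .5)                      # prior mixing weights
betapar = rbind(c(10, 20), c(20, 10))    # one row of beta parameters per component
binomial.beta.mix(probs, betapar, data=c(12, 8))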
diff --git a/R/blinreg.R b/R/blinreg.R
new file mode 100644
index 0000000..8b44f04
--- /dev/null
+++ b/R/blinreg.R
@@ -0,0 +1,32 @@
+blinreg=function (y, X, m, prior=NULL)
+{
+ if(length(prior)>0)
+ { c0=prior$c0; beta0=matrix(prior$b0,c(1,length(prior$b)))}
+
+ fit = lm(y ~ 0 + X)
+ bhat = matrix(fit$coef, c(1, fit$rank))
+ s2 = sum(fit$residuals^2)/fit$df.residual
+
+ if(length(prior)==0)
+ {
+ shape = fit$df.residual/2
+ rate = fit$df.residual/2 * s2
+ beta.m = bhat
+ vbeta = vcov(fit)/s2
+ } else
+ {
+ shape = length(y)/2
+ rate = fit$df.residual/2 * s2 +
+ (beta0 - bhat) %*% t(X) %*% X %*% t(beta0 - bhat)/2/(c0+1)
+ beta.m = c0/(c0+1)*(beta0/c0 + bhat)
+ vbeta = vcov(fit)/s2*c0/(c0+1)
+ }
+
+ sigma = sqrt(1/rgamma(m, shape = shape, rate = rate))
+ beta = rmnorm(m, mean=rep(0, fit$rank), varcov=vbeta)
+ beta = array(1, c(m, 1)) %*% beta.m +
+ array(sigma, c(m, fit$rank))*beta
+
+ return(list(beta = beta, sigma = sigma))
+}
+
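A quick sketch of blinreg under the default noninformative prior; the regression data below are simulated purely for illustration (rmnorm comes from this same package).

library(LearnBayes)
X = cbind(1, 1:20)                       # design matrix with intercept
y = 2 + 0.5 * (1:20) + rnorm(20)         # illustrative response
fit = blinreg(y, X, m=1000)
apply(fit$beta, 2, quantile, c(.05, .5, .95))   # posterior summaries of beta
quantile(fit$sigma, c(.05, .5, .95))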
diff --git a/R/blinregexpected.R b/R/blinregexpected.R
new file mode 100644
index 0000000..633275f
--- /dev/null
+++ b/R/blinregexpected.R
@@ -0,0 +1,24 @@
+blinregexpected=function(X1,theta.sample)
+{
+#blinregexpected Produces a simulated sample from the posterior
+# distribution of an expected response for a linear regression model
+# X1 = design matrix of interest
+# theta.sample = output of blinreg function
+
+d=dim(X1)
+n1=d[1]
+m=length(theta.sample$sigma)
+m1=array(0,c(m,n1))
+
+for (j in 1:n1)
+{
+m1[,j]=t(X1[j,]%*%t(theta.sample$beta))
+}
+return(m1)
+}
+
+
+
+
+
+
diff --git a/R/blinregpred.R b/R/blinregpred.R
new file mode 100644
index 0000000..b47c049
--- /dev/null
+++ b/R/blinregpred.R
@@ -0,0 +1,24 @@
+blinregpred=function(X1,theta.sample)
+{
+#blinregpred Produces a simulated sample from the posterior predictive
+# distribution of a linear regression model
+# X1 = design matrix of interest
+# theta.sample = output of blinreg function
+
+d=dim(X1)
+n1=d[1]
+m=length(theta.sample$sigma)
+y1=array(0,c(m,n1))
+
+for (j in 1:n1)
+{
+y1[,j]=t(X1[j,]%*%t(theta.sample$beta))+rnorm(m)*theta.sample$sigma
+}
+return(y1)
+}
+
+
+
+
+
+
diff --git a/R/bprobit.probs.R b/R/bprobit.probs.R
new file mode 100644
index 0000000..05b1e7b
--- /dev/null
+++ b/R/bprobit.probs.R
@@ -0,0 +1,24 @@
+bprobit.probs=function(X1,fit)
+{
+# bprobit.probs Produces a simulated sample from the posterior
+# distribution of a probability of success for a probit regression model
+# X1 = design matrix of interest
+# fit = output of bayes.probit function
+
+d=dim(X1)
+n1=d[1]
+md=dim(fit); m=md[1]
+m1=array(0,c(m,n1))
+
+for (j in 1:n1)
+{
+m1[,j]=pnorm(X1[j,]%*%t(fit))
+}
+return(m1)
+}
+
+
+
+
+
+
diff --git a/R/bradley.terry.post.R b/R/bradley.terry.post.R
new file mode 100644
index 0000000..ab9977d
--- /dev/null
+++ b/R/bradley.terry.post.R
@@ -0,0 +1,12 @@
+bradley.terry.post=function(theta,data)
+{
+N=dim(data)[1]; M=length(theta)
+sigma=exp(theta[M])
+logf=function(k)
+{
+i=data[k,1]; j=data[k,2]
+p=exp(theta[i]-theta[j])/(1+exp(theta[i]-theta[j]))
+data[k,3]*log(p)+data[k,4]*log(1-p)
+}
+sum(sapply(1:N,logf))+sum(dnorm(theta[-M],0,sigma,log=TRUE))
+}
diff --git a/R/careertraj.setup.R b/R/careertraj.setup.R
new file mode 100644
index 0000000..eee00ff
--- /dev/null
+++ b/R/careertraj.setup.R
@@ -0,0 +1,25 @@
+careertraj.setup=function(data)
+{
+Player=data[,1]
+player.names=names(table(Player))
+N=length(player.names)
+m=max(table(Player))
+y=array(0,c(N,m))
+n=0*y
+x=0*y
+T=rep(0,N)
+for (i in 1:N)
+{
+ data1=data[Player==player.names[i],]
+ nk=dim(data1)
+ ni=nk[1]
+ for (j in 1:ni)
+ {
+ y[i,j]=data1[j,10]
+ n[i,j]=data1[j,5]-data1[j,13]
+ x[i,j]=data1[j,3]
+ T[i]=T[i]+(n[i,j]>0)
+ }
+}
+return(list(player.names=player.names,y=y,n=n,x=x,T=T,N=N))
+}
\ No newline at end of file
diff --git a/R/cauchyerrorpost.R b/R/cauchyerrorpost.R
new file mode 100644
index 0000000..4ab8bca
--- /dev/null
+++ b/R/cauchyerrorpost.R
@@ -0,0 +1,7 @@
+cauchyerrorpost=function(theta, data)
+{
+logf=function(data,theta)
+ log(dt((data-theta[1])/exp(theta[2]),df=1)/exp(theta[2]))
+
+return(sum(logf(data,theta)))
+}
diff --git a/R/ctable.R b/R/ctable.R
new file mode 100644
index 0000000..b6b9bff
--- /dev/null
+++ b/R/ctable.R
@@ -0,0 +1,29 @@
+ctable=function(y,a)
+#
+# C_TABLE Bayes factor for testing independence in a contingency table.
+# BF=C_TABLE(Y,A) returns the Bayes factor BF against independence in a
+# 2-way contingency table using uniform priors, where Y is a matrix
+# containing the 2-way table, and A is a matrix of prior parameters
+#------------------------
+# Written by Jim Albert
+# albert@bgnet.bgsu.edu
+# November 2004
+#------------------------
+{
+ldirich=function(a)
+{
+val=sum(lgamma(a))-lgamma(sum(a))
+return(val)
+}
+ac=colSums(a); ar=rowSums(a)
+yc=colSums(y); yr=rowSums(y)
+
+d=dim(y); oc=1+0*yc; or=1+0*yr; I=d[1];J=d[2]
+
+lbf=ldirich(c(y)+c(a))+ldirich(ar-(J-1)*or)+ldirich(ac-(I-1)*oc)-
+ ldirich(c(a))-ldirich(yr+ar-(J-1)*or)-ldirich(yc+ac-(I-1)*oc)
+
+bf=exp(lbf)
+return(bf)
+
+}
diff --git a/R/discint.R b/R/discint.R
new file mode 100644
index 0000000..0a9d5bd
--- /dev/null
+++ b/R/discint.R
@@ -0,0 +1,27 @@
+"discint" <-
+function(dist,prob)
+#
+# DISC_INT Computes a highest probability interval for a discrete distribution.
+# LIST=DISCINT(DIST,PROB) gives a list, where LIST.set is the set of values and
+# LIST.prob is the exact probability content EPROB, where DIST=[VALUE,PROBABILITY]
+# is the matrix which contains the discrete distribution and PROB
+# is the probability content desired.
+#------------------------
+# Written by Jim Albert
+# albert@bgnet.bgsu.edu
+# November 2004
+#------------------------
+{
+x=dist[,1]; p=dist[,2]; n=length(x)
+
+sp=sort(p,index.return=TRUE)
+ps=sp$x
+i=sp$ix[seq(n,1,-1)]; ps=p[i]; xs=x[i]
+cp=cumsum(ps)
+ii=1:n
+j=ii[cp>=prob]; j=j[1]
+eprob=cp[j]; set=sort(xs[1:j])
+v=list(prob=eprob,set=set)
+return(v)
+}
+
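A usage sketch for discint on an assumed binomial(10, 0.5) distribution: the shortest set of values with at least 90% probability.

library(LearnBayes)
x = 0:10
p = dbinom(x, size=10, prob=0.5)
discint(cbind(x, p), 0.90)   # returns $set and its exact content $prob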
diff --git a/R/discrete.bayes.2.R b/R/discrete.bayes.2.R
new file mode 100644
index 0000000..c4ac07e
--- /dev/null
+++ b/R/discrete.bayes.2.R
@@ -0,0 +1,26 @@
+discrete.bayes.2=function(df,prior,y=NULL,...)
+{
+like=function(i,...)
+if(is.matrix(y)==TRUE)
+df(y[i,],param1,param2,...) else
+df(y[i],param1,param2,...)
+n.rows=dim(prior)[1]
+n.cols=dim(prior)[2]
+param1=as.numeric(dimnames(prior)[[1]])
+param2=as.numeric(dimnames(prior)[[2]])
+param1=outer(param1,rep(1,n.cols))
+param2=outer(rep(1,n.rows),param2)
+likelihood=1
+if(length(y)>0)
+{
+n=ifelse(is.matrix(y)==FALSE,length(y),dim(y)[1])
+for(j in 1:n)
+likelihood=likelihood*like(j,...)
+}
+product=prior*likelihood
+pred=sum(prior*likelihood)
+prob=prior*likelihood/pred
+obj=list(prob=prob,pred=pred)
+class(obj)<-"bayes2"
+obj
+}
\ No newline at end of file
diff --git a/R/discrete.bayes.R b/R/discrete.bayes.R
new file mode 100644
index 0000000..ecda2af
--- /dev/null
+++ b/R/discrete.bayes.R
@@ -0,0 +1,13 @@
+discrete.bayes=
+function (df, prior, y, ...)
+{
+ param = as.numeric(names(prior))
+ lk=function(j)
+ prod(df(y,param[j],...))
+ likelihood=sapply(1:length(param),lk)
+ pred = sum(prior * likelihood)
+ prob = prior * likelihood/pred
+ obj = list(prob = prob, pred = pred)
+ class(obj) <- "bayes"
+ obj
+}
diff --git a/R/dmnorm.R b/R/dmnorm.R
new file mode 100644
index 0000000..8a69f98
--- /dev/null
+++ b/R/dmnorm.R
@@ -0,0 +1,18 @@
+dmnorm=function (x, mean = rep(0, d), varcov, log = FALSE)
+{
+ d <- if (is.matrix(varcov))
+ ncol(varcov)
+ else 1
+ if (d > 1 & is.vector(x))
+ x <- matrix(x, 1, d)
+ n <- if (d == 1)
+ length(x)
+ else nrow(x)
+ X <- t(matrix(x, nrow = n, ncol = d)) - mean
+ Q <- apply((solve(varcov) %*% X) * X, 2, sum)
+ logDet <- sum(logb(abs(diag(qr(varcov)[[1]]))))
+ logPDF <- as.vector(Q + d * logb(2 * pi) + logDet)/(-2)
+ if (log)
+ logPDF
+ else exp(logPDF)
+}
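A sketch evaluating dmnorm at a single point of an assumed correlated bivariate normal.

library(LearnBayes)
V = matrix(c(1, .5, .5, 1), 2, 2)
dmnorm(c(1, -1), mean=c(0, 0), varcov=V, log=TRUE)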
diff --git a/R/dmt.R b/R/dmt.R
new file mode 100644
index 0000000..a312168
--- /dev/null
+++ b/R/dmt.R
@@ -0,0 +1,22 @@
+dmt=function (x, mean = rep(0, d), S, df = Inf, log = FALSE)
+{
+ if (df == Inf)
+ return(dmnorm(x, mean, S, log = log))
+ d <- if (is.matrix(S))
+ ncol(S)
+ else 1
+ if (d > 1 & is.vector(x))
+ x <- matrix(x, 1, d)
+ n <- if (d == 1)
+ length(x)
+ else nrow(x)
+ X <- t(matrix(x, nrow = n, ncol = d)) - mean
+ Q <- apply((solve(S) %*% X) * X, 2, sum)
+ logDet <- sum(logb(abs(diag(qr(S)$qr))))
+ logPDF <- (lgamma((df + d)/2) - 0.5 * (d * logb(pi * df) +
+ logDet) - lgamma(df/2) - 0.5 * (df + d) * logb(1 + Q/df))
+ if (log)
+ logPDF
+ else exp(logPDF)
+}
+
diff --git a/R/gibbs.R b/R/gibbs.R
new file mode 100644
index 0000000..29c3a38
--- /dev/null
+++ b/R/gibbs.R
@@ -0,0 +1,28 @@
+gibbs=function(logpost,start,m,scale,...)
+{
+p=length(start)
+vth=array(0,dim=c(m,p))
+f0=logpost(start,...)
+arate=array(0,dim=c(1,p))
+
+th0=start
+for (i in 1:m)
+{
+ for (j in 1:p)
+ {
+ th1=th0
+ th1[j]=th0[j]+rnorm(1)*scale[j]
+ f1=logpost(th1,...)
+ u=runif(1)<exp(f1-f0)
+ th0[j]=th1[j]*(u==1)+th0[j]*(u==0)
+ f0=f1*(u==1)+f0*(u==0)
+ vth[i,j]=th0[j];
+ arate[j]=arate[j]+u
+ }
+}
+arate=arate/m
+stuff=list(par=vth,accept=arate)
+return(stuff)
+}
+
+
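A sketch of the Metropolis-within-Gibbs sampler on a toy target, a standard bivariate normal log-density chosen only for illustration.

library(LearnBayes)
lpost = function(theta) -0.5 * sum(theta^2)
fit = gibbs(lpost, start=c(0, 0), m=1000, scale=c(2, 2))
fit$accept                 # per-coordinate acceptance rates
apply(fit$par, 2, mean)    # both near 0 for this target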
diff --git a/R/groupeddatapost.R b/R/groupeddatapost.R
new file mode 100644
index 0000000..0a19c5c
--- /dev/null
+++ b/R/groupeddatapost.R
@@ -0,0 +1,8 @@
+groupeddatapost=function (theta, data)
+{
+ dj=function(f,int.lo,int.hi,mu,sigma)
+ f*log(pnorm(int.hi,mu,sigma)-pnorm(int.lo,mu,sigma))
+ mu = theta[1]
+ sigma = exp(theta[2])
+ sum(dj(data$f,data$int.lo,data$int.hi,mu,sigma))
+}
diff --git a/R/hiergibbs.R b/R/hiergibbs.R
new file mode 100644
index 0000000..570c182
--- /dev/null
+++ b/R/hiergibbs.R
@@ -0,0 +1,59 @@
+hiergibbs=function(data,m)
+{
+###############################################################
+# Implements Gibbs sampling algorithm for posterior of table
+# of means with hierarchical regression prior
+#
+# INPUT
+# data: 40 by 4 matrix where the observed sample means are
+# in column 1, sample sizes are in column 2, and values of
+# two covariates in columns 3 and 4.
+# m: number of cycles of Gibbs sampling
+#
+# OUTPUT
+# a list with
+# -- beta: matrix of simulated values of beta with each row a simulated value
+# -- mu: matrix of simulated values of cell means
+# -- var: vector of simulated values of second-stage variance sigma^2_pi
+###############################################################
+
+y=data[,1] #
+n=data[,2] #
+x1=data[,3] #
+x2=data[,4] # defines variables y,n,x1,x2,a
+X=cbind(1+0*x1,x1,x2) #
+s2=.65^2/n #
+p=3; N=length(y) #
+
+mbeta=array(0,c(m,p)) #
+mmu=array(0,c(m,length(n))) # sets up arrays to store simulated draws
+ms2pi=array(0,c(m,1)) #
+
+######################################## defines prior parameters
+b1=array(c(.55,.018,.033),c(3,1))
+bvar=array(c(8.49e-03,-1.94e-05, -2.88e-04, -1.94e-05, 7.34e-07, -1.52e-06, -2.88e-04,-1.52e-06, 1.71e-05),c(3,3))
+ibvar=solve(bvar)
+s=.02; v=16;
+
+mu=y; s2pi=.006 # starting values of mu and s2pi in Gibbs sampling
+
+for (j in 1:m)
+{
+pvar=solve(ibvar+t(X)%*%X/s2pi) #
+pmean=pvar%*%(ibvar%*%b1+t(X)%*%mu/s2pi) # simulates beta
+beta=t(chol(pvar))%*%array(rnorm(p),c(p,1))+pmean #
+
+s2pi=(sum((mu-X%*%beta)^2)/2+s/2)/rgamma(1,shape=(N+v)/2) # simulates s2pi
+
+postvar=1/(1/s2+1/s2pi) #
+postmean=(y/s2+X%*%beta/s2pi)*postvar # simulates mu
+mu=rnorm(n,postmean,sqrt(postvar)) #
+
+mbeta[j,]=t(beta) #
+mmu[j,]=t(mu) # stores simulated draws
+ms2pi[j]=s2pi #
+}
+
+return(list(beta=mbeta,mu=mmu,var=ms2pi))
+
+}
diff --git a/R/histprior.R b/R/histprior.R
new file mode 100644
index 0000000..49411de
--- /dev/null
+++ b/R/histprior.R
@@ -0,0 +1,11 @@
+histprior=function(p,midpts,prob)
+{
+binwidth=midpts[2]-midpts[1]
+lo=round(10000*(midpts-binwidth/2))/10000
+val=0*p
+for (i in 1:length(p))
+{
+ val[i]=prob[sum(p[i]>=lo)]
+}
+return(val)
+}
\ No newline at end of file
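A sketch of histprior with assumed bin midpoints and heights: evaluate a histogram-style prior for a proportion on a fine grid.

library(LearnBayes)
midpts = seq(.05, .95, by=.1)
prob = c(1, 1, 2, 4, 8, 8, 4, 2, 1, 1) / 32   # bin probabilities
p = seq(.01, .99, by=.01)
plot(p, histprior(p, midpts, prob), type="s", ylab="prior density")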
diff --git a/R/howardprior.R b/R/howardprior.R
new file mode 100644
index 0000000..f1c5a55
--- /dev/null
+++ b/R/howardprior.R
@@ -0,0 +1,14 @@
+howardprior=function (xy, par)
+{
+ alpha = par[1]
+ beta = par[2]
+ gam = par[3]
+ delta = par[4]
+ sigma = par[5]
+ p1 = xy[1]
+ p2 = xy[2]
+ u = log(p1/(1 - p1) * (1 - p2)/p2)/sigma
+ z = -0.5 * u^2 + (alpha - 1) * log(p1) + (beta - 1) * log(1 -
+ p1) + (gam - 1) * log(p2) + (delta - 1) * log(1 - p2)
+ return(z)
+}
diff --git a/R/impsampling.R b/R/impsampling.R
new file mode 100644
index 0000000..9123fed
--- /dev/null
+++ b/R/impsampling.R
@@ -0,0 +1,17 @@
+impsampling=function (logf, tpar, h, n, data)
+{
+ theta = rmt(n, mean = c(tpar$m), S = tpar$var, df = tpar$df)
+
+ lf=matrix(0,c(dim(theta)[1],1))
+ for (j in 1:dim(theta)[1]) lf[j]=logf(theta[j,],data)
+ H=lf
+ for (j in 1:dim(theta)[1]) H[j]=h(theta[j,])
+
+ lp = dmt(theta, mean = c(tpar$m), S = tpar$var, df = tpar$df,
+ log = TRUE)
+ md = max(lf - lp)
+ wt = exp(lf - lp - md)
+ est = sum(wt * H)/sum(wt)
+ SEest = sqrt(sum((H - est)^2 * wt^2))/sum(wt)
+ return(list(est = est, se = SEest, theta = theta, wt = wt))
+}
diff --git a/R/indepmetrop.R b/R/indepmetrop.R
new file mode 100644
index 0000000..77634e0
--- /dev/null
+++ b/R/indepmetrop.R
@@ -0,0 +1,30 @@
+indepmetrop=function (logpost, proposal, start, m, ...)
+{
+ logmultinorm = function(x, m, v) {
+ return(-0.5 * t(x - m) %*% solve(v) %*% (x - m))
+ }
+ pb = length(start)
+ Mpar = array(0, c(m, pb))
+ mu = matrix(proposal$mu)
+ if(diff(dim(mu))>0) mu=t(mu)
+ v = proposal$var
+ a = chol(v)
+ f0 = logpost(start, ...)
+ th0 = matrix(t(start))
+ accept = 0
+ for (i in 1:m) {
+ th1 = mu + t(a) %*% array(rnorm(pb), c(pb, 1))
+ f1 = logpost(t(th1), ...)
+ R = exp(logmultinorm(th0, mu, v) - logmultinorm(th1,
+ mu, v) + f1 - f0)
+ u = runif(1) < R
+ if (u == 1) {
+ th0 = th1
+ f0 = f1
+ }
+ Mpar[i, ] = th0
+ accept = accept + u
+ }
+ accept = accept/m
+ return(list(par = Mpar, accept = accept))
+}
diff --git a/R/laplace.R b/R/laplace.R
new file mode 100644
index 0000000..1392707
--- /dev/null
+++ b/R/laplace.R
@@ -0,0 +1,15 @@
+laplace=function (logpost, mode, ...)
+{
+ options(warn=-1)
+ fit=optim(mode, logpost, gr = NULL, ..., hessian=TRUE,
+ control=list(fnscale=-1))
+ options(warn=0)
+ mode=fit$par
+ h=-solve(fit$hessian)
+ p=length(mode)
+ int = p/2 * log(2 * pi) + 0.5 * log(det(h)) +
+ logpost(mode, ...)
+ stuff = list(mode = mode, var = h, int = int,
+ converge=fit$convergence==0)
+ return(stuff)
+}
\ No newline at end of file
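A sketch of laplace on a toy log-posterior, an independent normal density with assumed means and variances, showing the mode, curvature, and log-integral outputs.

library(LearnBayes)
lpost = function(theta, s) -0.5 * sum((theta - s$m)^2 / s$v)
fit = laplace(lpost, mode=c(0, 0), list(m=c(1, 2), v=c(1, 4)))
fit$mode    # near c(1, 2)
fit$var     # near diag(c(1, 4))
fit$int     # log normalizing constant of the unnormalized density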
diff --git a/R/lbinorm.R b/R/lbinorm.R
new file mode 100644
index 0000000..e8af231
--- /dev/null
+++ b/R/lbinorm.R
@@ -0,0 +1,11 @@
+ lbinorm=function (xy, par)
+{
+ m = par$m
+ v = par$v
+ x = xy[1]
+ y = xy[2]
+ zx = (x - m[1])/sqrt(v[1, 1])
+ zy = (y - m[2])/sqrt(v[2, 2])
+ r = v[1, 2]/sqrt(v[1, 1] * v[2, 2])
+ return(-0.5/(1 - r^2) * (zx^2 - 2 * r * zx * zy + zy^2))
+}
diff --git a/R/logctablepost.R b/R/logctablepost.R
new file mode 100644
index 0000000..db595d6
--- /dev/null
+++ b/R/logctablepost.R
@@ -0,0 +1,14 @@
+logctablepost=function (theta, data)
+{
+ theta1 = theta[1]
+ theta2 = theta[2]
+ s1 = data[1]
+ f1 = data[2]
+ s2 = data[3]
+ f2 = data[4]
+ logitp1 = (theta1 + theta2)/2
+ logitp2 = (theta2 - theta1)/2
+ term1 = s1 * logitp1 - (s1 + f1) * log(1 + exp(logitp1))
+ term2 = s2 * logitp2 - (s2 + f2) * log(1 + exp(logitp2))
+ return(term1 + term2)
+}
diff --git a/R/logisticpost.R b/R/logisticpost.R
new file mode 100644
index 0000000..1f4fba5
--- /dev/null
+++ b/R/logisticpost.R
@@ -0,0 +1,17 @@
+logisticpost=function (beta, data)
+{
+ x = data[, 1]
+ n = data[, 2]
+ y = data[, 3]
+
+ beta0 = beta[1]
+ beta1 = beta[2]
+
+ logf=function(x,n,y,beta0,beta1)
+ { lp = beta0 + beta1 * x
+ p = exp(lp)/(1 + exp(lp))
+ y * log(p) + (n - y) * log(1 - p)
+ }
+
+ return(sum(logf(x,n,y,beta0,beta1)))
+}
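A sketch pairing logisticpost with laplace; the dose-response numbers are illustrative stand-ins.

library(LearnBayes)
data = cbind(x=c(-0.86, -0.30, -0.05, 0.73),   # dose
             n=c(5, 5, 5, 5),                  # trials per dose
             y=c(0, 1, 3, 5))                  # successes per dose
logisticpost(c(0, 1), data)            # log posterior at (beta0, beta1)
laplace(logisticpost, c(0, 1), data)   # approximate posterior mode and variance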
diff --git a/R/logpoissgamma.R b/R/logpoissgamma.R
new file mode 100644
index 0000000..cfddf4d
--- /dev/null
+++ b/R/logpoissgamma.R
@@ -0,0 +1,9 @@
+logpoissgamma=function(theta,datapar)
+{
+y=datapar$data
+npar=datapar$par
+lambda=exp(theta)
+loglike=log(dgamma(lambda,shape=sum(y)+1,rate=length(y)))
+logprior=log(dgamma(lambda,shape=npar[1],rate=npar[2])*lambda)
+return(loglike+logprior)
+}
diff --git a/R/logpoissnormal.R b/R/logpoissnormal.R
new file mode 100644
index 0000000..88e57b1
--- /dev/null
+++ b/R/logpoissnormal.R
@@ -0,0 +1,9 @@
+logpoissnormal=function(theta,datapar)
+{
+y=datapar$data
+npar=datapar$par
+lambda=exp(theta)
+loglike=log(dgamma(lambda,shape=sum(y)+1,scale=1/length(y)))
+logprior=log(dnorm(theta,mean=npar[1],sd=npar[2]))
+return(loglike+logprior)
+}
diff --git a/R/mnormt.onesided.R b/R/mnormt.onesided.R
new file mode 100644
index 0000000..2466841
--- /dev/null
+++ b/R/mnormt.onesided.R
@@ -0,0 +1,28 @@
+mnormt.onesided=function(m0,normpar,data)
+{
+#
+# mnormt.onesided Performs a test that a normal mean is <= certain value.
+# m0 = value to be tested
+# normpar = mean and standard deviation of normal prior on mu
+# data = (sample mean, sample size, known sampling standard deviation)
+
+xbar=data[1]; n=data[2]; s=data[3]
+prior.mean=normpar[1]
+prior.sd=normpar[2]
+prior.var=prior.sd^2
+
+priorH=pnorm(m0,prior.mean,prior.sd)
+priorA=1-priorH
+prior.odds=priorH/priorA
+
+post.precision=1/prior.var+n/s^2
+post.var=1/post.precision
+post.sd=sqrt(post.var)
+post.mean=(xbar*n/s^2+prior.mean/prior.var)/post.precision
+postH=pnorm(m0,post.mean,post.sd)
+postA=1-postH
+post.odds=postH/postA
+BF=post.odds/prior.odds
+
+return(list(BF=BF,prior.odds=prior.odds,post.odds=post.odds,postH=postH))
+}
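A usage sketch with assumed numbers: test H: mu <= 170 under a normal(170, 10) prior, given a sample mean of 176 from n = 25 observations with known sigma = 15.

library(LearnBayes)
mnormt.onesided(m0=170, normpar=c(170, 10), data=c(176, 25, 15))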
diff --git a/R/mnormt.twosided.R b/R/mnormt.twosided.R
new file mode 100644
index 0000000..8d1bcc4
--- /dev/null
+++ b/R/mnormt.twosided.R
@@ -0,0 +1,14 @@
+mnormt.twosided <-
+function (m0, prob, t, data)
+{
+ xbar = data[1]
+ n = data[2]
+ h = data[3]
+ num = 0.5 * log(n) - log(h) - 0.5 * n/h^2 * (xbar - m0)^2
+ den = -0.5 * log(h^2/n + t^2) - 0.5/(h^2/n + t^2) * (xbar -
+ m0)^2
+ bf = exp(num - den)
+ post = prob * bf/(prob * bf + 1 - prob)
+ return(list(bf = bf, post = post))
+}
+
diff --git a/R/mycontour.R b/R/mycontour.R
new file mode 100644
index 0000000..993e413
--- /dev/null
+++ b/R/mycontour.R
@@ -0,0 +1,23 @@
+mycontour=function (logf, limits, data, ...)
+{
+LOGF=function(theta, data)
+ {
+ if(is.matrix(theta)==TRUE){
+ val=matrix(0,c(dim(theta)[1],1))
+ for (j in 1:dim(theta)[1])
+ val[j]=logf(theta[j,],data)
+ }
+ else val=logf(theta,data)
+ return(val)
+ }
+ ng = 50
+ x0 = seq(limits[1], limits[2], len = ng)
+ y0 = seq(limits[3], limits[4], len = ng)
+ X = outer(x0, rep(1, ng))
+ Y = outer(rep(1, ng), y0)
+ n2 = ng^2
+ Z = LOGF(cbind(X[1:n2], Y[1:n2]), data)
+ Z = Z - max(Z)
+ Z = matrix(Z, c(ng, ng))
+ contour(x0, y0, Z, levels = seq(-6.9, 0, by = 2.3), lwd = 2, ...)
+}
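A sketch pairing mycontour with lbinorm (added earlier in this commit): contour the log-density of an assumed correlated bivariate normal.

library(LearnBayes)
param = list(m=c(0, 0), v=matrix(c(1, .6, .6, 1), 2, 2))
mycontour(lbinorm, c(-4, 4, -4, 4), param, xlab="x", ylab="y")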
diff --git a/R/normal.normal.mix.R b/R/normal.normal.mix.R
new file mode 100644
index 0000000..b5221c0
--- /dev/null
+++ b/R/normal.normal.mix.R
@@ -0,0 +1,14 @@
+normal.normal.mix=function(probs,normalpar,data)
+{
+N=length(probs)
+y=data[1]; sigma2=data[2]
+prior.mean=normalpar[,1]
+prior.var=normalpar[,2]
+post.precision=1/prior.var+1/sigma2
+post.var=1/post.precision
+post.mean=(y/sigma2+prior.mean/prior.var)/post.precision
+
+m.prob=dnorm(y,prior.mean,sqrt(sigma2+prior.var))
+post.probs=probs*m.prob/sum(probs*m.prob)
+return(list(probs=post.probs,normalpar=cbind(post.mean,post.var)))
+}
\ No newline at end of file
diff --git a/R/normal.select.R b/R/normal.select.R
new file mode 100644
index 0000000..6ba0c56
--- /dev/null
+++ b/R/normal.select.R
@@ -0,0 +1,12 @@
+normal.select=function (quantile1, quantile2)
+{
+ p1 = quantile1$p
+ x1 = quantile1$x
+ p2 = quantile2$p
+ x2 = quantile2$x
+
+ sigma=(x1-x2)/diff(qnorm(c(p2,p1)))
+ mu=x1-sigma*qnorm(p1)
+
+ return(list(mu=mu,sigma=sigma))
+}
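A usage sketch with illustrative quantiles: find the normal prior whose median is 100 and whose 95th percentile is 120.

library(LearnBayes)
normal.select(list(p=.5, x=100), list(p=.95, x=120))   # returns $mu and $sigma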
diff --git a/R/normchi2post.R b/R/normchi2post.R
new file mode 100644
index 0000000..27793a3
--- /dev/null
+++ b/R/normchi2post.R
@@ -0,0 +1,12 @@
+normchi2post=function(theta,data)
+{
+ mu = theta[1]
+ sig2 = theta[2]
+
+ logf=function(y,mu,sig2)
+ -(y-mu)^2/2/sig2-log(sig2)/2
+
+ z=sum(logf(data,mu,sig2))
+ z = z - log(sig2)
+ return(z)
+}
diff --git a/R/normnormexch.R b/R/normnormexch.R
new file mode 100644
index 0000000..bf582cb
--- /dev/null
+++ b/R/normnormexch.R
@@ -0,0 +1,11 @@
+normnormexch=function(theta,data){
+ y=data[,1]
+ sigma2=data[,2]
+ mu=theta[1]
+ tau=exp(theta[2])
+
+ logf=function(mu,tau,y,sigma2)
+ dnorm(y,mu,sqrt(sigma2+tau^2),log=TRUE)
+
+ sum(logf(mu,tau,y,sigma2))+log(tau)
+}
\ No newline at end of file
diff --git a/R/normpostpred.R b/R/normpostpred.R
new file mode 100644
index 0000000..f613e8b
--- /dev/null
+++ b/R/normpostpred.R
@@ -0,0 +1,12 @@
+normpostpred=function(parameters,sample.size,f=min)
+{
+ normalsample=function(j,parameters,sample.size)
+ rnorm(sample.size,mean=parameters$mu[j],sd=sqrt(parameters$sigma2[j]))
+
+ m=length(parameters$mu)
+ post.pred.samples=sapply(1:m,normalsample,parameters,sample.size)
+
+ stat=apply(post.pred.samples,2,f)
+
+ return(stat)
+}
\ No newline at end of file
diff --git a/R/normpostsim.R b/R/normpostsim.R
new file mode 100644
index 0000000..af87385
--- /dev/null
+++ b/R/normpostsim.R
@@ -0,0 +1,40 @@
+normpostsim=function (data, prior=NULL, m = 1000)
+{
+
+if (length(prior)==0)
+{
+ S = sum((data - mean(data))^2)
+ xbar = mean(data)
+ n = length(data)
+ SIGMA2 = S/rchisq(m, n - 1)
+ MU = rnorm(m, mean = xbar, sd = sqrt(SIGMA2)/sqrt(n))
+} else
+{
+ a=prior$sigma2[1]
+ b=prior$sigma2[2]
+ mu0=prior$mu[1]
+ tau2=prior$mu[2]
+ S = sum((data - mean(data))^2)
+ xbar = mean(data)
+ n = length(data)
+
+ SIGMA2=rep(0,m)
+ MU=rep(0,m)
+ sigma2=S/n
+ for (j in 1:m)
+ {
+ prec=n/sigma2+1/tau2
+ mu1=(xbar*n/sigma2+mu0/tau2)/prec
+ v1=1/prec
+ mu=rnorm(1,mu1,sqrt(v1))
+
+ a1=a+n/2
+ b1=b+sum((data-mu)^2)/2
+ sigma2=rigamma(1,a1,b1)
+
+ SIGMA2[j]=sigma2
+ MU[j]=mu
+ }
+}
+ return(list(mu = MU, sigma2 = SIGMA2))
+}
diff --git a/R/ordergibbs.R b/R/ordergibbs.R
new file mode 100644
index 0000000..8dba9d9
--- /dev/null
+++ b/R/ordergibbs.R
@@ -0,0 +1,75 @@
+ordergibbs=function(data,m)
+{
+# implements Gibbs sampling for table of means
+# with prior belief in order restriction
+# input: data = data matrix with two columns [sample mean, sample size]
+# m = number of iterations of Gibbs sampling
+# output: matrix of simulated values of means where each row
+# represents one simulated draw
+
+#####################################################
+rnormt=function(n,mu,sigma,lo,hi)
+{
+# simulates n random variates from a normal(mu,sigma)
+# distribution truncated on the interval (lo, hi)
+
+p=pnorm(c(lo,hi),mu,sigma)
+return(mu+sigma*qnorm(runif(n)*(p[2]-p[1])+p[1]))
+}
+#####################################################
+
+y=data[,1] # sample means
+n=data[,2] # sample sizes
+s=.65 # assumed value of sigma for this example
+I=8; J=5 # number of rows and columns in matrix
+
+# placing vectors y, n into matrices
+
+y=t(array(y,c(J,I)))
+n=t(array(n,c(J,I)))
+y=y[seq(8,1,by=-1),]
+n=n[seq(8,1,by=-1),]
+
+# setting up the matrix of values of the population means mu
+# two rows and two columns are added that help in the simulation
+# of individual values of mu from truncated normal distributions
+
+mu0=Inf*array(1,c(I+2,J+2))
+mu0[1,]=-mu0[1,]
+mu0[,1]=-mu0[,1]
+mu0[1,1]=-mu0[1,1]
+mu=mu0
+
+# starting value of mu that satisfies order restriction
+
+m1=c(2.64,3.02,3.02,3.07,3.34)
+m2=c(2.37,2.63,2.74,2.76,2.91)
+m3=c(2.37,2.47,2.64,2.66,2.66)
+m4=c(2.31,2.33,2.33,2.33,2.33)
+m5=c(2.04,2.11,2.11,2.33,2.33)
+m6=c(1.85,1.85,1.85,2.10,2.10)
+m7=c(1.85,1.85,1.85,1.88,1.88)
+m8=c(1.59,1.59,1.59,1.67,1.88)
+muint=rbind(m8,m7,m6,m5,m4,m3,m2,m1)
+mu[2:(I+1),2:(J+1)]=muint
+
+MU=array(0,c(m,I*J)) # array MU stores simulated values of mu
+
+##################### main loop #######################
+for (k in 1:m)
+{
+ for (i in 2:(I+1))
+ {
+ for (j in 2:(J+1))
+ {
+ lo=max(c(mu[i-1,j],mu[i,j-1]))
+ hi=min(c(mu[i+1,j],mu[i,j+1]))
+ mu[i,j]=rnormt(1,y[i-1,j-1],s/sqrt(n[i-1,j-1]),lo,hi)
+ }
+ }
+ mm=mu[2:(I+1),2:(J+1)]
+ MU[k,]=array(mm,c(1,I*J))
+}
+
+return(MU)
+}
diff --git a/R/pbetap.R b/R/pbetap.R
new file mode 100644
index 0000000..294c042
--- /dev/null
+++ b/R/pbetap.R
@@ -0,0 +1,23 @@
+pbetap=function(ab,n,s)
+{
+#
+# PBETAP Predictive distribution of number of successes in future binomial
+# experiment with a beta prior. PRED = PBETAP(AB,N,S) returns a vector
+# PRED of predictive probabilities, where AB is the vector of beta
+# parameters, N is the future binomial sample size, and S is the vector of
+# numbers of successes for which predictive probabilities will be computed.
+#------------------------
+# Written by Jim Albert
+# albert@bgnet.bgsu.edu
+# November 2004
+#------------------------
+
+pred=0*s;
+a=ab[1]; b=ab[2];
+
+lcon=lgamma(n+1)-lgamma(s+1)-lgamma(n-s+1);
+
+pred=exp(lcon+lbeta(s+a,n-s+b)-lbeta(a,b));
+
+return(pred)
+}
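
A quick sanity check with made-up prior parameters: summed over all possible future success counts, the predictive probabilities should equal one.

library(LearnBayes)
pred <- pbetap(c(3, 7), 10, 0:10)   # beta(3,7) prior, future sample size n = 10
sum(pred)                           # equals 1 up to rounding
round(pred, 3)
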
diff --git a/R/pbetat.R b/R/pbetat.R
new file mode 100644
index 0000000..a1b1900
--- /dev/null
+++ b/R/pbetat.R
@@ -0,0 +1,27 @@
+pbetat=function(p0,prob,ab,data)
+{
+#
+# PBETAT Performs a test that a proportion is equal to a specific value.
+# PBETAT(P0,PROB,AB,DATA) returns a list containing the Bayes factor and
+# the posterior probability of the hypothesis P=P0, where P0 is the proportion
+# value to be tested, PROB is the prior probability of the hypothesis,
+# AB is the vector of parameters of the beta density under the
+# alternative hypothesis, and DATA is the vector of numbers of
+# successes and failures.
+#------------------------
+# Written by Jim Albert
+# albert at bgnet.bgsu.edu
+# November 2004
+#------------------------
+
+a=ab[1]; b=ab[2]
+s=data[1]; f=data[2]
+
+lbf=s*log(p0)+f*log(1-p0)+lbeta(a,b)-lbeta(a+s,b+f)
+
+bf=exp(lbf)
+post=prob*bf/(prob*bf+1-prob)
+
+return(list(bf=bf,post=post))
+
+}
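
A short sketch with hypothetical inputs showing the returned Bayes factor and posterior probability:

library(LearnBayes)
# hypothetical data: 5 successes and 15 failures; test p = 0.5 with prior
# probability 0.5 on the hypothesis and a beta(10,10) prior otherwise
out <- pbetat(0.5, 0.5, c(10, 10), c(5, 15))
out$bf    # Bayes factor in support of p = 0.5
out$post  # posterior probability that p = 0.5
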
diff --git a/R/pdisc.R b/R/pdisc.R
new file mode 100644
index 0000000..b2e5d12
--- /dev/null
+++ b/R/pdisc.R
@@ -0,0 +1,27 @@
+"pdisc" <-
+function(p,prior,data)
+{
+# PDISC Posterior distribution for a proportion with discrete models.
+# POST = PDISC(P,PRIOR,DATA) returns a vector of posterior probabilities.
+# P is the vector of values of the proportion, PRIOR is the corresponding
+# vector of prior probabilities, and DATA is the vector of data (number of
+# successes and failures in a set of independent Bernoulli trials).
+#------------------------
+# Written by Jim Albert
+# albert at bgnet.bgsu.edu
+# November 2004
+#------------------------
+
+s=data[1]; f=data[2]
+p1=p+.5*(p==0)-.5*(p==1)
+
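+# endpoint handling: p1 nudges p=0 and p=1 off the boundary so log() is
+# finite, and the -999 penalty below zeroes the likelihood at an endpoint
+# contradicted by the data (s>0 rules out p=0, f>0 rules out p=1)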
+like=s*log(p1)+f*log(1-p1)
+like=like*(p>0)*(p<1)-999*((p==0)*(s>0)+(p==1)*(f>0))
+like=exp(like-max(like))
+
+product=like*prior
+post=product/sum(product)
+
+return(post)
+}
+
diff --git a/R/pdiscp.R b/R/pdiscp.R
new file mode 100644
index 0000000..3b41ba1
--- /dev/null
+++ b/R/pdiscp.R
@@ -0,0 +1,25 @@
+"pdiscp" <-
+function(p,probs,n,s)
+{
+#
+# PDISCP Predictive distribution of number of successes in future binomial
+# experiment with a discrete prior. PRED = PDISCP(P,PROBS,N,S) returns
+# a vector PRED of predictive probabilities, where P is the vector of
+# values of the proportion, PROBS is the corresponding vector of
+# probabilities, N is the future binomial sample size, and S is the vector of
+# numbers of successes for which predictive probabilities will be computed.
+#------------------------
+# Written by Jim Albert
+# albert at bgnet.bgsu.edu
+# November 2004
+#------------------------
+
+pred=0*s;
+
+for (i in 1:length(p))
+{
+ pred=pred+probs[i]*dbinom(s,n,p[i]);
+}
+return(pred)
+}
+
diff --git a/R/plot.bayes.R b/R/plot.bayes.R
new file mode 100644
index 0000000..7536113
--- /dev/null
+++ b/R/plot.bayes.R
@@ -0,0 +1,2 @@
+plot.bayes=function(x,...)
+ barplot(x$prob,...)
\ No newline at end of file
diff --git a/R/plot.bayes2.R b/R/plot.bayes2.R
new file mode 100644
index 0000000..8987133
--- /dev/null
+++ b/R/plot.bayes2.R
@@ -0,0 +1,6 @@
+plot.bayes2=function(x,marginal=0,...)
+if(marginal==0)image(as.numeric(dimnames(x$prob)[[1]]),
+as.numeric(dimnames(x$prob)[[2]]),x$prob,
+col=gray(1-(0:32)/32),...) else
+if(marginal==1) barplot(apply(x$prob,1,sum),...) else
+barplot(apply(x$prob,2,sum),...)
\ No newline at end of file
diff --git a/R/poissgamexch.R b/R/poissgamexch.R
new file mode 100644
index 0000000..c36513f
--- /dev/null
+++ b/R/poissgamexch.R
@@ -0,0 +1,17 @@
+poissgamexch=function (theta, datapar)
+{
+ y = datapar$data[, 2]
+ e = datapar$data[, 1]
+ z0 = datapar$z0
+ alpha = exp(theta[1])
+ mu = exp(theta[2])
+ beta = alpha/mu
+
+ logf=function(y,e,alpha,beta)
+ lgamma(alpha + y) - (y + alpha) * log(e + beta) +
+ alpha * log(beta)-lgamma(alpha)
+
+ val=sum(logf(y,e,alpha,beta))
+ val = val + log(alpha) - 2 * log(alpha + z0)
+ return(val)
+}
\ No newline at end of file
diff --git a/R/poisson.gamma.mix.R b/R/poisson.gamma.mix.R
new file mode 100644
index 0000000..7fe750c
--- /dev/null
+++ b/R/poisson.gamma.mix.R
@@ -0,0 +1,18 @@
+poisson.gamma.mix=function(probs,gammapar,data)
+{
+N=length(probs)
+y=data$y; t=data$t; n=length(y)
+post.gammapar=gammapar+outer(rep(1,N),c(sum(y),sum(t)))
+L=post.gammapar[,1]/post.gammapar[,2]
+
+loglike=0
+for (j in 1:n)
+ loglike=loglike+dpois(y[j],L*t[j],log=TRUE)
+
+m.prob=exp(loglike+
+ dgamma(L,shape=gammapar[,1],rate=gammapar[,2],log=TRUE) -
+ dgamma(L,shape=post.gammapar[,1],rate=post.gammapar[,2],log=TRUE))
+
+post.probs=probs*m.prob/sum(probs*m.prob)
+return(list(probs=post.probs,gammapar=post.gammapar))
+}
\ No newline at end of file
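
A sketch with made-up inputs: two gamma(shape, rate) components with equal prior weight, updated by counts y observed over exposures t.

library(LearnBayes)
probs <- c(0.5, 0.5)
gammapar <- rbind(c(1, 1), c(10, 2))          # one gamma(shape, rate) row per component
datalist <- list(y = c(2, 4, 3), t = c(1, 1, 1))
poisson.gamma.mix(probs, gammapar, datalist)  # posterior weights and parameters
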
diff --git a/R/predplot.R b/R/predplot.R
new file mode 100644
index 0000000..3dfe6c2
--- /dev/null
+++ b/R/predplot.R
@@ -0,0 +1,10 @@
+predplot=function(prior,n,yobs)
+{
+ y=0:n; a=prior[1]; b=prior[2]
+ probs=pbetap(prior,n,y)
+ m=max(probs)*1.05
+ plot(y,probs,type="h",ylab="Probability",ylim=c(0,m),
+ main=paste("Predictive Dist., beta(",a,",",b,") prior, n=",n,
+ ", yobs=",yobs),lwd=2,col="blue")
+ points(yobs,0,pch=19,cex=2.5,col="red")
+ text(yobs,m/8,"yobs",col="red")}
\ No newline at end of file
diff --git a/R/print.bayes.R b/R/print.bayes.R
new file mode 100644
index 0000000..fa5e620
--- /dev/null
+++ b/R/print.bayes.R
@@ -0,0 +1,2 @@
+print.bayes=function(x,...)
+ print(x$prob)
\ No newline at end of file
diff --git a/R/prior.two.parameters.R b/R/prior.two.parameters.R
new file mode 100644
index 0000000..77b3e5c
--- /dev/null
+++ b/R/prior.two.parameters.R
@@ -0,0 +1,7 @@
+prior.two.parameters = function(parameter1, parameter2) {
+ prior = matrix(1, length(parameter1), length(parameter2))
+ prior = prior/sum(prior)
+ dimnames(prior)[[1]] = parameter1
+ dimnames(prior)[[2]] = parameter2
+ prior
+ }
\ No newline at end of file
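
A one-line sketch: the function builds a uniform prior matrix over the grid of two hypothetical parameter vectors, storing the values as dimnames.

library(LearnBayes)
prior.two.parameters(c(0.1, 0.2, 0.3), c(5, 10))   # uniform over a 3 x 2 grid, each cell 1/6
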
diff --git a/R/rdirichlet.R b/R/rdirichlet.R
new file mode 100644
index 0000000..75237dc
--- /dev/null
+++ b/R/rdirichlet.R
@@ -0,0 +1,14 @@
+rdirichlet=function (n, par)
+{
+ k = length(par)
+ z = array(0, dim = c(n, k))
+ s = array(0, dim = c(n, 1))
+ for (i in 1:k) {
+ z[, i] = rgamma(n, shape = par[i])
+ s = s + z[, i]
+ }
+ for (i in 1:k) {
+ z[, i] = z[, i]/s
+ }
+ return(z)
+}
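
A quick check that each simulated row is a probability vector:

library(LearnBayes)
set.seed(1)
draws <- rdirichlet(5, c(2, 3, 5))
rowSums(draws)   # each row sums to 1
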
diff --git a/R/reg.gprior.post.R b/R/reg.gprior.post.R
new file mode 100644
index 0000000..aa04bc3
--- /dev/null
+++ b/R/reg.gprior.post.R
@@ -0,0 +1,10 @@
+reg.gprior.post=function(theta,dataprior)
+{
+y=dataprior$data$y; X=dataprior$data$X
+c0=dataprior$prior$c0; beta0=dataprior$prior$b0
+beta=theta[-length(theta)]; sigma=exp(theta[length(theta)])
+
+loglike=sum(dnorm(y,mean=X%*%as.vector(beta),sd=sigma,log=TRUE))
+logprior=dmnorm(beta,mean=beta0,varcov=c0*sigma^2*solve(t(X)%*%X),log=TRUE)
+return(loglike+logprior)
+}
\ No newline at end of file
diff --git a/R/regroup.R b/R/regroup.R
new file mode 100644
index 0000000..e787a6a
--- /dev/null
+++ b/R/regroup.R
@@ -0,0 +1,19 @@
+regroup=function(data,g)
+{
+d=dim(data); n=d[1]; m=d[2]
+N=floor(n/g)
+dataG=array(0,c(N,m))
+k=0
+for (j in seq(1,(N-1)*g+1,g))
+{
+k=k+1
+for (i in 0:(g-1))
+ dataG[k,]=dataG[k,]+data[j+i,]
+}
+if (n>N*g)
+{
+for (i in (N*g+1):n)
+ dataG[N,]=dataG[N,]+data[i,]
+}
+return(dataG)
+}
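
A sketch on a hypothetical 10 x 2 matrix, grouping rows three at a time; the leftover tenth row folds into the last group.

library(LearnBayes)
m <- matrix(1:20, nrow = 10, ncol = 2)
regroup(m, 3)   # 3 grouped rows of column sums
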
diff --git a/R/rejectsampling.R b/R/rejectsampling.R
new file mode 100644
index 0000000..fb426cd
--- /dev/null
+++ b/R/rejectsampling.R
@@ -0,0 +1,17 @@
+rejectsampling=function (logf, tpar, dmax, n, data)
+{
+ d = length(tpar$m)
+ theta = rmt(n, mean = c(tpar$m), S = tpar$var, df = tpar$df)
+    lf = matrix(0, dim(theta)[1], 1)
+ for (j in 1:dim(theta)[1]) lf[j] = logf(theta[j, ], data)
+ lg = dmt(theta, mean = c(tpar$m), S = tpar$var, df = tpar$df,
+ log = TRUE)
+ if (d == 1) {
+ prob = exp(c(lf) - lg - dmax)
+ return(theta[runif(n) < prob])
+ }
+ else {
+ prob = exp(lf - lg - dmax)
+ return(theta[runif(n) < prob, ])
+ }
+}
diff --git a/R/rigamma.R b/R/rigamma.R
new file mode 100644
index 0000000..5161872
--- /dev/null
+++ b/R/rigamma.R
@@ -0,0 +1,3 @@
+rigamma = function(n, a, b) {
+ return(1/rgamma(n, shape = a, rate = b))
+ }
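
A quick check of the parameterization: the inverse gamma(a, b) mean is b/(a-1) for a > 1, so the sample mean of many draws should sit near that value.

library(LearnBayes)
set.seed(1)
mean(rigamma(100000, a = 5, b = 8))   # roughly b/(a-1) = 2
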
diff --git a/R/rmnorm.R b/R/rmnorm.R
new file mode 100644
index 0000000..5d05018
--- /dev/null
+++ b/R/rmnorm.R
@@ -0,0 +1,9 @@
+rmnorm=function(n = 1, mean = rep(0, d), varcov)
+{
+ d <- if (is.matrix(varcov))
+ ncol(varcov)
+ else 1
+ z <- matrix(rnorm(n * d), n, d) %*% chol(varcov)
+ y <- t(mean + t(z))
+ return(y)
+}
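
A sketch verifying the Cholesky construction: sample moments of the draws should approximate the requested mean and varcov.

library(LearnBayes)
set.seed(1)
V <- matrix(c(1, 0.5, 0.5, 2), 2, 2)
x <- rmnorm(5000, mean = c(0, 1), varcov = V)
round(cov(x), 2)   # approximately V
colMeans(x)        # approximately (0, 1)
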
diff --git a/R/rmt.R b/R/rmt.R
new file mode 100644
index 0000000..86e636a
--- /dev/null
+++ b/R/rmt.R
@@ -0,0 +1,13 @@
+rmt=function (n = 1, mean = rep(0, d), S, df = Inf)
+{
+ d <- if (is.matrix(S))
+ ncol(S)
+ else 1
+ if (df == Inf)
+ x <- 1
+ else x <- rchisq(n, df)/df
+ z <- rmnorm(n, rep(0, d), S)
+ y <- t(mean + t(z/sqrt(x)))
+ return(y)
+}
+
diff --git a/R/robustt.R b/R/robustt.R
new file mode 100644
index 0000000..2a50e44
--- /dev/null
+++ b/R/robustt.R
@@ -0,0 +1,24 @@
+robustt=function(y,v,m)
+{
+rigamma=function(n,a,b)
+{
+# simulates n values from an inverse gamma
+# distribution with shape a and rate b,
+# density proportional to x^(-a-1) exp(-b/x)
+
+return(1/rgamma(n,shape=a,rate=b))
+}
+n=length(y)
+mu=mean(y); sig2=sd(y)^2; lam=array(1,c(n,1))
+M=array(0,c(m,1)); S2=M; LAM=array(0,c(m,n))
+
+for (i in 1:m)
+{
+ lam=rgamma(n,shape=(v+1)/2,rate=v/2+(y-mu)^2/2/sig2)
+ mu=rnorm(1,mean=sum(y*lam)/sum(lam),sd=sqrt(sig2/sum(lam)))
+ sig2=rigamma(1,n/2,sum(lam*(y-mu)^2)/2)
+ M[i]=mu; S2[i]=sig2; LAM[i,]=lam
+}
+par=list(mu=M,s2=S2,lam=LAM)
+return(par)
+}
diff --git a/R/rtruncated.R b/R/rtruncated.R
new file mode 100644
index 0000000..dbad54d
--- /dev/null
+++ b/R/rtruncated.R
@@ -0,0 +1,2 @@
+rtruncated=function(n,lo,hi,pf,qf,...)
+qf(pf(lo,...)+runif(n)*(pf(hi,...)-pf(lo,...)),...)
\ No newline at end of file
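
This one-liner is the generic inverse-cdf truncation trick (the same idea as rnormt inside ordergibbs above). A sketch truncating a gamma(2, 1) to (1, 4):

library(LearnBayes)
set.seed(1)
x <- rtruncated(1000, lo = 1, hi = 4, pf = pgamma, qf = qgamma,
                shape = 2, rate = 1)
range(x)   # all draws fall inside (1, 4)
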
diff --git a/R/rwmetrop.R b/R/rwmetrop.R
new file mode 100644
index 0000000..f009fa5
--- /dev/null
+++ b/R/rwmetrop.R
@@ -0,0 +1,26 @@
+rwmetrop=function (logpost, proposal, start, m, ...)
+{
+ pb = length(start)
+ Mpar = array(0, c(m, pb))
+ b = matrix(t(start))
+ lb = logpost(start, ...)
+ a = chol(proposal$var)
+ scale = proposal$scale
+ accept = 0
+ for (i in 1:m) {
+ bc = b + scale * t(a) %*% array(rnorm(pb), c(pb, 1))
+ lbc = logpost(t(bc), ...)
+ prob = exp(lbc - lb)
+ if (is.na(prob) == FALSE) {
+ if (runif(1) < prob) {
+ lb = lbc
+ b = bc
+ accept = accept + 1
+ }
+ }
+ Mpar[i, ] = b
+ }
+ accept = accept/m
+ stuff = list(par = Mpar, accept = accept)
+ return(stuff)
+}
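
A self-contained sketch on a known target, a standard normal log-density, so the output can be checked against exact values (the trailing data argument is an unused placeholder here):

library(LearnBayes)
set.seed(1)
lpost <- function(theta, data) -0.5 * theta^2   # standard normal target
fit <- rwmetrop(lpost, list(var = matrix(1), scale = 2.4), 0, 10000, 0)
fit$accept                      # acceptance rate, roughly 0.4 in one dimension
c(mean(fit$par), sd(fit$par))   # approximately 0 and 1
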
diff --git a/R/simcontour.R b/R/simcontour.R
new file mode 100644
index 0000000..a0b728a
--- /dev/null
+++ b/R/simcontour.R
@@ -0,0 +1,31 @@
+simcontour=function (logf, limits, data, m)
+{
+LOGF=function(theta, data)
+ {
+ if(is.matrix(theta)==TRUE){
+ val=matrix(0,dim(theta)[1],1)
+ for (j in 1:dim(theta)[1])
+ val[j]=logf(theta[j,],data)
+ }
+ else val=logf(theta,data)
+ return(val)
+ }
+ ng = 50
+ x0 = seq(limits[1], limits[2], len = ng)
+ y0 = seq(limits[3], limits[4], len = ng)
+ X = outer(x0, rep(1, ng))
+ Y = outer(rep(1, ng), y0)
+ n2 = ng^2
+ Z = LOGF(cbind(X[1:n2], Y[1:n2]), data)
+ Z = Z - max(Z)
+ Z = matrix(Z, c(ng, ng))
+ d = cbind(X[1:n2], Y[1:n2], Z[1:n2])
+ dx = diff(x0[1:2])
+ dy = diff(y0[1:2])
+ prob = d[, 3]
+ prob = exp(prob)
+ prob = prob/sum(prob)
+ i = sample(n2, m, replace = TRUE, prob = prob)
+ return(list(x = d[i, 1] + runif(m) * dx - dx/2, y = d[i,
+ 2] + runif(m) * dy - dy/2))
+}
diff --git a/R/sir.R b/R/sir.R
new file mode 100644
index 0000000..eae1aa2
--- /dev/null
+++ b/R/sir.R
@@ -0,0 +1,17 @@
+sir=function (logf, tpar, n, data)
+{
+ k = length(tpar$m)
+ theta = rmt(n, mean = c(tpar$m), S = tpar$var, df = tpar$df)
+ lf=matrix(0,dim(theta)[1],1)
+ for (j in 1:dim(theta)[1]) lf[j]=logf(theta[j,],data)
+ lp = dmt(theta, mean = c(tpar$m), S = tpar$var, df = tpar$df,
+ log = TRUE)
+ md = max(lf - lp)
+ wt = exp(lf - lp - md)
+ probs = wt/sum(wt)
+ indices = sample(1:n, size = n, prob = probs, replace = TRUE)
+ if (k > 1)
+ theta = theta[indices, ]
+ else theta = theta[indices]
+ return(theta)
+}
diff --git a/R/summary.bayes.R b/R/summary.bayes.R
new file mode 100644
index 0000000..037218f
--- /dev/null
+++ b/R/summary.bayes.R
@@ -0,0 +1,22 @@
+summary.bayes=function(object,coverage=.9,...)
+{
+x = as.numeric(names(object$prob))
+p = object$prob
+post.mean=sum(x*p)
+post.sd=sqrt(sum((x-post.mean)^2*p))
+names(p)=NULL
+n = length(x)
+sp = sort(p, index.return = TRUE)
+i = sp$ix[seq(n, 1, -1)]
+ps = p[i]
+xs = x[i]
+cp = cumsum(ps)
+ii = 1:n
+j = ii[cp >= coverage]
+j = j[1]
+eprob = cp[j]
+set = sort(xs[1:j])
+v = list(mean=post.mean,sd=post.sd,coverage = eprob, set = set)
+return(v)
+}
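
The method only needs a list carrying a named prob vector and class "bayes", so a sketch can feed it the discrete posterior from pdisc directly:

library(LearnBayes)
p <- seq(0.1, 0.9, by = 0.1)
post <- pdisc(p, rep(1/9, 9), c(6, 4))   # uniform prior, 6 successes and 4 failures
obj <- list(prob = setNames(post, p))
class(obj) <- "bayes"
summary(obj, coverage = 0.9)   # posterior mean, sd, and a 90% probability set
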
diff --git a/R/transplantpost.R b/R/transplantpost.R
new file mode 100644
index 0000000..37be17d
--- /dev/null
+++ b/R/transplantpost.R
@@ -0,0 +1,32 @@
+transplantpost=function (theta, data)
+{
+ x = data[, 1]
+ y = data[, 3]
+ t = data[, 2]
+ d = data[, 4]
+ tau = exp(theta[1])
+ lambda = exp(theta[2])
+ p = exp(theta[3])
+
+ xnt = x[t == 0]
+ dnt = d[t == 0]
+ z = x[t == 1]
+ y = y[t == 1]
+ dt = d[t == 1]
+
+ logf=function(xnt,dnt,lambda,p)
+ (dnt==0)*(p*log(lambda)+log(p)- (p + 1) * log(lambda + xnt)) +
+ (dnt==1)*p*log(lambda/(lambda + xnt))
+
+ logg=function(z,y,tau,lambda,p)
+ (dt==0)*(p * log(lambda) +
+ log(p * tau)-(p + 1) * log(lambda + y + tau * z)) +
+ (dt==1) * p * log(lambda/(lambda + y + tau * z))
+
+ val=sum(logf(xnt,dnt,lambda,p))+sum(logg(z,y,tau,lambda,p))
+
+ val = val + theta[1] + theta[2] + theta[3]
+ return(val)
+}
+
+
diff --git a/R/triplot.R b/R/triplot.R
new file mode 100644
index 0000000..d25b28d
--- /dev/null
+++ b/R/triplot.R
@@ -0,0 +1,21 @@
+triplot=function(prior,data,where="topright")
+{
+a=prior[1]; b=prior[2]
+s=data[1]; f=data[2]
+
+p = seq(0.005, 0.995, length = 500)
+prior=dbeta(p,a,b)
+like=dbeta(p,s+1,f+1)
+post=dbeta(p,a+s, b+f)
+
+m=max(c(prior,like,post))
+
+plot(p,post,type="l", ylab="Density", lty=2, lwd=3,
+ main=paste("Bayes Triplot, beta(",a,",",b,") prior, s=",s,", f=",f),
+ ylim=c(0,m),col="red")
+lines(p,like,lty=1, lwd=3,col="blue")
+lines(p,prior,lty=3, lwd=3,col="green")
+legend(where,c("Prior","Likelihood","Posterior"),
+ lty=c(3,1,2), lwd=c(3,3,3), col=c("green","blue","red"))
+
+}
\ No newline at end of file
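
A one-line sketch with hypothetical inputs:

library(LearnBayes)
triplot(c(3, 7), c(10, 5))   # beta(3,7) prior updated by 10 successes, 5 failures
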
diff --git a/R/weibullregpost.R b/R/weibullregpost.R
new file mode 100644
index 0000000..aa10eaf
--- /dev/null
+++ b/R/weibullregpost.R
@@ -0,0 +1,21 @@
+weibullregpost=function (theta, data)
+{
+ logf=function(t,c,x,sigma,mu,beta)
+ {
+ z=(log(t)-mu-x%*%beta)/sigma
+ f=1/sigma*exp(z-exp(z))
+ S=exp(-exp(z))
+ c*log(f)+(1-c)*log(S)
+ }
+
+ k = dim(data)[2]
+ p = k - 2
+ t = data[, 1]
+ c = data[, 2]
+ X = data[, 3:k]
+ sigma = exp(theta[1])
+ mu = theta[2]
+ beta = array(theta[3:k], c(p,1))
+ return(sum(logf(t,c,X,sigma,mu,beta)))
+}
+
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..6e487b6
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/data/achievement.txt.gz b/data/achievement.txt.gz
new file mode 100644
index 0000000..5927542
Binary files /dev/null and b/data/achievement.txt.gz differ
diff --git a/data/baseball.1964.txt.gz b/data/baseball.1964.txt.gz
new file mode 100644
index 0000000..98b02da
Binary files /dev/null and b/data/baseball.1964.txt.gz differ
diff --git a/data/bermuda.grass.txt.gz b/data/bermuda.grass.txt.gz
new file mode 100644
index 0000000..a50d65f
Binary files /dev/null and b/data/bermuda.grass.txt.gz differ
diff --git a/data/birdextinct.txt.gz b/data/birdextinct.txt.gz
new file mode 100644
index 0000000..ac80926
Binary files /dev/null and b/data/birdextinct.txt.gz differ
diff --git a/data/birthweight.txt.gz b/data/birthweight.txt.gz
new file mode 100644
index 0000000..7e4ee9d
Binary files /dev/null and b/data/birthweight.txt.gz differ
diff --git a/data/breastcancer.txt.gz b/data/breastcancer.txt.gz
new file mode 100644
index 0000000..0c3001e
Binary files /dev/null and b/data/breastcancer.txt.gz differ
diff --git a/data/calculus.grades.txt.gz b/data/calculus.grades.txt.gz
new file mode 100644
index 0000000..5578202
Binary files /dev/null and b/data/calculus.grades.txt.gz differ
diff --git a/data/cancermortality.txt.gz b/data/cancermortality.txt.gz
new file mode 100644
index 0000000..fde2503
Binary files /dev/null and b/data/cancermortality.txt.gz differ
diff --git a/data/chemotherapy.txt.gz b/data/chemotherapy.txt.gz
new file mode 100644
index 0000000..7b68510
Binary files /dev/null and b/data/chemotherapy.txt.gz differ
diff --git a/data/darwin.txt.gz b/data/darwin.txt.gz
new file mode 100644
index 0000000..43b49b8
Binary files /dev/null and b/data/darwin.txt.gz differ
diff --git a/data/donner.txt.gz b/data/donner.txt.gz
new file mode 100644
index 0000000..14d9feb
Binary files /dev/null and b/data/donner.txt.gz differ
diff --git a/data/election.2008.txt.gz b/data/election.2008.txt.gz
new file mode 100644
index 0000000..fefbe6d
Binary files /dev/null and b/data/election.2008.txt.gz differ
diff --git a/data/election.txt.gz b/data/election.txt.gz
new file mode 100644
index 0000000..f06feb6
Binary files /dev/null and b/data/election.txt.gz differ
diff --git a/data/footballscores.txt.gz b/data/footballscores.txt.gz
new file mode 100644
index 0000000..3aa6f15
Binary files /dev/null and b/data/footballscores.txt.gz differ
diff --git a/data/hearttransplants.txt.gz b/data/hearttransplants.txt.gz
new file mode 100644
index 0000000..0823e95
Binary files /dev/null and b/data/hearttransplants.txt.gz differ
diff --git a/data/iowagpa.txt.gz b/data/iowagpa.txt.gz
new file mode 100644
index 0000000..0206cf0
Binary files /dev/null and b/data/iowagpa.txt.gz differ
diff --git a/data/jeter2004.txt.gz b/data/jeter2004.txt.gz
new file mode 100644
index 0000000..05b3410
Binary files /dev/null and b/data/jeter2004.txt.gz differ
diff --git a/data/marathontimes.txt.gz b/data/marathontimes.txt.gz
new file mode 100644
index 0000000..665e279
Binary files /dev/null and b/data/marathontimes.txt.gz differ
diff --git a/data/puffin.txt.gz b/data/puffin.txt.gz
new file mode 100644
index 0000000..3d6b416
Binary files /dev/null and b/data/puffin.txt.gz differ
diff --git a/data/schmidt.txt.gz b/data/schmidt.txt.gz
new file mode 100644
index 0000000..8a086b6
Binary files /dev/null and b/data/schmidt.txt.gz differ
diff --git a/data/sluggerdata.txt.gz b/data/sluggerdata.txt.gz
new file mode 100644
index 0000000..4aa8757
Binary files /dev/null and b/data/sluggerdata.txt.gz differ
diff --git a/data/soccergoals.txt.gz b/data/soccergoals.txt.gz
new file mode 100644
index 0000000..27afce1
Binary files /dev/null and b/data/soccergoals.txt.gz differ
diff --git a/data/stanfordheart.txt.gz b/data/stanfordheart.txt.gz
new file mode 100644
index 0000000..5462f0f
Binary files /dev/null and b/data/stanfordheart.txt.gz differ
diff --git a/data/strikeout.txt.gz b/data/strikeout.txt.gz
new file mode 100644
index 0000000..5fbb559
Binary files /dev/null and b/data/strikeout.txt.gz differ
diff --git a/data/studentdata.txt.gz b/data/studentdata.txt.gz
new file mode 100644
index 0000000..d986ae8
Binary files /dev/null and b/data/studentdata.txt.gz differ
diff --git a/debian/README.test b/debian/README.test
deleted file mode 100644
index 55a9142..0000000
--- a/debian/README.test
+++ /dev/null
@@ -1,8 +0,0 @@
-Notes on how this package can be tested.
-────────────────────────────────────────
-
-To run the unit tests provided by the package you can do
-
- sh run-unit-test
-
-in this directory.
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 2dc70f4..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,34 +0,0 @@
-r-cran-learnbayes (2.15-4) unstable; urgency=medium
-
- * Team upload.
- * Rebuild for the r-api-3.4 transition.
- * Convert to dh-r.
- * Bump to debhelper compat level 10.
- * Bump to Standards-Version 4.1.0.
- * Use canonical CRAN homepage.
- * Bump to d/watch file format v4.
- * Use secure URL in Format field of d/copyright.
- * Remove unneeded Testsuite header.
-
- -- Sébastien Villemot <sebastien at debian.org> Thu, 28 Sep 2017 13:56:13 +0200
-
-r-cran-learnbayes (2.15-3) unstable; urgency=medium
-
- * Fix autopkgtest
- Closes: #759413
- * cme fix dpkg-control
-
- -- Andreas Tille <tille at debian.org> Wed, 27 Apr 2016 23:12:40 +0200
-
-r-cran-learnbayes (2.15-2) unstable; urgency=medium
-
- * Fix Vcs fields
- * Add autopkgtest based on vignettes
-
- -- Andreas Tille <tille at debian.org> Thu, 14 Aug 2014 11:27:59 +0200
-
-r-cran-learnbayes (2.15-1) unstable; urgency=low
-
- * Initial release (Closes: #752552).
-
- -- Andreas Tille <tille at debian.org> Mon, 23 Jun 2014 09:24:05 +0200
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index f599e28..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-10
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 84b2974..0000000
--- a/debian/control
+++ /dev/null
@@ -1,26 +0,0 @@
-Source: r-cran-learnbayes
-Maintainer: Debian Science Team <debian-science-maintainers at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 10),
- dh-r
-Standards-Version: 4.1.0
-Vcs-Browser: https://anonscm.debian.org/viewvc/debian-science/packages/R/r-cran-learnbayes/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-science/packages/R/r-cran-learnbayes/trunk/
-Homepage: https://cran.r-project.org/package=LearnBayes
-
-Package: r-cran-learnbayes
-Architecture: all
-Depends: ${misc:Depends},
- ${R:Depends}
-Recommends: ${R:Recommends}
-Suggests: ${R:Suggests}
-Description: GNU R functions for learning bayesian inference
- LearnBayes contains a collection of functions helpful in learning the
- basic tenets of Bayesian statistical inference. It contains functions
- for summarizing basic one and two parameter posterior distributions and
- predictive distributions. It contains MCMC algorithms for summarizing
- posterior distributions defined by the user. It also contains functions
- for regression models, hierarchical models, Bayesian tests, and
- illustrations of Gibbs sampling.
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index 84ece4d..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,30 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: LearnBayes
-Upstream-Contact: Jim Albert <albert at bgsu.edu>
-Source: http://cran.r-project.org/web/packages/LearnBayes/
-
-Files: *
-Copyright: 2008-2014 Jim Albert <albert at bgsu.edu>
-License: GPL-2+
-
-Files: debian/*
-Copyright: 2014 Andreas Tille <tille at debian.org>
-License: GPL-2+
-
-License: GPL-2+
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- .
- On Debian systems, the complete text of the GNU General Public
- License can be found in `/usr/share/common-licenses/GPL-2'.
diff --git a/debian/docs b/debian/docs
deleted file mode 100644
index 9a4f4f8..0000000
--- a/debian/docs
+++ /dev/null
@@ -1,2 +0,0 @@
-debian/README.test
-debian/tests/run-unit-test
diff --git a/debian/examples b/debian/examples
deleted file mode 100644
index 18244c8..0000000
--- a/debian/examples
+++ /dev/null
@@ -1 +0,0 @@
-vignettes
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 68d9a36..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/make -f
-
-%:
- dh $@ --buildsystem R
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/tests/control b/debian/tests/control
deleted file mode 100644
index d2aa55a..0000000
--- a/debian/tests/control
+++ /dev/null
@@ -1,3 +0,0 @@
-Tests: run-unit-test
-Depends: @
-Restrictions: allow-stderr
diff --git a/debian/tests/run-unit-test b/debian/tests/run-unit-test
deleted file mode 100644
index 7d044a8..0000000
--- a/debian/tests/run-unit-test
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh -e
-oname=LearnBayes
-pkg=r-cran-`echo $oname | tr '[A-Z]' '[a-z]'`
-
-if [ "$ADTTMP" = "" ] ; then
- ADTTMP=`mktemp -d /tmp/${pkg}-test.XXXXXX`
-fi
-cd $ADTTMP
-cp /usr/share/doc/$pkg/examples/vignettes/* $ADTTMP
-gunzip *.gz
-for rnw in `ls *.[rR]nw` ; do
-rfile=`echo $rnw | sed 's/\.[rR]nw/.R/'`
-R --no-save <<EOT
- Stangle("$rnw")
- source("$rfile", echo=TRUE)
-EOT
-done
-rm -rf *
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index 42980f8..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=4
-http://cran.r-project.org/src/contrib/LearnBayes_([-\d.]*)\.tar\.gz
diff --git a/demo/00Index b/demo/00Index
new file mode 100644
index 0000000..5291e34
--- /dev/null
+++ b/demo/00Index
@@ -0,0 +1,45 @@
+Chapter.1.2 Exploring a Student Dataset
+Chapter.1.3 Exploring the Robustness of the t Statistic
+Chapter.2.3 Learning About a Proportion - Using a Discrete Prior
+Chapter.2.4 Learning About a Proportion - Using a Beta Prior
+Chapter.2.5 Learning About a Proportion - Using a Histogram Prior
+Chapter.2.6 Learning About a Proportion - Prediction
+Chapter.3.2 Normal Distribution with Known Mean, Unknown Variance
+Chapter.3.3 Estimating a Heart Transplant Mortality Rate
+Chapter.3.4 Learning About a Normal Mean with Known Variance
+Chapter.3.5 Mixtures of Conjugate Priors
+Chapter.3.6 A Bayesian Test of the Fairness of a Coin
+Chapter.4.2 Normal Data with Both Parameters Unknown
+Chapter.4.3 A Multinomial Model
+Chapter.4.4 A Bioassay Experiment
+Chapter.4.5 Comparing Two Proportions
+Chapter.5.4 A Beta-Binomial Model for Overdispersion
+Chapter.5.6 Approximations Based on Posterior Modes for Beta-Binomial Model
+Chapter.5.7 Monte Carlo Method for Computing Integrals
+Chapter.5.8 Rejection Sampling
+Chapter.5.9 Importance Sampling
+Chapter.5.10 Sampling Importance Resampling
+Chapter.6.2 Discrete Markov Chains
+Chapter.6.7 MCMC - Learning About a Normal Population Based on Grouped Data
+Chapter.6.8 MCMC Output Analysis
+Chapter.6.9 Modeling Data with Cauchy Errors
+Chapter.6.10 Analysis of the Stanford Heart Transplant Data
+Chapter.7.2 Introduction to Career Trajectory Example
+Chapter.7.3 Introduction to Heart Transplant Mortality Data
+Chapter.7.4 Checking Assumption of Equal Mortality Rates
+Chapter.7.5 Exchangeable Model for Mortality Rates
+Chapter.7.7 Simulating from Posterior from Exchangeable Model
+Chapter.7.8 Illustration of Posterior Inferences
+Chapter.7.9 Bayesian Sensitivity Analysis
+Chapter.7.10 Posterior Predictive Model Checking
+Chapter.8.3 One-Sided Test of a Normal Mean
+Chapter.8.4 Two-Sided Test of a Normal Mean
+Chapter.8.6 Models for Soccer Goal Scoring
+Chapter.8.7 Test if Baseball Player is Streaky
+Chapter.8.8 Test of Independence in a Two-Way Contingency Table
+Chapter.9.2 Normal Linear Regression
+Chapter.9.3 Model Selection Using Zellner's g Prior
+Chapter.9.4 Survival Modeling
+Chapter.10.2 Robust Modeling
+Chapter.10.3 Binary Response Regression with Probit Link
+Chapter.10.4 Estimating Table of Means with Belief of Order Restriction
diff --git a/demo/Chapter.1.2.R b/demo/Chapter.1.2.R
new file mode 100644
index 0000000..95fd89a
--- /dev/null
+++ b/demo/Chapter.1.2.R
@@ -0,0 +1,46 @@
+# Section 1.2 R commands
+
+# Section 1.2.2
+
+library(LearnBayes)
+data(studentdata)
+studentdata[1,]
+attach(studentdata)
+
+# Section 1.2.3
+
+table(Drink)
+barplot(table(Drink),xlab="Drink",ylab="Count")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+windows()
+hours.of.sleep = WakeUp - ToSleep
+summary(hours.of.sleep)
+hist(hours.of.sleep,main="")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+# Section 1.2.4
+
+windows()
+
+boxplot(hours.of.sleep~Gender,
+ ylab="Hours of Sleep")
+
+female.Haircut=Haircut[Gender=="female"]
+male.Haircut=Haircut[Gender=="male"]
+summary(female.Haircut)
+summary(male.Haircut)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+# Section 1.2.5
+
+windows()
+
+plot(jitter(ToSleep),jitter(hours.of.sleep))
+
+fit=lm(hours.of.sleep~ToSleep)
+fit
+abline(fit)
diff --git a/demo/Chapter.1.3.R b/demo/Chapter.1.3.R
new file mode 100644
index 0000000..1dacbc5
--- /dev/null
+++ b/demo/Chapter.1.3.R
@@ -0,0 +1,55 @@
+# Section 1.3 R commands
+
+# Section 1.3.2
+
+x=rnorm(10,mean=50,sd=10)
+y=rnorm(10,mean=50,sd=10)
+m=length(x)
+n=length(y)
+sp=sqrt(((m-1)*sd(x)^2+(n-1)*sd(y)^2)/(m+n-2))
+t.stat=(mean(x)-mean(y))/(sp*sqrt(1/m+1/n))
+
+tstatistic=function(x,y)
+{
+m=length(x)
+n=length(y)
+sp=sqrt(((m-1)*sd(x)^2+(n-1)*sd(y)^2)/(m+n-2))
+t.stat=(mean(x)-mean(y))/(sp*sqrt(1/m+1/n))
+return(t.stat)
+}
+
+data.x=c(1,4,3,6,5)
+data.y=c(5,4,7,6,10)
+tstatistic(data.x, data.y)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+# Section 1.3.3
+
+# simulation algorithm for normal populations
+
+alpha=.1; m=10; n=10 # sets alpha, m, n
+N=10000 # sets the number of simulations
+n.reject=0 # counter of num. of rejections
+for (i in 1:N)
+{
+x=rnorm(m,mean=0,sd=1) # simulates xs from population 1
+y=rnorm(n,mean=0,sd=1) # simulates ys from population 2
+t.stat=tstatistic(x,y) # computes the t statistic
+if (abs(t.stat)>qt(1-alpha/2,n+m-2))
+ n.reject=n.reject+1 # reject if |t| exceeds critical pt
+}
+true.sig.level=n.reject/N # est. is proportion of rejections
+
+s=readline(prompt="Type <Return> to continue : ")
+
+# simulation algorithm for normal and exponential populations
+# storing the values of the t statistic in vector tstat
+
+m=10; n=10
+my.tsimulation=function()
+ tstatistic(rnorm(m,mean=10,sd=2), rexp(n,rate=1/10))
+tstat.vector=replicate(10000, my.tsimulation())
+
+plot(density(tstat.vector),xlim=c(-5,8),ylim=c(0,.4),lwd=3)
+curve(dt(x,df=18),add=TRUE)
diff --git a/demo/Chapter.10.2.R b/demo/Chapter.10.2.R
new file mode 100644
index 0000000..0a9d5cc
--- /dev/null
+++ b/demo/Chapter.10.2.R
@@ -0,0 +1,23 @@
+###############################
+# Section 10.2 Robust Modeling
+###############################
+
+library(LearnBayes)
+
+ data(darwin)
+ attach(darwin)
+ fit=robustt(difference,4,10000)
+
+ plot(density(fit$mu),xlab="mu")
+
+ mean.lambda=apply(fit$lam,2,mean)
+ lam5=apply(fit$lam,2,quantile,.05)
+ lam95=apply(fit$lam,2,quantile,.95)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ plot(difference,mean.lambda,lwd=2,ylim=c(0,3),ylab="Lambda")
+ for (i in 1:length(difference))
+ lines(c(1,1)*difference[i],c(lam5[i],lam95[i]))
+ points(difference,0*difference-.05,pch=19,cex=2)
diff --git a/demo/Chapter.10.3.R b/demo/Chapter.10.3.R
new file mode 100644
index 0000000..46d8578
--- /dev/null
+++ b/demo/Chapter.10.3.R
@@ -0,0 +1,57 @@
+#############################################################
+# Section 10.3 Binary Response Regression with a Probit Link
+#############################################################
+
+#################################################
+# Section 10.3.1. Missing data and Gibbs sampling
+#################################################
+
+library(LearnBayes)
+
+ data(donner)
+ attach(donner)
+ X=cbind(1,age,male)
+
+ fit=glm(survival~X-1,family=binomial(link=probit))
+ summary(fit)
+
+ m=10000
+ fit=bayes.probit(survival,X,m)
+
+ apply(fit$beta,2,mean)
+
+ apply(fit$beta,2,sd)
+
+ a=seq(15,65)
+ X1=cbind(1,a,1)
+ p.male=bprobit.probs(X1,fit$beta)
+
+ plot(a,apply(p.male,2,quantile,.5),type="l",ylim=c(0,1),
+ xlab="age",ylab="Probability of Survival")
+ lines(a,apply(p.male,2,quantile,.05),lty=2)
+ lines(a,apply(p.male,2,quantile,.95),lty=2)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+###################################################
+# Section 10.3.2 Proper priors and model selection
+###################################################
+
+library(LearnBayes)
+data(donner)
+y=donner$survival
+X=cbind(1,donner$age,donner$male)
+
+beta0=c(0,0,0); c0=100
+P0=t(X)%*%X/c0
+
+bayes.probit(y,X,1000,list(beta=beta0,P=P0))$log.marg
+
+bayes.probit(y,X[,-2],1000,
+ list(beta=beta0[-2],P=P0[-2,-2]))$log.marg
+
+bayes.probit(y,X[,-3],1000,
+ list(beta=beta0[-3],P=P0[-3,-3]))$log.marg
+
+bayes.probit(y,X[,-c(2,3)],1000,
+ list(beta=beta0[-c(2,3)],P=P0[-c(2,3),-c(2,3)]))$log.marg
diff --git a/demo/Chapter.10.4.R b/demo/Chapter.10.4.R
new file mode 100644
index 0000000..007fe7a
--- /dev/null
+++ b/demo/Chapter.10.4.R
@@ -0,0 +1,84 @@
+###################################################
+# Section 10.4 Estimating a Table of Means
+###################################################
+
+library(LearnBayes)
+
+ data(iowagpa)
+ rlabels = c("91-99", "81-90", "71-80", "61-70", "51-60", "41-50",
+ "31-40", "21-30")
+ clabels = c("16-18", "19-21", "22-24", "25-27", "28-30")
+ gpa = matrix(iowagpa[, 1], nrow = 8, ncol = 5, byrow = T)
+ dimnames(gpa) = list(HSR = rlabels, ACTC = clabels)
+ gpa
+
+ samplesizes = matrix(iowagpa[, 2], nrow = 8, ncol = 5, byrow = T)
+ dimnames(samplesizes) = list(HSR = rlabels, ACTC = clabels)
+ samplesizes
+
+ act = seq(17, 29, by = 3)
+ matplot(act, t(gpa), type = "l", lwd = 3,
+ xlim = c(17, 34), col=1:8, lty=1:8)
+ legend(30, 3, lty = 1:8, lwd = 3, legend = c("HSR=9", "HSR=8",
+ "HSR=7", "HSR=6", "HSR=5", "HSR=4", "HSR=3", "HSR=2"), col=1:8)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ MU = ordergibbs(iowagpa, 5000)
+
+ postmeans = apply(MU, 2, mean)
+ postmeans = matrix(postmeans, nrow = 8, ncol = 5)
+ postmeans=postmeans[seq(8,1,-1),]
+ dimnames(postmeans)=list(HSR=rlabels,ACTC=clabels)
+ round(postmeans,2)
+
+windows()
+matplot(act, t(postmeans), type = "l", lty=1:8, lwd = 3, col = 1, xlim = c(17, 34))
+ legend(30, 3, lty = 1:8, lwd = 2, legend = c("HSR=9", "HSR=8",
+ "HSR=7", "HSR=6", "HSR=5", "HSR=4", "HSR=3", "HSR=2"))
+
+ postsds = apply(MU, 2, sd)
+ postsds = matrix(postsds, nrow = 8, ncol = 5)
+ postsds=postsds[seq(8,1,-1),]
+ dimnames(postsds)=list(HSR=rlabels,ACTC=clabels)
+ round(postsds,3)
+
+ s=.65
+ se=s/sqrt(samplesizes)
+ round(postsds/se,2)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ FIT=hiergibbs(iowagpa,5000)
+
+windows()
+ par(mfrow=c(2,1))
+ plot(density(FIT$beta[,2]),xlab=expression(beta[2]),
+ main="HIGH SCHOOL RANK")
+ plot(density(FIT$beta[,3]),xlab=expression(beta[3]),
+ main="ACT SCORE")
+ quantile(FIT$beta[,2],c(.025,.25,.5,.75,.975))
+
+ quantile(FIT$beta[,3],c(.025,.25,.5,.75,.975))
+
+ quantile(FIT$var,c(.025,.25,.5,.75,.975))
+
+ posterior.means = apply(FIT$mu, 2, mean)
+ posterior.means = matrix(posterior.means, nrow = 8, ncol = 5,
+ byrow = T)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+windows()
+ par(mfrow=c(1,1))
+ matplot(act, t(posterior.means), type = "l", lwd = 3, lty=1:8, col=1,
+ xlim = c(17, 34))
+ legend(30, 3, lty = 1:8, lwd = 2, legend = c("HSR=9", "HSR=8",
+ "HSR=7", "HSR=6", "HSR=5", "HSR=4", "HSR=3", "HSR=2"))
+
+ p=1-pnorm((2.5-FIT$mu)/.65)
+ prob.success=apply(p,2,mean)
+
+ prob.success=matrix(prob.success,nrow=8,ncol=5,byrow=T)
+ dimnames(prob.success)=list(HSR=rlabels,ACTC=clabels)
+ round(prob.success,3)
diff --git a/demo/Chapter.2.3.R b/demo/Chapter.2.3.R
new file mode 100644
index 0000000..063df0c
--- /dev/null
+++ b/demo/Chapter.2.3.R
@@ -0,0 +1,27 @@
+####################################
+# Section 2.3 Using a Discrete Prior
+####################################
+
+ library(LearnBayes)
+
+ p = seq(0.05, 0.95, by = 0.1)
+ prior = c(1, 5.2, 8, 7.2, 4.6, 2.1, 0.7, 0.1, 0, 0)
+ prior = prior/sum(prior)
+ plot(p, prior, type = "h", ylab="Prior Probability")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ data = c(11, 16)
+ post = pdisc(p, prior, data)
+ round(cbind(p, prior, post),2)
+
+ library(lattice)
+ PRIOR=data.frame("prior",p,prior)
+ POST=data.frame("posterior",p,post)
+ names(PRIOR)=c("Type","P","Probability")
+ names(POST)=c("Type","P","Probability")
+ data=rbind(PRIOR,POST)
+
+ windows()
+ xyplot(Probability~P|Type,data=data,layout=c(1,2),type="h",lwd=3,col="black")
+
diff --git a/demo/Chapter.2.4.R b/demo/Chapter.2.4.R
new file mode 100644
index 0000000..d805582
--- /dev/null
+++ b/demo/Chapter.2.4.R
@@ -0,0 +1,33 @@
+################################
+# Section 2.4 Using a Beta Prior
+#############################
+
+library(LearnBayes)
+
+quantile2=list(p=.9,x=.5)
+quantile1=list(p=.5,x=.3)
+ab=beta.select(quantile1,quantile2)
+
+ a = ab[1]
+ b = ab[2]
+ s = 11
+ f = 16
+ curve(dbeta(x,a+s,b+f), from=0, to=1,
+ xlab="p",ylab="Density",lty=1,lwd=4)
+ curve(dbeta(x,s+1,f+1),add=TRUE,lty=2,lwd=4)
+ curve(dbeta(x,a,b),add=TRUE,lty=3,lwd=4)
+ legend(.7,4,c("Prior","Likelihood","Posterior"),
+ lty=c(3,2,1),lwd=c(3,3,3))
+
+ 1 - pbeta(0.5, a + s, b + f)
+
+ qbeta(c(0.05, 0.95), a + s, b + f)
+
+ ps = rbeta(1000, a + s, b + f)
+
+ windows()
+ hist(ps,xlab="p")
+
+ sum(ps >= 0.5)/1000
+
+ quantile(ps, c(0.05, 0.95))
diff --git a/demo/Chapter.2.5.R b/demo/Chapter.2.5.R
new file mode 100644
index 0000000..858eb2f
--- /dev/null
+++ b/demo/Chapter.2.5.R
@@ -0,0 +1,33 @@
+#####################################
+# Section 2.5 Using a Histogram Prior
+#####################################
+
+ library(LearnBayes)
+
+ midpt = seq(0.05, 0.95, by = 0.1)
+ prior = c(1, 5.2, 8, 7.2, 4.6, 2.1, 0.7, 0.1, 0, 0)
+ prior = prior/sum(prior)
+
+ curve(histprior(x,midpt,prior), from=0, to=1,
+ ylab="Prior density",ylim=c(0,.3))
+
+ s = 11
+ f = 16
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ curve(histprior(x,midpt,prior) * dbeta(x,s+1,f+1),
+ from=0, to=1, ylab="Posterior density")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ p = seq(0, 1, length=500)
+ post = histprior(p, midpt, prior) *
+ dbeta(p, s+1, f+1)
+ post = post/sum(post)
+ ps = sample(p, replace = TRUE, prob = post)
+
+ windows()
+ hist(ps, xlab="p", main="")
+
diff --git a/demo/Chapter.2.6.R b/demo/Chapter.2.6.R
new file mode 100644
index 0000000..4e0e13b
--- /dev/null
+++ b/demo/Chapter.2.6.R
@@ -0,0 +1,33 @@
+########################
+# Section 2.6 Prediction
+########################
+
+ library(LearnBayes)
+
+ p=seq(0.05, 0.95, by=.1)
+ prior = c(1, 5.2, 8, 7.2, 4.6, 2.1, 0.7, 0.1, 0, 0)
+ prior=prior/sum(prior)
+ m=20; ys=0:20
+ pred=pdiscp(p, prior, m, ys)
+ cbind(0:20,pred)
+
+ ab=c(3.26, 7.19)
+ m=20; ys=0:20
+ pred=pbetap(ab, m, ys)
+
+ p=rbeta(1000,3.26, 7.19)
+
+ y = rbinom(1000, 20, p)
+
+ table(y)
+
+ freq=table(y)
+ ys=as.integer(names(freq))
+ predprob=freq/sum(freq)
+ plot(ys,predprob,type="h",xlab="y",
+ ylab="Predictive Probability")
+
+ dist=cbind(ys,predprob)
+
+ covprob=.9
+ discint(dist,covprob)
diff --git a/demo/Chapter.3.2.R b/demo/Chapter.3.2.R
new file mode 100644
index 0000000..4de2928
--- /dev/null
+++ b/demo/Chapter.3.2.R
@@ -0,0 +1,17 @@
+######################################################################
+# Section 3.2 Normal Distribution with Known Mean but Unknown Variance
+######################################################################
+
+ library(LearnBayes)
+
+ data(footballscores)
+ attach(footballscores)
+ d = favorite - underdog - spread
+ n = length(d)
+ v = sum(d^2)
+
+ P = rchisq(1000, n)/v
+ s = sqrt(1/P)
+ hist(s)
+
+ quantile(s, probs = c(0.025, 0.5, 0.975))
diff --git a/demo/Chapter.3.3.R b/demo/Chapter.3.3.R
new file mode 100644
index 0000000..e2340f6
--- /dev/null
+++ b/demo/Chapter.3.3.R
@@ -0,0 +1,31 @@
+##########################################################
+# Section 3.3 Estimating a Heart Transplant Mortality Rate
+##########################################################
+
+ alpha=16;beta=15174
+ yobs=1; ex=66
+ y=0:10
+ lam=alpha/beta
+ py=dpois(y, lam*ex)*dgamma(lam, shape = alpha,
+ rate = beta)/dgamma(lam, shape= alpha + y,
+ rate = beta + ex)
+ cbind(y, round(py, 3))
+
+ lambdaA = rgamma(1000, shape = alpha + yobs, rate = beta + ex)
+
+ ex = 1767; yobs=4
+ y = 0:10
+ py = dpois(y, lam * ex) * dgamma(lam, shape = alpha,
+ rate = beta)/dgamma(lam, shape = alpha + y,
+ rate = beta + ex)
+ cbind(y, round(py, 3))
+
+ lambdaB = rgamma(1000, shape = alpha + yobs, rate = beta + ex)
+
+ par(mfrow = c(2, 1))
+ plot(density(lambdaA), main="HOSPITAL A", xlab="lambdaA", lwd=3)
+ curve(dgamma(x, shape = alpha, rate = beta), add=TRUE)
+ legend("topright",legend=c("prior","posterior"),lwd=c(1,3))
+ plot(density(lambdaB), main="HOSPITAL B", xlab="lambdaB", lwd=3)
+ curve(dgamma(x, shape = alpha, rate = beta), add=TRUE)
+ legend("topright",legend=c("prior","posterior"),lwd=c(1,3))
diff --git a/demo/Chapter.3.4.R b/demo/Chapter.3.4.R
new file mode 100644
index 0000000..3cf9875
--- /dev/null
+++ b/demo/Chapter.3.4.R
@@ -0,0 +1,58 @@
+####################################################
+# Section 3.4 An Illustration of Bayesian Robustness
+####################################################
+
+ library(LearnBayes)
+
+ quantile1=list(p=.5,x=100); quantile2=list(p=.95,x=120)
+ normal.select(quantile1, quantile2)
+
+ mu = 100
+ tau = 12.16
+ sigma = 15
+ n = 4
+ se = sigma/sqrt(4)
+ ybar = c(110, 125, 140)
+ tau1 = 1/sqrt(1/se^2 + 1/tau^2)
+ mu1 = (ybar/se^2 + mu/tau^2) * tau1^2
+ summ1=cbind(ybar, mu1, tau1)
+ summ1
+
+ tscale = 20/qt(0.95, 2)
+ tscale
+
+ par(mfrow=c(1,1))
+ curve(1/tscale*dt((x-mu)/tscale,2),
+ from=60, to=140, xlab="theta", ylab="Prior Density")
+ curve(dnorm(x,mean=mu,sd=tau), add=TRUE, lwd=3)
+ legend("topright",legend=c("t density","normal density"),
+ lwd=c(1,3))
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ norm.t.compute=function(ybar) {
+ theta = seq(60, 180, length = 500)
+ like = dnorm(theta,mean=ybar,sd=sigma/sqrt(n))
+ prior = dt((theta - mu)/tscale, 2)
+ post = prior * like
+ post = post/sum(post)
+ m = sum(theta * post)
+ s = sqrt(sum(theta^2 * post) - m^2)
+ c(ybar, m, s) }
+
+summ2=t(sapply(c(110, 125, 140),norm.t.compute))
+dimnames(summ2)[[2]]=c("ybar","mu1 t","tau1 t")
+summ2
+
+ cbind(summ1,summ2)
+
+ theta=seq(60, 180, length=500)
+ normpost = dnorm(theta, mu1[3], tau1)
+ normpost = normpost/sum(normpost)
+ windows()
+ plot(theta,normpost,type="l",lwd=3,ylab="Posterior Density")
+ like = dnorm(theta,mean=140,sd=sigma/sqrt(n))
+ prior = dt((theta - mu)/tscale, 2)
+ tpost = prior * like / sum(prior * like)
+ lines(theta,tpost)
+ legend("topright",legend=c("t prior","normal prior"),lwd=c(1,3))
diff --git a/demo/Chapter.3.5.R b/demo/Chapter.3.5.R
new file mode 100644
index 0000000..4541345
--- /dev/null
+++ b/demo/Chapter.3.5.R
@@ -0,0 +1,25 @@
+#######################################################
+# Section 3.5 Mixtures of Conjugate Priors
+#######################################################
+
+library(LearnBayes)
+
+curve(.5*dbeta(x, 6, 14) + .5*dbeta(x, 14, 6), from=0, to=1,
+ xlab="P", ylab="Density")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+probs=c(.5,.5)
+beta.par1=c(6, 14)
+beta.par2=c(14, 6)
+betapar=rbind(beta.par1, beta.par2)
+data=c(7,3)
+post=binomial.beta.mix(probs,betapar,data)
+post
+
+windows()
+curve(post$probs[1]*dbeta(x,13,17)+post$probs[2]*dbeta(x,21,9),
+ from=0, to=1, lwd=3, xlab="P", ylab="DENSITY")
+curve(.5*dbeta(x,6,14)+.5*dbeta(x,14,6),0,1,add=TRUE)
+legend("topleft",legend=c("Prior","Posterior"),lwd=c(1,3))
+
diff --git a/demo/Chapter.3.6.R b/demo/Chapter.3.6.R
new file mode 100644
index 0000000..ce7885a
--- /dev/null
+++ b/demo/Chapter.3.6.R
@@ -0,0 +1,42 @@
+#######################################################
+# Section 3.6 A Bayesian Test of the Fairness of a Coin
+#######################################################
+
+library(LearnBayes)
+
+ pbinom(5, 20, 0.5)
+
+ n = 20
+ y = 5
+ a = 10
+ p = 0.5
+ m1 = dbinom(y, n, p) * dbeta(p, a, a)/dbeta(p, a + y, a + n -
+ y)
+ lambda = dbinom(y, n, p)/(dbinom(y, n, p) + m1)
+ lambda
+
+ pbetat(p,.5,c(a,a),c(y,n-y))
+
+prob.fair=function(log.a)
+{
+ a = exp(log.a)
+ m2 = dbinom(y, n, p) * dbeta(p, a, a)/
+ dbeta(p, a + y, a + n - y)
+ dbinom(y, n, p)/(dbinom(y, n, p) + m2)
+}
+
+n = 20; y = 5; p = 0.5
+curve(prob.fair(x), from=-4, to=5, xlab="log a",
+ ylab="Prob(coin is fair)", lwd=2)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ n=20
+ y=5
+ a=10
+ p=.5
+ m2=0
+ for (k in 0:y)
+ m2=m2+dbinom(k,n,p)*dbeta(p,a,a)/dbeta(p,a+k,a+n-k)
+ lambda=pbinom(y,n,p)/(pbinom(y,n,p)+m2)
+ lambda
diff --git a/demo/Chapter.4.2.R b/demo/Chapter.4.2.R
new file mode 100644
index 0000000..c8339ef
--- /dev/null
+++ b/demo/Chapter.4.2.R
@@ -0,0 +1,21 @@
+######################################################
+# Section 4.2 Normal Data with Both Parameters Unknown
+######################################################
+
+library(LearnBayes)
+
+ data(marathontimes)
+ attach(marathontimes)
+ d = mycontour(normchi2post, c(220, 330, 500, 9000), time,
+ xlab="mean",ylab="variance")
+
+ S = sum((time - mean(time))^2)
+ n = length(time)
+ sigma2 = S/rchisq(1000, n - 1)
+ mu = rnorm(1000, mean = mean(time), sd = sqrt(sigma2)/sqrt(n))
+
+ points(mu, sigma2)
+
+ quantile(mu, c(0.025, 0.975))
+
+ quantile(sqrt(sigma2), c(0.025, 0.975))
diff --git a/demo/Chapter.4.3.R b/demo/Chapter.4.3.R
new file mode 100644
index 0000000..13b23f7
--- /dev/null
+++ b/demo/Chapter.4.3.R
@@ -0,0 +1,39 @@
+###################################################
+# Section 4.3 A Multinomial Model
+###################################################
+
+library(LearnBayes)
+
+ alpha = c(728, 584, 138)
+ theta = rdirichlet(1000, alpha)
+
+ hist(theta[, 1] - theta[, 2], main="")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+###########################################
+
+data(election.2008)
+attach(election.2008)
+
+prob.Obama=function(j)
+{
+p=rdirichlet(5000,
+ 500*c(M.pct[j],O.pct[j],100-M.pct[j]-O.pct[j])/100+1)
+mean(p[,2]>p[,1])
+}
+
+Obama.win.probs=sapply(1:51,prob.Obama)
+
+sim.election=function()
+{
+winner=rbinom(51,1,Obama.win.probs)
+sum(EV*winner)
+}
+
+sim.EV=replicate(1000,sim.election())
+
+windows()
+hist(sim.EV,min(sim.EV):max(sim.EV),col="blue")
+abline(v=365,lwd=3) # Obama received 365 votes
+text(375,30,"Actual \n Obama \n total")
diff --git a/demo/Chapter.4.4.R b/demo/Chapter.4.4.R
new file mode 100644
index 0000000..0a6fcac
--- /dev/null
+++ b/demo/Chapter.4.4.R
@@ -0,0 +1,62 @@
+###################################################
+# Section 4.4 A Bioassay Experiment
+###################################################
+
+library(LearnBayes)
+
+ x = c(-0.86, -0.3, -0.05, 0.73)
+ n = c(5, 5, 5, 5)
+ y = c(0, 1, 3, 5)
+ data = cbind(x, n, y)
+
+ glmdata = cbind(y, n - y)
+ results = glm(glmdata ~ x, family = binomial)
+ summary(results)
+
+# when x = -.7, median and 90th percentile of p are (.2, .5)
+# when x = +.6, median and 90th percentile of p are (.8, .98)
+
+a1.b1=beta.select(list(p=.5,x=.2),list(p=.9,x=.5))
+a2.b2=beta.select(list(p=.5,x=.8),list(p=.9,x=.98))
+
+prior=rbind(c(-0.7, 4.68, 1.12),
+ c(0.6, 2.10, 0.74))
+data.new=rbind(data, prior)
+
+# plot prior #######################################
+
+plot(c(-1,1),c(0,1),type="n",xlab="Dose",ylab="Prob(death)")
+lines(-0.7*c(1,1),qbeta(c(.25,.75),a1.b1[1],a1.b1[2]),lwd=4)
+lines(0.6*c(1,1),qbeta(c(.25,.75),a2.b2[1],a2.b2[2]),lwd=4)
+points(c(-0.7,0.6),qbeta(.5,c(a1.b1[1],a2.b2[1]),c(a1.b1[2],a2.b2[2])),
+ pch=19,cex=2)
+text(-0.3,.2,"Beta(1.12, 3.56)")
+text(.2,.8,"Beta(2.10, 0.74)")
+response=rbind(a1.b1,a2.b2)
+x=c(-0.7,0.6)
+fit = glm(response ~ x, family = binomial)
+curve(exp(fit$coef[1]+fit$coef[2]*x)/
+ (1+exp(fit$coef[1]+fit$coef[2]*x)),add=T)
+
+#######################################################
+S=readline(prompt="Type <Return> to continue : ")
+
+windows()
+mycontour(logisticpost,c(-3,3,-1,9),data.new,
+ xlab="beta0", ylab="beta1")
+
+s=simcontour(logisticpost,c(-2,3,-1,11),data.new,1000)
+points(s)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+windows()
+ plot(density(s$y),xlab="beta1")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ theta=-s$x/s$y
+windows()
+ hist(theta,xlab="LD-50",breaks=20)
+
+ quantile(theta,c(.025,.975))
diff --git a/demo/Chapter.4.5.R b/demo/Chapter.4.5.R
new file mode 100644
index 0000000..dabe3d4
--- /dev/null
+++ b/demo/Chapter.4.5.R
@@ -0,0 +1,31 @@
+###########################################
+# Section 4.5 Comparing Two Proportions
+###########################################
+
+library(LearnBayes)
+
+ sigma=c(2,1,.5,.25)
+ plo=.0001;phi=.9999
+ par(mfrow=c(2,2))
+ for (i in 1:4)
+ mycontour(howardprior,c(plo,phi,plo,phi),c(1,1,1,1,sigma[i]),
+ main=paste("sigma=",as.character(sigma[i])),
+ xlab="p1",ylab="p2")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ sigma=c(2,1,.5,.25)
+ windows()
+ par(mfrow=c(2,2))
+ for (i in 1:4)
+ {
+ mycontour(howardprior,c(plo,phi,plo,phi),
+ c(1+3,1+15,1+7,1+5,sigma[i]),
+ main=paste("sigma=",as.character(sigma[i])),
+ xlab="p1",ylab="p2")
+ lines(c(0,1),c(0,1))
+ }
+
+ s=simcontour(howardprior,c(plo,phi,plo,phi),
+ c(1+3,1+15,1+7,1+5,2),1000)
+ sum(s$x>s$y)/1000
diff --git a/demo/Chapter.5.10.R b/demo/Chapter.5.10.R
new file mode 100644
index 0000000..4f2d7fd
--- /dev/null
+++ b/demo/Chapter.5.10.R
@@ -0,0 +1,18 @@
+##############################################
+# Section 5.10 Sampling Importance Resampling
+##############################################
+
+library(LearnBayes)
+data(cancermortality)
+fit=laplace(betabinexch,c(-7,6),cancermortality)
+
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+
+theta.s=sir(betabinexch,tpar,10000,cancermortality)
+
+ S=bayes.influence(theta.s,cancermortality)
+
+ plot(c(0,0,0),S$summary,type="b",lwd=3,xlim=c(-1,21),
+ ylim=c(5,11), xlab="Observation removed",ylab="log K")
+ for (i in 1:20)
+ lines(c(i,i,i),S$summary.obs[i,],type="b")
diff --git a/demo/Chapter.5.4.R b/demo/Chapter.5.4.R
new file mode 100644
index 0000000..34ebe49
--- /dev/null
+++ b/demo/Chapter.5.4.R
@@ -0,0 +1,16 @@
+#####################################################
+# Section 5.4 A Beta-Binomial Model for Overdispersion
+#####################################################
+
+ library(LearnBayes)
+
+ data(cancermortality)
+
+ mycontour(betabinexch0,c(.0001,.003,1,20000),cancermortality,
+ xlab="eta",ylab="K")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+windows()
+ mycontour(betabinexch,c(-8,-4.5,3,16.5),cancermortality,
+ xlab="logit eta",ylab="log K")
diff --git a/demo/Chapter.5.6.R b/demo/Chapter.5.6.R
new file mode 100644
index 0000000..8377ef2
--- /dev/null
+++ b/demo/Chapter.5.6.R
@@ -0,0 +1,19 @@
+######################################################
+# Section 5.6 Approximations Based on Posterior Modes
+######################################################
+
+library(LearnBayes)
+
+data(cancermortality)
+
+ fit=laplace(betabinexch,c(-7,6),cancermortality)
+ fit
+
+npar=list(m=fit$mode,v=fit$var)
+mycontour(lbinorm,c(-8,-4.5,3,16.5),npar,
+ xlab="logit eta", ylab="log K")
+
+ se=sqrt(diag(fit$var))
+ fit$mode-1.645*se
+ fit$mode+1.645*se
+
diff --git a/demo/Chapter.5.7.R b/demo/Chapter.5.7.R
new file mode 100644
index 0000000..0dba653
--- /dev/null
+++ b/demo/Chapter.5.7.R
@@ -0,0 +1,8 @@
+#########################################################
+# Section 5.7 Monte Carlo Method for Computing Integrals
+#########################################################
+
+ p=rbeta(1000, 14.26, 23.19)
+ est=mean(p^2)
+ se=sd(p^2)/sqrt(1000)
+ c(est,se)
diff --git a/demo/Chapter.5.8.R b/demo/Chapter.5.8.R
new file mode 100644
index 0000000..46e414b
--- /dev/null
+++ b/demo/Chapter.5.8.R
@@ -0,0 +1,33 @@
+#########################################################
+# Section 5.8 Rejection Sampling
+#########################################################
+
+library(LearnBayes)
+
+data(cancermortality)
+fit=laplace(betabinexch,c(-7,6),cancermortality)
+
+betabinT=function(theta,datapar)
+{
+data=datapar$data
+tpar=datapar$par
+d=betabinexch(theta,data)-dmt(theta,mean=c(tpar$m),
+ S=tpar$var,df=tpar$df,log=TRUE)
+return(d)
+}
+
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+datapar=list(data=cancermortality,par=tpar)
+
+ start=c(-6.9,12.4)
+ fit1=laplace(betabinT,start,datapar)
+ fit1$mode
+
+ betabinT(fit1$mode,datapar)
+
+ theta=rejectsampling(betabinexch,tpar,-569.2813,10000,cancermortality)
+ dim(theta)
+
+ mycontour(betabinexch,c(-8,-4.5,3,16.5),cancermortality,
+ xlab="logit eta",ylab="log K")
+ points(theta[,1],theta[,2])
diff --git a/demo/Chapter.5.9.R b/demo/Chapter.5.9.R
new file mode 100644
index 0000000..dcff7d6
--- /dev/null
+++ b/demo/Chapter.5.9.R
@@ -0,0 +1,48 @@
+#############################################
+# Section 5.9 Importance Sampling
+#############################################
+
+library(LearnBayes)
+
+data(cancermortality)
+
+fit=laplace(betabinexch,c(-7,6),cancermortality)
+
+betabinexch.cond=function (log.K, data)
+{
+eta = exp(-6.818793)/(1 + exp(-6.818793))
+K = exp(log.K)
+y = data[, 1]; n = data[, 2]; N = length(y)
+logf=0*log.K
+for (j in 1:length(y))
+logf = logf + lbeta(K * eta + y[j], K * (1 -
+eta) + n[j] - y[j]) - lbeta(K * eta, K * (1 - eta))
+val = logf + log.K - 2 * log(1 + K)
+return(exp(val-max(val)))
+}
+
+
+I=integrate(betabinexch.cond,2,16,cancermortality)
+par(mfrow=c(2,2))
+curve(betabinexch.cond(x,cancermortality)/I$value,from=3,to=16,
+ylab="Density", xlab="log K",lwd=3, main="Densities")
+curve(dnorm(x,8,2),add=TRUE)
+legend("topright",legend=c("Exact","Normal"),lwd=c(3,1))
+curve(betabinexch.cond(x,cancermortality)/I$value/
+ dnorm(x,8,2),from=3,to=16, ylab="Weight",xlab="log K",
+ main="Weight = g/p")
+
+curve(betabinexch.cond(x,cancermortality)/I$value,from=3,to=16,
+ ylab="Density", xlab="log K",lwd=3, main="Densities")
+curve(1/2*dt(x-8,df=2),add=TRUE)
+legend("topright",legend=c("Exact","T(2)"),lwd=c(3,1))
+curve(betabinexch.cond(x,cancermortality)/I$value/
+ (1/2*dt(x-8,df=2)),from=3,to=16, ylab="Weight",xlab="log K",
+ main="Weight = g/p")
+
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+ myfunc=function(theta)
+ return(theta[2])
+ s=impsampling(betabinexch,tpar,myfunc,10000,cancermortality)
+ cbind(s$est,s$se)
+
diff --git a/demo/Chapter.6.10.R b/demo/Chapter.6.10.R
new file mode 100644
index 0000000..47d8186
--- /dev/null
+++ b/demo/Chapter.6.10.R
@@ -0,0 +1,40 @@
+#############################################################
+# Section 6.10 Analysis of the Stanford Heart Transplant Data
+#############################################################
+
+library(LearnBayes)
+
+data(stanfordheart)
+
+ start=c(0,3,-1)
+ laplacefit=laplace(transplantpost,start,stanfordheart)
+ laplacefit
+
+ proposal=list(var=laplacefit$var,scale=2)
+ s=rwmetrop(transplantpost,proposal,start,10000,stanfordheart)
+ s$accept
+
+ par(mfrow=c(2,2))
+ tau=exp(s$par[,1])
+ plot(density(tau),main="TAU")
+ lambda=exp(s$par[,2])
+ plot(density(lambda),main="LAMBDA")
+ p=exp(s$par[,3])
+ plot(density(p),main="P")
+
+ apply(exp(s$par),2,quantile,c(.05,.5,.95))
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ par(mfrow=c(1,1))
+ t=seq(1,240)
+ p5=0*t; p50=0*t; p95=0*t
+ for (j in 1:240)
+ { S=(lambda/(lambda+t[j]))^p
+ q=quantile(S,c(.05,.5,.95))
+ p5[j]=q[1]; p50[j]=q[2]; p95[j]=q[3]}
+ windows()
+ plot(t,p50,type="l",ylim=c(0,1),ylab="Prob(Survival)",
+ xlab="time")
+ lines(t,p5,lty=2)
+ lines(t,p95,lty=2)
diff --git a/demo/Chapter.6.2.R b/demo/Chapter.6.2.R
new file mode 100644
index 0000000..d43d14b
--- /dev/null
+++ b/demo/Chapter.6.2.R
@@ -0,0 +1,22 @@
+####################################################
+# Section 6.2 Introduction to Discrete Markov Chains
+####################################################
+
+ P=matrix(c(.5,.5,0,0,0,0,.25,.5,.25,0,0,0,0,.25,.5,.25,0,0,
+ 0,0,.25,.5,.25,0,0,0,0,.25,.5,.25,0,0,0,0,.5,.5),
+ nrow=6,ncol=6,byrow=TRUE)
+ P
+
+ s=array(0,c(50000,1))
+
+ s[1]=3
+ for (j in 2:50000)
+ s[j]=sample(1:6,size=1,prob=P[s[j-1],])
+
+ m=c(500,2000,8000,50000)
+ for (i in 1:4)
+ print(table(s[1:m[i]])/m[i])
+
+ w=matrix(c(.1,.2,.2,.2,.2,.1),nrow=1,ncol=6)
+ w%*%P
+
diff --git a/demo/Chapter.6.7.R b/demo/Chapter.6.7.R
new file mode 100644
index 0000000..3105905
--- /dev/null
+++ b/demo/Chapter.6.7.R
@@ -0,0 +1,38 @@
+##################################################################
+# Section 6.7 Learning about a Normal Population from Grouped Data
+##################################################################
+
+library(LearnBayes)
+
+ d=list(int.lo=c(-Inf,seq(66,74,by=2)),
+ int.hi=c(seq(66,74,by=2), Inf),
+ f=c(14,30,49,70,33,15))
+
+y=c(rep(65,14),rep(67,30),rep(69,49),rep(71,70),rep(73,33),
+ rep(75,15))
+ mean(y)
+
+ log(sd(y))
+
+ start=c(70,1)
+ fit=laplace(groupeddatapost,start,d)
+ fit
+
+ modal.sds=sqrt(diag(fit$var))
+
+ proposal=list(var=fit$var,scale=2)
+ fit2=rwmetrop(groupeddatapost,proposal,start,10000,d)
+
+ fit2$accept
+
+ post.means=apply(fit2$par,2,mean)
+ post.sds=apply(fit2$par,2,sd)
+
+ cbind(c(fit$mode),modal.sds)
+
+ cbind(post.means,post.sds)
+
+ mycontour(groupeddatapost,c(69,71,.6,1.3),d,
+ xlab="mu",ylab="log sigma")
+ points(fit2$par[5001:10000,1],fit2$par[5001:10000,2])
+
diff --git a/demo/Chapter.6.8.R b/demo/Chapter.6.8.R
new file mode 100644
index 0000000..dcba058
--- /dev/null
+++ b/demo/Chapter.6.8.R
@@ -0,0 +1,50 @@
+##################################################
+# Section 6.8 Example of Output Analysis
+##################################################
+ library(LearnBayes)
+
+ d=list(int.lo=c(-Inf,seq(66,74,by=2)),
+ int.hi=c(seq(66,74,by=2), Inf),
+ f=c(14,30,49,70,33,15))
+
+ library(coda)
+ library(lattice)
+
+ start=c(70,1)
+ fit=laplace(groupeddatapost,start,d)
+
+ start=c(65,1)
+ proposal=list(var=fit$var,scale=0.2)
+ bayesfit=rwmetrop(groupeddatapost,proposal,start,10000,d)
+
+ dimnames(bayesfit$par)[[2]]=c("mu","log sigma")
+ xyplot(mcmc(bayesfit$par[-c(1:2000),]),col="black")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ par(mfrow=c(2,1))
+ autocorr.plot(mcmc(bayesfit$par[-c(1:2000),]),auto.layout=FALSE)
+ summary(mcmc(bayesfit$par[-c(1:2000),]))
+ batchSE(mcmc(bayesfit$par[-c(1:2000),]), batchSize=50)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ start=c(70,1)
+ proposal=list(var=fit$var,scale=2.0)
+ bayesfit=rwmetrop(groupeddatapost,proposal,start,10000,d)
+
+ dimnames(bayesfit$par)[[2]]=c("mu","log sigma")
+ sim.parameters=mcmc(bayesfit$par[-c(1:2000),])
+ windows()
+ xyplot(mcmc(bayesfit$par[-c(1:2000),]),col="black")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ par(mfrow=c(2,1))
+ autocorr.plot(sim.parameters,auto.layout=FALSE)
+ summary(sim.parameters)
+ batchSE(sim.parameters, batchSize=50)
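+
+# Editor's aside: coda's effectiveSize estimates how many independent
+# draws the autocorrelated chain is worth
+ effectiveSize(sim.parameters)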
+
+
diff --git a/demo/Chapter.6.9.R b/demo/Chapter.6.9.R
new file mode 100644
index 0000000..cc6ac0d
--- /dev/null
+++ b/demo/Chapter.6.9.R
@@ -0,0 +1,58 @@
+###################################################
+# Section 6.9 Modeling Data with Cauchy Errors
+###################################################
+
+library(LearnBayes)
+
+ data(darwin)
+ attach(darwin)
+ mean(difference)
+
+ log(sd(difference))
+
+ laplace(cauchyerrorpost,c(21.6,3.6),difference)
+
+ laplace(cauchyerrorpost,.1*c(21.6,3.6),difference)$mode
+
+ c(24.7-4*sqrt(34.96),24.7+4*sqrt(34.96))
+ c(2.77-4*sqrt(.138),2.77+4*sqrt(.138))
+
+ mycontour(cauchyerrorpost,c(-10,60,1,4.5),difference,
+ xlab="mu",ylab="log sigma")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ fitlaplace=laplace(cauchyerrorpost,c(21.6,3.6), difference)
+ windows()
+ mycontour(lbinorm,c(-10,60,1,4.5),list(m=fitlaplace$mode,
+ v=fitlaplace$var), xlab="mu",ylab="log sigma")
+
+ proposal=list(var=fitlaplace$var,scale=2.5)
+ start=c(20,3)
+ m=1000
+ s=rwmetrop(cauchyerrorpost,proposal,start,m,difference)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ mycontour(cauchyerrorpost,c(-10,60,1,4.5),difference,
+ xlab="mu",ylab="log sigma")
+ points(s$par[,1],s$par[,2])
+
+ fitgrid=simcontour(cauchyerrorpost,c(-10,60,1,4.5),difference,
+ 50000)
+ proposal=list(var=fitlaplace$var,scale=2.5)
+ start=c(20,3)
+ fitrw=rwmetrop(cauchyerrorpost,proposal,start,50000,
+ difference)
+ proposal2=list(var=fitlaplace$var,mu=t(fitlaplace$mode))
+ fitindep=indepmetrop(cauchyerrorpost,proposal2,start,50000,
+ difference)
+ fitgibbs=gibbs(cauchyerrorpost,start,50000,c(12,.75),
+ difference)
+
+ apply(fitrw$par,2,mean)
+
+ apply(fitrw$par,2,sd)
+
+
diff --git a/demo/Chapter.7.10.R b/demo/Chapter.7.10.R
new file mode 100644
index 0000000..732210c
--- /dev/null
+++ b/demo/Chapter.7.10.R
@@ -0,0 +1,38 @@
+#################################################
+# Section 7.10 Posterior Predictive Model Checking
+#################################################
+
+library(LearnBayes)
+ data(hearttransplants)
+ attach(hearttransplants)
+
+ datapar = list(data = hearttransplants, z0 = 0.53)
+
+ start = c(4, -7)
+ fitgibbs = gibbs(poissgamexch, start, 1000, c(1,.15), datapar)
+
+ alpha = exp(fitgibbs$par[, 1])
+ mu = exp(fitgibbs$par[, 2])
+
+ lam94=rgamma(1000,y[94]+alpha,e[94]+alpha/mu)
+
+ ys94=rpois(1000,e[94]*lam94)
+
+ hist(ys94,breaks=seq(-0.5,max(ys94)+0.5))
+ lines(y[94]*c(1,1),c(0,100),lwd=3)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+prob.out=function(i)
+{
+ lami=rgamma(1000,y[i]+alpha,e[i]+alpha/mu)
+ ysi=rpois(1000,e[i]*lami)
+ pleft=sum(ysi<=y[i])/1000
+ pright=sum(ysi>=y[i])/1000
+ min(pleft,pright)
+ }
+pout.exchange=sapply(1:94,prob.out)
+
+# pout, the tail probabilities under the equal-means model, is
+# recomputed here as in the Section 7.4 demo
+ lambda=rgamma(1000,shape=277,rate=294681)
+ prob.out.eq=function(i)
+ {
+  ysi=rpois(1000,e[i]*lambda)
+  min(sum(ysi<=y[i]),sum(ysi>=y[i]))/1000
+ }
+ pout=sapply(1:94,prob.out.eq)
+
+ windows()
+ plot(pout,pout.exchange,xlab="P(extreme), equal means",
+ ylab="P(extreme), exchangeable")
+ abline(0,1)
+
+
diff --git a/demo/Chapter.7.2.R b/demo/Chapter.7.2.R
new file mode 100644
index 0000000..5db614a
--- /dev/null
+++ b/demo/Chapter.7.2.R
@@ -0,0 +1,28 @@
+#####################################################
+# Section 7.2 Introduction to Hierarchical Modeling
+#####################################################
+
+library(LearnBayes)
+library(lattice)
+
+data(sluggerdata)
+
+# fit logistic model for home run data for a particular player
+
+logistic.fit=function(player)
+{
+d=subset(sluggerdata,Player==player)
+x=d$Age; x2=d$Age^2
+response=cbind(d$HR, d$AB-d$HR)
+list(Age=x, p=glm(response~x+x2,family=binomial)$fitted)
+}
+
+names=unique(sluggerdata$Player); newdata=NULL
+for (j in 1:9)
+{
+ fit=logistic.fit(as.character(names[j]))
+ newdata=rbind(newdata,data.frame(as.character(names[j]),fit$Age,fit$p))
+}
+names(newdata)=c("Player","Age","Fitted")
+xyplot(Fitted~Age|Player, data=newdata, type="l",lwd=3,col="black")
+
diff --git a/demo/Chapter.7.3.R b/demo/Chapter.7.3.R
new file mode 100644
index 0000000..7798e98
--- /dev/null
+++ b/demo/Chapter.7.3.R
@@ -0,0 +1,10 @@
+##############################################
+# Section 7.3 Individual or Combined Estimates
+##############################################
+
+ library(LearnBayes)
+ data(hearttransplants)
+ attach(hearttransplants)
+
+ plot(log(e), y/e, xlim=c(6,9.7), xlab="log(e)", ylab="y/e")
+ text(log(e),y/e,labels=as.character(y),pos=4)
diff --git a/demo/Chapter.7.4.R b/demo/Chapter.7.4.R
new file mode 100644
index 0000000..37f1584
--- /dev/null
+++ b/demo/Chapter.7.4.R
@@ -0,0 +1,30 @@
+##############################################
+# Section 7.4 Equal Mortality Rates?
+##############################################
+
+library(LearnBayes)
+data(hearttransplants)
+attach(hearttransplants)
+
+sum(y)
+sum(e)
+
+ lambda=rgamma(1000,shape=277,rate=294681)
+ ys94=rpois(1000,e[94]*lambda)
+
+ hist(ys94,breaks=seq(0.5,max(ys94)+0.5))
+ lines(c(y[94],y[94]),c(0,120),lwd=3)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+lambda=rgamma(1000,shape=277,rate=294681)
+prob.out=function(i)
+{
+ ysi=rpois(1000,e[i]*lambda)
+ pleft=sum(ysi<=y[i])/1000
+ pright=sum(ysi>=y[i])/1000
+ min(pleft,pright)
+ }
+pout=sapply(1:94,prob.out)
+windows()
+plot(log(e),pout,ylab="Prob(extreme)")
diff --git a/demo/Chapter.7.5.R b/demo/Chapter.7.5.R
new file mode 100644
index 0000000..fcf05e4
--- /dev/null
+++ b/demo/Chapter.7.5.R
@@ -0,0 +1,18 @@
+########################################################
+# Section 7.5 Modeling a Prior Belief of Exchangeability
+########################################################
+
+library(LearnBayes)
+
+pgexchprior=function(lambda,pars)
+{
+alpha=pars[1]; a=pars[2]; b=pars[3]
+(alpha-1)*log(prod(lambda))-(2*alpha+a)*log(alpha*sum(lambda)+b)
+}
+
+alpha=c(5,20,80,400); par(mfrow=c(2,2))
+for (j in 1:4)
+ mycontour(pgexchprior,c(.001,5,.001,5),c(alpha[j],10,10),
+ main=paste("ALPHA = ",alpha[j]),xlab="LAMBDA 1",ylab="LAMBDA 2")
+
+
diff --git a/demo/Chapter.7.7.R b/demo/Chapter.7.7.R
new file mode 100644
index 0000000..d476df7
--- /dev/null
+++ b/demo/Chapter.7.7.R
@@ -0,0 +1,51 @@
+#########################################################
+# Section 7.7 Simulating from the Posterior
+#########################################################
+
+ library(LearnBayes)
+ data(hearttransplants)
+ attach(hearttransplants)
+
+ datapar = list(data = hearttransplants, z0 = 0.53)
+ start=c(2, -7)
+ fit = laplace(poissgamexch, start, datapar)
+ fit
+
+ par(mfrow = c(1, 1))
+ mycontour(poissgamexch, c(0, 8, -7.3, -6.6), datapar,
+ xlab="log alpha",ylab="log mu")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ start = c(4, -7)
+ fitgibbs = gibbs(poissgamexch, start, 1000, c(1,.15), datapar)
+ fitgibbs$accept
+
+ windows()
+ mycontour(poissgamexch, c(0, 8, -7.3, -6.6), datapar,
+ xlab="log alpha",ylab="log mu")
+ points(fitgibbs$par[, 1], fitgibbs$par[, 2])
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ plot(density(fitgibbs$par[, 1], bw = 0.2))
+
+ alpha = exp(fitgibbs$par[, 1])
+ mu = exp(fitgibbs$par[, 2])
+ lam1 = rgamma(1000, y[1] + alpha, e[1] + alpha/mu)
+
+ alpha = exp(fitgibbs$par[, 1])
+ mu = exp(fitgibbs$par[, 2])
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ plot(log(e), y/e, pch = as.character(y))
+ for (i in 1:94) {
+ lami = rgamma(1000, y[i] + alpha, e[i] + alpha/mu)
+ probint = quantile(lami, c(0.05, 0.95))
+ lines(log(e[i]) * c(1, 1), probint)
+ }
+
+
diff --git a/demo/Chapter.7.8.R b/demo/Chapter.7.8.R
new file mode 100644
index 0000000..88cf978
--- /dev/null
+++ b/demo/Chapter.7.8.R
@@ -0,0 +1,53 @@
+##########################################################
+# Section 7.8 Posterior Inferences
+##########################################################
+
+ library(LearnBayes)
+ data(hearttransplants)
+ attach(hearttransplants)
+
+ datapar = list(data = hearttransplants, z0 = 0.53)
+ start=c(2, -7)
+ fit = laplace(poissgamexch, start, datapar)
+ fit
+
+ par(mfrow = c(1, 1))
+ mycontour(poissgamexch, c(0, 8, -7.3, -6.6), datapar,
+ xlab="log alpha",ylab="log mu")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ start = c(4, -7)
+ fitgibbs = gibbs(poissgamexch, start, 1000, c(1,.15), datapar)
+
+alpha = exp(fitgibbs$par[, 1])
+ mu = exp(fitgibbs$par[, 2])
+
+shrink=function(i) mean(alpha/(alpha + e[i] * mu))
+ shrinkage=sapply(1:94, shrink)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ plot(log(e), shrinkage)
+
+ mrate=function(i) mean(rgamma(1000, y[i] + alpha, e[i] + alpha/mu))
+ hospital=1:94
+ meanrate=sapply(hospital,mrate)
+ hospital[meanrate==min(meanrate)]
+
+###########################################################
+
+sim.lambda=function(i) rgamma(1000,y[i]+alpha,e[i]+alpha/mu)
+LAM=sapply(1:94,sim.lambda)
+
+compare.rates <- function(x) {
+ nc <- NCOL(x)
+ ij <- as.matrix(expand.grid(1:nc, 1:nc))
+ m <- as.matrix(x[,ij[,1]] > x[,ij[,2]])
+ matrix(colMeans(m), nc, nc, byrow = TRUE)
+}
+
+better=compare.rates(LAM)
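+# better[i,j] estimates the posterior probability that hospital j's
+# mortality rate exceeds hospital i's; below, hospital 85 is compared
+# with hospitals 1 through 24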
+
+better[1:24,85]
diff --git a/demo/Chapter.7.9.R b/demo/Chapter.7.9.R
new file mode 100644
index 0000000..691df7b
--- /dev/null
+++ b/demo/Chapter.7.9.R
@@ -0,0 +1,54 @@
+#################################################
+# Section 7.9 Bayesian Sensitivity Analysis
+#################################################
+
+ library(LearnBayes)
+ data(hearttransplants)
+ attach(hearttransplants)
+
+ datapar = list(data = hearttransplants, z0 = 0.53)
+
+ start = c(4, -7)
+ fitgibbs = gibbs(poissgamexch, start, 1000, c(1,.15), datapar)
+
+
+sir.old.new=function(theta, prior, prior.new)
+{
+log.g=log(prior(theta))
+log.g.new=log(prior.new(theta))
+wt=exp(log.g.new-log.g-max(log.g.new-log.g))
+probs=wt/sum(wt)
+n=length(probs)
+indices=sample(1:n,size=n,prob=probs,replace=TRUE)
+theta[indices]
+}
+
+prior=function(theta)
+0.53*exp(theta)/(exp(theta)+0.53)^2
+prior.new=function(theta)
+5*exp(theta)/(exp(theta)+5)^2
+
+log.alpha=fitgibbs$par[, 1]
+log.alpha.new=sir.old.new(log.alpha, prior, prior.new)
+
+############ drawing figure
+
+library(lattice)
+draw.graph=function()
+{
+LOG.ALPHA=data.frame("prior",log.alpha)
+names(LOG.ALPHA)=c("Prior","log.alpha")
+LOG.ALPHA.NEW=data.frame("new.prior",log.alpha.new)
+names(LOG.ALPHA.NEW)=c("Prior","log.alpha")
+D=densityplot(~log.alpha,group=Prior,data=rbind(LOG.ALPHA,LOG.ALPHA.NEW),
+ plot.points=FALSE,main="Original Prior and Posterior (solid),
+ New Prior and Posterior (dashed)",
+ lwd=4,adjust=2,lty=c(1,2),xlab="log alpha",xlim=c(-3,5),col="black")
+update(D, panel=function(...){
+ panel.curve(prior(x),lty=1,lwd=2,col="black")
+ panel.curve(prior.new(x),lty=2, lwd=2,col="black")
+ panel.densityplot(...)
+})}
+
+draw.graph()
+
diff --git a/demo/Chapter.8.3.R b/demo/Chapter.8.3.R
new file mode 100644
index 0000000..63503f6
--- /dev/null
+++ b/demo/Chapter.8.3.R
@@ -0,0 +1,39 @@
+###############################################
+# Section 8.3 A One-Sided Test of a Normal Mean
+###############################################
+
+library(LearnBayes)
+
+ pmean=170; pvar=25
+ probH=pnorm(175,pmean,sqrt(pvar))
+ probA=1-probH
+ prior.odds=probH/probA
+ prior.odds
+
+ weights=c(182, 172, 173, 176, 176, 180, 173, 174, 179, 175)
+ xbar=mean(weights)
+ sigma2=3^2/length(weights)
+
+ post.precision=1/sigma2+1/pvar
+ post.var=1/post.precision
+
+ post.mean=(xbar/sigma2+pmean/pvar)/post.precision
+ c(post.mean,sqrt(post.var))
+
+ post.odds=pnorm(175,post.mean,sqrt(post.var))/
+ (1-pnorm(175,post.mean,sqrt(post.var)))
+ post.odds
+
+ BF = post.odds/prior.odds
+ BF
+
+ postH=probH*BF/(probH*BF+probA)
+ postH
+
+ z=sqrt(length(weights))*(mean(weights)-175)/3
+ 1-pnorm(z)
+
+ weights=c(182, 172, 173, 176, 176, 180, 173, 174, 179, 175)
+ data=c(mean(weights),length(weights),3)
+ prior.par=c(170,1000)
+ mnormt.onesided(175,prior.par,data)
diff --git a/demo/Chapter.8.4.R b/demo/Chapter.8.4.R
new file mode 100644
index 0000000..b837e3a
--- /dev/null
+++ b/demo/Chapter.8.4.R
@@ -0,0 +1,10 @@
+#################################################
+# Section 8.4 A Two-Sided Test of a Normal Mean
+#################################################
+
+library(LearnBayes)
+
+ weights=c(182, 172, 173, 176, 176, 180, 173, 174, 179, 175)
+ data=c(mean(weights),length(weights),3)
+ t=c(.5,1,2,4,8)
+ mnormt.twosided(170,.5,t,data)
diff --git a/demo/Chapter.8.6.R b/demo/Chapter.8.6.R
new file mode 100644
index 0000000..3eadc70
--- /dev/null
+++ b/demo/Chapter.8.6.R
@@ -0,0 +1,22 @@
+#################################################
+# Section 8.6 Models for Soccer Goals
+#################################################
+
+library(LearnBayes)
+
+ data(soccergoals)
+ attach(soccergoals)
+ datapar=list(data=goals,par=c(4.57,1.43))
+ fit1=laplace(logpoissgamma,.5,datapar)
+ datapar=list(data=goals,par=c(1,.5))
+ fit2=laplace(logpoissnormal,.5,datapar)
+ datapar=list(data=goals,par=c(2,.5))
+ fit3=laplace(logpoissnormal,.5,datapar)
+ datapar=list(data=goals,par=c(1,2))
+ fit4=laplace(logpoissnormal,.5,datapar)
+
+ postmode=c(fit1$mode,fit2$mode,fit3$mode,fit4$mode)
+ postsd=sqrt(c(fit1$var,fit2$var,fit3$var,fit4$var))
+ logmarg=c(fit1$int,fit2$int,fit3$int,fit4$int)
+ cbind(postmode,postsd,logmarg)
+
diff --git a/demo/Chapter.8.7.R b/demo/Chapter.8.7.R
new file mode 100644
index 0000000..b64072a
--- /dev/null
+++ b/demo/Chapter.8.7.R
@@ -0,0 +1,19 @@
+###################################################
+# Section 8.7 Is a Baseball Hitter Really Streaky?
+###################################################
+
+library(LearnBayes)
+
+ data(jeter2004)
+ attach(jeter2004)
+ data=cbind(H,AB)
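+# regroup collapses the game-by-game (H, AB) records into consecutive
+# 5-game periods before testing for streakiness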
+ data1=regroup(data,5)
+
+ log.marg=function(logK)
+ laplace(bfexch,0,list(data=data1,K=exp(logK)))$int
+
+ log.K=seq(2,6)
+ K=exp(log.K)
+ log.BF=sapply(log.K,log.marg)
+ BF=exp(log.BF)
+ round(data.frame(log.K,K,log.BF,BF),2)
\ No newline at end of file
diff --git a/demo/Chapter.8.8.R b/demo/Chapter.8.8.R
new file mode 100644
index 0000000..d061052
--- /dev/null
+++ b/demo/Chapter.8.8.R
@@ -0,0 +1,23 @@
+###################################################################
+# Section 8.8 A Test of Independence in a Two-Way Contingency Table
+###################################################################
+
+library(LearnBayes)
+
+ data=matrix(c(11,9,68,23,3,5),nrow=2,ncol=3)
+ data
+
+ chisq.test(data)
+
+ a=matrix(rep(1,6),nrow=2,ncol=3)
+ a
+
+ ctable(data,a)
+
+ log.K=seq(2,7)
+ compute.log.BF=function(log.K)
+ log(bfindep(data,exp(log.K),100000)$bf)
+ log.BF=sapply(log.K,compute.log.BF)
+ BF=exp(log.BF)
+
+round(data.frame(log.K,log.BF,BF),2)
\ No newline at end of file
diff --git a/demo/Chapter.9.2.R b/demo/Chapter.9.2.R
new file mode 100644
index 0000000..be98e6d
--- /dev/null
+++ b/demo/Chapter.9.2.R
@@ -0,0 +1,111 @@
+################################
+# Section 9.2.6 An Example
+################################
+
+library(LearnBayes)
+
+ data(birdextinct)
+ attach(birdextinct)
+ logtime=log(time)
+ plot(nesting,logtime)
+ out = (logtime > 3)
+ text(nesting[out], logtime[out], label=species[out], pos = 2)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ plot(jitter(size),logtime,xaxp=c(0,1,1))
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ plot(jitter(status),logtime,xaxp=c(0,1,1))
+
+##### Least-squares fit
+
+ fit=lm(logtime~nesting+size+status,data=birdextinct,x=TRUE,y=TRUE)
+ summary(fit)
+
+##### Sampling from posterior
+
+ theta.sample=blinreg(fit$y,fit$x,5000)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+ windows()
+ par(mfrow=c(2,2))
+ hist(theta.sample$beta[,2],main="NESTING",
+ xlab=expression(beta[1]))
+ hist(theta.sample$beta[,3],main="SIZE",
+ xlab=expression(beta[2]))
+ hist(theta.sample$beta[,4],main="STATUS",
+ xlab=expression(beta[3]))
+ hist(theta.sample$sigma,main="ERROR SD",
+ xlab=expression(sigma))
+
+ apply(theta.sample$beta,2,quantile,c(.05,.5,.95))
+
+ quantile(theta.sample$sigma,c(.05,.5,.95))
+
+S=readline(prompt="Type <Return> to continue : ")
+
+###### Estimating mean extinction times
+
+ cov1=c(1,4,0,0)
+ cov2=c(1,4,1,0)
+ cov3=c(1,4,0,1)
+ cov4=c(1,4,1,1)
+ X1=rbind(cov1,cov2,cov3,cov4)
+ mean.draws=blinregexpected(X1,theta.sample)
+
+ c.labels=c("A","B","C","D")
+ windows()
+ par(mfrow=c(2,2))
+ for (j in 1:4)
+ hist(mean.draws[,j],
+ main=paste("Covariate set",c.labels[j]),xlab="log TIME")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+######## Predicting extinction times
+
+ cov1=c(1,4,0,0)
+ cov2=c(1,4,1,0)
+ cov3=c(1,4,0,1)
+ cov4=c(1,4,1,1)
+ X1=rbind(cov1,cov2,cov3,cov4)
+ pred.draws=blinregpred(X1,theta.sample)
+
+ c.labels=c("A","B","C","D")
+ windows()
+ par(mfrow=c(2,2))
+ for (j in 1:4)
+ hist(pred.draws[,j],
+ main=paste("Covariate set",c.labels[j]),xlab="log TIME")
+
+S=readline(prompt="Type <Return> to continue : ")
+
+######### Model checking via posterior predictive distribution
+
+ pred.draws=blinregpred(fit$x,theta.sample)
+ pred.sum=apply(pred.draws,2,quantile,c(.05,.95))
+ par(mfrow=c(1,1))
+ ind=1:length(logtime)
+ windows()
+ matplot(rbind(ind,ind),pred.sum,type="l",lty=1,col=1,
+ xlab="INDEX",ylab="log TIME")
+ points(ind,logtime,pch=19)
+ out=(logtime>pred.sum[2,])
+ text(ind[out], logtime[out], label=species[out], pos = 4)
+
+S=readline(prompt="Type <Return> to continue : ")
+
+######### Model checking via bayes residuals
+
+ prob.out=bayesresiduals(fit,theta.sample,2)
+ windows()
+ par(mfrow=c(1,1))
+ plot(nesting,prob.out)
+ out = (prob.out > 0.35)
+ text(nesting[out], prob.out[out], label=species[out], pos = 4)
+
diff --git a/demo/Chapter.9.3.R b/demo/Chapter.9.3.R
new file mode 100644
index 0000000..3b4da11
--- /dev/null
+++ b/demo/Chapter.9.3.R
@@ -0,0 +1,38 @@
+##############################################
+# Section 9.3 Modeling Using Zellner's g Prior
+##############################################
+
+library(LearnBayes)
+
+# illustrating the role of the parameter c
+
+data(puffin)
+X=cbind(1, puffin$Distance - mean(puffin$Distance))
+c.prior=c(0.1,0.5,5,2)
+fit=vector("list",4)
+for (j in 1:4)
+{
+ prior=list(b0=c(8,0), c0=c.prior[j])
+ fit[[j]]=blinreg(puffin$Nest, X, 1000, prior)
+}
+BETA=NULL
+for (j in 1:4)
+ {
+ s=data.frame(Prior=paste("c =",as.character(c.prior[j])),
+ beta0=fit[[j]]$beta[,1],beta1=fit[[j]]$beta[,2])
+ BETA=rbind(BETA,s)
+ }
+library(lattice)
+with(BETA,xyplot(beta1~beta0|Prior,type=c("p","g"),col="black"))
+
+S=readline(prompt="Type <Return> to continue : ")
+
+# model selection
+
+data=list(y=puffin$Nest, X=cbind(1,puffin$Grass,puffin$Soil))
+prior=list(b0=c(0,0,0), c0=100)
+beta.start=with(puffin,lm(Nest~Grass+Soil)$coef)
+laplace(reg.gprior.post,c(beta.start,0),list(data=data,prior=prior))$int
+
+X=puffin[,-1]; y=puffin$Nest; c=100
+bayes.model.selection(y,X,c,constant=FALSE)
diff --git a/demo/Chapter.9.4.R b/demo/Chapter.9.4.R
new file mode 100644
index 0000000..d8d082c
--- /dev/null
+++ b/demo/Chapter.9.4.R
@@ -0,0 +1,28 @@
+##############################################
+# Section 9.4 Survival Modeling
+##############################################
+
+ library(LearnBayes)
+
+ data(chemotherapy)
+ attach(chemotherapy)
+ library(survival)
+ survreg(Surv(time,status)~factor(treat)+age,dist="weibull")
+
+ start=c(-.5,9,.5,-.05)
+ d=cbind(time,status,treat-1,age)
+ fit=laplace(weibullregpost,start,d)
+ fit
+
+ proposal=list(var=fit$var,scale=1.5)
+ bayesfit=rwmetrop(weibullregpost,proposal,fit$mode,10000,d)
+ bayesfit$accept
+
+ par(mfrow=c(2,2))
+ sigma=exp(bayesfit$par[,1])
+ mu=bayesfit$par[,2]
+ beta1=bayesfit$par[,3]
+ beta2=bayesfit$par[,4]
+ hist(beta1,xlab="treatment",main="")
+ hist(beta2,xlab="age",main="")
+ hist(sigma,xlab="sigma",main="")
diff --git a/inst/doc/BayesFactors.R b/inst/doc/BayesFactors.R
new file mode 100644
index 0000000..c0746ea
--- /dev/null
+++ b/inst/doc/BayesFactors.R
@@ -0,0 +1,63 @@
+### R code from vignette source 'BayesFactors.Rnw'
+
+###################################################
+### code chunk number 1: BayesFactors.Rnw:34-46
+###################################################
+fire.counts <- c(75, 88, 84, 99, 79, 68, 86, 109, 73, 85, 101, 85,
+ 75, 81, 64, 77, 83, 83, 88, 83, 78, 83, 78, 80,
+ 82, 90, 74, 72, 69, 72, 76, 76, 104, 86, 92, 88)
+hist(fire.counts, probability=TRUE, ylim=c(0, .08))
+x <- 60:110
+lines(x, dpois(x, lambda=mean(fire.counts)), col="red")
+lines(x, dnorm(x, mean=mean(fire.counts), sd=12), col="blue")
+lines(x, dnorm(x, mean=mean(fire.counts), sd=6), col="green")
+legend("topright", legend=c("M1: Poisson(theta)",
+ "M2: N(theta, 12)",
+ "M3: N(theta, 6)"),
+ col=c("red", "blue", "green"), lty=1)
+
+
+###################################################
+### code chunk number 2: BayesFactors.Rnw:67-71
+###################################################
+model.1 <- function(theta, y){
+  # log posterior = Poisson log likelihood + Gamma(280, 4) log prior
+  sum(dpois(y, theta, log=TRUE)) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+
+
+###################################################
+### code chunk number 3: BayesFactors.Rnw:74-77
+###################################################
+library(LearnBayes)
+log.pred.1 <- laplace(model.1, 80, fire.counts)$int
+log.pred.1
+
+
+###################################################
+### code chunk number 4: BayesFactors.Rnw:81-91
+###################################################
+model.2 <- function(theta, y){
+  sum(dnorm(y, theta, 12, log=TRUE)) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+model.3 <- function(theta, y){
+  sum(dnorm(y, theta, 6, log=TRUE)) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+log.pred.2 <- laplace(model.2, 80, fire.counts)$int
+log.pred.3 <- laplace(model.3, 80, fire.counts)$int
+
+
+###################################################
+### code chunk number 5: BayesFactors.Rnw:95-96
+###################################################
+data.frame(Model=1:3, log.pred=c(log.pred.1, log.pred.2, log.pred.3))
+
+
+###################################################
+### code chunk number 6: BayesFactors.Rnw:99-100
+###################################################
+exp(log.pred.1 - log.pred.3)
+
+
diff --git a/inst/doc/BayesFactors.Rnw b/inst/doc/BayesFactors.Rnw
new file mode 100644
index 0000000..ade6c96
--- /dev/null
+++ b/inst/doc/BayesFactors.Rnw
@@ -0,0 +1,107 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Bayes Factors}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Bayes Factors}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Models for Fire Calls}
+
+To motivate the discussion of plausible models, the website \newline {\tt http://www.franklinvillefire.org/callstatistics.htm} gives the number of fire calls for each month in Franklinville, NC for the last several years.
+
+
+Suppose we observe the fire call counts $y_1, ..., y_N$ for $N$ consecutive months. Here is a general model for these data.
+\begin{itemize}
+\item $y_1, ..., y_N$ are independent $f(y | \theta)$
+\item $\theta$ has a prior $g(\theta)$
+\end{itemize}
+Also suppose we have some prior beliefs about the mean fire count $E(y)$. We believe that this mean is about 70 and the standard deviation of this guess is 10.
+Given this general model structure, we have to think of possible choices for $f$, the sampling density. We think of the popular distributions, say Poisson, normal, exponential, etc. We should also think about different choices for the prior density. There are many possible choices -- we typically choose one that can represent our prior information.
+
+Once we decide on several plausible choices of sampling density and prior, we'll compare the models by Bayes factors. To do this, we compute the prior predictive density of the actual data for each possible model. The Laplace method provides a convenient and accurate approximation to the logarithm of the predictive density, and we'll use the function {\tt laplace} from the {\tt LearnBayes} package.
+
+Continuing our example, suppose our prior belief about the mean count of fire calls $\theta$ is represented by a Gamma(280, 4) density. (Essentially this says that our prior guess at $\theta$ is 70 and the prior standard deviation is about 4.2.) But we're unsure about the sampling model -- it could be (model $M_1$) Poisson($\theta$), (model $M_2$) normal with mean $\theta$ and standard deviation 12, or (model $M_3$) normal with mean $\theta$ and standard deviation 6.
+
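+As a quick check (an editorial aside), the implied prior mean and standard deviation of the Gamma(280, 4) density are:
+<<>>=
+c(mean=280 / 4, sd=sqrt(280) / 4)
+@
+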
+To get some sense of the best sampling model, a histogram of the fire call counts is graphed below. I have overlaid fitted Poisson and normal distributions, where I estimate $\theta$ by the sample mean.
+The Poisson model appears to be the best fit, followed by the normal model with standard deviation 6 and the normal model with standard deviation 12. We want to formalize this comparison by computing Bayes factors.
+
+<<fig=TRUE,echo=TRUE>>=
+fire.counts <- c(75, 88, 84, 99, 79, 68, 86, 109, 73, 85, 101, 85,
+ 75, 81, 64, 77, 83, 83, 88, 83, 78, 83, 78, 80,
+ 82, 90, 74, 72, 69, 72, 76, 76, 104, 86, 92, 88)
+hist(fire.counts, probability=TRUE, ylim=c(0, .08))
+x <- 60:110
+lines(x, dpois(x, lambda=mean(fire.counts)), col="red")
+lines(x, dnorm(x, mean=mean(fire.counts), sd=12), col="blue")
+lines(x, dnorm(x, mean=mean(fire.counts), sd=6), col="green")
+legend("topright", legend=c("M1: Poisson(theta)",
+ "M2: N(theta, 12)",
+ "M3: N(theta, 6)"),
+ col=c("red", "blue", "green"), lty=1)
+@
+
+\section*{Bayesian Model Comparison}
+
+Under the general model, the predictive density of $y$ is given by the integral
+$$
+f(y) = \int \prod_{j=1}^N f(y_j | \theta) g(\theta) d\theta.
+$$
+This density can be approximated by the Laplace method implemented in the {\tt laplace} function.
+
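+Since $\theta$ is one-dimensional here, one can also check the approximation by direct quadrature. The sketch below is an editorial aside, not part of the original vignette; it approximates $f(y)$ for the Poisson model $M_1$ with the {\tt integrate} function:
+<<eval=FALSE>>=
+f.y <- integrate(function(theta)
+  sapply(theta, function(t)
+    prod(dpois(fire.counts, t)) * dgamma(t, shape=280, rate=4)),
+  lower=50, upper=120)
+log(f.y$value)  # should be close to the laplace() component int
+@
+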
+One compares the suitability of two Bayesian models by comparing the corresponding values of the predictive density. The Bayes factor in support of model $M_1$ over model $M_2$ is given by the ratio
+$$
+BF_{12} = \frac{f_1(y)}{f_2(y)}.
+$$
+Computationally, it is convenient to compute the predictive densities on the log scale, so the Bayes factor can be expressed as
+$$
+BF_{12} = \exp \left(\log f_1(y) - \log f_2(y)\right).
+$$
+
+To compute the predictive density for a model, say model $M_1$, we initially define a function {\tt model.1} that computes the log posterior.
+<<>>=
+model.1 <- function(theta, y){
+  # log posterior = Poisson log likelihood + Gamma(280, 4) log prior
+  sum(dpois(y, theta, log=TRUE)) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+@
+Then the log predictive density at $y$ is computed with the {\tt laplace} function, whose inputs are the function name, a guess at the posterior mode, and the data (the vector of fire call counts). The component {\tt int} gives the log of $f(y)$.
+<<>>=
+library(LearnBayes)
+log.pred.1 <- laplace(model.1, 80, fire.counts)$int
+log.pred.1
+@
+
+We similarly find the predictive densities of the models $M_2$ and $M_3$ by defining functions for the corresponding posteriors and using {\tt laplace}:
+<<>>=
+model.2 <- function(theta, y){
+  sum(dnorm(y, theta, 12, log=TRUE)) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+model.3 <- function(theta, y){
+  sum(dnorm(y, theta, 6, log=TRUE)) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+log.pred.2 <- laplace(model.2, 80, fire.counts)$int
+log.pred.3 <- laplace(model.3, 80, fire.counts)$int
+@
+
+Displaying the three models and their log predictive densities, we see that model $M_1$ is preferred to $M_3$, which is preferred to model $M_2$.
+<<>>=
+data.frame(Model=1:3, log.pred=c(log.pred.1, log.pred.2, log.pred.3))
+@
+The Bayes factor in support of model $M_1$ over model $M_3$ is given by
+<<>>=
+exp(log.pred.1 - log.pred.3)
+@
+
+
+
+
+
+\end{document}
\ No newline at end of file
diff --git a/inst/doc/BayesFactors.pdf b/inst/doc/BayesFactors.pdf
new file mode 100644
index 0000000..238244b
Binary files /dev/null and b/inst/doc/BayesFactors.pdf differ
diff --git a/inst/doc/BinomialInference.R b/inst/doc/BinomialInference.R
new file mode 100644
index 0000000..54d6443
--- /dev/null
+++ b/inst/doc/BinomialInference.R
@@ -0,0 +1,40 @@
+### R code from vignette source 'BinomialInference.Rnw'
+
+###################################################
+### code chunk number 1: BinomialInference.Rnw:17-20
+###################################################
+library(LearnBayes)
+beta.par <- beta.select(list(p=0.5, x=0.2), list(p=0.75, x=.28))
+beta.par
+
+
+###################################################
+### code chunk number 2: BinomialInference.Rnw:32-33
+###################################################
+triplot(beta.par, c(6, 4))
+
+
+###################################################
+### code chunk number 3: BinomialInference.Rnw:40-43
+###################################################
+beta.post.par <- beta.par + c(6, 4)
+post.sample <- rbeta(1000, beta.post.par[1], beta.post.par[2])
+quantile(post.sample, c(0.05, 0.95))
+
+
+###################################################
+### code chunk number 4: BinomialInference.Rnw:50-51
+###################################################
+predplot(beta.par, 10, 6)
+
+
+###################################################
+### code chunk number 5: BinomialInference.Rnw:60-65
+###################################################
+n <- 20
+s <- 0:n
+pred.probs <- pbetap(beta.par, n, s)
+plot(s, pred.probs, type="h")
+discint(cbind(s, pred.probs), 0.90)
+
+
diff --git a/inst/doc/BinomialInference.Rnw b/inst/doc/BinomialInference.Rnw
new file mode 100644
index 0000000..b0a3834
--- /dev/null
+++ b/inst/doc/BinomialInference.Rnw
@@ -0,0 +1,70 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Learning About a Binomial Proportion}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Learning About a Binomial Proportion}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Constructing a Beta Prior}
+
+Suppose we are interested in the proportion $p$ of sunny days in my town. The function {\tt beta.select} is a convenient tool for specifying a beta prior based on knowledge of two prior quantiles. Suppose my prior median for the proportion of sunny days is $.2$ and my 75th percentile is $.28$.
+<<>>=
+library(LearnBayes)
+beta.par <- beta.select(list(p=0.5, x=0.2), list(p=0.75, x=.28))
+beta.par
+@
+A beta(2.95, 10.82) prior matches this prior information.
+
+\section*{Updating with Data}
+
+Next, I observe the weather for 10 days and record 6 sunny days. (There are 6 ``successes'' and 4 ``failures''.) The posterior distribution is beta with shape parameters 2.95 + 6 and 10.82 + 4.
+
+\section*{Triplot}
+
+The {\tt triplot} function shows the prior, likelihood, and posterior on the same display; the inputs are the vector of prior parameters and the data vector.
+
+<<fig=TRUE,echo=TRUE>>=
+triplot(beta.par, c(6, 4))
+@
+
+\section*{Simulating from Posterior to Perform Inference}
+
+One can perform inference about the proportion $p$ by simulating a large number of draws from the posterior and summarizing the simulated sample. Here the {\tt rbeta} function is used to simulate from the beta posterior and the {\tt quantile} function is used to construct a 90 percent probability interval for $p$.
+
+<<>>=
+beta.post.par <- beta.par + c(6, 4)
+post.sample <- rbeta(1000, beta.post.par[1], beta.post.par[2])
+quantile(post.sample, c(0.05, 0.95))
+@
+
+\section*{Predictive Checking}
+
+One can check the suitability of this model by means of a predictive check. The function {\tt predplot} displays the prior predictive density for the number of successes and overlays the observed number of successes.
+
+<<fig=TRUE,echo=TRUE>>=
+predplot(beta.par, 10, 6)
+@
+
+The observed number of successes is in the tail of the predictive distribution, suggesting some incompatibility between the prior information and the sample.
+
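+One can quantify this check (an editorial aside) by computing the prior predictive probability of six or more successes, using the {\tt pbetap} function introduced in the next section:
+<<eval=FALSE>>=
+sum(pbetap(beta.par, 10, 6:10))
+@
+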
+\section*{Prediction of a Future Sample}
+
+Suppose we want to predict the number of sunny days in the next 20 days. The function {\tt pbetap} computes the posterior predictive distribution with a beta prior. The inputs are the vector of beta prior parameters, the future sample size, and the vector of possible numbers of successes in the future sample.
+
+<<fig=TRUE,echo=TRUE>>=
+n <- 20
+s <- 0:n
+pred.probs <- pbetap(beta.par, n, s)
+plot(s, pred.probs, type="h")
+discint(cbind(s, pred.probs), 0.90)
+@
+
+The probability that we will observe between 0 and 8 successes in the future sample is .92.
+
+\end{document}
\ No newline at end of file
diff --git a/inst/doc/BinomialInference.pdf b/inst/doc/BinomialInference.pdf
new file mode 100644
index 0000000..dbc746d
Binary files /dev/null and b/inst/doc/BinomialInference.pdf differ
diff --git a/inst/doc/DiscreteBayes.R b/inst/doc/DiscreteBayes.R
new file mode 100644
index 0000000..bdfd296
--- /dev/null
+++ b/inst/doc/DiscreteBayes.R
@@ -0,0 +1,91 @@
+### R code from vignette source 'DiscreteBayes.Rnw'
+
+###################################################
+### code chunk number 1: DiscreteBayes.Rnw:21-23
+###################################################
+p <- seq(0, 1, by = 0.01)
+prior <- 1 / 101 + 0 * p
+
+
+###################################################
+### code chunk number 2: DiscreteBayes.Rnw:25-28
+###################################################
+plot(p, prior,
+ type="h",
+ main="Prior Distribution")
+
+
+###################################################
+### code chunk number 3: DiscreteBayes.Rnw:35-37
+###################################################
+library(LearnBayes)
+post <- pdisc(p, prior, c(20, 12))
+
+
+###################################################
+### code chunk number 4: DiscreteBayes.Rnw:39-42
+###################################################
+plot(p, post,
+ type="h",
+ main="Posterior Distribution")
+
+
+###################################################
+### code chunk number 5: DiscreteBayes.Rnw:47-48
+###################################################
+discint(cbind(p, post), 0.90)
+
+
+###################################################
+### code chunk number 6: DiscreteBayes.Rnw:57-60
+###################################################
+n <- 20
+s <- 0:20
+pred.probs <- pdiscp(p, post, n, s)
+
+
+###################################################
+### code chunk number 7: DiscreteBayes.Rnw:63-66
+###################################################
+plot(s, pred.probs,
+ type="h",
+ main="Predictive Distribution")
+
+
+###################################################
+### code chunk number 8: DiscreteBayes.Rnw:72-74
+###################################################
+prior <- rep(1/11, 11)
+names(prior) <- 20:30
+
+
+###################################################
+### code chunk number 9: DiscreteBayes.Rnw:78-79
+###################################################
+y <- c(24, 25, 31, 31, 22, 21, 26, 20, 16, 22)
+
+
+###################################################
+### code chunk number 10: DiscreteBayes.Rnw:83-84
+###################################################
+post <- discrete.bayes(dpois, prior, y)
+
+
+###################################################
+### code chunk number 11: DiscreteBayes.Rnw:89-90
+###################################################
+print(post)
+
+
+###################################################
+### code chunk number 12: DiscreteBayes.Rnw:93-94
+###################################################
+plot(post)
+
+
+###################################################
+### code chunk number 13: DiscreteBayes.Rnw:97-98
+###################################################
+summary(post)
+
+
diff --git a/inst/doc/DiscreteBayes.Rnw b/inst/doc/DiscreteBayes.Rnw
new file mode 100644
index 0000000..3c1877c
--- /dev/null
+++ b/inst/doc/DiscreteBayes.Rnw
@@ -0,0 +1,101 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Bayes using Discrete Priors}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Bayes using Discrete Priors}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Learning About a Proportion}
+
+\subsection*{A Discrete Prior}
+
+Consider a population of ``successes'' and ``failures'' where the proportion of successes is $p$.
+Suppose $p$ takes on the discrete set of values 0, .01, ..., .99, 1 and one assigns a uniform prior on these values. We enter the values of $p$ and the associated probabilities into the vectors {\tt p} and {\tt prior}, respectively.
+
+<<>>=
+p <- seq(0, 1, by = 0.01)
+prior <- 1 / 101 + 0 * p
+@
+<<fig=TRUE,echo=TRUE>>=
+plot(p, prior,
+ type="h",
+ main="Prior Distribution")
+@
+
+\subsection*{Posterior Distribution}
+
+Suppose one takes a random sample with replacement from the population and observes 20 successes and 12 failures. The function {\tt pdisc} in the {\tt LearnBayes} package computes the associated posterior probabilities for $p$. The inputs to {\tt pdisc} are the prior (the vector of values of $p$ and the vector of prior probabilities) and a vector containing the number of successes and failures.
+
+<<>>=
+library(LearnBayes)
+post <- pdisc(p, prior, c(20, 12))
+@
+<<fig=TRUE,echo=TRUE>>=
+plot(p, post,
+ type="h",
+ main="Posterior Distribution")
+@
+
+A highest probability interval for a discrete distribution is obtained using the {\tt discint} function. This function has two inputs: the probability distribution matrix where the first column contains the values and the second column contains the probabilities, and the desired probability content. To illustrate, we compute a 90 percent probability interval for $p$ from the posterior distribution.
+
+<<>>=
+discint(cbind(p, post), 0.90)
+@
+The probability that $p$ falls in the interval (0.49, 0.75)
+is approximately 0.90.
+
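+This interval probability can be verified directly (an editorial check) by summing the posterior probabilities over that range:
+<<>>=
+sum(post[p >= 0.49 & p <= 0.75])
+@
+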
+\subsection*{Prediction}
+
+Suppose a new sample of size 20 is to be taken and we're interested in predicting the number of successes. The current opinion about the proportion is reflected in the posterior distribution stored in the vectors {\tt p} and {\tt post}. We store the possible number of successes in the future sample in {\tt s} and the function {\tt pdiscp} computes the corresponding predictive probabilities.
+
+<<>>=
+n <- 20
+s <- 0:20
+pred.probs <- pdiscp(p, post, n, s)
+@
+
+<<fig=TRUE,echo=TRUE>>=
+plot(s, pred.probs,
+ type="h",
+ main="Predictive Distribution")
+@
+
+\section*{Learning About a Poisson Mean}
+
+Discrete models can be used for other sampling distributions using the {\tt discrete.bayes} function. To illustrate, suppose the number of accidents in a particular year is Poisson with mean $\lambda$. A priori one believes that $\lambda$ is equally likely to take on the values 20, 21, ..., 30. We put the prior probabilities 1/11, ..., 1/11 in the vector {\tt prior} and use the {\tt names} function to name the components of this vector with the values of $\lambda$.
+<<>>=
+prior <- rep(1/11, 11)
+names(prior) <- 20:30
+@
+
+One observes the number of accidents for ten years -- these values are placed in the vector {\tt y}:
+<<>>=
+y <- c(24, 25, 31, 31, 22, 21, 26, 20, 16, 22)
+@
+
+To compute the posterior probabilities, we use the function {\tt discrete.bayes}; the inputs are the Poisson sampling density {\tt dpois}, the vector of prior probabilities {\tt prior}, and the vector of observations {\tt y}.
+<<>>=
+post <- discrete.bayes(dpois, prior, y)
+@
+
+One can display the posterior probabilities with the {\tt print} method, graph them with the {\tt plot} method, and summarize the posterior distribution with the {\tt summary} method.
+
+<<>>=
+print(post)
+@
+
+<<fig=TRUE,echo=TRUE>>=
+plot(post)
+@
+
+<<>>=
+summary(post)
+@
+
+\end{document}
\ No newline at end of file
diff --git a/inst/doc/DiscreteBayes.pdf b/inst/doc/DiscreteBayes.pdf
new file mode 100644
index 0000000..22eb500
Binary files /dev/null and b/inst/doc/DiscreteBayes.pdf differ
diff --git a/inst/doc/MCMCintro.R b/inst/doc/MCMCintro.R
new file mode 100644
index 0000000..6f755b3
--- /dev/null
+++ b/inst/doc/MCMCintro.R
@@ -0,0 +1,68 @@
+### R code from vignette source 'MCMCintro.Rnw'
+
+###################################################
+### code chunk number 1: MCMCintro.Rnw:34-42
+###################################################
+minmaxpost <- function(theta, data){
+ mu <- theta[1]
+ sigma <- exp(theta[2])
+ dnorm(data$min, mu, sigma, log=TRUE) +
+ dnorm(data$max, mu, sigma, log=TRUE) +
+ (data$n - 2) * log(pnorm(data$max, mu, sigma) -
+ pnorm(data$min, mu, sigma))
+}
+
+
+###################################################
+### code chunk number 2: MCMCintro.Rnw:51-55
+###################################################
+data <- list(n=10, min=52, max=84)
+library(LearnBayes)
+fit <- laplace(minmaxpost, c(70, 2), data)
+fit
+
+
+###################################################
+### code chunk number 3: MCMCintro.Rnw:60-64
+###################################################
+mycontour(minmaxpost, c(45, 95, 1.5, 4), data,
+ xlab=expression(mu), ylab=expression(paste("log ",sigma)))
+mycontour(lbinorm, c(45, 95, 1.5, 4),
+ list(m=fit$mode, v=fit$var), add=TRUE, col="red")
+
+
+###################################################
+### code chunk number 4: MCMCintro.Rnw:73-78
+###################################################
+mcmc.fit <- rwmetrop(minmaxpost,
+ list(var=fit$v, scale=3),
+ c(70, 2),
+ 10000,
+ data)
+
+
+###################################################
+### code chunk number 5: MCMCintro.Rnw:82-83
+###################################################
+mcmc.fit$accept
+
+
+###################################################
+### code chunk number 6: MCMCintro.Rnw:88-92
+###################################################
+mycontour(minmaxpost, c(45, 95, 1.5, 4), data,
+ xlab=expression(mu),
+ ylab=expression(paste("log ",sigma)))
+points(mcmc.fit$par)
+
+
+###################################################
+### code chunk number 7: MCMCintro.Rnw:105-110
+###################################################
+mu <- mcmc.fit$par[, 1]
+sigma <- exp(mcmc.fit$par[, 2])
+P.75 <- mu + 0.674 * sigma
+plot(density(P.75),
+ main="Posterior Density of Upper Quartile")
+
+
diff --git a/inst/doc/MCMCintro.Rnw b/inst/doc/MCMCintro.Rnw
new file mode 100644
index 0000000..de5fe63
--- /dev/null
+++ b/inst/doc/MCMCintro.Rnw
@@ -0,0 +1,114 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Markov Chain Monte Carlo}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Markov Chain Monte Carlo}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{A Selected Data Problem}
+
+Here is an interesting problem with ``selected data". Suppose you are measuring the speeds of cars driving on an interstate. You assume the speeds are normally distributed with mean $\mu$ and standard deviation $\sigma$. You see 10 cars pass by and you only record the minimum and maximum speeds. What have you learned about the normal parameters?
+
+First we focus on the construction of the likelihood. Given values of the normal parameters, what is the probability of observing minimum = $x$ and maximum = $y$ in a sample of size $n$?
+
+Essentially we're looking for the joint density of two order statistics, which is a standard result. Let $f$ and $F$ denote the density and cdf of a normal distribution with mean $\mu$ and standard deviation $\sigma$. Then the joint density of $(x, y)$ is given by
+
+$$f(x, y | \mu, \sigma) \propto f(x) f(y) [F(y) - F(x)]^{n-2}, \quad x < y.$$
+
+After we observe the data, the likelihood is this sampling density viewed as a function of the parameters. Suppose we take a sample of size 10 and we observe $x = 52, y = 84$. Then the likelihood is given by
+
+$$
+L(\mu, \sigma) \propto f(52) f(84) [F(84) - F(52)]^{8}
+$$
+
+\section*{Defining the log posterior}
+
+First I write a short function {\tt minmaxpost} that computes the logarithm of the posterior density. The arguments to this function are $\theta = (\mu, \log \sigma)$ and {\tt data}, which is a list with components {\tt n}, {\tt min}, and {\tt max}. I'd recommend using the R functions {\tt pnorm} and {\tt dnorm} in computing the density -- it helps avoid typing errors.
+
+<<>>=
+minmaxpost <- function(theta, data){
+ mu <- theta[1]
+ sigma <- exp(theta[2])
+ dnorm(data$min, mu, sigma, log=TRUE) +
+ dnorm(data$max, mu, sigma, log=TRUE) +
+ (data$n - 2) * log(pnorm(data$max, mu, sigma) -
+ pnorm(data$min, mu, sigma))
+}
+@
+
+\section*{Normal approximation to posterior}
+
+We work with the parameterization $(\mu, \log \sigma)$, which will give us a better normal approximation. A standard noninformative prior is uniform on $(\mu, \log \sigma)$.
+
+The function {\tt laplace} is used to summarize this posterior. The arguments to {\tt laplace} are the name of the log posterior function, an initial estimate of $\theta$, and the data used in the log posterior function. The output of {\tt laplace} includes {\tt mode}, the posterior mode, and {\tt var}, the corresponding estimate of the variance-covariance matrix.
+
+<<>>=
+data <- list(n=10, min=52, max=84)
+library(LearnBayes)
+fit <- laplace(minmaxpost, c(70, 2), data)
+fit
+@
+
+The normal approximation turns out to be quite good in this situation. The {\tt mycontour} function displays contours of the exact posterior, and a second application of {\tt mycontour} overlays the matching normal approximation.
+
+<<fig=TRUE,echo=TRUE>>=
+mycontour(minmaxpost, c(45, 95, 1.5, 4), data,
+ xlab=expression(mu), ylab=expression(paste("log ",sigma)))
+mycontour(lbinorm, c(45, 95, 1.5, 4),
+ list(m=fit$mode, v=fit$var), add=TRUE, col="red")
+@
+
+\section*{Random Walk Metropolis Sampling}
+
+The {\tt rwmetrop} function implements the M-H random walk algorithm. There are five inputs: (1) the function defining the log posterior, (2) a list containing {\tt var}, the estimated var-cov matrix, and {\tt scale}, the M-H random walk scale constant, (3) the starting value in the Markov chain simulation, (4) the number of iterations of the algorithm, and (5) any data and prior parameters used in the log posterior density.
+
+Here we use {\tt fit\$v} as our estimated var-cov matrix, use a scale value of 3, start the simulation at $(\mu, \log \sigma) = (70, 2)$ and try 10,000 iterations.
+
+<<>>=
+mcmc.fit <- rwmetrop(minmaxpost,
+ list(var=fit$v, scale=3),
+ c(70, 2),
+ 10000,
+ data)
+@
+
+I display the acceptance rate -- here it is about 19\%, which is a reasonable value.
+<<>>=
+mcmc.fit$accept
+@
+
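+As an editorial aside, one can see how the acceptance rate depends on the scale constant by rerunning {\tt rwmetrop} over a small grid of scale values:
+<<eval=FALSE>>=
+sapply(c(1, 2, 3, 5), function(s)
+  rwmetrop(minmaxpost, list(var=fit$v, scale=s),
+           c(70, 2), 10000, data)$accept)
+@
+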
+We display the contours of the exact posterior and overlay the simulated draws.
+
+<<fig=TRUE,echo=TRUE>>=
+mycontour(minmaxpost, c(45, 95, 1.5, 4), data,
+ xlab=expression(mu),
+ ylab=expression(paste("log ",sigma)))
+points(mcmc.fit$par)
+@
+
+It appears that we have been successful in getting a good sample from this posterior distribution.
+
+\section*{Simulation-Based Inference}
+
+To illustrate simulation-based inference, suppose one is interested in learning about the upper quartile
+$$
+P.75 = \mu + 0.674 \times \sigma
+$$
+of the car speed distribution. For each simulated draw of $(\mu, \sigma)$ from the posterior, we compute the upper quartile $P.75$. We use the {\tt density} function to construct a density estimate of the simulated sample of $P.75$.
+
+<<fig=TRUE,echo=TRUE>>=
+mu <- mcmc.fit$par[, 1]
+sigma <- exp(mcmc.fit$par[, 2])
+P.75 <- mu + 0.674 * sigma
+plot(density(P.75),
+ main="Posterior Density of Upper Quartile")
+@
+
+
+\end{document}
\ No newline at end of file
diff --git a/inst/doc/MCMCintro.pdf b/inst/doc/MCMCintro.pdf
new file mode 100644
index 0000000..84a9fe2
Binary files /dev/null and b/inst/doc/MCMCintro.pdf differ
diff --git a/inst/doc/MultilevelModeling.R b/inst/doc/MultilevelModeling.R
new file mode 100644
index 0000000..ab15976
--- /dev/null
+++ b/inst/doc/MultilevelModeling.R
@@ -0,0 +1,74 @@
+### R code from vignette source 'MultilevelModeling.Rnw'
+
+###################################################
+### code chunk number 1: MultilevelModeling.Rnw:17-24
+###################################################
+d <- data.frame(Name=c("Clemente", "Robinson", "Howard", "Johnstone",
+ "Berry", "Spencer", "Kessinger", "Alvarado", "Santo",
+ "Swaboda", "Petrocelli", "Rodriguez", "Scott", "Unser",
+ "Williams", "Campaneris", "Munson", "Alvis"),
+ Hits=c(18, 17, 16, 15, 14, 14, 13, 12, 11,
+ 11, 10, 10, 10, 10, 10, 9, 8, 7),
+ At.Bats=45)
+
+
+###################################################
+### code chunk number 2: MultilevelModeling.Rnw:59-64
+###################################################
+library(LearnBayes)
+laplace.fit <- laplace(betabinexch,
+ c(0, 0),
+ d[, c("Hits", "At.Bats")])
+laplace.fit
+
+
+###################################################
+### code chunk number 3: MultilevelModeling.Rnw:68-73
+###################################################
+mcmc.fit <- rwmetrop(betabinexch,
+ list(var=laplace.fit$var, scale=2),
+ c(0, 0),
+ 5000,
+ d[, c("Hits", "At.Bats")])
+
+
+###################################################
+### code chunk number 4: MultilevelModeling.Rnw:77-81
+###################################################
+mycontour(betabinexch, c(-1.5, -0.5, 2, 12),
+ d[, c("Hits", "At.Bats")],
+ xlab="Logit ETA", ylab="Log K")
+with(mcmc.fit, points(par))
+
+
+###################################################
+### code chunk number 5: MultilevelModeling.Rnw:88-99
+###################################################
+eta <- with(mcmc.fit, exp(par[, 1]) / (1 + exp(par[, 1])))
+K <- exp(mcmc.fit$par[, 2])
+p.estimate <- function(j, eta, K){
+ yj <- d[j, "Hits"]
+ nj <- d[j, "At.Bats"]
+ p.sim <- rbeta(5000, yj + K * eta, nj - yj + K * (1 - eta))
+ quantile(p.sim, c(0.05, 0.50, 0.95))
+}
+E <- t(sapply(1:18, p.estimate, eta, K))
+rownames(E) <- d[, "Name"]
+round(E, 3)
+
+
+###################################################
+### code chunk number 6: MultilevelModeling.Rnw:105-115
+###################################################
+plot(d$Hits / 45, E[, 2], pch=19,
+ ylim=c(.15, .40),
+ xlab="Observed AVG", ylab="True Probability",
+ main="90 Percent Probability Intervals")
+for (j in 1:18)
+ lines(d$Hits[j] / 45 * c(1, 1), E[j, c(1, 3)])
+abline(a=0, b=1, col="blue")
+abline(h=mean(d$Hits) / 45, col="red")
+legend("topleft", legend=c("Individual", "Combined"),
+ lty=1, col=c("blue", "red"))
+
+
diff --git a/inst/doc/MultilevelModeling.Rnw b/inst/doc/MultilevelModeling.Rnw
new file mode 100644
index 0000000..2fd1da5
--- /dev/null
+++ b/inst/doc/MultilevelModeling.Rnw
@@ -0,0 +1,118 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Multilevel Modeling}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Multilevel Modeling}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Efron and Morris Baseball Data}
+
+Efron and Morris, in a famous 1975 JASA paper, introduced the problem of estimating the true batting averages for 18 players during the 1971 baseball season. In the table, we observe the number of hits for each player in the first 45 batting opportunities in the season.
+<<>>=
+d <- data.frame(Name=c("Clemente", "Robinson", "Howard", "Johnstone",
+ "Berry", "Spencer", "Kessinger", "Alvarado", "Santo",
+ "Swaboda", "Petrocelli", "Rodriguez", "Scott", "Unser",
+ "Williams", "Campaneris", "Munson", "Alvis"),
+ Hits=c(18, 17, 16, 15, 14, 14, 13, 12, 11,
+ 11, 10, 10, 10, 10, 10, 9, 8, 7),
+ At.Bats=45)
+@
+
+\section*{The Multilevel Model}
+
+One can simultaneously estimate the true batting averages by the following multilevel model. We assume the number of hits for the $j$th player, $y_j$, has a binomial distribution with sample size $n_j$ and probability of success $p_j$, $j = 1, ..., 18$. The true batting averages $p_1, ..., p_{18}$ are assumed to be a random sample from a beta($a, b$) distribution. It is convenient to reparameterize $a$ and $b$ into the mean $\eta = a / (a + b)$ and precision $K = a + b$. We assign $(\eta, K)$ the vague prior
+$$
+g(\eta, K) \propto \frac{1}{\eta (1 - \eta)}\frac{1}{(1 + K)^2}
+$$
+
+After data $y$ is observed, the posterior distribution of the parameters $(\{p_j\}, \eta, K)$ has the convenient representation
+$$
+g(\{p_j\}, \eta, K | y) = g(\eta, K | y) \times g(\{p_j\} | \eta, K, y).
+$$
+Conditional on $\eta$ and $K$, the posterior distributions of $p_1, ..., p_{18}$ are independent, where
+$$
+p_j \sim Beta(y_j + K \eta, n_j - y_j + K ( 1 - \eta)).
+$$
+The posterior density of $(\eta, K)$ is given by
+$$
+g(\eta, K| y) \propto \prod_{j=1}^{18}
+\left(\frac{B(y_j + K \eta, n_j - y_j + K (1 - \eta))}
+ {B(K \eta, K (1 - \eta))}\right)
+ \frac{1}{\eta (1 - \eta)}\frac{1}{(1 + K)^2}.
+$$
+
+\section*{Simulation of the Posterior of $(\eta, K)$}
+
+For computational purposes, it is convenient to reparameterize $\eta$ and $K$ to the real-valued parameters
+$$
+\theta_1 = \log \frac{\eta}{1 - \eta}, \theta_2 = \log K.
+$$
+The log posterior of the vector $\theta = (\theta_1, \theta_2)$ is programmed in the function {\tt betabinexch}; a sketch of the computation is given below.
+
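+For readers who want to see the computation, here is an editorial sketch along the lines of the packaged function (the {\tt LearnBayes} version is authoritative):
+<<eval=FALSE>>=
+betabinexch.sketch <- function(theta, data){
+  eta <- exp(theta[1]) / (1 + exp(theta[1]))
+  K <- exp(theta[2])
+  y <- data[, 1]; n <- data[, 2]
+  loglike <- sum(lbeta(K * eta + y, K * (1 - eta) + n - y) -
+                 lbeta(K * eta, K * (1 - eta)))
+  # add the log prior of (eta, K) plus the log Jacobian of the
+  # change of variables to theta: log K - 2 log(1 + K)
+  loglike + theta[2] - 2 * log(1 + exp(theta[2]))
+}
+@
+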
+We initially use the {\tt laplace} function to find the posterior mode and associated variance-covariance matrix. The inputs are the log posterior function, an initial guess at the mode, and the data.
+<<>>=
+library(LearnBayes)
+laplace.fit <- laplace(betabinexch,
+ c(0, 0),
+ d[, c("Hits", "At.Bats")])
+laplace.fit
+@
+
+The outputs from {\tt laplace} are used to inform the inputs of a random walk Metropolis algorithm in the function {\tt rwmetrop}. The inputs are the function defining the log posterior, the estimate of the variance-covariance matrix and scale for the proposal density, the starting value in the Markov chain, the number of iterations, and the data.
+<<>>=
+mcmc.fit <- rwmetrop(betabinexch,
+ list(var=laplace.fit$var, scale=2),
+ c(0, 0),
+ 5000,
+ d[, c("Hits", "At.Bats")])
+@
+
+To demonstrate that this MCMC algorithm produces a reasonable sample from the posterior, the {\tt mycontour} function displays a contour graph of the exact posterior density and the {\tt points} function is used to overlay 5000 draws from the MCMC algorithm.
+<<fig=TRUE,echo=TRUE>>=
+mycontour(betabinexch, c(-1.5, -0.5, 2, 12),
+ d[, c("Hits", "At.Bats")],
+ xlab="Logit ETA", ylab="Log K")
+with(mcmc.fit, points(par))
+@
+
+\section*{Simulation of the Posterior of the Probabilities}
+
+One can simulate from the joint posterior of $(\{p_j\}, \eta, K)$ by (1) simulating $(\eta, K)$ from its marginal posterior, and (2) simulating $p_1, ..., p_{18}$ from the conditional distribution
+$[\{p_j\} | \eta, K]$. In the R script, I store the simulated draws from the posterior of $K$ and $\eta$ in the vectors {\tt K} and {\tt eta}. Then the function {\tt p.estimate} simulates draws from the posterior of the $j$th probability and computes a 90\% probability interval by extracting the 5th and 95th percentiles. I repeat this process for all 18 players using the {\tt sapply} function and display the 90\% intervals for all players.
+<<>>=
+eta <- with(mcmc.fit, exp(par[, 1]) / (1 + exp(par[, 1])))
+K <- exp(mcmc.fit$par[, 2])
+p.estimate <- function(j, eta, K){
+ yj <- d[j, "Hits"]
+ nj <- d[j, "At.Bats"]
+ p.sim <- rbeta(5000, yj + K * eta, nj - yj + K * (1 - eta))
+ quantile(p.sim, c(0.05, 0.50, 0.95))
+}
+E <- t(sapply(1:18, p.estimate, eta, K))
+rownames(E) <- d[, "Name"]
+round(E, 3)
+@
+
+The following graph displays the 90 percent probability intervals for
+the players' true batting averages. The blue line represents {\it individual estimates} where each batting probability is estimated by the observed batting average. The red line represents the {\it combined estimate} where one combines all of the data. The multilevel estimate represented by the dot is a compromise between the individual estimate and the combined estimate.
+
+<<fig=TRUE,echo=TRUE>>=
+plot(d$Hits / 45, E[, 2], pch=19,
+ ylim=c(.15, .40),
+ xlab="Observed AVG", ylab="True Probability",
+ main="90 Percent Probability Intervals")
+for (j in 1:18)
+ lines(d$Hits[j] / 45 * c(1, 1), E[j, c(1, 3)])
+abline(a=0, b=1, col="blue")
+abline(h=mean(d$Hits) / 45, col="red")
+legend("topleft", legend=c("Individual", "Combined"),
+ lty=1, col=c("blue", "red"))
+@
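+
+Since the conditional posterior mean of $p_j$ is $(y_j + K \eta)/(45 + K)$, the fraction $K/(K + 45)$ measures how strongly each individual estimate is pulled toward the combined value. A one-line computation on the simulated {\tt K} values summarizes the typical size of this shrinkage.
+<<>>=
+mean(K / (K + 45))
+@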
+
+\end{document}
\ No newline at end of file
diff --git a/inst/doc/MultilevelModeling.pdf b/inst/doc/MultilevelModeling.pdf
new file mode 100644
index 0000000..f62a713
Binary files /dev/null and b/inst/doc/MultilevelModeling.pdf differ
diff --git a/man/achievement.Rd b/man/achievement.Rd
new file mode 100644
index 0000000..26dcf4a
--- /dev/null
+++ b/man/achievement.Rd
@@ -0,0 +1,28 @@
+\name{achievement}
+\alias{achievement}
+\docType{data}
+\title{School achievement data}
+\description{
+Achievement data for a group of Austrian school children
+}
+\usage{
+achievement
+}
+\format{
+ A data frame with 109 observations on the following 7 variables.
+ \describe{
+ \item{Gen}{gender of child where 0 is male and 1 is female}
+ \item{Age}{age in months}
+ \item{IQ}{IQ score}
+ \item{math1}{test score on mathematics computation}
+ \item{math2}{test score on mathematics problem solving}
+ \item{read1}{test score on reading speed}
+ \item{read2}{test score on reading comprehension}
+ }
+}
+\source{
+Abraham, B., and Ledolter, J. (2006), Introduction to Regression Modeling,
+Duxbury.
+}
+
+\keyword{datasets}
diff --git a/man/baseball.1964.Rd b/man/baseball.1964.Rd
new file mode 100644
index 0000000..a157da8
--- /dev/null
+++ b/man/baseball.1964.Rd
@@ -0,0 +1,27 @@
+\name{baseball.1964}
+\alias{baseball.1964}
+\docType{data}
+\title{Team records in the 1964 National League baseball season}
+\description{
+Head to head records for all teams in the 1964 National League baseball
+season. Teams are coded as Cincinnati (1), Chicago (2), Houston (3),
+Los Angeles (4), Milwaukee (5), New York (6), Philadelphia (7),
+Pittsburgh (8), San Francisco (9), and St. Louis (10).
+}
+\usage{
+baseball.1964
+}
+\format{
+ A data frame with 45 observations on the following 4 variables.
+ \describe{
+ \item{Team.1}{Number of team 1}
+ \item{Team.2}{Number of team 2}
+ \item{Wins.Team1}{Number of games won by team 1}
+ \item{Wins.Team2}{Number of games won by team 2}
+ }
+}
+\source{
+www.baseball-reference.com website.
+}
+
+\keyword{datasets}
diff --git a/man/bayes.influence.Rd b/man/bayes.influence.Rd
new file mode 100644
index 0000000..6ffb22f
--- /dev/null
+++ b/man/bayes.influence.Rd
@@ -0,0 +1,33 @@
+\name{bayes.influence}
+\alias{bayes.influence}
+\title{Observation sensitivity analysis in beta-binomial model}
+\description{
+ Computes probability intervals for the log precision parameter K
+in a beta-binomial model for all "leave one out" models using sampling
+importance resampling
+}
+\usage{
+bayes.influence(theta,data)
+}
+\arguments{
+ \item{theta}{matrix of simulated draws from the posterior of (logit eta, log K)}
+ \item{data}{matrix with columns of counts and sample sizes}
+}
+\value{
+\item{summary}{vector of 5th, 50th, 95th percentiles of log K for complete sample posterior}
+\item{summary.obs}{matrix where the ith row contains the 5th, 50th, 95th percentiles
+of log K for posterior when the ith observation is removed}
+}
+\author{Jim Albert}
+
+\examples{
+data(cancermortality)
+start=array(c(-7,6),c(1,2))
+fit=laplace(betabinexch,start,cancermortality)
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+theta=sir(betabinexch,tpar,1000,cancermortality)
+intervals=bayes.influence(theta,cancermortality)
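+# intervals$summary.obs[i,] gives the percentiles of log K with observation i removed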
+}
+
+\keyword{models}
diff --git a/man/bayes.model.selection.Rd b/man/bayes.model.selection.Rd
new file mode 100644
index 0000000..4b1ed98
--- /dev/null
+++ b/man/bayes.model.selection.Rd
@@ -0,0 +1,32 @@
+\name{bayes.model.selection}
+\alias{bayes.model.selection}
+\title{Bayesian regression model selection using G priors}
+\description{
+Using Zellner's G priors, computes the log marginal density for all possible regression models
+}
+\usage{
+bayes.model.selection(y, X, c, constant=TRUE)
+}
+\arguments{
+ \item{y}{vector of response values}
+ \item{X}{matrix of covariates}
+ \item{c}{parameter of the G prior}
+ \item{constant}{logical variable indicating if a constant term is in the matrix X}
+}
+
+\value{
+\item{mod.prob}{data frame specifying the model, the value of the log marginal density and
+the value of the posterior model probability}
+\item{converge}{logical vector indicating if the laplace algorithm converged for each model}
+}
+
+\author{Jim Albert}
+
+\examples{
+data(birdextinct)
+logtime=log(birdextinct$time)
+X=cbind(1,birdextinct$nesting,birdextinct$size,birdextinct$status)
+bayes.model.selection(logtime,X,100)
+}
+
+\keyword{models}
diff --git a/man/bayes.probit.Rd b/man/bayes.probit.Rd
new file mode 100644
index 0000000..9bb77c9
--- /dev/null
+++ b/man/bayes.probit.Rd
@@ -0,0 +1,35 @@
+\name{bayes.probit}
+\alias{bayes.probit}
+\title{Simulates from a probit binary response regression model using data augmentation and Gibbs sampling}
+\description{
+ Gives a simulated sample from the joint posterior distribution of the regression
+vector for a binary response regression model with a probit link and an
+informative normal(beta, P) prior. Also computes the log marginal likelihood when
+a subjective prior is used.
+}
+\usage{
+bayes.probit(y,X,m,prior=list(beta=0,P=0))
+}
+\arguments{
+ \item{y}{vector of binary responses}
+ \item{X}{covariate matrix}
+ \item{m}{number of simulations desired}
+ \item{prior}{list with components beta, the prior mean, and P, the prior precision matrix}
+}
+
+\value{
+\item{beta}{matrix of simulated draws of regression vector beta where each row corresponds to one draw}
+\item{log.marg}{simulation estimate of the log marginal likelihood of the model}
+}
+\author{Jim Albert}
+
+\examples{
+response=c(0,1,0,0,0,1,1,1,1,1)
+covariate=c(1,2,3,4,5,6,7,8,9,10)
+X=cbind(1,covariate)
+prior=list(beta=c(0,0),P=diag(c(.5,10)))
+m=1000
+s=bayes.probit(response,X,m,prior)
+}
+
+\keyword{models}
diff --git a/man/bayesresiduals.Rd b/man/bayesresiduals.Rd
new file mode 100644
index 0000000..fa51b48
--- /dev/null
+++ b/man/bayesresiduals.Rd
@@ -0,0 +1,34 @@
+\name{bayesresiduals}
+\alias{bayesresiduals}
+\title{Computation of posterior residual outlying probabilities for a linear regression model}
+\description{
+ Computes the posterior probabilities that Bayesian residuals exceed a cutoff value for a
+linear regression model with a noninformative prior
+}
+\usage{
+bayesresiduals(lmfit,post,k)
+}
+\arguments{
+ \item{lmfit}{output of the regression function lm}
+ \item{post}{list with components beta, matrix of simulated draws of regression parameter, and
+sigma, vector of simulated draws of sampling standard deviation}
+ \item{k}{cut-off value that defines an outlier}
+}
+
+\value{
+vector of posterior outlying probabilities
+}
+\author{Jim Albert}
+
+\examples{
+chirps=c(20,16.0,19.8,18.4,17.1,15.5,14.7,17.1,15.4,16.2,15,17.2,16,17,14.1)
+temp=c(88.6,71.6,93.3,84.3,80.6,75.2,69.7,82,69.4,83.3,78.6,82.6,80.6,83.5,76.3)
+X=cbind(1,chirps)
+lmfit=lm(temp~X)
+m=1000
+post=blinreg(temp,X,m)
+k=2
+bayesresiduals(lmfit,post,k)
+}
+
+\keyword{models}
diff --git a/man/bermuda.grass.Rd b/man/bermuda.grass.Rd
new file mode 100644
index 0000000..1e3bed0
--- /dev/null
+++ b/man/bermuda.grass.Rd
@@ -0,0 +1,26 @@
+\name{bermuda.grass}
+\alias{bermuda.grass}
+\docType{data}
+\title{Bermuda grass experiment data}
+\description{
+Yields of bermuda grass for a factorial design of nutrients
+nitrogen,
+phosphorus, and potassium.
+}
+\usage{
+bermuda.grass
+}
+\format{
+ A data frame with 64 observations on the following 4 variables.
+ \describe{
+ \item{y}{yield of bermuda grass in tons per acre}
+ \item{Nit}{level of nitrogen}
+ \item{Phos}{level of phosphorus}
+ \item{Pot}{level of potassium}
+ }
+}
+\source{
+McCullagh, P., and Nelder, J. (1989), Generalized Linear Models,
+Chapman and Hall.
+}
+\keyword{datasets}
diff --git a/man/beta.select.Rd b/man/beta.select.Rd
new file mode 100644
index 0000000..899ec03
--- /dev/null
+++ b/man/beta.select.Rd
@@ -0,0 +1,32 @@
+\name{beta.select}
+\alias{beta.select}
+\title{Selection of Beta Prior Given Knowledge of Two Quantiles}
+\description{
+ Finds the shape parameters of a beta density that matches knowledge of
+two quantiles of the distribution.
+}
+\usage{
+beta.select(quantile1, quantile2)
+}
+\arguments{
+ \item{quantile1}{list with components p, the value of the first probability,
+ and x, the value of the first quantile}
+ \item{quantile2}{list with components p, the value of the second probability,
+ and x, the value of the second quantile}
+}
+
+\value{
+vector of shape parameters of the matching beta distribution
+}
+
+\author{Jim Albert}
+
+\examples{
+# person believes the median of the prior is 0.25
+# and the 90th percentile of the prior is 0.45
+quantile1=list(p=.5,x=0.25)
+quantile2=list(p=.9,x=0.45)
+beta.select(quantile1,quantile2)
+}
+
+\keyword{models}
diff --git a/man/betabinexch.Rd b/man/betabinexch.Rd
new file mode 100644
index 0000000..fdd2e3f
--- /dev/null
+++ b/man/betabinexch.Rd
@@ -0,0 +1,29 @@
+\name{betabinexch}
+\alias{betabinexch}
+\title{Log posterior of logit mean and log precision for Binomial/beta exchangeable model}
+\description{
+ Computes the log posterior density of logit mean and log precision for a Binomial/beta exchangeable model
+}
+\usage{
+betabinexch(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values of logit eta and log K}
+ \item{data}{a matrix with columns y (counts) and n (sample sizes)}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+n=c(20,20,20,20,20)
+y=c(1,4,3,6,10)
+data=cbind(y,n)
+theta=c(-1,0)
+betabinexch(theta,data)
+}
+
+\keyword{models}
diff --git a/man/betabinexch0.Rd b/man/betabinexch0.Rd
new file mode 100644
index 0000000..633f367
--- /dev/null
+++ b/man/betabinexch0.Rd
@@ -0,0 +1,28 @@
+\name{betabinexch0}
+\alias{betabinexch0}
+\title{Log posterior of mean and precision for Binomial/beta exchangeable model}
+\description{
+ Computes the log posterior density of mean and precision for a Binomial/beta exchangeable model
+}
+\usage{
+betabinexch0(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values of eta and K}
+ \item{data}{a matrix with columns y (counts) and n (sample sizes)}
+}
+
+\value{
+value of the log posterior}
+
+\author{Jim Albert}
+
+\examples{
+n=c(20,20,20,20,20)
+y=c(1,4,3,6,10)
+data=cbind(y,n)
+theta=c(.1,10)
+betabinexch0(theta,data)
+}
+
+\keyword{models}
diff --git a/man/bfexch.Rd b/man/bfexch.Rd
new file mode 100644
index 0000000..9c16da7
--- /dev/null
+++ b/man/bfexch.Rd
@@ -0,0 +1,32 @@
+\name{bfexch}
+\alias{bfexch}
+\title{Logarithm of integral of Bayes factor for testing homogeneity of proportions}
+\description{
+Computes the logarithm of the integral of the Bayes factor for testing homogeneity of a set of proportions
+}
+\usage{
+bfexch(theta,datapar)
+}
+\arguments{
+ \item{theta}{value of the logit of the prior mean hyperparameter}
+ \item{datapar}{list with components data, matrix with columns y (counts) and n (sample sizes), and K, prior
+precision hyperparameter}
+}
+
+\value{
+value of the logarithm of the integral
+}
+
+\author{Jim Albert}
+
+\examples{
+y=c(1,3,2,4,6,4,3)
+n=c(10,10,10,10,10,10,10)
+data=cbind(y,n)
+K=20
+datapar=list(data=data,K=K)
+theta=1
+bfexch(theta,datapar)
+}
+
+\keyword{models}
diff --git a/man/bfindep.Rd b/man/bfindep.Rd
new file mode 100644
index 0000000..b109503
--- /dev/null
+++ b/man/bfindep.Rd
@@ -0,0 +1,30 @@
+\name{bfindep}
+\alias{bfindep}
+\title{Bayes factor against independence assuming alternatives close to independence}
+\description{
+Computes a Bayes factor against independence for a two-way contingency table assuming
+a "close to independence" alternative model}
+\usage{
+bfindep(y,K,m)
+}
+\arguments{
+ \item{y}{matrix of counts}
+ \item{K}{Dirichlet precision hyperparameter}
+ \item{m}{number of simulations}
+}
+
+\value{
+\item{bf}{value of the Bayes factor against hypothesis of independence}
+\item{nse}{estimate of the simulation standard error of the computed Bayes factor}
+}
+
+\author{Jim Albert}
+
+\examples{
+y=matrix(c(10,4,6,3,6,10),c(2,3))
+K=20
+m=1000
+bfindep(y,K,m)
+}
+
+\keyword{models}
diff --git a/man/binomial.beta.mix.Rd b/man/binomial.beta.mix.Rd
new file mode 100644
index 0000000..8b95d38
--- /dev/null
+++ b/man/binomial.beta.mix.Rd
@@ -0,0 +1,32 @@
+\name{binomial.beta.mix}
+\alias{binomial.beta.mix}
+\title{Computes the posterior for binomial sampling and a mixture of betas prior}
+\description{
+ Computes the parameters and mixing probabilities for a binomial sampling problem
+where the prior is a discrete mixture of beta densities.
+}
+\usage{
+binomial.beta.mix(probs,betapar,data)
+}
+\arguments{
+ \item{probs}{vector of probabilities of the beta components of the prior}
+ \item{betapar}{matrix where each row contains the shape parameters for a beta component of the prior}
+ \item{data}{vector of number of successes and number of failures}
+}
+
+\value{
+\item{probs}{vector of probabilities of the beta components of the posterior}
+\item{betapar}{matrix where each row contains the shape parameters for a beta component of the posterior}
+}
+\author{Jim Albert}
+
+\examples{
+probs=c(.5, .5)
+beta.par1=c(15,5)
+beta.par2=c(10,10)
+betapar=rbind(beta.par1,beta.par2)
+data=c(20,15)
+binomial.beta.mix(probs,betapar,data)
+}
+
+\keyword{models}
diff --git a/man/birdextinct.Rd b/man/birdextinct.Rd
new file mode 100644
index 0000000..1f6e7b1
--- /dev/null
+++ b/man/birdextinct.Rd
@@ -0,0 +1,28 @@
+\name{birdextinct}
+\alias{birdextinct}
+\docType{data}
+\title{Bird measurements from British islands}
+\description{
+Measurements on breeding pairs of landbird species were collected from 16
+islands around Britain over several decades.
+}
+\usage{
+birdextinct
+}
+\format{
+A data frame with 62 observations on the following 5 variables.
+ \describe{
+ \item{species}{name of bird species}
+ \item{time}{average time of extinction on the islands}
+ \item{nesting}{average number of nesting pairs}
+ \item{size}{size of the species, 1 or 0 if large or small}
+ \item{status}{status of the species, 1 or 0 if resident or migrant}
+ }
+}
+\source{
+Pimm, S., Jones, H., and Diamond, J. (1988),
+On the risk of extinction,
+American Naturalist, 132, 757-785.
+}
+
+\keyword{datasets}
diff --git a/man/birthweight.Rd b/man/birthweight.Rd
new file mode 100644
index 0000000..48bb585
--- /dev/null
+++ b/man/birthweight.Rd
@@ -0,0 +1,24 @@
+\name{birthweight}
+\alias{birthweight}
+\docType{data}
+\title{Birthweight regression study}
+\description{
+Dobson describes a study where one is interested in predicting
+a baby's birthweight based on the gestational age and the baby's
+gender.
+}
+\usage{
+birthweight
+}
+\format{
+ A data frame with 24 observations on the following 3 variables.
+ \describe{
+ \item{age}{gestational age in weeks}
+ \item{gender}{gender of the baby where 0 (1) is male (female)}
+ \item{weight}{birthweight of baby in grams}
+ }
+}
+\source{Dobson, A. (2001), An Introduction to Generalized Linear Models, New York:
+Chapman and Hall.}
+
+\keyword{datasets}
diff --git a/man/blinreg.Rd b/man/blinreg.Rd
new file mode 100644
index 0000000..ec76f33
--- /dev/null
+++ b/man/blinreg.Rd
@@ -0,0 +1,33 @@
+\name{blinreg}
+\alias{blinreg}
+\title{Simulation from Bayesian linear regression model}
+\description{
+ Gives a simulated sample from the joint posterior distribution of the regression
+vector and the error standard deviation for a linear regression model with a
+ noninformative or g prior.
+}
+\usage{
+blinreg(y,X,m,prior=NULL)
+}
+\arguments{
+ \item{y}{vector of responses}
+ \item{X}{design matrix}
+ \item{m}{number of simulations desired}
+ \item{prior}{list with components c0 and beta0 of Zellner's g prior}
+}
+
+\value{
+\item{beta}{matrix of simulated draws of beta where each row corresponds to one draw}
+\item{sigma}{vector of simulated draws of the error standard deviation}
+}
+\author{Jim Albert}
+
+\examples{
+chirps=c(20,16.0,19.8,18.4,17.1,15.5,14.7,17.1,15.4,16.2,15,17.2,16,17,14.1)
+temp=c(88.6,71.6,93.3,84.3,80.6,75.2,69.7,82,69.4,83.3,78.6,82.6,80.6,83.5,76.3)
+X=cbind(1,chirps)
+m=1000
+s=blinreg(temp,X,m)
+}
+
+\keyword{models}
diff --git a/man/blinregexpected.Rd b/man/blinregexpected.Rd
new file mode 100644
index 0000000..dfd22f1
--- /dev/null
+++ b/man/blinregexpected.Rd
@@ -0,0 +1,35 @@
+\name{blinregexpected}
+\alias{blinregexpected}
+\title{Simulates values of expected response for linear regression model}
+\description{
+Simulates draws of the posterior distribution of an expected response for
+a linear regression model with a noninformative prior}
+\usage{
+blinregexpected(X1,theta.sample)
+}
+\arguments{
+ \item{X1}{matrix where each row corresponds to a covariate set}
+ \item{theta.sample}{list with components beta, matrix of simulated draws of regression vector,
+and sigma, vector of simulated draws of sampling error standard deviation}
+}
+
+\value{
+matrix where a column corresponds to the simulated draws of the expected response for a given
+covariate set
+}
+
+\author{Jim Albert}
+
+\examples{
+chirps=c(20,16.0,19.8,18.4,17.1,15.5,14.7,17.1,15.4,16.2,15,17.2,16,17,14.1)
+temp=c(88.6,71.6,93.3,84.3,80.6,75.2,69.7,82,69.4,83.3,78.6,82.6,80.6,83.5,76.3)
+X=cbind(1,chirps)
+m=1000
+theta.sample=blinreg(temp,X,m)
+covset1=c(1,15)
+covset2=c(1,20)
+X1=rbind(covset1,covset2)
+blinregexpected(X1,theta.sample)
+}
+
+\keyword{models}
diff --git a/man/blinregpred.Rd b/man/blinregpred.Rd
new file mode 100644
index 0000000..c4c03dc
--- /dev/null
+++ b/man/blinregpred.Rd
@@ -0,0 +1,35 @@
+\name{blinregpred}
+\alias{blinregpred}
+\title{Simulates values of predicted response for linear regression model}
+\description{
+Simulates draws of the predictive distribution of a future response for
+a linear regression model with a noninformative prior}
+\usage{
+blinregpred(X1,theta.sample)
+}
+\arguments{
+ \item{X1}{matrix where each row corresponds to a covariate set}
+ \item{theta.sample}{list with components beta, matrix of simulated draws of regression vector,
+and sigma, vector of simulated draws of sampling error standard deviation}
+}
+
+\value{
+matrix where a column corresponds to the simulated draws of the predicted response for a given
+covariate set
+}
+
+\author{Jim Albert}
+
+\examples{
+chirps=c(20,16.0,19.8,18.4,17.1,15.5,14.7,17.1,15.4,16.2,15,17.2,16,17,14.1)
+temp=c(88.6,71.6,93.3,84.3,80.6,75.2,69.7,82,69.4,83.3,78.6,82.6,80.6,83.5,76.3)
+X=cbind(1,chirps)
+m=1000
+theta.sample=blinreg(temp,X,m)
+covset1=c(1,15)
+covset2=c(1,20)
+X1=rbind(covset1,covset2)
+blinregpred(X1,theta.sample)
+}
+
+\keyword{models}
diff --git a/man/bprobit.probs.Rd b/man/bprobit.probs.Rd
new file mode 100644
index 0000000..b138218
--- /dev/null
+++ b/man/bprobit.probs.Rd
@@ -0,0 +1,35 @@
+\name{bprobit.probs}
+\alias{bprobit.probs}
+\title{Simulates fitted probabilities for a probit regression model}
+\description{
+ Gives a simulated sample for fitted probabilities for a binary response
+regression model with a probit link and noninformative prior.
+}
+\usage{
+bprobit.probs(X1,fit)
+}
+\arguments{
+ \item{X1}{matrix where each row corresponds to a covariate set}
+ \item{fit}{simulated matrix of draws of the regression vector}
+}
+
+\value{
+matrix of simulated draws of the fitted probabilities, where a column corresponds to a
+particular covariate set
+}
+\author{Jim Albert}
+
+\examples{
+response=c(0,1,0,0,0,1,1,1,1,1)
+covariate=c(1,2,3,4,5,6,7,8,9,10)
+X=cbind(1,covariate)
+m=1000
+fit=bayes.probit(response,X,m)
+x1=c(1,3)
+x2=c(1,8)
+X1=rbind(x1,x2)
+fittedprobs=bprobit.probs(X1,fit$beta)
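+# each column of fittedprobs contains simulated draws of the fitted probability for one covariate set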
+}
+
+\keyword{models}
diff --git a/man/bradley.terry.post.Rd b/man/bradley.terry.post.Rd
new file mode 100644
index 0000000..bba7a1d
--- /dev/null
+++ b/man/bradley.terry.post.Rd
@@ -0,0 +1,30 @@
+\name{bradley.terry.post}
+\alias{bradley.terry.post}
+\title{Log posterior of a Bradley Terry random effects model}
+\description{
+ Computes the log posterior density of the talent parameters and the log
+standard deviation for a Bradley Terry model with normal
+random effects
+}
+\usage{
+bradley.terry.post(theta,data)
+}
+
+\arguments{
+ \item{theta}{vector of talent parameters and log standard deviation}
+ \item{data}{data matrix with columns team1, team2, wins by team1, and
+wins by team2}
+}
+
+\value{value of the log posterior}
+
+\author{Jim Albert}
+
+\examples{
+data(baseball.1964)
+team.strengths=rep(0,10)
+log.sigma=0
+bradley.terry.post(c(team.strengths,log.sigma),baseball.1964)
+}
+
+\keyword{models}
diff --git a/man/breastcancer.Rd b/man/breastcancer.Rd
new file mode 100644
index 0000000..29a318e
--- /dev/null
+++ b/man/breastcancer.Rd
@@ -0,0 +1,24 @@
+\name{breastcancer}
+\alias{breastcancer}
+\docType{data}
+\title{Survival experience of women with breast cancer under treatment}
+\description{
+Collett (1994) describes a study to evaluate the effectiveness of a
+histochemical marker in predicting the survival experience of women with
+breast cancer.
+}
+\usage{
+breastcancer
+}
+\format{
+ A data frame with 45 observations on the following 3 variables.
+ \describe{
+ \item{time}{survival time in months}
+ \item{status}{censoring indicator where 1 (0) indicates a complete (censored) survival time}
+ \item{stain}{indicates by a 0 (1) if tumor was negatively (positively) stained}
+ }
+}
+\source{Collett, D. (1994), Modelling Survival Data in Medical Research, London:
+Chapman and Hall.}
+
+\keyword{datasets}
diff --git a/man/calculus.grades.Rd b/man/calculus.grades.Rd
new file mode 100644
index 0000000..932b695
--- /dev/null
+++ b/man/calculus.grades.Rd
@@ -0,0 +1,21 @@
+\name{calculus.grades}
+\alias{calculus.grades}
+\docType{data}
+\title{Calculus grades dataset}
+\description{
+Grades and other variables collected for a
+sample of calculus students.
+}
+\usage{
+calculus.grades
+}
+\format{
+ A data frame with 100 observations on the following 3 variables.
+ \describe{
+ \item{grade}{indicates if student received a A or B in class}
+ \item{prev.grade}{indicates if student received a A in prerequisite math class}
+ \item{act}{score on the ACT math test}
+ }
+}
+\source{Collected by a colleague of the author at his university.}
+\keyword{datasets}
diff --git a/man/cancermortality.Rd b/man/cancermortality.Rd
new file mode 100644
index 0000000..0eebe02
--- /dev/null
+++ b/man/cancermortality.Rd
@@ -0,0 +1,23 @@
+\name{cancermortality}
+\alias{cancermortality}
+\docType{data}
+\title{Cancer mortality data}
+\description{
+Number of cancer deaths and number at risk for 20 cities in Missouri.
+}
+\usage{
+cancermortality
+}
+\format{
+A data frame with 20 observations on the following 2 variables.
+ \describe{
+ \item{y}{number of cancer deaths}
+ \item{n}{number at risk}
+ }
+}
+\source{Tsutakawa, R., Shoop, G., and Marienfeld, C. (1985),
+Empirical Bayes Estimation
+of Cancer Mortality Rates, Statistics in Medicine, 4, 201-212.
+}
+
+\keyword{datasets}
diff --git a/man/careertraj.setup.Rd b/man/careertraj.setup.Rd
new file mode 100644
index 0000000..d3ce101
--- /dev/null
+++ b/man/careertraj.setup.Rd
@@ -0,0 +1,32 @@
+\name{careertraj.setup}
+\alias{careertraj.setup}
+\title{Setup for Career Trajectory Application}
+\description{
+ Sets up the data matrices for use with WinBUGS
+ in the career trajectory application.
+}
+\usage{
+careertraj.setup(data)
+}
+\arguments{
+ \item{data}{data matrix for ballplayers with variables Player, Year, Age, G, AB, R, H, X2B, X3B, HR, RBI, BB, SO}
+}
+
+\value{
+\item{player.names}{vector of player names}
+\item{y}{matrix of home runs for players where a row corresponds to the home runs for a player during
+all the years of his career}
+\item{n}{matrix of AB-SO for all players}
+\item{x}{matrix of ages for all players for all years of their careers}
+\item{T}{vector of number of seasons for all players}
+\item{N}{number of players}
+}
+
+\author{Jim Albert}
+
+\examples{
+data(sluggerdata)
+careertraj.setup(sluggerdata)
+}
+
+\keyword{models}
diff --git a/man/cauchyerrorpost.Rd b/man/cauchyerrorpost.Rd
new file mode 100644
index 0000000..62ca264
--- /dev/null
+++ b/man/cauchyerrorpost.Rd
@@ -0,0 +1,29 @@
+\name{cauchyerrorpost}
+\alias{cauchyerrorpost}
+\title{Log posterior of median and log scale parameters for Cauchy sampling}
+\description{
+ Computes the log posterior density of (M,log S) when
+a sample is taken from a Cauchy density with location M and scale S and a uniform
+prior distribution is taken on (M, log S)
+}
+\usage{
+cauchyerrorpost(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values of M and log S}
+ \item{data}{vector containing sample of observations}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+data=c(108, 51, 7, 43, 52, 54, 53, 49, 21, 48)
+theta=c(40,1)
+cauchyerrorpost(theta,data)
+}
+
+\keyword{models}
diff --git a/man/chemotherapy.Rd b/man/chemotherapy.Rd
new file mode 100644
index 0000000..0931e6c
--- /dev/null
+++ b/man/chemotherapy.Rd
@@ -0,0 +1,29 @@
+\name{chemotherapy}
+\alias{chemotherapy}
+\docType{data}
+\title{Chemotherapy treatment effects on ovarian cancer}
+\description{
+Edmonson et al (1979) studied the effect of different chemotherapy
+treatments following surgical treatment of ovarian cancer.
+}
+\usage{
+chemotherapy
+}
+\format{
+ A data frame with 26 observations on the following 5 variables.
+ \describe{
+ \item{patient}{patient number}
+ \item{time}{survival time in days following treatment}
+ \item{status}{indicates if time is censored (0) or actually observed (1)}
+ \item{treat}{control group (0) or treatment group (1)}
+ \item{age}{age of the patient}
+ }
+}
+\source{Edmonson, J., Fleming, T., Decker, D., Malkasian, G., Jorgensen, E.,
+Jefferies,
+J.,Webb, M., and Kvols, L. (1979), Different chemotherapeutic sensitivities
+and host factors affecting prognosis in advanced ovarian carcinoma versus
+minimal residual disease, Cancer Treatment Reports, 63, 241-247.
+}
+
+\keyword{datasets}
diff --git a/man/ctable.Rd b/man/ctable.Rd
new file mode 100644
index 0000000..b814714
--- /dev/null
+++ b/man/ctable.Rd
@@ -0,0 +1,27 @@
+\name{ctable}
+\alias{ctable}
+\title{Bayes factor against independence using uniform priors}
+\description{
+Computes a Bayes factor against independence for a two-way contingency table assuming
+uniform prior distributions}
+\usage{
+ctable(y,a)
+}
+\arguments{
+ \item{y}{matrix of counts}
+ \item{a}{matrix of prior hyperparameters}
+}
+
+\value{
+value of the Bayes factor against independence
+}
+
+\author{Jim Albert}
+
+\examples{
+y=matrix(c(10,4,6,3,6,10),c(2,3))
+a=matrix(rep(1,6),c(2,3))
+ctable(y,a)
+}
+
+\keyword{models}
diff --git a/man/darwin.Rd b/man/darwin.Rd
new file mode 100644
index 0000000..81d48d0
--- /dev/null
+++ b/man/darwin.Rd
@@ -0,0 +1,20 @@
+\name{darwin}
+\alias{darwin}
+\docType{data}
+\title{Darwin's data on plants}
+\description{
+Fifteen differences of the heights of cross- and self-fertilized plants quoted by
+Fisher (1960)}
+
+\usage{
+darwin
+}
+\format{
+A data frame with 15 observations on the following 1 variable.
+ \describe{
+ \item{difference}{difference of heights of two types of plants}
+ }
+}
+\source{Fisher, R. (1960), Statistical Methods for Research Workers,
+Edinburgh: Oliver and Boyd.}
+\keyword{datasets}
diff --git a/man/discint.Rd b/man/discint.Rd
new file mode 100644
index 0000000..abb4cac
--- /dev/null
+++ b/man/discint.Rd
@@ -0,0 +1,31 @@
+\name{discint}
+\alias{discint}
+\title{Highest probability interval for a discrete distribution}
+\description{
+Computes a highest probability interval for a discrete probability
+distribution
+}
+\usage{
+discint(dist, prob)
+}
+\arguments{
+ \item{dist}{probability distribution written as a matrix where
+ the first column contains the values and the second column
+ the probabilities}
+ \item{prob}{probability content of interest}
+}
+
+\value{
+ \item{prob}{exact probability content of interval}
+ \item{set}{set of values of the probability interval}
+}
+\author{Jim Albert}
+
+\examples{
+x=0:10
+probs=dbinom(x,size=10,prob=.3)
+dist=cbind(x,probs)
+pcontent=.8
+discint(dist,pcontent)
+}
+\keyword{models}
diff --git a/man/discrete.bayes.2.Rd b/man/discrete.bayes.2.Rd
new file mode 100644
index 0000000..c6bd416
--- /dev/null
+++ b/man/discrete.bayes.2.Rd
@@ -0,0 +1,41 @@
+\name{discrete.bayes.2}
+\alias{discrete.bayes.2}
+\alias{plot.bayes2}
+\title{Posterior distribution of two parameters with discrete priors}
+\description{
+Computes the posterior distribution for an arbitrary two parameter distribution
+ for a discrete prior
+distribution.
+}
+\usage{
+discrete.bayes.2(df,prior,y=NULL,...)
+}
+\arguments{
+ \item{df}{name of the function defining the sampling density of
+two parameters}
+ \item{prior}{matrix defining the prior density; the row names and column names
+of the matrix define respectively the values of parameter 1 and values of
+parameter 2 and the entries of the matrix give the prior probabilities}
+ \item{y}{matrix of data values, where each row corresponds to a
+single observation}
+ \item{...}{any further fixed parameter values
+used in the sampling density function}
+}
+
+\value{
+ \item{prob}{matrix of posterior probabilities}
+ \item{pred}{scalar with prior predictive probability}
+}
+
+\author{Jim Albert}
+
+\examples{
+p1 = seq(0.1, 0.9, length = 9)
+p2 = p1
+prior = matrix(1/81, 9, 9)
+dimnames(prior)[[1]] = p1
+dimnames(prior)[[2]] = p2
+discrete.bayes.2(twoproplike,prior)
+}
+
+\keyword{models}
diff --git a/man/discrete.bayes.Rd b/man/discrete.bayes.Rd
new file mode 100644
index 0000000..f8deb2c
--- /dev/null
+++ b/man/discrete.bayes.Rd
@@ -0,0 +1,39 @@
+\name{discrete.bayes}
+\alias{discrete.bayes}
+\alias{print.bayes}
+\alias{plot.bayes}
+\alias{summary.bayes}
+\title{Posterior distribution with discrete priors}
+\description{
+Computes the posterior distribution for an arbitrary one parameter distribution
+ for a discrete prior
+distribution.
+}
+\usage{
+discrete.bayes(df,prior,y,...)
+}
+\arguments{
+ \item{df}{name of the function defining the sampling density}
+ \item{prior}{vector defining the prior density; names of the vector
+ define the parameter values and entries of the vector define
+ the prior probabilities}
+ \item{y}{vector of data values}
+ \item{...}{any further fixed parameter values used in the sampling density
+ function}
+}
+\value{
+ \item{prob}{vector of posterior probabilities}
+ \item{pred}{scalar with prior predictive probability}
+}
+
+\author{Jim Albert}
+
+\examples{
+prior=c(.25,.25,.25,.25)
+names(prior)=c(.2,.25,.3,.35)
+y=5
+n=10
+discrete.bayes(dbinom,prior,y,size=n)
+}
+
+\keyword{models}
diff --git a/man/dmnorm.Rd b/man/dmnorm.Rd
new file mode 100644
index 0000000..080c543
--- /dev/null
+++ b/man/dmnorm.Rd
@@ -0,0 +1,30 @@
+\name{dmnorm}
+\alias{dmnorm}
+\title{The probability density function for the multivariate normal (Gaussian) probability distribution }
+\description{
+ Computes the density of a multivariate normal distribution
+}
+\usage{
+dmnorm(x, mean = rep(0, d), varcov, log = FALSE)
+}
+\arguments{
+ \item{x}{vector of length d or matrix with d columns, giving the coordinates of points where the density is to be evaluated}
+ \item{mean}{numeric vector giving the location parameter of the distribution}
+ \item{varcov}{a positive definite matrix representing the scale matrix of the distribution}
+ \item{log}{a logical value; if TRUE, the logarithm of the density is to be computed}
+}
+\value{
+vector of density values
+}
+
+\author{Jim Albert}
+
+\examples{
+mu <- c(1,12,2)
+Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
+x <- c(2,14,0)
+f <- dmnorm(x, mu, Sigma)
+}
+
+\keyword{models}
+
diff --git a/man/dmt.Rd b/man/dmt.Rd
new file mode 100644
index 0000000..e6a2306
--- /dev/null
+++ b/man/dmt.Rd
@@ -0,0 +1,32 @@
+\name{dmt}
+\alias{dmt}
+\title{Probability density function for multivariate t}
+\description{
+ Computes the density of a multivariate t distribution
+}
+\usage{
+dmt(x, mean = rep(0, d), S, df = Inf, log=FALSE)
+}
+\arguments{
+ \item{x}{vector of length d or matrix with d columns, giving the coordinates of points where the density is to be evaluated}
+ \item{mean}{numeric vector giving the location parameter of the distribution}
+ \item{S}{a positive definite matrix representing the scale matrix of the distribution}
+ \item{df}{degrees of freedom}
+ \item{log}{a logical value; if TRUE, the logarithm of the density is to be computed}
+}
+\value{
+vector of density values
+}
+
+\author{Jim Albert}
+
+\examples{
+mu <- c(1,12,2)
+Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
+df <- 4
+x <- c(2,14,0)
+f <- dmt(x, mu, Sigma, df)
+}
+
+\keyword{models}
+
diff --git a/man/donner.Rd b/man/donner.Rd
new file mode 100644
index 0000000..5969ee3
--- /dev/null
+++ b/man/donner.Rd
@@ -0,0 +1,23 @@
+\name{donner}
+\alias{donner}
+\docType{data}
+\title{Donner survival study}
+\description{
+Data contains the age, gender and survival status for 45 members of the Donner Party
+who experienced difficulties in crossing the Sierra Nevada mountains in California.
+}
+\usage{
+donner
+}
+\format{
+A data frame with 45 observations on the following 3 variables.
+ \describe{
+ \item{age}{age of person}
+ \item{male}{gender that is 1 (0) if person is male (female)}
+ \item{survival}{survival status, 1 or 0 if person survived or died}
+ }
+}
+\source{Grayson, D. (1990), Donner Party deaths: a demographic
+assessment, Journal of Anthropological Research, 46, 223-242.}
+
+\keyword{datasets}
diff --git a/man/election.2008.Rd b/man/election.2008.Rd
new file mode 100644
index 0000000..25811e2
--- /dev/null
+++ b/man/election.2008.Rd
@@ -0,0 +1,22 @@
+\name{election.2008}
+\alias{election.2008}
+\docType{data}
+\title{Poll data from 2008 U.S. Presidential Election}
+\description{
+Results of recent state polls in the 2008 United States Presidential
+Election between Barack Obama and John McCain.
+}
+\usage{
+election.2008
+}
+\format{
+ A data frame with 51 observations on the following 4 variables.
+ \describe{
+ \item{State}{name of the state}
+ \item{M.pct}{percentage of poll survey for McCain}
+ \item{O.pct}{percentage of poll survey for Obama}
+ \item{EV}{number of electoral votes}
+ }
+}
+\source{Data collected by author in November 2008 from www.cnn.com website.}
+\keyword{datasets}
diff --git a/man/election.Rd b/man/election.Rd
new file mode 100644
index 0000000..5a8f807
--- /dev/null
+++ b/man/election.Rd
@@ -0,0 +1,24 @@
+\name{election}
+\alias{election}
+\docType{data}
+\title{Florida election data}
+\description{
+For each of the Florida counties in the 2000 presidential election, the number of
+votes for George Bush, Al Gore, and Pat Buchanan is recorded. Also the number of
+votes for the minority candidate Ross Perot in the 1996 presidential election is
+recorded.
+}
+\usage{
+election
+}
+\format{
+ A data frame with 67 observations on the following 5 variables.
+ \describe{
+ \item{county}{name of Florida county}
+ \item{perot}{number of votes for Ross Perot in 1996 election}
+ \item{gore}{number of votes for Al Gore in 2000 election}
+ \item{bush}{number of votes for George Bush in 2000 election}
+ \item{buchanan}{number of votes for Pat Buchanan in 2000 election}
+ }
+}
+\keyword{datasets}
diff --git a/man/footballscores.Rd b/man/footballscores.Rd
new file mode 100644
index 0000000..f6a8d65
--- /dev/null
+++ b/man/footballscores.Rd
@@ -0,0 +1,27 @@
+\name{footballscores}
+\alias{footballscores}
+\docType{data}
+\title{Game outcomes and point spreads for American football}
+\description{
+Game outcomes and point spreads for 672 professional
+American football games.
+}
+\usage{
+footballscores
+}
+\format{
+ A data frame with 672 observations on the following 8 variables.
+ \describe{
+ \item{year}{year of game}
+ \item{home}{indicates if favorite is the home team}
+ \item{favorite}{score of favorite team}
+ \item{underdog}{score of underdog team}
+ \item{spread}{point spread}
+ \item{favorite.name}{name of favorite team}
+ \item{underdog.name}{name of underdog team}
+ \item{week}{week number of the season}
+ }
+}
+\source{Gelman, A., Carlin, J., Stern, H., and Rubin, D. (2003),
+Bayesian Data Analysis, Chapman and Hall.}
+\keyword{datasets}
diff --git a/man/gibbs.Rd b/man/gibbs.Rd
new file mode 100644
index 0000000..4019e22
--- /dev/null
+++ b/man/gibbs.Rd
@@ -0,0 +1,35 @@
+\name{gibbs}
+\alias{gibbs}
+\title{Metropolis within Gibbs sampling algorithm of a posterior distribution}
+\description{
+ Implements a Metropolis-within-Gibbs sampling algorithm for an arbitrary real-valued
+posterior density defined by the user
+}
+\usage{
+gibbs(logpost,start,m,scale,...)
+}
+\arguments{
+ \item{logpost}{function defining the log posterior density}
+ \item{start}{array with a single row that gives the starting value of the parameter vector}
+ \item{m}{the number of iterations of the chain}
+ \item{scale}{vector of scale parameters for the random walk Metropolis steps}
+ \item{...}{data that is used in the function logpost}
+}
+
+\value{
+\item{par}{a matrix of simulated values where each row corresponds to a value of the vector parameter}
+\item{accept}{vector of acceptance rates of the Metropolis steps of the algorithm}
+}
+
+\author{Jim Albert}
+
+\examples{
+data=c(6,2,3,10)
+start=array(c(1,1),c(1,2))
+m=1000
+scale=c(2,2)
+s=gibbs(logctablepost,start,m,scale,data)
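+# s$par contains the simulated parameter draws; s$accept the Metropolis acceptance rates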
+}
+
+\keyword{models}
diff --git a/man/groupeddatapost.Rd b/man/groupeddatapost.Rd
new file mode 100644
index 0000000..22b393a
--- /dev/null
+++ b/man/groupeddatapost.Rd
@@ -0,0 +1,32 @@
+\name{groupeddatapost}
+\alias{groupeddatapost}
+\title{Log posterior of normal parameters when data is in grouped form}
+\description{
+ Computes the log posterior density of (M,log S) for normal sampling where the data is
+observed in grouped form
+}
+\usage{
+groupeddatapost(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values M and log S}
+ \item{data}{list with components int.lo, a vector of left endpoints,
+int.hi, a vector of right endpoints, and f, a vector of bin frequencies}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+int.lo=c(-Inf,10,15,20,25)
+int.hi=c(10,15,20,25,Inf)
+f=c(2,5,8,4,2)
+data=list(int.lo=int.lo,int.hi=int.hi,f=f)
+theta=c(20,1)
+groupeddatapost(theta,data)
+}
+
+\keyword{models}
diff --git a/man/hearttransplants.Rd b/man/hearttransplants.Rd
new file mode 100644
index 0000000..547f244
--- /dev/null
+++ b/man/hearttransplants.Rd
@@ -0,0 +1,25 @@
+\name{hearttransplants}
+\alias{hearttransplants}
+\docType{data}
+\title{Heart transplant mortality data}
+\description{
+The number of deaths within 30 days of heart transplant surgery for 94 U.S. hospitals
+that performed at least 10 heart transplant surgeries. Also the exposure, the expected number of deaths, is
+recorded for each hospital.}
+
+\usage{
+hearttransplants
+}
+\format{
+A data frame with 94 observations on the following 2 variables.
+ \describe{
+ \item{e}{expected number of deaths (the exposure)}
+ \item{y}{observed number of deaths within 30 days of heart transplant surgery}
+ }
+}
+\source{Christiansen, C. and Morris, C. (1995), Fitting and checking
+a two-level Poisson model: modeling patient mortality rates in heart
+transplant patients, in Berry, D. and Stangl, D., eds, Bayesian
+Biostatistics, Marcel Dekker.}
+
+\keyword{datasets}
diff --git a/man/hiergibbs.Rd b/man/hiergibbs.Rd
new file mode 100644
index 0000000..6305970
--- /dev/null
+++ b/man/hiergibbs.Rd
@@ -0,0 +1,30 @@
+\name{hiergibbs}
+\alias{hiergibbs}
+\title{Gibbs sampling for a hierarchical regression model}
+\description{
+ Implements Gibbs sampling for estimating a two-way table of means
+under a hierarchical regression model.
+}
+\usage{
+hiergibbs(data,m)
+}
+\arguments{
+ \item{data}{data matrix with columns observed sample means, sample sizes, and values of two covariates}
+ \item{m}{number of cycles of Gibbs sampling}
+}
+
+\value{
+ \item{beta}{matrix of simulated values of regression vector}
+ \item{mu}{matrix of simulated values of cell means}
+ \item{var}{vector of simulated values of second-stage prior variance}
+}
+
+\author{Jim Albert}
+
+\examples{
+data(iowagpa)
+m=1000
+s=hiergibbs(iowagpa,m)
+}
+
+\keyword{models}
diff --git a/man/histprior.Rd b/man/histprior.Rd
new file mode 100644
index 0000000..3480ec3
--- /dev/null
+++ b/man/histprior.Rd
@@ -0,0 +1,30 @@
+\name{histprior}
+\alias{histprior}
+\title{Density function of a histogram distribution}
+\description{
+Computes the density of a probability distribution defined on a set
+of equal-width intervals
+}
+\usage{
+histprior(p,midpts,prob)
+}
+\arguments{
+ \item{p}{vector of values for which density is to be computed}
+ \item{midpts}{vector of midpoints of the intervals}
+ \item{prob}{vector of probabilities of the intervals}
+}
+
+\value{
+vector of values of the probability density
+}
+
+\author{Jim Albert}
+
+\examples{
+midpts=c(.1,.3,.5,.7,.9)
+prob=c(.2,.2,.4,.1,.1)
+p=seq(.01,.99,by=.01)
+plot(p,histprior(p,midpts,prob),type="l")
+}
+
+\keyword{models}
diff --git a/man/howardprior.Rd b/man/howardprior.Rd
new file mode 100644
index 0000000..8be1d87
--- /dev/null
+++ b/man/howardprior.Rd
@@ -0,0 +1,28 @@
+\name{howardprior}
+\alias{howardprior}
+\title{Logarithm of Howard's dependent prior for two proportions}
+\description{
+Computes the logarithm of a dependent prior on two proportions
+proposed by Howard in a Statistical Science paper in 1998.
+}
+\usage{
+howardprior(xy,par)
+}
+\arguments{
+ \item{xy}{vector of proportions p1 and p2}
+ \item{par}{vector containing parameter values alpha, beta, gamma, delta, sigma}
+}
+
+\value{
+value of the log prior density
+}
+
+\author{Jim Albert}
+
+\examples{
+param=c(1,1,1,1,2)
+p=c(.1,.5)
+howardprior(p,param)
+}
+
+\keyword{models}
diff --git a/man/impsampling.Rd b/man/impsampling.Rd
new file mode 100644
index 0000000..0deed6b
--- /dev/null
+++ b/man/impsampling.Rd
@@ -0,0 +1,37 @@
+\name{impsampling}
+\alias{impsampling}
+\title{Importance sampling using a t proposal density}
+\description{
+ Implements importance sampling to compute the posterior mean of a function
+using a multivariate t proposal density
+}
+\usage{
+impsampling(logf,tpar,h,n,data)
+}
+\arguments{
+ \item{logf}{function that defines the logarithm of the density of interest}
+ \item{tpar}{list of parameters of t proposal density including the mean m, scale matrix var,
+and degrees of freedom df}
+ \item{h}{function that defines h(theta)}
+ \item{n}{number of simulated draws from proposal density}
+ \item{data}{data and/or parameters used in the function logf}
+}
+\value{
+\item{est}{estimate of the posterior mean}
+\item{se}{simulation standard error of estimate}
+\item{theta}{matrix of simulated draws from proposal density}
+\item{wt}{vector of importance sampling weights}
+}
+\author{Jim Albert}
+
+\examples{
+data(cancermortality)
+start=c(-7,6)
+fit=laplace(betabinexch,start,cancermortality)
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+myfunc=function(theta) return(theta[2])
+theta=impsampling(betabinexch,tpar,myfunc,1000,cancermortality)
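+# theta$est estimates the posterior mean of log K; theta$se is its simulation standard error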
+}
+
+\keyword{models}
diff --git a/man/indepmetrop.Rd b/man/indepmetrop.Rd
new file mode 100644
index 0000000..6d84067
--- /dev/null
+++ b/man/indepmetrop.Rd
@@ -0,0 +1,32 @@
+\name{indepmetrop}
+\alias{indepmetrop}
+\title{Independence Metropolis chain of a posterior distribution}
+\description{
+ Simulates iterates of an independence Metropolis chain with a normal proposal density for an arbitrary real-valued
+posterior density defined by the user}
+\usage{
+indepmetrop(logpost,proposal,start,m,...)
+}
+\arguments{
+ \item{logpost}{function defining the log posterior density}
+ \item{proposal}{a list containing mu, an estimated mean and var, an estimated variance-covariance matrix, of the normal proposal density}
+ \item{start}{vector containing the starting value of the parameter}
+ \item{m}{the number of iterations of the chain}
+ \item{...}{data that is used in the function logpost}
+}
+\value{
+\item{par}{a matrix of simulated values where each row corresponds to a value of the vector parameter}
+\item{accept}{the acceptance rate of the algorithm}
+}
+\author{Jim Albert}
+
+\examples{
+data=c(6,2,3,10)
+proposal=list(mu=array(c(2.3,-.1),c(2,1)),var=diag(c(1,1)))
+start=array(c(0,0),c(1,2))
+m=1000
+fit=indepmetrop(logctablepost,proposal,start,m,data)
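+# fit$par contains the simulated draws; fit$accept gives the acceptance rate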
+}
+
+\keyword{models}
diff --git a/man/iowagpa.Rd b/man/iowagpa.Rd
new file mode 100644
index 0000000..3415f1b
--- /dev/null
+++ b/man/iowagpa.Rd
@@ -0,0 +1,25 @@
+\name{iowagpa}
+\alias{iowagpa}
+\docType{data}
+\title{Admissions data for a university}
+\description{
+Students at a major university are categorized with respect to their
+high school rank and their ACT score. For each combination of high school rank and
+ACT score, one records the mean grade point average (GPA).
+}
+\usage{
+iowagpa
+}
+\format{
+ A data frame with 40 observations on the following 4 variables.
+ \describe{
+ \item{gpa}{mean grade point average}
+ \item{n}{sample size}
+ \item{HSR}{high school rank}
+ \item{ACT}{ACT score}
+ }
+}
+\source{Albert, J. (1994), A Bayesian approach to estimation of GPA's
+of University of Iowa freshmen under order restrictions,
+Journal of Educational Statistics, 19, 1-22.}
+\keyword{datasets}
diff --git a/man/jeter2004.Rd b/man/jeter2004.Rd
new file mode 100644
index 0000000..806ee70
--- /dev/null
+++ b/man/jeter2004.Rd
@@ -0,0 +1,27 @@
+\name{jeter2004}
+\alias{jeter2004}
+\docType{data}
+\title{Hitting data for Derek Jeter}
+\description{
+Batting data for the baseball player Derek Jeter for all 154 games in the 2004 season.}
+
+\usage{
+jeter2004
+}
+\format{
+ A data frame with 154 observations on the following 10 variables.
+ \describe{
+ \item{Game}{the game number}
+ \item{AB}{the number of at-bats}
+ \item{R}{the number of runs scored}
+ \item{H}{the number of hits}
+ \item{X2B}{the number of doubles}
+ \item{X3B}{the number of triples}
+ \item{HR}{the number of home runs}
+ \item{RBI}{the number of runs batted in}
+ \item{BB}{the number of walks}
+ \item{SO}{the number of strikeouts}
+ }
+}
+\source{Collected from game log data from www.retrosheet.org.}
+\keyword{datasets}
diff --git a/man/laplace.Rd b/man/laplace.Rd
new file mode 100644
index 0000000..f11abd0
--- /dev/null
+++ b/man/laplace.Rd
@@ -0,0 +1,39 @@
+\name{laplace}
+\alias{laplace}
+\title{Summarization of a posterior density by the Laplace method}
+\description{
+ For a general posterior density, computes the posterior mode,
+the associated variance-covariance matrix, and an estimate of the
+logarithm of the normalizing constant.
+}
+\usage{
+laplace(logpost,mode,...)
+}
+\arguments{
+ \item{logpost}{function that defines the logarithm of the posterior density}
+ \item{mode}{vector that is a guess at the posterior mode}
+ \item{...}{vector or list of parameters associated with the function logpost}
+}
+
+\value{
+\item{mode}{current estimate of the posterior mode}
+\item{var}{current estimate of the associated variance-covariance matrix}
+\item{int}{estimate of the logarithm of the normalizing constant}
+\item{converge}{indication (TRUE or FALSE) if the algorithm converged}
+}
+
+\author{Jim Albert}
+
+\examples{
+logpost=function(theta,data)
+{
+s=5
+sum(-log(1+(data-theta)^2/s^2))
+}
+data=c(10,12,14,13,12,15)
+start=10
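+# returns the posterior mode, variance-covariance matrix, log normalizing constant, and convergence flag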
+laplace(logpost,start,data)
+}
+
+\keyword{models}
diff --git a/man/lbinorm.Rd b/man/lbinorm.Rd
new file mode 100644
index 0000000..2584c16
--- /dev/null
+++ b/man/lbinorm.Rd
@@ -0,0 +1,29 @@
+\name{lbinorm}
+\alias{lbinorm}
+\title{Logarithm of bivariate normal density}
+\description{
+Computes the logarithm of a bivariate normal density
+}
+\usage{
+lbinorm(xy,par)
+}
+\arguments{
+ \item{xy}{vector of values of two variables x and y}
+ \item{par}{list with components m, a vector of means, and v, a variance-covariance matrix}
+}
+
+\value{
+value of the kernel of the log density
+}
+
+\author{Jim Albert}
+
+\examples{
+mean=c(0,0)
+varcov=diag(c(1,1))
+value=c(1,1)
+param=list(m=mean,v=varcov)
+lbinorm(value,param)
+}
+
+\keyword{models}
diff --git a/man/logctablepost.Rd b/man/logctablepost.Rd
new file mode 100644
index 0000000..6125444
--- /dev/null
+++ b/man/logctablepost.Rd
@@ -0,0 +1,30 @@
+\name{logctablepost}
+\alias{logctablepost}
+\title{Log posterior of difference and sum of logits in a 2x2 table}
+\description{
+ Computes the log posterior density for the difference and sum of logits
+in a 2x2 contingency table for independent binomial samples and uniform
+prior placed on the logits
+}
+\usage{
+logctablepost(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values "difference of logits" and "sum of logits"}
+ \item{data}{vector containing number of successes and failures for first sample, and then second sample}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+s1=6; f1=2; s2=3; f2=10
+data=c(s1,f1,s2,f2)
+theta=c(2,4)
+logctablepost(theta,data)
+}
+
+\keyword{models}
diff --git a/man/logisticpost.Rd b/man/logisticpost.Rd
new file mode 100644
index 0000000..7f124a5
--- /dev/null
+++ b/man/logisticpost.Rd
@@ -0,0 +1,32 @@
+\name{logisticpost}
+\alias{logisticpost}
+\title{Log posterior for a binary response model with a logistic link and a uniform prior}
+\description{
+ Computes the log posterior density of (beta0, beta1) when
+yi are independent binomial(ni, pi) and logit(pi)=beta0+beta1*xi and
+a uniform prior is placed on (beta0, beta1)
+}
+\usage{
+logisticpost(beta,data)
+}
+\arguments{
+ \item{beta}{vector of parameter values beta0 and beta1}
+ \item{data}{matrix of columns of covariate values x, sample sizes n, and number of successes y}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+x = c(-0.86,-0.3,-0.05,0.73)
+n = c(5,5,5,5)
+y = c(0,1,3,5)
+data = cbind(x, n, y)
+beta=c(2,10)
+logisticpost(beta,data)
+}
+
+\keyword{models}
diff --git a/man/logpoissgamma.Rd b/man/logpoissgamma.Rd
new file mode 100644
index 0000000..5af9b72
--- /dev/null
+++ b/man/logpoissgamma.Rd
@@ -0,0 +1,31 @@
+\name{logpoissgamma}
+\alias{logpoissgamma}
+\title{Log posterior with Poisson sampling and gamma prior}
+\description{
+Computes the logarithm of the posterior density of a Poisson log mean
+with a gamma prior
+}
+\usage{
+logpoissgamma(theta,datapar)
+}
+\arguments{
+ \item{theta}{vector of values of the log mean parameter}
+ \item{datapar}{list with components data, vector of observations, and par, vector of
+parameters of the gamma prior}
+}
+
+\value{
+vector of values of the log posterior for all values in theta
+}
+
+\author{Jim Albert}
+
+\examples{
+data=c(2,4,3,6,1,0,4,3,10,2)
+par=c(1,1)
+datapar=list(data=data,par=par)
+theta=c(-1,0,1,2)
+logpoissgamma(theta,datapar)
+}
+
+\keyword{models}
diff --git a/man/logpoissnormal.Rd b/man/logpoissnormal.Rd
new file mode 100644
index 0000000..ea68123
--- /dev/null
+++ b/man/logpoissnormal.Rd
@@ -0,0 +1,31 @@
+\name{logpoissnormal}
+\alias{logpoissnormal}
+\title{Log posterior with Poisson sampling and normal prior}
+\description{
+Computes the logarithm of the posterior density of a Poisson log mean
+with a normal prior
+}
+\usage{
+logpoissnormal(theta,datapar)
+}
+\arguments{
+ \item{theta}{vector of values of the log mean parameter}
+ \item{datapar}{list with components data, vector of observations, and par, vector of
+parameters of the normal prior}
+}
+
+\value{
+vector of values of the log posterior for all values in theta
+}
+
+\author{Jim Albert}
+
+\examples{
+data=c(2,4,3,6,1,0,4,3,10,2)
+par=c(0,1)
+datapar=list(data=data,par=par)
+theta=c(-1,0,1,2)
+logpoissnormal(theta,datapar)
+}
+
+\keyword{models}
diff --git a/man/marathontimes.Rd b/man/marathontimes.Rd
new file mode 100644
index 0000000..e30c556
--- /dev/null
+++ b/man/marathontimes.Rd
@@ -0,0 +1,19 @@
+\name{marathontimes}
+\alias{marathontimes}
+\docType{data}
+\title{Marathon running times}
+\description{
+Running times in minutes for twenty male runners between the ages 20 and 29
+who ran the New York Marathon.
+}
+\usage{
+marathontimes
+}
+\format{
+A data frame with 20 observations on the following 1 variable.
+ \describe{
+ \item{time}{running time}
+ }
+}
+\source{www.nycmarathon.org website.}
+\keyword{datasets}
diff --git a/man/mnormt.onesided.Rd b/man/mnormt.onesided.Rd
new file mode 100644
index 0000000..e29e351
--- /dev/null
+++ b/man/mnormt.onesided.Rd
@@ -0,0 +1,34 @@
+\name{mnormt.onesided}
+\alias{mnormt.onesided}
+\title{Bayesian test of one-sided hypothesis about a normal mean}
+\description{
+Computes a Bayesian test of the hypothesis that a normal mean is
+less than or equal to a specified value}
+\usage{
+mnormt.onesided(m0,normpar,data)
+}
+\arguments{
+ \item{m0}{value of the normal mean to be tested}
+ \item{normpar}{vector of mean and standard deviation of the normal prior distribution}
+ \item{data}{vector of sample mean, sample size, and known value of the population standard deviation}
+}
+
+\value{
+\item{BF}{Bayes factor in support of the null hypothesis}
+\item{prior.odds}{prior odds of the null hypothesis}
+\item{post.odds}{posterior odds of the null hypothesis}
+\item{postH}{posterior probability of the null hypothesis}
+}
+
+\author{Jim Albert}
+
+\examples{
+y=c(182,172,173,176,176,180,173,174,179,175)
+pop.s=3
+data=c(mean(y),length(y),pop.s)
+m0=175
+normpar=c(170,1000)
+mnormt.onesided(m0,normpar,data)
+}
+
+\keyword{models}
diff --git a/man/mnormt.twosided.Rd b/man/mnormt.twosided.Rd
new file mode 100644
index 0000000..a3b52ec
--- /dev/null
+++ b/man/mnormt.twosided.Rd
@@ -0,0 +1,40 @@
+\name{mnormt.twosided}
+\alias{mnormt.twosided}
+\title{Bayesian test of a two-sided hypothesis about a normal mean}
+\description{
+ Bayesian test that a normal mean is equal to a specified
+ value using a normal prior}
+\usage{
+mnormt.twosided(m0, prob, t, data)
+}
+\arguments{
+ \item{m0}{value of the mean to be tested }
+ \item{prob}{prior probability of the hypothesis}
+ \item{t}{vector of values of the prior standard
+ deviation under the alternative hypothesis}
+ \item{data}{vector containing the sample mean,
+ the sample size, and the known value of the
+ population standard deviation}
+}
+
+\value{
+ \item{bf}{vector of values of the Bayes factor
+ in support of the null hypothesis}
+ \item{post}{vector of posterior probabilities
+ of the null hypothesis}
+}
+
+\author{Jim Albert}
+
+\examples{
+m0=170
+prob=.5
+tau=c(.5,1,2,4,8)
+samplesize=10
+samplemean=176
+popsd=3
+data=c(samplemean,samplesize,popsd)
+mnormt.twosided(m0,prob,tau,data)
+}
+
+\keyword{models}
diff --git a/man/mycontour.Rd b/man/mycontour.Rd
new file mode 100644
index 0000000..335965e
--- /dev/null
+++ b/man/mycontour.Rd
@@ -0,0 +1,32 @@
+\name{mycontour}
+\alias{mycontour}
+\title{Contour plot of a bivariate density function}
+\description{
+ For a general two parameter density, draws a contour graph where the
+contour lines are drawn at 10 percent, 1 percent, and .1 percent of
+the height at the mode.
+}
+\usage{
+mycontour(logf,limits,data,...)
+}
+\arguments{
+ \item{logf}{function that defines the logarithm of the density}
+ \item{limits}{limits (xlo, xhi, ylo, yhi) where the graph is to be drawn}
+ \item{data}{vector or list of parameters associated with the function logf}
+ \item{...}{further arguments to pass to contour}
+}
+
+\value{
+A contour graph of the density is drawn
+}
+
+\author{Jim Albert}
+
+\examples{
+m=array(c(0,0),c(2,1))
+v=array(c(1,.6,.6,1),c(2,2))
+normpar=list(m=m,v=v)
+mycontour(lbinorm,c(-4,4,-4,4),normpar)
+}
+
+\keyword{models}
diff --git a/man/normal.normal.mix.Rd b/man/normal.normal.mix.Rd
new file mode 100644
index 0000000..fa517f6
--- /dev/null
+++ b/man/normal.normal.mix.Rd
@@ -0,0 +1,32 @@
+\name{normal.normal.mix}
+\alias{normal.normal.mix}
+\title{Computes the posterior for normal sampling and a mixture of normals prior}
+\description{
+ Computes the parameters and mixing probabilities for a normal sampling problem, variance known,
+where the prior is a discrete mixture of normal densities.
+}
+\usage{
+normal.normal.mix(probs,normalpar,data)
+}
+\arguments{
+ \item{probs}{vector of probabilities of the normal components of the prior}
+ \item{normalpar}{matrix where each row contains the mean and variance parameters for a normal component of the prior}
+ \item{data}{vector of observation and sampling variance}
+}
+\value{
+\item{probs}{vector of probabilities of the normal components of the posterior}
+\item{normalpar}{matrix where each row contains the mean and variance parameters for a normal component of the posterior}
+}
+\author{Jim Albert}
+
+\examples{
+probs=c(.5, .5)
+normal.par1=c(0,1)
+normal.par2=c(2,.5)
+normalpar=rbind(normal.par1,normal.par2)
+y=1; sigma2=.5
+data=c(y,sigma2)
+normal.normal.mix(probs,normalpar,data)
+}
+
+\keyword{models}
diff --git a/man/normal.select.Rd b/man/normal.select.Rd
new file mode 100644
index 0000000..d6e447e
--- /dev/null
+++ b/man/normal.select.Rd
@@ -0,0 +1,33 @@
+\name{normal.select}
+\alias{normal.select}
+\title{Selection of Normal Prior Given Knowledge of Two Quantiles}
+\description{
+ Finds the mean and standard deviation of a normal density that matches knowledge of
+two quantiles of the distribution.
+}
+\usage{
+normal.select(quantile1, quantile2)
+}
+\arguments{
+ \item{quantile1}{list with components p, the value of the first probability,
+ and x, the value of the first quantile}
+ \item{quantile2}{list with components p, the value of the second probability,
+ and x, the value of the second quantile}
+}
+
+\value{
+\item{mean}{mean of the matching normal distribution}
+\item{sigma}{standard deviation of the matching normal distribution}
+}
+
+\author{Jim Albert}
+
+\examples{
+# person believes the 15th percentile of the prior is 100
+# and the 70th percentile of the prior is 150
+quantile1=list(p=.15,x=100)
+quantile2=list(p=.7,x=150)
+normal.select(quantile1,quantile2)
+}
+
+\keyword{models}
diff --git a/man/normchi2post.Rd b/man/normchi2post.Rd
new file mode 100644
index 0000000..f6fe8ed
--- /dev/null
+++ b/man/normchi2post.Rd
@@ -0,0 +1,25 @@
+\name{normchi2post}
+\alias{normchi2post}
+\title{Log posterior density for mean and variance for normal sampling}
+\description{
+ Computes the log of the posterior density of a mean M and a variance S2 when a sample is taken from a normal density and a standard noninformative prior is used.
+}
+\usage{
+normchi2post(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values M and S2}
+ \item{data}{vector containing the sample observations}
+}
+\value{
+value of the log posterior
+}
+\author{Jim Albert}
+
+\examples{
+parameter=c(25,5)
+data=c(20, 32, 21, 43, 33, 21, 32)
+normchi2post(parameter,data)
+}
+
+\keyword{models}
diff --git a/man/normnormexch.Rd b/man/normnormexch.Rd
new file mode 100644
index 0000000..640fe79
--- /dev/null
+++ b/man/normnormexch.Rd
@@ -0,0 +1,29 @@
+\name{normnormexch}
+\alias{normnormexch}
+\title{Log posterior of mean and log standard deviation for Normal/Normal exchangeable model}
+\description{
+ Computes the log posterior density of mean and log standard deviation for a Normal/Normal exchangeable model where (mean, log sd) is given a uniform prior.
+}
+\usage{
+normnormexch(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values of mu and log tau}
+ \item{data}{a matrix with columns y (observations) and v (sampling variances)}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+s.var <- c(0.05, 0.05, 0.05, 0.05, 0.05)
+y.means <- c(1, 4, 3, 6,10)
+data=cbind(y.means, s.var)
+theta=c(-1, 0)
+normnormexch(theta,data)
+}
+
+\keyword{models}
diff --git a/man/normpostpred.Rd b/man/normpostpred.Rd
new file mode 100644
index 0000000..00805ad
--- /dev/null
+++ b/man/normpostpred.Rd
@@ -0,0 +1,30 @@
+\name{normpostpred}
+\alias{normpostpred}
+\title{Posterior predictive simulation from Bayesian normal sampling model}
+\description{
+ Given simulated draws from the posterior from a normal sampling model, outputs
+simulated draws from the posterior predictive distribution of a statistic of interest.
+}
+\usage{
+normpostpred(parameters,sample.size,f=min)
+}
+\arguments{
+ \item{parameters}{list of simulated draws from the posterior where mu contains the normal mean
+and sigma2 contains the normal variance}
+  \item{sample.size}{size of the future sample}
+ \item{f}{function defining the statistic}
+}
+
+\value{
+simulated sample of the posterior predictive distribution of the statistic}
+\author{Jim Albert}
+
+\examples{
+# finds posterior predictive distribution of the min statistic of a future sample of size 15
+data(darwin)
+s=normpostsim(darwin$difference)
+sample.size=15
+sim.stats=normpostpred(s,sample.size,min)
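+# one way to display the simulated predictive distribution of the minimum
+hist(sim.stats)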
+}
+
+\keyword{models}
diff --git a/man/normpostsim.Rd b/man/normpostsim.Rd
new file mode 100644
index 0000000..ce0856f
--- /dev/null
+++ b/man/normpostsim.Rd
@@ -0,0 +1,32 @@
+\name{normpostsim}
+\alias{normpostsim}
+\title{Simulation from Bayesian normal sampling model}
+\description{
+ Gives a simulated sample from the joint posterior distribution of the mean and
+variance for a normal sampling model with a noninformative or
+informative prior. The informative prior assumes mu and sigma2 are independent, with
+mu assigned a normal prior with mean mu0 and variance tau2, and sigma2
+assigned an inverse gamma prior with parameters a and b.
+}
+\usage{
+normpostsim(data,prior=NULL,m=1000)
+}
+\arguments{
+ \item{data}{vector of observations}
+ \item{prior}{list with components mu, a vector with the prior mean
+ and variance, and sigma2, a vector of the inverse gamma parameters}
+ \item{m}{number of simulations desired}
+}
+
+\value{
+\item{mu}{vector of simulated draws of normal mean}
+\item{sigma2}{vector of simulated draws of normal variance}
+}
+\author{Jim Albert}
+
+\examples{
+data(darwin)
+s=normpostsim(darwin$difference)
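+# a sketch of an informative-prior call, using the documented prior structure
+# mu=c(prior mean, prior variance) and sigma2=c(a, b); the values are illustrative
+prior=list(mu=c(30,100),sigma2=c(2,10))
+s2=normpostsim(darwin$difference,prior=prior)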
+}
+
+\keyword{models}
diff --git a/man/ordergibbs.Rd b/man/ordergibbs.Rd
new file mode 100644
index 0000000..f4ca751
--- /dev/null
+++ b/man/ordergibbs.Rd
@@ -0,0 +1,28 @@
+\name{ordergibbs}
+\alias{ordergibbs}
+\title{Gibbs sampling for a hierarchical regression model}
+\description{
+ Implements Gibbs sampling for estimating a two-way table of means
+under a order restriction.
+}
+\usage{
+ordergibbs(data,m)
+}
+\arguments{
+ \item{data}{data matrix with first two columns observed sample means and sample sizes}
+ \item{m}{number of cycles of Gibbs sampling}
+}
+
+\value{
+ matrix of simulated draws of the normal means where each row represents one simulated draw
+}
+
+\author{Jim Albert}
+
+\examples{
+data(iowagpa)
+m=1000
+s=ordergibbs(iowagpa,m)
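+# one way to summarize the output: posterior means of the restricted cell means
+apply(s,2,mean)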
+}
+
+\keyword{models}
diff --git a/man/pbetap.Rd b/man/pbetap.Rd
new file mode 100644
index 0000000..e47ff7b
--- /dev/null
+++ b/man/pbetap.Rd
@@ -0,0 +1,31 @@
+\name{pbetap}
+\alias{pbetap}
+\title{Predictive distribution for a binomial sample with a beta prior}
+\description{
+ Computes predictive distribution for number of successes of
+ future binomial experiment
+ with a beta prior distribution for the proportion.
+}
+\usage{
+pbetap(ab, n, s)
+}
+\arguments{
+ \item{ab}{vector of parameters of the beta prior}
+ \item{n}{size of future binomial sample}
+ \item{s}{vector of number of successes for future binomial experiment}
+}
+
+\value{
+ vector of predictive probabilities for the values in the vector s
+}
+
+\author{Jim Albert}
+
+\examples{
+ab=c(3,12)
+n=10
+s=0:10
+pbetap(ab,n,s)
+}
+
+\keyword{models}
diff --git a/man/pbetat.Rd b/man/pbetat.Rd
new file mode 100644
index 0000000..79f927e
--- /dev/null
+++ b/man/pbetat.Rd
@@ -0,0 +1,34 @@
+\name{pbetat}
+\alias{pbetat}
+\title{Bayesian test of a proportion}
+\description{
+ Bayesian test that a proportion is equal to a specified
+ value using a beta prior}
+\usage{
+pbetat(p0,prob,ab,data)
+}
+\arguments{
+ \item{p0}{value of the proportion to be tested }
+ \item{prob}{prior probability of the hypothesis}
+ \item{ab}{vector of parameter values of the beta prior under the alternative hypothesis}
+ \item{data}{vector containing the number of successes and number of failures}
+}
+
+\value{
+ \item{bf}{the Bayes factor
+ in support of the null hypothesis}
+ \item{post}{the posterior probability
+ of the null hypothesis}
+}
+
+\author{Jim Albert}
+
+\examples{
+p0=.5
+prob=.5
+ab=c(10,10)
+data=c(5,15)
+pbetat(p0,prob,ab,data)
+}
+
+\keyword{models}
diff --git a/man/pdisc.Rd b/man/pdisc.Rd
new file mode 100644
index 0000000..ee35f8d
--- /dev/null
+++ b/man/pdisc.Rd
@@ -0,0 +1,30 @@
+\name{pdisc}
+\alias{pdisc}
+\title{Posterior distribution for a proportion with discrete priors}
+\description{
+Computes the posterior distribution for a proportion for a discrete prior
+distribution.
+}
+\usage{
+pdisc(p, prior, data)
+}
+\arguments{
+ \item{p}{vector of proportion values}
+ \item{prior}{vector of prior probabilities}
+ \item{data}{vector consisting of number of successes and number of
+ failures}
+}
+\value{
+ vector of posterior probabilities
+}
+
+\author{Jim Albert}
+
+\examples{
+p=c(.2,.25,.3,.35)
+prior=c(.25,.25,.25,.25)
+data=c(5,10)
+pdisc(p,prior,data)
+}
+
+\keyword{models}
diff --git a/man/pdiscp.Rd b/man/pdiscp.Rd
new file mode 100644
index 0000000..de0604f
--- /dev/null
+++ b/man/pdiscp.Rd
@@ -0,0 +1,33 @@
+\name{pdiscp}
+\alias{pdiscp}
+\title{Predictive distribution for a binomial sample with a discrete prior}
+\description{
+ Computes predictive distribution for number of successes of
+ future binomial experiment
+ with a discrete distribution for the proportion.
+}
+\usage{
+pdiscp(p, probs, n, s)
+}
+\arguments{
+ \item{p}{vector of proportion values}
+ \item{probs}{vector of probabilities}
+ \item{n}{size of future binomial sample}
+ \item{s}{vector of number of successes for future binomial experiment}
+}
+
+\value{
+ vector of predictive probabilities for the values in the vector s
+}
+
+\author{Jim Albert}
+
+\examples{
+p=c(.1,.2,.3,.4,.5,.6,.7,.8,.9)
+prob=c(0.05,0.10,0.10,0.15,0.20,0.15,0.10,0.10,0.05)
+n=10
+s=0:10
+pdiscp(p,prob,n,s)
+}
+
+\keyword{models}
diff --git a/man/poissgamexch.Rd b/man/poissgamexch.Rd
new file mode 100644
index 0000000..57f69ab
--- /dev/null
+++ b/man/poissgamexch.Rd
@@ -0,0 +1,31 @@
+\name{poissgamexch}
+\alias{poissgamexch}
+\title{Log posterior of Poisson/gamma exchangeable model}
+\description{
+ Computes the log posterior density of log alpha and log mu for a Poisson/gamma exchangeable model
+}
+\usage{
+poissgamexch(theta,datapar)
+}
+\arguments{
+ \item{theta}{vector of parameter values of log alpha and log mu}
+ \item{datapar}{list with components data, a matrix with columns e and y, and z0, prior hyperparameter}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+e=c(532,584,672,722,904)
+y=c(0,0,2,1,1)
+data=cbind(e,y)
+theta=c(-4,0)
+z0=.5
+datapar=list(data=data,z0=z0)
+poissgamexch(theta,datapar)
+}
+
+\keyword{models}
diff --git a/man/poisson.gamma.mix.Rd b/man/poisson.gamma.mix.Rd
new file mode 100644
index 0000000..f6da28e
--- /dev/null
+++ b/man/poisson.gamma.mix.Rd
@@ -0,0 +1,32 @@
+\name{poisson.gamma.mix}
+\alias{poisson.gamma.mix}
+\title{Computes the posterior for Poisson sampling and a mixture of gammas prior}
+\description{
+ Computes the parameters and mixing probabilities for a Poisson sampling problem
+where the prior is a discrete mixture of gamma densities.
+}
+\usage{
+poisson.gamma.mix(probs,gammapar,data)
+}
+\arguments{
+ \item{probs}{vector of probabilities of the gamma components of the prior}
+ \item{gammapar}{matrix where each row contains the shape and rate parameters for a gamma component of the prior}
+ \item{data}{list with components y, vector of counts, and t, vector of time intervals}
+}
+
+\value{
+\item{probs}{vector of probabilities of the gamma components of the posterior}
+\item{gammapar}{matrix where each row contains the shape and rate parameters for a gamma component of the posterior}
+}
+\author{Jim Albert}
+
+\examples{
+probs=c(.5, .5)
+gamma.par1=c(1,1)
+gamma.par2=c(10,2)
+gammapar=rbind(gamma.par1,gamma.par2)
+y=c(1,3,2,4,10); t=c(1,1,1,1,1)
+data=list(y=y,t=t)
+poisson.gamma.mix(probs,gammapar,data)}
+
+\keyword{models}
diff --git a/man/predplot.Rd b/man/predplot.Rd
new file mode 100644
index 0000000..5b2a653
--- /dev/null
+++ b/man/predplot.Rd
@@ -0,0 +1,26 @@
+\name{predplot}
+\alias{predplot}
+\title{Plot of predictive distribution for binomial sampling with a beta prior}
+\description{
+For a proportion problem with a beta prior, plots the prior predictive distribution
+of the number of successes in n trials and displays the observed number of successes.
+}
+\usage{
+predplot(prior,n,yobs)
+}
+\arguments{
+ \item{prior}{vector of parameters for beta prior}
+ \item{n}{sample size}
+ \item{yobs}{observed number of successes}
+}
+
+\author{Jim Albert}
+
+\examples{
+prior=c(3,10) # proportion has a beta(3, 10) prior
+n=20 # sample size
+yobs=10 # observed number of successes
+predplot(prior,n,yobs)
+}
+
+\keyword{models}
diff --git a/man/prior.two.parameters.Rd b/man/prior.two.parameters.Rd
new file mode 100644
index 0000000..a589f39
--- /dev/null
+++ b/man/prior.two.parameters.Rd
@@ -0,0 +1,25 @@
+\name{prior.two.parameters}
+\alias{prior.two.parameters}
+\title{Construct discrete uniform prior for two parameters}
+\description{
+Constructs a discrete uniform prior distribution for two parameters
+}
+\usage{
+prior.two.parameters(parameter1, parameter2)
+}
+\arguments{
+ \item{parameter1}{vector of values of first parameter}
+ \item{parameter2}{vector of values of second parameter}
+}
+\value{
+ matrix of uniform probabilities where the rows and columns are
+labelled with the parameter values
+}
+
+\author{Jim Albert}
+
+\examples{
+prior.two.parameters(c(1,2,3,4),c(2,4,7))
+}
+
+\keyword{models}
diff --git a/man/puffin.Rd b/man/puffin.Rd
new file mode 100644
index 0000000..9cefa2d
--- /dev/null
+++ b/man/puffin.Rd
@@ -0,0 +1,24 @@
+\name{puffin}
+\alias{puffin}
+\docType{data}
+\title{Bird measurements from British islands}
+\description{
+Measurements on breeding of the common puffin in different
+habitats at Great Island, Newfoundland.
+}
+\usage{
+puffin
+}
+\format{
+ A data frame with 38 observations on the following 5 variables.
+ \describe{
+ \item{Nest}{nesting frequency (burrows per 9 square meters)}
+ \item{Grass}{grass cover (percentage)}
+ \item{Soil}{mean soil depth (in centimeters)}
+ \item{Angle}{angle of slope (in degrees)}
+ \item{Distance}{distance from cliff edge (in meters)}
+ }
+}
+\source{Peck, R., Devore, J., and Olsen, C. (2005), Introduction to Statistics
+And Data Analysis, Thomson Learning.}
+\keyword{datasets}
diff --git a/man/rdirichlet.Rd b/man/rdirichlet.Rd
new file mode 100644
index 0000000..27cf1e4
--- /dev/null
+++ b/man/rdirichlet.Rd
@@ -0,0 +1,27 @@
+\name{rdirichlet}
+\alias{rdirichlet}
+\title{Random draws from a Dirichlet distribution}
+\description{
+Simulates a sample from a Dirichlet distribution
+}
+\usage{
+rdirichlet(n,par)
+}
+\arguments{
+ \item{n}{number of simulations required}
+ \item{par}{vector of parameters of the Dirichlet distribution}
+}
+
+\value{
+matrix of simulated draws where each row corresponds to a single draw
+}
+
+\author{Jim Albert}
+
+\examples{
+par=c(2,5,4,10)
+n=10
+rdirichlet(n,par)
+}
+
+\keyword{models}
diff --git a/man/reg.gprior.post.Rd b/man/reg.gprior.post.Rd
new file mode 100644
index 0000000..4dff449
--- /dev/null
+++ b/man/reg.gprior.post.Rd
@@ -0,0 +1,28 @@
+\name{reg.gprior.post}
+\alias{reg.gprior.post}
+\title{Computes the log posterior of a normal regression model with a g prior.}
+\description{
+ Computes the log posterior of (beta, log sigma) for a normal regression
+model with a g prior with parameters beta0 and c0.
+}
+\usage{
+reg.gprior.post(theta, dataprior)
+}
+\arguments{
+ \item{theta}{vector of components of beta and log sigma}
+ \item{dataprior}{list with components data and prior; data is a list
+with components y and X, prior is a list with components b0 and c0}
+}
+\value{
+ value of the log posterior
+}
+\author{Jim Albert}
+
+\examples{
+data(puffin)
+data=list(y=puffin$Nest, X=cbind(1,puffin$Distance))
+prior=list(b0=c(0,0), c0=10)
+reg.gprior.post(c(20,-.5,1),list(data=data,prior=prior))
+}
+
+\keyword{models}
diff --git a/man/regroup.Rd b/man/regroup.Rd
new file mode 100644
index 0000000..3f3eedf
--- /dev/null
+++ b/man/regroup.Rd
@@ -0,0 +1,25 @@
+\name{regroup}
+\alias{regroup}
+\title{Collapses a matrix by summing over rows}
+\description{
+Collapses a matrix by summing over a specific number of rows
+}
+\usage{
+regroup(data,g)
+}
+\arguments{
+ \item{data}{a matrix}
+  \item{g}{a positive integer between 1 and the number of rows of data}
+}
+
+\value{
+ reduced matrix found by summing over rows
+}
+\author{Jim Albert}
+
+\examples{
+data=matrix(c(1:20),nrow=4,ncol=5)
+g=2
+regroup(data,g)
+}
+\keyword{models}
diff --git a/man/rejectsampling.Rd b/man/rejectsampling.Rd
new file mode 100644
index 0000000..f3318c8
--- /dev/null
+++ b/man/rejectsampling.Rd
@@ -0,0 +1,32 @@
+\name{rejectsampling}
+\alias{rejectsampling}
+\title{Rejection sampling using a t proposal density}
+\description{
+ Implements a rejection sampling algorithm for a probability density
+using a multivariate t proposal density
+}
+\usage{
+rejectsampling(logf,tpar,dmax,n,data)
+}
+\arguments{
+ \item{logf}{function that defines the logarithm of the density of interest}
+ \item{tpar}{list of parameters of t proposal density including the mean m, scale matrix var,
+and degrees of freedom df}
+ \item{dmax}{logarithm of the rejection sampling constant}
+ \item{n}{number of simulated draws from proposal density}
+  \item{data}{data and/or parameters used in the function logf}
+}
+\value{
+matrix of simulated draws from density of interest
+}
+\author{Jim Albert}
+
+\examples{
+data(cancermortality)
+start=c(-7,6)
+fit=laplace(betabinexch,start,cancermortality)
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+theta=rejectsampling(betabinexch,tpar,-569.2813,1000,cancermortality)
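+# theta holds only the accepted draws; an illustrative acceptance fraction is
+dim(theta)[1]/1000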
+}
+
+\keyword{models}
diff --git a/man/rigamma.Rd b/man/rigamma.Rd
new file mode 100644
index 0000000..5c04673
--- /dev/null
+++ b/man/rigamma.Rd
@@ -0,0 +1,30 @@
+\name{rigamma}
+\alias{rigamma}
+\title{Random number generation for inverse gamma distribution}
+\description{
+ Simulates from an inverse gamma(a, b) distribution with density
+proportional to $y^{-a-1} \exp(-b/y)$.
+}
+\usage{
+rigamma(n, a, b)
+}
+\arguments{
+ \item{n}{number of random numbers to be generated}
+ \item{a}{inverse gamma shape parameter}
+ \item{b}{inverse gamma rate parameter}
+}
+\value{
+vector of n simulated draws
+}
+
+\author{Jim Albert}
+
+\examples{
+a=10
+b=5
+n=20
+rigamma(n,a,b)
+}
+
+\keyword{models}
+
diff --git a/man/rmnorm.Rd b/man/rmnorm.Rd
new file mode 100644
index 0000000..570ed50
--- /dev/null
+++ b/man/rmnorm.Rd
@@ -0,0 +1,28 @@
+\name{rmnorm}
+\alias{rmnorm}
+\title{Random number generation for multivariate normal}
+\description{
+ Simulates from a multivariate normal distribution
+}
+\usage{
+rmnorm(n = 1, mean = rep(0, d), varcov)
+}
+\arguments{
+ \item{n}{number of random numbers to be generated}
+ \item{mean}{numeric vector giving the mean of the distribution}
+ \item{varcov}{a positive definite matrix representing the variance-covariance matrix of the distribution}
+}
+\value{
+matrix of n rows of random vectors
+}
+
+\author{Jim Albert}
+
+\examples{
+mu <- c(1,12,2)
+Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
+x <- rmnorm(10, mu, Sigma)
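+# illustrative check: column means of the draws should be roughly near mu
+colMeans(x)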
+}
+
+\keyword{models}
+
diff --git a/man/rmt.Rd b/man/rmt.Rd
new file mode 100644
index 0000000..414f95c
--- /dev/null
+++ b/man/rmt.Rd
@@ -0,0 +1,30 @@
+\name{rmt}
+\alias{rmt}
+\title{Random number generation for multivariate t}
+\description{
+ Simulates from a multivariate t distribution
+}
+\usage{
+rmt(n = 1, mean = rep(0, d), S, df = Inf)
+}
+\arguments{
+ \item{n}{number of random numbers to be generated}
+ \item{mean}{numeric vector giving the location parameter of the distribution}
+ \item{S}{a positive definite matrix representing the scale matrix of the distribution}
+ \item{df}{degrees of freedom}
+}
+\value{
+matrix of n rows of random vectors
+}
+
+\author{Jim Albert}
+
+\examples{
+mu <- c(1,12,2)
+Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
+df <- 4
+x <- rmt(10, mu, Sigma, df)
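+# with the default df=Inf the multivariate t reduces to a multivariate normal
+x.norm <- rmt(10, mu, Sigma)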
+}
+
+\keyword{models}
+
diff --git a/man/robustt.Rd b/man/robustt.Rd
new file mode 100644
index 0000000..57f0419
--- /dev/null
+++ b/man/robustt.Rd
@@ -0,0 +1,26 @@
+\name{robustt}
+\alias{robustt}
+\title{Gibbs sampling for a robust regression model}
+\description{
+ Implements Gibbs sampling for a robust t sampling model with location mu, scale sigma, and degrees of freedom v }
+\usage{
+robustt(y,v,m)
+}
+\arguments{
+ \item{y}{vector of data values}
+ \item{v}{degrees of freedom for t model}
+ \item{m}{the number of cycles of the Gibbs sampler}
+}
+\value{
+\item{mu}{vector of simulated values of mu}
+\item{s2}{vector of simulated values of sigma2}
+\item{lam}{matrix of simulated draws of lambda, where each row corresponds to a single draw}
+}
+\author{Jim Albert}
+
+\examples{
+data=c(-67,-48,6,8,14,16,23,24,28,29,41,49,67,60,75)
+fit=robustt(data,4,1000)
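+# a density estimate of the simulated draws of the location parameter
+plot(density(fit$mu))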
+}
+
+\keyword{models}
diff --git a/man/rtruncated.Rd b/man/rtruncated.Rd
new file mode 100644
index 0000000..f885362
--- /dev/null
+++ b/man/rtruncated.Rd
@@ -0,0 +1,36 @@
+\name{rtruncated}
+\alias{rtruncated}
+\title{Simulates from a truncated probability distribution}
+\description{
+ Simulates a sample from a truncated distribution where the functions for the cdf and inverse cdf are available.
+}
+\usage{
+rtruncated(n,lo,hi,pf,qf,...)
+}
+\arguments{
+ \item{n}{size of simulated sample}
+ \item{lo}{low truncation point}
+ \item{hi}{high truncation point}
+ \item{pf}{function containing cdf of untruncated distribution}
+ \item{qf}{function containing inverse cdf of untruncated distribution}
+ \item{...}{parameters used in the functions pf and qf}
+}
+
+\value{
+vector of simulated draws from distribution}
+
+\author{Jim Albert}
+
+\examples{
+# want a sample of 10 from normal(2, 1) distribution truncated below by 3
+n=10
+lo=3
+hi=Inf
+rtruncated(n,lo,hi,pnorm,qnorm,mean=2,sd=1)
+# want a sample of 20 from beta(2, 5) distribution truncated to (.3, .8)
+n=20
+lo=0.3
+hi=0.8
+rtruncated(n,lo,hi,pbeta,qbeta,2,5)
+}
+\keyword{models}
diff --git a/man/rwmetrop.Rd b/man/rwmetrop.Rd
new file mode 100644
index 0000000..f5aba47
--- /dev/null
+++ b/man/rwmetrop.Rd
@@ -0,0 +1,35 @@
+\name{rwmetrop}
+\alias{rwmetrop}
+\title{Random walk Metropolis algorithm of a posterior distribution}
+\description{
+ Simulates iterates of a random walk Metropolis chain for an arbitrary real-valued
+posterior density defined by the user
+}
+\usage{
+rwmetrop(logpost,proposal,start,m,...)
+}
+\arguments{
+ \item{logpost}{function defining the log posterior density}
+ \item{proposal}{a list containing var, an estimated variance-covariance matrix, and scale, the Metropolis scale factor}
+ \item{start}{vector containing the starting value of the parameter}
+ \item{m}{the number of iterations of the chain}
+ \item{...}{data that is used in the function logpost}
+}
+
+\value{
+\item{par}{a matrix of simulated values where each row corresponds to a value of the vector parameter}
+\item{accept}{the acceptance rate of the algorithm}
+}
+
+\author{Jim Albert}
+
+\examples{
+data=c(6,2,3,10)
+varcov=diag(c(1,1))
+proposal=list(var=varcov,scale=2)
+start=array(c(1,1),c(1,2))
+m=1000
+s=rwmetrop(logctablepost,proposal,start,m,data)
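+# posterior means of the two parameters and the observed acceptance rate
+apply(s$par,2,mean)
+s$accept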
+}
+
+\keyword{models}
diff --git a/man/schmidt.Rd b/man/schmidt.Rd
new file mode 100644
index 0000000..c3198ac
--- /dev/null
+++ b/man/schmidt.Rd
@@ -0,0 +1,32 @@
+\name{schmidt}
+\alias{schmidt}
+\docType{data}
+\title{Batting data for Mike Schmidt}
+\description{
+Batting statistics for the baseball player Mike Schmidt
+during all the seasons of his career.
+}
+\usage{
+schmidt
+}
+\format{
+ A data frame with 18 observations on the following 14 variables.
+ \describe{
+ \item{Year}{year of the season}
+ \item{Age}{Schmidt's age that season}
+ \item{G}{games played}
+ \item{AB}{at-bats}
+ \item{R}{runs scored}
+ \item{H}{number of hits}
+ \item{X2B}{number of doubles}
+ \item{X3B}{number of triples}
+ \item{HR}{number of home runs}
+ \item{RBI}{number of runs batted in}
+ \item{SB}{number of stolen bases}
+ \item{CS}{number of times caught stealing}
+ \item{BB}{number of walks}
+ \item{SO}{number of strikeouts}
+ }
+}
+\source{Sean Lahman's baseball database from www.baseball1.com.}
+\keyword{datasets}
diff --git a/man/simcontour.Rd b/man/simcontour.Rd
new file mode 100644
index 0000000..5ab6a1c
--- /dev/null
+++ b/man/simcontour.Rd
@@ -0,0 +1,32 @@
+\name{simcontour}
+\alias{simcontour}
+\title{Simulated draws from a bivariate density function on a grid}
+\description{
+ For a general two parameter density defined on a grid, simulates a random sample.
+}
+\usage{
+simcontour(logf,limits,data,m)
+}
+\arguments{
+ \item{logf}{function that defines the logarithm of the density}
+ \item{limits}{limits (xlo, xhi, ylo, yhi) that cover the joint probability density}
+  \item{data}{vector or list of parameters associated with the function logf}
+ \item{m}{size of simulated sample}
+}
+
+\value{
+\item{x}{vector of simulated draws of the first parameter}
+\item{y}{vector of simulated draws of the second parameter}
+}
+
+\author{Jim Albert}
+
+\examples{
+m=array(c(0,0),c(2,1))
+v=array(c(1,.6,.6,1),c(2,2))
+normpar=list(m=m,v=v)
+s=simcontour(lbinorm,c(-4,4,-4,4),normpar,1000)
+plot(s$x,s$y)
+}
+
+\keyword{models}
diff --git a/man/sir.Rd b/man/sir.Rd
new file mode 100644
index 0000000..aa67e22
--- /dev/null
+++ b/man/sir.Rd
@@ -0,0 +1,31 @@
+\name{sir}
+\alias{sir}
+\title{Sampling importance resampling}
+\description{
+ Implements sampling importance resampling for a multivariate
+t proposal density.
+}
+\usage{
+sir(logf,tpar,n,data)
+}
+\arguments{
+ \item{logf}{function defining logarithm of density of interest}
+ \item{tpar}{list of parameters of multivariate t proposal density including
+the mean m, the scale matrix var, and the degrees of freedom df}
+ \item{n}{number of simulated draws from the posterior}
+ \item{data}{data and parameters used in the function logf}
+}
+\value{
+matrix of simulated draws from the posterior where each row corresponds to a single draw
+}
+\author{Jim Albert}
+
+\examples{
+data(cancermortality)
+start=c(-7,6)
+fit=laplace(betabinexch,start,cancermortality)
+tpar=list(m=fit$mode,var=2*fit$var,df=4)
+theta=sir(betabinexch,tpar,1000,cancermortality)
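+# histogram of the simulated draws of the first parameter
+hist(theta[,1])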
+}
+
+\keyword{models}
diff --git a/man/sluggerdata.Rd b/man/sluggerdata.Rd
new file mode 100644
index 0000000..e4b385b
--- /dev/null
+++ b/man/sluggerdata.Rd
@@ -0,0 +1,31 @@
+\name{sluggerdata}
+\alias{sluggerdata}
+\docType{data}
+\title{Hitting statistics for ten great baseball players}
+\description{
+Career hitting statistics for ten great baseball players
+}
+
+\usage{
+sluggerdata
+}
+\format{
+ A data frame with 199 observations on the following 13 variables.
+ \describe{
+    \item{Player}{name of the ballplayer}
+ \item{Year}{season played}
+ \item{Age}{age of the player during the season}
+ \item{G}{games played}
+ \item{AB}{number of at-bats}
+ \item{R}{number of runs scored}
+ \item{H}{number of hits}
+ \item{X2B}{number of doubles}
+ \item{X3B}{number of triples}
+ \item{HR}{number of home runs}
+ \item{RBI}{runs batted in}
+    \item{BB}{number of bases on balls (walks)}
+ \item{SO}{number of strikeouts}
+ }
+}
+\source{Sean Lahman's baseball database from www.baseball1.com.}
+\keyword{datasets}
diff --git a/man/soccergoals.Rd b/man/soccergoals.Rd
new file mode 100644
index 0000000..ce895d4
--- /dev/null
+++ b/man/soccergoals.Rd
@@ -0,0 +1,19 @@
+\name{soccergoals}
+\alias{soccergoals}
+\docType{data}
+\title{Goals scored by professional soccer team}
+\description{
+Number of goals scored by a single professional soccer team
+during the 2006 Major League Soccer season}
+
+\usage{
+soccergoals
+}
+\format{
+ A data frame with 35 observations on the following 1 variable.
+ \describe{
+ \item{goals}{number of goals scored}
+ }
+}
+\source{Collected by author from the www.espn.com website.}
+\keyword{datasets}
diff --git a/man/stanfordheart.Rd b/man/stanfordheart.Rd
new file mode 100644
index 0000000..df40d1b
--- /dev/null
+++ b/man/stanfordheart.Rd
@@ -0,0 +1,23 @@
+\name{stanfordheart}
+\alias{stanfordheart}
+\docType{data}
+\title{Data from the Stanford Heart Transplantation Program}
+\description{
+Heart transplant data for 82 patients from the Stanford Heart Transplantation Program}
+
+\usage{
+stanfordheart
+}
+\format{
+A data frame with 82 observations on the following 4 variables.
+ \describe{
+ \item{survtime}{survival time in months}
+   \item{transplant}{indicator that is 1 if the patient had a transplant and 0 otherwise}
+   \item{timetotransplant}{time a transplant patient waits for the operation}
+   \item{state}{indicator that is 1 if the survival time is censored and 0 otherwise}
+ }
+}
+\source{Turnbull, B., Brown, B. and Hu, M. (1974), Survivorship
+analysis of heart transplant data, Journal of the
+ American Statistical Association, 69, 74-80.}
+\keyword{datasets}
diff --git a/man/strikeout.Rd b/man/strikeout.Rd
new file mode 100644
index 0000000..79f8a29
--- /dev/null
+++ b/man/strikeout.Rd
@@ -0,0 +1,23 @@
+\name{strikeout}
+\alias{strikeout}
+\docType{data}
+\title{Baseball strikeout data}
+\description{
+For all professional baseball players in the 2004 season, the dataset gives the
+number of strikeouts and at-bats when runners are in scoring position and when
+runners are not in scoring position.
+}
+\usage{
+strikeout
+}
+\format{
+ A data frame with 438 observations on the following 4 variables.
+ \describe{
+ \item{r}{number of strikeouts of player when runners are not in scoring position}
+ \item{n}{number of at-bats of player when runners are not in scoring position}
+ \item{s}{number of strikeouts of player when runners are in scoring position}
+ \item{m}{number of at-bats of player when runners are in scoring position}
+ }
+}
+\source{Collected from www.espn.com website.}
+\keyword{datasets}
diff --git a/man/studentdata.Rd b/man/studentdata.Rd
new file mode 100644
index 0000000..0a6d750
--- /dev/null
+++ b/man/studentdata.Rd
@@ -0,0 +1,30 @@
+\name{studentdata}
+\alias{studentdata}
+\docType{data}
+\title{Student dataset}
+\description{
+Answers to a sheet of questions given to a large number
+of students in introductory statistics classes
+}
+\usage{
+studentdata
+}
+\format{
+ A data frame with 657 observations on the following 11 variables.
+ \describe{
+ \item{Student}{student number}
+ \item{Height}{height in inches}
+ \item{Gender}{gender}
+ \item{Shoes}{number of pairs of shoes owned}
+ \item{Number}{number chosen between 1 and 10}
+    \item{Dvds}{number of movie DVDs owned}
+ \item{ToSleep}{time the person went to sleep the previous night (hours past midnight)}
+ \item{WakeUp}{time the person woke up the next morning}
+ \item{Haircut}{cost of last haircut including tip}
+ \item{Job}{number of hours working on a job per week}
+ \item{Drink}{usual drink at suppertime among milk, water, and pop}
+ }
+}
+
+\source{Collected by the author during the Fall 2006 semester.}
+\keyword{datasets}
diff --git a/man/transplantpost.Rd b/man/transplantpost.Rd
new file mode 100644
index 0000000..af34a0f
--- /dev/null
+++ b/man/transplantpost.Rd
@@ -0,0 +1,29 @@
+\name{transplantpost}
+\alias{transplantpost}
+\title{Log posterior of a Pareto model for survival data}
+\description{
+ Computes the log posterior density of (log tau, log lambda, log p) for a Pareto
+model for survival data
+}
+\usage{
+transplantpost(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values of log tau, log lambda, and log p}
+ \item{data}{data matrix with columns survival time, transplant indicator, time to transplant, and
+censoring indicator}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+data(stanfordheart)
+theta=c(0,3,-1)
+transplantpost(theta,stanfordheart)
+}
+
+\keyword{models}
diff --git a/man/triplot.Rd b/man/triplot.Rd
new file mode 100644
index 0000000..74e0fbc
--- /dev/null
+++ b/man/triplot.Rd
@@ -0,0 +1,26 @@
+\name{triplot}
+\alias{triplot}
+\title{Plot of prior, likelihood and posterior for a proportion}
+\description{
+For a proportion problem with a beta prior, plots the prior, likelihood and posterior
+on one graph.
+}
+\usage{
+triplot(prior,data,where="topright")
+}
+\arguments{
+ \item{prior}{vector of parameters for beta prior}
+ \item{data}{vector consisting of number of successes and number of
+ failures}
+ \item{where}{the location of the legend for the plot}
+}
+
+\author{Jim Albert}
+
+\examples{
+prior=c(3,10) # proportion has a beta(3, 10) prior
+data=c(10,6) # observe 10 successes and 6 failures
+triplot(prior,data)
+}
+
+\keyword{models}
diff --git a/man/weibullregpost.Rd b/man/weibullregpost.Rd
new file mode 100644
index 0000000..7a43a0f
--- /dev/null
+++ b/man/weibullregpost.Rd
@@ -0,0 +1,30 @@
+\name{weibullregpost}
+\alias{weibullregpost}
+\title{Log posterior of a Weibull proportional odds model for survival data}
+\description{
+ Computes the log posterior density of (log sigma, mu, beta) for a Weibull
+proportional odds regression model
+}
+\usage{
+weibullregpost(theta,data)
+}
+\arguments{
+ \item{theta}{vector of parameter values log sigma, mu, and beta}
+ \item{data}{data matrix with columns survival time, censoring variable, and covariate matrix}
+}
+
+\value{
+value of the log posterior
+}
+
+\author{Jim Albert}
+
+\examples{
+data(chemotherapy)
+attach(chemotherapy)
+d=cbind(time,status,treat-1,age)
+theta=c(-.6,11,.6,0)
+weibullregpost(theta,d)
+}
+
+\keyword{models}
diff --git a/vignettes/BayesFactors.Rnw b/vignettes/BayesFactors.Rnw
new file mode 100644
index 0000000..ade6c96
--- /dev/null
+++ b/vignettes/BayesFactors.Rnw
@@ -0,0 +1,107 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Bayes Factors}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Bayes Factors}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Models for Fire Calls}
+
+To motivate the discussion of plausible models, the website \newline {\tt http://www.franklinvillefire.org/callstatistics.htm} gives the number of fire calls for each month in Franklinville, NC for the last several years.
+
+
+Suppose we observe the fire call counts $y_1, ..., y_N$ for $N$ consecutive months. Here is a general model for these data.
+\begin{itemize}
+\item $y_1, ..., y_N$ are independent $f(y | \theta)$
+\item $\theta$ has a prior $g(\theta)$
+\end{itemize}
+Also suppose we have some prior beliefs about the mean fire count $E(y)$. We believe that this mean is about 70 and the standard deviation of this guess is 10.
+Given this general model structure, we have to think of possible choices for $f$, the sampling density. We think of the popular distributions, say Poisson, normal, exponential, etc. We should also think about different choices for the prior density. For the prior, there are many possible choices -- we typically choose one that can represent our prior information.
+
+Once we decide on several plausible choices of sampling density and prior, then we'll compare the models by Bayes factors. To do this, we compute the prior predictive density of the actual data for each possible model. The Laplace method provides a convenient and accurate approximation to the logarithm of the predictive density and we'll use the function {\tt laplace} from the {\tt LearnBayes} package.
+
+Continuing our example, suppose our prior beliefs about the mean count of fire calls $\theta$ is Gamma(280, 4). (Essentially this says that our prior guess at $\theta$ is 70 and the prior standard deviation is about 4.2.) But we're unsure about the sampling model -- it could be (model $M_1$) Poisson($\theta$), (model $M_2$) normal with mean $\theta$ and standard deviation 12, or (model $M_3$) normal with mean $\theta$ and standard deviation 6.
+
+To get some sense of the best sampling model, a histogram of the fire call counts is graphed below. I have overlaid fitted Poisson and normal distributions, estimating $\theta$ by the sample mean.
+The Poisson model appears to give the best fit, followed by the normal model with standard deviation 6 and the normal model with standard deviation 12. We want to formalize this comparison by computing Bayes factors.
+
+<<fig=TRUE,echo=TRUE>>=
+fire.counts <- c(75, 88, 84, 99, 79, 68, 86, 109, 73, 85, 101, 85,
+ 75, 81, 64, 77, 83, 83, 88, 83, 78, 83, 78, 80,
+ 82, 90, 74, 72, 69, 72, 76, 76, 104, 86, 92, 88)
+hist(fire.counts, probability=TRUE, ylim=c(0, .08))
+x <- 60:110
+lines(x, dpois(x, lambda=mean(fire.counts)), col="red")
+lines(x, dnorm(x, mean=mean(fire.counts), sd=12), col="blue")
+lines(x, dnorm(x, mean=mean(fire.counts), sd=6), col="green")
+legend("topright", legend=c("M1: Poisson(theta)",
+ "M2: N(theta, 12)",
+ "M3: N(theta, 6)"),
+ col=c("red", "blue", "green"), lty=1)
+@
+
+\section*{Bayesian Model Comparison}
+
+Under the general model, the predictive density of $y$ is given by the integral
+$$
+f(y) = \int \prod_{j=1}^N f(y_j | \theta) g(\theta) d\theta.
+$$
+This density can be approximated by the Laplace method implemented in the {\tt laplace} function.
+
+One compares the suitability of two Bayesian models by comparing the corresponding values of the predictive density. The Bayes factor in support of model $M_1$ over model $M_2$ is given by the ratio
+$$
+BF_{12} = \frac{f_1(y)}{f_2(y)}.
+$$
+Computationally, it is convenient to compute the predictive densities on the log scale, so the Bayes factor can be expressed as
+$$
+BF_{12} = \exp \left(\log f_1(y) - \log f_2(y)\right).
+$$
+
+To compute the predictive density for a model, say model $M_1$, we initially define a function {\tt model.1} which gives the log posterior.
+<<>>=
+model.1 <- function(theta, y){
+  # log likelihood plus log prior (note log=TRUE for the prior term)
+  sum(log(dpois(y, theta))) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+@
+Then the log predictive density at $y$ is computed by using the {\tt laplace} function with inputs the function name, a guess at the posterior mode, and the data (the vector of fire call counts). The component {\tt int} gives the log of $f(y)$.
+<<>>=
+library(LearnBayes)
+log.pred.1 <- laplace(model.1, 80, fire.counts)$int
+log.pred.1
+@
+
+We similarly find the predictive densities of the models $M_2$ and $M_3$ by defining functions for the corresponding posteriors and using {\tt laplace}:
+<<>>=
+model.2 <- function(theta, y){
+  # M2: normal sampling with standard deviation 12
+  sum(log(dnorm(y, theta, 12))) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+model.3 <- function(theta, y){
+  # M3: normal sampling with standard deviation 6
+  sum(log(dnorm(y, theta, 6))) +
+    dgamma(theta, shape=280, rate=4, log=TRUE)
+}
+log.pred.2 <- laplace(model.2, 80, fire.counts)$int
+log.pred.3 <- laplace(model.3, 80, fire.counts)$int
+@
+
+Displaying the three log predictive densities, we see that model $M_1$ is preferred to model $M_3$, which is preferred to model $M_2$.
+<<>>=
+data.frame(Model=1:3, log.pred=c(log.pred.1, log.pred.2, log.pred.3))
+@
+The Bayes factor in support of model $M_1$ over model $M_3$ is given by
+<<>>=
+exp(log.pred.1 - log.pred.3)
+@
+
+
+
+
+
+\end{document}
\ No newline at end of file
diff --git a/vignettes/BinomialInference.Rnw b/vignettes/BinomialInference.Rnw
new file mode 100644
index 0000000..b0a3834
--- /dev/null
+++ b/vignettes/BinomialInference.Rnw
@@ -0,0 +1,70 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Learning About a Binomial Proportion}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Learning About a Binomial Proportion}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Constructing a Beta Prior}
+
+Suppose we are interested in the proportion $p$ of sunny days in my town. The function {\tt beta.select} is a convenient tool for specifying a beta prior based on knowledge of two prior quantiles. Suppose my prior median for the proportion of sunny days is $.2$ and my 75th percentile is $.28$.
+<<>>=
+library(LearnBayes)
+beta.par <- beta.select(list(p=0.5, x=0.2), list(p=0.75, x=.28))
+beta.par
+@
+A beta(2.95, 10.82) prior matches this prior information.
+
+\section*{Updating with Data}
+
+Next, I observe the weather for 10 days and observe 6 sunny days. (There are 6 ``successes'' and 4 ``failures''.) The posterior distribution is beta with shape parameters 2.95 + 6 and 10.82 + 4.
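+
+As a quick illustrative check, the updated shape parameters can be computed directly from the {\tt beta.par} vector found above:
+<<>>=
+beta.par + c(6, 4)
+@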
+
+\section*{Triplot}
+
+The {\tt triplot} function shows the prior, likelihood, and posterior on the same display; the inputs are the vector of prior parameters and the data vector.
+
+<<fig=TRUE,echo=TRUE>>=
+triplot(beta.par, c(6, 4))
+@
+
+\section*{Simulating from Posterior to Perform Inference}
+
+One can perform inference about the proportion $p$ by simulating a large number of draws from the posterior and summarizing the simulated sample. Here the {\tt rbeta} function is used to simulate from the beta posterior and the {\tt quantile} function is used to construct a 90 percent probability interval for $p$.
+
+<<>>=
+beta.post.par <- beta.par + c(6, 4)
+post.sample <- rbeta(1000, beta.post.par[1], beta.post.par[2])
+quantile(post.sample, c(0.05, 0.95))
+@
+
+\section*{Predictive Checking}
+
+One can check the suitability of this model by means of a predictive check. The function {\tt predplot} displays the prior predictive density for the number of successes and overlays the observed number of successes.
+
+<<fig=TRUE,echo=TRUE>>=
+predplot(beta.par, 10, 6)
+@
+
+The observed number of successes is in the tail of the predictive distribution, suggesting some incompatibility between the prior information and the sample.
+
+\section*{Prediction of a Future Sample}
+
+Suppose we want to predict the number of sunny days in the next 20 days. The function {\tt pbetap} computes the predictive distribution of the number of successes given a beta distribution for the proportion. The inputs are the vector of beta parameters, the future sample size, and the vector of numbers of successes in the future experiment.
+
+<<fig=TRUE,echo=TRUE>>=
+n <- 20
+s <- 0:n
+pred.probs <- pbetap(beta.par, n, s)
+plot(s, pred.probs, type="h")
+discint(cbind(s, pred.probs), 0.90)
+@
+
+The probability that we will observe between 0 and 8 successes in the future sample is .92.
+
+\end{document}
\ No newline at end of file
diff --git a/vignettes/DiscreteBayes.Rnw b/vignettes/DiscreteBayes.Rnw
new file mode 100644
index 0000000..3c1877c
--- /dev/null
+++ b/vignettes/DiscreteBayes.Rnw
@@ -0,0 +1,101 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Bayes using Discrete Priors}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Bayes using Discrete Priors}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Learning About a Proportion}
+
+\subsection*{A Discrete Prior}
+
+Consider a population of ``successes'' and ``failures'' where the proportion of successes is $p$.
+Suppose $p$ takes on the discrete set of values 0, .01, ..., .99, 1 and one assigns a uniform prior on these values. We enter the values of $p$ and the associated probabilities into the vectors {\tt p} and {\tt prior}, respectively.
+
+<<>>=
+p <- seq(0, 1, by = 0.01)
+prior <- 1 / 101 + 0 * p
+@
+<<fig=TRUE,echo=TRUE>>=
+plot(p, prior,
+ type="h",
+ main="Prior Distribution")
+@
+
+\subsection*{Posterior Distribution}
+
+Suppose one takes a random sample with replacement from the population and observes 20 successes and 12 failures. The function {\tt pdisc} in the {\tt LearnBayes} package computes the associated posterior probabilities for $p$. The inputs to {\tt pdisc} are the prior (vector of values of $p$ and vector of prior probabilities) and a vector containing the number of successes and failures.
+
+<<>>=
+library(LearnBayes)
+post <- pdisc(p, prior, c(20, 12))
+@
+<<fig=TRUE,echo=TRUE>>=
+plot(p, post,
+ type="h",
+ main="Posterior Distribution")
+@
+
+A highest probability interval for a discrete distribution is obtained using the {\tt discint} function. This function has two inputs: the probability distribution matrix where the first column contains the values and the second column contains the probabilities, and the desired probability content. To illustrate, we compute a 90 percent probability interval for $p$ from the posterior distribution.
+
+<<>>=
+discint(cbind(p, post), 0.90)
+@
+The probability that $p$ falls in the interval (0.49, 0.75)
+is approximately 0.90.
+
+\subsection*{Prediction}
+
+Suppose a new sample of size 20 is to be taken and we're interested in predicting the number of successes. The current opinion about the proportion is reflected in the posterior distribution stored in the vectors {\tt p} and {\tt post}. We store the possible number of successes in the future sample in {\tt s} and the function {\tt pdiscp} computes the corresponding predictive probabilities.
+
+<<>>=
+n <- 20
+s <- 0:20
+pred.probs <- pdiscp(p, post, n, s)
+@
+
+<<fig=TRUE,echo=TRUE>>=
+plot(s, pred.probs,
+ type="h",
+ main="Predictive Distribution")
+@
+
+\section*{Learning About a Poisson Mean}
+
+Discrete models can be used for other sampling distributions using the {\tt discrete.bayes} function. To illustrate, suppose the number of accidents in a particular week is Poisson with mean $\lambda$. A priori one believes that $\lambda$ is equally likely to take on the values 20, 21, ..., 30. We put the prior probabilities 1/11, ..., 1/11 in the vector {\tt prior} and use the {\tt names} function to name the components of this vector with the values of $\lambda$.
+<<>>=
+prior <- rep(1/11, 11)
+names(prior) <- 20:30
+@
+
+One observes the number of accidents for ten weeks -- these values are placed in the vector {\tt y}:
+<<>>=
+y <- c(24, 25, 31, 31, 22, 21, 26, 20, 16, 22)
+@
+
+To compute the posterior probabilities, we use the function {\tt discrete.bayes}; the inputs are the Poisson sampling density {\tt dpois}, the vector of prior probabilities {\tt prior}, and the vector of observations {\tt y}.
+<<>>=
+post <- discrete.bayes(dpois, prior, y)
+@
+
+One can display the posterior probabilities with the {\tt print} method, graph them with the {\tt plot} method, and summarize the posterior distribution with the {\tt summary} method.
+
+<<>>=
+print(post)
+@
+
+<<fig=TRUE,echo=TRUE>>=
+plot(post)
+@
+
+<<>>=
+summary(post)
+@
+
+\end{document}
\ No newline at end of file
diff --git a/vignettes/MCMCintro.Rnw b/vignettes/MCMCintro.Rnw
new file mode 100644
index 0000000..de5fe63
--- /dev/null
+++ b/vignettes/MCMCintro.Rnw
@@ -0,0 +1,114 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Markov Chain Monte Carlo}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Markov Chain Monte Carlo}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{A Selected Data Problem}
+
+Here is an interesting problem with ``selected data". Suppose you are measuring the speeds of cars driving on an interstate. You assume the speeds are normally distributed with mean $\mu$ and standard deviation $\sigma$. You see 10 cars pass by and you only record the minimum and maximum speeds. What have you learned about the normal parameters?
+
+First we focus on the construction of the likelihood. Given values of the normal parameters, what is the probability of observing a minimum of $x$ and a maximum of $y$ in a sample of size $n$?
+
+Essentially we're looking for the joint density of two order statistics, which is a standard result. Let $f$ and $F$ denote the density and cdf of a normal density with mean $\mu$ and standard deviation $\sigma$. Then the joint density of $(x, y)$ is given by
+
+$$f(x, y | \mu, \sigma) \propto f(x) f(y) [F(y) - F(x)]^{n-2}, x < y$$
+
+After we observe data, the likelihood is this sampling density viewed as a function of the parameters. Suppose we take a sample of size 10 and we observe $x = 52, y = 84$. Then the likelihood is given by
+
+$$
+L(\mu, \sigma) \propto f(52) f(84) [F(84) - F(52)]^{8}
+$$
+
+\section*{Defining the log posterior}
+
+First I write a short function {\tt minmaxpost} that computes the logarithm of the posterior density. The arguments to this function are $\theta = (\mu, \log \sigma)$ and {\tt data}, which is a list with components {\tt n}, {\tt min}, and {\tt max}. I'd recommend using the R functions {\tt pnorm} and {\tt dnorm} in computing the density -- it saves typing errors.
+
+<<>>=
+minmaxpost <- function(theta, data){
+ mu <- theta[1]
+ sigma <- exp(theta[2])
+ dnorm(data$min, mu, sigma, log=TRUE) +
+ dnorm(data$max, mu, sigma, log=TRUE) +
+ (data$n - 2) * log(pnorm(data$max, mu, sigma) -
+ pnorm(data$min, mu, sigma))
+}
+@
+
+\section*{Normal approximation to posterior}
+
+We work with the parameterization $(\mu, \log \sigma)$ which will give us a better normal approximation. A standard noninformative prior is uniform on $(\mu, \log \sigma)$.
+
+The function {\tt laplace} is used to summarize this posterior. The arguments to {\tt laplace} are the name of the log posterior function, an initial estimate of $\theta$, and the data used in the log posterior function. The output of {\tt laplace} includes {\tt mode}, the posterior mode, and {\tt var}, the corresponding estimate of the variance-covariance matrix.
+
+<<>>=
+data <- list(n=10, min=52, max=84)
+library(LearnBayes)
+fit <- laplace(minmaxpost, c(70, 2), data)
+fit
+@
+
+The normal approximation is quite accurate in this example. The {\tt mycontour} function displays contours of the exact posterior; a second call to {\tt mycontour} with the {\tt lbinorm} log density overlays the matching normal approximation.
+
+<<fig=TRUE,echo=TRUE>>=
+mycontour(minmaxpost, c(45, 95, 1.5, 4), data,
+ xlab=expression(mu), ylab=expression(paste("log ",sigma)))
+mycontour(lbinorm, c(45, 95, 1.5, 4),
+ list(m=fit$mode, v=fit$var), add=TRUE, col="red")
+@
+
+\section*{Random Walk Metropolis Sampling}
+
+The {\tt rwmetrop} function implements the M-H random walk algorithm. There are five inputs: (1) the function defining the log posterior, (2) a list containing var, the estimated var-cov matrix, and scale, the M-H random walk scale constant, (3) the starting value in the Markov Chain simulation, (4) the number of iterations of the algorithm, and (5) any data and prior parameters used in the log posterior density.
+
+Here we use {\tt fit\$v} as our estimated var-cov matrix, use a scale value of 3, start the simulation at $(\mu, \log \sigma) = (70, 2)$ and try 10,000 iterations.
+
+<<>>=
+mcmc.fit <- rwmetrop(minmaxpost,
+ list(var=fit$v, scale=3),
+ c(70, 2),
+ 10000,
+ data)
+@
+
+I display the acceptance rate -- here it is about 19\%, which is a reasonable value.
+<<>>=
+mcmc.fit$accept
+@
+
+We display the contours of the exact posterior and overlay the simulated draws.
+
+<<fig=TRUE,echo=TRUE>>=
+mycontour(minmaxpost, c(45, 95, 1.5, 4), data,
+ xlab=expression(mu),
+ ylab=expression(paste("log ",sigma)))
+points(mcmc.fit$par)
+@
+
+It appears that we have obtained a good sample from this posterior distribution.
+
+\section*{Inference About the Upper Quartile}
+
+To illustrate simulation-based inference, suppose one is interested in learning about the upper quartile
+$$
+P.75 = \mu + 0.674 \times \sigma
+$$
+of the car speed distribution. For each simulated draw of $(\mu, \sigma)$ from the posterior, we compute the upper quartile $P.75$. We use the {\tt density} function to construct a density estimate of the simulated sample of $P.75$.
+
+<<fig=TRUE,echo=TRUE>>=
+mu <- mcmc.fit$par[, 1]
+sigma <- exp(mcmc.fit$par[, 2])
+P.75 <- mu + 0.674 * sigma
+plot(density(P.75),
+ main="Posterior Density of Upper Quartile")
+@
+
+
+\end{document}
\ No newline at end of file
diff --git a/vignettes/MultilevelModeling.Rnw b/vignettes/MultilevelModeling.Rnw
new file mode 100644
index 0000000..2fd1da5
--- /dev/null
+++ b/vignettes/MultilevelModeling.Rnw
@@ -0,0 +1,118 @@
+\documentclass{article}
+
+%\VignetteIndexEntry{Introduction to Multilevel Modeling}
+%\VignetteDepends{LearnBayes}
+
+\begin{document}
+\SweaveOpts{concordance=TRUE}
+
+\title{Introduction to Multilevel Modeling}
+\author{Jim Albert}
+
+\maketitle
+
+\section*{Efron and Morris Baseball Data}
+
+Efron and Morris, in a famous 1975 JASA paper, introduced the problem of estimating the true batting averages for 18 players during the 1971 baseball season. In the table, we observe the number of hits for each player in the first 45 batting opportunities of the season.
+<<>>=
+d <- data.frame(Name=c("Clemente", "Robinson", "Howard", "Johnstone",
+ "Berry", "Spencer", "Kessinger", "Alvarado", "Santo",
+ "Swaboda", "Petrocelli", "Rodriguez", "Scott", "Unser",
+ "Williams", "Campaneris", "Munson", "Alvis"),
+ Hits=c(18, 17, 16, 15, 14, 14, 13, 12, 11,
+ 11, 10, 10, 10, 10, 10, 9, 8, 7),
+ At.Bats=45)
+@
+
+\section*{The Multilevel Model}
+
+One can simultaneously estimate the true batting averages by the following multilevel model. We assume the number of hits for the $j$th player, $y_j$, has a binomial distribution with sample size $n_j$ and probability of success $p_j$, $j = 1, ..., 18$. The true batting averages $p_1, ..., p_{18}$ are assumed to be a random sample from a beta($a, b$) distribution. It is convenient to reparameterize $a$ and $b$ into the mean $\eta = a / (a + b)$ and precision $K = a + b$. We assign $(\eta, K)$ the noninformative prior
+$$
+g(\eta, K) \propto \frac{1}{\eta (1 - \eta)}\frac{1}{(1 + K)^2}
+$$
+
+After data $y$ is observed, the posterior distribution of the parameters $(\{p_j\}, \eta, K)$ has the convenient representation
+$$
+g(\{p_j\}, \eta, K | y) = g(\eta, K | y) \times g(\{p_j\} | \eta, K, y).
+$$
+Conditional on $\eta$ and $K$, the posterior distributions of $p_1, ..., p_{18}$ are independent, where
+$$
+p_j \sim Beta(y_j + K \eta, n_j - y_j + K ( 1 - \eta)).
+$$
+The posterior density of $(\eta, K)$ is given by
+$$
+g(\eta, K| y) \propto \prod_{j=1}^{18}
+\left(\frac{B(y_j + K \eta, n_j - y_j + K (1 - \eta))}
+ {B(K \eta, K (1 - \eta))}\right)
+ \frac{1}{\eta (1 - \eta)}\frac{1}{(1 + K)^2}.
+$$
+
+\section*{Simulation of the Posterior of $(\eta, K)$}
+
+For computational purposes, it is convenient to reparameterize $\eta$ and $K$ to the real-valued parameters
+$$
+\theta_1 = \log \frac{\eta}{1 - \eta}, \theta_2 = \log K.
+$$
+The log posterior of the vector $\theta = (\theta_1, \theta_2)$ is programmed in the function {\tt betabinexch}.
+
+We initially use the {\tt laplace} function to find the posterior mode and associated variance-covariance matrix. The inputs are the log posterior function, an initial guess at the mode, and the data.
+<<>>=
+library(LearnBayes)
+laplace.fit <- laplace(betabinexch,
+ c(0, 0),
+ d[, c("Hits", "At.Bats")])
+laplace.fit
+@
+
+The outputs from {\tt laplace} are used to inform the inputs of a random walk Metropolis algorithm in the function {\tt rwmetrop}. The inputs are the function defining the log posterior, the estimate of the variance-covariance matrix and scale for the proposal density, the starting value in the Markov Chain, and the data.
+<<>>=
+mcmc.fit <- rwmetrop(betabinexch,
+ list(var=laplace.fit$var, scale=2),
+ c(0, 0),
+ 5000,
+ d[, c("Hits", "At.Bats")])
+@
+
+To demonstrate that this MCMC algorithm produces a reasonable sample from the posterior, the {\tt mycontour} function displays a contour graph of the exact posterior density and the {\tt points} function is used to overlay 5000 draws from the MCMC algorithm.
+<<fig=TRUE,echo=TRUE>>=
+mycontour(betabinexch, c(-1.5, -0.5, 2, 12),
+ d[, c("Hits", "At.Bats")],
+ xlab="Logit ETA", ylab="Log K")
+with(mcmc.fit, points(par))
+@
+
+\section*{Simulation of the Posterior of the Probabilities}
+
+One can simulate from the joint posterior of $(\{p_j\}, \eta, K)$ by (1) simulating $(\eta, K)$ from its marginal posterior, and (2) simulating $p_1, ..., p_{18}$ from the conditional distribution
+$[\{p_j\} | \eta, K]$. In the R script, I store the simulated draws from the posterior of $K$ and $\eta$ in the vectors {\tt K} and {\tt eta}. Then the function {\tt p.estimate} simulates draws from the posterior of the $j$th probability and computes the posterior median and a 90\% probability interval by extracting the 5th, 50th, and 95th percentiles. I repeat this process for all 18 players with the {\tt sapply} function and display the intervals for all players.
+<<>>=
+eta <- with(mcmc.fit, exp(par[, 1]) / (1 + exp(par[, 1])))
+K <- exp(mcmc.fit$par[, 2])
+p.estimate <- function(j, eta, K){
+ yj <- d[j, "Hits"]
+ nj <- d[j, "At.Bats"]
+ p.sim <- rbeta(5000, yj + K * eta, nj - yj + K * (1 - eta))
+ quantile(p.sim, c(0.05, 0.50, 0.95))
+}
+E <- t(sapply(1:18, p.estimate, eta, K))
+rownames(E) <- d[, "Name"]
+round(E, 3)
+@
+
+The following graph displays the 90 percent probability intervals for
+the players' true batting averages. The blue line represents the {\it individual estimates}, where each batting probability is estimated by the observed batting average. The red line represents the {\it combined estimate}, where one pools all of the data. The multilevel estimates, represented by the dots, are a compromise between the individual and combined estimates.
+
+<<fig=TRUE,echo=TRUE>>=
+plot(d$Hits / 45, E[, 2], pch=19,
+ ylim=c(.15, .40),
+ xlab="Observed AVG", ylab="True Probability",
+ main="90 Percent Probability Intervals")
+for (j in 1:18)
+ lines(d$Hits[j] / 45 * c(1, 1), E[j, c(1, 3)])
+abline(a=0, b=1, col="blue")
+abline(h=mean(d$Hits) / 45, col="red")
+legend("topleft", legend=c("Individual", "Combined"),
+ lty=1, col=c("blue", "red"))
+@
+
+\end{document}
\ No newline at end of file
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-learnbayes.git