[r-cran-zelig] 90/102: New upstream version 5.0-13

Andreas Tille tille at debian.org
Sun Jan 8 17:00:20 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-zelig.

commit 15d66ddc8fa239fc3652c03fdd449f34b62ba029
Author: Andreas Tille <tille at debian.org>
Date:   Sun Jan 8 10:01:25 2017 +0100

    New upstream version 5.0-13
---
 CHANGES                                     |    26 -
 COPYING                                     |   278 -
 DESCRIPTION                                 |    65 +-
 MD5                                         |   611 +-
 NAMESPACE                                   |   336 +-
 NEWS                                        |   148 -
 R/GetObject.R                               |    10 -
 R/GetSlot.R                                 |    12 -
 R/GetSlot.zelig.R                           |    38 -
 R/MCMChook.R                                |    75 -
 R/MLutils.R                                 |   303 -
 R/Zelig-package.R                           |    37 -
 R/as.dataframe.setx.R                       |    15 -
 R/as.matrix.pooled.setx.R                   |    32 -
 R/as.matrix.setx.R                          |    26 -
 R/as.parameters.R                           |   102 -
 R/as.qi.R                                   |   247 -
 R/as.summarized.R                           |    29 -
 R/as.summarized.list.R                      |    12 -
 R/attach.env.R                              |    55 -
 R/bootfn.default.R                          |   263 -
 R/bootstrap.R                               |    35 -
 R/bootstrap.gamma.R                         |    17 -
 R/bootstrap.negbinom.R                      |    17 -
 R/bootstrap.normal.R                        |    23 -
 R/callToString.R                            |    10 -
 R/cluster.formula.R                         |    22 -
 R/cmvglm.R                                  |    89 -
 R/common-methods.R                          |    18 -
 R/create-json.R                             |   191 +
 R/describe.R                                |     7 -
 R/describe.default.R                        |    18 -
 R/describe.zelig.R                          |    13 -
 R/description.R                             |   155 -
 R/exp.R                                     |   140 -
 R/factor.bayes.R                            |    55 -
 R/gamma.R                                   |   131 -
 R/gamma.gee.R                               |   161 -
 R/gamma.survey.R                            |   176 -
 R/get.package.R                             |    94 -
 R/getPredictorTerms.R                       |    45 -
 R/getResponseTerms.R                        |    10 -
 R/getResponseTerms.formula.R                |   130 -
 R/getResponseTerms.list.R                   |    29 -
 R/help.zelig.R                              |    65 -
 R/ignore.R                                  |    22 -
 R/is.formula.R                              |     9 -
 R/list.depth.R                              |    35 -
 R/logit.R                                   |   152 -
 R/logit.bayes.R                             |    89 -
 R/logit.gee.R                               |    98 -
 R/logit.survey.R                            |   191 -
 R/lognorm.R                                 |   107 -
 R/ls.R                                      |    94 -
 R/make.parameters.R                         |    49 -
 R/makeModelMatrix.R                         |    39 -
 R/mi.R                                      |    25 -
 R/mlogit.bayes.R                            |   103 -
 R/model-ar.R                                |    25 +
 R/model-arima.R                             |   379 +
 R/model-bayes.R                             |   139 +
 R/model-binchoice-gee.R                     |    32 +
 R/model-binchoice-survey.R                  |    23 +
 R/model-binchoice.R                         |    38 +
 R/model-exp.R                               |    77 +
 R/model-factor-bayes.R                      |   145 +
 R/model-gamma-gee.R                         |    30 +
 R/model-gamma-survey.R                      |    54 +
 R/model-gamma.R                             |    70 +
 R/model-gee.R                               |    74 +
 R/model-glm.R                               |    33 +
 R/model-logit-bayes.R                       |    43 +
 R/model-logit-gee.R                         |    22 +
 R/model-logit-survey.R                      |    35 +
 R/model-logit.R                             |    41 +
 R/model-lognorm.R                           |   110 +
 R/model-ls.R                                |   169 +
 R/model-ma.R                                |    25 +
 R/model-mlogit-bayes.R                      |    61 +
 R/model-negbinom.R                          |    83 +
 R/model-normal-bayes.R                      |    53 +
 R/model-normal-gee.R                        |    30 +
 R/model-normal-survey.R                     |    74 +
 R/model-normal.R                            |    70 +
 R/model-oprobit-bayes.R                     |    89 +
 R/model-poisson-bayes.R                     |    44 +
 R/model-poisson-gee.R                       |    39 +
 R/model-poisson-survey.R                    |    51 +
 R/model-poisson.R                           |    54 +
 R/model-probit-bayes.R                      |    42 +
 R/model-probit-gee.R                        |    22 +
 R/model-probit-survey.R                     |    34 +
 R/model-probit.R                            |    39 +
 R/model-quantile.R                          |    94 +
 R/model-relogit.R                           |   199 +
 R/model-survey.R                            |    74 +
 R/model-timeseries.R                        |   152 +
 R/model-tobit-bayes.R                       |    56 +
 R/model-tobit.R                             |   108 +
 R/model-weibull.R                           |   110 +
 R/model-zelig.R                             |  1274 ++
 R/model.frame.multiple.R                    |   107 -
 R/model.matrix.multiple.R                   |   109 -
 R/model.matrix.parseFormula.R               |    32 -
 R/model.warnings.R                          |    71 -
 R/multi.dataset.R                           |   144 -
 R/multipleUtil.R                            |    70 -
 R/names.relogit.R                           |     0
 R/negbinom.R                                |   119 -
 R/normal.R                                  |   122 -
 R/normal.bayes.R                            |    90 -
 R/normal.gee.R                              |    71 -
 R/normal.survey.R                           |   161 -
 R/oprobit.bayes.R                           |   140 -
 R/param.R                                   |    60 -
 R/parameters.R                              |   132 -
 R/parse.formula.R                           |   319 -
 R/parseFormula.R                            |   120 -
 R/plots.R                                   |  1577 +-
 R/poisson.R                                 |   116 -
 R/poisson.bayes.R                           |    90 -
 R/poisson.gee.R                             |    71 -
 R/poisson.survey.R                          |   155 -
 R/print.R                                   |   446 -
 R/probit.R                                  |    73 -
 R/probit.bayes.R                            |    48 -
 R/probit.gee.R                              |    71 -
 R/probit.survey.R                           |   101 -
 R/qi.R                                      |    38 -
 R/qi.summarized.R                           |   103 -
 R/relogit.R                                 |   379 -
 R/repl.R                                    |    81 -
 R/robust.glm.hook.R                         |    32 -
 R/robust.hook.R                             |    20 -
 R/setx.R                                    |   312 -
 R/sim.MI.R                                  |    42 -
 R/sim.R                                     |    92 -
 R/sim.default.R                             |   268 -
 R/simulation.matrix.R                       |   116 -
 R/simulations.plot.R                        |   186 -
 R/summarize.R                               |   145 -
 R/summary.R                                 |   404 -
 R/t.setx.R                                  |    14 -
 R/terms.R                                   |   235 -
 R/termsFromFormula.R                        |    15 -
 R/tobit.R                                   |   143 -
 R/twosls.R                                  |   279 -
 R/user.prompt.R                             |    14 -
 R/utils.R                                   |   221 +
 R/vcov.R                                    |    17 -
 R/wrappers.R                                |   269 +
 R/z.R                                       |    53 -
 R/zelig.R                                   |   323 -
 R/zelig.skeleton.R                          |   133 -
 R/zelig2.R                                  |    49 -
 R/zeligBuildWeights.R                       |   175 -
 R/zzz.R                                     |  1383 --
 README                                      |   301 -
 build/vignette.rds                          |   Bin 290 -> 0 bytes
 data/MatchIt.url.tab.gz                     |   Bin
 data/PErisk.txt.bz2                         |   Bin 1221 -> 0 bytes
 data/PErisk.txt.gz                          |   Bin 0 -> 1378 bytes
 data/SupremeCourt.txt.gz                    |   Bin 293 -> 295 bytes
 data/Weimar.txt.gz                          |   Bin
 data/Zelig.url.tab.gz                       |   Bin 687 -> 687 bytes
 data/approval.tab.bz2                       |   Bin 885 -> 0 bytes
 data/approval.tab.gz                        |   Bin 0 -> 996 bytes
 data/bivariate.tab.bz2                      |   Bin 321 -> 0 bytes
 data/bivariate.tab.gz                       |   Bin 0 -> 401 bytes
 data/coalition.tab.gz                       |   Bin 0 -> 2422 bytes
 data/coalition.tab.xz                       |   Bin 2000 -> 0 bytes
 data/coalition2.txt.gz                      |   Bin 0 -> 2677 bytes
 data/coalition2.txt.xz                      |   Bin 2148 -> 0 bytes
 data/eidat.txt.gz                           |   Bin
 data/free1.tab.bz2                          |   Bin 3519 -> 0 bytes
 data/free1.tab.gz                           |   Bin 0 -> 4685 bytes
 data/free2.tab.bz2                          |   Bin 3519 -> 0 bytes
 data/free2.tab.gz                           |   Bin 0 -> 4685 bytes
 data/friendship.RData                       |   Bin 2781 -> 2782 bytes
 data/grunfeld.txt.gz                        |   Bin 508 -> 509 bytes
 data/hoff.tab.gz                            |   Bin
 data/homerun.txt.gz                         |   Bin 0 -> 1740 bytes
 data/homerun.txt.xz                         |   Bin 992 -> 0 bytes
 data/immi1.tab.bz2                          |   Bin 20477 -> 0 bytes
 data/immi1.tab.gz                           |   Bin 0 -> 25531 bytes
 data/immi2.tab.bz2                          |   Bin 20440 -> 0 bytes
 data/immi2.tab.gz                           |   Bin 0 -> 25484 bytes
 data/immi3.tab.bz2                          |   Bin 20414 -> 0 bytes
 data/immi3.tab.gz                           |   Bin 0 -> 25570 bytes
 data/immi4.tab.bz2                          |   Bin 20423 -> 0 bytes
 data/immi4.tab.gz                           |   Bin 0 -> 25541 bytes
 data/immi5.tab.bz2                          |   Bin 20422 -> 0 bytes
 data/immi5.tab.gz                           |   Bin 0 -> 25460 bytes
 data/immigration.tab.bz2                    |   Bin 11707 -> 0 bytes
 data/immigration.tab.gz                     |   Bin 0 -> 15855 bytes
 data/klein.txt.gz                           |   Bin
 data/kmenta.txt.gz                          |   Bin
 data/macro.tab.gz                           |   Bin 0 -> 5534 bytes
 data/macro.tab.xz                           |   Bin 3788 -> 0 bytes
 data/mexico.tab.bz2                         |   Bin 14290 -> 0 bytes
 data/mexico.tab.gz                          |   Bin 0 -> 20322 bytes
 data/mid.tab.bz2                            |   Bin 40063 -> 0 bytes
 data/mid.tab.gz                             |   Bin 0 -> 49174 bytes
 data/newpainters.txt.bz2                    |   Bin 519 -> 0 bytes
 data/newpainters.txt.gz                     |   Bin 0 -> 584 bytes
 data/sanction.tab.bz2                       |   Bin 482 -> 0 bytes
 data/sanction.tab.gz                        |   Bin 0 -> 565 bytes
 data/seatshare.rda                          |   Bin 0 -> 3213 bytes
 data/sna.ex.RData                           |   Bin 23681 -> 23665 bytes
 data/swiss.txt.bz2                          |   Bin 874 -> 0 bytes
 data/swiss.txt.gz                           |   Bin 0 -> 972 bytes
 data/tobin.txt.gz                           |   Bin
 data/turnout.tab.bz2                        |   Bin 10282 -> 0 bytes
 data/turnout.tab.gz                         |   Bin 0 -> 15500 bytes
 data/voteincome.txt.bz2                     |   Bin 2979 -> 0 bytes
 data/voteincome.txt.gz                      |   Bin 0 -> 3851 bytes
 demo/00Index                                |    73 +-
 demo/Zelig.HelloWorld.R                     |   173 -
 demo/demo-amelia.R                          |    48 +
 demo/demo-data-table.R                      |    21 +
 demo/demo-dplyr.R                           |    40 +
 demo/demo-exp.R                             |    25 +
 demo/demo-factor-bayes.R                    |    65 +
 demo/demo-feedback.R                        |    10 +
 demo/demo-gamma-gee.R                       |    34 +
 demo/demo-gamma.R                           |    22 +
 demo/demo-json.R                            |    18 +
 demo/demo-logit-bayes.R                     |    28 +
 demo/demo-logit-gee.R                       |    23 +
 demo/demo-logit.R                           |    51 +
 demo/demo-lognorm.R                         |    25 +
 demo/demo-ls.R                              |    39 +
 demo/demo-mlogit-bayes.R                    |    23 +
 demo/demo-negbinom.R                        |    24 +
 demo/demo-normal-bayes.R                    |    24 +
 demo/demo-normal-gee.R                      |    27 +
 demo/demo-normal.R                          |    21 +
 demo/demo-oprobit-bayes.R                   |    26 +
 demo/demo-poisson-bayes.R                   |    36 +
 demo/demo-poisson-gee.R                     |    27 +
 demo/demo-poisson.R                         |    48 +
 demo/demo-probit-bayes.R                    |    27 +
 demo/demo-probit-gee.R                      |    25 +
 demo/demo-probit.R                          |    27 +
 demo/demo-quantile.R                        |    86 +
 demo/demo-range.R                           |    61 +
 demo/demo-relogit.R                         |    87 +
 demo/demo-roc.R                             |     5 +
 demo/demo-scope.R                           |    61 +
 demo/demo-setx-2.R                          |    48 +
 demo/demo-setx-fn.R                         |    29 +
 demo/demo-setx.R                            |    19 +
 demo/demo-show.R                            |     8 +
 demo/demo-signif-stars.R                    |     4 +
 demo/demo-strata.R                          |    49 +
 demo/demo-tobit.R                           |    28 +
 demo/demo-tobitbayes.R                      |     9 +
 demo/demo-weibull.R                         |    19 +
 demo/demo-wrappers.R                        |    18 +
 demo/demo-zip.R                             |    56 +
 demo/exp.R                                  |    29 -
 demo/factor.bayes.R                         |    35 -
 demo/gamma.R                                |    30 -
 demo/gamma.gee.R                            |    43 -
 demo/gamma.survey.R                         |   100 -
 demo/logit.R                                |    54 -
 demo/logit.bayes.R                          |    57 -
 demo/logit.gee.R                            |   111 -
 demo/logit.survey.R                         |   105 -
 demo/lognorm.R                              |    26 -
 demo/ls.R                                   |    67 -
 demo/mi.R                                   |     9 -
 demo/mlogit.bayes.R                         |    58 -
 demo/negbinom.R                             |    26 -
 demo/normal.R                               |    34 -
 demo/normal.bayes.R                         |    58 -
 demo/normal.gee.R                           |    36 -
 demo/normal.survey.R                        |   101 -
 demo/oprobit.bayes.R                        |    67 -
 demo/poisson.R                              |    26 -
 demo/poisson.bayes.R                        |    59 -
 demo/poisson.gee.R                          |    39 -
 demo/poisson.survey.R                       |   103 -
 demo/probit.R                               |    51 -
 demo/probit.bayes.R                         |    59 -
 demo/probit.gee.R                           |   111 -
 demo/probit.survey.R                        |   106 -
 demo/relogit.R                              |    37 -
 demo/twosls.R                               |    24 -
 demo/vertci.R                               |    41 -
 inst/CITATION                               |    35 +
 inst/JSON/zelig5models.json                 |   446 +
 inst/doc/gamma.pdf                          |   Bin 193871 -> 0 bytes
 inst/doc/logit.pdf                          |   Bin 205148 -> 0 bytes
 inst/doc/ls.pdf                             |   Bin 192727 -> 0 bytes
 inst/doc/manual-bayes.R                     |   439 -
 inst/doc/manual-bayes.pdf                   |   Bin 291461 -> 0 bytes
 inst/doc/manual-gee.R                       |   327 -
 inst/doc/manual-gee.pdf                     |   Bin 360015 -> 0 bytes
 inst/doc/manual.R                           |   377 -
 inst/doc/manual.pdf                         |   Bin 419156 -> 0 bytes
 inst/doc/negbinom.pdf                       |   Bin 203459 -> 0 bytes
 inst/doc/normal.pdf                         |   Bin 191988 -> 0 bytes
 inst/doc/parse.formula.pdf                  |   Bin 77954 -> 0 bytes
 inst/doc/poisson.pdf                        |   Bin 198613 -> 0 bytes
 inst/doc/probit.pdf                         |   Bin 170007 -> 0 bytes
 inst/doc/twosls.R                           |    56 -
 inst/doc/twosls.pdf                         |   Bin 217599 -> 0 bytes
 inst/templates/DESCRIPTION                  |    11 -
 inst/templates/PACKAGE.R                    |    20 -
 inst/templates/ZELIG.README                 |     0
 inst/templates/describe.R                   |    10 -
 inst/templates/param.R                      |    13 -
 inst/templates/qi.R                         |    16 -
 inst/templates/zelig2.R                     |    14 -
 man/GetObject.Rd                            |    19 -
 man/GetSlot.Rd                              |    26 -
 man/GetSlot.zelig.Rd                        |    30 -
 man/MCMChook.Rd                             |    35 -
 man/Max.Rd                                  |    21 -
 man/McmcHookFactor.Rd                       |    34 -
 man/Median.Rd                               |    14 +-
 man/Min.Rd                                  |    21 -
 man/Mode.Rd                                 |    15 +-
 man/TexCite.Rd                              |    17 -
 man/Zelig-ar-class.Rd                       |    11 +
 man/Zelig-arima-class.Rd                    |    11 +
 man/Zelig-bayes-class.Rd                    |    19 +
 man/Zelig-binchoice-class.Rd                |    11 +
 man/Zelig-binchoice-gee-class.Rd            |    13 +
 man/Zelig-binchoice-survey-class.Rd         |    13 +
 man/Zelig-class.Rd                          |   136 +
 man/Zelig-exp-class.Rd                      |    17 +
 man/Zelig-factor-bayes-class.Rd             |    17 +
 man/Zelig-gamma-class.Rd                    |    11 +
 man/Zelig-gamma-gee-class.Rd                |    11 +
 man/Zelig-gamma-survey-class.Rd             |    11 +
 man/Zelig-gee-class.Rd                      |    17 +
 man/Zelig-glm-class.Rd                      |    17 +
 man/Zelig-logit-bayes-class.Rd              |    11 +
 man/Zelig-logit-class.Rd                    |    11 +
 man/Zelig-logit-gee-class.Rd                |    11 +
 man/Zelig-logit-survey-class.Rd             |    11 +
 man/Zelig-lognorm-class.Rd                  |    17 +
 man/Zelig-ls-class.Rd                       |    17 +
 man/Zelig-ma-class.Rd                       |    11 +
 man/Zelig-mlogit-bayes-class.Rd             |    11 +
 man/Zelig-negbin-class.Rd                   |    17 +
 man/Zelig-normal-bayes-class.Rd             |    11 +
 man/Zelig-normal-class.Rd                   |    11 +
 man/Zelig-normal-gee-class.Rd               |    11 +
 man/Zelig-normal-survey-class.Rd            |    11 +
 man/Zelig-oprobit-bayes-class.Rd            |    11 +
 man/Zelig-package.Rd                        |    38 -
 man/Zelig-poisson-bayes-class.Rd            |    11 +
 man/Zelig-poisson-class.Rd                  |    11 +
 man/Zelig-poisson-gee-class.Rd              |    11 +
 man/Zelig-poisson-survey-class.Rd           |    11 +
 man/Zelig-probit-bayes-class.Rd             |    11 +
 man/Zelig-probit-class.Rd                   |    11 +
 man/Zelig-probit-gee-class.Rd               |    11 +
 man/Zelig-probit-survey-class.Rd            |    11 +
 man/Zelig-quantile-class.Rd                 |    17 +
 man/Zelig-relogit-class.Rd                  |    17 +
 man/Zelig-survey-class.Rd                   |    17 +
 man/Zelig-timeseries-class.Rd               |    21 +
 man/Zelig-tobit-bayes-class.Rd              |    11 +
 man/Zelig-tobit-class.Rd                    |    17 +
 man/Zelig-weibull-class.Rd                  |    17 +
 man/ZeligDescribeModel.Rd                   |    23 -
 man/ZeligListModels.Rd                      |    27 -
 man/ZeligListTitles.Rd                      |    13 -
 man/alpha.Rd                                |    21 -
 man/as.bootlist.Rd                          |    25 -
 man/as.bootvector.Rd                        |    30 -
 man/as.data.frame.setx.Rd                   |    31 -
 man/as.description.Rd                       |    28 -
 man/as.description.description.Rd           |    21 -
 man/as.description.list.Rd                  |    21 -
 man/as.matrix.pooled.setx.Rd                |    33 -
 man/as.matrix.setx.Rd                       |    33 -
 man/as.parameters.Rd                        |    43 -
 man/as.parameters.default.Rd                |    27 -
 man/as.parameters.list.Rd                   |    28 -
 man/as.parameters.parameters.Rd             |    25 -
 man/as.qi.Rd                                |    40 -
 man/as.qi.default.Rd                        |    19 -
 man/as.qi.list.Rd                           |    31 -
 man/as.qi.qi.Rd                             |    19 -
 man/as.summarized.Rd                        |    28 -
 man/as.summarized.list.Rd                   |    23 -
 man/as.summarized.summarized.qi.Rd          |    21 -
 man/attach.env.Rd                           |    40 -
 man/avg.Rd                                  |    18 +
 man/bootfn.default.Rd                       |    34 -
 man/bootstrap.Rd                            |    33 -
 man/bootstrap.default.Rd                    |    26 -
 man/bootstrap.gamma.Rd                      |    26 -
 man/bootstrap.negbinom.Rd                   |    26 -
 man/bootstrap.normal.Rd                     |    29 -
 man/callToString.Rd                         |    22 -
 man/ci.plot.Rd                              |    63 +
 man/cite.Rd                                 |    19 -
 man/cluster.formula.Rd                      |    14 +-
 man/cmvglm.Rd                               |    25 -
 man/coef-Zelig-method.Rd                    |    18 +
 man/coef.parameters.Rd                      |    28 -
 man/combine.Rd                              |    24 -
 man/constructDataFrame.Rd                   |    24 -
 man/constructDesignMatrix.Rd                |    22 -
 man/createJSON.Rd                           |    16 +
 man/depends.on.zelig.Rd                     |    24 -
 man/describe.Rd                             |    19 -
 man/describe.default.Rd                     |    25 -
 man/describe.exp.Rd                         |    19 -
 man/describe.gamma.Rd                       |    19 -
 man/describe.logit.Rd                       |    19 -
 man/describe.ls.Rd                          |    22 -
 man/describe.negbinom.Rd                    |    22 -
 man/describe.normal.Rd                      |    19 -
 man/describe.poisson.Rd                     |    19 -
 man/describe.probit.Rd                      |    19 -
 man/describe.tobit.Rd                       |    19 -
 man/describe.zelig.Rd                       |    28 -
 man/description.Rd                          |    37 -
 man/find.match.Rd                           |    35 -
 man/fitted-Zelig-method.Rd                  |    18 +
 man/get.package.Rd                          |    26 -
 man/getPredictorTerms.Rd                    |    26 -
 man/getResponseTerms.Formula-not-formula.Rd |    31 -
 man/getResponseTerms.Rd                     |    20 -
 man/getResponseTerms.formula.Rd             |    30 -
 man/getResponseTerms.list.Rd                |    23 -
 man/has.zelig2.Rd                           |    23 -
 man/help.zelig.Rd                           |    19 -
 man/homerun.Rd                              |     2 +-
 man/ignore.Rd                               |    26 -
 man/is.formula.Rd                           |    24 -
 man/is.qi.Rd                                |    20 -
 man/is.valid.qi.list.Rd                     |    19 -
 man/is.zelig.compliant.Rd                   |    30 -
 man/is.zelig.package.Rd                     |    21 -
 man/link.Rd                                 |    21 -
 man/linkinv.Rd                              |    23 -
 man/list.depth.Rd                           |    21 -
 man/list.zelig.dependent.packages.Rd        |    20 -
 man/list.zelig.models.Rd                    |    19 -
 man/loadDependencies.Rd                     |    36 -
 man/make.parameters.Rd                      |    26 -
 man/makeModelMatrix.Rd                      |    22 -
 man/makeZeligObject.Rd                      |    37 -
 man/mi.Rd                                   |    27 -
 man/mid.Rd                                  |     3 +-
 man/mix.Rd                                  |    24 -
 man/model.frame.multiple.Rd                 |    28 -
 man/model.matrix.multiple.Rd                |    28 -
 man/model.matrix.parseFormula.Rd            |    32 -
 man/multilevel.Rd                           |    29 -
 man/name.object.Rd                          |    30 -
 man/names.qi.Rd                             |    29 -
 man/param.Rd                                |    67 -
 man/param.default.Rd                        |    21 -
 man/param.exp.Rd                            |    27 -
 man/param.gamma.Rd                          |    25 -
 man/param.logit.Rd                          |    27 -
 man/param.ls.Rd                             |    27 -
 man/param.negbinom.Rd                       |    27 -
 man/param.normal.Rd                         |    27 -
 man/param.poisson.Rd                        |    27 -
 man/param.probit.Rd                         |    27 -
 man/param.relogit.Rd                        |    26 -
 man/param.relogit2.Rd                       |    26 -
 man/param.tobit.Rd                          |    27 -
 man/parameters.Rd                           |    34 -
 man/parse.formula.Rd                        |    24 -
 man/parseFormula.Rd                         |    25 -
 man/parseFormula.formula.Rd                 |    24 -
 man/parseFormula.list.Rd                    |    21 -
 man/plot-Zelig-ANY-method.Rd                |    20 +
 man/plot.MI.sim.Rd                          |    21 -
 man/plot.ci.Rd                              |    60 -
 man/plot.pooled.sim.Rd                      |    62 -
 man/plot.sim.Rd                             |    23 -
 man/plot.simulations.Rd                     |    28 -
 man/predict-Zelig-method.Rd                 |    18 +
 man/print.qi.Rd                             |    22 -
 man/print.qi.summarized.Rd                  |    25 -
 man/print.setx.Rd                           |    21 -
 man/print.setx.mi.Rd                        |    21 -
 man/print.sim.Rd                            |    22 -
 man/print.summary.MCMCZelig.Rd              |    27 -
 man/print.summary.pooled.sim.Rd             |    28 -
 man/print.summary.relogit.Rd                |    24 -
 man/print.summary.relogit2.Rd               |    22 -
 man/print.summary.sim.Rd                    |    22 -
 man/print.summarySim.MI.Rd                  |    21 -
 man/print.zelig.Rd                          |    21 -
 man/qi.Rd                                   |    61 -
 man/qi.exp.Rd                               |    34 -
 man/qi.plot.Rd                              |    21 +
 man/qi.summarize.Rd                         |    36 -
 man/reduce.Rd                               |    30 +
 man/reduceMI.Rd                             |    24 -
 man/relogit.Rd                              |    27 +-
 man/repl.Rd                                 |    21 -
 man/repl.default.Rd                         |    23 -
 man/repl.sim.Rd                             |    45 -
 man/replace.call.Rd                         |    26 -
 man/robust.gee.hook.Rd                      |    32 -
 man/robust.glm.hook.Rd                      |    26 -
 man/rocplot.Rd                              |    99 +-
 man/seatshare.Rd                            |    24 +
 man/setfactor.Rd                            |    21 +
 man/setval.Rd                               |    21 +
 man/setx.MI.Rd                              |    33 -
 man/setx.Rd                                 |    73 +-
 man/setx.default.Rd                         |    31 -
 man/sim.MI.Rd                               |    38 -
 man/sim.Rd                                  |   189 +-
 man/sim.default.Rd                          |    43 -
 man/simacf.Rd                               |    13 +
 man/simulation.matrix.Rd                    |    28 -
 man/simulations.parameters.Rd               |    28 -
 man/simulations.plot.Rd                     |    48 +-
 man/special_print_LIST.Rd                   |    26 -
 man/special_print_MATRIX.Rd                 |    25 -
 man/splitUp.Rd                              |    33 -
 man/stat.Rd                                 |    24 +
 man/statlevel.Rd                            |    24 +
 man/statmat.Rd                              |    22 +
 man/store.object.Rd                         |    41 -
 man/structuralToReduced.Rd                  |    20 -
 man/summarize.Rd                            |    21 -
 man/summarize.default.Rd                    |    20 -
 man/summary-Zelig-method.Rd                 |    18 +
 man/summary.Arima.Rd                        |    20 +
 man/summary.MI.Rd                           |    23 -
 man/summary.MI.sim.Rd                       |    22 -
 man/summary.Relogit2.Rd                     |    18 -
 man/summary.glm.robust.Rd                   |    22 -
 man/summary.pooled.sim.Rd                   |    28 -
 man/summary.relogit.Rd                      |    18 -
 man/summary.sim.Rd                          |    21 -
 man/summary.zelig.Rd                        |    22 -
 man/t.setx.Rd                               |    23 -
 man/table.levels.Rd                         |    21 +-
 man/terms.multiple.Rd                       |    21 -
 man/terms.vglm.Rd                           |    21 -
 man/terms.zelig.Rd                          |    19 -
 man/termsFromFormula.Rd                     |    17 -
 man/toBuildFormula.Rd                       |    21 -
 man/tolmerFormat.Rd                         |    25 -
 man/ucfirst.Rd                              |    20 -
 man/user.prompt.Rd                          |    20 -
 man/vcov-Zelig-method.Rd                    |    18 +
 man/z.Rd                                    |    29 -
 man/zelig.Rd                                |    96 +-
 man/zelig.call.Rd                           |    27 -
 man/zelig.skeleton.Rd                       |    62 -
 man/zelig2-bayes.Rd                         |    49 -
 man/zelig2-core.Rd                          |    69 -
 man/zelig2-gee.Rd                           |    48 -
 man/zelig2-survey.Rd                        |   148 -
 man/zelig2.Rd                               |    58 -
 man/zeligACFplot.Rd                         |    16 +
 man/zeligARMAbreakforecaster.Rd             |    14 +
 man/zeligARMAlongrun.Rd                     |    14 +
 man/zeligARMAnextstep.Rd                    |    14 +
 man/zeligArimaWrapper.Rd                    |    14 +
 man/zeligBuildWeights.Rd                    |    47 -
 man/zeligPlyrMutate.Rd                      |    13 +
 po/R-en.po                                  |   524 -
 tests/MatchIt.R                             |    19 -
 tests/amelia.R                              |    45 -
 tests/by.R                                  |     9 -
 tests/lognorm.R                             |    26 -
 tests/mi.R                                  |     9 -
 tests/mix.R                                 |    28 -
 tests/models-bayes.R                        |   135 -
 tests/models-core.R                         |   144 -
 tests/models-gee.R                          |   152 -
 tests/models-survey.R                       |   326 -
 tests/plot-ci.R                             |    30 -
 tests/pooled.R                              |    11 -
 tests/relogit.R                             |    20 -
 tests/summary.MI.R                          |    13 -
 tests/testthat.R                            |     5 +
 tests/testthat/test-logit.R                 |     3 +
 tests/testthat/test-lognom.R                |     3 +
 tests/testthat/test-ls.R                    |     3 +
 tests/testthat/test-negbin.R                |     3 +
 tests/testthat/test-poisson.R               |     3 +
 tests/testthat/test-probit.R                |     3 +
 tests/twosls.R                              |    22 -
 vignettes/Zelig.bib                         |    65 -
 vignettes/Zelig.sty                         |    33 -
 vignettes/gk.bib                            | 20130 --------------------------
 vignettes/gkpubs.bib                        |  2259 ---
 598 files changed, 9706 insertions(+), 46418 deletions(-)

diff --git a/CHANGES b/CHANGES
deleted file mode 100644
index 78adab7..0000000
--- a/CHANGES
+++ /dev/null
@@ -1,26 +0,0 @@
-4.0-6:
- * Improving parse.formula function
- * Renamed 'parse.formula' to 'parseFormula', to avoid name confusion and
-   adhere to common naming standards in Zelig.
-
-4.0-5 (August 25th, 2011):
- * Removed dependency on 'iterators' package
- * Updated NAMESPACE and DESCRIPTION files correspondingly
- * Restructured internals of 'zelig' function
- * Added CHANGES file to summarize detail-oriented code changes
- * Print methods for multiply-imputed data-sets have been improved
- * Print methods for setx objects have been improved
- * zelig now returns a specially constructed list when handling multiply-
-   imputed data-sets
- * The 'state' variable has been added to the zelig and MI objects.
-   This variable stores shared information between an "MI" (zelig-list) object
-   and all its children.
- * Beautified output of print.setx
- * Added terms.zelig as a method with documentation
- * Added a 'formula' index to the 'zelig' class. This contains a formula
-   identical to that contained in the result index (within the zelig
-   object).
- * Added "old-formula" variable to the "state" attribute (this attribute is an
-   environment. That is, the attribute "state", which is an environment, now
-   contains a variable titled "old-formula". This specifies the original
-   formula submitted to the 'zelig' function
diff --git a/COPYING b/COPYING
deleted file mode 100644
index 727ef8f..0000000
--- a/COPYING
+++ /dev/null
@@ -1,278 +0,0 @@
-		    GNU GENERAL PUBLIC LICENSE
-		       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-                          675 Mass Ave, Cambridge, MA 02139, USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-			    Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-		    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-
-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-			    NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
diff --git a/DESCRIPTION b/DESCRIPTION
index 8c1f1aa..2be036c 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,29 +1,46 @@
 Package: Zelig
-Maintainer: James Honaker <zelig-zee at iq.harvard.edu>
+Maintainer: James Honaker <zelig.zee at gmail.com>
 License: GPL (>= 2)
 Title: Everyone's Statistical Software
-Author: Matt Owen <mowen at iq.harvard.edu>, Kosuke Imai
-    <kimai at Princeton.Edu>, Gary King <king at harvard.edu>, Olivia Lau
-    <olau at fas.harvard.edu>
-Description: Zelig is an easy-to-use program that can estimate, and
-    help interpret the results of, an enormous range of statistical
-    models. It literally is ``everyone's statistical software'' because
-    Zelig's simple unified framework incorporates everyone else's (R)
-    code. We also hope it will become ``everyone's statistical
-    software'' for applications and teaching, and so have designed
-    Zelig so that anyone can easily use it or add their programs to it.
-    Zelig also comes with infrastructure that facilitates the use of
-    any existing method, such as by allowing multiply imputed data for
-    any model, and mimicking the program Clarify (for Stata) that takes
-    the raw output of existing statistical procedures and translates
-    them into quantities of direct interest.
-Version: 4.2-1
-URL: http://gking.harvard.edu/zelig
-Date: 2013-09-12
-Depends: R (>= 2.14), boot, MASS, methods, sandwich
-Suggests: Amelia, mvtnorm, Formula, gee, survey, survival, systemfit,
-        MatchIt, MCMCpack, coda
-Packaged: 2013-09-20 23:15:59 UTC; jhonaker
+Author: Christine Choirat, James Honaker, Kosuke Imai, Gary King, Olivia Lau
+Description: A framework that brings together an abundance of common
+    statistical models found across packages into a unified interface, and
+    provides a common architecture for estimation and interpretation, as well
+    as bridging functions to absorb increasingly more models into the
+    collective library.  Zelig allows each individual package, for each
+    statistical model, to be accessed by a common uniformly structured call and
+    set of arguments.  Moreover, Zelig automates all the surrounding building
+    blocks of a statistical work-flow -- procedures and algorithms that may be
+    essential to one user's application but which the original package
+    developer did not use in their own research and might not themselves
+    support. These include bootstrapping, jackknifing, and re-weighting of data.
+    In particular, Zelig automatically generates predicted and simulated
+    quantities of interest (such as relative risk ratios, average treatment
+    effects, first differences and predicted and expected values) to interpret
+    and visualize complex models.
+Version: 5.0-13
+Date: 2016-11-01
+Imports: sandwich, methods, MASS, survival, VGAM, jsonlite, AER, plyr,
+        dplyr (>= 0.3.0.2), quantreg, geepack, MCMCpack, coda, Amelia,
+        MatchIt, maxLik, survey
+Suggests: testthat, knitr
+Collate: 'utils.R' 'model-zelig.R' 'model-weibull.R' 'model-tobit.R'
+        'model-bayes.R' 'model-tobit-bayes.R' 'model-glm.R'
+        'model-gee.R' 'model-gamma.R' 'model-binchoice.R'
+        'model-logit.R' 'model-relogit.R' 'model-quantile.R'
+        'model-probit.R' 'model-binchoice-gee.R' 'model-probit-gee.R'
+        'model-probit-bayes.R' 'model-poisson.R' 'model-poisson-gee.R'
+        'model-poisson-bayes.R' 'model-oprobit-bayes.R'
+        'model-normal.R' 'model-normal-gee.R' 'model-normal-bayes.R'
+        'model-negbinom.R' 'model-mlogit-bayes.R' 'model-ls.R'
+        'model-lognorm.R' 'model-logit-gee.R' 'model-logit-bayes.R'
+        'model-gamma-gee.R' 'model-factor-bayes.R' 'model-exp.R'
+        'create-json.R' 'model-survey.R' 'model-binchoice-survey.R'
+        'model-gamma-survey.R' 'model-logit-survey.R'
+        'model-normal-survey.R' 'model-poisson-survey.R'
+        'model-probit-survey.R' 'model-timeseries.R' 'model-arima.R'
+        'model-ar.R' 'model-ma.R' 'plots.R' 'wrappers.R'
 NeedsCompilation: no
+Packaged: 2016-11-01 23:23:33 UTC; tercer
 Repository: CRAN
-Date/Publication: 2013-09-21 08:14:26
+Date/Publication: 2016-11-02 18:19:57
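
The unified interface described in the new Description field follows the
Zelig 5 estimate/setx/simulate workflow (the wrapper calls ship in the
R/wrappers.R file added above). A minimal sketch, assuming R's built-in
swiss data set; the formula and model choice here are illustrative only:

    library(Zelig)
    # Estimate: one uniformly structured call, whatever the backend package
    z.out <- zelig(Fertility ~ Education + Agriculture,
                   model = "ls", data = swiss)
    # Set explanatory variables to chosen values (covariate means by default)
    x.out <- setx(z.out)
    # Simulate quantities of interest (expected and predicted values)
    s.out <- sim(z.out, x = x.out)
    summary(s.out)
    plot(s.out)
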
diff --git a/MD5 b/MD5
index ca28a44..2ca9286 100644
--- a/MD5
+++ b/MD5
@@ -1,436 +1,243 @@
-577c21becf5e9c56c5fe0e528be7424d *CHANGES
-263b03fca1747f78c6f7dc74e8d028b3 *COPYING
-edc38b6ac6225e48a65c07509990e71a *DESCRIPTION
-1ac2c7f3615bd20410c3c28653a88a97 *NAMESPACE
-bd30b95abbc5202460180cb71124ac1b *NEWS
-b087530253b66e2f221a18045b8b2797 *R/GetObject.R
-04b1a37fe9c0ea566ee393326e1b1e1d *R/GetSlot.R
-fec78b51d14f64c8fd150423b150ea6c *R/GetSlot.zelig.R
-dced6b0eac63aed97fb5a4dda097b21e *R/MCMChook.R
-9b6ed3cf40cf3c85d9f0027d16b2d0a2 *R/MLutils.R
-922284ab917d3c8020c260774dd310c6 *R/Zelig-package.R
-38ff46f6c8894a6edb307ab769c1df47 *R/as.dataframe.setx.R
-8c993d074f1cdb9f04ac32d7c2059cf0 *R/as.matrix.pooled.setx.R
-9fb636319620cbc85fafa03eb71c1564 *R/as.matrix.setx.R
-e0953e2dca569c7777227d6ca1c575a7 *R/as.parameters.R
-af7d00382f5752ed4d43da5987251fd3 *R/as.qi.R
-a069447d1fef9f59f5c5baa13868d4cc *R/as.summarized.R
-fce511662ec23766b6c8008f9487ea9b *R/as.summarized.list.R
-281ca3ae7ed98ff52d143c443d967cf9 *R/attach.env.R
-a8d5150931b2b16fe54ec095103384f9 *R/bootfn.default.R
-57a7b009cdd02bac27d8de7eec9b7579 *R/bootstrap.R
-f3baaa4332c8a63ee08391380c8610b6 *R/bootstrap.gamma.R
-b15d34bc142939e4187892e1f87aaa67 *R/bootstrap.negbinom.R
-1e627353517c14db57aaa418fbe47927 *R/bootstrap.normal.R
-18dcd166adb42d5394871285446aace1 *R/callToString.R
-bbb80a37728924859ec04c51e4012f8f *R/cluster.formula.R
-034ffe0bb6847bae195f0a4b4b622cfa *R/cmvglm.R
-605fd97789b918b1551ef1ba115c21d9 *R/common-methods.R
-3baba2387850a5790bed8a37791e5351 *R/describe.R
-25c13325bf3130035c20b7aae04e3cb4 *R/describe.default.R
-7725517c5014cb8bf5f61d158e90ec32 *R/describe.zelig.R
-86cd18c299b633531983e9b3c6d47c80 *R/description.R
-9dbbfc42a46347db9838de2c55255c45 *R/exp.R
-c2df466056cad5fcdb6b69094966df03 *R/factor.bayes.R
-122238465b2d50bc98c3472166c8083f *R/gamma.R
-d5b500d351bf6bf0504d5e0e0f91e8cb *R/gamma.gee.R
-e240ebf5f63cfea783259426b4439b8e *R/gamma.survey.R
-d4d37567c7a668594f16aa1fb2b87524 *R/get.package.R
-2d4d275c23474b79dd5a44433c2a1fe9 *R/getPredictorTerms.R
-2b0ecd0287f4a0261e715ee86d195e28 *R/getResponseTerms.R
-3e72a999f3dd2e3bab609a66cd0b7f6a *R/getResponseTerms.formula.R
-8c60d36664b0f5b65460c8038899aa02 *R/getResponseTerms.list.R
-23bebcebf65e49089e37367ab0d4d1e2 *R/help.zelig.R
-1a9c2fdc0277364a211a9b051a734ed9 *R/ignore.R
-da6bd58b3374044577642c068d6bd45f *R/is.formula.R
-5f1230fa11c859fc902f2c026521a20e *R/list.depth.R
-884b01095b353823da0ecc4f9ec0e803 *R/logit.R
-390532016c7c3259b80f261771917c72 *R/logit.bayes.R
-d8f4ccc940d87ceeca7a9d49872d8147 *R/logit.gee.R
-8afab5bb4e93510fa4d2889813314109 *R/logit.survey.R
-7fb40166a123fdfcc57f95d53626e1df *R/lognorm.R
-a392f5cc85177a05e18aebf1411c3e85 *R/ls.R
-1be0c5070cd5bbeae803a4e4c0ea9782 *R/make.parameters.R
-722f70b4ab267ed63c12775d26d97210 *R/makeModelMatrix.R
-f80e2219a6824b74d020fa3d73c5bcf2 *R/mi.R
-c6dba740ff5b5b45b10d08ee84f02a81 *R/mlogit.bayes.R
-47a223326c0d43f7682ee7ee09c11a8c *R/model.frame.multiple.R
-057b9fe88d7a95248d344659c747c741 *R/model.matrix.multiple.R
-e335ed7e0bf3ce1218bfa659edabfd98 *R/model.matrix.parseFormula.R
-e3e1629fa2bdd28a9a4606501723437e *R/model.warnings.R
-e8eeed1cda2ed6af7b4513f171ee99cd *R/multi.dataset.R
-a2338a7f2a569d976a7431c516a55ce4 *R/multipleUtil.R
-d41d8cd98f00b204e9800998ecf8427e *R/names.relogit.R
-1dc7173a8065deed986ce0d102afc4d2 *R/negbinom.R
-bde1114a0a7f7dd4686d69fd6a66fc6f *R/normal.R
-c9ea43bfb803fdf16569fa19bb23bd9e *R/normal.bayes.R
-be1a168ba1761809c8e7c3e2fa45a662 *R/normal.gee.R
-c532712f4284ce882a4fb8ef708a2161 *R/normal.survey.R
-b419598efacb4f6db0a75c41387cb3bc *R/oprobit.bayes.R
-5b8d60db6ebb46faf99d0ace8d8cdee1 *R/param.R
-246f95bbf2338c3d83b3f573e3d74613 *R/parameters.R
-4256c2a803299e053aef257d076cae8a *R/parse.formula.R
-80ca8d82671353a2c58694af079fd50a *R/parseFormula.R
-dd2d201c03933dcd464fc7eb0ca33b50 *R/plots.R
-995511490db5fd0770d8bf1f797e337a *R/poisson.R
-06f7988988376b2fcbe80d550b41546b *R/poisson.bayes.R
-d365262df07215b227411820d0a7b6c6 *R/poisson.gee.R
-490a754a436a5aea141d57b6028e84e1 *R/poisson.survey.R
-88cad5ed1eeb242eeea3ad46118a4e43 *R/print.R
-91e476019d62c0d863dbf20f0d25774a *R/probit.R
-0810aa237d64f6108655e2ce6cc96dbe *R/probit.bayes.R
-b99bcb6128cee77d276e7842835f6cce *R/probit.gee.R
-9146c4b6f8823d638fbd9edfd6d49359 *R/probit.survey.R
-94221b6f88ee82df95fd82560b91cf74 *R/qi.R
-457528566b2e4e2d4555fe6d0763ac71 *R/qi.summarized.R
-9d6a844d7dd8e981c54873c6d9e73b1f *R/relogit.R
-2e86f2447930d4d4c8bec285e5d99fdd *R/repl.R
-8c9d7d6f667dbd1685020090536e8472 *R/robust.glm.hook.R
-b5aa8503dcb438182d3c4abe2116cf66 *R/robust.hook.R
-90a390fe2a8e84fb283a73648a6c5faf *R/setx.R
-1d0ec64c99e67ef82349ad39ed1bb5f6 *R/sim.MI.R
-07784710c5dc5b989f320184ddfa70fc *R/sim.R
-c271f5826afc0d1905363a718becfe83 *R/sim.default.R
-273efcd0bd95588cb76835a799f913c8 *R/simulation.matrix.R
-530a23c14badb456da3f26cd18574810 *R/simulations.plot.R
-13d14dc0c552b55857c56d10ba43cb3a *R/summarize.R
-9545402c696aa3c3c8328bc539e90f37 *R/summary.R
-5f82bb886519200bfb8ef9c6b7e2c3b2 *R/t.setx.R
-26a1ddd198da5b23b007bd5154add412 *R/terms.R
-aa36e193d4ac3b6a77eaf39d36c6b109 *R/termsFromFormula.R
-60a72741864b42dc622f729d7f103235 *R/tobit.R
-a63434f6d1e10509df1a2a2d916d68ac *R/twosls.R
-ad6237fff8d120414edbb13e090eb5b2 *R/user.prompt.R
-b1547c95d94d113b0b2b759c0aa52222 *R/vcov.R
-6b22678c08fed5bb69c63a89018c8e49 *R/z.R
-7f404f13a7309f2edabeb2c5f6ab2cbf *R/zelig.R
-a6e8f15dbebb2be11fe21a697e7fe79d *R/zelig.skeleton.R
-0b2980cf9cc2abfd8d935a4847d4c2f3 *R/zelig2.R
-778221f28a111f964ae64f328edb59e8 *R/zeligBuildWeights.R
-bb500fd793bba24dd5794f4da6401132 *R/zzz.R
-5dde81c7d95c4d5ee7c2d6112a56ed45 *README
-3cc5f0b138fc2b0b28111f49c37e09ac *build/vignette.rds
+2eec4c44bb7644239e0318f840972f97 *DESCRIPTION
+5342f221cbe89af530fac3f4edd2771c *NAMESPACE
+051a11b0783a6a6a0f4c57abd50259a0 *R/create-json.R
+c7d9a1d3b894bb729a56108024f4d594 *R/model-ar.R
+046159268b1fc4b3a6e6e8ddab697494 *R/model-arima.R
+039b9b18940b3c69c2981b73bae6d86b *R/model-bayes.R
+de52ae95a161be627e87bb004698c03a *R/model-binchoice-gee.R
+b67bd8ffcdc291f632417c95ad0b9fc5 *R/model-binchoice-survey.R
+78abc68d47e75e693a9091758d23c096 *R/model-binchoice.R
+3ec696aafb31df38f674049420224ba8 *R/model-exp.R
+987d6967515a7946987fa766600a2136 *R/model-factor-bayes.R
+af8917e305af4d0380e737282cf3b0df *R/model-gamma-gee.R
+77295b183a96a5ca254f363c93ae572c *R/model-gamma-survey.R
+a9dcf39bb6a5876ff891df7cfb06bc7c *R/model-gamma.R
+2981c79836f75d5d118cdcc379692ad5 *R/model-gee.R
+2eee0bff4baac2416469135cfa4a1b87 *R/model-glm.R
+7959f3e4b3fac18420ea9e0697dde0b5 *R/model-logit-bayes.R
+7947bfaedb645ceb97144ef53bf4fc6e *R/model-logit-gee.R
+2d189df09e83f0a35aeffbff7828019d *R/model-logit-survey.R
+aabe0bc5fb82e512b23a101beed6a746 *R/model-logit.R
+c3b88bc2cb85d3f5ab12f6859787cc36 *R/model-lognorm.R
+210d657a1ffd8629feb09c2745130b42 *R/model-ls.R
+09ae801ecd181125085bdbdb190bc9a0 *R/model-ma.R
+42a3a8a5bbd51d836b97857bc35c1512 *R/model-mlogit-bayes.R
+a83bed106c1331f09b75053458121147 *R/model-negbinom.R
+3296b6dcfa3cb1b0899b5c83b50796f7 *R/model-normal-bayes.R
+e9ee642bad0c1dad28641e8447434a97 *R/model-normal-gee.R
+8690617c3e011a819c43bf3a095c3cc4 *R/model-normal-survey.R
+0508c3ba1a20417c91be465fe0c31c3a *R/model-normal.R
+b32462f4cc48c0cf841e5d601f8e165f *R/model-oprobit-bayes.R
+1d457854b8618d9de25368903ce41dca *R/model-poisson-bayes.R
+73e78da3c259ed29e1eec24dded08c53 *R/model-poisson-gee.R
+bf96a97c7a0d17e2e6415a06de2a764d *R/model-poisson-survey.R
+c0fcf0ea5c57c9bea82d3a10bde49364 *R/model-poisson.R
+5cbe34319e7b90b17f6deea444b402d7 *R/model-probit-bayes.R
+4346f6383f3d4c088feda4a76ae600b6 *R/model-probit-gee.R
+f82b5d81e7d1eecb48dc9653c850fa37 *R/model-probit-survey.R
+f1f960b401716365e6384e5f59a8ad85 *R/model-probit.R
+321b016f7bc09ebfedc73d2b0e1591df *R/model-quantile.R
+d9758a77725eef823f3f729283f06faf *R/model-relogit.R
+43bfd65e7f664f57b5236c6fc2e76a63 *R/model-survey.R
+b0696658a8de3573066d890eebf75898 *R/model-timeseries.R
+05afb39e5194a5254e932f511155c327 *R/model-tobit-bayes.R
+2fb75077534b3895dde170227320a9c4 *R/model-tobit.R
+e9bd975aaf78acee9ad3db35b7706f4d *R/model-weibull.R
+7e78e6542e26dcf540fa5ed944219481 *R/model-zelig.R
+646c500af1a734d8304d7bcb2689e930 *R/plots.R
+b9d05ac25338f2a01f6e644ffb4274d5 *R/utils.R
+7bd01d83a73c55cd11df638525488274 *R/wrappers.R
 d8568ae5389248b670f8c744a6393bc5 *data/MatchIt.url.tab.gz
-42bfcc353eae9f5f6903d5018fd21f17 *data/PErisk.txt.bz2
-118d8ee31df10e8303e70d33299afde0 *data/SupremeCourt.txt.gz
+a6f9d73b7928a4b1b3098db3f47e5daa *data/PErisk.txt.gz
+92a23a476e24f1cd6d24d0da91400dfd *data/SupremeCourt.txt.gz
 b7e99eba34328eb8666a65215d295aec *data/Weimar.txt.gz
-2b28913437fe43e4a443c2135967db7a *data/Zelig.url.tab.gz
-563377d20b9e0aaf15d021fe889495b1 *data/approval.tab.bz2
-9117157e9719f41f4d6a2d0687717ed6 *data/bivariate.tab.bz2
-40b068ce0210035c9a2a0f7c7cd70f6e *data/coalition.tab.xz
-03b4d236774f27b217f9c8a63eecbfcd *data/coalition2.txt.xz
+10c152956b65fb8dd9ec77e5b7e292f0 *data/Zelig.url.tab.gz
+0032352d73cb7588e5380d280032f3f0 *data/approval.tab.gz
+55d9d1a669d8be91391c84f3fa043d73 *data/bivariate.tab.gz
+9d8e26c166e0e37c0db973a83101a6b9 *data/coalition.tab.gz
+7f751eba795fe4a5fc05fa59db936639 *data/coalition2.txt.gz
 809c9dc00afa3a9a2fac98c6a5beb07a *data/eidat.txt.gz
-9d604cbab664c61aecb2029b22e1ff09 *data/free1.tab.bz2
-9d604cbab664c61aecb2029b22e1ff09 *data/free2.tab.bz2
-e6d3d10b9f34dc691c29b473ce907117 *data/friendship.RData
-66063f43a7ab713fe9902234fff20336 *data/grunfeld.txt.gz
+d96e13fe15af9acc7acfe60c5ed49202 *data/free1.tab.gz
+d96e13fe15af9acc7acfe60c5ed49202 *data/free2.tab.gz
+f7e30143b828d9579a885df15166c437 *data/friendship.RData
+ef951783ffa4e6d1c30ba0ae5d826f95 *data/grunfeld.txt.gz
 350bdb7fcd6af5111de558dc4d95dbdc *data/hoff.tab.gz
-21e5c0751ad485d6f448f45b327f8296 *data/homerun.txt.xz
-5b1c5c74480e42e2f30dd4073339c996 *data/immi1.tab.bz2
-21ff00f1c79aee15b3f1d5b8bcbd2ee8 *data/immi2.tab.bz2
-8ef232b45f22946cded1429acb6214a6 *data/immi3.tab.bz2
-75769e27e13bd6ac0332421b54f5ea20 *data/immi4.tab.bz2
-816d147c1d03cb1c975f40616129339d *data/immi5.tab.bz2
-a22a6142a8adf930c06bd35bdaea3a3e *data/immigration.tab.bz2
+96667c7fa64956e37c98d27da28d6323 *data/homerun.txt.gz
+4b5a0ad83503b53d3938ef096f0b49dc *data/immi1.tab.gz
+ad3aeedcfc3efaf07b97eed891eb54a4 *data/immi2.tab.gz
+20e7a626848c89890dd244a1f5f5fe3c *data/immi3.tab.gz
+c9e7da59ab5939e3ab3a1b13997c6066 *data/immi4.tab.gz
+c78cb1b6027462372e6554ca9347ec02 *data/immi5.tab.gz
+95877625cd68d0528e0e82c44991539c *data/immigration.tab.gz
 758ac52b426648bfdfa6cb5890525322 *data/klein.txt.gz
 4b90f1abe69813998c0e883ea50d8d1d *data/kmenta.txt.gz
-d9c7b186a14fecbe1de9de9a92678a07 *data/macro.tab.xz
-d4d8ae34bc9283fb37eed1e4823b7530 *data/mexico.tab.bz2
-f0f226b3b844530f36498e42c75b1703 *data/mid.tab.bz2
-c10afea1fb3a11e9aa8b6c5717bc6e2f *data/newpainters.txt.bz2
-839ca4b77a23441da065a7db969b8882 *data/sanction.tab.bz2
-a263796f7d1d4dacd9104088e923cb29 *data/sna.ex.RData
-bc8dca44e8c9f5b07da91bc0a10cb26a *data/swiss.txt.bz2
+a40a04e03f5a6b7c6fb5eb2df4e114e8 *data/macro.tab.gz
+80de03b905bf13c6a8f6fc0f4656dc84 *data/mexico.tab.gz
+b533bad8842a7e90edec8e48fae4344f *data/mid.tab.gz
+95428f80b455ff968eaa1f1664de79b7 *data/newpainters.txt.gz
+021b1ecd5eb60a3473ae630319248695 *data/sanction.tab.gz
+4056a7cc6e8f06f472496c0304828584 *data/seatshare.rda
+336854cdacb726631f77466eb046efd7 *data/sna.ex.RData
+e68f058f062c39262205a8284c04322f *data/swiss.txt.gz
 6ac34a147ed09bb7cbc393d35382cd85 *data/tobin.txt.gz
-b7ffde3c50f11549db21d920a600605d *data/turnout.tab.bz2
-6346b190eb5375586fbbb87cc8926124 *data/voteincome.txt.bz2
-b3278389e0d96f8465a1aa1ee5cdb070 *demo/00Index
-3b0efd341facce5bf27391cfb0e10d79 *demo/Zelig.HelloWorld.R
-6c5015399f7e0b91ac107fcbd790ce33 *demo/exp.R
-dbc45d1cfb23ef9e0741e13a985aae74 *demo/factor.bayes.R
-9a1be8041e1ec3b22b6ebfb641abf783 *demo/gamma.R
-5b05bfcdc9d10a518c4ede23e2c44400 *demo/gamma.gee.R
-c3be56905783df81f90f20f61c5de12c *demo/gamma.survey.R
-4507c6194f692d249751fc798c8d08cf *demo/logit.R
-81c44823cfeccee63ed0614756972e06 *demo/logit.bayes.R
-47e45df8683896d5da1fda6e536e2a7f *demo/logit.gee.R
-49456e875d48127913cbb4965e3b5f9c *demo/logit.survey.R
-588688ad2bf33680472aca01c1928149 *demo/lognorm.R
-483314f54b7b8a8414a6c7c070952ce9 *demo/ls.R
-34a1e173102a8e3580402fd97af5f516 *demo/mi.R
-f5861bcbf70c2d7c2c70d81d86d39af1 *demo/mlogit.bayes.R
-7db69d65a9c2c46773eeb7a4b1f3a9de *demo/negbinom.R
-14c8b85bca57a7cb1451bc3dfb10e3fa *demo/normal.R
-495061a7c0b681610c99c1b0dcc28fdd *demo/normal.bayes.R
-4384f1f9cac3718c31713dcb589c1b4d *demo/normal.gee.R
-445550b68a8fa873ae2e3221921dac62 *demo/normal.survey.R
-ab9b753894065112812022419b4194e2 *demo/oprobit.bayes.R
-dd0649acf889bb4d92ff4a9ac0f1f94c *demo/poisson.R
-4f8d7638e1a58166b74ba97c4b78753f *demo/poisson.bayes.R
-637f75deedcde3780f381c550cb0007f *demo/poisson.gee.R
-c92a8fead665a2caa59d7c1dce82c0c5 *demo/poisson.survey.R
-c87c01971457e57842e3052a22ad607b *demo/probit.R
-0e63f5a84bc11793b58e3c10b366081b *demo/probit.bayes.R
-3063e1bb3f9c31ea1cc0cffd4d93d075 *demo/probit.gee.R
-1b1c5bd37ec746d04910dafd9e9d67e8 *demo/probit.survey.R
-7e02b50c97835c7f3a20fc51b10a5fbf *demo/relogit.R
-fb3e86404eb48271f47eb48c0dfe0db5 *demo/twosls.R
-aaf49ebca2bd4abd8b90a152da0f9b0e *demo/vertci.R
-0d0d1c14fbef07ec707ccbeaa1806efc *inst/doc/gamma.pdf
-167daeb0529d31f16b9f352c12736e5b *inst/doc/logit.pdf
-91030650a8d82f3f59d01187c630aef7 *inst/doc/ls.pdf
-ea1a15763d2d9cb490f624942e823784 *inst/doc/manual-bayes.R
-f654c2c81608dea5d6d611522454682e *inst/doc/manual-bayes.pdf
-d6e77d18e7c793cddb4305ac51cca002 *inst/doc/manual-gee.R
-768766facf9002600b590c5c2cbfa145 *inst/doc/manual-gee.pdf
-d1227d16d2cbd838ef3bac6641568c1b *inst/doc/manual.R
-b7d5c2e9d4861d5a31b564235eecc857 *inst/doc/manual.pdf
-b110ff07eda2be42ed1430efcb0c0051 *inst/doc/negbinom.pdf
-995acc30b4cf84e3555c850a6d2f677a *inst/doc/normal.pdf
-33df7df4e1113c852bffda80a0529056 *inst/doc/parse.formula.pdf
-a235b51b2668e7905bb97fca42a0d879 *inst/doc/poisson.pdf
-d8439c38a20251d17224b157326b7d8a *inst/doc/probit.pdf
-024a6c851932cfcf47054a89696cf5fd *inst/doc/twosls.R
-604b2e6ff000dc5b0d697181b59c3a21 *inst/doc/twosls.pdf
-bbc5b26d487010691036f3a2626e03c5 *inst/templates/DESCRIPTION
-1f675b08283031c5ed15607ae39eb3b8 *inst/templates/PACKAGE.R
-d41d8cd98f00b204e9800998ecf8427e *inst/templates/ZELIG.README
-fc182c4100f4789333fd2dd78bf7f92c *inst/templates/describe.R
-f17c6c109624641719be76a9e5ba5ede *inst/templates/param.R
-2aed8671075ebf1457a96da96879b28e *inst/templates/qi.R
-530c754f2afa6b440664b9b2cc040c75 *inst/templates/zelig2.R
-c9bd6c94c6336ebd9598acec18f97bc0 *man/GetObject.Rd
-4a74d5cdef2fbd4d6bfe97305cceac6c *man/GetSlot.Rd
-453e3d46105fc8199620512e0b6c4e82 *man/GetSlot.zelig.Rd
-784832952993d1e953cef1faf1335554 *man/MCMChook.Rd
+e479597b35b8b696e886e90a37946a5a *data/turnout.tab.gz
+4ee261cada9146f1cabf2df5066c2e24 *data/voteincome.txt.gz
+e525746279fd027400ee48d568771f03 *demo/00Index
+a9fad624825d526e5067e2409df97f85 *demo/demo-amelia.R
+c611996fadb7b4ed335bba95d05fda9e *demo/demo-data-table.R
+e0c8634e970b08cc9dc653051fe51110 *demo/demo-dplyr.R
+0d91b8ce900b4687ae8ca019b39cbaa9 *demo/demo-exp.R
+f0eb3a9a910994a06440cc3e8a79bde8 *demo/demo-factor-bayes.R
+0192fa39a7f468a12aaf6acf01062b29 *demo/demo-feedback.R
+1551ee39a2f804d0bbc6568a21d64e79 *demo/demo-gamma-gee.R
+e2d6e270d837a692cbcb6171fcd97f22 *demo/demo-gamma.R
+f264e4daf97834cefda3c6222874fbff *demo/demo-json.R
+2b53a708e3f5e7a7d0965e52be7b3c30 *demo/demo-logit-bayes.R
+3e5489eb432cace8e8ccdb8df294d99d *demo/demo-logit-gee.R
+f6f23c6ab9fc662bf8fcf14b08d91c5b *demo/demo-logit.R
+f0ad9a6e90ed6438cf0d210c54e124f4 *demo/demo-lognorm.R
+68cb46b64b0d4c5e4c28c2b60bfcc1ce *demo/demo-ls.R
+3d8bfcba47412887f66f144e9f675e22 *demo/demo-mlogit-bayes.R
+b060b17612c33e334c8ad63ff6d3e63b *demo/demo-negbinom.R
+d89fb97556e1a693aa43859b87cd4da0 *demo/demo-normal-bayes.R
+152364fd7fe4b58adabbbcf06e3390d9 *demo/demo-normal-gee.R
+7d8a0771c82765390f352178ca7d0f74 *demo/demo-normal.R
+ea0602add67e89226ae6fb683b660515 *demo/demo-oprobit-bayes.R
+41ce2668b451f5c2f66a566cb746ee39 *demo/demo-poisson-bayes.R
+aa03078575a1657c2eb076c0dfe2261e *demo/demo-poisson-gee.R
+70f78812f7bcfdbd5a8b8d22617c3961 *demo/demo-poisson.R
+20239e8aa9838180e06fd5ddaa1db2b4 *demo/demo-probit-bayes.R
+a57496c5d2bcc0baa81b393a7c63a73d *demo/demo-probit-gee.R
+b35646cfb2c447f91e46d9d5c83de9a2 *demo/demo-probit.R
+4f2dc0f4895f90aea438ceac3cee848b *demo/demo-quantile.R
+736f7a21886c5bbcf57db40b7dc5731d *demo/demo-range.R
+478ee8ed557f62591896b870f66da4e6 *demo/demo-relogit.R
+684ba2bc133f88bfaa6abfce5d0ef768 *demo/demo-roc.R
+4d1fc7a299e0167dbb838ac37afe9af1 *demo/demo-scope.R
+7248c3b2455d37e95d1aaf195e32a11a *demo/demo-setx-2.R
+c24ecf5eeb027bfaa9282eb4a8d786b9 *demo/demo-setx-fn.R
+843fafd67c3b006d54fcc8b808025f17 *demo/demo-setx.R
+faa230de3f733f092b0e164989bfbb9f *demo/demo-show.R
+8d17688b76ea32f350d8ace43b6f7e5c *demo/demo-signif-stars.R
+285465e8ed14733e60bc31921ff9866c *demo/demo-strata.R
+91941c2d2fb88ba97c6ab57c81997d87 *demo/demo-tobit.R
+153bd3beedc99704080d3d7318b4e061 *demo/demo-tobitbayes.R
+2295e2c8c2c417d743fe0fec7fdeb29a *demo/demo-weibull.R
+802e49193f38523d4b2c53042eee31b6 *demo/demo-wrappers.R
+705ed5d9b2ba6b974c8fc62b2821d784 *demo/demo-zip.R
+1991a163805286e252e97ca46aaa36df *inst/CITATION
+527a6a05ae881a3a54cba0522a4212df *inst/JSON/zelig5models.json
 6dffb5b20df0d6fa72529763c7f62a27 *man/MatchIt.url.Rd
-0c3084f4758abddde8685ff64c411db2 *man/Max.Rd
-f5e18a14c6b0d61846a0121daafb4b7c *man/McmcHookFactor.Rd
-3d559d57f5f1960561ab873c81549f89 *man/Median.Rd
-e356125658c18d9ce162080fc443e79c *man/Min.Rd
-9ad339b46e6e3653d3ee823eea2830d7 *man/Mode.Rd
+1b3aaacdbe0696c2819ef607d8b13201 *man/Median.Rd
+6fa194c4d41f831d93268af800d29e71 *man/Mode.Rd
 0641d8ba40e205316b2d2fbe0fb5eaf5 *man/PErisk.Rd
 58172f8c13fe9864a8ac2e26fbd391de *man/SupremeCourt.Rd
-5a035cf4a11e64ae120b7dc2f9700008 *man/TexCite.Rd
 fe15364db9b4e80c56722eec238629e7 *man/Weimar.Rd
-643e1e6e1be099a7018af34911cd710f *man/Zelig-package.Rd
+a57fc5867c8901359d2575a25a4cb201 *man/Zelig-ar-class.Rd
+c17aa3158358198923ec2f73cc016e00 *man/Zelig-arima-class.Rd
+9fedab8989602e846756b54f0f63b3fb *man/Zelig-bayes-class.Rd
+f00cb4b7c0b1269a0bcf3fcda1ff6c9b *man/Zelig-binchoice-class.Rd
+adcc9bafd98950ad536552665d2427ac *man/Zelig-binchoice-gee-class.Rd
+c2b68fcdf0e3ec8ce12a63f8976fde74 *man/Zelig-binchoice-survey-class.Rd
+e39cad5afe0e83f67853143be8995719 *man/Zelig-class.Rd
+4efd286f6d5304a753b4b0e0a77735fc *man/Zelig-exp-class.Rd
+5d358e886b8eae38c565296dfeb99dd0 *man/Zelig-factor-bayes-class.Rd
+c0ff271634bcdfd4fb203c40195239dd *man/Zelig-gamma-class.Rd
+946ac982c28539b0cc912d09b614f8b6 *man/Zelig-gamma-gee-class.Rd
+baa39da194a3d3703424884cd58f4870 *man/Zelig-gamma-survey-class.Rd
+f66f262a52a512f898d804921edc0f27 *man/Zelig-gee-class.Rd
+2eb0a064dd90091b3683f9823b82032e *man/Zelig-glm-class.Rd
+e2a6a7c531f2b990df37d8e07d855070 *man/Zelig-logit-bayes-class.Rd
+8518010b27a4d91fb4e08d0db8a97245 *man/Zelig-logit-class.Rd
+dfaca9363822a44ec9549fcf84633221 *man/Zelig-logit-gee-class.Rd
+94376fc321915dbe987afd4044132916 *man/Zelig-logit-survey-class.Rd
+31ba0d6618b6af7cd6ec179e4a8246e2 *man/Zelig-lognorm-class.Rd
+86a0159b33472bdb154ade3022c0876e *man/Zelig-ls-class.Rd
+ebe48d4ab399082e8b4c577987eb1288 *man/Zelig-ma-class.Rd
+30ac5351a32b847943b2396f7df4bf28 *man/Zelig-mlogit-bayes-class.Rd
+b3b6411a495e90973b0b8b324b1b393f *man/Zelig-negbin-class.Rd
+264a5d9e3b4c5b065f95e4adc9294102 *man/Zelig-normal-bayes-class.Rd
+bb7310b9852c4d2c7df880174fd627b3 *man/Zelig-normal-class.Rd
+4632d73d4dc32935513fa899d9f68650 *man/Zelig-normal-gee-class.Rd
+0f36f746d424303d16ae35232cbba5c0 *man/Zelig-normal-survey-class.Rd
+01008820ae4b088af4be82c6d5742001 *man/Zelig-oprobit-bayes-class.Rd
+58f615c5f56e6e947f296b0f44921750 *man/Zelig-poisson-bayes-class.Rd
+d88505525b77cd3df00833fe9e013d0b *man/Zelig-poisson-class.Rd
+eb602bc7c8db8c8abc5c2bb173d005ec *man/Zelig-poisson-gee-class.Rd
+1482435b0204be37d4940bccd6ed7ee3 *man/Zelig-poisson-survey-class.Rd
+2e09c43c4dd85ba41e762d2a46a39794 *man/Zelig-probit-bayes-class.Rd
+a406fafcf4da6a6541912c5f17a71533 *man/Zelig-probit-class.Rd
+7b11c18d278cabad05c83c922832a3e4 *man/Zelig-probit-gee-class.Rd
+8cdd47ed59486ccee9938a6b16594c70 *man/Zelig-probit-survey-class.Rd
+740d614e76dc353c792bf035262ad17b *man/Zelig-quantile-class.Rd
+fd22d33254e8cd3a29aef34684bfdb3b *man/Zelig-relogit-class.Rd
+34081b184244f98fe869dc25860dd3b5 *man/Zelig-survey-class.Rd
+2a84e12cc3707485041f7e39acdfbc27 *man/Zelig-timeseries-class.Rd
+013ea0128c082056761dbfca462a9e35 *man/Zelig-tobit-bayes-class.Rd
+263884e6e68641f86c4be6b4ba2c9695 *man/Zelig-tobit-class.Rd
+b0629bf43b9a3e9618c9a84c5b274358 *man/Zelig-weibull-class.Rd
 8ded77c2eb2977abe80918da28c0782a *man/Zelig.url.Rd
-233f3a62ca4cd15cbd9bcfa16a979881 *man/ZeligDescribeModel.Rd
-aead67c0c6da91ab1d7f19af2824d037 *man/ZeligListModels.Rd
-6fa7bfa9d92779c30481d6f113bde54a *man/ZeligListTitles.Rd
-e1a3a7386d920fa043fb322abe8756fe *man/alpha.Rd
 7e5422c7821d99df3cd21a9e789c5cb6 *man/approval.Rd
-b0e49b8c8af1a58c1ffec7f9c5fb85da *man/as.bootlist.Rd
-98c5b6a6e86079e6dc3a840d6294ed3f *man/as.bootvector.Rd
-c6197a492a799f5028bbfaffeae74cff *man/as.data.frame.setx.Rd
-cb25845ba9fdb44eef75f01afc59305e *man/as.description.Rd
-68fed7a45cb4432bdf9f66da04d5d7b6 *man/as.description.description.Rd
-ea835225f64f0b57d3ff642d5883d119 *man/as.description.list.Rd
-738db6158fd3beabb5ced5403beb98ba *man/as.matrix.pooled.setx.Rd
-851f39fc7c78240e2e69df1d737da0a8 *man/as.matrix.setx.Rd
-10958eb02ef264ad5a639432294b848c *man/as.parameters.Rd
-b6d87e40368e8f26413112fe647d3b4f *man/as.parameters.default.Rd
-2f5a002c1dc83ddc03c1bfae46fab8e8 *man/as.parameters.list.Rd
-c6d321f1daca4d0a333c8e590b8c4d36 *man/as.parameters.parameters.Rd
-b45437817da882d869e3017a3ccefc3a *man/as.qi.Rd
-10f4fcd3618643b9339a8288b4ad1406 *man/as.qi.default.Rd
-b64e11467fc8952a22b1cc95d1601f10 *man/as.qi.list.Rd
-5d9f612735bf8a60c2c2f49f736d5217 *man/as.qi.qi.Rd
-3cf9ae08fd13f68ebf7c0efaafe31365 *man/as.summarized.Rd
-862925d5cde1fc83b59f74a0752668d6 *man/as.summarized.list.Rd
-2c47c7167bc70c1fcf7d8b96a2d2b0f9 *man/as.summarized.summarized.qi.Rd
-6828b0d881bc787ab5d08665770916ec *man/attach.env.Rd
+92968bbf5ff34c74ccee6083b0f259e0 *man/avg.Rd
 83d85754bfcbadc14cfe8dc538007d0b *man/bivariate.Rd
-41d681b024e1156e65dbf19ef732b68d *man/bootfn.default.Rd
-45ab871f55572cfe62b1e5954a2460a8 *man/bootstrap.Rd
-4c937f3a46fa2c4cd17636a6629cf107 *man/bootstrap.default.Rd
-06b3b50467d814f0232240357c683547 *man/bootstrap.gamma.Rd
-42617ae2cf1b45be1c70f2657db9a934 *man/bootstrap.negbinom.Rd
-70d4bf51840d417a42756b30be007553 *man/bootstrap.normal.Rd
-524cb5ea071b054abed5c4d4958c06dd *man/callToString.Rd
-5a0f6a763f1b4e93bfb0c3675cf1f5f4 *man/cite.Rd
-294f05247a62c832331330d9263fcee7 *man/cluster.formula.Rd
-f57c88e9649b4188a10635c6833bc33c *man/cmvglm.Rd
+1b0dafcdc7958d53148aa17fe2fdf0f7 *man/ci.plot.Rd
+6d39b5a3abbd52c9e9dae565de6ed13f *man/cluster.formula.Rd
 3b01d1373c2b9f311a70f150f8a3e7cf *man/coalition.Rd
 d9588301df675d5e63882097e8130ea2 *man/coalition2.Rd
-6b1d516559cd05f32dc64b14a40ff321 *man/coef.parameters.Rd
-f60d8fa916719234a99bcfc58fa44977 *man/combine.Rd
-ba3a632f7ec6a5f903ebfd1465448cb7 *man/constructDataFrame.Rd
-55a88929afcdbc4d351ea8500bc795ec *man/constructDesignMatrix.Rd
-4b09bd9ab242c0b67e5626e0b7d32da2 *man/depends.on.zelig.Rd
-98315ff01f7c1ecd2ad1c7cc96ebea1d *man/describe.Rd
-34336df2b30c26705d57728707ef48fd *man/describe.default.Rd
-637599adac074b6ceb1e63711e39e7ac *man/describe.exp.Rd
-4d646e38b5d6d52b162fffd1ef152c9a *man/describe.gamma.Rd
-f142f11f4df7402bcfd27882facb9465 *man/describe.logit.Rd
-8681bc0f95fbf3cc724fe45a3888f12c *man/describe.ls.Rd
-a3d647c361183857fdab12c3465b2b2e *man/describe.negbinom.Rd
-120b7375c8097e1cf5b8daf24aaeb164 *man/describe.normal.Rd
-39e04467b04c947a7647acf3283f2a40 *man/describe.poisson.Rd
-644d8e676e7735a8043189b78a70523c *man/describe.probit.Rd
-16d54cde09a2ada7394b2c02435b1287 *man/describe.tobit.Rd
-87c6fd1b4f212d2592c112e0682f8590 *man/describe.zelig.Rd
-6f08d366da6bc44fdd951a705e8115f1 *man/description.Rd
+f181a5bdf146e9408f9a2c062933aec4 *man/coef-Zelig-method.Rd
+2483f73d340a18257e80b39ac0569429 *man/createJSON.Rd
 11ad69ed866d12262602fc3b786091d4 *man/eidat.Rd
-73a2f7194321c4edeb5d69c878c37167 *man/find.match.Rd
+beeec8603d8ac0afa41d9d94fb81aa08 *man/fitted-Zelig-method.Rd
 d8e4df6b181afc5e10fee0d774850c90 *man/free1.Rd
 788c8364b3a5ff56275ed6f1de9b7790 *man/free2.Rd
 9308b489e0e1fa78db5aa3a0267058c1 *man/friendship.Rd
-3c0993ec2cccedfa85723963fd153611 *man/get.package.Rd
-2d06d33e4f904368f053bb261437a862 *man/getPredictorTerms.Rd
-465423b551f5a638a20bd99550f3c157 *man/getResponseTerms.Formula-not-formula.Rd
-221b400f09d18267827cc6d864d81f5e *man/getResponseTerms.Rd
-cbc0c02ce6572fc96d8d2c8713baed62 *man/getResponseTerms.formula.Rd
-9becd5adc4ce12ee099cdfbb41a87712 *man/getResponseTerms.list.Rd
 1f77e073ad9ed30b57064d284fe0f2a6 *man/grunfeld.Rd
-face801c31d1dc268b6289a1ea5aa8c0 *man/has.zelig2.Rd
-e9d755c538423b59f86ae485fd9f615f *man/help.zelig.Rd
 2c288905c76033d24c8f7f57b0d65d40 *man/hoff.Rd
-5f0c67b452fcfdfb90eb29a5d8ed1097 *man/homerun.Rd
-065cad3e06bc5b280ad685677abb0d74 *man/ignore.Rd
+83362d717a9795a33d0e11c8d90ad7cc *man/homerun.Rd
 20131069ca801fde8baa331de4b7657e *man/immigration.Rd
-bd950ad3a6dd8c54ad6655872c7dfb69 *man/is.formula.Rd
-338f8a5835bea2f84b7fa6dcf0af657e *man/is.qi.Rd
-b681dcd3ebf33d9c5ceeb51ef40c826f *man/is.valid.qi.list.Rd
-4b386091dbdb2f05991417274ba37d1f *man/is.zelig.compliant.Rd
-c7ee6bc2ceeb30482f0340167d36a9f7 *man/is.zelig.package.Rd
 81c4ba627b9e0c72a52277a18b8baa7a *man/klein.Rd
 e01f00d574aa52df6ae5c03e836c79b3 *man/kmenta.Rd
-9a85bd994b7c1306c6721151f15566de *man/link.Rd
-897e4e2473be3f9de1c597c3270069f0 *man/linkinv.Rd
-581ff0fd47c5280102e0c32ac3cb188e *man/list.depth.Rd
-b0a27dc8fbd7472a907ce1efcd5d61d8 *man/list.zelig.dependent.packages.Rd
-c67df5f8da39b1d03d9057a70d525a6b *man/list.zelig.models.Rd
-631d28c57a183d19abc2c3393469d7de *man/loadDependencies.Rd
 58bda9cf87e4f5c413a73aedc14bb159 *man/macro.Rd
-2ba5cbca95a93f318d474f6b3fb69832 *man/make.parameters.Rd
-6b04dd54072499f51a6d907887b6ff41 *man/makeModelMatrix.Rd
-7f77974ebd56cb8c3cb286a7a871c42c *man/makeZeligObject.Rd
 f9c9396da5c2e9ab849dd470df38c0f5 *man/mexico.Rd
-0d2a6b5e4916ff0c691c773a91e5644a *man/mi.Rd
-485a9a9403ecf50f15440f191801f2a2 *man/mid.Rd
-67fd27df704501a7488a7354343b9c8d *man/mix.Rd
-f2989d1582d56b7ed47a09fd179936ff *man/model.frame.multiple.Rd
-71b500e88dc689d6991e0267994c7c20 *man/model.matrix.multiple.Rd
-78206eb5459fe64390498ae12548b4b1 *man/model.matrix.parseFormula.Rd
-5254acd8bc4fef34301afc991fc07252 *man/multilevel.Rd
-d1c1a887fa9678ca30d132892ef762de *man/name.object.Rd
-d7cda1a9c4a73cdc91befb37c36a8901 *man/names.qi.Rd
+8c578fbc0e4ecb684033111f6db818ff *man/mid.Rd
 d7905236f8793559d3c452facbc3ea4c *man/newpainters.Rd
-e04b2f0a71aa447253375a12f195a3ea *man/param.Rd
-803d9a00ea8f7e3d8a75ab6feae27931 *man/param.default.Rd
-cd29ee9bdf3ea6256769153fde38869c *man/param.exp.Rd
-f76209fa73c1b36644ac56b233d4122f *man/param.gamma.Rd
-c94a6c248d1fc6a88ce7ed8063277651 *man/param.logit.Rd
-1b5d754ef9e96e292000f42271c183b9 *man/param.ls.Rd
-55f5a3a524678ffe2af11db1ea84ccd2 *man/param.negbinom.Rd
-01e5d1cd2e766188a57c2fd87f4bf91a *man/param.normal.Rd
-90b8bc399c84ae33dfd3b1a365044852 *man/param.poisson.Rd
-4ceab9c1912859dd59a9cb8b1e1d11ce *man/param.probit.Rd
-39b0c8be9d995f7642b1f9519c5efc82 *man/param.relogit.Rd
-3e6d072f5a4059bdad5873ff32017eca *man/param.relogit2.Rd
-d0b92597318ad87473d4d10c7cfce53c *man/param.tobit.Rd
-c2ca077a4c40ab85f6f3a248d1a0acf7 *man/parameters.Rd
-50e3c177cba2d0c0b122d85c43cc09b4 *man/parse.formula.Rd
-4b0740eaeb69d90ba468ef6575bf3563 *man/parseFormula.Rd
-7d20becde042ea0facb79e610cb71122 *man/parseFormula.formula.Rd
-821d71ca6ae7206b242590dedaa6864a *man/parseFormula.list.Rd
-f2eb66e75eaab06acbc38b201dd7d965 *man/plot.MI.sim.Rd
-892eb691848679fffe6e366ebc9ce4b5 *man/plot.ci.Rd
-b8aef126fd49de819fff3453cc20d359 *man/plot.pooled.sim.Rd
-34507a7ed081d653c8e5509d0c959a58 *man/plot.sim.Rd
-d0c103e40b38d54a4108a9c6013104aa *man/plot.simulations.Rd
-ac17ad9be163a17d0a5d7b30350d4b76 *man/print.qi.Rd
-367b0c6d18525152b27cb1013a3f9555 *man/print.qi.summarized.Rd
-b27ee0bc8c9491f75c953ca27fc24d7b *man/print.setx.Rd
-df13b5983368d41f3612043d95b38a35 *man/print.setx.mi.Rd
-b5072e4e129ba0b28c7f5c6ea47dcf2e *man/print.sim.Rd
-ef5ee63ca6e4f23c25a63914ca16feec *man/print.summary.MCMCZelig.Rd
-fa80a23aae29624ac831bb90f32c14ef *man/print.summary.pooled.sim.Rd
-ec8c2c06c81608e34f09fc5b7ed5653c *man/print.summary.relogit.Rd
-e66e81ef463297415de8ade84e242dc5 *man/print.summary.relogit2.Rd
-203891c1e2c0576052d2da6717399bb5 *man/print.summary.sim.Rd
-c7eb506e8c71f699afbc00d1c1b4fe7f *man/print.summarySim.MI.Rd
-6b28ce03dca907fa837480069fa56bad *man/print.zelig.Rd
-680cd1c79176cf28ef6c5a1dcca245f5 *man/qi.Rd
-b6bdef3602275edb000eb71e64d1ca59 *man/qi.exp.Rd
-2546cd1df4831fe7c1fb368f9d09ae53 *man/qi.summarize.Rd
-dde5d2eb226a14bbaf9541386b4407ce *man/reduceMI.Rd
-30941e963f829a38020b64a094212803 *man/relogit.Rd
-64db643c8032b1362cac56cdc9b98e26 *man/repl.Rd
-656fef44714f9e5f2cb63e39f9572368 *man/repl.default.Rd
-aa0c4a9184cb6a5f34d67827c0a64af6 *man/repl.sim.Rd
-a496bcce7e71378d22cd0938bf7563f7 *man/replace.call.Rd
-d46cf72bf76964907d1e15cee9a583c7 *man/robust.gee.hook.Rd
-9727c64c5b8d6e24045d78d689c5dbf7 *man/robust.glm.hook.Rd
-194900341a4145076a510bd4b3b69b2e *man/rocplot.Rd
+62e0a89586494f63f6426970bbff06ae *man/plot-Zelig-ANY-method.Rd
+87f7797532b2ed947e9eaa6105f275b7 *man/predict-Zelig-method.Rd
+2e6e3f9fe17f7bfa8f0e510faeaaf712 *man/qi.plot.Rd
+bca3950a9907c24c0ae3bc18305f474d *man/reduce.Rd
+f6d36f6ea7a798aa490d001001ec16f8 *man/relogit.Rd
+8b86a9fbfbf8b5f39958b3cc4d8f5ed4 *man/rocplot.Rd
 685e8fe4738e2aad2ad73d7f2388570b *man/sanction.Rd
-2443219ee36a1d7f1a995adfbb03eca2 *man/setx.MI.Rd
-fbca11d6a833ef32c79001dc7660f534 *man/setx.Rd
-35ae732054417b4dd15561df6eea76c2 *man/setx.default.Rd
-b828c382fe49b52e0768d3c8f58246fe *man/sim.MI.Rd
-c1506f57a058d26b1dcafaf9a5329e93 *man/sim.Rd
-0f8cd4ff64927ac5c040605c19eff20f *man/sim.default.Rd
-77cc07e347939e579b117c93ee9acd3b *man/simulation.matrix.Rd
-fdaa2a66e1a6f52bab44d95d13ffceb3 *man/simulations.parameters.Rd
-ed6b11c524a1bdf7164c42721bc23f8c *man/simulations.plot.Rd
+3a3cf6aabdba4eda08f28d587e164135 *man/seatshare.Rd
+781af65d857e568022ecc2f16dfd6c24 *man/setfactor.Rd
+aef4984b4b9e6eecc9606cacaa764558 *man/setval.Rd
+9799eb33918050382009d7d58312374f *man/setx.Rd
+85dd7a8c95a763edb60858cbda5e5e7b *man/sim.Rd
+1a553fcae192d6c3ba3fbd7c3e8e03df *man/simacf.Rd
+e4fee54a953b478f0de1812a5d708892 *man/simulations.plot.Rd
 1eab2cf2e0c82ea67d16c8ad05571a9c *man/sna.ex.Rd
-54d6dd5011a574c16c7f494eae45fc48 *man/special_print_LIST.Rd
-781ec28f6c60ee7aaece1571725a3922 *man/special_print_MATRIX.Rd
-7a064c38895979a1f9562891031c82fd *man/splitUp.Rd
-a84301cb431946f8887d01cc756ef364 *man/store.object.Rd
-4f966930f5b6422521bb408266b1d154 *man/structuralToReduced.Rd
-64a34068369970e69c9fb556d3aed895 *man/summarize.Rd
-a87af5212ad0e40d71379862d6dc2551 *man/summarize.default.Rd
-3879f433725da0db03d1cb6600e1028f *man/summary.MI.Rd
-288c2380bbb272af77d89d70ec648644 *man/summary.MI.sim.Rd
-0e1ad76e17a9597f7218d3863cc60c1d *man/summary.Relogit2.Rd
-2ba6219325aee97b57e05e13d1a61e21 *man/summary.glm.robust.Rd
-ee86a5246f90b4ed876b026442cac539 *man/summary.pooled.sim.Rd
-1bd5a6763e3d675293bd4449a43d0746 *man/summary.relogit.Rd
-df0d723d1afa54ac3ee04f2379c9b43d *man/summary.sim.Rd
-6459266f8831aec535e4b81000b45d83 *man/summary.zelig.Rd
+6934c81e538e6176f35c044d6f0c5bd1 *man/stat.Rd
+e65e17b444cd67b7ed1bc63caf10cc80 *man/statlevel.Rd
+a69e24b76c6139900bdb0ac1d7ccfcf4 *man/statmat.Rd
+aec137e9a15b58e5e1e28832fd71adb2 *man/summary-Zelig-method.Rd
+07836e99b57abd4ec83e791427a8b4e4 *man/summary.Arima.Rd
 ca14c12e0087b1815d741b09dba0f5cc *man/swiss.Rd
-29cd4b01a20aedd254d64c8bddf6f481 *man/t.setx.Rd
-e03c72941cd05a1a87ec1e96bf4e7a2f *man/table.levels.Rd
-2b62155d275a1850ce6834c70b92b2b6 *man/terms.multiple.Rd
-5c3cd23a97d6f35d4e9cbd649875a14d *man/terms.vglm.Rd
-0cd8cf800eb4b6de6fdc253078af0f56 *man/terms.zelig.Rd
-77f7851d9f7813d81f8e461fd42c7015 *man/termsFromFormula.Rd
-6ff4e69066aedfcd7882e397d91b1dfa *man/toBuildFormula.Rd
+87791b4b338ca4bff2eca4739f2073e9 *man/table.levels.Rd
 a75e0696550ade6ffe2e44144e98d75b *man/tobin.Rd
-b0c4b0f3838717ea98f43da5fe4f8b25 *man/tolmerFormat.Rd
 f7b42178326db13f916c540e911d3864 *man/turnout.Rd
-54d6ad9e9be6c4abc531fd18c4d0b07a *man/ucfirst.Rd
-69c49f3e5d195827788453416aad89f0 *man/user.prompt.Rd
+dcbffb854770136e46cb8750fdf627b1 *man/vcov-Zelig-method.Rd
 01c9c5b45b38c6240e5a1831e153390c *man/voteincome.Rd
-08bb4fc168852c1af1dfe900a99e416e *man/z.Rd
-d5a57f78acdf7e3665275691b7d51d0d *man/zelig.Rd
-db5d3d911b778db51d0ed336355060d4 *man/zelig.call.Rd
-bf3bf749ecafeb03ebaf312ce24e8751 *man/zelig.skeleton.Rd
-73478e1acb2a5ab5a2ddce166461fe85 *man/zelig2-bayes.Rd
-982fbf939e18d3a501c1a4edd9660e71 *man/zelig2-core.Rd
-142acdbd460032c48b2fa9ab46ae9ae2 *man/zelig2-gee.Rd
-92798192843543bd475f7a1127abebcd *man/zelig2-survey.Rd
-0582491d8b2073639b1202c677d839ce *man/zelig2.Rd
-585a585b5c00f26b8d87ea34234e8b58 *man/zeligBuildWeights.Rd
-438bb2e5447a9d566fbcae4657974c34 *po/R-en.po
-247f0490337165f500224fd5eab1de8b *tests/MatchIt.R
-7104b3beb85e184be2193da209497f77 *tests/amelia.R
-b47aea86fa83382112dfa9e531d4fabc *tests/by.R
-9ce2df193b74ae47a82024f81a35bf50 *tests/lognorm.R
-04720577fdbcc28544b396b55164efe9 *tests/mi.R
-c0458e644bb50ace45e4b89f3adc235a *tests/mix.R
-04b6f9b189a9fb6e4dbdfd959794521c *tests/models-bayes.R
-4712575f3142cbe8894db95db0393f87 *tests/models-core.R
-b64512358b907c88cab39c8808ddd62f *tests/models-gee.R
-7f82704b0c25112224ac2cdd89ebfaf9 *tests/models-survey.R
-312e3a847f2874327c4338f9bcd10820 *tests/plot-ci.R
-cff6618ed3f2a58687d22c115ab745af *tests/pooled.R
-32522f6db5aa52a087c7fed252054cd5 *tests/relogit.R
-4f5bf07089b9425a121ab6cdae418c6a *tests/summary.MI.R
-bbb4157a80472a791a3fa21a06eaf2a2 *tests/twosls.R
-55c2ecd46b3b0d9576b9aa82d0545124 *vignettes/Zelig.bib
-fa7f97b865034c25ca528ebfe02e0d0f *vignettes/Zelig.sty
-c9a0058c2df7ec58641689e43e66e9fc *vignettes/gk.bib
-a35da60f3f139a9a7cd6749353eb430f *vignettes/gkpubs.bib
+d7cbce1fcc16d6f6f15a293964d0996b *man/zelig.Rd
+8f8ea839fd68a3db16905f069ecc2bc5 *man/zeligACFplot.Rd
+2f870270f2b2aac1cc6efc7c8a6820cb *man/zeligARMAbreakforecaster.Rd
+e67d812ba401c50a5c45f12714c50685 *man/zeligARMAlongrun.Rd
+e13a43d880d3d4679056c7d519ec372a *man/zeligARMAnextstep.Rd
+9d058fa04748c97569c112f6cd951f1c *man/zeligArimaWrapper.Rd
+3eb6eb705a7b78a5751e97cec00eaadd *man/zeligPlyrMutate.Rd
+39495cdc9f15afdc79627ca1cc3f18bd *tests/testthat.R
+efb5dab067e591bd264fe510930fbbe4 *tests/testthat/test-logit.R
+331221df9c227e4a5f43257e15b5f1d2 *tests/testthat/test-lognom.R
+5e78a0dd0b2c77577d0e31fbc6744357 *tests/testthat/test-ls.R
+9a5ac5e2f69f4d34a7daf5e5e8d88a16 *tests/testthat/test-negbin.R
+9255d9ae3f8d7056e090cc97409a0653 *tests/testthat/test-poisson.R
+b19ab8b7e3f1e2090c19996a006f7065 *tests/testthat/test-probit.R
diff --git a/NAMESPACE b/NAMESPACE
old mode 100644
new mode 100755
index 79b17e4..226ec0f
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,276 +1,60 @@
-export(.getRandAndFixedTerms)
-export(.reduceFurther)
-export(.ZeligModelCategories)
-export(alpha)
-export(as.description)
-export(as.qi.default)
-export(as.qi.list)
-export(as.qi.qi)
-export(as.summarized.list)
-export(as.summarized.summarized.qi)
-export(as.summarized)
-export(bootstrap)
-export(cite)
-export(cmvglm)
-export(coef.parameters)
-export(combine)
-export(depends.on.zelig)
-export(describe.default)
-export(describe.gamma)
-export(describe.logit)
-export(describe.ls)
-export(describe.negbinom)
-export(describe.normal)
-export(describe.poisson)
-export(describe.probit)
-export(describe)
-export(description)
-export(GetObject)
-export(getPredictorTerms)
-export(getResponseTerms)
-export(GetSlot.zelig)
-export(GetSlot)
-export(has.zelig2)
-export(help.zelig)
-export(ignore)
-export(link)
-export(linkinv)
-export(loadDependencies)
-export(make.parameters)
-export(Max)
-export(MCMChook)
-export(McmcHookFactor)
-export(Median)
-export(mi)
-export(Min)
-export(mix)
-export(Mode)
-export(param.default)
-export(param)
-export(parameters)
-export(parse.formula)
-export(parseFormula)
-export(plot.ci)
-export(print.setx)
-export(print.zelig)
-export(qi)
-export(reduceMI)
-export(relogit)
-export(repl)
-export(robust.glm.hook)
-export(robust.gee.hook)
-export(rocplot)
-export(setx)
-export(sim)
-export(simulation.matrix)
-export(simulations.parameters)
-export(splitUp)
-export(structuralToReduced)
-export(summarize)
-export(summary.MI.sim)
-export(summary.sim)
-export(summary.zelig)
-export(termsFromFormula)
-export(TexCite)
-export(tolmerFormat)
-export(user.prompt)
-export(z)
-export(zelig.call)
-export(zelig.skeleton)
-export(zelig)
-export(zelig2exp)
-export(zelig2factor.bayes)
-export(zelig2gamma.gee)
-export(zelig2gamma.survey)
-export(zelig2gamma)
-export(zelig2logit.bayes)
-export(zelig2logit.gee)
-export(zelig2logit.survey)
-export(zelig2logit)
-export(zelig2lognorm)
-export(zelig2ls)
-export(zelig2mlogit.bayes)
-export(zelig2negbinom)
-export(zelig2normal.bayes)
-export(zelig2normal.gee)
-export(zelig2normal.survey)
-export(zelig2normal)
-export(zelig2oprobit.bayes)
-export(zelig2poisson.bayes)
-export(zelig2poisson.gee)
-export(zelig2poisson.survey)
-export(zelig2poisson)
-export(zelig2probit.bayes)
-export(zelig2probit.gee)
-export(zelig2probit.survey)
-export(zelig2probit)
-export(zelig2relogit)
-export(zelig2tobit)
-export(zelig2twosls)
-export(ZeligDescribeModel)
-export(ZeligListModels)
-export(ZeligListTitles)
-export(zeligBuildWeights)
-S3method("[[",qi)
-S3method("[[",zelig)
-S3method(as.data.frame,setx)
-S3method(as.description,description)
-S3method(as.description,list)
-S3method(as.matrix,pooled.setx)
-S3method(as.matrix,setx)
-S3method(bootstrap,default)
-S3method(bootstrap,gamma)
-S3method(bootstrap,negbinom)
-S3method(bootstrap,normal)
-S3method(coef,parameters)
-S3method(coef,zelig)
-S3method(describe,default)
-S3method(describe,exp)
-S3method(describe,factor.bayes)
-S3method(describe,gamma.gee)
-S3method(describe,gamma.survey)
-S3method(describe,logit.bayes)
-S3method(describe,logit.gee)
-S3method(describe,logit.survey)
-S3method(describe,logit)
-S3method(describe,lognorm)
-S3method(describe,ls)
-S3method(describe,mlogit.bayes)
-S3method(describe,negbinom)
-S3method(describe,normal.bayes)
-S3method(describe,normal.gee)
-S3method(describe,normal.survey)
-S3method(describe,normal)
-S3method(describe,oprobit.bayes)
-S3method(describe,poisson.bayes)
-S3method(describe,poisson.gee)
-S3method(describe,poisson.survey)
-S3method(describe,poisson)
-S3method(describe,probit.bayes)
-S3method(describe,probit.gee)
-S3method(describe,probit.survey)
-S3method(describe,relogit)
-S3method(describe,tobit)
-S3method(describe,twosls)
-S3method(describe,zelig)
-S3method(getResponseTerms,formula)
-S3method(getResponseTerms,Formula)
-S3method(getResponseTerms,list)
-S3method(logLik,zelig)
-S3method(model.frame,multiple)
-S3method(model.matrix,multiple)
-S3method(model.matrix,parseFormula)
-S3method(names,qi)
-S3method(param,default)
-S3method(param,exp)
-S3method(param,factor.bayes)
-S3method(param,gamma.gee)
-S3method(param,gamma.survey)
-S3method(param,gamma)
-S3method(param,logit.bayes)
-S3method(param,logit.gee)
-S3method(param,logit.survey)
-S3method(param,logit)
-S3method(param,lognorm)
-S3method(param,ls)
-S3method(param,mlogit.bayes)
-S3method(param,negbinom)
-S3method(param,normal.bayes)
-S3method(param,normal.gee)
-S3method(param,normal.survey)
-S3method(param,oprobit.bayes)
-S3method(param,poisson.bayes)
-S3method(param,poisson.gee)
-S3method(param,poisson.survey)
-S3method(param,probit.bayes)
-S3method(param,probit.gee)
-S3method(param,probit.survey)
-S3method(param,relogit)
-S3method(param,relogit2)
-S3method(param,tobit)
-S3method(param,twosls)
-S3method(parseFormula,formula)
-S3method(parseFormula,Formula)
-S3method(parseFormula,list)
-S3method(plot,MI.sim)
-S3method(plot,pooled.sim)
-S3method(plot,sim.cloglog.net)
-S3method(plot,sim.gamma.gee)
-S3method(plot,sim.logit.gee)
-S3method(plot,sim.normal.gee)
-S3method(plot,sim.poisson.gee)
-S3method(plot,sim.probit.gee)
-S3method(plot,sim.twosls)
-S3method(plot,sim)
-S3method(plot,zelig)
-S3method(print,qi.summarized)
-S3method(print,qi)
-S3method(print,setx.mi)
-S3method(print,setx)
-S3method(print,sim)
-S3method(print,summary.MCMCZelig)
-S3method(print,summary.pooled.sim)
-S3method(print,summary.relogit)
-S3method(print,summary.relogit2)
-S3method(print,summary.setx)
-S3method(print,summary.sim)
-S3method(print,summary.MCMCZelig)
-S3method(print,summaryMI)
-S3method(print,summarySim.MI)
-S3method(print,zelig)
-S3method(qi,exp)
-S3method(qi,gamma.gee)
-S3method(qi,gamma.survey)
-S3method(qi,gamma)
-S3method(qi,logit.bayes)
-S3method(qi,logit.gee)
-S3method(qi,logit.survey)
-S3method(qi,logit)
-S3method(qi,lognorm)
-S3method(qi,ls)
-S3method(qi,mlogit.bayes)
-S3method(qi,negbinom)
-S3method(qi,normal.bayes)
-S3method(qi,normal.gee)
-S3method(qi,normal.survey)
-S3method(qi,normal)
-S3method(qi,oprobit.bayes)
-S3method(qi,poisson.gee)
-S3method(qi,poisson.survey)
-S3method(qi,poisson)
-S3method(qi,probit.bayes)
-S3method(qi,probit.gee)
-S3method(qi,probit.survey)
-S3method(qi,probit)
-S3method(qi,relogit)
-S3method(qi,relogit2)
-S3method(qi,tobit)
-S3method(qi,twosls)
-S3method(repl,default)
-S3method(repl,sim)
-S3method(setx,default)
-S3method(setx,MI)
-S3method(sim,default)
-S3method(sim,MI)
-S3method(simulation.matrix,pooled.sim)
-S3method(simulation.matrix,sim)
-S3method(summarize,default)
-S3method(summary,glm.robust)
-S3method(summary,MCMCZelig)
-S3method(summary,MI.sim)
-S3method(summary,MI)
-S3method(summary,pooled.sim)
-S3method(summary,Relogit)
-S3method(summary,Relogit2)
-S3method(summary,setx)
-S3method(summary,sim)
-S3method(summary,zelig)
-S3method(t,setx)
-S3method(terms,multiple)
-S3method(terms,vglm)
-S3method(terms,zelig)
-S3method(vcov,gee.naive)
-S3method(vcov,gee.robust)
-S3method(vcov,glm.robust)
-S3method(vcov,Relogit)
-S3method(vcov,zelig)
+import(sandwich, methods, survival, jsonlite, dplyr,
+       geepack, MCMCpack, coda, Amelia, MatchIt, maxLik, survey)
+
+importFrom("plyr", "llply")
+importFrom("MASS", "glm.nb", "rnegbin", "mvrnorm", "gamma.shape")
+importFrom("VGAM", "vglm")
+importFrom("AER", "tobit")
+importFrom("quantreg", "rq", "summary.rq", "bandwidth.rq")
+importFrom("grDevices", "col2rgb", "heat.colors", "rgb")
+importFrom("graphics", "abline", "axis", "barplot", "box", "image",
+             "layout", "lines", "par", "polygon", "text")
+importFrom("stats", "binomial", "complete.cases", "density", "glm",
+             "lm", "lm.influence", "median", "model.frame",
+             "model.matrix", "model.response", "na.omit", "quantile",
+             "sd", "terms", "update", "ARMAacf", "rnorm")
+
+importClassesFrom("VGAM", "vglm")
+importMethodsFrom("VGAM", "coef", "fitted", "predict", "vcov")
+
+
+S3method(summary, Arima)
+
+exportPattern("^[[:alpha:]]+")
+exportClasses(
+     "Zelig",
+     "Zelig-ls",
+     "Zelig-glm",
+     "Zelig-binchoice",
+     "Zelig-logit",
+     "Zelig-probit",
+     "Zelig-gamma",
+     "Zelig-exp",
+     "Zelig-negbin",
+     "Zelig-normal",
+     "Zelig-poisson",
+     "Zelig-lognorm",
+     "Zelig-tobit",
+     "Zelig-gee",
+     "Zelig-binchoice-gee",
+     "Zelig-logit-gee",
+     "Zelig-probit-gee",
+     "Zelig-gamma-gee",
+     "Zelig-normal-gee",
+     "Zelig-poisson-gee",
+     "Zelig-bayes",
+     "Zelig-factor-bayes",
+     "Zelig-logit-bayes",
+     "Zelig-mlogit-bayes",
+     "Zelig-normal-bayes",
+     "Zelig-oprobit-bayes",
+     "Zelig-poisson-bayes",
+     "Zelig-probit-bayes",
+     "Zelig-tobit-bayes",
+     "Zelig-weibull",
+     "Zelig-timeseries",
+     "Zelig-arima",
+     "Zelig-ar",
+     "Zelig-ma"
+)
+
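The Zelig-* classes exported above form Zelig 5's reference-class hierarchy,
so each model can also be driven through its class directly rather than
through the zelig() wrapper. A sketch, assuming the zls generator that pairs
with the Zelig-ls class:

    z5 <- zls$new()
    z5$zelig(Fertility ~ Education, data = swiss)
    z5$setx(Education = 10)
    z5$sim()
    summary(z5)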
diff --git a/NEWS b/NEWS
deleted file mode 100644
index 2784aa5..0000000
--- a/NEWS
+++ /dev/null
@@ -1,148 +0,0 @@
-The Zelig core team is pleased to announce the alpha release of Zelig 4.
-
-Designated as the "Developer Update", Zelig 4 offers a wide range of improvements to ease the process of adding new statistical models to the already extensive Zelig software suite. Significantly, this release is packaged with a brand-new API, geared towards reducing the complexity and length of Zelig's development functions - the zelig2, param and qi methods. In addition to this, Zelig now ships with a package creator (zelig.skeleton) that operates in the same vein as R's core function  [...]
-
-In addition to changes in the development toolkit, Zelig has now been split across 13 distinct packages. This change has been made to refine the scope of Zelig and its add-ons. In particular, this restructuring of Zelig into a full software suite allows developers to contribute, develop and repair add-on packages without tinkering with the Zelig API and core functionality. 
-
-While this release's prime focus has been improving the developer toolkit and restructuring the software suite, Zelig 4 offers an end-user experience identical to previous versions. That is, Zelig's basic functions - zelig, setx and sim - remain essentially unchanged in functionality for available statistical models.
-
-For full details concerning changes between Zelig 3.5 and Zelig 4, please refer to:
- http://zeligdev.github.com/
-
-
-New Features
-------------
-
-Some of the new available features are:
-
-A revised developer API. The primary developer methods - zelig2, param and qi - have been reimplemented to use a sleeker, simpler API. For more information, please read the Zelig developer's manual found here:
-  http://zeligdev.github.com/files/booklet.pdf
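As a sketch of that API: a zelig2 function translates a zelig() call into a
call to the external fitting function, returning a list whose .function
element names the fitter. The return-list convention below is illustrative;
consult the developer manual above for the authoritative form.

    zelig2ls <- function (formula, ..., data) {
      # delegate estimation to stats::lm
      list(
        .function = "lm",
        formula   = formula,
        data      = data
      )
    }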
-
-The core package has been restructured and minimized. In particular, Zelig core now contains only code essential to its operation, while all non-essential tasks have been made into specific R-packages. For a complete list of official Zelig packages, please refer to:
-  https://github.com/zeligdev
-
-Development tools for contributors have been added to the core package. In particular, the "zelig.skeleton" function is packaged within Zelig-core in order to facilitate the rapid development of new Zelig packages.
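By analogy with package.skeleton, a call of roughly the following form lays
out a new model package (the argument names here are illustrative and not
checked against the shipped signature):

    zelig.skeleton("zeligMyModel", models = "mymodel",
                   author = "A. Developer", path = ".")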
-
-The Zelig software suite has grown to include a total of 7 R-packages. This change offers a simple and easy method for ensuring that development and bug-fixing within any particular Zelig add-on will leave the remainder of the Zelig software suite unchanged.
-
-A hook API has been integrated into the core package, in order to reduce the need to alter the zelig, setx and sim methods directly.
-
-Roxygen-compliant documentation has become standard in all Zelig packages. This offers an easy way to manage Rd documentation, dependencies and exports from within the R code itself. That is, documentation is more tightly paired with the actual R code. For more information about Roxygen, please refer to:
-  http://roxygen.org/
-
-
-GitHub
-------
-
-Zelig is now on GitHub! Fork an add-on package or contribute bug-finds today!
-
-For a full listing of official packages and their repositories, please see:
- https://github.com/zeligdev
-
-
-Links
------
-
-The following lists resources relevant to Zelig 4:
- * Website: http://zeligdev.github.com/
- * Package Repositories: https://github.com/zeligdev/
- * Installation Guide: http://zeligdev.github.com/files/zelig.pdf
- * Zelig Manual: http://zeligdev.github.com/#install
- * Available and Missing Add-on Packages: http://zeligdev.github.com/#models
-
-
-Questions
----------
-
-For any particular questions on developing new Zelig models, please send all mail to:
-  zelig at lists.gking.harvard.edu
-
-
-Zelig v4.0-4 Release Notes (May 16, 2011)
-
-
-Introduction
-================================================================================
-This document is a brief overview of the current state of the Zelig project as
-of the 4.0-4 release. This release aims to maintain the canonical Zelig syntax
-and interface for end-users, while supplying developers with tools to aid in
-the development of effective statistical modeling techniques. Emphasis has been
-placed on readability and modularity.
-
-As a result of this gargantuan change, a plethora of features, API
-functionality, and documentation has been added to the Zelig R-package. Several
-previously existing models, however, have been removed temporarily or moved
-from the Zelig core package to more-specific Zelig extensions.
-
-
-Project Information
-================================================================================
-The Zelig software suite is an easy-to-use R-package geared towards making
-complex statistical techniques available to end users, particularly those
-researching the quantitative social sciences. In particular, it offers a
-unifying syntax and programming style across seemingly disparate and unrelated
-statistical models.
-
-To facilitate this purpose, Zelig (as of May 16th, 2011) includes an array of
-programming tools, geared towards allowing the rapid development, debugging, 
-and inclusion of new statistical models. That is, Zelig now facilitates and
-encourages collaboration between novel and pre-existing statistical packages.
-
-
-Author Information
-================================================================================
-Zelig is a collaborative effort by Harvard's Institute for Quantitative Social
-Science (Harvard IQSS). Publications, software releases, and additional
-information can be found at:
-  http://gking.harvard.edu/
-  http://iq.harvard.edu/
-
-
-Licensing
-================================================================================
-Zelig is licensed under the GNU General Public License version 2, and as such
-can be freely used and edited given proper attribution to Harvard's IQSS
-Department.
-
-
-What's New in this Release?
-================================================================================
-This release offers a wide variety of coding-style changes, as well as changes
-to core functionality. Please carefully read the following:
-
-Major Changes (from version 3.5)
---------------------------------------------------------------------------------
-- Models are now added to Zelig as separate extensions. The method
-  'zelig.skeleton' has been added to the R-package to facilitate this change.
-- The main Zelig package now contains a mere 8 models. 22 additional models are
-  available via extensions, geared towards adding specific functionality to the
-  Zelig software suite.
-- zelig.skeleton: a method used to create blank zelig packages. This follows
-  the style and format of the R-core method 'package.skeleton'
-- Simplified zelig2-function API. See "?zelig2" within an R-session for help
-- Enhanced API for 'param' and 'qi' functions. See developer documentation for
-  more information
-
-Minor Changes (from version 3.5)
---------------------------------------------------------------------------------
-- Slight changes to the plotting of simulated quantities of interest. Most
-  changes are stylistic
-- Quantities of interest using two different sets of explanatory variables now
-  output information concerning the simulations of the second 'setx'
-  object's Predicted Values and Expected Values. This was previously not the
-  case
-- ZeligListModels: a method used to list the available models installed on
-  the current system.
-- More robust support for various ways of describing 'terms' of a statistical
-  model. This is essentially light error-detection
-
-Missing Features
---------------------------------------------------------------------------------
-- The 'setx' method does not currently support setting multiple
-  counterfactuals in a single call to 'setx'. This feature is under active
-  development, and should soon be merged into the main branch.
-- "ternaryplot" plotting style is ommitted from the core package, and will be 
-  instead moved to the 'bivariate' and 'multinomial' Zelig modules.
-- "Average Treatment Effect" quantities of interest are not being included in 
-  zelig models temporarily. Simulation of these qi's will return pending a 
-  minor update to the 'setx' function.
diff --git a/R/GetObject.R b/R/GetObject.R
deleted file mode 100644
index da78da3..0000000
--- a/R/GetObject.R
+++ /dev/null
@@ -1,10 +0,0 @@
-#' Extract the fitted model object from the Zelig object
-#'
-#' @param obj an object of type `zelig'
-#' @return the fitted model object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-GetObject <- function(obj) {
-  if (inherits(obj, 'zelig'))
-    obj$result
-}
diff --git a/R/GetSlot.R b/R/GetSlot.R
deleted file mode 100644
index 4f68ec8..0000000
--- a/R/GetSlot.R
+++ /dev/null
@@ -1,12 +0,0 @@
-#' Generic method for extracting variables from both
-#' S3 and S4 fitted model object
-#'
-#' @param obj an object of type `zelig'
-#' @param key a character-string specifying the name
-#'            of the variable to extract
-#' @param ... typically ignored parameters
-#' @return the value of that extracted object or NULL
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-GetSlot <- function(obj, key, ...)
-  UseMethod("GetSlot")
diff --git a/R/GetSlot.zelig.R b/R/GetSlot.zelig.R
deleted file mode 100644
index b9e99a9..0000000
--- a/R/GetSlot.zelig.R
+++ /dev/null
@@ -1,38 +0,0 @@
-#' Return a Value from a \code{zelig} Fitted Model
-#'
-#' Returns a value from the result of a model fitting function
-#' @usage \method{GetSlot}{zelig}(obj, key, ...)
-#' @note This function is primarily used by Zelig developers within \code{qi}
-#'   functions
-#' @param obj a \code{zelig} object
-#' @param key a character-string specifying the which value to extract from
-#'   the fitted model object  
-#' @param ... subsequent values to extract from the fitted model object
-#' @return values of the specified keys
-#' @export
-#' @author Matt Owen \emph{mowen@@iq.harvard.edu}
-GetSlot.zelig <- function(obj, key, ...) {
-  # expand dots
-  dots <- list(...)
-
-  # error-catching
-  if (!all(sapply(dots, is.character)))
-    stop("all dot parameters must be characters")
-
-  # get result of zelig object
-  obj <- obj$result
-  # extract the first requested value
-  res <- obj[[key]]
-
-  for (key in dots) {
-    # walk one level deeper for each additional key
-    res <- try(res[[key]], silent = TRUE)
-
-    # if it doesn't exist, then NULL
-    if (inherits(res, "try-error"))
-      return(NULL)
-  }
-
-  # return
-  res
-}
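For example, inside a qi function one might write (z.out being a hypothetical
fitted zelig object):

    GetSlot(z.out, "coefficients")      # z.out$result$coefficients
    GetSlot(z.out, "family", "link")    # z.out$result$family$link, or NULL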
diff --git a/R/MCMChook.R b/R/MCMChook.R
deleted file mode 100644
index b7e67f8..0000000
--- a/R/MCMChook.R
+++ /dev/null
@@ -1,75 +0,0 @@
-#' Hook to Clean-up MCMC Objects
-#'
-#' This method gives valid methods to the resulting MCMC object so that it can
-#' be used with Zelig.
-#' @note This function is used internally by the ZeligBayesian package.
-#' @param obj the fitted model object (in this case a \code{mcmc} object).
-#' @param model.call the call made to the external model
-#' @param zelig.call the actual call to zelig itself
-#' @param seed a seed for the MCMC algorithm
-#' @param ... ignored parameters
-#' @return an object useable by Zelig
-#' @author Olivia Lau, Kosuke Imai, Gary King and Matt Owen
-#' @export
-MCMChook <- function (obj, model.call, zelig.call, seed=NULL, ..., data = NULL) {
-
-  # Create a new object
-  res <- list()
-
-  attr(obj, "call") <- NULL
-
-  # Add the bare necessities for a zelig object
-  res$coefficients <- obj
-  res$formula <- zelig.call$formula
-  res$data <- data
-  res$model <- model.frame(eval(res$formula), data = data)
-  res$terms <- attr(res$model, "terms")
-  res$call <- model.call
-
-  # Ensure that a "seed" element exists
-  res$seed <- if (is.null(seed))
-    NA
-  else
-    seed
-
-  class(res) <- "MCMCZelig"
-
-  res
-}
-
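Roughly, the zelig machinery invokes this hook after the external MCMCpack fit
returns. The hand-built sketch below, using the turnout data shipped with the
package (the calls are quoted manually purely for illustration), shows what
the hook produces:

    fit <- MCMCpack::MCMClogit(vote ~ age, data = turnout)
    z <- MCMChook(fit,
                  model.call = quote(MCMClogit(vote ~ age, data = turnout)),
                  zelig.call = quote(zelig(formula = vote ~ age,
                                           model = "logit.bayes",
                                           data = turnout)),
                  data = turnout)
    class(z)  # "MCMCZelig"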
-#' Hook to Clean-up MCMC Factor Object
-#'
-#' This method gives valid methods to the resulting MCMC object so that it can
-#' be used with Zelig.
-#' @note This function is used internally by the ZeligBayesian package.
-#' @param obj the fitted model object (in this case a \code{mcmc} object).
-#' @param model.call the call made to the external model
-#' @param zelig.call the actual call to zelig itself
-#' @param seed a seed for the MCMC algorithm
-#' @param ... ignored parameters
-#' @return an object useable by Zelig
-#' @author Olivia Lau, Kosuke Imai, Gary King and Matt Owen
-#' @export
-McmcHookFactor <- function (obj, model.call, zelig.call, seed = NULL, ...) {
-
-  out <- list()
-
-  out$coefficients <- obj
-  out$formula <- zelig.call$formula
-  out$data <- zelig.call$data
-  out$model <- model.frame(eval(out$formula), eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  out$call <- model.call
-
-  # Factors have no intercept term?
-  attr(out$terms,"intercept") <- 0
-
-  if (is.null(zelig.call$seed))
-    out$seed <- NA
-  else
-    out$seed <- zelig.call$seed
-
-  class(out) <- "MCMCZelig"
-
-  out
-}
diff --git a/R/MLutils.R b/R/MLutils.R
deleted file mode 100644
index dfcd766..0000000
--- a/R/MLutils.R
+++ /dev/null
@@ -1,303 +0,0 @@
-#' Reduce MI Formulas
-#' Take a formula in either reduced or structural form and return
-#' the most reduced form of that formula
-#' @note This function is used primarily by 'zelig2' functions of multivariate
-#'   Zelig models
-#' @param f a formula
-#' @export
-#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-reduceMI <-function(f){
-        if(class(f)=="list")
-          f <- structuralToReduced(f)
-        return(.reduceFurther(f))
-}
-
-
-#' Transform the Multilevel's Structural Formulas Into Reduced Form
-#' @param f a list of formulas
-#' @return a formula in reduced form
-#' @export
-#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-# possible bug: what if class(f) is 'multiple' and not a list?
-structuralToReduced <- function(f){
-
-        ## input should be a list
-        if(class(f) != "list" || (class(f)=="list" && length(f)<2))
-          stop("the input should be a list of formulas")
-
-        ## take the first formula; It should be of length 3
-        main.fml <- f[[1]]
-        if(length(main.fml)!=3)
-          stop("the main formula in the extended form should be of length 3 !")
-        lhs<- main.fml[[2]]
-        TT <- terms(main.fml,specials="tag")
-        TT.labels <- attr(TT,"term.labels")
-        TT.vars <- attr(TT,"variables")
-        tagattr<-attr(TT,"specials")$tag
-        hastag<-!(is.null(tagattr))
-        if (hastag){
-                for(j in tagattr){
-                        lind<-j-1
-                        vind<-j+1
-                        tg<- .deparseTag(TT.vars[[vind]])
-                        whicheq<-which(names(f) %in% tg$label)
-                        if (length(whicheq)!=0)
-                          tg$label<-deparse(f[[whicheq]][[2]])
-                        else
-                          stop("one of the equation's name is expected to be ",tg$label)
-                        TT.labels[[lind]]<-.newTag(tg)
-                        res<-(as.formula(paste(lhs,"~",paste(TT.labels,collapse="+"))))
-                }
-        } else
-        stop("tag is missing in the first equation\n")
-        return(res)
-}
-
-#' Convert a Formula into 'lmer' Representation from Reduced Form
-#' Take a formula in its reduced form and return it as an 'lmer' representation
-#' (from the lme4 package). This basically amounts to removing the leading
-#' 'tag' from each term.
-#' @param f a formula in reduced form
-#' @return the 'lmer' representation of 'f'
-#' @export
-#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-tolmerFormat<-function(f){
-        lhs <- f[[2]]
-        tt <- terms(f, specials="tag")
-        tt.labels<-attr(tt,"term.labels")
-        for (i in 1:length(tt.labels)){
-                tt.labels[[i]]<-.trim(tt.labels[[i]])
-                tt.labels[[i]]<-gsub('^tag',"",tt.labels[[i]])
-        }
-        rhs <- paste(tt.labels,collapse="+")
-        res <- as.formula(paste(lhs,"~",rhs,sep=""))
-        return(res)
-}
-
-#' Further Reduce Formulas in Reduced Form
-#' Given a formula in a reduced form, output the most reduced one.
-#' @param f a formula in reduced form
-#' @return an even-more reduced formula
-#' @export
-#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-.reduceFurther <- function(f){
-        
-        if(length(f)!=3)
-          stop("the main formula in the extended form should be of length 3 !")
-        lhs<- f[[2]]
-        TT <- terms(f,specials="tag")
-        TT.labels <- attr(TT,"term.labels")
-        TT.vars <- attr(TT,"variables")
-        tagattr<-attr(TT,"specials")$tag
-        hastag<-!(is.null(tagattr))
-        lstOfTags<-c()
-        if (hastag){
-                for(j in tagattr){
-                        vind<-j+1
-                        lstOfTags<-c(lstOfTags,.expandTag(.deparseTag(TT.vars[[vind]])))
-                }
-        } else
-        stop("tag is missing in the first equation\n")
-        tmp <- paste(lstOfTags,collapse="+")
-        rhs<-paste(.replace(TT.labels,tagattr-1,tmp),collapse="+" )
-        return(as.formula(paste(lhs,"~",rhs,sep="")))
-}
-
-###
-## Helper function which takes the term with tag and return
-## all its parts
-##
-## input: a tag like call/list i.e. tag(z1,w1+w2 | state)
-## output: list(var= "z1", label="w1 + w2", id="state")
-
-.deparseTag <- function(f){
-
-        f <- as.character(f)
-        res<-list()
-        if(length(f) == 3){
-                ## tag(var,label|id) or tag(var,label)
-                res$var <- f[[2]]
-                tmp <- .trim(unlist(strsplit(f[[3]],"|",fixed=TRUE)))
-                if(length(tmp) == 2){
-                        ## tag(var,label|id)
-                        res$label <- tmp[[1]]
-                        res$id <- tmp[[2]]
-                }else{
-                        ## tag(var,label)
-                        if(length(tmp)==1){
-                                res$label <-tmp[[1]]
-                                res$id <- "none"
-                        }else
-                        stop("wrong use of tag function!!")
-                }
-                
-        } else {
-                ## tag(var|id)
-                tmp <- .trim(unlist(strsplit(f[[2]],"|",fixed=TRUE)))
-                res$var <- tmp[[1]]
-                res$id <- tmp[[2]]
-                res$label="none"
-        }  
-        return(res)
-}
-
-###
-## takes the output from .deparseTag (a list) and constructs a new tag
-## as a string,
-## i.e. takes list(var="z",label="w1",id="state") and outputs "tag(z,w1|state)"
-
-.newTag <- function(lst){
-
-        res <- "tag("
-        if (lst$var != "none")                        # must have var
-          res <- paste(res,lst$var,sep="")
-        else
-          stop("wrong use of tag(); variable is missing")
-        if (lst$label !="none"){                      ## tag(z,gamma??)
-                res <-paste(res,",",sep="")
-                res <- paste(res,lst$label,sep="")
-                if(lst$id != "none")                  ## tag(z,gamma|state)
-                  res <- paste(res,"|",lst$id,sep="")
-        }else{                                        ## tag(z|state)
-                res <-paste(res,"|",sep="")
-                if(lst$id !="none")
-                  res <- paste(res,lst$id,sep="")
-                else
-                  stop("wrong use of tab")  # tag(x |)
-        }
-        res <- paste(res,")",sep = "")
-        return(res)
-}
-
-
-###
-## expands tag.  tag(1,w1+w2 | state) => tag(w1|state) + tag(w2|state)
-##               tag(z,w1+w2 | state) => tag(z:w1|state)+ tag(z:w2|state)
-## input tag as a list; i.e the output from .deparseTag
-
-
-
-.expandTag <- function(l){
-
-        if(l$var == "1" && l$label!="none"){
-                ## tag(1,z1 | state) == tag (z1|state)
-                l$var <- l$label
-                l$label <- "none"
-          
-        }
-        if(l$label =="none"){
-                ## tag(1+z1|state)
-                vars<-unlist(strsplit(l$var,"+", fixed=TRUE))
-        }else{
-                ## tag(z1,w1+w2|state)
-                vars<-unlist(strsplit(l$label,"+", fixed=TRUE))
-        }
-        if(length(vars) == 1){
-                ## nothing to expand
-                return (.newTag(l))
-        }else{
-                alltgs<-c()
-                for(i in 1:length(vars)){
-                        if(l$label == "none")
-                          alltgs <- c(alltgs,.newTag(list(label="none",var=vars[[i]],id=l$id)))
-                        else
-                          alltgs <- c(alltgs,.newTag(list(label="none",var=paste(l$var,":",vars[[i]],sep=""),id=l$id)))
-                        
-                }
-        }
-        return (paste(alltgs,collapse="+"))
-
-}
-
-###
-## In the vector 'src', replace the elements at positions 'index' with the
-## elements of 'dest' (note: src[index] is dropped and 'dest' is appended)
-
-.replace<-function(src,index,dest){
-        "%w/o%" <- function(x,y) x[!x %in% y]
-        return (c(src %w/o% src[index],dest))
-
-        if(1==2){ # unreachable: the original order-preserving implementation
-                if(index <1 || index > length(src))
-                  stop("wrong index argument in function .replace")
-                if(index==1)
-                  beforeEls<-c()
-                else
-                  beforeEls<-src[1:(index-1)]
-                
-                if(index == length(src))
-                  afterEls<-c()
-                else
-                  afterEls<-src[(index+1):length(src)]
-                
-                return(c(beforeEls,dest,afterEls))
-        }
-        
-}
-
-
-##
-# Trim leading and trailing whitespace
-# input : a character vector v
-# output: v with surrounding whitespace removed from each element
-
-.trim <- function(v){
-        for(i in 1:length(v)){
-                v[[i]] <- gsub('^[[:space:]]+', '', v[[i]])
-                v[[i]] <- gsub('[[:space:]]+$', '', v[[i]])
-        }
-        return(v)
-}
-
-##
-#   Reaction ~ Days + tag(1 + Days | subject) ==>
-#   list (fixed = ~ Days,
-#         random = ~ 1 + Days)
-#
-
-#' @export
-.getRandAndFixedTerms <- function (fml){
-        f <- function(x){
-                as.formula(paste("~",paste(x, collapse = "+")))
-        }
-        res <- list()
-        if(length(fml)!=3)
-          stop("the main formula in the extended form should be of length 3 !")
-        lhs <- fml[[2]]
-        rhs <- fml[[3]]
-        TT <- terms(fml,specials="tag")
-        TT.labels <- attr(TT,"term.labels")
-        TT.vars <- attr(TT,"variables")
-        tagattr<-attr(TT,"specials")$tag
-
-        hastag<-!(is.null(tagattr))
-
-        if (hastag){
-                ## fixed
-                F.labels <- TT.labels[-(tagattr-1)]
-                if (!length(F.labels))
-                  F.labels <- 1
-                res$fixed <- as.formula(paste("~",paste(F.labels,collapse="+")))
-
-                ## random
-                random <- list()
-                idx = 1
-                for (j in tagattr){
-                        vind <- j + 1
-                        tmp <- .deparseTag(TT.vars[[vind]])
-                        idx <- idx + 1
-                        ## if tags have the same id, merge them together
-                        if (tmp$id %in% names(random)){
-                            random[[tmp$id]] <- c(random[[tmp$id]], tmp$var)    
-                        } else {
-                                random[[tmp$id]] <- tmp$var
-                        }
-                }
-                res$random <- lapply(random,f)
-                
-        } else {
-                res$fixed <- fml
-        }
-        return(res)
-}
-
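
Taken together, the helpers above implement a round trip from tagged
multilevel formulas to lme4 syntax. A hedged sketch, borrowing variable names
from lme4's sleepstudy example:

    f <- Reaction ~ Days + tag(1 + Days | Subject)
    red <- reduceMI(f)        # Reaction ~ Days + tag(1 | Subject) + tag(Days | Subject)
    tolmerFormat(red)         # Reaction ~ Days + (1 | Subject) + (Days | Subject)
    .getRandAndFixedTerms(f)  # list(fixed = ~Days, random = list(Subject = ~1 + Days))
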
diff --git a/R/Zelig-package.R b/R/Zelig-package.R
deleted file mode 100644
index 4c06a82..0000000
--- a/R/Zelig-package.R
+++ /dev/null
@@ -1,37 +0,0 @@
-#' Zelig: Everyone's Statistical Software
-#'
-#' Zelig is an easy-to-use program that can estimate, and
-#' help interpret the results of, an enormous range of statistical models. It
-#' literally is ``everyone's statistical software'' because Zelig's simple
-#' unified framework incorporates everyone else's (R) code. We also hope it will 
-#' become ``everyone's statistical software'' for applications and teaching,
-#' and so have designed Zelig so that anyone can easily use it or add their
-#' programs to it.  Zelig also comes with infrastructure that facilitates the
-#' use of any existing method, such as by allowing multiply imputed data for
-#' any model, and mimicking the program Clarify (for Stata), which takes the raw
-#' output of existing statistical procedures and translates it into
-#' quantities of direct interest.
-#' 
-#' \tabular{ll}{
-#' Package: \tab Zelig\cr
-#' Version: \tab 4.1-2\cr
-#' Date: \tab 2013-01-11\cr
-#' Depends: \tab R (>= 2.14), boot, MASS, methods, sandwich, survival\cr
-#' Suggests: \tab mvtnorm, Formula \cr
-#' License: \tab GPL version 2 or newer\cr
-#' URL: \tab http://gking.harvard.edu/zelig\cr
-#' }
-#'
-#' @name Zelig-package
-#' @aliases Zelig
-#' @docType package
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, Olivia Lau,
-#'   and Gary King 
-#' @keywords package
-#' @seealso zelig setx sim
-NULL
-
-# SUPER SECRET VARIABLES...
-# These squelch "R CMD CHECK" issues for dynamically (though constantly added)
-# local variables to the "bootstrap", "param" and "qi" functions.
-.call <- .fitted <- .object <- NULL
diff --git a/R/as.dataframe.setx.R b/R/as.dataframe.setx.R
deleted file mode 100644
index 7cca4ad..0000000
--- a/R/as.dataframe.setx.R
+++ /dev/null
@@ -1,15 +0,0 @@
-#' Coerce a \code{setx} Object into a \code{data.frame}
-#' @usage \method{as.data.frame}{setx}(x, row.names=NULL, optional=FALSE, ...)
-#' @note In subsequent versions of Zelig, this function is expected to undergo
-#'   minor modifications.
-#' @param x a \code{setx} object
-#' @param row.names ignored parameter
-#' @param optional ignored parameter
-#' @param ... ignored parameters
-#' @return the \code{setx} object interpreted as a \code{data.frame}. The
-#'   column-names of the resulting \code{data.frame} are specified by the names
-#'   of the \code{setx} object. The row-names are typically unlabeled.
-#' @S3method as.data.frame setx
-as.data.frame.setx <- function (x, row.names=NULL, optional=FALSE, ...) {
-  x$matrix
-}
diff --git a/R/as.matrix.pooled.setx.R b/R/as.matrix.pooled.setx.R
deleted file mode 100644
index 66438f5..0000000
--- a/R/as.matrix.pooled.setx.R
+++ /dev/null
@@ -1,32 +0,0 @@
-#' Convert a ``pooled.setx'' Object to a Matrix
-#'
-#' The setx object is, in its most basic form, a list of column names and values
-#' specified for each of these column names. This function simply converts the
-#' key-value pairs of column-name and specified value into a matrix.
-#'
-#' @note This method allows basic matrix arithmetic operations on data objects,
-#' which mirror values stored within setx objects. In many scenarios,
-#' simulations require matrix-multiplication, etc. to be performed on a
-#' data-set. This function facilitates that need.
-#' 
-#' @usage \method{as.matrix}{pooled.setx}(x, ...)
-#' @S3method as.matrix pooled.setx
-#' @param x a setx object
-#' @param ... ignored parameters
-#' @return a matrix containing columns and rows corresponding to the explanatory
-#' variables specified in the call to the 'setx' function
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.matrix.pooled.setx <- function(x, ...) {
-  big.matrix <- NULL
-  for (label in names(x)) {
-    small.matrix <- as.matrix(x[[label]])
-    big.matrix <- rbind(big.matrix, small.matrix)
-  }
-
-  rownames(big.matrix) <- names(x)
-  attr(big.matrix, "labels") <- names(x)
-  attr(big.matrix, "which") <- 1:nrow(big.matrix)
-  names(attr(big.matrix, "which")) <- names(x)
-
-  big.matrix
-}
diff --git a/R/as.matrix.setx.R b/R/as.matrix.setx.R
deleted file mode 100644
index f58645b..0000000
--- a/R/as.matrix.setx.R
+++ /dev/null
@@ -1,26 +0,0 @@
-#' Convert a 'setx' Object to a Matrix
-#'
-#' The setx object is, in its most basic form, a list of column names and values
-#' specified for each of these column names. This function simply converts the
-#' key-value pairs of column-name and specified value into a matrix.
-#'
-#' @note This method allows basic matrix arithmetic operations on data objects,
-#' which mirror values stored within setx objects. In many scenarios,
-#' simulations require matrix-multiplication, etc. to be performed on a
-#' data-set. This function facilitates that need.
-#' 
-#' @usage \method{as.matrix}{setx}(x, ...)
-#' @S3method as.matrix setx
-#' @param x a setx object
-#' @param ... ignored parameters
-#' @return a matrix containing columns and rows corresponding to the explanatory
-#' variables specified in the call to the 'setx' function
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.matrix.setx <- function(x, ...) {
-  if (!is.null(x$matrix))
-    #as.matrix(x$updated[, x$explan])
-    x$matrix
-  
-  else
-    stop("unspecified error")
-}
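
Both coercions exist so that simulation code can do linear algebra directly on
counterfactuals. A hedged sketch using the Zelig 4-style API these files
belong to:

    z.out <- zelig(dist ~ speed, model = "ls", data = cars)
    x.out <- setx(z.out, speed = 20)
    as.matrix(x.out)                  # 1 x 2 design row: (Intercept), speed
    as.matrix(x.out) %*% coef(z.out)  # linear predictor at speed = 20
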
diff --git a/R/as.parameters.R b/R/as.parameters.R
deleted file mode 100644
index 38254da..0000000
--- a/R/as.parameters.R
+++ /dev/null
@@ -1,102 +0,0 @@
-#' Generic Method for Converting Objects into 'parameters'
-#'
-#' Converts list-style objects into Parameter lists primarily used by the 'qi'
-#' methods. These list-style objects may contain keys specifying: 'link' (the
-#' link function of a statistical model), 'linkinv' (the inverse-link
-#' function), 'family' (an object of class 'family' specifying the model's
-#' classification), 'alpha' (a vector of ancillary parameters), and
-#' 'simulations' (a vector of draws from the model's underlying distribution).
-#'
-#' @note Only three scenarios may exist - converting 'parameters' to
-#'   'parameters', 'list' to 'parameters', and vectors to 'parameters'. The
-#'   third in particular is needed only for backwards compatibility, and support
-#'   will likely be deprecated.
-#'
-#'   Furthermore, this function should be used exclusively and implicitly
-#'   by Zelig.
-#' 
-#' @param params the object to be cast
-#' @param ... parameters reserved for future revisions
-#' @return an object of type `parameters'
-#' @seealso as.parameters.list, as.parameters.parameters, as.parameters.default
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.parameters <- function(params, ...)
-  UseMethod("as.parameters")
-
-
-#' list -> parameters
-#'
-#' The list may contain: 'link', 'linkinv', 'family', 'alpha', and
-#' 'simulations' keys.
-#'
-#' @param params a list object
-#' @param num an integer specifying the number of simulations
-#'        to be taken
-#' @param ... ignored parameters
-#' @return an object of type `parameters'
-#' @seealso as.parameters
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.parameters.list <- function(params, num=NULL, ...) {
- #
-  coefficients <- if ("simulations" %in% names(params))
-    params$simulations
-  else if (num < length(params))
-    params[1:num]
-  else
-    params[[1]]
-
-  # Extract alpha parameters from Zelig
-  alpha <- if ("alpha" %in% names(params))
-    params$alpha
-  else if (num < length(params))
-    tail(params, -num)
-
-  # link function
-  if ("link" %in% names(params))
-    link <- params$link
-
-  # link-inverse function
-  if ("linkinv" %in% names(params))
-    linkinv <- params$linkinv
-
-  # family object, has both a link and link-inverse
-  fam <- if ("family" %in% names(params))
-    params$family
-  else if ("fam" %in% names(params))
-    params$fam
-  else
-    NULL
-
-  # Return
-  parameters(coefficients, alpha, fam=fam, link=link, linkinv=linkinv)
-}
-
-#' parameters -> parameters
-#' This is merely an identity function when casting 'parameters' objects into
-#' 'parameters'.
-#' @param params a parameters object
-#' @param ... ignored parameters
-#' @return the same parameter object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.parameters.parameters <- function(params, ...)
-  params
-
-#' ??? -> parameters
-#' @note This function should be deprecated.
-#' @param params any non-supported data-type
-#' @param num an integer specifying the number of simulations to compute
-#' @param ... ignored
-#' @return the object passed in
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.parameters.default <- function(params, num=NULL, ...) {
-  if (!missing(num)) {
-    alpha <- if (num < nrow(params))
-      tail(params, -num)
-
-    #
-    parameters(simulations=head(params, num), alpha=alpha)
-  }
-  
-  else
-    parameters(simulations=params, alpha=NULL)
-}
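
A hedged sketch of the cast the 'qi' methods rely on, assuming the
'parameters' constructor defined elsewhere in this package:

    sims <- matrix(rnorm(2000), ncol = 2,
                   dimnames = list(NULL, c("(Intercept)", "x")))
    p <- as.parameters(list(simulations = sims,       # systematic draws
                            alpha = rgamma(1000, 2),  # ancillary draws
                            link = log, linkinv = exp),
                       num = 1000)
    # 'p' now carries coefficient draws, ancillary draws, and the link pair
    # in the form the 'qi' methods expect.
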
diff --git a/R/as.qi.R b/R/as.qi.R
deleted file mode 100644
index 4832ebd..0000000
--- a/R/as.qi.R
+++ /dev/null
@@ -1,247 +0,0 @@
-#' Generic Method for Converting Various Objects into 'qi' Objects
-#' 'qi' objects are list-style objects used by the 'summarize' function to 
-#' compute simple summaries about the simulated data. For readability and
-#' simplicity, the 'qi' function typically returns a list of
-#' named simulations. This list is converted internally by Zelig into a 'qi'
-#' object so that several methods can be easily applied to the Quantities of
-#' Interest: plot, summarize, and print
-#' @note These functions are primarily used internally by Zelig and should not
-#'   be used in the global namespace.
-#' @param s the object to be cast
-#' @return an object of type `qi'
-#' @seealso as.qi.default, as.qi.qi, as.qi.list
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.qi <- function(s)
-  UseMethod("as.qi")
-
-
-#' ??? -> qi
-#'
-#' @param s any unsupported object
-#' @return an object of type `qi'
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.qi.default <- function(s)
-  stop("as.qi does not yet support this data-type")
-
-
-#' qi -> qi
-#'
-#' @param s an object of type `qi'
-#' @return the same object of type `qi'
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.qi.qi <- function(s)
-  s
-
-
-#' list -> qi
-#' This function has a lot of room to go wrong. It tries to detect whether the
-#' zelig model is old-style or new-style (as of 4/4/2011). Eventually this
-#' feature should be phased out.
-#' @note This method has peculiar behavior when the list contains only two
-#' elements. The crucial fix is to simply remove the portion of code which
-#' intentionally implements this peculiar behavior.
-#' @param s a list
-#' @return an object of type `qi'
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.qi.list <- function(s) {
-  #q <- list(titles=list(), stats=list())
-  titles <- list()
-  stats <- list()
-
-  # divide the list into ones with/without keys
-  keys <- splitUp(s)
-
-  fail.names <- paste("qi", 1:length(s), sep="")
-  success.names <- unlist(Map(.acronym, names(s), fail=''))
-  success.names <- .number.list(success.names)
-
-  # create new environment
-  env <- new.env()
-
-  # iterator
-  k <- 1
-
-  long  <- list()
-  short <- list()
-  stats <- list()
-
-  # add the named entries
-  for (title in names(keys$wordful)) {
-    key <- if (regexpr("^[a-zA-Z]", success.names[k]) != -1)
-      success.names[k]
-    else
-      ''
-
-    stats[[key]] <- keys$wordful[[title]]
-    long[[title]] <- key
-    #attr(stats, title) <- key
-
-    # increment counter
-    k <- k + 1
-  }
-
-  attr(stats, ".index") <- long
-
-  q <- stats
-
-  # cast as `qi' object, and return
-  class(q) <- "qi"
-
-  q    
-}
-
-
-#' Print a Quantity of Interest in Human-Readable Form
-#'
-#' Print simulated quantities of interest in a human-readable form
-#' 
-#' @usage \method{print}{qi}(x, ...)
-#' @S3method print qi
-#' @param x a qi object
-#' @param ... ignored parameters
-#' @return the object that was printed (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.qi <- function(x, ...) {
-  self <- x
-
-  # error-catching
-  if (length(self$titles) != length(self$stats))
-    stop("corrupted object!  titles and stats length mismatch")
-
-  qi.length <- length(self)
-
-  # iterate through
-  for (k in 1:qi.length) {
-    # output title
-    message(self$titles[[k]])
-
-    # output qi
-    print(self$stats[[k]])
-
-    # just to prevent extra end-line
-    if (k != qi.length)
-      message()
-  }
-
-  invisible(x)
-}
-
-
-#' The Names of a 'qi' Object
-#' 
-#' Function to get the names of a 'qi' object. This function does not entirely
-#' parallel the functionality of traditional 'names' methods; this is because
-#' the \code{$} operator has been overloaded to support a unique style of value
-#' extraction. For technical details, please see the source code.
-#' @note No method exists to set the names of a 'qi' object, once it is 
-#' constructed. This feature will be added later.
-#' @usage \method{names}{qi}(x)
-#' @S3method names qi
-#' @param x a 'qi' object
-#' @return a character-vector containing the names of the Quantities of
-#' Interest
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-names.qi <- function(x) {
-  names(attr(x, ".index"))
-}
-
-
-#' Convert a Vector of Character Strings into Acronyms
-#' This function will convert a vector of character strings into their
-#' appropriately titled acronym forms. That is, the two Quantity of Interest
-#' titles:
-#' \itemize{
-#'    \item "Expected Values (for X): E(Y|X)"
-#'    \item "Expected Values (for X1): E(Y|X1)"
-#' }
-#' The result will be "ev1" and "ev2". That is, the acronym will not contain
-#' information kept in parentheses or after a colon.
-#' @note This function currently includes prepositions as parts of acronyms
-#' @param str a vector of character strings to convert into acronyms
-#' @param fail a result to produce upon failure
-#' @return a vector of character-strings
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.acronym <- function(str, fail=str) {
-  ignored.words <- c(
-                     "in", "for", "by",
-                     "the", "a", "an"
-                     )
-  
-  # remove all text after colon
-  # remove trailing whitespace
-  # remove leading whitespace
-  # remove paranthetical statements
-  # remove non-alphanumerics
-  reduced <- sub(':.*$', '', str)
-  reduced <- sub('\\s+$', '', reduced, perl=TRUE)
-  reduced <- sub('^\\s+', '', reduced, perl=TRUE)
-  reduced <- gsub('\\(.*?\\)', '', reduced, perl=TRUE)
-  
-  # if we get an empty string, return whatever the fail value is
-  if (nchar(reduced) < 1)
-    return(fail)
-
-  # splitted is not a word, I know
-  #  1. split the reduced string into non-whitespace characters
-  #  2. take the first letter of each
-  #  3. put into lowercase
-  splitted <- unlist(strsplit(reduced, '\\s+'))
-
-  # remove ignored words
-##   splitted <- Filter(
-##                      function (char) regexpr(
-##                      splitted
-##                      )
-  
-  splitted <- substr(splitted, 1, 1)
-  splitted <- tolower(splitted)
-
-  # remove all non-letters
-  acronym <- Filter(
-                    function (char)
-                    regexpr('^[a-zA-Z]$', char, perl=TRUE),
-                    splitted
-                    )
-
-  # paste together, and return
-  paste(acronym, sep="", collapse="")
-}
-
-
-#' Append Numbers to Identically Valued Strings
-#' This function ensures that vectors of strings are uniquely named.
-#' @note This function is used in tandem with '.acronym' to correctly produce
-#'   short-names for quantities of interest.
-#' @param vec a named vector of character-strings
-#' @return a vector of character-strings in which duplicated values get a
-#'   number appended to the end. E.g.: if vec equals c('ev', 'ev', 'pr'),
-#'   then the result will be:
-#'   c('ev1', 'ev2', 'pr')
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.number.list <- function(vec) {
-  if (!is.character(vec)) {
-    warning("'vec' is not a character vector; returning it unchanged")
-    return(vec)
-  }
-
-  final.list <- c()
-  unique.vec <- unique(vec)
-
-  for (k in 1:length(vec)) {
-    val <- vec[k]
-
-    hits <- sum(val == vec[1:k])
-    total.hits <- sum(val == vec)
-
-    final.list[names(vec)[k]] <- if (total.hits > 1)
-      paste(val, hits, sep="")
-    else
-      val
-  }
-
-  # return
-  final.list
-}
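
The two helpers combine to produce the short names used by as.qi.list above;
a quick sketch of their behavior:

    .acronym("Expected Values (for X): E(Y|X)", fail = "qi")
    # [1] "ev"   (parenthetical and post-colon text is dropped)

    .number.list(c(t1 = "ev", t2 = "ev", t3 = "pr"))
    #    t1    t2    t3
    # "ev1" "ev2"  "pr"   (the helper indexes by names, so the input must be named)
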
diff --git a/R/as.summarized.R b/R/as.summarized.R
deleted file mode 100644
index 3d1e46f..0000000
--- a/R/as.summarized.R
+++ /dev/null
@@ -1,29 +0,0 @@
-#' Generic Method for Casting Objects as 'summarized' Objects
-#' 
-#' This function exists primarily for use by the 'summarize' method, which
-#' summarizes the simulations taken from the 'qi' method. The generic function
-#' 'summary' when applied to a Zelig Simulation implicitly uses this function.
-#' 
-#' @note This is made available in the global namespace as a matter of potential
-#' future compliance.
-#' @param x an object
-#' @param ... unspecified parameters
-#' @return a 'summarized.qi' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.summarized <- function(x, ...) {
-  UseMethod("as.summarized")
-}
-
-#' summarized.qi -> summarized.qi
-#' 
-#' Identity operation on ``summarized.qi'' objects
-#' @usage \method{as.summarized}{summarized.qi}(x, ...)
-#' @param x an object of type 'summarized.qi'
-#' @param ... ignored parameters
-#' @return the same 'summarized.qi' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.summarized.summarized.qi <- function(x, ...) {
-  x
-}
diff --git a/R/as.summarized.list.R b/R/as.summarized.list.R
deleted file mode 100644
index 68d4eda..0000000
--- a/R/as.summarized.list.R
+++ /dev/null
@@ -1,12 +0,0 @@
-#' list -> summarized.qi
-#' Convert a list into a ``summarized.qi'' object
-#' @usage \method{as.summarized}{list}(x, ...)
-#' @param x a list
-#' @param ... ignored parameters
-#' @return a ``summarized.qi'' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.summarized.list <- function(x, ...) {
-  class(x) <- "summarized.qi"
-  x
-}
diff --git a/R/attach.env.R b/R/attach.env.R
deleted file mode 100644
index 01a916b..0000000
--- a/R/attach.env.R
+++ /dev/null
@@ -1,55 +0,0 @@
-#' Attach Variables to a Function
-#'
-#' Returns a function, specified by the user, with the variables of a specified
-#' environment attached. This, in essence, allows programmers to write functions
-#' that have forms of private memory. This makes the function behave similarly
-#' to an object.
-#' 
-#' @note This function is used by Zelig to ensure that particular method calls -
-#' param, qi, bootstrap - will contain the private variables: ``.fitted'',
-#' ``.model'', ``.call'' and ``.env'', which respectively contain the fitted
-#' model object, the name of the zelig model being invoked, the original call
-#' to the model-fitting function, and the environment in which to evaluate
-#' that call.
-#'
-#' @param f a function which will be modified
-#' @param env an environment variable which will be attached to the function
-#' being returned
-#' @param ... arbitrary key-value paired parameters which will be assigned to
-#' the environment of the function being returned
-#' @return the original function ``f'' with a different environment attached to
-#' it.
-#'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-attach.env <- function (f, env = NULL, ...) {
-
-  # Expand dot parameters
-  dots <- list(...)
-
-  # Ensure that "env" is a valid environment
-  if (is.null(env))
-    env <- new.env()
-
-  else if (!inherits(env, "environment")) {
-    warning('Environment "env" is not a valid environment variable. ',
-            'A default environment will be applied to "f" instead.')
-    env <- new.env()
-  }
-
-  if (length(dots)) {
-    # Add variables to the newly created environment
-    for (key in names(dots))
-      assign(key, dots[[key]], env)
-  }
-
-  # Modify the default environment of the function
-  environment(f) <- env
-
-  # Return the modified function
-  f
-}
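
The private-memory pattern this enables, in a hedged sketch:

    counter <- attach.env(function () {
      .count <<- .count + 1  # updates the attached environment, not the caller's
      .count
    }, env = NULL, .count = 0)
    counter()  # 1
    counter()  # 2
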
diff --git a/R/bootfn.default.R b/R/bootfn.default.R
deleted file mode 100644
index 4cd25e6..0000000
--- a/R/bootfn.default.R
+++ /dev/null
@@ -1,263 +0,0 @@
-#' Default Boot-strapping procedure
-#' 
-#' The default procedure for extracting bootstrap information. Note that this
-#' method re-fits the model and resamples the data repeatedly, so it is a good
-#' candidate for optimization.
-#'
-#' @param data a data.frame
-#' @param i an integer or character-string specifying the index of the row to
-#' be used in the bootstrapping procedure.
-#' @param object the fitted model object
-#' @param bootstrapfn a function used to bootstrap the object
-#' @param num an integer specifying the number of samples to simulate
-#' @param ... unspecified parameters
-#' @return a list of parameters
-bootfn.default <- function(data, i, object, bootstrapfn=NULL, num, ...) {
-
-  # This is mostly here to squelch R-check notes; however, sloppy programming
-  # can potentially prevent the ".model" variable from being defined in the
-  # attached environment. To make sense of this line, see the "sim.default"
-  # function, where an environment (containing the variable ".model") is
-  # explicitly attached to the boot function
-  if (!exists(".model"))
-    .model <- "default"
-
-  # Get a random sample of the data set
-  d <- data[i,]
-
-  # Extract the call object
-  # Naming it "jeez" because it's really hard to find names for call objects
-  # that are meaningful and not reserved for other functions
-  jeez <- .call
-
-  # Replace the data frame with an appropriate one
-  jeez$data <- d
-
-  .env <- if (exists('.env'))
-    .env
-  else
-    NULL
-
-  # Fit the model
-  fit <- eval(jeez)
-
-  # If "bootstrapfn" is unspecified, then we try to search its appropriate value
-  # down
-  if (is.null(bootstrapfn))
-    bootstrapfn <- getS3method("bootstrap", .model, TRUE)
-
-  # If the bootstrap lookup came up empty, fall back to the default
-  
-## CRAN is opposed to ::: within same package, 
-## but I'm opposed to S4 environment artifacts
-##  if (is.null(bootstrapfn))
-##    bootstrapfn <- Zelig:::bootstrap.default
-## So this obviously makes my code better:
-
-  if (is.null(bootstrapfn)){
-    localbootstrap.default <- function (obj, ...)
-    list(
-       alpha = NULL,
-       beta = coef(obj)
-       )
-    bootstrapfn <- localbootstrap.default
-  }
-
-
-  # Attach the ".num" private variable
-  bootstrapfn <- attach.env(bootstrapfn, NULL, .num = num, .fitted = object)
-
-  # Get a result
-  res <- bootstrapfn(fit)
-
-  # Return vectorized bootstrap simulation to "boot" function
-  as.bootvector(res)$vector
-}
-
-#' Convert Boot Object to a Vector
-#'
-#' Receives a list with 2 slots as its input, and returns a vector of the two
-#' smashed together along with the offsets used to reverse-construct the object.
-#'
-#' @note This method is used internally by Zelig to allow an intuitive,
-#' ``param''-like API for bootstrapping.
-#'
-#' @param obj a list with two slots: ``alpha'' and ``beta''. Respectively, these
-#' represent bootstrap samples for ancillary parameters and systematic
-#' component of the bootstrapped GLM.
-#' @return a list containing the resulting vector, as well as an object used to
-#' reverse-build the list (``obj'') from the resulting call to ``bootstrap''.
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.bootvector <- function (obj) {
-
-  # If this fails, something is really wrong.
-  a <- obj$alpha
-  b <- obj$beta
-
-  # Error-checking
-  if (!(is.vector(a) || is.null(a)))
-    stop('The "alpha" slot of "obj" must be a vector or NULL.')
-
-  if (!(is.vector(b)))
-    stop('The "beta" slot of "obj" must be a vector')
-
-  # Return
-  list(
-       # For antiquity, beta should be placed before alpha. This is partially
-       # because alpha is not always specified.
-       vector = c(b, a),
-
-       # The respective lengths of each vector
-       lengths = c(beta = length(b), alpha = length(a)),
-
-       # Names
-       names = list(beta = names(b), alpha = names(a))
-       )
-}
-
-#' Convert of Vector of Bootstrapped Parameters to a List-style Boot Object
-#'
-#' This inverts the ``as.bootvector'' function, and returns a list containing
-#' the slots ``alpha'' and ``beta''.
-#'
-#' @param bootstraps a matrix of bootstrapped parameter draws, one row per
-#'   replicate, with the ``beta'' columns followed by the ``alpha'' columns
-#' @param lengths a named vector giving the number of ``beta'' and ``alpha''
-#'   columns, as produced by ``as.bootvector''
-#' @param names a character-vector specifying the names of the boot terms
-#' @return a list with ``alpha'' and ``beta'' slots
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.bootlist <- function (bootstraps, lengths, names) {
-
-  # Error-checking. "bootstraps" and "lengths" must:
-  #  1. "bootstraps" must be a matrix && have at least 1 value
-  #  2. "lengths" must be a vector
-  #  3. The sum of all the lengths must perfectly add up to the number of
-  #     columns in bootstraps
-  if (!(is.matrix(bootstraps) && ncol(bootstraps) > 0 && nrow(bootstraps) > 0))
-    stop('The parameter "bootstraps" must be a matrix')
-
-  if (!is.vector(lengths))
-    stop('The parameter "lengths" must be a vector.')
-
-  if (sum(lengths) != ncol(bootstraps))
-    stop('The sum of "lengths" must equal the number of columns in ',
-         '"bootstraps".'
-         )
-
-  # Actual work begins here. This could be made more general, but if there's
-  # more info besides "alpha" and "beta", it's not very much like a bootstrap...
-  # In the future, we might need to add support for "link", "inverse link" and
-  # "family" slots, but there is overlap here with the "param" method.
-
-  # Note that to make sense of the below, it has to be understood that the
-  # canonical form of these bootstrapped values is:
-  # (beta, alpha)
-  # where "beta" is several columns of systematic parameters and
-  # "alpha" is several columns of ancillary parameters
-  a <- b <- NULL
-
-  # If beta is 0-sized, then we should ignore it
-  if (lengths[["beta"]] > 0) {
-    # Extract the "beta" portion of "bootstraps". These values should represent
-    # the systematic parameters
-    b <- bootstraps[ , 1:lengths[["beta"]] ]
-
-    # Change the column names of the system's parameter (beta) simulations
-    b <- name.object(b, names$beta)
-  }
-
-  # Note that 1 + 1:2 is 2:3, so this statement offsets the subscript by the
-  # length of "beta".
-  if (lengths[["alpha"]] > 0) {
-    # Extract several columns from "bootstraps". These values should represent
-    # the model's ancillary parameters
-    a <- bootstraps[ , lengths[["beta"]] + 1:lengths[["alpha"]] ]
-
-    # Change the column names of the ancillary parameter (alpha) simulations
-    a <- name.object(a, names$alpha)
-  }
-
-  # Return the appropriate
-  list(alpha = a, beta = b)
-}
-
-#' Name Elements of an Object
-#'
-#' Returns an object
-#' @note This method is used internally by Zelig to name the columns and
-#' elements of matrices and vectors for simulations and bootstrapped parameters.
-#' @param obj a vector or matrix
-#' @param names a character-vector specifying names
-#' @return the original object, with a "colnames" or "names" equal to the
-#' parameter "names". If "names" is larger than "obj", the "names" parameter
-#' is truncated appropriately. If it is smaller, then the latter part of "obj"
-#' is replaced with a numbered generic column name
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-name.object <- function (obj, names) {
-
-  # Handle the special case, which shouldn't really happen...
-  if (is.null(names)) {
-    if (is.matrix(obj))
-      colnames(obj) <- NULL
-    else if (is.vector(obj))
-      names(obj) <- NULL
-    return(obj)
-  }
-
-  # Get the length of names
-  names.len <- length(names)
-
-  # Get the 'length' of the object, regardless of whether it is a vector or
-  # matrix. Note that in our case, length is equivalient to "ncol" if the
-  # object is a matrix
-  obj.len <- if (is.matrix(obj))
-    ncol(obj)
-  else if (is.vector(obj))
-    length(obj)
-  else {
-    # Warn the user. This might not be necessary, but it helps developers
-    # debug. Ideally this case never crops up in well-made Zelig models
-    warning('"name.object" ignores objects that are not matrices or vectors')
-
-    # Bail out of the function
-    return(obj)
-  }
-
-  # Ensure that "names" is exactly the length of "obj"
-  if (names.len < obj.len) {
-    # Create vector equal in size to the length of the object being named
-    temp <- paste(rep("col", obj.len), 1:obj.len, sep = "")
-
-    # Replace default values (col1, col2, ... colN) with the value that
-    # *should* be there in a perfect world, where there is never any glitchy code
-    temp[1:names.len] <- names
-
-    # Replace "names" with the newly constructed, appropriately size, vector
-    # of names
-    names <- temp
-  }
-
-  # Truncate the "names" parameter if it is too largit is too large
-  else if (names.len > obj.len) {
-    # Warn the user. This is probably only useful/meaningful to developers. 
-    # This case should not crop up in well made Zelig models.
-    warning('"names.object" is truncating the names parameter, because it ',
-            'is larger than "obj" the object of the function.')
-
-    # Truncate "names"
-    names <- names[1:obj.len]
-  }
-
-  # After all the prep work, finally name the object
-  if (is.matrix(obj))
-    colnames(obj) <- names
-
-  else if (is.vector(obj))
-    names(obj) <- names
-
-  else
-    warning('"obj" must be a matrix or a vector. ',
-            'Returning the "obj" untouched.')
-
-  # Return modified object
-  obj
-}
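
The vector/list round trip above is easiest to see in isolation; a hedged
sketch:

    bv <- as.bootvector(list(beta = c(x1 = 0.5, x2 = -1.2), alpha = c(sigma = 0.9)))
    bv$vector   # 0.5 -1.2 0.9 -- beta first, then alpha
    bv$lengths  # beta = 2, alpha = 1
    boots <- rbind(bv$vector, bv$vector)  # stand-in for boot() replicates
    as.bootlist(boots, bv$lengths, bv$names)  # back to $beta and $alpha
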
diff --git a/R/bootstrap.R b/R/bootstrap.R
deleted file mode 100644
index a455338..0000000
--- a/R/bootstrap.R
+++ /dev/null
@@ -1,35 +0,0 @@
-#' Generic Method for ``bootstrap''
-#'
-#' This method is intended to be overridden by statistical models that would like
-#' to support statistical bootstrapping.
-#' @note This method has private memory storage and can reference the objects:
-#' ``.fitted'', ``.data'', ``.call'', ``.env'', despite having no declaration in
-#' the argument list.
-#' @param obj a fitted model object that will be used to produce boot-strapped
-#' parameters. This object usually inherits the class ``glm'' or ``lm'' object
-#' @param ... unspecified parameters
-#' @return a list with the ``alpha'' and ``beta'' slots set. Note that ``alpha''
-#' corresponds to ancillary parameters and ``beta'' corresponds to systematic
-#' components of the model
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-bootstrap <- function (obj, ...)
-  UseMethod("bootstrap")
-
-#' Produce Boot-strapped Parameters for a Statistical Model
-#'
-#' This method is a fallback for bootstrapping models that do not have a defined
-#' ``bootstrap'' method. For most models, this default is sufficient, so long as
-#' the model follows the usual convention that ``coef(obj)'' returns the
-#' systematic parameters of a fitted model.
-#' @usage \method{bootstrap}{default}(obj, ...)
-#' @S3method bootstrap default
-#' @param obj a fitted model object. This is typically of type ``glm'' or ``lm''
-#' @param ... unspecified parameters
-#' @return a list with the ``alpha'' and ``beta'' slots set
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-bootstrap.default <- function (obj, ...)
-  list(
-       alpha = NULL,
-       beta = coef(obj)
-       )
diff --git a/R/bootstrap.gamma.R b/R/bootstrap.gamma.R
deleted file mode 100644
index f3c52a9..0000000
--- a/R/bootstrap.gamma.R
+++ /dev/null
@@ -1,17 +0,0 @@
-#' Bootstrap Parameters for Zelig ``gamma'' GLM
-#'
-#' Returns bootstrapped parameter estimates for a ``gamma'' GLM.
-#' @usage \method{bootstrap}{gamma}(obj, ...)
-#' @S3method bootstrap gamma
-#' @param obj a ``zelig'' object that will be used to produce boot-strapped
-#' parameters
-#' @param ... extra parameters to be passed to the ``boot'' method. These are
-#' typically ignored, but are included for future expansion.
-#' @return a list containing information concerning link, link-inverses, etc.
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-bootstrap.gamma <- function (obj, ...) {
-  list(
-       alpha = gamma.shape(.fitted)$alpha,
-       beta = coef(.fitted)
-       )
-}
diff --git a/R/bootstrap.negbinom.R b/R/bootstrap.negbinom.R
deleted file mode 100644
index 699ab79..0000000
--- a/R/bootstrap.negbinom.R
+++ /dev/null
@@ -1,17 +0,0 @@
-#' Bootstrap Parameters for Zelig ``negbinom'' GLM
-#'
-#' Returns bootstrapped parameter estimates for a negative-binomial GLM.
-#' @usage \method{bootstrap}{negbinom}(obj, ...)
-#' @S3method bootstrap negbinom
-#' @param obj a ``zelig'' object that will be used to produce boot-strapped
-#' parameters
-#' @param ... extra parameters to be passed to the ``boot'' method. These are
-#' typically ignored, but are included for future expansion.
-#' @return a list containing information concerning link, link-inverses, etc.
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-bootstrap.negbinom <- function (obj, ...) {
-  list(
-       alpha = .fitted$theta,
-       beta = coef(.fitted)
-       )
-}
diff --git a/R/bootstrap.normal.R b/R/bootstrap.normal.R
deleted file mode 100644
index 3809401..0000000
--- a/R/bootstrap.normal.R
+++ /dev/null
@@ -1,23 +0,0 @@
-#' Bootstrap Parameters for Zelig ``normal'' GLM
-#'
-#' Returns bootstrapped parameter estimates for a Gaussian GLM.
-#' @usage \method{bootstrap}{normal}(obj, num, ...)
-#' @S3method bootstrap normal
-#' @param obj a ``zelig'' object that will be used to produce boot-strapped
-#' parameters
-#' @param num an integer specifying the number of simulations to produce
-#' @param ... extra parameters to be passed to the ``boot'' method. These are
-#' typically ignored, but are included for future expansion.
-#' @return a list containing information concerning link, link-inverses, etc.
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-bootstrap.normal <- function (obj, num, ...) {
-
-  degrees.freedom <- obj[["df.residual"]]
-  sig2 <- summary(obj)$dispersion
-  alpha <- sqrt(degrees.freedom * sig2 / rchisq(num, degrees.freedom))
-
-  list(
-       alpha = alpha,
-       beta = coef(obj)
-       )
-}
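
The ancillary draw above samples the error standard deviation from its scaled
inverse-chi-square posterior. The same computation stand-alone, as a hedged
sketch on an ordinary lm fit:

    fit <- lm(dist ~ speed, data = cars)
    df.res <- fit$df.residual
    sig2 <- summary(fit)$sigma^2
    sigma.draws <- sqrt(df.res * sig2 / rchisq(1000, df.res))
    quantile(sigma.draws, c(0.025, 0.5, 0.975))
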
diff --git a/R/callToString.R b/R/callToString.R
deleted file mode 100644
index 16e5d88..0000000
--- a/R/callToString.R
+++ /dev/null
@@ -1,10 +0,0 @@
-#' Convert \code{call} Object to a String
-#'
-#' This method converts \code{call} objects into a simple, intuitive
-#' human-readable form.
-#' @param x a \code{call} object
-#' @param ... ignored parameters
-#' @return a character-string representing the \code{call} object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-callToString <- function (x, ...)
-  as.character(as.expression(x))
diff --git a/R/cluster.formula.R b/R/cluster.formula.R
deleted file mode 100644
index 2fbf487..0000000
--- a/R/cluster.formula.R
+++ /dev/null
@@ -1,22 +0,0 @@
-#' Generate Formulae that Consider Clustering
-#'
-#' This method is used internally by the "Zelig" Package to interpret
-#' clustering.
-#' @param formula a formula object
-#' @param cluster a vector
-#' @return a formula object describing clustering
-cluster.formula <- function (formula, cluster) { 
-
-  # Convert LHS of formula to a string
-  lhs <- deparse(formula[[2]])
-
-  cluster.part <- if (is.null(cluster))
-    # NULL clusters fall back to clustering on row indices
-    sprintf("cluster(1:nrow(%s))", lhs)
-
-  else
-    # Otherwise we trust user input
-    sprintf("cluster(%s)", cluster)
-
-  update(formula, paste(". ~ .", cluster.part, sep=" + "))
-}
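
What the clustering rewrite produces, sketched:

    cluster.formula(y ~ x1 + x2, "state")  # y ~ x1 + x2 + cluster(state)
    cluster.formula(y ~ x1 + x2, NULL)     # y ~ x1 + x2 + cluster(1:nrow(y))
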
diff --git a/R/cmvglm.R b/R/cmvglm.R
deleted file mode 100644
index cf3f16f..0000000
--- a/R/cmvglm.R
+++ /dev/null
@@ -1,89 +0,0 @@
-#' cmvglm
-#' @param formula a formula
-#' @param model the name of the Zelig model
-#' @param ndim the number of dimensions in the statistical model
-#' @param data a data-frame
-#' @param fact ???
-#' @author Kosuke Imai and Olivia Lau
-#' @export
-cmvglm <- function(formula, model, ndim,data=NULL, fact=NULL){
-
-  toBuildFormula<-function(Xnames,sepp="+"){
-    lng<-length(Xnames)
-    rhs<-NULL
-    if (lng!=0){
-      if(lng==1){
-        rhs=Xnames
-      }else{
-        for (j in 1:(lng-1)){
-          rhs<-paste(rhs,as.name(Xnames[[j]]))
-          rhs<-paste(rhs,sepp)
-        }
-        rhs<-paste(rhs,Xnames[[lng]])
-      }
-    }
-    return (rhs)
-  }
-  tt<-terms(formula)
-  attr(tt,"systEqns")<-names(formula)
-  p<-make.parameters(tt,shape="matrix")
-  vars<-rownames(p)
-  cm<-vector("list", length(vars))
-  names(cm)<-vars
-  
-    for(i in 1:length(cm))
-      cm[[i]]<-diag(1, ndim)
-
-  constrain<-attr(tt,"constraints")
-  if(!is.logical(constrain)){
-    tmp <- sort(colnames(constrain))
-    for (i in 1:length(tmp)) {
-      ci<-constrain[,i]
-      if (is.null(na.omit(ci)) || length(unique(na.omit(ci)))!=1)
-        stop("invalid input for constrain")
-      minj <- match(FALSE, is.na(ci))
-      whatvar <- pmatch(unique(na.omit(ci)), names(cm))
-      for (j in 1:3)
-        if (!is.na(ci[j])) {
-          cm[[whatvar]][j,j]<-0
-          cm[[whatvar]][j,minj]<-1
-        }
-    }
-  }
-  for(i in rownames(p)){
-    for(j in 1:ncol(p)){
-      if(is.na(p[i,j]))
-        cm[[i]][j,j]<-0
-    }
-  }
-    
- # if(!is.null(constant))
- #   for(i in 1:length(constant))
- #     for(j in 1:length(cm))
- #       if(names(cm)[j]!="(Intercept)")
- #         cm[[j]][constant[i],]<-matrix(0, ncol=ncol(cm[[j]]))
-
-  for(i in 1:length(cm))
-    cm[[i]]<-as.matrix(cm[[i]][,apply(cm[[i]], 2, sum)!=0])
-  rhs<-toBuildFormula(attr(tt,"indVars"))
-  if(!(is.null(rhs)))
-    rhs<-(paste("~",rhs))
-  else
-    rhs<-"~1"
-  Ynames<-unlist(attr(tt,"depVars"))
-  if(!is.null(fact))
-    lhs<-fact
-  else{
-    if(length(Ynames)>1){
-      lhs<-toBuildFormula(Ynames,",")
-      if (!(is.null(lhs))){
-        lhs<-paste("cbind(",lhs)
-        lhs<-paste(lhs,")")
-      }
-    }else{
-      lhs=Ynames
-    }
-  }
-  formula<-as.formula(paste(lhs,rhs))
-  list("formula"=formula, "constraints"=cm)
-}
diff --git a/R/common-methods.R b/R/common-methods.R
deleted file mode 100644
index bea7155..0000000
--- a/R/common-methods.R
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file is a quick-hack to fix a mistake placed in Zelig Core on Oct. 1st.
-# The issue in Zelig should be fixed by November '12. :(
-
-#' @S3method coef zelig
-coef.zelig <- function (object, ...)
-  coef(object$result, ...)
-
-#' @S3method logLik zelig
-logLik.zelig <- function (object, ...)
-  logLik(object$result, ...)
-
-#' @S3method plot zelig
-plot.zelig <- function (x, ...)
-  plot(x$result, ...)
-
-#' @S3method vcov zelig
-vcov.zelig <- function (object, ...)
-  vcov(object$result, ...)
diff --git a/R/create-json.R b/R/create-json.R
new file mode 100755
index 0000000..c429712
--- /dev/null
+++ b/R/create-json.R
@@ -0,0 +1,191 @@
+#' @include utils.R
+#' @include model-zelig.R
+#' @include model-ls.R
+#' @include model-glm.R
+#' @include model-binchoice.R
+#' @include model-logit.R
+#' @include model-probit.R
+#' @include model-poisson.R
+#' @include model-normal.R
+#' @include model-gamma.R
+#' @include model-negbinom.R
+#' @include model-exp.R
+#' @include model-lognorm.R
+#' @include model-tobit.R
+#' @include model-quantile.R
+#' @include model-relogit.R
+#' @include model-gee.R
+#' @include model-binchoice-gee.R
+#' @include model-logit-gee.R
+#' @include model-probit-gee.R
+#' @include model-gamma-gee.R
+#' @include model-normal-gee.R
+#' @include model-poisson-gee.R
+#' @include model-bayes.R
+#' @include model-factor-bayes.R
+#' @include model-logit-bayes.R
+#' @include model-mlogit-bayes.R
+#' @include model-normal-bayes.R
+#' @include model-oprobit-bayes.R
+#' @include model-poisson-bayes.R
+#' @include model-probit-bayes.R
+#' @include model-tobit-bayes.R
+#' @include model-weibull.R
+#' @include model-timeseries.R
+#' @include model-arima.R
+#' @include model-ar.R
+#' @include model-ma.R
+
+#library(jsonlite)  # toJSON() below requires jsonlite
+
+createJSON <- function(){
+
+  z5ls <- zls$new()
+  z5ls$toJSON()
+
+  z5logit <- zlogit$new()
+  z5logit$toJSON()
+
+  z5probit <- zprobit$new()
+  z5probit$toJSON()
+
+  z5poisson <- zpoisson$new()
+  z5poisson$toJSON()
+
+  z5normal <- znormal$new()
+  z5normal$toJSON()
+
+  z5gamma <- zgamma$new()
+  z5gamma$toJSON()
+
+  z5negbin <- znegbin$new()
+  z5negbin$toJSON()
+
+  z5exp <- zexp$new()
+  z5exp$toJSON()
+
+  z5lognorm <- zlognorm$new()
+  z5lognorm$toJSON()
+
+  z5tobit <- ztobit$new()
+  z5tobit$toJSON()
+
+  z5quantile <- zquantile$new()
+  z5quantile$toJSON()
+
+  z5relogit <- zrelogit$new()
+  z5relogit$toJSON()
+
+  z5logitgee <- zlogitgee$new()
+  z5logitgee$toJSON()
+
+  z5probitgee <- zprobitgee$new()
+  z5probitgee$toJSON()
+
+  z5gammagee <- zgammagee$new()
+  z5gammagee$toJSON()
+
+  z5normalgee <- znormalgee$new()
+  z5normalgee$toJSON()
+
+  z5poissongee <- zpoissongee$new()
+  z5poissongee$toJSON()
+
+  z5factorbayes <- zfactorbayes$new()
+  z5factorbayes$toJSON()
+
+  z5logitbayes <- zlogitbayes$new()
+  z5logitbayes$toJSON()
+
+  z5mlogitbayes <- zmlogitbayes$new()
+  z5mlogitbayes$toJSON()
+
+  z5normalbayes <- znormalbayes$new()
+  z5normalbayes$toJSON()
+
+  z5oprobitbayes <- zoprobitbayes$new()
+  z5oprobitbayes$toJSON()
+
+  z5poissonbayes <- zpoissonbayes$new()
+  z5poissonbayes$toJSON()
+
+  z5probitbayes <- zprobitbayes$new()
+  z5probitbayes$toJSON()
+
+  z5tobitbayes <- ztobitbayes$new()
+  z5tobitbayes$toJSON()
+
+  z5weibull <- zweibull$new()
+  z5weibull$toJSON()
+
+  z5logitsurvey <- zlogitsurvey$new()
+  z5logitsurvey$toJSON()
+
+  z5probitsurvey <- zprobitsurvey$new()
+  z5probitsurvey$toJSON()
+
+  z5gammasurvey <- zgammasurvey$new()
+  z5gammasurvey$toJSON()
+
+  z5normalsurvey <- znormalsurvey$new()
+  z5normalsurvey$toJSON()
+
+  z5poissonsurvey <- zpoissonsurvey$new()
+  z5poissonsurvey$toJSON()
+  
+  z5arima <- zarima$new()
+  z5arima$toJSON()
+
+  z5ar <- zar$new()
+  z5ar$toJSON()
+
+  z5ma <- zma$new()
+  z5ma$toJSON()
+
+  zeligmodels <- list(zelig5models = list("ls" = z5ls$ljson,
+                    "logit" = z5logit$ljson,
+                    "probit" = z5probit$ljson,
+                    "poisson" = z5poisson$ljson,
+                    "normal" = z5normal$ljson,
+                    "gamma" = z5gamma$ljson,
+                    "negbin" = z5negbin$ljson,
+                    "exp" = z5exp$ljson,
+                    "lognorm" = z5lognorm$ljson,
+                    "tobit" = z5tobit$ljson,
+                    "quantile" = z5quantile$ljson,
+                    "relogit" = z5relogit$ljson,
+                    "logitgee" = z5logitgee$ljson,
+                    "probitgee" = z5probitgee$ljson,
+                    "gammagee" = z5gammagee$ljson,
+                    "normalgee" = z5normalgee$ljson,
+                    "poissongee" = z5poissongee$ljson,
+                    "factorbayes" = z5factorbayes$ljson,
+                    "logitbayes" = z5logitbayes$ljson,
+                    "mlogitbayes" = z5mlogitbayes$ljson,
+                    "normalbayes" = z5normalbayes$ljson,
+                    "oprobitbayes" = z5oprobitbayes$ljson,
+                    "poissonbayes" = z5poissonbayes$ljson,
+                    "probitbayes" = z5probitbayes$ljson,
+                    "tobitbayes" = z5tobitbayes$ljson,
+                    "weibull" = z5weibull$ljson,
+                    "logitsurvey" = z5logitsurvey$ljson,
+                    "probitsurvey" = z5probitsurvey$ljson,
+                    "normalsurvey" = z5normalsurvey$ljson,
+                    "gammasurvey" = z5gammasurvey$ljson,
+                    "poissonsurvey" = z5poissonsurvey$ljson,
+                    "arima" = z5arima$ljson, 
+                    "ma" = z5ma$ljson,
+                    "ar" = z5ar$ljson))
+
+  # cat(toJSON(zeligmodels, pretty = TRUE), file = file.path("tools", "zelig5models.json"))
+  # file.copy(from = file.path("tools", "zelig5models.json"), to = file.path("inst", "JSON", "zelig5models.json"))
+
+  cat(toJSON(zeligmodels, pretty = TRUE), "\n", file = file.path("zelig5models.json"))
+  file.copy(from = file.path("zelig5models.json"), to = file.path("inst", "JSON", "zelig5models.json"), overwrite = TRUE)
+  file.remove(file.path("zelig5models.json"))
+
+  # cat(toJSON(zeligmodels, pretty = TRUE))
+  # j <- jsonlite::fromJSON(txt = readLines(file.path("..", "/JSON", "/zelig5models.json")))
+  
+  return(TRUE)
+}
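
A hedged usage note for the new file: createJSON() assumes it runs from the
package source tree (so inst/JSON/ exists) with the model classes loaded and
jsonlite available for toJSON(). For example:

    library(jsonlite)
    devtools::load_all("/path/to/Zelig")  # hypothetical source checkout
    setwd("/path/to/Zelig")
    createJSON()  # writes inst/JSON/zelig5models.json
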
diff --git a/R/describe.R b/R/describe.R
deleted file mode 100644
index 4f4d8b0..0000000
--- a/R/describe.R
+++ /dev/null
@@ -1,7 +0,0 @@
-#' Method to describe a model to Zelig
-#' @param ... parameters which are typically ignored
-#' @return a list to be processed by `as.description'
-#' @export describe
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-describe <- function(...)
-  UseMethod("describe")
diff --git a/R/describe.default.R b/R/describe.default.R
deleted file mode 100644
index 4589ff9..0000000
--- a/R/describe.default.R
+++ /dev/null
@@ -1,18 +0,0 @@
-#' Default describe function for an arbitrary model
-#' This method exists solely as a backup when an author does not contribute a
-#' 'describe' function for their model
-#' @usage \method{describe}{default}(...)
-#' @S3method describe default
-#' @param ... dummy parameters used purely for method dispatch. That is, the
-#'   parameters of the function should not be referenced specifically
-#' @return a list to be processed by \code{as.description}
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.default <- function(...) {
-  warning("The 'describe' method for this function is unspecified")
-  list(
-       authors = "Unknown Author",
-       year    = as.numeric(format(Sys.Date(), "%Y"))
-       )
-}
diff --git a/R/describe.zelig.R b/R/describe.zelig.R
deleted file mode 100644
index c89227a..0000000
--- a/R/describe.zelig.R
+++ /dev/null
@@ -1,13 +0,0 @@
-#' Get Description Object Used to Cite this Zelig Model
-#' @note The design of this function should be reevaluated, since 'description'
-#' objects are used exclusively internally. In particular, this method would
-#' be more useful to users as a 'cite' method.
-#' @usage \method{describe}{zelig}(object, ...)
-#' @S3method describe zelig
-#' @param object a 'zelig' object
-#' @param ... ignored parameters
-#' @return a 'description' object used internally to produce citation text
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-describe.zelig <- function(object, ...) {
-  append(list(model=object$name), NextMethod("describe"))
-}
diff --git a/R/description.R b/R/description.R
deleted file mode 100644
index 02720d4..0000000
--- a/R/description.R
+++ /dev/null
@@ -1,155 +0,0 @@
-#' Constructor for the 'description' class
-#'
-#' @param authors a character-vector of author names
-#' @param year a numeric specifying the year
-#' @param model a character-string specifying model name
-#' @param text a character-string specifying the title of the model. This
-#'   typically includes more exact information than 'model'. E.g., for the
-#'   'logit' the title 'Logistic Regression for Dichotomous Variables' would be
-#'   a suitable text parameter.
-#' @param url a character-string specifying the model's software page
-#' @param category deprecated until Dataverse bindings are reevaluated
-#' @return an object of type 'description'
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-description <- function(authors=c("Kosuke Imai", "Gary King", "Olivia Lau"),
-                        year=NULL, model="", text="", url="",
-                        category = NULL) {
-  # error-catching
-  if (!is.character(authors))
-    authors <- "Kosuke Imai, Gary King, and Olivia Lau"
-
-  else if (length(authors) > 1) {
-    # collapse author names if it is a character-vector bigger than 1
-    authors <- paste(paste(head(authors, -1), collapse=", "),
-                     ", and ",
-                     tail(authors, 1),
-                     sep = ""
-                     )
-  }
-
-  if (!is.numeric(year))
-    year <- as.numeric(format(Sys.Date(), "%Y"))
-
-  if (!is.character(model) || length(model) != 1) {
-    print(model)
-    stop("model must be a character-string")
-  }
-
-  if (length(text) > 1)
-    stop("text must be a character-vector of length 1")
-
-  if (is.null(url))
-    url <- "http://gking.harvard.edu/zelig"
-
-  if (!is.character(category))
-    category <- ""
-
-  if (length(url) > 1 || !is.character(url))
-    stop("url must be a character-vector of length 1")
-
-  # double back-up, even though this should be impossible now
-  authors <- ifelse(nchar(authors) > 0, authors, "NAMELESS AUTHOR")
-  year <- ifelse(!is.null(year), year, "UNKNOWN YEAR")
-  model <- ifelse(nchar(model) > 0, model, "UNNAMED MODEL")
-
-  # construct object
-  self <- list(authors = authors,
-               year    = year,
-               model   = model,
-               text    = text,
-               url     = url
-               )
-  class(self) <- "description"
-  self
-}
-
-
-#' Citation information for a 'description' object
-#' @param descr an object of type 'description'
-#' @return a character-string giving citation info
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-cite <- function(descr) {
-  #
-  if (inherits(descr, "list"))
-    descr <- as.description(descr)
-  else if (!inherits(descr, "description"))
-    descr <- description()
-
-  # 
-  url <- "http://gking.harvard.edu/zelig"
-
-  title <- if (is.null(descr$text))
-    descr$model
-  else
-    paste(descr$model, ": ", descr$text, sep="")
-
-  # quote
-  title <- paste('"', title, '"', sep="")
-
-  # construct string.  This should be done much more elegantly
-  # and with localization
-  str <- "How to cite this model in Zelig:\n  "
-  str <- paste(str, descr$authors, ". ", descr$year, ".\n  ", title, sep="")
-  str <- paste(str, "\n  in Kosuke Imai, Gary King, and Olivia Lau, ", sep="")
-  str <- paste(str, "\"Zelig: Everyone's Statistical Software,\"", sep="")
-  str <- paste(str, "\n  ", url, "\n", sep="")
-  str
-}
-
-
-#' Generic Method for Casting 'description' Objects
-#' 
-#' Convert the result of a call to the 'describe' method into an object 
-#' parseable by Zelig. Currently conversions only exist for lists and 
-#' description objects.
-#' @param descr an object to cast into an object of type 'description'
-#' @param ... parameters which are reserved for future Zelig revisions
-#' @return an object of type 'description'
-#' @export
-#' @seealso as.description.description as.description.list
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.description <- function(descr, ...)
-  UseMethod("as.description")
-
-
-#' description -> description
-#'
-#' Identity operation on a description object.
-#' @S3method as.description description
-#' @usage \method{as.description}{description}(descr, ...)
-#' @param descr an object of type 'description'
-#' @param ... ignored
-#' @return the same object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.description.description <- function(descr, ...)
-  descr
-
-
-#' list -> description
-#'
-#' Convert list into a description object.
-#' @usage \method{as.description}{list}(descr, ...)
-#' @S3method as.description list
-#' @param descr a list
-#' @param ... ignored
-#' @return an object of type 'description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-as.description.list <- function(descr, ...) {
-
-  text <- if (!is.null(descr$text))
-    descr$text
-  else if (!is.null(descr$description))
-    descr$description
-  else
-    NULL
-  
-  description(authors = descr$authors,
-              year    = descr$year,
-              model   = descr$model,
-              text    = text,
-              url     = descr$url,
-              category= descr$category
-              )
-}
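For reference, the constructor and cite() removed here composed as follows; a minimal sketch with illustrative author names (not a citation from the package):

    d <- description(authors = c("A. Author", "B. Author"),
                     year = 2011, model = "logit",
                     text = "Logistic Regression for Dichotomous Variables")
    cat(cite(d))
    # How to cite this model in Zelig:
    #   A. Author, and B. Author. 2011.
    #   "logit: Logistic Regression for Dichotomous Variables"
    #   in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"
    #   http://gking.harvard.edu/zelig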
diff --git a/R/exp.R b/R/exp.R
deleted file mode 100644
index d3ca1bc..0000000
--- a/R/exp.R
+++ /dev/null
@@ -1,140 +0,0 @@
-#' Interface between the Zelig Model exp and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param robust a boolean specifying whether to use robust error estimates
-#' @param cluster a vector describing the clustering of the data
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2exp <- function (formula, ..., robust = FALSE, cluster = NULL, data) {
-
-  loadDependencies("survival")
-
-  if (!(is.null(cluster) || robust))
-    stop("If cluster is specified, then `robust` must be TRUE")
-
-  # Add cluster term
-  if (robust || !is.null(cluster))
-    formula <- cluster.formula(formula, cluster)
-
-  # Return
-  z(
-    .function = "survreg",
-    formula = formula,
-    dist = "exponential",
-    robust = robust,
-    data = data,
-    ...
-    )
-}
-
-
-stratify.rqs <- function (obj) {
-  x <- vector("list", length(obj$tau))
-
-  for(i in 1:length(obj$tau)) {
-    xi <- obj
-
-    xi$coefficients <- xi$coefficients[, i]
-    xi$residuals <- xi$residuals[, i]
-    xi$tau <- xi$tau[i]
-    class(xi) <- "rq"
-
-    x[[i]] <- xi 
-  }
-
-  names(x) <- obj$tau
-  x
-}
-#' Param Method for the \code{exp} Zelig Model
-#' @note This method is used by the \code{param} Zelig model
-#' @usage \method{param}{exp}(obj, num, ...)
-#' @S3method param exp
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored parameters
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.exp <- function(obj, num=1000, ...) {
-  cov <- vcov(.object)
-  mu <- coef(.object)
-
-  # Return
-  list(
-       coef = mvrnorm(num, mu=mu, Sigma=cov),
-       linkinv = survreg.distributions[["exponential"]]$itrans
-       )
-}
-#' Compute quantities of interest for 'exp' Zelig models
-#' @usage \method{qi}{exp}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi exp
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#' though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#' interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.exp <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  linkinv <- linkinv(param)
-
-  # Compute Expected Values for the "exp" Regression
-  # @param simulations 
-  # @param x
-  # @return a matrix
-  compute.ev <- function (simulations, x) {
-
-    if (is.null(x) || is.na(x))
-      # If there are missing explanatory variables, ignore them
-      return(NA)
-
-    # Compute eta, which is the "flattened" prediction.
-    # This value must be *inverted* to be restored to the true "observed" value
-    eta <- simulations %*% t(x)
-
-    # Return as a matrix, since this should be a vector at this point.
-    as.matrix(apply(eta, 2, linkinv))
-  }
-
-
-  # Compute Predicted Values
-  compute.pv <- function (ev, param) {
-    rexp(length(ev), rate = 1/ev)
-  }
-
-
-  # Compute expected values for X and X1
-  ev1 <- compute.ev(coef(param), x)
-  ev2 <- compute.ev(coef(param), x1)
-
-  # Compute Predicted values for X and X1
-  pr1 <- compute.pv(ev1, x)
-  pr2 <- compute.pv(ev2, x1)
-
-  # Return quantities of Interest
-  list("Expected Values: E(Y|X)"  = ev1,
-       "Expected Values: E(Y|X1)" = ev2,
-       "Predicted Values: Y|X"    = pr1,
-       "Predicted Values: Y|X1"   = pr2,
-       "First Differences: E(Y|X1) - E(Y|X)" = ev2 - ev1
-       )
-}
-#' Describe an ``exp'' model to Zelig
-#' @usage \method{describe}{exp}(...)
-#' @S3method describe exp
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-describe.exp <- function(...) {
-  list(
-       authors = c("Olivia Lau", "Kosuke Imai", "Gary King"),
-       year = 2011,
-       category = "bounded",
-       text = "Exponential Regression for Duration Dependent Variables"
-       )
-}
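The quantity-of-interest machinery above is ordinary matrix algebra: each simulated coefficient vector is multiplied by the covariate profile and pushed through the inverse link (exp for the exponential model). A self-contained sketch with made-up numbers:

    set.seed(1)
    simulations <- matrix(rnorm(10, mean = 1, sd = 0.1), nrow = 5)  # 5 draws of 2 coefficients
    x <- matrix(c(1, 0.5), nrow = 1)                                # one covariate profile
    eta <- simulations %*% t(x)                                     # 5 x 1 linear predictors
    ev  <- exp(eta)                                                 # expected durations
    pr  <- rexp(length(ev), rate = 1 / ev)                          # predicted durations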
diff --git a/R/factor.bayes.R b/R/factor.bayes.R
deleted file mode 100644
index cf55d4b..0000000
--- a/R/factor.bayes.R
+++ /dev/null
@@ -1,55 +0,0 @@
-#' @export
-zelig2factor.bayes <- function (
-                                formula, 
-                                factors = 2,
-                                burnin = 1000, mcmc = 20000, 
-                                verbose=0, 
-                                ..., 
-                                data
-                                ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  if (missing(verbose))
-    verbose <- round((mcmc + burnin)/10)
-
-  if (factors < 2)
-    stop("Number of factors needs to be at least 2")
-
-  x <- as.matrix(model.response(model.frame(formula, data=data, na.action=NULL)))
-
-  list(
-       .function = "MCMCfactanal",
-       .hook = "McmcHookFactor",
-
-       formula = formula,
-       x = x,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-       data   = data,
-       factors = factors,
-       ...
-       )
-}
-
-#' @S3method param factor.bayes
-param.factor.bayes <- function (...) {
-}
-
-#' @S3method param factor.bayes
-qi.factor.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-  stop('There is no qi function for the "factor.bayes" model')
-  list(
-       "Expected Value: E(Y|X)" = NA
-       )
-}
-
-#' @S3method describe factor.bayes
-describe.factor.bayes <- function(...) {
-  list(
-       authors = c("Ben Goodrich", "Ying Lu"),
-       text = "Bayesian Factor Analysis",
-       year = 2013
-       )
-}
diff --git a/R/gamma.R b/R/gamma.R
deleted file mode 100644
index 4b6e86b..0000000
--- a/R/gamma.R
+++ /dev/null
@@ -1,131 +0,0 @@
-#' Interface between gamma model and Zelig
-#'
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2gamma <- function(formula, ..., data)
-  z(
-    glm,
-    # .hook = "robust.glm.hook",
-
-    formula = formula,
-    family  = Gamma(),
-    model   = F,
-    data    = data
-    )
-#' param method for the `gamma' Zelig model
-#'
-#' Return parameter estimates for the ``gamma'' GLM in Zelig.
-#' @usage \method{param}{gamma}(obj, num, ...)
-#' @S3method param gamma
-#' @param obj a `zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored parameters
-#' @return a list to be cast as a `parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.gamma <- function(obj, num = 1000, ...) {
-  # Extract shape parameters, which will be used to simulate the ancillary
-  # parameters
-  shape <- gamma.shape(.object)
-
-  # Simulate ancillary parameters
-  alpha <- rnorm(n=num, mean=shape$alpha, sd=shape$SE)
-
-  #
-  list(
-       simulations  = mvrnorm(n=num, mu=coef(.object), Sigma=vcov(.object)),
-       alpha = alpha,
-       family = Gamma()
-       )
-}
-#' Compute quantities of interest for 'gamma' Zelig models
-#' @usage \method{qi}{gamma}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi gamma
-#' @param obj a \code{zelig} object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#' though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#' interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.gamma <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-  # Get parameters
-  shape <- gamma.shape(.fitted)
-  alpha <- rnorm(num, mean = shape$alpha, sd = shape$SE)
-  coef <- coef(param)
-
-
-  # Compute eta
-  eta <- coef %*% t(x)
-
-  # Compute theta (apply inverse)
-  theta <- matrix(1/eta, nrow = nrow(coef))
-
-  ev <- theta
-  pr <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
-
-  # Default to not available
-  ev1 <- pr1 <- fd <- NA
-
-  # Compute predicted values
-  for (i in 1:nrow(ev))
-    pr[i,] <- rgamma(
-                     ncol(ev),
-                     shape = alpha[i],
-                     scale = theta[i,]/alpha[i]
-                     )
-
-  # if x1 is not NULL, run more simulations
-  # ...
-
-  if (!is.null(x1)) {
-
-    eta1 <- coef %*% t(x1)
-    ev1 <- theta1 <- matrix(1/eta1, nrow = nrow(coef))
-    pr1 <- matrix(NA, nrow = nrow(theta1), ncol = ncol(theta1))
-
-    for (i in 1:nrow(ev1))
-      pr1[i, ] <- rgamma(ncol(ev1), shape = alpha[i], scale = theta1[i,]/alpha[i])
-
-    fd <- ev1 - ev
-  }
-
-  # Return
-  list("Expected Values: E(Y|X)"  = ev,
-       "Expected Values: E(Y|X1)" = ev1,
-       "Predicted Values: Y|X"    = pr,
-       "Predicted Values: Y|X1"   = pr1,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-#' Describe the \code{gamma} model to Zelig
-#' @usage \method{describe}{gamma}(...)
-#' @S3method describe gamma
-#' @param ... ignored parameters
-#' @return a list of important information
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.gamma <- function(...) {
-  # parameters object
-  parameters <- list(lambda = list(
-                       equations = c(1, 1),
-                       tags.allowed = FALSE,
-                       dep.var = TRUE,
-                       exp.var = TRUE
-                       )
-                     )
-
-  # return list
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2007,
-       category = "bounded",
-       parameters = parameters,
-       text = "Gamma Regression for Continuous, Positive Dependent Variables"
-       )
-}
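qi.gamma follows the same template with the inverse link 1/eta and a Gamma draw parameterized so its mean is theta (shape * scale = alpha * theta/alpha = theta). A standalone sketch, assuming MASS is available and using illustrative parameter values:

    library(MASS)
    set.seed(2)
    coef  <- mvrnorm(1000, mu = c(0.8, 0.1), Sigma = diag(0.01, 2))  # illustrative draws
    x     <- matrix(c(1, 2), nrow = 1)
    theta <- 1 / (coef %*% t(x))                 # expected values under the inverse link
    alpha <- rnorm(1000, mean = 2, sd = 0.1)     # illustrative shape draws
    pr    <- rgamma(1000, shape = alpha, scale = as.vector(theta) / alpha)
    c(mean(theta), mean(pr))                     # nearly equal, since E[Y] = theta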
diff --git a/R/gamma.gee.R b/R/gamma.gee.R
deleted file mode 100644
index a3ea284..0000000
--- a/R/gamma.gee.R
+++ /dev/null
@@ -1,161 +0,0 @@
-#' Interface between the Zelig Model gamma.gee and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param id a character-string specifying the column of the data-set to use
-#'   for clustering
-#' @param robust a logical specifying whether to robustly or naively compute
-#'   the covariance matrix. This parameter is ignored in the \code{zelig2}
-#'   method, and instead used in the \code{robust.hook} function, which
-#'   executes after the call to the \code{gee} function
-#' @param ... ignored parameters
-#' @param R a square-matrix specifying the correlation
-#' @param corstr a character-string specifying the correlation structure
-#' @param data a data.frame 
-#' @return a list specifying the call to the external model
-#' @export
-zelig2gamma.gee <- function (formula, id, robust = FALSE, ..., R = NULL, corstr = "independence", data) {
-
-  loadDependencies("gee")
-
-  if (corstr == "fixed" && is.null(R))
-    stop("R must be defined")
-
-  # if id is a valid column-name in data, then we just need to extract the
-  # column and re-order the data.frame and cluster information
-  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
-    id <- data[, id]
-    data <- data[order(id), ]
-    id <- sort(id)
-  }
-
-  z(
-    .function = gee,
-    .hook = robust.gee.hook,
-
-    formula = formula,
-    id = id,
-    corstr = corstr,
-    R = R,
-    family  = Gamma,
-    data = data,
-    ...
-    )
-}
-
-#' @S3method param gamma.gee
-param.gamma.gee <- function(obj, num=1000, ...) {
-
-  # Extract means to compute maximum likelihood
-  mu <- coef(.fitted)
-
-  # Extract covariance matrix to compute maximum likelihood
-  Sigma <- .fitted$naive.variance
-
-
-  #
-  list(
-       coef = mvrnorm(num, mu, Sigma),
-       fam = Gamma()
-       )
-}
-
-#' @S3method qi gamma.gee
-qi.gamma.gee <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  coef <- coef(param)
-  inverse <- linkinv(param)
-
-  eta1 <- coef %*% t(x)
-  ev1 <- theta1 <- matrix(inverse(eta1), nrow=num)
-
-  # default to NA
-  ev2 <- fd <- NA
-
-  if (!is.null(x1)) {
-    eta2 <- coef %*% t(x1)
-    ev2 <- theta1 <- matrix(inverse(eta2), nrow=num)
-
-    fd <- ev2 - ev1
-  }
-
-  list(
-       "Expected Values (for x): E(Y|X)"   = ev1,
-       "Expected Values (for x1): E(Y|X1)" = ev2,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-
-#' @S3method describe gamma.gee
-describe.gamma.gee <- function(...) {
-  list(
-       authors = "Patrick Lam",
-       text = "General Estimating Equation for Gamma Regression",
-       year = 2011
-       )
-}
-
-# Remove Negative Simulations from Gamma GEE Parameter Simulations
-# @param object a \code{zelig} object
-# @param x a \code{setx} object
-# @param x1 a \code{setx} object
-# @param bootstrap a logical specifying whether the model is using a boot function
-# @param bootfn the boot function
-# @param data a data.frame used to simulated parameters
-# @param param the original \code{param} object
-# @param num an integer specifying the number of simulations to produce
-clean.up.gamma.gee <- function(object, x, x1=NULL,
-                            bootstrap = FALSE, bootfn = NULL,
-                            data = NULL,
-                            param, num = 1000) {
-  coef <- coef(param)
-  eta <- coef %*% t(x)
-
-  if(!is.null(x1))
-    eta1 <- coef %*% t(x1)
-  else
-    eta1 <- NULL
-
-  # good.params: keep only the parameter draws whose expected values are positive
-  good.params <- function(par, x, x1=NULL) {
-    eta <- par %*% t(x)
-    if(!is.null(x1)) {
-      eta1 <- par %*% t(x1)
-      pos <- which(eta>0 & eta1>0)
-    }
-    else {
-      pos <- which(apply(eta > 0,1,all))
-    }
-
-    matrix(par[pos,], nrow=length(pos), ncol=ncol(par))
-  }
-
-
-
-  if (length(which(apply(eta <= 0, 1, any))) > 0 | (!is.null(eta1) & any(eta1 <= 0))) {
-    warning("Negative expected values in simulations. Rejection sampling method used.")
-    sum.neg <- length(which(apply(eta <= 0, 1, any)))
-    coef <- good.params(par = coef, x = x, x1 = x1)
-    counter <- 1
-    while (sum.neg > 0) {
-      if (!bootstrap)
-        new.coef <- matrix(mvrnorm(sum.neg, mu = coef(object), Sigma = vcov(object)), nrow = sum.neg)
-      # else
-      #   new.coef <- matrix(boot(data, bootfn, R = sum.neg, object = object)$t, nrow = sum.neg)
-
-      new.coef <- good.params(par = new.coef, x = x, x1 = x1)
-      coef <- rbind(coef, new.coef)
-      sum.neg <- num - nrow(coef)
-      counter <- counter + 1
-      if (counter == 200)
-        warning("Suitable parameters not found after 200 iterations of rejection sampling. Iterations will continue, but choosing another x is suggested for non-conditional prediction models.")
-      if (counter == 2000)
-        stop("Rejection sampling stopped after 2000 iterations. Please choose another x value.")
-    }
-  }
-
-  #
-  list(
-       coefficients=coef,
-       fam=Gamma(),
-       linkinv = Gamma()$linkinv
-       )
-}
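The loop above is rejection sampling: discard coefficient draws whose linear predictors are not strictly positive, and redraw until the requested number of simulations survives. A compact sketch of the idea, assuming MASS and illustrative values:

    library(MASS)
    set.seed(3)
    mu <- c(0.5, 0.2); Sigma <- diag(0.25, 2)
    x  <- matrix(c(1, 1), nrow = 1)
    draws <- matrix(numeric(0), ncol = 2)
    while (nrow(draws) < 1000) {
      n    <- 1000 - nrow(draws)
      cand <- matrix(mvrnorm(n, mu, Sigma), nrow = n)
      keep <- as.vector(cand %*% t(x)) > 0      # reject non-positive expected values
      draws <- rbind(draws, cand[keep, , drop = FALSE])
    }
    nrow(draws)  # exactly 1000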
diff --git a/R/gamma.survey.R b/R/gamma.survey.R
deleted file mode 100644
index e972497..0000000
--- a/R/gamma.survey.R
+++ /dev/null
@@ -1,176 +0,0 @@
-#' @export
-zelig2gamma.survey <- function(
-                               formula,
-                               weights=NULL, 
-                               ids=NULL,
-                               probs=NULL,
-                               strata = NULL,  
-                               fpc = NULL,
-                               nest = FALSE,
-                               check.strata = !nest,
-                               repweights = NULL,
-                               type,
-                               combined.weights = FALSE,
-                               rho = NULL,
-                               bootstrap.average = NULL, 
-                               scale = NULL,
-                               rscales = NULL,
-                               fpctype = "fraction",
-                               return.replicates=FALSE,
-                               na.action = "na.omit",
-                               start = NULL,
-                               etastart = NULL, 
-                               mustart = NULL,
-                               offset = NULL, 	      		
-                               model1 = TRUE,
-                               method = "glm.fit",
-                               x = FALSE,
-                               y = TRUE,
-                               contrasts = NULL,
-                               design = NULL,
-                               link = "inverse",
-                               data,
-                               ...
-                               ) {
-
-  loadDependencies("survey")
-
-  if (is.null(ids))
-    ids <- ~1
-
-  # the following lines designate the design
-  # NOTE: nothing truly special goes on here;
-  #       the below just makes sure the design is created correctly
-  #       for whether or not the replication weights are set
-  design <- if (is.null(repweights)) {
-    svydesign(
-              data=data,
-              ids=ids,
-              probs=probs,
-              strata=strata,
-              fpc=fpc,
-              nest=nest,
-              check.strata=check.strata,
-              weights=weights
-              )
-  }
-
-  else {
-    # Using the "z" function stores this implicitly in a namespace
-    .survey.prob.weights <- weights
-    
-    # 
-    svrepdesign(
-                data=data,
-                repweights=repweights, 	
-                type=type,
-                weights=weights,
-                combined.weights=combined.weights, 
-                rho=rho,
-                bootstrap.average=bootstrap.average,
-                scale=scale,
-                rscales=rscales,
-                fpctype=fpctype,
-                fpc=fpc
-                )
-  }
-
-  z(.function = svyglm,
-    formula = formula,
-    design  = design,
-    family  = Gamma()
-    )
-}
-
-#' @S3method param gamma.survey
-param.gamma.survey <- function(obj, num=1000, ...) {
-  shape <- gamma.shape(.fitted)
-
-  list(
-       # .fitted is the fitted model object
-       simulations = mvrnorm(num, coef(.fitted), vcov(.fitted)),
-       alpha = rnorm(num, shape$alpha, shape$SE),
-       fam   = Gamma()
-       )
-}
-
-#' @S3method qi gamma.survey
-qi.gamma.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-  model <- GetObject(obj)
-
-  coef <- coef(param)
-  alpha <- alpha(param)
-
-  eta <- coef %*% t(x)
-
-  link.inverse <- linkinv(param)
-
-  theta <- matrix(link.inverse(eta), nrow=nrow(coef))
-
-  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))
-
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-
-
-  ev <- theta
-
-  for (i in 1:nrow(ev)) {
-    pr[i,] <- rgamma(
-                     n     = length(ev[i,]),
-                     shape = alpha[i],
-                     scale = theta[i,]/alpha[i]
-                     )
-  }
-
-
-  # ensure these are no-show
-  pr1 <- ev1 <- fd <- NA
-
-  
-  # if x1 is available
-  if (!is.null(x1)) {
-    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)), nrow(coef))
-    fd <- ev1-ev
-  }
-
-
-  # ensure these are no-show
-  att.pr <- att.ev <- NA
-
-
-  # I have no clue if this even works
-  if (!is.null(y)) {
-
-    yvar <- matrix(
-                   rep(y, nrow(param)),
-                   nrow = nrow(param),
-                   byrow = TRUE
-                   )
-    
-    tmp.ev <- yvar - ev
-    tmp.pr <- yvar - pr
-
-    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(param))
-    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(param))
-  }
-
-
-  list(
-       "Expected Values: E(Y|X)" = ev,
-       "Expected Values for (X1): E(Y|X1)" = ev1,
-       "Predicted Values: Y|X" = pr,
-       "Predicted Values (for X1): Y|X1" = pr1,
-       "First Differences E(Y|X1)-E(Y|X)" = fd,
-       "Average Treatment Effect: Y-EV" = att.ev,
-       "Average Treatment Effect: Y-PR" = att.pr
-       )
-}
-
-#' @S3method describe gamma.survey
-describe.gamma.survey <- function(...) {
-  list(
-       authors = "Nicholas Carnes",
-       year = 2008,
-       description = "Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables"
-       )
-}
diff --git a/R/get.package.R b/R/get.package.R
deleted file mode 100644
index b2bc1fb..0000000
--- a/R/get.package.R
+++ /dev/null
@@ -1,94 +0,0 @@
-#' Find the Zelig package that a particular model belongs to
-#'
-#' This method is used to help transition Zelig v3.5 users to Zelig v4
-#' @param model a character-string specifying a Zelig model
-#' @param quiet a logical indicating whether to display messages and warnings
-#' @param ... ignored parameters
-#' @return NA or a character-string specifying the name of the package which 
-#' contains a specific model
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-get.package <- function (model, quiet=TRUE, ...) {
-
-  # Bad variable-types return NULL
-  if (is.null(model))
-    return(NA)
-
-  else if (!is.character(model))
-    return(NA)
-
-  else if (length(model) != 1)
-    return (NA)
-
-  # Create list (auto-generated using another script).
-  # This is a copy-and-paste of that script's output
-  descr <- c(
-    gamma = "Zelig",
-    logit = "Zelig",
-    ls = "Zelig",
-    negbinom = "Zelig",
-    normal = "Zelig",
-    poisson = "Zelig",
-    probit = "Zelig",
-
-    gamma.gee = "Zelig",
-    logit.gee = "Zelig",
-    normal.gee = "Zelig",
-    poisson.gee = "Zelig",
-    probit.gee = "Zelig",
-
-    factor.bayes = "Zelig",
-    logit.bayes = "Zelig",
-    mlogit.bayes = "Zelig",
-    normal.bayes = "Zelig",
-    oprobit.bayes = "Zelig",
-    poisson.bayes = "Zelig",
-    probit.bayes = "Zelig",
-
-    aov = "Zelig",
-    sur = "Zelig",
-    twosls = "Zelig",
-    threesls = "Zelig",
-
-    blogit = "ZeligChoice",
-    bprobit = "ZeligChoice",
-    mlogit = "ZeligChoice",
-    mprobit = "ZeligChoice",
-    ologit = "ZeligChoice",
-    oprobit = "ZeligChoice",
-
-
-
-    logit.gam = "ZeligGAM",
-    normal.gam = "ZeligGAM",
-    poisson.gam = "ZeligGAM",
-    probit.gam = "ZeligGAM",
-
-    gamma.mixed = "ZeligMultilevel",
-    logit.mixed = "ZeligMultilevel",
-    ls.mixed = "ZeligMultilevel",
-    normal.mixed = "ZeligMultilevel",
-    poisson.mixed = "ZeligMultilevel",
-    probit.mixed = "ZeligMultilevel",
-
-    gamma.survey = "ZeligSurvey",
-    logit.survey = "ZeligSurvey",
-    normal.survey = "ZeligSurvey",
-    poisson.survey = "ZeligSurvey",
-    probit.survey = "ZeligSurvey",
-
-    cloglog.net = "ZeligNetwork",
-    gamma.net = "ZeligNetwork",
-    logit.net = "ZeligNetwork",
-    ls.net = "ZeligNetwork",
-    negbinom.net = "ZeligNetwork",
-    normal.net = "ZeligNetwork",
-    poisson.net = "ZeligNetwork",
-    probit.net = "ZeligNetwork"
-  )
-
-  if (model %in% names(descr))
-    descr[[model]]
-
-  else
-    NA
-}
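Lookup is a simple table match; for example:

    get.package("logit")         # "Zelig"
    get.package("ologit")        # "ZeligChoice"
    get.package("gamma.mixed")   # "ZeligMultilevel"
    get.package("no.such.model") # NA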
diff --git a/R/getPredictorTerms.R b/R/getPredictorTerms.R
deleted file mode 100644
index f7e9723..0000000
--- a/R/getPredictorTerms.R
+++ /dev/null
@@ -1,45 +0,0 @@
-#' Get Predictor Terms from Zelig-style Formulae
-#'
-#' This function extracts the predictor terms from a Zelig-style object.
-#' @note This function is used exclusively in the development of Zelig-core.
-#' @param x a Zelig-style formula ('formula' or 'list')
-#' @param ... ignored parameters
-#' @return a character-vector or NA
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-getPredictorTerms <- function (x, ...) {
-  # The following functions are unsafe for general input, so they are being
-  # kept as nested functions.
-
-  # Extract "predictor" terms from a formula
-  # @param x a formula
-  # @param ... ignored parameters
-  # @return a character-vector specifying the predictor terms
-  # @author Matt Owen
-  extractFromFormula <- function (form, ...) {
-    TERMS <- terms(form)
-    attr(TERMS, "term.labels")
-  }
-
-  # Extract "predictor" terms from a list of formulae
-  # @param x a list
-  # @param ... ignored parameters
-  # @return a character-vector specifying the predictor terms
-  # @author Matt Owen
-  extractFromList <- function (x, ...) {
-    as.vector(unlist(Map(extractFromFormula, x)))
-  }
-
-  # Beginning of work for function
-  if (is.list(x))
-    unique(extractFromList(x))
-
-  else if ("formula" %in% class(x))
-    unique(extractFromFormula(x))
-
-  else {
-    warning("The model formula must either ",
-            "be a list of formula to work properly")
-    NA
-  }
-}
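The extraction reduces to terms() and its 'term.labels' attribute; for example:

    getPredictorTerms(y ~ x1 + log(x2))             # "x1"  "log(x2)"
    getPredictorTerms(list(y1 ~ x1, y2 ~ x1 + x2))  # "x1"  "x2"
    attr(terms(y ~ x1 + log(x2)), "term.labels")    # the underlying mechanism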
diff --git a/R/getResponseTerms.R b/R/getResponseTerms.R
deleted file mode 100644
index 0c83a8a..0000000
--- a/R/getResponseTerms.R
+++ /dev/null
@@ -1,10 +0,0 @@
-#' Get Response Terms from a Zelig-style Formula
-#'
-#' This method acquires the response variables from Zelig-style input.
-#' @param x a formula or list of formulae
-#' @param ... ignored parameters
-#' @return a character-vector specifying the response terms in this formula
-#' @export
-getResponseTerms <- function (x, ...) {
-  UseMethod("getResponseTerms")
-}
diff --git a/R/getResponseTerms.formula.R b/R/getResponseTerms.formula.R
deleted file mode 100644
index 20eeaa2..0000000
--- a/R/getResponseTerms.formula.R
+++ /dev/null
@@ -1,130 +0,0 @@
-#' Get Response Terms from a Standard Formula
-#'
-#' This method gets the response terms from a standard formula
-#' @usage
-#' \method{getResponseTerms}{formula}(x, ..., single.only=FALSE, duplicates=TRUE)
-#' @param x a formula
-#' @param ... ignored parameters
-#' @param single.only a logical specifying whether 'cbind' or 'list' keywords
-#' are allowed
-#' @param duplicates a logical specifying whether the returned character-vector
-#' may contain duplicate entries
-#' @return a character-vector specifying the response terms of the formula
-#' @S3method getResponseTerms formula
-#' @author Matt Owen
-getResponseTerms.formula <- function (x, ..., single.only=FALSE, duplicates=TRUE)
-{
-  # Handle formulae that are missing a left- or right-hand side
-  handle.formula.err <- function (e) {
-    message("\n\n")
-    message("The formula ", x, " seems to have no dependent variables")
-    stop("The formula for the ")
-  }
-
-  rhs <- tryCatch(x[[3]], error = handle.formula.err)
-  lhs <- tryCatch(x[[2]], error = handle.formula.err)
-
-  # Response terms are always specified in the left-hand side of the equation
-  if (is.name(lhs)) {
-    # If the lhs is a name, this implies it's a single variable with no function
-    # applied to it. Thus, it's a term.
-    return(tryCatch(
-                    callToString(lhs),
-                    error = function (e) as.character(lhs)
-           ))
-  }
-
-  # Otherwise, it is either a function being applied or the keywords "cbind" or
-  # "list"
-  op <- callToString(lhs[[1]])
-
-  if (op %in% c("cbind", "list")) {
-
-    if (single.only) {
-      # If only single outcome response terms are allowed, then 'cbind' and
-      # 'list' cannot be used.
-      warning("'cbind' and 'list' may not be used ",
-              "in this formula specification.")
-      return(vector("character", 0))
-    }
-
-    # If it is one of the keywords, we extract these terms individually
-    lis <- as.list(lhs[-1])
-    lis <- unlist(Map(callToString, lis))
-
-    if (!duplicates)
-      # If duplicates flag is FALSE, remove all duplicate entries
-      lis <- unique(lis)
-
-    # Remove all empty strings and return
-    Filter(nchar, lis)
-  }
-
-  else {
-    # Otherwise, we can treat them as one single term. That is the formula:
-    #   x + y ~ 1
-    # will have a single response term:
-    #   x + y
-    callToString(lhs)
-  }
-}
-
-
-
-#' Get Response Terms from a ``Formula'' Object
-#'
-#' This method gets the response terms from a ``Formula'' Object
-#' @rdname getResponseTerms.Formula-not-formula
-#' @aliases getResponse.Formula
-#' @usage
-#' \method{getResponseTerms}{Formula}(x, ..., single.only=FALSE, duplicates=TRUE)
-#' @param x a formula
-#' @param ... ignored parameters
-#' @param single.only a logical specifying whether 'cbind' or 'list' keywords
-#' are allowed
-#' @param duplicates a logical specifying whether the returned character-vector
-#' may contain duplicate entries
-#' @return a character-vector specifying the response terms of the formula
-#' @S3method getResponseTerms Formula
-#' @author Matt Owen
-getResponseTerms.Formula <- function (x, ..., single.only=FALSE, duplicates=TRUE)
-{
-  # Create an empty list
-  list.formula <- list()
-
-  # This loop goes through all the response and predictor terms and creates
-  # a "Zelig-style" list based on them. This is so we can extract response
-  # and predictor terms with "getResponseTerms" and "getPredictorTerms" in a
-  # manageable way!
-  for (resp in attr(x, "lhs")) {
-    # Iterate through all response variables
-
-    for (pred in attr(x, "rhs")) {
-      # Iterate through all predictor variables
-
-      # Append response variable and predictor terms.
-      # "ccc" is probably going to be the convention for a call object in
-      # Zelig models, since "CALL", "call", and "Call" all seem too similar
-      # to "call".
-      ccc <- call("~", resp, pred)
-
-      # Cast from a "call" object to a "formula" object
-      ccc <- as.formula(ccc)
-
-      # Append to list
-      list.formula <- append(list.formula, ccc)
-
-    }
-  }
-  
-  # Important to send 'single.only'/'duplicates' into this function
-  resp <- getResponseTerms(list.formula, ..., single.only = single.only, duplicates = duplicates)
-
-  # Apply unique only if 'duplicates' is FALSE
-  # This ensures the list has the expected properties
-  if (duplicates)
-    resp
-
-  else
-    unique(resp)
-}
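Concretely, the three branches of the formula method behave as follows:

    getResponseTerms(cbind(y1, y2) ~ x)  # "y1" "y2"  (keyword: extract each term)
    getResponseTerms(y ~ x)              # "y"        (bare name)
    getResponseTerms(y1 + y2 ~ x)        # "y1 + y2"  (a single composite term)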
diff --git a/R/getResponseTerms.list.R b/R/getResponseTerms.list.R
deleted file mode 100644
index 8233f33..0000000
--- a/R/getResponseTerms.list.R
+++ /dev/null
@@ -1,29 +0,0 @@
-#' Get Response Terms from a List-style Formula
-#'
-#' This method gets the response terms from a standard formula
-#' @usage \method{getResponseTerms}{list}(x, ...)
-#' @param x a list of formulae
-#' @param ... ignored parameters
-#' @return a character-vector specifying the response terms of the formula
-#' @S3method getResponseTerms list
-#' @author Matt Owen
-getResponseTerms.list <- function (x, ...) {
-  if (! all(unlist(Map(is.formula, x)))) {
-    # If not all the elements are formulae, then we should strip them from 'x'
-    warning("All non-formula will be removed from this list.")
-
-    x <- Filter(is.formula, x)
-  }
-
-  if (length(x) == 0)
-    # Zero-sized lists will have no available response terms, and should thus
-    # return a zero-length character vector. Note this is intended to ensure
-    # the result of 'getResponseTerms' is always a character-string.
-    vector("character", 0)
-
-  else
-    # Get response terms of each element of 'x',
-    # then transform the list into a vector, which should always be flat, since
-    # getResponseTerms should always return a character-string
-    unique(unlist(Map(getResponseTerms, x, single.only=TRUE)))
-}
diff --git a/R/help.zelig.R b/R/help.zelig.R
deleted file mode 100644
index 577a550..0000000
--- a/R/help.zelig.R
+++ /dev/null
@@ -1,65 +0,0 @@
-#' Help system for Zelig models
-#' @param ... the help files to look-up
-#' @return results of calling the specific help function
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-help.zelig <- function (...) {
-  driver <- match.call()
-  driver <- as.character(driver)
-  name <- NULL
-
-  if (length(driver) > 1) {
-    name <- driver[2]
-  } else {
-    print(do.call("help", list(package = "Zelig"), envir = parent.frame()))
-    return(invisible(NULL))
-  }
-
-  if (name == "models") {
-    print(do.call("vignette", list(package = "Zelig")))
-    return(invisible(NULL))
-  }
-
-  filesPDF <- NULL
-
-  helpfile <- try(system.file("Meta", "vignette.rds", package = "Zelig"))
-
-  if (helpfile != "") {
-    helpMtrx <- readRDS(helpfile)
-    ix <- grep("[pP][dD][fF]", colnames(helpMtrx))
-    if (length(ix))
-      filesPDF <- helpMtrx[, ix]
-
-    if (length(filesPDF) && length(name)) {
-      fl <- paste("^", name, ".pdf$", sep = "")
-      ix <- grep(fl, filesPDF)
-      if (length(ix)) {
-        file <- filesPDF[ix]
-        print(do.call("vignette", c(list(topic = name), list(package = "Zelig"))))
-        return(invisible(list()))
-      }
-    }
-  }
-
-  helpfile <- try(system.file("Meta", "hsearch.rds", package = "Zelig"))
-
-  fileshtml <- NULL
-  if (helpfile != "") {
-    helpMtrx <- readRDS(helpfile)
-    fileshtml <- helpMtrx[[2]][, "Aliases"]
-    ix <- grep("url$", fileshtml)
-    if (length(ix))
-      fileshtml <- fileshtml[-ix]
-    if (length(fileshtml) && length(name)) {
-      ix <- grep(name, fileshtml)
-      if (length(ix)) {
-        print(do.call("help", c(list(as.name(name)), list(package = "Zelig")), envir = parent.frame()))
-        return(invisible(NULL))
-      }
-    }
-  }
-
-  ## message("Not valid input... Showing package description")
-  do.call("help", c(list("Zelig"), list(package = "Zelig")), envir = parent.frame())
-}
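Typical calls, per the branches above:

    help.zelig()        # package-level help
    help.zelig(models)  # list the package vignettes
    help.zelig(logit)   # PDF vignette if present, else the help page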
diff --git a/R/ignore.R b/R/ignore.R
deleted file mode 100644
index a795b8c..0000000
--- a/R/ignore.R
+++ /dev/null
@@ -1,22 +0,0 @@
-#' Constructor for the 'ignore' class
-#'
-#' This class is included for future use, and is currently not used in any
-#' Zelig model. It is designed for use with zelig2* functions
-#' @param default default value
-#' @param type ignored parameter
-#' @return an 'ignore' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-ignore <- function (default = NULL, type = "no pass") {
-
-  # store information, set class, and return
-  self <- list(
-               default = default,
-               type    = type
-               )
-  class(self) <- "ignore"
-  self
-}
diff --git a/R/is.formula.R b/R/is.formula.R
deleted file mode 100644
index 2b1c1b8..0000000
--- a/R/is.formula.R
+++ /dev/null
@@ -1,9 +0,0 @@
-#' Whether an Object is a Formula
-#' 
-#' This is a boolean-check to see whether an object is a formula.
-#' @note This will not be shared in the Zelig/ZeligFormulae namespace.
-#' @param x an object
-#' @return a logical specifying whether an object is a formula
-#' @author Matt Owen
-is.formula <- function (x)
-  "formula" %in% class(x)
diff --git a/R/list.depth.R b/R/list.depth.R
deleted file mode 100644
index a0ba415..0000000
--- a/R/list.depth.R
+++ /dev/null
@@ -1,35 +0,0 @@
-#' Count the Depth of a List Object
-#'
-#' This function recursively computes the depth of a list object. That is, it
-#' determines how many layers or levels exist within the object.
-#' @note This function is used internally by Zelig.
-#' @param obj a vector or list object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-list.depth <- function (obj) {
-
-  # Stop-recursing conditions
-
-  if (length(obj) == 0)
-    return(0)
-
-  else if (is.atomic(obj))
-    # Atomic vectors can only have one level
-    return(1)
-
-  else if (!is.list(obj))
-    # Non-list, non-atomic objects (e.g. functions) count as a single level
-    return(1)
-
-  # Produce a list of integers, specifying the depth of each element
-  results <- Map(list.depth, obj)
-
-  # Ensure that the result is a non-list
-  results <- unlist(results)
-
-  # Find the maximum, ensuring that the value is neither negative nor -Inf
-  max.depth <- max(results, 0)
-
-  # Add one for the level that we are on
-  1 + max.depth
-}
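A quick check of the recursion (each nested list adds one level, and atomic contents add the final one):

    list.depth(list())            # 0
    list.depth(1:10)              # 1
    list.depth(list(1, list(2)))  # 3: outer list, inner list, atomic element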
diff --git a/R/logit.R b/R/logit.R
deleted file mode 100644
index a1d25ea..0000000
--- a/R/logit.R
+++ /dev/null
@@ -1,152 +0,0 @@
-#' Interface between logit model and Zelig
-#'
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param weights a numeric vector
-#' @param robust a boolean (logical) specifying whether robust error estimates
-#' should be used
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2logit <- function(formula, weights=NULL, robust = F, ..., data) {
-  w <- weights
-  z(
-    glm,
-    formula = formula,
-    weights = w,
-    family  = binomial(link="logit"),
-    model   = F,
-    data    = data
-    )
-}
-
-#' Param Method for the \code{logit} Zelig Model
-#' @note This method is used by the \code{logit} Zelig model
-#' @usage \method{param}{logit}(obj, num, ...)
-#' @S3method param logit
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored parameters
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.logit <- function(obj, num, ...) {
-  list(
-       simulations = mvrnorm(n=num, mu=coef(.object), Sigma=vcov(.object)),
-       alpha       = NULL,
-       fam = binomial(link="logit")
-       )
-}
-
-#' Compute quantities of interest for 'logit' Zelig models
-#' @usage \method{qi}{logit}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi logit
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#' though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#' interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.logit <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  # Compute expected values
-  compute.ev <- function(obj, x=NULL, num=1000, param=NULL) {
-    if (is.null(x))
-      return(NA)
-
-    coef <- coef(param)
-    link.inverse <- linkinv(param)
-
-    eta <- coef %*% t(x)
-    eta <- Filter(function (y) !is.na(y), eta)
-
-    theta <- matrix(link.inverse(eta), nrow = nrow(coef))
-
-    ev <- matrix(link.inverse(eta), ncol=ncol(theta))
-
-    ev
-  }
-
-  # Simulate quantities of interest for "x"
-  ev1 <- compute.ev(obj, x, num, param)
-  pr1 <- matrix(nrow=nrow(ev1), ncol=ncol(ev1))
-
-  # Simulate the quantities of interest for "x1"
-  ev2 <- compute.ev(obj, x1, num, param)
-  pr2 <- fd <- NA
-
-  
-  # Produce 0 or 1 (FALSE/TRUE) results for "x"
-  for (i in 1:ncol(ev1))
-    pr1[,i] <- as.character(rbinom(length(ev1[,i]), 1, ev1[,i]))
-
-  # Produce 0 or 1 (FALSE/TRUE) results for "x1" and comppute first-differences
-  if (!is.null(x1)) {
-    pr2 <- matrix(nrow=nrow(ev2), ncol=ncol(ev2))
-
-    for (i in 1:ncol(ev2))
-      pr2[,i] <- as.character(rbinom(length(ev2[,i]), 1, ev2[,i]))
-
-    # This is the computation of the first difference...
-    fd <- ev2 - ev1
-  }
-
-  # Ensure that the correct levels are passed along.
-  levels(pr1) <- levels(pr2) <- c('0', '1')
-
-  # return
-  list("Expected Values: E(Y|X)"  = ev1,
-       "Expected Values: E(Y|X1)" = ev2,
-       "Predicted Values: Y|X"    = pr1,
-       "Predicted Values: Y|X1"   = pr2,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-
-.compute.ev <- function(obj, x=NULL, num=1000, param=NULL) {
-
-  if (is.null(x))
-    return(NA)
-
-  coef <- coef(param)
-  link.inverse <- linkinv(param)
-
-  eta <- coef %*% t(x)
-
-  theta <- matrix(link.inverse(eta), nrow = nrow(coef))
-
-  ev <- matrix(link.inverse(eta), ncol=ncol(theta))
-
-  ev
-}
-
-#' Describe a `logit' model to Zelig
-#' @usage \method{describe}{logit}(...)
-#' @S3method describe logit
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.logit <- function(...) {
-  # parameters object
-  parameters <- list(pi = list(
-                       equations = c(1, 1),
-                       tags.allowed = FALSE,
-                       dep.var = TRUE,
-                       exp.var = TRUE
-                       )
-                     )
-
-  # return list
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2008,
-       category = "dichotomous",
-       parameters = parameters,
-       text = "Logistic Regression for Dichotomous Dependent Variables"
-       )
-}
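The predicted-value step is just Bernoulli draws at the simulated probabilities; a self-contained sketch:

    set.seed(4)
    ev <- matrix(plogis(rnorm(20)), nrow = 10)            # simulated P(Y = 1), two profiles
    pr <- matrix(NA_character_, nrow(ev), ncol(ev))
    for (i in 1:ncol(ev))
      pr[, i] <- as.character(rbinom(nrow(ev), 1, ev[, i]))
    table(pr[, 1])                                        # counts of "0" and "1"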
diff --git a/R/logit.bayes.R b/R/logit.bayes.R
deleted file mode 100644
index 387458f..0000000
--- a/R/logit.bayes.R
+++ /dev/null
@@ -1,89 +0,0 @@
-#' @export
-zelig2logit.bayes <- function (
-                               formula, 
-                               burnin = 1000, mcmc = 10000, 
-                               verbose=0, 
-                               ..., 
-                               data
-                               ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  if (missing(verbose))
-    verbose <- round((mcmc + burnin)/10)
-
-  list(
-       .function = "MCMClogit",
-       .hook = "MCMChook",
-
-       formula = formula,
-       data   = data,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-
-       # Most parameters can be simply passed forward
-       ...
-       )
-}
-
-
-#' @S3method param logit.bayes
-param.logit.bayes <- function(obj, num=1000, ...) {
-  list(
-       coef = coef(obj),
-       fam  = binomial(link="logit")
-       )
-}
-
-#' @S3method qi logit.bayes
-qi.logit.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  # Use a Helper-Function that computes expected values and predicted values
-  # simultaneously.
-  res1 <- logit.ev(x, param)
-  res2 <- logit.ev(x1, param)
-
-  # Return quantities of interest
-  list(
-       "Expected Value: E(Y|X)" = res1$ev,
-       "Predicted Value: Y|X" = res1$pv,
-       "Expected Value (for X1): E(Y|X1)" = res2$ev,
-       "Predicted Value (for X1): Y|X1" = res2$pv,
-       "First Differences: E(Y|X1)-E(Y|X)" = res2$ev - res1$ev
-       )
-}
-
-logit.ev <- function (x, param) {
-  # If either of the parameters are invalid,
-  # Then return NA for both qi's
-  if (is.null(x) || is.na(x) || is.null(param))
-    return(list(ev=NA, pv=NA))
-
-  # Extract inverse-link and simulated parameters (respectively)
-  inv <- linkinv(param)
-  eta <- coef(param) %*% t(x)
-
-  # Give matrix identical rows/columns to the simulated parameters
-  ev <- pv <- matrix(NA, nrow(eta), ncol(eta))
-  dimnames(ev) <- dimnames(pv) <- dimnames(eta)
-
-  # Compute Expected Values
-  ev <- inv(eta)
-
-  # Compute Predicted Values
-  for (i in 1:ncol(ev)) 
-    pv[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i])) 
-
-  # Return
-  list(ev=ev, pv=pv)
-}
-
-#' @S3method describe logit.bayes
-describe.logit.bayes <- function(...) {
-  list(
-       authors = c("Ben Goodrich", "Ying Lu"),
-       text = "Bayesian Logistic Regression for Dichotomous Dependent Variables",
-       year = 2013
-       )
-}
diff --git a/R/logit.gee.R b/R/logit.gee.R
deleted file mode 100644
index aea9640..0000000
--- a/R/logit.gee.R
+++ /dev/null
@@ -1,98 +0,0 @@
-#' General Estimating Equation for Logit Regression
-#' @param formula a formula
-#' @param id a character-string specifying the column of the data-set to use
-#' for clustering
-#' @param robust a logical specifying whether to robustly or naively compute
-#' the covariance matrix. This parameter is ignore in the \code{zelig2}
-#' method, and instead used in the \code{robust.hook} function, which
-#' executes after the call to the \code{gee} function
-#' @param ... ignored parameters
-#' @param R a square-matrix specifying the correlation
-#' @param corstr a character-string specifying the correlation structure
-#' @param data a data.frame 
-#' @return a list specifying the call to the external model
-#' @export zelig2logit.gee
-#' @name logit.gee
-#' @aliases zelig2logit.gee
-zelig2logit.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {
-
-  loadDependencies("gee")
-
-  if (corstr == "fixed" && is.null(R))
-    stop("R must be defined")
-
-  # if id is a valid column-name in data, then we just need to extract the
-  # column and re-order the data.frame and cluster information
-  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
-    id <- data[, id]
-    data <- data[order(id), ]
-    id <- sort(id)
-  }
-
-  z(
-    .function = gee,
-    .hook = robust.gee.hook,
-
-    formula = formula,
-    id = id,
-    corstr = corstr,
-    family  = binomial(link="logit"),
-    data = data,
-    R = R,
-    ...
-    )
-}
-
-
-#' @S3method param logit.gee
-param.logit.gee <- function(obj, num=1000, ...) {
-  # Extract means to compute maximum likelihood
-  mu <- coef(obj)
-
-  # Extract covariance matrix to compute maximum likelihood
-  Sigma <- vcov(obj)
-
-  list(
-       coef = mvrnorm(num, mu, Sigma),
-       fam = binomial(link="logit")
-       )
-}
-
-
-#' @S3method qi logit.gee
-qi.logit.gee <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  coef <- coef(param)
-  inverse <- linkinv(param)
-
-  eta1 <- coef %*% t(x)
-  ev1 <- theta1 <- matrix(inverse(eta1), nrow=num)
-
-  # default to NA
-  rr <- ev2 <- fd <- NA
-
-  if (!is.null(x1)) {
-    eta2 <- coef %*% t(x1)
-    ev2 <- theta1 <- matrix(inverse(eta2), nrow=num)
-
-    fd <- ev2 - ev1
-    rr <- ev2/ev1
-  }
-
-  list(
-       "Expected Values (for x): E(Y|X)"   = ev1,
-       "Expected Values (for x1): E(Y|X1)" = ev2,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd,
-       "Risk Ratios: E(Y|X1)/E(Y|X)" = rr
-       )
-}
-
-
-#' @S3method describe logit.gee
-describe.logit.gee <- function(...) {
-  list(
-       authors = "Patrick Lam",
-       text = "General Estimating Equation for Logistic Regression",
-       year = 2011
-       )
-}
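One worked number for the extra quantity here: if a simulation draw yields E(Y|X) = 0.25 and E(Y|X1) = 0.40, the first difference is 0.40 - 0.25 = 0.15 and the risk ratio is 0.40 / 0.25 = 1.6; both are computed elementwise across the simulation matrices.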
diff --git a/R/logit.survey.R b/R/logit.survey.R
deleted file mode 100644
index 88eafe0..0000000
--- a/R/logit.survey.R
+++ /dev/null
@@ -1,191 +0,0 @@
-#' @export
-zelig2logit.survey <- function(
-                               formula,
-                               weights=NULL, 
-                               ids=NULL,
-                               probs=NULL,
-                               strata = NULL,  
-                               fpc=NULL,
-                               nest = FALSE,
-                               check.strata = !nest,
-                               repweights = NULL,
-                               type,
-                               combined.weights=FALSE,
-                               rho = NULL,
-                               bootstrap.average=NULL, 
-                               scale=NULL,
-                               rscales=NULL,
-                               fpctype="fraction",
-                               return.replicates=FALSE,
-                               na.action="na.omit",
-                               start=NULL,
-                               etastart=NULL, 
-                               mustart=NULL,
-                               offset=NULL, 	      		
-                               model1=TRUE,
-                               method="glm.fit",
-                               x=FALSE,
-                               y=TRUE,
-                               contrasts=NULL,
-                               design=NULL,
-                               data
-                               ) {
-
-  loadDependencies("survey")
-
-  if (is.null(ids))
-    ids <- ~1
-
-  # the following lines designate the design
-  # NOTE: nothing truly special goes on here;
-  #       the below just makes sure the design is created correctly
-  #       for whether or not the replication weights are set
-  design <- if (is.null(repweights))
-    svydesign(
-              data=data,
-              ids=ids,
-              probs=probs,
-              strata=strata,
-              fpc=fpc,
-              nest=nest,
-              check.strata=check.strata,
-              weights=weights
-              )
-
-  else {
-    .survey.prob.weights <- weights
-    
-    svrepdesign(
-                data=data,
-                repweights=repweights, 	
-                type=type,
-                weights=weights,
-                combined.weights=combined.weights, 
-                rho=rho,
-                bootstrap.average=bootstrap.average,
-                scale=scale,
-                rscales=rscales,
-                fpctype=fpctype,
-                fpc=fpc
-                )
-  }
-
-  # we cannot pass the family in through a variable yet because of weird
-  # issues with glm. Uncomment the lines below for an explanation:
-
-  ## fails:
-  # test <- gaussian
-  # svyglm(formula=formula, design=design, family=test)
-
-  ## works:
-  # svyglm(formula=formula, design=design, family=gaussian)
-
-  # this is because of how glm is written (it evaluates the
-  # family variable as a function in the parent.frame)
-
-  z(.function = svyglm,
-    formula = formula,
-    design  = design,
-    family  = quasibinomial(link="logit")
-    )
-}
-
-#' @S3method param logit.survey
-param.logit.survey <- function(obj, num=1000, ...) {
-  list(
-       simulations = mvrnorm(num, coef(obj), vcov(obj)),
-       alpha = NULL,
-       fam   = binomial(link="logit")
-       )
-}
-
-#' @S3method qi logit.survey
-qi.logit.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  model <- GetObject(obj)
-
-  coef <- coef(param)
-  alpha <- alpha(param)
-
-  eta <- coef %*% t(x)
-
-  link.inverse <- linkinv(param)
-
-  theta <- matrix(link.inverse(eta), nrow=nrow(coef))
-
-  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))
-
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-
-  ev <- theta
-
-  for (k in 1:ncol(theta)) {
-    pr[,k] <- rbinom(length(ev[,k]), 1, ev[,k])
-    pr[,k] <- as.character(pr[,k])
-  }
-
-  levels(pr) <- c("0", "1")
-  
-  if (!is.null(y) && NCOL(y))
-    y <- y[,1]
-
-
-  # default these quantities to NA so they are not displayed
-  pr1 <- ev1 <- fd <- rr <- NA
-
-  
-  if (!is.null(x1)) {
-    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)),
-                            nrow = nrow(coef)
-                            )
-
-
-    pr1 <- matrix(NA, nrow=nrow(theta), ncol(theta))
-
-    for (k in 1:ncol(theta)) {
-      pr1[,k] <- rbinom(length(ev1[,k]), 1, ev1[,k])
-      pr1[,k] <- as.character(pr1[,k])
-    }
-
-    levels(pr1) <- c("0", "1")
-    
-    fd <- ev1-ev
-    rr <- ev1/ev
-  }
-
-
-  att.ev <- att.pr <- NA
-
-  if (!is.null(y)) {
-
-    yvar <- matrix(rep(y, nrow(coef)),
-                   nrow = nrow(coef)
-                   )
-
-    tmp.ev <- yvar - ev
-    tmp.pr <- yvar - as.integer(pr)
-
-    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow=nrow(coef))
-    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow=nrow(coef))
-  }
-
-  list(
-       "Expected Values: E(Y|X)" = ev,
-       "Expected Values (for X1): E(Y|X1)" = ev1,
-       "Predicted Values: Y|X" = pr,
-       "Predicted Values (for X1): Y|X1" = pr1,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd,
-       "Risk Ratios: P(Y=1|X1)/P(Y=0|X)" = rr,
-       "Average Treatment Effect: Y - EV" = att.ev,
-       "Average Treatment Effect: Y - PR" = att.pr
-       )
-}
-
-#' @S3method describe logit.survey
-describe.logit.survey <- function(...) {
-  list(
-       authors = "Nicholas Carnes",
-       year = 2008,
-       description = "Survey-Weighted Logitistic Regression for Continuous, Positive Dependent Variables"
-       )
-}
diff --git a/R/lognorm.R b/R/lognorm.R
deleted file mode 100644
index e569e3d..0000000
--- a/R/lognorm.R
+++ /dev/null
@@ -1,107 +0,0 @@
-#' Interface between the Zelig Model lognorm and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2lognorm <- function (formula, ..., robust = FALSE, cluster = NULL, data) {
-
-  loadDependencies("survival")
-
-  if (!(is.null(cluster) || robust))
-    stop("If cluster is specified, then `robust` must be TRUE")
-
-  # Add cluster term
-  if (robust || !is.null(cluster))
-    formula <- cluster.formula(formula, cluster)
-
-  # Return
-  list(
-       .function = "survreg",
-       formula = formula,
-       dist = "lognormal",
-       robust = robust,
-       data = data,
-       ...
-       )
-}
-
-#' @S3method param lognorm
-param.lognorm <- function(obj, num=1000, ...) {
-
-  # These are the fitted parameters
-  coef <- coef(obj)
-
-  # Append the log-scale
-  mu <- c(coef, log(obj$result$scale))
-
-  # These are their correlations
-  cov <- vcov(obj)
-
-  # Simulate the results
-  simulations <- mvrnorm(num, mu, cov)
-
-  # Return
-  list(
-       coef = as.matrix(simulations[, 1:length(coef)]),
-       alpha = as.matrix(simulations[, -(1:length(coef))]),
-       linkinv = survreg.distributions[["lognormal"]]$itrans
-       )
-}
-
-#' @S3method qi lognorm
-qi.lognorm <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  linkinv <- linkinv(param)
-  alpha <- alpha(param)
-  beta <- coef(param)
-
-  # Compute expected values for "lognorm" regression
-  #
-  # This function is nested within qi.lognorm for code-clarity and because it
-  # will not be used by any other function
-  # @param coef
-  # @param alpha sim.scale
-  # @param x
-  # @return a matrix
-  compute.ev <- function (coef, alpha, x) {
-    if (is.null(x) || is.na(x))
-      # If there are missing explanatory variables, ignore them
-      return(NA)
-
-    # Compute eta
-    # This value must be *inverted* to be restored to the true "observed" value
-    eta <- coef %*% t(x)
-
-    # Apply inverse link function
-    theta <- as.matrix(apply(eta, 2, linkinv))
-
-    # Copied from qi.survreg in Zelig v3.5
-    ev <- exp(log(theta) + 0.5*(exp(alpha))^2)
-    dimnames(ev) <- dimnames(theta)
-
-    # Return
-    as.matrix(ev)
-  }
-
-  # Compute expected values for X and X1
-  ev1 <- compute.ev(beta, alpha, x)
-  ev2 <- compute.ev(beta, alpha, x1)
-
-
-  list(
-       "Expected Value: E(Y|X)" = ev1,
-       "Expected Value: E(Y|X1)" = ev2,
-       "First Differences: E(Y|X1) - E(Y|X)" = ev2 - ev1
-       )
-}
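The expected value above is the textbook log-normal mean, E(Y) = exp(mu +
sigma^2/2), with mu = log(theta) and sigma = exp(alpha) (alpha being the
simulated log-scale). A quick Monte Carlo check with hypothetical values:

    mu <- 1.2; sigma <- 0.7
    mean(rlnorm(1e6, meanlog = mu, sdlog = sigma))   # approx. 4.24
    exp(mu + 0.5 * sigma^2)                          # 4.2418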
-
-#' @S3method describe lognorm
-describe.lognorm <- function(...) {
-  list(
-       authors = c("Matthew Owen", "Olivia Lau", "Kosuke Imai", "Gary King"),
-       text = "Log-Normal Regression for Duration Dependent Variables",
-       year = 2007
-       )
-}
diff --git a/R/ls.R b/R/ls.R
deleted file mode 100644
index e6536be..0000000
--- a/R/ls.R
+++ /dev/null
@@ -1,94 +0,0 @@
-#' Interface between ls model and Zelig
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param weights a numeric vector
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2ls <- function(formula, ..., data, weights=NULL)
-  z(
-    lm,
-    formula = formula,
-    weights = weights,
-    model   = FALSE,
-    data    = data
-    )
-#' Param Method for the 'ls' Zelig Model
-#' @note This method currently returns via a deprecated style
-#' @usage \method{param}{ls}(obj, num, \dots)
-#' @S3method param ls
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored parameters
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.ls <- function(obj, num, ...) {
-  mvrnorm(n=num, mu=coef(obj), Sigma=vcov(obj))
-}
-#' Compute quantities of interest for 'ls' Zelig models
-#' @usage \method{qi}{ls}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi ls
-#' @param obj a \code{zelig} object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#'   though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#'   interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.ls <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-  # error-catching
-  if (missing(x))
-    stop("x cannot be missing while computing the `ls' model")
-
-  # Get coefficients of the linear model
-  coefs <- coef(param)
-
-  # compute expected value
-  ev <- coefs %*% t(x)
-
-  ev1 <- NA
-  fd <- NA
-  
-  if (!is.null(x1)) {
-    ev1 <- coefs %*% t(x1)
-    fd <- ev1 - ev
-  }
-
-  # return
-  list("Expected Values: E(Y|X)"  = ev,
-       "Expected Values: E(Y|X1)" = ev1,
-       "Predicted Values: Y|X"    = ev,
-       "Predicted Values: Y|X1"   = ev1,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-#' Describe a \code{ls} model to Zelig
-#' @note \code{ls} stands for "least squares fit"
-#' @usage \method{describe}{ls}(...)
-#' @S3method describe ls
-#' @param ... ignored parameters
-#' @return a list to be processed by \code{as.description}
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.ls <- function(...){
-  parameters <-list(mu = list(
-                      equations = c(1,1),
-                      tags.allowed = FALSE,
-                      dep.vars = TRUE,
-                      exp.vars = TRUE
-                      )
-                    )
-  
-  # return
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2007,
-       category = "continuous",
-       parameters = parameters,
-       text = "Least Squares Regression for Continuous Dependent Variables"
-       )
-}
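Together, zelig2ls, param.ls, qi.ls and describe.ls implement the four hooks
behind model = "ls". A sketch of how they are exercised through the Zelig
4-era interface (toy data; all object names hypothetical):

    library(Zelig)
    d <- data.frame(y = rnorm(100), x = rnorm(100))
    z.out <- zelig(y ~ x, model = "ls", data = d)   # dispatches to zelig2ls
    x.lo <- setx(z.out, x = quantile(d$x, 0.2))
    x.hi <- setx(z.out, x = quantile(d$x, 0.8))
    s.out <- sim(z.out, x = x.lo, x1 = x.hi)        # param.ls, then qi.ls
    summary(s.out)                                  # labels come from qi.ls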
diff --git a/R/make.parameters.R b/R/make.parameters.R
deleted file mode 100644
index 4e06884..0000000
--- a/R/make.parameters.R
+++ /dev/null
@@ -1,49 +0,0 @@
-#' ??? For use with cmvglm
-#' @param terms ???
-#' @param shape ???
-#' @param ancillary ???
-#' @param eqns ???
-#' @return ???
-#' @export
-#' @author Kosuke Imai and Olivia Lau
-make.parameters <- function(terms, shape = "vector", ancillary = TRUE,eqns=NULL) {
-  if (!shape %in% c("matrix", "vector"))
-    stop("not a valid 'shape' for parameters.  Choose from \"matrix\" or \"vector\".")
- #comment 
-  if(is.null(eqns))
-    eqns<-names(terms)
-  ints <- attr(terms, "intercept")[eqns]
-  labs <- attr(terms, "term.labels")[eqns]
-  const <- attr(terms, "constraints")
-  for (i in 1:length(eqns)) {
-    if (ints[[i]] == 1)
-      labs[[i]] <- c("(Intercept)", labs[[i]])
-  }
-  fixed<-eqns[eqns %in% attr(terms,"ancilEqns")]
-  syst<-eqns[eqns %in% attr(terms,"systEqns")]
-#  syst<-eqns
-  vars <- unique(unlist(labs))
-  pars <- matrix(NA, ncol = length(syst), nrow = length(vars))
-  colnames(pars) <- syst
-  rownames(pars) <- vars
-  for (i in syst) {
-    idx <- which(!is.na(match(vars, labs[[i]])))
-    pars[idx,i] <- paste(labs[[i]], i, sep = ":")
-  }
-  if (!is.logical(const)) {
-    const <- attr(terms, "constraints")[syst,,drop=FALSE]
-    for (i in 1:ncol(const)) {
-      cidx <- which(!is.na(const[,i]))
-      ridx <- match(const[cidx, i], rownames(pars))
-      pars[cbind(ridx, cidx)] <- colnames(const)[i]
-    }
-  }  
-  if (shape == "matrix")
-    out <- pars
-  if (shape == "vector") {
-    out <- unique(na.omit(c(t(pars))))
-    if (ancillary) 
-      out <- c(out, fixed)
-  }
-  out
-}
diff --git a/R/makeModelMatrix.R b/R/makeModelMatrix.R
deleted file mode 100644
index d6f54c7..0000000
--- a/R/makeModelMatrix.R
+++ /dev/null
@@ -1,39 +0,0 @@
-#' Make a Model Matrix from a Zelig-Style Formula
-#' 
-#' This is a helper function that creates a \code{model.matrix} like object
-#' of Zelig-style formulae.
-#' @param formula a Zelig-style formula
-#' @param data a \code{data.frame}
-#' @return a design (or model) matrix
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-makeModelMatrix <- function (formula, data) {
-
-  if (missing(data) || is.null(data))
-    return(NULL)
-
-  # This is kludge and should be generalized
-  if (inherits(formula, "Formula")) {
-  }
-
-  if (is.list(formula)) {
-    m <- NULL
-
-    for (form in formula) {
-      m <- cbind(m, model.matrix(form, data))
-    }
-
-    t(as.matrix(m[, unique(colnames(m))]))
-  }
-
-  else {
-    return(model.matrix(formula, data))
-  }
-}
-
-
-#
-#
-#
-makeModelMatrixFromFormula <- function (formula, data) {
-
-}
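For a single formula the helper defers to stats::model.matrix; for a list of
formulae it binds the per-equation matrices and drops duplicated columns
before transposing. The single-formula case, with toy data:

    d <- data.frame(y = 1:4, x = c(2, 5, 3, 8), g = factor(c("a", "b", "a", "b")))
    model.matrix(y ~ x + g, data = d)   # intercept, x, and a dummy for g == "b"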
diff --git a/R/mi.R b/R/mi.R
deleted file mode 100644
index 4beffc4..0000000
--- a/R/mi.R
+++ /dev/null
@@ -1,25 +0,0 @@
-#' Bundle Data-sets for Multiple Imputation
-#' 
-#' This object prepares data-sets for processing with multiple imputation.
-#' @note This function is largely identical to simply creating a list object,
-#'   with the exception that any unnamed data-sets are automatically labeled
-#'   via the \code{substitute} function
-#' @param ... a set of \code{data.frame}'s
-#' @return an \code{almost.mi} object, which contains the important internals
-#'   of a valid, useful \code{mi} object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-mi <- function (...) {
-
-  # Get arguments as list
-  data.frames <- list(...)
-
-  # Ensure that everything is a data.frame
-  for (k in length(data.frames):1) {
-    if (!is.data.frame(data.frames[[k]]))
-      data.frames[[k]] <- NULL
-  }
-
-  # Return
-  data.frames
-}
diff --git a/R/mlogit.bayes.R b/R/mlogit.bayes.R
deleted file mode 100644
index 404a6a5..0000000
--- a/R/mlogit.bayes.R
+++ /dev/null
@@ -1,103 +0,0 @@
-#' @export
-zelig2mlogit.bayes <- function (
-                               formula, 
-                               burnin = 1000, mcmc = 10000, 
-                               verbose=0, 
-                               ..., 
-                               data
-                               ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  list(
-       .function = "MCMCmnl",
-       .hook = "MCMChook",
-
-       formula = formula,
-       data   = data,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-
-       # Most parameters can be simply passed forward
-       ...
-       )
-}
-
-#' @S3method param mlogit.bayes
-param.mlogit.bayes <- function(obj, num=1000, ...) {
-  list(
-       coef = coef(obj),
-       linkinv = NULL
-       )
-}
-
-#' @S3method qi mlogit.bayes
-qi.mlogit.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  res1 <- compute.mlogit.bayes(obj, x, y, num, param)
-  res2 <- compute.mlogit.bayes(obj, x1, y, num, param)
-
-  list(
-       "Expected Value: E(Y|X)" = res1$ev,
-       "Predicted Value: Y|X"   = res1$pv,
-       "Expected Value (for X1): E(Y|X1)" = res2$ev,
-       "Predicted Value (for X1): Y|X1"   = res2$pv,
-       "First Differences"   = res2$ev - res1$ev
-       )
-}
-
-compute.mlogit.bayes <- function (obj, x, y, num, param) {
-  # If either of the parameters are invalid,
-  # Then return NA for both qi's
-  if (is.null(x) || is.na(x) || is.null(param))
-    return(list(ev=NA, pv=NA))
-
-  # 
-  resp <- model.response(model.frame(obj))
-
-  level <- length(table(resp))
-  p <- dim(model.matrix(eval(obj),data=obj$data))[2]
-  coef <- coef(obj)
-  eta <- array(NA, c(nrow(coef),level, nrow(x$matrix)))
-
-
-
-  eta[, 1, ] <- matrix(0, nrow(coef), nrow(x$matrix))
-
-  for (j in 2:level) {
-    ind <- (1:p)*(level-1)-(level-j)
-    eta[,j,]<- coef[,ind]%*%t(x)
-  }
-
-  eta<-exp(eta)
-  ev <- array(NA, c(nrow(coef), level, nrow(x$matrix)))
-  pr <- matrix(NA, nrow(coef), nrow(x$matrix))
-  colnames(ev) <- rep(NA, level)
-
-  for (k in 1:nrow(x$matrix)) {
-    for (j in 1:level)
-      ev[,j,k] <- eta[,j,k]/rowSums(eta[,,k])
-  }
-
-  for (j in 1:level) {
-    colnames(ev)[j] <- paste("P(Y=", j, ")", sep="")
-  }
-
-  for (k in 1:nrow(x$matrix)) {             
-    probs <- as.matrix(ev[,,k])
-    temp <- apply(probs, 1, FUN=rmultinom, n=1, size=1)
-    temp <- as.matrix(t(temp)%*%(1:nrow(temp)))
-    pr <- apply(temp,2,as.character)
-  }
-  list(ev = ev, pv = pr)
-}
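compute.mlogit.bayes inverts the multinomial logit: the baseline level gets a
linear predictor of zero, every eta is exponentiated, and each row is
normalized to sum to one. The same step for a single simulation draw with
three outcome levels (toy numbers):

    eta <- c(0, 0.4, -0.2)                  # level 1 is the baseline
    ev <- exp(eta) / sum(exp(eta))          # P(Y = 1), P(Y = 2), P(Y = 3)
    pv <- which(rmultinom(1, size = 1, prob = ev) == 1)   # one sampled level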
-
-#' @S3method describe mlogit.bayes
-describe.mlogit.bayes <- function(...) {
-  list(
-       authors = c("Ben Goodrich", "Ying Lu"),
-       text = "Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values",
-       year = 2013
-       )
-}
diff --git a/R/model-ar.R b/R/model-ar.R
new file mode 100755
index 0000000..b873d36
--- /dev/null
+++ b/R/model-ar.R
@@ -0,0 +1,25 @@
+#' Time-Series Model with Autoregressive Disturbance
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-ar.html}
+#' @import methods
+#' @export Zelig-ar
+#' @exportClass Zelig-ar
+#'
+#' @include model-zelig.R
+#' @include model-timeseries.R
+  
+zar <- setRefClass("Zelig-ar",
+                       contains = "Zelig-timeseries")
+
+zar$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "ar"
+    .self$link <- "identity"
+    .self$fn <- quote(zeligArimaWrapper)
+    .self$description = "Time-Series Model with Autoregressive Disturbance"
+    .self$packageauthors <- "R Core Team"
+    .self$outcome <- "continuous"
+    .self$wrapper <- "timeseries"
+  }
+)
diff --git a/R/model-arima.R b/R/model-arima.R
new file mode 100755
index 0000000..622af30
--- /dev/null
+++ b/R/model-arima.R
@@ -0,0 +1,379 @@
+#' Autoregressive and Moving-Average Models with Integration for Time-Series Data
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-arima.html}
+#' @import methods
+#' @export Zelig-arima
+#' @exportClass Zelig-arima
+#'
+#' @include model-zelig.R
+#' @include model-timeseries.R
+
+zarima <- setRefClass("Zelig-arima",
+                        contains = "Zelig-timeseries")
+
+zarima$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "arima"
+    .self$link <- "identity"
+    #.self$family <- "gaussian"
+    .self$fn <- quote(zeligArimaWrapper)
+    #.self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$description <- "Autoregressive Moving-Average Models for Time-Series Data"
+    # JSON
+    .self$outcome <- "continuous"
+    .self$wrapper <- "timeseries"
+  }
+)
+
+zarima$methods(
+  qi = function(simparam, mm, mm1=NULL){ 
+
+    myorder <- eval(.self$zelig.call$order)
+    mycoef <- coef(.self$zelig.out$z.out[[1]])
+    sd <- sqrt(.self$zelig.out$z.out[[1]]$sigma2)
+
+    ## Check mm and mm1.  Particularly for issues surrounding intercept.
+    rebuildMM <- function(simparam, x){
+      xnames <- colnames(x)
+      snames <- colnames(simparam)
+      ## parameter "intercept" can be spelt "(Intercept)"" in model matrix
+      if("(Intercept)" %in% xnames){     
+        flag <- xnames == "(Intercept)"
+        xnames[flag] <- "intercept"
+        colnames(x)[flag]<- "intercept" # this is equivalent to: colnames(x) <- xnames  
+      }
+      ## "intercept" can be included in model matrix when not an estimated parameter (for example in models with integration)
+      xnamesflag <- xnames %in% snames
+      x <- x[, xnamesflag, drop=FALSE]
+      return(x)
+    }
+
+    mm <- rebuildMM(simparam, mm)
+    if(!is.null(mm1)){
+      mm1 <- rebuildMM(simparam, mm1)
+    }
+
+
+    ## Make ACF 
+    acf <- simacf(coef=mycoef, order=myorder, params=simparam, alpha=0.05)
+    acf.length <- length(acf$expected.acf)
+    t1 <- 2*acf.length
+    t2 <- 2*acf.length
+
+
+    if(.self$bsetx1){             # could also check if mm1 is NULL
+      # zeligARMAbreakforecaster() calls zeligARMAlongrun() internally
+      #  return(y.shock = yseries, y.innovation = y.innov, ev.shock = evseries, ev.innovation = ev.innov)  
+      yseries <- zeligARMAbreakforecaster(y.init=NULL, x=mm, x1=mm1, simparam=simparam, order=myorder, sd=sd, t1=t1, t2=t2) 
+      # maybe check nrow(yseries)=t1 + t2 ?
+
+      pv <- yseries$y.innovation[t1,]                # could use either $innovation or $shock here
+      pv.shortrun <- yseries$y.innovation[t1+1,]     # could use either $innovation or $shock here
+      pv.longrun <- yseries$y.innovation[t1+t2,]     # must use $innovation here
+
+      # Remember, these are expectations using the same simparam in each expectation.
+      ev <- yseries$ev.innovation[t1,]
+      ev.shortrun <- yseries$ev.innovation[t1+1,]
+      ev.longrun <- yseries$ev.innovation[t1+t2,]
+
+      return(list(acf = acf, ev = ev, pv = pv, pv.shortrun=pv.shortrun, pv.longrun=pv.longrun, ev.shortrun=ev.shortrun, ev.longrun=ev.longrun, 
+                pvseries.shock=yseries$y.shock, pvseries.innovation=yseries$y.innovation,
+                evseries.shock=yseries$ev.shock, evseries.innovation=yseries$ev.innovation))
+
+    }else{
+      # just call zeligARMAlongrun()
+      yseries <- zeligARMAlongrun(y.init=NULL, x=mm, simparam=simparam, order=myorder, sd=sd) 
+      pv <- yseries$y[1,]   # zeligARMAlongrun returns the series in reverse order to zeligARMAbreakforecaster
+      # Remember, these are expectations using the same simparam in each expectation:
+      ev <- yseries$ev[1,]
+      return(list(acf = acf, ev = ev, pv = pv))
+    }
+  }
+)
+
+zarima$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- exp(b0 + b1 * x)
+    if(sim){
+      y <- rnorm(n=length(x), mean=mu)
+      return(y)
+    }else{
+      return(mu)
+    }
+  }
+)
+
+#' Estimation wrapper function for arima models, to easily fit with Zelig architecture
+#' @keywords internal
+
+zeligArimaWrapper <- function(formula, order=c(1,0,0), ... , include.mean=TRUE, data){
+    
+    # Using with():
+    # myArimaCall <- quote( arima(x=, order =, xreg= ) )
+    # output <- with(data, myArimaCall )
+
+
+    # Using arima() directly:
+    mf <- model.frame(formula, data)
+
+    acf3 <- as.character(formula[[3]])
+    
+    yflag <- names(mf) %in% all.vars(formula[-3]) 
+    xflag <- names(mf) %in% all.vars(formula[-2]) 
+    
+    myx <- as.matrix(mf[,yflag, drop=FALSE])  # could use get_all_vars()
+    myxreg <- as.matrix(mf[,xflag, drop=FALSE])
+    
+    if (("1" %in% acf3 ) & ("-" %in% acf3 )){
+        include.mean <- FALSE
+    }
+    
+    output <- stats::arima(x=myx, order=order, xreg=myxreg, include.mean=include.mean, ...)
+
+}
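A sketch of the stats::arima call the wrapper assembles, on a simulated AR(1)
series with one exogenous regressor (all names and values hypothetical):

    set.seed(1)
    x <- rnorm(200)
    y <- as.numeric(arima.sim(list(ar = 0.5), n = 200)) + 2 * x
    fit <- stats::arima(x = y, order = c(1, 0, 0), xreg = x, include.mean = TRUE)
    coef(fit)   # ar1, intercept, and the coefficient on xreg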
+
+
+#' Construct Autocorrelation Function from Zelig object and simulated parameters
+#' @keywords internal
+
+simacf <- function(coef, order, params, alpha = 0.05){
+
+  #order <- eval(.self$zelig.call$order)
+  myar <- myma <- myar.seq <- myma.seq <- NULL
+
+  if(order[1]>0){
+    arnames <- paste("ar", 1:order[1], sep="")
+    myar <- coef[arnames]
+    myar.seq <- params[, arnames, drop=FALSE]
+  }
+
+  if(order[3]>0){
+    manames <- paste("ma", 1:order[3], sep="")
+    myma <- coef[manames]
+    myma.seq <- params[, manames, drop=FALSE]
+  }
+
+  mylag.max<-10  # Need to set automatically.  
+
+  n.sims<-nrow(params)
+  expected.acf <- ARMAacf(ar=myar, ma=myma, lag.max=mylag.max)
+  acf.history<-matrix(NA, nrow=n.sims, ncol=length(expected.acf))      # length(expected.acf) = mylag.max +1 
+  for(i in 1:n.sims){
+    acf.history[i,] <- ARMAacf(ar=myar.seq[i,], ma=myma.seq[i,], lag.max=mylag.max)
+  }
+
+
+  # Define functions to compute confidence intervals for each column in a matrix
+  ci.matrix <- function(x, alpha) {
+    pos.hi <- max(round((1-(alpha/2))*nrow(x)), 1)
+    pos.low <-max(round((alpha/2)*nrow(x)), 1)
+
+    ci.lower <- ci.upper <- rep(NA, ncol(x))
+    for(i in 1:ncol(x)){
+        temp<-sort(x[,i])
+        ci.lower[i]<-temp[pos.low]
+        ci.upper[i]<-temp[pos.hi]
+    }
+    return(list(ci.lower=ci.lower, ci.upper=ci.upper))
+  }
+  ci.acf <- ci.matrix(x=acf.history, alpha=alpha)
+
+  return(list(expected.acf=expected.acf, ci.acf=ci.acf, sims.acf=acf.history))
+}
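simacf is built on stats::ARMAacf, which returns the theoretical
autocorrelation function implied by a set of AR and MA coefficients, e.g.:

    ARMAacf(ar = 0.5, ma = 0.3, lag.max = 10)   # lags 0 through 10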
+
+
+#' Construct Simulated Next Step in Dynamic Series
+#' @keywords internal
+
+zeligARMAnextstep <- function(yseries=NULL, xseries, wseries=NULL, beta, ar=NULL, i=NULL, ma=NULL, sd){
+  
+  ## Check inputs
+  # t is obs across time
+  # s is sims
+  # k is covariates
+  # order is (p,q,r)
+  # assume yseries (t x sims), xseries (t x k), wseries (t x s), beta (s x k), ar (s x p), ma (s x r) are matrix
+  # assume sd is scalar
+
+  ## Could construct these by using the known order more deliberately
+
+  if(is.vector(yseries)){
+    #print("warning: yseries is vector")
+    yseries <- matrix(yseries, nrow=1)        # Assume if y is a vector, that we are only running one simulation chain of y, so y is (t x 1)
+  }
+  if(is.vector(xseries)){
+    #print("warning: xseries is vector")
+    xseries <- matrix(xseries, nrow=1)        # Assume if x is a vector, that there are no lagged terms, so x is (1 x k)
+  }
+  if(is.vector(wseries)){
+    #print("warning: wseries is vector")
+    wseries <- matrix(wseries, nrow=1)        # Assume if w is a vector, that we are only running one simulation chain of y, so w is (t x 1)
+  }
+  if(is.vector(beta)){
+    #print("warning: beta is vector")
+    beta <- matrix(beta, ncol=1)
+  }
+  if(is.vector(ar)){
+    #print("warning: ar is vector")
+    ar <- matrix(ar, ncol=1)
+  }
+  if(is.vector(ma)){
+    #print("warning: ma is vector")
+    ma <- matrix(ma, ncol=1)
+  }
+
+  ar.term <- function(yseries, ar, n){
+    yshort <- yseries[1:ncol(ar), , drop=FALSE]           # because we only need the diagonal of a square matrix, we can avoid full matrix multiplication
+    return( rowSums( ar * t(yshort) ) )       # diag[(s x p) . (p x s)] = diag[(s x s)] = (s x 1)  
+  }
+  xt.term <- function(xseries, beta){
+    return( as.vector(beta %*% t(xseries)) )  # (s x k) . t(1 x k) = (s x 1)
+  }
+  ma.term <- function(wseries, ma){    
+    wshort <- wseries[1:ncol(ma), , drop=FALSE]
+    return( rowSums( ma * t(wshort)) )        # diag[(s x r) . (r x s)] = diag[(s x s)] = (s x 1)
+  }
+
+  n.sims <- ncol(yseries)   
+  w <- rnorm(n=n.sims, mean=0, sd=sd)
+  y <- xt.term(xseries,beta) + w              # conformable if xt is vector and w vector
+  if(!is.null(ar)){
+    y <- y + ar.term(yseries,ar)              # conformable if y vector and ar vector 
+  }
+  if(!is.null(ma)){
+    y <- y + ma.term(wseries,ma)              # conformable if y vector and ma vector 
+  }
+
+  exp.y <- y - w                              # one interpretation of an EV QI:  E(y| l(w), l(y))
+  return(list(y=y, w=w, exp.y=exp.y))
+}
+
+
+#' Calculate the Long Run Equilibrium for Fixed X
+#' @keywords internal
+
+zeligARMAlongrun <- function(y.init=NULL, x, simparam, order, sd, tol=NULL, burnin=20){
+  if(is.null(tol)){
+    tol<-0.01
+  }
+  ar <- i <- ma <- NULL
+
+  ## Ensure parameter simulations in same order as model matrix
+  xnames <- colnames(x)
+  beta <- simparam[,xnames]
+
+  ## Extract AR and MA terms
+  if(order[1]>0){
+    arnames <- paste("ar", 1:order[1], sep="")
+    ar <- simparam[,arnames]
+  }
+  if(order[3]>0){
+    manames <- paste("ma", 1:order[3], sep="")
+    ma <- simparam[,manames]
+  }
+  timepast <- max(order[1],order[3])
+  
+  n.sims <- nrow(simparam)
+
+  if(is.vector(x)){
+    x<-matrix(x,nrow=1, ncol=length(x))
+  }
+
+  if(is.null(y.init)){
+    betabar <- t(apply(beta,2, mean))
+    y.init <- x %*% t(beta)
+  }
+
+  yseries <- matrix(y.init, nrow=timepast, ncol=n.sims, byrow=TRUE)
+  wseries <- matrix(rnorm(n=timepast*n.sims), nrow=timepast, ncol=n.sims)
+  evseries <- matrix(NA, nrow=timepast, ncol=n.sims)
+
+  finished <- FALSE
+  count <- 0
+  while(!finished){
+    y <- zeligARMAnextstep(yseries=yseries[1:timepast, ], xseries=x, wseries=wseries[1:timepast, ], beta=beta, ar=ar, i=i, ma=ma, sd=sd)
+    yseries <- rbind(y$y, yseries)
+    wseries <- rbind(y$w, wseries)
+    evseries<- rbind(y$exp.y, evseries)
+
+    #diff <- mean(abs(y.1 - y.0))  # Eventually need to determine some automated stopping rule
+    count <- count+1
+    finished <- count>burnin #| (diff < tol)
+  }
+
+  return(list(y.longrun=yseries, w.longrun=wseries, ev.longrun=evseries))
+}
+
+
+#' Construct Simulated Series with Internal Discontinuity in X
+#' @keywords internal
+
+zeligARMAbreakforecaster <- function(y.init=NULL, x, x1, simparam, order, sd, t1=5, t2=10){
+
+  longrun.out <- zeligARMAlongrun(y.init=y.init, x=x, simparam=simparam, order=order, sd=sd)   
+  yseries  <- longrun.out$y.longrun
+  wseries  <- longrun.out$w.longrun
+  evseries <- longrun.out$ev.longrun
+
+  ## Ensure parameter simulations in same order as model matrix
+  xnames <- colnames(x)
+  beta <- simparam[,xnames]
+
+  ## Extract AR and MA terms
+  ar <- i <- ma <- NULL
+  if(order[1]>0){                                      
+    arnames <- paste("ar", 1:order[1], sep="")
+    ar <- simparam[,arnames]
+  }
+  if(order[3]>0){
+    manames <- paste("ma", 1:order[3], sep="")
+    ma <- simparam[,manames]
+  }
+  timepast <- max(order[1],order[3]) # How many steps backward are needed in the series  --  could we be more precise?
+
+  # Take a step at covariates x
+  for(i in 2:t1){
+    nextstep <- zeligARMAnextstep(yseries=yseries[1:timepast, ], xseries=x, wseries=wseries[1:timepast, ], beta=beta, ar=ar, i=i, ma=ma, sd=sd)
+    yseries  <- rbind(nextstep$y, yseries)   # Could just change arguments so nextstep(nextstep) doesn't need to copy elsewhere.
+    wseries  <- rbind(nextstep$w, wseries)
+    evseries <- rbind(nextstep$exp.y, evseries)
+  }
+
+  # Introduce shock
+    nextstep <- zeligARMAnextstep(yseries=yseries[1:timepast, ], xseries=x1, wseries=wseries[1:timepast, ], beta=beta, ar=ar, i=i, ma=ma, sd=sd)
+    yseries  <- rbind(nextstep$y, yseries)   # Could just change arguments so nextstep(nextstep) doesn't need to copy elsewhere.
+    wseries  <- rbind(nextstep$w, wseries)
+    evseries <- rbind(nextstep$exp.y, evseries)
+
+    y.innov  <- yseries
+    w.innov  <- wseries  # Note: the sequences of stochastic terms diverge from here on
+    ev.innov <- evseries
+
+  for(i in 2:t2){
+    # Take further steps at covariates x1 (an introduction of an innovation)
+    nextstep <- zeligARMAnextstep(yseries=y.innov[1:timepast, ], xseries=x1, wseries=w.innov[1:timepast, ], beta=beta, ar=ar, i=i, ma=ma, sd=sd)
+    y.innov  <- rbind(nextstep$y, y.innov)  # Could just change arguments so nextstep(nextstep) doesn't need to copy elsewhere.
+    w.innov  <- rbind(nextstep$w, w.innov)
+    ev.innov <- rbind(nextstep$exp.y, ev.innov)
+
+    # And take steps returning to old covariates (an introduction of a shock)
+    nextstep <- zeligARMAnextstep(yseries=yseries[1:timepast, ], xseries=x, wseries=wseries[1:timepast, ], beta=beta, ar=ar, i=i, ma=ma, sd=sd)
+    yseries  <- rbind(nextstep$y, yseries)   # Could just change arguments so nextstep(nextstep) doesn't need to copy elsewhere.
+    wseries  <- rbind(nextstep$w, wseries)
+    evseries <- rbind(nextstep$exp.y, evseries)
+
+  }
+
+  yseries <- yseries[1:(t1 + t2), ]  # Truncate series to last periods, removing burn-in to equilibrium
+  y.innov <- y.innov[1:(t1 + t2), ]
+  evseries <- evseries[1:(t1 + t2), ]
+  ev.innov <- ev.innov[1:(t1 + t2), ]
+
+  yseries <- yseries[nrow(yseries):1,]  # Change y to conventional row ordering by time before returning
+  y.innov <- y.innov[nrow(y.innov):1,]
+  evseries <- evseries[nrow(evseries):1, ]
+  ev.innov <- ev.innov[nrow(ev.innov):1, ]
+
+  return(list(y.shock = yseries, y.innovation = y.innov, ev.shock = evseries, ev.innovation = ev.innov))  
+}
diff --git a/R/model-bayes.R b/R/model-bayes.R
new file mode 100644
index 0000000..9d5322f
--- /dev/null
+++ b/R/model-bayes.R
@@ -0,0 +1,139 @@
+#' Bayes Model object for inheritance across models in Zelig
+#'
+#' @import methods
+#' @export Zelig-bayes
+#' @exportClass Zelig-bayes
+#'
+#' @include model-zelig.R
+zbayes <- setRefClass("Zelig-bayes",
+                      contains = "Zelig")
+
+zbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$packageauthors <- "Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park"
+    .self$modelauthors <- "Ben Goodrich, and Ying Lu"
+  }
+)
+
+zbayes$methods(
+  zelig = function(formula, 
+                   burnin = 1000, mcmc = 10000, 
+                   verbose = 0, 
+                   ..., 
+                   data,
+                   by = NULL,
+                   bootstrap = FALSE) {
+    if(!identical(bootstrap,FALSE)){
+      stop("Error: The bootstrap is not available for Markov chain Monte Carlo (MCMC) models.")
+    }
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    if (missing(verbose))
+      verbose <- round((mcmc + burnin) / 10)
+#     .self$model.call$family <- call(.self$family, .self$link)
+    .self$model.call$verbose <- verbose
+    .self$num <- mcmc # CC: check
+    callSuper(formula = formula, data = data, ..., by = by, bootstrap = FALSE)
+  }
+)
+
+zbayes$methods(
+  param = function(z.out) {
+    return(z.out)
+  }
+)
+
+zbayes$methods(
+  getcoef = function() {
+    "Get estimated model coefficients"
+    return(.self$zelig.out$z.out[[1]])
+  } 
+)
+
+zbayes$methods(
+  geweke.diag = function() {
+    diag <- lapply(.self$zelig.out$z.out, coda::geweke.diag)
+    # Collapse if only one list element for prettier printing
+    if(length(diag)==1){
+        diag<-diag[[1]]
+    }
+
+
+    if(!citation("coda") %in% .self$refs){
+      .self$refs<-c(.self$refs,citation("coda"))
+    }
+    ref1<-bibentry(
+            bibtype="InCollection",
+            title = "Evaluating the accuracy of sampling-based approaches to calculating posterior moments.",
+            booktitle = "Bayesian Statistics 4",
+            author = person("John", "Geweke"),
+            year = 1992,
+            publisher = "Clarendon Press",
+            address = "Oxford, UK",
+            editor = c(person("JM", "Bernado"), person("JO", "Berger"), person("AP", "Dawid"), person("AFM", "Smith")) 
+            )
+    .self$refs<-c(.self$refs,ref1)
+    return(diag)
+  } 
+)
+
+zbayes$methods(
+  heidel.diag = function() {
+    diag <- lapply(.self$zelig.out$z.out, coda::heidel.diag)
+    # Collapse if only one list element for prettier printing
+    if(length(diag)==1){
+        diag<-diag[[1]]
+    }
+
+
+    if(!citation("coda") %in% .self$refs){
+      .self$refs<-c(.self$refs,citation("coda"))
+    }
+    ref1<-bibentry(
+            bibtype="Article",
+            title = "Simulation run length control in the presence of an initial transient.",
+            author = c(person("P", "Heidelberger"), person("PD", "Welch")),
+            journal = "Operations Research",
+            volume = 31,
+            year = 1983,
+            pages = "1109--44")
+    .self$refs<-c(.self$refs,ref1)
+    return(diag)
+  } 
+)
+
+zbayes$methods(
+  raftery.diag = function() {
+    diag <- lapply(.self$zelig.out$z.out, coda::raftery.diag)
+    # Collapse if only one list element for prettier printing
+    if(length(diag)==1){
+        diag<-diag[[1]]
+    }
+
+
+    if(!citation("coda") %in% .self$refs){
+      .self$refs<-c(.self$refs,citation("coda"))
+    }
+    ref1<-bibentry(
+            bibtype="Article",
+            title = "One long run with diagnostics: Implementation strategies for Markov chain Monte Carlo.",
+            author = c(person("Adrian E", "Raftery"), person("Steven M", "Lewis")),
+            journal = "Statistical Science",
+            volume = 7,
+            year = 1992,
+            pages = "493--497")
+    ref2<-bibentry(
+            bibtype="InCollection",
+            title = "The number of iterations, convergence diagnostics and generic Metropolis algorithms.",
+            booktitle = "Practical Markov Chain Monte Carlo",
+            author = c(person("Adrian E", "Raftery"), person("Steven M", "Lewis")),
+            year = 1995,
+            publisher = "Chapman and Hall",
+            address = "London, UK",
+            editor = c(person("WR", "Gilks"), person("DJ", "Spiegelhalter"), person("S", "Richardson")) 
+            )
+    .self$refs<-c(.self$refs,ref1,ref2)
+    return(diag)
+  } 
+)
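The three methods above are thin wrappers around coda plus reference
bookkeeping. On a raw mcmc object the underlying diagnostics behave as
follows (simulated chain, for illustration only):

    library(coda)
    chain <- mcmc(matrix(rnorm(8000), ncol = 2))   # 4000 draws, 2 parameters
    geweke.diag(chain)    # z-scores: early vs. late segment means
    heidel.diag(chain)    # stationarity and halfwidth tests
    raftery.diag(chain)   # run-length control for quantile estimation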
diff --git a/R/model-binchoice-gee.R b/R/model-binchoice-gee.R
new file mode 100644
index 0000000..3f413f6
--- /dev/null
+++ b/R/model-binchoice-gee.R
@@ -0,0 +1,32 @@
+#' Object for Binary Choice outcomes in Generalized Estimating Equations 
+#' for inheritance across models in Zelig
+#'
+#' @import methods
+#' @export Zelig-binchoice-gee
+#' @exportClass Zelig-binchoice-gee
+#'
+#' @include model-zelig.R
+#' @include model-binchoice.R
+#' @include model-gee.R
+zbinchoicegee <- setRefClass("Zelig-binchoice-gee",
+                           contains = c("Zelig-gee",
+                                        "Zelig-binchoice"))
+
+zbinchoicegee$methods(
+  initialize = function() {
+    callSuper()
+    .self$family <- "binomial"
+    .self$year <- 2011
+    .self$category <- "continuous"
+    .self$authors <- "Patrick Lam"
+    .self$fn <- quote(geepack::geeglm)
+    # JSON from parent
+  }
+)
+
+zbinchoicegee$methods(
+  param = function(z.out, method="mvn") {
+    simparam.local <- callSuper(z.out, method=method)
+    return(simparam.local$simparam) # no ancillary parameter
+  }
+)
diff --git a/R/model-binchoice-survey.R b/R/model-binchoice-survey.R
new file mode 100644
index 0000000..bd9b6f1
--- /dev/null
+++ b/R/model-binchoice-survey.R
@@ -0,0 +1,23 @@
+#' Object for Binary Choice outcomes with Survey Weights
+#' for inheritance across models in Zelig
+#'
+#' @import methods
+#' @export Zelig-binchoice-survey
+#' @exportClass Zelig-binchoice-survey
+#'
+#' @include model-zelig.R
+#' @include model-binchoice.R
+#' @include model-survey.R
+zbinchoicesurvey <- setRefClass("Zelig-binchoice-survey",
+                           contains = c("Zelig-survey",
+                                        "Zelig-binchoice"))
+
+zbinchoicesurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$family <- "binomial"
+    .self$category <- "continuous"
+    # JSON from parent
+  }
+)
+
diff --git a/R/model-binchoice.R b/R/model-binchoice.R
new file mode 100755
index 0000000..d3ef68e
--- /dev/null
+++ b/R/model-binchoice.R
@@ -0,0 +1,38 @@
+#' Binary Choice object for inheritance across models in Zelig
+#'
+#' @import methods
+#' @export Zelig-binchoice
+#' @exportClass Zelig-binchoice
+#'
+#' @include model-zelig.R
+#' @include model-glm.R
+zbinchoice <- setRefClass("Zelig-binchoice",
+                          contains = "Zelig-glm")
+  
+zbinchoice$methods(
+  initialize = function() {
+    callSuper()
+    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
+    .self$year <- 2007
+    .self$category <- "dichotomous"
+    .self$family <- "binomial"
+    # JSON
+    .self$outcome <- "binary"
+  }
+)
+
+zbinchoice$methods(
+  qi = function(simparam, mm) {
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    coeff <- simparam
+    eta <- simparam %*% t(mm)
+    eta <- Filter(function (y) !is.na(y), eta)
+    theta <- matrix(.self$linkinv(eta), nrow = nrow(coeff))
+    ev <- matrix(.self$linkinv(eta), ncol = ncol(theta))
+    pv <- matrix(nrow = nrow(ev), ncol = ncol(ev))
+    for (j in 1:ncol(ev))
+      pv[, j] <- rbinom(length(ev[, j]), 1, prob = ev[, j])
+    levels(pv) <- c(0, 1)
+    return(list(ev = ev, pv = pv))
+  }
+)
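The qi method maps each simulated linear predictor through the inverse link
to get E(Y|X), then draws Bernoulli predicted values. The same two steps for
the logit link, in isolation (toy values):

    eta <- c(-0.5, 0.1, 0.8)                        # simulated linear predictors
    ev <- plogis(eta)                               # binomial("logit") inverse link
    pv <- rbinom(length(ev), size = 1, prob = ev)   # predicted 0/1 draws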
diff --git a/R/model-exp.R b/R/model-exp.R
new file mode 100755
index 0000000..e7f2a3f
--- /dev/null
+++ b/R/model-exp.R
@@ -0,0 +1,77 @@
+#' Exponential Regression for Duration Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-exp.html}
+#' @import methods
+#' @export Zelig-exp
+#' @exportClass Zelig-exp
+#' 
+#' @include model-zelig.R
+
+zexp <- setRefClass("Zelig-exp",
+                        contains = "Zelig",
+                        fields = list(simalpha = "list",
+                                      linkinv = "function"))
+
+zexp$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "exp"
+    .self$authors <- "Olivia Lau, Kosuke Imai, Gary King"
+    .self$packageauthors <- "Terry M. Therneau, and Thomas Lumley"
+    .self$year <- 2011
+    .self$description <- "Exponential Regression for Duration Dependent Variables"
+    .self$fn <- quote(survival::survreg)
+    .self$linkinv <- survreg.distributions[["exponential"]]$itrans
+    # JSON
+    .self$outcome <- "continous"
+    .self$wrapper <- "exp"
+    .self$acceptweights <- TRUE
+  }
+)
+
+zexp$methods(
+  zelig = function(formula, ..., robust = FALSE, cluster = NULL, data, weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    if (!(is.null(cluster) || robust))
+      stop("If cluster is specified, then `robust` must be TRUE")
+    # Add cluster term
+    if (robust || !is.null(cluster))
+      formula <- cluster.formula(formula, cluster)
+    .self$model.call$dist <- "exponential"
+    .self$model.call$model <- FALSE
+    callSuper(formula = formula, data = data, ..., robust = robust,
+              cluster = cluster,  weights = weights, by = by, bootstrap = bootstrap)
+    rse<-plyr::llply(.self$zelig.out$z.out, (function(x) vcovHC(x,type="HC0")))
+    .self$test.statistics<- list(robust.se = rse)
+  }
+)
+
+zexp$methods(
+  qi = function(simparam, mm) {
+    eta <- simparam %*% t(mm)
+    ev <- as.matrix(apply(eta, 2, linkinv))
+    pv <- as.matrix(rexp(length(ev), rate = 1 / ev))
+    return(list(ev = ev, pv = pv))
+  }
+)
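rexp is parameterized by rate, so rate = 1/ev makes the predicted values
average to the expected value. A quick check with a hypothetical ev:

    ev <- 2.5
    mean(rexp(1e6, rate = 1 / ev))   # approx. 2.5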
+
+zexp$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    .self$mcformula <- as.formula("Surv(y.sim, event) ~ x.sim")
+    
+    lambda <-exp(b0 + b1 * x)
+    event <- rep(1, length(x))
+    y.sim <- rexp(n=length(x), rate=lambda)
+    y.hat <- 1/lambda
+    
+    if(sim){
+        data <- data.frame(y.sim=y.sim, event=event, x.sim=x)
+        return(data)
+    }else{
+        data <- data.frame(y.hat=y.hat, event=event, x.seq=x)
+        return(data)
+    }
+  }
+)
+
diff --git a/R/model-factor-bayes.R b/R/model-factor-bayes.R
new file mode 100644
index 0000000..e23c1af
--- /dev/null
+++ b/R/model-factor-bayes.R
@@ -0,0 +1,145 @@
+#' Bayesian Factor Analysis
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-factorbayes.html}
+#' @import methods
+#' @export Zelig-factor-bayes
+#' @exportClass Zelig-factor-bayes
+#' 
+#' @include model-zelig.R
+
+zfactorbayes <- setRefClass("Zelig-factor-bayes",
+                            contains = c("Zelig"))
+
+zfactorbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "factor-bayes"
+    .self$year <- 2013
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$packageauthors <- "Andrew D. Martin, Kevin M. Quinn, and Jong Hee Park"
+    .self$description = "Bayesian Factor Analysis"
+    .self$fn <- quote(MCMCpack::MCMCfactanal)
+    # JSON from parent
+    .self$wrapper <- "factor.bayes"
+  }
+)
+
+zfactorbayes$methods(
+  zelig = function(formula, 
+                   factors = 2,
+                   burnin = 1000, mcmc = 20000, 
+                   verbose = 0, 
+                   ..., 
+                   data,
+                   by = NULL,
+                   bootstrap = FALSE) {
+    if(!identical(bootstrap,FALSE)){
+      stop("Error: The bootstrap is not available for Markov chain Monte Carlo (MCMC) models.")
+    }
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    if (missing(verbose))
+      verbose <- round((mcmc + burnin) / 10)
+    if (factors < 2)
+      stop("Number of factors needs to be at least 2")
+    .self$model.call$verbose <- verbose
+    .self$model.call$x <- formula
+    .self$model.call$factors <- factors
+    callSuper(formula = formula, data = data,..., by = by, bootstrap = FALSE)
+  }
+)
+
+zfactorbayes$methods(
+  qi = function() {
+    return(NULL)
+  }
+)
+
+# The following diagnostics are also in Zelig-bayes, which unfortunately Zelig-factor-bayes does not currently inherit.
+zfactorbayes$methods(
+  geweke.diag = function() {
+    diag <- lapply(.self$zelig.out$z.out, coda::geweke.diag)
+    # Collapse if only one list element for prettier printing
+    if(length(diag)==1){
+        diag<-diag[[1]]
+    }
+
+
+    if(!citation("coda") %in% .self$refs){
+      .self$refs<-c(.self$refs,citation("coda"))
+    }
+    ref1<-bibentry(
+            bibtype="InCollection",
+            title = "Evaluating the accuracy of sampling-based approaches to calculating posterior moments.",
+            booktitle = "Bayesian Statistics 4",
+            author = person("John", "Geweke"),
+            year = 1992,
+            publisher = "Clarendon Press",
+            address = "Oxford, UK",
+            editor = c(person("JM", "Bernado"), person("JO", "Berger"), person("AP", "Dawid"), person("AFM", "Smith")) 
+            )
+    .self$refs<-c(.self$refs,ref1)
+    return(diag)
+  } 
+)
+
+zfactorbayes$methods(
+  heidel.diag = function() {
+    diag <- lapply(.self$zelig.out$z.out, coda::heidel.diag)
+    # Collapse if only one list element for prettier printing
+    if(length(diag)==1){
+        diag<-diag[[1]]
+    }
+
+
+    if(!citation("coda") %in% .self$refs){
+      .self$refs<-c(.self$refs,citation("coda"))
+    }
+    ref1<-bibentry(
+            bibtype="Article",
+            title = "Simulation run length control in the presence of an initial transient.",
+            author = c(person("P", "Heidelberger"), person("PD", "Welch")),
+            journal = "Operations Research",
+            volume = 31,
+            year = 1983,
+            pages = "1109--44")
+    .self$refs<-c(.self$refs,ref1)
+    return(diag)
+  } 
+)
+
+zfactorbayes$methods(
+  raftery.diag = function() {
+    diag <- lapply(.self$zelig.out$z.out, coda::raftery.diag)
+    # Collapse if only one list element for prettier printing
+    if(length(diag)==1){
+        diag<-diag[[1]]
+    }
+
+
+    if(!citation("coda") %in% .self$refs){
+      .self$refs<-c(.self$refs,citation("coda"))
+    }
+    ref1<-bibentry(
+            bibtype="Article",
+            title = "One long run with diagnostics: Implementation strategies for Markov chain Monte Carlo.",
+            author = c(person("Adrian E", "Raftery"), person("Steven M", "Lewis")),
+            journal = "Statistical Science",
+            volume = 7,
+            year = 1992,
+            pages = "493--497")
+    ref2<-bibentry(
+            bibtype="InCollection",
+            title = "The number of iterations, convergence diagnostics and generic Metropolis algorithms.",
+            booktitle = "Practical Markov Chain Monte Carlo",
+            author = c(person("Adrian E", "Raftery"), person("Steven M", "Lewis")),
+            year = 1995,
+            publisher = "Chapman and Hall",
+            address = "London, UK",
+            editor = c(person("WR", "Gilks"), person("DJ", "Spiegelhalter"), person("S", "Richardson")) 
+            )
+    .self$refs<-c(.self$refs,ref1,ref2)
+    return(diag)
+  } 
+)
+
diff --git a/R/model-gamma-gee.R b/R/model-gamma-gee.R
new file mode 100755
index 0000000..c6b57b7
--- /dev/null
+++ b/R/model-gamma-gee.R
@@ -0,0 +1,30 @@
+#' Generalized Estimating Equation for Gamma Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-gammagee.html}
+#' @import methods
+#' @export Zelig-gamma-gee
+#' @exportClass Zelig-gamma-gee
+#' 
+#' @include model-zelig.R
+#' @include model-gee.R
+#' @include model-gamma.R
+
+zgammagee <- setRefClass("Zelig-gamma-gee",
+                           contains = c("Zelig-gee", "Zelig-gamma"))
+
+zgammagee$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "gamma-gee"
+    .self$family <- "Gamma"
+    .self$link <- "inverse"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$year <- 2011
+    .self$category <- "continuous"
+    .self$authors <- "Patrick Lam"
+    .self$description = "General Estimating Equation for Gamma Regression"
+    .self$fn <- quote(geepack::geeglm)
+    # JSON from parent
+    .self$wrapper <- "gamma.gee"
+  }
+)
diff --git a/R/model-gamma-survey.R b/R/model-gamma-survey.R
new file mode 100755
index 0000000..93f45aa
--- /dev/null
+++ b/R/model-gamma-survey.R
@@ -0,0 +1,54 @@
+#' Gamma Regression with Survey Weights
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-gammasurvey.html}
+#' @import methods
+#' @export Zelig-gamma-survey
+#' @exportClass Zelig-gamma-survey
+#' 
+#' @include model-zelig.R
+#' @include model-survey.R
+#' @include model-gamma.R
+
+zgammasurvey <- setRefClass("Zelig-gamma-survey",
+                           contains = c("Zelig-survey", "Zelig-gamma"))
+
+zgammasurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "gamma-survey"
+    .self$family <- "Gamma"
+    .self$link <- "inverse"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$category <- "continuous"
+    .self$description = "Gamma Regression with Survey Weights"
+    # JSON from parent
+    .self$wrapper <- "gamma.survey"
+  }
+)
+
+zgammasurvey$methods(
+  param = function(z.out, method="mvn") {
+    shape <- MASS::gamma.shape(z.out)
+    if(identical(method,"mvn")){
+      simalpha <- rnorm(n = .self$num, mean = shape$alpha, sd = shape$SE)
+      simparam.local <- mvrnorm(n = .self$num, mu = coef(z.out),
+                                   Sigma = vcov(z.out))
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = shape$alpha))
+    }
+  }
+)
+
+zgammasurvey$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    lambda <- 1/(b0 + b1 * x)
+    if(sim){
+        y <- rgamma(n=length(x), shape=lambda, scale = alpha)
+        return(y)
+    }else{
+        return(alpha * lambda)
+    }
+  }
+)
\ No newline at end of file
diff --git a/R/model-gamma.R b/R/model-gamma.R
new file mode 100755
index 0000000..ecc0305
--- /dev/null
+++ b/R/model-gamma.R
@@ -0,0 +1,70 @@
+#' Gamma Regression for Continuous, Positive Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-gamma.html}
+#' @import methods
+#' @export Zelig-gamma
+#' @exportClass Zelig-gamma
+#' 
+#' @include model-zelig.R
+#' @include model-glm.R
+
+zgamma <- setRefClass("Zelig-gamma",
+                      contains = "Zelig-glm")
+
+zgamma$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "gamma"
+    .self$family <- "Gamma"
+    .self$link <- "inverse"
+    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
+    .self$year <- 2007
+    .self$category <- "bounded"
+    .self$description <- "Gamma Regression for Continuous, Positive Dependent Variables"
+    # JSON
+    .self$outcome <- "continous"
+    .self$wrapper <- "gamma"
+  }
+)
+
+zgamma$methods(
+  param = function(z.out, method="mvn") {
+    shape <- MASS::gamma.shape(z.out)
+    if(identical(method, "mvn")){
+      simalpha <- rnorm(n = .self$num, mean = shape$alpha, sd = shape$SE)
+      simparam.local <- mvrnorm(n = .self$num, mu = coef(z.out),
+                                   Sigma = vcov(z.out))
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = shape$alpha ))
+    }
+  }
+)
+
+zgamma$methods(
+  qi = function(simparam, mm) {
+    coeff <- simparam$simparam
+    eta <- coeff %*% t(mm)
+    theta <- matrix(1 / eta, nrow = nrow(coeff))
+    ev <- theta
+    pv <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
+    for (ii in 1:nrow(ev))
+      pv[ii, ] <- rgamma(ncol(ev), shape = simparam$simalpha[ii], 
+                         scale = theta[ii] / simparam$simalpha[ii])
+    return(list(ev = ev, pv = pv))
+  }
+)
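The shape/scale pairing in qi is mean-preserving: with shape = alpha and
scale = theta/alpha, E(Y) = shape * scale = theta, so predicted values are
centered on the expected values. A quick check with hypothetical numbers:

    theta <- 3; alpha <- 2
    mean(rgamma(1e6, shape = alpha, scale = theta / alpha))   # approx. 3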
+
+zgamma$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    lambda <- 1/(b0 + b1 * x)
+    if(sim){
+        y <- rgamma(n=length(x), shape=lambda, scale = alpha)
+        return(y)
+    }else{
+        return(alpha * lambda)
+    }
+  }
+)
+
diff --git a/R/model-gee.R b/R/model-gee.R
new file mode 100755
index 0000000..7d2ac0b
--- /dev/null
+++ b/R/model-gee.R
@@ -0,0 +1,74 @@
+#' Generalized Estimating Equations Model object for inheritance across models in Zelig
+#'
+#' @import methods
+#' @export Zelig-gee
+#' @exportClass Zelig-gee
+#'
+#' @include model-zelig.R
+
+zgee <- setRefClass("Zelig-gee",
+                    contains = "Zelig")
+
+zgee$methods(
+  initialize = function() {
+    callSuper()
+    .self$packageauthors <- "Soren Hojsgaard, Ulrich Halekoh, and Jun Yan"
+    .self$modelauthors <- "Patrick Lam"
+    .self$acceptweights <- TRUE
+  }
+)
+
+
+zgee$methods(
+  zelig = function(formula, id, ..., zcor = NULL, corstr = "independence", data, weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    if (corstr == "fixed" && is.null(zcor))
+      stop("R must be defined")
+    # if id is a valid column-name in data, then we just need to extract the
+    # column and re-order the data.frame and cluster information
+    if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
+      id <- data[, id]
+      data <- data[order(id), ]
+      id <- sort(id)
+    }
+    .self$model.call$family <- call(.self$family, .self$link)
+    .self$model.call$id <- id
+    .self$model.call$zcor <- zcor
+    .self$model.call$corstr <- corstr
+    callSuper(formula = formula, data = data, ..., weights = weights, by = by, bootstrap = bootstrap)
+    # Prettify summary display without modifying .self$model.call
+    for (i in seq_along(.self$zelig.out$z.out)) {
+      .self$zelig.out$z.out[[i]]$call$id <- .self$zelig.call$id
+      .self$zelig.out$z.out[[i]]$call$zcor <- "zcor"
+    }
+  }
+)
+   
+zgee$methods(
+  param = function(z.out, method="mvn") {
+    so <- summary(z.out)
+    shape <- so$dispersion
+    if(identical(method,"point")){
+      return( list(simparam = t(as.matrix(coef(z.out))), simalpha = shape[1][1] ))
+    }else if(identical(method,"mvn")){
+      simalpha <- rnorm(n = .self$num,
+                      mean = shape[1][[1]],
+                      sd = shape[2][[1]])
+      simparam.local <- mvrnorm(n = .self$num,
+                        mu = coef(z.out),
+                        Sigma = so$cov.unscaled)
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    }
+  }
+)
+
+# zgee$methods(
+#   show = function() {
+#     for (i in length(.self$zelig.out$z.out)) {
+#       .self$zelig.out$z.out[[i]]$call$id <- "id"
+#     }
+#     callSuper()
+#   }
+# )
diff --git a/R/model-glm.R b/R/model-glm.R
new file mode 100755
index 0000000..01b3786
--- /dev/null
+++ b/R/model-glm.R
@@ -0,0 +1,33 @@
+#' Generalized Linear Model object for inheritance across models in Zelig
+#'
+#' @import methods
+#' @export Zelig-glm
+#' @exportClass Zelig-glm
+#'
+#' @include model-zelig.R
+
+zglm <- setRefClass("Zelig-glm",
+                    contains = "Zelig",
+                    fields = list(family = "character",
+                                  link = "character",
+                                  linkinv = "function"))
+  
+zglm$methods(
+  initialize = function() {
+    callSuper()
+    .self$fn <- quote(stats::glm)
+    .self$packageauthors <- "R Core Team"
+    .self$acceptweights <- FALSE # "Why glm refers to the number of trials as weight is a trick question to the developers' conscience."
+  }
+)
+
+zglm$methods(
+  zelig = function(formula, data, ..., weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    .self$model.call$family <- call(.self$family, .self$link)
+    callSuper(formula = formula, data = data, ..., weights = weights, by = by, bootstrap = bootstrap)
+    rse <- plyr::llply(.self$zelig.out$z.out, (function(x) vcovHC(x, type = "HC0")))
+    .self$test.statistics <- list(robust.se = rse)
+  }
+)
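The robust standard errors attached to test.statistics come from the
sandwich estimator. On a plain glm fit the same quantity is (builtin data,
for illustration only):

    library(sandwich)
    fit <- glm(am ~ wt, family = binomial, data = mtcars)
    sqrt(diag(vcovHC(fit, type = "HC0")))   # HC0 robust standard errors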
diff --git a/R/model-logit-bayes.R b/R/model-logit-bayes.R
new file mode 100644
index 0000000..6ca9041
--- /dev/null
+++ b/R/model-logit-bayes.R
@@ -0,0 +1,43 @@
+#' Bayesian Logit Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logitbayes.html}
+#' @import methods
+#' @export Zelig-logit-bayes
+#' @exportClass Zelig-logit-bayes
+#' 
+#' @include model-zelig.R
+#' @include model-bayes.R
+#' @include model-logit.R
+
+zlogitbayes <- setRefClass("Zelig-logit-bayes",
+                             contains = c("Zelig-bayes",
+                                          "Zelig-logit"))
+
+zlogitbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "logit-bayes"
+    .self$family <- "binomial"
+    .self$link <- "logit"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$year <- 2013
+    .self$category <- "dichotomous"
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Logistic Regression for Dichotomous Dependent Variables"
+    .self$fn <- quote(MCMCpack::MCMClogit)
+    # JSON from parent
+    .self$wrapper <- "logit.bayes"
+  }
+)
+
+zlogitbayes$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- 1/(1 + exp(-b0 - b1 * x))
+    if(sim){
+        y <- rbinom(n=length(x), size=1, prob=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
diff --git a/R/model-logit-gee.R b/R/model-logit-gee.R
new file mode 100755
index 0000000..776d618
--- /dev/null
+++ b/R/model-logit-gee.R
@@ -0,0 +1,22 @@
+#' Generalized Estimating Equation for Logit Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logitgee.html}
+#' @import methods
+#' @export Zelig-logit-gee
+#' @exportClass Zelig-logit-gee
+#' 
+#' @include model-zelig.R
+#' @include model-binchoice-gee.R
+
+zlogitgee <- setRefClass("Zelig-logit-gee",
+                           contains = c("Zelig-binchoice-gee"))
+
+zlogitgee$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "logit-gee"
+    .self$link <- "logit"
+    .self$description <- "General Estimating Equation for Logistic Regression"
+    .self$wrapper <- "logit.gee"
+  }
+)
\ No newline at end of file
diff --git a/R/model-logit-survey.R b/R/model-logit-survey.R
new file mode 100755
index 0000000..b46c6f9
--- /dev/null
+++ b/R/model-logit-survey.R
@@ -0,0 +1,35 @@
+#' Logit Regression with Survey Weights
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logitsurvey.html}
+#' @import methods
+#' @export Zelig-logit-survey
+#' @exportClass Zelig-logit-survey
+#' 
+#' @include model-zelig.R
+#' @include model-binchoice-survey.R
+
+zlogitsurvey <- setRefClass("Zelig-logit-survey",
+                           contains = c("Zelig-binchoice-survey"))
+
+zlogitsurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "logit-survey"
+    .self$link <- "logit"
+    .self$description <- "Logistic Regression with Survey Weights"
+    .self$wrapper <- "logit.survey"
+  }
+)
+
+
+zlogitsurvey$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- 1/(1 + exp(-b0 - b1 * x))
+    if(sim){
+        y <- rbinom(n=length(x), size=1, prob=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
\ No newline at end of file
diff --git a/R/model-logit.R b/R/model-logit.R
new file mode 100755
index 0000000..734aa9c
--- /dev/null
+++ b/R/model-logit.R
@@ -0,0 +1,41 @@
+#' Logistic Regression for Dichotomous Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logit.html}
+#' @import methods
+#' @export Zelig-logit
+#' @exportClass Zelig-logit
+#' 
+#' @include model-zelig.R
+#' @include model-gee.R
+#' @include model-gamma.R
+#' @include model-zelig.R
+#' @include model-glm.R
+#' @include model-binchoice.R
+
+zlogit <- setRefClass("Zelig-logit",
+                      contains = "Zelig-binchoice")
+  
+zlogit$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "logit"
+    .self$link <- "logit"
+    .self$description = "Logistic Regression for Dichotomous Dependent Variables"
+    .self$packageauthors <- "R Core Team"
+    .self$wrapper <- "logit"
+  }
+)
+
+
+zlogit$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- 1/(1 + exp(-b0 - b1 * x))
+    if(sim){
+        y <- rbinom(n=length(x), size=1, prob=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
+
diff --git a/R/model-lognorm.R b/R/model-lognorm.R
new file mode 100755
index 0000000..82032d2
--- /dev/null
+++ b/R/model-lognorm.R
@@ -0,0 +1,110 @@
+#' Log-Normal Regression for Duration Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-lognorm.html}
+#' @import methods
+#' @export Zelig-lognorm
+#' @exportClass Zelig-lognorm
+#' 
+#' @include model-zelig.R
+
+zlognorm <- setRefClass("Zelig-lognorm",
+                        contains ="Zelig",
+                        fields = list(linkinv = "function"))
+
+zlognorm$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "lognorm"
+    .self$authors <- "Matthew Owen, Olivia Lau, Kosuke Imai, Gary King"
+    .self$packageauthors <- "Terry M Therneau, and Thomas Lumley"
+    .self$year <- 2007
+    .self$description <- "Log-Normal Regression for Duration Dependent Variables"
+    .self$fn <- quote(survival::survreg)
+    .self$linkinv <- survreg.distributions[["lognormal"]]$itrans
+    # JSON
+    .self$outcome <- "discrete"
+    .self$wrapper <- "lognorm"
+    .self$acceptweights <- TRUE
+  }
+)
+
+zlognorm$methods(
+  zelig = function(formula, ..., robust = FALSE, cluster = NULL, data, weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    if (!(is.null(cluster) || robust))
+      stop("If cluster is specified, then `robust` must be TRUE")
+    # Add cluster term
+    if (robust || !is.null(cluster))
+      formula <- cluster.formula(formula, cluster)
+    .self$model.call$dist <- "lognormal"
+    .self$model.call$model <- FALSE
+    callSuper(formula = formula, data = data, ..., robust = robust,
+              cluster = cluster, weights = weights, by = by, bootstrap = bootstrap)
+              
+    if(!robust){
+      fn2 <- function(fc, data) {
+        fc$data <- data
+        return(fc)
+      }
+      robust.model.call <- .self$model.call
+      robust.model.call$robust <- TRUE
+      
+      robust.zelig.out <- .self$data %>%
+        group_by_(.self$by) %>%
+        do(z.out = eval(fn2(robust.model.call, quote(as.data.frame(.))))$var)
+
+      .self$test.statistics <- list(robust.se = robust.zelig.out$z.out)
+    }
+  }
+)
+
+zlognorm$methods(
+  param = function(z.out, method="mvn") {
+    if(identical(method,"mvn")){
+      coeff <- coef(z.out)
+      mu <- c(coeff, log(z.out$scale))
+      cov <- vcov(z.out)
+      simulations <- mvrnorm(.self$num, mu = mu, Sigma = cov)
+      simparam.local <- as.matrix(simulations[, 1:length(coeff)])
+      simalpha <- as.matrix(simulations[, -(1:length(coeff))])
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = log(z.out$scale) ))
+    }
+  }
+)
+
+zlognorm$methods(
+  qi = function(simparam, mm) {
+    alpha <- simparam$simalpha
+    beta <- simparam$simparam
+    coeff <- simparam$simparam
+    eta <- coeff %*% t(mm)
+    theta <- as.matrix(apply(eta, 2, linkinv))
+    ev <- exp(log(theta) + 0.5 * (exp(alpha))^2)
+    pv <- matrix(rlnorm(n = length(ev), meanlog = log(theta), sdlog = exp(alpha)), nrow = length(ev), ncol = 1)
+    dimnames(ev) <- dimnames(theta)
+    return(list(ev = ev, pv = pv))
+  }
+)
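+# The ev line in qi() above is the log-normal mean E[Y] = exp(m + s^2 / 2),
+# with m = log(theta) and s = exp(alpha). A quick empirical check:
+#   mean(rlnorm(1e6, meanlog = 1, sdlog = 0.5))  # ~ exp(1 + 0.25 / 2) = 3.08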
+
+zlognorm$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    .self$mcformula <- as.formula("Surv(y.sim, event) ~ x.sim")
+    
+    mu <- b0 + b1 * x
+    event <- rep(1, length(x))
+    y.sim <- rlnorm(n=length(x), meanlog=mu, sdlog=alpha)
+    y.hat <- exp(mu + 0.5*alpha^2)
+    
+    if(sim){
+        data <- data.frame(y.sim=y.sim, event=event, x.sim=x)
+        return(data)
+    }else{
+        data <- data.frame(y.hat=y.hat, event=event, x.seq=x)
+        return(data)
+    }
+  }
+)
diff --git a/R/model-ls.R b/R/model-ls.R
new file mode 100755
index 0000000..daf8e6b
--- /dev/null
+++ b/R/model-ls.R
@@ -0,0 +1,169 @@
+#' Least Squares Regression for Continuous Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-ls.html}
+#' @import methods
+#' @export Zelig-ls
+#' @exportClass Zelig-ls
+#'
+#' @include model-zelig.R
+
+zls <- setRefClass("Zelig-ls", contains = "Zelig")
+
+zls$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "ls"
+    .self$year <- 2007
+    .self$category <- "continuous"
+    .self$description <- "Least Squares Regression for Continuous Dependent Variables"
+    .self$packageauthors <- "R Core Team"
+    .self$fn <- quote(stats::lm)
+    # JSON
+    .self$outcome <- "continous"
+    .self$wrapper <- "ls"
+    .self$acceptweights <- TRUE
+  }
+)
+
+zls$methods(
+  zelig = function(formula, data, ..., weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    callSuper(formula = formula, data = data, ...,
+              weights = weights, by = by, bootstrap = bootstrap)
+    # Automated Background Test Statistics and Criteria
+    rse<-plyr::llply(.self$zelig.out$z.out, (function(x) vcovHC(x,type="HC0")))
+    rse.se <- sqrt(diag(rse[[1]]))                 # Needs to work with "by" argument
+    est.se <- sqrt(diag(.self$getvcov()[[1]]))
+    quickGim <- any( est.se > 1.5*rse.se | rse.se > 1.5*est.se )
+    .self$test.statistics<- list(robust.se = rse, gim.criteria = quickGim)
+  }
+)
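+# Note on gim.criteria above: quickGim flips to TRUE when any classical and
+# robust standard error differ by more than 50%, a cheap screen suggesting
+# that the full (and much slower) generalized information matrix test in
+# gim() below may be worth running.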
+
+zls$methods(
+  param = function(z.out, method="mvn") {
+    if(identical(method,"mvn")){
+      return(list(simparam=mvrnorm(.self$num, coef(z.out), vcov(z.out)), simalpha=rep( summary(z.out)$sigma, .self$num) )  )
+    } else if(identical(method,"point")){
+      return(list(simparam=t(as.matrix(coef(z.out))), simalpha=summary(z.out)$sigma))
+    } else {
+      stop("param called with method argument of undefined type.")
+    }
+  }
+)
+
+zls$methods(
+  qi = function(simparam, mm) {
+    ev <- simparam$simparam %*% t(mm)
+    pv <- matrix(rnorm(n = length(ev), mean = ev, sd = simparam$simalpha), nrow = length(ev), ncol = 1)
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+zls$methods(
+  gim = function(B=50, B2=50) {
+    ll.normal.bsIM <- function(par,y,X,sigma){
+        beta <- par[1:length(X)]
+        sigma2 <- sigma
+        -1/2 * (sum(log(sigma2) + (y -(X%*%beta))^2/sigma2))
+    }
+    
+    getVb<-function(Dboot){
+      Dbar <- matrix(apply(Dboot,2,mean),nrow=B, ncol=length(Dhat), byrow=TRUE)
+      Diff <- Dboot - Dbar
+      Vb <- (t(Diff) %*% Diff) / (nrow(Dboot)-1)
+      return(Vb)
+    }
+    
+    getSigma<-function(lm.obj){
+      return(sum(lm.obj$residuals^2)/(nrow(model.matrix(lm.obj))-ncol(model.matrix(lm.obj))))
+    }
+    
+    D.est<-function(formula,data){
+      lm1 <- lm(formula,data, y=TRUE)
+      mm <- model.matrix(lm1)
+      y <- lm1$y
+      sigma <- getSigma(lm1)
+    
+      grad <- apply(cbind(y,mm),1,function(x) numericGradient(ll.normal.bsIM, lm1$coefficients, y=x[1], X=x[2:length(x)], sigma=sigma))
+      meat <- grad%*%t(grad)
+      bread <- -solve(vcov(lm1))
+      Dhat <- nrow(mm)^(-1/2)* as.vector(diag(meat + bread))
+      return(Dhat)
+    }
+
+    D.est.vb<-function(formula,data){
+        lm1 <- lm(formula,data, y=TRUE)
+        mm <- model.matrix(lm1)
+        y <- lm1$y
+        sigma <- getSigma(lm1)
+        
+        grad <- apply(cbind(y,mm),1,function(x) numericGradient(ll.normal.bsIM, lm1$coefficients, y=x[1], X=x[2:length(x)], sigma=sigma))
+        meat <- grad%*%t(grad)
+        bread <- -solve(vcov(lm1))
+        Dhat <- nrow(mm)^(-1/2)* as.vector(diag(meat + bread))
+
+        muB<-lm1$fitted.values
+        DB <- matrix(NA, nrow=B2, ncol=length(Dhat))
+            
+        for(j in 1:B2){
+          yB2 <- rnorm(nrow(data), muB, sqrt(sigma))
+          lm1B2 <- lm(yB2 ~ mm-1)
+          sigmaB2 <- getSigma(lm1B2)
+
+          grad <- apply(cbind(yB2,model.matrix(lm1B2)),1,function(x) numericGradient(ll.normal.bsIM, lm1B2$coefficients, y=x[1], X=x[2:length(x)], sigma=sigmaB2))
+          meat <- grad%*%t(grad)
+          bread <- -solve(vcov(lm1B2))
+          DB[j,] <- nrow(mm)^(-1/2)*diag((meat + bread))
+        }
+        Vb <- getVb(DB)
+        T<- t(Dhat)%*%solve(Vb)%*%Dhat
+
+        return(list(Dhat=Dhat,T=T))
+    }
+    
+    Dhat <- D.est(formula=.self$formula, data=.self$data)
+    lm1 <- lm(formula=.self$formula, data=.self$data)
+    mu <- lm1$fitted.values
+    sigma <- getSigma(lm1)
+    n <- length(mu)
+    yname <- all.vars(.self$formula[[2]])
+    
+    Dboot <- matrix(NA, nrow=B, ncol=length(Dhat))
+    T <- rep(NA, B)
+    bootdata <- .self$data
+    for(i in 1:B){
+        yB <- rnorm(n, mu, sqrt(sigma))
+        bootdata[yname] <- yB
+        result <- D.est.vb(formula=.self$formula, data=bootdata)
+        Dboot[i,] <- result$Dhat
+        T[i] <- result$T
+    }
+
+    Vb <- getVb(Dboot)
+    omega <- t(Dhat) %*% solve(Vb) %*% Dhat
+    pb <- (B + 1 - sum(T < as.numeric(omega))) / (B + 1)
+    
+    .self$test.statistics$gim <- list(stat=omega, pval=pb)
+
+    # When method used, add to references
+    gimreference <- bibentry(
+        bibtype="Article",
+        title = "How Robust Standard Errors Expose Methodological Problems They Do Not Fix, and What to Do About It",
+        author = c(
+        person("Gary", "King"),
+        person("Margret E.", "Roberts")
+        ),
+        journal = "Political Analysis",
+        year = 2014,
+        pages = "1-21",
+        url =  "http://j.mp/InK5jU")
+    .self$refs <- c(.self$refs, gimreference)
+  }
+)
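+# A minimal usage sketch (hypothetical objects; assumes a fitted model):
+#   z <- zls$new(); z$zelig(y ~ x, data = d)
+#   z$gim(B = 50, B2 = 50)
+#   z$test.statistics$gim  # list(stat = omega, pval = pb)
+# Larger B and B2 trade computation time for a more accurate bootstrap p-value.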
+
+zls$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    y <- b0 + b1*x + sim * rnorm(n=length(x), sd=alpha)
+    return(y)
+  }
+)
diff --git a/R/model-ma.R b/R/model-ma.R
new file mode 100755
index 0000000..1daa725
--- /dev/null
+++ b/R/model-ma.R
@@ -0,0 +1,25 @@
+#' Time-Series Model with Moving Average
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-ma.html}
+#' @import methods
+#' @export Zelig-ma
+#' @exportClass Zelig-ma
+#'
+#' @include model-zelig.R
+#' @include model-timeseries.R
+  
+zma <- setRefClass("Zelig-ma",
+                       contains = "Zelig-timeseries")
+
+zma$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "ma"
+    .self$link <- "identity"
+    .self$fn <- quote(zeligArimaWrapper)
+    .self$description = "Time-Series Model with Moving Average"
+    .self$packageauthors <- "R Core Team"
+    .self$outcome <- "continuous"
+    .self$wrapper <- "timeseries"
+  }
+)
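+# Cross-reference: the MA(1) order c(0, 0, 1) is not set here; it is imposed
+# by Zelig-timeseries$zelig() (see model-timeseries.R) whenever
+# .self$name == "ma".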
diff --git a/R/model-mlogit-bayes.R b/R/model-mlogit-bayes.R
new file mode 100644
index 0000000..fa78a0f
--- /dev/null
+++ b/R/model-mlogit-bayes.R
@@ -0,0 +1,61 @@
+#' Bayesian Multinomial Logistic Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-mlogitbayes.html}
+#' @import methods
+#' @export Zelig-mlogit-bayes
+#' @exportClass Zelig-mlogit-bayes
+#'
+#' @include model-zelig.R
+#' @include model-bayes.R
+
+zmlogitbayes <- setRefClass("Zelig-mlogit-bayes",
+                             contains = c("Zelig-bayes"))
+
+zmlogitbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "mlogit-bayes"
+    .self$year <- 2013
+    .self$category <- "discrete"
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values"
+    .self$fn <- quote(MCMCpack::MCMCmnl)
+    # JSON from parent
+    .self$wrapper <- "mlogit.bayes"
+  }
+)
+
+zmlogitbayes$methods(
+  qi = function(simparam, mm) {
+    resp <- model.response(model.frame(.self$formula, data = .self$data))
+    level <- length(table(resp))
+    p <- dim(model.matrix(eval(.self$formula), data = .self$data))[2]
+    coef <- simparam
+    eta <- array(NA, c(nrow(coef), level, nrow(mm)))
+    eta[, 1, ] <- matrix(0, nrow(coef), nrow(mm))
+    for (j in 2:level) {
+      ind <- (1:p) * (level - 1) - (level - j)
+      eta[, j, ]<- coef[, ind] %*% t(mm)
+    }
+    eta <- exp(eta)
+    ev <- array(NA, c(nrow(coef), level, nrow(mm)))
+    pv <- matrix(NA, nrow(coef), nrow(mm))
+    colnames(ev) <- rep(NA, level)
+    for (k in 1:nrow(mm)) {
+      for (j in 1:level)
+        ev[, j, k] <- eta[, j, k] / rowSums(eta[, , k])
+    }
+    for (j in 1:level) {
+      colnames(ev)[j] <- paste("P(Y=", j, ")", sep="")
+    }
+    for (k in 1:nrow(mm)) {             
+      probs <- as.matrix(ev[, , k])
+      temp <- apply(probs, 1, FUN = rmultinom, n = 1, size = 1)
+      temp <- as.matrix(t(temp) %*% (1:nrow(temp)))
+      pv <- apply(temp, 2, as.character)
+      pv <- as.factor(pv)
+    }
+    return(list(ev = ev, pv = pv))
+  }
+)
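+# Note on qi() above: the expected values are the standard multinomial-logit
+# (softmax) probabilities with category 1 as the baseline, i.e. for one draw
+#   P(Y = j) = exp(eta_j) / sum_k exp(eta_k),  with eta_1 = 0.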
+
diff --git a/R/model-negbinom.R b/R/model-negbinom.R
new file mode 100755
index 0000000..6bdd173
--- /dev/null
+++ b/R/model-negbinom.R
@@ -0,0 +1,83 @@
+#' Negative Binomial Regression for Event Count Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-negbin.html}
+#' @import methods
+#' @export Zelig-negbin
+#' @exportClass Zelig-negbin
+#'
+#' @include model-zelig.R
+
+znegbin <- setRefClass("Zelig-negbin",
+                         contains = "Zelig",
+                         fields = list(simalpha = "list" # ancillary parameters
+                         ))
+
+znegbin$methods(
+  initialize = function() {
+    callSuper()
+    .self$fn <- quote(MASS::glm.nb)
+    .self$name <- "negbin"
+    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
+    .self$packageauthors <- "William N. Venables, and Brian D. Ripley"
+    .self$year <- 2008
+    .self$category <- "count"
+    .self$description <- "Negative Binomial Regression for Event Count Dependent Variables"
+    # JSON
+    .self$outcome <- "discrete"
+    .self$wrapper <- "negbin"
+    .self$acceptweights <- TRUE
+  }
+)
+
+znegbin$methods(
+  zelig = function(formula, data, ..., weights=NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    callSuper(formula=formula, data=data, ..., weights=weights, by = by, bootstrap = bootstrap)
+    rse<-plyr::llply(.self$zelig.out$z.out, (function(x) vcovHC(x,type="HC0")))
+    .self$test.statistics<- list(robust.se = rse)
+  }
+)
+
+znegbin$methods(
+  param = function(z.out, method="mvn") {
+    simalpha.local <- z.out$theta
+    if(identical(method,"mvn")){
+      simparam.local <- mvrnorm(n = .self$num, mu = coef(z.out),
+                        Sigma = vcov(z.out))
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha.local)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = simalpha.local))
+    }
+  }
+)
+
+znegbin$methods(
+  qi = function(simparam, mm) {
+    coeff <- simparam$simparam
+    alpha <- simparam$simalpha
+    inverse <- family(.self$zelig.out$z.out[[1]])$linkinv
+    eta <- coeff %*% t(mm)
+    theta <- matrix(inverse(eta), nrow=nrow(coeff))
+    ev <- theta
+    pv <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
+    # alpha (z.out$theta) is a scalar dispersion; draw each column of
+    # predicted counts against its own column of expected values
+    for (i in 1:ncol(ev))
+      pv[, i] <- rnegbin(nrow(ev), mu = ev[, i], theta = alpha)
+    return(list(ev  = ev, pv = pv))
+  }
+)
+
+znegbin$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- exp(b0 + b1 * x)
+    if(sim){
+        y <- rnbinom(n=length(x), 1, mu=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
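+# Note on mcfun() above: rnbinom() with size fixed at 1 draws from a geometric
+# distribution with mean mu, so only the mean, not the dispersion, varies with
+# x in this Monte Carlo test function.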
+
diff --git a/R/model-normal-bayes.R b/R/model-normal-bayes.R
new file mode 100644
index 0000000..37f4e12
--- /dev/null
+++ b/R/model-normal-bayes.R
@@ -0,0 +1,53 @@
+#' Bayesian Normal Linear Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normalbayes.html}
+#' @import methods
+#' @export Zelig-normal-bayes
+#' @exportClass Zelig-normal-bayes
+#'  
+#' @include model-zelig.R
+#' @include model-bayes.R
+#' @include model-normal.R
+
+znormalbayes <- setRefClass("Zelig-normal-bayes",
+                             contains = c("Zelig-bayes",
+                                          "Zelig-normal"))
+
+znormalbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "normal-bayes" # CC: should't it be lsbayes?
+    .self$year <- 2013
+    .self$category <- "continuous"
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Normal Linear Regression"
+    .self$fn <- quote(MCMCpack::MCMCregress)
+    # JSON from parent
+    .self$wrapper <- "normal.bayes"
+  }
+)
+
+znormalbayes$methods(
+  qi = function(simparam, mm) {
+    # Extract simulated parameters and get column names
+    coef <- simparam
+    cols <- colnames(coef)
+    # Place the simulated variances in their own vector
+    sigma2 <- coef[, ncol(coef)]
+    # Remove the "sigma2" (variance) parameter
+    # which should already be placed
+    # in the simulated parameters
+    cols <- cols[ ! "sigma2" == cols ]
+    coef <- coef[, cols]
+    ev <- coef %*% t(mm)
+    pv <- matrix(rnorm(nrow(ev), ev, sqrt(sigma2)))
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+znormalbayes$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    y <- b0 + b1*x + sim * rnorm(n=length(x), sd=alpha)
+    return(y)
+  }
+)
\ No newline at end of file
diff --git a/R/model-normal-gee.R b/R/model-normal-gee.R
new file mode 100755
index 0000000..e74ff04
--- /dev/null
+++ b/R/model-normal-gee.R
@@ -0,0 +1,30 @@
+#' Generalized Estimating Equation for Normal Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normalgee.html}
+#' @import methods
+#' @export Zelig-normal-gee
+#' @exportClass Zelig-normal-gee
+#'  
+#' @include model-zelig.R
+#' @include model-gee.R
+#' @include model-normal.R
+
+znormalgee <- setRefClass("Zelig-normal-gee",
+                           contains = c("Zelig-gee", "Zelig-normal"))
+
+znormalgee$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "normal-gee"
+    .self$family <- "gaussian"
+    .self$link <- "identity"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$year <- 2011
+    .self$category <- "continuous"
+    .self$authors <- "Patrick Lam"
+    .self$description = "General Estimating Equation for Normal Regression"
+    .self$fn <- quote(geepack::geeglm)
+    # JSON from parent
+    .self$wrapper <- "normal.gee"
+  }
+)
diff --git a/R/model-normal-survey.R b/R/model-normal-survey.R
new file mode 100755
index 0000000..2c30bd6
--- /dev/null
+++ b/R/model-normal-survey.R
@@ -0,0 +1,74 @@
+#' Normal Regression for Continuous Dependent Variables with Survey Weights
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normalsurvey.html}
+#' @import methods
+#' @export Zelig-normal-survey
+#' @exportClass Zelig-normal-survey
+#'
+#' @include model-zelig.R
+#' @include model-survey.R
+#' @include model-normal.R
+
+
+znormalsurvey <- setRefClass("Zelig-normal-survey",
+                       contains = c("Zelig-survey"),
+                       fields = list(family = "character",
+                                  link = "character",
+                                  linkinv = "function"))
+                                  #, "Zelig-normal"))
+
+znormalsurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "normal-survey"
+    .self$family <- "gaussian"
+    .self$link <- "identity"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$category <- "continuous"
+    .self$description <- "Normal Regression for Continuous Dependent Variables with Survey Weights"
+    .self$outcome <- "continuous"
+    # JSON
+    .self$wrapper <- "normal.survey"
+  }
+)
+
+znormalsurvey$methods(
+  param = function(z.out, method="mvn") {
+    degrees.freedom <- z.out$df.residual
+    sig2 <- base::summary(z.out)$dispersion # base:: avoids dispatching the reference-class summary method
+    simalpha <- sqrt(degrees.freedom * sig2 
+                     / rchisq(.self$num, degrees.freedom))
+
+    if(identical(method,"mvn")){
+      simparam.local <- mvrnorm(n = .self$num,
+                              mu = coef(z.out),
+                              Sigma = vcov(z.out))
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = simalpha))
+    }
+
+  }
+)
+
+znormalsurvey$methods(
+  qi = function(simparam, mm) {
+    theta <- matrix(simparam$simparam %*% t(mm),
+                    nrow = nrow(simparam$simparam))
+    ev <- theta
+    pv <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
+    for (j in 1:nrow(ev))
+      pv[j, ] <- rnorm(ncol(ev),
+                       mean = ev[j, ],
+                       sd = simparam$simalpha[j])
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+znormalsurvey$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    y <- b0 + b1*x + sim * rnorm(n=length(x), sd=alpha)
+    return(y)
+  }
+)
diff --git a/R/model-normal.R b/R/model-normal.R
new file mode 100755
index 0000000..5d8d201
--- /dev/null
+++ b/R/model-normal.R
@@ -0,0 +1,70 @@
+#' Normal Regression for Continuous Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normal.html}
+#' @import methods
+#' @export Zelig-normal
+#' @exportClass Zelig-normal
+#'
+#' @include model-zelig.R
+#' @include model-glm.R
+
+znormal <- setRefClass("Zelig-normal",
+                       contains = "Zelig-glm")
+
+znormal$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "normal"
+    .self$family <- "gaussian"
+    .self$link <- "identity"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
+    .self$year <- 2008
+    .self$category <- "continuous"
+    .self$description <- "Normal Regression for Continuous Dependent Variables"
+    # JSON
+    .self$outcome <- "continuous"
+    .self$wrapper <- "normal"
+  }
+)
+
+znormal$methods(
+  param = function(z.out, method="mvn") {
+    degrees.freedom <- z.out$df.residual
+    sig2 <- base::summary(z.out)$dispersion # base:: avoids dispatching the reference-class summary method
+    simalpha <- sqrt(degrees.freedom * sig2 
+                     / rchisq(.self$num, degrees.freedom))
+
+    if(identical(method,"mvn")){
+      simparam.local <- mvrnorm(n = .self$num,
+                              mu = coef(z.out),
+                              Sigma = vcov(z.out))
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = simalpha))
+    }
+
+  }
+)
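+# Note on param() above: simalpha draws the residual s.d. from its exact
+# sampling distribution under normality, df * s^2 / sigma^2 ~ chi^2_df, hence
+#   sigma_sim <- sqrt(df * s^2 / rchisq(num, df))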
+
+znormal$methods(
+  qi = function(simparam, mm) {
+    theta <- matrix(simparam$simparam %*% t(mm),
+                    nrow = nrow(simparam$simparam))
+    ev <- theta
+    pv <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
+    for (j in 1:nrow(ev))
+      pv[j, ] <- rnorm(ncol(ev),
+                       mean = ev[j, ],
+                       sd = simparam$simalpha[j])
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+znormal$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    y <- b0 + b1*x + sim * rnorm(n=length(x), sd=alpha)
+    return(y)
+  }
+)
diff --git a/R/model-oprobit-bayes.R b/R/model-oprobit-bayes.R
new file mode 100644
index 0000000..da330e7
--- /dev/null
+++ b/R/model-oprobit-bayes.R
@@ -0,0 +1,89 @@
+#' Bayesian Ordered Probit Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-oprobitbayes.html}
+#' @import methods
+#' @export Zelig-oprobit-bayes
+#' @exportClass Zelig-oprobit-bayes
+#'
+#' @include model-zelig.R
+#' @include model-bayes.R
+
+zoprobitbayes <- setRefClass("Zelig-oprobit-bayes",
+                            contains = c("Zelig-bayes"))
+
+zoprobitbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "oprobit-bayes"
+    .self$year <- 2013
+    .self$category <- "discrete"
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Probit Regression for Dichotomous Dependent Variables"
+    .self$fn <- quote(MCMCpack::MCMCoprobit)
+    # JSON from parent
+    .self$wrapper <- "oprobit.bayes"
+  }
+)
+
+zoprobitbayes$methods(
+  param = function(z.out) {
+    simparam <- callSuper(z.out)
+    # Produce the model matrix in order to get all terms (explicit and implicit)
+    # from the regression model.
+    mat <- model.matrix(.self$formula, data = .self$data)
+    # Response Terms
+    p <- ncol(mat)
+    # All coefficients
+    coefficients <- simparam
+    # Coefficients for predictor variables
+    beta <- coefficients[, 1:p]
+    # Middle values of "gamma" matrix
+    mid.gamma <- coefficients[, -(1:p)]
+    # Number of response categories implied by the number of estimated cutpoints
+    level <- ncol(coefficients) - p + 2
+    # Initialize the "gamma" parameters
+    gamma <- matrix(NA, nrow(coefficients), level + 1)
+    # The first, second and last values are fixed
+    gamma[, 1] <- -Inf
+    gamma[, 2] <- 0
+    gamma[, ncol(gamma)] <- Inf
+    # All others are determined by the coef-matrix (now stored in mid.gamma)
+    if (ncol(gamma) > 3)
+      gamma[, 3:(ncol(gamma) - 1)] <- mid.gamma
+    # return
+    simparam <- list(simparam = beta, simalpha = gamma)
+    return(simparam)
+  }
+)
+
+zoprobitbayes$methods(
+  qi = function(simparam, mm) {
+    beta <- simparam$simparam
+    gamma <- simparam$simalpha    
+    labels <- levels(model.response(model.frame(.self$formula, data = .self$data)))
+    # x is implicitly cast into a matrix
+    eta <- beta %*% t(mm)
+    # **TODO: Sort out sizes of matrices for these things.
+    ev <- array(NA, c(nrow(eta), ncol(gamma) - 1, ncol(eta)))
+    pv <- matrix(NA, nrow(eta), ncol(eta))
+    # Compute Expected Values
+    # ***********************
+    # Note that the inverse link function is:
+    #   pnorm(gamma[, j+1]-eta) - pnorm(gamma[, j]-eta)
+    for (j in 1:(ncol(gamma) - 1)) {
+      ev[, j, ] <- pnorm(gamma[, j + 1] - eta) - pnorm(gamma[, j] - eta)
+    }
+    colnames(ev) <- labels
+    # Compute Predicted Values
+    # ************************
+    for (j in 1:nrow(pv)) {
+      mu <- eta[j, ]
+      pv[j, ] <- as.character(cut(mu, gamma[j, ], labels = labels))
+    }
+    pv <- as.factor(pv)
+    # **TODO: Update summarize to work with at most 3-dimensional arrays
+    ev <- ev[, , 1]
+    return(list(ev = ev, pv = pv))
+  }
+)
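+# Note on qi() above: expected values integrate the latent normal between
+# adjacent simulated thresholds, pnorm(gamma[j + 1] - eta) - pnorm(gamma[j] - eta),
+# while predicted values classify each draw's latent mean into the ordered
+# categories with cut() at those same thresholds.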
+
diff --git a/R/model-poisson-bayes.R b/R/model-poisson-bayes.R
new file mode 100644
index 0000000..2e129dc
--- /dev/null
+++ b/R/model-poisson-bayes.R
@@ -0,0 +1,44 @@
+#' Bayesian Poisson Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poissonbayes.html}
+#' @import methods
+#' @export Zelig-poisson-bayes
+#' @exportClass Zelig-poisson-bayes
+#'
+#' @include model-zelig.R
+#' @include model-bayes.R
+#' @include model-poisson.R
+
+zpoissonbayes <- setRefClass("Zelig-poisson-bayes",
+                             contains = c("Zelig-bayes",
+                                          "Zelig-poisson"))
+
+zpoissonbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "poisson-bayes"
+    .self$family <- "poisson"
+    .self$link <- "log"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$year <- 2013
+    .self$category <- "continuous"
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Poisson Regression"
+    .self$fn <- quote(MCMCpack::MCMCpoisson)
+    # JSON from parent
+    .self$wrapper <- "poisson.bayes"
+  }
+)
+
+
+zpoissonbayes$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    lambda <- exp(b0 + b1 * x)
+    if(sim){
+        y <- rpois(n=length(x), lambda=lambda)
+        return(y)
+    }else{
+        return(lambda)
+    }
+  }
+)
diff --git a/R/model-poisson-gee.R b/R/model-poisson-gee.R
new file mode 100755
index 0000000..eba2da2
--- /dev/null
+++ b/R/model-poisson-gee.R
@@ -0,0 +1,39 @@
+#' Generalized Estimating Equation for Poisson Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poissongee.html}
+#' @import methods
+#' @export Zelig-poisson-gee
+#' @exportClass Zelig-poisson-gee
+#'
+#' @include model-zelig.R
+#' @include model-gee.R
+#' @include model-poisson.R
+
+zpoissongee <- setRefClass("Zelig-poisson-gee",
+                           contains = c("Zelig-gee", "Zelig-poisson"))
+
+zpoissongee$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "poisson-gee"
+    .self$family <- "poisson"
+    .self$link <- "log"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$year <- 2011
+    .self$category <- "continuous"
+    .self$authors <- "Patrick Lam"
+    .self$description = "General Estimating Equation for Poisson Regression"
+    .self$fn <- quote(geepack::geeglm)
+    # JSON from parent
+    .self$wrapper <- "poisson.gee"
+  }
+)
+
+
+zpoissongee$methods(
+  param = function(z.out, method="mvn") {
+    simparam.local <- callSuper(z.out, method=method)
+    return(simparam.local$simparam) # no ancillary parameter
+  }
+)
+
diff --git a/R/model-poisson-survey.R b/R/model-poisson-survey.R
new file mode 100755
index 0000000..34f89ae
--- /dev/null
+++ b/R/model-poisson-survey.R
@@ -0,0 +1,51 @@
+#' Poisson Regression with Survey Weights
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poissonsurvey.html}
+#' @import methods
+#' @export Zelig-poisson-survey
+#' @exportClass Zelig-poisson-survey
+#'
+#' @include model-zelig.R
+#' @include model-survey.R
+#' @include model-poisson.R
+
+zpoissonsurvey <- setRefClass("Zelig-poisson-survey",
+                           contains = c("Zelig-survey", "Zelig-poisson"))
+
+zpoissonsurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "poisson-survey"
+    .self$family <- "poisson"
+    .self$link <- "log"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$category <- "continuous"
+    .self$description = "Poisson Regression with Survey Weights"
+    # JSON from parent
+    .self$wrapper <- "poisson.survey"
+  }
+)
+
+zpoissonsurvey$methods(
+  qi = function(simparam, mm) {
+    eta <- simparam %*% t(mm)
+    theta.local <- matrix(.self$linkinv(eta), nrow = nrow(simparam))
+    ev <- theta.local
+    pv <- matrix(NA, nrow = nrow(theta.local), ncol = ncol(theta.local))
+    for (i in 1:ncol(theta.local))
+      pv[, i] <- rpois(nrow(theta.local), lambda = theta.local[, i])
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+zpoissonsurvey$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    lambda <- exp(b0 + b1 * x)
+    if(sim){
+        y <- rpois(n=length(x), lambda=lambda)
+        return(y)
+    }else{
+        return(lambda)
+    }
+  }
+)
diff --git a/R/model-poisson.R b/R/model-poisson.R
new file mode 100755
index 0000000..5464925
--- /dev/null
+++ b/R/model-poisson.R
@@ -0,0 +1,54 @@
+#' Poisson Regression for Event Count Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poisson.html}
+#' @import methods
+#' @export Zelig-poisson
+#' @exportClass Zelig-poisson
+#'
+#' @include model-zelig.R
+#' @include model-glm.R
+
+zpoisson <- setRefClass("Zelig-poisson",
+                        contains = "Zelig-glm",
+                        fields = list(theta = "ANY"))
+
+zpoisson$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "poisson"
+    .self$family <- "poisson"
+    .self$link <- "log"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
+    .self$year <- 2007
+    .self$category <- "count"
+    .self$description <- "Poisson Regression for Event Count Dependent Variables"
+    # JSON
+    .self$outcome <- "discrete"
+    .self$wrapper <- "poisson"
+  }
+)
+
+zpoisson$methods(
+  qi = function(simparam, mm) {
+    eta <- simparam %*% t(mm)
+    theta.local <- matrix(.self$linkinv(eta), nrow = nrow(simparam))
+    ev <- theta.local
+    pv <- matrix(NA, nrow = nrow(theta.local), ncol = ncol(theta.local))
+    for (i in 1:ncol(theta.local))
+      pv[, i] <- rpois(nrow(theta.local), lambda = theta.local[, i])
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+zpoisson$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    lambda <- exp(b0 + b1 * x)
+    if(sim){
+        y <- rpois(n=length(x), lambda=lambda)
+        return(y)
+    }else{
+        return(lambda)
+    }
+  }
+)
diff --git a/R/model-probit-bayes.R b/R/model-probit-bayes.R
new file mode 100644
index 0000000..c4a0fad
--- /dev/null
+++ b/R/model-probit-bayes.R
@@ -0,0 +1,42 @@
+#' Bayesian Probit Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probitbayes.html}
+#' @import methods
+#' @export Zelig-probit-bayes
+#' @exportClass Zelig-probit-bayes
+#'
+#' @include model-zelig.R
+#' @include model-probit.R
+
+zprobitbayes <- setRefClass("Zelig-probit-bayes",
+                             contains = c("Zelig-bayes",
+                                          "Zelig-probit"))
+
+zprobitbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "probit-bayes"
+    .self$family <- "binomial"
+    .self$link <- "probit"
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    .self$year <- 2013
+    .self$category <- "dichotomous"
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Probit Regression for Dichotomous Dependent Variables"
+    .self$fn <- quote(MCMCpack::MCMCprobit)
+    # JSON from parent
+    .self$wrapper <- "probit.bayes"
+  }
+)
+
+zprobitbayes$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- pnorm(b0 + b1 * x)
+    if(sim){
+        y <- rbinom(n=length(x), size=1, prob=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
\ No newline at end of file
diff --git a/R/model-probit-gee.R b/R/model-probit-gee.R
new file mode 100755
index 0000000..229ce57
--- /dev/null
+++ b/R/model-probit-gee.R
@@ -0,0 +1,22 @@
+#' Generalized Estimating Equation for Probit Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probitgee.html}
+#' @import methods
+#' @export Zelig-probit-gee
+#' @exportClass Zelig-probit-gee
+#'
+#' @include model-zelig.R
+#' @include model-binchoice-gee.R
+
+zprobitgee <- setRefClass("Zelig-probit-gee",
+                          contains = c("Zelig-binchoice-gee"))
+
+zprobitgee$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "probit-gee"
+    .self$link <- "probit"
+    .self$description <- "General Estimating Equation for Probit Regression"
+    .self$wrapper <- "probit.gee"
+  }
+)
\ No newline at end of file
diff --git a/R/model-probit-survey.R b/R/model-probit-survey.R
new file mode 100755
index 0000000..0a1db2d
--- /dev/null
+++ b/R/model-probit-survey.R
@@ -0,0 +1,34 @@
+#' Probit Regression with Survey Weights
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probitsurvey.html}
+#' @import methods
+#' @export Zelig-probit-survey
+#' @exportClass Zelig-probit-survey
+#'
+#' @include model-zelig.R
+#' @include model-binchoice-survey.R
+
+zprobitsurvey <- setRefClass("Zelig-probit-survey",
+                          contains = c("Zelig-binchoice-survey"))
+
+zprobitsurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "probit-survey"
+    .self$link <- "probit"
+    .self$description <- "Probit Regression with Survey Weights"
+    .self$wrapper <- "probit.survey"
+  }
+)
+
+zprobitsurvey$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- pnorm(b0 + b1 * x)
+    if(sim){
+        y <- rbinom(n=length(x), size=1, prob=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
\ No newline at end of file
diff --git a/R/model-probit.R b/R/model-probit.R
new file mode 100755
index 0000000..657e4ec
--- /dev/null
+++ b/R/model-probit.R
@@ -0,0 +1,39 @@
+#' Probit Regression for Dichotomous Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probit.html}
+#' @import methods
+#' @export Zelig-probit
+#' @exportClass Zelig-probit
+#'
+#' @include model-zelig.R
+#' @include model-glm.R
+#' @include model-binchoice.R
+  
+zprobit <- setRefClass("Zelig-probit",
+                       contains = "Zelig-binchoice")
+
+zprobit$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "probit"
+    .self$link <- "probit"
+    .self$description = "Probit Regression for Dichotomous Dependent Variables"
+    .self$packageauthors <- "R Core Team"
+    .self$wrapper <- "probit"
+  }
+)
+
+zprobit$methods(
+  mcfun = function(x, b0=0, b1=1, ..., sim=TRUE){
+    mu <- pnorm(b0 + b1 * x)
+    if(sim){
+        y <- rbinom(n=length(x), size=1, prob=mu)
+        return(y)
+    }else{
+        return(mu)
+    }
+  }
+)
+
+
+
diff --git a/R/model-quantile.R b/R/model-quantile.R
new file mode 100755
index 0000000..d7878fb
--- /dev/null
+++ b/R/model-quantile.R
@@ -0,0 +1,94 @@
+#' Quantile Regression for Continuous Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-quantile.html}
+#' @import methods
+#' @export Zelig-quantile
+#' @exportClass Zelig-quantile
+#'
+#' @include model-zelig.R
+
+zquantile <- setRefClass("Zelig-quantile",
+                         contains = "Zelig",
+                         fields = list(tau = "ANY"
+                         ))
+
+zquantile$methods(
+  initialize = function() {
+    callSuper()
+    .self$fn <- quote(quantreg::rq)
+    .self$name <- "quantile"
+    .self$authors <- "Alexander D'Amour"
+    .self$packageauthors <- "Roger Koenker"
+    .self$modelauthors <- "Alexander D'Amour"
+    .self$year <- 2008
+    .self$category <- "continuous"
+    .self$description <- "Quantile Regression for Continuous Dependent Variables"
+    # JSON
+    .self$outcome <- "continuous"
+    .self$wrapper <- "rq"
+    .self$acceptweights <- TRUE
+  }
+)
+
+zquantile$methods(
+  zelig = function(formula, data, ..., weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- match.call(expand.dots = TRUE)
+    if (!is.null(.self$model.call$tau)) {
+      .self$tau <- eval(.self$model.call$tau)
+      if (length(.self$tau)) {
+        data <- rbind_all(lapply(eval(.self$tau),
+                                 function(tau) cbind(tau, data)))
+        by <- cbind("tau", by)
+      }
+    }
+    else 
+      .self$tau <- 0.5
+    callSuper(formula = formula, data = data, ..., weights = weights, by = by, bootstrap = bootstrap)
+    
+    rse<-plyr::llply(.self$zelig.out$z.out, (function(x) quantreg::summary.rq(x,se="nid", cov=TRUE)$cov))
+    .self$test.statistics<- list(robust.se = rse)
+  }
+)
+
+zquantile$methods(
+  param = function(z.out, method="mvn") {
+    object <- z.out
+    if(identical(method,"mvn")){
+      rq.sum <- summary.rq(object, cov = TRUE, se = object$se)
+      return(mvrnorm(n = .self$num, mu = object$coef, Sigma = rq.sum$cov))
+    }else if(identical(method,"point")){
+      return(t(as.matrix(object$coef)))
+    }
+  }
+)
+
+zquantile$methods(
+  qi = function(simparam, mm) {
+    object <- mm
+    coeff <- simparam
+    eps <- .Machine$double.eps^(2/3)
+    ev <- coeff %*% t(object)
+    pv <- ev
+    n <- nrow(.self$data)
+    h <- bandwidth.rq(.self$tau, n) # estimate optimal bandwidth for sparsity
+    if (.self$tau + h > 1)
+      stop("tau + h > 1. Sparsity estimate failed. Please specify a tau closer to 0.5")
+    if (.self$tau - h < 0)
+      stop("tau - h < 0. Sparsity estimate failed. Please specify a tau closer to 0.5")
+    beta_high <- rq(.self$formula, data = .self$data, tau = .self$tau + h )$coef
+    beta_low <- rq(.self$formula, data = .self$data, tau = .self$tau - h)$coef
+    F_diff <- mm %*% (beta_high - beta_low)
+    if (any(F_diff <= 0))
+      warning(paste(sum(F_diff <= 0),
+                    "density estimates were non-positive. Predicted values will likely be non-sensical."))
+    # Includes machine error correction as per summary.rq for nid case
+    f <- pmax(0, (2 * h) / (F_diff - eps))
+    # Use asymptotic approximation of Q(tau|X,beta) distribution
+    for(ii in 1:nrow(ev))
+      # Asymptotic distribution as per Koenker 2005 _Quantile Regression_ p. 72
+      pv[ii, ] <- rnorm(length(ev[ii, ]), mean = ev[ii, ],
+                        sqrt((.self$tau * (1 - .self$tau))) / (f * sqrt(n)))
+    return(list(ev  = ev, pv = pv))
+  }
+)
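+# Note on qi() above: the rnorm() draw applies the standard asymptotic result
+# for conditional quantiles (Koenker 2005, p. 72),
+#   Var(Q_tau) ~ tau * (1 - tau) / (n * f^2),
+# where the sparsity f is estimated by the difference quotient over
+# [tau - h, tau + h] computed above.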
diff --git a/R/model-relogit.R b/R/model-relogit.R
new file mode 100755
index 0000000..8f57c16
--- /dev/null
+++ b/R/model-relogit.R
@@ -0,0 +1,199 @@
+#' Rare Events Logistic Regression for Dichotomous Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-relogit.html}
+#' @import methods
+#' @export Zelig-relogit
+#' @exportClass Zelig-relogit
+#'
+#' @include model-zelig.R
+#' @include model-glm.R
+#' @include model-binchoice.R
+#' @include model-logit.R
+
+zrelogit <- setRefClass("Zelig-relogit",
+                      contains = "Zelig",
+                      fields = list(family = "character",
+                                    link = "character",
+                                    linkinv = "function"))
+
+zrelogit$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "relogit"
+    .self$description <- "Rare Events Logistic Regression for Dichotomous Dependent Variables"
+    .self$fn <- quote(relogit)
+    .self$family <- "binomial"
+    .self$link <- "logit"
+    .self$wrapper <- "relogit"
+    ref1<-bibentry(
+            bibtype="Article",
+            title = "Logistic Regression in Rare Events Data",
+            author = c(
+                person("Gary", "King"),
+                person("Langche", "Zeng")
+                ),
+            journal = "Political Analysis",
+            volume = 9,
+            number = 2,
+            year = 2001,
+            pages = "137--163")
+    ref2<-bibentry(
+            bibtype="Article",
+            title = "Explaining Rare Events in International Relations",
+            author = c(
+                person("Gary", "King"),
+                person("Langche", "Zeng")
+                ),
+            journal = "International Organization",
+            volume = 55,
+            number = 3,
+            year = 2001,
+            pages = "693--715")
+    .self$refs<-c(.self$refs,ref1,ref2)
+  }
+)
+
+zrelogit$methods(
+  zelig = function(formula, ..., tau = NULL, bias.correct = NULL, case.control = NULL, data, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    # Catch NULL case.control
+    if (is.null(case.control))
+      case.control <- "prior"
+    # Catch NULL bias.correct
+    if (is.null(bias.correct))
+      bias.correct = TRUE
+    # Construct formula. Relogit models have the structure:
+    #   cbind(y, 1-y) ~ x1 + x2 + x3 + ... + xN
+    # Where y is the response.
+    form <- update(formula, cbind(., 1 - .) ~ .)
+    .self$model.call$formula <- form
+    .self$model.call$case.control <- case.control
+    .self$model.call$bias.correct <- bias.correct
+    .self$model.call$tau <- tau
+    callSuper(formula = formula, data = data, ..., weights = NULL, by = by, bootstrap = bootstrap)
+  }
+)
+
+zrelogit$methods(
+  qi = function(simparam, mm) {
+    .self$linkinv <- eval(call(.self$family, .self$link))$linkinv
+    coeff <- simparam
+    eta <- simparam %*% t(mm)
+    eta <- Filter(function (y) !is.na(y), eta)
+    theta <- matrix(.self$linkinv(eta), nrow = nrow(coeff))
+    ev <- matrix(.self$linkinv(eta), ncol = ncol(theta))
+    pv <- matrix(nrow = nrow(ev), ncol = ncol(ev))
+    for (j in 1:ncol(ev))
+      pv[, j] <- rbinom(length(ev[, j]), 1, prob = ev[, j])
+    levels(pv) <- c(0, 1)
+    return(list(ev = ev, pv = pv))
+  }
+)
+
+
+#' Estimation function for rare events logit models
+#' @keywords internal
+relogit <- function(formula,
+                    data = sys.parent(),
+                    tau = NULL,
+                    bias.correct = TRUE,
+                    case.control = "prior",
+                    ...){
+  mf <- match.call()
+  mf$tau <- mf$bias.correct <- mf$case.control <- NULL
+  if (!is.null(tau)) {
+    tau <- unique(tau)
+    if (length(case.control) > 1)
+      stop("You can only choose one option for case control correction.")
+    ck1 <- grep("p", case.control)
+    ck2 <- grep("w", case.control)
+    if (length(ck1) == 0 & length(ck2) == 0)
+      stop("choose either case.control = \"prior\" ",
+           "or case.control = \"weighting\"")
+    if (length(ck2) == 0)
+      weighting <- FALSE
+    else 
+      weighting <- TRUE
+  }
+  else
+    weighting <- FALSE
+  if (length(tau) > 2)
+    stop("tau must be a vector of length less than or equal to 2")
+  else if (length(tau) == 2) {
+    mf[[1]] <- relogit
+    res <- list()
+    mf$tau <- min(tau)
+    res$lower.estimate <- eval(as.call(mf), parent.frame())
+    mf$tau <- max(tau)
+    res$upper.estimate <- eval(as.call(mf), parent.frame())
+    res$formula <- formula
+    class(res) <- c("Relogit2", "Relogit")
+    return(res)
+  }
+  else {
+    mf[[1]] <- glm
+    mf$family <- binomial(link = "logit")
+    y2 <- model.response(model.frame(mf$formula, data))
+    if (is.matrix(y2))
+      y <- y2[,1]
+    else
+      y <- y2
+    ybar <- mean(y)
+    if (weighting) {
+      w1 <- tau / ybar
+      w0 <- (1-tau) / (1-ybar)
+      wi <- w1 * y + w0 * (1 - y)
+      mf$weights <- wi
+    }
+    res <- eval(as.call(mf), parent.frame())
+    res$call <- match.call(expand.dots = TRUE)
+    res$tau <- tau
+    X <- model.matrix(res)
+    ## bias correction
+    if (bias.correct){
+      pihat <- fitted(res)
+      if (is.null(tau)) # w_i = 1
+        wi <- rep(1, length(y))
+      else if (weighting) 
+        res$weighting <- TRUE
+      else {
+        w1 <- tau/ybar
+        w0 <- (1 - tau) / (1 - ybar)
+        wi <- w1 * y + w0 * (1 - y)
+        res$weighting <- FALSE
+      }
+      W <- pihat * (1 - pihat) * wi
+      ##Qdiag <- diag(X%*%solve(t(X)%*%diag(W)%*%X)%*%t(X))
+      Qdiag <- lm.influence(lm(y ~ X - 1, weights = W))$hat / W
+      if (is.null(tau)) # w_1=1 since tau=ybar
+        xi <- 0.5 * Qdiag * (2 * pihat - 1)
+      else
+        xi <- 0.5 * Qdiag * ((1 + w0) * pihat - w0)
+      res$coefficients <- res$coefficients -
+        lm(xi ~ X - 1, weights = W)$coefficients
+      res$bias.correct <- TRUE
+    }
+    else
+      res$bias.correct <- FALSE
+    ## prior correction 
+    if (!is.null(tau) & !weighting){      
+      if (tau <= 0 || tau >= 1) 
+        stop("\ntau needs to be between 0 and 1.\n") 
+      res$coefficients["(Intercept)"] <- res$coefficients["(Intercept)"] - 
+        log(((1 - tau) / tau) * (ybar / (1 - ybar)))
+      res$prior.correct <- TRUE
+      res$weighting <- FALSE
+    }
+    else
+      res$prior.correct <- FALSE
+    if (is.null(res$weighting))
+      res$weighting <- FALSE
+    
+    res$linear.predictors <- t(res$coefficients) %*% t(X) 
+    res$fitted.values <- 1 / (1 + exp(-res$linear.predictors))
+    res$zelig <- "Relogit"
+    class(res) <- c("Relogit", "glm")
+    return(res)
+  }
+}
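+
+# A minimal usage sketch (hypothetical data frame `df` with a rare binary
+# response `y`; tau is the assumed population proportion of ones):
+#   fit <- relogit(y ~ x1 + x2, data = df, tau = 0.02,
+#                  bias.correct = TRUE, case.control = "prior")
+# With case.control = "prior" only the intercept is shifted, by
+#   log(((1 - tau) / tau) * (ybar / (1 - ybar)))
+# as in the prior-correction block above.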
diff --git a/R/model-survey.R b/R/model-survey.R
new file mode 100755
index 0000000..62fdfcf
--- /dev/null
+++ b/R/model-survey.R
@@ -0,0 +1,74 @@
+#' Survey models in Zelig for weights for complex sampling designs
+#'
+#' @import methods
+#' @export Zelig-survey
+#' @exportClass Zelig-survey
+#'
+#' @include model-zelig.R
+zsurvey <- setRefClass("Zelig-survey",
+                    contains = "Zelig")
+
+zsurvey$methods(
+  initialize = function() {
+    callSuper()
+    .self$fn <- quote(survey::svyglm)
+    .self$packageauthors <- "Thomas Lumley"
+    .self$modelauthors <- "Nicholas Carnes"
+    .self$acceptweights <- TRUE
+  }
+)
+
+zsurvey$methods(
+  zelig = function(formula, data, ids = ~1, probs = NULL, strata = NULL, fpc = NULL, nest = FALSE, check.strata = !nest, 
+                   repweights = NULL, type = NULL, combined.weights = FALSE, rho = NULL, bootstrap.average = NULL, scale = NULL,
+                   rscales = NULL, fpctype = "fraction", ... , weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+
+    recastString2Formula <- function(a){
+      if(is.character(a)){
+        a <- as.formula(paste("~",a))
+      }
+      return(a)
+    }
+
+    checkLogical <- function(a, name=""){
+      if(!("logical" %in% class(a))){
+        cat(paste("Warning: argument ",name," is a logical and should be set to TRUE for FALSE.", sep=""))
+        return(FALSE)
+      }else{
+        return(TRUE)
+      }
+
+    }
+
+    ## Check arguments:
+
+    ## Zelig generally accepts the names of variables present in the dataset,
+    ##   but the survey package expects formula expressions or data frames,
+    ##   so convert any character arguments to formulas.
+    ids<-recastString2Formula(ids)
+    probs<-recastString2Formula(probs)
+    weights<-recastString2Formula(weights)
+    strata<-recastString2Formula(strata)
+    fpc<-recastString2Formula(fpc)   
+    checkforerror <- checkLogical(nest, "nest")
+    checkforerror <- checkLogical(check.strata, "check.strata")
+    repweights<-recastString2Formula(repweights)
+    # type should be a string 
+    checkforerror <- checkLogical(combined.weights, "combined.weights")
+    # rho is shrinkage factor
+    # scale is scaling constant
+    # rscales is scaling constant
+
+    if(is.null(repweights)){
+      design <- survey::svydesign(data=data, ids=ids, probs=probs, strata=strata, fpc=fpc, nest=nest, check.strata=check.strata, weights=weights)
+    }else{
+      design <- survey::svrepdesign(data=data, repweights=repweights, type=type, weights=weights, combined.weights=combined.weights, rho=rho, 
+                                    bootstrap.average=bootstrap.average, scale=scale, rscales=rscales, fpctype=fpctype, fpc=fpc)
+    }
+    .self$model.call <- as.call(list(.self$fn, formula=.self$zelig.call$formula,  design=design))  # fn will be set again by super, but initialized here for clarity
+    .self$model.call$family <- call(.self$family, .self$link)
+    callSuper(formula = formula, data = data, ..., by = by, bootstrap = bootstrap)
+  }
+)
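+# Note on the design objects above: a NULL `repweights` selects
+# survey::svydesign() (probability/stratified designs), while supplying
+# `repweights` routes construction through survey::svrepdesign() for
+# replicate-weight designs; both constructors come from Thomas Lumley's
+# survey package.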
+
diff --git a/R/model-timeseries.R b/R/model-timeseries.R
new file mode 100755
index 0000000..dbe6fa7
--- /dev/null
+++ b/R/model-timeseries.R
@@ -0,0 +1,152 @@
+#' Time-series models in Zelig
+#'
+#' @import methods
+#' @export Zelig-timeseries
+#' @exportClass Zelig-timeseries
+#'
+#' @include model-zelig.R
+ztimeseries <- setRefClass("Zelig-timeseries",
+                    contains = "Zelig",
+                    fields = list(link = "character",
+                                  linkinv = "function"))
+
+
+ztimeseries$methods(
+  initialize = function() {
+    callSuper()
+    .self$packageauthors <- "R Core Team"
+    .self$modelauthors <- "James Honaker"
+    .self$acceptweights <- FALSE  #  Need to deal with block bootstrap
+    .self$category <- "timeseries"
+    .self$setx.labels <- list(ev  = "Expected Values: E(Y|X)",
+                              ev1 = "Expected Values: E(Y|X1)",
+                              pv  = "Predicted Values: Y|X",
+                              pv1 = "Predicted Values: Y|X1",
+                              fd  = "First Differences: E(Y|X1) - E(Y|X)",
+                              acf = "Autocorrelation Function",
+                              ev.shortrun = "Expected Values Immediately Resulting from Shock",
+                              ev.longrun = "Long Run Expected Values after Innovation",
+                              pv.shortrun = "Predicted Values Immediately Resulting from Shock",
+                              pv.longrun = "Long Run Predicted Values after Innovation",
+                              evseries.shock = "Expected Values Over Time from Shock",
+                              evseries.innovation ="Expected Values Over Time from Innovation",
+                              pvseries.shock = "Predicted Values Over Time from Shock",
+                              pvseries.innovation ="Predicted Values Over Time from Innovation")
+  }
+)
+
+ztimeseries$methods(
+  zelig = function(formula, data, order=c(1,0,0), ts=NULL, cs=NULL, ..., weights=NULL, by=NULL, bootstrap = FALSE){
+    if(!identical(bootstrap,FALSE)){
+      stop("Error: The bootstrap is not implemented for time-series models")
+    }
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    if(identical(.self$name,"ar")){
+      order<-c(1,0,0)
+      .self$zelig.call$order <- order
+    } else if(identical(.self$name,"ma")){
+      order<-c(0,0,1)
+      .self$zelig.call$order <- order
+    }
+    .self$model.call <- .self$zelig.call
+
+    ## Sort dataset by time and cross-section
+    ## Should add checks that ts, cs, are valid, and consider how to interact with by.
+    ## This follows the handling in Amelia::prep.r, which also has code to deal with lags should we add those later.
+    if(!identical(ts,NULL)){
+      .self$model.call$ts <- NULL
+      if (!identical(cs,NULL)) {
+        .self$model.call$cs <- NULL
+        tsarg<-list(data[,cs],data[,ts])
+        by <- cs  # Use the by architecture to handle cross-sections in time-series models that lack native support. Currently overrides any user-supplied by.
+      } else {
+        tsarg<-list(data[,ts])
+      }
+
+      tssort<-do.call("order",tsarg)
+      data<-data[tssort,]
+    }
+
+    ## ts and cs are used to reorganize dataset, and do not get further passed on to Super
+    callSuper(formula = formula, data = data, order=order, ..., weights = weights, by = by, bootstrap = FALSE)
+  }
+)
+
+# replace packagename method as stats::arima() has a second layer of wrapping in zeligArimaWrapper().
+
+ztimeseries$methods(
+  packagename = function() {
+    "Automatically retrieve wrapped package name"
+    return("stats")
+  }
+)
+
+
+# replace simx method to add ACF as QI.
+
+ztimeseries$methods(
+  simx = function() {
+    d <- zeligPlyrMutate(.self$zelig.out, simparam = .self$simparam$simparam)
+    d <- zeligPlyrMutate(d, mm = .self$setx.out$x$mm)
+    .self$sim.out$x <-  d %>%
+      do(qi = .self$qi(.$simparam, .$mm)) %>%
+      do(acf = .$qi$acf, ev = .$qi$ev, pv = .$qi$pv)
+  }
+)
+
+ztimeseries$methods(
+  simx1 = function() {
+    d <- zeligPlyrMutate(.self$zelig.out, simparam = .self$simparam$simparam)
+    d <- zeligPlyrMutate(d, mm = .self$setx.out$x$mm)
+    d <- zeligPlyrMutate(d, mm1 = .self$setx.out$x1$mm)
+
+#      return(list(acf = acf, ev = ev, pv = pv, pv.shortrun=pv.shortrun, pv.longrun=pv.longrun, ev.shortrun=ev.shortrun, ev.longrun=ev.longrun, 
+#                pvseries.shock=yseries$y.shock, pvseries.innovation=yseries$y.innovation,
+#                evseries.shock=yseries$ev.shock, evseries.innovation=yseries$ev.innovation))
+
+    .self$sim.out$x1 <-  d %>%
+      do(qi = .self$qi(.$simparam, .$mm, .$mm1)) %>%
+      do(acf = .$qi$acf, ev = .$qi$ev, pv = .$qi$pv, ev.shortrun = .$qi$ev.shortrun, pv.shortrun = .$qi$pv.shortrun, ev.longrun = .$qi$ev.longrun, pv.longrun = .$qi$pv.longrun, pvseries.shock = .$qi$pvseries.shock, evseries.shock = .$qi$evseries.shock, pvseries.innovation = .$qi$pvseries.innovation,  evseries.innovation = .$qi$evseries.innovation)
+      # Will eventually have to then move acf, ev, and pv from .self$setx.out$x1 to .self$setx.out$x
+      # This will also affect the next line:
+
+    d <- zeligPlyrMutate(.self$sim.out$x1, ev0 = .self$sim.out$x1$ev)    # Eventually, when ev moves, then this path for ev0 changes.  (Or make movement happen after fd calculation.)
+    d <- d %>%
+      do(fd = .$ev.longrun - .$ev0)
+    .self$sim.out$x1 <- zeligPlyrMutate(.self$sim.out$x1, fd = d$fd) #JH
+  }
+)
+
+# replace sim method to skip {simx, simx1, simrange, simrange1} methods as they are not separable
+# instead go directly to qi method
+
+ztimeseries$methods(
+  sim = function(num = 1000) {
+    "Timeseries Method for Computing and Organizing Simulated Quantities of Interest"
+    if (length(.self$num) == 0) 
+      .self$num <- num
+    .self$simparam <- .self$zelig.out %>%
+      do(simparam = .self$param(.$z.out))
+
+    # NOTE difference here from standard Zelig approach.  
+    # Normally these are done in sequence, but now we do one or the other.  
+    if (.self$bsetx1){
+      .self$simx1()
+    }else{
+      .self$simx()
+    }
+  }
+)
+
+# There is no suitable summary method for objects of class Arima,
+# so this passes the object through unchanged, and z$summary() is essentially print(summary(x)), i.e. print(x).
+
+#' Summary of an object of class Arima
+#' @method summary Arima
+#' @param object An object of class Arima 
+#' @param ... Additional parameters
+#' @return The original object 
+#' @export
+summary.Arima = function(object, ...) object
diff --git a/R/model-tobit-bayes.R b/R/model-tobit-bayes.R
new file mode 100644
index 0000000..037cab9
--- /dev/null
+++ b/R/model-tobit-bayes.R
@@ -0,0 +1,56 @@
+#' Bayesian Tobit Regression
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-tobitbayes.html}
+#' @import methods
+#' @export Zelig-tobit-bayes
+#' @exportClass Zelig-tobit-bayes
+#'
+#' @include model-zelig.R
+#' @include model-bayes.R
+#' @include model-tobit.R
+
+ztobitbayes <- setRefClass("Zelig-tobit-bayes",
+                           contains = c("Zelig-bayes",
+                                        "Zelig-tobit"))
+
+ztobitbayes$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "tobit-bayes"
+    .self$year <- 2013
+    .self$category <- "continuous"  # tobit models a censored continuous outcome, not a dichotomous one
+    .self$authors <- "Ben Goodrich, Ying Lu"
+    .self$description = "Bayesian Tobit Regression for a Censored Dependent Variable"
+    .self$fn <- quote(MCMCpack::MCMCtobit)
+    # JSON from parent
+    .self$wrapper <- "tobit.bayes"
+  }
+)
+
+ztobitbayes$methods(
+  param = function(z.out) {
+    if (length(.self$below) == 0)
+      .self$below <- 0
+    if (length(.self$above) == 0)
+      .self$above <- Inf
+    simparam.local <- list()
+    simparam.local$simparam <- z.out[, 1:(ncol(z.out) - 1)]
+    simparam.local$simalpha <- sqrt(z.out[, ncol(z.out)])
+    return(simparam.local)
+  }
+)
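+
+# Editorial note (assuming MCMCpack::MCMCtobit's output layout): z.out is a matrix of
+# posterior draws whose first k columns are the beta coefficients and whose last column
+# is sigma^2, so param() above splits off the last column and takes sqrt() to get sigma:
+#   simparam <- z.out[, 1:(ncol(z.out) - 1)];  simsigma <- sqrt(z.out[, ncol(z.out)])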
+
+ztobitbayes$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    mu <- b0 + b1 * x
+    ystar <- rnorm(n=length(x), mean=mu, sd=alpha)
+    if(sim){
+        y <- (ystar>0) * ystar  # censoring from below at zero
+        return(y)
+    }else{
+        y.uncensored.hat.tobit <- mu + alpha^2 * dnorm(mu, mean=0, sd=alpha)/pnorm(mu, mean=0, sd=alpha)  # E[Y|Y>0]: the alpha^2 factor turns dnorm/pnorm into alpha times the inverse Mills ratio at mu/alpha
+        y.hat.tobit <- y.uncensored.hat.tobit * (1 - pnorm(0, mean=mu, sd=alpha))  # expected value of censored outcome
+        return(y.hat.tobit)
+    }
+  }
+)
\ No newline at end of file
diff --git a/R/model-tobit.R b/R/model-tobit.R
new file mode 100755
index 0000000..9d72775
--- /dev/null
+++ b/R/model-tobit.R
@@ -0,0 +1,108 @@
+#' Linear Regression for a Left-Censored Dependent Variable
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-tobit.html}
+#' @import methods
+#' @export Zelig-tobit
+#' @exportClass Zelig-tobit
+#'
+#' @include model-zelig.R
+
+ztobit <- setRefClass("Zelig-tobit",
+                      contains = "Zelig",
+                      fields = list(above = "numeric",
+                                    below = "numeric"))
+
+ztobit$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "tobit"
+    .self$authors <- "Kosuke Imai, Gary King, Olivia Lau"
+    .self$packageauthors <- "Christian Kleiber, and Achim Zeileis"
+    .self$year <- 2011
+    .self$description = "Linear Regression for a Left-Censored Dependent Variable"
+    .self$fn <- quote(AER::tobit)
+    # JSON
+    .self$outcome <- "continuous"
+    .self$wrapper <- "tobit"
+    .self$acceptweights <- TRUE
+  }
+)
+
+ztobit$methods(
+  zelig = function(formula, ..., below = 0, above = Inf,
+                   robust = FALSE, data, weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    .self$below <- below
+    .self$above <- above
+    .self$model.call$below <- NULL
+    .self$model.call$above <- NULL
+    .self$model.call$left <- below
+    .self$model.call$right <- above
+    callSuper(formula = formula, data = data, ..., weights = weights, by = by, bootstrap = bootstrap)
+
+    if(!robust){
+        fn2 <- function(fc, data) {
+            fc$data <- data
+            return(fc)
+        }
+        robust.model.call <- .self$model.call
+        robust.model.call$robust <- TRUE
+        
+        robust.zelig.out <- .self$data %>%
+        group_by_(.self$by) %>%
+        do(z.out = eval(fn2(robust.model.call, quote(as.data.frame(.))))$var )
+        
+        .self$test.statistics<- list(robust.se = robust.zelig.out$z.out)
+    }
+  }
+)
+
+
+ztobit$methods(
+  param = function(z.out, method="mvn") {
+    if(identical(method,"mvn")){
+      mu <- c(coef(z.out), log(z.out$scale))
+      simfull <- mvrnorm(n = .self$num, mu = mu, Sigma = vcov(z.out))
+      simparam.local <- as.matrix(simfull[, -ncol(simfull)])
+      simalpha <- exp(as.matrix(simfull[, ncol(simfull)]))
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = z.out$scale))  # the scale itself, matching exp(log scale) in the mvn branch
+    }
+  }
+)
+
+ztobit$methods(
+  qi = function(simparam, mm) {
+    Coeff <- simparam$simparam %*% t(mm)
+    SD <- simparam$simalpha
+    lambda <- dnorm(Coeff / SD) / pnorm(Coeff / SD)
+    ev <- pnorm(Coeff / SD) * (Coeff + SD * lambda)
+    pv <- matrix(nrow = nrow(ev), ncol = ncol(ev))
+    for (j in 1:ncol(ev)) {
+      pv[, j] <- rnorm(nrow(ev), mean = ev[, j], sd = SD)
+      pv[, j] <- pmin(pmax(pv[, j], .self$below), .self$above)
+    }
+    return(list(ev = ev, pv = pv))
+  }
+)
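+
+# Editorial note: qi() above implements the mean of a normal left-censored at zero;
+# with z = Xb/sigma and inverse Mills ratio lambda(z) = dnorm(z)/pnorm(z),
+#   ev = pnorm(z) * (Xb + sigma * lambda(z))
+# and pv draws normal noise around ev, clipped to the [below, above] censoring bounds.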
+
+ztobit$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    mu <- b0 + b1 * x
+    ystar <- rnorm(n=length(x), mean=mu, sd=alpha)
+    if(sim){
+        y <- (ystar>0) * ystar  # censoring from below at zero
+        return(y)
+    }else{
+        y.uncensored.hat.tobit <- mu + alpha^2 * dnorm(mu, mean=0, sd=alpha)/pnorm(mu, mean=0, sd=alpha)  # E[Y|Y>0]: the alpha^2 factor turns dnorm/pnorm into alpha times the inverse Mills ratio at mu/alpha
+        y.hat.tobit <- y.uncensored.hat.tobit * (1 - pnorm(0, mean=mu, sd=alpha))  # expected value of censored outcome
+        return(y.hat.tobit)
+    }
+  }
+)
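+
+# Editorial check (hypothetical, not upstream code): the closed form in mcfun can be
+# verified by simulation for a single x value, e.g. mu = 0.5, alpha = 2:
+#   mean(pmax(rnorm(1e6, mean = 0.5, sd = 2), 0))       # simulated E[Y], ~ 1.07
+#   pnorm(0.25) * (0.5 + 2 * dnorm(0.25)/pnorm(0.25))   # closed form,   ~ 1.07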
+
diff --git a/R/model-weibull.R b/R/model-weibull.R
new file mode 100644
index 0000000..c914352
--- /dev/null
+++ b/R/model-weibull.R
@@ -0,0 +1,110 @@
+#' Weibull Regression for Duration Dependent Variables
+#'
+#' Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-weibull.html}
+#' @import methods
+#' @export Zelig-weibull
+#' @exportClass Zelig-weibull
+#'
+#' @include model-zelig.R
+
+zweibull <- setRefClass("Zelig-weibull",
+                        contains = "Zelig",
+                        fields = list(simalpha = "list",
+                                      linkinv = "function",
+                                      lambda = "ANY"))
+  
+zweibull$methods(
+  initialize = function() {
+    callSuper()
+    .self$name <- "weibull"
+    .self$authors <- "Olivia Lau, Kosuke Imai, Gary King"
+    .self$packageauthors <- "Terry M Therneau, and Thomas Lumley"
+    .self$year <- 2007
+    .self$description <- "Weibull Regression for Duration Dependent Variables"
+    .self$fn <- quote(survival::survreg)
+    .self$linkinv <- survreg.distributions[["weibull"]]$itrans
+    # JSON
+    .self$outcome <- "bounded"
+    .self$wrapper <- "weibull"
+    .self$acceptweights <- TRUE
+  }
+)
+
+zweibull$methods(
+  zelig = function(formula, ..., robust = FALSE, cluster = NULL, data, weights = NULL, by = NULL, bootstrap = FALSE) {
+    .self$zelig.call <- match.call(expand.dots = TRUE)
+    .self$model.call <- .self$zelig.call
+    if (!(is.null(cluster) || robust))
+      stop("If cluster is specified, then `robust` must be TRUE")
+    # Add cluster term
+    if (robust || !is.null(cluster))
+      formula <- cluster.formula(formula, cluster)
+    .self$model.call$dist <- "weibull"
+    .self$model.call$model <- FALSE
+    callSuper(formula = formula, data = data, ..., robust = robust,
+              cluster = cluster,  weights = weights, by = by, bootstrap = bootstrap)
+
+    if(!robust){
+      fn2 <- function(fc, data) {
+        fc$data <- data
+        return(fc)
+      }
+      robust.model.call <- .self$model.call
+      robust.model.call$robust <- TRUE
+    
+      robust.zelig.out <- .self$data %>%
+      group_by_(.self$by) %>%
+      do(z.out = eval(fn2(robust.model.call, quote(as.data.frame(.))))$var )
+    
+      .self$test.statistics<- list(robust.se = robust.zelig.out$z.out)
+    }
+  }
+)
+
+zweibull$methods(
+  param = function(z.out, method="mvn") {
+    if(identical(method,"mvn")){
+      coeff <- coef(z.out)
+      mu <- c(coeff, z.out$scale)
+      cov <- vcov(z.out)
+      simulations <- mvrnorm(.self$num, mu = mu, Sigma = cov)
+      simparam.local <- as.matrix(simulations[, 1:length(coeff)])
+      simalpha.local <- as.matrix(simulations[, -(1:length(coeff))])
+      simparam.local <- list(simparam = simparam.local, simalpha = simalpha.local)
+      return(simparam.local)
+    } else if(identical(method,"point")){
+      return(list(simparam = t(as.matrix(coef(z.out))), simalpha = z.out$scale))
+    }
+  }
+)
+
+zweibull$methods(
+  qi = function(simparam, mm) {
+    eta <- simparam$simparam %*% t(mm)
+    theta <- as.matrix(apply(eta, 2, linkinv))
+    ev <- theta * gamma(1 + 1/exp(simparam$simalpha))
+    pv <- as.matrix(rweibull(length(ev), shape = exp(simparam$simalpha), scale = theta))
+    return(list(ev = ev, pv = pv))
+  }
+)
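+
+# Editorial note: for T ~ Weibull(shape = k, scale = theta), E[T] = theta * gamma(1 + 1/k),
+# which is what ev computes above with k = exp(simparam$simalpha). Quick numeric check:
+#   mean(rweibull(1e6, shape = 2, scale = 3))   # approximately 3 * gamma(1.5) ~ 2.66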
+
+zweibull$methods(
+  mcfun = function(x, b0=0, b1=1, alpha=1, sim=TRUE){
+    .self$mcformula <- as.formula("Surv(y.sim, event) ~ x.sim")
+
+    lambda <- exp(b0 + b1 * x)
+    event <- rep(1, length(x))
+    y.sim <- rweibull(n=length(x), shape=alpha, scale=lambda)
+    y.hat <- lambda * gamma(1 + (1/alpha))
+    
+    if(sim){
+        data <- data.frame(y.sim=y.sim, event=event, x.sim=x)
+        return(data)
+    }else{
+        data <- data.frame(y.hat=y.hat, event=event, x.seq=x)
+        return(data)
+    }
+  }
+)
+
diff --git a/R/model-zelig.R b/R/model-zelig.R
new file mode 100755
index 0000000..7be9b85
--- /dev/null
+++ b/R/model-zelig.R
@@ -0,0 +1,1274 @@
+#' Zelig reference class
+#'
+#' Zelig website: \url{http://zeligproject.org/}
+#'
+#' @import methods
+#' @export Zelig
+#' @exportClass Zelig
+#' 
+#' @field fn R function to call to wrap
+#' @field formula Zelig formula
+#' @field weights observation weights: a numeric vector or the name of a weights variable in the data
+#' @field name name of the Zelig model
+#' @field data data frame or matrix
+#' @field by split the data by factors
+#' @field mi work with imputed dataset
+#' @field idx model index
+#' @field zelig.call Zelig function call
+#' @field model.call wrapped function call
+#' @field zelig.out estimated zelig model(s)
+#' @field setx.out set values
+#' @field setx.labels pretty-print qi
+#' @field bsetx is x set?
+#' @field bsetx1 is x1 set?
+#' @field bsetrange is range set?
+#' @field bsetrange1 is range1 set?
+#' @field range range
+#' @field range1 range1
+#' @field test.statistics list of test statistics
+#' @field sim.out simulated qi's
+#' @field simparam simulated parameters
+#' @field num  number of simulations
+#' @field authors Zelig model authors
+#' @field zeligauthors Zelig authors
+#' @field modelauthors wrapped model authors
+#' @field packageauthors wrapped package authors
+#' @field refs citation information
+#' @field year year the model was released
+#' @field description model description
+#' @field url model URL
+#' @field url.docs model documentation URL
+#' @field category model category
+#' @field vignette.url vignette URL
+#' @field json JSON export
+#' @field ljson JSON export
+#' @field outcome JSON export
+#' @field wrapper JSON export
+#' @field explanatory JSON export
+#' @field mcunit.test unit testing
+#' @field with.feedback Feedback
+
+z <- setRefClass("Zelig", fields = list(fn = "ANY", # R function to call to wrap
+                                        formula = "ANY", # Zelig formula
+                                        weights = "ANY", 
+                                        acceptweights = "logical",
+                                        name = "character", # name of the Zelig model
+                                        data = "ANY", # data frame or matrix,
+                                        originaldata = "ANY", # data frame or matrix,
+                                        originalweights = "ANY",
+                                        # ddata = "ANY",
+                                        # data.by = "ANY", # data frame or matrix
+                                        by = "ANY",
+                                        mi = "logical",
+                                        matched = "logical",
+                                        
+                                        avg = "ANY",
+                                        
+                                        idx = "ANY", # model index
+                                        
+                                        zelig.call = "call", # Zelig function call
+                                        model.call = "call", # wrapped function call
+                                        zelig.out = "ANY", # estimated zelig model(s)
+                                        signif.stars = "logical",
+                                        signif.stars.default = "logical", # significance stars default
+                                        
+                                        setx.out = "ANY", # set values
+                                        setx.labels = "list", # pretty-print qi,
+                                        bsetx = "logical",
+                                        bsetx1 = "logical",
+                                        bsetrange = "logical",
+                                        bsetrange1 = "logical",
+                                        range = "ANY",
+                                        range1 = "ANY",
+                                        setforeveryby = "logical",
+
+                                        test.statistics = "ANY",
+                                        
+                                        sim.out = "list", # simulated qi's
+                                        simparam = "ANY", # simulated parameters
+                                        num = "numeric", # nb of simulations
+                                        bootstrap = "logical", # use bootstrap
+                                        bootstrap.num = "numeric", # number of bootstraps to use
+
+                                        authors = "character", # Zelig model description
+                                        zeligauthors = "character",
+                                        modelauthors = "character",
+                                        packageauthors = "character",
+                                        refs = "ANY", # is there a way to recognize class "bibentry"?,
+
+                                        year = "numeric",
+                                        description = "character",
+                                        url = "character",
+                                        url.docs = "character",
+                                        category = "character",
+                                        
+                                        vignette.url = "character",
+                                        
+                                        json = "ANY", # JSON export
+                                        ljson = "ANY",
+                                        outcome = "ANY",
+                                        wrapper = "character",
+                                        explanatory = "ANY",
+                                        
+                                        #Unit Testing
+                                        mcunit.test = "ANY",
+                                        mcformula = "ANY",
+                                        
+                                        # Feedback
+                                        with.feedback = "logical"))
+
+z$methods(
+  initialize = function() {
+    .self$authors <- "Kosuke Imai, Gary King, and Olivia Lau"
+    .self$zeligauthors <- "Christine Choirat, James Honaker, Kosuke Imai, Gary King, and Olivia Lau"
+    .self$refs <- bibentry()
+    .self$year <- as.numeric(format(Sys.Date(), "%Y"))
+    .self$url <- "http://zeligproject.org/"
+    .self$url.docs <- "http://docs.zeligproject.org/en/latest/"
+    .self$setx.out <- list()
+    .self$setx.labels <- list(ev  = "Expected Values: E(Y|X)",
+                              ev1 = "Expected Values: E(Y|X1)",
+                              pv  = "Predicted Values: Y|X",
+                              pv1 = "Predicted Values: Y|X1",
+                              fd  = "First Differences: E(Y|X1) - E(Y|X)")
+    .self$bsetx <- FALSE
+    .self$bsetx1 <- FALSE
+    .self$bsetrange <- FALSE
+    .self$bsetrange1 <- FALSE
+    .self$acceptweights <- FALSE
+
+    .self$bootstrap <- FALSE
+    .self$bootstrap.num <- 100
+    # JSON
+    .self$vignette.url <- paste(.self$url.docs, tolower(class(.self)[1]), ".html", sep = "")
+    .self$vignette.url <- sub("-gee", "gee", .self$vignette.url)
+    .self$vignette.url <- sub("-bayes", "bayes", .self$vignette.url)
+    # .self$vignette.url <- paste(.self$url.docs, "zelig-", sub("-", "", .self$name), ".html", sep = "")
+    .self$category <- "undefined"
+    .self$explanatory <- c("continuous",
+                           "discrete",
+                           "nominal",
+                           "ordinal",
+                           "binary")
+    .self$outcome <- ""
+    .self$wrapper <- "wrapper"
+    # Is 'ZeligFeedback' package installed?
+    .self$with.feedback <- "ZeligFeedback" %in% installed.packages()
+    .self$setforeveryby <- TRUE
+    
+    .self$avg <- function(val) {
+      if (is.numeric(val))
+        mean(val)
+      else if (is.ordered(val))
+        Median(val)
+      else
+        Mode(val)
+    }
+  }
+)
+
+z$methods(
+  packagename = function() {
+    "Automatically retrieve wrapped package name"
+    # If this becomes "quote(mypackage::myfunction) then
+    # regmatches(.self$fn,regexpr("(?<=\\()(.*?)(?=\\::)",.self$fn, perl=TRUE))
+    # would extract "mypackage"
+    return(as.character(.self$fn)[2])
+  }
+)
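+
+# Editorial illustration: .self$fn is stored as a quoted call such as quote(AER::tobit);
+# as.character() on that call yields c("::", "AER", "tobit"), so element [2] is the
+# package name:
+#   as.character(quote(AER::tobit))[2]   # "AER"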
+
+z$methods(
+  cite = function() {
+    "Provide citation information about Zelig and Zelig model, and about wrapped package and wrapped model"
+    title <- paste(.self$name, ": ", .self$description, sep="")
+    localauthors <- ""
+    if (length(.self$modelauthors) & (!identical(.self$modelauthors,""))){   # covers both empty styles: character(0) and "" --the latter being length 1.
+        localauthors<-.self$modelauthors
+    }else if (length(.self$packageauthors) & (!identical(.self$packageauthors,""))){
+        localauthors<-.self$packageauthors
+    }else{
+        localauthors<-.self$zeligauthors
+    }
+    cat("How to cite this model in Zelig:\n  ",
+    localauthors, ". ", .self$year, ".\n  ", title,
+    "\n  in ", .self$zeligauthors,
+    ",\n  \"Zelig: Everyone's Statistical Software,\" ",
+    .self$url, "\n", sep = "")
+  }
+)
+
+# Construct a reference list specific to a Zelig model
+# Styles available from the bibentry print method: "text", "Bibtex", "citation", "html", "latex", "R", "textVersion"
+# The "sphinx" style reformats "text" style with some markdown substitutions
+
+z$methods(
+  references = function(style="sphinx") {
+    "Construct a reference list specific to a Zelig model."
+    mystyle <- style
+    if (mystyle=="sphinx"){
+        mystyle <- "text"
+    }
+    mycites<-.self$refs
+    if(!is.na(.self$packagename() )){
+      mycites<-c(mycites, citation(.self$packagename()))  # Concatenate model-specific Zelig references with package references
+    }
+    mycites<-mycites[!duplicated(mycites)]                            # Remove duplicates (many packages have duplicate references in their lists)
+    s<-capture.output(print(mycites, style = mystyle))
+    if(style == "sphinx"){                          # format the "text" style conventions for sphinx markdown for building docs for zeligproject.org
+      s<-gsub("\\*","\\*\\*",s, perl=TRUE)
+      s<-gsub("_","\\*",s, perl=TRUE)
+      s<-gsub("\\*\\(","\\* \\(",s, perl=TRUE)
+    }
+    cat(s, sep="\n")
+  }
+)
+
+z$methods(
+  zelig = function(formula, data, model=NULL, ..., weights=NULL, by, bootstrap=FALSE) {
+    "The zelig command estimates a variety of statistical models"
+    fn2 <- function(fc, data) {
+      fc$data <- data
+      return(fc)
+    }
+
+    .self$formula <- formula
+    # Overwrite formula with mc unit test formula into correct environment, if it exists
+    # Requires fixing R scoping issue
+    if("formula" %in% class(.self$mcformula)){
+      .self$formula <- as.formula( deparse(.self$mcformula), env=environment(.self$formula) )
+      .self$model.call$formula <- as.formula( deparse(.self$mcformula), env=globalenv() )
+    }else if(is.character(.self$mcformula)) {
+      .self$formula <- as.formula( .self$mcformula, env=environment(.self$formula) )
+      .self$model.call$formula <- as.formula( .self$mcformula, env=globalenv() )
+    }
+    if(!is.null(model)){
+      cat("Argument model is only valid for the Zelig wrapper, but not the Zelig method, and will be ignored.\n")
+      flag <- !(names(.self$model.call)=="model")
+      .self$model.call <- .self$model.call[flag]
+      flag <- !(names(.self$zelig.call)=="model")
+      .self$zelig.call <- .self$zelig.call[flag]
+    }
+
+
+    .self$by <- by
+    .self$originaldata <- data
+    .self$originalweights <- weights
+    datareformed <- FALSE
+
+    if(is.numeric(bootstrap)){
+      .self$bootstrap <- TRUE
+      .self$bootstrap.num <- bootstrap
+    }else if(is.logical(bootstrap)){
+      .self$bootstrap <- bootstrap
+    }
+    # Remove bootstrap argument from model call
+    .self$model.call$bootstrap <- NULL
+    # Check if bootstrap possible by checking whether param method has method argument available
+    if(.self$bootstrap){
+      if(!("method" %in% names(formals(.self$param)))){
+        stop("The bootstrap does not appear to be implemented for this Zelig model.  Check that the param() method allows point predictions.")
+      }
+      .self$setforeveryby <- FALSE  # compute covariates in set() at the dataset-level
+    }
+
+
+    # Matched datasets from MatchIt
+    if ("matchit" %in% class(data)){
+      idata <- MatchIt::match.data(data) 
+      iweights <- idata$weights
+
+      .self$matched <- TRUE
+      .self$data <- idata
+      datareformed <- TRUE
+
+      # Check if noninteger valued weights exist and are incompatible with zelig model
+      validweights <- TRUE
+      if(!.self$acceptweights){           # This is a convoluted way to do this, but avoids the costly "any()" calculation if not necessary
+      	if(any(iweights != ceiling(iweights))){  # any(y != ceiling(y)) tests slightly faster than all(y == ceiling(y))
+      		validweights <- FALSE
+      	}
+      }
+      if(!validweights){   # could also be  if((!acceptweights) & (any(iweights != ceiling(iweights))  but avoid the long any for big datasets
+      	cat("The weights created by matching for this dataset have noninteger values,\n",
+             "however, the statistical model you have chosen is only compatible with integer weights.\n",
+             "Either change the matching method (such as to `optimal' matching with a 1:1 ratio)\n",
+             "or change the statistical model in Zelig.\n",
+             "We will round matching weights up to integers to proceed.\n\n")
+      	.self$weights <- ceiling(iweights)
+      }else{
+        .self$weights <- iweights
+      }
+
+      # Set references appropriate to matching methods used 
+      .self$refs <- c(.self$refs, citation("MatchIt"))
+      # The matchit object is the data argument here (m.out was undefined in this scope)
+      if(data$call$method=="cem" & ("cem" %in% installed.packages())) .self$refs <- c(.self$refs, citation("cem"))
+      #if(data$call$method=="exact") .self$refs <- c(.self$refs, citation(""))
+      if((data$call$method=="full") & ("optmatch" %in% installed.packages())) .self$refs <- c(.self$refs, citation("optmatch"))
+      if(data$call$method=="genetic" & ("Matching" %in% installed.packages())) .self$refs <- c(.self$refs, citation("Matching"))
+      #if(data$call$method=="nearest") .self$refs <- c(.self$refs, citation(""))
+      if(data$call$method=="optimal" & ("optmatch" %in% installed.packages())) .self$refs <- c(.self$refs, citation("optmatch"))
+      #if(data$call$method=="subclass") .self$refs <- c(.self$refs, citation(""))
+    } else {
+      .self$matched  <- FALSE
+    }
+    # Multiply Imputed datasets from Amelia  
+    # Notice Amelia objects ignore weights currently, which is reasonable as the Amelia package ignores weights
+    if ("amelia" %in% class(data)){
+      idata <- data$imputations
+      .self$data <- rbind_all(lapply(seq(length(idata)),
+                                     function(imputationNumber)
+                                       cbind(imputationNumber, idata[[imputationNumber]])))
+      .self$weights <- NULL  # This should be considered or addressed
+      datareformed <- TRUE
+      .self$by <- c("imputationNumber", by)
+      .self$mi <- TRUE
+      .self$setforeveryby <- FALSE  # compute covariates in set() on the entire stacked dataset
+      .self$refs<-c(.self$refs, citation("Amelia"))
+    } else {
+      .self$mi <- FALSE
+    }
+
+    if (!datareformed){
+      .self$data <- data  # If none of the above package integrations have already reformed the data from another object, use the supplied data
+
+      # Run some checking on weights argument, and see if is valid string or vector
+      if(!is.null(weights)){
+      	if(is.character(weights)){
+      		if(weights %in% names(.self$data)){
+      			.self$weights <- .self$data[[weights]]  # This is a way to convert data.frame portion to type numeric (as data.frames are lists)
+      		}else{
+      			cat("Variable name given for weights not found in dataset, so will be ignored.\n\n")
+      			.self$weights <- NULL  # No valid weights
+            .self$model.call$weights <- NULL
+      		}
+      	}else if(is.vector(weights)){
+      		if(length(weights)==nrow(.self$data) & is.vector(weights)){
+      			if(min(weights)<0){
+      				weights[weights < 0] <- 0
+      				cat("Negative valued weights were supplied and will be replaced with zeros.")
+      			}
+      			.self$weights <- weights # Weights 
+      		}else{
+      			cat("Length of vector given for weights is not equal to number of observations in dataset, and will be ignored.\n\n")
+      			.self$weights <- NULL # No valid weights
+            .self$model.call$weights <- NULL
+      		}
+      	}else{
+      		cat("Supplied weights argument is not a vector or a variable name in the dataset, and will be ignored.\n\n")
+      		.self$weights <- NULL # No valid weights
+          .self$model.call$weights <- NULL
+      	}
+      }else{
+        .self$weights <- NULL  # No weights set, so weights are NULL
+        .self$model.call$weights <- NULL
+      }
+    } 
+
+    # If the Zelig model does not accept weights, but weights are provided, we rebuild the data
+    #   by bootstrapping using the weights as probabilities
+    #   or by duplicating rows proportional to the ceiling of their weight
+    # Otherwise we pass the weights to the model call
+    if(!is.null(.self$weights)){
+      if ((!.self$acceptweights)){
+        .self$buildDataByWeights2()   # Could use alternative method $buildDataByWeights() for the duplication approach.  Maybe set as argument?
+        .self$model.call$weights <- NULL
+	  } else {
+		.self$model.call$weights <- .self$weights   # NEED TO CHECK THIS IS THE NAME FOR ALL MODELS, or add more generic field containing the name for the weights argument
+	  }
+	}
+
+    if(.self$bootstrap){
+      .self$buildDataByBootstrap()
+    }
+
+    .self$model.call[[1]] <- .self$fn
+    .self$model.call$by <- NULL
+    if (is.null(.self$by)) {
+      .self$data <- cbind(1, .self$data)
+      names(.self$data)[1] <- "by"
+      .self$by <- "by"
+    }
+
+    #cat("zelig.call:\n")
+    #print(.self$zelig.call)
+    #cat("model.call:\n")
+    #print(.self$model.call)
+    .self$data <- tbl_df(.self$data)
+    #.self$zelig.out <- eval(fn2(.self$model.call, quote(as.data.frame(.)))) # shortened test version that bypasses "by"
+    .self$zelig.out <- .self$data %>% 
+      group_by_(.self$by) %>% 
+        do(z.out = eval(fn2(.self$model.call, quote(as.data.frame(.)))))
+  }
+)
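+
+# Editorial usage sketch (hypothetical; assumes the least-squares class zls from
+# model-ls.R and the built-in swiss dataset):
+#   z5 <- zls$new()
+#   z5$zelig(Fertility ~ Education, data = swiss)
+#   z5$setx(Education = 10)
+#   z5$sim(num = 1000)
+#   z5$summarize()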
+
+z$methods(
+  set = function(..., fn = list(numeric = mean, ordered = Median, other = Mode)) {
+    "Setting Explanatory Variable Values"
+    .self$avg <- function(val) {
+      if (is.numeric(val))
+        ifelse(is.null(fn$numeric), mean(val), fn$numeric(val))
+      else if (is.ordered(val))
+        ifelse(is.null(fn$ordered), Median(val), fn$ordered(val))
+      else
+        ifelse(is.null(fn$other), Mode(val), fn$other(val))
+    }
+    s <-list(...)
+    # This eliminates warning messages when factor rhs passed to lm() model in reduce() utility function
+    if(.self$category=="multinomial"){  # Perhaps find more robust way to test if dep.var. is factor
+      f2 <- update(.self$formula, as.numeric(.) ~ .)
+    }else{
+      f2 <- .self$formula
+    }
+    f <- update(.self$formula, 1 ~ .)
+    # update <- na.omit(.self$data) %>% # remove missing values
+
+    # compute on each slice of the dataset defined by "by"
+    if(.self$setforeveryby){  
+      update <- .self$data %>%
+        group_by_(.self$by) %>%
+        do(mm = model.matrix(f, reduce(dataset = "MEANINGLESS ARGUMENT", s, 
+                                       formula = f2, 
+                                       data = ., avg = .self$avg))) # fix in last argument from data=.self$data to data=.  (JH)
+      
+    # compute over the entire dataset  - currently used for mi and bootstrap.  Should be opened up to user.    
+    } else {  
+      if(.self$bootstrap){
+        flag <- .self$data$bootstrapIndex == (.self$bootstrap.num + 1) # These are the original observations
+        tempdata <- .self$data[flag,]  
+      }else{
+        tempdata <- .self$data # presently this is for mi.  And this is then the entire stacked dataset.
+      }
+
+      allreduce <- reduce(dataset = "MEANINGLESS ARGUMENT", s, 
+                          formula = f2, 
+                          data = tempdata,
+                          avg = .self$avg)
+      allmm <- model.matrix(f, allreduce) 
+      update <- .self$data %>%
+        group_by_(.self$by) %>%
+        do(mm = allmm)  
+    }
+    return(update)
+  }
+)
+
+z$methods(
+  setx = function(..., fn = list(numeric = mean, ordered = Median, other = Mode)) {
+    .self$bsetx <- TRUE
+    .self$setx.out$x  <- .self$set(..., fn = fn)
+  }
+)
+
+z$methods(
+  setx1 = function(..., fn = list(numeric = mean, ordered = Median, other = Mode)) {
+    .self$bsetx1 <- TRUE
+    .self$setx.out$x1 <- .self$set(...)
+  }
+)
+
+z$methods(
+  setrange = function(..., fn = list(numeric = mean, ordered = Median, other = Mode)) {
+    .self$bsetrange <- TRUE
+    rng <- list()
+    s <- list(...)
+    m <- expand.grid(s)
+    .self$range <- m
+    .self$setx.out$range <- list()
+    for (i in 1:nrow(m)) {
+      l <- as.list(m[i, ])
+      names(l) <- names(m)
+      .self$setx.out$range[[i]] <- .self$set(l)
+    }
+  }
+)
+
+z$methods(
+  setrange1 = function(..., fn = list(numeric = mean, ordered = Median, other = Mode)) {
+    .self$bsetrange1 <- TRUE
+    rng <- list()
+    s <- list(...)
+    m <- expand.grid(s)
+    .self$range1 <- m
+    .self$setx.out$range1 <- list()
+    for (i in 1:nrow(m)) {
+      l <- as.list(m[i, ])
+      names(l) <- names(m)
+      .self$setx.out$range1[[i]] <- .self$set(l)
+    }
+  }
+)
+
+z$methods(
+  param = function(z.out, method="mvn") {
+    if(identical(method,"mvn")){
+      return(mvrnorm(.self$num, coef(z.out), vcov(z.out))) 
+    } else if(identical(method,"point")){
+      return(t(as.matrix(coef(z.out))))
+    } else {
+      stop("param called with method argument of undefined type.")
+    }
+  }
+)
+
+z$methods(
+  sim = function(num = NULL) {
+    "Generic Method for Computing and Organizing Simulated Quantities of Interest"
+
+    ## If num is defined by user, it overrides the value stored in the .self$num field.
+    ## If num is not defined by user, but is also not yet defined in .self$num, then it defaults to 1000.
+    if (length(.self$num) == 0){
+      if(is.null(num)){
+        num <- 1000
+      }
+    }
+    if(!is.null(num)){
+      .self$num <- num
+    }
+
+    # This was previous version, that assumed sim only called once, or only method to access/write .self$num field:
+    #if (length(.self$num) == 0) 
+    #  .self$num <- num
+
+    # Divide simulations among imputed datasets
+    if(.self$mi){
+      am.m<-length(.self$getcoef())
+      .self$num <- ceiling(.self$num/am.m)
+    }
+
+    # If bootstrapped, use distribution of estimated parameters, 
+    #  otherwise use $param() method for parametric bootstrap.    
+    if (.self$bootstrap & ! .self$mi){
+      .self$num <- 1 
+      .self$simparam <- .self$zelig.out %>% 
+        do(simparam = .self$param(.$z.out, method="point"))
+    } else {
+      .self$simparam <- .self$zelig.out %>%
+        do(simparam = .self$param(.$z.out))
+    }
+
+    if (.self$bsetx)
+      .self$simx()
+    if (.self$bsetx1)
+      .self$simx1()
+    if (.self$bsetrange)
+      .self$simrange()
+    if (.self$bsetrange1)
+      .self$simrange1()
+  }
+)
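+
+# Editorial note: the simulation budget arithmetic above means that with mi data,
+# num = 1000 and m = 5 imputations give ceiling(1000/5) = 200 draws per imputed
+# dataset, while with bootstrap (and no mi) each bootstrapped fit contributes a
+# single point estimate (num = 1) instead of parametric draws.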
+
+z$methods(
+  simx = function() {
+    d <- zeligPlyrMutate(.self$zelig.out, simparam = .self$simparam$simparam)
+    d <- zeligPlyrMutate(d, mm = .self$setx.out$x$mm)
+    .self$sim.out$x <-  d %>%
+      do(qi = .self$qi(.$simparam, .$mm)) %>%
+      do(ev = .$qi$ev, pv = .$qi$pv)
+  }
+)
+
+z$methods(
+  simx1 = function() {
+    d <- zeligPlyrMutate(.self$zelig.out, simparam = .self$simparam$simparam)
+    d <- zeligPlyrMutate(d, mm = .self$setx.out$x1$mm)
+    .self$sim.out$x1 <-  d %>%
+      do(qi = .self$qi(.$simparam, .$mm)) %>%
+      do(ev = .$qi$ev, pv = .$qi$pv)
+    d <- zeligPlyrMutate(.self$sim.out$x1, ev0 = .self$sim.out$x$ev)
+    d <- d %>%
+      do(fd = .$ev - .$ev0)
+    .self$sim.out$x1 <- zeligPlyrMutate(.self$sim.out$x1, fd = d$fd) #JH
+  }
+)
+
+z$methods(
+  simrange = function() {
+    .self$sim.out$range <- list()
+    for (i in 1:nrow(.self$range)) {
+      d <- zeligPlyrMutate(.self$zelig.out, simparam = .self$simparam$simparam)
+      d <- zeligPlyrMutate(d, mm = .self$setx.out$range[[i]]$mm)
+      .self$sim.out$range[[i]] <-  d %>%
+        do(qi = .self$qi(.$simparam, .$mm)) %>%
+        do(ev = .$qi$ev, pv = .$qi$pv)
+    }
+  }
+)
+
+z$methods(
+  simrange1 = function() {
+    .self$sim.out$range1 <- list()
+    for (i in 1:nrow(.self$range1)) {
+      d <- zeligPlyrMutate(.self$zelig.out, simparam = .self$simparam$simparam)
+      d <- zeligPlyrMutate(d, mm = .self$setx.out$range1[[i]]$mm)
+      .self$sim.out$range1[[i]] <-  d %>%
+        do(qi = .self$qi(.$simparam, .$mm)) %>%
+        do(ev = .$qi$ev, pv = .$qi$pv)
+    }
+  }
+)
+
+
+
+z$methods(
+  ATT = function(treatment, treated=1, quietly=TRUE, num=NULL) {
+    "Generic Method for Computing Simulated (Sample) Average Treatment Effects on the Treated"
+
+    ## Checks on user provided arguments
+    if(!is.character(treatment)){
+      stop("Argument treatment should be the name of the treatment variable in the dataset.")
+    }
+    if(!(treatment %in% names(.self$data))){
+      stop(paste("Specified treatment variable", treatment, "is not in the dataset."))
+    }
+    # Check treatment variable included in model.
+    # Check treatment variable is 0 or 1 (or generalize to dichotomous).
+    # Check argument "treated" is 0 or 1 (or generalize to values of "treatment").
+    # Check "ev" is available QI.
+    # Check if multiple equation model (which will need method overwrite).
+
+
+    ## If num is defined by user, it overrides the value stored in the .self$num field.
+    ## If num is not defined by user, but is also not yet defined in .self$num, then it defaults to 1000.
+    if (length(.self$num) == 0){
+      if(is.null(num)){
+        num <- 1000
+      }
+    }
+    if(!is.null(num)){
+      if(!identical(num,.self$num)){   # .self$num changed, so regenerate simparam
+        .self$num <- num
+        .self$simparam <- .self$zelig.out %>%
+          do(simparam = .self$param(.$z.out))
+      }
+    }
+
+    ## Extract name of dependent variable, treated units
+    depvar <- as.character(.self$zelig.call[[2]][2]) 
+
+    ## Use dplyr to cycle over all splits of dataset
+    ## NOTE: THIS IS GOING TO USE THE SAME simparam SET FOR EVERY SPLIT
+    .self$sim.out$TE <- .self$data %>% 
+      group_by_(.self$by) %>% 
+        do(ATT = .self$simATT(simparam=.self$simparam$simparam[[1]], data=. , depvar=depvar, treatment=treatment, treated=treated) )   # z.out = eval(fn2(.self$model.call, quote(as.data.frame(.)))))
+
+    if(!quietly){
+      return(.self$sim.out$TE)  # The $getqi() method may generalize, otherwise, write a $getter.
+    } 
+  }
+)
+
+# Has calls to .self, so constructed as method rather than function internal to $ATT()
+# Function to simulate ATT 
+
+z$methods(
+  simATT = function(simparam, data, depvar, treatment, treated){
+    "Simulate an Average Treatment on the Treated"
+    
+    flag <- data[[treatment]]==treated
+    data[[treatment]] <- 1-treated
+
+    cf.mm <- model.matrix(.self$formula, data) # Counterfactual model matrix
+    cf.mm <- cf.mm[flag,]
+    
+    y1 <- data[flag, depvar] 
+    y1.n <- sum(flag)
+
+    ATT <- matrix(NA, nrow=y1.n, ncol= .self$num)
+    for(i in 1:y1.n){                   # Maybe $qi() generally works for all mm? Of all dimensions? If so, loop not needed.
+      ATT[i,] <- as.numeric(y1[i,1]) - .self$qi(simparam=simparam, mm=cf.mm[i, , drop=FALSE])$ev   
+    }
+    ATT <- apply(ATT, 2, mean) 
+    return(ATT)
+  }
+)
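+
+# Editorial note: for each simulation s, simATT() computes
+#   ATT_s = mean over treated units i of [ y_i - E(Y_i | treatment set to control, params_s) ]
+# so the returned vector has length .self$num, one draw of the sample ATT per simulation.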
+
+
+
+
+z$methods(
+  show = function(signif.stars = FALSE, subset = NULL, bagging = FALSE) {
+    "Display a Zelig object"
+    .self$signif.stars <- signif.stars
+    .self$signif.stars.default <- getOption("show.signif.stars")
+    options(show.signif.stars = .self$signif.stars)
+    if ("uninitializedField" %in% class(.self$zelig.out))
+      cat("Next step: Use 'zelig' method")
+    else if (length(.self$setx.out) == 0) {
+
+      #############################################################################	
+      # Current workaround to display call as $zelig.call rather than $model.call
+      # This is becoming a more complex workaround than revising the summary method
+      # should improve this approach in future:
+      for(jj in 1:length(.self$zelig.out$z.out)){
+        if("S4" %in% typeof(.self$zelig.out$z.out[[jj]]) ){
+          slot(.self$zelig.out$z.out[[jj]],"call") <- .self$zelig.call
+        } else {
+          if("call" %in% names(.self$zelig.out$z.out[[jj]])){
+      	    .self$zelig.out$z.out[[jj]]$call <- .self$zelig.call
+          } else if ("call" %in% names(attributes(.self$zelig.out$z.out[[1]])) ){
+            attr(.self$zelig.out$z.out[[1]],"call")<- .self$zelig.call
+          }
+        }
+      }	
+      #############################################################################
+
+      if((.self$mi) & is.null(subset)){
+        cat("Model: Combined Imputations \n")
+        vcovlist <-.self$getvcov()
+        coeflist <-.self$getcoef()
+        am.m<-length(coeflist)
+        am.k<-length(coeflist[[1]])
+        q <- matrix(unlist(coeflist), nrow=am.m, ncol=am.k, byrow=TRUE)
+        se <- matrix(NA, nrow=am.m, ncol=am.k)
+        for(i in 1:am.m){
+          se[i,]<-sqrt(diag(vcovlist[[i]]))
+        }
+        ones <- matrix(1, nrow = 1, ncol = am.m)
+        imp.q <- (ones %*% q)/am.m        # Slightly faster than "apply(q,2,mean)"
+        ave.se2 <- (ones %*% (se^2))/am.m # Similarly, faster than "apply(se^2,2,mean)"
+        diff <- q - matrix(1, nrow = am.m, ncol = 1) %*% imp.q
+        sq2 <- (ones %*% (diff^2))/(am.m - 1)
+        imp.se <- sqrt(ave.se2 + sq2 * (1 + 1/am.m))
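+        # Editorial note: these are Rubin's combining rules -- with m imputations,
+        # W = ave.se2 (mean within-imputation variance), B = sq2 (between-imputation
+        # variance), and total variance T = W + (1 + 1/m) * B, so imp.se = sqrt(T).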
+            
+        Estimate<-as.vector(imp.q)
+        Std.Error<-as.vector(imp.se)
+        zvalue<-Estimate/Std.Error
+        Pr.z<-2*(1-pnorm(abs(zvalue)))
+        stars<-rep("",am.k)
+        stars[Pr.z<.1]<-"."
+        stars[Pr.z<.05]<-"*"
+        stars[Pr.z<.01]<-"**"
+        stars[Pr.z<.001]<-"***"
+
+        results<-data.frame(Estimate,Std.Error,zvalue,Pr.z,stars,row.names=names(coeflist[[1]]))
+        names(results)<-c("Estimate","Std.Error","z value","Pr(>|z|)","")
+        print(results, digits=max(3, getOption("digits") - 3))
+        cat("---\nSignif. codes:  '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n")
+        cat("\n")
+        cat("For results from individual imputed datasets, use summary(x, subset = i:j)\n")
+      }else if ((.self$mi) & !is.null(subset)) {
+        for(i in subset){
+            cat("Imputed Dataset ",i,sep="")
+            print(base::summary(.self$zelig.out$z.out[[i]]))
+        }
+      }else if ((.self$bootstrap) & is.null(subset)) {  
+        # Much reuse of Rubin's Rules from above.  Probably able to better generalize across these two cases:
+        cat("Model: Combined Bootstraps \n")
+        vcovlist <-.self$getvcov()
+        coeflist <-.self$getcoef()
+        am.m<-length(coeflist) - 1
+        am.k<-length(coeflist[[1]])
+        q <- matrix(unlist(coeflist[-(am.m+1)]), nrow=am.m, ncol=am.k, byrow=TRUE)
+        #se <- matrix(NA, nrow=am.m, ncol=am.k)
+        #for(i in 1:am.m){
+        #  se[i,]<-sqrt(diag(vcovlist[[i]]))
+        #}
+        ones <- matrix(1, nrow = 1, ncol = am.m)
+        imp.q <- (ones %*% q)/am.m        # Slightly faster than "apply(q,2,mean)"
+        #ave.se2 <- (ones %*% (se^2))/am.m # Similarly, faster than "apply(se^2,2,mean)"
+        diff <- q - matrix(1, nrow = am.m, ncol = 1) %*% imp.q
+        sq2 <- (ones %*% (diff^2))/(am.m - 1)
+        #imp.se <- sqrt(ave.se2 + sq2 * (1 + 1/am.m))
+        imp.se <- sqrt(sq2 * (1 + 1/am.m))  # Note departure from Rubin's rules here.  
+        
+        if(bagging){    
+          Estimate<-as.vector(imp.q)
+        }else{
+          Estimate<-coeflist[[am.m+1]]
+        }
+        Std.Error<-as.vector(imp.se)
+        zvalue<-Estimate/Std.Error
+        Pr.z<-2*(1-pnorm(abs(zvalue)))
+        stars<-rep("",am.k)
+        stars[Pr.z<.1]<-"."
+        stars[Pr.z<.05]<-"*"
+        stars[Pr.z<.01]<-"**"
+        stars[Pr.z<.001]<-"***"
+
+        results<-data.frame(Estimate,Std.Error,zvalue,Pr.z,stars,row.names=names(coeflist[[1]]))
+        names(results)<-c("Estimate","Std.Error","z value","Pr(>|z|)","")
+        print(results, digits=max(3, getOption("digits") - 3))
+        cat("---\nSignif. codes:  '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1\n")
+        cat("\n")
+        cat("For results from individual bootstrapped datasets, use summary(x, subset = i:j)\n")
+
+      }else if ((.self$bootstrap) & !is.null(subset)) {
+        for(i in subset){
+            cat("Bootstrapped Dataset ",i,sep="")
+            print(base::summary(.self$zelig.out$z.out[[i]]))
+        }
+      }else{
+        summ <- .self$zelig.out %>%
+        do(summ = {cat("Model: \n")
+          if (length(.self$by) == 1) {
+              if (.self$by == "by") {
+                  cat()
+              }
+              else {
+                  print(.[.self$by])
+              }
+          } else {
+              print(.[.self$by])
+          }
+          if("S4" %in% typeof(.$z.out)){  # Need to change summary method here for some classes
+              print(summary(.$z.out))    
+          }else{
+              print(base::summary(.$z.out))
+          }
+        })
+      }
+
+
+      if("gim.criteria" %in% names(.self$test.statistics)){
+          if(.self$test.statistics$gim.criteria){
+#               cat("According to the GIM-rule-of-thumb, your model probably has some type of specification error.\n",
+#               "We suggest you run model diagnostics and seek to fix the problem.\n",
+#               "You may also wish to run the full GIM test (which takes more time) to be sure.\n",
+#               "See http://.... for more information.\n \n")
+            cat("Statistical Warning: The GIM test suggests this model is misspecified\n",
+                "(based on comparisons between classical and robust SE's; see http://j.mp/GIMtest).\n",
+                "We suggest you run diagnostics to ascertain the cause, respecify the model\n",
+                "and run it again.\n\n")
+          }
+      }
+      
+      cat("Next step: Use 'setx' method\n")
+    } else if (length(.self$setx.out) != 0 & length(.self$sim.out) == 0) {
+      niceprint<-function(obj, name){
+        if(!is.null(obj[[1]])){
+          cat(name,":\n", sep="")
+          screenoutput<-obj[[1]]
+          attr(screenoutput,"assign")<-NULL
+          print(screenoutput, digits=max(3, getOption("digits") - 3))
+        }
+      }
+      niceprint(obj=.self$setx.out$x$mm, name="setx")
+      niceprint(obj=.self$setx.out$x1$mm, name="setx1")
+      niceprint(obj=.self$setx.out$range[[1]]$mm, name="range")
+      niceprint(obj=.self$setx.out$range1[[1]]$mm, name="range1")
+      cat("\nNext step: Use 'sim' method\n")
+    } else { # sim.out
+      pstat <- function(s.out, what = "sim x") {
+        simu <- s.out %>%
+          do(simu = {cat("\n", what, ":\n")
+                     cat(" -----\n")
+                     cat("ev\n")
+                     print(stat(.$ev, .self$num))
+                     cat("pv\n")
+                     print(stat(.$pv, .self$num))
+                     if (!is.null(.$fd)) {
+                       cat("fd\n")
+                       print(stat(.$fd, .self$num))}
+          }
+          )
+      }
+      pstat(.self$sim.out$x)
+      pstat(.self$sim.out$x1, "sim x1")
+      if (!is.null(.self$setx.out$range)) {
+        for (i in seq(.self$sim.out$range)) {
+          cat("\n")
+          print(.self$range[i, ])
+          cat("\n")
+          pstat(.self$sim.out$range[[i]], "sim range")
+          cat("\n")
+        }
+      }
+      if (!is.null(.self$setx.out$range1)) {
+        for (i in seq(.self$sim.out$range1)) {
+          cat("\n")
+          print(.self$range1[i, ])
+          cat("\n")
+          pstat(.self$sim.out$range1[[i]], "sim range1")
+          cat("\n")
+        }
+      }
+    }
+    options(show.signif.stars = .self$signif.stars.default)
+  }
+)
+
+z$methods(
+  graph = function() {
+    "Plot the quantities of interest"
+    qi.plot(.self)
+  }
+)
+
+z$methods(
+  summarize = function(...) {
+    "Display a Zelig object"
+    show(...)
+  }
+)
+
+z$methods(
+  summarise = function(...) {
+    "Display a Zelig object"
+    show(...)
+  }
+)
+
+z$methods(
+  help = function() {
+    "Open the model vignette from http://zeligproject.org/"
+#     vignette(class(.self)[1])
+    browseURL(.self$vignette.url)
+  } 
+)
+
+z$methods(
+  getcoef = function() {
+    "Get estimated model coefficients"
+    result <- try(lapply(.self$zelig.out$z.out, coef), silent = TRUE)
+    if ("try-error" %in% class(result))
+      stop("'coef' method' not implemented for model '", .self$name, "'")
+    else
+      return(result)
+  } 
+)
+
+z$methods(
+  getvcov = function() {
+    "Get estimated model variance-covariance matrix"
+    result <- try(lapply(.self$zelig.out$z.out, vcov), silent = TRUE)
+    if ("try-error" %in% class(result))
+      stop("'vcov' method not implemented for model '", .self$name, "'")
+    else
+      return(result)
+  }
+)
+
+z$methods(
+  getfitted = function() {
+    "Get fitted values"
+    result <- try(lapply(.self$zelig.out$z.out, fitted), silent = TRUE)
+    if ("try-error" %in% class(result))
+      stop("'fitted' method not implemented for model '", .self$name, "'")
+    else
+      return(result)
+  }
+)
+
+z$methods(
+  getpredict = function() {
+    "Get predicted values"
+    result <- try(lapply(.self$zelig.out$z.out, predict), silent = TRUE)
+    if ("try-error" %in% class(result))
+      stop("'predict' method not implemented for model '", .self$name, "'")
+    else
+      return(result)
+  }
+)
+
+z$methods(
+  getqi = function(qi="ev", xvalue="x", subset=NULL){
+    "Get quantities of interest"
+    possiblexvalues <- names(.self$sim.out)
+    if(!(xvalue %in% possiblexvalues)){
+      stop(paste("xvalue must be ", paste(possiblexvalues, collapse=" or ") , ".", sep=""))
+    }
+    possibleqivalues <- c(names(.self$sim.out[[xvalue]]), names(.self$sim.out[[xvalue]][[1]]))
+    if(!(qi %in% possibleqivalues)){
+      stop(paste("qi must be ", paste(possibleqivalues, collapse=" or ") , ".", sep=""))
+    }
+    if(.self$mi){
+      if(is.null(subset)){
+        am.m<-length(.self$getcoef())
+        subset <- 1:am.m
+      }
+      tempqi <- do.call(rbind, .self$sim.out[[xvalue]][[qi]][subset])
+    } else if(.self$bootstrap){
+      if(is.null(subset)){
+        subset <- 1:.self$bootstrap.num
+      }
+      tempqi <- do.call(rbind, .self$sim.out[[xvalue]][[qi]][subset])
+    } else if(xvalue %in% c("range", "range1")) {
+      tempqi <- do.call(rbind, .self$sim.out[[xvalue]])[[qi]]
+    } else {
+      tempqi<- .self$sim.out[[xvalue]][[qi]][[1]]   # also works:   tempqi <- do.call(rbind, .self$sim.out[[xvalue]][[qi]])
+    }
+    return(tempqi)
+  }
+)
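+
+# Editorial usage sketch (hypothetical object z on which setx()/setx1() and sim()
+# have already been run):
+#   z$getqi(qi = "ev", xvalue = "x")    # matrix of simulated expected values at x
+#   z$getqi(qi = "fd", xvalue = "x1")   # first differences, stored under x1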
+
+z$methods(
+  toJSON = function() {
+    "Convert Zelig object to JSON format"
+    if (!is.list(.self$json))
+      .self$json <- list()
+    .self$json$"name" <- .self$name
+    .self$json$"description" <- .self$description
+    .self$json$"outcome" <- list(modelingType = .self$outcome)
+    .self$json$"explanatory" <- list(modelingType = .self$explanatory)
+    .self$json$"vignette.url" <- .self$vignette.url
+    .self$json$"wrapper" <- .self$wrapper
+    tree <- c(class(.self)[1], .self$.refClassDef@refSuperClasses)
+    .self$json$tree <- head(tree, match("Zelig", tree) - 1)
+    .self$ljson <- .self$json
+    .self$json <- jsonlite::toJSON(json, pretty = TRUE)
+    return(.self$json)
+  }
+)
+
+# empty default data generating process to avoid error if not created as model specific method
+z$methods(
+  mcfun = function(x, ...){
+    return( rep(1,length(x)) )
+  }
+)
+
+# Monte Carlo unit test
+z$methods(
+  mcunit = function(nsim=500, minx=-2, maxx=2, b0=0, b1=1, alpha=1, ci=0.95, plot = TRUE, ...){
+    
+    passes <- TRUE
+    n.short <- 10      # number of points in the short x sequence used to check CI coverage
+    alpha.ci <- 1-ci   # alpha values for ci bounds, not speed parameter
+    x.sim <- runif(n=nsim, min=minx, max=maxx)
+    x.seq <- seq(from=minx, to=maxx, length = nsim)
+    
+    data.hat <- .self$mcfun(x=x.seq, b0=b0, b1=b1, alpha=alpha, ..., sim=FALSE)
+    if(!is.data.frame(data.hat)){
+        data.hat<-data.frame(x.seq=x.seq, y.hat=data.hat)
+    }
+    data.sim <- .self$mcfun(x=x.sim, b0=b0, b1=b1, alpha=alpha, ..., sim=TRUE)
+    if(!is.data.frame(data.sim)){
+        data.sim<-data.frame(x.sim=x.sim, y.sim=data.sim)
+    }
+
+    ## Estimate Zelig model and create numerical bounds on expected values
+    # This should be the solution, but requires fixing R scoping issue:
+    #.self$zelig(y.sim~x.sim, data=data.sim)      # formula will be overwritten in zelig() if .self$mcformula has been set
+    
+    ## Instead, remove formula field and set by hard code
+    .self$mcformula <- NULL
+    if(.self$name %in% c("exp","weibull","lognorm")){
+      .self$zelig(Surv(y.sim,event)~x.sim, data=data.sim)
+    }else{
+      .self$zelig(y.sim~x.sim, data=data.sim)
+    }
+    
+    x.short.seq<-seq(from=minx, to=maxx, length=n.short)
+    .self$setrange(x.sim=x.short.seq)
+    .self$sim()
+    
+    data.short.hat <- .self$mcfun(x=x.short.seq, b0=b0, b1=b1, alpha=alpha, ..., sim=FALSE)
+    if(!is.data.frame(data.short.hat)){
+        data.short.hat<-data.frame(x.seq=x.short.seq, y.hat=data.short.hat)
+    }
+    
+    history.ev <- history.pv <- matrix(NA, nrow=n.short, ncol=2)
+    for(i in 1:n.short){
+        xtemp<-x.short.seq[i]
+        .self$setx(x.sim=xtemp)
+        .self$sim()
+        #temp<-sort( .self$sim.out$x$ev[[1]] )
+        temp<-.self$sim.out$range[[i]]$ev[[1]]
+        # This is for ev's that are a probability distribution across outcomes, like ordered logit/probit
+        if(ncol(temp)>1){
+          temp <- temp %*% as.numeric(sort(unique(data.sim$y.sim)))  #as.numeric(colnames(temp))
+        }
+        temp <- sort(temp)
+        
+        #calculate bounds of expected values
+        history.ev[i,1]<-temp[max(round(length(temp)*(alpha.ci/2)),1) ]     # Lower ci bound
+        history.ev[i,2]<-temp[round(length(temp)*(1 - (alpha.ci/2)))]       # Upper ci bound
+        #temp<-sort( .self$sim.out$x$pv[[1]] )
+        temp<-sort( .self$sim.out$range[[i]]$pv[[1]] )
+        
+        #check that ci contains true value
+        passes <- passes & (min(history.ev[i,]) <= data.short.hat$y.hat[i] ) & (max(history.ev[i,]) >= data.short.hat$y.hat[i] )
+        
+        #calculate bounds of predicted values
+        history.pv[i,1]<-temp[max(round(length(temp)*(alpha.ci/2)),1) ]     # Lower ci bound
+        history.pv[i,2]<-temp[round(length(temp)*(1 - (alpha.ci/2)))]       # Upper ci bound
+    }
+    
+    ## Plot Monte Carlo Data
+    if(plot){
+      all.main = substitute(
+        paste(modelname, "(", beta[0], "=", b0, ", ", beta[1], "=", b1,",", alpha, "=", a0, ")"),
+        list(modelname = .self$name, b0 = b0, b1=b1, a0 = alpha)
+      )
+    
+      all.ylim<-c( min(c(data.sim$y.sim, data.hat$y.hat)) , max(c(data.sim$y.sim, data.hat$y.hat)) )
+    
+      plot(data.sim$x.sim, data.sim$y.sim, main=all.main, ylim=all.ylim, xlab="x", ylab="y", col="steelblue")
+      par(new=TRUE)
+      plot(data.hat$x.seq, data.hat$y.hat, main="", ylim=all.ylim, xlab="", ylab="", xaxt="n", yaxt="n", type="l", col="green", lwd=2)
+  
+      for(i in 1:n.short){
+        lines(x=rep(x.short.seq[i],2), y=c(history.pv[i,1],history.pv[i,2]), col="lightpink", lwd=1.6)
+        lines(x=rep(x.short.seq[i],2), y=c(history.ev[i,1],history.ev[i,2]), col="firebrick", lwd=1.6)
+      }
+    }
+    return(passes)
+    
+  }
+)
+
+# rebuild dataset by duplicating observations by (rounded) weights
+z$methods(
+  buildDataByWeights = function() {
+    if(!.self$acceptweights){
+      idata <- .self$data
+      iweights <- .self$weights
+      ceilweights <- ceiling(iweights)
+      n.obs <- nrow(idata)
+      windex <- rep(1:n.obs, ceilweights)
+      idata <- idata[windex,]
+      .self$data <- idata
+      if(any(iweights != ceiling(iweights))){
+        cat("Noninteger weights were set, but the model in Zelig is only able to use integer valued weights.\n",
+      	   "Each weight has been rounded up to the nearest integer.\n\n")
+  	  }
+    }
+  }
+)
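+
+# Editorial illustration: with weights c(1, 2.3, 0.2), ceiling() gives c(1, 3, 1),
+# so windex = rep(1:3, c(1, 3, 1)) duplicates row 2 three times and keeps rows 1
+# and 3 once; the noninteger values also trigger the rounding message above.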
+
+# rebuild dataset by bootstrapping using weights as probabilities
+z$methods(
+  buildDataByWeights2 = function() {
+    if(!.self$acceptweights){
+      iweights <- .self$weights
+      if(any(iweights != ceiling(iweights))){
+        cat("Noninteger weights were set, but the model in Zelig is only able to use integer valued weights.\n",
+      	   "A bootstrapped version of the dataset was constructed using the weights as sample probabilities.\n\n")
+        idata <- .self$data	
+        n.obs <- nrow(idata)
+        n.w   <- sum(iweights)
+        iweights <- iweights/n.w
+        windex <- sample(x=1:n.obs, size=n.w, replace=TRUE, prob=iweights)  # Should size be n.w or n.obs?  Relatedly, n.w might not be integer.
+        idata <- idata[windex,]
+        .self$data <- idata
+      }else{
+         .self$buildDataByWeights()  # If all weights are integers, just use duplication to rebuild dataset.
+  	  }
+    }
+  }
+)
+
+
+# rebuild dataset by bootstrapping using weights as probabilities
+#   might possibly combine this method with $buildDataByWeights2()
+z$methods(
+  buildDataByBootstrap = function() {
+      idata <- .self$data 
+      n.boot <- .self$bootstrap.num
+      n.obs <- nrow(idata)
+
+      if(!is.null(.self$weights)){
+        iweights <- .self$weights
+        n.w   <- sum(iweights)
+        iweights <- iweights/n.w
+      }else{
+        iweights <- NULL
+      }
+
+      windex <- bootstrapIndex <- NULL
+      for(i in 1:n.boot){
+        windex <- c(windex, sample(x=1:n.obs, size=n.obs, replace=TRUE, prob=iweights))
+        bootstrapIndex <- c(bootstrapIndex, rep(i,n.obs))
+      } 
+      # Last dataset is original data
+      idata <- rbind(idata[windex,], idata)
+      bootstrapIndex <- c(bootstrapIndex, rep(n.boot+1,n.obs))
+
+      idata$bootstrapIndex <- bootstrapIndex
+      .self$data <- idata
+      .self$by <- c("bootstrapIndex", .self$by)
+  }
+)
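+
+# Illustrative sketch (hypothetical helper, not Zelig API) of the stacking
+# scheme above: n.boot resampled copies of the data are stacked, the original
+# data is appended last, and bootstrapIndex marks each replicate (the
+# original rows get index n.boot + 1) so later estimation can run by group.
+stackBootstraps <- function(df, n.boot) {
+  n.obs <- nrow(df)
+  idx <- unlist(lapply(seq_len(n.boot),
+                       function(i) sample(n.obs, n.obs, replace = TRUE)))
+  out <- rbind(df[idx, , drop = FALSE], df)
+  out$bootstrapIndex <- c(rep(seq_len(n.boot), each = n.obs),
+                          rep(n.boot + 1, n.obs))
+  out
+}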
+
+
+z$methods(
+  feedback = function() {
+    "Send feedback to the Zelig team"
+    if (!.self$with.feedback)
+      return("ZeligFeedback package not installed")
+    # If ZeligFeedback is installed
+    print("ZeligFeedback package installed")
+    print(ZeligFeedback::feedback(.self))
+  }
+)
+
+# z$methods(
+#   finalize = function() {
+#     if (!.self$with.feedback)
+#       return("ZeligFeedback package not installed")
+#     # If ZeligFeedback is installed
+#     print("Thanks for providing Zelig usage information")
+#     # print(ZeligFeedback::feedback(.self))
+#     write(paste("feedback", ZeligFeedback::feedback(.self)),
+#           file = paste0("test-zelig-finalize-", date(), ".txt"))
+#   }
+# )
+
+
+#' Summary method for Zelig objects"
+#' @param object An Object of Class Zelig
+#' @param ... Additional parameters to be passed to summary
+setMethod("summary", "Zelig",
+          function(object, ...) {
+            object$summarize(...)
+          }
+)
+
+#' Plot method for Zelig objects
+#' @param x An Object of Class Zelig
+#' @param y unused
+#' @param ... Additional parameters to be passed to plot
+setMethod("plot", "Zelig",
+          function(x, ...) {
+            x$graph()
+          }
+)
+
+#' Variance-covariance method for Zelig objects
+#' @param object An Object of Class Zelig
+#' @param ... Additional parameters to be passed to vcov
+setMethod("vcov", "Zelig",
+          function(object, ...) {
+            object$getvcov()
+          }
+)
+
+#' Method for extracting estimated coefficients from Zelig objects
+#' @param object An Object of Class Zelig
+#' @param ... Additional parameters to be passed to coef
+setMethod("coef", "Zelig",
+          function(object, ...) {
+            object$getcoef()
+          }
+)
+
+#' Method for extracting estimated fitted values from Zelig objects
+#' @param object An Object of Class Zelig
+#' @param ... Additional parameters to be passed to fitted
+setMethod("fitted", "Zelig",
+          function(object, ...) {
+            object$getfitted()
+          }
+)
+
+#' Method for getting predicted values from Zelig objects
+#' @param object An Object of Class Zelig
+#' @param ... Additional parameters to be passed to predict
+setMethod("predict", "Zelig",
+          function(object, ...) {
+            object$getpredict()
+          }
+)
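+
+# Hedged usage sketch of the S4 wrappers above: each generic simply forwards
+# to the corresponding reference-class method, so for a fitted Zelig object
+# (the workflow below is assumed, following Zelig 5 conventions):
+#   z.out <- zelig(dist ~ speed, model = "ls", data = cars)
+#   summary(z.out)   # calls z.out$summarize()
+#   coef(z.out)      # calls z.out$getcoef()
+#   vcov(z.out)      # calls z.out$getvcov()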
diff --git a/R/model.frame.multiple.R b/R/model.frame.multiple.R
deleted file mode 100644
index f97ed00..0000000
--- a/R/model.frame.multiple.R
+++ /dev/null
@@ -1,107 +0,0 @@
-#' Create Model Frame from \code{multiple} Object
-#'
-#' This method creates a \code{model.frame} from a \code{multiple} object. This
-#' method will be deprecated as the development of Zelig 4 progresses.
-#' @usage \method{model.frame}{multiple}(formula,data,eqn=NULL,...)
-#' @S3method model.frame multiple
-#' @param formula an object of both type \code{formula} and \code{multiple}
-#' @param data a \code{data.frame}
-#' @param eqn the equation (name or index) to use
-#' @param ... ignored parameters
-#' @return a \code{model.frame} object
-#' @author Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
-model.frame.multiple <- function (formula,data,eqn=NULL,...){
-  if(class(formula)[[1]]=="terms"){
-    terms <-formula
-  }else{
-    terms<-terms(formula)
-  }
-
-  #is multilevel?
-  if(!(is.logical(attr(terms,"subs"))))
-    return(multilevel(tt=terms,data=data,eqn=eqn,mode=2))
-
-
-  "%w/o%" <- function(x,y) x[!x %in% y]
-
-
-  eqns<-names(formula)
-  eqns<-attr(terms,"systEqns")
-  nrEquations<-length(eqns)
-  termlabels<-attr(terms,"term.labels")
-  depVars<-attr(terms,"depVars")
-  Xs<-Ys<-tlNew<-dvNew<-list()
-  for (i in 1:nrEquations){
-    rhs<-toBuildFormula(termlabels[[eqns[[i]]]])
-    if(!(is.null(rhs))){
-      rhs<-paste(rhs,"-1")
-      rhs<-as.formula(paste("~",rhs))
-     Xs[[eqns[[i]]]]<-model.matrix.default(rhs,data=data)
-      tlNew[[eqns[[i]]]]<-colnames(Xs[[eqns[[i]]]])
-      tlNew[[eqns[[i]]]]<-gsub("as.factor\\(.*\\)","",tlNew[[eqns[[i]]]])
-      colnames(Xs[[eqns[[i]]]])<-tlNew[[eqns[[i]]]]
-    }
-  }
-  depFactors<-attr(terms,"depFactors")
- 
-  if(!(is.logical(depFactors)))
-    depVars<- paste("as.factor(",depFactors[[1]],")",sep="")
-  #print(depVars)
-  lhs<-toBuildFormula(unique(unlist(depVars)))
-  if(!(is.null(lhs))){
-    lhs<-paste(lhs,"-1")
-    lhs<-as.formula(paste("~",lhs))
-    Ys<-model.matrix.default(lhs,data=data)
-    dvNew<-colnames(Ys)
-    dvNew<-gsub("as.factor\\(.*\\)","",dvNew)
-    colnames(Ys)<-dvNew
-  }
-  attr(terms,"term.labels")[names(tlNew)]<-tlNew
-  attr(terms,"depVars")[names(dvNew)]<-dvNew
-
-  ronames<-rownames(data)
-  ronr<-nrow(data)
-  Xnames<-unique(unlist(tlNew))
-  Ynames<-unique(unlist(dvNew))
-  if(!(is.logical(depFactors)))
-    Ynames<-c(depFactors[[2]],Ynames %w/o% depFactors[[2]])
-  X<-matrix(0,nrow=ronr,ncol=length(Xnames),dimnames=list(ronames,Xnames))
-  Y<-matrix(0,nrow=ronr,ncol=length(Ynames),dimnames=list(ronames,Ynames))
-  if(length(tlNew)>0)
-  for(i in 1:length(tlNew)){
-    xtmp<-intersect(tlNew[[i]],Xnames)
-    X[,xtmp]<-Xs[[i]][,xtmp]
-  }
-  Y<-Ys
-  my.data.frame<-as.data.frame(cbind(Y,X))
-  rhs<-toBuildFormula(Xnames)
-  if(!(is.null(rhs)))
-    rhs<-(paste("~",rhs))
-  else
-    rhs<-"~1"
-  cb<-FALSE
-  if(length(Ynames)>1){
-    lhs<-toBuildFormula(Ynames,",")
-    if (!(is.null(lhs))){
-      lhs<-paste("cbind(",lhs)
-      lhs<-paste(lhs,")")
-      cb<-TRUE
-    }
-  }else{
-    lhs=Ynames
-  }
-  lhs<-as.formula(paste(lhs,rhs))
-  Y<-model.frame.default(lhs,data=my.data.frame)
-  result=Y
-  if(cb)
-    names(result)[[1]]<-"response"
-  new.response<-attr(attr(result,"terms"),"response")
-  attr(terms,"response")<-new.response
-  attr(result,"terms")<-terms
-  class(result)<-c(class(result),"multiple")
-  return(result)
-
-}
-
-
-
diff --git a/R/model.matrix.multiple.R b/R/model.matrix.multiple.R
deleted file mode 100644
index 22f740d..0000000
--- a/R/model.matrix.multiple.R
+++ /dev/null
@@ -1,109 +0,0 @@
-#' Create Design Matrix of a \code{multiple} Object
-#'
-#' This method is used to generate a \code{model.matrix} adhering to the
-#' specifications in the help document "model.matrix".
-#' @usage
-#' \method{model.matrix}{multiple}(object,data,shape="compact",eqn=NULL,...)
-#' @note This method is scheduled to be deprecated.
-#' @param object an object of type \code{multiple}. This represents a Zelig 3.5
-#' formula
-#' @param data a \code{data.frame}
-#' @param shape a character-string specifying the shape of the matrix
-#' @param eqn the equation name(s) to include
-#' @param ... ignored parameters
-#' @S3method model.matrix multiple
-model.matrix.multiple <- function (object,data,shape="compact",eqn=NULL,...){
-  
-  intersect <- function(x, y) y[match(x, y, nomatch = 0)]
-
-  # only for multilevel
-  if(class(object)[[1]]=="terms"){
-    terms <-object
-  }else{
-    terms<-terms(object)
-  }
-  if(!(is.logical(attr(terms,"subs"))))
-    return(multilevel(tt=terms,data=data,eqn=eqn,mode=1))
-      
-  ##
-
-  if((shape != "compact") && (shape != "array") && (shape !="stacked"))
-    stop("wrong shape argument! Choose from \"compact\", \"array\" or \"stacked\" \n")
-  
-  if(!(any(class(object)=="multiple")))
-    stop("Please run first parse.formula() on your formula ...\n")
-  
-  if(!(any(class(data)=="multiple")))
-    data<-model.frame(object,data)
- 
- 
-  terms<-attr(data,"terms")
-  
-  whiche<-which(eqn %in% names(terms)==FALSE)
-  if (length(whiche)!=0)
-    stop("Unknown eqn name \"",eqn[whiche],"\"\n")
-  
-  intercAttr<-attr(terms,"intercept")           
-  systEqns<-attr(terms,"systEqns")
-  ancilEqns<-attr(terms,"ancilEqns")
-  
-  if (is.null(eqn))
-    eqn=systEqns
-  
- # if (!(all(eqn %in% systEqns)))
- #   stop("all eqn names should be from systematic parameters")
-  
-  termlabels<-attr(terms,"term.labels")[eqn]          
-  nrEquations<-length(eqn)
-  if (length(eqn)==1)
-    shape="compact"
-  
-  Xnames<-unique(unlist(termlabels))
-  
-  rhs<-toBuildFormula(Xnames)
-  if(!(is.null(rhs)))
-    rhs<-as.formula(paste("~",rhs))
-  else
-    rhs<-as.formula("~1")
-  
-  rawX<-model.matrix.default(rhs,data=data)
-  if (shape=="compact"){
-    result<-rawX
-    if(all(intercAttr==0)){
-      result<-result[,colnames(result)!="(Intercept)"]
-    }
-    attr(terms,"response")<-0
-    attr(result,"terms")<-terms
-    return(result)
-  }
-  
-  ronames<-rownames(data)
-  ronr<-nrow(data)
-  
-  parsMat<-make.parameters(terms, shape = "matrix", ancillary = FALSE,eqns=eqn)
-  parsVec <- unique(na.omit(c(t(parsMat))))
-  
-  result<-list()
-  result<-array(0,dim=c(ronr,length(parsVec),length(eqn)),dimnames=list(ronames,parsVec,eqn))
-  for(i in 1:nrEquations){
-    eqni<-eqn[[i]]
-    whiche<-which(is.na(parsMat[,eqni])==FALSE)
-    result[,,eqni][,parsMat[names(whiche),eqni]]<-rawX[,names(whiche)]
-  }
-  
-  if(shape=="array"){
-    res<-result
-  }
-  if(shape=="stacked"){
-    res<-result[,,eqn[[1]]]
-    if(length(eqn)>1)
-      for(i in 2:length(eqn))
-        res<-rbind(res,result[,,eqn[[i]]])
-    rownames(res)<-c(1:nrow(res))
-  }
-  attr(terms,"response")<-0
-  attr(res,"terms")<-terms
-  return(res)
-}
-
- 
diff --git a/R/model.matrix.parseFormula.R b/R/model.matrix.parseFormula.R
deleted file mode 100644
index 487a607..0000000
--- a/R/model.matrix.parseFormula.R
+++ /dev/null
@@ -1,32 +0,0 @@
-#' Construct Design Matrix from a Parsed, Zelig-style Formula
-#'
-#' This method constructs a design matrix from a Zelig-style formula. This
-#' matrix is commonly used in statistical simulation, and will likely be
-#' the relevant form of a \code{setx} object.
-#' @usage \method{model.matrix}{parseFormula}(object, data = NULL, ...)
-#' @note This method is primarily used by the \code{setx} function.
-#' @param object a "parseFormula" object
-#' @param data a "data.frame"
-#' @param ... ignored parameters
-#' @return a "model.matrix" specifying information relevant to a statistical
-#' model
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @S3method model.matrix parseFormula
-model.matrix.parseFormula <- function (object, data = NULL, ...) {
-
-#   if (is.null(object$model.matrix))
-#     # Note that if data is NULL, then "makeModelMatrix" will return NULL
-#     makeModelMatrix(formula(object), data)
-# 
-#   else if (!missing(data))
-#     # If data is supplied, recompute the model matrix
-#     makeModelMatrix(formula(object), data)
-# 
-#   else
-#     # Otherwise use the previous stored value (which still might be NULL)
-#     object$model.matrix
-
-
-  makeModelMatrix(formula(object), data)
-
-}
diff --git a/R/model.warnings.R b/R/model.warnings.R
deleted file mode 100644
index 65cc787..0000000
--- a/R/model.warnings.R
+++ /dev/null
@@ -1,71 +0,0 @@
-# This code is rough looking. It needs to be made more elegant
-# but R doesn't really support block quotes
-model.warnings <- function (model) {
-  # Get appropriate Zelig package
-  pkg <- get.package(model)
-
-  # Get the zelig2 interface function for this model, if it exists
-  zelig2 <- paste("zelig2", as.character(model), sep="")
-  zelig2 <- tryCatch(
-                     { get(zelig2, mode="function"); 1 },
-                     error = function (e) NA
-                     )
-
-  #
-  #
-  #
-  if (is.na(zelig2) && is.na(pkg)) {
-
-      msg <- '
-
-** The model "%s" is not available with the currently loaded packages,
-** and is not an official Zelig package.
-** The model\'s name may be a typo.
-
-'
-    message(sprintf(msg, model))
-
-  }
-
-
-  else if (is.na(zelig2) && !is.na(pkg)) {
-
-    if (pkg %in% .packages(TRUE)) {
-      # The package is available on the system
-
-      msg <- '
-
-** The model "%s" is not available with the currently loaded packages,
-** however it *is* installed on your system.
-**
-** To load this model\'s package, please type:
-library("%s")
-
-'
-      message(sprintf(msg, model, pkg))
-    }
-
-    #
-    #
-    #
-    else {
-      # Else... the package is not available on the system
-
-      repos <- "http://r.iq.harvard.edu/"
-      msg <- '
-
-** The model "%s" is not installed on your system,
-** however it *is* available for download from Harvard.
-**
-** To install and load this model\'s package, please type:
-install.packages("%s", repos="%s", type="source")
-library("%s")
-
-'
-      message(sprintf(msg, model, pkg, repos, pkg))
-    }
-  }
-
-
-  invisible()
-}
diff --git a/R/multi.dataset.R b/R/multi.dataset.R
deleted file mode 100644
index 9e31cdb..0000000
--- a/R/multi.dataset.R
+++ /dev/null
@@ -1,144 +0,0 @@
-# Make a ``multi.dataset'' Object
-# @param datasets a list containing data.frames
-# @param labels a character vector labeling indices of the dataset
-make.multi.dataset <- function (datasets, labels=NULL) {
-  md <- datasets
-
-  if (!missing(labels))
-    names(md) <- labels
-
-  # Set super important attributes
-  #attr(md, "something") <- "red"
-  class(md) <- "multi.dataset"
-
-  # Return object
-  md
-}
-
-# Multiple Dataset Object
-multi.dataset <- function (obj, ...) {
-  UseMethod("multi.dataset")
-}
-
-# Create a Multiple Dataset Object from a data.frame
-# @param obj a data.frame to convert
-# @return a ``multi.dataset'' object
-multi.dataset.data.frame <- function (obj, ...) {
-  # Place inside a list and label according to the name from the function call
-  label <- as.character(as.expression(substitute(obj)))
-  make.multi.dataset(list(obj), label)
-}
-
-# Create a Multiple Dataset Object from a data.frame
-# @param obj a list of data.frame's
-# @return a ``multi.dataset'' object
-multi.dataset.list <- function (obj, ...) {
-
-  # Iterate backwards through list, so that we can remove elements
-  for (k in length(obj):1) {
-    if (!is.data.frame(obj[[k]])) {
-      warning('"obj" contains an element that is not a data.frame... removing.')
-      obj[[k]] <- NULL
-    }
-  }
-
-  LABELS <- names(obj)
-
-  # If there are no labels, or they are uneven
-  if (is.null(LABELS) || length(LABELS) != length(obj))
-    LABELS <- paste("data-set-", 1:length(obj), sep = "")
-
-  # Otherwise, we have a nice matching of labels, but we might still have some
-  # that are empty
-  else {
-    for (k in 1:length(LABELS)) {
-      lab <- LABELS[k]
-
-      if (is.na(lab) || is.null(lab) || (is.character(lab) && nchar(lab) == 0))
-        LABELS[k] <- paste("data-set-", k, sep = "")
-    }
-  }
-
-  # Return object
-  make.multi.dataset(obj, LABELS)
-}
-
-# Create a Multiple Dataset Object from a data.frame
-# @param obj a list of data.frame's
-# @return a ``multi.dataset'' object
-multi.dataset.amelia <- function (obj, ...) {
-  data.frames <- obj$imputations
-  class(data.frames) <- NULL
-  make.multi.dataset(data.frames, names(data.frames))
-}
-
-# Divide a Data Frame or Matrix Into Subsets
-# @param obj a data.frame or matrix to be split into subsets, divided by the
-# categorical variable
-# @param by a character-string, specifying the column to subset
-# @return a list, containing the subsetted data sets. The names of the list
-# correspond to the values of the column used to subset
-divide <- function (obj, by) {
-
-  # Ensure that "obj" is valid (a data.frame or matrix)
-  if (!is.data.frame(obj) && !is.matrix(obj)) {
-    warning('"obj" is not a data.frame or matrix')
-    return(list(obj))
-  }
-
-  # Ensure that "by" is valid (a character-string)
-  if (!is.character(by) && length(by) == 1) {
-    warning('"by" is not a character-string')
-    return(list(obj))
-  }
-
-  # Ensure that "by" is a column in "obj"
-  if (! by %in% colnames(obj)) {
-    warning('"by" is not a valid column of "obj"')
-    return(list(obj))
-  }
-
-  # Get the set of possible values
-  column.levels <- if (is.factor(obj[, by]))
-    levels(obj[, by])
-  else
-    unique(obj[, by])
-
-
-  # A list used to store each individual data.frame
-  res <- list()
-
-  # Iterate through all possible values and store each subset in a separate
-  # entry in the list
-  for (val in column.levels) {
-    # Determine which rows match this value
-    hits <- obj[, by] == val
-
-    # Store data set temporarily in a local value
-    data.set <- obj[hits, ]
-
-    # Assign levels to the column. This adds levels to string data.
-    levels(data.set[, by]) <- column.levels
-
-    # Store data set in list
-    res[[val]] <- data.set
-  }
-
-  # Return list
-  res
-}
-
-# Print a ``multi.dataset'' Object
-# @param x a multi.dataset object, essentially a list of data.frames
-# @param ... parameters to pass to the print.data.frame object
-# @return x (invisibly)
-print.multi.dataset <- function (x, ...) {
-  for (key in names(x)) {
-    cat("label =", key, "\n")
-    print(x[[key]], ...)
-    cat("\n")
-  }
-
-  # Return printed object (invisibly)
-  invisible(x)
-}
diff --git a/R/multipleUtil.R b/R/multipleUtil.R
deleted file mode 100644
index 0c0ddc2..0000000
--- a/R/multipleUtil.R
+++ /dev/null
@@ -1,70 +0,0 @@
-#' Build Formula ???
-#' 
-#' This function builds a formula
-#' @param Xnames a character-vector
-#' @param sepp a separator (???)
-#' @return a character-string
-#' @author ???
-toBuildFormula<-function(Xnames,sepp="+"){
-  lng<-length(Xnames)
-  rhs<-NULL
-    if (lng!=0){
-      if(lng==1){
-        rhs=Xnames
-      }else{
-        for (j in 1:(lng-1)){
-          rhs<-paste(rhs,as.name(Xnames[[j]]))
-          rhs<-paste(rhs,sepp)
-        }
-        rhs<-paste(rhs,Xnames[[lng]])
-      }
-    }
-    return (rhs)
-  }
-
-
-#' Multilevel
-#' 
-#' This function currently has no documentation, but is essential in Zelig 3.5's
-#' implementation of formulae.
-#' @param tt a terms object
-#' @param data a \code{data.frame}
-#' @param mode ???
-#' @param eqn the equation (name or index) for which to build the result
-#' @param ... ignored parameters
-#' @return a list with the "terms" attribute specified
-#' @author Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
-multilevel<-function(tt,data,mode,eqn,...){
-  if(!(mode %in% c(1,2)))
-    stop("Wrong mode argument")
-  if(is.null(eqn))
-    stop("Please provide an equations")
-res<-list()
-  eqns<-attr(tt,"systEqns")
-  subs<-attr(tt,"subs")
-depVars<-attr(tt,"depVars")
-
-  nrEquations<-length(eqns)
-  termlabels<-attr(tt,"term.labels")
-#for(i in 1:nrEquations){
-  rhs<-toBuildFormula(termlabels[[eqn]],"+")
-  if(!is.null(rhs))
-    rhs<-paste("~",rhs)
-  else
-    rhs<-"~1"
-  Ynamei<-depVars[[eqn]]
-  if(!(Ynamei %in% colnames(subs)))
-    lhs<-Ynamei
-  else
-    lhs<-NULL
-  f<-as.formula(paste(lhs,rhs))
-  if(mode==1)
-    res<-model.matrix.default(f,data)
-  #    res[[eqns[[i]]]]<-f
-  else
-    res<-model.frame.default(f,data)
- # res[[eqns[[i]]]]<-f
-#}
-attr(res,"terms")<-tt
-return(res)
-}
diff --git a/R/names.relogit.R b/R/names.relogit.R
deleted file mode 100644
index e69de29..0000000
diff --git a/R/negbinom.R b/R/negbinom.R
deleted file mode 100644
index 3edac11..0000000
--- a/R/negbinom.R
+++ /dev/null
@@ -1,119 +0,0 @@
-#' Interface between negbinom model and Zelig
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param weights a numeric vector
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2negbinom <- function(formula, weights=NULL, ..., data)
-  z(
-    .function = "glm.nb",
-    .hook = robust.glm.hook,
-
-    weights = weights,
-    formula = formula,
-    data    = data
-    )
-#' Param Method for the 'negbinom' Zelig Model
-#' @note This method is used by the 'negbinom' Zelig model
-#' @usage \method{param}{negbinom}(obj, num=1000, ...)
-#' @S3method param negbinom
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.negbinom <- function(obj, num=1000, ...) {
-  list(
-       simulations = mvrnorm(num, mu=coef(.fitted), Sigma=vcov(.fitted)),
-       alpha = .fitted$theta,
-       link = function (e) e,
-       linkinv = function (e) e
-       )
-}
-#' Compute quantities of interest for 'negbinom' Zelig models
-#' @usage \method{qi}{negbinom}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi negbinom
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#'   though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#'   interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.negbinom <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-  #
-  coef <- coef(param)
-  alpha <- alpha(param)
-
-  # get inverse function
-  inverse <- obj[["family", "linkinv"]]
-
-  #
-  eta <- coef %*% t(x)
-  theta <- matrix(inverse(eta), nrow=nrow(coef))
-
-  # ...
-  ev <- theta
-  pr <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
-
-  # default values
-  ev1 <- pr1 <- fd <- NA
-
-  #
-  for (i in 1:ncol(ev))
-    pr[,i] <- rnegbin(nrow(ev), mu = ev[,i], theta = alpha)  # draws for observation i across simulations; alpha is the scalar dispersion
-
-
-  if (!is.null(x1)) {
-
-    # quantities of interest
-    results <- qi(obj=obj, x=x1, num=num, param=param)
-
-    # pass values over
-    ev1 <- results[["Expected Values: E(Y|X)"]]
-    pr1 <- results[["Predicted Values: Y|X"]]
-
-    # compute first differences
-    fd <- ev1 - ev
-  }
-
-  # Return quantities of interest, paired off with their titles
-  list("Expected Values: E(Y|X)"  = ev,
-       "Expected Values: E(Y|X1)" = ev1,
-       "Predicted Values: Y|X"    = pr,
-       "Predicted Values: Y|X1"   = pr1,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-#' Describe the \code{negbinom} model to Zelig
-#' @note \code{negbinom} stands for "negative binomial"
-#' @usage \method{describe}{negbinom}(...)
-#' @S3method describe negbinom
-#' @param ... ignored parameters
-#' @return a list to be processed by \code{as.description}
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.negbinom <- function(...) {
-  # parameters object
-  parameters <- list(pi = list(
-                       equations = c(1, 1),
-                       tags.allowed = FALSE,
-                       dep.var = TRUE,
-                       exp.var = TRUE
-                       )
-                     )
-
-  # return list
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2008,
-       category = "count",
-       parameters = parameters,
-       text = "Negative Binomial Regression for Event Count Dependent Variables"
-       )
-}
diff --git a/R/normal.R b/R/normal.R
deleted file mode 100644
index 7a08a37..0000000
--- a/R/normal.R
+++ /dev/null
@@ -1,122 +0,0 @@
-#' Interface between normal model and Zelig
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param weights a numeric vector
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2normal <- function(formula, weights=NULL, ..., data)
-  z(
-    glm,
-    # .hook = "robust.glm.hook",
-    formula = formula,
-    weights = weights,
-    family  = gaussian,
-    model   = F,
-    data    = data
-    )
-#' Param Method for the 'normal' Zelig Model
-#' @note This method is used by the 'normal' Zelig model
-#' @usage \method{param}{normal}(obj, num=1000, ...)
-#' @S3method param normal
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.normal <- function(obj, num=1000, ...) {
-  degrees.freedom <- .fitted$df.residual
-  sig2 <- summary(.fitted)$dispersion
-
-  list(
-       simulations = mvrnorm(n=num, mu=coef(.fitted), Sigma=vcov(.fitted)),
-       alpha = sqrt(degrees.freedom * sig2 / rchisq(num, degrees.freedom)),
-       link = function (x) x,
-       linkinv = function (x) x
-       )
-}
-#' Compute quantities of interest for 'normal' Zelig models
-#' @usage \method{qi}{normal}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi normal
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#'   though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#'   interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.normal <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-  # get `num` samples from the underlying distribution
-  coef <- coef(param)
-  alpha <- alpha(param)
-
-  # theta = eta, because inverse of 
-  # normal models' link function is
-  # the identity
-  theta <- matrix(coef %*% t(x), nrow=nrow(coef))
-
-  #
-  pr <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
-
-  #
-  ev <- theta
-  ev1 <- pr1 <- fd <- NA
-  
-  for (i in 1:nrow(ev))
-    pr[i,] <- rnorm(ncol(ev), mean = ev[i,], sd = alpha[i])
-
-
-  # if x1 is not NULL, run more simultations
-  # ...
-
-  if (!is.null(x1)) {
-
-    # quantities of interest
-    lis1 <- qi(obj, x1, num=num, param=param)
-
-    # pass values over
-    ev1 <- lis1[[1]]
-    pr1 <- lis1[[3]]
-
-    # compute first differences
-    fd <- ev1 - ev
-  }
-
-  # return
-  list("Expected Values: E(Y|X)"  = ev,
-       "Expected Values: E(Y|X1)" = ev1,
-       "Predicted Values: Y|X"    = pr,
-       "Predicted Values: Y|X1"   = pr1,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-#' Describe the \code{normal} model to Zelig
-#' @usage \method{describe}{normal}(...)
-#' @S3method describe normal
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.normal <- function(...) {
-  # parameters object
-  parameters <- list(pi = list(
-                       equations = c(1, 1),
-                       tags.allowed = FALSE,
-                       dep.var = TRUE,
-                       exp.var = TRUE
-                       )
-                     )
-
-  # return list
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2008,
-       category = "continuous",
-       parameters = parameters,
-       text = "Normal Regression for Continuous Dependent Variables"
-       )
-}
diff --git a/R/normal.bayes.R b/R/normal.bayes.R
deleted file mode 100644
index fde4514..0000000
--- a/R/normal.bayes.R
+++ /dev/null
@@ -1,90 +0,0 @@
-#' Interface between the Zelig Model normal.bayes and the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2normal.bayes <- function (
-                               formula, 
-                               burnin = 1000, mcmc = 10000, 
-                               verbose = 0, 
-                               ..., 
-                               data
-                               ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  list(
-       .function = "MCMCregress",
-       .hook = "MCMChook",
-
-       formula = formula,
-       data   = data,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-
-       # Most parameters can be simply passed forward
-       ...
-       )
-}
-
-#' @S3method param normal.bayes
-param.normal.bayes <- function(obj, num=1000, ...) {
-  list(
-       coef = coef(obj),
-       linkinv = gaussian()
-       )
-}
-
-#' @S3method qi normal.bayes
-qi.normal.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-  normal.ev <- function (x, param) {
-    # If either of the parameters are invalid,
-    # Then return NA for both qi's
-    if (is.null(x) || is.na(x) || is.null(param))
-      return(list(ev=NA, pv=NA))
-
-    # Extract simulated parameters and get column names
-    coef <- coef(param)
-    cols <- colnames(coef)
-
-    # Place the simulated variances in their own vector
-    sigma2 <- coef[, ncol(coef)]
-
-    # Remove the "sigma2" (variance) parameter which should already be placed
-    # in the simulated parameters
-    cols <- cols[ ! "sigma2" == cols ]
-    
-    #
-    coef <- coef[, cols]
-
-    #
-    ev <- coef %*% t(x)
-    pv <- rnorm(nrow(ev), ev, sqrt(sigma2))
-
-    #
-    list(ev = ev, pv = pv)
-  }
-
-  res1 <- normal.ev(x, param)
-  res2 <- normal.ev(x1, param)
-
-  list(
-       "Expected Value: E(Y|X)" = res1$ev,
-       "Predicted Value: Y|X" = res1$pv,
-       "Expected Value (for X1): E(Y|X1)" = res2$ev,
-       "Predicted Value (for X1): Y|X1" = res2$pv,
-       "First Differences: E(Y|X1) - E(Y|X)" = res2$ev - res1$ev
-       )
-}
-
-
-#' @S3method describe normal.bayes
-describe.normal.bayes <- function(...) {
-  list(
-       authors = c("Ben Goodrich", "Ying Lu"),
-       text = "Bayesian Normal Linear Regression",
-       year = 2013
-       )
-}
diff --git a/R/normal.gee.R b/R/normal.gee.R
deleted file mode 100644
index 19fb0a4..0000000
--- a/R/normal.gee.R
+++ /dev/null
@@ -1,71 +0,0 @@
-#' Interface between the Zelig Model normal.gee and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param id a character-string specifying the column of the data-set to use
-#'   for clustering
-#' @param robust a logical specifying whether to robustly or naively compute
-#'   the covariance matrix. This parameter is ignored in the \code{zelig2}
-#'   method, and instead used in the \code{robust.hook} function, which
-#'   executes after the call to the \code{gee} function
-#' @param ... ignored parameters
-#' @param R a square-matrix specifying the correlation
-#' @param corstr a character-string specifying the correlation structure
-#' @param data a data.frame 
-#' @return a list specifying the call to the external model
-#' @export
-zelig2normal.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {
-
-  loadDependencies("gee")
-
-  if (corstr == "fixed" && is.null(R))
-    stop("R must be defined")
-
-  # if id is a valid column-name in data, then we just need to extract the
-  # column and re-order the data.frame and cluster information
-  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
-    id <- data[, id]
-    data <- data[order(id), ]
-    id <- sort(id)
-  }
-
-  z(
-    .function = gee,
-    .hook = robust.gee.hook,
-
-    formula = formula,
-    id = id,
-    corstr = corstr,
-    family  = gaussian(),
-    R = R,
-    data = data,
-    ...
-    )
-}
-
-#' @S3method param normal.gee
-param.normal.gee <- function(obj, num=1000, ...) {
-
-  # Extract means to compute maximum likelihood
-  mu <- coef(obj)
-
-  # Extract covariance matrix to compute maximum likelihood
-  Sigma <- vcov(obj)
-
-  #
-  list(
-       coef = mvrnorm(num, mu, Sigma),
-       linkinv = function (x) x
-       )
-}
-
-#' @S3method qi normal.gee
-qi.normal.gee <- qi.gamma.gee
-
-#' @S3method describe normal.gee
-describe.normal.gee <- function(...) {
-  list(
-       authors = "Patrick Lam",
-       text = "General Estimating Equation for Normal Regression",
-       year = 2011
-       )
-}
diff --git a/R/normal.survey.R b/R/normal.survey.R
deleted file mode 100644
index 5f37d5e..0000000
--- a/R/normal.survey.R
+++ /dev/null
@@ -1,161 +0,0 @@
-#' @export
-zelig2normal.survey <- function(
-                               formula,
-                               weights=NULL, 
-                               ids=NULL,
-                               probs=NULL,
-                               strata = NULL,  
-                               fpc=NULL,
-                               nest = FALSE,
-                               check.strata = !nest,
-                               repweights = NULL,
-                               type,
-                               combined.weights=FALSE,
-                               rho = NULL,
-                               bootstrap.average=NULL, 
-                               scale=NULL,
-                               rscales=NULL,
-                               fpctype="fraction",
-                               return.replicates=FALSE,
-                               na.action="na.omit",
-                               start=NULL,
-                               etastart=NULL, 
-                               mustart=NULL,
-                               offset=NULL, 	      		
-                               model1=TRUE,
-                               method="glm.fit",
-                               x=FALSE,
-                               y=TRUE,
-                               contrasts=NULL,
-                               design=NULL,
-                               data
-                               ) {
-
-  loadDependencies("survey")
-
-  if (is.null(ids))
-    ids <- ~1
-
-  # the following lines designate the design
-  # NOTE: nothing truly special goes on here;
-  #       the below just makes sure the design is created correctly
-  #       for whether or not the replication weights are set
-  design <- if (is.null(repweights))
-    svydesign(
-              data=data,
-              ids=ids,
-              probs=probs,
-              strata=strata,
-              fpc=fpc,
-              nest=nest,
-              check.strata=check.strata,
-              weights=weights
-              )
-
-  else {
-    .survey.prob.weights <- weights
-    
-    svrepdesign(
-                data=data,
-                repweights=repweights, 	
-                type=type,
-                weights=weights,
-                combined.weights=combined.weights, 
-                rho=rho,
-                bootstrap.average=bootstrap.average,
-                scale=scale,
-                rscales=rscales,
-                fpctype=fpctype,
-                fpc=fpc
-                )
-  }
-
-  
-  z(.function = svyglm,
-    formula = formula,
-    design  = design
-    )
-}
-
-  
-#' @S3method param normal.survey
-param.normal.survey <- function(obj, num=1000, ...) {
-  df <- obj$result$df.residual
-  sig2 <- summary(obj)$dispersion
-  
-  list(
-       simulations = mvrnorm(num, coef(obj), vcov(obj)),
-       alpha = sqrt(df*sig2/rchisq(num, df=df)),
-
-       # note: assignment of link and link-inverse are
-       #       implicit when the family is assigned
-       fam   = gaussian()
-       )
-}
-#' @S3method qi normal.survey
-qi.normal.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-  model <- GetObject(obj)
-
-  coef <- coef(param)
-  alpha <- alpha(param)
-
-  eta <- coef %*% t(x)
-
-  link.inverse <- linkinv(param)
-
-  theta <- matrix(link.inverse(eta), nrow=nrow(coef))
-
-  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))
-
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-
-
-  ev <- theta
-
-
-  for (k in 1:nrow(ev))
-    pr[k, ] <- rnorm(length(ev[k, ]), ev[k,], alpha[k])
-
-
-
-  ev1 <- pr1 <- fd <- NA
-
-  if (!is.null(x1)) {
-    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)),
-                            nrow = nrow(coef)
-                            )
-
-    fd <- ev1-ev
-  }
-
-  att.ev <- att.pr <- NA
-
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(coef)), nrow=nrow(coef), byrow=TRUE)
-
-    tmp.ev <- yvar - ev
-    tmp.pr <- yvar - pr
-
-    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow=nrow(coef))
-    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow=nrow(coef))
-  }
-
-
-  list(
-       "Expected Values: E(Y|X)" = ev,
-       "Expected Values for (X1): E(Y|X1)" = ev1,
-       "Predicted Values: Y|X" = pr,
-       "Predicted Values (for X1): Y|X1" = pr1,
-       "First Differences E(Y|X1)-E(Y|X)" = fd,
-       "Average Treatment Effect: Y-EV" = att.ev,
-       "Average Treatment Effect: Y-PR" = att.pr
-       )
-}
-#' @S3method describe normal.survey
-describe.normal.survey <- function(...) {
-  list(
-       authors = "Nicholas Carnes",
-       year = 2008,
-       description = "Survey-Weighted Normal Regression for Continuous, Positive Dependent Variables"
-       )
-}
diff --git a/R/oprobit.bayes.R b/R/oprobit.bayes.R
deleted file mode 100644
index 354b77f..0000000
--- a/R/oprobit.bayes.R
+++ /dev/null
@@ -1,140 +0,0 @@
-#' @export
-zelig2oprobit.bayes <- function (
-                               formula, 
-                               burnin = 1000, mcmc = 10000, 
-                               verbose=0, 
-                               ..., 
-                               data
-                               ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  if (missing(verbose))
-    verbose <- round((mcmc + burnin)/10)
-
-  list(
-       .function = "MCMCoprobit",
-       .hook = "MCMChook",
-
-       formula = formula,
-       data   = data,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-
-       # Most parameters can be simply passed forward
-       ...
-       )
-}
-#' @S3method param oprobit.bayes
-param.oprobit.bayes <- function(obj, num=1000, ...) {
-
-  # Produce the model matrix in order to get all terms (explicit and implicit)
-  # from the regression model.
-  mat <- model.matrix(obj$result, data=obj$data)
-
-  # Response Terms
-  p <- ncol(mat)
-
-  # All coefficients
-  coefficients <- coef(obj)
-
-  # Coefficients for predictor variables
-  beta <- coefficients[, 1:p]
-
-  # Middle values of "gamma" matrix
-  mid.gamma <- coefficients[, -(1:p)]
-
-  # number of outcome levels implied by the number of cutpoints
-  level <- ncol(coefficients) - p + 2
-
-
-  # Initialize the "gamma" parameters
-  gamma <- matrix(NA, nrow(coefficients), level + 1)
-
-  # The first, second and last values are fixed
-  gamma[, 1] <- -Inf
-  gamma[, 2] <- 0
-  gamma[, ncol(gamma)] <- Inf
-
-  # All others are determined by the coef-matrix (now stored in mid.gamma)
-  if (ncol(gamma) > 3)
-    gamma[, 3:(ncol(gamma)-1)] <- mid.gamma
-
-  # return
-  list(
-       simulations = beta,
-       alpha   = gamma,
-       linkinv = NULL
-       )
-}
-#' @S3method qi oprobit.bayes
-qi.oprobit.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-{
-  labels <- levels(model.response(model.frame(obj$result)))
-
-  res1 <- compute.oprobit.bayes(x, param, labels)
-  res2 <- compute.oprobit.bayes(x1, param, labels)
-
-  # 
-  list(
-       "Expected Value: E(Y|X)" = res1$ev,
-       "Predicted Value: Y|X"   = res1$pv,
-       "Expected Value (for X1): E(Y|X1)" = res2$ev,
-       "Predicted Value (for X1): Y|X1"   = res2$pv,
-       "First Differences: E(Y|X1) - E(Y|X)" = res2$ev - res1$ev
-       )
-}
-# Helper function used to generate expected values
-compute.oprobit.bayes <- function (x, param, labels) {
-  # If either of the parameters are invalid,
-  # Then return NA for both qi's
-  if (is.null(x) || is.na(x) || is.null(param))
-    return(list(ev=NA, pv=NA))
-
-
-  # Extract simulated parameters
-  beta <- coef(param)
-  gamma <- alpha(param)
-
-  # x is implicitly cast into a matrix
-  eta <- beta %*% t(x)
-
-  # **TODO: Sort out sizes of matrices for these things.
-  ev <- array(NA, c(nrow(eta), ncol(gamma) - 1, ncol(eta)))
-  pv <- matrix(NA, nrow(eta), ncol(eta))
-
-  # Compute Expected Values
-  # ***********************
-  # Note that the inverse link function is:
-  #   pnorm(gamma[, j+1]-eta) - pnorm(gamma[, j]-eta)
-  for (j in 1:(ncol(gamma)-1)) {
-    ev[, j, ] <- pnorm(gamma[, j+1]-eta) - pnorm(gamma[, j]-eta)
-  }
-
-  colnames(ev) <- labels
-
-
-  # Compute Predicted Values
-  # ************************
-  for (j in 1:nrow(pv)) {
-    mu <- eta[j, ]
-    pv[j, ] <- as.character(cut(mu, gamma[j, ], labels=labels))
-  }
-
-
-  # **TODO: Update summarize to work with at most 3-dimensional arrays
-  ev <- ev[, , 1]
-
-
-  # Return
-  list(ev = ev, pv = pv)
-}
-#' @S3method describe oprobit.bayes
-describe.oprobit.bayes <- function(...) {
-  list(
-       text = "Bayesian Probit Regression for Dichotomous Dependent Variables",
-       authors = c("Ben Goodrich", "Ying Lu"),
-       year = 2013
-       )
-}
diff --git a/R/param.R b/R/param.R
deleted file mode 100644
index 942a621..0000000
--- a/R/param.R
+++ /dev/null
@@ -1,60 +0,0 @@
-#' The \code{param} method is used by developers to specify simulated and fixed
-#' ancillary parameters of the Zelig statistical model. That is, this method
-#' is used between the \link{zelig2} function and the \link{qi}
-#' as a helper function that specifies all the necessary details needed to 
-#' simulate quantities of interest, given the fitted statistical model produced
-#' by the \code{zelig2} function.
-#'
-#' @title Generic Method for Simulating Ancillary/Auxiliary Parameters of Zelig
-#'   Models
-#' @note The 'param' function is a method meant to be overloaded by Zelig
-#'   Developers
-#' @param obj a \code{zelig} object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... optional parameters which will likely be ignored
-#' @return
-#'   The main purpose of the \code{param} function is to return a list of 
-#'   key-value pairs, specifying information that should be shared between
-#'   the \code{qi} function and the fitted statistical model (produced by the
-#'   \code{zelig2} function). This list can contain the following entries:
-#'
-#'   \item{\code{simulations}}{specifies a set of simulated parameters used to
-#'     describe the statistical model's underlying distribution}
-#'   \item{\code{alpha}}{specifies the fixed (non-simulated) ancillary
-#'     parameters used by the statistical model's underlying distribution}
-#'   \item{\code{family}}{specifies a family object used to implicitly define
-#'     the \code{link} and \code{linkinv} functions. That is, this specifies
-#'     the "link" and "inverse link" functions of generalized linear models}
-#'   \item{\code{link}}{specifies the \code{link} function to be used. This 
-#'     parameter is largely unimportant compared to the "inverse link"
-#'     function}
-#'   \item{\code{linkinv}}{specifies the \code{linkinv} function to be used.}
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @examples
-#' param.some.model <- function (obj, num, ...) {
-#'   list(
-#'        simulations = NULL,
-#'        alpha = NULL,
-#'        link = NULL,
-#'        linkinv = NULL,
-#'        fam = NULL
-#'        )
-#' }
-param <- function (obj, num, ...)
-  UseMethod("param")
-
-
-#' Default Method for ``param''
-#'
-#' If no \code{param} function is set for a Zelig model, then this function will
-#' return NULL.
-#' @usage \method{param}{default}(obj, num, ...)
-#' @S3method param default
-#' @param obj ignored parameter
-#' @param num ignored parameter
-#' @param ... ignored parameters
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.default <- function (obj, num, ...)
-  list()
diff --git a/R/parameters.R b/R/parameters.R
deleted file mode 100644
index c5f9807..0000000
--- a/R/parameters.R
+++ /dev/null
@@ -1,132 +0,0 @@
-#' Constructor for `parameters' class
-#'
-#'
-#' @param simulations a vector or matrix containing simulated values
-#' @param alpha ancillary parameters for the Zelig statistical model
-#' @param fam a family object which implicitly specifies the link
-#'            and link-inverse functions for the statistical model
-#' @param link the link function of the specified statistical model.
-#'             The `linkinv' parameter is implicitly defined by
-#'             the `link' parameter, when `linkinv' is omitted
-#' @param linkinv the inverse link function
-#' @return a `parameters' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-parameters <- function(simulations,
-                       alpha,
-                       fam=NULL,
-                       link=NULL,
-                       linkinv=NULL
-                       )
-{
-  if (is.function(fam))
-    fam <- fam()
-
-  #
-  if (!missing(fam) && isS4(fam)) {
-    link <- fam@link
-    linkinv <- fam@inverse
-  }
-  else if (!missing(fam) && inherits(fam, "family")) {
-    link <- fam$linkfun
-    linkinv <- fam$linkinv
-  }
-  else if (missing(link)) {
-    #warning("no link function")
-  }
-
-  else if (missing(linkinv)) {
-    #warning("no inverse link function")
-    linkinv <- .NumInverse(link)
-  }
-
-  # Construct object
-  p <- list(coefficients = simulations,
-            alpha = alpha,
-            link = link,
-            linkinv = linkinv
-            )
-
-  # cast, and return
-  class(p) <- "parameters"
-  p  
-}
-
-
-#' Extract ancillary parameters from
-#' `parameters' objects
-#'
-#' @param param a `parameters' object
-#' @return the ancillary parameters \emph{specified} for
-#'         the statistical model
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-alpha <- function(param)
-  param$alpha
-
-
-#' Return Simulations of Parameter Coefficients
-#'
-#' Returns simulated parameters of coefficients for use in statistical 
-#' simulation. The values are set by the model-fitting function and the 
-#' developer of the qi.<model name> method.
-#'
-#' @note This function may not differ at all from coef.default
-#' @usage \method{coef}{parameters}(object, ...)
-#' @S3method coef parameters
-#' @param object a 'parameters' object
-#' @param \dots ignored
-#' @return simulations, specified by the Zelig model, of
-#'         the ancillary parameters
-#' @export 
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-coef.parameters <- function(object, ...) {
-  object$coefficients
-}
-  
-#' Return Simulations of Parameter Coefficients
-#'
-#' Returns simulated parameters of coefficients for use in statistical 
-#' simulation. The values are set by the model-fitting function and the 
-#' developer of the qi.<model name> method.
-#'
-#' @note This function does not differ at all from coef.default
-#' @usage \method{simulations}{parameters}(object, ...)
-#' @S3method simulations parameters
-#' @param object a 'parameters' object
-#' @param \dots ignored
-#' @return simulations, specified by the Zelig model, of
-#'         the ancillary parameters
-#' @export 
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-simulations.parameters <- function(object, ...)
-  object$coefficients
-
-
-#' Method for extracting the link function from 'parameters' objects
-#' @param param a 'parameters' object
-#' @return the link function specified by the `param' function for the given 
-#' Zelig model
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-link <- function(param)
-  param$link
-
-
-#' Method for extracting the inverse link function from 'parameters' objects
-#'
-#' Returns the inverse link function of a ``parameters'' object. If the
-#' model's developer did not specify one (but did specify a link function) this
-#' function returns a numerical approximation of the link function.
-#' @param param a 'parameters' object
-#' @return the inverse link function specified by the 'param' function for the
-#' given Zelig model
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-linkinv <- function(param) {
-  if (is.null(param$linkinv))
-    .NumInverse(param$link)
-
-  else
-    param$linkinv
-}
diff --git a/R/parse.formula.R b/R/parse.formula.R
deleted file mode 100644
index f3c7fe7..0000000
--- a/R/parse.formula.R
+++ /dev/null
@@ -1,319 +0,0 @@
-#' Parse Formulas for Zelig Models
-#' @note This is used typically in multinomial and multivariate Zelig models
-#' @param formula a formula
-#' @param model a Zelig model
-#' @param data a data-frame
-#' @export
-#' @author Kosuke Imai and Olivia Lau
-parse.formula<-function( formula, model,data=NULL){
-        if(class(formula)[[1]]=="multiple")
-          return(formula)
-        nrUserOpt<-nrUserReq<-nrUserFixed<-nrUserSubreq<-0
-        userOpt<-userReq<-userFixed<-userSubreq<-list()
-        
-        fc <- paste("describe.", model, sep = "")
-        if (!exists(fc))
-          stop("describe.",model," does not exsist")
-        modelReq<-do.call(fc,list())
-        modelReq <-modelReq$parameters
-        
-        checkNrReq<-function(modelNumEqn,nrUserReq,modelParsReq){
-                if(length(modelNumEqn)==1){
-                        if(nrUserReq != modelNumEqn)
-                          stop("The parameter \"",modelParsReq,"\" requires ",modelNumEqn, " equation(s).
-            You have provided ", nrUserReq, " See model doc. for more details")
-                }else{
-                        if(!(betweenf(nrUserReq,modelNumEqn)))
-                          stop("The parameter \"",modelParsReq,"\" requires between ",modelNumEqn[[1]],"
-            and ",modelNumEqn[[2]], " equation(s). You have provided ", nrUserReq, " See model doc. for more details")    
-                }
-        }
-        
-        
-        checkNrOpt<-function(modelNumEqn,nrUserOpt,modelParsOpt,userOpt){
-                if(!(betweenf(nrUserOpt,modelNumEqn)))
-                  if(nrUserOpt < modelNumEqn[[1]]){
-                          if(modelNumEqn[[1]]==1)
-                            userOpt[[modelParsOpt]]<- as.formula("~1")
-                          else
-                            for(i in (nrUserOpt+1):modelNumEqn[[1]]){
-                                    userOpt[[i]]<-as.formula("~1")
-                                    names(userOpt)[[i]]<-paste(modelParsOpt,i,sep="")
-                            }
-                  }else
-                stop("The parameter \"",modelParsOpt,"\" requires between ",modelNumEqn[[1]]," and ",modelNumEqn[[2]], " equation(s). You have provided ", nrUserOpt, " See model doc. for more details")    
-                
-                return(userOpt)
-        }
-        
-        betweenf<-function(a,range){
-                if (is.finite(range[[2]]))
-                  return(a >= range[[1]] && a<=range[[2]])
-                else
-                  return(a>=range[[1]])
-        }
-        
-        "%w/o%" <- function(x,y) x[!x %in% y]
-        
-        matchPars<-function(parName,userNames){
-                res<-c()
-                for(i in 1:length(userNames)){
-                        a<-substr(userNames[[i]],nchar(parName)+1,nchar(userNames[[i]]))
-                        b<-substr(userNames[[i]],1,nchar(parName))
-                        if(b==parName && (!is.na(suppressWarnings(as.numeric(a))) || userNames[[i]]==parName))
-                          res<-c(res,userNames[[i]])
-                }
-                return (res)
-        }
-        
-        fMode<-function(b){
-                if(b$depVar == TRUE && b$expVar == TRUE) return (1)
-                if(b$depVar == FALSE && b$expVar == TRUE) return (2)
-                if(b$depVar == FALSE && b$expVar == FALSE) return (3)
-                if(b$depVar == TRUE && b$expVar == FALSE) return (4)
-                stop("some error occurred ... please contact the Zelig team")
-  }
-        
-        parsType<-lapply(modelReq,fMode)
-        modelParsReq<-names(parsType[parsType==1])
-        modelParsOpt<-names(parsType[parsType==2])
-        modelParsFixed<-names(parsType[parsType==3])
-        modelParsSubreq<-names(parsType[parsType==4])
-        
-        modelNrParsReq<-length(modelParsReq)
-        modelNrParsOpt<-length(modelParsOpt)
-        modelNrParsFixed<-length(modelParsFixed)
-        modelNrParsSubreq<-length(modelParsSubreq)
-        
-        userNrLevels<-0
-        dataNrLevels<-0
-        userLevels<-c()
-        
-        if(class(formula)[[1]]=="formula")
-          formula<-list(formula)
-        
-        nreqns <-length(formula)                      
-        
-        if(is.null(names(formula))){
-                if(modelNrParsReq >1)         
-                  stop("You should name the equations. The model requires more than 1 systematic component. Please see model documentation for more details")
-                for (i in 1:nreqns){
-                        eqni<-formula[[i]]
-                        if (length(eqni)==3){                            
-                                rootNames<-modelParsReq                    
-                                lhs<-eqni[[2]]
-                                rhs<-deparse(eqni[[3]],width.cutoff=500)
-                                if(length(lhs)>1 && (lhs[[1]]=="cbind" || lhs[[1]]=="as.factor" || lhs[[1]]=="id")){
-                                        if( lhs[[1]]=="cbind"){
-                                        #rhs=deparse(rhs)
-                                                g<- as.list(lhs)[-1]
-                                                for (j in 1:length(g)){
-                                                        e<-paste(g[[j]],"~",sep="")
-                                                        if(rhs!="1"){
-                                                                nrUserReq=nrUserReq+1
-                                                                userReq[[nrUserReq]]<-as.formula(paste(e,rhs,sep=""))
-                                                        }else{
-                                                                nrUserSubreq=nrUserSubreq+1
-                                                                userSubreq[[nrUserSubreq]]<-as.formula(paste(e,rhs,sep=""))
-                                                        }
-                                                }    
-                                        }else{
-                                                if(is.null(data))
-                                                  stop("Data argument is required when you use as.factor() or id() as a dependent variable\n")
-                                                if(lhs[[1]]=="as.factor"){
-                                                        varname<-as.character(lhs[[2]])
-                                                        userLevels<-levels(as.factor(data[[varname]]))[-1]
-                                                        userNrLevels<-length(userLevels)
-                                                        for (j in 1:userNrLevels){
-                                                                e<-paste("id(",lhs[[2]],",\"",userLevels[[j]],"\")","~",sep="")
-                                                                if(rhs!="1"){
-                                                                        nrUserReq=nrUserReq+1
-                                                                        userReq[[nrUserReq]]<-as.formula(paste(e,rhs,sep=""))
-                                                                }else{
-                                                                        nrUserSubreq=nrUserSubreq+1
-                                                                        userSubreq[[nrUserSubreq]]<-as.formula(paste(e,rhs,sep=""))
-                                                                }
-                                                        }     
-                                                }else{  
-                                                        varname<-as.character(lhs[[2]])
-                                                        userLevels<-c(userLevels,lhs[[3]])
-                                                        userNrLevels<-length(userLevels)
-                                                        levels<-levels(data[[varname]])
-                                                        lhs<-deparse(lhs)
-                                        #  rhs<-deparse(rhs)
-                                                        e<-paste(lhs,"~",sep="")
-                                                        if(rhs !="1"){
-                                                                nrUserReq=nrUserReq+1
-                                                                userReq[[nrUserReq]]<-as.formula(paste(e,rhs,sep=""))
-                                                        }else{
-                                                                nrUserSubreq<-nrUserSubreq+1
-                                                                userSubreq[[nrUserSubreq]]<-as.formula(paste(e,rhs,sep=""))
-                                                        }
-                                                }
-                                        }
-                                }else{ 
-                                        lhs<-deparse(lhs)
-                                        #  rhs<-deparse(rhs)
-                                        e<-paste(lhs,"~",sep="")
-                                        if(rhs !="1"){
-                                                nrUserReq=nrUserReq+1
-                                                userReq[[nrUserReq]]<-as.formula(paste(e,rhs,sep=""))
-                                        }else{
-                                                nrUserSubreq<-nrUserSubreq+1
-                                                userSubreq[[nrUserSubreq]]<-as.formula(paste(e,rhs,sep=""))
-                                        }
-                                }
-                        }else{                            
-                                rhs<-deparse(eqni[[2]])
-                                if(rhs !="1"){
-                                        nrUserOpt=nrUserOpt+1
-                                        userOpt[[nrUserOpt]]<-as.formula(paste("~",rhs,sep=""))
-                                }else{
-                                        nrUserFixed=nrUserFixed+1
-                                        userFixed[[nrUserFixed]]<-as.formula(paste("~",rhs,sep=""))
-                                }
-                        }
-                }
-                if (modelNrParsOpt==0){         
-                        if (nrUserOpt !=0){
-                                stop("the equation(s) ",userOpt," do not match the model requirements!")}
-                }else{                                
-                        modelNumEqn<-modelReq[[modelParsOpt]]$equations
-                        userOpt<-checkNrOpt(modelNumEqn,nrUserOpt,modelParsOpt,userOpt)
-                        if(length(userOpt)==1)
-                          names(userOpt)<-modelParsOpt
-                        else
-                          names(userOpt)<-paste(modelParsOpt,1:length(userOpt),sep="")
-                }
-                
-                if(length(modelParsFixed)>0){                   
-                        modelNumFixedEqns<-modelReq[[modelParsFixed]]$equations
-                        for(i in 1:modelNumFixedEqns)
-                          userFixed[[i]]<-as.formula("~1")
-                        if(modelNumFixedEqns==1)
-                          names(userFixed)<-modelParsFixed
-                        else
-                          names(userFixed)<-paste(modelParsFixed,1:modelNumFixedEqns,sep="")
-                }
-                if (modelNrParsReq==0){             
-                        if (nrUserReq !=0){
-                                stop("the equation(s) ",userReq," do not match the model requirements!")}
-                }else{
-                        modelNumEqn<-modelReq[[modelParsReq]]$equations 
-                        checkNrReq(modelNumEqn,nrUserReq,modelParsReq)
-                        if(userNrLevels>0){
-                                if(userNrLevels !=nrUserReq)
-                                  stop("The number of equations for the systematic component should equal the number of levels minus 1\n")
-                                names(userReq)<-userLevels
-                        }else{
-                                if(nrUserReq==1)
-                                  names(userReq)<-modelParsReq
-                                else
-                                  names(userReq)<-paste(modelParsReq,1:length(userReq),sep="")
-                        }
-                }
-                
-                if (modelNrParsSubreq==0){              
-                        if (nrUserSubreq !=0){
-                                stop("the equation(s) ",userSubreq," do not match the model requirements!")}
-    }else{                                
-            modelNumEqn<-modelReq[[modelParsSubreq]]$equations
-            checkNrReq(modelNumEqn,nrUserSubreq,modelParsSubreq)
-            if(nrUserSubreq==1)
-              names(userSubreq)<-modelParsSubreq
-            else
-              names(userSubreq)<-paste(modelParsSubreq,1:length(userSubreq),sep="")
-    }
-                result<-c(userReq,userOpt,userFixed,userSubreq)
-        }else{    ##user provides names for formulas
-                modelPars<-names(modelReq)
-                parsS<-names(sort(sapply(modelPars,nchar),decreasing=TRUE))    
-                userNames<-names(formula)
-                userEqnNamesByPars<-list()
-                tmpUserNames<-userNames
-                for (i in 1:length(parsS)){
-                        userEqnNamesByPars[[parsS[[i]]]]<-matchPars(parsS[[i]],tmpUserNames)
-                        tmpUserNames<-"%w/o%"(tmpUserNames,userEqnNamesByPars[[parsS[[i]]]])
-                }
-                tmp<-"%w/o%"(userNames,unlist(userEqnNamesByPars))
-                if (length(tmp)>0)
-                  stop("Ambiguous equation name ","\"",tmp,"\"")
-                res<-list()
-                userPars<-names(userEqnNamesByPars)
-                for (i in 1:length(modelPars)){ 
-                        modelPar<-modelPars[[i]]                  
-                        userNumEqn<-length(userEqnNamesByPars[[modelPar]])
-                        modelNumEqn<-modelReq[[modelPar]]$equations
-                        mode<-fMode(modelReq[[modelPar]])
-                        tmplst<-formula[userEqnNamesByPars[[modelPar]]]                
-                        if(modelNumEqn[[1]]==1 && modelNumEqn[[2]]==1 )
-                          tmpNames<-modelPar
-                        else
-                          tmpNames<-paste(modelPar,1:userNumEqn,sep="")                   
-                        if(mode==1){         
-                                whiche<-which(lapply(formula[(userEqnNamesByPars[[modelPar]])],length)!=3)
-                                if(length(whiche)!=0)
-                                  stop("The equation ",formula[[names(whiche)]]," does not conform to model requirements or its name is ambiguous. DepVar/ExpVar is missing.\n")
-                                checkNrReq(modelNumEqn,userNumEqn,modelPar)
-                                whiche<-which((names(tmplst) %in% tmpNames)==FALSE)
-                                if(length(whiche)!=0){
-                                        warning("The name \"",names(tmplst)[whiche],"\" is ambiguous. The equations of the parameter \"",modelPar,"\" are renamed\n")
-                                        names(tmplst)<-tmpNames
-                                }
-                        }else{
-                                if(mode==2){
-                                        whiche<-which(lapply(formula[(userEqnNamesByPars[[modelPar]])],length)!=2)
-                                        if(length(whiche)!=0)
-                                          stop("The equation ",formula[names(whiche)]," does not conform to model requirements or its name is ambiguous.\n")
-                                        whiche<-which((names(tmplst) %in% tmpNames)==FALSE)
-                                        if(length(whiche)!=0){
-                                                warning("The name \"",names(tmplst)[whiche],"\" is ambiguous. The equations of the parameter \"",modelPar,"\" are renamed\n")
-                                                names(tmplst)<-tmpNames
-                                        }       
-                                        tmplst<- checkNrOpt(modelNumEqn,userNumEqn,modelPar,tmplst)
-                                }else{
-                                        if (mode==3){
-                                                whiche<-which(tmplst !="~1")
-                                                if(length(whiche)>0)
-                                                  warning("You cannot specify a formula for the parameter \"",modelPar,"\". All your equations for this parameter are set to their default value. For example, your equation:\n",deparse(formula[names(whiche)]),"\n")
-                                                if(userNumEqn !=modelNumEqn)
-                                                  warning("The parameter \"",modelPar,"\" requires ",modelNumEqn," equation(s). You are providing ",userNumEqn," equation(s) for this parameter. This has been corrected: all equations for this parameter are set to the default value.\n")
-                                                
-                                                tmplst<-list()
-                                                if(modelNumEqn==1)
-                                                  tmpname<-modelPar
-                                                else
-                                                  tmpname<-paste(modelPar,1:modelNumEqn,sep="")
-                                                for(i in 1:modelNumEqn)
-                                                  tmplst[[tmpname[[i]]]]<-as.formula("~1")
-                                        }else{
-                                                if(mode==4)
-                                                  {
-                                                          whiche<-which(lapply(formula[(userEqnNamesByPars[[modelPar]])],length)!=3)
-                                                          whicha<-which(lapply(formula[(userEqnNamesByPars[[modelPar]])],FUN=function(a){if (a[[3]]=="1") return (TRUE) else return(FALSE)})==FALSE)
-                                                          if(length(whiche)!=0 )
-                                                            stop("The equation ",formula[names(whiche)]," does not conform to model requirements or its name is ambiguous. DepVar/ExpVar is missing.\n")
-                                                          else{
-                                                                  if (length(whicha)!=0)
-                                                                    stop("The equation ",formula[names(whicha)]," does not conform to model requirements or its name is ambiguous. Its right-hand side should be \"1\".\n")
-                                                          }
-                                                          checkNrReq(modelNumEqn, userNumEqn, modelPar)
-                                                          whiche<-which((names(tmplst) %in% tmpNames)==FALSE)
-                                                          if(length(whiche)!=0){
-                                                                  warning("The name \"",names(tmplst)[whiche],"\" is ambiguous. The equations of the parameter \"",modelPar,"\" are renamed\n")
-                                                                  names(tmplst)<-tmpNames
-                                                          }
-                                                  }
-                                        }
-                                }
-                        }
-                        res[[modelPar]]<-tmplst
-                }
-                result<-c()
-                for(i in 1:length(res))
-                  result<-c(result,res[[i]])
-                
-        }
-        class(result)<-c("multiple","list")
-        return(result)
-}
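For orientation, a minimal sketch of the list-style, multi-equation input this removed parser handled; the parameter names (mu, sigma) and variables are illustrative, not taken from the package:

# Named formulae are matched to model parameters by name; formulae are inert
# R objects, so this sketch runs without any data.
fml <- list(
  mu1   = y1 ~ x1 + x2,   # required systematic equations, one per response
  mu2   = y2 ~ x1,
  sigma = ~ 1             # ancillary parameter left at its default
)
# The parser returned such a list renamed and reordered, with class
# c("multiple", "list") attached.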
diff --git a/R/parseFormula.R b/R/parseFormula.R
deleted file mode 100644
index a3bbf5e..0000000
--- a/R/parseFormula.R
+++ /dev/null
@@ -1,120 +0,0 @@
-#' Parse Zelig-style Formulae
-#'
-#' Zelig uses three distinct types of formulae. This method is a re-design
-#' of the Zelig function \code{parse.formula}.
-#' @param obj a list or formula
-#' @param data the data set associated with the formula object
-#' @return an object of type "parseFormula", with slots for the formula,
-#' terms, response, predictor, and model matrix
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-parseFormula <- function (obj, data=NULL) {
-  UseMethod("parseFormula")
-}
-
-
-
-#' Parse Standard Formulae
-#'
-#' This method parses a formula-style Zelig formula
-#' @usage \method{parseFormula}{formula}(obj, data=NULL)
-#' @param obj a formula
-#' @param data a data frame
-#' @return an object of type "parseFormula"
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @S3method parseFormula formula
-parseFormula.formula <- function (obj, data=NULL) {
-
-  # Extract terms
-  TERMS <- terms(obj)
-
-  #
-  MODEL.MATRIX <- tryCatch(model.matrix(obj, data), error = function (e) NULL)
-
-  # Build the object
-  res <- list(
-              formula = obj,
-              terms = TERMS,
-              response = getResponseTerms(obj),
-              predictor = getPredictorTerms(obj),
-              model.matrix = MODEL.MATRIX
-              )
-
-  # Return
-  class(res) <- "parseFormula"
-  res
-}
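A hedged usage sketch of the formula method above; the data frame and variable names are illustrative:

df <- data.frame(y = rnorm(20), x1 = rnorm(20), x2 = rnorm(20))
pf <- parseFormula(y ~ x1 + x2, data = df)
pf$response       # response terms, via getResponseTerms()
pf$model.matrix   # design matrix, or NULL if model.matrix() failed
class(pf)         # "parseFormula"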
-
-
-
-#' Parse List-Style Zelig Formulae
-#'
-#' This method parses a list-style Zelig formula.
-#' @usage \method{parseFormula}{list}(obj, data=NULL)
-#' @param obj a list of formulae
-#' @param data a data frame
-#' @return an object of type "parseFormula"
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @S3method parseFormula list
-parseFormula.list <- function (obj, data=NULL) {
-
-  # Extract terms (and place in a list)
-  TERMS <- Map(terms, obj)
-
-  # 
-  MODEL.MATRIX <- makeModelMatrix(obj, data)
-
-  # Build the object
-  res <- list(
-              formula = obj,
-              terms = TERMS,
-              response = getResponseTerms(obj),
-              predictor = getPredictorTerms(obj),
-              model.matrix = MODEL.MATRIX
-              )
-
-  # Return
-  class(res) <- "parseFormula"
-  res
-}
-
-
-
-#' Parse ``Formula''-style Zelig Formulae
-#'
-#' This method parses a ``Formula''-style Zelig formula. This is to support the
-#' ``Formula'' object. It seems like it has the right idea when it comes to 
-#' expressing multiple responses.
-#' @usage \method{parseFormula}{Formula}(obj, data=NULL)
-#' @param obj a list of formulae
-#' @param data a data frame
-#' @return an object of type ``parseFormula''
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @S3method parseFormula Formula
-parseFormula.Formula <- function (obj, data=NULL) {
-
-
-  message("parseFormula.Formula")
-  # Create the actual object
-  res <- list(
-              # Remember the source class
-              source.class = class(obj),
-
-              # Store the original copy of the formula
-              formula = obj,
-
-              # Store the terms
-              terms   = terms(obj),
-
-              # Use Zelig-style methods to get the responseTerms
-              response  = getResponseTerms(obj),
-              predictor = getPredictorTerms(obj),
-
-              # Create the design matrix from the ``Formula'' package
-              model.matrix = NULL
-              )
-
-
-  # Return
-  class(res) <- "parseFormula"
-  res
-}
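And a sketch for the ``Formula'' method, assuming the Formula package is available for its multi-response syntax:

library(Formula)                  # multi-part formulae such as y1 | y2 ~ x
f <- Formula(y1 | y2 ~ x1 + x2)
pf <- parseFormula(f)             # dispatches on class "Formula"
pf$source.class                   # the originating class is recorded
# Unlike the other two methods, model.matrix is left NULL here.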
diff --git a/R/plots.R b/R/plots.R
old mode 100644
new mode 100755
index 34446d3..efa27c9
--- a/R/plots.R
+++ b/R/plots.R
@@ -1,169 +1,474 @@
-#' @S3method plot sim.gamma.gee
-plot.sim.gamma.gee <- function (x, ...) {
-
-  # store device settings
-  original.par <- par(no.readonly=TRUE)
-
-  if (is.null(x$x))
-    return()
-
-  panels <- if (is.null(x$x1)) {
-    palette <- rep("black", 3)
-    matrix(1, nrow=1, ncol=1)
-    # How the layout window will look:
-    # +---+
-    # | 1 |
-    # +---+
-  }
-
-  else {
-    palette <- c('red', 'navy', 'black')
-    matrix(c(1, 2, 3, 3), nrow=2, ncol=2, byrow=TRUE)
-    # How the layout window will look:
-    # +-------+
-    # | 1 | 2 |
-    # +-------+
-    # |   3   |
-    # +-------+
-  }
-
-  layout(panels)
+#' Plot Quantities of Interest in Zelig Fashion
+#'
+#' Generates various graphs for the common types of simulated results
+#' produced by Zelig
+#' @usage simulations.plot(y, y1=NULL, xlab="", ylab="", main="", col=NULL, line.col=NULL,
+#' axisnames=TRUE)
+#' @param y A matrix or vector of simulated results generated by Zelig, to be
+#' graphed.
+#' @param y1 For comparison of two sets of simulated results at different
+#' choices of covariates, this should be an object of the same type and
+#' dimension as y.  If no comparison is to be made, this should be NULL.
+#' @param xlab Label for the x-axis.
+#' @param ylab Label for the y-axis.
+#' @param main Main plot title.
+#' @param col A vector of colors.  Colors will be used in turn as the graph is
+#' built for main plot objects. For nominal/categorical data, this color
+#' renders as the bar color, while for numeric data it renders as the background
+#' color.
+#' @param line.col A vector of colors.  Colors will be used in turn as the graph is
+#' built for the line-color shading of plot objects.
+#' @param axisnames logical; whether to print names along the axis (passed to barplot)
+#' @return nothing
+#' @author James Honaker
+simulations.plot <-function(y, y1=NULL, xlab="", ylab="", main="", col=NULL, line.col=NULL, axisnames=TRUE) {
+    
+    binarytest <- function(j){
+      # levels() returns a character vector, so compare against "0"/"1"
+      if(!is.null(attr(j,"levels"))) return(identical(sort(levels(j)), c("0","1")))
+      return(FALSE)
+    }
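+    # (Editorial note: binarytest() returns TRUE only for factor-like input
+    # whose levels are exactly "0" and "1"; plain numeric 0/1 matrices carry
+    # no "levels" attribute and fall through to the continuous branch below.)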
 
-  # extract quantities of interest
-  ev1 <- x$qi$ev1
-  ev2 <- x$qi$ev2
-  fd <- x$qi$fd
 
-  # Plot ev1
-  .plot.density(ev1, "Expected Values (for X): E(Y|X)", palette[1])
 
-  if (!is.null(x$x1)) {
-    .plot.density(ev2, "Expected Values (for X1): E(Y|X1)", palette[2])
-    .plot.density(fd, "First Differences: E(Y|X1) - E(Y|X)", palette[3])
-  }
-    
-  # return plotting device
-  par(original.par)
+    ## Univariate Plots ##
+    if(is.null(y1)){
+        
+        if (is.null(col))
+        col <- rgb(100,149,237,maxColorValue=255)
+        
+        if (is.null(line.col))
+        line.col <- "black"
+        
+        # Integer Values
+        if ((length(unique(y))<11 & all(as.integer(y) == y)) | is.factor(y) | is.character(y)) {
+            
+                if(is.factor(y) | is.character(y)){
+                    y <- as.numeric(y)
+                }
+
+                # Create a sequence of names
+                nameseq <- paste("Y=", min(y):max(y), sep="")
+                
+                # Set the heights of the barplots.
+                # Note that tabulate() requires that all values are positive integers.
+                # So, we subtract the min value (ensuring everything is at least zero)
+                # and then add 1.
+                bar.heights <- tabulate(y - min(y) + 1) / length(y)
+                
+                # Barplot with (potentially) some zero columns
+                output <- barplot(bar.heights, xlab=xlab, ylab=ylab, main=main, col=col[1],
+                    axisnames=axisnames, names.arg=nameseq)
+
+        # Vector of 1's and 0's
+        } else if(ncol(as.matrix(y))>1 & binarytest(y) ){
+
+            n.y <- nrow(y)
+            # Precedence is names > colnames > 1:n
+            if(is.null(names(y))){
+                if(is.null(colnames(y) )){
+                    all.names <- 1:n.y
+                }else{
+                    all.names <- colnames(y)
+                }
+            }else{
+                all.names <- names(y)
+            }
+            
+            # Barplot with (potentially) some zero columns
+            output <- barplot( apply(y,2,sum)/n.y, xlab=xlab, ylab=ylab, main=main, col=col[1],
+                axisnames=axisnames, names.arg=all.names)
+
+        # Continuous Values
+        } else if(is.numeric(y)){
+            if(ncol(as.matrix(y))>1){
+                ncoly <- ncol(y)
+                hold.dens <- list()
+                ymax <- xmax <- xmin <- rep(0,ncol(y))
+                for(i in 1:ncoly){
+                    hold.dens[[i]] <- density(y[,i])
+                    ymax[i] <- max(hold.dens[[i]]$y)
+                    xmax[i] <- max(hold.dens[[i]]$x)
+                    xmin[i] <- min(hold.dens[[i]]$x)
+                }
+                shift <- 0:ncoly
+                all.xlim <- c(min(xmin), max(xmax))
+                all.ylim <- c(0,ncoly)
+
+                # Precedence is names > colnames > 1:n
+                if(is.null(names(y))){
+                    if(is.null(colnames(y) )){
+                        all.names <- 1:ncoly
+                    }else{
+                        all.names <- colnames(y)
+                    }
+                }else{
+                    all.names <- names(y)
+                }
+                shrink <- 0.9
+                for(i in 1:ncoly ){
+                    if(i<ncoly){
+                        output <- plot(hold.dens[[i]]$x, shrink*hold.dens[[i]]$y/ymax[i] + shift[i], xaxt="n", yaxt="n", xlab="", ylab="", main="", col=line.col[1], xlim=all.xlim, ylim=all.ylim, type="l")
+                        if(!identical(col[1],"n")){
+                            polygon(hold.dens[[i]]$x, shrink*hold.dens[[i]]$y/ymax[i] + shift[i], col=col[1])
+                        }
+                        abline(h=shift[i+1])
+                        text(x=all.xlim[1], y=(shift[i] + shift[i+1])/2, labels=all.names[i], pos=4)
+                        par(new=TRUE)
+                    }else{
+                        output <- plot(hold.dens[[i]]$x, shrink*hold.dens[[i]]$y/ymax[i] + shift[i], yaxt="n", xlab=xlab, ylab=ylab, main=main, col=line.col[1], xlim=all.xlim, ylim=all.ylim, type="l")
+                        if(!identical(col[1],"n")){
+                            polygon(hold.dens[[i]]$x, shrink*hold.dens[[i]]$y/ymax[i] + shift[i], col=col[1])
+                        }
+                        text(x=all.xlim[1], y=(shift[i] + shift[i+1])/2, labels=all.names[i], pos=4)
+                    }
+                }
+
+            }else{
+                den.y <- density(y)
+                output <- plot(den.y, xlab=xlab, ylab=ylab, main=main, col=line.col[1])
+                if(!identical(col[1],"n")){
+                    polygon(den.y$x, den.y$y, col=col[1])
+                }
+            }
+        }
+        
+    ## Comparison Plots ##
+        
+    }else{
+        
+        # Integer - Plot and shade a matrix
+        if(( length(unique(y))<11 & all(as.integer(y) == y) ) | is.factor(y) | is.character(y)){
+            
+            if(is.factor(y) | is.character(y)){
+                y <- as.numeric(y)
+                y1 <- as.numeric(y1)
+            }
+
+            yseq<-min(c(y,y1)):max(c(y,y1))
+            nameseq<- paste("Y=",yseq,sep="")
+            n.y<-length(yseq)
+            
+            colors<-rev(heat.colors(n.y^2))
+            lab.colors<-c("black","white")
+            comp<-matrix(NA,nrow=n.y,ncol=n.y)
+            
+            for(i in 1:n.y){
+                for(j in 1:n.y){
+                    flag<- y==yseq[i] & y1==yseq[j]
+                    comp[i,j]<-mean(flag)
+                }
+            }
+            
+            old.pty<-par()$pty
+            old.mai<-par()$mai
+            
+            par(pty="s")
+            par(mai=c(0.3,0.3,0.3,0.1))
+            
+            image(z=comp, axes=FALSE, col=colors, zlim=c(min(comp),max(comp)),main=main )
+            
+            locations.x<-seq(from=0,to=1,length=nrow(comp))
+            locations.y<-locations.x
+            
+            for(m in 1:n.y){
+                for(n in 1:n.y){
+                    text(x=locations.x[m],y=locations.y[n],labels=paste(round(100*comp[m,n])), col=lab.colors[(comp[m,n]> ((max(comp)-min(comp))/2) )+1])
+                }
+            }
+            
+            axis(side=1,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=1)
+            axis(side=2,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=3)
+            box()
+            par(pty=old.pty,mai=old.mai)
+        ##  Two Vectors of 1's and 0's
+        }else if( ncol(as.matrix(y))>1 & binarytest(y) & ncol(as.matrix(y1))>1 & binarytest(y1)   )  {
+
+            # Everything in this section assumes ncol(y)==ncol(y1)
+
+            # Define n.y before the naming block, which may need 1:n.y
+            n.y <- ncol(y)
+            yseq <- 1:n.y
+
+            # Precedence is names > colnames > 1:n
+            if(is.null(names(y))){
+                if(is.null(colnames(y) )){
+                    nameseq <- 1:n.y
+                }else{
+                    nameseq <- colnames(y)
+                }
+            }else{
+                nameseq <- names(y)
+            }
+
+            y <- y %*% yseq
+            y1 <- y1 %*% yseq
+
+            ## FROM HERE ON -- Replicates above.  Should address more generically
+            colors<-rev(heat.colors(n.y^2))
+            lab.colors<-c("black","white")
+            comp<-matrix(NA,nrow=n.y,ncol=n.y)
+            
+            for(i in 1:n.y){
+                for(j in 1:n.y){
+                    flag<- y==yseq[i] & y1==yseq[j]
+                    comp[i,j]<-mean(flag)
+                }
+            }
+            
+            old.pty<-par()$pty
+            old.mai<-par()$mai
+            
+            par(pty="s")
+            par(mai=c(0.3,0.3,0.3,0.1))
+            
+            image(z=comp, axes=FALSE, col=colors, zlim=c(min(comp),max(comp)),main=main )
+            
+            locations.x<-seq(from=0,to=1,length=nrow(comp))
+            locations.y<-locations.x
+            
+            for(m in 1:n.y){
+                for(n in 1:n.y){
+                    text(x=locations.x[m],y=locations.y[n],labels=paste(round(100*comp[m,n])), col=lab.colors[(comp[m,n]> ((max(comp)-min(comp))/2) )+1])
+                }
+            }
+            
+            axis(side=1,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=1)
+            axis(side=2,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=3)
+            box()
+            par(pty=old.pty,mai=old.mai)
+         
+        ## Numeric - Plot two densities on top of each other
+        }else if(is.numeric(y) & is.numeric(y1)){
+            
+            if(is.null(col)){
+                semi.col.x <-rgb(142,229,238,150,maxColorValue=255)
+                semi.col.x1<-rgb(255,114,86,150,maxColorValue=255)
+                col<-c(semi.col.x,semi.col.x1)
+            }else if(length(col)<2){
+                col<-c(col,col)
+            }
+
+            if(ncol(as.matrix(y))>1){
+                shrink <- 0.9
+                ncoly <- ncol(y)  # Assumes columns of y match cols y1.  Should check or enforce.
+                # Precedence is names > colnames > 1:n
+                if(is.null(names(y))){
+                    if(is.null(colnames(y) )){
+                        all.names <- 1:ncoly
+                    }else{
+                        all.names <- colnames(y)
+                    }
+                }else{
+                    all.names <- names(y)
+                }
+
+                hold.dens.y <- hold.dens.y1 <- list()
+                ymax <- xmax <- xmin <- rep(0,ncoly)
+                for(i in 1:ncoly){
+                    hold.dens.y[[i]] <- density(y[,i])
+                    hold.dens.y1[[i]] <- density(y1[,i], bw=hold.dens.y[[i]]$bw)
+                    ymax[i] <- max(hold.dens.y[[i]]$y, hold.dens.y1[[i]]$y)
+                    xmax[i] <- max(hold.dens.y[[i]]$x, hold.dens.y1[[i]]$x)
+                    xmin[i] <- min(hold.dens.y[[i]]$x, hold.dens.y1[[i]]$x)
+                }
+                all.xlim <- c(min(xmin), max(xmax))
+                all.ylim <- c(0,ncoly)
+                shift <- 0:ncoly
+                for(i in 1:ncoly ){
+                    if(i<ncoly){
+                        output <- plot(hold.dens.y[[i]]$x, shrink*hold.dens.y[[i]]$y/ymax[i] + shift[i], xaxt="n", yaxt="n", xlab="", ylab="", main="", col=line.col[1], xlim=all.xlim, ylim=all.ylim, type="l")
+                        par(new=TRUE)
+                        output <- plot(hold.dens.y1[[i]]$x, shrink*hold.dens.y1[[i]]$y/ymax[i] + shift[i], xaxt="n", yaxt="n", xlab="", ylab="", main="", col=line.col[2], xlim=all.xlim, ylim=all.ylim, type="l")
+
+                        if(!identical(col[1],"n")){
+                            polygon(hold.dens.y[[i]]$x, shrink*hold.dens.y[[i]]$y/ymax[i] + shift[i], col=col[1])
+                        }
+                        if(!identical(col[2],"n")){
+                            polygon(hold.dens.y1[[i]]$x, shrink*hold.dens.y1[[i]]$y/ymax[i] + shift[i], col=col[2])
+                        }
+                        abline(h=shift[i+1])
+                        text(x=all.xlim[1], y=(shift[i] + shift[i+1])/2, labels=all.names[i], pos=4)
+                        par(new=TRUE)
+                    }else{
+                        output <- plot(hold.dens.y[[i]]$x, shrink*hold.dens.y[[i]]$y/ymax[i] + shift[i], yaxt="n", xlab=xlab, ylab=ylab, main=main, col=line.col[1], xlim=all.xlim, ylim=all.ylim, type="l")
+                        par(new=TRUE)
+                        output <- plot(hold.dens.y1[[i]]$x, shrink*hold.dens.y1[[i]]$y/ymax[i] + shift[i], yaxt="n", xlab=xlab, ylab=ylab, main=main, col=line.col[2], xlim=all.xlim, ylim=all.ylim, type="l")  # second series colored with line.col[2], matching the branch above
+
+                        if(!identical(col[1],"n")){
+                            polygon(hold.dens.y[[i]]$x, shrink*hold.dens.y[[i]]$y/ymax[i] + shift[i], col=col[1])
+                        }
+                        if(!identical(col[2],"n")){
+                            polygon(hold.dens.y1[[i]]$x, shrink*hold.dens.y1[[i]]$y/ymax[i] + shift[i], col=col[2])
+                        }
+                        text(x=all.xlim[1], y=(shift[i] + shift[i+1])/2, labels=all.names[i], pos=4)
+                    }
+                } 
+            }else{
+                den.y<-density(y)
+                den.y1<-density(y1,bw=den.y$bw)
+            
+                all.xlim<-c(min(c(den.y$x,den.y1$x)),max(c(den.y$x,den.y1$x)))
+                all.ylim<-c(min(c(den.y$y,den.y1$y)),max(c(den.y$y,den.y1$y)))
+            
+                output<-plot(den.y,xlab=xlab,ylab=ylab,main=main,col=col[1],xlim=all.xlim,ylim=all.ylim)
+                par(new=TRUE)
+                output<-plot(den.y1,xlab=xlab,ylab=ylab,main="",col=col[2],xlim=all.xlim,ylim=all.ylim)
+            
+                if(!identical(col[1],"n")){
+                    polygon(den.y$x,den.y$y,col=col[1])
+                }
+                if(!identical(col[2],"n")){
+                    polygon(den.y1$x,den.y1$y,col=col[2])
+                }
+            }
+        }
+    }
 }
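A hedged usage sketch for simulations.plot(); the draws below are simulated purely for illustration:

set.seed(1)
ev  <- rnorm(1000, mean = 0.5, sd = 0.1)   # stand-in for simulated E(Y|X)
ev1 <- rnorm(1000, mean = 0.7, sd = 0.1)   # stand-in for simulated E(Y|X1)

# Univariate: one density with the default cornflower-blue fill
simulations.plot(ev, xlab = "E(Y|X)", ylab = "Density",
                 main = "Expected Values: E(Y|X)")

# Comparison: the two densities overlaid with semi-transparent defaults
simulations.plot(ev, ev1, main = "Comparison of E(Y|X) and E(Y|X1)")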
 
-#' @S3method plot sim.normal.gee
-plot.sim.normal.gee <- plot.sim.gamma.gee
 
-#' @S3method plot sim.poisson.gee
-plot.sim.poisson.gee <- plot.sim.gamma.gee
 
-#' @S3method plot sim.logit.gee
-plot.sim.logit.gee <- function (x, ...) {
 
-  # store device settings
-  original.par <- par(no.readonly=TRUE)
 
-  if (is.null(x$x))
-    return()
 
-  panels <- if (is.null(x$x1)) {
-    palette <- rep("black", 4)
-    matrix(1, nrow=1, ncol=1)
-    # How the layout window will look:
-    # +---+
-    # | 1 |
-    # +---+
-  }
+#' Default Plot Design For Zelig Model QI's
+#' 
+#' @usage qi.plot(obj, ...)
+#' @param obj A reference class zelig5 object
+#' @param ... Parameters to be passed to the `truehist' function which is 
+#' implicitly called for numeric simulations
+#' @author James Honaker with panel layouts from Matt Owen
+
+qi.plot <- function (obj, ...) {
+    # Save old state
+    old.par <- par(no.readonly=TRUE)
+
+    if("timeseries" %in% obj$category){
+        par(mfcol=c(3,1))
+        zeligACFplot(obj$getqi("acf", xvalue="x1"))
+        ci.plot(obj, qi="pvseries.shock")
+        ci.plot(obj, qi="pvseries.innovation")
+        return()
+    }
 
-  else {
-    palette <- c('red', 'navy', 'black', 'black')
-    matrix(c(1, 2, 3, 3, 4, 4), nrow=3, ncol=2, byrow=TRUE)
-    # How the layout window will look:
-    # +-------+
-    # | 1 | 2 |
-    # +-------+
-    # |   3   |
-    # +-------+
-    # |   4   |
-    # +-------+
-  }
+    # Determine whether two "Expected Values" qi's exist
+    both.ev.exist <- (length(obj$sim.out$x$ev)>0) & (length(obj$sim.out$x1$ev)>0)
+    # Determine whether two "Predicted Values" qi's exist
+    both.pv.exist <- (length(obj$sim.out$x$pv)>0) & (length(obj$sim.out$x1$pv)>0)
 
-  layout(panels)
+    color.x <- rgb(242, 122, 94, maxColorValue=255)
+    color.x1 <- rgb(100, 149, 237, maxColorValue=255)
+    # Interpolation of the above colors in rgb color space:
+    color.mixed <- rgb(t(round((col2rgb(color.x) + col2rgb(color.x1))/2)), maxColorValue=255)
+    
+    if (! ("x" %in% names(obj$sim.out))) {
+        return(par(old.par))
+    } else if (! ("x1" %in% names(obj$sim.out))) {
 
-  # extract quantities of interest
-  ev1 <- x$qi$ev1
-  ev2 <- x$qi$ev2
-  fd <- x$qi$fd
-  rr <- x$qi$rr
 
-  # Plot ev1
-  .plot.density(ev1, "Expected Values (for X): E(Y|X)", palette[1])
-  .plot.density(ev2, "Expected Values (for X1): E(Y|X1)", palette[2])
-  .plot.density(fd, "First Differences: E(Y|X1) - E(Y|X)", palette[3])
-  .plot.density(rr, "Risk Ratios: E(Y|X1)/E(Y|X)", palette[4])
+        panels <- matrix(1:2, 2, 1)
+        
+        # The plotting device:
+        #
+        # +-----------+
+        # |     1     |
+        # +-----------+
+        # |     2     |
+        # +-----------+
+    } else {
+        panels <- matrix(c(1:5, 5), ncol=2, nrow=3, byrow = TRUE)
+        
+        # the plotting device:
+        #
+        # +-----+-----+
+        # |  1  |  2  |
+        # +-----+-----+
+        # |  3  |  4  |
+        # +-----+-----+
+        # |     5     |
+        # +-----------+
+        
+        panels <- if (xor(both.ev.exist, both.pv.exist))
+        rbind(panels, c(6, 6))
+        
+        # the plotting device:
+        #
+        # +-----+-----+
+        # |  1  |  2  |
+        # +-----+-----+
+        # |  3  |  4  |
+        # +-----+-----+
+        # |     5     |
+        # +-----------+
+        # |     6     |
+        # +-----------+
+        
+        else if (both.ev.exist && both.pv.exist)
+        rbind(panels, c(6, 7))
+        else
+        panels
+        
+        # the plotting device:
+        #
+        # +-----+-----+
+        # |  1  |  2  |
+        # +-----+-----+
+        # |  3  |  4  |
+        # +-----+-----+
+        # |     5     |
+        # +-----+-----+
+        # |  6  |  7  |
+        # +-----+-----+
+    }
     
-  # return plotting device
-  par(original.par)
-}
+    layout(panels)
+    
+    titles <- obj$setx.labels
+    
+    # Plot each simulation
+    if(length(obj$sim.out$x$pv)>0)
+        simulations.plot(obj$getqi(qi="pv", xvalue="x"), main = titles$pv, col = color.x, line.col = "black")
+    
+    if(length(obj$sim.out$x1$pv)>0)
+        simulations.plot(obj$getqi(qi="pv", xvalue="x1"), main = titles$pv1, col = color.x1, line.col = "black")
+        
+    if(length(obj$sim.out$x$ev)>0)
+        simulations.plot(obj$getqi(qi="ev", xvalue="x"), main = titles$ev, col = color.x, line.col = "black")
 
-#' @S3method plot sim.probit.gee
-plot.sim.probit.gee <- plot.sim.logit.gee
+    if(length(obj$sim.out$x1$ev)>0)
+        simulations.plot(obj$getqi(qi="ev", xvalue="x1"), main = titles$ev1, col = color.x1, line.col = "black")
 
-# Plot Density Graphs for GEE Quantities of Interest
-# @param x a vector containing quantities of interest
-# @param main the main title of the plot
-# @param col the color of the line-plot
-.plot.density <- function (x, main, col) {
-  if (all(is.na(x)))
-    return()
+    if(length(obj$sim.out$x1$fd)>0)
+        simulations.plot(obj$getqi(qi="fd", xvalue="x1"), main = titles$fd, col = color.mixed, line.col = "black")
+    
+    if(both.pv.exist)
+        simulations.plot(y=obj$getqi(qi="pv", xvalue="x"), y1=obj$getqi(qi="pv", xvalue="x1"), main = "Comparison of Y|X and Y|X1", col = paste(c(color.x, color.x1), "80", sep=""), line.col = "black")
+        
+    if(both.ev.exist)
+        simulations.plot(y=obj$getqi(qi="ev", xvalue="x"), y1=obj$getqi(qi="ev", xvalue="x1"), main = "Comparison of E(Y|X) and E(Y|X1)", col = paste(c(color.x, color.x1), "80", sep=""), line.col = "black")
 
-  density <- density(x)
-  plot(density(x), main = main, col = col)
+    
+    # Restore old state
+    par(old.par)
+    
+    # Return old parameter invisibly
+    invisible(old.par)
 }
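A hedged end-to-end sketch of qi.plot(), using the Zelig 5 reference-class workflow that the documentation above assumes; the model, data, and values are illustrative:

library(Zelig)
z5 <- zls$new()                  # least-squares model, reference-class interface
z5$zelig(Fertility ~ Education, data = swiss)
z5$setx(Education = 5)
z5$setx1(Education = 15)         # a second profile unlocks the comparison panels
z5$sim()
qi.plot(z5)                      # panel layout depends on which pv/ev qi's exist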
-#' Plot graphs of simulated multiply-imputed data
-#'
-#' This function combines results across multiply imputed results
-#'    and then calls the appropriate plot for that class.
-#'
-#' @usage \method{plot}{MI.sim}(...)
-#' @S3method plot MI.sim
-#' @param ... ignored parameters
-#'
-#' @return the return of the appropriate plot method
-#' @author James Honaker \email{jhonaker@@iq.harvard.edu}
-plot.MI.sim <- function(x, ...) {
 
-  m<-length(x)                           # The number of imputed datasets
-  reformed<-x[[1]]                       # Simplified object of the original class
-  all.qi<-attributes(x[[1]]$qi)$names    # Convoluted given the current structure of objects
-
-  ## Currently, we're appending all the qi's together into one object
-  ## Note - everything that is not a qi, will just come from the first imputed dataset
-  
-  if(m>1){
-    for(i in 2:m){
-      for(j in all.qi){    # Could do this by position number as "in 1:length(all.qi)"
-        ## The $qi's are themselves lists, so this is difficult:
-        reformed$qi[j][[1]]<-rbind(reformed$qi[j][[1]],x[[i]]$qi[j][[1]])
-      }
-    }
-  }
-
-  output<-plot(reformed)
-  ## Return any plot returns invisibly
-  invisible(output)
-}
 
 
-#' Method for plotting pooled simulations by confidence intervals
+#' Method for plotting qi simulations across a range of values of a variable, with confidence intervals
 #'
-#' Plot confidence intervals of pooled simulated values.
-#' 
-#' @param x A `sim' object
+#' @param obj A reference class zelig5 object
 #' @param qi a character-string specifying the quantity of interest to plot
 #' @param var The variable to be used on the x-axis. Default is the variable
 #' across all the chosen values with smallest nonzero variance
-#' @param ... Parameters to be passed to the `truehist' function which is 
+#' @param ... Parameters to be passed to the `truehist' function which is
 #' implicitly called for numeric simulations
 #' @param main a character-string specifying the main heading of the plot
 #' @param sub a character-string specifying the sub heading of the plot
 #' @param xlab a character-string specifying the label for the x-axis
 #' @param ylab a character-string specifying the label for the y-axis
+#' @param xlim Limits to the x-axis
+#' @param ylim Limits to the y-axis
 #' @param legcol ``legend color'', a valid color used for plotting the line
 #' colors in the legend
 #' @param col a valid vector of colors of at least length 3 to use to color the
@@ -173,605 +478,517 @@ plot.MI.sim <- function(x, ...) {
 #' ``NE'' respectively
 #' @param legpos ``legend type'', exact coordinates and sizes for legend.
 #' Overrides argument ``leg.type''
+#' @param ci vector of length three of confidence interval levels to draw.
+#' @param discont optional point of discontinuity along the x-axis at which 
+#' to interrupt the graph
 #' @return the current graphical parameters. This is subject to change in future
 #' implementations of Zelig
-#' @author James Honaker, adapted by Matt Owen \email{mowen@@iq.harvard.edu}
+#' @author James Honaker
 #' @export plot.ci
-#' @usage \method{plot}{ci}(x, qi="ev", var=NULL, ..., legcol="gray20", col=NULL, leg=1, legpos=NULL)
-plot.ci <- function(x, qi="ev", var=NULL, ..., main = NULL, sub = NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1, legpos=NULL, ci=c(80,95,99.9)) {
-
-  if(length(ci)<3){
-  	ci<-rep(ci,3)
-  }
-  if(length(ci)>3){
-  	ci<-ci[1:3]
-  }
-  ci<-sort(ci)
-
-  if (! "pooled.sim" %in% class(x)) {
-    something <- list(x=x)
-    class(something) <- "pooled.sim"
-    attr(something, "titles") <- x$titles
-    x <- something
-  }
-
-  xmatrix<-matrix(NA,nrow=length(x),ncol=length(x[[1]]$x$data))
-
-  for(i in 1:length(x)){
-    xmatrix[i,]<-as.matrix(x[[i]]$x$data)
-  }
-
-  if (length(x) == 1 && is.null(var)) {
-    warning("Must specify the `var` parameter when plotting the confidence interval of an unvarying model. Plotting nothing.")
-    return(invisible(FALSE))
-  }
-
-  if(is.character(var)){
-    if( !(var %in% names(x[[1]]$x$data) ) ){
-      warning("Specified variable for confidence interval plot is not in estimated model.  Plotting nothing.")
-      return(invisible(FALSE))
-    }  
-  }
-
+#' @usage ci.plot(obj, qi="ev", var=NULL, ..., main = NULL, sub = 
+#'  NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = 
+#'  NULL, legcol="gray20", col=NULL, leg=1, legpos=
+#'  NULL, ci = c(80, 95, 99.9), discont=NULL)
+ci.plot <- function(obj, qi="ev", var=NULL, ..., main = NULL, sub = NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1, legpos=NULL, ci=c(80,95,99.9), discont=NULL) {
+ 
+    ########################### 
+    #### Utility Functions ####
+
+    # Define function to cycle over range list and extract correct qi's
+    ## CAN THESE NOW BE REPLACED WITH THE GETTER METHODS?
+
+    extract.sims<-function(obj,qi){
+        d<-length(obj$sim.out$range)
+        k<-length(obj$sim.out$range[[1]][qi][[1]][[1]])   # THAT IS A LONG PATH THAT MAYBE SHOULD BE CHANGED
+        hold<-matrix(NA,nrow=k, ncol=d)
+        for(i in 1:d){
+            hold[,i]<-obj$sim.out$range[[i]][qi][[1]][[1]]  # THAT IS A LONG PATH THAT MAYBE SHOULD BE CHANGED
+        }
+        return(hold)
+    }
 
-  if (is.null(var)) {
-    each.var <- apply(xmatrix,2,sd) 
-    flag <- each.var>0
-    min.var<-min(each.var[flag])
-    var.seq<-1:ncol(xmatrix)
-    position<-var.seq[each.var==min.var]
-  } else {
-    if(is.numeric(var)){   
-      position<-var
-    }else if(is.character(var)){
-      position<-grep(var,names(x[[1]]$x$data))
+    extract.sims1<-function(obj,qi){    #Should find better architecture for alternate range sims
+        d<-length(obj$sim.out$range1)
+        k<-length(obj$sim.out$range1[[1]][qi][[1]][[1]])   # THAT IS A LONG PATH THAT MAYBE SHOULD BE CHANGED
+        hold<-matrix(NA,nrow=k, ncol=d)
+        for(i in 1:d){
+            hold[,i]<-obj$sim.out$range1[[i]][qi][[1]][[1]]  # THAT IS A LONG PATH THAT MAYBE SHOULD BE CHANGED
+        }
+        return(hold)
     }
-  }
-  position<-min(position)
-  xseq<-xmatrix[,position]
-  xname<-names(x[[1]]$x$data[position])
 
+    # Define functions to compute confidence intervals
+    ## CAN WE MERGE THESE TOGETHER SO AS NOT TO HAVE TO SORT TWICE?
 
-  # Use "qi" argument to select quantities of interest and set labels
-  ev1<-NULL
-  if(qi=="pv"){
-    request<-"Predicted Values: Y|X"
-    if(!is.null(x[[1]]$x1)){
-      ev1<-simulation.matrix(x, "Predicted Values: Y|X1")
+    ci.upper <- function (x, alpha) {
+        pos <- max(round((1-(alpha/100))*length(x)), 1)
+        return(sort(x)[pos])
     }
-  } else if(qi=="fd") {
-    request<-"First Differences: E(Y|X1) - E(Y|X)"
-  } else {
-    request<-"Expected Values: E(Y|X)"
-    if(!is.null(x[[1]]$x1)){
-      ev1<-simulation.matrix(x, "Expected Values: E(Y|X1)")
+    
+    ci.lower <- function (x, alpha) {
+        pos<-max(round((alpha/100)*length(x)), 1)
+        return(sort(x)[pos])
     }
-  }
-  ev<-simulation.matrix(x, request)
-  if (is.null(ylab)){
-    ylab <- request
-  }
 
+    ###########################
 
-  # Define functions to compute confidence intervals
-  ci.upper <- function (x, alpha) {
-    pos <- max(round((1-(alpha/100))*length(x)), 1)
-    return(sort(x)[pos])
-  }
-
-  ci.lower <- function (x, alpha) {
-    pos<-max(round((alpha/100)*length(x)), 1)
-    return(sort(x)[pos])
-  }
-
-  #
-  k<-ncol(ev)
-  n<-nrow(ev)
-
-  #
-  if(is.null(col)){
-    myblue1<-rgb( 100, 149, 237, alpha=50, maxColorValue=255)
-    myblue2<-rgb( 152, 245, 255, alpha=50, maxColorValue=255)
-    myblue3<-rgb( 191, 239, 255, alpha=70, maxColorValue=255)
-    myred1 <-rgb( 237, 149, 100, alpha=50, maxColorValue=255)
-    myred2 <-rgb( 255, 245, 152, alpha=50, maxColorValue=255)
-    myred3 <-rgb( 255, 239, 191, alpha=70, maxColorValue=255)
-
-    col<-c(myblue1,myblue2,myblue3,myred1,myred2,myred3)
-  }else{
-  	if(length(col)<6){
-  	  col<-rep(col,6)[1:6]
+    if(length(ci)<3){
+        ci<-rep(ci,3)
     }
-  }
-
+    if(length(ci)>3){
+        ci<-ci[1:3]
+    }
+    ci<-sort(ci)
+    
+    
+    ## Timeseries:
+    if("timeseries" %in% obj$category){
+        #xmatrix<-              ## Do we need to know the x in which the shock/innovation occurred?  For secondary graphs, titles, legends?
+        xname <- "Time"
+        qiseries <- c("pvseries.shock","pvseries.innovation","evseries.shock","evseries.innovation")
+        if (!qi %in% qiseries){
+            cat(paste("Error: For Timeseries models, argument qi must be one of ", paste(qiseries, collapse=" or ") ,".\n", sep="") )
+            return()
+        }
+        ev<-t( obj$getqi(qi=qi, xvalue="x1") )   # NOTE THE NECESSARY TRANSPOSE.  Should we more clearly standardize this?
+        d<-ncol(ev)
+        xseq<-1:d  
+        ev1 <- NULL  # Maybe want to add ability to overlay another graph?
+
+        # Define xlabel
+        if (is.null(xlab))
+        xlab <- xname
+        if (is.null(ylab)){
+            if(qi %in% c("pvseries.shock", "pvseries.innovation"))
+                ylab<- as.character(obj$setx.labels["pv"])
+            if(qi %in% c("evseries.shock", "evseries.innovation"))
+                ylab<- as.character(obj$setx.labels["ev"])    
+        }
+
+        if (is.null(main))
+        main <- as.character(obj$setx.labels[qi])
+        if (is.null(discont))
+        discont <- 22.5    # NEED TO SET AUTOMATICALLY
+
+    ## Everything Else:
+    }else{
+        d<-length(obj$sim.out$range)
+    
+        if (d<1) {
+            return()  # Should add warning
+        }
+
+        xmatrix<-matrix(NA,nrow=d, ncol=length( obj$setx.out$range[[1]]$mm[[1]] ))    # THAT IS A LONG PATH THAT MAYBE SHOULD BE CHANGED
+        for(i in 1:d){
+            xmatrix[i,]<-as.matrix( obj$setx.out$range[[i]]$mm[[1]] )   # THAT IS A LONG PATH THAT MAYBE SHOULD BE CHANGED
+        }
+
+        if (d == 1 && is.null(var)) {
+            warning("Must specify the `var` parameter when plotting the confidence interval of an unvarying model. Plotting nothing.")
+            return(invisible(FALSE))
+        }
+    
+        xvarnames<-names(as.data.frame( obj$setx.out$range[[1]]$mm[[1]]))  # MUST BE A BETTER WAY/PATH TO GET NAMES
+    
+        if(is.character(var)){
+            if( !(var %in% xvarnames   ) ){
+                warning("Specified variable for confidence interval plot is not in estimated model.  Plotting nothing.")
+                return(invisible(FALSE))
+            }
+        }
+
+        if (is.null(var)) {
+            each.var <- apply(xmatrix,2,sd)
+            flag <- each.var>0
+            min.var<-min(each.var[flag])
+            var.seq<-1:ncol(xmatrix)
+            position<-var.seq[each.var==min.var]
+        } else {
+            if(is.numeric(var)){
+                position<-var
+            }else if(is.character(var)){
+                position<-grep(var,xvarnames )
+            }
+        }
+        position<-min(position)
+        xseq<-xmatrix[,position]
+        xname<-xvarnames[position] 
+        # Define xlabel
+        if (is.null(xlab))
+        xlab <- paste("Range of",xname)
+
+        # Use "qi" argument to select quantities of interest and set labels
+        ev1<-NULL
+        if(!is.null(obj$sim.out$range1)){
+            ev1<-extract.sims1(obj,qi=qi)
+        }
+        ev<-extract.sims(obj,qi=qi)
+        if (is.null(ylab)){
+            ylab <- as.character(obj$setx.labels[qi])
+        }
 
-  form.history <- function (k,xseq,results,ci=c(80,95,99.9)){
-  
-    history<-matrix(NA, nrow=k,ncol=8)
-    for (i in 1:k) {
-      v <- c(
-             xseq[i],
-             median(results[,i]),
- 
-             ci.upper(results[,i],ci[1]),
-             ci.lower(results[,i],ci[1]),
+    }
+    
 
-             ci.upper(results[,i],ci[2]),
-             ci.lower(results[,i],ci[2]),
 
-             ci.upper(results[,i],ci[3]),
-             ci.lower(results[,i],ci[3])
-             )
 
-      history[i, ] <- v
+    #
+    k<-ncol(ev)
+    n<-nrow(ev)
+    
+    #
+    if(is.null(col)){
+        myblue1<-rgb( 100, 149, 237, alpha=50, maxColorValue=255)
+        myblue2<-rgb( 152, 245, 255, alpha=50, maxColorValue=255)
+        myblue3<-rgb( 191, 239, 255, alpha=70, maxColorValue=255)
+        myred1 <-rgb( 237, 149, 100, alpha=50, maxColorValue=255)
+        myred2 <-rgb( 255, 245, 152, alpha=50, maxColorValue=255)
+        myred3 <-rgb( 255, 239, 191, alpha=70, maxColorValue=255)
+        
+        col<-c(myblue1,myblue2,myblue3,myred1,myred2,myred3)
+    }else{
+        if(length(col)<6){
+            col<-rep(col,6)[1:6]
+        }
     }
-    if (k == 1) {
-      left <- c(
-             xseq[1]-.5,
-             median(results[,1]),
-
-             ci.upper(results[,1],ci[1]),
-             ci.lower(results[,1],ci[1]),
-
-             ci.upper(results[,1],ci[2]),
-             ci.lower(results[,1],ci[2]),
-
-             ci.upper(results[,1],ci[3]),
-             ci.lower(results[,1],ci[3])
-             )
-      right <- c(
-             xseq[1]+.5,
-             median(results[,1]),
-
-             ci.upper(results[,1],ci[1]),
-             ci.lower(results[,1],ci[1]),
-
-             ci.upper(results[,1],ci[2]),
-             ci.lower(results[,1],ci[2]),
-
-             ci.upper(results[,1],ci[3]),
-             ci.lower(results[,1],ci[3])
-             )
-      v <- c(
-             xseq[1],
-             median(results[,1]),
-
-             ci.upper(results[,1],ci[1]),
-             ci.lower(results[,1],ci[1]),
-
-             ci.upper(results[,1],ci[2]),
-             ci.lower(results[,1],ci[2]),
-
-             ci.upper(results[,1],ci[3]),
-             ci.lower(results[,1],ci[3])
-             )
-      history <- rbind(left, v, right)
+    
+    # Define function to numerically extract summaries of distributions from set of all simulated qi's
+    form.history <- function (k,xseq,results,ci=c(80,95,99.9)){
+        
+        history<-matrix(NA, nrow=k,ncol=8)
+        for (i in 1:k) {
+            v <- c(
+            xseq[i],
+            median(results[,i]),
+            
+            ci.upper(results[,i],ci[1]),
+            ci.lower(results[,i],ci[1]),
+            
+            ci.upper(results[,i],ci[2]),
+            ci.lower(results[,i],ci[2]),
+            
+            ci.upper(results[,i],ci[3]),
+            ci.lower(results[,i],ci[3])
+            )
+            
+            history[i, ] <- v
+        }
+        if (k == 1) {
+            left <- c(
+            xseq[1]-.5,
+            median(results[,1]),
+            
+            ci.upper(results[,1],ci[1]),
+            ci.lower(results[,1],ci[1]),
+            
+            ci.upper(results[,1],ci[2]),
+            ci.lower(results[,1],ci[2]),
+            
+            ci.upper(results[,1],ci[3]),
+            ci.lower(results[,1],ci[3])
+            )
+            right <- c(
+            xseq[1]+.5,
+            median(results[,1]),
+            
+            ci.upper(results[,1],ci[1]),
+            ci.lower(results[,1],ci[1]),
+            
+            ci.upper(results[,1],ci[2]),
+            ci.lower(results[,1],ci[2]),
+            
+            ci.upper(results[,1],ci[3]),
+            ci.lower(results[,1],ci[3])
+            )
+            v <- c(
+            xseq[1],
+            median(results[,1]),
+            
+            ci.upper(results[,1],ci[1]),
+            ci.lower(results[,1],ci[1]),
+            
+            ci.upper(results[,1],ci[2]),
+            ci.lower(results[,1],ci[2]),
+            
+            ci.upper(results[,1],ci[3]),
+            ci.lower(results[,1],ci[3])
+            )
+            history <- rbind(left, v, right)
+        }
+        
+        return(history)
     }
 
-    return(history)
-  }
-
-  history<-  form.history(k,xseq,ev,ci)
-  if(!is.null(ev1)){
-    history1<- form.history(k,xseq,ev1,ci)
-  }else{
-    history1<-NULL
-  }
-
-  # This is for small sets that have been duplicated so as to have observable volume
-  if(k==1){
-    k<-3
-  }
-
-  # Specify x-axis length
-  all.xlim <- if (is.null(xlim))
+    history<-  form.history(k,xseq,ev,ci)
+    if(!is.null(ev1)){
+        history1<- form.history(k,xseq,ev1,ci)
+    }else{
+        history1<-NULL
+    }
+    
+    # This is for small sets that have been duplicated so as to have observable volume
+    if(k==1){
+        k<-3
+    }
+   
+    # Specify x-axis length
+    all.xlim <- if (is.null(xlim))
     c(min(c(history[, 1],history1[, 1])),max(c(history[, 1],history1[, 1])))
-  else
+    else
     xlim
-
-
-  # Specify y-axis length
-  all.ylim <-if (is.null(ylim))
+    
+    
+    # Specify y-axis length
+    all.ylim <-if (is.null(ylim))
     c(min(c(history[, -1],history1[, -1])),max(c(history[, -1],history1[, -1])))
-  else
+    else
     ylim
+    
 
-
-  # Define xlabel
-  if (is.null(xlab))
-    xlab <- paste("Range of",xname)
-
-  if (is.null(ylab))
+    # Define y label
+    if (is.null(ylab))
     ylab <- "Expected Values: E(Y|X)"
 
-  ## This is the plot
-
-  par(bty="n")
-
-  plot(x=history[, 1], y=history[, 2], type="l", xlim=all.xlim, ylim=all.ylim, main = main, sub = sub, xlab=xlab, ylab=ylab)
-
-  polygon(c(history[,1],history[k:1,1]),c(history[,7],history[k:1,8]),col=col[3],border="white")
-  polygon(c(history[,1],history[k:1,1]),c(history[,5],history[k:1,6]),col=col[2],border="gray90")
-  polygon(c(history[,1],history[k:1,1]),c(history[,3],history[k:1,4]),col=col[1],border="gray60")
-  polygon(c(history[,1],history[k:1,1]),c(history[,7],history[k:1,8]),col=NA,border="white")
-
-  if(!is.null(ev1)){
-  lines(x=history1[, 1], y=history1[, 2], type="l")
-
-  polygon(c(history1[,1],history1[k:1,1]),c(history1[,7],history1[k:1,8]),col=col[6],border="white")
-  polygon(c(history1[,1],history1[k:1,1]),c(history1[,5],history1[k:1,6]),col=col[5],border="gray90")
-  polygon(c(history1[,1],history1[k:1,1]),c(history1[,3],history1[k:1,4]),col=col[4],border="gray60")
-  polygon(c(history1[,1],history1[k:1,1]),c(history1[,7],history1[k:1,8]),col=NA,border="white")
-
-  }
-
+    
+    ## This is the plot
+    
+    par(bty="n")
+    centralx<-history[,1]
+    centraly<-history[,2]
 
-  ## This is the legend
 
-  if(is.null(legpos)){
-    if(leg==1){
-      legpos<-c(.91,.04,.2,.05)
-    }else if(leg==2){
-      legpos<-c(.09,.04,.2,.05)
-    }else if(leg==3){
-      legpos<-c(.09,.04,.8,.05)
+    if(is.null(discont)){
+        gotok <- k
     }else{
-      legpos<-c(.91,.04,.8,.05)
+        gotok <- sum(xseq < discont)
+        if((gotok<2) | (gotok>(k-2))){
+            cat("Warning: Discontinuity is located at the edge of, or outside, the range of the x-axis.\n")
+            gotok<-k   
+            discont<-NULL 
+        }
+        if(gotok<k){
+            gotokp1<- gotok+1
+            centralx<-c(centralx[1:gotok], NA, centralx[(gotok+1):length(centralx)])
+            centraly<-c(centraly[1:gotok], NA, centraly[(gotok+1):length(centraly)])
+        }
     }
-  }
 
-  lx<-min(all.xlim)+ legpos[1]*(max(all.xlim)- min(all.xlim))
-  hx<-min(all.xlim)+ (legpos[1]+legpos[2])*(max(all.xlim)- min(all.xlim))
+    plot(x=centralx, y=centraly, type="l", xlim=all.xlim, ylim=all.ylim, main = main, sub = sub, xlab=xlab, ylab=ylab)
 
-  deltax<-(hx-lx)*.1
+    polygon(c(history[1:gotok,1],history[gotok:1,1]),c(history[1:gotok,7],history[gotok:1,8]),col=col[3],border="white")
+    polygon(c(history[1:gotok,1],history[gotok:1,1]),c(history[1:gotok,5],history[gotok:1,6]),col=col[2],border="gray90")
+    polygon(c(history[1:gotok,1],history[gotok:1,1]),c(history[1:gotok,3],history[gotok:1,4]),col=col[1],border="gray60")
+    polygon(c(history[1:gotok,1],history[gotok:1,1]),c(history[1:gotok,7],history[gotok:1,8]),col=NA,border="white")
 
-  my<-min(all.ylim) +legpos[3]*min(max(all.ylim) - min(all.ylim))
-  dy<-legpos[4]*(max(all.ylim) - min(all.ylim))
-
-
-  lines(c(hx+deltax,hx+2*deltax,hx+2*deltax,hx+deltax),c(my+3*dy,my+3*dy,my-3*dy,my-3*dy),col=legcol)
-  lines(c(hx+3*deltax,hx+4*deltax,hx+4*deltax,hx+3*deltax),c(my+1*dy,my+1*dy,my-1*dy,my-1*dy),col=legcol)
-  lines(c(lx-deltax,lx-2*deltax,lx-2*deltax,lx-deltax),c(my+2*dy,my+2*dy,my-2*dy,my-2*dy),col=legcol)
-  lines(c(lx-5*deltax,lx),c(my,my),col="white",lwd=3)
-  lines(c(lx-5*deltax,lx),c(my,my),col=legcol)
-  lines(c(lx,hx),c(my,my))
-
-  polygon(c(lx,lx,hx,hx),c(my-3*dy,my+3*dy,my+3*dy,my-3*dy),col=col[3],border="white")
-  polygon(c(lx,lx,hx,hx),c(my-2*dy,my+2*dy,my+2*dy,my-2*dy),col=col[2],border="gray90")
-  polygon(c(lx,lx,hx,hx),c(my-1*dy,my+1*dy,my+1*dy,my-1*dy),col=col[1],border="gray60")
-  polygon(c(lx,lx,hx,hx),c(my-3*dy,my+3*dy,my+3*dy,my-3*dy),col=NA,border="white")
-
-  text(lx,my,labels="median",pos=2,cex=0.5,col=legcol)
-  text(lx,my+2*dy,labels=paste("ci",ci[2],sep=""),pos=2,cex=0.5,col=legcol)
-  text(hx,my+1*dy,labels=paste("ci",ci[1],sep=""),pos=4,cex=0.5,col=legcol)
-  text(hx,my+3*dy,labels=paste("ci",ci[3],sep=""),pos=4,cex=0.5,col=legcol)
+    if(!is.null(discont)){
+        polygon(c(history[gotokp1:k,1],history[k:gotokp1,1]),c(history[gotokp1:k,7],history[k:gotokp1,8]),col=col[3],border="white")
+        polygon(c(history[gotokp1:k,1],history[k:gotokp1,1]),c(history[gotokp1:k,5],history[k:gotokp1,6]),col=col[2],border="gray90")
+        polygon(c(history[gotokp1:k,1],history[k:gotokp1,1]),c(history[gotokp1:k,3],history[k:gotokp1,4]),col=col[1],border="gray60")
+        polygon(c(history[gotokp1:k,1],history[k:gotokp1,1]),c(history[gotokp1:k,7],history[k:gotokp1,8]),col=NA,border="white")
+        abline(v=discont, lty=5, col="grey85")
+    }
+  
+    if(!is.null(ev1)){
+
+        lines(x=history1[1:gotok, 1], y=history1[1:gotok, 2], type="l")
+        if(!is.null(discont)){
+            lines(x=history1[gotokp1:k, 1], y=history1[gotokp1:k, 2], type="l")
+        }
+
+        polygon(c(history1[1:gotok,1],history1[gotok:1,1]),c(history1[1:gotok,7],history1[gotok:1,8]),col=col[6],border="white")
+        polygon(c(history1[1:gotok,1],history1[gotok:1,1]),c(history1[1:gotok,5],history1[gotok:1,6]),col=col[5],border="gray90")
+        polygon(c(history1[1:gotok,1],history1[gotok:1,1]),c(history1[1:gotok,3],history1[gotok:1,4]),col=col[4],border="gray60")
+        polygon(c(history1[1:gotok,1],history1[gotok:1,1]),c(history1[1:gotok,7],history1[gotok:1,8]),col=NA,border="white")
+
+        if(!is.null(discont)){
+            polygon(c(history1[gotokp1:k,1],history1[k:gotokp1,1]),c(history1[gotokp1:k,7],history1[k:gotokp1,8]),col=col[6],border="white")
+            polygon(c(history1[gotokp1:k,1],history1[k:gotokp1,1]),c(history1[gotokp1:k,5],history1[k:gotokp1,6]),col=col[5],border="gray90")
+            polygon(c(history1[gotokp1:k,1],history1[k:gotokp1,1]),c(history1[gotokp1:k,3],history1[k:gotokp1,4]),col=col[4],border="gray60")
+            polygon(c(history1[gotokp1:k,1],history1[k:gotokp1,1]),c(history1[gotokp1:k,7],history1[k:gotokp1,8]),col=NA,border="white")
+        }        
+    }
+  
+    ## This is the legend
+    
+    if(is.null(legpos)){
+        if(leg==1){
+            legpos<-c(.91,.04,.2,.05)
+        }else if(leg==2){
+            legpos<-c(.09,.04,.2,.05)
+        }else if(leg==3){
+            legpos<-c(.09,.04,.8,.05)
+        }else{
+            legpos<-c(.91,.04,.8,.05)
+        }
+    }
+    
+    lx<-min(all.xlim)+ legpos[1]*(max(all.xlim)- min(all.xlim))
+    hx<-min(all.xlim)+ (legpos[1]+legpos[2])*(max(all.xlim)- min(all.xlim))
+    
+    deltax<-(hx-lx)*.1
+    
+    my<-min(all.ylim) + legpos[3]*(max(all.ylim) - min(all.ylim))
+    dy<-legpos[4]*(max(all.ylim) - min(all.ylim))
+    
+    
+    lines(c(hx+deltax,hx+2*deltax,hx+2*deltax,hx+deltax),c(my+3*dy,my+3*dy,my-3*dy,my-3*dy),col=legcol)
+    lines(c(hx+3*deltax,hx+4*deltax,hx+4*deltax,hx+3*deltax),c(my+1*dy,my+1*dy,my-1*dy,my-1*dy),col=legcol)
+    lines(c(lx-deltax,lx-2*deltax,lx-2*deltax,lx-deltax),c(my+2*dy,my+2*dy,my-2*dy,my-2*dy),col=legcol)
+    lines(c(lx-5*deltax,lx),c(my,my),col="white",lwd=3)
+    lines(c(lx-5*deltax,lx),c(my,my),col=legcol)
+    lines(c(lx,hx),c(my,my))
+    
+    polygon(c(lx,lx,hx,hx),c(my-3*dy,my+3*dy,my+3*dy,my-3*dy),col=col[3],border="white")
+    polygon(c(lx,lx,hx,hx),c(my-2*dy,my+2*dy,my+2*dy,my-2*dy),col=col[2],border="gray90")
+    polygon(c(lx,lx,hx,hx),c(my-1*dy,my+1*dy,my+1*dy,my-1*dy),col=col[1],border="gray60")
+    polygon(c(lx,lx,hx,hx),c(my-3*dy,my+3*dy,my+3*dy,my-3*dy),col=NA,border="white")
+    
+    text(lx,my,labels="median",pos=2,cex=0.5,col=legcol)
+    text(lx,my+2*dy,labels=paste("ci",ci[2],sep=""),pos=2,cex=0.5,col=legcol)
+    text(hx,my+1*dy,labels=paste("ci",ci[1],sep=""),pos=4,cex=0.5,col=legcol)
+    text(hx,my+3*dy,labels=paste("ci",ci[3],sep=""),pos=4,cex=0.5,col=legcol)
 }
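 
 ## A minimal sketch of how the legpos fractions map to data coordinates,
 ## with made-up axis limits; legpos is c(x-start, width, y-center, height)
 ## expressed as fractions of the plotted ranges:
 ## all.xlim <- c(0, 10)
 ## legpos   <- c(.91, .04, .2, .05)
 ## lx <- min(all.xlim) + legpos[1] * diff(range(all.xlim))                # 9.1
 ## hx <- min(all.xlim) + (legpos[1] + legpos[2]) * diff(range(all.xlim))  # 9.5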
 
-#' Method for plotting pooled simulations by confidence intervals
+#' Receiver Operating Characteristic Plots
 #'
-#' Plot pooled simulated quantities of interest.
-#' @usage \method{plot}{pooled.sim}(x, qi="ev", var=NULL,  ...,  legcol="gray20", col=NULL, leg=1, legpos=NULL)
-#' @S3method plot pooled.sim
-#' @param x A `sim' object
-#' @param qi a character-string specifying the quantity of interest to plot
-#' @param var The variable to be used on the x-axis. Default is the variable
-#' across all the chosen values with smallest nonzero variance
-#' @param ... Parameters to be passed to the `truehist' function which is 
-#' implicitly called for numeric simulations
-#' @param legcol ``legend color'', a valid color used for plotting the line
-#' colors in the legend
-#' @param col a valid vector of colors of at least length 3 to use to color the
-#' confidence intervals
-#' @param leg ``legend position'', an integer from 1 to 4, specifying the
-#' position of the legend. 1 to 4 correspond to ``SE'', ``SW'', ``NW'', and
-#' ``NE'' respectively
-#' @param legpos ``legend type'', exact coordinates and sizes for legend.
-#' Overrides the ``leg'' argument
-#' @return the current graphical parameters. This is subject to change in future
-#' implementations of Zelig
-#' @author James Honaker, adapted by Matt Owen \email{mowen@@iq.harvard.edu}
-plot.pooled.sim <- plot.ci
-#' Method for plotting simulations
+#' The 'rocplot' command generates a receiver operating characteristic plot to
+#' compare the in-sample (default) or out-of-sample fit for two logit or probit
+#' regressions.
 #'
-#' Plot simulated quantities of interest.
-#' @usage \method{plot}{sim}(x, ...)
-#' @S3method plot sim
-#' @param x a `sim' object
-#' @param ... parameters to be passed to the `truehist' function which is 
-#' implicitly called for numeric simulations
-#' @return nothing
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-plot.sim <- function (x, ...) {
-
-  env <- tryCatch(
-    asNamespace(x$package.name),
-    # Fall back to the global environment if the namespace cannot be found
-    error = function (e) { 
-      warning("Could not find the model's package namespace; searching the global environment instead.")
-      globalenv()
-    }
-    )
-
-  # If the package provides its own plot.simulations method, delegate to it
-  if (exists("plot.simulations", envir = env, mode="function")) {
-    # Get the simulation, because we know it exists
-    .plotter <- get("plot.simulations", envir = env, mode="function")
-
-    # Pass to a temporary variable to improve the visibility of the traceback
-    # if there is an error
-    res <- .plotter(x, ...)
-
-    # Return object (whatever it is)
-    return(invisible(res))
-  }
-
-  # Otherwise we just use this fall-back
-  old.par <- par(no.readonly = TRUE)
-
-  # Count the quantities of interest that are neither NULL nor all-NA
-  count <- 0
-  for (i in 1:length(names(x$qi))) {
-    if (!all(is.na(x$qi[[i]])) & !is.null(x$qi[[i]]))
-      count <- count + 1
-  }
-  total.qis<-max(count,1)
-  #total.qis <- length(names(x$qi))
-
-    
-  palette <- rainbow(total.qis)
-  total.cols <- 2
-  total.rows <- ceiling(total.qis/total.cols)
-  if(total.rows==1){
-    total.rows<-2
-    total.cols<-1
-  }
-
-
-  # Colors!
-  color.blue <- rgb(100, 149, 237, maxColorValue=255)
-
-  # Repeat the final index when the QI count is odd, so the last plot fills the leftover layout cell
-  vals <- if (total.qis %% 2) {
-    c(1:total.qis, total.qis)
+#' @usage
+#' rocplot(z1, z2,
+#' cutoff = seq(from=0, to=1, length=100), lty1="solid",
+#' lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
+#' col1=par("col"), col2=par("col"),
+#' main="ROC Curve",
+#' xlab = "Proportion of 1's Correctly Predicted",
+#' ylab="Proportion of 0's Correctly Predicted",
+#' plot = TRUE, 
+#' ...
+#' )
+#'
+#' @param z1 first model
+#' @param z2 second model
+#' @param cutoff A vector of cut-off values between 0 and 1, at which to
+#'   evaluate the proportion of 0s and 1s correctly predicted by the first and
+#'   second model.  By default, this is 100 increments between 0 and 1
+#'   inclusive.
+#' @param lty1 the line type of the first model (defaults to 'solid')
+#' @param lty2 the line type of the second model (defaults to 'dashed')
+#' @param lwd1 the line width of the first model (defaults to 1)
+#' @param lwd2 the line width of the second model (defaults to 1)
+#' @param col1 the color of the first model (defaults to 'black')
+#' @param col2 the color of the second model (defaults to 'black')
+#' @param main a title for the plot (defaults to "ROC Curve")
+#' @param xlab a label for the X-axis
+#' @param ylab a label for the Y-axis
+#' @param plot whether to generate a plot to the selected device
+#' @param \dots additional parameters to be passed to the plot
+#' @return if plot is TRUE, rocplot simply generates a plot. Otherwise, a list
+#'   with the following is produced:
+#'   \item{roc1}{a matrix containing a vector of x-coordinates and
+#'     y-coordinates corresponding to the proportion of ones and zeros
+#'     correctly predicted by the first model.}
+#'   \item{roc2}{a matrix containing a vector of x-coordinates and
+#'     y-coordinates corresponding to the proportion of ones and zeros
+#'     correctly predicted by the second model.}
+#'   \item{area1}{the area under the first ROC curve, calculated using
+#'     Riemann sums.}
+#'   \item{area2}{the area under the second ROC curve, calculated using
+#'     Riemann sums.}
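+#' @examples
+#' \dontrun{
+#' ## A minimal sketch, assuming Zelig's turnout data and two fitted logit
+#' ## models (the variable choices here are illustrative):
+#' data(turnout)
+#' z1 <- zelig(vote ~ age, model = "logit", data = turnout)
+#' z2 <- zelig(vote ~ age + educate, model = "logit", data = turnout)
+#' rocplot(z1, z2)                       # draw both ROC curves
+#' out <- rocplot(z1, z2, plot = FALSE)  # or return the curves and areas
+#' out$area1
+#' }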
+#' @export
+#" @author Kosuke Imai and Olivia Lau
+rocplot <- function(z1, z2,
+                    cutoff = seq(from=0, to=1, length=100), lty1="solid",
+                    lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
+                    col1=par("col"), col2=par("col"),
+                    main="ROC Curve",
+                    xlab = "Proportion of 1's Correctly Predicted",
+                    ylab="Proportion of 0's Correctly Predicted",
+                    plot = TRUE, 
+                    ...) {
+  y1 <- z1$data[as.character(z1$formula[[2]])]
+  y2 <- z2$data[as.character(z2$formula[[2]])]
+  fitted1 <- fitted(z1)[[1]]
+  fitted2 <- fitted(z2)[[1]]
+  roc1 <- roc2 <- matrix(NA, nrow = length(cutoff), ncol = 2)
+  colnames(roc1) <- colnames(roc2) <- c("ones", "zeros")
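+  # At each cutoff, the "ones" column records the share of observed 1s whose
+  # fitted values reach the cutoff (sensitivity), and the "zeros" column the
+  # share of observed 0s whose fitted values fall below it (specificity).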
+  for (i in 1:length(cutoff)) {
+    roc1[i,1] <- mean(fitted1[y1==1] >= cutoff[i]) 
+    roc2[i,1] <- mean(fitted2[y2==1] >= cutoff[i])
+    roc1[i,2] <- mean(fitted1[y1==0] < cutoff[i])
+    roc2[i,2] <- mean(fitted2[y2==0] < cutoff[i])
+  }
+  if (plot) {
+    plot(0:1, 0:1, type = "n", xaxs = "i", yaxs = "i",
+         main=main, xlab=xlab, ylab=ylab, ...)
+    lines(roc1, lty = lty1, lwd = lwd1, col=col1)
+    lines(roc2, lty = lty2, lwd = lwd2, col=col2)
+    abline(1, -1, lty = "dotted")
   }
   else {
-    1:total.qis
-  }
-
-  # Construct layout
-  layout(matrix(vals, total.rows, total.cols, byrow=TRUE))
-
-  k <- 1
-  for (title in names(x$qi)) {
-    simulations.plot(x$qi[[title]], main = title, col = palette[k], line.col = "black")
-    k <- k + 1
-  }
-
-
-  # Restore and return the previous graphical parameters
-  return(par(old.par))
-}
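-
-## Layout sketch: with three QIs the index vector becomes c(1, 2, 3, 3), so
-## the third plot spans the bottom row of the 2x2 grid, i.e. the effect of
-## layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))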
-
-#' @S3method plot sim.cloglog.net
-plot.sim.cloglog.net <- function (x, ...) {
-
-  env <- tryCatch(
-    asNamespace(x$package.name),
-    error = function (e) { 
-      warning("")
-      globalenv()
+    area1 <- area2 <- array()
+    for (i in 2:length(cutoff)) {
+      area1[i-1] <- (roc1[i,2] - roc1[(i-1),2]) * roc1[i,1] 
+      area2[i-1] <- (roc2[i,2] - roc2[(i-1),2]) * roc2[i,1] 
     }
-  )
-
-  # If the package provides its own plot.simulations method, delegate to it
-  if (exists("plot.simulations", envir = env, mode="function")) {
-    # Get the simulation, because we know it exists
-    .plotter <- get("plot.simulations", envir = env, mode="function")
-
-    # Pass to a temporary variable to improve the visibility of the traceback
-    # if there is an error
-    res <- .plotter(x, ...)
-
-    # Return object (whatever it is)
-    return(invisible(res))
+    return(list(roc1 = roc1, 
+                roc2 = roc2,
+                area1 = sum(na.omit(area1)),
+                area2 = sum(na.omit(area2))))
   }
+}
 
-  # Otherwise we just use this fall-back
-  old.par <- par(no.readonly = TRUE)
 
-  # Some numbers we use to make things
-  total.qis <- length(names(x$qi))
-  palette <- rainbow(total.qis)
-  total.cols <- 2
-  total.rows <- ceiling(total.qis/total.cols)
+#' Plot Autocorrelation Function from Zelig QI object
+#' @keywords internal
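+#' @examples
+#' \dontrun{
+#' ## A minimal sketch, assuming a list shaped like the QI object this
+#' ## internal helper expects (the numbers are made up):
+#' z <- list(expected.acf = c(1, 0.6, 0.3, 0.1),
+#'           ci.acf = list(ci.upper = c(1, 0.8, 0.5, 0.3),
+#'                         ci.lower = c(1, 0.4, 0.1, -0.1)))
+#' zeligACFplot(z, omitzero = TRUE)
+#' }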
 
 
-  # Colors!
-  color.blue <- rgb(100, 149, 237, maxColorValue=255)
+zeligACFplot <- function(z, omitzero=FALSE,  barcol="black", epsilon=0.1, col=NULL, main="Autocorrelation Function", xlab="Period", ylab="Correlation of Present Shock with Future Outcomes", ylim=NULL, ...){
 
-  # Repeat the final index when the QI count is odd, so the last plot fills the leftover layout cell
-  vals <- if (total.qis %% 2) {
-    c(1:total.qis, total.qis)
-  }
-  else {
-    1:total.qis
-  }
-
-  # Construct layout
-  layout(matrix(vals, total.rows, total.cols, byrow=TRUE))
+    x <- z$expected.acf
+    ci.x <- z$ci.acf
 
-  k <- 1
-  for (title in names(x$qi)) {
-    simulations.plot(x$qi[[title]], main = title, col = palette[k], line.col = "black")
-    k <- k + 1
-  }
+    if(omitzero){
+        x<-x[2:length(x)]
+        ci.x$ci.upper <- ci.x$ci.upper[2:length(ci.x$ci.upper)]
+        ci.x$ci.lower <- ci.x$ci.lower[2:length(ci.x$ci.lower)]
+    }
 
+    if(is.null(ylim)){
+        ylim <- c(min(c(ci.x$ci.lower, 0, x)), max(c(ci.x$ci.upper, 0, x)))
+    }
+    if(is.null(col)){
+        col <- rgb(100,149,237,maxColorValue=255)
+    }
 
-  # Restore and return the previous graphical parameters
-  return(par(old.par))
+    bout <- barplot(x, col=col, main=main, xlab=xlab, ylab=ylab, ylim=ylim, ...)
+    
+    n <- length(x)
+    xseq <- as.vector(bout)
+    NAseq <- rep(NA, n)
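+
+    # Build NA-separated coordinate vectors so each lines() call below draws
+    # all whisker segments in one stroke; the NA after each segment lifts the pen.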
+
+    xtemp <- cbind( xseq-epsilon, xseq+epsilon, NAseq)
+    xtemp <- as.vector(t(xtemp))
+    ytemp <- cbind(ci.x$ci.upper, ci.x$ci.upper, NAseq)
+    ytemp <- as.vector(t(ytemp))
+    lines(x=xtemp ,y=ytemp, col=barcol)
+        
+    ytemp <- cbind(ci.x$ci.lower, ci.x$ci.lower, NAseq)
+    ytemp <- as.vector(t(ytemp))
+    lines(x=xtemp ,y=ytemp, col=barcol)
+        
+    xtemp <- cbind( xseq, xseq, NAseq)
+    xtemp <- as.vector(t(xtemp))
+    ytemp <- cbind(ci.x$ci.upper, ci.x$ci.lower, NAseq)
+    ytemp <- as.vector(t(ytemp))
+    lines(x=xtemp ,y=ytemp, col=barcol)
 }
 
 
-#' Plot Any Simulation from the Zelig Core Package
-#'
-#' Plots any simulation from the core package. In general, this function can
-#' \emph{neatly} plot simulations containing five of the popular ``quantities
-#' of interest'' - ``Expected Values: E(Y|X)'', ``Predicted Values: Y|X'',
-#' ``Expected Values (for X1): E(Y|X1)'', ``Predicted Values (for X1): Y|X1''
-#' and ``First Differences: E(Y|X1) - E(Y|X)''.
-#' @param x an object
-#' @param ... parameters passed to the ``plot'' and ``barplot'' functions
-#' @return the original graphical parameters
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-plot.simulations <- function (x, ...) {
-  # Save old state
-  old.par <- par(no.readonly=TRUE)
-    
-  # Quantities of Interest
-  qi <- x$qi
-
-  # Define Relevant quantity of interest titles that have special properties
-  ev.titles <- c('Expected Values: E(Y|X)', 'Expected Values: E(Y|X1)')
-  pv.titles <- c('Predicted Values: Y|X', 'Predicted Values: Y|X1')
-
-  # Determine whether two "Expected Values" qi's exist
-  both.ev.exist <- all(ev.titles %in% names(qi))
-  # Determine whether two "Predicted Values" qi's exist
-  both.pv.exist <- all(pv.titles %in% names(qi))
-
-  # The color for x is a salmon tone; x1 gets a pretty cornflower blue
-  color.x <- rgb(242, 122, 94, maxColorValue=255)
-  color.x1 <- rgb(100, 149, 237, maxColorValue=255)
-
-  # This mixes the above two colors, and converts the result into hexadecimal
-  color.mixed <- rgb(t(round((col2rgb(color.x) + col2rgb(color.x1))/2)), maxColorValue=255)
-
-  if (is.null(x$x)) {
-    return(par(old.par))
-  }
-  else if (is.null(x$x1) || is.na(x$x1)) {
-    panels <- matrix(1:2, 2, 1)
-
-    # The plotting device:
-    # +--------+
-    # |   1    |
-    # +--------+
-    # |   2    |
-    # +--------+
-  }
-  else {
-
-    panels <- matrix(c(1:5, 5), ncol=2, nrow=3, byrow = TRUE)
-
-    panels <- if (xor(both.ev.exist, both.pv.exist))
-      rbind(panels, c(6, 6))
-    else if (both.ev.exist && both.pv.exist)
-      rbind(panels, c(6, 7))
-    else
-      panels
-
-
-    # the plotting device:
-    #
-    # +-----------+    +-----------+
-    # |  1  |  2  |    |  1  |  2  |
-    # +-----+-----+    +-----+-----+
-    # |  3  |  4  |    |  3  |  4  |
-    # +-----+-----+ OR +-----+-----+
-    # |     5     |    |     5     |
-    # +-----------+    +-----------+
-    # |  6  |  7  |    |     6     |
-    # +-----+-----+    +-----+-----+
-  }
 
-  # Arrange the panels on the device
-  layout(panels)
 
-  titles <- list(
-    ev  = "Expected Values: E(Y|X)",
-    ev1 = "Expected Values: E(Y|X1)",
-    pv  = "Predicted Values: Y|X",
-    pv1 = "Predicted Values: Y|X1",
-    fd  = "First Differences: E(Y|X1) - E(Y|X)"
-    )
-  
-  # Plot each simulation
-  simulations.plot(qi[[titles$pv]], main = titles$pv, col = color.x, line.col = "black")
-  simulations.plot(qi[[titles$pv1]], main = titles$pv1, col = color.x1, line.col = "black")
-  simulations.plot(qi[[titles$ev]], main = titles$ev, col = color.x, line.col = "black")
-  simulations.plot(qi[[titles$ev1]], main = titles$ev1, col = color.x1, line.col = "black")
-  simulations.plot(qi[[titles$fd]], main = titles$fd, col = color.mixed, line.col = "black")
-
-  if (both.pv.exist) {
-    simulations.plot(
-      qi[["Predicted Values: Y|X"]],
-      qi[["Predicted Values: Y|X1"]],
-      main = "Comparison of Y|X and Y|X1",
-      # Note that we are adding transparency to this
-      col = paste(c(color.x, color.x1), "80", sep=""),
-      line.col = "black")
-  }
 
-  if (both.ev.exist) {
-    simulations.plot(
-      qi[["Expected Values: E(Y|X)"]],
-      qi[["Expected Values: E(Y|X1)"]],
-      main = "Comparison of E(Y|X) and E(Y|X1)",
-      # Note that we are adding transparency to this
-      col = paste(c(color.x, color.x1), "80", sep=""),
-      line.col = "black")
-  }
-
-  # Restore old state
-  par(old.par)
 
-  # Return old parameter invisibly
-  invisible(old.par)
-}
 
-
-plot.zelig.relogit <- function(x, xlab ="", user.par = FALSE, alt.col = "red",
-                               ylab = NULL, samples = 100, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if (dim(x$qi[[1]])[2] == 1) {
-    pr <- x$qi$pr
-    y0 <- 100 * sum(pr == 0)/length(pr)
-    y1 <- 100 * sum(pr == 1)/length(pr)
-    barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-            names.arg = c("Y = 0", "Y = 1"),
-            xlab = "Percentage of Simulations",
-            main = x$qi.name$pr, xlim = c(0, 100))
-    x$qi$pr <- x$qi.name$pr <- NULL
-    for (i in 1:(k-1)) {
-      qi <- as.vector(x$qi[[i]])
-      plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-    }    
-  }
-  else {
-    for (i in 1:k) {
-      qi <- x$qi[[i]]
-      main <- as.character(x$qi.name[i])
-      if (is.null(rownames(qi)))
-        rownames(qi) <- 1:dim(qi)[1]
-      idx <- as.integer(sample(rownames(qi), samples))
-      tmp <- qi[idx,,1]
-      xlim <- c(min(qi[,1,1]), max(qi[,2,1]))
-      if (is.null(ylab))
-        ylab <- paste("Observations (n = ", samples, ")", sep = "")
-      plot(xlim, type = "n", xlab = xlab, ylab = ylab,
-           main = main, ylim = c(0, 100), xlim = xlim, ...)
-      for (j in 1:nrow(tmp))
-        lines(c(tmp[j,1], tmp[j,2]), c(j,j), col = alt.col)
-      abline(v = mean(qi[,1,1]))
-      abline(v = mean(qi[,2,1]))
-    }
-  }
-  par(op)
-}
diff --git a/R/poisson.R b/R/poisson.R
deleted file mode 100644
index 7ae2f40..0000000
--- a/R/poisson.R
+++ /dev/null
@@ -1,116 +0,0 @@
-#' Interface between poisson model and Zelig
-#'
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param weights a numeric vector
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2poisson <- function(formula, weights=NULL, ..., data) {
-  z(
-    glm,
-    # .hook = "robust.glm.hook",
-    formula = formula,
-    weights = weights,
-    family  = poisson(),
-    model   = F,
-    data    = data
-    )
-}
-#' Param Method for the 'poisson' Zelig Model
-#' @note This method is used by the 'poisson' Zelig model
-#' @usage \method{param}{poisson}(obj, num=1000, ...)
-#' @S3method param poisson
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.poisson <- function (obj, num=1000, ...) {
-  list(
-       simulations = mvrnorm(num, mu=coef(.fitted), Sigma=vcov(.fitted)),
-       fam = poisson()
-       )
-}
-#' Compute quantities of interest for 'poisson' Zelig models
-#' @usage \method{qi}{poisson}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi poisson
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#'   though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#'   interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.poisson <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-  # Extract the simulated coefficients
-  coef <- coef(param)
-
-  # get the inverse link function
-  inverse <- linkinv(param)
-
-  # Compute the linear predictor, then apply the inverse link
-  eta <- coef %*% t(x)
-  theta <- matrix(inverse(eta), nrow=nrow(coef))
-
-  # Expected values are the Poisson means; predicted values are drawn below
-  ev <- theta
-  pr <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
-
-  # default values
-  ev1 <- pr1 <- fd <- NA
-
-  for (i in 1:ncol(ev))
-    pr[,i] <- rpois(nrow(ev), lambda = ev[,i])
-
-
-  if (!is.null(x1)) {
-
-    # quantities of interest
-    results <- qi(obj, x1, num=num, param=param)
-
-    # pass values over
-    ev1 <- results[["Expected Values: E(Y|X)"]]
-    pr1 <- results[["Predicted Values: Y|X"]]
-
-    # compute first differences
-    fd <- ev1 - ev
-  }
-
-  # Return quantities of interest
-  list("Expected Values: E(Y|X)"  = ev,
-       "Expected Values: E(Y|X1)" = ev1,
-       "Predicted Values: Y|X"    = pr,
-       "Predicted Values: Y|X1"   = pr1,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
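-
-## A minimal standalone sketch of the simulation logic above, with made-up
-## numbers (mvrnorm is from MASS; exp() is the inverse of the log link):
-## sims <- MASS::mvrnorm(1000, mu = c(0.5, 0.1), Sigma = diag(2) * 0.01)
-## x    <- c(1, 2)                          # intercept and one covariate value
-## ev   <- exp(sims %*% x)                  # expected counts, E(Y|X)
-## pr   <- rpois(length(ev), lambda = ev)   # predicted counts, Y|X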
-#' Describe the `poisson' model to Zelig
-#' @usage \method{describe}{poisson}(...)
-#' @S3method describe poisson
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.poisson <- function(...) {
-  # parameters object
-  parameters <- list(lambda = list(
-                       equations = c(1, 1),
-                       tags.allowed = FALSE,
-                       dep.vars = TRUE,
-                       exp.vars = TRUE
-                       )
-                     )
-
-  # return list
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2007,
-       category = "count",
-       parameters = parameters,
-       text = "Poisson Regression for Event Count Dependent Variables"
-       )
-}
diff --git a/R/poisson.bayes.R b/R/poisson.bayes.R
deleted file mode 100644
index afc5589..0000000
--- a/R/poisson.bayes.R
+++ /dev/null
@@ -1,90 +0,0 @@
-#' Interface between the Zelig Model poisson.bayes and the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2poisson.bayes <- function (
-                               formula, 
-                               burnin = 1000, mcmc = 10000, 
-                               verbose = 0, 
-                               ..., 
-                               data
-                               ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  if (missing(verbose))
-    verbose <- round((mcmc + burnin)/10)
-
-  list(
-       .function = "MCMCpoisson",
-       .hook = "MCMChook",
-
-       formula = formula,
-       data   = data,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-
-       # Most parameters can be simply passed forward
-       ...
-       )
-}
-
-#' @S3method param poisson.bayes
-param.poisson.bayes <- function(obj, num=1000, ...) {
-  list(
-       coef = coef(obj),
-       fam = poisson()
-       )
-}
-
-#' @S3method qi poisson.bayes
-qi.poisson.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-{
-
-  res1 <- poisson.ev(x, param)
-  res2 <- poisson.ev(x1, param)
-
-  list(
-       "Expected Value: E(Y|X)" = res1$ev,
-       "Predicted Value: Y|X" = res1$pv,
-       "Expected Value (for X1): E(Y|X1)" = res2$ev,
-       "Predicted Value (for X1): Y|X1" = res2$pv,
-       "First Differences: E(Y|X1) - E(Y|X)" = res2$ev - res1$ev
-       )
-}
-
-poisson.ev <- function (x, param) {
-  # If either of the parameters are invalid,
-  # Then return NA for both qi's
-  if (is.null(x) || is.na(x) || is.null(param))
-    return(list(ev=NA, pv=NA))
-
-  # Extract inverse-link and simulated parameters (respectively)
-  inv <- linkinv(param)
-  eta <- coef(param) %*% t(x)
-
-  # Give matrix identical rows/columns to the simulated parameters
-  ev <- pv <- matrix(NA, nrow(eta), ncol(eta))
-  dimnames(ev) <- dimnames(pv) <- dimnames(eta)
-
-  # Compute Expected Values
-  ev <- inv(eta)
-
-  # Compute Predicted Values
-  for (i in 1:ncol(ev))
-    pv[, i] <- rpois(length(ev[, i]), ev[, i])
-
-  list(ev=ev, pv=pv)
-}
-
-#' @S3method describe poisson.bayes
-describe.poisson.bayes <- function(...) {
-  list(
-       description  = "Bayesian Poisson Regression",
-       authors = c("Ben Goodrich", "Ying Lu"),
-       year = 2013
-       )
-}
diff --git a/R/poisson.gee.R b/R/poisson.gee.R
deleted file mode 100644
index 5545da9..0000000
--- a/R/poisson.gee.R
+++ /dev/null
@@ -1,71 +0,0 @@
-#' Interface between the Zelig Model poisson.gee and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param id a character-string specifying the column of the data-set to use
-#'   for clustering
-#' @param robust a logical specifying whether to robustly or naively compute
-#'   the covariance matrix. This parameter is ignored in the \code{zelig2}
-#'   method, and instead used in the \code{robust.hook} function, which
-#'   executes after the call to the \code{gee} function
-#' @param ... ignored parameters
-#' @param R a square-matrix specifying the correlation
-#' @param corstr a character-string specifying the correlation structure
-#' @param data a data.frame 
-#' @return a list specifying the call to the external model
-#' @export
-zelig2poisson.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {
-
-  loadDependencies("gee")
-
-  if (corstr == "fixed" && is.null(R))
-    stop("R must be defined")
-
-  # if id is a valid column-name in data, then we just need to extract the
-  # column and re-order the data.frame and cluster information
-  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
-    id <- data[, id]
-    data <- data[order(id), ]
-    id <- sort(id)
-  }
-
-  z(
-    .function = gee,
-    .hook = robust.gee.hook,
-
-    formula = formula,
-    id = id,
-    corstr = corstr,
-    family  = poisson(),
-    data = data,
-    R = R,
-    ...
-    )
-}
-
-#' @S3method param poisson.gee
-param.poisson.gee <- function(obj, num=1000, ...) {
-
-  # Extract the coefficient means for the multivariate normal draws
-  mu <- coef(obj)
-
-  # Extract the covariance matrix for the multivariate normal draws
-  Sigma <- vcov(obj)
-
-  #
-  list(
-       coef = mvrnorm(num, mu, Sigma),
-       fam = poisson()
-       )
-}
-
-#' @S3method qi poisson.gee
-qi.poisson.gee <- qi.gamma.gee
-
-#' @S3method describe poisson.gee
-describe.poisson.gee <- function(...) {
-  list(
-       authors = "Patrick Lam",
-       text = "General Estimating Equation for Poisson Regression",
-       year = 2011
-       )
-}
diff --git a/R/poisson.survey.R b/R/poisson.survey.R
deleted file mode 100644
index 8f995e4..0000000
--- a/R/poisson.survey.R
+++ /dev/null
@@ -1,155 +0,0 @@
-#' @export
-zelig2poisson.survey <- function(
-                               formula,
-                               weights=NULL, 
-                               ids=NULL,
-                               probs=NULL,
-                               strata = NULL,  
-                               fpc=NULL,
-                               nest = FALSE,
-                               check.strata = !nest,
-                               repweights = NULL,
-                               type,
-                               combined.weights=FALSE,
-                               rho = NULL,
-                               bootstrap.average=NULL, 
-                               scale=NULL,
-                               rscales=NULL,
-                               fpctype="fraction",
-                               return.replicates=FALSE,
-                               na.action="na.omit",
-                               start=NULL,
-                               etastart=NULL, 
-                               mustart=NULL,
-                               offset=NULL, 	      		
-                               model1=TRUE,
-                               method="glm.fit",
-                               x=FALSE,
-                               y=TRUE,
-                               contrasts=NULL,
-                               design=NULL,
-                               data
-                               ) {
-  loadDependencies("survey")
-
-  if (is.null(ids))
-    ids <- ~1
-
-  # the following lines designate the design
-  # NOTE: nothing truly special goes on here;
-  #       the below just makes sure the design is created correctly
-  #       for whether or not the replication weights are set
-  design <- if (is.null(repweights))
-    svydesign(
-              data=data,
-              ids=ids,
-              probs=probs,
-              strata=strata,
-              fpc=fpc,
-              nest=nest,
-              check.strata=check.strata,
-              weights=weights
-              )
-
-  else {
-    .survey.prob.weights <- weights
-    svrepdesign(
-                data=data,
-                repweights=repweights, 	
-                type=type,
-                weights=weights,
-                combined.weights=combined.weights, 
-                rho=rho,
-                bootstrap.average=bootstrap.average,
-                scale=scale,
-                rscales=rscales,
-                fpctype=fpctype,
-                fpc=fpc
-                )
-  }
-
-  
-  z(.function = svyglm,
-    formula = formula,
-    design  = design,
-    family  = poisson()
-    )
-}
-#' @S3method param poisson.survey
-param.poisson.survey <- function(obj, num=1000, ...) {
-  list(
-       simulations = mvrnorm(num, coef(obj), vcov(obj)),
-       alpha = NULL,
-
-       # note: assignment of link and link-inverse are
-       #       implicit when the family is assigned
-       fam   = poisson()
-       )
-}
-#' @S3method qi poisson.survey
-qi.poisson.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
-  model <- GetObject(obj)
-
-  coef <- coef(param)
-  alpha <- alpha(param)
-
-  eta <- coef %*% t(x)
-
-  link.inverse <- linkinv(param)
-
-  theta <- matrix(link.inverse(eta), nrow=nrow(coef))
-
-  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
-
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-
-  ev <- theta
-
-  for (k in 1:nrow(ev))
-    pr[k, ] <- rpois(length(ev[k, ]), lambda=ev[k, ])
-
-  ev1 <- pr1 <- fd <- NA
-
-  if (!is.null(x1)) {
-    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)),
-                            nrow = nrow(coef)
-                            )
-
-    fd <- ev1-ev
-  }
-
-  att.ev <- att.pr <- NA
-
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(coef)), nrow=nrow(coef), byrow=TRUE)
-
-    tmp.ev <- yvar - ev
-    tmp.pr <- yvar - pr
-
-    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow=nrow(coef))
-    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow=nrow(coef))
-  }
-
-
-  list(
-       "Expected Values: E(Y|X)" = ev,
-       "Expected Values for (X1): E(Y|X1)" = ev1,
-       "Predicted Values: Y|X" = pr,
-       "Predicted Values (for X1): Y|X1" = pr1,
-       "First Differences E(Y|X1)-E(Y|X)" = fd,
-       "Average Treatment Effect: Y-EV" = att.ev,
-       "Average Treatment Effect: Y-PR" = att.pr
-       )
-}
-#' @S3method describe poisson.survey
-describe.poisson.survey <- function(...) {
-  list(
-       authors = "Nicholas Carnes",
-       year = 2008,
-       description = "Survey-Weighted Poisson Regression for Continuous, Positive Dependent Variables"
-       )
-}
diff --git a/R/print.R b/R/print.R
deleted file mode 100644
index f7fa3c6..0000000
--- a/R/print.R
+++ /dev/null
@@ -1,446 +0,0 @@
-#' Print a Zelig Object
-#' @S3method print zelig
-print.zelig <- function (x, ...) {
-  name <- x$name
-  package.name <- x$package.name
-  call <- x$call
-
-  cat("Model Name: ", name, "\n")
-  cat("Package Name: ", package.name, "\n")
-  cat("Call:\n")
-  print(call)
-
-  # 
-  message("\nFor information about the fitted model, use the summary() function.")
-
-  # Return invisibly
-  invisible(x)
-}
-#' Print a Bundle of Data-sets
-#'
-#' @S3method print setx.mi
-#' @usage \method{print}{setx.mi}(x, ...)
-#' @param x a \code{setx} object to print
-#' @param ... ignored parameters
-#' @return the \code{setx} object (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.setx.mi <- function(x, ...) {
-  # Store size for readability
-  size <- length(x)
-
-  for (k in 1:size) {
-    # Print object
-    print(x[[k]])
-
-    # If this is not the last element, print a new-line
-    if (k < size)
-      cat("\n")
-  }
-
-  invisible(x)
-}
-#' Print values of `setx' objects
-#'
-#' Print a ``setx'' object in human-readable form.
-#' @usage \method{print}{setx}(x, ...)
-#' @S3method print setx
-#' @param x a `setx' object
-#' @param ... ignored parameters
-#' @return the value of x (invisibly)
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.setx <- function(x, ...) {
-  model <- x$name
-  formula <- x$formula
-  label <- x$label
-
-  cat("Call:\n")
-  print(x$call)
-
-  cat("Model name = ", model, "\n")
-  cat("Formula    = ")
-  print(formula)
-
-  cat("\nComplete data.frame:\n")
-  print(x$updated)
-
-  cat("\nModel Matrix (Design Matrix):\n")
-  print(x$matrix)
-
-  invisible()
-}
-#' @S3method print summary.setx
-print.summary.setx <- function (x, ...) {
-  cat("\nModel name =", x$model.name, "\n")
-  cat("Label      =", x$label, "\n")
-  cat("Formula    = ")
-  print(x$formula)
-
-  cat("\nCall:\n")
-  print(x$call)
-
-  cat("\nModel Matrix (Design Matrix):\n")
-  print(x$model.matrix)
-
-  invisible(x)
-}
-#' Print values of `sim' objects
-#' 
-#' Prints a `sim' object by coercing it to a list and printing that list.
-#' @usage \method{print}{sim}(x, ...)
-#' @S3method print sim
-#' @param x a `sim' object (ignored)
-#' @param ... ignored parameters
-#' @return NULL (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.sim <- function(x, ...) {
-  o <- x
-  class(o) <- 'list'
-  print(o)
-}
-#' Print a Summary MCMCZelig Object
-#'
-#' This method prints a summary object for \code{MCMCZelig} objects
-#' @param x an "MCMCZelig" object
-#' @param digits a numeric specifying the precision of the summary object
-#' @param ... ignored parameters
-#' @return a \code{summary.MCMCZelig} object
-#' @S3method print summary.MCMCZelig
-print.summary.MCMCZelig <- function(x, digits=max(3, getOption("digits") - 
-3), ...) {
-  cat("\nCall: ") 
-  print(x$call) 
-  cat("\n", "Iterations = ", x$start, ":", x$end, "\n", sep = "")
-  cat("Thinning interval =", x$thin, "\n")
-  cat("Number of chains =", x$nchain, "\n")
-  cat("Sample size per chain =", (x$end -
-  x$start)/x$thin + 1, "\n")
-  cat("\n", "Mean, standard deviation, and quantiles for marginal posterior distributions.", "\n")
-  print(round(x$summary, digits=digits))
-  cat("\n")
-}
-print.summary.glm.robust <-
-    function (x, digits = max(3, getOption("digits") - 3),
-	      symbolic.cor = x$symbolic.cor,
-	      signif.stars = getOption("show.signif.stars"), ...)
-{
-  class(x) <- "summary.glm"
-  print(x)
-  cat("\nRobust standard errors computed using", x$robust)
-  cat("\n")
-  invisible(x)
-}
-#' Print a Summary of a Set of Pooled Simulated Quantities of Interest
-#'
-#' Prints the summary information from a set of pooled simulated quantities of interest. This
-#' method assumes that quantities of interest are kept in a data type which can
-#' be used with ``rbind''.
-#' @usage \method{print}{summary.pooled.sim}(x, ...)
-#' @S3method print summary.pooled.sim
-#' @param x a ``summary.pooled.sim'' object, containing summarized information
-#' about simulated quantities of interest
-#' @param ... Optional parameters that will be passed onward to ``print.matrix''
-#' (the matrix printing function)
-#' @return a ``summary.pooled.sim'' object storing the quantities of interest
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.summary.pooled.sim <- function (x, ...) {
-  # los labels... kinda like spanish for "the labels"
-  # labels is function name in base, so we needed a name that said "labels,"
-  # without using "labels". You know?
-  los.labels <- x$labels
-  los.titles <- x$titles
-
-  # Pooled summarized data
-  for (title in los.titles) {
-
-    # This will implicitly become a matrix
-    m <- NULL
-
-    for (label in los.labels)
-      m <- rbind(m, x$stats[[label]][[title]])
-
-    rownames(m) <- paste("[", los.labels, "]", sep="")
-
-    cat(title, "\n")
-    print(m)
-    cat("\n\n")
-  }
-}
-#' Print Summary of a Rare-event Logistic Model
-#'
-#' Prints the summary of a rare-event logistic regression model.
-#' @usage
-#' \method{print}{summary.relogit}(x, digits = max(3, getOption("digits") - 3), ...)
-#' @S3method print summary.relogit
-#' @param x an ``relogit.summary'' object produced by the ``summary'' method.
-#' @param digits an integer specifying the number of digits of precision to
-#' specify
-#' @param ... parameters passed forward to the ``print.glm'' function
-#' @return x (invisibly)
-print.summary.relogit <- function(
-                                  x,
-                                  digits = max(3, getOption("digits") - 3),
-                                  ...
-
-                                  ) {
-  # Straight-forwardly print the model using glm's method
-  ## Was:
-  ##stats:::print.glm(x, digits = digits, ...)
-
-  ##  ":::" not allowed by CRAN
-  ## Copied from Stats Internals
-  ## Temporary Patch / Need to write print method now
-
-  print.relogitglm <- function (x, digits = max(3L, getOption("digits") - 3L), ...)
-  {
-    cat("\nCall:  ", paste(deparse(x$call), sep = "\n", collapse = "\n"), 
-        "\n\n", sep = "")
-    if (length(coef(x))) {
-        cat("Coefficients")
-        if (is.character(co <- x$contrasts)) 
-            cat("  [contrasts: ", apply(cbind(names(co), co), 
-                1L, paste, collapse = "="), "]")
-        cat(":\n")
-        print.default(format(x$coefficients, digits = digits), 
-            print.gap = 2, quote = FALSE)
-    }
-    else cat("No coefficients\n\n")
-    cat("\nDegrees of Freedom:", x$df.null, "Total (i.e. Null); ", 
-        x$df.residual, "Residual\n")
-    if (nzchar(mess <- naprint(x$na.action))) 
-        cat("  (", mess, ")\n", sep = "")
-    cat("Null Deviance:\t   ", format(signif(x$null.deviance, 
-        digits)), "\nResidual Deviance:", format(signif(x$deviance, 
-        digits)), "\tAIC:", format(signif(x$aic, digits)))
-    cat("\n")
-    invisible(x)
-}
-
-  print.relogitglm(x, digits = digits, ...)
-
-  #  Additional slots
-
-  # Prior correction
-  if (x$prior.correct) 
-    cat("\nPrior correction performed with tau =", x$tau, "\n")
-
-  # Weighting? Sure, if it exists, we'll print it.
-  if (x$weighting) 
-    cat("\nWeighting performed with tau =", x$tau, "\n")
-
-  # If there is bias-correction
-  if (x$bias.correct)
-    cat("Rare events bias correction performed\n")
-
-  # If robust errors are computed...
-  if (!is.null(x$robust))
-    cat("\nRobust standard errors computed using", x$robust, "\n")
-
-  # This is not a mutator assignment!
-  class(x) <- "summary.glm"
-
-  # Return object to be printed invisibly
-  invisible(x)  
-}
-#' Print Summary of a Rare-event Logistic Model
-#'
-#' Prints the lower and upper bound estimates from a rare-event logistic regression.
-#' @usage
-#' \method{print}{summary.relogit2}(x, digits = max(3, getOption("digits") - 3), ...)
-#' @S3method print summary.relogit2
-#' @param x the object to print
-#' @param digits an integer specifying the number of digits of precision
-#' @param ... ignored parameters
-#' @return x (invisibly)
-print.summary.relogit2 <- function(x,
-                                   digits = max(3, getOption("digits") - 3),
-                                  ...
-                                  ) {
-  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
-  print(x$lower.estimate)
-  print(x$upper.estimate)
-}
-#' Print Values of a Summarized ``sim'' Object
-#'
-#' Print values of simulated quantities of interest (stored in a ``summary.sim''
-#' object).
-#' @usage \method{print}{summary.sim}(x, ...)
-#' @S3method print summary.sim
-#' @param x a 'summary.sim' object
-#' @param ... ignored parameters
-#' @return the value of the `summary.sim' object (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.summary.sim <- function(x, ...) {
-  # Rename 'x' 'summary'
-  summary <- x
-
-  obj <- summary$zeligcall
-  model <- summary$model
-  x <- summary$x
-  x1 <- summary$x1
-  stats <- summary$stats
-  num <- summary$num
-
-  # Error if there are no statistics to display
-  if (is.null(stats))
-    stop("stats object cannot be NULL")
-
-  # new-line
-  cat("\n")
-
-  # Print model name
-  cat("Model: ", model, "\n")
-
-  # Print number of simulations
-  cat("Number of simulations: ", num, "\n")
-
-  # new-line
-  cat("\n")
-
-  # Display information about the X setx object
-  # This should probably be reconsidered in the future
-  if (!is.null(x$matrix)) {
-    cat("Values of X\n")
-    print(as.matrix(x$matrix))
-
-    # new-line
-    cat("\n")
-  }
-  else if (is.list(x$s.x)) {
-    # add special hooks here?
-  }
-
-  # Display information about the X1 setx object
-  # This should probably be reconsidered in the future
-  if (!is.null(x1$matrix)) {
-    cat("Values of X1\n")
-    print(as.matrix(x1$matrix))
-
-    # new-line
-    cat("\n")
-  }
-
-  # Decrementing the size of the list will give us an easy way to print
-  size <- length(stats)
-
-  # Loop across all qi's
-  for (key in names(stats)) {
-    # Create variable for code clarity
-    val <- stats[[key]]
-
-    if (!is.qi(val))
-      next
-
-    # Display Title
-    cat(key, "\n")
-
-    # Round value if numeric
-    if (is.numeric(val))
-      print(round(val*(1000))/1000)
-
-    # Simply print if anything else
-    else
-      print(val)
-
-    # Print a new-line between qi's; the assignment inside the condition
-    # decrements the counter and stops after the final one
-    if (size <- size - 1) {
-      cat("\n")
-    }
-  }
-
-  # Return invisibly
-  invisible(x)
-}
-#' Print Multiply Imputed Simulations Summary
-#'
-#' Prints summary information about Multiply Imputed Fits
-#' @usage \method{print}{summarySim.MI}(x, digits=3, ...)
-#' @S3method print summarySim.MI
-#' @param x a 'summarySim.MI' object
-#' @param digits an integer specifying the number of digits of precision to
-#'   print
-#' @param ... ignored parameters
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.summarySim.MI <- function(x, digits=3, ...) {
-  for (qi.name in names(x)) {
-    if (!is.valid.qi.list(x[[qi.name]]))
-      next
-
-    summed.qi <- qi.summarize(qi.name, x[[qi.name]])
-    print(summed.qi)
-    cat("\n")
-  }
-
-  invisible(x)
-}
-
-#' Row-bind Matrices and Lists
-#' @param x a list or a matrix
-#' @param y a list or a matrix
-#' @return a matrix
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.bind <- function (x, y) {
-
-  # Get names for future columns
-
-  if (!is.matrix(x))
-    x <- matrix(x, nrow=1, ncol=length(x), dimnames=list(NULL, names(x)))
-
-  if (missing(y))
-    return(x)
-
-  if (!is.matrix(y))
-    y <- matrix(y, nrow=1, ncol=length(y), dimnames=list(NULL, names(y)))
-
-  names <- unique(c(colnames(x), colnames(y)))
-
-  ncol <- length(names)
-
-  X <- matrix(NA, nrow=nrow(x), ncol=ncol, dimnames=list(NULL, names))
-  Y <- matrix(NA, nrow=nrow(y), ncol=ncol, dimnames=list(NULL, names))
-
-  X[, colnames(x)] <- x
-  Y[, colnames(y)] <- y
-
-  rbind(X, Y)
-}
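-
-## A minimal sketch of what .bind does with made-up named vectors: columns
-## are aligned by name and missing cells become NA:
-## .bind(c(a = 1, b = 2), c(b = 3, c = 4))
-##       a b  c
-## [1,]  1 2 NA
-## [2,] NA 3  4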
-
-#' Check If Object Is a List of Valid Quantities of Interest
-#' @param x an object to be tested
-#' @return TRUE or FALSE
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-is.valid.qi.list <- function (x) {
-
-  # if it is not a list or that list has no entries
-  if (!(is.list(x) && length(x)))
-    return(FALSE)
-
-  # fail if any entry is an empty matrix or an empty list
-
-  for (val in x) {
-
-    if (is.matrix(val) && !(nrow(val) && ncol(val)))
-      return(FALSE)
-
-    else if (is.list(val) && !length(val))
-      return(FALSE)
-  }
-
-  TRUE
-}
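-
-## e.g. is.valid.qi.list(list(matrix(1:4, 2, 2)))  # TRUE
-##      is.valid.qi.list(list())                   # FALSE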
-#' Print values of ``zelig'' objects
-#'
-#' Print the zelig object as a list
-#' @usage \method{print}{zelig}(x, ...)
-#' @S3method print zelig
-#' @param x a `zelig' object
-#' @param ... ignored parameters
-#' @return the `zelig' object (invisibly)
-#' @export 
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-print.zelig <- function(x, ...) {
-  class(x) <- "list"
-  print(x)
-}
diff --git a/R/probit.R b/R/probit.R
deleted file mode 100644
index 953ee05..0000000
--- a/R/probit.R
+++ /dev/null
@@ -1,73 +0,0 @@
-#' Interface between probit model and Zelig
-#'
-#' This function is exclusively for use by the `zelig' function
-#' @param formula a formula
-#' @param weights a numeric vector
-#' @param ... ignored parameters
-#' @param data a data.frame
-#' @return a list to be coerced into a zelig.call object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig2probit <- function(formula, weights=NULL, ..., data)
-  z(
-    glm,
-    # .hook = "robust.glm.hook",
-    formula = formula,
-    weights = weights,
-    family  = binomial(link="probit"),
-    model   = F,
-    data    = data
-    )
-#' Param Method for the 'probit' Zelig Model
-#' @note This method is used by the 'probit' Zelig model
-#' @usage \method{param}{probit}(obj, num=1000, ...)
-#' @S3method param probit
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.probit <- function(obj, num=1000, ...) {
-  list(
-       simulations = mvrnorm(n=num, mu=coef(.fitted), Sigma=vcov(.fitted)),
-       alpha = NULL,
-       fam = binomial(link="probit")
-       )
-}
-#' Compute quantities of interest for 'probit' Zelig models
-#' @usage \method{qi}{probit}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi probit
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#'   though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#'   interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.probit <- qi.logit
-#' Describe the `probit' model to Zelig
-#' @usage \method{describe}{probit}(...)
-#' @S3method describe probit
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-describe.probit <- function(...){
-  parameters <-list(mu = list(
-                      equations = c(1,1),
-                      tags.allowed = FALSE,
-                      dep.vars = TRUE,
-                      exp.vars = TRUE
-                      )
-                    )
-  
-  # return
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2007,
-       category = "dichotomous",
-       parameters = parameters,
-       text = "Probit Regression for Dichotomous Dependent Variables"
-       )
-}
diff --git a/R/probit.bayes.R b/R/probit.bayes.R
deleted file mode 100644
index db10cbc..0000000
--- a/R/probit.bayes.R
+++ /dev/null
@@ -1,48 +0,0 @@
-#' @export
-zelig2probit.bayes <- function (
-                               formula, 
-                               burnin = 1000, mcmc = 10000, 
-                               verbose=0, 
-                               ..., 
-                               data
-                               ) {
-
-  loadDependencies("MCMCpack", "coda")
-
-  if (missing(verbose))
-    verbose <- round((mcmc + burnin)/10)
-
-  list(
-       .function = "MCMCprobit",
-       .hook = "MCMChook",
-
-       formula = formula,
-       data   = data,
-       burnin = burnin,
-       mcmc   = mcmc,
-       verbose= verbose,
-
-       # Most parameters can be simply passed forward
-       ...
-       )
-}
-
-#' @S3method param probit.bayes
-param.probit.bayes <- function(obj, num=1000, ...) {
-  list(
-       coef = coef(obj),
-       fam  = binomial(link="probit")
-       )
-}
-
-#' @S3method qi probit.bayes
-qi.probit.bayes <- qi.logit.bayes
-
-#' @S3method describe probit.bayes
-describe.probit.bayes <- function(...) {
-  list(
-       description  = "Bayesian Probit Regression for Dichotomous Dependent Variables",
-       authors = c("Ben Goodrich", "Ying Lu"),
-       year = 2013
-       )
-}
diff --git a/R/probit.gee.R b/R/probit.gee.R
deleted file mode 100644
index f12d259..0000000
--- a/R/probit.gee.R
+++ /dev/null
@@ -1,71 +0,0 @@
-#' Interface between the Zelig Model probit.gee and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param id a character-string specifying the column of the data-set to use
-#'   for clustering
-#' @param robust a logical specifying whether to robustly or naively compute
-#'   the covariance matrix. This parameter is ignored in the \code{zelig2}
-#'   method, and instead used in the \code{robust.hook} function, which
-#'   executes after the call to the \code{gee} function
-#' @param ... ignored parameters
-#' @param R a square-matrix specifying the correlation
-#' @param corstr a character-string specifying the correlation structure
-#' @param data a data.frame 
-#' @return a list specifying the call to the external model
-#' @export
-zelig2probit.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {
-
-  loadDependencies("gee")
-
-  if (corstr == "fixed" && is.null(R))
-    stop("R must be defined")
-
-  # if id is a valid column-name in data, then we just need to extract the
-  # column and re-order the data.frame and cluster information
-  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
-    id <- data[, id]
-    data <- data[order(id), ]
-    id <- sort(id)
-  }
-
-  z(
-    .function = gee,
-    .hook = robust.gee.hook,
-
-    formula = formula,
-    id = id,
-    corstr = corstr,
-    family  = binomial(link="probit"),
-    data = data,
-    R = R,
-    ...
-    )
-}
-
-#' @S3method param probit.gee
-param.probit.gee <- function(obj, num=1000, ...) {
-
-  # Extract the coefficient means for the multivariate normal draws
-  mu <- coef(obj)
-
-  # Extract the covariance matrix for the multivariate normal draws
-  Sigma <- vcov(obj)
-
-  #
-  list(
-       coef = mvrnorm(num, mu, Sigma),
-       fam = binomial(link="probit")
-       )
-}
-
-#' @S3method qi probit.gee
-qi.probit.gee <- qi.logit.gee
-
-#' @S3method describe probit.gee
-describe.probit.gee <- function(...) {
-  list(
-       authors = "Patrick Lam",
-       text = "General Estimating Equation for Poisson Regression",
-       year = 2011
-       )
-}
diff --git a/R/probit.survey.R b/R/probit.survey.R
deleted file mode 100644
index 1e3e982..0000000
--- a/R/probit.survey.R
+++ /dev/null
@@ -1,101 +0,0 @@
-#' @export
-zelig2probit.survey <- function(
-                               formula,
-                                weights=NULL, 
-                                ids=NULL,
-                                probs=NULL,
-                                strata = NULL,  
-                                fpc=NULL,
-                                nest = FALSE,
-                                check.strata = !nest,
-                                repweights = NULL,
-                                type,
-                                combined.weights=FALSE,
-                                rho = NULL,
-                                bootstrap.average=NULL, 
-                                scale=NULL,
-                                rscales=NULL,
-                                fpctype="fraction",
-                                return.replicates=FALSE,
-                                na.action="na.omit",
-                                start=NULL,
-                                etastart=NULL, 
-                                mustart=NULL,
-                                offset=NULL, 	      		
-                                model1=TRUE,
-                                method="glm.fit",
-                                x=FALSE,
-                                y=TRUE,
-                                contrasts=NULL,
-                                design=NULL,
-                                data
-                                ) {
-  loadDependencies("survey")
-
-  if (is.null(ids))
-    ids <- ~ 1
-
-  # the following lines designate the design
-  # NOTE: nothing truly special goes on here;
-  #       the below just makes sure the design is created correctly
-  #       for whether or not the replication weights are set
-  design <- if (is.null(repweights))
-    svydesign(
-              data=data,
-              ids=ids,
-              probs=probs,
-              strata=strata,
-              fpc=fpc,
-              nest=nest,
-              check.strata=check.strata,
-              weights=weights
-              )
-
-  else {
-    .survey.prob.weights <- weights
-    svrepdesign(
-                data=data,
-                repweights=repweights, 	
-                type=type,
-                weights=weights,
-                combined.weights=combined.weights, 
-                rho=rho,
-                bootstrap.average=bootstrap.average,
-                scale=scale,
-                rscales=rscales,
-                fpctype=fpctype,
-                fpc=fpc
-                )
-  }
-
-  
-  z(.function = svyglm,
-    formula = formula,
-    design  = design,
-    family  = quasibinomial(link="probit")
-    )
-}
-
-#' @S3method param probit.survey
-param.probit.survey <- function(obj, num=1000, ...) {
-  list(
-       simulations = mvrnorm(num, coef(obj), vcov(obj)),
-       alpha = NULL,
-
-       # note: assignment of link and link-inverse are
-       #       implicit when the family is assigned
-       fam   = binomial(link="probit")
-       )
-}
-
-#' @S3method qi probit.survey
-qi.probit.survey <- qi.logit.survey
-
-#' @S3method describe probit.survey
-describe.probit.survey <- function(...) {
-  list(
-       authors = "Nicholas Carnes",
-       year = 2008,
-       description = "Survey-Weighted Probit Regression for Continuous, Positive Dependent Variables"
-       )
-}
diff --git a/R/qi.R b/R/qi.R
deleted file mode 100644
index 4400685..0000000
--- a/R/qi.R
+++ /dev/null
@@ -1,38 +0,0 @@
-#' The \code{qi} function is used by developers to simulated quantities of
-#' interest. This method, as a result, is the most significant method of any
-#' Zelig statistical model.
-#'
-#' @title Generic Method for Computing Quantities of Interest
-#' @param obj a \code{zelig} object
-#' @param x a \code{setx} object or NULL
-#' @param x1 an optional \code{setx} object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#'          though this feature is currently supported by only a
-#'          handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of
-#'         quantities of interest with their simulations
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @note Run \code{example(qi)} to see a trivial version of this method.
-#' @examples
-#' qi.some.model <- function(obj, x=NULL, x1=NULL, y=NULL, param=NULL) {
-#'   list(
-#'        "Expected Values: E(Y|X)" = NA,
-#'        "Predicted Values: Y|X"   = NA
-#'        )
-#' }
-qi <- function(obj, x=NULL, x1=NULL, y=NULL, num, param=NULL) {
-  if (!inherits(obj, "zelig"))
-    stop('"obj" must be of a "zelig" object')
-
-  if (!(is.null(x) || inherits(x, "setx")))
-    stop('"x" must be a "setx" object"')
-
-  if (!(is.null(x1) || inherits(x1, "setx")))
-    stop('"x1" must be a "setx" object')
-
-  # then use the method
-  UseMethod("qi")
-}
diff --git a/R/qi.summarized.R b/R/qi.summarized.R
deleted file mode 100644
index 702ae75..0000000
--- a/R/qi.summarized.R
+++ /dev/null
@@ -1,103 +0,0 @@
-#' Constructor for QI Summarized Class
-#' This class takes an arbitrary number of the _same_ type of
-#' quantities of interest, labels them, then
-#' merges them into one simple printable block. In particular,
-#' this class determines which print function to use based on
-#' the type and size of data to be passed to the print function.
-#' @param title a character-string specifying the title of the QI
-#' @param x a list of summarized quantities of interest
-#' @param ... additional quantities of interest (the parameter names
-#'            will be used to label the entries)
-#' @return the list of QI's (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.summarize <- function (title, x, ...) {
-  qis <- append(x, list(...))
-
-  attr(qis, 'title') <- title
-
-  class(qis) <- 'qi.summarized'
-
-  for (key in names(qis)) {
-    val <- x[[key]]
-
-    if (is.matrix(val))
-      next
-
-    qis[[key]] <- matrix(val, nrow=1, ncol=length(val))
-  }
-
-  nrows <- Map(nrow, qis)
-
-  if (all(nrows == 1))
-    attr(qis, 'print') <- 'matrix'
-
-  else
-    attr(qis, 'print') <- 'list'
-
-  invisible(qis)
-}
-
-#' Print Method for Summarized Quantities of Interest
-#' @usage \method{print}{qi.summarized}(x, \dots)
-#' @S3method print qi.summarized
-#' @param x a 'summarized.qi' object
-#' @param ... parameters to be passed to the specific print functions
-#' @return x (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @seealso \link{special_print_MATRIX} and
-#'   \link{special_print_LIST}
-print.qi.summarized <- function (x, ...) {
-
-  if (attr(x, 'print') == 'matrix')
-    .print.qi.summarized.MATRIX(x, ...)
-
-  else if (attr(x, 'print') == 'list')
-    .print.qi.summarized.LIST(x, ...)
-
-  else
-    print(x, ...)
-}
-
-#' Method for Printing Summarized QI's in a Matrix Form
-#' @name special_print_MATRIX
-#' @aliases special_print_MATRIX .print.qi.summarized.MATRIX
-#' @note This function is used internally by Zelig
-#' @param x a 'summarized.qi' object
-#' @param ... additional parameters
-#' @return x (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.print.qi.summarized.MATRIX <- function (x, ...) {
-  m <- matrix(NA, 0, 0)
-
-  for (key in names(x)) {
-    m <- .bind(m, x[[key]])
-  }
-
-  rownames(m) <- names(x)
-
-  cat(attr(x, 'title'), "\n")
-  print(m, ...)
-
-  invisible(x)
-}
-
-#' Method for Printing Summarized QI's in a List Form
-#' @name special_print_LIST
-#' @aliases special_print_LIST .print.qi.summarized.LIST
-#' @note This function is used internally by Zelig
-#' @param x a 'summarized.qi' object
-#' @param ... additional parameters to be used by the 'print.matrix' method
-#' @return x (invisibly)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.print.qi.summarized.LIST <- function (x, ...) {
-
-  cat(attr(x, 'title'), "\n\n")
-
-  for (key in names(x)) {
-    cat('imputed data =  ', key, "\n")
-    print(x[[key]], ...)
-    cat("\n")
-  }
-  
-  invisible(x)
-}
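
The print method above dispatches on a 'print' attribute set at
construction time, not on class alone. A toy sketch of that
attribute-driven dispatch (hypothetical class name, not the Zelig
internals):

    toy <- list(a = matrix(1:3, nrow = 1), b = matrix(4:6, nrow = 1))
    attr(toy, "title") <- "Toy QI"
    attr(toy, "print") <- "matrix"
    class(toy) <- "toy.summarized"

    print.toy.summarized <- function (x, ...) {
      if (attr(x, "print") == "matrix") {
        m <- do.call(rbind, unclass(x))  # stack the one-row matrices
        rownames(m) <- names(x)
        cat(attr(x, "title"), "\n")
        print(m, ...)
      }
      else {
        cat(attr(x, "title"), "\n\n")
        for (key in names(x)) {
          cat(key, "\n")
          print(x[[key]], ...)
        }
      }
      invisible(x)
    }

    toy  # auto-printing dispatches on the class, then on the attribute
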
diff --git a/R/relogit.R b/R/relogit.R
deleted file mode 100644
index 78c8b7e..0000000
--- a/R/relogit.R
+++ /dev/null
@@ -1,379 +0,0 @@
-#' Fit a rare-event logistic model in Zelig
-#' 
-#' Fits a rare-event (``relogit'') model.
-#' @param formula a formula object
-#' @param data a data.frame in which to evaluate the formula
-#' @param tau the true population fraction of ones; a vector of length one or two, with values strictly between 0 and 1
-#' @param bias.correct a logical specifying whether to apply the rare-events bias correction
-#' @param case.control the case-control correction method: either "prior" or "weighting"
-#' @param ... additional parameters passed through to ``glm''
-#' @return a ``relogit'' ``glm'' object
-#' @export
-relogit <- function(
-                    formula,
-                    data = sys.parent(),
-                    tau = NULL,
-                    bias.correct = TRUE,
-                    case.control = "prior",
-                    ...
-                    ){
-  mf <- match.call()
-  mf$tau <- mf$bias.correct <- mf$case.control <- NULL
-  if (!is.null(tau)) {
-    tau <- unique(tau)
-    if (length(case.control) > 1)
-      stop("You can only choose one option for case control correction.")
-    ck1 <- grep("p", case.control)
-    ck2 <- grep("w", case.control)
-    if (length(ck1) == 0 & length(ck2) == 0)
-      stop("choose either case.control = \"prior\" ",
-           "or case.control = \"weighting\"")
-    if (length(ck2) == 0)
-      weighting <- FALSE
-    else 
-      weighting <- TRUE
-  }
-  else
-    weighting <- FALSE
-  if (length(tau) > 2)
-    stop("tau must be a vector of length less than or equal to 2")
-  else if (length(tau)==2) {
-    mf[[1]] <- relogit
-    res <- list()
-    mf$tau <- min(tau)
-    res$lower.estimate <- eval(as.call(mf))
-    mf$tau <- max(tau)
-    res$upper.estimate <- eval(as.call(mf))
-    res$formula <- formula
-    class(res) <- c("Relogit2", "Relogit")
-    return(res)
-  }
-  else {
-    mf[[1]] <- glm
-    mf$family <- binomial(link="logit")
-    y2 <- model.response(model.frame(mf$formula, data))
-    if (is.matrix(y2))
-      y <- y2[,1]
-    else
-      y <- y2
-    ybar <- mean(y)
-    if (weighting) {
-      w1 <- tau/ybar
-      w0 <- (1-tau)/(1-ybar)
-      wi <- w1*y + w0*(1-y)
-      mf$weights <- wi
-    }
-    res <- eval(as.call(mf))
-    res$call <- match.call(expand.dots = TRUE)
-    res$tau <- tau
-    X <- model.matrix(res)
-    ## bias correction
-    if (bias.correct){
-      pihat <- fitted(res)
-      if (is.null(tau)) # w_i = 1
-        wi <- rep(1, length(y))
-      else if (weighting) 
-        res$weighting <- TRUE
-      else {
-        w1 <- tau/ybar
-        w0 <- (1-tau)/(1-ybar)
-        wi <- w1*y + w0*(1-y)
-        res$weighting <- FALSE
-      }
-      W <- pihat * (1 - pihat) * wi
-      ##Qdiag <- diag(X%*%solve(t(X)%*%diag(W)%*%X)%*%t(X))
-      Qdiag <- lm.influence(lm(y ~ X-1, weights=W))$hat/W
-      if (is.null(tau)) # w_1=1 since tau=ybar
-        xi <- 0.5 * Qdiag * (2*pihat - 1)
-      else
-        xi <- 0.5 * Qdiag * ((1+w0)*pihat-w0)
-      res$coefficients <- res$coefficients -
-        lm(xi ~ X - 1, weights=W)$coefficients
-      res$bias.correct <- TRUE
-    }
-    else
-      res$bias.correct <- FALSE
-    ## prior correction 
-    if (!is.null(tau) & !weighting){      
-      if (tau <= 0 || tau >= 1) 
-        stop("\ntau needs to be between 0 and 1.\n") 
-      res$coefficients["(Intercept)"] <- res$coefficients["(Intercept)"] - 
-        log(((1-tau)/tau) * (ybar/(1-ybar)))
-      res$prior.correct <- TRUE
-      res$weighting <- FALSE
-    }
-    else
-      res$prior.correct <- FALSE
-    if (is.null(res$weighting))
-      res$weighting <- FALSE
-
-    res$linear.predictors <- t(res$coefficients) %*% t(X) 
-    res$fitted.values <- 1/(1+exp(-res$linear.predictors))
-    res$zelig <- "Relogit"
-    class(res) <- c("Relogit", "glm")
-    return(res)
-  }
-}
-
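The prior correction above adjusts only the intercept, subtracting
log(((1 - tau)/tau) * (ybar/(1 - ybar))) from it; this is the King-Zeng
rare-events estimator. A small numeric sketch under assumed values
(simulated data; tau = 0.05 is illustrative):

    set.seed(2)
    n <- 5000
    x <- rnorm(n)
    y <- rbinom(n, 1, plogis(-2 + x))

    fit <- glm(y ~ x, family = binomial(link = "logit"))

    tau  <- 0.05        # assumed population fraction of ones
    ybar <- mean(y)

    # corrected intercept, as in the prior-correction branch above
    coef(fit)["(Intercept)"] -
      log(((1 - tau) / tau) * (ybar / (1 - ybar)))
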
-#' Zelig2 bridge function
-#'
-#' Maps a call to the Zelig ``relogit'' model onto the ``relogit'' fitting function.
-#' @param formula a formula object
-#' @param ... ignored parameters
-#' @param tau the true population fraction of ones (see ``relogit'')
-#' @param bias.correct a logical specifying whether to apply the rare-events
-#'   bias correction; defaults to TRUE if NULL
-#' @param case.control the case-control correction method; defaults to "prior" if NULL
-#' @param data a data.frame that will be used to fit the model
-#' @return a list used internally by zelig
-#' @export
-zelig2relogit <- function(
-                          formula,
-                          ...,
-                          tau = NULL,
-                          bias.correct = NULL,
-                          case.control = NULL,
-                          data
-                          ) {
-
-  # Catch NULL case.control
-  if (is.null(case.control))
-    case.control <- "prior"
-
-  # Catch NULL bias.correct
-  if (is.null(bias.correct))
-    bias.correct = TRUE
-
-  # Construct formula. Relogit models have the structure:
-  #   cbind(y, 1-y) ~ x1 + x2 + x3 + ... + xN
-  # Where y is the response.
-  form <- update(formula, cbind(., 1 - .) ~ .)
-
-  # Set the environment to be this function's
-  environment(form) <- environment()
-
-  # Return the obvious answer
-  z(
-    .function = relogit,
-    formula = form,
-    bias.correct = bias.correct,
-    case.control = case.control,
-    tau = tau,
-    data = data
-    )
-}
-#' Estimate Parameters for the ``relogit'' Zelig Model
-#'
-#' Returns estimates of the parameters, as well as the link and
-#' inverse-link functions.
-#' @note This method merely calls ``param.logit''.
-#' @usage \method{param}{relogit}(obj, num, ...)
-#' @S3method param relogit
-#' @param obj a zelig object containing the fitted model
-#' @param num an integer specifying the number of simulations to compute
-#' @param ... unspecified parameters
-#' @return a list specifying important parameters for the ``relogit'' model
-param.relogit <- param.logit
-
-
-#' Estimate Parameters for the ``relogit'' Zelig Model
-#'
-#' Returns estimates of the parameters, as well as the link and inverse-link
-#' functions.
-#' @usage \method{param}{relogit2}(obj, num, x, ...)
-#' @S3method param relogit2
-#' @param obj a zelig object containing the fitted model
-#' @param num an integer specifying the number of simulations to compute
-#' @param x ideally we should be able to remove this parameter
-#' @param ... unspecified parameters
-#' @return a list specifying important parameters for the ``relogit'' model
-param.relogit2 <- function (obj, num, x, ...) {
-  object <- obj
-  stop("Currently zelig does not support relogit models containing 2 ",
-       "tau parameters")
-
-  pping <- function(tmp0, tmp1, num, bootstrap, x) {
-
-    par0 <- param.relogit(tmp0, num=num, x=x, bootstrap=bootstrap)
-    par1 <- param.relogit(tmp1, num=num, x=x, bootstrap=bootstrap)
-
-    P00 <- qi.relogit(tmp0, par0, x=x)
-
-    P00 <- as.matrix(qi.relogit(tmp0, param = par0, x=x)$qi$ev)
-    message("P01")
-    P10 <- as.matrix(qi.relogit(tmp1, param = par1, x=x)$qi$ev)
-
-    test <- P00[,1] < P10[,1]
-    par0 <- as.matrix(par0[test,])
-    par1 <- as.matrix(par1[test,])
-    list(par0 = par0, par1 = par1)
-  }
-  tmp0 <- tmp1 <- object
-
-  tmp0$result <- object$result$lower.estimate
-  tmp1$result <- object$result$upper.estimate
-
-  tmp <- pping(tmp0, tmp1, num = num, bootstrap=bootstrap, x=x)
-
-  par0 <- tmp$par0
-  par1 <- tmp$par1
-
-
-  while (nrow(par0) < num) {
-    tmp <- pping(tmp0, tmp1, num=num, bootstrap=bootstrap, x=x)
-    par0 <- rbind(par0, tmp$par0)
-    par1 <- rbind(par1, tmp$par1)
-  }
-  if (nrow(par0) > num) {
-    par0 <- par0[1:num,]
-    par1 <- par1[1:num,]
-  }
-  par0 <- as.matrix(par0)
-  par1 <- as.matrix(par1)
-  rownames(par0) <- 1:nrow(par0)
-  rownames(par1) <- 1:nrow(par1)
-  return(list(par0 = par0, par1 = par1))    
-}
-#' Simulate quantities of interest for the zelig ``relogit'' model
-#'
-#' This method is an alias for ``qi.logit''.
-#' @usage
-#' \method{qi}{relogit}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi relogit
-#' @param obj a zelig object, containing the fitted ``relogit'' model
-#' @param x a ``setx'' object
-#' @param x1 a ``setx'' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#' though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a ``parameters'' object containing information about the link,
-#' inverse-link, and simulated parameters
-#' @return a list of simulated quantities of interest
-qi.relogit <- qi.logit
-
-
-#' Simulate quantities of interest for the zelig ``relogit2'' model
-#'
-#' Computes bounded quantities of interest when two ``tau'' values are given; currently disabled.
-#' @usage
-#' \method{qi}{relogit2}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi relogit2
-#' @param obj a zelig object, containing the fitted ``relogit'' model
-#' @param x a ``setx'' object
-#' @param x1 a ``setx'' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#' though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a ``parameters'' object containing information about the link,
-#' inverse-link, and simulated parameters
-#' @return a list of simulated quantities of interest
-qi.relogit2 <- function (obj, x = NULL, x1 = NULL, y = NULL, num=1000, param = NULL) {
-  simpar <- param
-  # Aliased, because
-  object <- obj
-
-  # This model needs work, so it will be discontinued for now
-  stop("Relogit 2 is not currently supported")
-
-  num <- nrow(simpar$par0)
-  tmp0 <- object$result$lower.estimate
-  tmp1 <- object$result$upper.estimate
-  
-  low <- qi.relogit(tmp0, simpar$par0, x, x1)
-  up <- qi.relogit(tmp1, simpar$par1, x, x1)
-
-  PP <- PR <- array(NA, dim = c(num, 2, nrow(x)),
-                    dimnames = list(NULL, c("Lower Bound", "Upper Bound"),
-                      rownames(x)))
-  PP[,1,] <- P00 <- low$qi$ev
-  PP[,2,] <- P10 <- up$qi$ev
-  qi <- list(ev = PP)
-  qi.name <- list(ev = "Expected Values: E(Y|X)")
-  if (!is.null(x1)) {
-    FD <- RR <- array(NA, dim = c(num, 2, nrow(x)),
-                      dimnames = list(NULL,
-                                      d2 = c("Lower Bound", "Upper Bound"), 
-                                      rownames(x)
-                      ))
-
-    sim01 <- qi.relogit(tmp0, simpar$par0, x = x1, x1 = NULL)
-    sim11 <- qi.relogit(tmp1, simpar$par1, x = x1, x1 = NULL)
-    tau0 <- object$result$lower.estimate$tau
-    tau1 <- object$result$upper.estimate$tau
-    P01 <- as.matrix(sim01$qi$ev)
-    P11 <- as.matrix(sim11$qi$ev)
-    OR <- (P10/(1-P10)) / (P00/(1-P00))
-    RR[,1,] <- pmin(as.matrix(P01/P00), as.matrix(P11/P10))
-    RR[,2,] <- pmax(as.matrix(P01/P00), as.matrix(P11/P10))
-    RD0 <- as.matrix(P01-P00)
-    RD1 <- as.matrix(P11-P10)
-    RD <- as.matrix((sqrt(OR)-1) / (sqrt(OR)+1))
-    ## checking monotonicity
-    y.bar <- mean(object$y)
-    beta0.e <- coef(tmp0)
-    beta1.e <- coef(tmp1)
-    ## evaluating RD at tau0 and tau1
-    RD0.p <- 1/(1+exp(-t(beta0.e) %*% t(x1))) - 1/(1+exp(-t(beta0.e) %*% t(x)))
-    RD1.p <- 1/(1+exp(-t(beta1.e) %*% t(x1))) - 1/(1+exp(-t(beta1.e) %*% t(x)))
-    ## evaluating RD at tau0+e and tau1+e
-    e <- 0.001
-    beta0.e["(Intercept)"] <- beta0.e["(Intercept)"]+log(1-tau0)-log(tau0) -
-      log(1-tau0-0.001)+log(tau0+0.001)
-    beta1.e["(Intercept)"] <- beta1.e["(Intercept)"]+log(1-tau1)-log(tau1) -
-      log(1-tau1-e)+log(tau1+e)
-    RD0.e <- 1/(1+exp(-t(beta0.e) %*% t(x1))) - 1/(1+exp(-t(beta0.e) %*% t(x)))
-    RD1.e <- 1/(1+exp(-t(beta1.e) %*% t(x1))) - 1/(1+exp(-t(beta1.e) %*% t(x)))
-    ## checking the sign and computing the bounds
-    check <- sum((RD1.e-RD1.p) * (RD0.e-RD0.p))
-    if (check > 0) {
-      FD[,1,] <- pmin(RD0, RD1)
-      FD[,2,] <- pmax(RD0, RD1)
-    }
-    else {
-      FD[,1,] <- pmin(RD0, RD1, RD)
-      FD[,2,] <- pmax(RD0, RD1, RD)
-    }
-    qi$fd <- FD
-    qi$rr <- RR
-    qi.name$fd <- "First Differences: P(Y=1|X1) - P(Y=1|X)"
-    qi.name$rr <- "Risk Ratios: P(Y=1|X1) / P(Y=1|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, num), nrow = num, byrow = TRUE)
-#      tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#      qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#      qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-    tmp.ev <- yvar - qi$ev
-    tmp.pr <- yvar - as.integer(qi$pr)
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = num)
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = num)
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  return(list(qi = qi, qi.name = qi.name))
-}
-
-#' Describe a ``relogit'' model to Zelig
-#' @usage \method{describe}{relogit}(...)
-#' @S3method describe relogit
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-describe.relogit <- function(...) {
-  # return list
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2007,
-       category = "dichotomous",
-       text = "Rare Events Logistic Regression for Dichotomous Dependent Variables"
-       )
-}
-
-# Return Names of Relogit Model
-#
-names.Relogit <- function(x){
-  res <- list(default=names(unclass(x)),
-            estimate = names(x$lower.estimate), tau = x$tau)
-  class(res) <- "names.relogit"
-  res
-}
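
End to end, the model is reached through zelig(), which forwards tau,
bias.correct, and case.control via the bridge above. A hedged Zelig-4
style usage sketch; the data frame d and its variables are hypothetical:

    library(Zelig)

    # d: hypothetical data with a rare binary outcome `conflict`
    z.out <- zelig(conflict ~ trade + contiguity, model = "relogit",
                   tau = 0.02, data = d)
    x.out <- setx(z.out)
    s.out <- sim(z.out, x = x.out)
    summary(s.out)
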
diff --git a/R/repl.R b/R/repl.R
deleted file mode 100644
index d2da35c..0000000
--- a/R/repl.R
+++ /dev/null
@@ -1,81 +0,0 @@
-#' Generic Method for Replicating Data
-#' @param object a 'zelig' object
-#' @param ... parameters
-#' @return a replicated object
-#' @export
-#' @author Kosuke Imai and Olivia Lau \email{mowen@@iq.harvard.edu}
-repl <- function(object, ...)
-  UseMethod("repl")
-#' Default Method for Replicating Statistics
-#'
-#' Replicate a simulation
-#' @usage \method{repl}{default}(object, data=NULL, ...)
-#' @S3method repl default
-#' @param object an object to replicate
-#' @param data a data.frame
-#' @param ... ignored parameters
-#' @return a replicated object
-#' @author Kosuke Imai and Olivia Lau \email{mowen@@iq.harvard.edu}
-repl.default <- function(object, data=NULL, ...) {
-  if (!is.null(data))
-    object$call$data <- data
-
-  eval(object$call$data, sys.parent())
-}
-#' Method for Replicating Simulated Quantities of Interest
-#'
-#' Replicate simulated quantities of interest
-#' @usage \method{repl}{sim}(object, x=NULL, x1=NULL, y=NULL,
-#'                     num=1000,
-#'                     prev = NULL, bootstrap = FALSE,
-#'                     boot.fn=NULL,
-#'                     cond.data = NULL, ...)
-#' @S3method repl sim
-#' @param object a 'zelig' object
-#' @param x a 'setx' object
-#' @param x1 a secondary 'setx' object used to perform particular computations
-#'   of quantities of interest
-#' @param y a parameter reserved for the computation of particular quantities of
-#'   interest (average treatment effects). Few models currently support this
-#'   parameter
-#' @param num an integer specifying the number of simulations to compute
-#' @param prev ignored
-#' @param bootstrap ignored
-#' @param boot.fn ignored
-#' @param cond.data ignored
-#' @param ... special parameters which are reserved for future versions of Zelig
-#' @return a 'sim' object storing the replicated quantities of interest
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-repl.sim <- function(object, x=NULL, x1=NULL, y=NULL,
-                     num=1000,
-                     prev = NULL, bootstrap = FALSE,
-                     boot.fn=NULL,
-                     cond.data = NULL, ...) {
-  # would rather use a factory function
-  new.call <- object$call
-
-
-  # this should always give the same value...
-  rep.zelig <- eval(object$zcall, sys.parent())
-
-  # attach the replicated fit to the new call
-  new.call$z <- rep.zelig
-
-  # x
-  new.call$x <- if (is.null(x))
-    object$x
-  else
-    x
-
-  # x1
-  new.call$x1 <- if (is.null(x1))
-    object$x1
-  else
-    x1
-
-  # how is this EVER true?
-  if (!is.null(object$seed))
-    set.seed(object$seed)
-
-  eval(new.call, sys.parent())
-}
diff --git a/R/robust.glm.hook.R b/R/robust.glm.hook.R
deleted file mode 100644
index 93f2ef5..0000000
--- a/R/robust.glm.hook.R
+++ /dev/null
@@ -1,32 +0,0 @@
-#' Hook for ``glm'' Models in Zelig
-#'
-#' Adds support for robust error-estimates in the Zelig ``glm'' models.
-#' @param obj a zelig object
-#' @param zcall the original call to the zelig model
-#' @param call the call that will be evaluated to fit the model
-#' @param robust a logical specifying whether or not to use robust error
-#' estimates
-#' @param ... ignored parameters
-#' @return the fitted model object
-#' @export
-robust.glm.hook <- function (obj, zcall, call, robust = FALSE, ...) {
-
-  # If "robust" is a list, 
-  if (is.list(robust)) {
-
-    # if none of the entries of robust belong to the vector below
-    if (!any(robust$method %in% c("vcovHAC", "kernHAC", "weave")))
-      stop("robust contains elements that are not supported.")
-
-    # Acquire the value of the robust parameter
-    obj$robust <- robust
-  }
-  else if (!is.logical(robust))
-    stop("Invalid input for robust: choose either TRUE or a list of options.")
-
-  # Set as a robust generalized linear model (in addition to other types)
-  class(obj) <- c("glm.robust", class(obj))
-
-  # Return...
-  obj
-}
diff --git a/R/robust.hook.R b/R/robust.hook.R
deleted file mode 100644
index 6862564..0000000
--- a/R/robust.hook.R
+++ /dev/null
@@ -1,20 +0,0 @@
-#' @export
-robust.gee.hook <- function(obj, Zall, Call, robust, ...) {
-  
-  # Assume robust, if nothing is specified
-  if (missing(robust) || is.null(robust))
-    robust <- TRUE
-
-  # Invalid robust parameters should stop program
-  if (!is.logical(robust))
-    stop("robust must be a logical (TRUE or FALSE)")
-
-  if (robust)
-    class(obj) <- c("gee.robust", class(obj))
-
-  else
-    class(obj) <- c("gee.naive", class(obj))
-
-  #
-  obj
-}
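
Both hooks share one mechanism: prepend an S3 class tag so that later
dispatch (summary, vcov, and so on) can select robust or naive variants.
A generic sketch of the pattern with a toy class, not the actual Zelig
method table:

    fit <- lm(dist ~ speed, data = cars)

    # tag the object, as the hooks above do with gee.robust / gee.naive
    class(fit) <- c("lm.tagged", class(fit))

    summary.lm.tagged <- function (object, ...) {
      cat("(a robust variant would adjust the vcov here)\n")
      NextMethod()  # fall through to summary.lm
    }

    summary(fit)
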
diff --git a/R/setx.R b/R/setx.R
deleted file mode 100644
index 7ef9926..0000000
--- a/R/setx.R
+++ /dev/null
@@ -1,312 +0,0 @@
-#' Setting Explanatory Variable Values
-#'
-#' The \code{setx} command uses the variables identified in
-#' the \code{formula} generated by \code{zelig} and sets the values of
-#' the explanatory variables to the selected values.  Use \code{setx}
-#' after \code{zelig} and before \code{sim} to simulate quantities of
-#' interest.
-#' @param obj the saved output from zelig
-#' @param fn a list of functions to apply to the data frame
-#' @param data a new data frame used to set the values of
-#'   explanatory variables. If data = NULL (the default), the
-#'   data frame called in zelig is used
-#' @param cond   a logical value indicating whether unconditional
-#'   (default) or conditional (choose \code{cond = TRUE}) prediction
-#'   should be performed.  If you choose \code{cond = TRUE}, \code{setx}
-#'   will coerce \code{fn = NULL} and ignore the additional arguments in 
-#'   \code{\dots}.  If \code{cond = TRUE} and \code{data = NULL},
-#'   \code{setx} will prompt you for a data frame.
-#' @param ... user-defined values of specific variables for overwriting the
-#'   default values set by the function \code{fn}.  For example, adding
-#'   \code{var1 = mean(data\$var1)} or \code{x1 = 12} explicitly sets the value
-#'   of \code{x1} to 12.  In addition, you may specify one explanatory variable
-#'   as a range of values, creating one observation for every unique value in
-#'   the range of values
-#' @return For unconditional prediction, \code{x.out} is a model matrix based
-#'   on the specified values for the explanatory variables.  For multiple
-#'   analyses (i.e., when choosing the \code{by} option in \code{\link{zelig}}),
-#'   \code{setx} returns the selected values calculated over the entire
-#'   data frame.  If you wish to calculate values over just one subset of
-#'   the data frame, the 5th subset for example, you may use:  
-#'   \code{x.out <- setx(z.out[[5]])}
-#' @export
-#' @examples
-#'
-#' # Unconditional prediction:
-#' data(turnout)
-#' z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
-#' x.out <- setx(z.out)
-#' s.out <- sim(z.out, x = x.out)
-#'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Olivia Lau and Kosuke Imai 
-#' @seealso The full Zelig manual may be accessed online at
-#'   \url{http://gking.harvard.edu/zelig}
-#' @keywords file
-setx <- function(obj, fn=NULL, data=NULL, cond=FALSE, ...)
-  UseMethod("setx")
-#' Set explanatory variables
-#'
-#' Set explanatory variables
-#' @usage \method{setx}{default}(obj, fn=NULL, data=NULL, cond=FALSE, ...)
-#' @S3method setx default
-#' @param obj a 'zelig' object
-#' @param fn a list of key-value pairs specifying which function to apply to
-#'           columns of each data-type
-#' @param data a data.frame
-#' @param cond ignored
-#' @param ... parameters specifying what to explicitly set each column as. This
-#'            is used to produce counterfactuals
-#' @return a 'setx' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, and Olivia Lau 
-setx.default <- function(obj, fn=NULL, data=NULL, cond=FALSE, ...) {
-
-  # Warnings and errors
-  if (!missing(cond))
-    warning('"cond" is not currently supported by this version of Zelig')
-
-  # Get formula used for the call to the model
-  form <- formula(obj)
-
-  # Parsed formula. This is an intermediate used for processing design
-  # matrices, etc.
-  parsed.formula <- parseFormula(form, data)
-
-  # If data.frame is not explicitly set, use the one from the Zelig call
-  if (is.null(data))
-    data <- obj$data
-
-  # Create a variable to hold the values of the dot parameters
-  dots <- list()
-
-  # Get the dots as a set of expressions
-  symbolic.dots <- match.call(expand.dots = FALSE)[["..."]]
-
-  # Assign values to the dot parameters
-  for (key in names(symbolic.dots)) {
-    result <- with(data, eval(symbolic.dots[[key]]))
-    dots[[key]] <- result
-  }
-
-  # Extract information about terms
-  # Note: the functions 'getPredictorTerms' and 'getOutcomeTerms' are in need
-  # of a rewrite. At the moment, they are pretty kludgey (written by Matt O.).
-  vars.obj <- getPredictorTerms(form)
-  not.vars <- getResponseTerms(form)
-
-  # Default the environment to the parent
-  env.obj <- parent.frame()
-
-  # explanatory variables
-  explan.obj <- Filter(function (x) x %in% vars.obj, names(dots))
-
-  # defaults for fn
-  if (missing(fn) || !is.list(fn))
-    # set fn to appropriate values, if NULL
-    fn <- list(numeric = mean,
-               ordered = Median,
-               other   = Mode
-               )
-
-  # res
-  res <- list()
-
-  # compute values
-  # if fn[[mode(data[, key])]] exists,
-  # then use that function to compute result
-  for (key in all.vars(form[[3]])) {
-    # skip values that are explicitly set
-    if (key %in% names(dots) || key %in% not.vars)
-      next
-
-    m <- class(data[,key])[[1]]
-
-    # Match the class-type with the correct function to call
-    if (m %in% names(fn))
-      res[[key]] <- fn[[m]](data[ ,key])
-
-    # If it is a numeric, then we just evaluate it like a numeric
-    else if (is.numeric(data[,key]))
-      res[[key]] <- fn$numeric(data[ ,key])
-
-    # If it's ordered, then we take the median, because that's the best we got
-    else if (is.ordered(data[,key]))
-      res[[key]] <- fn$ordered(data[ ,key])
-
-    # Otherwise we take the mode, because that always kinda makes sense.
-    else
-      res[[key]] <- fn$other(data[ ,key])
-  }
-
-  # Add explicitly set values
-  for (key in names(symbolic.dots)) {
-    if (! key %in% colnames(data)) {
-      warning("`", key,
-              "` is not an column in the data-set, and will be ignored")
-      next
-    }
-
-    res[[key]] <- if (is.factor(data[,key])) {
-      factor(dots[[key]], levels=levels(data[,key]))
-    }
-    else
-      dots[[key]]
-  }
-
-  # Convert "res" into a list of lists. This makes atomic entries into lists.
-  for (k in 1:length(res)) {
-    if (!is.factor(res[[k]]))
-      res[[k]] <- as.list(res[[k]])
-  }
-
-  # Combine all the sublists
-  res <- do.call("mix", res)
-
-  # A list containing paired design matrices and their corresponding data.frame's
-  frames.and.designs <- list()
-
-  # Iterate through all the results
-  for (k in 1:length(res)) {
-    #
-    label <- paste(names(res[[k]]), "=", res[[k]], sep="", collapse=", ")
-
-    # Get specified explanatory variables
-    specified <- res[[k]]
-
-    # Construct data-frame
-    d <- constructDataFrame(data, specified)
-
-    # Construct model/design matrix
-    # NOTE: THIS NEEDS TO BE MORE ROBUST
-    m <- constructDesignMatrix(d, parsed.formula)
-
-    # Model matrix, as a data.frame
-    dat <- tryCatch(as.data.frame(m), error = function (e) NA)
-
-    # Specify information
-    frames.and.designs[[label]] <- list(
-      label = label,
-      data.frame = d,
-      model.matrix = m,
-      as.data.frame = dat
-      )
-  }
-
-  # Phonetically... setx's
-  setexes <- list()
-
-  for (k in seq_along(frames.and.designs)) {
-    key <- names(frames.and.designs)[[k]]
-    mod <- frames.and.designs[[key]]$model.matrix
-    d <- frames.and.designs[[key]]$data.frame
-    dat <- frames.and.designs[[key]]$as.data.frame
-    specified <- res[[k]]  # pair each design with the values that produced it
-
-    setexes[[key]] <- list(
-      name   = obj$name,
-      call   = match.call(),
-      formula= form,
-      matrix = mod,
-      updated = d,
-      data   = dat,
-      values = specified,
-      fn     = fn,
-      cond   = cond,
-      new.data = data,
-      special.parameters = dots,
-      symbolic.parameters = symbolic.dots,
-      label = obj$label,
-      explan = vars.obj,
-      pred   = not.vars,
-      package.name = obj$package.name
-    )
-    attr(setexes[[key]], "pooled") <- F
-    class(setexes[[key]]) <- c(obj$name, "setx")
-  }
-
-  if (length(setexes) == 1) {
-    attr(setexes, "pooled") <- FALSE
-    setexes <- setexes[[1]]
-    class(setexes) <- c(obj$name, "setx")
-  }
-  else {
-    attr(setexes, "pooled") <- TRUE
-    class(setexes) <- c(obj$name, "pooled.setx", "setx")
-  }
-
-  # Return
-  setexes
-}
-
-
-#' Construct Data Frame
-#' Construct and return a tiny (single-row) data-frame from a larger
-#' data-frame and a list of specified values
-#' @param data a ``data.frame'' that will be used to create a small design matrix
-#' @param specified a list with key-value pairs that will be used to explicitly
-#' set several values
-#' @return a ``data.frame'' containing a single row
-constructDataFrame <- function (data, specified) {
-  # Make a tiny data-frame with all the necessary columns
-  d <- data[1,]
-
-  # Give the computed values to those entries
-  for (key in names(specified)) {
-    val <- specified[[key]]
-
-    if (is.factor(val) || !(is.numeric(val) || is.ordered(val)))
-      val <- factor(val, levels=levels(data[,key]))
-
-    d[, key] <- val
-  }
-
- 
-  # Return tiny data-frame
-  d
-}
-
-#' Construct Design Matrix from a Single-Row Data Frame
-#' Construct and return a design matrix based on a tiny data-frame (single-row).
-#' @param data a ``data.frame'' (preferably single-rowed) that will be used to
-#' create a small design matrix
-#' @param formula a formula, whose predictor variables will be used to create a
-#' design matrix
-#' @return a design (model) matrix
-constructDesignMatrix <- function (data, formula) {
-  tryCatch(
-           # Attempt to generate the design matrix of the formula
-           model.matrix(formula, data), 
-
-           # If there is a warning... probably do nothing
-           # warning = function (w) w,
-
-           # If there is an error, warn the user and specify the design
-           # matrix as NA
-           error = function (e) {
-             NA
-           }
-           )
-}
-#' Set Explanatory Variables for Multiply Imputed Data-sets
-#' This function simply calls setx.default once for every fitted model
-#' within the 'zelig.MI' object
-#' @usage \method{setx}{MI}(obj, ..., data=NULL)
-#' @S3method setx MI
-#' @param obj a 'zelig' object
-#' @param ... user-defined values of specific variables for overwriting the
-#'   default values set by the function \code{fn}
-#' @param data a new data-frame
-#' @return a 'setx.mi' object used for computing Quantities of Interest by the
-#'   'sim' method
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @seealso \link{setx}
-setx.MI <- function(obj, ..., data = NULL) {
-
-  results.list <- list()
-
-  for (key in names(obj)) {
-    object <- obj[[key]]
-    results.list[[key]] <- setx(object, ..., data = data)
-  }
-
-  class(results.list) <- c("setx.mi", "setx")
-  results.list
-}
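
Tying the above together: values passed through ... override the fn
defaults, and a range of values produces a pooled result with one entry
per value. A short usage sketch on the turnout data from the roxygen
example (the educate values are illustrative):

    library(Zelig)
    data(turnout)

    z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)

    x.mean  <- setx(z.out)                   # defaults: mean/median/mode
    x.hs    <- setx(z.out, educate = 12)     # one explicit counterfactual
    x.range <- setx(z.out, educate = 10:16)  # range -> pooled "setx"
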
diff --git a/R/sim.MI.R b/R/sim.MI.R
deleted file mode 100644
index f8fc1c6..0000000
--- a/R/sim.MI.R
+++ /dev/null
@@ -1,42 +0,0 @@
-#' Simulate Multiply Imputed Data
-#' @usage \method{sim}{MI}(obj, x=NULL, x1=NULL, y=NULL, num=1000, ...)
-#' @S3method sim MI
-#' @param obj a 'zelig.MI' object containing several fits for two or more 
-#'   subsetted data-frames
-#' @param x a 'setx.mi' object containing explanatory variables for each
-#'   fitted model
-#' @param x1 a 'setx.mi' object containing explanatory variables for each
-#'   fitted model
-#' @param y this feature is currently unimplemented
-#' @param num an integer specifying the number of simulations to compute
-#' @param ... ignored parameters
-#' @return a 'sim.MI' with simulated quantities of interest for each fitted
-#'   contained by 'obj'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @seealso \link{sim}
-sim.MI <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, ...) {
-
-  sim.results <- list()
-  m<-length(obj)
-  mi.num<-ceiling(num/m)
-
-  for (key in names(obj)) {
-    object <- obj[[key]]
-    new.x <- x[[key]]
-    new.x1 <- x1[[key]]
-    new.y <- y[[key]]
-
-    sim.results[[key]] <- sim(object, x=new.x, x1=new.x1, y=new.y, num=mi.num)
-  }
-
-  model <- get('model', attr(obj, 'state'))
-
-  class(sim.results) <- c(
-                          'MI.sim',
-                          paste(model, "mi-sim", sep="-"),
-                          paste(model, "mi.sim", sep=".")
-                          )
-
-  sim.results
-}
-
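Note the allocation of draws: sim.MI splits num across the m imputed
fits with ceiling(num/m), so the pooled object carries at least num
simulations overall. For example:

    num <- 1000
    m   <- 3                    # number of imputed data sets
    mi.num <- ceiling(num / m)  # 334 draws per fit, 1002 in total
    mi.num
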
diff --git a/R/sim.R b/R/sim.R
deleted file mode 100644
index 6a5f282..0000000
--- a/R/sim.R
+++ /dev/null
@@ -1,92 +0,0 @@
-#' Generic Method for Computing and Organizing Simulated Quantities of Interest
-#' Simulate quantities of interest from the estimated model
-#' output from \code{zelig()} given specified values of explanatory
-#' variables established in \code{setx()}.  For classical \emph{maximum
-#' likelihood} models, \code{sim()} uses asymptotic normal
-#' approximation to the log-likelihood.  For \emph{Bayesian models},
-#' Zelig simulates quantities of interest from the posterior density,
-#' whenever possible.  For \emph{robust Bayesian models}, simulations
-#' are drawn from the identified class of Bayesian posteriors.
-#' Alternatively, you may generate quantities of interest using
-#' bootstrapped parameters.
-#' @param obj the output object from zelig
-#' @param x values of explanatory variables used for simulation,
-#'   generated by setx
-#' @param x1 optional values of explanatory variables (generated by a
-#'   second call of setx) used to perform particular computations of
-#'   quantities of interest
-#' @param y a parameter reserved for the computation of particular
-#'          quantities of interest (average treatment effects). Few
-#'          models currently support this parameter
-#' @param num an integer specifying the number of simulations to compute
-#' @param bootstrap currently unsupported
-#' @param bootfn currently unsupported
-#' @param cond.data currently unsupported
-#' @param ... arguments reserved for future versions of Zelig
-#' @return The output stored in \code{s.out} varies by model.  Use the
-#'  \code{names} command to view the output stored in \code{s.out}.
-#'  Common elements include: 
-#'  \item{x}{the \code{\link{setx}} values for the explanatory variables,
-#'    used to calculate the quantities of interest (expected values,
-#'    predicted values, etc.). }
-#'  \item{x1}{the optional \code{\link{setx}} object used to simulate
-#'    first differences, and other model-specific quantities of
-#'    interest, such as risk-ratios.}
-#'  \item{call}{the options selected for \code{\link{sim}}, used to
-#'    replicate quantities of interest. } 
-#'  \item{zelig.call}{the original command and options for
-#'    \code{\link{zelig}}, used to replicate analyses. }
-#'  \item{num}{the number of simulations requested. }
-#'  \item{par}{the parameters (coefficients, and additional
-#'    model-specific parameters).  You may wish to use the same set of
-#'    simulated parameters to calculate quantities of interest rather
-#'    than simulating another set.}
-#'  \item{qi\$ev}{simulations of the expected values given the
-#'    model and \code{x}. }
-#'  \item{qi\$pr}{simulations of the predicted values given by the
-#'    fitted values. }
-#'  \item{qi\$fd}{simulations of the first differences (or risk
-#'    difference for binary models) for the given \code{x} and \code{x1}.
-#'    The difference is calculated by subtracting the expected values
-#'    given \code{x} from the expected values given \code{x1}.  (If you do not
-#'    specify \code{x1}, you will not get first differences or risk
-#'    ratios.) }
-#'  \item{qi\$rr}{simulations of the risk ratios for binary and
-#'    multinomial models.  See specific models for details.}
-#'  \item{qi\$ate.ev}{simulations of the average expected
-#'    treatment effect for the treatment group, using conditional
-#'    prediction. Let \eqn{t_i} be a binary explanatory variable defining
-#'    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
-#'    average expected treatment effect for the treatment group is
-#'    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
-#'      E[Y_i(t_i=0)] \mid t_i=1 \,],} 
-#'    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
-#'    observation \eqn{i} in the treatment group.  Variation in the
-#'    simulations is due to uncertainty in simulating \eqn{E[Y_i(t_i=0)]},
-#'    the counterfactual expected value of \eqn{Y_i} for observations in the
-#'    treatment group, under the assumption that everything stays the
-#'    same except that the treatment indicator is switched to \eqn{t_i=0}. }
-#'  \item{qi\$ate.pr}{simulations of the average predicted
-#'    treatment effect for the treatment group, using conditional
-#'    prediction. Let \eqn{t_i} be a binary explanatory variable defining
-#'    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
-#'    average predicted treatment effect for the treatment group is
-#'    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
-#'      \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} 
-#'    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
-#'    observation \eqn{i} in the treatment group.  Variation in the
-#'    simulations is due to uncertainty in simulating
-#'    \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual predicted value of
-#'    \eqn{Y_i} for observations in the treatment group, under the
-#'    assumption that everything stays the same except that the
-#'    treatment indicator is switched to \eqn{t_i=0}.}
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Olivia Lau and Kosuke Imai 
-sim <- function(
-                obj,
-                x=NULL, x1=NULL, y=NULL, num=1000,
-                bootstrap=F, bootfn=NULL, cond.data=NULL,
-                ...
-                ) {
-  UseMethod("sim")
-}
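
The generic above anchors the zelig() -> setx() -> sim() workflow;
supplying both x and x1 adds first differences to the quantities of
interest. A sketch on the turnout data (covariate values illustrative):

    library(Zelig)
    data(turnout)

    z.out  <- zelig(vote ~ race + educate, model = "logit", data = turnout)
    x.low  <- setx(z.out, educate = 8)
    x.high <- setx(z.out, educate = 16)

    # s.out$qi includes expected values, predicted values, and fd
    s.out <- sim(z.out, x = x.low, x1 = x.high, num = 1000)
    summary(s.out)
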
diff --git a/R/sim.default.R b/R/sim.default.R
deleted file mode 100644
index 28b722f..0000000
--- a/R/sim.default.R
+++ /dev/null
@@ -1,268 +0,0 @@
-#' Method for Simulating Quantities of Interest from 'zelig' Objects
-#'
-#' Simulate quantities of interest
-#' @usage \method{sim}{default}(obj,
-#'                     x=NULL, x1=NULL, y=NULL,
-#'                     num=1000, bootstrap = FALSE,
-#'                     bootfn=NULL,
-#'                     cond.data = NULL,
-#'                     ...)
-#' @S3method sim default
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object
-#' @param x1 a secondary 'setx' object used to perform particular computations
-#' of quantities of interest
-#' @param y a parameter reserved for the computation of particular quantities of
-#' interest (average treatment effects). Few models currently support this
-#' parameter
-#' @param num an integer specifying the number of simulations to compute
-#' @param bootstrap ignored
-#' @param bootfn ignored
-#' @param cond.data ignored
-#' @param ... parameters to be passed to the boot function, if one is supplied
-#' @return a 'sim' object storing the replicated quantities of interest
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-sim.default <- function(
-                        obj,
-                        x = NULL,
-                        x1 = NULL,
-                        y = NULL,
-                        num = 1000,
-                        bootstrap = FALSE,
-                        bootfn = NULL,
-                        cond.data = NULL,
-                        ...
-                        ) {
-  # Create environment of local variables
-  model.env <- new.env()
-
-  # Add local variables
-  assign(".object", obj$result, model.env)
-  assign(".fitted", obj$result, model.env)
-  assign(".model", "model-name", model.env)
-
-  # Get S3 methods
-  paramfunction <- getS3method("param", obj$name, FALSE)
-  qifunction <- getS3method("qi", obj$name, FALSE)
-  bootfunction <- getS3method("bootstrap", obj$name, TRUE)
-
-  parent.env(model.env) <- environment(paramfunction)
-
-  environment(paramfunction) <- model.env
-  environment(qifunction) <- model.env
-
-  # Begin function
-
-  if (length(attr(x, "pooled")) > 0 && attr(x, "pooled")) {
-
-    xes <- list()
-    titles <- NULL
-
-    # for (key in names(x)) {
-    for (k in 1:length(x)) {
-      key <- names(x)[[k]]
-      xes[[key]] <- sim(obj, x[[k]], x1[[k]], y, num, bootstrap, bootfn, cond.data, ...)
-      attr(xes[[key]], "pooled") <- FALSE
-      titles <- append(titles, xes[[key]]$titles)
-    }
-
-    attr(xes, "pooled") <- TRUE
-    attr(xes, "pooled.setx") <- x
-    attr(xes, "titles") <- unique(titles)
-
-    class(xes) <- c("pooled.sim")
-
-    return(xes)
-  }
-
-  # Stop on unimplemented features
-  if (!is.null(cond.data))
-    warning("conditions are not yet supported")
-
-  # Simulate Parameters
-  # param <- param(obj, num=num)
-  param <- paramfunction(obj, num=num)
-
-  # Cast list into a "parameters" object
-  param <- as.parameters(param, num)
-
-  # Define the post-hook name
-  post.hook <- obj$zc$.post
-
-  # apply the hook if it exists
-  if (!is.null(post.hook)) {
-    zelig2 <- get(paste("zelig2", obj$name, sep=""))
-    envir <- environment(zelig2)
-
-    # Produce a warning if the post-hook defined cannot be found
-    if (!exists(post.hook, mode="function", envir=envir))
-      warning("the hook '", post.hook, "' cannot be found")
-    
-    # Otherwise, business as usual. Extract the hook and apply it to the zelig
-    # object. Note that a post-hook always has the arguments:
-    #   obj, x, x1, bootstrap, bootfn, param
-    else {
-      # Retrieve the hook, since it exists
-      hook <- get(post.hook, envir=envir)
-
-      # Assign the param object. In the case of bootstrapping, the param object
-      # might not have any meaning.
-      param <- if (bootstrap)
-        param
-
-      # Otherwise apply the hook and return it as the parameters
-      else
-        hook(obj, x, x1, bootstrap, bootfn, param=param)
-    }
-  }
-
-  # Get default boot-strapping function if boot is enabled and no boot-function
-  # is specified
-  if (bootstrap && missing(bootfn))
-    bootfn <- bootfn.default
-
-  # Boot-strapping!!
-  if (!missing(bootfn) && !is.null(bootfn)) {
-
-    # Get the complete-case subset of the data used to fit the model
-    d <- obj$data
-    d <- d[complete.cases(d), ]
-
-    # Add several private variables to bootfn:
-    #   .fitted : a fitted model object
-    #   .data : the data-set used to fit the original model
-    #   .call : the call used to fit the original model
-    #   .env : the environment in which the .call variable should/can be
-    #          evaluated
-    boot.env <- obj$method.env
-    bootfn <- attach.env(bootfn, obj$method.env)
-
-    # Bootstrapfn
-    bootstrapfn <- getS3method("bootstrap", obj$name, TRUE)
-    environment(bootstrapfn) <- model.env
-
-    # If is.null then we just get the default bootstrap fn, which is merely to
-    # simulate the systematic parameters
-
-##   CRAN is opposed to ::: within same package, 
-##   but I'm opposed to S4 environment artifacts
-##    if (is.null(bootstrapfn))
-##      bootstrapfn <- Zelig:::bootstrap.default
-##   So this obviously makes my code better:
-
-    if (is.null(bootstrapfn)){
-      localbootstrap.default <- function (obj, ...)
-      list(
-         alpha = NULL,
-         beta = coef(obj)
-         )
-      bootstrapfn <- localbootstrap.default
-    }
-
-
-    # Attach the appropriate environment to the function
-    bootstrapfn <- attach.env(bootstrapfn, model.env)
-
-    # Get a sample, so we know how to re-size the result.
-    # Note: This "example" object will be used at the end of this if-clause to
-    # build an object similar in structure to that of "bootstrapfn(obj)"
-    example <- bootstrapfn(obj)
-    example <- as.bootvector(example)
-
-    # Bootstrap using a function with parameters: data, i, object
-    # Where data is a data.frame, i is an vector of integers used to sample the
-    # data.frame, and object is a fitted model object.
-    res <- boot(d, bootfn, num,
-                object = obj$result,
-                bootstrapfn = bootstrapfn,
-                num = num
-                )
-
-    # Copy the param object that was made earlier via ``param'' method
-    res.param <- param
-
-    # Reverse-construct a bootlist object from this
-    bl <- as.bootlist(res$t, example$lengths, example$names)
-
-    # Replace slots corresponding to "alpha" and "beta" on the "param" object
-    param$coefficients <- bl$beta
-    param$alpha <- bl$alpha
-  }
-
-  # Compute quantities of interest
-  res.qi <- qifunction(obj, x=x, x1=x1, y=y, param=param, num=num)
-  
-  # Cast as a "qi" object if it is not one
-  res.qi <- as.qi(res.qi)
-
-  # Assign class
-  class(res.qi) <- c(obj$name, class(res.qi))
-
-  # This is kludge (for now)
-  # This can be removed as of 4-27-2011
-  if (inherits(obj, "MI"))
-    class(res.qi) <- c("MI", class(res.qi))
-
-  # build object
-  s <- list(
-            model     = obj$name,
-            x        = x,
-            x1       = x1,
-            stats    = summarize(res.qi),
-            qi       = res.qi,
-            titles   = names(res.qi),
-            bootfn   = bootfn,
-            cond.data= cond.data,
-            zelig    = obj,
-            call     = match.call(),
-            zcall    = obj$call,
-            result   = obj$result,
-            num      = num,
-            special.parameters = list(...),
-            package.name = obj$package.name
-            )
-
-  # cast class
-  sim.class <- if (inherits(obj, "MI"))
-    sim.class <- "MI.sim"
-
-  attr(s, "titles") <- unique(names(res.qi))
-
-  class(s) <- c(sim.class,
-                paste("sim", obj$name, sep="."),
-                obj$name,
-                "sim"
-                )
-
-  # return
-  s
-}
-
-create.pooled.sim <- function(
-                        obj,
-                        x = NULL,
-                        x1 = NULL,
-                        y = NULL,
-                        num = 1000,
-                        bootstrap = FALSE,
-                        bootfn = NULL,
-                        cond.data = NULL,
-                        ...
-                        ) {
-  xes <- list()
-  titles <- NULL
-
-  for (key in names(x)) {
-    xes[[key]] <- sim(obj, x[[key]], x1[[key]], y, num, bootstrap, bootfn, cond.data, ...)
-    attr(xes[[key]], "pooled") <- FALSE
-    titles <- append(titles, xes[[key]]$titles)
-  }
-
-  attr(xes, "pooled") <- TRUE
-  attr(xes, "pooled.setx") <- x
-  attr(xes, "titles") <- unique(titles)
-
-  class(xes) <- c("pooled.sim")
-
-  return(xes)
-}
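
The boot() call above relies on the standard boot contract: a statistic
function of the form function(data, i, ...) that refits on the resampled
rows data[i, ] and returns a flat numeric vector. A self-contained sketch
with a plain lm, not the Zelig bootfn machinery:

    library(boot)

    stat.fn <- function (data, i) {
      # refit the model on the bootstrap resample and return coefficients
      coef(lm(dist ~ speed, data = data[i, ]))
    }

    res <- boot(cars, stat.fn, R = 200)
    head(res$t)  # one row of resampled coefficients per replicate
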
diff --git a/R/simulation.matrix.R b/R/simulation.matrix.R
deleted file mode 100644
index 5555454..0000000
--- a/R/simulation.matrix.R
+++ /dev/null
@@ -1,116 +0,0 @@
-#' Get Simulations as a Matrix
-#'
-#' Returns a MxN matrix where N is the number of simulations and M is the number
-#' of predicted values. Additionally, a ``labels'' attribute is attached that
-#' produces a human-readable identifier for each column.
-#' @param obj an object, typically a ``sim'' or ``pooled.sim'' object.
-#' @param which a character-vector specifying the \emph{titles} of quantities of
-#' interest to extract
-#' @param ... additional parameters
-#' @return a simulation matrix
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-simulation.matrix <- function (obj, which = NULL, ...) {
-  UseMethod("simulation.matrix")
-}
-
-#' @S3method simulation.matrix sim
-simulation.matrix.sim <- function (obj, which, ...) {
-
-  which <- find.match(which, attr(obj, "titles"))
-
-  if (is.na(which)) {
-    warning(
-      'The "which" parameter does not exist. Valid titles are:\n    ',
-      paste('"', names(obj$qi), '"', sep="", collapse=", ")
-      )
-
-    # Return a matrix containing the single entry NA
-    return(matrix(NA))
-  }
-
-  # Store the little matrix (probably a column-vector)
-  lil.matrix <- as.matrix(obj$qi[[which]])
-
-  # Specify what quantities of interest this matrix represents
-  attr(lil.matrix, "qi") <- which
-
-  # Return the little, modified matrix
-  lil.matrix
-}
-
-#' @S3method simulation.matrix pooled.sim
-simulation.matrix.pooled.sim <- function (obj, which, ...) {
-
-  # Get the best match for the value "which"
-  which <- find.match(which, attr(obj, "titles"))
-
-  # This will become the matrix that is returned
-  big.matrix <- NULL
-
-  # Iterate through all the results
-  for (label in names(obj)) {
-    # Get the matrix for the single quantity of interest
-    small.matrix <- simulation.matrix(obj[[label]], which = which, exact.match = FALSE)
-
-    # Column-bind this result with the total matrix.
-    # This might want to be wrapped by a tryCatch in case weird things happen
-    big.matrix <- cbind(big.matrix, small.matrix)
-  }
-
-  # Column-wise specification
-  attr(big.matrix, "labels") <- names(obj)
-  attr(big.matrix, "which") <- 1:ncol(big.matrix)
-  names(attr(big.matrix, "which")) <- names(obj)
-
-  # Specify what quantities of interest this matrix represents
-  attr(big.matrix, "qi") <- which
-
-  # Return the big matrix
-  big.matrix
-}
-
-#' Find a Partial or Exact Match from a Vector of Strings
-#' Searches a vector of character-strings and returns the best match.
-#' @param needle a character-string to search for in the haystack
-#' @param haystack a vector of character-strings
-#' @param fail the value to return in case no match is found. Defaults to NA
-#' @return the best-matched string or NA
-#' @details ``find.match'' attempts to use several common matching functions in
-#' an order that sequentially prefers less strict matching, until a suitable
-#' match is found. If none is found, then return the value of the ``fail''
-#' parameter (defaults to NA). The functions used for matching are: ``match'',
-#' ``charmatch'', and finally ``grep''.
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-find.match <- function (needle, haystack, fail = NA) {
-
-  # Having multiple approximate hits is bad form, since the string "x" can match
-  # "xe", "xen", "xs", etc. If it allows this possibility, we'll be constructing
-  # matrices out of potentially disparate quantities of interest. That is, it
-  # obviously would not be good to match the string "Average" with 
-  # "Averge Treatment Effect" and "Average Value".
-  # That is, we want our matrices to be constructed consistently
-  if (length(needle) != 1)
-    return(NA)
-
-  # Search the strings all at once for code clarity. We can write this smoother,
-  # but then it sacrifices readability for nested if clauses.
-  exact.match <- match(needle, haystack, nomatch = 0)
-  partial.match <- charmatch(needle, haystack, nomatch = 0)
-  grep.match <- grep(needle, haystack)[1]
-
-  # If we found an exact match, then we go with it.
-  if (exact.match != 0)
-    return(haystack[exact.match])
-
-  # If there is a unique partial match, then that will work too.
-  else if (partial.match != 0)
-    return(haystack[partial.match])
-
-  # If there are non-unique partial matches, then we take the first incidence
-  else if (!is.na(grep.match))
-    return(haystack[grep.match])
-
-  # If nothing else is good, then return whatever value a failure should be. NA by default
-  return(fail)
-}
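
A quick illustration of the matching cascade, assuming find.match as
defined above:

    titles <- c("Expected Values: E(Y|X)", "Predicted Values: Y|X")

    find.match("Expected Values: E(Y|X)", titles)  # exact match
    find.match("Expected", titles)                 # unique partial match
    find.match("Values", titles)                   # grep fallback: first hit
    find.match("Risk Ratios", titles)              # no match -> NA
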
diff --git a/R/simulations.plot.R b/R/simulations.plot.R
deleted file mode 100644
index b76b9b1..0000000
--- a/R/simulations.plot.R
+++ /dev/null
@@ -1,186 +0,0 @@
-#' Plot Quantities of Interest in a Zelig-fashion
-#'
-#' Various graph generation for different common types of simulated results from
-#' Zelig
-#' @usage simulations.plot(y, y1=NULL, xlab="", ylab="", main="", col=NULL, line.col=NULL, axisnames=TRUE)
-#' @param y A matrix or vector of simulated results generated by Zelig, to be
-#' graphed.
-#' @param y1 For comparison of two sets of simulated results at different
-#' choices of covariates, this should be an object of the same type and
-#' dimension as y.  If no comparison is to be made, this should be NULL. 
-#' @param xlab Label for the x-axis.
-#' @param ylab Label for the y-axis.
-#' @param main Main plot title.
-#' @param col A vector of colors.  Colors will be used in turn as the graph is
-#' built for main plot objects. For nominal/categorical data, this color
-#' renders as the bar color, while for numeric data it renders as the background
-#' color.
-#' @param line.col  A vector of colors.  Colors will be used in turn as the graph is
-#' built for line color shading of plot objects.
-#' @param axisnames a character-vector, specifying the names of the axes
-#' @return nothing
-#' @author James Honaker
-simulations.plot <-function(
-                      y, y1=NULL,
-                      xlab="", ylab="",
-                      main="",
-                      col=NULL,
-                      line.col=NULL,
-                      axisnames=TRUE
-                      ) {
-  ## Univariate Plots ##
-  if(is.null(y1)){
-
-    if (is.null(col))
-      col <- rgb(100,149,237,maxColorValue=255)
-
-    if (is.null(line.col))
-      line.col <- "black"
-
-    # Character
-    if (is.character(y)) {
-
-      # Try to cast y as integers, note that this is not always possible for the
-      # general case of characters
-      newy <- tryCatch(
-        as.numeric(y),
-        warning = function (w) NULL,
-        error = function (e) NULL
-        )
-
-      # If:
-      #   newy is not NULL (can be cast as a numeric) AND
-      #   newy is actually a collection of integers (not just numeric)
-      # Then:
-      #   we can tabulate (so sick)
-      if (!is.null(newy) && all(as.integer(y) == y)) {
-
-        # Create a sequence of names
-        nameseq <- paste("Y=", min(newy):max(newy), sep="")
-
-        # Set the heights of the barplots.
-        # Note that tabulate requires that all values are greater than zero.
-        # So, we subtract the min value (ensuring everything is at least zero),
-        # then add 1
-        bar.heights <- tabulate(newy - min(newy) + 1) / length(y)
-
-        # Barplot with (potentially) some zero columns
-        output <- barplot(
-          bar.heights,
-          xlab=xlab, ylab=ylab, main=main, col=col[1],
-          axisnames=axisnames, names.arg=nameseq
-          )
-      }
-
-      # Otherwise, we stick with old-style tables
-      else {
-        y <- if (is.null(levels(y)))
-          factor(y)
-        else
-          factor(y, levels = levels(y))
-
-        bar.heights <- table(y)/length(y)
-        bar.names <- paste("Y=", names(bar.heights), sep="")
-
-        output <- barplot(
-          bar.heights,
-          xlab=xlab, ylab=ylab, main=main, col=col[1],
-          axisnames=axisnames, names.arg=bar.names
-          )
-      }
-    }
-
-     ## Numeric
-     else if(is.numeric(y)){ 
-       den.y <- density(y)
-       output <- plot(den.y, xlab=xlab, ylab=ylab, main=main, col=line.col[1])
-       if(!identical(col[1],"n")){
-         polygon(den.y$x, den.y$y, col=col[1])
-       } 
-     }
-
-## Comparison Plots ##
-
-  }
-  else{
-
-## Character - Plot and shade a matrix  
-    if(is.character(y) & is.character(y1) & length(y)==length(y1) ){
-     
-       newy<-trunc(as.numeric(y))
-       newy1<-trunc(as.numeric(y1))
-
-       yseq<-min(c(newy,newy1)):max(c(newy,newy1))
-       nameseq<- paste("Y=",yseq,sep="")
-       n.y<-length(yseq)
-
-       colors<-rev(heat.colors(n.y^2))
-       lab.colors<-c("black","white")
-       comp<-matrix(NA,nrow=n.y,ncol=n.y)
-
-       for(i in 1:n.y){
-         for(j in 1:n.y){
-           flag<- newy==yseq[i] & newy1==yseq[j]
-           comp[i,j]<-mean(flag)
-         }
-       }
-
-       old.pty<-par()$pty
-       old.mai<-par()$mai
-
-       par(pty="s")
-       par(mai=c(0.3,0.3,0.3,0.1))
-
-       image(z=comp, axes=FALSE, col=colors, zlim=c(min(comp),max(comp)),main=main )  
- 
-       locations.x<-seq(from=0,to=1,length=nrow(comp))
-       locations.y<-locations.x
-
-       for(m in 1:n.y){
-         for(n in 1:n.y){
-           text(x=locations.x[m],y=locations.y[n],labels=paste(round(100*comp[m,n])), col=lab.colors[(comp[m,n]> ((max(comp)-min(comp))/2) )+1])
-         }
-       }
-
-       axis(side=1,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=1)
-       axis(side=2,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=3)
-       box()
-       par(pty=old.pty,mai=old.mai)
-
-
-## Numeric - Plot two densities on top of each other
-    }else if(is.numeric(y) & is.numeric(y1)){
-      if(is.null(col)){
-        semi.col.x <- rgb(142,229,238,150,maxColorValue=255)
-        semi.col.x1 <- rgb(255,114,86,150,maxColorValue=255)
-        col <- c(semi.col.x, semi.col.x1)
-      }else if(length(col)<2){
-        col <- c(col,col)
-      }
-
-      den.y<-density(y)
-      den.y1<-density(y1,bw=den.y$bw)
-
-      all.xlim<-c(min(c(den.y$x,den.y1$x)),max(c(den.y$x,den.y1$x)))
-      all.ylim<-c(min(c(den.y$y,den.y1$y)),max(c(den.y$y,den.y1$y)))
-
-      output<-plot(den.y,xlab=xlab,ylab=ylab,main=main,col=col[1],xlim=all.xlim,ylim=all.ylim)
-      par(new=TRUE)
-      output<-plot(den.y1,xlab=xlab,ylab=ylab,main="",col=col[2],xlim=all.xlim,ylim=all.ylim)
-  
-      if(!identical(col[1],"n")){
-        polygon(den.y$x,den.y$y,col=col[1])
-      }
-      if(!identical(col[1],"n")){
-        polygon(den.y1$x,den.y1$y,col=col[2])
-      }
-    }
-  }
-}
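-
-# Illustrative use (a sketch, not part of the original sources): overlay
-# draws from two simulated distributions as shaded densities.
-#   y  <- rnorm(1000)
-#   y1 <- rnorm(1000, mean = 1)
-#   simulations.plot(y, y1, xlab = "Simulated Y", main = "Comparison")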
-
diff --git a/R/summarize.R b/R/summarize.R
deleted file mode 100644
index 5f7f4a2..0000000
--- a/R/summarize.R
+++ /dev/null
@@ -1,145 +0,0 @@
-#' Generic method for summarizing simulated quantities of interest
-#' 
-#' @S3method summarize default
-#'
-#' @param obj a \code{qi} object, storing simulations of quantities of interest
-#' @return a \code{summarized.qi} object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summarize <- function(obj)
-  UseMethod("summarize")
-
-#' Summarize Simulated Quantities of Interest
-#'
-#' @usage \method{summarize}{default}(obj)
-#' @S3method summarize default
-#' @param obj a \code{qi} object, storing simulations of quantities of interest
-#' @return a 'summarized.qi' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summarize.default <- function(obj) {
-  res <- list()
-  titles <- names(attr(obj, ".index"))
-
-  for (key in titles) {
-    val <- obj[[key]]
-
-    if (!is.qi(val))
-      next
-
-    if (!is.matrix(val))
-      val <- matrix(val, ncol=1, nrow=length(val))
-
-    
-    # make a matrix that is data-friendly
-    m <- if (is.numeric(val)) {
-      matrix(NA, nrow=ncol(val), ncol=5)
-    }
-    else if (is.character(val) || is.factor(val)) {
-      levels <- levels(val)
-
-      if (is.null(levels)) {
-        #warning("Indeterminate number of levels for qi: ", key)
-        levels <- unique(c(val))
-      }
-
-      levels <- sort(levels)
-
-      matrix(NA, nrow=ncol(val), ncol=length(levels), dimnames=list(NULL, levels))
-    }
-
-    #
-    for (k in 1:ncol(val)) {
-      if (is.numeric(val[,k])) {
-        row <-c(
-                mean(val[,k], na.rm = TRUE),
-                sd(val[,k], na.rm = TRUE),
-                quantile(val[,k], c(.5, .025, .975), na.rm=TRUE)
-                ) 
-        m[k,] <- row
-
-
-        #
-        colnames(m) <- c("mean", "sd", "50%", "2.5%", "97.5%")
-      }
-    
-      else if (is.character(val[,k]) || is.factor(val[,k])) {
-
-        # A table specifying the _percentage_ of simulations matching
-        # each particular level of the factor qi's
-        result.table <- table.levels(val[,k], levels = levels)
-        result.table <- result.table/length(val[,k])
-
-        # A character-vector specifying the factors found in the qi
-        factor.names <- sort(names(result.table))
-
-        # This should prevent size errors for qi's with a NULL levels
-        # attribute; in particular, levels that never appear in this
-        # column are kept with a count of zero
-        m[k, ] <- 0
-        m[k, factor.names] <- result.table[factor.names]
-      }
-
-      else
-        m[k,] <- NA
-
-      col.names <- colnames(val)
-      rownames(m) <- if (is.null(col.names))
-        ""
-      else
-        col.names
-    }
-
-    # add to list
-    res[[key]] <- m
-  }
-
-  # cast as class - for some reason - then return
-  class(res) <- "summarized.qi"
-  res
-}
-
-
-#' Test If Value is Interpretable as a QI
-#' @param qi a potential quantity of interest
-#' @return a logical specifying whether this value should or should-not
-#'         be output
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-is.qi <- function(qi) {
-  if (is.null(qi))
-    return(FALSE)
-
-  else if (!length(qi))
-    return(FALSE)
-
-  else if (all(is.na(qi)))
-    return(FALSE)
-
-  TRUE
-}
-
-
-#' Create a table, but ensure that the correct
-#' columns exist. In particular, this allows for
-#' entries with zero as a value, which is not
-#' the default for standard tables
-#' @param x a vector
-#' @param levels a vector of levels
-#' @param ... parameters for table
-#' @return a table
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-table.levels <- function (x, levels, ...) {
-  # if levels are not explicitly set, then
-  # search inside of x
-  if (missing(levels)) {
-    levels <- attr(x, 'levels')
-    table(factor(x, levels=levels), ...)
-  }
-
-  # otherwise just do the normal thing
-  else {
-    table(factor(x, levels=levels), ...)
-  }
-}
diff --git a/R/summary.R b/R/summary.R
deleted file mode 100644
index 2a7576e..0000000
--- a/R/summary.R
+++ /dev/null
@@ -1,404 +0,0 @@
-# Summary of MCMCZelig Object
-#
-# This method produces a summary object for \code{MCMCZelig} objects
-# @param object an "MCMCZelig" object
-# @param quantiles a numeric vector specifying the quantiles to use in the
-# summary object.
-# @param ... ignored parameters
-# @return a \code{summary.MCMCZelig} object
-#' @S3method summary MCMCZelig
-summary.MCMCZelig <- function(object, quantiles = c(0.025, 0.5, 0.975), ...) {
-  out <- list()
-  out$summary <- cbind(
-                       summary(coef(object))$statistics[,1:2],
-                       summary(coef(object), quantiles=quantiles)$quantiles
-                       )
-                       
-  colnames(out$summary) <- c("Mean", "SD", paste(quantiles*100, "%",sep=""))
-  stuff <- attributes(coef(object))
-  out$call <- object$call
-  out$start <- stuff$mcpar[1]
-  out$end <- stuff$mcpar[2]
-  out$thin <- stuff$mcpar[3]
-  out$nchain <- 1
-  class(out) <- "summary.MCMCZelig"
-  out
-}
-#' Method for summarizing simulations of multiply imputed quantities of interest
-#'
-#' @S3method summary MI.sim
-#' @usage \method{summary}{MI.sim}(object, ...)
-#' @param object a `MI.sim' object
-#' @param ... ignored parameters
-#' @return a `summarized.MI.sim' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summary.MI.sim <- function(object, ...) {
-
-  summarized.list <- list()
-
-  for (key in names(object)) {
-
-    stats <- object[[key]]$stats
-
-    for (qi.name in names(stats))
-      summarized.list[[qi.name]][[key]] <- stats[[qi.name]]
-
-  }
-
-  class(summarized.list) <- "summarySim.MI"
-
-  summarized.list
-}
-#' Summary of Generalized Linear Model with Robust Error Estimates
-#'
-#' Returns summary of a glm model with robust error estimates. This only
-#' slightly differs from how the standard GLM's behave.
-#' @usage \method{summary}{glm.robust}(object, ...)
-#' @S3method summary glm.robust
-#' @param object a ``glm.robust'' fitted model
-#' @param ... parameters to pass to the standard ``summary.glm'' method
-#' @return a object of type ``summary.glm.robust'' and ``summary.glm''
-summary.glm.robust <- function(object, ...) {
-  class(object) <- c("glm", "lm")
-  res <- summary.glm(object, ...)
-  if (is.null(object$robust)) {
-    res$cov.unscaled <- covmat.unscaled <- vcovHAC(object)
-    res$robust <- "vcovHAC"
-  } else {
-    fn <- object$robust$method
-    res$robust <- object$robust$method
-    object$robust$method <- NULL
-    arg <- object$robust
-    arg$x <- object
-    res$cov.unscaled <- covmat.unscaled <- eval(do.call(fn, args=arg))
-  }
-  res$cov.scaled <- covmat <- covmat.unscaled*res$dispersion
-  if (!is.null(res$correlation)) {
-    dd <- sqrt(diag(res$cov.unscaled))
-    res$correlation <- res$cov.unscaled/outer(dd, dd)
-    dimnames(res$correlation) <- dimnames(res$cov.unscaled)
-  }
-
-  res$coefficients[,2] <- s.err <- sqrt(diag(covmat))
-  res$coefficients[,3] <- tvalue <- coefficients(object)/s.err
-  if (length(dimnames(res$coefficients)[[2]])>3) {
-    if (dimnames(res$coefficients)[[2]][3]=="z value")
-      res$coefficients[,4] <- 2 * pnorm(-abs(tvalue))
-    else
-      res$coefficients[,4] <- 2 * pt(-abs(tvalue), object$df.residual)
-  }
-  class(res) <- c("summary.glm.robust","summary.glm")
-  return(res)
-}
-#' Return a Summary of a Set of Pooled Simulated Interests
-#'
-#' Returns the summary information from a set of pooled simulated interests.
-#' The object returned contains the slots ``labels'', a character-vector
-#' specifying the labels (explanatory variable titles) of the qi's, ``titles'',
-#' a character vector specifying the names of the quantities of interest, and
-#" ``stats'', a list containing quantities of interests.
-#' @usage \method{summary}{pooled.sim}(object, ...)
-#' @S3method summary pooled.sim
-#' @param object a ``pooled.sim'' object, containing information about
-#' simulated quantities of interest
-#' @param ... Ignored parameters
-#' @return a ``summary.pooled.sim'' object storing the replicated quantities of
-#' interest
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summary.pooled.sim <- function (object, ...) {
-  model <- list()
-  stats <- list()
-  titles <- list()
-  original <- list()
-  call <- list()
-  x <- list()
-  x1 <- list()
-
-  #
-  for (key in names(object)) {
-    o <- object[[key]]
-
-    stats[[key]] <- o$stats
-    titles[[key]] <- o$titles
-  }
-
-  s <- list(
-            labels = names(object),
-            titles = names(object[[1]]$stats),
-            stats = stats
-            )
-
-  class(s) <- "summary.pooled.sim"
-
-  s
-}
-#' Summary for ``Relogit'' Fitted Model
-#'
-#' Summarize important components of the ``relogit'' model
-#' @usage \method{summary}{Relogit}(object, ...)
-#' @S3method summary Relogit
-#' @param object a ``Relogit'' object
-#' @param ... other parameters
-#' @return a ``summary.relogit'' object
-summary.Relogit <- function(object, ...) {
-  dta <- model.matrix(terms(object), data=model.frame(object))
-  class(object) <- class(object)[2]
-  res <- summary(object, ...)
-  if (object$bias.correct) {
-    n <- nrow(dta)
-    k <- ncol(dta)
-    res$cov.unscaled <- res$cov.unscaled * (n/(n+k))^2
-    res$cov.scaled <- res$cov.unscaled * res$dispersion
-    res$coefficients[,2] <- sqrt(diag(res$cov.scaled))
-    res$coefficients[,3] <- res$coefficients[,1] / res$coefficients[,2]
-    res$coefficients[,4 ] <- 2*pt(-abs(res$coefficients[,3]), res$df.residual)
-  }
-  res$call <- object$call
-  res$tau <- object$tau
-  res$bias.correct <- object$bias.correct
-  res$prior.correct <- object$prior.correct
-  res$weighting <- object$weighting
-  class(res) <- "summary.relogit"
-  return(res)
-}
-#' Summary for ``Relogit2'' Fitted Model
-#'
-#' Summarize important components of the ``relogit'' model
-#' @usage \method{summary}{Relogit2}(object, ...)
-#' @S3method summary Relogit2
-#' @param object a ``Relogit2'' object
-#' @param ... other parameters
-#' @return a ``summary.relogit2'' object
-summary.Relogit2 <- function(object, ...) {
-  res <- list()
-  res$lower.estimate <- summary.Relogit(object$lower.estimate)
-  res$upper.estimate <- summary.Relogit(object$upper.estimate)
-  res$call <- object$call
-  class(res) <- "summary.relogit2"
-  return(res)
-}
-
-
-
-
-
-
-
-
-
-
-
-
-#' Method for summarizing simulations of quantities of interest
-#'
-#' Return a ``summary.sim'' object (typically for display)
-#' @S3method summary sim
-#' @usage \method{summary}{sim}(object, ...)
-#' @param object a 'MI.sim' object
-#' @param ... ignored parameters
-#' @return a 'summarized.MI.sim' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summary.sim <- function(object, ...) {
-  res <- list(
-              model    = object$model,
-              stats    = object$stats,
-              titles   = object$titles,
-              original = object$result,
-              call     = object$call,
-              zeligcall= object$zcall,
-              x        = object$x,
-              x1       = object$x1,
-              num      = object$num
-              )
-  class(res) <- c(object$name, "summary.sim")
-  res
-}
-#' Zelig Object Summaries
-#'
-#' Compute summary data for zelig objects
-#' @S3method summary zelig
-#' @usage \method{summary}{zelig}(object, ...)
-#' @param object a zelig object
-#' @param ... parameters forwarded to the generic summary object
-#' @return the summary of the fitted model
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summary.zelig <- function (object, ...) {
-  # For now, simply get the summary of the result object
-  obj <- eval(object$result)
-
-  if (isS4(obj)) {
-
-    sigs <- findMethodSignatures('summary')
-    classes <- class(obj)
-
-    # Remove classes that do not have 'summary' methods
-    intersection <- classes[ ! sigs %in% classes ]
-    intersection <- na.omit(intersection)
-    intersection <- as.character(intersection)
-
-    # Summary only has one parameter, so we only consider the first one
-    # This may be slightly dangerous, but it should not fail
-    sig <- intersection[1]
-    
-    # if an attempt to get the summary fails, replace with a call to the S3
-    SUMMARY <- tryCatch(getMethod('summary', sig), error = function(e) summary)
-
-    # return
-    SUMMARY(obj)
-  }
-
-  else
-    # S3 objects have no problem figuring out which method to use
-    summary(obj)
-}
-#' Summary of ``setx'' Object
-#'
-#' Compute summary data for ``setx'' objects
-#' @S3method summary setx
-#' @usage \method{summary}{setx}(object, ...)
-#' @param object a ``setx'' object
-#' @param ... parameters forwarded to the generic summary object
-#' @return a ``summary.setx'' object
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summary.setx <- function (object, ...) {
-  mm <- object$matrix
-  attr(mm, "assign") <- NULL
-  attr(mm, "contrasts") <- NULL
-
-
-  structure(
-    list(
-      call = object$call,
-      label = object$label,
-      model.name = object$name,
-      formula = object$formula,
-      model.matrix = mm
-    ),
-    class = "summary.setx"
-    )
-}
-#' Summary of Multiply Imputed Statistical Models Using Rubin's Rule
-#'
-#' Combines estimates from a set of statistical models fitted to multiply
-#' imputed data sets, pooling coefficients and standard errors via Rubin's
-#' rules.
-#' @S3method summary MI
-#' @usage \method{summary}{MI}(object, subset = NULL, ...)
-#' @param object a set of fitted statistical models
-#' @param subset an optional vector of indices specifying which of the
-#' imputed analyses to pool (defaults to all of them)
-#' @param ... parameters to forward
-#' @return a list of summaries
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-summary.MI <- function (object, subset = NULL, ...) {
-
-  if (length(object) == 0)
-    stop("Invalid input: no fitted models to summarize")
-
-  else if (length(object) == 1)
-    return(summary(object[[1]]))
-
-  #
-  getcoef <- function(obj) {
-    # S4
-    if (!isS4(obj))
-      coef(obj)
-    else if ("coef3" %in% slotNames(obj))
-      obj@coef3
-    else
-      obj@coef
-  }
-
-
-  #
-  res <- list()
-
-  # Get indices
-  subset <- if (is.null(subset))
-    1:length(object)
-  else
-    c(subset)
-
-  # Compute the summary of all objects
-  for (k in subset) {
-    res[[k]] <- summary(object[[k]])
-  }
-
-
-  # Answer
-  ans <- list(
-              zelig = object[[1]]$name,
-              call = object[[1]]$result$call,
-              all = res
-              )
-
-  #
-  coef1 <- se1 <- NULL
-
-  #
-  for (k in subset) {
-    tmp <-  getcoef(res[[k]])
-    coef1 <- cbind(coef1, tmp[, 1])
-    se1 <- cbind(se1, tmp[, 2])
-  }
-
-  rows <- nrow(coef1)
-  Q <- apply(coef1, 1, mean)
-  U <- apply(se1^2, 1, mean)
-  B <- apply((coef1-Q)^2, 1, sum)/(length(subset)-1)
-  var <- U+(1+1/length(subset))*B
-  nu <- (length(subset)-1)*(1+U/((1+1/length(subset))*B))^2
-
-  coef.table <- matrix(NA, nrow = rows, ncol = 4)
-  dimnames(coef.table) <- list(rownames(coef1),
-                               c("Value", "Std. Error", "t-stat", "p-value"))
-  coef.table[,1] <- Q
-  coef.table[,2] <- sqrt(var)
-  coef.table[,3] <- Q/sqrt(var)
-  coef.table[,4] <- pt(abs(Q/sqrt(var)), df=nu, lower.tail=F)*2
-  ans$coefficients <- coef.table
-  ans$cov.scaled <- ans$cov.unscaled <- NULL
-
-  for (i in 1:length(ans)) {
-    if (is.numeric(ans[[i]]) && !names(ans)[i] %in% c("coefficients")) {
-      tmp <- NULL
-      for (j in subset) {
-        r <- res[[j]]
-        tmp <- cbind(tmp, r[[pmatch(names(ans)[i], names(res[[j]]))]])
-      }
-      ans[[i]] <- apply(tmp, 1, mean)
-    }
-  }
-
-  class(ans) <- "summaryMI"
-  ans
-}
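-
-# The pooling above follows Rubin's rules; a minimal standalone sketch,
-# assuming hypothetical m-column matrices `coefs` and `ses` of
-# per-imputation coefficients and standard errors:
-#   m <- ncol(coefs)
-#   Q <- rowMeans(coefs)                   # pooled point estimates
-#   U <- rowMeans(ses^2)                   # within-imputation variance
-#   B <- rowSums((coefs - Q)^2) / (m - 1)  # between-imputation variance
-#   se.pooled <- sqrt(U + (1 + 1/m) * B)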
-
-print.summaryMI <- function(x, subset = NULL, ...){
-  m <- length(x$all)
-  if (m == 0)
-    m <- 1
-  if (any(subset > max(m)))
-    stop("the subset selected lies outside the range of available \n        observations in the MI regression output.")
-  cat("\n  Model:", x$zelig)
-  cat("\n  Number of multiply imputed data sets:", m, "\n")
-  if (is.null(subset)) {
-    cat("\nCombined results:\n\n")
-    cat("Call:\n")
-    print(x$call)
-    cat("\nCoefficients:\n")
-    print(x$coefficients)
-    cat("\nFor combined results from datasets i to j, use summary(x, subset = i:j).\nFor separate results, use print(summary(x), subset = i:j).\n\n")
-  }
-  else {
-    if (is.function(subset))
-      M <- 1:m
-    if (is.numeric(subset))
-      M <- subset
-    for(i in M){
-      cat(paste("\nResult with dataset", i, "\n"))
-      print(x$all[[i]], ...)
-    }
-  }
-}
-
diff --git a/R/t.setx.R b/R/t.setx.R
deleted file mode 100644
index 525c178..0000000
--- a/R/t.setx.R
+++ /dev/null
@@ -1,14 +0,0 @@
-#' Matrix Transpose of a ``setx'' Object
-#'
-#' Returns a ``setx'' object as column vector. If multiple values for each
-#' explanatory term has been set, then return a NxM matrix where `N'
-#' is the number of explanatory terms and `M' is the number of values set for
-#' each term.
-#'
-#' @S3method t setx
-#' @usage \method{t}{setx}(x)
-#' @param x a `setx' object
-#' @return a transposed matrix
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-t.setx <- function(x)
-  t(x$matrix)
diff --git a/R/terms.R b/R/terms.R
deleted file mode 100644
index cc8165c..0000000
--- a/R/terms.R
+++ /dev/null
@@ -1,235 +0,0 @@
-###
-## terms
-## -accept single and multiple equations:
-## -in case of single equations, the equation is named "mu". is this right?
-## -if mu=y~x:z then the attr(tt,"variable") gives list(y,x:z). Should it be list(y,x,z) ??
-## -
-
-
-#' Extract Terms from a \code{multiple} Object
-#'
-#' Extracts terms from Zelig-3.5-style formulae. This function is scheduled for
-#' removal.
-#' @usage \method{terms}{multiple}(x, data=NULL,...)
-#' @param x a Zelig v3.5 formula
-#' @param data a \code{data.frame}
-#' @param ... ignored parameters
-#' @author Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
-#' @S3method terms multiple
-terms.multiple<-function(x, data=NULL,...){
-        object <- x
-        termsexist<-attr(object,"terms")
-        if(!(is.null(termsexist)))
-          return (termsexist)
-        
-        nreq<-nrConstr<-nrEquationsNew<-0
-        constr<-XconstrEqn<-variables<-termlabels<-depVars<-objectNew<-intercAttr<-depFactors<-list()
-        depFactorVar<-depLevels<-namesConstr<-c()
-        if(!(any(class(object)=="list"))){
-                object<-list(object)
-                names(object)<-"mu"
-        }
-        namesOfEquations<- names(object)
-        nrEquations <-length(object)
-        "%w/o%" <- function(x,y) x[!x %in% y]
-        
-        for (i in 1:nrEquations){
-                TT<-terms.formula(object[[i]], specials=c("id","tag"))               
-                attrTTvars<-attr(TT,"variables")
-                attrTTlabels<-attr(TT,"term.labels")
-                
-                eqni<-object[[i]]                    
-                namei<-namesOfEquations[[i]]            
-                tagattr<-attr(TT,"specials")$tag         
-                hastag<-!(is.null(tagattr))
-                if (hastag){
-                        ## has tag so make a new list of variables and term.labels
-                        newVars<-list()           
-                        newLabels<-c()
-                        indxV<-indxL<-1
-                        constrTmp<-c()
-                        for(j in 1:length(tagattr)){
-                                taglabels<-c()
-                                if(length(eqni)==3)
-                                  lind<-tagattr[[j]]-1
-                                else
-                                  lind<-tagattr[[j]]
-                                vind<-tagattr[[j]]+1
-                                ## add all vars/terms prior to tag into new list of
-                                ## newVars and newLabels
-                                for(v in indxV:(vind))
-                                  newVars<-c(newVars,attrTTvars[[v]])
-                                newVars[[length(newVars)]]<-NULL
-                                indxV<-vind+1
-                                
-                                for(l in c(indxL:lind))
-                                  newLabels<-c(newLabels,attrTTlabels[[l]])
-                                newLabels<-newLabels[-(length(newLabels))]
-                                indxL<-lind+1
-                                
-                                ## deparse and fix the tag
-                                tagAsList <-.fixTag(.deparseTag(attrTTvars[[vind]]))
-                                for (tindx in 1:length(tagAsList)){
-                                        t<-tagAsList[[tindx]]
-                                        if(((t$var %in% namesOfEquations)==FALSE) && t$var != "none" && t$var != "1"){
-                                                newVars<-c(newVars,parse(text=t$var)[[1]])
-                                                newLabels<-c(newLabels,t$var)
-                                        }
-                                        if(((t$id %in% namesOfEquations)==FALSE) && t$id !="none" && t$id !="1"){
-                                                ##print(t$id)
-                                                newVars<-c(newVars,parse(text=t$id)[[1]])
-                                                newLabels<-c(newLabels,t$id)
-                                        }
-                                        ## constraints ?
-                                        if(t$var !="none" && t$label !="none" && t$id =="none"){
-                                                nrConstr<-nrConstr+1
-                                                namesConstr<-c(namesConstr,t$label)
-                                                constr[[nrConstr]]<-c(i,t$label,t$var)
-                                                constrTmp<-c(constrTmp,t$var)   ##???? what is constrTMP?
-                                        }
-                                }
-                        }
-                        ## if there is any var/term remaining after tags
-                        ## add them to newVars and newLabels
-                        if(length(attrTTvars)>vind){
-                                for(v in (vind+1):length(attrTTvars))
-                                  newVars<-c(newVars,attrTTvars[[v]])
-                        }
-                        
-                        if(length(attrTTlabels)>lind){
-                                for(l in (lind+1):length(attrTTlabels))
-                                  newLabels<-c(newLabels,attrTTlabels[[l]])
-                        }
-                        
-                        XconstrEqn[[i]]<-constrTmp
-
-                        ## make newVars and newLabels unique
-                        newVars<-unique(newVars)  
-                        newLabels <- unique(newLabels)
-                } else{
-                        ## there is no tag => newVars and newLabels remain unchanged
-                        newVars<-attrTTvars
-                        newLabels<-attrTTlabels
-                }
-                nrEquationsNew<-nrEquationsNew+1
-                objectNew[[namei]]<-eqni
-                if (length(eqni)==3){
-
-                        nreq=nreq+1    ## number of required equations
-                        lhs<-eqni[[2]]
-                        if (length(lhs)>1 && lhs[[1]]=="id"){
-                                depVars[[namei]]<-lhs[[3]]
-                                depFactorVar<-c(depFactors,deparse(lhs[[2]]))
-                                depLevels<-c(depLevels,lhs[[3]])
-                        }else
-                        depVars[[namei]]<-deparse(eqni[[2]])
-                        
-                }
-                attr(TT,"variables")<-as.call(newVars)
-                attr(TT,"term.labels")<-newLabels
-                variables[[namei]]<-attr(TT,"variables")
-                termlabels[[namei]]<-attr(TT,"term.labels")
-                intercAttr[[namei]]<-attr(TT,"intercept")
-        }  ## end of for each equation
-        
-        namesOfEquations<-names(objectNew)
-        myattr<-list()
-        result<-objectNew
-        constraints<-subs<-FALSE
-
-        ## construct constraints
-        namesConstr<-unique(namesConstr)
-        if(length(constr)>0){
-                constraints<-matrix(NA,nrow=nrEquationsNew,ncol=length(namesConstr),dimnames=list(namesOfEquations,namesConstr))
-                for(i in 1:length(constr)){
-                        constri<-constr[[i]]
-                        eqind<-constri[[1]]
-                        eq<-namesOfEquations[as.numeric(eqind)]
-                        lab<-constri[[2]]
-                        constraints[eq,lab]<-constri[[3]]
-                }
-        }
-        
-        indVars<-unique(unlist(termlabels))
-        if(length(depFactorVar) !=0)
-          depFactors<-list("depFactorVar"=unique(unlist(depFactorVar)),"depLevels"=depLevels)
-        else
-          depFactors<-FALSE
-        
-        whiche<-which(lapply(termlabels,length)!=0)
-        myattr$systEqns<-names(whiche)
-        myattr$ancilEqns<-"%w/o%"(namesOfEquations,myattr$systEqns)
-        
-        myattr$variables<-variables
-        myattr$term.labels<-termlabels
-        myattr$indVars<-indVars
-        
-        myattr$depVars<-depVars
-        myattr$depFactors<-depFactors
-        myattr$constraints<-constraints
-        myattr$subs<-subs
-        myattr$response<-1
-        myattr$intercept<-intercAttr
-        attributes(result)<-myattr
-        names(result)<-namesOfEquations
-        class(result)<-c("terms","multiple","list")
-        return(result)
-}
-
-###
-## Fix the deparsed tag
-## 
-
-
-.fixTag <- function(l){
-        
-        if(l$var == "1" && l$label!="none"){
-                ## tag(1,z1 | state) == tag (z1|state)
-                l$var <- l$label
-                l$label <- "none"
-                
-        }
-        if(l$label =="none"){
-                ## tag(1+z1|state)
-                vars<-.trim(unlist(strsplit(l$var,"+", fixed=TRUE)))
-        }else{
-                ## tag(z1,w1+w2|state)
-                vars<-.trim(unlist(strsplit(l$label,"+", fixed=TRUE)))
-        }
-        if(length(vars) == 1){
-                ## nothing to expand
-                return (list(l))
-        }else{
-                alltgs<-list()
-                for(i in 1:length(vars)){
-                        if(l$label == "none")
-                          alltgs[[i]] <- list(label="none",var=vars[[i]],id=l$id)
-                        else
-                          alltgs[[i]] <- list(label="none",var=paste(l$var,":",vars[[i]],sep=""),id=l$id)
-                        
-                }
-        }
-        return (alltgs)
-        
-}
-#' Model Terms for 'vglm' Models
-#' @usage \method{terms}{vglm}(x, ...)
-#' @S3method terms vglm
-#' @param x a fitted model object from the VGAM library
-#' @param ... ignored parameters
-#' @return the models terms of this fitted model object
-#' @author Ferdinand Alimadhi, Kosuke Imai and Olivia Lau
-terms.vglm <- function(x, ...)
-  x@terms$terms
-#' Model Terms for a Zelig Object
-#' 
-#' This method simply extracts the model terms for the fitted model passed to 
-#' the \code{zelig} function.
-#' @S3method terms zelig
-#' @usage \method{terms}{zelig}(x, ...)
-#' @param x a \code{zelig} object
-#' @param ... forwarded parameters
-#' @return terms of the original fitted model
-terms.zelig <- function (x, ...) {
-  terms(x$result, ...)
-}
diff --git a/R/termsFromFormula.R b/R/termsFromFormula.R
deleted file mode 100644
index c3397c4..0000000
--- a/R/termsFromFormula.R
+++ /dev/null
@@ -1,15 +0,0 @@
-#' Extract Terms from Zelig-style Formulae
-#'
-#' This method is a sugary function to extract terms from any type of 
-#' Zelig-style formula.
-#' @param obj a Zelig-style formula
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-termsFromFormula <- function (obj) {
-  # Do not put all of this function on one line, because it will make error
-  # messages long and confusing
-  obj <- parseFormula(obj)
-
-  # Actually extract the terms, then return
-  terms(obj)
-}
diff --git a/R/tobit.R b/R/tobit.R
deleted file mode 100644
index 948bec3..0000000
--- a/R/tobit.R
+++ /dev/null
@@ -1,143 +0,0 @@
-#' Interface between the Zelig Model tobit and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param below a numeric or infinite specifying a lower boundary for censored
-#' responses
-#' @param above a numeric or infinite specifying an upper boundary for censored
-#' responses
-#' @param robust a boolean specifying whether to produce robust error estimates
-#' @param cluster ...
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2tobit <- function (
-                         formula, ..., 
-                         below = 0, above = Inf, 
-                         robust = FALSE,
-                         cluster = NULL,
-                         data
-                         ) {
-
-  # Load survival
-  loadDependencies("survival")
-
-  if (!(is.null(cluster) || robust))
-    stop("If cluster is specified, then `robust` must be TRUE")
-
-  # Add cluster term
-  if (robust || !is.null(cluster))
-    formula <- cluster.formula(formula, cluster)
-
-  # make.surv wraps the response in a Surv() object, as survreg requires
-  formula <- make.surv(formula, below, above)
-
-  z(
-    .function = "survreg",
-
-    formula = formula,
-    dist = "gaussian",
-    data = data,
-    robust = robust,
-    ...
-    )
-}
-
-
-#
-make.surv <- function (formula, below, above) {
-
-  lhs <- formula[[2]]
-
-  if (grepl("Surv", as.character(lhs)))
-    return(formula)
-
-  if (!(is.numeric(below) && is.numeric(above))) {
-    warning("`below` and `above` must be numeric; ",
-            "returning the original formula")
-    return(formula)
-  }
-
-  if (above == Inf) {
-    # Empty?
-    # This seems like a mistake inherited from old code
-  }
-
-  else if (below == -Inf && above == Inf)
-    stop("This model does not support censoring. Try the \"normal\" model")
-
-  else if (below == -Inf && above != Inf)
-    stop("This model does not support right-censored data")
-
-  else if (is.finite(below) && is.finite(above))
-    stop("This model does not support interval-censored data")
-
-  # That is, this model only supports left-censored data
-  # Surv( <outcome> , <below> < <outcomes> )
-  lhs <- call("Surv", lhs, call("<", below, lhs), type="left")
-
-  # Place back within formula
-  formula[[2]] <- lhs
-
-  # Return
-  formula
-}
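-
-# For instance (a sketch, not from the original sources), with the default
-# bounds below = 0 and above = Inf, a formula y ~ x is rewritten as:
-#   Surv(y, 0 < y, type = "left") ~ x
-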
-#' Param Method for the \code{tobit} Zelig Model
-#' @note This method is used by the \code{tobit} Zelig model
-#' @usage \method{param}{tobit}(obj, num, ...)
-#' @S3method param tobit
-#' @param obj a 'zelig' object
-#' @param num an integer specifying the number of simulations to sample
-#' @param ... ignored parameters
-#' @return a list to be cast as a 'parameters' object
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-param.tobit <- function(obj, num=1000, ...) {
-  cov <- vcov(.fitted)
-  mu <- c(coef(.fitted), log(.fitted$scale))
-
-  # Return
-  list(
-       coef = mvrnorm(num, mu=mu, Sigma=cov),
-       linkinv = NULL
-       )
-}
-#' Compute quantities of interest for 'tobit' Zelig models
-#' @usage \method{qi}{tobit}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
-#' @S3method qi tobit
-#' @param obj a 'zelig' object
-#' @param x a 'setx' object or NULL
-#' @param x1 an optional 'setx' object
-#' @param y this parameter is reserved for simulating average treatment effects,
-#' though this feature is currently supported by only a handful of models
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of
-#' interest with their simulations
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-qi.tobit <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  # This needs to be fixed.
-  ev1 <- ev2 <- pr1 <- pr2 <- fd <- NA
-
-  # return
-  list("Expected Values: E(Y|X)"  = ev1,
-       "Expected Values: E(Y|X1)" = ev2,
-       "Predicted Values: Y|X"    = pr1,
-       "Predicted Values: Y|X1"   = pr2,
-       "First Differences: E(Y|X1) - E(Y|X)" = fd
-       )
-}
-#' Describe a ``tobit'' model to Zelig
-#' @usage \method{describe}{tobit}(...)
-#' @S3method describe tobit
-#' @param ... ignored parameters
-#' @return a list to be processed by `as.description'
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-describe.tobit <- function(...) {
-  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-       year     = 2011,
-       category = "continuous",
-       text = "Linear regression for Left-Censored Dependent Variable"
-       )
-}
diff --git a/R/twosls.R b/R/twosls.R
deleted file mode 100644
index bbf40d3..0000000
--- a/R/twosls.R
+++ /dev/null
@@ -1,279 +0,0 @@
-#' Interface between the Zelig Model twosls and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2twosls <- function (formula, ..., data) {
-
-  loadDependencies("systemfit")
-
-  # Helper function to perform set-difference
-  "%w/o%" <- function(x, y)
-    x[!x %in% y]
-
-  formula<-parse.formula(formula, "twosls")
-  tt<-terms(formula)
-
-  ins<-names(tt) %w/o% names(attr(tt,"depVars"))
-  if(length(ins)!=0)
-    if(length(ins)==1)
-      inst <- formula[[ins]]
-    else 
-      inst <- formula[ins]
-
-  else
-    stop("twosls model requires instrument!!\n")
-
-  class(formula) <- c("multiple", "list")
-
-  # Return
-  list(
-       .function = "callsystemfit",
-       formula = formula[names(attr(tt,"depVars"))],
-       method  = "2SLS",
-       inst    = inst,
-       data = data,
-       ...
-       )
-}
-
-#' @S3method param twosls
-param.twosls <- function(obj, num=1000, ...) {
-
-  # Produce a vector of all terms
-  big.coef <- coef(obj)
-
-  # Produce a pretty sparse matrix containing 3 vcov matrices.
-  #
-  # Note that this matrix will give a value of zero to any invalid row-column
-  # combination.
-  # In particular, any terms that do not belong to the same equation will have
-  # a zero value.
-  big.vcov <- vcov(obj)
-
-  # This is a complete list of the terms. This is largely ignored, aside from
-  # the fact that we need a list of the formulae. In general, terms.multiple
-  # produced a pretty unwieldy list of items.
-  all.terms <- terms(obj)
-
-  # This list stores the results
-  simulations.list <- list()
-
-  # Iterate through the set of terms, and simulate each list separately.
-  for (key in names(all.terms)) {
-
-    # Extract the terms for an individual model.
-    eq.terms <- terms(all.terms[[key]])
-
-    # Extract the labels for the terms
-    eq.term.labels <- attr(eq.terms, "term.labels")
-
-    # Add the label for the intercept column, if it should exist
-    if (attr(eq.terms, "intercept"))
-      eq.term.labels <- c("(Intercept)", eq.term.labels)
-
-    # Format the title, this should look like:
-    #   <list-item-name>_<term-label>
-    #
-    # So for the list: list(mu1 = y ~ x + sin(x))
-    # We get:
-    #   "mu1_(Intercept)" "mu1_x" "mu1_sin(x)"
-    entries <- paste(key, eq.term.labels, sep = "_")
-
-    # Extract the mean-value of this term (from the lumped-together vector)
-    eq.coef <- big.coef[entries]
-
-    # Extract the vcov matrix of this term (from the lumped-together matrix)
-    eq.vcov <- big.vcov[entries, entries]
-
-    # Simulate the parameters
-    eq.simulations <- mvrnorm(num, eq.coef, eq.vcov)
-
-    # Name the columns
-    colnames(eq.simulations) <- eq.term.labels
-
-    # Add to the list
-    simulations.list[[key]] <- eq.simulations
-
-  }
-
-
-  # Return.
-  list(
-       coef = simulations.list,
-       linkinv = NULL
-       )
-}
-
-#' @S3method qi twosls
-qi.twosls <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  # Compute the expected value of multistage LS methods
-  compute.ev <- function (obj, x, param) {
-    #
-    if (is.null(x) || is.na(x)) {
-      return(NA)
-    }
-
-    # If 'x' has too many rows, there will currently be errors. This is an issue
-    # in Zelig-core
-    if (nrow(x$matrix) > 1) {
-      warning("This package does not currently support pooled results.")
-      x <- x[1, ]
-    }
-
-    # Begin regular function
-    terms <- terms(obj)
-
-    coef.list <- coef(param)
-
-    # Hold Results
-    eta <- list()
-
-    #
-    for (key in names(coef.list)) {
-      #
-      coef <- coef.list[[key]]
-      # print(colnames(coef))
-      small.x <- as.matrix(x$matrix[, colnames(coef)])
-      #
-      eta[[key]] <- coef %*% (small.x)
-    }
-
-
-    # Convert list into a matrix
-    eta <- Reduce(function (x, y) cbind(x, y), eta)
-    colnames(eta) <- names(terms)
-
-    eta
-  }
-
-  ev1 <- compute.ev(obj, x, param)
-  ev2 <- compute.ev(obj, x1, param)
-  fd <- ev2 - ev1
-
-  # Name each column after the associated equation
-
-  # Return the results
-  list(
-       "Expected Value: E(Y|X)" = ev1,
-       "Expected Value (for X1): E(Y|X1)" = ev2,
-       "First Differences: E(Y|X1)-E(Y|X)" = ev2 - ev1
-       )
-}
-
-#' @S3method describe twosls
-describe.twosls <- function (...) {
-  category <- "continuous"
-  description  <- "Two Stage Least Squares"
-  authors <- c("Ferdinand Alimadhi", "Ying Lu", "Elena Villalon")
-  year <- 2007
-
-  package <-list(
-                 name = "systemfit",
-		 version = "0.8"
-		 )
-
-  parameters <- list()
-  parameters$mu <-list(
-                       equations=c(2,Inf),
-                       tagsAllowed=TRUE,
-                       depVar=TRUE,
-                       expVar=TRUE
-                       )
-  parameters$inst<-list(
-                        equations=c(1,1),
-                        tagsAllowed=FALSE,
-                        depVar=FALSE,
-                        expVar=TRUE
-                        )
- 
-  list(category = category, authors = authors, year = year, description = description, package = package, parameters = parameters)
-}
-
-#' @S3method plot sim.twosls
-plot.sim.twosls <- function (x, ...) {
-
-  # Define a local function to plot a set of quantities of interest
-  plotSet <- function (title) {
-    for (col in colnames(qis[[title]])) {
-      q <- qis[[title]][, col]
-      plot(density(q), main = paste(col, title, sep=": "))
-    }
-  }
-
-  # Code begins here
-
-  qis <- as.list.qi(x$qi)
-  qis <- Filter(function (y) any(!is.na(y)), qis)
-  qis <- Filter(is.matrix, qis)
-
-
-  max.cols <- max(unlist(Map(ncol, qis)))
-  layout.matrix <- matrix(0, length(qis), max.cols)
-  rownames(layout.matrix) <- names(qis)
-
-  count <- 1
-
-  for (title in names(qis)) {
-    for (k in 1:ncol(qis[[title]])) {
-      layout.matrix[title, k] <- count
-      count <- count + 1
-    }
-  }
-
-  layout(layout.matrix)
-
-  for (key in names(qis)) {
-    plotSet(key)
-  }
-}
-
-callsystemfit<-function(formula,data,method,inst=NULL,...){
-  # Call systemfit..
-  out <- systemfit(
-                   data = data,
-                   formula = formula,
-                   method = method,
-                   inst = inst,
-                   ...
-                   )
-
-  # Assign class to formula, so that it is correctly parsed
-  class(formula) <- c("multiple", "list")
-  
-  # Set the terms explicitly
-  attr(out,"terms") <- terms(formula)
-
-  # Set the class explicitly
-  class(out) <- c("multiple", class(out))
-
-  # Fin. Return the modified object
-  return(out)
-}
-
-as.list.qi <- function (x, names = "") {
-  class(x) <- "list"
-  indices <- attr(x, ".index")
-  attr(x, ".index") <- NULL
-  rename.keys(x, indices, names(indices))
-}
-
-rename.keys <- function (x, keys, to, warn = TRUE) {
-  all.names <- names(x)
-  indices <- match(keys, all.names)
-
-  if (any(is.na(indices)))
-    stop("Keys contains values that are not in `x`")
-
-  all.names[indices] <- to
-  names(x) <- all.names
-
-  x
-}
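-
-# A small illustration (sketch): rename.keys(list(a = 1, b = 2), "a", "A")
-# returns list(A = 1, b = 2); keys not present in `x` raise an error.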
-
diff --git a/R/user.prompt.R b/R/user.prompt.R
deleted file mode 100644
index 0332698..0000000
--- a/R/user.prompt.R
+++ /dev/null
@@ -1,14 +0,0 @@
-#' Prompts user to hit enter
-#' @title Prompt User
-#' @param msg a character-string, specifying a message to be displayed
-#' @return This function is used for its side effects
-#' @export
-#' @note This function is primarily used by Zelig demo scripts
-user.prompt <- function (msg = NULL) {
-  if (is.null(msg))
-    msg <- "Press <return> to continue: "
-
-  msg <- paste("\n", msg, sep="")
-
-  invisible(readline(msg))
-}
diff --git a/R/utils.R b/R/utils.R
new file mode 100755
index 0000000..7d905df
--- /dev/null
+++ b/R/utils.R
@@ -0,0 +1,221 @@
+#' Compute the Statistical Mode of a Vector
+#' @aliases Mode mode
+#' @param x a vector of numeric, factor, or ordered values
+#' @return the statistical mode of the vector. If two modes exist, one is
+#'   randomly selected (by design)
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+Mode <- function (x) {
+  # build a table of values of x
+  tab <- table(as.factor(x))
+  # find the mode, then if there's more than one, select one randomly
+  v <- sample(names(which(tab == max(tab))), size = 1)
+  # if it came in as a factor, we need to re-cast it
+  # as a factor, with the same exact levels
+  if (is.factor(x))
+    return(factor(v, levels = levels(x)))
+  # re-cast as any other data-type
+  as(v, class(x))
+}
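+
+# Illustrative behavior (a sketch, not part of the package sources):
+#   Mode(c(1, 2, 2, 3))              # 2
+#   Mode(factor(c("a", "b", "b")))   # factor "b", levels kept as a, b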
+
+## Zelig 3 and 4 backward compatibility
+mode <- Mode
+
+#' Compute the Statistical Median of a Vector
+#' @param x a vector of numeric or ordered values
+#' @param na.rm ignored
+#' @return the median of the vector
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+Median <- function (x, na.rm=NULL) {
+  v <- ifelse(is.numeric(x),
+              median(x),
+              levels(x)[ceiling(median(as.numeric(x)))]
+  )
+  if (is.ordered(x))
+    v <- factor(v, levels(x))
+  v
+}
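+
+# Illustrative behavior (a sketch): numeric input gives the usual median;
+# ordered input returns the level at the median rank.
+#   Median(c(1, 2, 3, 4))                                   # 2.5
+#   Median(ordered(c("lo", "mid", "mid"), c("lo", "mid")))  # "mid"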
+
+#' Create a table, but ensure that the correct
+#' columns exist. In particular, this allows for
+#' entries with zero as a value, which is not
+#' the default for standard tables
+#' @param x a vector
+#' @param levels a vector of levels
+#' @param ... parameters for table
+#' @return a table
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+table.levels <- function (x, levels, ...) {
+  # if levels are not explicitly set, then
+  # search inside of x
+  if (missing(levels)) {
+    levels <- attr(x, 'levels')
+    table(factor(x, levels=levels), ...)
+  }
+  # otherwise just do the normal thing
+  else {
+    table(factor(x, levels=levels), ...)
+  }
+}
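+
+# A sketch of the motivating case: levels absent from x keep a zero count.
+#   table.levels(c("a", "a", "c"), levels = c("a", "b", "c"))
+#   #  a  b  c
+#   #  2  0  1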
+
+#' Compute central tendency as appropriate to data type
+#' @param val a vector of values
+#' @return a mean (if numeric) or a median (if ordered) or mode (otherwise)
+#' @export
+avg <- function(val) {
+  if (is.numeric(val))
+    mean(val)
+  else if (is.ordered(val))
+    Median(val)
+  else
+    Mode(val)
+}
+
+#' Set new value of a factor variable, checking for existing levels
+#' @param fv factor variable
+#' @param v value
+#' @return a factor variable with the value \code{v} and the same levels
+#' @keywords internal
+setfactor <- function (fv, v) {
+  lev <- levels(fv)
+  if (!v %in% lev)
+    stop("Wrong factor")
+  return(factor(v, levels = lev))
+}
+
+#' Set new value of a variable as appropriate to data type
+#' @param val old value
+#' @param newval new value
+#' @return a variable of the same type with the value \code{newval}
+#' @keywords internal
+setval <- function(val, newval) {
+  if (is.numeric(val))
+    newval
+  else if (is.ordered(val))
+    newval
+  else {
+    lev <- levels(val)
+    if (!newval %in% lev)
+      stop("Wrong factor")
+    return(factor(newval, levels = lev))
+  }
+} 
+
+#' Calculate the reduced dataset to be used in \code{\link{setx}}
+#' 
+#' This method is used internally
+#' 
+#' @param dataset Zelig object data, possibly split to deal with \code{by} argument
+#' @param s list of variables and their tentative \code{setx} values
+#' @param formula a simplified version of the Zelig object formula (typically with 1 on the lhs)
+#' @param data Zelig object data
+#' @param avg function of data transformations
+#' @return a list of all the model variables either at their central tendency or their \code{setx} value
+#' @export
+#' @keywords internal
+#' @author Christine Choirat
+reduce = function(dataset, s, formula, data, avg = avg) {
+  pred <- try(terms(fit <- lm(formula, data), "predvars"), silent = TRUE)
+  if ("try-error" %in% class(pred)) # exp and weibull
+    pred <- try(terms(fit <- survreg(formula, data), "predvars"), silent = TRUE)
+  dataset <- model.frame(fit)
+  ldata <- lapply(dataset, avg)
+  if (length(s) > 0) {
+    n <- union(as.character(attr(pred, "predvars"))[-1],
+               names(dataset))
+    if (is.list(s[[1]]))
+      s <- s[[1]]
+    m <- match(names(s), n)
+    ma <- m[!is.na(m)]
+    if (!all(complete.cases(m))) {
+      w <- paste("Variable '", names(s[is.na(m)]),
+                 "' not in data set.\n", sep = "")
+      warning(w)
+    }
+    for (i in seq(n[ma]))
+      ldata[n[ma]][i][[1]] <- setval(dataset[n[ma]][i][[1]],
+                                     s[n[ma]][i][[1]])
+  }
+  return(ldata)
+}
+
+#' Summarize Simulated Quantities of Interest (Continuous Case)
+#' @param qi a matrix of simulated quantities of interest
+#' @return a formatted qi
+#' @keywords internal
+#' @author Christine Choirat
+statmat <- function(qi) {
+  m <- t(apply(qi, 2, quantile, c(.5, .025, .975), na.rm = TRUE))
+  n <- matrix(apply(qi, 2, mean, na.rm = TRUE))
+  colnames(n) <- "mean"
+  o <- matrix(apply(qi, 2, sd, na.rm = TRUE))
+  colnames(o) <- "sd"
+  p <- cbind(n, o, m)
+  return(p)
+}
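+
+# A sketch of the output shape: each column of a simulation matrix becomes
+# a row of summary statistics.
+#   qi <- matrix(rnorm(2000), ncol = 2)
+#   statmat(qi)   # 2 x 5 matrix: mean, sd, 50%, 2.5%, 97.5%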
+
+#' Summarize Simulated Quantities of Interest (Discrete Case)
+#' @param qi quantity of interest in the discrete case
+#' @param num number of simulations
+#' @return a formatted qi
+#' @keywords internal
+#' @author Christine Choirat
+statlevel <- function(qi, num) {
+    if (is.matrix(qi)){
+        #m <- t(apply(qi, 2, table)) / num
+        all.levels <- levels(qi)
+        m <- t(apply(qi, 2, function(x) table(factor(x, levels=all.levels)))) / num
+    }else{
+        m <- table(qi) / num
+    }
+  return(m)
+}
+
+#' Pass Quantities of Interest to Appropriate Summary Function
+#' 
+#' @param qi quantity of interest (e.g., estimated value or predicted value)
+#' @param num number of simulations
+#' @return a formatted qi
+#' @keywords internal
+#' @author Christine Choirat
+stat <- function(qi, num) {
+  if (is.null(attr(qi, "levels")))
+    return(statmat(qi))
+  else
+    return(statlevel(qi, num))
+}
+
+#' Generate Formulae that Consider Clustering
+#'
+#' This method is used internally by the "Zelig" Package to interpret
+#' clustering in GEE models.
+#' @param formula a formula object
+#' @param cluster a vector
+#' @return a formula object describing clustering
+cluster.formula <- function (formula, cluster) { 
+  # Convert LHS of formula to a string
+  lhs <- deparse(formula[[2]])
+  cluster.part <- if (is.null(cluster))
+    # NULL values require
+    sprintf("cluster(1:nrow(%s))", lhs)
+  else
+    # Otherwise we trust user input
+    sprintf("cluster(%s)", cluster)
+  update(formula, paste(". ~ .", cluster.part, sep = " + "))
+}
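+
+# For example (a sketch):
+#   cluster.formula(y ~ x1 + x2, "id")   # y ~ x1 + x2 + cluster(id)
+#   cluster.formula(y ~ x1, NULL)        # y ~ x1 + cluster(1:nrow(y))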
+
+
+#' Zelig Copy of plyr::mutate to avoid namespace conflict with dplyr
+#' @keywords internal
+zeligPlyrMutate<-function (.data, ...) 
+{
+    stopifnot(is.data.frame(.data) || is.list(.data) || is.environment(.data))
+    cols <- as.list(substitute(list(...))[-1])
+    cols <- cols[names(cols) != ""]
+    for (col in names(cols)) {
+        .data[[col]] <- eval(cols[[col]], .data, parent.frame())
+    }
+    .data
+}
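+
+# A sketch of its use: add a derived column without loading plyr.
+#   df <- data.frame(x = 1:3)
+#   zeligPlyrMutate(df, x2 = x^2)   # data.frame with x and x2 = c(1, 4, 9)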
+
diff --git a/R/vcov.R b/R/vcov.R
deleted file mode 100644
index f64e685..0000000
--- a/R/vcov.R
+++ /dev/null
@@ -1,17 +0,0 @@
-#' @S3method vcov gee.naive
-vcov.gee.naive <- function(object, ...)
-  object$naive.variance
-
-#' @S3method vcov gee.robust
-vcov.gee.robust <- function(object, ...)
-  object$robust.variance
-
-#' @S3method vcov glm.robust
-vcov.glm.robust <- function(object, ...) {
-  so <- summary.glm.robust(object, corr=FALSE, ...)
-  so$dispersion * so$cov.unscaled
-}
-
-#' @S3method vcov Relogit
-vcov.Relogit <- function(object, ...) 
-  summary.Relogit(object, ...)$cov.scaled
diff --git a/R/wrappers.R b/R/wrappers.R
new file mode 100755
index 0000000..0965f41
--- /dev/null
+++ b/R/wrappers.R
@@ -0,0 +1,269 @@
+#' Estimating a Statistical Model
+#'
+#' The zelig command estimates a variety of statistical
+#' models.  Use \code{zelig} output with \code{setx} and \code{sim} to compute
+#' quantities of interest, such as predicted probabilities, expected values, and
+#' first differences, along with the associated measures of uncertainty
+#' (standard errors and confidence intervals).
+#'
+#' @param formula a symbolic representation of the model to be
+#'   estimated, in the form \code{y ~ x1 + x2}, where \code{y} is the
+#'   dependent variable and \code{x1} and \code{x2} are the explanatory
+#'   variables, and \code{y}, \code{x1}, and \code{x2} are contained in the
+#'   same dataset.  (You may include more than two explanatory variables,
+#'   of course.)  The \code{+} symbol means ``inclusion'' not
+#'   ``addition.''  You may also include interaction terms and main
+#'   effects in the form \code{x1*x2} without computing them in prior
+#'   steps; \code{I(x1*x2)} to include only the interaction term and
+#'   exclude the main effects; and quadratic terms in the form
+#'   \code{I(x1^2)}
+#' @param model the name of a statistical model.
+#'   Type \code{help.zelig("models")} to see a list of currently supported
+#'   models
+#' @param data the name of a data frame containing the variables
+#'   referenced in the formula, or a list of multiply imputed data frames
+#'   each having the same variable names and row numbers (created by
+#'   \code{mi}) 
+#' @param ... additional arguments passed to \code{zelig},
+#'   depending on the model to be estimated
+#' @param by a factor variable contained in \code{data}.  Zelig will subset
+#'   the data frame based on the levels in the \code{by} variable, and
+#'   estimate a model for each subset.  This a particularly powerful option
+#'   which will allow you to save a considerable amount of effort.  For
+#'   example, to run the same model on all fifty states, you could type:
+#'   \code{z.out <- zelig(y ~ x1 + x2, data = mydata, model = "ls", by = "state")}
+#'   You may also use \code{by} to run models using MatchIt subclasses
+#' @param cite If set to "TRUE" (default), the model citation will be
+#'   printed to the console when the model is estimated
+#' @return Depending on the class of model selected, \code{zelig} will return
+#'   an object with elements including \code{coefficients}, \code{residuals},
+#'   and \code{formula} which may be summarized using
+#'   \code{summary(z.out)} or individually extracted using, for example,
+#'   \code{z.out\$coefficients}.  See the specific models listed above
+#'   for additional output values, or simply type \code{names(z.out)}.  
+#'
+#' @name zelig
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, Olivia Lau, and
+#' Gary King 
+#' Maintainer: Matt Owen \email{mowen@@iq.harvard.edu}
+#' @keywords package
+
+zelig <- function(formula, model, data, ..., by = NULL, cite = TRUE) {
+  #   .Deprecated("\nz$new() \nz$zelig(...)")
+  # Zelig Core
+  zeligmodels <- system.file(file.path("JSON", "zelig5models.json"), package = "Zelig")
+  models <- jsonlite::fromJSON(txt = readLines(zeligmodels))$zelig5models
+  # Zelig Choice
+  zeligchoicemodels <- system.file(file.path("JSON", "zelig5choicemodels.json"),
+                                   package = "ZeligChoice")
+  if (zeligchoicemodels != "")
+    models <- c(models, jsonlite::fromJSON(txt = readLines(zeligchoicemodels))$zelig5choicemodels)
+  # Zelig Panel
+  zeligpanelmodels <- system.file(file.path("JSON", "zelig5panelmodels.json"),
+                                   package = "ZeligPanel")
+  if (zeligpanelmodels != "")
+    models <- c(models, jsonlite::fromJSON(txt = readLines(zeligpanelmodels))$zelig5panelmodels)
+  # Zelig GAM
+  zeligammodels <- system.file(file.path("JSON", "zelig5gammodels.json"),
+                               package = "ZeligGAM")
+  if (zeligammodels != "")
+    models <- c(models, jsonlite::fromJSON(txt = readLines(zeligammodels))$zelig5gammodels)
+  # Zelig Multilevel
+  zeligmixedmodels <- system.file(file.path("JSON", "zelig5mixedmodels.json"),
+                               package = "ZeligMultilevel")
+  if (zeligmixedmodels != "")
+    models <- c(models, jsonlite::fromJSON(txt = readLines(zeligmixedmodels))$zelig5mixedmodels)
+  # Aggregating all available models
+  models4 <- list()
+  for (i in seq(models)) {
+    models4[[models[[i]]$wrapper]] <- names(models)[i]
+  }
+  
+  model.init <- paste0("z", models4[[model]], "$new()")
+  z5 <- try(eval(parse(text = model.init)), silent = TRUE)
+  if ("try-error" %in% class(z5))
+    stop("Model '", model,"' not found")
+  ## End: Zelig 5 models
+  mf <- match.call()
+  mf$model <- NULL
+  mf$cite <- NULL
+  mf[[1]] <- quote(z5$zelig)
+  mf <- try(eval(mf, environment()), silent = TRUE)
+  if ("try-error" %in% class(mf))
+    z5$zelig(formula = formula, data = data, ..., by = by)
+  if (cite)
+    z5$cite()
+  return(z5)
+}
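+
+# A typical call (a sketch mirroring the setx() example further below):
+#   data(turnout)
+#   z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)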
+
+#' Setting Explanatory Variable Values
+#'
+#' The \code{setx} command uses the variables identified in
+#' the \code{formula} generated by \code{zelig} and sets the values of
+#' the explanatory variables to the selected values.  Use \code{setx}
+#' after \code{zelig} and before \code{sim} to simulate quantities of
+#' interest.
+#' @param obj the saved output from zelig
+#' @param fn a list of functions to apply to the data frame
+#' @param data a new data frame used to set the values of
+#'   explanatory variables. If data = NULL (the default), the
+#'   data frame called in zelig is used
+#' @param cond   a logical value indicating whether unconditional
+#'   (default) or conditional (choose \code{cond = TRUE}) prediction
+#'   should be performed.  If you choose \code{cond = TRUE}, \code{setx}
+#'   will coerce \code{fn = NULL} and ignore the additional arguments in 
+#'   \code{\dots}.  If \code{cond = TRUE} and \code{data = NULL},
+#'   \code{setx} will prompt you for a data frame.
+#' @param ... user-defined values of specific variables for overwriting the
+#'   default values set by the function \code{fn}.  For example, adding
+#'   \code{var1 = mean(data\$var1)} or \code{x1 = 12} explicitly sets the value
+#'   of \code{x1} to 12.  In addition, you may specify one explanatory variable
+#'   as a range of values, creating one observation for every unique value in
+#'   the range of values
+#' @return For unconditional prediction, \code{x.out} is a model matrix based
+#'   on the specified values for the explanatory variables.  For multiple
+#'   analyses (i.e., when choosing the \code{by} option in \code{\link{zelig}}),
+#'   \code{setx} returns the selected values calculated over the entire
+#'   data frame.  If you wish to calculate values over just one subset of
+#'   the data frame, the 5th subset for example, you may use:  
+#'   \code{x.out <- setx(z.out[[5]])}
+#' @export
+#' @examples
+#'
+#' # Unconditional prediction:
+#' data(turnout)
+#' z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
+#' x.out <- setx(z.out)
+#' s.out <- sim(z.out, x = x.out)
+#'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Olivia Lau and Kosuke Imai 
+#' @seealso The full Zelig manual may be accessed online at
+#'   \url{http://gking.harvard.edu/zelig}
+#' @keywords file
+
+setx <- function(obj, fn = NULL, data = NULL, cond = FALSE, ...) {
+  # .Deprecated("\nz$new() \nz$zelig(...) \nz$setx() or z$setx1 or z$setrange")
+  x5 <- obj$copy()
+  # Record the length of each argument passed via '...'
+  s <- list(...)
+  if (length(s) > 0) {
+    hold <- rep(1, length(s))
+    for(i in 1:length(s)) {
+      hold[i] <- length(s[i][[1]])
+    }
+  } else {
+    hold <- 1
+  }
+  if (max(hold) > 1) {
+    x5$setrange(...)
+  } else {
+    x5$setx(...)
+  }
+  return(x5)
+}
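+
+# Sketch of the dispatch rule above (hypothetical values): scalar arguments go
+# to $setx(), while any vector-valued argument triggers $setrange():
+#
+#   x.low   <- setx(z.out, educate = 10)      # length-one values -> x5$setx()
+#   x.range <- setx(z.out, educate = 10:16)   # a range of values -> x5$setrange()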
+
+#' Generic Method for Computing and Organizing Simulated Quantities of Interest
+#' 
+#' Simulate quantities of interest from the estimated model
+#' output from \code{zelig()} given specified values of explanatory
+#' variables established in \code{setx()}.  For classical \emph{maximum
+#' likelihood} models, \code{sim()} uses the asymptotic normal
+#' approximation to the log-likelihood.  For \emph{Bayesian models},
+#' Zelig simulates quantities of interest from the posterior density,
+#' whenever possible.  For \emph{robust Bayesian models}, simulations
+#' are drawn from the identified class of Bayesian posteriors.
+#' Alternatively, you may generate quantities of interest using
+#' bootstrapped parameters.
+#' @param obj the output object from zelig
+#' @param x values of explanatory variables used for simulation,
+#'   generated by setx
+#' @param x1 optional values of explanatory variables (generated by a
+#'   second call of setx), used for particular computations of
+#'   quantities of interest such as first differences
+#' @param y a parameter reserved for the computation of particular
+#'          quantities of interest (average treatment effects). Few
+#'          models currently support this parameter
+#' @param num an integer specifying the number of simulations to compute
+#' @param bootstrap currently unsupported
+#' @param bootfn currently unsupported
+#' @param cond.data currently unsupported
+#' @param ... arguments reserved for future versions of Zelig
+#' @return The output stored in \code{s.out} varies by model.  Use the
+#'  \code{names} command to view the output stored in \code{s.out}.
+#'  Common elements include: 
+#'  \item{x}{the \code{\link{setx}} values for the explanatory variables,
+#'    used to calculate the quantities of interest (expected values,
+#'    predicted values, etc.). }
+#'  \item{x1}{the optional \code{\link{setx}} object used to simulate
+#'    first differences, and other model-specific quantities of
+#'    interest, such as risk-ratios.}
+#'  \item{call}{the options selected for \code{\link{sim}}, used to
+#'    replicate quantities of interest. } 
+#'  \item{zelig.call}{the original command and options for
+#'    \code{\link{zelig}}, used to replicate analyses. }
+#'  \item{num}{the number of simulations requested. }
+#'  \item{par}{the parameters (coefficients, and additional
+#'    model-specific parameters).  You may wish to use the same set of
+#'    simulated parameters to calculate quantities of interest rather
+#'    than simulating another set.}
+#'  \item{qi\$ev}{simulations of the expected values given the
+#'    model and \code{x}. }
+#'  \item{qi\$pr}{simulations of the predicted values given by the
+#'    fitted values. }
+#'  \item{qi\$fd}{simulations of the first differences (or risk
+#'    difference for binary models) for the given \code{x} and \code{x1}.
+#'    The difference is calculated by subtracting the expected values
+#'    given \code{x} from the expected values given \code{x1}.  (If you do
+#'    not specify \code{x1}, you will not get first differences or risk
+#'    ratios.) }
+#'  \item{qi\$rr}{simulations of the risk ratios for binary and
+#'    multinomial models.  See specific models for details.}
+#'  \item{qi\$ate.ev}{simulations of the average expected
+#'    treatment effect for the treatment group, using conditional
+#'    prediction. Let \eqn{t_i} be a binary explanatory variable defining
+#'    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
+#'    average expected treatment effect for the treatment group is
+#'    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+#'      E[Y_i(t_i=0)] \mid t_i=1 \,],} 
+#'    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
+#'    observation \eqn{i} in the treatment group.  Variation in the
+#'    simulations is due to uncertainty in simulating \eqn{E[Y_i(t_i=0)]},
+#'    the counterfactual expected value of \eqn{Y_i} for observations in the
+#'    treatment group, under the assumption that everything stays the
+#'    same except that the treatment indicator is switched to \eqn{t_i=0}. }
+#'  \item{qi\$ate.pr}{simulations of the average predicted
+#'    treatment effect for the treatment group, using conditional
+#'    prediction. Let \eqn{t_i} be a binary explanatory variable defining
+#'    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
+#'    average predicted treatment effect for the treatment group is
+#'    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+#'      \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} 
+#'    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
+#'    observation \eqn{i} in the treatment group.  Variation in the
+#'    simulations is due to uncertainty in simulating
+#'    \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual predicted value of
+#'    \eqn{Y_i} for observations in the treatment group, under the
+#'    assumption that everything stays the same except that the
+#'    treatment indicator is switched to \eqn{t_i=0}.}
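+#' @examples
+#' # A minimal sketch (assumes the 'turnout' data shipped with Zelig):
+#' data(turnout)
+#' z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
+#' x.low <- setx(z.out, educate = 10)
+#' x.high <- setx(z.out, educate = 16)
+#' s.out <- sim(z.out, x = x.low, x1 = x.high, num = 1000)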
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Olivia Lau and Kosuke Imai 
+
+sim <- function(obj, x = NULL, x1 = NULL, y = NULL, num = 1000, bootstrap = FALSE,
+                bootfn = NULL, cond.data = NULL, ...) {
+  # .Deprecated("\nz$new() \n[...] \nz$sim(...)")
+  s5 <- x$copy()
+  if (!is.null(x1)) {
+    s15 <- x1$copy()
+    if (!is.null(s15$setx.out$x)) {
+      s5$setx.out$x1 <- s15$setx.out$x
+      s5$bsetx1 <- TRUE
+    }
+    if (!is.null(s15$setx.out$range)) {
+      s5$range1 <- s15$range
+      s5$setx.out$range1 <- s15$setx.out$range
+      s5$bsetrange1 <- TRUE
+    }
+  }
+  s5$sim(num = num)
+  return(s5)
+}
diff --git a/R/z.R b/R/z.R
deleted file mode 100644
index 0944f2c..0000000
--- a/R/z.R
+++ /dev/null
@@ -1,53 +0,0 @@
-#' Return value for a zelig2-function
-#'
-#' This is an API-function that bridges a model-fitting function with a zelig
-#' interface.
-#' @note This is used internally by Zelig-dependent packages to instruct Zelig
-#' how to evaluate the function call to a particular statistical model.
-#' @param .function a function
-#' @param ... a set of parameters to be evaluated symbolically
-#' @return a ``z'' object which specifies how to evaluate the fitted model
-#' @export
-z <- function (.function, ..., .hook = NULL) {
-  # Construct the function call
-  .call <- as.call(as.list(match.call())[-1])
-  .function.name <- as.character(.call[[1]])
-  .parent <- parent.frame()
-  .dots <- list(...)
-
-  # Ensure that hook works appropriately
-  if(!missing(.hook)) {
-    if (!is.function(.hook)) {
-      warning(".hook parameter must be a function. ignoring.")
-      .hook <- NULL
-    }
-  }
-
-  s <- append(list(as.name(.function.name)), list(...))
-  literal.call <- as.call(s)
-
-  # Construct the object
-  s <- list(
-            "function" = .function,
-            "hook" = .hook,
-
-            "call" = .call,
-            "env" = .parent,
-
-            "function.name" = .function.name,
-            "dots" = .dots,
-
-            "literal.call" = literal.call
-            )
-
-  # Set attributes
-  attr(s, 'baseenv') <- baseenv()
-  attr(s, 'call') <- match.call()
-  attr(s, 'function') <- substitute(.function)
-
-  # Set the class
-  class(s) <- 'z'
-
-  # Return
-  s
-}
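-
-## Illustrative sketch (hypothetical model name): a zelig2 method may return
-## a ``z'' object rather than a plain list, e.g.
-##   zelig2some.model <- function (formula, ..., data)
-##     z(some.other.method, formula = formula, data = data)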
diff --git a/R/zelig.R b/R/zelig.R
deleted file mode 100644
index 96a5966..0000000
--- a/R/zelig.R
+++ /dev/null
@@ -1,323 +0,0 @@
-#' Estimating a Statistical Model
-#'
-#' The zelig command estimates a variety of statistical
-#' models.  Use \code{zelig} output with \code{setx} and \code{sim} to compute
-#' quantities of interest, such as predicted probabilities, expected values, and
-#' first differences, along with the associated measures of uncertainty
-#' (standard errors and confidence intervals).
-#'
-#' @param formula a symbolic representation of the model to be
-#'   estimated, in the form \code{y \~\, x1 + x2}, where \code{y} is the
-#'   dependent variable and \code{x1} and \code{x2} are the explanatory
-#'   variables, and \code{y}, \code{x1}, and \code{x2} are contained in the
-#'   same dataset.  (You may include more than two explanatory variables,
-#'   of course.)  The \code{+} symbol means ``inclusion'' not
-#'   ``addition.''  You may also include interaction terms and main
-#'   effects in the form \code{x1*x2} without computing them in prior
-#'   steps; \code{I(x1*x2)} to include only the interaction term and
-#'   exclude the main effects; and quadratic terms in the form
-#'   \code{I(x1^2)}
-#' @param model the name of a statistical model.
-#'   Type \code{help.zelig("models")} to see a list of currently supported
-#'   models
-#' @param data the name of a data frame containing the variables
-#'   referenced in the formula, or a list of multiply imputed data frames
-#'   each having the same variable names and row numbers (created by
-#'   \code{mi}) 
-#' @param ... additional arguments passed to \code{zelig},
-#'   depending on the model to be estimated
-#' @param by a factor variable contained in \code{data}.  Zelig will subset
-#'   the data frame based on the levels in the \code{by} variable, and
-#'   estimate a model for each subset.  This a particularly powerful option
-#'   which will allow you to save a considerable amount of effort.  For
-#'   example, to run the same model on all fifty states, you could type:
-#'   \code{z.out <- zelig(y ~ x1 + x2, data = mydata, model = "ls", by = "state")}
-#'   You may also use \code{by} to run models using MatchIt subclasses
-#' @param cite If set to \code{TRUE} (default), the model citation is
-#'   printed to the console when the model is estimated
-#' @return Depending on the class of model selected, \code{zelig} will return
-#'   an object with elements including \code{coefficients}, \code{residuals},
-#'   and \code{formula} which may be summarized using
-#'   \code{summary(z.out)} or individually extracted using, for example,
-#'   \code{z.out\$coefficients}.  See the specific models listed above
-#'   for additional output values, or simply type \code{names(z.out)}.  
-#'
-#' @name zelig
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, Olivia Lau, and
-#' Gary King 
-#' Maintainer: Matt Owen \email{mowen@@iq.harvard.edu}
-#' @keywords package
-zelig <- function (formula, model, data, ..., by=NULL, cite=T) {
-
-  # Print any warnings associated with the requested model
-  model.warnings(model)
-
-  # Split data.frame
-  if (!missing(by)) {
-    if (length(by) > 1) {
-      warning("by cannot have length greater than 1")
-      by <- NULL
-    }
-
-    if (!is.data.frame(data))
-      warning("the 'by' option requires 'data' to be a data.frame")
-
-
-    else if (any(by %in% all.vars(formula))) {
-      warning("by cannot list contain a variable from the model's formula")
-      by <- NULL
-    }
-
-    else
-      data <- divide(data, by)
-  }
-
-  # Almost equivalent to:
-  #   data <- multi.dataset(data)
-  # 
-  # but we want to keep the name of the original data object as our title (sometimes).
-  divided.data <- eval(call("multi.dataset", substitute(data)))
-
-  # 
-  Call <- match.call()
-
-  # expand dot arguments
-  dots <- list()
-
-  # get non-dot arguments in a general fashion
-  notdots <- as.list(match.call(expand.dots=F)[-1])
-  notdots[["..."]] <- NULL
-
-  # only get the non-dot arguments
-  # that do not exist in the dot arguments
-  names.notdots <- Filter(function(x) !x%in%names(dots), names(notdots))
-  notdots <- notdots[names.notdots]
-
-  # build parameter list (including optional parameters)
-  params <- c(dots, notdots)
-
-  # set up list
-  res <- NULL
-  old.style.oop <- TRUE
-
-  # Call make.mi symbolically so that the function can implicitly label
-  # data.frame's from context. For example, the expression:
-  #   mi(turnout[1:1000, ], )
-  # will contain a data.frame labeled:
-  #   turnout[1:1000, ]
-  # m <- eval(call("multi.dataset", substitute(data), by=by))
-
-  # Ensure certain values remain consistent between any object on this list
-  # by giving them all a pointer to the same environment object which contains
-  # a few pieces of information
-  state <- new.env()
-
-  # Begin constructing zelig object
-  object <- list()
-
-  # Create zelig2* function
-  zelig2 <- paste("zelig2", as.character(model), sep="")
-  zelig2 <- get(zelig2, mode="function")
-
-  # Get package name. This is useful for writing methods that apply to all
-  # models within a particular software package
-  package.name <- getPackageName(environment(zelig2), FALSE)
-
-  # repeat
-  for (key in names(divided.data)) {
-    d.f <- divided.data[[key]]
-    label <- key
-
-
-    # catch end-of-list error
-    if (is.null(d.f))
-      next
-
-    zclist <- zelig2(formula, ..., data=d.f)
-
-    new.call <- zclist$call
-    env <- zclist$env
-
-    if (!inherits(zclist, "z")) {
-      if (!is.list(zclist))
-        warning("invalid object returned from `zelig2` method")
-
-      else {
-        wl <- zclist
-
-        # reserved words taken from the zelig2 method
-        .func <- as.name(wl$.function)
-        .hook <- wl$.hook
-
-        # remove the reserved words
-        wl$.function <- NULL
-        wl$.hook <- NULL
-        wl$.post <- NULL
-        wl$.model.matrix <- NULL
-
-        new.call <- as.call(append(list(.func), wl))
-        mock.call <- match.call()
-        env <- NULL
-      }
-    }
-    else if (inherits(zclist, "z")) {
-      new.call <- zclist$literal.call
-      mock.call <- zclist$call
-      env <- NULL
-    }
-    else {
-      warning("zelig2 function is returning an invalid type of object")
-    }
-
-    # Default value for result object
-    new.res <- NULL
-
-    tryCatch(
-      {
-        new.res <- eval(new.call)
-      },
-      error = function (e) {
-        warning("There was an error fitting this statistical model.")
-        print(e)
-      }
-      )
-
-    # Apply first hook if it exists
-    if (!is.null(zclist$.hook)) {
-      zclist$.hook <- get(zclist$.hook, mode='function')
-      new.res <- zclist$.hook(new.res, new.call, match.call(), ..., data = d.f)
-    }
-    else if (!is.null(zclist$hook) && is.function(zclist$hook)) {
-      new.res <- zclist$hook(new.res, new.call, match.call(), ..., data = d.f)
-    }
-    # Determine whether this is an S4 object
-    old.style.oop <- ! isS4(new.res)
-
-    if (exists("mock.call")) {
-      if (isS4(new.res))
-        new.res@call <- mock.call
-      else
-        new.res$call <- mock.call
-    }
-
-    # This is the only "obj" assignment that matters
-    obj <- makeZeligObject(new.res,
-                           model,
-                           new.call, match.call(),
-                           d.f, label,
-                           env,
-                           package.name = package.name
-                           )
-
-    # Specify the appropriate class
-
-    # Attach shared environment as an attribute
-    attr(obj, 'state') <- state
-
-    # Add to list of results
-    object[[label]] <- obj
-  }
-
-  if (missing(by) && is.data.frame(data)) {
-    object <- object[[1]]
-  }
-  else {
-    attr(object, 'state') <- state
-    class(object) <- c(model, paste(model, 'mi', sep='-'), "MI")
-  }
-
-  # Update the shared environment
-  assign('old-formula', formula, state)
-  assign('args', list(...), state)
-  assign('parent', parent.frame(), state)
-  assign('call', match.call(), state)
-  assign('by', by, state)
-  # assign('methods', methods.env, state)
-  assign('methods', NULL, state)
-  assign('model', model, state)
-
-
-  # The below line should probably remain commented out
-  # assign('mi', m, state)
-
-  # Display citation information
-  if (cite) {
-    described <- describe(object)
-    descr <- description(
-                         authors = described$authors,
-                         year  = described$description,
-                         text  = described$text,
-                         url   = described$url,
-                         model = model
-                         )
-    cat("\n\n", cite(descr), "\n")
-  }
-
-  object
-}
-
-
-
-
-
-#' Make an Individual Zelig Object
-#'
-#' Returns a ``zelig'' object with the proper specifications
-#' @param object a fitted statistical model
-#' @param model a character-string specifying the name of the model
-#' @param call The call that produced the fitted model
-#' @param zelig_call The call made to the original zelig function
-#' @param data the data.frame used to fit the model
-#' @param label a character-string or symbol used as a human-readable label for
-#' the data-set
-#' @param env an environment variable that contains all variables to evaluate
-#' the call ``zelig_call''
-#' @param package.name a character-string specifying the name of the package
-#' that is the source of the model used to fit this object
-#' @return A ``zelig'' object
-makeZeligObject <- function (object,
-                             model,
-                             call,
-                             zelig_call,
-                             data,
-                             label,
-                             env,
-                             package.name = NULL
-                             ) {
-  # This is a set of variables that will be visible to the following methods:
-  # param, bootstrap, qi
-  implied.variables <- new.env()
-
-  # The fitted model
-  assign(".fitted", object, implied.variables)
-
-  # The name of the model
-  assign(".model", model, implied.variables)
-
-  # The call to the model-fitting function
-  assign(".call", call, implied.variables)
-
-  # The environment used to evaluate the model-fitting function
-  assign(".env", env, implied.variables)
-
-  # Create list-object
-  self <- list(
-               result = object,
-               formula = formula(object),
-               zelig.call = zelig_call,
-               name  = model,
-               label = label,
-               env  = env,
-               call = call,
-               data = data,
-               S4   = isS4(object),
-               method.env = implied.variables,
-               package.name = package.name
-               )
-
-  # Specify as a ``zelig'' object
-  class(self) <- c("zelig", model)
-
-  # Return 
-  self
-}
diff --git a/R/zelig.skeleton.R b/R/zelig.skeleton.R
deleted file mode 100644
index 6265690..0000000
--- a/R/zelig.skeleton.R
+++ /dev/null
@@ -1,133 +0,0 @@
-#' 'zelig.skeleton' generates the necessary files used to create a Zelig
-#' package. Based on (and using) R's 'package.skeleton' it removes some of the
-#' monotony of building statistical packages. In particular, 'zelig.skeleton'
-#' produces templates for the \code{zelig2}, \code{describe}, \code{param}, and
-#' \code{qi} methods. For more information about creating these files on an
-#' individual basis, please refer to the tech manuals, which are available 
-#' by typing: \code{?zelig2}, \code{?param}, or \code{?qi}.
-#' @title Creates a Skeleton for a New Zelig package
-#' @param pkg a character-string specifying the name of the Zelig package
-#' @param models a vector of strings specifying models to be included in the
-#'   package
-#' @param author a vector of strings specifying contributors to the package
-#' @param path a character-string specifying the path to the package
-#' @param force a logical specifying whether to overwrite files and create
-#'   necessary directories
-#' @param email a string specifying the email address of the package's
-#'   maintainer
-#' @param depends a vector of strings specifying package dependencies
-#' @param ... ignored parameters
-#' @param .gitignore a logical specifying whether to include a copy of a 
-#'   simple \code{.gitignore} in the appropriate folders (\code{inst/doc} and
-#'   the package root)
-#' @param .Rbuildignore a logical specifying whether to include a copy of a 
-#'   simple \code{.Rbuildignore} in the appropriate folders (\code{inst/doc} 
-#'   and the package root)
-#' @return nothing
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
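-#' @examples
-#' \dontrun{
-#' # Illustrative sketch (hypothetical package and model names):
-#' zelig.skeleton("myZeligPkg", models = "mymodel",
-#'                author = "A. Author", email = "author@@example.org",
-#'                depends = "Zelig", path = tempdir())
-#' }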
-zelig.skeleton <- function (
-    pkg, models=c(), author="UNKNOWN AUTHOR",
-    path = ".",
-    force = FALSE,
-    email = "maintainer at software-project.org",
-    depends = c(),
-    ...,
-    .gitignore = TRUE,
-    .Rbuildignore = TRUE
-    ) {
-
-
-  # WARNING BLOCK
-  # so that developers are aware of potential pitfalls that will prevent
-  # installation of their packages
-  if (!is.character(pkg)) {
-    warning("invalid 'pkg' parameter; should be a character string")
-    pkg <- as.character(pkg)
-  }
-
-  if (length(pkg) > 1) {
-    warning("invalid 'pkg' parameter; length cannot be greater than one")
-    pkg <- pkg[1]
-  }
-
-  if (!is.character(models)) {
-    warning("invalid 'models' parameter; should be a character vector")
-    models <- as.character(models)
-  }
-
-  if (!length(models))
-    warning("invalid 'models' parameter; should contain at least one model-name")
-
-  if (missing(author))
-    warning("missing 'author' parameter; please change the value in the",
-      "'DESCRIPTION' file's 'Author' field")
-
-  if (missing(email))
-    warning("Missing 'email' parameter; please change the value in the ",
-      "'DESCRIPTION' file's 'Maintainer' field")
-
-  if (missing(depends))
-    warning("Missing 'depends' parameter")
-
-  # new environment
-  e <- new.env()
-
-  for (m in models) {
-    # Place proper functions in
-    # correct environment (out of global)
-    # this technically doesn't work
-    # (bug in package.skeleton)
-    describe <- function (...) list()
-    zelig2 <- function (formula, ..., data) list(.function = "")
-    param <- function (obj, num, ...) list(coef=NULL)
-    qi <- function (obj, x, x1, y, param, num) list()
-
-    assign(paste("describe", m, sep="."), describe, e)
-    assign(paste("zelig2", m, sep=""), describe, e)
-    assign(paste("param", m, sep="."), describe, e)
-    assign(paste("qi", m, sep="."), describe, e)
-  }
-
-  # Invoke package.skeleton
-  package.skeleton(
-                   name = pkg,
-                   environment = e,
-                   path = path,
-                   force = force
-                   )
-
-  # Copy files over - as of 3/11 these files are blank
-  for (m in models) {
-    .copy.templates(m, pkg, path)
-  }
-
-  # 'url' is not defined elsewhere in this scope; default to the project page
-  url <- "http://gking.harvard.edu/zelig"
-  .make.description(pkg, author, email, depends, url, path)
-  .make.package.R(pkg, author, email, depends, url, path)
-
-  # copy .gitignore and .Rbuildignore
-  if (.gitignore) {
-    src <- system.file('hidden', 'gitignore', package='Zelig')
-    dest <- file.path(path, pkg, '.gitignore')
-    file.copy(src, dest)
-
-    dest <- file.path(path, pkg, 'man', '.gitignore')
-    file.copy(src, dest)
-  }
-
-  if (.Rbuildignore) {
-    src <- system.file('hidden', 'Rbuildignore', package='Zelig')
-    
-    dest <- file.path(path, pkg, '.Rbuildignore')
-    file.copy(src, dest)
-
-    dest <- file.path(path, pkg, 'inst', 'doc', '.Rbuildignore')
-    dir.create(file.path(path, pkg, 'inst', 'doc'), recursive=TRUE)
-    file.copy(src, dest)
-  }
-
-
-  # Why zero? Eh, maybe a return code thing. This function is really just used
-  # for side-effects
-  invisible(0)
-}
diff --git a/R/zelig2.R b/R/zelig2.R
deleted file mode 100644
index 596231b..0000000
--- a/R/zelig2.R
+++ /dev/null
@@ -1,49 +0,0 @@
-#' The \code{zelig2} function acts as a simple interface between a user's call
-#' to the \code{zelig} function and the zelig functions subsequent call to the
-#' pre-existing external model. The external model varies based on which model
-#' is being called.
-#'
-#' @title Interface Between Zelig Models and External Functions
-#' @note Writing \code{zelig2} functions is required of Zelig developers. In
-#'   particular, \code{zelig2} functions act as an interface between external
-#'   models (models not included in the Zelig package) and the \code{zelig}
-#'   function which must use that model.
-#'
-#'   \code{zelig2} is not an actual function. Rather, it is a naming
-#'   convention: each model provides its own function whose name is
-#'   \code{zelig2} followed by the model name (e.g. \code{zelig2logit})
-#'
-#' @name zelig2
-#' @return
-#'   The main purpose of the \code{zelig2} function is to return a list of
-#'   key-value pairs, specifying how Zelig should interface with the external
-#'   method. This list has the following format:
-#'
-#'   \item{\code{.function}}{specifies the name of the external method to be
-#'     called by the \code{zelig} function. Subsequent parameters are
-#'     evaluated as part of a call to the named function.}
-#'   \item{\code{.hook}}{specifies the name of a hook function as a string. The
-#'     hook function is only evaluated on the zelig object once the external
-#'     method has fit the statistical model}
-#'   \item{...}{any parameters aside from \code{.function} and \code{.hook} are
-#'     passed as part of the function call to the external model}
-#'
-#' @examples
-#'  zelig2some.model <- function (formula, weights, verbose, ..., data) {
-#'    list(
-#'         .function = 'some.other.method',
-#'         .hook = NULL,
-#'         formula = formula,
-#'         weights = 2 * weights,
-#'         data = data
-#'         )
-#'  }
-#'
-#' ## This \code{zelig2} function equates the following function call:
-#' ##  zelig(formula, weights = weights, verbose = TRUE, data = data, model="some.model")
-#' ##
-#' ## with:
-#' ##  some.other.method(formula = formula, weights = 2 * weights, data=data)
-#'
-#' ## Note that the 'verbose' parameter is ignored, since the 
-#' ## 'zelig2some.model' does not include the 'verbose' parameter in its return
-#' ## value.
-NULL
diff --git a/R/zeligBuildWeights.R b/R/zeligBuildWeights.R
deleted file mode 100644
index 3a94e78..0000000
--- a/R/zeligBuildWeights.R
+++ /dev/null
@@ -1,175 +0,0 @@
-#' Utility to build a vector (or sometimes matrix) of weights for the analysis model.
-#' 
-#' This takes standardized Zelig user input about weights, and tailors it
-#'   via developer-defined settings, to correspond with the format of
-#'   weights acceptable by the model Zelig bridges to.  It also runs a
-#'   set of checks to uncover any potential errors in the specified weights.
-#' @param weights A set of non-negative weights.  Overrides repweights
-#'   if defined.
-#' @param repweights A set of whole-number (non-negative integer) weights,
-#'   useful when weights merely replicate or delete certain observations,
-#'   as with frequency weights.
-#' @param zeros An option on how to deal with zero valued user supplied weights.
-#'   Default of "zero" allows zero weights, "epsilon" changes zeroes to 1e-08,
-#'   "remove" removes those observations from the dataset.
-#' @param rebuild An option to allow specified repweights to reconfigure the 
-#'   rows of the dataset to rebuild a corresponding dataset where every row is
-#'   of weight 1.  Useful if analysis model does not accept weights.  
-#' @param allowweights Defines if weights are allowed in the model.
-#' @param allowrepweights Defines if repweights are allowed in the model.
-#'   Overridden if \code{allowweights = TRUE}.
-#' @param data Dataset, required if weights are defined by variable name, or if
-#'   dataset is to be reconfigured (by \code{rebuild} or \code{zeros} options)
-#' @return weights A vector of weights of the structure defined by the
-#'   developer and required by the analysis model.  Or NULL if certain checks 
-#'   are failed.
-#' @return data A reconfigured dataset, if modified.
-#' @author James Honaker \email{zelig-zee@@iq.harvard.edu}
-#' @export
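-#' @examples
-#' # Illustrative sketch (hypothetical data): rebuild a dataset from
-#' # replication weights so that every remaining row has weight one
-#' d <- data.frame(y = c(1, 0, 1), w = c(2, 0, 1))
-#' zeligBuildWeights(repweights = "w", rebuild = TRUE, data = d)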
-
-zeligBuildWeights <- function (weights=NULL, repweights=NULL, zeros="zeros", rebuild=FALSE, allowweights=TRUE, allowrepweights=TRUE, data=NULL) {
-
-
-  ## Developer can turn off certain types of weights
-  ## NOTE: Can't currently turn off "repweights" if "weights" are allowed.
-  if(!allowweights & !allowrepweights & !is.null(weights)){
-    warning("You have specified weights, but weighting is not available for this model.  Ignoring weights.  ")
-    return(list(weights=NULL, data=data))
-  }
-
-  if(!allowweights & !allowrepweights & !is.null(repweights)){
-    warning("You have specified repweights, but weighting is not available for this model.  Ignoring weights.  ")
-    return(list(weights=NULL, data=data))
-  }
-
-  if(!allowweights & !is.null(weights)){
-    warning("You have specified weights, but weights are not an option in this model.  Ignoring weights.  repweights may be an available option.")
-    weights=NULL
-  }
-
-
-  ## Override repweights with weights when in conflict.
-  if(!is.null(weights) & !is.null(repweights)){
-    warning("You have specified both weights and repweights.  The repweights will be ignored.")
-    repweights<-NULL
-  }
-
-
-  ## Turn weights as variable name into vector, with checking.
-  if(is.character(weights)){
-    if(is.null(data)){
-      warning("ZELIG DEVELOPER WARNING: You have named a weight variable in the dataset, but not supplied dataset to zeligBuildWeights.  Weights will be ignored in your model until amended.")
-      return(list(weights=NULL, data=data))
-    }else if (!(weights %in% names(data))){
-      warning("The variable name supplied for the weights is not present in the dataset. Ignoring weights.")
-      return(list(weights=NULL, data=data))
-    }else if ( !is.numeric(data[,weights])){
-      warning("The variable supplied for the weights is not numeric. Ignoring weights.")
-      return(list(weights=NULL, data=data))
-    }else{
-      weights<-data[,weights]
-    }
-  }
-
-  ## Turn repweights as variable name into vector, with checking.
-  if(is.character(repweights)){
-    if(is.null(data)){
-      warning("ZELIG DEVELOPER WARNING: You have named a repweight variable in the dataset, but not supplied dataset to zeligBuildWeights.  repweights will be ignored in your model until amended.")
-      return(list(weights=NULL, data=data))
-    }else if (!(repweights %in% names(data))){
-      warning("The variable name supplied for the repweights is not present in the dataset. Ignoring weights.")
-      return(list(weights=NULL, data=data))
-    }else if ( !is.numeric(data[,repweights])){
-      warning("The variable supplied for the repweights is not numeric. Ignoring weights.")
-      return(list(weights=NULL, data=data))
-    }else{
-      repweights<-data[,repweights]
-    }
-  }
-
-  ## Some checking/transforming on repweights
-  if(!is.null(repweights)){
-    if(!all(floor(repweights)==repweights)){
-      warning("Defined repweights are not integer, so will be rounded.")
-      repweights=round(repweights)   # Maybe allow floor/ceiling as other options?
-    }
-    if(sum(is.na(repweights))>0){  # any(is.na()) sometimes has issues
-      warning("Some defined repweights are missing values, so will be treated as zeros")
-      flag<-is.na(repweights)
-      repweights[flag]<-0;
-    }
-    if(min(repweights)<0){
-      warning("Some defined repweights are negative, so will be treated as zeros")
-      flag<-repweights<0
-      repweights[flag]<-0;
-    }
-    if(sum(repweights)==0){
-      warning("Defined repweights give no weight to any observation.  Ignoring weights.")
-      return(list(weights=NULL, data=data))
-    }
-  }
-
-
-  ## Some checking/transforming on weights
-  if(!is.null(weights)){
-    if(sum(is.na(weights))>0){  # any(is.na()) sometimes has issues
-      warning("Some defined weights are missing values, so will be treated as zeros")
-      flag<-is.na(weights)
-      weights[flag]<-0;
-    }
-    if(min(weights)<0){
-      warning("Some defined weights are negative, so will be treated as zeros")
-      flag<-weights<0
-      weights[flag]<-0;
-    }
-    if(sum(weights)==0){
-      warning("Defined weights give no weight to any observation.  Ignoring weights.")
-      return(list(weights=NULL, data=data))
-    }
-  }
-
-
-  ## If repweights not available to function, reconstruct a dataset
-  ## NOTE: "rebuild" overrides any setting of "zeros"
-  if( is.null(weights) & !is.null(repweights) & rebuild){
-    if(is.null(data)){
-      warning("ZELIG DEVELOPER WARNING: You have set zeligBuildWeights to rebuild dataset, but not supplied dataset to zeligBuildWeights function.  Weights will be ignored in your model until amended.")
-      return(list(weights=NULL, data=data))
-    }else{
-      ##  Rebuild dataset according to replication weights
-      newobs<-rep(1:nrow(data), repweights)  # Index of rows to use
-      data<-data[newobs,]                    # Copy relevant rows
-    }
-    weights<-NULL # Or, could be weights<-rep(1,nrow(data))
-  ## when repweights are correct, but need to be transferred to final output
-  }else if (is.null(weights) & !is.null(repweights) ){
-    weights<-repweights
-  }
-
-  ## From this point, only "weights" exists in a meaningful way.
-
-  if(!is.null(weights)){
-    ## Implement zeros option.
-    if(zeros=="epsilon"){
-      flag<-weights==0
-      weights[flag]<- .00000001
-    }else if (zeros=="remove"){
-      flag<-weights==0
-      weights<-weights[!flag]
-      data<-data[!flag,]
-    }
-  }
-
-
-
-  ## NOTE: Ideally, we could just pass back a vector of indexes to reformat the 
-  ##   data, rather than passing back and forwards the data.
-  ##   But simpler for developer this way.  
-
-  built <- list(weights=weights, data=data)
-
-  # Return
-  return(built)
-}
-
-
diff --git a/R/zzz.R b/R/zzz.R
deleted file mode 100644
index 3caab16..0000000
--- a/R/zzz.R
+++ /dev/null
@@ -1,1383 +0,0 @@
-# THIS FILE CONTAINS PACKAGE HOOKS FOR ZELIG
-# ------------------------------------------
-
-# @...: nothing
-# spill-over: output information about Zelig
-.onAttach <- function(...) {
-
-  package.name <- "Zelig"
-  mylib <- dirname(system.file(package = package.name))
-  ver <- packageDescription(package.name, lib.loc = mylib)$Version
-  build.date <- packageDescription(package.name, lib.loc = mylib)$Date
-
-
-  # build info
-  packageStartupMessage("ZELIG (Versions ", ver, ", built: ", build.date, ")")
-
-  # cat, for readability of the message text
-
-  # Zelig info - do not exceed 80char/line
-  packageStartupMessage("
-+----------------------------------------------------------------+
-|  Please refer to http://gking.harvard.edu/zelig for full       |
-|  documentation or help.zelig() for help with commands and      |
-|  models supported by Zelig.                                    |
-|                                                                |
-|  Zelig project citations:                                      |
-|    Kosuke Imai, Gary King, and Olivia Lau.  (2009).            |
-|    ``Zelig: Everyone's Statistical Software,''                 |
-|    http://gking.harvard.edu/zelig                              |
-|   and                                                          |
-|    Kosuke Imai, Gary King, and Olivia Lau. (2008).             |
-|    ``Toward A Common Framework for Statistical Analysis        |
-|    and Development,'' Journal of Computational and             |
-|    Graphical Statistics, Vol. 17, No. 4 (December)             |
-|    pp. 892-913.                                                |
-|                                                                |
-|   To cite individual Zelig models, please use the citation     |
-|   format printed with each model run and in the documentation. |
-+----------------------------------------------------------------+
-
-")
-}
-
-# @param object a zelig object
-# @param envir an environment
-.GetGenericsS4 <- function(object, envir=parent.frame()) {
-  if (inherits(object$result, "list")) {
-    .ListS4Generics(classes=class(object$result[[1]]), env=envir)
-  }
-  else 
-    .ListS4Generics(classes=class(object$result), env=envir)
-}
-
-
-# @classes: classes
-# @where: compatibility with showMethods
-# @env: the environment to search for generics
-# return: a character-vector of function names
-# ********************************************
-# this function searches .AllMTable within the namespace
-# of the functions environment
-.ListS4Generics <- function(classes=NULL, where=NULL,
-                          env=topenv(parent.frame())) {
-  # get list of all generic functions
-  functions <- if (missing(where))
-    getGenerics()
-  else
-    getGenerics(where)
-
-  #
-  matches <- c()
-  functions <- as.character(functions)
-
-  #
-  for (f in functions) {
-    fdef <- getGeneric(f)
-    env <- environment(fdef)
-
-    table <- tryCatch(get(".AllMTable", envir=env), error=function(e) NULL)
-
-    if (is.null(table))
-      next
-
-    if (any(classes %in% ls(table)))
-      matches <- append(matches, f)
-  }
-
-  # return
-  flist <- c("zelig", "param", "as.parameters", "sim", "setx", "register", 'summary')
-  matches[ ! matches %in% flist ]
-}
-
-#' Describe a Zelig Model
-#'
-#' @param model.name a character-string naming the Zelig model to describe
-#' @param ... ignored parameters
-#' @return a 'description' object containing citation information
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.ZeligDescribeModel <- function(model.name, ...) {
-  # lie to zelig
-  dummy.zelig <- "dummy"
-  class(dummy.zelig) <- model.name
-
-  # return as a description
-  as.description(describe(dummy.zelig))
-}
-
-#' Get a Character-Vector of All Models with a 'zelig2' Function
-#'
-#' @note In order for a Zelig model to either execute correctly or be listed as
-#'   a legal Zelig model, the function name must be prefixed with 'zelig2'.
-#' @param zelig.only a boolean specifying whether we want to search only the 
-#'   Zelig namespace
-#' @return a character-vector of the Zelig models loaded on the user's machine
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-ZeligListModels <- function(zelig.only=FALSE) {
-  results <- if (zelig.only)
-    ls(pattern="^zelig2", envir=asNamespace("Zelig"))
-  else
-    apropos("^zelig2", mode="function")
-
-  # substitute and return
-  sub("^zelig2", "", results)
-}
-
-#' Get a Text-Block of Citation Information about a Zelig Model
-#' 
-#' @note This function is strictly used internally by Zelig
-#' @param model.name the name of a Zelig model
-#' @return a block of text giving a human readable (and APA compliant) block
-#'   citation text
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.GetModelCitationTex <- function(model.name)
-  cite(ZeligDescribeModel(model.name))
-
-#' Produce a 'description' Object from the Name of a Model
-#' @note The 'description' object is a list-style object containing citation
-#'   information
-#' @param model.name a character-string specifying a Zelig model
-#' @return a 'description' object specified by the 'model.name' parameter. This
-#'   object is created by executing the specified Zelig models' 'describe'
-#'   function
-#' @export
-ZeligDescribeModel <- function(model.name) {
-  dummy <-
-    "I love baseball.  You know, it doesn't have to mean anything.
-It's just very beautiful to watch."
-  
-  class(dummy) <- model.name
-
-  # describe
-  res <- describe(dummy)
-
-  # add model name
-  res$model <- model.name
-
-  # return
-  as.description(res)
-}
-
-#' Get a TeX-style Citation
-#' @param model a character-string specifying the name of the Zelig model to
-#'   describe in TeX-style
-#' @return a string to be rendered as part of a LaTeX-style document
-#' @export
-TexCite <- function(model) {
-  # description object
-  descr <- ZeligDescribeModel(model)
-
-  # url
-  url <- "http://gking.harvard.edu/zelig"
-
-  # define title
-  title <- if (is.null(descr$text))
-    descr$model
-  else
-    paste(descr$model, ": ", descr$text, sep="")
-
-  # quote title string
-  title <- paste('"', title, '"', sep="")
-
-  # construct string
-  str <- paste(
-               "{\bf To cite this model in Zelig:}",
-               paste(descr$authors, descr$year, sep="."),
-               paste(title, "in Kosuke Imai, Gary King and Olivia Lau,"),
-               "\"Zelig: Everyone's Statistical Software,\"",
-               url,
-               sep = "\n"
-               )
-  str
-}
-
-#' Get a List of Categories for Describing Zelig Models
-#' @note This feature is being deprecated, as original functionality with the
-#'   Dataverse Project \url{thedata.org} is being reevaluated.
-#' @return a list of character-strings specifying legal category types (as the
-#'   keys of the list) and their human-readable counterparts (as the values)
-#' @export
-.ZeligModelCategories <- function() {
-  list(continuous  = "Models for Continuous Dependent Variables",
-       dichotomous = "Models for Dichotomous Dependent Variables",
-       ordinal     = "Models for Ordinal Dependent Variables",
-       bounded     = "Models for Continous Bounded Dependent Variables",
-       multinomial = "Multinomial Choice Models",
-       count       = "Event Count Models",
-       mixed       = "Models for Mixed Dependent Variables",
-       ei          = "Ecological Inference Models"
-       )
-}
-
-#' List the Titles of the Zelig Statistical Models
-#' @return a list of manual titles for the Zelig software 
-#' @export
-ZeligListTitles <- function() {
-
-  #
-  models <- ZeligListModels()
-
-  #
-  lis <- list()
-
-  #
-  for (m in models)
-    lis[[m]] <- ZeligDescribeModel(m)$text
-
-  # turn into a vector with each entry having:
-  #  model_name: model_description
-  # e.g.
-  #  probit: Probit Regression for Dichotomous Dependent Variables
-  paste(names(lis), lis, sep=": ")
-}
-
-#' Whether an Arbitrary R-package has a Zelig2 Function within Its Namespace
-#' @note This function is used primarily internally to determine whether a
-#'   package is contributing a function to the Zelig software suite
-#' @param pkg a character-string representing a package name
-#' @return whether the package contains any zelig2-functions
-#' @export
-has.zelig2 <- function(pkg) {
-  env <- asNamespace(pkg)
-  hits <- grep("^zelig2*", ls(envir=env))
-  length(hits) > 0
-}
-
-#' Whether a Statistical Package Depends on the Zelig Software Suite
-#' @note This function is used primarily internally to determine whether a
-#'   package is contributing a function to the Zelig software suite
-#' @param package a character-string representing a package name
-#' @return whether the package lists Zelig as a dependency in its DESCRIPTION
-#' @export
-depends.on.zelig <- function(package="") {
-  zcomp <- packageDescription(package, fields="Depends")
-
-  if (is.na(zcomp))
-    return(FALSE)
-
-  zcomp <- unlist(strsplit(zcomp, " *, *"))
-
-  # "Zelig" %in% zcomp
-
-  # pattern to match things leading with Zelig, some spaces, and a parenthesis ending
-  # ex:
-  #     Zelig
-  #     Zelig (>= 3)
-  #     Zelig      (blah blah)
-  pattern <- "^Zelig *(?:\\(.*?\\))$"
-  length(grep(pattern, zcomp)) != 0
-}
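-
-# e.g. (illustrative): depends.on.zelig("ZeligChoice") should be TRUE whenever
-# that package lists "Zelig" or "Zelig (>= 4.0)" in its Depends field.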
-
-#' Get a List of Packages Installed on the Current Machine that Depend on Zelig
-#' @note This function is used primarily internally to determine whether a
-#'   package is contributing a function to the Zelig software suite
-#' @return a character-vector of all zelig-dependent packages on the current
-#'   machine
-list.zelig.dependent.packages <- function() 
-  Filter(depends.on.zelig, .packages(all.available=TRUE))
-
-#' List Zelig Models Installed on the Current Machine
-#' @note This list is not necessarily complete
-#' @param with.namespace a boolean specifying whether to return a named vector
-#'   pairing each model name with the package that provides it (otherwise only
-#'   the model names are returned)
-#' @return list of all zelig models
-list.zelig.models <- function(with.namespace=TRUE) {
-  # list the zelig-dependent packages
-  pkgs <- list.zelig.dependent.packages()
-
-  # include the core package
-  pkgs <- c("Zelig", pkgs)
-
-  # initialize functions variable
-  functions <- NULL
-
-  # create a list of every zelig2 function
-  for (pkg in pkgs) {
-    # get all zelig2 functions, then get their model name
-    models <- ls(pattern="^zelig2", envir=asNamespace(pkg))
-    models <- sub("^zelig2", "", models)
-
-    # add to results list
-    functions[models] <- pkg
-  }
-
-  # return
-  if (with.namespace)
-    # with model-name as the key, and namespace as the value
-    functions
-  
-  else
-    # with just a list of models
-    names(functions)
-}
-
-#' Append a Prefix to a Character String
-#' @note This function is exclusively used internally by Zelig
-#' @param name a character-string specifying the name of a variable
-#' @param envir an environment variable to search
-#' @param prefix a character-string to prefix the string with
-#'   this is applied until the name is unique
-#' @param sep a character-string that separates prefix and name
-.prefix <- function(name, envir, prefix="zelig", sep=".") {
-
-  # check to make sure this is an environment variable
-  if (!is.environment(envir)) {
-    warning("'envir' is not an environment; using the global environment")
-    envir <- globalenv()
-  }
-
-  # ensure some name is returned
-  if (!is.character(c(name, prefix, sep))) {
-    warning("'name', 'prefix', and 'sep' must be character strings")
-    name
-  }
-
-  else if (length(name) > 1 || length(prefix) > 1 || length(sep) > 1) {
-    warning("'name', 'prefix', and 'sep' must each have length one")
-    name
-  }
-
-  else if (!nchar(name)) {
-    warning("'name' is an empty string")
-    sep <- "."
-  }
-
-  else {
-    while(exists(name, envir=envir))
-      name <- paste(prefix, name, sep=sep)
-
-    # return if nothing wonky happened
-    name
-  }
-}
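-
-# Illustrative sketch: if "x" already exists in environment 'e', .prefix
-# prepends "zelig." until the name is unique:
-#   e <- new.env(); assign("x", 1, envir = e)
-#   .prefix("x", e)   # "zelig.x" (assuming "zelig.x" is not also taken)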
-
-
-.GetGenerics <- function(...) UseMethod(".GetGenerics")
-
-# needs work
-.GetGenerics.MI <- function(...) new.env()
-
-# @zelig.object: a zelig object
-# @envir:        namespace to search with 'ls'
-# return:        a list of generic functions names to
-#                to define for zelig
-.GetGenerics.default <- function(zelig.object, envir=parent.frame()) {
-  if (is.null(zelig.object$S4))
-    stop(as.character(zelig.object$family[[1]]))
-  else if (zelig.object$S4) 
-    suppressWarnings(.GetGenericsS4(zelig.object, envir))
-  else
-    suppressWarnings(.GetGenericsS3(zelig.object, envir))
-}
-
-.GetGenericsS3 <- function(zelig.object, envir=parent.frame()) {
-  #
-  hash <- list()
-  cls <- class(zelig.object$result)
-  method.list <- as.character(unlist(mapply(methods, class=cls)))
-
-  regex <- paste("(", paste(cls, collapse="|"), ")", sep="|")
-
-
-  method.list <- gsub(regex, "", method.list)
-
-  meth.list <- c()
-  for (cl in c(class(zelig.object$result), "default")) {
-    method.list <- as.character(methods(class=cl))
-    method.list <- gsub(paste("\\.", cl, "$", sep=""), "", method.list)
-    meth.list <- unique(c(meth.list, method.list))
-  }
-
-  # final list
-  flist <- c("zelig", "param", "as.parameters", "sim", "setx", "register", 'qi', 'summary')
-  meth.list <- sort(unique(c(meth.list,
-                             names(get(".knownS3Generics")))))
-
-  meth.list[ ! meth.list %in% flist ]
-}
-
-# Numerical Derivative
-#
-# This method computes the numerical derivative at a point
-# @param f function (differentiable)
-# @param stencil number of points in stencil. This is currently ignored.
-# @param h size of mesh
-# @return anonymous function with the approximation
-# @note single variable numerical derivative
-.nderiv <- function(f, stencil=5, h=sqrt(.Machine$double.eps)) {
-  # return approximated derivative function
-  function (x) {
-    # construct the 5-point mesh, with the middle point omitted
-    # (its coefficient in the stencil is zero)
-    x.stencil <- rep(x, 4) + c(2, 1, -1, -2)*h
-
-    # compute approximation
-    sum(sapply(x.stencil, f) %*% c(-1, 8, -8, 1))/12/h
-  }
-}
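-
-# Illustrative check (not from the original source): the five-point stencil
-# should recover cos(x) from sin(x) up to rounding error:
-#   dsin <- .nderiv(sin)
-#   dsin(1) - cos(1)   # approximately zero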
-
-
-
-# @F: function to invert
-# @f: derivative of function, or NULL to use numerical approximation
-# @x: initial guess
-# @tol: error-tolerance
-# @h: mesh size
-# @max.iter: number of iterations to perform before giving up
-# return: df(x_0)/dx
-# **note: Newton-Raphson for single variables
-# **suggestion: replace with C code, otherwise won't be truly fast-enough
-.nr <- function(F, f=NULL, x = 1, a = 0,
-                tol      = sqrt(.Machine$double.eps),
-                h        = sqrt(.Machine$double.eps),
-                max.iter = 50) {
-  # save function to prevent recursions
-  saved.function <- F
-
-  # rewrite function to solve for a
-  if (!missing(a))
-    F <- function(x) saved.function(x) - a
-  
-  # if NULL assign numerical derivative
-  if (is.null(f))
-    f <- .nderiv(F)
-
-  #
-  count <- 1
-
-  #
-  while (abs(F(x)) > tol && count <= max.iter) {
-    # increment counter
-    count <- count + 1
-
-    # if derivative is zero, or near it
-    # (otherwise we have issues with solutions where x=0)
-    if (abs(f(x)) < 10^-8) {
-      x <- x + runif(1, min=-1, max=1)
-      next
-    }
-
-    # iterate
-    x <- x - F(x)/f(x)
-  }
-
-  if (count > max.iter)
-    warning("approximation failed to converge given specified tolerance")
-
-  # return result
-  x
-}
-
-
-# @F:
-# @f:
-# @x: initial guess
-# @tol: 
-# return: a functional form of the Newton-Raphson approximation
-.NumInverse <- function(F, f=NULL, x = 1,
-                        tol      = (.Machine$double.eps)^.5,
-                        h        = sqrt(.Machine$double.eps),
-                        max.iter = 50) {
-  function (a) {
-    res <- c()
-
-    # kludgey, but just a hold-over for now
-    for (val in a) {
-      val <- .nr(F=F, f=f, x=x, a=val, tol=tol, h=h, max.iter=max.iter)
-      res <- c(res, val)
-    }
-
-    res
-  }
-}
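-
-# e.g. (illustrative): .NumInverse(exp)(c(1, 2)) approximates log(c(1, 2)).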
-# This file contains overloaded operators 
-# However, developers - in general - should avoid the use of these features,
-# and instead use iterators when dealing with multiple fitted models or
-# quantities of interest.
-# The methods primarily come up when defining 'summarize' and 'plot' functions
-
-
-#' Extract a Value from a Fitted Model Object (Wrapped by Zelig)
-#' @S3method "[[" zelig
-#' @param z an object of type 'zelig'
-#' @param slot a character-string specifying the slot to extract from the fitted
-#'   model object
-#' @param ... subsequent slots to extract from the fitted model object
-#' @return contents of the specified slots
-#' @author Matt Owen \emph{mowen@@iq.harvard.edu}
-"[[.zelig" <- GetSlot.zelig
-
-#' Extraction Operator for Quantities of Interest
-#' This function is exclusively used internally by Zelig, and behaves in a very
-#' fishy manner. \code{qi} objects maintain an internal list of indices which
-#' are used to find the appropriate slot which holds a particular quantity of
-#' interest.
-#' When a \code{qi} object is defined, all the quantities of interest are
-#' converted into acronyms, so that elements of the \code{qi} object can be
-#' stored without a lengthy name containing spaces (since most qi's are
-#' human-readable). As a result, these objects contain an \code{.index}
-#' attribute which pairs every quantity of interest with its acronym. This
-#' index is then used to extract (using the \code{$} operator) the appropriate
-#' element of the list.
-#' In short, it pairs the key "Expected Value" with the slot \code{ev}. This
-#' ensures that, in the example mentioned, the following always holds:
-#'   \code{qi$ev == qi[["Expected Value"]]}
-#' @note When possible, \code{qi} objects should be handled with iterators
-#'   rather than list-style extraction operators.
-#' @S3method "[[" qi
-#' @param self the \code{qi} object
-#' @param key a character-string specifying the title of the quantity of
-#'   interest to extract.
-#' @return if the quantity of interest exists, that entry. Otherwise,
-#'   \code{NULL}
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-"[[.qi" <- function(self, key) {
-
-  # Produce the index of titles of qi's
-  index <- attr(self, ".index")
-
-  # Find the 'short-name' matching
-  qi.short.name <- index[[key]]
-
-  if (is.null(qi.short.name))
-    NULL
-  else
-    # if this title => key pair is found, invoke the "$" operator on the
-    # shortname. In effect, this makes:
-    #   qi[['Expected Value']]
-    #
-    # equivalent to:
-    #   qi$ev
-    do.call("$", list(self, qi.short.name))
-}
-#' Receiver Operator Characteristic Plots
-#'
-#' The 'rocplot' command generates a receiver operator characteristic plot to
-#' compare the in-sample (default) or out-of-sample fit for two logit or probit
-#' regressions.
-#'
-#' @usage
-#' rocplot(y1, y2, fitted1, fitted2,
-#' cutoff = seq(from=0, to=1, length=100), lty1="solid",
-#' lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
-#' col1=par("col"), col2=par("col"),
-#' main="ROC Curve",
-#' xlab = "Proportion of 1's Correctly Predicted",
-#' ylab="Proportion of 0's Correctly Predicted",
-#' plot = TRUE, 
-#' ...
-#' )
-#'
-#' @param y1 response variable for the first model
-#' @param y2 response variable for the second model
-#' @param fitted1 fitted values for the first model. These values may represent
-#'   either the in-sample or out-of-sample fitted values
-#' @param fitted2 fitted values for the second model
-#' @param cutoff A vector of cut-off values between 0 and 1, at which to
-#'   evaluate the proportion of 0s and 1s correctly predicted by the first and
-#'   second model.  By default, this is 100 increments between 0 and 1
-#'   inclusive
-#' @param lty1 the line type of the first model (defaults to 'solid')
-#' @param lty2 the line type of the second model (defaults to 'dashed')
-#' @param lwd1 the line width of the first model (defaults to 1)
-#' @param lwd2 the line width of the second model (defaults to 1)
-#' @param col1 the color of the first model (defaults to 'black')
-#' @param col2 the color of the second model (defaults to 'black')
-#' @param main a title for the plot (defaults to "ROC Curve")
-#' @param xlab a label for the X-axis
-#' @param ylab a label for the Y-axis
-#' @param plot whether to generate a plot to the selected device
-#' @param \dots additional parameters to be passed to the plot
-#' @return if plot is TRUE, rocplot simply generates a plot. Otherwise, a list
-#'   with the following is produced:
-#'   \item{roc1}{a matrix containing a vector of x-coordinates and
-#'     y-coordinates corresponding to the number of ones and zeros correctly
-#'     predicted for the first model.}
-#'   \item{roc2}{a matrix containing a vector of x-coordinates and
-#'     y-coordinates corresponding to the number of ones and zeros correctly
-#'     predicted for the second model.}
-#'   \item{area1}{the area under the first ROC curve, calculated using
-#'     Riemann sums.}
-#'   \item{area2}{the area under the second ROC curve, calculated using
-#'     Riemann sums.}
-#' @export
-#" @author Kosuke Imai and Olivia Lau
-rocplot <- function(y1, y2, fitted1, fitted2,
-                    cutoff = seq(from=0, to=1, length=100), lty1="solid",
-                    lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
-                    col1=par("col"), col2=par("col"),
-                    main="ROC Curve",
-                    xlab = "Proportion of 1's Correctly Predicted",
-                    ylab="Proportion of 0's Correctly Predicted",
-                    plot = TRUE, 
-                    ...) {
-  roc1 <- roc2 <- matrix(NA, nrow = length(cutoff), ncol = 2)
-  colnames(roc1) <- colnames(roc2) <- c("ones", "zeros")
-  for (i in 1:length(cutoff)) {
-    roc1[i,1] <- mean(fitted1[y1==1] >= cutoff[i]) 
-    roc2[i,1] <- mean(fitted2[y2==1] >= cutoff[i])
-    roc1[i,2] <- mean(fitted1[y1==0] < cutoff[i])
-    roc2[i,2] <- mean(fitted2[y2==0] < cutoff[i])
-  }
-  if (plot) {
-    plot(0:1, 0:1, type = "n", xaxs = "i", yaxs = "i",
-         main=main, xlab=xlab, ylab=ylab, ...)
-    lines(roc1, lty = lty1, lwd = lwd1, col=col1)
-    lines(roc2, lty = lty2, lwd = lwd2, col=col2)
-    abline(1, -1, lty = "dotted")
-  }
-  else {
-    area1 <- area2 <- array()
-    for (i in 2:length(cutoff)) {
-      area1[i-1] <- (roc1[i,2] - roc1[(i-1),2]) * roc1[i,1] 
-      area2[i-1] <- (roc2[i,2] - roc2[(i-1),2]) * roc2[i,1] 
-    }
-    return(list(roc1 = roc1, 
-                roc2 = roc2,
-                area1 = sum(na.omit(area1)),
-                area2 = sum(na.omit(area2))))
-  }
-}
-#' Create Function Call
-#'
-#' 
-#' @param Call a \code{call} object, typically specifying the original function
-#'   call to \code{zelig}
-#' @param zelig2 the return-value of the \code{zelig2} method
-#' @param remove a list of character vectors specifying which parameters to
-#'   ignore from the original call to \code{zelig}
-#' @return a function call used to fit the statistical model
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-zelig.call <- function(Call, zelig2, remove = NULL) {
-  #
-  envir <- new.env()
-
-  # reserved words taken from the zelig2 method
-  func <- as.name(zelig2$.function)
-  hook <- zelig2$.hook
-
-  # remove the reserved words
-  zelig2$.function <- NULL
-  zelig2$.hook <- NULL
-  zelig2$.post <- NULL
-  zelig2$.model.matrix <- NULL
-
-  # make a list of the parameters to be passed to the external model
-  args <- names(formals(as.character(func)))
-
-  # remove certain parameters
-  for (key in remove) {
-    if (key %in% names(Call))
-      Call[[key]] <- NULL
-  }
-
-  # remove invalid params
-  for (key in names(Call[-1])) {
-    if (! key %in% args)
-      Call[[key]] <- NULL
-  }
-
-
-
-  # A static list of classes whose objects do not print out well or should be
-  # stored within a separate environment
-  messy.objects <- c("data.frame", "function", "matrix", "family")
-  neat.objects <- c("formula", "family")
-  skip <- c()
-
-  # Store values within 'messy.objects' within another environment, and give a 
-  # pseudonym
-  for (key in names(zelig2)) {
-    obj <- zelig2[[key]]
-    Class <- class(obj)
-    first.class <- Class[1]
-
-    if (is.object(obj)) {
-      if (all(Class %in% neat.objects)) {
-        Call[[key]] <- obj
-      }
-      else {
-        Name <- store.object(obj, envir, ucfirst(first.class))
-        Call[[key]] <- as.name(Name)
-        skip <- c(skip, key)
-      }
-    }
-
-    else if (is.function(obj)) {
-      Name <- store.object(obj, envir, "Function")
-      Call[[key]] <- as.name(Name)
-      skip <- c(skip, key)
-    }
-    else if (is.atomic(obj) && length(obj) > 5) {
-      Name <- store.object(obj, envir, paste(toupper(Class[1]), length(obj),
-                                             sep=""))
-      Call[[key]] <- as.name(Name)
-      skip <- c(skip, key)
-    }
-    else if (is.list(obj) && length(obj) > 5) {
-      Name <- store.object(obj, envir, paste("List", length(obj), sep=""))
-      Call[[key]] <- as.name(Name)
-      skip <- c(skip, key)
-    }
-    else {
-      # this is a hack to prevent removal of elements if the value is NULL
-      null.list <- list(NULL)
-      names(null.list) <- key
-
-      # the two statements are *slightly* different
-      if (is.null(obj)) {
-        Call <- as.call(append(as.list(Call), null.list))
-      }
-      else {
-        Call[[key]] <- obj
-      }
-    }
-
-  }
-
-  # Guarantee all zelig2 names are included (including model, etc)
-  for (key in names(zelig2)) {
-    if (key %in% skip)
-      next;
-
-    if (!is.null(zelig2[[key]]))
-      Call[[key]] <- zelig2[[key]]
-    else {
-      # Clear the entry. Don't worry. It's going to get re-added later in this
-      # Else-block.
-      Call[[key]] <- NULL
-
-      # Create the NULL parameter
-      dummylist <- list(NULL)
-      names(dummylist) <- key
-
-      # Cast as a list, so we can use append
-      Call <- as.list(Call)
-
-      # Append the entry
-      Call <- as.call(append(Call, dummylist))
-    }
-  }
-
-
-  # Change function value
-  Call[[1]] <- func
-
-  list(call=Call, envir=envir)
-}
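-
-# A sketch of the convention consumed here: a zelig2 method returns a list
-# such as (hypothetical values)
-#   list(.function = "glm", formula = y ~ x, family = binomial(), data = D)
-# and zelig.call() builds the call glm(formula = ..., family = ..., data = ...),
-# keeping formulas and families inline while storing data frames, functions,
-# and vectors or lists longer than 5 in the returned environment under
-# generated pseudonyms.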
-
-#' Store Object in Environment with a Fake Name
-#'
-#' This function takes the value of an object and stores it within a specified 
-#' environment. This is similar to simply using the \code{assign} function, but
-#' will not overwrite existing values in the specified environment. It
-#' accomplishes this by prepending a prefix to the name of the variable until
-#' the name becomes unique.
-#' @note This method does not correct invalid names. That is, there is no test
-#'   to determine whether the submitted name is valid.
-#' @param obj any object
-#' @param envir an environment object, which will contain the object with the
-#'   assigned name
-#' @param name a character-string specifying the name that the object will be
-#'   stored as in the specified environment
-#' @param prefix a character string specifying the prefix to prepend to names
-#'   that already have matches in the destination environment
-#' @return a character-string specifying the name of the object in the
-#'   destination environment
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-store.object <- function (obj, envir, name=NULL, prefix=".") {
-
-  variables <- ls(envir=envir)
-  
-  # ensure name is unique
-  while (name %in% variables)
-    name <- paste(prefix, name, sep="")
-
-  assign(name, obj, envir)
-
-  name
-}
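-
-# For example (hypothetical environment):
-# e <- new.env()
-# store.object(1:10, e, name = "Integer10")   # stored as "Integer10"
-# store.object(11:20, e, name = "Integer10")  # name taken; stored as ".Integer10"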
-
-#' Uppercase First Letter of a String
-#' 
-#' This method sets the first character of a string to uppercase and
-#' all other characters to lowercase.
-#' @param str a vector of character-strings
-#' @return a vector of character strings
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-ucfirst <- function (str) {
-  paste(
-        toupper(substring(str, 1, 1)),
-        tolower(substring(str, 2)),
-        sep = ""
-        )
-}
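-
-# For example: ucfirst(c("gamma", "LOGIT")) yields c("Gamma", "Logit")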
-#' Search for, Copy, and Customize Template for a Newly Created Zelig Package
-#' This is used internally by \code{zelig.skeleton}
-#' @param model a character-string specifying the name of the model
-#' @param pkg a character-string specifying the name of the package
-#' @param path a character-string specifying the base path to the package's
-#'  parent directory
-#' @return This function is used for its side-effects.
-.copy.templates <- function (model, pkg, path) {
-  # path to R folder
-  r.path <- file.path(path, pkg, 'R')
-
-  # source files
-  zelig2 <- system.file('templates', 'zelig2.R', package="Zelig")
-  param <- system.file('templates', 'param.R', package="Zelig")
-  qi <- system.file('templates', 'qi.R', package="Zelig")
-  describe <- system.file('templates', 'describe.R', package="Zelig")
-
-  # create R directory
-  dir.create(r.path, showWarnings=FALSE)
-
-  # destination files
-  zelig2.dest <- file.path(r.path, paste('zelig2', model, '.R', sep=""))
-  param.dest <- file.path(r.path, paste('param', model, 'R', sep="."))
-  qi.dest <- file.path(r.path, paste('qi', model, 'R', sep="."))
-  describe.dest <- file.path(r.path, paste('describe', model, 'R', sep="."))
-
-  # create blank files
-  file.create(zelig2.dest, param.dest, qi.dest, describe.dest)
-
-  # substitute
-  zelig2.lines <- .substitute.expressions(zelig2, model=model)
-  param.lines <- .substitute.expressions(param, model=model)
-  qi.lines <- .substitute.expressions(qi, model=model)
-  describe.lines <- .substitute.expressions(describe, model=model)
-
-  # write to file
-  writeLines(zelig2.lines, con = zelig2.dest)
-  writeLines(param.lines, con = param.dest)
-  writeLines(qi.lines, con = qi.dest)
-  writeLines(describe.lines, con = describe.dest)
-
-  TRUE
-}
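-
-# For a model named "hello" this creates, under <path>/<pkg>/R/:
-#   zelig2hello.R, param.hello.R, qi.hello.R, and describe.hello.R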
-
-
-#' Make a DESCRIPTION File for a Specific Package
-#' @param pkg a character-string specifying the name of the package
-#' @param author a vector of strings specifying the names of contributors
-#' @param email a character-string specifying the email of the maintainer
-#' @param depends a vector of strings specifying package dependencies
-#' @param url ignored
-#' @param path a character-string specifying the location of the package
-#' @return nothing
-.make.description <- function (pkg, author, email, depends, url, path='.') {
-  model <- pkg
-  description.file <- file.path(path, model, 'DESCRIPTION')
-
-  # make author list human-readable
-  author <- .get.list.as.text(author)
-
-  maintainer <- paste(author[1L], ' <', email, '>', sep="")
-
-  depends <- c("Zelig", depends)
-  depends <- unique(depends)
-  depends <- paste(depends, collapse=", ")
-
-  fields <- c(
-      Package = model,
-      Version = .1,
-      Date = as.character(Sys.Date()),
-      Title = "A Zelig Model",
-      Author = author,
-      Maintainer = maintainer,
-      Depends = depends,
-      Description = "A Zelig Model",
-      License = "GPL (>=2)",
-      URL = "http://gking.harvard.edu/zelig",
-      Packaged = gsub('\\s+', ' ', date())
-      )
-
-  # correctly write to file:
-  #   Package: 'model'
-  #   Version: .1
-  # etc.
-  writeLines(
-      paste(names(fields), ': ', fields, sep=""),
-      con = description.file
-      )
-}
-
-
-#' Substitute Expressions in a Template File
-#' @note This function fails if passed non-alphanumeric variable names. In
-#'   particular, the parameters cannot contain periods, etc.
-#' @param .file the name of the template file to read
-#' @param ... key-value pairs; every occurrence of \\key\\ in the file is
-#'   replaced with the corresponding value
-#' @return a character vector of the substituted lines
-.substitute.expressions <- function(.file, ...) {
-  lines <- readLines(con = .file, warn = FALSE)
-
-  replacements <- list(...)
-
-  for (key in names(replacements)) {
-    val <- replacements[[key]]
-    expr <- paste('\\\\\\\\', key, '\\\\\\\\', sep="")
-
-    lines <- gsub(expr, val, lines)
-  }
-
-  lines
-}
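-
-# A minimal sketch of the token convention (hypothetical template file): a
-# line containing \\model\\ has that token replaced by the supplied value:
-# tmp <- tempfile()
-# writeLines("zelig2\\\\model\\\\ <- function (formula, ..., data) NULL", tmp)
-# .substitute.expressions(tmp, model = "logit")
-# # yields "zelig2logit <- function (formula, ..., data) NULL"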
-
-#' Make \code{pkg}-package.R File for Roxygen Compliance
-#' @param pkg the package name
-#' @param author a vector of characters specifying the authors of the Zelig
-#'   models
-#' @param email the email address of the package's maintainer
-#' @param depends a vector specifying package dependencies
-#' @param url a character-string specifying the package's website
-#' @param path location of the package
-#' @return NULL
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.make.package.R <- function (pkg, author, email, depends, url, path='.') {
-  file <- system.file('templates', 'PACKAGE.R', package='Zelig')
-  dest <- file.path(path, pkg, 'R', paste(pkg, 'package.R', sep='-'))
-
-  author <- .get.list.as.text(author)
-  depends <- paste(c('Zelig', depends), collapse=', ', sep=', ')
-
-  lines <- .substitute.expressions(author=author, package=pkg, .file=file,
-    depends=depends
-    )
-
-  writeLines(lines, con = dest)
-}
-
-
-#' Convert Character-Strings into Human-Readable Lists
-#' This function converts its parameters into a human-readable and
-#' grammatically correct series.
-#' @param ... character-vectors and lists of characters
-#' @param final.comma whether to add a serial comma before the final item.
-#'   Grammatical correctness is debatable
-#' @return a comma-delimited string
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-.get.list.as.text <- function (..., final.comma=FALSE) {
-
-  authors <- c(...)
-  length <- length(authors)
-
-  if (!length)
-    ""
-
-  else if (length == 1)
-    authors[[1L]]
-
-  else if (length == 2)
-    paste(authors, collapse = " and ")
-
-  else {
-    beginning <- head(authors, -1)
-    beginning <- paste(beginning, collapse= ', ')
-
-    end <- tail(authors, 1)
-
-    final.sep <- ifelse(final.comma, ', and ', ' and ')
-
-    paste(beginning, end, sep = final.sep)
-  }
-}
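-
-# For example:
-# .get.list.as.text("A", "B")                           # "A and B"
-# .get.list.as.text("A", "B", "C")                      # "A, B and C"
-# .get.list.as.text("A", "B", "C", final.comma = TRUE)  # "A, B, and C"
-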
-#' Compute the Statistical Mode of a Vector
-#' @param x a vector of numeric, factor, or ordered values
-#' @return the statistical mode of the vector. If two modes exist, one is
-#'   randomly selected (by design)
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-Mode <- function (x) {
-  # build a table of values of x
-  tab <- table(as.factor(x))
-
-  # find the mode, then if there's more than one, select one randomly
-  v <- sample(names(which(tab == max(tab))), size=1)
-
-  # if it came in as a factor, we need to re-cast it
-  # as a factor, with the same exact levels
-  if (is.factor(x))
-    return(factor(v, levels=levels(x)))
-
-  # re-cast as any other data-type
-  as(v, class(x))
-}
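-
-# For example (ties are broken at random, by design):
-# Mode(c(1, 2, 2, 3))             # 2
-# Mode(factor(c("a", "b", "b")))  # factor "b" with levels "a" and "b"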
-
-
-#' Compute the Statistical Median of a Vector
-#' @param x a vector of numeric or ordered values
-#' @param na.rm ignored
-#' @return the median of the vector
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-Median <- function (x, na.rm=NULL) {
-  v <- if (is.numeric(x))
-    median(x)
-  else
-    levels(x)[ceiling(median(as.numeric(x)))]
-  if (is.ordered(x))
-    v <- factor(v, levels(x))
-  v
-}
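-
-# For example:
-# Median(c(1, 3, 5))  # 3
-# Median(ordered(c("lo", "mid", "hi"), levels = c("lo", "mid", "hi")))  # "mid"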
-
-#' Compute the Maximum Value of a Vector
-#' @param x a numeric or ordered vector
-#' @param na.rm ignored
-#' @return the maximum value of the vector
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-Max <- function (x, na.rm=NULL) {
-  if (is.numeric(x))
-    return(max(x))
-  
-  else if (is.ordered(x))
-    return(factor(max(levels(x),
-                      na.rm=T
-                      ),
-                  levels=levels(x)
-                  )
-           )
-
-  else
-    stop("Error: max cannot be computed for non-numeric and non-ordered values")
-}
-
-#' Compute the Minimum Value of a Vector
-#' @param x a vector of numeric or ordered values
-#' @param na.rm ignored
-#' @return the minimum value of the vector
-#' @export
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-Min <- function (x, na.rm=NULL) {
-  if (is.numeric(x))
-    return(min(x))
-  
-  else if (is.ordered(x))
-    return(factor(min(levels(x),
-                      na.rm=T
-                      ),
-                  levels=levels(x)
-                  )
-           )
-
-  else
-    stop("Error: min cannot be computed for non-numeric and non-ordered values")
-}
-#' @export
-loadDependencies <- function (..., character.only = FALSE) {
-  # Get arguments that aren't "character.only"
-
-  if (character.only) {
-    packs <- match.call(expand.dots = TRUE)[-1]
-    packs$character.only <- NULL
-    packs <- as.character(packs)
-  }
-  else
-    packs <- as.character(list(...))
-
-  #
-  results <- list()
-
-  #
-  for (pkg in packs)
-    results[pkg] <- require(pkg, character.only = TRUE)
-
-  if (all(unlist(results)))
-    invisible(TRUE)
-  else {
-    failed.packs <- Filter(function (x) { return(x == FALSE) }, results)
-    list.of.packages <- paste('"', names(failed.packs), '"', sep = '', collapse = ', ')
-
-    message('The following packages did not load: ')
-    cat('  ')
-    message(list.of.packages)
-    message()
-
-    install.string <- paste('  install.packages("', names(failed.packs), '")', sep = '', collapse = '\n')
-
-    message('To run this model, install these packages with the following command:')
-    message(install.string)
-    message()
-
-    stop('')
-  }
-}
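-
-# Typical use inside a model wrapper (package names are illustrative):
-# loadDependencies("MASS", "survival")
-# loads both packages, or stops with installation instructions for any
-# that are missing.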
-
-#' Produce All Combinations of a Set of Lists
-#' @note This function is used internally by the 'mi' constructors in order to
-#' produce the complete set of combinations of data-frames and factors by
-#' which to subset the data-frames.
-#' @param ... a set of lists to mix together
-#' @return all the combinations of the lists with repetition
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-mix <- function(...) {
-  # expand dot arguments
-  dots <- list(...)
-
-  # error-catching
-  if (length(dots) < 1)
-    return(NULL)
-
-  # prepare lists for first iteration
-  res <- dots[[1]]
-  dots <- dots[-1]
-
-  # this entire algorithm could be optimized,
-  # however, it will always be exponential time
-  while(length(dots) > 0) {
-    # get list to store new combinations in
-    new.list <- list()
-
-    # divide list
-    first <- dots[[1]]
-
-    # add new combinations
-    for (f in first) {
-      for (r in res) {
-        row <- append(as.list(r), f)
-        new.list[['']] <- row
-      }
-    }
-
-    # Update list
-    res <- new.list
-
-    # Shift first entry off
-    dots <- dots[-1]
-  }
-
-  # Appropriately name each entry
-  for (k in 1:length(res))
-    names(res[[k]]) <- names(list(...))
-
-  res
-}
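-
-# For example:
-# mix(a = list(1, 2), b = list("x", "y"))
-# # four entries, each named c("a", "b"):
-# # (1, "x"), (2, "x"), (1, "y"), (2, "y")
-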
-#' Produce All Combinations of a Set of Lists
-#' @note This function is used internally by the 'mi' constructors in order to
-#'   produce the complete set of combinations of data-frames and factors by
-#'   which to subset the data-frames.
-#' @param ... a set of lists to mix together
-#' @return all the combinations of the lists with repetition
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-combine <- function(...) {
-  # expand dot arguments
-  dots <- list(...)
-
-  # error-catching
-  if (length(dots) < 1)
-    return(NULL)
-
-  # prepare lists for first iteration
-  res <- dots[[1]]
-  dots <- dots[-1]
-
-  # this entire algorithm could be optimized,
-  # however, it will always be exponential time
-  while(length(dots) > 0) {
-    # get list to store new combinations in
-    new.list <- list()
-
-    # divide list
-    first <- dots[[1]]
-
-    # add new combinations
-    for (f in first)
-      for (r in res)
-        new.list[['']] <- c(r, f)
-
-    # update list
-    res <- new.list
-
-    # shift first entry off
-    dots <- dots[-1]
-  }
-
-  # m, as in matrix
-  m <- NULL
-
-  # format results as a matrix
-  for (r in res)
-    m <- rbind(m, r)
-
-  # name rows/cols
-  rownames(m) <- 1:length(res)
-  colnames(m) <- names(list(...))
-
-  # return
-  m
-}
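-
-# For example:
-# combine(a = list(1, 2), b = list("x", "y"))
-# # a 4-row matrix with columns "a" and "b", one row per combination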
-
-#' Split a List into Two Lists
-#' This function takes any list and splits it into two lists - one containing
-#' the entries that were given names and one containing the entries that were
-#' not.
-#' @note This function is a good candidate for deprecation
-#' @param args a list
-#' @return a list containing two entries: the key-value paired entries (titled
-#'   wordful) and the unkeyed entries (titled wordless)
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-#' @export
-#' @examples
-#' splitUp(list(x = 1, y = 2, 2, "red"))
-#' # list(wordless = list(2, "red"), wordful = list(x = 1, y = 2))
-splitUp <- function(args) {
-  wordless <- list()
-  wordful <- list()
-
-  k <- 1
-
-  if (is.null(names(args)))
-    return(list(wordless=unlist(args), wordful=NULL))
-
-  for (key in names(args)) {
-    if (nchar(key) == 0)
-      wordless <- c(wordless, args[[k]])
-    else
-      wordful[[key]] <- args[[k]]
-
-    k <- k+1
-  }
-
-  list(wordless=wordless, wordful=wordful)
-}
-
-
-
-## Honaker: I had to remove this utility function because
-## CRAN has recently weighed against use of ":::"
-
-## @topic: character-string representing help-topic
-## @package: package containing help-topic
-## return: character-string of processed Rd file
-#.get.help.file <- function(topic, package) {
-#  # get package help-file if no topic is set
-#  if (missing(topic))
-#    topic <- package
-#
-  ## error-checking:
-  ##   ensure file and package are strings
-#  if (!is.character(topic) && length(topic) > 1L)
-#    stop()
-#
-#  if (!is.character(package) && length(package) > 1L)
-#    stop()
-#
-  ##
-#  directory <- system.file(package=package)
-#
-  ##
-#  path <- utils:::index.search(
-#                               topic=topic,
-#                               paths=directory
-#                               )
-#
-  ## search package-help-database, get Rd file as string
-#  utils:::.getHelpFile(file=path)
-#}
-
-
-
-# @package: character-string specifying the name of a package to
-#           scan for help files
-# @as.table: boolean specifying whether the return value will be
-#            a table or names of Rd files
-# return: either a named vector (table), or an unnamed vector
-.list.help.files <- function(package, as.table=TRUE) {
-  # index for help files
-  fi <- file.path(
-                  system.file(package=package),
-                  "help",
-                  "AnIndex"
-                  )
-
-  if (file.exists(fi)) {
-    # get index of search-values and corresponding
-    #  Rd file
-    index <- scan(fi,
-                  what = list(names="", values=""),
-                  sep = "\t",
-                  quote = "",
-                  na.strings = "",
-                  quiet = TRUE
-                  )
-
-    # the if-else below is a return value
-    if (as.table)
-      # return as an index
-      structure(index$values, names=index$names)
-    
-    else
-      # return only the names of the Rd files
-      index$names
-  }
-  else {
-    warning("nothing was found")
-    NULL
-  }
-}
-
-#' Compute the Intersection of Two Sets
-#' @note This function is used internally by Zelig
-#' @param a a vector
-#' @param b a vector
-#' @param unique a boolean determining whether a intersect b will contain only
-#'   unique elements
-#' @return the intersection of a and b
-.intersection <- function(a, b, unique=TRUE) {
-  intersection <- a[a %in% b]
-
-  if (unique)
-    intersection <- unique(intersection)
-
-  if (is.null(intersection))
-    c()
-  else
-    intersection
-}
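-
-# For example:
-# .intersection(c(1, 2, 2, 3), c(2, 3, 4))                  # 2 3
-# .intersection(c(1, 2, 2, 3), c(2, 3, 4), unique = FALSE)  # 2 2 3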
-
-#' Hook to Update the Zelig Call with the Appropriate Call Object
-#' @note This function is used internally by Zelig, and currently deprecated.
-#' @param zobj a 'zelig' object
-#' @param call1 the original call to Zelig
-#' @param call2 the manufactured call to the model-fitting function
-#' @return the 'zelig' object with a modified 'call' slot
-replace.call <- function(zobj, call1, call2) {
-  # what if it doesn't exist?
-  if (!is.null(zobj$result$call) && is.call(call2))
-    zobj$result$call <- call2
-
-  zobj
-}
-
-#' Whether an Installed R-Package Depends on Zelig
-#' @note This function was used internally to determine whether an R-package is
-#'   Zelig compliant, but is now likely deprecated. This test is useless if not
-#'   paired with \code{is.zelig.compliant}.
-#' @param package a character-string naming a package
-#' @return whether this package depends on Zelig
-is.zelig.package <- function(package="") {
-  "Zelig" %in% tools::pkgDepends(package)$Depends
-}
-
-#' Whether a R-Package Contains a 'Yes' in its DESCRIPTION File's 'Zelig' Field
-#' @note This function was used internally to determine whether an R-package is
-#'   Zelig compliant, but is now likely deprecated.
-#' @param package a character-string specifying an installed R-package
-#' @return whether the package's DESCRIPTION file specifies Zelig-compliancy
-#' @seealso is.zelig.package
-#' @author Matt Owen \email{mowen@@iq.harvard.edu}
-is.zelig.compliant <- function(package="") {
-  #
-  zcomp <- packageDescription(package, fields="Zelig-Compliant")
-  zcomp <- tolower(zcomp)
-
-  #
-
-  if (! zcomp %in% c('yes', 'no'))
-    stop("")
-
-  zcomp == "yes"
-}
diff --git a/README b/README
deleted file mode 100644
index 2972baa..0000000
--- a/README
+++ /dev/null
@@ -1,301 +0,0 @@
-4.0-6 (August 25th, 2011): Experimental branch to improve the formula parser.
-
-4.0-5 (August 25th, 2011): Stable release for R 2.13.1. Removed dependency on
-  the 'iterators' package. This is part of a general move towards shrinking the
-  size of Zelig's dependency list. To facilitate this change, the 'mi' object
-  has been made more robust, and the 'zframe' helper object has been removed.
-
-  For specifics, please refer to the CHANGES file
-
-4.0-2 (May 16, 2011): Stable release for R 2.12.1. Major version update, and
-  addition of numerous API features. Core package now contains a mere 7
-  models. Dependencies have correspondingly been reduced to:
-    MASS
-    iterators
-    survival
-    methods
-
-  For the missing models, please see the software packages:
-    bivariate.zelig: bivariate generalized linear regressions
-    mixed.zelig: multilevel (mixed) generalized linear regressions
-    multinomial.zelig: multinomial logit and probit regressions
-    ordinal.zelig: ordinal logit and probit regressions
-    survey.zelig: survey-weighted generalized linear models
-
-  These models can be found on the Harvard IQSS website at:
-    http://gking.harvard.edu/zelig/
-  
-  Or via installations with:
-    source('http://people.iq.harvard.edu/~mowen/install.R')
-
-
-2.8-3 (May 29, 2007):  Stable release for R 2.4.0-2.5.0.  Fixed bugs in 
-  help.zelig(), and summary for multinomial logit, bivariate probit, 
-  and bivariate logit with multiple imputation.  First version 
-  dependencies are as follows:  
-     MASS          7.2-34           
-     boot          1.2-27           
-     VGAM          0.7-1            
-     MCMCpack      0.8-2            
-     mvtnorm       0.7-5            
-     survival      2.31             
-     sandwich      2.0-0            
-     zoo           1.2-1            
-     coda          0.10-7           
-     nnet          7.2-34           
-     sna           1.4    
-
-2.8-2 (March 3, 2007):  Stable release for R 2.4.0-2.4.1.  Fixed bug in 
-  ARIMA simulation process.  
-
-2.8-1 (February 21, 2007):  Stable release for R 2.4.0-2.4.1.  Made  
-  setx() compatible with ordred factor variables (thanks to Mike Ward and 
-  Kirill Kalinin).  First order dependencies as in version 2.8-1.  
-
-2.8-0 (February 12, 2007):  Stable release for R 2.4.0-2.4.1.
-  Released ARIMA models and network analysis models (least
-  squares and logit) for sociomatrices.  First version dependencies
-  are as follows:
-    MASS          7.2-31           
-    boot          1.2-27           
-    VGAM          0.7-1            
-    MCMCpack      0.7-4            
-    mvtnorm       0.7-5            
-    survival      2.31             
-    sandwich      2.0-0            
-    zoo           1.2-1            
-    coda          0.10-7           
-    nnet          7.2-31           
-    sna           1.4    
-
-2.7-5 (December 25, 2006):  Stable release for R 2.4.0-2.4.1.
-  Fixed bug related to names.default(), summary for multiple
-  imputation methods, and prediction for ordinal response models (thanks 
-  to Brian Ripley, Chris Lawrence, and Ian Yohai).
-
-2.7-3 (November 9, 2006):  Stable release for R 2.4.0.  Fixed bugs related 
-   to R check.
-
-2.7-2 (November 5, 2006):  Stable release for R 2.4.0.  Temporarily 
-  removed ARIMA models.
-     
-2.7-1 (November 3, 2006): Stable release for R 2.4.0. Made
-  changes regarding the S4 classes in VGAM. The ARIMA
-  model for time series data added by Justin Grimmer.  First level
-  dependencies are as follows:
-     MASS         7.2-29 
-     boot         1.2-26 
-     VGAM         0.7-1 
-     MCMCpack     0.7-4 
-     mvtnorm      0.7-5 
-     survival     2.29 
-     sandwich     2.0-0 
-     zoo          1.2-1 
-     coda         0.10-7 
-
-2.6-5 (September 14, 2006): Stable release for R 2.3.0-2.3.1.  Fixed bugs 
-  in bivariate logit, bivariate probit, multinomial logit, and 
-  model.matrix.multiple (related to version 2.6-4, but not previous 
-  versions,  thanks to Chris Lawrence).  First level dependencies are as 
-  follows:
-     MASS          7.2-27.1         
-     boot          1.2-26           
-     VGAM          0.6-9            
-     MCMCpack      0.7-1            
-     mvtnorm       0.7-2            
-     survival      2.28               
-     sandwich      1.1-1            
-     zoo           1.0-6            
-     coda          0.10-5 
-
-2.6-4 (September 8, 2006): Stable release for R 2.3.0-2.3.1.  Fixed 
-  bugs in setx(), and bugs related to 'multiple'.  Added 
-  instructions for installing Fortran tools for Intel macs.  Added 
-  the RxC ecological inference model.
-
-2.6-3 (June 19, 2006): Stable release for R 2.0.0-2.3.1.
-  Fixed bug in VDC interface functions, and parse.formula().
-
-2.6-2 (June 7, 2006): Stable release for R 2.0.0-2.3.1. Removed R x C EI.  
-  Changed data = list() to data = mi() for multiply-imputed data frames.  
-  First level version compatibilities are as for version 2.6-1.
-
-2.6-1 (April 29, 2006): Stable release for R 2.0.0-2.2.1. Fixed major bug 
-  in ordinal logit and ordinal probit expected value simulation procedure 
-  (does not affect Bayesian ordinal probit). (reported by Ian Yohai) Added 
-  the following ecological inference (EI) models: Bayesian hierarchical 
-  EI, Bayesian dynamic EI, and RxC EI.  First level version compatibilities 
-  (at time of release) are as follows:
-     MASS          7.2-24 
-     boot          1.2-24     
-     VGAM          0.6-8       
-     MCMCpack      0.7-1      
-     mvtnorm       0.7-2       
-     survival      2.24       
-     sandwich      1.1-1       
-     zoo           1.0-6       
-     coda          0.10-5
-
-2.5-4 (March 16, 2006): Stable release for R 2.0.0-2.2.1.  Fixed bug related to 
-  windows build.  First-level dependencies are the same as in version 2.5-1.  
-
-2.5-3 (March 9, 2006): Stable release for R 2.0.0-2.2.1.  Fixed bugs related to VDC 
-  GUI.  First level dependencies are the same as in version 2.5-1.
-
-2.5-2 (February 3, 2006): Stable release for R 2.0.0-2.2.1.  Fixed bugs related to 
-  VDC GUI.  First level dependencies are the same as in version 2.5-1.  
-
-2.5-1 (January 31, 2006): Stable release for R 2.0.0-2.2.1.  Added methods for 
-  multiple equation models.  Fixed bugs related to robust estimation and upgrade of 
-  sandwich and zoo packages.  Revised setx() to use environments.  Added 
-  current.packages() to retrieve version of packages upon which Zelig depends.  
-  First level version dependencies are as follows:  
-     MASS        7.2-24           
-     boot        1.2-24           
-     VGAM        0.6-7            
-     mvtnorm     0.7-2            
-     survival    2.20             
-     sandwich    1.1-0            
-     zoo         1.0-4            
-     MCMCpack    0.6-6            
-     coda        0.10-3
-
-2.4-6 (October 27, 2005): Stable release for R 2.0.0-2.2.0.  Fixed bug 
-  related to simulation for Bayesian Normal regression.  
-
-2.4-5 (October 18, 2005): Stable release for R 2.0.0-2.2.0.  Updated 
-  installation instructions. 
-
-2.4-4 (September 29, 2005): Stable release for R 2.0.0-2.2.0.  Fixed 
-  links for help.zelig().
-
-2.4-3 (September 29, 2005): Stable release for R 2.0.0-2.2.0.  
-
-2.4-2 (August 30, 2005): Stable release for R 2.0.0-2.1.1.  Fixed bug in 
-  setx() related to as.factor() and I().  Streamlined qi.survreg().
-
-2.4-1 (August 15, 2005): Stable release for R 2.0.0-2.1.1.  Added the 
-  following Bayesian models:  factor analysis, mixed factor analysis, 
-  ordinal factor analysis, unidimensional item response theory, 
-  k-dimensional item response theory, logit, multinomial logit, normal, 
-  ordinal probit, Poisson, and tobit.  Also fixed minor bug in
-  formula (long variable names coerced to list). 
-
-2.3-2 (August 5, 2005): Stable release for R 2.0.0-2.1.1.  Fixed bug in 
-  simulation procedure for lognormal model.
-
-2.3-1 (August 4, 2005): Stable release for R 2.0.0-2.1.1.
-  Fixed documentation errors related to model parameterization and code 
-  bugs related to first differences and conditional prediction for 
-  exponential, lognormal, and Weibull models.  (reported by Alison Post)
-
-2.2-4 (July 30, 2005): Stable release for R 2.0.0-2.1.1.  Revised 
-  relogit, adding options for weighting in addition to prior
-  correction.  (reported by Martin Plöderl)
-
-2.2-3 (July 24, 2005): Stable release for R 2.0.0-2.1.1.  Fixed bug 
-  associated with robust standard errors for negative binomial.
-
-2.2-2 (July 13, 2005): Stable release for R 2.0.0-2.1.1.  Fixed bug in 
-  setx().  (reported by Ying Lu)
-
-2.2-1 (July 11, 2005):  Stable release for R 2.0.0-2.1.0.  Revised 
-  ordinal probit to use MASS library.  Added robust standard errors 
-  for the following regression models: exponential, gamma, logit, 
-  lognormal, least squares, negative binomial, normal (Gaussian), 
-  poisson, probit, and weibull.
-
-2.1-4 (May 22, 2005):  Stable release for R 1.9.1-2.1.0.  Revised 
-  help.zelig() to deal with CRAN build of Windows version.  Added
-  recode of slots to lists in NAMESPACE.  Revised install.R script
-  to deal with changes to install.packages().  (reported by Dan 
-  Powers and Ying Lu)
-
-2.1-3 (May 9, 2005):  Stable release for R 1.9.1-2.1.0.  Revised
-  param.lm() function to work with bootstrap simulation.  (reported by 
-  Jens Hainmueller)
-
-2.1-2 (April 14, 2005):  Stable release for R 1.9.1-2.1.  Revised 
-  summary.zelig().
-
-2.1-1 (April 7, 2005):  Stable release for R 1.9.1-2.1.  Fixed bugs in 
-  NAMESPACE and summary.vglm().  
-
-2.0-13 (March 11, 2005): Stable release for R 1.9.1-2.0.1.  Fixed bugs in 
-  NAMESPACE and rocplot.Rd. 
-
-2.0-12 (February 20, 2005): Stable release for R 1.9.1-2.0.1.  Added plot 
-  = TRUE option to rocplot().  
-
-2.0-11 (January 14, 2005): Stable release for R 1.9.1-2.0.1.  Changed 
-  class name for subsetted models from "multiple" to "strata", and 
-  modified affected functions.
-
-2.0-10 (January 4, 2005): Stable release for R 1.9.1 and R 2.0.0.  Fixed 
-  bug in simulation procedure for ordinal logit.  (Reported by Ian Yohai.)
-
-2.0-9 (October 21, 2004): Stable release for R 1.9.1 and R 2.0.0 (Linux 
-  only).  Fixed bugs in NAMESPACE.
-
-2.0-8 (October 18, 2004): Stable release for R 1.9.1 and R 2.0.0
-  (Linux only).  Revised Zelig for submission to CRAN.
-  
-2.0-7 (October 14, 2004): Stable release for R 1.9.1 and R
-  2.0.0 (Linux only).  Fixed bugs in summary.zelig(), NAMESPACE, and
-  assorted bugs related to new R release.  Revised syntax for multiple
-  equation models.
-
-2.0-6 (October 4, 2004): Stable release for R 1.9.1. Fixed problem
-  with NAMESPACE.
-
-2.0-5 (September 25, 2004): Stable release for R 1.9.1.  Changed
-  installation procedure to source install.R from Zelig website.
-
-2.0-4 (September 22, 2004): Stable release for R 1.9.1.  Fixed typo in
-  installation directions, implemented NAMESPACE, rationalized
-  summary.zelig(), and tweaked documentation for least squares.
-
-2.0-3 (September 1, 2004): Stable release for R 1.9.1.  Fixed bug in
-  conditional prediction for survival models.
-
-2.0-2 (August 25, 2004): Stable release for R 1.9.1.  Removed
-  predicted values from ls.
-
-2.0-1b (July 16, 2004): Stable release for R 1.9.1.  MD5 checksum
-  problem fixed.  Revised plot.zelig() command to be a generic function
-  with methods assigned by the model.  Revised entire architecture to
-  accept multiply imputed data sets with strata.  Added functions to
-  simplify adding models.  Completely restructured reference manual.
-  Fixed bugs related to conditional prediction in setx and summarizing
-  strata in summary.zelig.
-
-1.1-2 (June 24, 2004): Stable release for R 1.9.1 (MD5
-  checksum problem not fixed, but does not seem to cause problems).
-  Fixed bug in help.zelig().  (reported by Michael L. Levitan)
-
-1.1-1 (June 14, 2004): Stable release for R 1.9.0.  Revised zelig()
-  procedure to use zelig2model() wrappers, revised help.zelig() to use a
-  data file with extension .url.tab, and revised setx() procedure to
-  take a list of fn to apply to variables, and such that fn = NULL
-  returns the entire model.matrix().
-
-1.0-8 (May 27, 2004): Stable release for R 1.9.0.  Fixed bug in
-  simulation procedure for survival models.  (reported by Elizabeth
-  Stuart)
-
-1.0-7 (May 26, 2004): Stable release for R 1.9.0. Fixed bug in relogit
-  simulation procedure.  (reported by Tom Vanwellingham)
-
-1.0-6 (May 11, 2004): Stable release for R 1.9.0.  Fixed bug in
-  setx.default, which had previously failed to ignore extraneous
-  variables in data frame.  (reported by Steve Purpura)
-
-1.0-5 (May 7, 2004): Replaced relogit procedure with memory-efficient
-  version. (reported by Tom Vanwellingham)
-
-1.0-4 (April 19, 2004): Stable release for R 1.9.0.  Added vcov.lm
-  method; changed print for summary.relogit.
-
-1.0-2 (April 16, 2004): Testing distribution for R 1.9.0. 
-
-1.0-1 (March 23, 2004): Stable release for R 1.8.1. 
diff --git a/build/vignette.rds b/build/vignette.rds
deleted file mode 100644
index 972b136..0000000
Binary files a/build/vignette.rds and /dev/null differ
diff --git a/data/MatchIt.url.tab.gz b/data/MatchIt.url.tab.gz
old mode 100644
new mode 100755
diff --git a/data/PErisk.txt.bz2 b/data/PErisk.txt.bz2
deleted file mode 100644
index 056a8ed..0000000
Binary files a/data/PErisk.txt.bz2 and /dev/null differ
diff --git a/data/PErisk.txt.gz b/data/PErisk.txt.gz
new file mode 100755
index 0000000..2ac49df
Binary files /dev/null and b/data/PErisk.txt.gz differ
diff --git a/data/SupremeCourt.txt.gz b/data/SupremeCourt.txt.gz
old mode 100644
new mode 100755
index 5130495..78e89ad
Binary files a/data/SupremeCourt.txt.gz and b/data/SupremeCourt.txt.gz differ
diff --git a/data/Weimar.txt.gz b/data/Weimar.txt.gz
old mode 100644
new mode 100755
diff --git a/data/Zelig.url.tab.gz b/data/Zelig.url.tab.gz
old mode 100644
new mode 100755
index f3377f9..8eda995
Binary files a/data/Zelig.url.tab.gz and b/data/Zelig.url.tab.gz differ
diff --git a/data/approval.tab.bz2 b/data/approval.tab.bz2
deleted file mode 100644
index 4775980..0000000
Binary files a/data/approval.tab.bz2 and /dev/null differ
diff --git a/data/approval.tab.gz b/data/approval.tab.gz
new file mode 100755
index 0000000..c9de541
Binary files /dev/null and b/data/approval.tab.gz differ
diff --git a/data/bivariate.tab.bz2 b/data/bivariate.tab.bz2
deleted file mode 100644
index 0d6e11c..0000000
Binary files a/data/bivariate.tab.bz2 and /dev/null differ
diff --git a/data/bivariate.tab.gz b/data/bivariate.tab.gz
new file mode 100755
index 0000000..c3add84
Binary files /dev/null and b/data/bivariate.tab.gz differ
diff --git a/data/coalition.tab.gz b/data/coalition.tab.gz
new file mode 100755
index 0000000..9e8f06f
Binary files /dev/null and b/data/coalition.tab.gz differ
diff --git a/data/coalition.tab.xz b/data/coalition.tab.xz
deleted file mode 100644
index 75b5f0d..0000000
Binary files a/data/coalition.tab.xz and /dev/null differ
diff --git a/data/coalition2.txt.gz b/data/coalition2.txt.gz
new file mode 100755
index 0000000..0991618
Binary files /dev/null and b/data/coalition2.txt.gz differ
diff --git a/data/coalition2.txt.xz b/data/coalition2.txt.xz
deleted file mode 100644
index 7e03045..0000000
Binary files a/data/coalition2.txt.xz and /dev/null differ
diff --git a/data/eidat.txt.gz b/data/eidat.txt.gz
old mode 100644
new mode 100755
diff --git a/data/free1.tab.bz2 b/data/free1.tab.bz2
deleted file mode 100644
index 4c4be5e..0000000
Binary files a/data/free1.tab.bz2 and /dev/null differ
diff --git a/data/free1.tab.gz b/data/free1.tab.gz
new file mode 100755
index 0000000..4886e46
Binary files /dev/null and b/data/free1.tab.gz differ
diff --git a/data/free2.tab.bz2 b/data/free2.tab.bz2
deleted file mode 100644
index 4c4be5e..0000000
Binary files a/data/free2.tab.bz2 and /dev/null differ
diff --git a/data/free2.tab.gz b/data/free2.tab.gz
new file mode 100755
index 0000000..4886e46
Binary files /dev/null and b/data/free2.tab.gz differ
diff --git a/data/friendship.RData b/data/friendship.RData
old mode 100644
new mode 100755
index 3a59225..054a142
Binary files a/data/friendship.RData and b/data/friendship.RData differ
diff --git a/data/grunfeld.txt.gz b/data/grunfeld.txt.gz
old mode 100644
new mode 100755
index cb2176c..334dd8b
Binary files a/data/grunfeld.txt.gz and b/data/grunfeld.txt.gz differ
diff --git a/data/hoff.tab.gz b/data/hoff.tab.gz
old mode 100644
new mode 100755
diff --git a/data/homerun.txt.gz b/data/homerun.txt.gz
new file mode 100755
index 0000000..1ba3ee5
Binary files /dev/null and b/data/homerun.txt.gz differ
diff --git a/data/homerun.txt.xz b/data/homerun.txt.xz
deleted file mode 100644
index d7440b9..0000000
Binary files a/data/homerun.txt.xz and /dev/null differ
diff --git a/data/immi1.tab.bz2 b/data/immi1.tab.bz2
deleted file mode 100644
index 69a17d0..0000000
Binary files a/data/immi1.tab.bz2 and /dev/null differ
diff --git a/data/immi1.tab.gz b/data/immi1.tab.gz
new file mode 100755
index 0000000..3fe1f05
Binary files /dev/null and b/data/immi1.tab.gz differ
diff --git a/data/immi2.tab.bz2 b/data/immi2.tab.bz2
deleted file mode 100644
index 9a14ec4..0000000
Binary files a/data/immi2.tab.bz2 and /dev/null differ
diff --git a/data/immi2.tab.gz b/data/immi2.tab.gz
new file mode 100755
index 0000000..259d69a
Binary files /dev/null and b/data/immi2.tab.gz differ
diff --git a/data/immi3.tab.bz2 b/data/immi3.tab.bz2
deleted file mode 100644
index a2b5500..0000000
Binary files a/data/immi3.tab.bz2 and /dev/null differ
diff --git a/data/immi3.tab.gz b/data/immi3.tab.gz
new file mode 100755
index 0000000..da4b8c5
Binary files /dev/null and b/data/immi3.tab.gz differ
diff --git a/data/immi4.tab.bz2 b/data/immi4.tab.bz2
deleted file mode 100644
index 17e1296..0000000
Binary files a/data/immi4.tab.bz2 and /dev/null differ
diff --git a/data/immi4.tab.gz b/data/immi4.tab.gz
new file mode 100755
index 0000000..3d786a5
Binary files /dev/null and b/data/immi4.tab.gz differ
diff --git a/data/immi5.tab.bz2 b/data/immi5.tab.bz2
deleted file mode 100644
index 2272bd6..0000000
Binary files a/data/immi5.tab.bz2 and /dev/null differ
diff --git a/data/immi5.tab.gz b/data/immi5.tab.gz
new file mode 100755
index 0000000..4abd1da
Binary files /dev/null and b/data/immi5.tab.gz differ
diff --git a/data/immigration.tab.bz2 b/data/immigration.tab.bz2
deleted file mode 100644
index c8f62d4..0000000
Binary files a/data/immigration.tab.bz2 and /dev/null differ
diff --git a/data/immigration.tab.gz b/data/immigration.tab.gz
new file mode 100755
index 0000000..c016da4
Binary files /dev/null and b/data/immigration.tab.gz differ
diff --git a/data/klein.txt.gz b/data/klein.txt.gz
old mode 100644
new mode 100755
diff --git a/data/kmenta.txt.gz b/data/kmenta.txt.gz
old mode 100644
new mode 100755
diff --git a/data/macro.tab.gz b/data/macro.tab.gz
new file mode 100755
index 0000000..0931186
Binary files /dev/null and b/data/macro.tab.gz differ
diff --git a/data/macro.tab.xz b/data/macro.tab.xz
deleted file mode 100644
index 4ea5274..0000000
Binary files a/data/macro.tab.xz and /dev/null differ
diff --git a/data/mexico.tab.bz2 b/data/mexico.tab.bz2
deleted file mode 100644
index c7a845d..0000000
Binary files a/data/mexico.tab.bz2 and /dev/null differ
diff --git a/data/mexico.tab.gz b/data/mexico.tab.gz
new file mode 100755
index 0000000..d725306
Binary files /dev/null and b/data/mexico.tab.gz differ
diff --git a/data/mid.tab.bz2 b/data/mid.tab.bz2
deleted file mode 100644
index 571dfa6..0000000
Binary files a/data/mid.tab.bz2 and /dev/null differ
diff --git a/data/mid.tab.gz b/data/mid.tab.gz
new file mode 100755
index 0000000..3f1b10f
Binary files /dev/null and b/data/mid.tab.gz differ
diff --git a/data/newpainters.txt.bz2 b/data/newpainters.txt.bz2
deleted file mode 100644
index 5f00239..0000000
Binary files a/data/newpainters.txt.bz2 and /dev/null differ
diff --git a/data/newpainters.txt.gz b/data/newpainters.txt.gz
new file mode 100755
index 0000000..3289a7b
Binary files /dev/null and b/data/newpainters.txt.gz differ
diff --git a/data/sanction.tab.bz2 b/data/sanction.tab.bz2
deleted file mode 100644
index a7bfbec..0000000
Binary files a/data/sanction.tab.bz2 and /dev/null differ
diff --git a/data/sanction.tab.gz b/data/sanction.tab.gz
new file mode 100755
index 0000000..263e679
Binary files /dev/null and b/data/sanction.tab.gz differ
diff --git a/data/seatshare.rda b/data/seatshare.rda
new file mode 100644
index 0000000..ef62ebd
Binary files /dev/null and b/data/seatshare.rda differ
diff --git a/data/sna.ex.RData b/data/sna.ex.RData
old mode 100644
new mode 100755
index 69328ae..b80635c
Binary files a/data/sna.ex.RData and b/data/sna.ex.RData differ
diff --git a/data/swiss.txt.bz2 b/data/swiss.txt.bz2
deleted file mode 100644
index 27de507..0000000
Binary files a/data/swiss.txt.bz2 and /dev/null differ
diff --git a/data/swiss.txt.gz b/data/swiss.txt.gz
new file mode 100755
index 0000000..6c8f9ff
Binary files /dev/null and b/data/swiss.txt.gz differ
diff --git a/data/tobin.txt.gz b/data/tobin.txt.gz
old mode 100644
new mode 100755
diff --git a/data/turnout.tab.bz2 b/data/turnout.tab.bz2
deleted file mode 100644
index 14e86f6..0000000
Binary files a/data/turnout.tab.bz2 and /dev/null differ
diff --git a/data/turnout.tab.gz b/data/turnout.tab.gz
new file mode 100755
index 0000000..af3cefe
Binary files /dev/null and b/data/turnout.tab.gz differ
diff --git a/data/voteincome.txt.bz2 b/data/voteincome.txt.bz2
deleted file mode 100644
index 07d2166..0000000
Binary files a/data/voteincome.txt.bz2 and /dev/null differ
diff --git a/data/voteincome.txt.gz b/data/voteincome.txt.gz
new file mode 100755
index 0000000..7aceff1
Binary files /dev/null and b/data/voteincome.txt.gz differ
diff --git a/demo/00Index b/demo/00Index
index b2a555b..6e4da98 100644
--- a/demo/00Index
+++ b/demo/00Index
@@ -1,31 +1,42 @@
-exp                Exponential regression and simulation 
-gamma              Gamma regression and simulation
-logit              Logit regression and simulation 
-lognorm            Lognormal regression and simulation 
-ls                 Least Squares regression and simulation 
-mi                 Multiply imputed regressions and simulations
-negbinom           Negative Binomial regression and simulation
-normal             Normal (Gaussian) regression and simulation
-poisson            Poisson regression and simulation
-probit             Probit regression and simulation
-relogit            Rare events logit regression and simulation
-twosls             Two Stage Least Squares
-factor.bayes       MCMC factor analysis
-logit.bayes        MCMC logistic regression model and simulation
-normal.bayes       MCMC regression model and simulation
-probit.bayes       MCMC probit regression model and simulation
-poisson.bayes      MCMC poisson regression model and simulation
-mlogit.bayes       MCMC multinomial regression model and simulation
-oprobit.bayes      MCMC ordered probit regression model and simulation
-logit.gee          GEE logistic regression
-gamma.gee          GEE gamma regression
-normal.gee         GEE normal regression
-poisson.gee        GEE poisson regression
-probit.gee         GEE probit regression
-normal.survey	   Survey-Weighted Normal Regression for Continuous Dependent Variables
-logit.survey	   Survey-Weighted Logistic Regression for Dichotomous Dependent Variables
-probit.survey	   Survey-Weighted Probit Regression for Dichotomous Dependent Variables
-poisson.survey     Survey-Weighted Poisson Regression for Event-count Dependent Variables
-gamma.survey       Survey-Weighted Gamma Regression for Positive Continuous Dependent Variables 
-vertci	           Confidence intervals across the range of a continuous covariate
-Zelig.HelloWorld   Step-by-step demo on creating Zelig packages
+demo-amelia			use of output objects from Amelia package
+demo-data-table		use of data tables
+demo-dplyr			use of dplyr package
+demo-exp			use of exponential model
+demo-factor-bayes	use of Bayesian factor model
+demo-feedback		use of feedback function
+demo-gamma-gee		example of gamma model in generalized estimating equations 
+demo-gamma 			example of gamma regression model
+demo-json 			example of construction of json file of model inheritance
+demo-logit-bayes 	example of Bayesian logistic model
+demo-logit-gee 		example of logistic model with generalized estimating equations
+demo-logit 			example of GLM logistic model
+demo-lognorm 		example of lognormal survival model
+demo-ls 			example of least squares regression
+demo-mlogit-bayes 	example of multinomial logistic model
+demo-negbinom 		example of negative binomial regression
+demo-normal-bayes 	example of Bayesian normally distributed outcomes
+demo-normal-gee 	example of normally distributed outcomes with generalized estimating equations
+demo-normal 		example of model of normally distributed outcomes
+demo-oprobit-bayes 	example of ordered probit model
+demo-poisson-bayes 	example of Bayesian Poisson regression
+demo-poisson-gee 	example of Poisson regression with generalized estimating equations
+demo-poisson 		example of Poisson regression
+demo-probit-bayes 	example of Bayesian Probit regression
+demo-probit-gee 	example of Probit model with generalized estimating equations
+demo-probit 		example of Probit model for binary choice outcomes
+demo-quantile 		example of quantile regression model
+demo-range 			example of use of range utility for estimating quantities of interest
+demo-relogit 		example of Rare Events Logistic regression
+demo-roc 			example of ROC graph for comparing binary choice outcome models
+demo-scope 			example of scoping behavior 
+demo-setx-2 		additional example of setx function for estimating quantities of interest
+demo-setx 			example of setx function for estimating quantities of interest
+demo-setx-fn		example of setx function using the fn argument
+demo-show 			example of printing model coefficients
+demo-signif-stars 	example of modifying tables of model coefficients
+demo-strata 		example of estimation with strata
+demo-tobit 			example of Tobit censored regression model
+demo-tobitbayes 	example of Bayesian Tobit censored regression model
+demo-weibull 		example of Weibull survival model
+demo-wrappers 		example of wrappers to emulate prior zelig call structure
+demo-zip 			example of zero inflated poisson mixture model
diff --git a/demo/Zelig.HelloWorld.R b/demo/Zelig.HelloWorld.R
deleted file mode 100644
index 2c07199..0000000
--- a/demo/Zelig.HelloWorld.R
+++ /dev/null
@@ -1,173 +0,0 @@
-## Load data
-data(turnout)
-
-# The following demo is a step-by-step instruction guide on building a Zelig
-# model. For the most part, the steps have been simplified, and the model
-# itself is simply written to show broad ideas, rather than the specifics
-# of developing a fully functioning statistical model
-
-user.prompt("Press <return> to Read about External Methods")
-
-# Step 1: Creating and Using External Methods (optional)
-# ======================================================
-# Create a model to be used by the Zelig function. This method
-# should be designed with the singular purpose of fitting a statistical model.
-# That is, it should analyze a data-set given several parameters
-#
-# For the most part, this step is optional, as quite often R contains builtin
-# functions for doing these kinds of analyses. Regardless, this step is kept
-# here for completeness.
-#
-# The foreign model, in its simplest form, need satisfy only one of two requirements,
-# either:
-#   1. The model contains a slot labeled "formula", or
-#   2. There is a "formula" method defined for objects of this class
-
-user.prompt("Press <return> to Continue to Step 1")
-
-
-
-HelloWorldMethod <- function(formula, verbose=TRUE, data) {
-  if (verbose) {
-    print.form <- paste(as.character(formula), collapse=" ")
-    print.data <- as.character(substitute(data))
-  cat("Hello, Zelig!\n")
-  }
-
-  x <- list(formula = formula)
-  class(x) <- "HelloWorld"
-  x
-}
-
-user.prompt("Press <return> to Read about Describing Zelig Models")
-
-
-
-# Step 2: Describing Zelig Models (optional)
-# ==========================================
-# Describing the model is an optional, though important step if the developer
-# would like to be correctly cited in scholarly documents. In its most basic
-# form, it is simply a list specifying "authors", "text" as the title-text,
-# and the publication year.
-
-user.prompt("Press <return> to Continute to Step 2")
-
-describe.hello <- function (...) {
-  list(authors = "You", text='A "Hello, World!" Model')
-}
-
-user.prompt("Press <return> to Read about zelig2 Functions")
-
-# Step 3: Interfacing between the External Model and Zelig (crucial)
-# ==================================================================
-# The 'zelig2' function of a model is named by appending the model's name
-# to "zelig2". This informs Zelig that a model by the appropriate
-# name exists. In this demo, "hello" is the model's name, and, as such,
-# the zelig2 function is named "zelig2hello".
-#
-# In the upcoming example, please note that the parameters of the external
-# method "HelloWorldMethod" are all included within the list that is being
-# returned from the "zelig2hello" function.
-#
-# In general, all "zelig2" functions follow this format. For more detailed
-# information concerning "zelig2" functions, type:
-#    ?zelig2
-#
-# within an R session.
-
-user.prompt("Press <return> to See an Example of a \"zelig2\" Method")
-
-
-
-zelig2hello <- function (formula, ..., data) {
-  list(                                            
-       .function = "HelloWorldMethod",
-       formula = formula,
-       data = data
-       )
-}
-
-user.prompt('Press <return> to Read about the "param" Functions')
-
-# Step 4: Simulating Parameters
-# =============================
-# The "param" function of a Zelig model is written by concatenating "param."
-# with the model's name. In the ongoing example, the "hello" model will have
-# a param function named "param.hello". 
-#
-# The return value of a "param" function is a list optionally containing the 
-# values: simulations, alpha, link, linkinv, and family. For more detailed
-# information concerning writing "param" functions, type:
-#   ?param
-#
-# within an R session.
-
-user.prompt('Press <return> to See an Example "param" Function')
-
-param.hello <- function(obj, num=1000, ...) {
-  list(
-       simulations = rbinom(n=num, 1, .5),
-       alpha = .5,
-       linkinv = NULL
-       )
-}
-
-user.prompt('Press <return> to Read about "qi" Methods')
-
-
-# Step 5: Simulating Quantities of Interest
-# =========================================
-# The "qi" method of a Zelig model is written by concatentating "qi." with the
-# model's name. In the ongoing example, the "hello" model will have a qi method
-# named "qi.hello".
-#
-# The return-value of a qi method is a list pairing titles of quantities of
-# interest and their simulations. For example, a model that computes
-# "Expected Values" will have a return value:
-#    list("Expected Values" = ev)
-#
-# where 'ev' is a variable containing the simulated expected value. For more 
-# detailed information concerning writing 'qi' methods, type:
-#   ?qi
-#
-# within an R session.
-
-user.prompt('Press <return> to See an Example "qi" Method')
-
-qi.hello <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  possibilities <- c('Hello', 'World')
-  success.prob <- alpha(param)
-  
-  
-  sims <- rbinom(n=num, 1, success.prob) + 1
-  pv <- possibilities[sims]
-
-  list(
-       "Predicted Value: E(Y|X)" = pv
-       )
-}
-
-user.prompt('Press <return> to Read More about Zelig')
-
-# More Information about Zelig
-# ============================
-# That's it! Now that the zelig2, qi, and param methods are defined, Zelig can
-# run the "hello" model. For more detailed information concerning the Zelig
-# package, visit:
-#   http://gking.harvard.edu/zelig
-#
-# or type:
-#   ?Zelig
-#
-# within an R-session
-
-user.prompt('Press <return> to see the results of the "hello" model')
-
-## Run Zelig Functions
-z <- zelig(~ 1, model="hello", data=turnout)
-x <- setx(z)
-s <- sim(z)
-
-## Display Fictional Summary
-summary(s)
diff --git a/demo/demo-amelia.R b/demo/demo-amelia.R
new file mode 100755
index 0000000..01f491c
--- /dev/null
+++ b/demo/demo-amelia.R
@@ -0,0 +1,48 @@
+library(Amelia)
+data(africa)
+df <- head(africa, 100)
+imp <- amelia(x = df , cs = "country", m = 5) 
+imp2 <- imp$imputations
+
+# Zelig 4 code:
+library(Zelig4)
+z.out <- zelig(infl ~ trade + civlib, data = imp, model = "ls")
+summary(z.out)
+x.out <- setx(z.out, civlib = .5)
+set.seed(42)
+s.out <- sim(z.out, x.out, num = 100)
+summary(s.out)
+
+# Zelig 5 code:
+# library(data.table)
+library(Amelia)
+library(Zelig)
+z5 <- zls$new()
+z5$zelig(infl ~ trade + civlib, data = imp)
+z5$zelig(infl ~ trade + civlib, data = imp, by = "country")
+z5
+z5$zelig.out
+z5$model.call
+z5$zelig.call
+z5$setx(civlib = .5)
+z5$setrange(civlib = c(0.3, .5))
+z5$setx.out
+set.seed(42)
+z5$sim(num=1000)
+z5$sim.out
+z5$summarize()
+z5$cite()
+
+z5$setrange(speed = 30:32,speed = 30:32)
+z5$setx.out
+set.seed(42)
+z5$sim(num=3)
+z5$sim.out
+z5$summarize()
+
+data(freetrade)
+df <- head(freetrade, 1000)
+imp <- amelia(x = df, cs = "country", m = 10) 
+imp2 <- imp$imputations
+
+
diff --git a/demo/demo-data-table.R b/demo/demo-data-table.R
new file mode 100644
index 0000000..7252327
--- /dev/null
+++ b/demo/demo-data-table.R
@@ -0,0 +1,21 @@
+library(data.table)
+
+data(cars)
+Cars <- data.table(cars)
+z5 <- zls$new()
+z5$zelig(dist ~ speed, data = Cars)
+z5$setx1(speed = 30)
+z5$setx()
+set.seed(42)
+z5$sim()
+z5$summarize()
+z5$cite()
+
+library(Amelia)
+data(africa)
+Africa <- data.table(africa)
+imp <- amelia(x = Africa , cs = "country", m = 5) 
+
+z5 <- zls$new()
+z5$zelig(infl ~ trade + civlib, data = imp)
+
diff --git a/demo/demo-dplyr.R b/demo/demo-dplyr.R
new file mode 100755
index 0000000..8d93756
--- /dev/null
+++ b/demo/demo-dplyr.R
@@ -0,0 +1,40 @@
+library(Amelia)
+
+data(freetrade)
+df <- freetrade
+imp <- amelia(x = df , cs = "country", m = 10) 
+
+z5 <- zls$new()
+z5
+z5$zelig(gdp.pc ~ tariff + intresmi, data = imp)
+z5
+z5$zelig.out
+z5$setx(tariff = 0.4)
+z5$setx.out
+# z5$setx1(tariff = 0.6)
+z5$setx.out
+z5$zelig.out
+# z5$setrange(tariff = 1:3)
+z5$setx.out
+z5$zelig.out
+z5
+# TODO: Fix when re-running sim
+z5$sim(10)
+# z5$simx1(10)
+z5$sim.out
+# z5$zelig.out
+
+z5 <- zls$new()
+z5$zelig(gdp.pc ~ tariff + intresmi, data = imp, by = c("country", "year"))
+z5$zelig.out
+z5$setx(tariff = 0.4)
+z5$setx1(tariff = 0.6)
+z5$setrange(tariff = seq(0.2, 1.5, 0.1))
+z5$zelig.out
+z5$setx.out
+.self <- z5
+# TODO: Fix when re-running sim
+z5$sim(10)
+z5$zelig.out
+z5$sim.out
+z5$sim.out$range
diff --git a/demo/demo-exp.R b/demo/demo-exp.R
new file mode 100755
index 0000000..ab7f631
--- /dev/null
+++ b/demo/demo-exp.R
@@ -0,0 +1,25 @@
+library(survival)
+# Zelig 4 code:
+library(Zelig4)
+data(coalition)
+z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "exp",
+               data = coalition)
+summary(z.out)
+x.low <- setx(z.out, numst2 = 0)
+set.seed(42)
+s.out <- sim(z.out, x = x.low, num=10)
+summary(s.out)
+
+# Zelig 5 code:
+data(coalition)
+z5 <- zexp$new()
+z5$zelig(Surv(duration, ciep12) ~ fract + numst2, model = "exp",
+         data = coalition)
+z5
+z5$zelig.out
+z5$setx(numst2 = 0)
+set.seed(42)
+z5$sim(num=10)
+z5$summarize()
+z5$cite()
+
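+# A first-difference sketch, following the old exp demo: compare minority
+# (numst2 = 0) with majority (numst2 = 1) coalitions.
+z5$setx(numst2 = 0)
+z5$setx1(numst2 = 1)
+set.seed(42)
+z5$sim(num = 10)
+z5$summarize()
+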
diff --git a/demo/demo-factor-bayes.R b/demo/demo-factor-bayes.R
new file mode 100644
index 0000000..0978cc4
--- /dev/null
+++ b/demo/demo-factor-bayes.R
@@ -0,0 +1,65 @@
+# Zelig 4 code:
+library(Zelig4)
+data(swiss)
+names(swiss) <- c("Fert", "Agr", "Exam", "Educ", "Cath", "InfMort")
+z.out <- Zelig4::zelig(cbind(Agr, Exam, Educ, Cath, InfMort) ~ NULL,
+                       model = "factor.bayes", data = swiss, factors = 2, verbose = TRUE,
+                       a0 = 1, b0 = 0.15, burnin = 500, mcmc = 5000)
+
+summary(z.out)
+x.out <- Zelig4::setx(z.out)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+
+# Zelig 5 code:
+z5 <- zfactorbayes$new()
+
+# z5$zelig(cbind(Agr, Exam, Educ, Cath, InfMort) ~ NULL,
+#          data = swiss, factors = 2, verbose = TRUE,
+#          a0 = 1, b0 = 0.15, burnin = 500, mcmc = 5000)
+
+z5$zelig(~ Agr + Exam + Educ + Cath + InfMort,
+         data = swiss, factors = 2, verbose = FALSE,
+         a0 = 1, b0 = 0.15, burnin = 500, mcmc = 5000)
+
+# z5$zelig(~Agriculture+Examination+Education+Catholic
+#          +Infant.Mortality, factors=2,
+#          lambda.constraints=list(Examination=list(1,"+"),
+#                                  Examination=list(2,"-"), Education=c(2,0),
+#                                  Infant.Mortality=c(1,0)),
+#          verbose=0, store.scores=FALSE, a0=1, b0=0.15,
+#          data=swiss, burnin=500, mcmc=500, thin=20)
+z5
+
+z.out <- zelig(~ Agr + Exam + Educ + Cath + InfMort,
+               model = "factor.bayes", data = swiss,
+               factors = 2, verbose = FALSE,
+               a0 = 1, b0 = 0.15, burnin = 500, mcmc = 5000)
+
+z.out <- zelig(~ Agr + Exam + Educ + Cath + InfMort,  
+               model = "factor.bayes", data = swiss, factors = 2,
+               lambda.constraints = list(Exam = list(1,"+"),
+                                         Exam = list(2,"-"), Educ = c(2, 0),
+                                         InfMort = c(1, 0)), 
+               verbose = FALSE, a0 = 1, b0 = 0.15, 
+               burnin = 500, mcmc = 5000)
+
+
+lapply(z5$zelig.out$z.out, geweke.diag)
+
+z5$zelig.call
+z5$model.call
+z5$zelig.out$z.out[[1]]
+z5$setx()
+# z5$setx(math = 30)
+set.seed(42)
+# z5$sim(num = 1000)
+# z5$summarize()
+z5$cite()
+
+geweke.diag(z5$zelig.out$z.out[[1]])
+heidel.diag(z5$zelig.out$z.out[[1]])
+raftery.diag(z5$zelig.out$z.out[[1]])
+summary(z5)
diff --git a/demo/demo-feedback.R b/demo/demo-feedback.R
new file mode 100644
index 0000000..172f77f
--- /dev/null
+++ b/demo/demo-feedback.R
@@ -0,0 +1,10 @@
+library(Zelig)
+
+z5 <- zlogit$new()
+z5$feedback()
+# z5$finalize() # Not clear how to trigger it with q()
+# quitting R triggers call to finalize method
+
+z6 <- zls$new()
+z6$feedback()
+
diff --git a/demo/demo-gamma-gee.R b/demo/demo-gamma-gee.R
new file mode 100755
index 0000000..b4ea94c
--- /dev/null
+++ b/demo/demo-gamma-gee.R
@@ -0,0 +1,34 @@
+# Zelig 4 code:
+library(Zelig4)
+data(coalition)
+coalition$cluster <- c(rep(c(1:62),5),rep(c(63),4))
+sorted.coalition <- coalition[order(coalition$cluster),]
+z.out <- Zelig4::zelig(duration ~ fract + numst2, model = "gamma.gee", id = "cluster",
+                       data = sorted.coalition)
+x.low <- Zelig4::setx(z.out, numst2 = 0)
+x.high <- Zelig4::setx(z.out, numst2 = 1)
+
+s.out <- Zelig4::sim(z.out, x = x.low, x1 = x.high)
+summary(s.out)
+
+# Zelig 5 code:
+set.seed(42)
+z5 <- zgammagee$new()
+z5$zelig(duration ~ fract + numst2, id = coalition$cluster,
+         data = coalition, corstr = "exchangeable")
+z5
+z5$zelig.call
+z5$model.call
+z5$setx(numst2 = 0)
+z5$setx1(numst2 = 1)
+set.seed(42)
+z5$sim(num=10)
+z5$summarize()
+z5$cite()
+
+# geepack::geeglm(formula = duration ~ fract + numst2, family = Gamma("inverse"), 
+#                 data = coalition, id = coalition$cluster, corstr = "exchangeable")
+# 
+# geepack::geeglm(formula = duration ~ fract + numst2, family = Gamma, 
+#                 data = coalition, id = coalition$cluster, corstr = "exchangeable")
+
diff --git a/demo/demo-gamma.R b/demo/demo-gamma.R
new file mode 100755
index 0000000..89e59f4
--- /dev/null
+++ b/demo/demo-gamma.R
@@ -0,0 +1,22 @@
+# Zelig 4 code:
+library(Zelig4)
+data(coalition)
+z.out <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
+summary(z.out)
+x.low <- setx(z.out, numst2 = 0)
+set.seed(42)
+s.out <- sim(z.out, x = x.low, n=100000)
+summary(s.out)
+
+# Zelig 5 code:
+data(coalition)
+z5 <- zgamma$new()
+z5$zelig(duration ~ fract + numst2, data = coalition)
+z5
+z5$setx(numst2 = 0)
+set.seed(42)
+z5$sim(num=100000)
+statmat(z5$sim.out$x$ev[[1]])
+statmat(z5$sim.out$x$pv[[1]])
+z5$summarize()
+
diff --git a/demo/demo-json.R b/demo/demo-json.R
new file mode 100755
index 0000000..d5f267a
--- /dev/null
+++ b/demo/demo-json.R
@@ -0,0 +1,18 @@
+library(MASS)
+library(jsonlite)
+data(cars)
+
+source(file.path("..", "R", "utils.R"))
+source(file.path("..", "R", "model-zelig.R"))
+source(file.path("..", "R", "model-ls.R"))
+
+z5 <- zls$new()
+z5$zelig(dist ~ speed, data = cars)
+z5
+z5$setx(speed = 30)
+set.seed(42)
+z5$sim(num=100)
+z5$summarize()
+
+z5$toJSON()
+cat(z5$json)
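+
+# A minimal sketch of persisting the JSON for external tools; the file
+# name is illustrative only.
+writeLines(z5$json, "zls-model.json")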
diff --git a/demo/demo-logit-bayes.R b/demo/demo-logit-bayes.R
new file mode 100644
index 0000000..314db80
--- /dev/null
+++ b/demo/demo-logit-bayes.R
@@ -0,0 +1,28 @@
+library(MASS)
+mydata <- read.csv("http://www.ats.ucla.edu/stat/data/binary.csv")
+mydata$rank <- factor(mydata$rank)
+
+# Zelig 4 code:
+library(Zelig4)
+# epsilon does not seem to be taken into account
+z.out <- Zelig4::zelig(admit ~ gre + gpa, data = mydata,
+                       model="logit.bayes")
+summary(z.out)
+x.out <- Zelig4::setx(z.out, gpa = 1.3, gre = 4500, speed = 12)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+
+# Zelig 5 code:
+z5 <- zlogitbayes$new()
+z5$zelig(admit ~ gre + gpa, data = mydata)
+z5
+z5$zelig.out
+z5$setx(gpa = 1.3, gre = 4500, speed = 12)
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
+
+z.out <- zelig(admit ~ gre + gpa, data = mydata, model = "logit.bayes")
diff --git a/demo/demo-logit-gee.R b/demo/demo-logit-gee.R
new file mode 100755
index 0000000..bb07778
--- /dev/null
+++ b/demo/demo-logit-gee.R
@@ -0,0 +1,23 @@
+# Zelig 4 code:
+data(turnout)
+turnout$cluster <- rep(c(1:200), 10)
+sorted.turnout <- turnout[order(turnout$cluster), ]
+z.out1 <- Zelig4::zelig(vote ~ race + educate, model = "logit.gee", id = "cluster",
+                        data = sorted.turnout)
+x.out1 <- Zelig4::setx(z.out1)
+set.seed(42)
+s.out1 <- Zelig4::sim(z.out1, x = x.out1)
+summary(s.out1)
+
+# Zelig 5 code:
+z5 <- zlogitgee$new()
+z5$zelig(vote ~ race + educate, id = "cluster",
+         data = sorted.turnout)
+z5
+z5$zelig.call
+z5$model.call
+z5$setx()
+set.seed(42)
+z5$sim(500)
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-logit.R b/demo/demo-logit.R
new file mode 100755
index 0000000..0739c56
--- /dev/null
+++ b/demo/demo-logit.R
@@ -0,0 +1,51 @@
+library(MASS)
+mydata <- read.csv("http://www.ats.ucla.edu/stat/data/binary.csv")
+mydata$rank <- factor(mydata$rank)
+
+# Zelig 4 code:
+library(Zelig4)
+# epsilon does not seem to be taken into account
+z.out <- zelig(admit ~ gre + gpa, data = mydata, model="logit",
+               epsilon=.0001)
+summary(z.out)
+x.out <- setx(z.out, gpa=1.3, gre=4500, speed=12)
+set.seed(42)
+s.out <- sim(z.out, x = x.out, num=1000)
+summary(s.out)
+
+
+# Zelig 5 code:
+z5 <- zlogit$new()
+z5$zelig(admit ~ gre + gpa, data = mydata,
+         epsilon=1)
+z5
+z5$zelig.out
+# removed epsilon to compare with Zelig 4
+z5$zelig(admit ~ gre + gpa, data = mydata)
+z5
+z5$setx(gpa=1.3, gre=4500, speed=12)
+set.seed(42)
+z5$sim(num=1000)
+z5$summarize()
+z5$cite()
+
+# Example 2
+data(turnout)
+z.out <- zelig(vote ~ race + educate,
+               data = turnout,
+               model = "logit")
+summary(z.out)
+x.out <- setx(z.out, educate = 12)
+set.seed(42)
+s.out <- sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+z5 <- zlogit$new()
+z5$zelig(vote ~ race + educate,
+         data = turnout)
+z5$show()
+z5$setx(educate = 12)
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+
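+# A sketch of plotting the simulated quantities of interest; assumes the
+# graph() method used in the quantile and weibull demos applies here too.
+z5$graph()
+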
diff --git a/demo/demo-lognorm.R b/demo/demo-lognorm.R
new file mode 100755
index 0000000..5833595
--- /dev/null
+++ b/demo/demo-lognorm.R
@@ -0,0 +1,25 @@
+# Zelig 4 code:
+library(Zelig4)
+data(coalition)
+z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "lognorm",
+               data = coalition)
+summary(z.out)
+x.low <- setx(z.out, numst2 = 0)
+set.seed(42)
+s.out <- sim(z.out, x = x.low, num=10)
+summary(s.out)
+
+# Zelig 5 code:
+data(coalition)
+z5 <- zlognorm$new()
+z5$zelig(Surv(duration, ciep12) ~ fract + numst2, model = "lognorm",
+         data = coalition)
+z5
+z5$zelig.out
+z5$setx(numst2 = 0)
+set.seed(42)
+z5$sim(num=10)
+z5$sim.out
+mean(z5$sim.out$x$ev[[1]])
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-ls.R b/demo/demo-ls.R
new file mode 100755
index 0000000..a725531
--- /dev/null
+++ b/demo/demo-ls.R
@@ -0,0 +1,39 @@
+library(MASS)
+data(cars)
+
+# Zelig 4 code:
+library(Zelig4)
+z.out <- zelig(dist ~ speed, cars, model = "ls")
+summary(z.out)
+x.out <- setx(z.out, speed = 30)
+set.seed(42)
+s.out <- sim(z.out, x.out, num = 100)
+summary(s.out)
+
+# Zelig 5 code:
+z5 <- zls$new()
+z5$zelig(dist ~ speed, data = cars)
+z5
+z5$zelig.out
+z5$model.call
+z5$zelig.call
+z5$setx(sascxasx = 9879, speed = 30, sdjchbsdc = 87, kcsbc = 8787)
+z5$setx(sascxasx = 9879)
+z5$setx1(speed = 30)
+z5$setx()
+z5$setx.out
+z5
+set.seed(42)
+z5$sim(num=3)
+z5$sim.out
+z5
+z5$summarize()
+z5$cite()
+
+z5$setrange(speed = 30:32, speed = 19:23)
+z5$setx.out
+set.seed(42)
+z5$sim(num=3)
+z5$sim.out
+z5$summarize()
+z5
diff --git a/demo/demo-mlogit-bayes.R b/demo/demo-mlogit-bayes.R
new file mode 100644
index 0000000..1b903f2
--- /dev/null
+++ b/demo/demo-mlogit-bayes.R
@@ -0,0 +1,23 @@
+# Zelig 4 code:
+library(Zelig4)
+data(mexico)
+z.out <- Zelig4::zelig(vote88 ~ pristr + othcok + othsocok, model = "mlogit.bayes",
+                       data = mexico)
+summary(z.out)
+x.out <- Zelig4::setx(z.out)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+
+# Zelig 5 code:
+z5 <- zmlogitbayes$new()
+z5$zelig(vote88 ~ pristr + othcok + othsocok,
+         data = mexico, verbose = FALSE)
+z5
+z5$zelig.out
+z5$setx()
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
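+
+# A first-difference sketch, echoing the old mlogit.bayes demo: PRI
+# strength weak (pristr = 1) versus strong (pristr = 3).
+z5$setx(pristr = 1)
+z5$setx1(pristr = 3)
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()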
diff --git a/demo/demo-negbinom.R b/demo/demo-negbinom.R
new file mode 100755
index 0000000..5a71196
--- /dev/null
+++ b/demo/demo-negbinom.R
@@ -0,0 +1,24 @@
+library(MASS)
+
+# Zelig 4 code:
+library(Zelig4)
+data(sanction)
+z.out <- zelig(num ~ target + coop, model = "negbinom", data = sanction)
+summary(z.out)
+x.out <- setx(z.out)
+set.seed(42)
+s.out <- sim(z.out, x = x.out, num=100)
+summary(s.out)
+
+# Zelig 5 code:
+data(sanction)
+z5 <- znegbin$new()
+z5$zelig(num ~ target + coop, data = sanction)
+z5
+z5$setx()
+set.seed(42)
+z5$sim(num=100)
+statmat(z5$sim.out$x$ev[[1]])
+statlevel(z5$sim.out$x$pv[[1]], 100)
+z5$summarize()
+
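+# A first-difference sketch (the coop values are illustrative; sanction$coop
+# runs from 1 to 4):
+z5$setx(coop = 1)
+z5$setx1(coop = 4)
+set.seed(42)
+z5$sim(num = 100)
+z5$summarize()
+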
diff --git a/demo/demo-normal-bayes.R b/demo/demo-normal-bayes.R
new file mode 100644
index 0000000..d31b99a
--- /dev/null
+++ b/demo/demo-normal-bayes.R
@@ -0,0 +1,24 @@
+# Zelig 4 code:
+library(Zelig4)
+data(macro)
+z.out <- Zelig4::zelig(unem ~ gdp + capmob + trade, model = "normal.bayes",
+                       data = macro, verbose = TRUE)
+
+summary(z.out)
+x.out <- Zelig4::setx(z.out)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+
+# Zelig 5 code:
+z5 <- znormalbayes$new()
+z5$zelig(unem ~ gdp + capmob + trade,
+         data = macro, verbose = FALSE)
+z5
+z5$zelig.out
+z5$setx()
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-normal-gee.R b/demo/demo-normal-gee.R
new file mode 100755
index 0000000..5988914
--- /dev/null
+++ b/demo/demo-normal-gee.R
@@ -0,0 +1,27 @@
+# Zelig 4 code:
+library(Zelig4)
+data(coalition)
+coalition$cluster <- c(rep(c(1:62), 5), rep(c(63), 4))
+sorted.coalition <- coalition[order(coalition$cluster),]
+z.out <- Zelig4::zelig(duration ~ fract + numst2, model = "normal.gee", 
+                       id = "cluster", data = coalition)
+x.low <- Zelig4::setx(z.out, numst2 = 0)
+x.high <- Zelig4::setx(z.out, numst2 = 1)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.low, x1 = x.high)
+summary(s.out)
+
+# Zelig 5 code:
+set.seed(42)
+z5 <- znormalgee$new()
+z5$zelig(duration ~ fract + numst2, id = "cluster",
+         data = coalition, corstr = "exchangeable")
+z5
+z5$zelig.call
+z5$model.call
+z5$setx(numst2 = 0)
+z5$setx1(numst2 = 1)
+set.seed(42)
+z5$sim()
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-normal.R b/demo/demo-normal.R
new file mode 100755
index 0000000..03a81aa
--- /dev/null
+++ b/demo/demo-normal.R
@@ -0,0 +1,21 @@
+# Zelig 4 code:
+library(Zelig4)
+data(macro)
+z.out1 <- Zelig4::zelig(unem ~ gdp + capmob + trade, model = "normal",
+                data = macro)
+summary(z.out1)
+x.high <- Zelig4::setx(z.out1, trade = 50)
+set.seed(42)
+s.out1 <- Zelig4::sim(z.out1, x = x.high)
+summary(s.out1)
+
+# Zelig 5 code:
+data(macro)
+z5 <- znormal$new()
+z5$zelig(unem ~ gdp + capmob + trade, data=macro)
+z5
+z5$setx(trade = 50)
+set.seed(42)
+z5$sim()
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-oprobit-bayes.R b/demo/demo-oprobit-bayes.R
new file mode 100644
index 0000000..1079714
--- /dev/null
+++ b/demo/demo-oprobit-bayes.R
@@ -0,0 +1,26 @@
+# Zelig 4 code:
+library(Zelig4)
+data(sanction)
+sanction$ncost <- factor(sanction$ncost,
+                         ordered = TRUE,
+                         levels = c("net gain",
+                                    "little effect", "modest loss", "major loss"))
+z.out <- Zelig4::zelig(ncost ~ mil + coop, model = "oprobit.bayes", data = sanction,
+                        verbose = TRUE)
+
+summary(z.out)
+x.out <- Zelig4::setx(z.out)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+# Zelig 5 code:
+z5 <- zoprobitbayes$new()
+z5$zelig(ncost ~ mil + coop, data = sanction, verbose = FALSE)
+z5
+z5$zelig.out
+z5$setx()
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-poisson-bayes.R b/demo/demo-poisson-bayes.R
new file mode 100644
index 0000000..5479c3b
--- /dev/null
+++ b/demo/demo-poisson-bayes.R
@@ -0,0 +1,36 @@
+p <- read.csv("http://www.ats.ucla.edu/stat/data/poisson_sim.csv")
+p <- within(p, {
+  prog <- factor(prog, levels = 1:3,
+                 labels = c("General", "Academic", "Vocational"))
+  id <- factor(id)
+})
+head(p)
+set.seed(42)
+w <- runif(200)
+
+# Zelig 4 code:
+library(Zelig4)
+z.out <- Zelig4::zelig(num_awards ~ prog + math, data=p,
+                       model="poisson.bayes",
+                       weights=w)
+summary(z.out)
+x.out <- Zelig4::setx(z.out, math = 30)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+
+# Zelig 5 code:
+z5 <- zpoissonbayes$new()
+z5$zelig(num_awards ~ prog + math, data = p)
+z5
+z5$zelig.call
+z5$model.call
+z5$zelig.out$z.out[[1]]
+z5$setx()
+z5$setx(math = 30)
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
+
diff --git a/demo/demo-poisson-gee.R b/demo/demo-poisson-gee.R
new file mode 100755
index 0000000..a98b558
--- /dev/null
+++ b/demo/demo-poisson-gee.R
@@ -0,0 +1,27 @@
+# Zelig 4 code:
+library(Zelig4)
+data(sanction)
+sanction$cluster <- c(rep(c(1:15), 5), rep(c(16), 3))
+sorted.sanction <- sanction[order(sanction$cluster), ]
+z.out <- Zelig4::zelig(num ~ target + coop, model = "poisson.gee", id = "cluster",
+                       data = sorted.sanction, robust = FALSE, corstr = "exchangeable")
+summary(z.out)
+x.out <- Zelig4::setx(z.out)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x.out)
+summary(s.out)
+
+# Zelig 5 code:
+set.seed(42)
+z5 <- zpoissongee$new()
+z5$zelig(num ~ target + coop, id = "cluster",
+         data = sorted.sanction, corstr = "exchangeable")
+z5
+z5$zelig.call
+z5$model.call
+z5$setx()
+set.seed(42)
+z5$sim()
+z5$summarize()
+z5$cite()
+
diff --git a/demo/demo-poisson.R b/demo/demo-poisson.R
new file mode 100755
index 0000000..f6b0811
--- /dev/null
+++ b/demo/demo-poisson.R
@@ -0,0 +1,48 @@
+p <- read.csv("http://www.ats.ucla.edu/stat/data/poisson_sim.csv")
+p <- within(p, {
+  prog <- factor(prog, levels = 1:3,
+                 labels = c("General", "Academic", "Vocational"))
+  id <- factor(id)
+})
+head(p)
+set.seed(42)
+w <- runif(200)
+
+# Zelig 4 code:
+library(Zelig4)
+z.out <- Zelig4::zelig(num_awards ~ prog + math, data=p, model="poisson",
+                       weights=w)
+summary(z.out)
+x.out <- Zelig4::setx(z.out, math = 40)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+# Zelig 5 code:
+z5 <- zpoisson$new()
+z5$zelig(num_awards ~ prog + math, data = p,
+         weights = w)
+z5
+z5$zelig.call
+z5$model.call
+z5$zelig.out$z.out
+z5$setx(math = 40)
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
+
+z5 <- zpoisson$new()
+z5$zelig(num_awards ~ math, data = p, by = c("prog"))
+z5
+z5$zelig.call
+z5$model.call
+z5$zelig.out
+z5$zelig.out$z.out
+z5$setx(math = 40)
+set.seed(42)
+.self <- z5  # debugging aid: expose the object as .self, as inside reference-class methods
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
+
diff --git a/demo/demo-probit-bayes.R b/demo/demo-probit-bayes.R
new file mode 100644
index 0000000..508f5a4
--- /dev/null
+++ b/demo/demo-probit-bayes.R
@@ -0,0 +1,27 @@
+library(MASS)
+mydata <- read.csv("http://www.ats.ucla.edu/stat/data/binary.csv")
+mydata$rank <- factor(mydata$rank)
+
+# Zelig 4 code:
+library(Zelig4)
+z.out <- Zelig4::zelig(admit ~ gre + gpa, data = mydata,
+                       model="probit.bayes")
+summary(z.out)
+x.out <- Zelig4::setx(z.out, gpa = 1.3, gre = 4500, speed = 12)
+set.seed(42)
+s.out <- Zelig4::sim(z.out, x = x.out, num = 1000)
+summary(s.out)
+
+# Zelig 5 code:
+z5 <- zprobitbayes$new()
+z5$zelig(admit ~ gre + gpa, data = mydata)
+z5
+z5$zelig.out
+z5$setx(gpa = 1.3, gre = 4500, speed = 12)
+set.seed(42)
+z5$sim(num = 1000)
+z5$summarize()
+z5$cite()
+
+z.out <- zelig(admit ~ gre + gpa, data = mydata, model = "probit.bayes")
+z.out
diff --git a/demo/demo-probit-gee.R b/demo/demo-probit-gee.R
new file mode 100755
index 0000000..0d0d539
--- /dev/null
+++ b/demo/demo-probit-gee.R
@@ -0,0 +1,25 @@
+# Zelig 4 code:
+data(turnout)
+turnout$cluster <- rep(c(1:200), 10)
+sorted.turnout <- turnout[order(turnout$cluster), ]
+z.out1 <- Zelig4::zelig(vote ~ race + educate, model = "probit.gee", id = "cluster",
+                        data = sorted.turnout, corstr = "stat_M_dep",
+                        Mv = 3)
+x.out1 <- Zelig4::setx(z.out1)
+set.seed(42)
+s.out1 <- Zelig4::sim(z.out1, x = x.out1)
+summary(s.out1)
+
+# Zelig 5 code:
+z5 <- zprobitgee$new()
+z5$zelig(vote ~ race + educate, id = "cluster",
+         data = sorted.turnout, corstr = "unstructured")
+
+z5
+z5$zelig.call
+z5$model.call
+z5$setx()
+set.seed(42)
+z5$sim()
+z5$summarize()
+z5$cite()
diff --git a/demo/demo-probit.R b/demo/demo-probit.R
new file mode 100755
index 0000000..416586d
--- /dev/null
+++ b/demo/demo-probit.R
@@ -0,0 +1,27 @@
+mydata <- read.csv("http://www.ats.ucla.edu/stat/data/binary.csv")
+mydata$rank <- factor(mydata$rank)
+
+# Zelig 4 code:
+library(Zelig4)
+# epsilon does not seem to be taken into account
+z.out <- zelig(admit ~ gre + gpa, data = mydata, model="probit",
+               epsilon=.0001)
+summary(z.out)
+x.out <- setx(z.out, gpa=1.3, gre=4500, speed=12)
+set.seed(42)
+s.out <- sim(z.out, x = x.out, num=1000)
+summary(s.out)
+
+# Zelig 5 code:
+z5 <- zprobit$new()
+z5$zelig(admit ~ gre + gpa, data = mydata,
+         epsilon=.0001)
+# removed epsilon to compare with Zelig 4
+z5$zelig(admit ~ gre + gpa, data = mydata)
+z5
+z5$setx(gpa=1.3, gre=4500, speed=12)
+set.seed(42)
+z5$sim(num=1000)
+z5$summarize()
+z5$cite()
+
diff --git a/demo/demo-quantile.R b/demo/demo-quantile.R
new file mode 100755
index 0000000..b9eac15
--- /dev/null
+++ b/demo/demo-quantile.R
@@ -0,0 +1,86 @@
+library(AER)       # provides the CPS1988 data set
+library(quantreg)  # provides rq(), called directly below
+data(CPS1988)
+z5 <- zquantile$new()
+z5$zelig(log(wage) ~ experience + I(experience^2) + education, data = CPS1988, tau = 0.75)
+z5
+z5$zelig.out
+z5$setx(education = 15)
+z5$setx1(education = 10)
+z5$sim(num = 10)
+z5$summarize()
+z5$graph()
+
+data(stackloss)
+z.out1 <- zquantile$new()
+z.out1$zelig(stack.loss ~ Air.Flow + Water.Temp + Acid.Conc.,
+             data = stackloss, tau = 0.5)
+
+z.out1 <- zelig(stack.loss ~ Air.Flow + Water.Temp + Acid.Conc.,
+                model = "rq", data = stackloss,
+                tau = 0.5)
+
+quantreg::rq(formula = log(wage) ~ experience + I(experience^2) + 
+               education, data = CPS1988, tau = 0.75)
+z.out1
+summary(z.out1$zelig.out$z.out[[1]])
+x.high <- z.out1$setx(Water.Temp = quantile(stackloss$Water.Temp, 0.8))
+x.low <- z.out1$setx1(Water.Temp = quantile(stackloss$Water.Temp, 0.2))
+z.out1$sim(num = 10000)
+z.out1$summarize()
+
+data(macro)
+z.out2 <- zquantile$new()
+z.out2$zelig(unem ~ gdp + trade + capmob + as.factor(country), tau = 0.5, data = macro)
+z.out2$setx(country = "United States")
+z.out2$setx1(country = "Japan")
+z.out2$sim()
+z.out2$summarize()
+
+data(engel)
+z.out3 <- zquantile$new()
+z.out3$zelig(foodexp ~ income, tau = seq(0.1, 0.3, by = 0.1), data = engel)
+z.out3$setx()
+z.out3$sim()
+
+# r <- .self$data %>% 
+#   group_by(tau) %>%
+#   do(model = rq(foodexp ~ income, data = ., tau = .$tau[1]))
+
+library(Zelig4)
+data(engel)
+z.out <- zelig(foodexp ~ income, tau = seq(0.1, 0.9, by = 0.1), data = engel, model = "rq")
+x.out <- setx(z.out)
+s.out <- sim(z.out, x = x.out)
+
+data(engel)
+z.out4 <- zquantile$new()
+z.out4$zelig(foodexp ~ income, data = engel)
+z.out4$setx()
+z.out4$sim()
+
+
+summary(z.out3$zelig.out$z.out[[1]])
+plot(summary(z.out3$zelig.out$z.out[[1]]))
+
+z.out3$setx(income = quantile(engel$income, 0.25))
+z.out3$setx1(income = quantile(engel$income, 0.75))
+
+z.out3$sim()
+
+fit <- rq(foodexp ~ income, tau = seq(0.1, 0.9, by = 0.1), data = engel)
+plot(summary(fit))
+
+
+#####
+# data(engel)
+# z.out3 <- zelig(foodexp ~ income, model = "quantile",
+#                 tau = seq(0.1,0.9,by=0.1), data = engel)
+# summary(z.out3)
+# plot(summary(z.out3))
+# plot(z.out3)
+# 
+# x.bottom <- setx(z.out3, income=quantile(engel$income, 0.25))
+# x.top <- setx(z.out3, income=quantile(engel$income, 0.75))
+# 
+# s.out3 <- sim(z.out3, x = x.bottom, x1 = x.top)
+# 
+# summary(s.out3)
diff --git a/demo/demo-range.R b/demo/demo-range.R
new file mode 100755
index 0000000..fabfd8c
--- /dev/null
+++ b/demo/demo-range.R
@@ -0,0 +1,61 @@
+library(Zelig4)
+
+data(turnout)
+
+z.out <- zelig(vote ~ race + educate + age + I(age^2) + income,
+               model = "ls", data = turnout)
+summary(z.out)
+x.low <- setx(z.out, educate = 12, age = 18, race = "others")
+x.low
+x.high <- setx(z.out, educate = 13, age = 18, race = "others")
+x.high
+set.seed(42)
+s.out <- sim(z.out, x = x.low, x1 = x.high, num = 5)
+summary(s.out)
+plot(s.out)
+
+z.out <- zelig(vote ~ race + educate + age + I(age^2) + income,
+               model = "ls", data = turnout)
+summary(z.out)
+x.low <- setx(z.out, educate = 12:14, age = 18:22)
+x.low
+x.high <- setx(z.out, educate = 12:14, age = 18:22)
+x.high
+set.seed(42)
+s.out <- sim(z.out, x = x.low, x1 = x.high, num = 5)
+summary(s.out)
+
+## Zelig 5
+z5 <- zls$new()
+z5$zelig(vote ~ race + educate + age + I(age^2) + income, data = turnout)
+z5$zelig.out
+z5$setx(educate = 12, age = 18, race = "others")
+z5$setx.out
+z5$setx1(educate = 13, age = 18, race = "others")
+set.seed(42)
+z5$sim(num = 5)
+z5$summarize()
+
+
+s <- list(educate = 12, age = 18, race = "others")
+
+z5 <- zls$new()
+z5$zelig(vote ~ race + educate + age + I(age^2) + income, data = turnout)
+z5
+z5$setrange(educate = 12:13, age = 18)
+z5$setx.out
+set.seed(42)
+z5$sim(num = 5)
+z5$summarize()
+
+z5$setrange(educate = 12:13)
+z5$setx.out
+
+s <- list(educate = c(10, 15))
+expand.grid(s)
+
+z5$setrange(educate = c(10, 15), age = c(10, 18))
+
+s <- list(educate = 12:13, age = 18)
+expand.grid(s)
+
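+# As the expand.grid() calls above illustrate, setrange() crosses the
+# supplied values; a minimal sketch simulating over the resulting grid
+# (the age values are chosen for illustration only):
+z5$setrange(educate = 12:13, age = c(18, 65))
+set.seed(42)
+z5$sim(num = 5)
+z5$summarize()
+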
diff --git a/demo/demo-relogit.R b/demo/demo-relogit.R
new file mode 100755
index 0000000..c31994e
--- /dev/null
+++ b/demo/demo-relogit.R
@@ -0,0 +1,87 @@
+## Example 1
+
+# Zelig 4 code:
+library(Zelig4)
+data(mid)
+z.out1 <- Zelig4::zelig(conflict ~ major + contig + power + maxdem
+                + mindem + years, data = mid, model = "relogit", tau = 1042 / 303772)
+summary(z.out1)
+x.out1 <- Zelig4::setx(z.out1)
+set.seed(42)
+s.out1 <- Zelig4::sim(z.out1, x = x.out1, num = 1000)
+summary(s.out1)
+
+# Zelig 5 code:
+z5 <- zrelogit$new()
+z5$zelig(conflict ~ major + contig + power + maxdem
+         + mindem + years, data = mid, tau = 1042/303772)
+z5
+z5$setx()
+z5$setx.out
+set.seed(42)
+z5$sim(num=1000)
+z5$summarize()
+z5$cite()
+plot(z5)
+
+z.out <- zelig(conflict ~ major + contig + power + maxdem + mindem + years, data = mid,
+               tau = 1042/303772,
+               model = "relogit")
+
+## Example 2
+
+# Zelig 4 code:
+z.out2 <- zelig(conflict ~ major + contig + power + maxdem +
+                  mindem + years, data = mid, model = "relogit", tau = 1042/303772,
+                case.control = "weighting", robust = TRUE)
+summary(z.out2)
+x.out2 <- setx(z.out2)
+set.seed(42)
+s.out2 <- sim(z.out2, x = x.out2, num=1000)
+summary(s.out2)
+
+# Zelig 5 code:
+z5 <- zrelogit$new()
+z5$zelig(conflict ~ major + contig + power + maxdem +
+           mindem + years, data = mid, tau = 1042/303772,
+         case.control = "weighting")
+z5
+z5$setx()
+set.seed(42)
+z5$sim(num=1000)
+z5$summarize()
+z5$cite()
+
+## Example 3: broken in Zelig 4
+
+# # Zelig 4 code:
+# 
+# z.out2 <- zelig(conflict ~ major + contig + power + maxdem
+#                     + mindem + years, data = mid, model = "relogit", tau = c(0.002, 0.005))
+# summary(z.out2)
+# x.out2 <- setx(z.out2)
+# s.out <- sim(z.out2, x = x.out2)
+# summary(s.out2)
+# 
+# 
+# z.out3 <- zelig(conflict ~ major + contig + power + maxdem
+#                     + mindem + years, data = mid, model = "relogit", tau = c(0.002, 0.005))
+# summary(z.out3)
+# x.out3 <- setx(z.out3)
+# s.out3 <- sim(z.out3, x = x.out3)
+# summary(s.out3)
+# 
+# # Zelig 5 code:
+# z5 <- zrelogit$new()
+# z5$zelig(conflict ~ major + contig + power + maxdem
+#          + mindem + years, data = mid, tau = c(0.002, 0.005))
+# z5
+# z5$setx()
+# set.seed(42)
+# z5$sim(num=1000)
+# z5$summarize()
+# z5$cite()
+
+# r <- relogit(conflict ~ major + contig + power + maxdem + mindem + years,
+#              data = mid, tau = 1042/303772)
+# rs
diff --git a/demo/demo-roc.R b/demo/demo-roc.R
new file mode 100644
index 0000000..c39379b
--- /dev/null
+++ b/demo/demo-roc.R
@@ -0,0 +1,5 @@
+data(turnout)
+z.out1 <- zelig(vote ~ race + educate + age, model = "logit",
+                data = turnout)
+z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
+rocplot(z.out1, z.out2)
diff --git a/demo/demo-scope.R b/demo/demo-scope.R
new file mode 100755
index 0000000..2c7b475
--- /dev/null
+++ b/demo/demo-scope.R
@@ -0,0 +1,61 @@
+z5 <- zls$new()
+z5$zelig(dist ~ speed, data = cars)
+z5
+
+z.out <- Zelig::zelig(dist ~ speed, model = "ls", data = cars)
+print(z.out)
+z.out$zelig.call
+
+
+## As expected
+f <- function() {
+  z5 <- zls$new()
+  z5$zelig(dist ~ speed, data = cars)
+  print(z5)
+  z5$setx(speed = 3)
+  z5$sim()
+  z5$summarize()
+  return(z5)
+}
+
+Z <- f()
+print(Z)
+Z$summarize()
+
+g <- function() {
+  z.out <- Zelig::zelig(dist ~ speed, model = "ls", data = cars)
+  print(z.out)
+  Zelig::setx(z.out, speed = 3)
+  Zelig::sim(z.out)
+  summary(z.out)
+  return(z.out)
+}
+
+ZZ <- g()
+ZZ
+g()$summarize()
+
+data(cars)
+x <- cars
+zelig(dist ~ speed, data = x, model = "ls")
+
+
+rm(list = ls())
+
+test_function <- function() {
+  data(cars)
+  x <- cars
+  zelig(dist ~ speed, data = x, model = "ls")
+}
+
+test_function()
+
+test_function_refclasses <- function() {
+  data(cars)
+  x <- cars
+  zref <- zls$new()
+  zref$zelig(dist ~ speed, data = x)
+  return(zref)
+}
+
+test_function_refclasses()
diff --git a/demo/demo-setx-2.R b/demo/demo-setx-2.R
new file mode 100755
index 0000000..6d4b573
--- /dev/null
+++ b/demo/demo-setx-2.R
@@ -0,0 +1,48 @@
+data(macro)
+
+## Zelig 4
+z.out <- zelig(unem ~ gdp + trade + capmob +
+                 country,
+               model = "ls", data = macro)
+summary(z.out)
+
+x.US <- setx(z.out, country = "United States")
+x.US
+x.Japan <- setx(z.out, country = "Japan")
+x.Japan
+
+s.out <- sim(z.out, x = x.US, x1 = x.Japan)
+summary(s.out)
+
+## Zelig 5
+z5 <- zls$new()
+z5$zelig(unem ~ gdp + trade + capmob +
+           country, data = macro)
+z5
+z5$setx(country = "United States", unem = 2)
+z5$setx.out
+
+z5$setx()
+z5$setx.out
+
+z5$setx1(country = "Japan")
+z5$setx.out$x1
+
+z5$sim()
+z5$summarize()
+
+z5$zelig(unem ~ ., data = macro)
+z5
+
+z5 <- zls$new()
+z5$zelig(unem ~ gdp + exp(trade) + capmob +
+           as.factor(country), data = macro)
+z5$zelig.out
+# model.matrix(z5$zelig.out)
+z5$setx(country = "United States", unem = 2)
+z5$setx.out
+
+fit <- lm(unem ~ gdp + exp(trade) + capmob +
+           as.factor(country), data = macro)
+fit
+model.matrix(fit)
diff --git a/demo/demo-setx-fn.R b/demo/demo-setx-fn.R
new file mode 100644
index 0000000..8b4a133
--- /dev/null
+++ b/demo/demo-setx-fn.R
@@ -0,0 +1,29 @@
+##----- See thread: https://groups.google.com/forum/#!topic/zelig-statistical-software/1ohCNA5S_0A
+
+library(Zelig)
+z5 <- zls$new()
+z5$zelig(Fertility ~ Agriculture + Education, data = swiss)
+z5$setx(Education = 5, fn = list(numeric = Mode))
+# setx:
+#   (Intercept) Agriculture Education
+# 1           1        84.6         5
+z5$setx(Education = 5, fn = list(numeric = mode)) # same: function 'mode' added for backward compatibility
+z5$setx(Education = 5, fn = list(numeric = median))
+# setx:
+#   (Intercept) Agriculture Education
+# 1           1        54.1         5
+z5$setx(Education = 5, fn = list(numeric = function(x) 1, other = function(x) 2))
+# setx:
+#   (Intercept) Agriculture Education
+# 1           1           1         5
+z5$setx1(Education = 10, fn = list(numeric = mode))
+# setx1:
+#   (Intercept) Agriculture Education
+# 1           1       50.66        10
+z5$setx(Education = 5)
+z5$setx1(Education = 10)
+# model summary
+summary(z5)
+z5$sim()
+# model summary
+summary(z5)
diff --git a/demo/demo-setx.R b/demo/demo-setx.R
new file mode 100755
index 0000000..c9fec6c
--- /dev/null
+++ b/demo/demo-setx.R
@@ -0,0 +1,19 @@
+data(cars)
+
+z5 <- zls$new()
+z5$zelig(dist ~ speed, data = cars)
+z5
+z5$setx(speed = 30)
+z5$setx.out$x
+
+z5$setx1(speed = 40)
+z5$setx.out$x1
+
+# Unrecognized variable names appear to be silently ignored:
+z5$setx1(xssx = 0, jbcjdhsb = 987)
+z5$setx.out$x1
+
+z5$setx1(scs = 98457, speed = 50, dcksj = 34)
+z5$setx.out$x1
+
+z5$sim(num=10)
+z5$summarize()
diff --git a/demo/demo-show.R b/demo/demo-show.R
new file mode 100755
index 0000000..c588fd8
--- /dev/null
+++ b/demo/demo-show.R
@@ -0,0 +1,8 @@
+z5 <- zls$new()
+print(z5)
+z5$zelig(dist ~ speed, data = cars)
+print(z5)
+z5$setx()
+print(z5)
+z5$sim()
+print(z5)
\ No newline at end of file
diff --git a/demo/demo-signif-stars.R b/demo/demo-signif-stars.R
new file mode 100644
index 0000000..da6a680
--- /dev/null
+++ b/demo/demo-signif-stars.R
@@ -0,0 +1,4 @@
+data(cars)
+z.out <- zelig(dist ~ speed, cars, model = "ls")
+summary(z.out)
+summary(z.out, signif.stars = TRUE)
diff --git a/demo/demo-strata.R b/demo/demo-strata.R
new file mode 100755
index 0000000..a068171
--- /dev/null
+++ b/demo/demo-strata.R
@@ -0,0 +1,49 @@
+library(Zelig4)
+data(turnout)
+turnout$dedu <- ifelse(turnout$educate > 15, 1, 0)
+
+z.out <- zelig(vote ~ age , data = turnout, model = "ls",
+               by = c("race", "dedu"))
+summary(z.out)
+x.out <- setx(z.out, age = 18)
+x1.out <- setx(z.out, age = 20)
+set.seed(42)
+s.out <- sim(z.out, x.out, x1.out)
+summary(s.out)
+
+z5 <- zls$new()
+z5$zelig(vote ~ age , data = turnout, by = c("race", "dedu"))
+z5$zelig.out
+z5
+z5$setx(age = 18)
+z5$setx1(age=20)
+z5$setrange(age = 18:20)
+z5$setx.out
+z5$sim(10)
+z5$sim.out
+z5$summarize()
+
+z5 <- zls$new()
+z5$zelig(vote ~ age + income , data = turnout)
+z5$zelig.out
+z5
+z5$setx(age = 18)
+z5$setx1(age = 20)
+z5$setrange(age = 18:20)
+z5$setx.out
+z5$sim(10)
+z5$sim.out
+z5$summarize()
+
+by(turnout,  factor(turnout$race),
+   function(x) lm(vote ~ age , data = x))
+
+by(turnout,  factor(turnout$race),
+   function(x) Zelig::zelig(vote ~ age , data = x, model = "ls"))
+
+by(turnout,  factor(turnout$race),
+   function(x) {
+     z.out <<- zelig(vote ~ age , data = x, model = "ls")
+     x.out <<- setx(z.out, age = 30)
+     s.out <<- sim(z.out, x.out)
+     })
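+
+# The by() calls above replicate by hand what the by= argument does; a
+# minimal equivalent sketch, assuming the Zelig 5 wrapper forwards by=:
+z.by <- Zelig::zelig(vote ~ age, data = turnout, model = "ls", by = "race")
+summary(z.by)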
diff --git a/demo/demo-tobit.R b/demo/demo-tobit.R
new file mode 100755
index 0000000..4bc6edb
--- /dev/null
+++ b/demo/demo-tobit.R
@@ -0,0 +1,28 @@
+# Zelig 4 code:
+library(Zelig4)
+data(tobin)
+z.out <- zelig(durable ~ age + quant, model = "tobit", data = tobin)
+summary(z.out)
+x.out <- setx(z.out)
+set.seed(42)
+s.out1 <- sim(z.out, x = x.out)
+summary(s.out1)
+
+# Zelig 5 code:
+data(tobin)
+z5 <- ztobit$new()
+z5$zelig(durable ~ age + quant, data = tobin, below = 1, above = 20)
+z5
+z5$setx()
+set.seed(42)
+z5$sim(num=10000)
+statmat(z5$sim.out$x$ev[[1]])
+statmat(z5$sim.out$x$pv[[1]])
+z5$summarize()
+z5$cite()
+
+# library(AER)  
+# fit <- tobit(durable ~ age + quant, data = tobin, left = 1, right = 2)
+# summary(fit)
+# fit$coefficients
+# coef(fit)
diff --git a/demo/demo-tobitbayes.R b/demo/demo-tobitbayes.R
new file mode 100644
index 0000000..2438a7d
--- /dev/null
+++ b/demo/demo-tobitbayes.R
@@ -0,0 +1,9 @@
+data(tobin)
+
+z5 <- ztobitbayes$new()
+z5$zelig(durable ~ age + quant, data = tobin, below = 1, above = 20, verbose = FALSE)
+z5
+z5$setx()
+set.seed(42)
+z5$sim()
+z5$summarize()
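+
+# A sketch of MCMC convergence checks, mirroring the factor.bayes demo;
+# assumes the fitted MCMC object sits in zelig.out$z.out[[1]] as there.
+geweke.diag(z5$zelig.out$z.out[[1]])
+heidel.diag(z5$zelig.out$z.out[[1]])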
diff --git a/demo/demo-weibull.R b/demo/demo-weibull.R
new file mode 100644
index 0000000..7b0892f
--- /dev/null
+++ b/demo/demo-weibull.R
@@ -0,0 +1,19 @@
+data(coalition)
+
+z5 <- zweibull$new()
+z5
+z5$zelig(Surv(duration, ciep12) ~ fract + numst2, data = coalition)
+z5
+z5$setx() # works
+z5$setx(numst2 = 0)
+
+z.out <- z5$zelig.out$z.out[[1]]
+z.out
+
+# z5$setx(numst2 = 0) # fails
+# Error in terms(lm(formula, data), "predvars") : 
+#   error in evaluating the argument 'x' in selecting a method for function 'terms':
+#   Error in Ops.Surv(y, z$residuals) : Invalid operation on a survival time
+z5$sim()
+z5
+z5$graph()
diff --git a/demo/demo-wrappers.R b/demo/demo-wrappers.R
new file mode 100755
index 0000000..5b7f8b8
--- /dev/null
+++ b/demo/demo-wrappers.R
@@ -0,0 +1,18 @@
+## Zelig 5
+library(Zelig)
+z.out5 <- zelig(dist ~ speed, model = "ls", data = cars)
+print(z.out5)
+summary(z.out5)
+x.out5 <- Zelig::setx(z.out5, speed=30)
+x1.out5 <- Zelig::setx(z.out5, speed = 50)
+s.out5 <- Zelig::sim(z.out5, x.out5, x1.out5, num = 1000)
+print(s.out5)
+summary(s.out5)
+
+## Zelig 4
+library(Zelig4)
+z.out4 <- Zelig4::zelig(dist ~ speed, model = "ls", data = cars)
+x.out4 <- Zelig4::setx(z.out4, speed = 30)
+x1.out4 <- Zelig4::setx(z.out4, speed = 50)
+s.out4 <- Zelig4::sim(z.out4, x.out4, x1.out4, num = 1000)
+summary(s.out4)
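+
+# A sketch comparing the two graphically: plot() on sim output follows
+# the Zelig 4 demos; plot() on the Zelig 5 object follows demo-relogit.
+plot(s.out4)
+plot(z.out5)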
diff --git a/demo/demo-zip.R b/demo/demo-zip.R
new file mode 100755
index 0000000..cc94920
--- /dev/null
+++ b/demo/demo-zip.R
@@ -0,0 +1,56 @@
+library(pscl)
+library(MASS)  # for glm.nb() below
+
+source(file.path("..", "R", "utils.R"))
+source(file.path("..", "R", "model-zelig.R"))
+source(file.path("..", "R", "model-zip.R"))
+
+
+data("bioChemists", package = "pscl")
+
+## without inflation
+## ("art ~ ." is "art ~ fem + mar + kid5 + phd + ment")
+fm_pois <- glm(art ~ ., data = bioChemists, family = poisson)
+fm_qpois <- glm(art ~ ., data = bioChemists, family = quasipoisson)
+fm_nb <- glm.nb(art ~ ., data = bioChemists)
+
+## with simple inflation (no regressors for zero component)
+fm_zip <- zeroinfl(art ~ . | 1, data = bioChemists)
+fm_zip2 <- zeroinfl(art ~ . | ., data = bioChemists)
+summary(fm_zip2)
+
+z5 <- zzip$new()
+z5$zelig(art ~ phd + ment | ., data = bioChemists)
+z5$zelig(art ~ . - phd - ment, data = bioChemists)
+z5
+.self <- z5  # debugging aid: expose the object as .self, as inside reference-class methods
+z5$setx(phd = 3, ment = 8)
+z5$setx.out
+set.seed(42)
+z5$sim(num=100)
+z5$summarize()
+
+library(Zelig4)
+z.out <- zelig(art ~ . - phd - ment, data = bioChemists, model = "ls")
+summary(z.out)
+x.out <- setx(z.out, phd = 3, ment = 8)
+x.out
+set.seed(42)
+s.out <- sim(z.out, x.out, num = 100)
+summary(s.out)
+
+x.out <- setx(z.out)
+x.out
+set.seed(42)
+s.out <- sim(z.out, x.out, num = 100)
+s.out
+
+# z5$sim()
+
+library(Zelig4)
+z.out <- zelig(art ~ phd * ment , bioChemists, model = "ls")
+summary(z.out)
+x.out <- setx(z.out)
+x.out
+set.seed(42)
+s.out <- sim(z.out, x.out, num = 100)
+summary(s.out)
diff --git a/demo/exp.R b/demo/exp.R
deleted file mode 100644
index 63535b5..0000000
--- a/demo/exp.R
+++ /dev/null
@@ -1,29 +0,0 @@
-# exp
-# exp
-# exp
-
-# Fit the statistical model
-
-data(coalition)
-
-z.out <- zelig(Surv(duration, ciep12) ~ invest + polar + numst2 + crisis, model = "exp", data = coalition)
-
-user.prompt()
-
-# Set explanatory variables
-
-x.low<- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-user.prompt()
-
-# Simulate quantities of interest
-
-s.out <- sim(z.out, x = x.low, x1 = x.high, num = 10)
-summary(s.out)
-
-user.prompt()
-
-# Plot simulated results
-
-plot(s.out)
diff --git a/demo/factor.bayes.R b/demo/factor.bayes.R
deleted file mode 100644
index 84d841d..0000000
--- a/demo/factor.bayes.R
+++ /dev/null
@@ -1,35 +0,0 @@
-## Attaching the example dataset:
-data(swiss)
-names(swiss) <- c("Fert","Agr","Exam","Educ","Cath","InfMort")
-
-user.prompt()
-
-## Estimating the model using MCMCfactanal:
-z.out <- zelig(cbind(Agr,Exam,Educ,Cath,InfMort)~NULL, 
-	       model="factor.bayes",
-               data=swiss, factors=2,
-               lambda.constraints=list(Exam=list(1,"+"),
-                                 Exam=list(2,"-"), Educ=c(2,0),
-                                 InfMort=c(1,0)),
-               verbose=TRUE, a0=1, b0=0.15,
-               burnin=5000, mcmc=50000)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$result$coefficients)
-user.prompt()
-heidel.diag(z.out$result$coefficients)
-user.prompt()
-raftery.diag(z.out$result$coefficients)
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-
diff --git a/demo/gamma.R b/demo/gamma.R
deleted file mode 100644
index 83c7a68..0000000
--- a/demo/gamma.R
+++ /dev/null
@@ -1,30 +0,0 @@
-data(coalition)
-
-# Fit the statistical model
-
-z.out <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-##  with numst2 set to the vector 0 = no crisis, 1 = crisis. 
-
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-
-##  Simulating draws using the default bootstrap method.
-
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-
-
-# Summary of fitted model
-
-summary(z.out)
-
-# Summary of simulated quantities of interest
-
-summary(s.out)
-
-# Plot of simulated quantities of interest
-
-plot(s.out)
diff --git a/demo/gamma.gee.R b/demo/gamma.gee.R
deleted file mode 100644
index cb7e5b9..0000000
--- a/demo/gamma.gee.R
+++ /dev/null
@@ -1,43 +0,0 @@
-#####  Example 1: Basic Example #####
-
-# Attach sample data and variable names:  
-data(coalition)
-
-#  Variable identifying clusters
-coalition$cluster <- c(rep(c(1:62),5),rep(c(63),4))
-
-# Sorting by cluster
-sorted.coalition <- coalition[order(coalition$cluster),]
-
-# Estimate model and present a summary:
-user.prompt()
-z.out <- zelig(duration ~ fract + numst2, model = "gamma.gee", id = "cluster", data = sorted.coalition, robust=TRUE, corstr="exchangeable")
-user.prompt()
-summary(z.out)
-
-#  Setting the explanatory variables at their default values
-#  (mode for factor variables and mean for non-factor variables),
-#  with numst2 set to the vector 0 = no crisis, 1 = crisis. 
-user.prompt()
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-
-# Simulate quantities of interest
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-summary(s.out)
-
-# Generate a plot of quantities of interest:
-user.prompt()
-plot(s.out)
-
diff --git a/demo/gamma.survey.R b/demo/gamma.survey.R
deleted file mode 100644
index 1563851..0000000
--- a/demo/gamma.survey.R
+++ /dev/null
@@ -1,100 +0,0 @@
-#####  Example 1: User has Existing Sample Weights #####
-
-# Attach sample data and variable names:  
-data(api)
-
-# In this example, we will estimate a model using 
-# the percentages of students who receive subsidized 
-# lunch and an indicator for whether schooling is 
-# year-round to predict California public schools' 
-# academic performance index scores:
-
-z.out1 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey",  
-  weights=~pw, data = apistrat)
-summary(z.out1)
-
-# Set explanatory variables to their default (mean/mode) values, and set
-# a high (80th percentile) and low (20th percentile) value for "meals,"
-# the percentage of students who receive subsidized meals:
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-# Generate first differences for the effect of high versus low "meals" 
-# on academic performance:
-
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-
-# Plot the simulated quantities of interest:
-
-plot(s.out1)
-
-
-
-####  Example 2: User has Details about Complex Survey Design  ####
-####  (but not sample weights) 					   ####
-
-# Suppose that the survey house that provided
-# the dataset excluded probability weights 
-# but made other details about the survey
-# design available.  We can still estimate 
-# a model without probability weights by instead
-# supplying variables that identify the stratum
-# and/or cluster from which each observation was
-# selected, along with the size of the finite
-# population from which each observation was sampled.
-
-z.out2 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey",  
-  strata=~stype, fpc=~fpc, data = apistrat)
-summary(z.out2)
-
-# Note that these results are identical to the results obtained
-# when pre-existing sampling weights were used.  When sampling 
-# weights are omitted, Zelig estimates them automatically for 
-# "gamma.survey" models based on the user-defined description 
-# of sampling designs.  If no description is present, the default 
-# assumption is equal probability sampling.
-# 
-# setx() and sim() can then be run on z.out2 in the same fashion 
-# described in Example 1.
-
-
-
-#####  Example 3: User has Replicate Weights #####
-
-# Suppose that the survey house that published 
-# these data withheld details about the survey 
-# design and instead published replication weights
-
-# For the purpose of illustration, create a set of
-# jk1 replicate weights
-
-jk1reps <- jk1weights(psu=apistrat$dnum)
-
-# Estimate the model regressing api00 on the "meals" 
-# "yr.rnd" variables. 
-
-z.out3 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey", 
-		data = apistrat, repweights=jk1reps$weights,
-		type="JK1")
-summary(z.out3)
-
-# Set the explanatory variable "meals" at high and low values
-
-x.low <- setx(z.out3, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out3, meals= quantile(apistrat$meals, 0.8))
-
-# Generate first differences for the effect of the high
-# versus low concentrations of poverty on school performance
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-summary(s.out3)
-
-# Plot the simulated quantities of interest:
-
-plot(s.out3)
-
-#### The user should also refer to the gamma model demo, since  ####
-#### gamma.survey models can take many of the same options as   ####
-#### gamma models. 		 					    ####
\ No newline at end of file
diff --git a/demo/logit.R b/demo/logit.R
deleted file mode 100644
index c9952ef..0000000
--- a/demo/logit.R
+++ /dev/null
@@ -1,54 +0,0 @@
-# Attach the data frame
-data(turnout)
-
-##  Generating empirical estimates:
-
-z.out1 <- zelig(vote ~ age + race, model = "logit", data = turnout)
-
-##  Using setx to generate baseline and alternative values for the
-##  explanatory variables.
-
-x.out1 <- setx(z.out1, age = 36, race = "white")
-x.out1
-
-##  Simulating quantities of interest (predicted probabilities, risk
-##  ratios, and risk differences):
-
-s.out1 <- sim(z.out1, x = x.out1)
-
-# Summary of fitted statistical model
-
-summary(z.out1)
-
-# Summary of simulations of quantities of interest
-
-summary(s.out1)
-
-# Plot simulations of quantities of interest
-
-plot(s.out1)
-
-##  Example 2: First Differences
-
-# Fit the statistical model
-
-z.out2 <-  zelig(vote ~ race + educate, model = "logit", data = turnout)
-
-# Set alternate values
-
-x.high <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.25))
-
-s.out2 <- sim(z.out2, x = x.high, x1 = x.low)
-
-# Summary of the fitted model
-
-summary(z.out2)
-
-# Summary of the simulated quantities of interest
-
-summary(s.out2)
-
-# Plot of the simulated quantities of interest
-
-plot(s.out2)
diff --git a/demo/logit.bayes.R b/demo/logit.bayes.R
deleted file mode 100644
index 73b65c6..0000000
--- a/demo/logit.bayes.R
+++ /dev/null
@@ -1,57 +0,0 @@
-## Attaching the example dataset:
-data(turnout)
-
-## Estimating the model using MCMClogit:
-z.out <- zelig(vote ~ race + educate, model = "logit.bayes",
-                  data = turnout, verbose=TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$result$coefficients)
-user.prompt()
-heidel.diag(z.out$result$coefficients)
-user.prompt()
-raftery.diag(z.out$result$coefficients)
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting education to its low (25th percentile) and
-## high (75th percentile) values, with all other variables
-## held at their default values.
-x.high <- setx(z.out, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out, educate = quantile(turnout$educate, prob = 0.25))
-user.prompt()
-
-## Estimating the first difference for the effect of
-## high versus low education on the probability of voting:
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
diff --git a/demo/logit.gee.R b/demo/logit.gee.R
deleted file mode 100644
index 26c6770..0000000
--- a/demo/logit.gee.R
+++ /dev/null
@@ -1,111 +0,0 @@
-##  Attaching the sample turnout dataset:
-data(turnout)
-
-##  Variable identifying clusters
-turnout$cluster <- rep(c(1:200),10)
-
-## Sorting by cluster
-sorted.turnout <- turnout[order(turnout$cluster),]
-
-#####  Example 1:  Simple Example with Stationary 3 Dependence
-
-##  Generating empirical estimates:
-user.prompt()
-z.out1 <- zelig(vote ~ race + educate, model = "logit.gee", id = "cluster", 
-	data = sorted.turnout, robust = T, corstr = "stat_M_dep", Mv=3)
-user.prompt()
-##  Viewing the regression output:
-summary(z.out1)
-
-##  Using setx to generate baseline and alternative values for the
-##  explanatory variables.  
-user.prompt()
-x.out1 <- setx(z.out1)
-
-##  Simulating quantities of interest: 
-user.prompt()
-s.out1 <- sim(z.out1, x = x.out1)
-user.prompt()
-## Summarizing the simulated quantities of interest:
-summary(s.out1)
-
-## Diagnostic plot of the s.out:
-user.prompt()
-plot(s.out1)
-
-##  Example 2: First Differences
-
-user.prompt()
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-
-user.prompt()
-s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
-user.prompt()
-summary(s.out2)
-user.prompt()
-plot(s.out2)
-
-#####  Example 3:  Example with Fixed Correlation Structure
-
-##  User-defined correlation structure
-user.prompt()
-corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
-diag(corr.mat) <- 1 
-
-##  Generating empirical estimates:
-user.prompt()
-z.out2 <- zelig(vote ~ race + educate, model = "logit.gee", id = "cluster", 
-	data = sorted.turnout, robust = T, corstr = "fixed", R=corr.mat)
-user.prompt()
-##  Viewing the regression output:
-summary(z.out2)
-
diff --git a/demo/logit.survey.R b/demo/logit.survey.R
deleted file mode 100644
index 55e1546..0000000
--- a/demo/logit.survey.R
+++ /dev/null
@@ -1,105 +0,0 @@
-#####  Example 1: User has Existing Sample Weights #####
-
-# Attach sample data:  
-data(api, package="survey")
-
-# In this example, we will estimate a model using 
-# the percentages of students who receive subsidized 
-# lunch and the percentage who are new to a school
-# to predict whether each California public school 
-# attends classes year round.
-
-z.out1 <- zelig(yr.rnd ~ meals + mobility, model = "logit.survey", weights=~pw, data = apistrat)
-summary(z.out1)
-
-# Set explanatory variables to their default (mean/mode) values, and set
-# a high (80th percentile) and low (20th percentile) value for "meals,"
-# the percentage of students who receive subsidized meals:
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-# Generate first differences for the effect of high versus low "meals" 
-# on the probability that a school will hold classes year round:
-
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-summary(s.out1)
-
-# Generate a second set of fitted values and a plot:
-
-plot(s.out1)
-
-
-
-####  Example 2: User has Details about Complex Survey Design  ####
-####  (but not sample weights) 					   ####
-
-# Suppose that the survey house that provided
-# the dataset excluded probability weights 
-# but made other details about the survey
-# design available.  We can still estimate 
-# a model without probability weights by instead
-# supplying variables that identify the stratum
-# and/or cluster from which each observation was
-# selected, along with the size of the finite
-# population from which each observation was sampled.
-
-z.out2 <- zelig(yr.rnd ~ meals + mobility, model = "logit.survey", strata=~stype, fpc=~fpc, data = apistrat)
-summary(z.out2)
-
-# The coefficient estimates from this model are identical to 
-# point estimates in the previous example, but the standard errors
-# are smaller.  When sampling weights are omitted, Zelig estimates 
-# them automatically for "normal.survey" models based on the 
-# user-defined description of sampling designs.  In addition, 
-# when user-defined descriptions of the sampling design are 
-# entered as inputs, variance estimates are better and standard
-# errors are consequently smaller.
-#
-# setx() and sim() can then be run on z.out2 in the same fashion 
-# described in Example 1.
-
-
-
-#####  Example 3: User has Replicate Weights #####
-
-# Load data for a model using the number of out-of-hospital
-# cardiac arrests and the number of patients who arrive 
-# alive in hospitals to predict whether each hospital
-# has been sued (an indicator variable artificially created
-# here for the purpose of illustration).
-
-data(scd)
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-
-# Again, for the purpose of illustration, create four Balanced 
-# Repeated Replicate (BRR) weights:
-
-BRRrep<-2*cbind(c(1,0,1,0,1,0), c(1,0,0,1,0,1), c(0,1,1,0,0,1),
-c(0,1,0,1,1,0))
-
-# Estimate the model using Zelig:
-
-z.out3 <- zelig(formula=sued ~ arrests + alive , model = "logit.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-summary(z.out3)
-
-# Set the explanatory variables at their means and set
-# arrests at its 20th and 80th percentiles
-
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-
-# Generate first differences for the effect of a low
-# versus a high number of individuals who arrive
-# alive on the probability that a hospital will be sued:
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-summary(s.out3)
-
-# Plot the simulated quantities of interest:
-plot(s.out3)
-
-#### The user should also refer to the logit model demo, since ####
-#### logit.survey models can take many of the same options as  ####
-#### logit models.     	 					         ####
diff --git a/demo/lognorm.R b/demo/lognorm.R
deleted file mode 100644
index 24247fd..0000000
--- a/demo/lognorm.R
+++ /dev/null
@@ -1,26 +0,0 @@
-library(ZeligCommon)
-# Load the sample data:  
-data(coalition)
-
-# Estimate the model:
-user.prompt()
-z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "lognorm",
-               data = coalition)
-user.prompt()
-# View the regression output:  
-summary(z.out)
-
-# Set the baseline values (with the ruling coalition in the minority)
-# and the alternative values (with the ruling coalition in the majority)
-# for X:
-user.prompt()
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-# Simulate expected values qi$ev and first differences qi$fd:
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-summary(s.out)
-user.prompt()
-plot(s.out)
diff --git a/demo/ls.R b/demo/ls.R
deleted file mode 100644
index bfdc1bb..0000000
--- a/demo/ls.R
+++ /dev/null
@@ -1,67 +0,0 @@
-#####  Example 1: Basic Example with First Differences  #####
-
-# Attach sample data and variable names:  
-
-data(macro)
-
-# Estimate model and present a summary:
-
-z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-
-x.high<- setx(z.out1, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
-
-x.high
-x.low
-
-
-# Generate first differences for the effect of high versus low trade on
-# unemployment:
-
-s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-
-# Summary of fitted statistical model
-
-summary(z.out1)
-
-# Summary of simulated quantities of interest
-
-summary(s.out1)
-
-# Plot of simulated quantities of interest
-
-plot(s.out1)
-
-#####  Example 2:  Using Dummy Variables #####
-
-# Estimate a model with a dummy variable for each year and country.  
-# Note that you do not need to create dummy variables, as the program 
-# will automatically parse the unique values in the selected variables 
-# into dummy variables.
-
-z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(country), 
-                model = "ls", data = macro)
-
-# Set values for the explanatory variables, using the default mean/mode
-# values, with country set to the United States and Japan, respectively:
-x.US <- setx(z.out2, country = "United States")
-x.Japan <- setx(z.out2, country = "Japan")
-
-
-
-# Simulate quantities of interest:
-s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
-
-# Summary of fitted statistical model
-
-summary(z.out2)
-
-# Summary of simulated quantities of interest
-
-summary(s.out2)
-
-# Plot differences:  
-plot(s.out2)
diff --git a/demo/mi.R b/demo/mi.R
deleted file mode 100644
index 467e00d..0000000
--- a/demo/mi.R
+++ /dev/null
@@ -1,9 +0,0 @@
-library(Zelig)
-
-data(turnout)
-
-z <- zelig(vote ~ age, model = "logit", data = mi(turnout[1:10, ], turnout[100:110, ]))
-
-x <- setx(z, age = 90)
-
-s <- sim(z, x=x, num=20)
diff --git a/demo/mlogit.bayes.R b/demo/mlogit.bayes.R
deleted file mode 100644
index f097fc7..0000000
--- a/demo/mlogit.bayes.R
+++ /dev/null
@@ -1,58 +0,0 @@
-## Attaching the example dataset:
-data(mexico)
-
-## Estimating the model using mlogit.bayes:
-z.out <- zelig(vote88 ~ pristr + othcok + othsocok, model = "mlogit.bayes", 
-               data = mexico)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-heidel.diag(z.out$result$coefficients)
-user.prompt()
-
-raftery.diag(z.out$result$coefficients)
-user.prompt()
-
-## Summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting explanatory variables to their default (mean/mode)
-## values, with pristr (the strength of the PRI) equal to
-## 1 (weak) or 3 (strong):
-x.weak <- setx(z.out, pristr = 1)
-x.strong <- setx(z.out, pristr = 3)
-
-user.prompt()
-
-## Estimating the first difference for the effect of
-## a strong versus a weak PRI on the vote choice probabilities:
-s.out2 <- sim(z.out, x = x.strong, x1 = x.weak)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
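
The heidel.diag() and raftery.diag() calls above come from the coda package and accept any mcmc object, so the diagnostics can be exercised without fitting a model. A self-contained sketch on white-noise chains, which should of course pass:

    library(coda)
    set.seed(1)
    draws <- mcmc(matrix(rnorm(12000), ncol = 3))  # 4000 draws, 3 parameters
    geweke.diag(draws)   # z-scores comparing early vs. late chain segments
    heidel.diag(draws)   # stationarity and half-width tests
    raftery.diag(draws)  # run length needed for quantile estimation
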
diff --git a/demo/negbinom.R b/demo/negbinom.R
deleted file mode 100644
index e13b99a..0000000
--- a/demo/negbinom.R
+++ /dev/null
@@ -1,26 +0,0 @@
-# Attach the data-frame
-data(sanction)
-
-# Fit the statistical model
-
-z <- zelig(num ~ target + coop, model = "negbinom", data = sanction)
-
-# Set explanatory variables (in this case, nothing is explicitly set)
-
-x <- setx(z)
-
-# Simulate Quantities of Interest
-
-s <- sim(z, x)
-
-# Summarize the statistical model
-
-summary(z)
-
-# Summarize the simulated quantities of interest
-
-summary(s)
-
-# Plot the results
-
-plot(s)
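
The negative binomial model is usually preferred to the Poisson when counts are overdispersed (variance exceeding the mean). A small sketch comparing the two fits on simulated overdispersed counts; glm.nb() is from MASS, which ships with R:

    library(MASS)
    set.seed(1)
    x <- rnorm(500)
    y <- rnbinom(500, mu = exp(0.5 + 0.8 * x), size = 1.2)  # overdispersed
    fit.pois <- glm(y ~ x, family = poisson)
    fit.nb   <- glm.nb(y ~ x)
    c(poisson = AIC(fit.pois), negbin = AIC(fit.nb))  # negbin should fit better
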
diff --git a/demo/normal.R b/demo/normal.R
deleted file mode 100644
index 1762683..0000000
--- a/demo/normal.R
+++ /dev/null
@@ -1,34 +0,0 @@
-library(Zelig)
-
-#####  Example 1: Basic Example with First Differences  #####
-
-# Attach sample data and variable names:  
-data(macro)
-
-# Estimate model and present a summary:
-
-z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "normal", data = macro)
-
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-
-x.high <- setx(z.out1, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
-
-# Generate first differences for the effect of high versus low trade
-# on unemployment:
-
-s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-
-# Summarize the fitted model
-
-summary(z.out1)
-
-# Summarize the simulated quantities of interest
-
-summary(s.out1)
-
-# Plot the simulated quantities of interest
-
-plot(s.out1)
diff --git a/demo/normal.bayes.R b/demo/normal.bayes.R
deleted file mode 100644
index c5ffd88..0000000
--- a/demo/normal.bayes.R
+++ /dev/null
@@ -1,58 +0,0 @@
-## Attaching the example dataset:
-data(macro)
-
-## Estimating the model using normal.bayes:
-z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.bayes", 
-                  data = macro, verbose=TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$result$coefficients)  
-user.prompt()
-
-heidel.diag(z.out$result$coefficients)  
-user.prompt()
-
-raftery.diag(z.out$result$coefficients)  
-user.prompt()
-
-## Summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences
-## Set explanatory variables to their default (mean/mode) values,
-## with high (80th percentile) and low (20th percentile) trade:
-x.high <- setx(z.out, trade = quantile(macro$trade, prob = 0.8))
-x.low <- setx(z.out, trade = quantile(macro$trade, prob = 0.2))
-user.prompt()
-
-## Estimating the first difference for the effect of
-## high versus low trade on unemployment rate:
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
diff --git a/demo/normal.gee.R b/demo/normal.gee.R
deleted file mode 100644
index 79befa1..0000000
--- a/demo/normal.gee.R
+++ /dev/null
@@ -1,36 +0,0 @@
-#####  Example 1: Basic Example with First Differences  #####
-
-# Attach sample data and variable names:  
-data(macro)
-
-# Estimate model and present a summary:
-user.prompt()
-z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.gee",
-               id = "country", data = macro, robust = TRUE,
-               corstr = "AR-M", Mv = 1)
-user.prompt()
-summary(z.out)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-user.prompt()
-x.high <- setx(z.out, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z.out, trade = quantile(macro$trade, 0.2))
-
-# Generate first differences for the effect of high versus low trade
-# on unemployment:
-user.prompt()
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-summary(s.out)
-
-# Generate a plot of quantities of interest:
-user.prompt()
-plot(s.out)
-
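
The corstr = "AR-M" option with Mv = 1 requests an AR(1) working correlation within each cluster, i.e. correlation rho^|i - j| between observations i and j. A base-R sketch of that matrix for one cluster of five observations (rho = 0.4 is an arbitrary illustration):

    rho <- 0.4
    n <- 5
    ar1 <- outer(1:n, 1:n, function(i, j) rho^abs(i - j))
    round(ar1, 3)  # 1 on the diagonal, decaying geometrically off it
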
diff --git a/demo/normal.survey.R b/demo/normal.survey.R
deleted file mode 100644
index cb3d278..0000000
--- a/demo/normal.survey.R
+++ /dev/null
@@ -1,101 +0,0 @@
-#####  Example 1: User has Existing Sample Weights #####
-
-# Attach sample data and variable names:  
-data(api)
-
-# In this example, we will estimate a model using 
-# the percentages of students who receive subsidized 
-# lunch and an indicator for whether schooling is 
-# year-round to predict California public schools' 
-# academic performance index scores:
-
-z.out1 <- zelig(api00 ~ meals + yr.rnd, model = "normal.survey",  
-  weights=~pw, data = apistrat)
-summary(z.out1)
-
-# Set explanatory variables to their default (mean/mode) values, and set
-# a high (80th percentile) and low (20th percentile) value for "meals,"
-# the percentage of students who receive subsidized meals:
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-# Generate first differences for the effect of high versus low "meals" 
-# on academic performance:
-
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-
-# Plot the simulated quantities of interest:
-
-plot(s.out1)
-
-
-
-####  Example 2: User has Details about Complex Survey Design  ####
-####  (but not sample weights)                                 ####
-
-# Suppose that the survey house that provided the dataset
-# excluded probability weights but made other details about the
-# survey design available.  We can still estimate a model without
-# probability weights by instead supplying variables that identify
-# the stratum and/or cluster from which each observation was
-# selected, and the size of the finite population from which it
-# was drawn.
-
-z.out2 <- zelig(api00 ~ meals + yr.rnd, model = "normal.survey",  
-  strata=~stype, fpc=~fpc, data = apistrat)
-summary(z.out2)
-
-# Note that these results are identical to the results obtained
-# when pre-existing sampling weights were used.  When sampling 
-# weights are omitted, Zelig estimates them automatically for 
-# "normal.survey" models based on the user-defined description 
-# of sampling designs.  If no description is present, the default 
-# assumption is equal probability sampling.
-# 
-# setx() and sim() can then be run on z.out2 in the same fashion 
-# described in Example 1.
-
-
-
-#####  Example 3: User has Replicate Weights #####
-
-# Load data for a model using the number of out-of-hospital
-# cardiac arrests to predict the number of patients who arrive 
-# alive in hospitals: 
-
-data(scd)
-
-# Create four Balanced Repeated Replicate (BRR) weights:
-
-BRRrep <- 2 * cbind(c(1,0,1,0,1,0), c(1,0,0,1,0,1),
-                    c(0,1,1,0,0,1), c(0,1,0,1,1,0))
-
-# Estimate the model using Zelig:
-
-z.out3 <- zelig(formula=alive ~ arrests , model = "normal.survey", 
-  repweights=BRRrep, type="BRR", data=scd, na.action=NULL)
-summary(z.out3)
-
-# Set the explanatory variable at its minimum and maximum 
-
-x.min <- setx(z.out3, arrests = min(scd$arrests))
-x.max <- setx(z.out3, arrests = max(scd$arrests))
-
-# Generate first differences for the effect of the maximum
-# versus the minimum number of cardiac arrests on the number
-# of people who arrive alive:
-
-s.out3 <- sim(z.out3, x=x.max, x1=x.min)
-summary(s.out3)
-
-# Plot the simulated quantities of interest:
-plot(s.out3)
-
-#### The user should also refer to the normal model demo, since ####
-#### normal.survey models can take many of the same options as  ####
-#### normal models.                                             ####
-
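
Each column of BRRrep doubles the weight of half the observations and zeroes out the rest, which is the balanced repeated replication scheme. For reference, a hedged sketch of the same design built directly with the survey package (which these models wrap); the exact call Zelig makes internally may differ:

    library(survey)
    data(scd, package = "survey")
    BRRrep <- 2 * cbind(c(1,0,1,0,1,0), c(1,0,0,1,0,1),
                        c(0,1,1,0,0,1), c(0,1,0,1,1,0))
    des <- svrepdesign(data = scd, repweights = BRRrep, type = "BRR",
                       combined.weights = FALSE)
    summary(svyglm(alive ~ arrests, design = des))
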
diff --git a/demo/oprobit.bayes.R b/demo/oprobit.bayes.R
deleted file mode 100644
index 39d4319..0000000
--- a/demo/oprobit.bayes.R
+++ /dev/null
@@ -1,67 +0,0 @@
-## Attaching the example dataset:
-data(sanction)
-
-# Create an ordered dependent variable: 
-user.prompt()
-sanction$ncost <- factor(sanction$ncost, ordered = TRUE,
-                         levels = c("net gain", "little effect", 
-                         "modest loss", "major loss"))
-
-## Estimating the model using oprobit.bayes:
-z.out <- zelig(ncost ~ mil + coop, model = "oprobit.bayes",
-                  data = sanction, verbose=FALSE, tune=0.3)
-
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-#geweke.diag(z.out$coefficients)
-#user.prompt()
-
-heidel.diag(z.out$result$coefficients)
-user.prompt()
-
-raftery.diag(z.out$result$coefficients)
-user.prompt()
-
-## Summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting explanatory variables to their default (mean/mode)
-## values, with military action set to yes (1) or no (0):
-x.high <- setx(z.out, mil=0)
-x.low <- setx(z.out, mil=1)
-user.prompt()
-
-## Estimating the first difference for the effect of
-## military action on the probabilities of incurring
-## different levels of cost:
-
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
diff --git a/demo/poisson.R b/demo/poisson.R
deleted file mode 100644
index 53d0766..0000000
--- a/demo/poisson.R
+++ /dev/null
@@ -1,26 +0,0 @@
-# Attach the data frame
-data(sanction)
-
-# Fit the statistical model
-
-z.out <- zelig(num ~ target + coop, model = "poisson", data = sanction)
-
-# Set explanatory variables (in this case none are explicitly set)
-
-x.out <- setx(z.out)
-
-# Simulate the quantities of interest
-
-s.out <- sim(z.out, x = x.out)
-
-# Summary of the statistical model
-
-summary(z.out)
-
-# Summary of the simulated quantities of interest
-
-summary(s.out)
-
-# Plot the simulated quantities of interest
-
-plot(s.out)
diff --git a/demo/poisson.bayes.R b/demo/poisson.bayes.R
deleted file mode 100644
index 54e7ce9..0000000
--- a/demo/poisson.bayes.R
+++ /dev/null
@@ -1,59 +0,0 @@
-## Attaching the example dataset:
-data(sanction)
-
-## Estimating the model using poisson.bayes:
-z.out <- zelig(num ~ target + coop, model = "poisson.bayes",
-                  data = sanction, verbose=TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$result$coefficients)
-user.prompt()
-
-heidel.diag(z.out$result$coefficients)
-user.prompt()
-
-raftery.diag(z.out$result$coefficients)
-user.prompt()
-
-## Summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting explanatory variables to their default (mean/mode)
-## values, with the number of targets set to its maximum
-## versus its minimum:
-x.max <- setx(z.out, target = max(sanction$target))
-x.min <- setx(z.out, target = min(sanction$target))
-user.prompt()
-
-## Estimating the first difference for the effect of
-## maximum versus minimum number of targets:
-s.out2 <- sim(z.out, x = x.max, x1 = x.min)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
diff --git a/demo/poisson.gee.R b/demo/poisson.gee.R
deleted file mode 100644
index daf687c..0000000
--- a/demo/poisson.gee.R
+++ /dev/null
@@ -1,39 +0,0 @@
-#####  Example 1: Basic Example #####
-
-# Attach sample data and variable names:  
-data(sanction)
-
-#  Variable identifying clusters
-sanction$cluster <- c(rep(c(1:15),5),rep(c(16),3))
-
-# Sorting by cluster
-sorted.sanction <- sanction[order(sanction$cluster),]
-
-# Estimate model and present a summary:
-user.prompt()
-z.out <- zelig(num ~ target + coop, model = "poisson.gee",
-               id = "cluster", data = sorted.sanction, robust = TRUE,
-               corstr = "exchangeable")
-user.prompt()
-summary(z.out)
-
-# Set explanatory variables to their default values:
-user.prompt()
-x.out <- setx(z.out)
-
-# Simulate quantities of interest
-user.prompt()
-s.out <- sim(z.out, x = x.out)
-user.prompt()
-summary(s.out)
-
-# Generate a plot of quantities of interest:
-user.prompt()
-plot(s.out)
-
diff --git a/demo/poisson.survey.R b/demo/poisson.survey.R
deleted file mode 100644
index 6b45529..0000000
--- a/demo/poisson.survey.R
+++ /dev/null
@@ -1,103 +0,0 @@
-#####  Example 1: User has Existing Sample Weights #####
-
-# Attach sample data:  
-data(api, package="survey")
-
-# In this example, we will estimate a model using 
-# each school's academic performance in 2000 and an
-# indicator for year-round schools to predict the 
-# number of students who enrolled in each California school.
-
-z.out1 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", data = apistrat)
-summary(z.out1)
-
-# Set explanatory variables to their default (mean/mode) values, and set
-# a high (80th percentile) and low (20th percentile) value for the
-# measure of academic performance, "api99" (the variable that
-# actually enters the model above):
-
-x.low <- setx(z.out1, api99 = quantile(apistrat$api99, 0.2))
-x.high <- setx(z.out1, api99 = quantile(apistrat$api99, 0.8))
-
-# Generate first differences for the effect of high versus low
-# academic performance on student enrollment:
-
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-summary(s.out1)
-
-# Plot the simulated quantities of interest:
-
-plot(s.out1)
-
-
-
-####  Example 2: User has Details about Complex Survey Design  ####
-####  (but not sample weights)                                 ####
-
-# Suppose that the survey house that provided the dataset
-# excluded probability weights but made other details about the
-# survey design available.  We can still estimate a model without
-# probability weights by instead supplying variables that identify
-# the stratum and/or cluster from which each observation was
-# selected, and the size of the finite population from which it
-# was drawn.
-
-z.out2 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", data = apistrat, 
-  strata=~stype, fpc=~fpc)
-summary(z.out2)
-
-# The coefficient estimates from this model are identical to 
-# point estimates in the previous example, but the standard errors
-# are smaller.  When sampling weights are omitted, Zelig estimates 
-# them automatically for "poisson.survey" models based on the
-# user-defined description of sampling designs.  In addition, 
-# when user-defined descriptions of the sampling design are 
-# entered as inputs, variance estimates are better and standard
-# errors are consequently smaller.
-#
-# setx() and sim() can then be run on z.out2 in the same fashion 
-# described in Example 1.
-
-
-
-#####  Example 3: User has Replicate Weights #####
-
-# Load data for a model using the number of out-of-hospital
-# cardiac arrests to predict the number of patients who arrive 
-# alive in hospitals.
-
-data(scd, package="survey")
-
-# For the purpose of illustration, create four Balanced 
-# Repeated Replicate (BRR) weights:
-
-BRRrep <- 2 * cbind(c(1,0,1,0,1,0), c(1,0,0,1,0,1),
-                    c(0,1,1,0,0,1), c(0,1,0,1,1,0))
-
-# Estimate the model using Zelig:
-
-z.out3 <- zelig(alive ~ arrests , model = "poisson.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-summary(z.out3)
-
-# Set the explanatory variables at their means and set
-# arrests at its 20th and 80th percentiles:
-
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-
-# Generate first differences for the effect of a high versus a low
-# number of cardiac arrests on the number of patients who arrive
-# alive:
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-summary(s.out3)
-
-# Plot the simulated quantities of interest:
-plot(s.out3)
-
-#### The user should also refer to the poisson model demo, since ####
-#### poisson.survey models can take many of the same options as  ####
-#### poisson models.                                             ####
-
diff --git a/demo/probit.R b/demo/probit.R
deleted file mode 100644
index 640c8f2..0000000
--- a/demo/probit.R
+++ /dev/null
@@ -1,51 +0,0 @@
-##  Attaching the sample turnout dataset:
-data(turnout)
-
-#####  Example 1:  Simple Example 
-
-##  Generating empirical estimates:
-
-z.out1 <- zelig(vote ~ race + educate, model = "probit", data = turnout)
-
-##  Using setx to generate baseline and alternative values for the
-##  explanatory variables.
-
-x.out1 <- setx(z.out1)
-x.out1
-
-
-##  Simulating quantities of interest (predicted probabilities, risk
-##  ratios, and risk differences):
-
-s.out1 <- sim(z.out1, x = x.out1)
-
-# Summary of the fitted statistical model
-
-summary(z.out1)
-
-# Summary of the simulated quantities of interest
-
-summary(s.out1)
-
-## Diagnostic plot of the s.out:
-
-plot(s.out1)
-
-##  Example 2: First Differences
-
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-
-# Simulate quantities of interest (include first-differences, etc.)
-
-s.out2 <- sim(z.out1, x = x.low, x1 = x.high)
-
-# Summary of quantities of interest (for the difference between x.low and x.high):
-
-summary(s.out2)
-
-# Plot of quantities of interest
-
-plot(s.out2)
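
Underneath the simulations, a probit's predicted probability is just pnorm() applied to the linear predictor. A self-contained sketch with glm() on simulated data (not the turnout model itself):

    set.seed(1)
    educ <- rnorm(300, mean = 12, sd = 3)
    vote <- rbinom(300, 1, pnorm(-2 + 0.2 * educ))
    fit  <- glm(vote ~ educ, family = binomial(link = "probit"))
    # Predicted probabilities at 12 versus 16 years of education:
    predict(fit, newdata = data.frame(educ = c(12, 16)), type = "response")
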
diff --git a/demo/probit.bayes.R b/demo/probit.bayes.R
deleted file mode 100644
index 3029814..0000000
--- a/demo/probit.bayes.R
+++ /dev/null
@@ -1,59 +0,0 @@
-## Attaching the example dataset:
-data(turnout)
-
-## Estimating the model using probit.bayes:
-z.out <- zelig(vote ~ race + educate, model = "probit.bayes",
-                  data = turnout, verbose=TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$result$coefficients)
-user.prompt()
-
-heidel.diag(z.out$result$coefficients)
-user.prompt()
-
-raftery.diag(z.out$result$coefficients)
-user.prompt()
-
-## Summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting education to its low (25th percentile) versus its
-## high (75th percentile) value, with all other variables held
-## at their default values.
-x.high <- setx(z.out, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out, educate = quantile(turnout$educate, prob = 0.25))
-user.prompt()
-
-## Estimating the first difference for the effect of
-## high versus low education on the probability of voting:
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
diff --git a/demo/probit.gee.R b/demo/probit.gee.R
deleted file mode 100644
index 3a13cf9..0000000
--- a/demo/probit.gee.R
+++ /dev/null
@@ -1,111 +0,0 @@
-##  Attaching the sample turnout dataset:
-data(turnout)
-
-##  Variable identifying clusters
-turnout$cluster <- rep(c(1:200),10)
-
-## Sorting by cluster
-sorted.turnout <- turnout[order(turnout$cluster),]
-
-#####  Example 1:  Simple Example with Stationary 3 Dependence
-
-##  Generating empirical estimates:
-user.prompt()
-z.out1 <- zelig(vote ~ race + educate, model = "probit.gee", id = "cluster", 
	data = sorted.turnout, robust = TRUE, corstr = "stat_M_dep", Mv = 3)
-user.prompt()
-##  Viewing the regression output:
-summary(z.out1)
-
-##  Using setx to generate baseline and alternative values for the
-##  explanatory variables.  
-user.prompt()
-x.out1 <- setx(z.out1)
-
-##  Simulating quantities of interest:
-user.prompt()
-s.out1 <- sim(z.out1, x = x.out1)
-user.prompt()
-## Summarizing the simulated quantities of interest:
-summary(s.out1)
-
-## Diagnostic plot of the s.out:
-user.prompt()
-plot(s.out1)
-
-##  Example 2: First Differences
-
-user.prompt()
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-
-user.prompt()
-s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
-user.prompt()
-summary(s.out2)
-user.prompt()
-plot(s.out2)
-
-#####  Example 3:  Example with Fixed Correlation Structure
-
-##  User-defined correlation structure
-user.prompt()
-corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
-diag(corr.mat) <- 1 
-
-##  Generating empirical estimates:
-user.prompt()
-z.out2 <- zelig(vote ~ race + educate, model = "probit.gee", id = "cluster", 
	data = sorted.turnout, robust = TRUE, corstr = "fixed", R = corr.mat)
-user.prompt()
-##  Viewing the regression output:
-summary(z.out2)
-
diff --git a/demo/probit.survey.R b/demo/probit.survey.R
deleted file mode 100644
index 61f8c3e..0000000
--- a/demo/probit.survey.R
+++ /dev/null
@@ -1,106 +0,0 @@
-#####  Example 1: User has Existing Sample Weights #####
-
-# Attach sample data:  
-data(api, package="survey")
-
-# In this example, we will estimate a model using 
-# the percentages of students who receive subsidized 
-# lunch and the percentage who are new to a school
-# to predict whether each California public school 
-# attends classes year round.
-
-z.out1 <- zelig(yr.rnd ~ meals + mobility, model = "probit.survey",
-                weights = ~pw, data = apistrat)
-summary(z.out1)
-
-# Set explanatory variables to their default (mean/mode) values, and set
-# a high (80th percentile) and low (20th percentile) value for "meals,"
-# the percentage of students who receive subsidized meals:
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-# Generate first differences for the effect of high versus low "meals" 
-# on the probability that a school will hold classes year round:
-
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-summary(s.out1)
-
-# Plot the simulated quantities of interest:
-
-plot(s.out1)
-
-
-
-####  Example 2: User has Details about Complex Survey Design  ####
-####  (but not sample weights)                                 ####
-
-# Suppose that the survey house that provided the dataset
-# excluded probability weights but made other details about the
-# survey design available.  We can still estimate a model without
-# probability weights by instead supplying variables that identify
-# the stratum and/or cluster from which each observation was
-# selected, and the size of the finite population from which it
-# was drawn.
-
-z.out2 <- zelig(yr.rnd ~ meals + mobility, model = "probit.survey",
-                strata = ~stype, fpc = ~fpc, data = apistrat)
-summary(z.out2)
-
-# The coefficient estimates from this model are identical to 
-# point estimates in the previous example, but the standard errors
-# are smaller.  When sampling weights are omitted, Zelig estimates 
-# them automatically for "probit.survey" models based on the
-# user-defined description of sampling designs.  In addition, 
-# when user-defined descriptions of the sampling design are 
-# entered as inputs, variance estimates are better and standard
-# errors are consequently smaller.
-#
-# setx() and sim() can then be run on z.out2 in the same fashion 
-# described in Example 1.
-
-
-
-#####  Example 3: User has Replicate Weights #####
-
-# Load data for a model using the number of out-of-hospital
-# cardiac arrests and the number of patients who arrive 
-# alive in hospitals to predict whether each hospital
-# has been sued (an indicator variable artificially created
-# here for the purpose of illustration).
-
-data(scd)
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-
-# Again, for the purpose of illustration, create four Balanced 
-# Repeated Replicate (BRR) weights:
-
-BRRrep <- 2 * cbind(c(1,0,1,0,1,0), c(1,0,0,1,0,1),
-                    c(0,1,1,0,0,1), c(0,1,0,1,1,0))
-
-# Estimate the model using Zelig:
-
-z.out3 <- zelig(formula=sued ~ arrests + alive , model = "probit.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-summary(z.out3)
-
-# Set the explanatory variables at their means and set
-# arrests at its 20th and 80th percentiles:
-
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-
-# Generate first differences for the effect of a high versus a low
-# number of cardiac arrests on the probability that a hospital
-# will be sued:
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-summary(s.out3)
-
-# Plot the simulated quantities of interest:
-plot(s.out3)
-
-#### The user should also refer to the probit model demo, since ####
-#### probit.survey models can take many of the same options as  ####
-#### probit models.                                             ####
-
diff --git a/demo/relogit.R b/demo/relogit.R
deleted file mode 100644
index 841ffb5..0000000
--- a/demo/relogit.R
+++ /dev/null
@@ -1,37 +0,0 @@
-data(mid)
-user.prompt()
-
-
-## prior correction + bias correction 
-z.out1 <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
-                data = mid, model = "relogit", tau = 1042/303772)
-user.prompt()
-
-summary(z.out1)
-user.prompt()
-
-x.out1 <- setx(z.out1)
-user.prompt()
-
-s.out1 <- sim(z.out1, x = x.out1)
-user.prompt()
-
-summary(s.out1)
-user.prompt()
-
-plot(s.out1)
-
-## weighting + bias correction + robust s.e.
-z.out2 <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
-                data = mid, model = "relogit", tau = 1042/303772,
-                case.control = "weighting", robust = TRUE)
-user.prompt()
-
-summary(z.out2)
-user.prompt()
-
-x.out2 <- setx(z.out2)
-user.prompt()
-
-s.out2 <- sim(z.out2, x = x.out2)
-user.prompt()
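
The tau argument gives the population fraction of events (here 1042/303772). King and Zeng's prior correction then adjusts only the intercept of an ordinary logit fit. A hedged sketch of that adjustment with hypothetical numbers (illustrative arithmetic, not the package's code):

    tau  <- 1042 / 303772  # population event fraction
    ybar <- 0.02           # hypothetical sample event fraction
    b0   <- -3.5           # hypothetical uncorrected logit intercept
    b0 - log(((1 - tau) / tau) * (ybar / (1 - ybar)))  # corrected intercept
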
diff --git a/demo/twosls.R b/demo/twosls.R
deleted file mode 100644
index 771521e..0000000
--- a/demo/twosls.R
+++ /dev/null
@@ -1,24 +0,0 @@
-data(klein)
-
-formula <- list(
-                mu1 = C ~ Wtot + P1,
-                mu2 = I ~ P + P1 + K1,
-                mu3 = Wp ~ X + X1 + Tm,
-                inst= ~ P1 + K1 + X1 + Tm + Wg + G
-                )
-
-z.out <- zelig(formula = formula, model = "twosls", data = klein, cite = FALSE)
-
-x.out <- setx(z.out)
-
-s.out <- sim(z.out, x = x.out)
-
-summary(s.out)
-
-
-# Plot
-
-user.prompt()
-plot(s.out)
-
-
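
Two-stage least squares can be sketched by hand: regress each endogenous regressor on the instruments, then use the fitted values in the structural equation. A toy illustration on simulated data (note that the second-stage lm() standard errors are not valid 2SLS standard errors, which is one reason to use a dedicated routine):

    set.seed(1)
    z <- rnorm(200)                  # instrument
    u <- rnorm(200)                  # unobserved confounder
    x <- z + u + rnorm(200)          # endogenous regressor
    y <- 2 * x + 3 * u + rnorm(200)  # structural equation of interest
    x.hat <- fitted(lm(x ~ z))       # stage 1
    coef(lm(y ~ x.hat))              # stage 2: near 2, unlike naive lm(y ~ x)
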
diff --git a/demo/vertci.R b/demo/vertci.R
deleted file mode 100644
index 3a1b2ed..0000000
--- a/demo/vertci.R
+++ /dev/null
@@ -1,41 +0,0 @@
-
-##  Attaching the sample turnout dataset:
-data(turnout)
-
-##  Estimate the model:
-user.prompt()
-z.out <- zelig(vote ~ race + educate + age + I(age^2) + income,
-               model = "logit", data = turnout)
-user.prompt()
-summary(z.out)
-
-##  Creating setx structures with education set to high school and
-##  post-college levels, for the whole range of the age variable.  
-user.prompt()
-x.low <- setx(z.out, educate = 12, age = 18:95)
-x.high <- setx(z.out, educate = 16, age = 18:95)
-
-##  Using sim to generate the simulated predicted probabilities:
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-plot.ci(s.out, xlab = "Age in Years",
-        ylab = "Predicted Probability of Voting",
-        main = "Effect of Education and Age on Voting Behavior")
-
-text(x=50,y=.95,labels="College Education (16 years)",cex=0.6)
-text(x=60,y=.8,labels="High School Education (12 years)",cex=0.6)
-
diff --git a/inst/CITATION b/inst/CITATION
new file mode 100644
index 0000000..a3213b4
--- /dev/null
+++ b/inst/CITATION
@@ -0,0 +1,35 @@
+citHeader("To cite Zelig in publications please use:")
+
+if(!exists("meta") || is.null(meta)) meta <- packageDescription("Zelig")
+year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date)
+vers <- paste("Version", meta$Version)  
+
+bibentry(
+            bibtype="Manual",
+            title = "Zelig: Everyone's Statistical Software",
+            author = c(
+            	person("Christine", "Choirat", email="cchoirat at iq.harvard.edu", role = "aut"),
+            	person("James", "Honaker", email="jhonaker at iq.harvard.edu", role = "aut"),
+            	person("Kosuke", "Imai", role = "aut"),
+                person("Gary", "King", role = "aut"),
+                person("Olivia", "Lau", role = "aut")
+                ),
+            year = year,
+            note = vers,
+            url = "http://zeligproject.org/")
+
+
+bibentry(
+            bibtype="Article",
+            title = "Toward A Common Framework for Statistical Analysis and Development",
+            author = c(
+            	person("Kosuke", "Imai"),
+                person("Gary", "King"),
+                person("Olivia", "Lau")
+                ),
+            journal = "Journal of Computational and Graphical Statistics",
+            volume = 17,
+            number = 4,
+            year = 2008,
+            pages = "892-913",
+            url =  "http://j.mp/msE15c")
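
Once the package is installed, R assembles these bibentry() records into the output of:

    citation("Zelig")
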
diff --git a/inst/JSON/zelig5models.json b/inst/JSON/zelig5models.json
new file mode 100644
index 0000000..13e4025
--- /dev/null
+++ b/inst/JSON/zelig5models.json
@@ -0,0 +1,446 @@
+{
+  "zelig5models": {
+    "ls": {
+      "name": ["ls"],
+      "description": ["Least Squares Regression for Continuous Dependent Variables"],
+      "outcome": {
+        "modelingType": ["continous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-ls.html"],
+      "wrapper": ["ls"],
+      "tree": ["Zelig-ls"]
+    },
+    "logit": {
+      "name": ["logit"],
+      "description": ["Logistic Regression for Dichotomous Dependent Variables"],
+      "outcome": {
+        "modelingType": ["binary"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-logit.html"],
+      "wrapper": ["logit"],
+      "tree": ["Zelig-logit", "Zelig-binchoice", "Zelig-glm"]
+    },
+    "probit": {
+      "name": ["probit"],
+      "description": ["Probit Regression for Dichotomous Dependent Variables"],
+      "outcome": {
+        "modelingType": ["binary"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-probit.html"],
+      "wrapper": ["probit"],
+      "tree": ["Zelig-probit", "Zelig-binchoice", "Zelig-glm"]
+    },
+    "poisson": {
+      "name": ["poisson"],
+      "description": ["Poisson Regression for Event Count Dependent Variables"],
+      "outcome": {
+        "modelingType": ["discrete"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-poisson.html"],
+      "wrapper": ["poisson"],
+      "tree": ["Zelig-poisson", "Zelig-glm"]
+    },
+    "normal": {
+      "name": ["normal"],
+      "description": ["Normal Regression for Continuous Dependent Variables"],
+      "outcome": {
+        "modelingType": ["continuous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-normal.html"],
+      "wrapper": ["normal"],
+      "tree": ["Zelig-normal", "Zelig-glm"]
+    },
+    "gamma": {
+      "name": ["gamma"],
+      "description": ["Gamma Regression for Continuous, Positive Dependent Variables"],
+      "outcome": {
+        "modelingType": ["continous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-gamma.html"],
+      "wrapper": ["gamma"],
+      "tree": ["Zelig-gamma", "Zelig-glm"]
+    },
+    "negbin": {
+      "name": ["negbin"],
+      "description": ["Negative Binomial Regression for Event Count Dependent Variables"],
+      "outcome": {
+        "modelingType": ["discrete"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-negbin.html"],
+      "wrapper": ["negbin"],
+      "tree": ["Zelig-negbin"]
+    },
+    "exp": {
+      "name": ["exp"],
+      "description": ["Exponential Regression for Duration Dependent Variables"],
+      "outcome": {
+        "modelingType": ["continous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-exp.html"],
+      "wrapper": ["exp"],
+      "tree": ["Zelig-exp"]
+    },
+    "lognorm": {
+      "name": ["lognorm"],
+      "description": ["Log-Normal Regression for Duration Dependent Variables"],
+      "outcome": {
+        "modelingType": ["discrete"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-lognorm.html"],
+      "wrapper": ["lognorm"],
+      "tree": ["Zelig-lognorm"]
+    },
+    "tobit": {
+      "name": ["tobit"],
+      "description": ["Linear regression for Left-Censored Dependent Variable"],
+      "outcome": {
+        "modelingType": ["continous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-tobit.html"],
+      "wrapper": ["tobit"],
+      "tree": ["Zelig-tobit"]
+    },
+    "quantile": {
+      "name": ["quantile"],
+      "description": ["Quantile Regression for Continuous Dependent Variables"],
+      "outcome": {
+        "modelingType": ["continuous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-quantile.html"],
+      "wrapper": ["rq"],
+      "tree": ["Zelig-quantile"]
+    },
+    "relogit": {
+      "name": ["relogit"],
+      "description": ["Rare Events Logistic Regression for Dichotomous Dependent Variables"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-relogit.html"],
+      "wrapper": ["relogit"],
+      "tree": ["Zelig-relogit"]
+    },
+    "logitgee": {
+      "name": ["logit-gee"],
+      "description": ["General Estimating Equation for Logistic Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-logitgee.html"],
+      "wrapper": ["logit.gee"],
+      "tree": ["Zelig-logit-gee", "Zelig-binchoice-gee", "Zelig-gee"]
+    },
+    "probitgee": {
+      "name": ["probit-gee"],
+      "description": ["General Estimating Equation for Probit Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-probitgee.html"],
+      "wrapper": ["probit.gee"],
+      "tree": ["Zelig-probit-gee", "Zelig-binchoice-gee", "Zelig-gee"]
+    },
+    "gammagee": {
+      "name": ["gamma-gee"],
+      "description": ["General Estimating Equation for Gamma Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-gammagee.html"],
+      "wrapper": ["gamma.gee"],
+      "tree": ["Zelig-gamma-gee", "Zelig-gee"]
+    },
+    "normalgee": {
+      "name": ["normal-gee"],
+      "description": ["General Estimating Equation for Normal Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-normalgee.html"],
+      "wrapper": ["normal.gee"],
+      "tree": ["Zelig-normal-gee", "Zelig-gee"]
+    },
+    "poissongee": {
+      "name": ["poisson-gee"],
+      "description": ["General Estimating Equation for Poisson Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-poissongee.html"],
+      "wrapper": ["poisson.gee"],
+      "tree": ["Zelig-poisson-gee", "Zelig-gee"]
+    },
+    "factorbayes": {
+      "name": ["factor-bayes"],
+      "description": ["Bayesian Factor Analysis"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-factorbayes.html"],
+      "wrapper": ["factor.bayes"],
+      "tree": ["Zelig-factor-bayes"]
+    },
+    "logitbayes": {
+      "name": ["logit-bayes"],
+      "description": ["Bayesian Logistic Regression for Dichotomous Dependent Variables"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-logitbayes.html"],
+      "wrapper": ["logit.bayes"],
+      "tree": ["Zelig-logit-bayes", "Zelig-bayes"]
+    },
+    "mlogitbayes": {
+      "name": ["mlogit-bayes"],
+      "description": ["Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-mlogitbayes.html"],
+      "wrapper": ["mlogit.bayes"],
+      "tree": ["Zelig-mlogit-bayes", "Zelig-bayes"]
+    },
+    "normalbayes": {
+      "name": ["normal-bayes"],
+      "description": ["Bayesian Normal Linear Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-normalbayes.html"],
+      "wrapper": ["normal.bayes"],
+      "tree": ["Zelig-normal-bayes", "Zelig-bayes"]
+    },
+    "oprobitbayes": {
+      "name": ["oprobit-bayes"],
+      "description": ["Bayesian Probit Regression for Dichotomous Dependent Variables"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-oprobitbayes.html"],
+      "wrapper": ["oprobit.bayes"],
+      "tree": ["Zelig-oprobit-bayes", "Zelig-bayes"]
+    },
+    "poissonbayes": {
+      "name": ["poisson-bayes"],
+      "description": ["Bayesian Poisson Regression"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-poissonbayes.html"],
+      "wrapper": ["poisson.bayes"],
+      "tree": ["Zelig-poisson-bayes", "Zelig-bayes"]
+    },
+    "probitbayes": {
+      "name": ["probit-bayes"],
+      "description": ["Bayesian Probit Regression for Dichotomous Dependent Variables"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-probitbayes.html"],
+      "wrapper": ["probit.bayes"],
+      "tree": ["Zelig-probit-bayes", "Zelig-bayes"]
+    },
+    "tobitbayes": {
+      "name": ["tobit-bayes"],
+      "description": ["Bayesian Tobit Regression for a Censored Dependent Variable"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-tobitbayes.html"],
+      "wrapper": ["tobit.bayes"],
+      "tree": ["Zelig-tobit-bayes", "Zelig-bayes"]
+    },
+    "weibull": {
+      "name": ["weibull"],
+      "description": ["Weibull Regression for Duration Dependent Variables"],
+      "outcome": {
+        "modelingType": ["bounded"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-weibull.html"],
+      "wrapper": ["weibull"],
+      "tree": ["Zelig-weibull"]
+    },
+    "logitsurvey": {
+      "name": ["logit-survey"],
+      "description": ["Logistic Regression with Survey Weights"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-logitsurvey.html"],
+      "wrapper": ["logit.survey"],
+      "tree": ["Zelig-logit-survey", "Zelig-binchoice-survey", "Zelig-survey"]
+    },
+    "probitsurvey": {
+      "name": ["probit-survey"],
+      "description": ["Probit Regression with Survey Weights"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-probitsurvey.html"],
+      "wrapper": ["probit.survey"],
+      "tree": ["Zelig-probit-survey", "Zelig-binchoice-survey", "Zelig-survey"]
+    },
+    "normalsurvey": {
+      "name": ["normal-survey"],
+      "description": ["Normal Regression for Continuous Dependent Variables with Survey Weights"],
+      "outcome": {
+        "modelingType": ["continuous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-normalsurvey.html"],
+      "wrapper": ["normal.survey"],
+      "tree": ["Zelig-normal-survey", "Zelig-survey"]
+    },
+    "gammasurvey": {
+      "name": ["gamma-survey"],
+      "description": ["Gamma Regression with Survey Weights"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-gammasurvey.html"],
+      "wrapper": ["gamma.survey"],
+      "tree": ["Zelig-gamma-survey", "Zelig-survey"]
+    },
+    "poissonsurvey": {
+      "name": ["poisson-survey"],
+      "description": ["Poisson Regression with Survey Weights"],
+      "outcome": {
+        "modelingType": [""]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-poissonsurvey.html"],
+      "wrapper": ["poisson.survey"],
+      "tree": ["Zelig-poisson-survey", "Zelig-survey"]
+    },
+    "arima": {
+      "name": ["arima"],
+      "description": ["Autoregressive Moving-Average Models for Time-Series Data"],
+      "outcome": {
+        "modelingType": ["continuous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-arima.html"],
+      "wrapper": ["arima"],
+      "tree": ["Zelig-arima", "Zelig-timeseries"]
+    },
+    "ma": {
+      "name": ["ma"],
+      "description": ["Time-Series Model with Moving Average"],
+      "outcome": {
+        "modelingType": ["continuous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-ma.html"],
+      "wrapper": ["ma"],
+      "tree": ["Zelig-ma", "Zelig-timeseries"]
+    },    
+    "ar": {
+      "name": ["ar"],
+      "description": ["Time-Series Model with Autoregressive Disturbance"],
+      "outcome": {
+        "modelingType": ["continuous"]
+      },
+      "explanatory": {
+        "modelingType": ["continuous", "discrete", "nominal", "ordinal", "binary"]
+      },
+      "vignette.url": ["http://docs.zeligproject.org/en/latest/zelig-ar.html"],
+      "wrapper": ["ar"],
+      "tree": ["Zelig-ar", "Zelig-timeseries"]
+    }
+  }
+} 
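
This machine-readable model catalog can be inspected from R. A minimal sketch assuming the jsonlite package; the path is resolved with system.file(), so it works against the installed package:

    library(jsonlite)
    path <- system.file("JSON", "zelig5models.json", package = "Zelig")
    models <- fromJSON(path)$zelig5models
    names(models)          # all registered models
    models$ls$description  # "Least Squares Regression for Continuous ..."
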
diff --git a/inst/doc/gamma.pdf b/inst/doc/gamma.pdf
deleted file mode 100644
index 67b0e05..0000000
Binary files a/inst/doc/gamma.pdf and /dev/null differ
diff --git a/inst/doc/logit.pdf b/inst/doc/logit.pdf
deleted file mode 100644
index 62bc9bc..0000000
Binary files a/inst/doc/logit.pdf and /dev/null differ
diff --git a/inst/doc/ls.pdf b/inst/doc/ls.pdf
deleted file mode 100644
index 7c97903..0000000
Binary files a/inst/doc/ls.pdf and /dev/null differ
diff --git a/inst/doc/manual-bayes.R b/inst/doc/manual-bayes.R
deleted file mode 100644
index d0cd940..0000000
--- a/inst/doc/manual-bayes.R
+++ /dev/null
@@ -1,439 +0,0 @@
-### R code from vignette source 'manual-bayes.Rnw'
-
-###################################################
-### code chunk number 1: loadLibrary
-###################################################
-library(Zelig)
-library(MCMCpack)
-
-
-###################################################
-### code chunk number 2: BasicExample.data
-###################################################
- data(turnout)
-
-
-###################################################
-### code chunk number 3: BasicExample.zelig
-###################################################
- z.out <- zelig(vote ~ race + educate, model = "logit.bayes",
-                  data = turnout, verbose = FALSE)
-
-
-###################################################
-### code chunk number 4: BasicExample.geweke
-###################################################
- geweke.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 5: BasicExample.heidel
-###################################################
- heidel.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 6: BasicExample.raftery
-###################################################
- raftery.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 7: BasicExample.summary.zout
-###################################################
-summary(z.out)
-
-
-###################################################
-### code chunk number 8: BasicExample.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 9: BasicExample.sim
-###################################################
- s.out1 <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 10: BasicExample.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 11: FirstDifferences.setx.high
-###################################################
- x.high <- setx(z.out, educate = quantile(turnout$educate, prob = 0.75))
-
-
-###################################################
-### code chunk number 12: FirstDifferences.setx.low
-###################################################
-x.low <- setx(z.out, educate = quantile(turnout$educate, prob = 0.25))
-
-
-###################################################
-### code chunk number 13: FirstDifferences.sim
-###################################################
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 14: FirstDifferences.summary
-###################################################
-summary(s.out2)
-
-
-###################################################
-### code chunk number 15: BasicExample.data
-###################################################
- data(mexico)
-
-
-###################################################
-### code chunk number 16: BasicExample.zelig
-###################################################
- z.out <- zelig(vote88 ~ pristr + othcok + othsocok, model = "mlogit.bayes", 
-               data = mexico)
-
-
-###################################################
-### code chunk number 17: BasicExample.heidel
-###################################################
- heidel.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 18: BasicExample.raftery
-###################################################
-raftery.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 19: BasicExample.summary
-###################################################
-summary(z.out)
-
-
-###################################################
-### code chunk number 20: BasicExample.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 21: BasicExample.sim
-###################################################
- s.out1 <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 22: BasicExample.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 23: FirstDifferences.setx
-###################################################
- x.weak <- setx(z.out, pristr = 1)
- x.strong <- setx(z.out, pristr = 3)
-
-
-###################################################
-### code chunk number 24: FirstDifferences.sim
-###################################################
-s.out2 <- sim(z.out, x = x.strong, x1 = x.weak)
-
-
-###################################################
-### code chunk number 25: FirstDifferences.summary
-###################################################
-summary(s.out2)
-
-
-###################################################
-### code chunk number 26: BasicExample.data
-###################################################
- data(macro)
-
-
-###################################################
-### code chunk number 27: BasicExample.zelig
-###################################################
-z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.bayes",
-                  data = macro, verbose = FALSE)
-
-
-###################################################
-### code chunk number 28: BasicExample.geweke
-###################################################
- geweke.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 29: BasicExample.heidel
-###################################################
-heidel.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 30: BasicExample.raftery
-###################################################
-raftery.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 31: BasicExample.summary
-###################################################
-summary(z.out) 
-
-
-###################################################
-### code chunk number 32: BasicExample.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 33: BasicExample.sim
-###################################################
- s.out1 <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 34: BasicExample.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 35: FirstDifferences.setx
-###################################################
- x.high <- setx(z.out, trade = quantile(macro$trade, prob = 0.8))
- x.low <- setx(z.out, trade = quantile(macro$trade, prob = 0.2))
-
-
-###################################################
-### code chunk number 36: FirstDifferences.sim
-###################################################
- s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 37: FirstDifferences.summary.sim
-###################################################
- summary(s.out2)
-
-
-###################################################
-### code chunk number 38: BasicExample.data
-###################################################
- data(sanction)
-
-
-###################################################
-### code chunk number 39: BasicExample.factor
-###################################################
-sanction$ncost <- factor(sanction$ncost, ordered = TRUE,
-                         levels = c("net gain", "little effect",
-                         "modest loss", "major loss"))
-
-
-###################################################
-### code chunk number 40: BasicExample.zelig
-###################################################
- z.out <- zelig(ncost ~ mil + coop, model = "oprobit.bayes",
-                  data = sanction, verbose = FALSE)
-
-
-###################################################
-### code chunk number 41: BasicExample.heidel
-###################################################
-heidel.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 42: BasicExample.raftery
-###################################################
-raftery.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 43: BasicExample.summary
-###################################################
-summary(z.out) 
-
-
-###################################################
-### code chunk number 44: BasicExample.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 45: BasicExample.sim
-###################################################
- s.out1 <- sim(z.out, x = x.out)
- summary(s.out1)
-
-
-###################################################
-### code chunk number 46: FirstDifferences.setx
-###################################################
- x.high <- setx(z.out, mil = 1)
- x.low <- setx(z.out, mil = 0)
-
-
-###################################################
-### code chunk number 47: FirstDifferences.sim
-###################################################
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
- summary(s.out2)
-
-
-###################################################
-### code chunk number 48: BasicExample.data
-###################################################
- data(sanction)
-
-
-###################################################
-### code chunk number 49: BasicExample.zelig
-###################################################
- z.out <- zelig(num ~ target + coop, model = "poisson.bayes",
-                  data = sanction, verbose = FALSE)
-
-
-###################################################
-### code chunk number 50: BasicExample.geweke
-###################################################
- geweke.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 51: BasicExample.heidel
-###################################################
-heidel.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 52: BasicExample.raftery
-###################################################
-raftery.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 53: BasicExample.summary
-###################################################
-summary(z.out)
-
-
-###################################################
-### code chunk number 54: BasicExample.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 55: BasicExample.sim
-###################################################
- s.out1 <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 56: BasicExample.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 57: FirstDifferences.setx
-###################################################
- x.max <- setx(z.out, target = max(sanction$target))
- x.min <- setx(z.out, target = min(sanction$target))
-
-
-###################################################
-### code chunk number 58: FirstDifferences.sim
-###################################################
- s.out2 <- sim(z.out, x = x.max, x1 = x.min)
- summary(s.out2)
-
-
-###################################################
-### code chunk number 59: BasicExample.data
-###################################################
- data(turnout)
-
-
-###################################################
-### code chunk number 60: BasicExample.zelig
-###################################################
- z.out <- zelig(vote ~ race + educate, model = "probit.bayes",
-                  data = turnout, verbose = FALSE)
-
-
-###################################################
-### code chunk number 61: BasicExample.geweke
-###################################################
- geweke.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 62: BasicExample.heidel
-###################################################
- heidel.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 63: BasicExample.raftery
-###################################################
-raftery.diag(z.out$result$coefficients)
-
-
-###################################################
-### code chunk number 64: BasicExample.summary
-###################################################
-summary(z.out)
-
-
-###################################################
-### code chunk number 65: BasicExample.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 66: BasicExample.sim
-###################################################
- s.out1 <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 67: BasicExample.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 68: FirstDifferences.setx
-###################################################
- x.high <- setx(z.out, educate = quantile(turnout$educate, prob = 0.75))
- x.low <- setx(z.out, educate = quantile(turnout$educate, prob = 0.25))
-
-
-###################################################
-### code chunk number 69: FirstDifferences.sim
-###################################################
- s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 70: FirstDifferences.summary
-###################################################
-summary(s.out2)
-
-
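Every Bayesian example in this deleted vignette follows the same pattern: fit with zelig(), check chain convergence with the coda diagnostics (geweke.diag, heidel.diag, raftery.diag), and only then call setx()/sim(). A minimal sketch of gating the simulation step on the Heidelberger-Welch diagnostic, assuming coda is on the search path (it is once MCMCpack is loaded):

    # Only simulate quantities of interest once the chain looks stationary.
    hw <- heidel.diag(z.out$result$coefficients)
    if (all(hw[, "stest"] == 1, na.rm = TRUE)) {  # 1 = stationarity test passed
      s.out <- sim(z.out, x = setx(z.out))
      summary(s.out)
    } else {
      warning("chain not stationary; rerun zelig() with longer burnin/mcmc")
    }
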
diff --git a/inst/doc/manual-bayes.pdf b/inst/doc/manual-bayes.pdf
deleted file mode 100644
index 91b8f14..0000000
Binary files a/inst/doc/manual-bayes.pdf and /dev/null differ
diff --git a/inst/doc/manual-gee.R b/inst/doc/manual-gee.R
deleted file mode 100644
index 91727b5..0000000
--- a/inst/doc/manual-gee.R
+++ /dev/null
@@ -1,327 +0,0 @@
-### R code from vignette source 'manual-gee.Rnw'
-
-###################################################
-### code chunk number 1: loadLibrary
-###################################################
-library(Zelig)
-library(MCMCpack)
-
-
-###################################################
-### code chunk number 2: Example.data
-###################################################
-data(coalition)
-
-
-###################################################
-### code chunk number 3: Example.cluster
-###################################################
-coalition$cluster <- c(rep(c(1:62),5),rep(c(63),4))
-sorted.coalition <- coalition[order(coalition$cluster),]
-
-
-###################################################
-### code chunk number 4: Example.zelig
-###################################################
-z.out <- zelig(duration ~ fract + numst2, model = "gamma.gee",
-               id = "cluster", data = sorted.coalition,
-               robust = TRUE, corstr = "exchangeable")
-summary(z.out)
-
-
-###################################################
-### code chunk number 5: Example.setx
-###################################################
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-
-###################################################
-### code chunk number 6: Example.sim
-###################################################
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-summary(s.out)
-
-
-###################################################
-### code chunk number 7: ExamplePlot
-###################################################
-plot(s.out)
-
-
-###################################################
-### code chunk number 8: Example.data
-###################################################
-data(turnout)
-
-
-###################################################
-### code chunk number 9: Example.cluster
-###################################################
-turnout$cluster <- rep(c(1:200),10)
-
-
-###################################################
-### code chunk number 10: Example.sort
-###################################################
-sorted.turnout <- turnout[order(turnout$cluster),]
-
-
-###################################################
-### code chunk number 11: Example.zelig
-###################################################
-z.out1 <- zelig(vote ~ race + educate, model = "logit.gee",
-                id = "cluster", data = sorted.turnout,
-                robust = TRUE, corstr = "stat_M_dep", Mv = 3)
-
-
-###################################################
-### code chunk number 12: Example.setx
-###################################################
-x.out1 <- setx(z.out1)
-
-
-###################################################
-### code chunk number 13: Example.sim
-###################################################
-s.out1 <- sim(z.out1, x = x.out1)
-
-
-###################################################
-### code chunk number 14: Example.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 15: ExamplePlot
-###################################################
-plot(s.out1)
-
-
-###################################################
-### code chunk number 16: FirstDifference.setx
-###################################################
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-
-
-###################################################
-### code chunk number 17: FirstDifference.sim
-###################################################
-s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 18: FirstDifference.summary.sim
-###################################################
-summary(s.out2)
-
-
-###################################################
-### code chunk number 19: FirstDifferencePlot
-###################################################
-plot(s.out2)
-
-
-###################################################
-### code chunk number 20: Example2.corr
-###################################################
-corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
-diag(corr.mat) <- 1
-
-
-###################################################
-### code chunk number 21: Example2.zelig
-###################################################
-z.out2 <- zelig(vote ~ race + educate, model = "logit.gee",
-                id = "cluster", data = sorted.turnout,
-                robust = TRUE, corstr = "fixed", R = corr.mat)
-
-
-###################################################
-### code chunk number 22: Example2.summary
-###################################################
-summary(z.out2)
-
-
-###################################################
-### code chunk number 23: Example.data
-###################################################
-data(macro)
-
-
-###################################################
-### code chunk number 24: Example.zelig
-###################################################
-z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.gee",
-               id = "country", data = macro,
-               robust = TRUE, corstr = "AR-M", Mv = 1)
-summary(z.out)
-
-
-###################################################
-### code chunk number 25: Example.setx
-###################################################
-x.high <- setx(z.out, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z.out, trade = quantile(macro$trade, 0.2))
-
-
-###################################################
-### code chunk number 26: Example.sim
-###################################################
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 27: Example.summary.sim
-###################################################
-summary(s.out)
-
-
-###################################################
-### code chunk number 28: ExamplePlot
-###################################################
-plot(s.out)
-
-
-###################################################
-### code chunk number 29: Example.data
-###################################################
-data(sanction)
-
-
-###################################################
-### code chunk number 30: Example.cluster
-###################################################
-sanction$cluster <- c(rep(c(1:15),5),rep(c(16),3))
-
-
-###################################################
-### code chunk number 31: Example.sort
-###################################################
-sorted.sanction <- sanction[order(sanction$cluster),]
-
-
-###################################################
-### code chunk number 32: Example.zelig
-###################################################
-z.out <- zelig(num ~ target + coop, model = "poisson.gee",
-               id = "cluster", data = sorted.sanction,
-               robust = TRUE, corstr = "exchangeable")
-summary(z.out)
-
-
-###################################################
-### code chunk number 33: Example.setx
-###################################################
-x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 34: Example.sim
-###################################################
-s.out <- sim(z.out, x = x.out)
-summary(s.out)
-
-
-###################################################
-### code chunk number 35: ExamplePlot
-###################################################
-plot(s.out)
-
-
-###################################################
-### code chunk number 36: Example.data
-###################################################
-data(turnout)
-
-
-###################################################
-### code chunk number 37: Example.cluster
-###################################################
-turnout$cluster <- rep(c(1:200),10)
-
-
-###################################################
-### code chunk number 38: Example.sort
-###################################################
-sorted.turnout <- turnout[order(turnout$cluster),]
-
-
-###################################################
-### code chunk number 39: Example.zelig
-###################################################
-z.out1 <- zelig(vote ~ race + educate, model = "probit.gee",
-                id = "cluster", data = sorted.turnout,
-                robust = TRUE, corstr = "stat_M_dep", Mv = 3)
-
-
-###################################################
-### code chunk number 40: Example.setx
-###################################################
-x.out1 <- setx(z.out1)
-
-
-###################################################
-### code chunk number 41: Example.sim
-###################################################
-s.out1 <- sim(z.out1, x = x.out1)
-
-
-###################################################
-### code chunk number 42: Example.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 43: ExamplePlot
-###################################################
-plot(s.out1)
-
-
-###################################################
-### code chunk number 44: manual-gee.Rnw:1346-1347
-###################################################
-options(width=80)
-
-
-###################################################
-### code chunk number 45: FirstDifference.setx
-###################################################
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-
-
-###################################################
-### code chunk number 46: FirstDifference.sim
-###################################################
-s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 47: FirstDifference.summary.sim
-###################################################
-summary(s.out2)
-
-
-###################################################
-### code chunk number 48: FirstDifferencePlot
-###################################################
-plot(s.out2)
-
-
-###################################################
-### code chunk number 49: manual-gee.Rnw:1373-1374
-###################################################
-options(width=75)
-
-
-###################################################
-### code chunk number 50: Example2.corr
-###################################################
-corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
-diag(corr.mat) <- 1
-
-
-###################################################
-### code chunk number 51: Example2.zelig
-###################################################
-z.out2 <- zelig(vote ~ race + educate, model = "probit.gee",
-                id = "cluster", data = sorted.turnout,
-                robust = TRUE, corstr = "fixed", R = corr.mat)
-
-
-###################################################
-### code chunk number 52: Example2.summary
-###################################################
-summary(z.out2)
-
-
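The GEE examples above all construct a cluster identifier and then sort the data on it before fitting, because the underlying gee estimator expects each cluster's rows to be contiguous. A minimal sketch of that preparation step, using the turnout variables from the examples:

    # gee expects observations grouped by cluster id, so build and sort first.
    turnout$cluster <- rep(1:200, 10)                    # 2000 rows, 200 clusters
    sorted.turnout  <- turnout[order(turnout$cluster), ]
    # any working correlation can then be requested via corstr =
    # "exchangeable", "AR-M" (with Mv), "stat_M_dep" (with Mv), or "fixed" (with R)
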
diff --git a/inst/doc/manual-gee.pdf b/inst/doc/manual-gee.pdf
deleted file mode 100644
index c50e2e2..0000000
Binary files a/inst/doc/manual-gee.pdf and /dev/null differ
diff --git a/inst/doc/manual.R b/inst/doc/manual.R
deleted file mode 100644
index d6174cd..0000000
--- a/inst/doc/manual.R
+++ /dev/null
@@ -1,377 +0,0 @@
-### R code from vignette source 'manual.Rnw'
-
-###################################################
-### code chunk number 1: loadLibrary
-###################################################
-library(Zelig)
-
-
-###################################################
-### code chunk number 2: Example.data
-###################################################
- data(coalition)
-
-
-###################################################
-### code chunk number 3: Example.zelig
-###################################################
- z.out <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
-
-
-###################################################
-### code chunk number 4: Example.summary
-###################################################
- summary(z.out)
-
-
-###################################################
-### code chunk number 5: Example.setx
-###################################################
- x.low <- setx(z.out, numst2 = 0)
- x.high <- setx(z.out, numst2 = 1)
-
-
-###################################################
-### code chunk number 6: Example.sim
-###################################################
- s.out <- sim(z.out, x = x.low, x1 = x.high)
-
-
-###################################################
-### code chunk number 7: Example.summary
-###################################################
-summary(s.out)
-
-
-###################################################
-### code chunk number 8: gamma-ExamplePlot
-###################################################
- plot(s.out)
-
-
-###################################################
-### code chunk number 9: Example.data
-###################################################
- data(turnout)
-
-
-###################################################
-### code chunk number 10: Example.zelig
-###################################################
- z.out1 <- zelig(vote ~ age + race, model = "logit", data = turnout)
-
-
-###################################################
-### code chunk number 11: Example.setx
-###################################################
- x.out1 <- setx(z.out1, age = 36, race = "white")
-
-
-###################################################
-### code chunk number 12: Example.sim
-###################################################
- s.out1 <- sim(z.out1, x = x.out1)
-
-
-###################################################
-### code chunk number 13: Example.summary
-###################################################
- summary(s.out1)
-
-
-###################################################
-### code chunk number 14: logit-ExamplePlot
-###################################################
- plot(s.out1)
-
-
-###################################################
-### code chunk number 15: FirstDifferences.setx
-###################################################
- z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
- x.high <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.75))
- x.low <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.25))
-
-
-###################################################
-### code chunk number 16: FirstDifferences.sim
-###################################################
- s.out2 <- sim(z.out2, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 17: FirstDifferences.summary
-###################################################
- summary(s.out2)
-
-
-###################################################
-### code chunk number 18: logit-FirstDifferencesPlot
-###################################################
- plot(s.out2)
-
-
-###################################################
-### code chunk number 19: ROC.zelig
-###################################################
- z.out1 <- zelig(vote ~ race + educate + age, model = "logit", 
-                  data = turnout)
- z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
-
-
-###################################################
-### code chunk number 20: logit-ROCPlot
-###################################################
-rocplot(z.out1$y, z.out2$y, fitted(z.out1), fitted(z.out2))
-
-
-###################################################
-### code chunk number 21: Examples.data
-###################################################
- data(macro)
-
-
-###################################################
-### code chunk number 22: Examples.zelig
-###################################################
- z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
-
-
-###################################################
-### code chunk number 23: Examples.summary
-###################################################
- summary(z.out1)
-
-
-###################################################
-### code chunk number 24: Examples.setx
-###################################################
- x.high <- setx(z.out1, trade = quantile(macro$trade, 0.8))
- x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
-
-
-###################################################
-### code chunk number 25: Examples.sim
-###################################################
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 26: Examples.summary.sim
-###################################################
-summary(s.out1)
-
-
-###################################################
-### code chunk number 27: Dummy.zelig
-###################################################
- z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(country), 
-                  model = "ls", data = macro)
-
-
-###################################################
-### code chunk number 28: Dummy.setx
-###################################################
- x.US <- setx(z.out2, country = "United States")
- x.Japan <- setx(z.out2, country = "Japan")
-
-
-###################################################
-### code chunk number 29: Dummy.sim
-###################################################
- s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
-
-
-###################################################
-### code chunk number 30: Example.data
-###################################################
- data(sanction)
-
-
-###################################################
-### code chunk number 31: Example.zelig
-###################################################
- z.out <- zelig(num ~ target + coop, model = "negbinom", data = sanction)
-
-
-###################################################
-### code chunk number 32: Example.summary
-###################################################
-summary(z.out)
-
-
-###################################################
-### code chunk number 33: Example.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 34: Example.sim
-###################################################
- s.out <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 35: Example.summary.sim
-###################################################
-summary(s.out)
-
-
-###################################################
-### code chunk number 36: negbinom-Example1Plot
-###################################################
- plot(s.out)
-
-
-###################################################
-### code chunk number 37: Examples.data
-###################################################
- data(macro)
-
-
-###################################################
-### code chunk number 38: Examples.zelig
-###################################################
- z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "normal", 
-                  data = macro)
-
-
-###################################################
-### code chunk number 39: Examples.summary
-###################################################
- summary(z.out1)
-
-
-###################################################
-### code chunk number 40: Examples.setx
-###################################################
- x.high <- setx(z.out1, trade = quantile(macro$trade, 0.8))
- x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
-
-
-###################################################
-### code chunk number 41: Examples.sim
-###################################################
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-
-
-###################################################
-### code chunk number 42: Examples.summary.sim
-###################################################
- summary(s.out1)
-
-
-###################################################
-### code chunk number 43: normal-ExamplesPlot
-###################################################
- plot(s.out1)
-
-
-###################################################
-### code chunk number 44: Dummy.zelig
-###################################################
- z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(year) 
-                  + as.factor(country), model = "normal", data = macro)
-
-
-###################################################
-### code chunk number 45: Dummy.setx
-###################################################
-### x.US <- try(setx(z.out2, country = "United States"),silent=T)
-### x.Japan <- try(setx(z.out2, country = "Japan"),silent=T)
-
-
-###################################################
-### code chunk number 46: Dummy.sim
-###################################################
-### s.out2 <- try(sim(z.out2, x = x.US, x1 = x.Japan), silent=T)
-
-
-###################################################
-### code chunk number 47: Dummy.summary
-###################################################
-###try(summary(s.out2))
-
-
-###################################################
-### code chunk number 48: Example.data
-###################################################
- data(sanction)
-
-
-###################################################
-### code chunk number 49: Example.zelig
-###################################################
- z.out <- zelig(num ~ target + coop, model = "poisson", data = sanction)
-
-
-###################################################
-### code chunk number 50: Example.summary
-###################################################
-summary(z.out)
-
-
-###################################################
-### code chunk number 51: Example.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 52: Example.sim
-###################################################
- s.out <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 53: Example.summary.sim
-###################################################
-summary(s.out)
-
-
-###################################################
-### code chunk number 54: poisson-ExamplePlot
-###################################################
- plot(s.out)
-
-
-###################################################
-### code chunk number 55: Examples.data
-###################################################
- data(turnout)
-
-
-###################################################
-### code chunk number 56: Examples.zelig
-###################################################
- z.out <- zelig(vote ~ race + educate, model = "probit", data = turnout)
-
-
-###################################################
-### code chunk number 57: Examples.summary
-###################################################
- summary(z.out)
-
-
-###################################################
-### code chunk number 58: Examples.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 59: Examples.sim
-###################################################
-s.out <- sim(z.out, x = x.out)
-
-
-###################################################
-### code chunk number 60: Examples.summary.sim
-###################################################
-summary(s.out)
-
-
diff --git a/inst/doc/manual.pdf b/inst/doc/manual.pdf
deleted file mode 100644
index 0d9cf40..0000000
Binary files a/inst/doc/manual.pdf and /dev/null differ
diff --git a/inst/doc/negbinom.pdf b/inst/doc/negbinom.pdf
deleted file mode 100644
index 7ccabd9..0000000
Binary files a/inst/doc/negbinom.pdf and /dev/null differ
diff --git a/inst/doc/normal.pdf b/inst/doc/normal.pdf
deleted file mode 100644
index c793c06..0000000
Binary files a/inst/doc/normal.pdf and /dev/null differ
diff --git a/inst/doc/parse.formula.pdf b/inst/doc/parse.formula.pdf
deleted file mode 100644
index b7bcccf..0000000
Binary files a/inst/doc/parse.formula.pdf and /dev/null differ
diff --git a/inst/doc/poisson.pdf b/inst/doc/poisson.pdf
deleted file mode 100644
index a65ba64..0000000
Binary files a/inst/doc/poisson.pdf and /dev/null differ
diff --git a/inst/doc/probit.pdf b/inst/doc/probit.pdf
deleted file mode 100644
index a2bc6bd..0000000
Binary files a/inst/doc/probit.pdf and /dev/null differ
diff --git a/inst/doc/twosls.R b/inst/doc/twosls.R
deleted file mode 100644
index 8421f39..0000000
--- a/inst/doc/twosls.R
+++ /dev/null
@@ -1,56 +0,0 @@
-### R code from vignette source 'twosls.Rnw'
-
-###################################################
-### code chunk number 1: loadLibrary
-###################################################
-library(Zelig)
-
-
-###################################################
-### code chunk number 2: Inputs.list
-###################################################
- fml <- list("mu"   = Y ~ X + Z,
-             "inst" = Z ~ W + X)
-
-
-###################################################
-### code chunk number 3: Examples.data
-###################################################
- data(klein)
-
-
-###################################################
-### code chunk number 4: Examples.list
-###################################################
- formula <- list(mu1  = C ~ Wtot + P + P1,
-                 mu2  = I ~ P + P1 + K1,
-                 mu3  = Wp ~ X + X1 + Tm,
-                 inst = ~ P1 + K1 + X1 + Tm + Wg + G)
-
-
-###################################################
-### code chunk number 5: Examples.zelig
-###################################################
- z.out <- zelig(formula = formula, model = "twosls", data = klein)
- summary(z.out)
-
-
-###################################################
-### code chunk number 6: Examples.setx
-###################################################
- x.out <- setx(z.out)
-
-
-###################################################
-### code chunk number 7: Examples.sim
-###################################################
-s.out <- sim(z.out, x = x.out)
- summary(s.out)
-
-
-###################################################
-### code chunk number 8: Examplestwosls
-###################################################
-plot(s.out)
-
-
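The twosls wrapper takes its formulas as a named list: one entry per structural equation (mu1, mu2, ...) plus a one-sided inst formula naming the instruments, as the Klein example above shows. A sketch with hypothetical variables (y1, y2 endogenous; w1, w2 instruments):

    formula <- list(mu1  = y1 ~ y2 + w1,
                    mu2  = y2 ~ y1 + w2,
                    inst = ~ w1 + w2)
    # z.out <- zelig(formula = formula, model = "twosls", data = mydata)
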
diff --git a/inst/doc/twosls.pdf b/inst/doc/twosls.pdf
deleted file mode 100644
index 8856177..0000000
Binary files a/inst/doc/twosls.pdf and /dev/null differ
diff --git a/inst/templates/DESCRIPTION b/inst/templates/DESCRIPTION
deleted file mode 100644
index a1dd7a6..0000000
--- a/inst/templates/DESCRIPTION
+++ /dev/null
@@ -1,11 +0,0 @@
-Package:
-Version: 0.1
-Date:
-Title: A Zelig Model
-Author:
-Maintainer:
-Depends:
-Description: A Zelig Model
-License: GPL (>=2)
-URL:
-Packaged: 
diff --git a/inst/templates/PACKAGE.R b/inst/templates/PACKAGE.R
deleted file mode 100644
index 6570107..0000000
--- a/inst/templates/PACKAGE.R
+++ /dev/null
@@ -1,20 +0,0 @@
-#' \\package\\
-#' 
-#' \tabular{ll}{
-#'   Package: \tab \\package\\\cr
-#'   Version: \tab 0.1\cr
-#'   Date: \tab 2011-04-25\cr
-#'   Depends: \tab \\depends\\\cr
-#'   License: \tab GPL version 2 or newer\cr
-#' }
-#'
-#' Edit this description
-#'
-#' @name \\package\\-package
-#' @aliases \\package\\-package \\package\\
-#' @docType package
-#' @importFrom Zelig describe param qi
-#' @author \\author\\
-#' @keywords package
-NULL
-
diff --git a/inst/templates/ZELIG.README b/inst/templates/ZELIG.README
deleted file mode 100644
index e69de29..0000000
diff --git a/inst/templates/describe.R b/inst/templates/describe.R
deleted file mode 100644
index 80db33c..0000000
--- a/inst/templates/describe.R
+++ /dev/null
@@ -1,10 +0,0 @@
-#' Describe the \\model\\ Zelig Model
-#' @param ... ignored parameters
-#' @return a list specifying author, title, etc. information
-#' @export
-describe.\\model\\ <- function(...) {
-  list(
-       authors = "",
-       text = ""
-       )
-}
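For orientation, a hypothetical filled-in version of this template for a model named mymodel (all values illustrative):

    describe.mymodel <- function(...) {
      list(
           authors = "Jane Doe",               # hypothetical author
           text    = "An Example Zelig Model"
           )
    }
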
diff --git a/inst/templates/param.R b/inst/templates/param.R
deleted file mode 100644
index 691dc4c..0000000
--- a/inst/templates/param.R
+++ /dev/null
@@ -1,13 +0,0 @@
-#' Extract Samples from a Distribution in Order to Pass Them to the \code{qi} Function
-#' (this is primarily a helper function for the \\model\\ model)
-#' @param obj a zelig object
-#' @param num an integer specifying the number of simulations to compute
-#' @param ... additional parameters
-#' @return a list specifying link, link-inverse, random samples, and ancillary parameters
-#' @export
-param.\\model\\ <- function(obj, num=1000, ...) {
-  list(
-       coef = NULL,
-       linkinv = NULL
-       )
-}
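A hypothetical completion of this template: draw coefficient simulations from the fit's asymptotic normal distribution and supply a logit inverse link. It assumes MASS is available and that GetObject (documented below) returns a fit with coef() and vcov() methods:

    param.mymodel <- function(obj, num = 1000, ...) {
      fit <- GetObject(obj)                    # the wrapped fitted model
      list(
           coef    = MASS::mvrnorm(num, mu = coef(fit), Sigma = vcov(fit)),
           linkinv = function(eta) 1 / (1 + exp(-eta))
           )
    }
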
diff --git a/inst/templates/qi.R b/inst/templates/qi.R
deleted file mode 100644
index e23b68d..0000000
--- a/inst/templates/qi.R
+++ /dev/null
@@ -1,16 +0,0 @@
-#' Compute Quantities of Interest for the Zelig Model \\model\\
-#' @param obj a zelig object
-#' @param x a setx object
-#' @param x1 an optional setx object
-#' @param y ...
-#' @param num an integer specifying the number of simulations to compute
-#' @param param a parameters object
-#' @return a list of key-value pairs specifying pairing titles of quantities of interest
-#'         with their simulations
-#' @export
-qi.\\model\\ <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
-
-  list(
-       "Expected Value: E(Y|X)" = NA
-       )
-}
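And a hypothetical qi counterpart, consistent with the param sketch above (param is taken to be that plain list; as.matrix() on a setx object is assumed to yield the 1 x k design row):

    qi.mymodel <- function(obj, x = NULL, x1 = NULL, y = NULL,
                           num = 1000, param = NULL) {
      eta <- param$coef %*% t(as.matrix(x))    # num x 1 linear predictors
      list("Expected Value: E(Y|X)" = param$linkinv(eta))
    }
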
diff --git a/inst/templates/zelig2.R b/inst/templates/zelig2.R
deleted file mode 100644
index f341dc0..0000000
--- a/inst/templates/zelig2.R
+++ /dev/null
@@ -1,14 +0,0 @@
-#' Interface between the Zelig Model \\model\\ and 
-#' the Pre-existing Model-fitting Method
-#' @param formula a formula
-#' @param ... additional parameters
-#' @param data a data.frame 
-#' @return a list specifying '.function'
-#' @export
-zelig2\\model\\ <- function (formula, ..., data) {
-  list(
-       .function = "",
-       formula = formula,
-       data = data
-       )
-}
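A hypothetical bridge that hands the formula and data to stats::glm; entries beyond .function, formula, and data are passed through as arguments to the external fitting call:

    zelig2mymodel <- function (formula, ..., data) {
      list(
           .function = "glm",                  # name of the external fitter
           formula   = formula,
           family    = binomial(link = "logit"),
           data      = data
           )
    }
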
diff --git a/man/GetObject.Rd b/man/GetObject.Rd
deleted file mode 100644
index 79b8337..0000000
--- a/man/GetObject.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{GetObject}
-\alias{GetObject}
-\title{Extract the fitted model object from the Zelig object}
-\usage{
-  GetObject(obj)
-}
-\arguments{
-  \item{obj}{an object of type `zelig'}
-}
-\value{
-  the fitted model object
-}
-\description{
-  Extract the fitted model object from the Zelig object
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/GetSlot.Rd b/man/GetSlot.Rd
deleted file mode 100644
index f08ade6..0000000
--- a/man/GetSlot.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{GetSlot}
-\alias{GetSlot}
-\title{Generic method for extracting variables from both
-S3 and S4 fitted model objects}
-\usage{
-  GetSlot(obj, key, ...)
-}
-\arguments{
-  \item{obj}{an object of type `zelig'}
-
-  \item{key}{a character-string specifying the name of the
-  variable to extract}
-
-  \item{...}{typically ignored parameters}
-}
-\value{
-  the value of that extracted object or NULL
-}
-\description{
-  Generic method for extracting variables from both S3 and
-  S4 fitted model objects
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/GetSlot.zelig.Rd b/man/GetSlot.zelig.Rd
deleted file mode 100644
index e3bfa71..0000000
--- a/man/GetSlot.zelig.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-\name{GetSlot.zelig}
-\alias{GetSlot.zelig}
-\title{Return a Value from a \code{zelig} Fitted Model}
-\usage{
-  \method{GetSlot}{zelig}(obj, key, ...)
-}
-\arguments{
-  \item{obj}{a \code{zelig} object}
-
-  \item{key}{a character-string specifying which value
-  to extract from the fitted model object}
-
-  \item{...}{subsequent values to extract from the fitted
-  model object}
-}
-\value{
-  values of the specified keys
-}
-\description{
-  Returns a value from the result of a model fitting
-  function
-}
-\note{
-  This function is primarily used by Zelig developers
-  within \code{qi} functions
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
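As the note says, GetSlot is mostly seen inside qi functions; a brief sketch of typical use (z.out is any fitted zelig object):

    theta <- GetSlot(z.out, "coefficients")  # works for S3 and S4 fits alike
    dfres <- GetSlot(z.out, "df.residual")
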
diff --git a/man/MCMChook.Rd b/man/MCMChook.Rd
deleted file mode 100644
index e1fffb6..0000000
--- a/man/MCMChook.Rd
+++ /dev/null
@@ -1,35 +0,0 @@
-\name{MCMChook}
-\alias{MCMChook}
-\title{Hook to Clean-up MCMC Objects}
-\usage{
-  MCMChook(obj, model.call, zelig.call, seed = NULL, ..., data = NULL)
-}
-\arguments{
-  \item{obj}{the fitted model object (in this case a
-  \code{mcmc} object).}
-
-  \item{model.call}{the call made to the external model}
-
-  \item{zelig.call}{the actual call to zelig itself}
-
-  \item{seed}{a seed for the MCMC algorithm}
-
-  \item{...}{ignored parameters}
-
-  \item{data}{the data.frame being used to fit the statistical model}
-}
-\value{
-  an object usable by Zelig
-}
-\description{
-  This method gives valid methods to the resulting MCMC
-  object so that it can be used with Zelig.
-}
-\note{
-  This function is used internally by the ZeligBayesian
-  package.
-}
-\author{
-  Olivia Lau, Kosuke Imai, Gary King and Matt Owen
-}
-
diff --git a/man/Max.Rd b/man/Max.Rd
deleted file mode 100644
index 11aa02e..0000000
--- a/man/Max.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{Max}
-\alias{Max}
-\title{Compute the Maximum Value of a Vector}
-\usage{
-  Max(x, na.rm = NULL)
-}
-\arguments{
-  \item{x}{a numeric or ordered vector}
-
-  \item{na.rm}{ignored}
-}
-\value{
-  the maximum value of the vector
-}
-\description{
-  Compute the Maximum Value of a Vector
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/McmcHookFactor.Rd b/man/McmcHookFactor.Rd
deleted file mode 100644
index dd7e5e6..0000000
--- a/man/McmcHookFactor.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-\name{McmcHookFactor}
-\alias{McmcHookFactor}
-\title{Hook to Clean-up MCMC Factor Object}
-\usage{
-  McmcHookFactor(obj, model.call, zelig.call, seed = NULL,
-    ...)
-}
-\arguments{
-  \item{obj}{the fitted model object (in this case a
-  \code{mcmc} object).}
-
-  \item{model.call}{the call made to the external model}
-
-  \item{zelig.call}{the actual call to zelig itself}
-
-  \item{seed}{a seed for the MCMC algorithm}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  an object usable by Zelig
-}
-\description{
-  This method gives valid methods to the resulting MCMC
-  object so that it can be used with Zelig.
-}
-\note{
-  This function is used internally by the ZeligBayesian
-  package.
-}
-\author{
-  Olivia Lau, Kosuke Imai, Gary King and Matt Owen
-}
-
diff --git a/man/Median.Rd b/man/Median.Rd
index fa9a892..9a0b51c 100644
--- a/man/Median.Rd
+++ b/man/Median.Rd
@@ -1,21 +1,23 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
 \name{Median}
 \alias{Median}
 \title{Compute the Statistical Median of a Vector}
 \usage{
-  Median(x, na.rm = NULL)
+Median(x, na.rm = NULL)
 }
 \arguments{
-  \item{x}{a vector of numeric or ordered values}
+\item{x}{a vector of numeric or ordered values}
 
-  \item{na.rm}{ignored}
+\item{na.rm}{ignored}
 }
 \value{
-  the median of the vector
+the median of the vector
 }
 \description{
-  Compute the Statistical Median of a Vector
+Compute the Statistical Median of a Vector
 }
 \author{
-  Matt Owen \email{mowen at iq.harvard.edu}
+Matt Owen \email{mowen at iq.harvard.edu}
 }
 
diff --git a/man/Min.Rd b/man/Min.Rd
deleted file mode 100644
index cb76236..0000000
--- a/man/Min.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{Min}
-\alias{Min}
-\title{Compute the Minimum Value of a Vector}
-\usage{
-  Min(x, na.rm = NULL)
-}
-\arguments{
-  \item{x}{a vector of numeric or ordered values}
-
-  \item{na.rm}{ignored}
-}
-\value{
-  the minimum value of the vector
-}
-\description{
-  Compute the Minimum Value of a Vector
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/Mode.Rd b/man/Mode.Rd
index ec705b9..d567f40 100644
--- a/man/Mode.Rd
+++ b/man/Mode.Rd
@@ -1,20 +1,23 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
 \name{Mode}
 \alias{Mode}
+\alias{mode}
 \title{Compute the Statistical Mode of a Vector}
 \usage{
-  Mode(x)
+Mode(x)
 }
 \arguments{
-  \item{x}{a vector of numeric, factor, or ordered values}
+\item{x}{a vector of numeric, factor, or ordered values}
 }
 \value{
-  the statistical mode of the vector. If two modes exist,
-  one is randomly selected (by design)
+the statistical mode of the vector. If two modes exist, one is
+  randomly selected (by design)
 }
 \description{
-  Compute the Statistical Mode of a Vector
+Compute the Statistical Mode of a Vector
 }
 \author{
-  Matt Owen \email{mowen at iq.harvard.edu}
+Matt Owen \email{mowen at iq.harvard.edu}
 }
 
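These wrappers exist because setx needs a sensible central value for every covariate type: the mean works for numerics, but factors need Mode and ordered variables need Median. A small illustration with made-up values:

    x <- factor(c("white", "white", "others"))
    Mode(x)       # "white"; with two modes, one is picked at random by design
    o <- ordered(c("low", "high", "high"), levels = c("low", "high"))
    Median(o)     # "high"
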
diff --git a/man/TexCite.Rd b/man/TexCite.Rd
deleted file mode 100644
index 039fb7b..0000000
--- a/man/TexCite.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-\name{TexCite}
-\alias{TexCite}
-\title{Get a TeX-style Citation}
-\usage{
-  TexCite(model)
-}
-\arguments{
-  \item{model}{a character-string specifying the name of
-  the Zelig model of which to describe in TeX-style}
-}
-\value{
-  a string to be rendered as part of a LaTeX-style document
-}
-\description{
-  Get a TeX-style Citation
-}
-
diff --git a/man/Zelig-ar-class.Rd b/man/Zelig-ar-class.Rd
new file mode 100644
index 0000000..0c43dc3
--- /dev/null
+++ b/man/Zelig-ar-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-ar.R
+\docType{class}
+\name{Zelig-ar-class}
+\alias{Zelig-ar-class}
+\alias{zar}
+\title{Time-Series Model with Autoregressive Disturbance}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-ar.html}
+}
+
diff --git a/man/Zelig-arima-class.Rd b/man/Zelig-arima-class.Rd
new file mode 100644
index 0000000..1695edc
--- /dev/null
+++ b/man/Zelig-arima-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-arima.R
+\docType{class}
+\name{Zelig-arima-class}
+\alias{Zelig-arima-class}
+\alias{zarima}
+\title{Autoregressive and Moving-Average Models with Integration for Time-Series Data}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-arima.html}
+}
+
diff --git a/man/Zelig-bayes-class.Rd b/man/Zelig-bayes-class.Rd
new file mode 100644
index 0000000..ebdede3
--- /dev/null
+++ b/man/Zelig-bayes-class.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-bayes.R
+\docType{class}
+\name{Zelig-bayes-class}
+\alias{Zelig-bayes-class}
+\alias{zbayes}
+\title{Bayes Model object for inheritance across models in Zelig}
+\description{
+Bayes Model object for inheritance across models in Zelig
+}
+\section{Methods}{
+
+\describe{
+\item{\code{getcoef()}}{Get estimated model coefficients}
+
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-binchoice-class.Rd b/man/Zelig-binchoice-class.Rd
new file mode 100644
index 0000000..61f624e
--- /dev/null
+++ b/man/Zelig-binchoice-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-binchoice.R
+\docType{class}
+\name{Zelig-binchoice-class}
+\alias{Zelig-binchoice-class}
+\alias{zbinchoice}
+\title{Binary Choice object for inheritance across models in Zelig}
+\description{
+Binary Choice object for inheritance across models in Zelig
+}
+
diff --git a/man/Zelig-binchoice-gee-class.Rd b/man/Zelig-binchoice-gee-class.Rd
new file mode 100644
index 0000000..ed5909f
--- /dev/null
+++ b/man/Zelig-binchoice-gee-class.Rd
@@ -0,0 +1,13 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-binchoice-gee.R
+\docType{class}
+\name{Zelig-binchoice-gee-class}
+\alias{Zelig-binchoice-gee-class}
+\alias{zbinchoicegee}
+\title{Object for Binary Choice outcomes in Generalized Estimating Equations 
+for inheritance across models in Zelig}
+\description{
+Object for Binary Choice outcomes in Generalized Estimating Equations 
+for inheritance across models in Zelig
+}
+
diff --git a/man/Zelig-binchoice-survey-class.Rd b/man/Zelig-binchoice-survey-class.Rd
new file mode 100644
index 0000000..d59164f
--- /dev/null
+++ b/man/Zelig-binchoice-survey-class.Rd
@@ -0,0 +1,13 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-binchoice-survey.R
+\docType{class}
+\name{Zelig-binchoice-survey-class}
+\alias{Zelig-binchoice-survey-class}
+\alias{zbinchoicesurvey}
+\title{Object for Binary Choice outcomes with Survey Weights
+for inheritance across models in Zelig}
+\description{
+Object for Binary Choice outcomes with Survey Weights
+for inheritance across models in Zelig
+}
+
diff --git a/man/Zelig-class.Rd b/man/Zelig-class.Rd
new file mode 100644
index 0000000..3d60f52
--- /dev/null
+++ b/man/Zelig-class.Rd
@@ -0,0 +1,136 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{class}
+\name{Zelig-class}
+\alias{Zelig-class}
+\alias{z}
+\title{Zelig reference class}
+\description{
+Zelig website: \url{http://zeligproject.org/}
+}
+\section{Fields}{
+
+\describe{
+\item{\code{fn}}{R function to call to wrap}
+
+\item{\code{formula}}{Zelig formula}
+
+\item{\code{weights}}{forthcoming}
+
+\item{\code{name}}{name of the Zelig model}
+
+\item{\code{data}}{data frame or matrix}
+
+\item{\code{by}}{split the data by factors}
+
+\item{\code{mi}}{work with imputed dataset}
+
+\item{\code{idx}}{model index}
+
+\item{\code{zelig.call}}{Zelig function call}
+
+\item{\code{model.call}}{wrapped function call}
+
+\item{\code{zelig.out}}{estimated zelig model(s)}
+
+\item{\code{setx.out}}{set values}
+
+\item{\code{setx.labels}}{pretty-print qi}
+
+\item{\code{bsetx}}{is x set?}
+
+\item{\code{bsetx1}}{is x1 set?}
+
+\item{\code{bsetrange}}{is range set?}
+
+\item{\code{bsetrange1}}{is range1 set?}
+
+\item{\code{range}}{range}
+
+\item{\code{range1}}{range1}
+
+\item{\code{test.statistics}}{list of test statistics}
+
+\item{\code{sim.out}}{simulated qi's}
+
+\item{\code{simparam}}{simulated parameters}
+
+\item{\code{num}}{number of simulations}
+
+\item{\code{authors}}{Zelig model authors}
+
+\item{\code{zeligauthors}}{Zelig authors}
+
+\item{\code{modelauthors}}{wrapped model authors}
+
+\item{\code{packageauthors}}{wrapped package authors}
+
+\item{\code{refs}}{citation information}
+
+\item{\code{year}}{year the model was released}
+
+\item{\code{description}}{model description}
+
+\item{\code{url}}{model URL}
+
+\item{\code{url.docs}}{model documentation URL}
+
+\item{\code{category}}{model category}
+
+\item{\code{vignette.url}}{vignette URL}
+
+\item{\code{json}}{JSON export}
+
+\item{\code{ljson}}{JSON export}
+
+\item{\code{outcome}}{JSON export}
+
+\item{\code{wrapper}}{JSON export}
+
+\item{\code{explanatory}}{JSON export}
+
+\item{\code{mcunit.test}}{unit testing}
+
+\item{\code{with.feedback}}{Feedback}
+}}
+\section{Methods}{
+
+\describe{
+\item{\code{ATT(treatment, treated = 1, quietly = TRUE, num = NULL)}}{Generic Method for Computing Simulated (Sample) Average Treatment Effects on the Treated}
+
+\item{\code{cite()}}{Provide citation information about Zelig and Zelig model, and about wrapped package and wrapped model}
+
+\item{\code{feedback()}}{Send feedback to the Zelig team}
+
+\item{\code{getcoef()}}{Get estimated model coefficients}
+
+\item{\code{getpredict()}}{Get predicted values}
+
+\item{\code{getqi(qi = "ev", xvalue = "x", subset = NULL)}}{Get quantities of interest}
+
+\item{\code{getvcov()}}{Get estimated model variance-covariance matrix}
+
+\item{\code{graph()}}{Plot the quantities of interest}
+
+\item{\code{help()}}{Open the model vignette from http://zeligproject.org/}
+
+\item{\code{packagename()}}{Automatically retrieve wrapped package name}
+
+\item{\code{references(style = "sphinx")}}{Construct a reference list specific to a Zelig model.}
+
+\item{\code{set(..., fn = list(numeric = mean, ordered = Median, other = Mode))}}{Setting Explanatory Variable Values}
+
+\item{\code{sim(num = NULL)}}{Generic Method for Computing and Organizing Simulated Quantities of Interest}
+
+\item{\code{simATT(simparam, data, depvar, treatment, treated)}}{Simulate an Average Treatment on the Treated}
+
+\item{\code{summarise(...)}}{Display a Zelig object}
+
+\item{\code{summarize(...)}}{Display a Zelig object}
+
+\item{\code{toJSON()}}{Convert Zelig object to JSON format}
+
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
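These fields and methods belong to Zelig 5's reference-class interface; the zelig()/setx()/sim() workflow from the deleted vignettes maps onto it roughly as follows (a sketch, using the zls least-squares class documented below and the macro data from the examples above):

    z5 <- zls$new()                            # instantiate the model object
    z5$zelig(unem ~ gdp + capmob + trade, data = macro)
    z5$setx(trade = quantile(macro$trade, 0.8))
    z5$sim(num = 1000)
    z5$summarize()
    z5$graph()
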
diff --git a/man/Zelig-exp-class.Rd b/man/Zelig-exp-class.Rd
new file mode 100644
index 0000000..cebf970
--- /dev/null
+++ b/man/Zelig-exp-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-exp.R
+\docType{class}
+\name{Zelig-exp-class}
+\alias{Zelig-exp-class}
+\alias{zexp}
+\title{Exponential Regression for Duration Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-exp.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-factor-bayes-class.Rd b/man/Zelig-factor-bayes-class.Rd
new file mode 100644
index 0000000..6d96578
--- /dev/null
+++ b/man/Zelig-factor-bayes-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-factor-bayes.R
+\docType{class}
+\name{Zelig-factor-bayes-class}
+\alias{Zelig-factor-bayes-class}
+\alias{zfactorbayes}
+\title{Bayesian Factor Analysis}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-factorbayes.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-gamma-class.Rd b/man/Zelig-gamma-class.Rd
new file mode 100644
index 0000000..7474793
--- /dev/null
+++ b/man/Zelig-gamma-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-gamma.R
+\docType{class}
+\name{Zelig-gamma-class}
+\alias{Zelig-gamma-class}
+\alias{zgamma}
+\title{Gamma Regression for Continuous, Positive Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-gamma.html}
+}
+
diff --git a/man/Zelig-gamma-gee-class.Rd b/man/Zelig-gamma-gee-class.Rd
new file mode 100644
index 0000000..6a6b134
--- /dev/null
+++ b/man/Zelig-gamma-gee-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-gamma-gee.R
+\docType{class}
+\name{Zelig-gamma-gee-class}
+\alias{Zelig-gamma-gee-class}
+\alias{zgammagee}
+\title{Generalized Estimating Equation for Gamma Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-gammagee.html}
+}
+
diff --git a/man/Zelig-gamma-survey-class.Rd b/man/Zelig-gamma-survey-class.Rd
new file mode 100644
index 0000000..7fd42e9
--- /dev/null
+++ b/man/Zelig-gamma-survey-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-gamma-survey.R
+\docType{class}
+\name{Zelig-gamma-survey-class}
+\alias{Zelig-gamma-survey-class}
+\alias{zgammasurvey}
+\title{Gamma Regression with Survey Weights}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-gammasurvey.html}
+}
+
diff --git a/man/Zelig-gee-class.Rd b/man/Zelig-gee-class.Rd
new file mode 100644
index 0000000..9880f16
--- /dev/null
+++ b/man/Zelig-gee-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-gee.R
+\docType{class}
+\name{Zelig-gee-class}
+\alias{Zelig-gee-class}
+\alias{zgee}
+\title{Generalized Estimating Equations Model object for inheritance across models in Zelig}
+\description{
+Generalized Estimating Equations Model object for inheritance across models in Zelig
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-glm-class.Rd b/man/Zelig-glm-class.Rd
new file mode 100644
index 0000000..28749ff
--- /dev/null
+++ b/man/Zelig-glm-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-glm.R
+\docType{class}
+\name{Zelig-glm-class}
+\alias{Zelig-glm-class}
+\alias{zglm}
+\title{Generalized Linear Model object for inheritance across models in Zelig}
+\description{
+Generalized Linear Model object for inheritance across models in Zelig
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-logit-bayes-class.Rd b/man/Zelig-logit-bayes-class.Rd
new file mode 100644
index 0000000..911bcb3
--- /dev/null
+++ b/man/Zelig-logit-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-logit-bayes.R
+\docType{class}
+\name{Zelig-logit-bayes-class}
+\alias{Zelig-logit-bayes-class}
+\alias{zlogitbayes}
+\title{Bayesian Logit Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logitbayes.html}
+}
+
diff --git a/man/Zelig-logit-class.Rd b/man/Zelig-logit-class.Rd
new file mode 100644
index 0000000..d9b788b
--- /dev/null
+++ b/man/Zelig-logit-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-logit.R
+\docType{class}
+\name{Zelig-logit-class}
+\alias{Zelig-logit-class}
+\alias{zlogit}
+\title{Logistic Regression for Dichotomous Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logit.html}
+}
+
diff --git a/man/Zelig-logit-gee-class.Rd b/man/Zelig-logit-gee-class.Rd
new file mode 100644
index 0000000..6bd8349
--- /dev/null
+++ b/man/Zelig-logit-gee-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-logit-gee.R
+\docType{class}
+\name{Zelig-logit-gee-class}
+\alias{Zelig-logit-gee-class}
+\alias{zlogitgee}
+\title{Generalized Estimating Equation for Logit Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logitgee.html}
+}
+
diff --git a/man/Zelig-logit-survey-class.Rd b/man/Zelig-logit-survey-class.Rd
new file mode 100644
index 0000000..9cbc37f
--- /dev/null
+++ b/man/Zelig-logit-survey-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-logit-survey.R
+\docType{class}
+\name{Zelig-logit-survey-class}
+\alias{Zelig-logit-survey-class}
+\alias{zlogitsurvey}
+\title{Logit Regression with Survey Weights}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-logitsurvey.html}
+}
+
diff --git a/man/Zelig-lognorm-class.Rd b/man/Zelig-lognorm-class.Rd
new file mode 100644
index 0000000..c200dae
--- /dev/null
+++ b/man/Zelig-lognorm-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-lognorm.R
+\docType{class}
+\name{Zelig-lognorm-class}
+\alias{Zelig-lognorm-class}
+\alias{zlognorm}
+\title{Log-Normal Regression for Duration Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-lognorm.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-ls-class.Rd b/man/Zelig-ls-class.Rd
new file mode 100644
index 0000000..37d58a8
--- /dev/null
+++ b/man/Zelig-ls-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-ls.R
+\docType{class}
+\name{Zelig-ls-class}
+\alias{Zelig-ls-class}
+\alias{zls}
+\title{Least Squares Regression for Continuous Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-ls.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-ma-class.Rd b/man/Zelig-ma-class.Rd
new file mode 100644
index 0000000..d7b6b18
--- /dev/null
+++ b/man/Zelig-ma-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-ma.R
+\docType{class}
+\name{Zelig-ma-class}
+\alias{Zelig-ma-class}
+\alias{zma}
+\title{Time-Series Model with Moving Average}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-ma.html}
+}
+
diff --git a/man/Zelig-mlogit-bayes-class.Rd b/man/Zelig-mlogit-bayes-class.Rd
new file mode 100644
index 0000000..916db40
--- /dev/null
+++ b/man/Zelig-mlogit-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-mlogit-bayes.R
+\docType{class}
+\name{Zelig-mlogit-bayes-class}
+\alias{Zelig-mlogit-bayes-class}
+\alias{zmlogitbayes}
+\title{Bayesian Multinomial Logistic Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-mlogitbayes.html}
+}
+
diff --git a/man/Zelig-negbin-class.Rd b/man/Zelig-negbin-class.Rd
new file mode 100644
index 0000000..bfd709a
--- /dev/null
+++ b/man/Zelig-negbin-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-negbinom.R
+\docType{class}
+\name{Zelig-negbin-class}
+\alias{Zelig-negbin-class}
+\alias{znegbin}
+\title{Negative Binomial Regression for Event Count Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-negbin.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-normal-bayes-class.Rd b/man/Zelig-normal-bayes-class.Rd
new file mode 100644
index 0000000..6f23606
--- /dev/null
+++ b/man/Zelig-normal-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-normal-bayes.R
+\docType{class}
+\name{Zelig-normal-bayes-class}
+\alias{Zelig-normal-bayes-class}
+\alias{znormalbayes}
+\title{Bayesian Normal Linear Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normalbayes.html}
+}
+
diff --git a/man/Zelig-normal-class.Rd b/man/Zelig-normal-class.Rd
new file mode 100644
index 0000000..fbd4c5d
--- /dev/null
+++ b/man/Zelig-normal-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-normal.R
+\docType{class}
+\name{Zelig-normal-class}
+\alias{Zelig-normal-class}
+\alias{znormal}
+\title{Normal Regression for Continuous Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normal.html}
+}
+
diff --git a/man/Zelig-normal-gee-class.Rd b/man/Zelig-normal-gee-class.Rd
new file mode 100644
index 0000000..c753a61
--- /dev/null
+++ b/man/Zelig-normal-gee-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-normal-gee.R
+\docType{class}
+\name{Zelig-normal-gee-class}
+\alias{Zelig-normal-gee-class}
+\alias{znormalgee}
+\title{Generalized Estimating Equation for Normal Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normalgee.html}
+}
+
diff --git a/man/Zelig-normal-survey-class.Rd b/man/Zelig-normal-survey-class.Rd
new file mode 100644
index 0000000..92bf8fc
--- /dev/null
+++ b/man/Zelig-normal-survey-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-normal-survey.R
+\docType{class}
+\name{Zelig-normal-survey-class}
+\alias{Zelig-normal-survey-class}
+\alias{znormalsurvey}
+\title{Normal Regression for Continuous Dependent Variables with Survey Weights}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-normalsurvey.html}
+}
+
diff --git a/man/Zelig-oprobit-bayes-class.Rd b/man/Zelig-oprobit-bayes-class.Rd
new file mode 100644
index 0000000..ad0b659
--- /dev/null
+++ b/man/Zelig-oprobit-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-oprobit-bayes.R
+\docType{class}
+\name{Zelig-oprobit-bayes-class}
+\alias{Zelig-oprobit-bayes-class}
+\alias{zoprobitbayes}
+\title{Bayesian Ordered Probit Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-oprobitbayes.html}
+}
+
diff --git a/man/Zelig-package.Rd b/man/Zelig-package.Rd
deleted file mode 100644
index 796708f..0000000
--- a/man/Zelig-package.Rd
+++ /dev/null
@@ -1,38 +0,0 @@
-\docType{package}
-\name{Zelig-package}
-\alias{Zelig}
-\alias{Zelig-package}
-\title{Zelig: Everyone's Statistical Software}
-\description{
-  Zelig is an easy-to-use program that can estimate, and
-  help interpret the results of, an enormous range of
-  statistical models. It literally is ``everyone's
-  statistical software'' because Zelig's simple unified
-  framework incorporates everyone else's (R) code. We also
-  hope it will become ``everyone's statistical software''
-  for applications and teaching, and so have designed Zelig
-  so that anyone can easily use it or add their programs to
-  it.  Zelig also comes with infrastructure that
-  facilitates the use of any existing method, such as by
-  allowing multiply imputed data for any model, and
-  mimicking the program Clarify (for Stata) that takes the
-  raw output of existing statistical procedures and
-  translates them into quantities of direct interest.
-}
-\details{
-  \tabular{ll}{ Package: \tab Zelig\cr Version: \tab
-  4.0-11\cr Date: \tab 2012-10-28\cr Depends: \tab R (>=
-  2.14), boot, MASS, methods, sandwich, survival\cr
-  Suggests: \tab mvtnorm, Formula \cr License: \tab GPL
-  version 2 or newer\cr URL: \tab
-  http://gking.harvard.edu/zelig\cr }
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai,
-  Olivia Lau, and Gary King
-}
-\seealso{
-  zelig setx sim
-}
-\keyword{package}
-
diff --git a/man/Zelig-poisson-bayes-class.Rd b/man/Zelig-poisson-bayes-class.Rd
new file mode 100644
index 0000000..8733870
--- /dev/null
+++ b/man/Zelig-poisson-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-poisson-bayes.R
+\docType{class}
+\name{Zelig-poisson-bayes-class}
+\alias{Zelig-poisson-bayes-class}
+\alias{zpoissonbayes}
+\title{Bayesian Poisson Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poissonbayes.html}
+}
+
diff --git a/man/Zelig-poisson-class.Rd b/man/Zelig-poisson-class.Rd
new file mode 100644
index 0000000..4991d15
--- /dev/null
+++ b/man/Zelig-poisson-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-poisson.R
+\docType{class}
+\name{Zelig-poisson-class}
+\alias{Zelig-poisson-class}
+\alias{zpoisson}
+\title{Poisson Regression for Event Count Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poisson.html}
+}
+
diff --git a/man/Zelig-poisson-gee-class.Rd b/man/Zelig-poisson-gee-class.Rd
new file mode 100644
index 0000000..819ab2e
--- /dev/null
+++ b/man/Zelig-poisson-gee-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-poisson-gee.R
+\docType{class}
+\name{Zelig-poisson-gee-class}
+\alias{Zelig-poisson-gee-class}
+\alias{zpoissongee}
+\title{Generalized Estimating Equation for Poisson Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poissongee.html}
+}
+
diff --git a/man/Zelig-poisson-survey-class.Rd b/man/Zelig-poisson-survey-class.Rd
new file mode 100644
index 0000000..a67b28e
--- /dev/null
+++ b/man/Zelig-poisson-survey-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-poisson-survey.R
+\docType{class}
+\name{Zelig-poisson-survey-class}
+\alias{Zelig-poisson-survey-class}
+\alias{zpoissonsurvey}
+\title{Poisson Regression with Survey Weights}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-poissonsurvey.html}
+}
+
diff --git a/man/Zelig-probit-bayes-class.Rd b/man/Zelig-probit-bayes-class.Rd
new file mode 100644
index 0000000..ad39543
--- /dev/null
+++ b/man/Zelig-probit-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-probit-bayes.R
+\docType{class}
+\name{Zelig-probit-bayes-class}
+\alias{Zelig-probit-bayes-class}
+\alias{zprobitbayes}
+\title{Bayesian Probit Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probitbayes.html}
+}
+
diff --git a/man/Zelig-probit-class.Rd b/man/Zelig-probit-class.Rd
new file mode 100644
index 0000000..13cabf5
--- /dev/null
+++ b/man/Zelig-probit-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-probit.R
+\docType{class}
+\name{Zelig-probit-class}
+\alias{Zelig-probit-class}
+\alias{zprobit}
+\title{Probit Regression for Dichotomous Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probit.html}
+}
+
diff --git a/man/Zelig-probit-gee-class.Rd b/man/Zelig-probit-gee-class.Rd
new file mode 100644
index 0000000..e67c175
--- /dev/null
+++ b/man/Zelig-probit-gee-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-probit-gee.R
+\docType{class}
+\name{Zelig-probit-gee-class}
+\alias{Zelig-probit-gee-class}
+\alias{zprobitgee}
+\title{Generalized Estimating Equation for Probit Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probitgee.html}
+}
+
diff --git a/man/Zelig-probit-survey-class.Rd b/man/Zelig-probit-survey-class.Rd
new file mode 100644
index 0000000..1e98b4f
--- /dev/null
+++ b/man/Zelig-probit-survey-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-probit-survey.R
+\docType{class}
+\name{Zelig-probit-survey-class}
+\alias{Zelig-probit-survey-class}
+\alias{zprobitsurvey}
+\title{Probit Regression with Survey Weights}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-probitsurvey.html}
+}
+
diff --git a/man/Zelig-quantile-class.Rd b/man/Zelig-quantile-class.Rd
new file mode 100644
index 0000000..3c39dd2
--- /dev/null
+++ b/man/Zelig-quantile-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-quantile.R
+\docType{class}
+\name{Zelig-quantile-class}
+\alias{Zelig-quantile-class}
+\alias{zquantile}
+\title{Quantile Regression for Continuous Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-quantile.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-relogit-class.Rd b/man/Zelig-relogit-class.Rd
new file mode 100644
index 0000000..83ee124
--- /dev/null
+++ b/man/Zelig-relogit-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-relogit.R
+\docType{class}
+\name{Zelig-relogit-class}
+\alias{Zelig-relogit-class}
+\alias{zrelogit}
+\title{Rare Events Logistic Regression for Dichotomous Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-relogit.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-survey-class.Rd b/man/Zelig-survey-class.Rd
new file mode 100644
index 0000000..f3ce4a1
--- /dev/null
+++ b/man/Zelig-survey-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-survey.R
+\docType{class}
+\name{Zelig-survey-class}
+\alias{Zelig-survey-class}
+\alias{zsurvey}
+\title{Survey models in Zelig with weights for complex sampling designs}
+\description{
+Survey models in Zelig with weights for complex sampling designs
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-timeseries-class.Rd b/man/Zelig-timeseries-class.Rd
new file mode 100644
index 0000000..9a865d3
--- /dev/null
+++ b/man/Zelig-timeseries-class.Rd
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-timeseries.R
+\docType{class}
+\name{Zelig-timeseries-class}
+\alias{Zelig-timeseries-class}
+\alias{ztimeseries}
+\title{Time-series models in Zelig}
+\description{
+Time-series models in Zelig
+}
+\section{Methods}{
+
+\describe{
+\item{\code{packagename()}}{Automatically retrieve wrapped package name}
+
+\item{\code{sim(num = NULL)}}{Generic Method for Computing and Organizing Simulated Quantities of Interest}
+
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-tobit-bayes-class.Rd b/man/Zelig-tobit-bayes-class.Rd
new file mode 100644
index 0000000..455a472
--- /dev/null
+++ b/man/Zelig-tobit-bayes-class.Rd
@@ -0,0 +1,11 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-tobit-bayes.R
+\docType{class}
+\name{Zelig-tobit-bayes-class}
+\alias{Zelig-tobit-bayes-class}
+\alias{ztobitbayes}
+\title{Bayesian Tobit Regression}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-tobitbayes.html}
+}
+
diff --git a/man/Zelig-tobit-class.Rd b/man/Zelig-tobit-class.Rd
new file mode 100644
index 0000000..b1b170e
--- /dev/null
+++ b/man/Zelig-tobit-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-tobit.R
+\docType{class}
+\name{Zelig-tobit-class}
+\alias{Zelig-tobit-class}
+\alias{ztobit}
+\title{Linear Regression for a Left-Censored Dependent Variable}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-tobit.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/Zelig-weibull-class.Rd b/man/Zelig-weibull-class.Rd
new file mode 100644
index 0000000..d588b38
--- /dev/null
+++ b/man/Zelig-weibull-class.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-weibull.R
+\docType{class}
+\name{Zelig-weibull-class}
+\alias{Zelig-weibull-class}
+\alias{zweibull}
+\title{Weibull Regression for Duration Dependent Variables}
+\description{
+Vignette: \url{http://docs.zeligproject.org/en/latest/zelig-weibull.html}
+}
+\section{Methods}{
+
+\describe{
+\item{\code{zelig(formula, data, model = NULL, ..., weights = NULL, by,
+  bootstrap = FALSE)}}{The zelig command estimates a variety of statistical models}
+}}
+
diff --git a/man/ZeligDescribeModel.Rd b/man/ZeligDescribeModel.Rd
deleted file mode 100644
index 76d1377..0000000
--- a/man/ZeligDescribeModel.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{ZeligDescribeModel}
-\alias{ZeligDescribeModel}
-\title{Produce a 'description' Object from the Name of a Model}
-\usage{
-  ZeligDescribeModel(model.name)
-}
-\arguments{
-  \item{model.name}{a character-string specifying a Zelig
-  model}
-}
-\value{
-  a 'description' object specified by the 'model.name'
-  parameter. This object is created by executing the
-  specified Zelig model's 'describe' function
-}
-\description{
-  Produce a 'description' Object from the Name of a Model
-}
-\note{
-  The 'description' object is a list-style object
-  containing citation information
-}
-
diff --git a/man/ZeligListModels.Rd b/man/ZeligListModels.Rd
deleted file mode 100644
index e499eba..0000000
--- a/man/ZeligListModels.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{ZeligListModels}
-\alias{ZeligListModels}
-\title{Get a Character-Vector of All Models with a 'zelig2' Function}
-\usage{
-  ZeligListModels(zelig.only = FALSE)
-}
-\arguments{
-  \item{zelig.only}{a boolean specifying whether we want to
-  search only the Zelig namespace}
-}
-\value{
-  a character-vector of the Zelig models loaded on the
-  user's machine
-}
-\description{
-  Get a Character-Vector of All Models with a 'zelig2'
-  Function
-}
-\note{
-  In order for a Zelig model to either execute correctly or
-  be listed as a legal Zelig model, the function name must
-  be prefixed with 'zelig2'.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/ZeligListTitles.Rd b/man/ZeligListTitles.Rd
deleted file mode 100644
index 5248a7e..0000000
--- a/man/ZeligListTitles.Rd
+++ /dev/null
@@ -1,13 +0,0 @@
-\name{ZeligListTitles}
-\alias{ZeligListTitles}
-\title{List the Titles of the Zelig Statistical Models}
-\usage{
-  ZeligListTitles()
-}
-\value{
-  a list of manual titles for the Zelig software
-}
-\description{
-  List the Titles of the Zelig Statistical Models
-}
-
diff --git a/man/alpha.Rd b/man/alpha.Rd
deleted file mode 100644
index c2a1e8b..0000000
--- a/man/alpha.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{alpha}
-\alias{alpha}
-\title{Extract ancillary parameters from
-`parameters' objects}
-\usage{
-  alpha(param)
-}
-\arguments{
-  \item{param}{a `parameters' object}
-}
-\value{
-  the ancillary parameters \emph{specified} for the
-  statistical model
-}
-\description{
-  Extract ancillary parameters from `parameters' objects
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.bootlist.Rd b/man/as.bootlist.Rd
deleted file mode 100644
index 15e2fa6..0000000
--- a/man/as.bootlist.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{as.bootlist}
-\alias{as.bootlist}
-\title{Convert a Vector of Bootstrapped Parameters to a List-style Boot Object}
-\usage{
-  as.bootlist(bootstraps, lengths, names)
-}
-\arguments{
-  \item{bootstraps}{...}
-
-  \item{lengths}{...}
-
-  \item{names}{a character-vector specifying the names of
-  the boot terms}
-}
-\value{
-  ...
-}
-\description{
-  This inverts the ``as.bootvector'' function, and returns
-  a list containing the slots ``alpha'' and ``beta''.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.bootvector.Rd b/man/as.bootvector.Rd
deleted file mode 100644
index 8da16bb..0000000
--- a/man/as.bootvector.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-\name{as.bootvector}
-\alias{as.bootvector}
-\title{Convert Boot Object to a Vector}
-\usage{
-  as.bootvector(obj)
-}
-\arguments{
-  \item{obj}{a list with two slots: ``alpha'' and ``beta''.
-  Respectively, these represent bootstrap samples for
-  ancillary parameters and systematic component of the
-  bootstrapped GLM.}
-}
-\value{
-  a list containing the resulting vector, as well as an
-  object used to reverse-build the list (``obj'') from the
-  resulting call to ``bootstrap''.
-}
-\description{
-  Receives a list with 2 slots as its input, and returns a
-  vector of the two smashed together along with the offsets
-  used to reverse-construct the object.
-}
-\note{
-  This method is used internally by Zelig to allow an
-  intuitive, ``param''-like API for bootstrapping.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
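The deleted pages above describe a round trip between a list with ``alpha'' and ``beta'' slots and a flat vector plus recorded lengths. A base-R sketch of that idea, with illustrative helper names rather than the removed Zelig 4 code:

    # Flatten alpha/beta into one vector, remembering each slot's length
    to_vector <- function(obj) {
      list(vector  = c(obj$alpha, obj$beta),
           lengths = c(alpha = length(obj$alpha), beta = length(obj$beta)))
    }
    # Rebuild the two slots from the vector and the recorded lengths
    to_list <- function(vector, lengths) {
      list(alpha = vector[seq_len(lengths[["alpha"]])],
           beta  = vector[lengths[["alpha"]] + seq_len(lengths[["beta"]])])
    }
    bv <- to_vector(list(alpha = c(shape = 2), beta = c(a = 0.1, b = 0.3)))
    to_list(bv$vector, bv$lengths)  # recovers the original alpha and beta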
diff --git a/man/as.data.frame.setx.Rd b/man/as.data.frame.setx.Rd
deleted file mode 100644
index 6efb73c..0000000
--- a/man/as.data.frame.setx.Rd
+++ /dev/null
@@ -1,31 +0,0 @@
-\name{as.data.frame.setx}
-\alias{as.data.frame.setx}
-\title{Coerce a \code{setx} Object into a \code{data.frame}}
-\usage{
-  \method{as.data.frame}{setx}(x, row.names=NULL,
-    optional=FALSE, ...)
-}
-\arguments{
-  \item{x}{a \code{setx} object}
-
-  \item{row.names}{ignored parameter}
-
-  \item{optional}{ignored parameter}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the \code{setx} object interpreted as a
-  \code{data.frame}. The column-names of the resulting
-  \code{data.frame} are specified by the names of the
-  \code{setx} object. The row-names are typically
-  unlabeled.
-}
-\description{
-  Coerce a \code{setx} Object into a \code{data.frame}
-}
-\note{
-  In subsequent versions of Zelig, this function is expected
-  to undergo minor modifications.
-}
-
diff --git a/man/as.description.Rd b/man/as.description.Rd
deleted file mode 100644
index 93f2029..0000000
--- a/man/as.description.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{as.description}
-\alias{as.description}
-\title{Generic Method for Casting 'description' Objects}
-\usage{
-  as.description(descr, ...)
-}
-\arguments{
-  \item{descr}{an object to cast an object of type
-  'description'}
-
-  \item{...}{parameters which are reserved for future Zelig
-  revisions}
-}
-\value{
-  an object of type 'description'
-}
-\description{
-  Convert the result of a call to the 'describe' method
-  into an object parseable by Zelig. Currently conversions
-  only exist for lists and description objects.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  as.description.description as.description.list
-}
-
diff --git a/man/as.description.description.Rd b/man/as.description.description.Rd
deleted file mode 100644
index a8ca580..0000000
--- a/man/as.description.description.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{as.description.description}
-\alias{as.description.description}
-\title{description -> description}
-\usage{
-  \method{as.description}{description}(descr, ...)
-}
-\arguments{
-  \item{descr}{an object of type 'description'}
-
-  \item{...}{ignored}
-}
-\value{
-  the same object
-}
-\description{
-  Identity operation on a description object.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.description.list.Rd b/man/as.description.list.Rd
deleted file mode 100644
index d9be365..0000000
--- a/man/as.description.list.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{as.description.list}
-\alias{as.description.list}
-\title{list -> description}
-\usage{
-  \method{as.description}{list}(descr, ...)
-}
-\arguments{
-  \item{descr}{a list}
-
-  \item{...}{ignored}
-}
-\value{
-  an object of type 'description'
-}
-\description{
-  Convert list into a description object.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.matrix.pooled.setx.Rd b/man/as.matrix.pooled.setx.Rd
deleted file mode 100644
index cbe5d3b..0000000
--- a/man/as.matrix.pooled.setx.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-\name{as.matrix.pooled.setx}
-\alias{as.matrix.pooled.setx}
-\title{Convert a ``pooled.setx'' Object to a Matrix}
-\usage{
-  \method{as.matrix}{pooled.setx}(x, ...)
-}
-\arguments{
-  \item{x}{a setx object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a matrix containing columns and rows corresponding to the
-  explanatory variables specified in the call to the 'setx'
-  function
-}
-\description{
-  The setx object is, in its most basic form, a list of
-  column names and values specified for each of these
-  column names. This function simply converts the key-value
-  pairs of column-name and specified value into a matrix.
-}
-\note{
-  This method allows basic matrix arithmetic operations on
-  data objects, which mirror values stored within setx
-  objects. In many scenarios, simulations require
-  matrix-multiplication, etc. to be performed on a
-  data-set. This function facilitates that need.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.matrix.setx.Rd b/man/as.matrix.setx.Rd
deleted file mode 100644
index 20433fb..0000000
--- a/man/as.matrix.setx.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-\name{as.matrix.setx}
-\alias{as.matrix.setx}
-\title{Convert a 'setx' Object to a Matrix}
-\usage{
-  \method{as.matrix}{setx}(x, ...)
-}
-\arguments{
-  \item{x}{a setx object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a matrix containing columns and rows corresponding to the
-  explanatory variables specified in the call to the 'setx'
-  function
-}
-\description{
-  The setx object is, in its most basic form, a list of
-  column names and values specified for each of these
-  column names. This function simply converts the key-value
-  pairs of column-name and specified value into a matrix.
-}
-\note{
-  This method allows basic matrix arithmetic operations on
-  data objects, which mirror values stored within setx
-  objects. In many scenarios, simulations require
-  matrix-multiplication, etc. to be performed on a
-  data-set. This function facilitates that need.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.parameters.Rd b/man/as.parameters.Rd
deleted file mode 100644
index 6e34666..0000000
--- a/man/as.parameters.Rd
+++ /dev/null
@@ -1,43 +0,0 @@
-\name{as.parameters}
-\alias{as.parameters}
-\title{Generic Method for Converting Objects into 'parameters'}
-\usage{
-  as.parameters(params, ...)
-}
-\arguments{
-  \item{params}{the object to be casted}
-
-  \item{...}{parameters reserved for future revisions}
-}
-\value{
-  an object of type `parameters'
-}
-\description{
-  Converts list-style objects into Parameter lists
-  primarily used by the 'qi' methods. These list-style
-  objects may contain keys specifying: 'link' (the link
-  function of a statistical model), 'linkinv' (the
-  inverse-link function), 'family' (a object of 'family'
-  class used to specify the model's classification),
-  'alpha' (a vector of ancillary parameters), and
-  'simulations' (a vector of simulated draws from the
-  model's underlying distribution).
-}
-\note{
-  Only three scenarios may exist - converting 'parameters'
-  to 'parameters', 'list' to 'parameters', and vectors to
-  'parameters'. The third in particular is needed only for
-  backwards compatibility, and support will likely be
-  deprecated.
-
-  Furthermore, this function should be exclusively used
-  implicitly and by Zelig.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  as.parameters.list as.parameters.parameters,
-  as.parameters.default
-}
-
diff --git a/man/as.parameters.default.Rd b/man/as.parameters.default.Rd
deleted file mode 100644
index 7ac30f8..0000000
--- a/man/as.parameters.default.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{as.parameters.default}
-\alias{as.parameters.default}
-\title{??? -> parameters}
-\usage{
-  as.parameters.default(params, num = NULL, ...)
-}
-\arguments{
-  \item{params}{any non-supported data-type}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{...}{ignored}
-}
-\value{
-  the object passed in
-}
-\description{
-  ??? -> parameters
-}
-\note{
-  This function should be deprecated.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.parameters.list.Rd b/man/as.parameters.list.Rd
deleted file mode 100644
index 7890dda..0000000
--- a/man/as.parameters.list.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{as.parameters.list}
-\alias{as.parameters.list}
-\title{list -> parameters}
-\usage{
-  as.parameters.list(params, num = NULL, ...)
-}
-\arguments{
-  \item{params}{a list object}
-
-  \item{num}{an integer specifying the number of
-  simulations to be taken}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  an object of type `parameters'
-}
-\description{
-  The list may contain: 'link', 'linkinv', 'family',
-  'alpha', and 'simulations' keys.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  as.parameters
-}
-
diff --git a/man/as.parameters.parameters.Rd b/man/as.parameters.parameters.Rd
deleted file mode 100644
index 03dcc65..0000000
--- a/man/as.parameters.parameters.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{as.parameters.parameters}
-\alias{as.parameters.parameters}
-\title{parameters -> parameters
-This is merely an identity function when casting 'parameters' objects into
-'parameters'.}
-\usage{
-  as.parameters.parameters(params, ...)
-}
-\arguments{
-  \item{params}{a parameters object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the same parameter object
-}
-\description{
-  parameters -> parameters This is merely an identity
-  function when casting 'parameters' objects into
-  'parameters'.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.qi.Rd b/man/as.qi.Rd
deleted file mode 100644
index a3d1297..0000000
--- a/man/as.qi.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-\name{as.qi}
-\alias{as.qi}
-\title{Generic Method for Converting Various Objects into 'qi' Objects
-'qi' objects are list-style objects used by the 'summarize' function to
-compute simple summaries about the simulated data. For readability
-and simplicity purposes, the 'qi' function typically returns a list of
-named simulations. This list is converted internally by Zelig into a 'qi'
-object so that several methods can be easily applied to the Quantities of
-Interest: plot, summarize, and print}
-\usage{
-  as.qi(s)
-}
-\arguments{
-  \item{s}{the object to be casted}
-}
-\value{
-  an object of type `qi'
-}
-\description{
-  Generic Method for Converting Various Objects into 'qi'
-  Objects 'qi' objects are list-style objects used by the
-  'summarize' function to compute simple summaries about
-  the simulated data. For readability and simplicity
-  purposes, the 'qi' function typically returns a list of
-  named simulations. This list is converted internally by
-  Zelig into a 'qi' object so that several methods can be
-  easily applied to the Quantities of Interest: plot,
-  summarize, and print
-}
-\note{
-  These functions are primarily used internally by Zelig and
-  should not be used in the Global namespace.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  as.qi.default as.qi.qi as.qi.list
-}
-
diff --git a/man/as.qi.default.Rd b/man/as.qi.default.Rd
deleted file mode 100644
index 2af74ac..0000000
--- a/man/as.qi.default.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{as.qi.default}
-\alias{as.qi.default}
-\title{??? -> qi}
-\usage{
-  as.qi.default(s)
-}
-\arguments{
-  \item{s}{any unsupported object}
-}
-\value{
-  an object of type `qi'
-}
-\description{
-  ??? -> qi
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.qi.list.Rd b/man/as.qi.list.Rd
deleted file mode 100644
index a45d7ad..0000000
--- a/man/as.qi.list.Rd
+++ /dev/null
@@ -1,31 +0,0 @@
-\name{as.qi.list}
-\alias{as.qi.list}
-\title{list -> qi
-This function has a lot of room to go wrong. It tries to detect whether the
-zelig model is old-style or new-style (as of 4/4/2011). Eventually this
-feature should be phased out.}
-\usage{
-  as.qi.list(s)
-}
-\arguments{
-  \item{s}{a list}
-}
-\value{
-  an object of type `qi'
-}
-\description{
-  list -> qi This function has a lot of room to go wrong.
-  It tries o detect whether the zelig model is old-style or
-  new-style (as of 4/4/2011). Eventually this feature
-  should be phased out.
-}
-\note{
-  This method has peculiar behavior when the list contains
-  only two elements. The crucial fix is to simply remove
-  the portion of code which intentionally implements this
-  peculiar behavior.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.qi.qi.Rd b/man/as.qi.qi.Rd
deleted file mode 100644
index a04b300..0000000
--- a/man/as.qi.qi.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{as.qi.qi}
-\alias{as.qi.qi}
-\title{qi -> qi}
-\usage{
-  as.qi.qi(s)
-}
-\arguments{
-  \item{s}{an object of type `qi'}
-}
-\value{
-  s an object of type `qi'
-}
-\description{
-  qi -> qi
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.summarized.Rd b/man/as.summarized.Rd
deleted file mode 100644
index e495a26..0000000
--- a/man/as.summarized.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{as.summarized}
-\alias{as.summarized}
-\title{Generic Method for Casting Objects as 'summarized' Objects}
-\usage{
-  as.summarized(x, ...)
-}
-\arguments{
-  \item{x}{an object}
-
-  \item{...}{unspecified parameters}
-}
-\value{
-  a 'summarized.qi' object
-}
-\description{
-  This function is particularly for use by the 'summarize'
-  method, which summarizes the simulations taken from the
-  'qi' method. The generic function 'summary' when applied
-  to a Zelig Simulation implicitly uses this function.
-}
-\note{
-  This is made available in the global namespace as a
-  matter of potential future compliance.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.summarized.list.Rd b/man/as.summarized.list.Rd
deleted file mode 100644
index a495064..0000000
--- a/man/as.summarized.list.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{as.summarized.list}
-\alias{as.summarized.list}
-\title{list -> summarized.qi
-Convert a list into a ``summarized.qi'' object}
-\usage{
-  \method{as.summarized}{list}(x, ...)
-}
-\arguments{
-  \item{x}{a list}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a ``summarized.qi'' object
-}
-\description{
-  list -> summarized.qi Convert a list into a
-  ``summarized.qi'' object
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/as.summarized.summarized.qi.Rd b/man/as.summarized.summarized.qi.Rd
deleted file mode 100644
index 6cdb02a..0000000
--- a/man/as.summarized.summarized.qi.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{as.summarized.summarized.qi}
-\alias{as.summarized.summarized.qi}
-\title{summarized.qi -> summarized.qi}
-\usage{
-  \method{as.summarized}{summarized.qi}(x, ...)
-}
-\arguments{
-  \item{x}{an object of type 'summarized.qi'}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the same 'summarized.qi' object
-}
-\description{
-  Identity operation on ``summarized.qi'' objects
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/attach.env.Rd b/man/attach.env.Rd
deleted file mode 100644
index 47e1e2c..0000000
--- a/man/attach.env.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-\name{attach.env}
-\alias{attach.env}
-\title{Attach Variables to a Function}
-\usage{
-  attach.env(f, env = NULL, ...)
-}
-\arguments{
-  \item{f}{a function which will be modified}
-
-  \item{env}{an environment variable which will be attached
-  to the function being returned}
-
-  \item{...}{arbitrary key-value paired parameters which
-  will be assigned to the environment of the function being
-  returned}
-}
-\value{
-  the original function ``f'' with a different environment
-  attached to it.
-}
-\description{
-  Returns a function, specified by the user, with the
-  variables of a specified environment attached. This, in
-  essence, allows programmers to write functions that have
-  forms of private memory. This makes the function behave
-  similarly to an object.
-}
-\note{
-  This function is used by Zelig to ensure that particular
-  method calls - param, qi, bootstrap - will contain the
-  private variables: ``.fitted'', ``.model'', ``.call'' and
-  ``.env'' which respectively contain the fitted model
-  object, the name of the zelig model being invoked, the
-  original call to the model-fitting function and the
-  environment in which to call the function call.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
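The removed attach.env() page describes giving a function a form of private memory by swapping its enclosing environment. The technique itself is plain R; a minimal sketch, not the removed Zelig 4 implementation:

    attach_env_sketch <- function(f, env = NULL, ...) {
      e <- if (is.null(env)) new.env() else env
      dots <- list(...)
      # Assign each key-value pair into the environment...
      for (k in names(dots)) assign(k, dots[[k]], envir = e)
      environment(f) <- e  # ...then attach it to the function
      f
    }
    counter <- attach_env_sketch(function() {
      .n <<- .n + 1  # updates the attached environment, not the caller's
      .n
    }, .n = 0)
    counter()  # 1
    counter()  # 2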
diff --git a/man/avg.Rd b/man/avg.Rd
new file mode 100644
index 0000000..47d141c
--- /dev/null
+++ b/man/avg.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{avg}
+\alias{avg}
+\title{Compute central tendency as appropriate to data type}
+\usage{
+avg(val)
+}
+\arguments{
+\item{val}{a vector of values}
+}
+\value{
+a mean (if numeric), a median (if ordered), or a mode (otherwise)
+}
+\description{
+Compute central tendency as appropriate to data type
+}
+
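The behaviour documented for the new avg() helper is easy to pin down; a reimplementation for illustration only (the package source in R/utils.R may differ in detail):

    avg_sketch <- function(val) {
      if (is.numeric(val)) {
        mean(val)
      } else if (is.ordered(val)) {
        # median of an ordered factor via its underlying integer codes
        levels(val)[ceiling(stats::median(as.integer(val)))]
      } else {
        # statistical mode: the most frequent value
        names(sort(table(val), decreasing = TRUE))[1]
      }
    }
    avg_sketch(c(1, 2, 2, 5))  # 2.5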
diff --git a/man/bootfn.default.Rd b/man/bootfn.default.Rd
deleted file mode 100644
index 79033a3..0000000
--- a/man/bootfn.default.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-\name{bootfn.default}
-\alias{bootfn.default}
-\title{Default Boot-strapping procedure}
-\usage{
-  bootfn.default(data, i, object, bootstrapfn = NULL, num,
-    ...)
-}
-\arguments{
-  \item{data}{a data.frame}
-
-  \item{i}{an integer or character-string specifying the
-  index of the row to be used in the bootstrapping
-  procedure.}
-
-  \item{object}{the fitted model object}
-
-  \item{bootstrapfn}{a function used to bootstrap the
-  object}
-
-  \item{num}{an integer specifying the number of samples to
-  simulate}
-
-  \item{...}{unspecified parameters}
-}
-\value{
-  a list of parameters
-}
-\description{
-  The default procedure for extracting bootstrap
-  information. Note that this method re-fits the model and
-  resamples the data frequently. This is a good candidate
-  for fixing-up.
-}
-
diff --git a/man/bootstrap.Rd b/man/bootstrap.Rd
deleted file mode 100644
index da6c506..0000000
--- a/man/bootstrap.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-\name{bootstrap}
-\alias{bootstrap}
-\title{Generic Method for ``bootstrap''}
-\usage{
-  bootstrap(obj, ...)
-}
-\arguments{
-  \item{obj}{a fitted model object that will be used to
-  produce boot-strapped parameters. This object usually
-  inherits the class ``glm'' or ``lm'' object}
-
-  \item{...}{unspecified parameters}
-}
-\value{
-  a list with the ``alpha'' and ``beta'' slots set. Note
-  that ``alpha'' corresponds to ancillary parameters and
-  ``beta'' corresponds to systematic components of the
-  model
-}
-\description{
-  This method is intended to be overridden by statistical
-  models that would like to support statistical
-  bootstrapping.
-}
-\note{
-  This method has private memory storage and can reference
-  the objects: ``.fitted'', ``.data'', ``.call'', ``.env'',
-  despite having no declaration in the argument list.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/bootstrap.default.Rd b/man/bootstrap.default.Rd
deleted file mode 100644
index 40543f3..0000000
--- a/man/bootstrap.default.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{bootstrap.default}
-\alias{bootstrap.default}
-\title{Produce Boot-strapped Parameters for a Statistical Model}
-\usage{
-  \method{bootstrap}{default}(obj, ...)
-}
-\arguments{
-  \item{obj}{a fitted model object. This is typically of
-  type ``glm'' or ``lm''}
-
-  \item{...}{unspecified parameters}
-}
-\value{
-  a list with the ``alpha'' and ``beta'' slots set
-}
-\description{
-  This method is a fallback for bootstrapping models that
-  do not have a defined ``bootstrap'' method. For most
-  models, this default is sufficient, so long as the model
-  follows the usual convention that ``coef(obj)'' returns
-  the systematic parameters of a fitted model.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/bootstrap.gamma.Rd b/man/bootstrap.gamma.Rd
deleted file mode 100644
index ad1cb88..0000000
--- a/man/bootstrap.gamma.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{bootstrap.gamma}
-\alias{bootstrap.gamma}
-\title{Bootstrap Parameters for Zelig ``gamma'' GLM}
-\usage{
-  \method{bootstrap}{gamma}(obj, ...)
-}
-\arguments{
-  \item{obj}{a ``zelig'' object that will be used to
-  produce boot-strapped parameters}
-
-  \item{...}{extra parameters to be passed to the ``boot''
-  method. These are typically ignored, but are included for
-  further expansion.}
-}
-\value{
-  a list containing information concerning link,
-  link-inverses, etc.
-}
-\description{
-  Returns bootstrapped parameter estimates for a ``gamma''
-  GLM.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/bootstrap.negbinom.Rd b/man/bootstrap.negbinom.Rd
deleted file mode 100644
index 5fdf331..0000000
--- a/man/bootstrap.negbinom.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{bootstrap.negbinom}
-\alias{bootstrap.negbinom}
-\title{Bootstrap Parameters for Zelig ``negbinom'' GLM}
-\usage{
-  \method{bootstrap}{negbinom}(obj, ...)
-}
-\arguments{
-  \item{obj}{a ``zelig'' object that will be used to
-  produce boot-strapped parameters}
-
-  \item{...}{extra parameters to be passed to the ``boot''
-  method. These are typically ignored, but are included for
-  further expansion.}
-}
-\value{
-  a list containing information concerning link,
-  link-inverses, etc.
-}
-\description{
-  Returns bootstrapped parameter estimates for a
-  negative-binomial GLM.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/bootstrap.normal.Rd b/man/bootstrap.normal.Rd
deleted file mode 100644
index e6d2f82..0000000
--- a/man/bootstrap.normal.Rd
+++ /dev/null
@@ -1,29 +0,0 @@
-\name{bootstrap.normal}
-\alias{bootstrap.normal}
-\title{Bootstrap Parameters for Zelig ``normal'' GLM}
-\usage{
-  \method{bootstrap}{normal}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a ``zelig'' object that will be used to
-  produce boot-strapped parameters}
-
-  \item{num}{an integer specifying the number of
-  simulations to produce}
-
-  \item{...}{extra parameters to be passed to the ``boot''
-  method. These are typically ignored, but are included for
-  further expansion.}
-}
-\value{
-  a list containing information concerning link,
-  link-inverses, etc.
-}
-\description{
-  Returns bootstrapped parameter estimates for a Gaussian
-  GLM.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/callToString.Rd b/man/callToString.Rd
deleted file mode 100644
index fb78a61..0000000
--- a/man/callToString.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{callToString}
-\alias{callToString}
-\title{Convert \code{call} Object to a String}
-\usage{
-  callToString(x, ...)
-}
-\arguments{
-  \item{x}{a \code{call} object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a character-string representing the \code{call} object
-}
-\description{
-  This method converts \code{call} objects into a simple,
-  intuitive human-readable form.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/ci.plot.Rd b/man/ci.plot.Rd
new file mode 100644
index 0000000..ae7b02b
--- /dev/null
+++ b/man/ci.plot.Rd
@@ -0,0 +1,63 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plots.R
+\name{ci.plot}
+\alias{ci.plot}
+\title{Method for plotting qi simulations across a range within a variable, with confidence intervals}
+\usage{
+ci.plot(obj, qi="ev", var=NULL, ..., main = NULL, sub = 
+ NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = 
+ NULL, legcol="gray20", col=NULL, leg=1, legpos=
+ NULL, ci = c(80, 95, 99.9), discont=NULL)
+}
+\arguments{
+\item{obj}{A reference class zelig5 object}
+
+\item{qi}{a character-string specifying the quantity of interest to plot}
+
+\item{var}{The variable to be used on the x-axis. Default is the variable
+with the smallest nonzero variance across the chosen values}
+
+\item{...}{Parameters to be passed to the `truehist' function which is
+implicitly called for numeric simulations}
+
+\item{main}{a character-string specifying the main heading of the plot}
+
+\item{sub}{a character-string specifying the sub heading of the plot}
+
+\item{xlab}{a character-string specifying the label for the x-axis}
+
+\item{ylab}{a character-string specifying the label for the y-axis}
+
+\item{xlim}{Limits to the x-axis}
+
+\item{ylim}{Limits to the y-axis}
+
+\item{legcol}{``legend color'', a valid color used for plotting the line
+colors in the legend}
+
+\item{col}{a valid vector of colors of at least length 3 to use to color the
+confidence intervals}
+
+\item{leg}{``legend position'', an integer from 1 to 4, specifying the
+position of the legend. 1 to 4 correspond to ``SE'', ``SW'', ``NW'', and
+``NE'' respectively}
+
+\item{legpos}{``legend type'', exact coordinates and sizes for legend.
+Overrides argument ``leg.type''}
+
+\item{ci}{vector of length three of confidence interval levels to draw.}
+
+\item{discont}{optional point of discontinuity along the x-axis at which
+to interrupt the graph}
+}
+\value{
+the current graphical parameters. This is subject to change in future
+implementations of Zelig
+}
+\description{
+Method for plotting qi simulations across a range within a variable, with confidence intervals
+}
+\author{
+James Honaker
+}
+
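A hedged usage sketch for the new ci.plot(): simulate a quantity of interest across a range of one explanatory variable, then draw it with the 80/95/99.9 percent bands documented above. The turnout data and variable names are assumptions from Zelig's stock examples, not from this diff:

    z.out <- zelig(vote ~ age + educate, model = "logit", data = turnout)
    x.range <- setx(z.out, age = 18:90)     # a range within one variable
    s.out <- sim(z.out, x = x.range)
    ci.plot(s.out, qi = "ev", var = "age")  # expected values with CI bands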
diff --git a/man/cite.Rd b/man/cite.Rd
deleted file mode 100644
index 1798afc..0000000
--- a/man/cite.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{cite}
-\alias{cite}
-\title{Citation information for a 'description' object}
-\usage{
-  cite(descr)
-}
-\arguments{
-  \item{descr}{an object of type 'description'}
-}
-\value{
-  a character-string giving citation info
-}
-\description{
-  Citation information for a 'description' object
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/cluster.formula.Rd b/man/cluster.formula.Rd
index c37f0b5..eaca2c1 100644
--- a/man/cluster.formula.Rd
+++ b/man/cluster.formula.Rd
@@ -1,19 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
 \name{cluster.formula}
 \alias{cluster.formula}
 \title{Generate Formulae that Consider Clustering}
 \usage{
-  cluster.formula(formula, cluster)
+cluster.formula(formula, cluster)
 }
 \arguments{
-  \item{formula}{a formula object}
+\item{formula}{a formula object}
 
-  \item{cluster}{a vector}
+\item{cluster}{a vector}
 }
 \value{
-  a formula object describing clustering
+a formula object describing clustering
 }
 \description{
-  This method is used internally by the "Zelig" Package to
-  interpret clustering.
+This method is used internally by the "Zelig" Package to interpret
+clustering in GEE models.
 }
 
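A sketch of what the reworded cluster.formula() page implies; the exact shape of the returned formula is an assumption based on the documentation, not verified against R/utils.R:

    library(Zelig)
    cluster.formula(y ~ x1 + x2, cluster = "id")
    # plausibly yields a formula carrying a clustering term, e.g.
    # y ~ x1 + x2 + cluster(id)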
diff --git a/man/cmvglm.Rd b/man/cmvglm.Rd
deleted file mode 100644
index 3651c81..0000000
--- a/man/cmvglm.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{cmvglm}
-\alias{cmvglm}
-\title{cmvglm}
-\usage{
-  cmvglm(formula, model, ndim, data = NULL, fact = NULL)
-}
-\arguments{
-  \item{formula}{a formula}
-
-  \item{model}{the name of the Zelig model}
-
-  \item{ndim}{the number of dimensions in the statistical
-  model}
-
-  \item{data}{a data-frame}
-
-  \item{fact}{???}
-}
-\description{
-  cmvglm
-}
-\author{
-  Kosuke Imai and Olivia Lau
-}
-
diff --git a/man/coef-Zelig-method.Rd b/man/coef-Zelig-method.Rd
new file mode 100644
index 0000000..36da50f
--- /dev/null
+++ b/man/coef-Zelig-method.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{methods}
+\name{coef,Zelig-method}
+\alias{coef,Zelig-method}
+\title{Method for extracting estimated coefficients from Zelig objects}
+\usage{
+\S4method{coef}{Zelig}(object, ...)
+}
+\arguments{
+\item{object}{An Object of Class Zelig}
+
+\item{...}{Additional parameters to be passed to coef}
+}
+\description{
+Method for extracting estimated coefficients from Zelig objects
+}
+
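The new S4 coef() method behaves like coef() on an ordinary fit; a one-line usage sketch (turnout again being an illustrative data set):

    z.out <- zelig(vote ~ age, model = "logit", data = turnout)
    coef(z.out)  # named numeric vector of estimated coefficients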
diff --git a/man/coef.parameters.Rd b/man/coef.parameters.Rd
deleted file mode 100644
index 54a0252..0000000
--- a/man/coef.parameters.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{coef.parameters}
-\alias{coef.parameters}
-\title{Return Simulations of Parameter Coefficients}
-\usage{
-  \method{coef}{parameters}(object, ...)
-}
-\arguments{
-  \item{object}{a 'parameters' object}
-
-  \item{\dots}{ignored}
-}
-\value{
-  simulations, specified by the Zelig model, of the
-  ancillary parameters
-}
-\description{
-  Returns simulated parameters of coefficients for use in
-  statistical simulation. The values are set by the
-  model-fitting function and the developer of the qi.<model
-  name> method.
-}
-\note{
-  This function may not differ at all from coef.default
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/combine.Rd b/man/combine.Rd
deleted file mode 100644
index 04e9fa9..0000000
--- a/man/combine.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{combine}
-\alias{combine}
-\title{Produce All Combinations of a Set of Lists}
-\usage{
-  combine(...)
-}
-\arguments{
-  \item{...}{a set of lists to mix together}
-}
-\value{
-  all the combinations of the lists with repetition
-}
-\description{
-  Produce All Combinations of a Set of Lists
-}
-\note{
-  This function is used internally by the 'mi' constructors
-  in order to produce the complete set of combinations of
-  data-frames and factors by which to subset the data-frames.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/constructDataFrame.Rd b/man/constructDataFrame.Rd
deleted file mode 100644
index ea9ec97..0000000
--- a/man/constructDataFrame.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{constructDataFrame}
-\alias{constructDataFrame}
-\title{Construct Data Frame
-Construct and return a tiny (single-row) data-frame from a larger data-frame,
-a list of specified values, and a formula}
-\usage{
-  constructDataFrame(data, specified)
-}
-\arguments{
-  \item{data}{a ``data.frame'' that will be used to create
-  a small design matrix}
-
-  \item{specified}{a list with key-value pairs that will be
-  used to explicitly set several values}
-}
-\value{
-  a ``data.frame'' containing a single row
-}
-\description{
-  Construct Data Frame: Construct and return a tiny
-  (single-row) data-frame from a larger data-frame, a list
-  of specified values, and a formula
-}
-
diff --git a/man/constructDesignMatrix.Rd b/man/constructDesignMatrix.Rd
deleted file mode 100644
index 42b9e6c..0000000
--- a/man/constructDesignMatrix.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{constructDesignMatrix}
-\alias{constructDesignMatrix}
-\title{Construct Design Matrix
-Construct and return a design matrix based on a tiny data-frame (single-row).}
-\usage{
-  constructDesignMatrix(data, formula)
-}
-\arguments{
-  \item{data}{a ``data.frame'' (preferably single-rowed)
-  that will be used to create a small design matrix}
-
-  \item{formula}{a formula, whose predictor variables will
-  be used to create a design matrix}
-}
-\value{
-  a design (model) matrix
-}
-\description{
-  Construct Design Matrix: Construct and return a
-  design matrix based on a tiny data-frame (single-row).
-}
-
diff --git a/man/createJSON.Rd b/man/createJSON.Rd
new file mode 100644
index 0000000..dc04726
--- /dev/null
+++ b/man/createJSON.Rd
@@ -0,0 +1,16 @@
+\name{createJSON}
+\alias{createJSON}
+\title{Utility function for constructing a JSON file that encodes the hierarchy of available statistical models in Zelig}
+\usage{
+createJSON()
+}
+\value{
+Returns TRUE on successful creation of the JSON file
+}
+\description{
+Utility function for constructing a JSON file that encodes the hierarchy of available statistical models.
+}
+\author{
+Christine Choirat, Vito D'Orazio
+}
+
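A hedged usage sketch for createJSON(): per the \value section it returns TRUE on success, and the output location is decided inside the function rather than passed in:

    ok <- createJSON()  # writes the model-hierarchy JSON file
    stopifnot(isTRUE(ok))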
diff --git a/man/depends.on.zelig.Rd b/man/depends.on.zelig.Rd
deleted file mode 100644
index 84763ee..0000000
--- a/man/depends.on.zelig.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{depends.on.zelig}
-\alias{depends.on.zelig}
-\title{Whether a Statistical Package Depends on the Zelig Software Suite}
-\usage{
-  depends.on.zelig(package = "")
-}
-\arguments{
-  \item{package}{a character-string representing a package
-  name}
-}
-\value{
-  whether the package lists Zelig as a dependency in its
-  DESCRIPTION
-}
-\description{
-  Whether a Statistical Package Depends on the Zelig
-  Software Suite
-}
-\note{
-  This function is used primarily internally to determine
-  whether a package is contributing a function to the
-  Zelig software suite
-}
-
diff --git a/man/describe.Rd b/man/describe.Rd
deleted file mode 100644
index 9a67f49..0000000
--- a/man/describe.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe}
-\alias{describe}
-\title{Method to describe a model to Zelig}
-\usage{
-  describe(...)
-}
-\arguments{
-  \item{...}{parameters which are typically ignored}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Method to describe a model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.default.Rd b/man/describe.default.Rd
deleted file mode 100644
index 33c095f..0000000
--- a/man/describe.default.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{describe.default}
-\alias{describe.default}
-\title{Default describe function for an arbitrary model
-This method exists solely as a backup when an author does not contribute a
-'describe' function for their model}
-\usage{
-  \method{describe}{default}(...)
-}
-\arguments{
-  \item{...}{dummy parameters purely to cast the correct
-  object. That is, the parameters of the function should
-  not be referenced specifically}
-}
-\value{
-  a list to be processed by \code{as.description}
-}
-\description{
-  Default describe function for an arbitrary model This
-  method exists solely as a backup when an author does not
-  contribute a 'describe' function for their model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.exp.Rd b/man/describe.exp.Rd
deleted file mode 100644
index b07a311..0000000
--- a/man/describe.exp.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.exp}
-\alias{describe.exp}
-\title{Describe an ``exp'' model to Zelig}
-\usage{
-  \method{describe}{exp}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Describe an ``exp'' model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.gamma.Rd b/man/describe.gamma.Rd
deleted file mode 100644
index d218923..0000000
--- a/man/describe.gamma.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.gamma}
-\alias{describe.gamma}
-\title{Describe the \code{gamma} model to Zelig}
-\usage{
-  \method{describe}{gamma}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list of important information
-}
-\description{
-  Describe the \code{gamma} model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.logit.Rd b/man/describe.logit.Rd
deleted file mode 100644
index be99219..0000000
--- a/man/describe.logit.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.logit}
-\alias{describe.logit}
-\title{Describe a `logit' model to Zelig}
-\usage{
-  \method{describe}{logit}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Describe a `logit' model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.ls.Rd b/man/describe.ls.Rd
deleted file mode 100644
index e49e808..0000000
--- a/man/describe.ls.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{describe.ls}
-\alias{describe.ls}
-\title{Describe a \code{ls} model to Zelig}
-\usage{
-  \method{describe}{ls}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by \code{as.description}
-}
-\description{
-  Describe a \code{ls} model to Zelig
-}
-\note{
-  \code{ls} stands for "least squares fit"
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.negbinom.Rd b/man/describe.negbinom.Rd
deleted file mode 100644
index 6d72eb9..0000000
--- a/man/describe.negbinom.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{describe.negbinom}
-\alias{describe.negbinom}
-\title{Describe the \code{negbinom} model to Zelig}
-\usage{
-  \method{describe}{negbinom}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by \code{as.description}
-}
-\description{
-  Describe the \code{negbinom} model to Zelig
-}
-\note{
-  \code{negbinom} stands for "negative binomial"
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.normal.Rd b/man/describe.normal.Rd
deleted file mode 100644
index bc20e51..0000000
--- a/man/describe.normal.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.normal}
-\alias{describe.normal}
-\title{Describe the \code{normal} model to Zelig}
-\usage{
-  \method{describe}{normal}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Describe the \code{normal} model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.poisson.Rd b/man/describe.poisson.Rd
deleted file mode 100644
index b48168e..0000000
--- a/man/describe.poisson.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.poisson}
-\alias{describe.poisson}
-\title{Describe the `poisson' model to Zelig}
-\usage{
-  \method{describe}{poisson}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Describe the `poisson' model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.probit.Rd b/man/describe.probit.Rd
deleted file mode 100644
index 954b823..0000000
--- a/man/describe.probit.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.probit}
-\alias{describe.probit}
-\title{Describe the `probit' model to Zelig}
-\usage{
-  \method{describe}{probit}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Describe the `probit' model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.tobit.Rd b/man/describe.tobit.Rd
deleted file mode 100644
index b2e1220..0000000
--- a/man/describe.tobit.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{describe.tobit}
-\alias{describe.tobit}
-\title{Describe a ``tobit'' model to Zelig}
-\usage{
-  \method{describe}{tobit}(...)
-}
-\arguments{
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be processed by `as.description'
-}
-\description{
-  Describe a ``tobit'' model to Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/describe.zelig.Rd b/man/describe.zelig.Rd
deleted file mode 100644
index a644e6e..0000000
--- a/man/describe.zelig.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{describe.zelig}
-\alias{describe.zelig}
-\title{Get Description Object Used to Cite this Zelig Model}
-\usage{
-  \method{describe}{zelig}(object, ...)
-}
-\arguments{
-  \item{object}{a 'zelig' object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a 'description' object used internally to produce
-  citation text
-}
-\description{
-  Get Description Object Used to Cite this Zelig Model
-}
-\note{
-  This function should be reevaluated in design, since
-  'description' objects are exclusively used internally. In
-  particular, this method would be more useful to users as
-  a 'cite' method.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/description.Rd b/man/description.Rd
deleted file mode 100644
index 4186693..0000000
--- a/man/description.Rd
+++ /dev/null
@@ -1,37 +0,0 @@
-\name{description}
-\alias{description}
-\title{Constructor for the 'description' class}
-\usage{
-  description(authors = c("Kosuke Imai", "Gary King", "Olivia Lau"),
-    year = NULL, model = "", text = "", url = "",
-    category = NULL)
-}
-\arguments{
-  \item{authors}{a character-vector of author names}
-
-  \item{year}{a numeric specifying the year}
-
-  \item{model}{a character-string specifying model name}
-
-  \item{text}{a character-string specifying the title of
-  the model. This typically includes more exact information
-  than 'model'. E.g., for the 'logit' model, the title 'Logistic
-  Regression for Dichotomous Variables' would be a suitable
-  text parameter.}
-
-  \item{url}{a character-string specifying the model's
-  software page}
-
-  \item{category}{deprecated until Dataverse bindings are
-  reevaluated}
-}
-\value{
-  an object of type 'description'
-}
-\description{
-  Constructor for the 'description' class
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/find.match.Rd b/man/find.match.Rd
deleted file mode 100644
index 2ade535..0000000
--- a/man/find.match.Rd
+++ /dev/null
@@ -1,35 +0,0 @@
-\name{find.match}
-\alias{find.match}
-\title{Find a Partial or Exact Match from a Vector of Strings
-Searches a vector of character-strings, and returns the best match.}
-\usage{
-  find.match(needle, haystack, fail = NA)
-}
-\arguments{
-  \item{needle}{a character-string to search for in the haystack}
-
-  \item{haystack}{a vector of character-strings}
-
-  \item{fail}{the value to return in case no match is
-  found. Defaults to NA}
-}
-\value{
-  the best-matched string or NA
-}
-\description{
-  Find a Partial or Exact Match from a Vector of Strings
-  Searches a vector of character-strings, and returns the
-  best match.
-}
-\details{
-  ``find.match'' attempts to use several common matching
-  functions in an order that sequentially prefers less
-  strict matching, until a suitable match is found. If none
-  is found, then return the value of the ``fail'' parameter
-  (defaults to NA). The functions used for matching are:
-  ``match'', ``charmatch'', and finally ``grep''.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
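As an aside for readers of this removed page: below is a minimal sketch of the matching cascade it describes (exact ``match'', then ``charmatch'', then ``grep''). The name ``find.match.sketch'' and the fixed-string grep call are illustrative assumptions, not the package's actual code.

    find.match.sketch <- function(needle, haystack, fail = NA) {
      idx <- match(needle, haystack)                # exact match first
      if (!is.na(idx)) return(haystack[idx])
      idx <- charmatch(needle, haystack)            # unambiguous partial match
      if (!is.na(idx) && idx > 0) return(haystack[idx])
      idx <- grep(needle, haystack, fixed = TRUE)   # substring match last
      if (length(idx) > 0) return(haystack[idx[1]])
      fail                                          # nothing matched
    }

    find.match.sketch("logi", c("logit", "probit"))   # "logit"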
diff --git a/man/fitted-Zelig-method.Rd b/man/fitted-Zelig-method.Rd
new file mode 100644
index 0000000..59206e6
--- /dev/null
+++ b/man/fitted-Zelig-method.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{methods}
+\name{fitted,Zelig-method}
+\alias{fitted,Zelig-method}
+\title{Method for extracting estimated fitted values from Zelig objects}
+\usage{
+\S4method{fitted}{Zelig}(object, ...)
+}
+\arguments{
+\item{object}{An Object of Class Zelig}
+
+\item{...}{Additional parameters to be passed to fitted}
+}
+\description{
+Method for extracting estimated fitted values from Zelig objects
+}
+
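Illustrative use of the new method above; the formula, model name, and data set are placeholders rather than package examples.

    ## z.out <- zelig(y ~ x, model = "ls", data = mydata)
    ## head(fitted(z.out))   # fitted values from the underlying model fit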
diff --git a/man/get.package.Rd b/man/get.package.Rd
deleted file mode 100644
index 606b2a4..0000000
--- a/man/get.package.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{get.package}
-\alias{get.package}
-\title{Find the Zelig package that a particular model belongs to}
-\usage{
-  get.package(model, quiet = TRUE, ...)
-}
-\arguments{
-  \item{model}{a character-string specifying a Zelig model}
-
-  \item{quiet}{a logical indicating whether to display
-  messages and warnings}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  NA or a character-string specifying the name of the
-  package which contains a specific model
-}
-\description{
-  This method is used to help transition Zelig v3.5 users
-  to Zelig v4
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/getPredictorTerms.Rd b/man/getPredictorTerms.Rd
deleted file mode 100644
index 5dffa10..0000000
--- a/man/getPredictorTerms.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{getPredictorTerms}
-\alias{getPredictorTerms}
-\title{Get Predictor Terms from Zelig-style Formulae}
-\usage{
-  getPredictorTerms(x, ...)
-}
-\arguments{
-  \item{x}{a Zelig-style formula ('formula' or 'list')}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a character-vector or NA
-}
-\description{
-  This function extracts the predictor terms from a
-  Zelig-style object.
-}
-\note{
-  This function is used exclusively in the development of
-  Zelig-core.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/getResponseTerms.Formula-not-formula.Rd b/man/getResponseTerms.Formula-not-formula.Rd
deleted file mode 100644
index 50247a3..0000000
--- a/man/getResponseTerms.Formula-not-formula.Rd
+++ /dev/null
@@ -1,31 +0,0 @@
-\name{getResponseTerms.Formula}
-\alias{getResponse.Formula}
-\alias{getResponseTerms.Formula}
-\title{Get Response Terms from a ``Formula'' Object}
-\usage{
-  \method{getResponseTerms}{Formula}(x, ...,
-    single.only=FALSE, duplicates=TRUE)
-}
-\arguments{
-  \item{x}{a formula}
-
-  \item{...}{ignored parameters}
-
-  \item{single.only}{a logical specifying whether 'cbind'
-  or 'list' keywords are allowed}
-
-  \item{duplicates}{a logical specifying whether the
-  returned character-vector may include duplicate terms.}
-}
-\value{
-  a character-vector specifying the response terms of the
-  formula
-}
-\description{
-  This method gets the response terms from a ``Formula''
-  Object
-}
-\author{
-  Matt Owen
-}
-
diff --git a/man/getResponseTerms.Rd b/man/getResponseTerms.Rd
deleted file mode 100644
index 6ece6cc..0000000
--- a/man/getResponseTerms.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{getResponseTerms}
-\alias{getResponseTerms}
-\title{Get Response Terms from a Zelig-style Formula}
-\usage{
-  getResponseTerms(x, ...)
-}
-\arguments{
-  \item{x}{a formula or list of formulae}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a character-vector specifying the response terms of
-  this formula
-}
-\description{
-  This method acquires the response variables from
-  Zelig-style input.
-}
-
diff --git a/man/getResponseTerms.formula.Rd b/man/getResponseTerms.formula.Rd
deleted file mode 100644
index 2e664ab..0000000
--- a/man/getResponseTerms.formula.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-\name{getResponseTerms.formula}
-\alias{getResponseTerms.formula}
-\title{Get Response Terms from a Standard Formula}
-\usage{
-  \method{getResponseTerms}{formula}(x, ...,
-    single.only=FALSE, duplicates=TRUE)
-}
-\arguments{
-  \item{x}{a formula}
-
-  \item{...}{ignored parameters}
-
-  \item{single.only}{a logical specifying whether 'cbind'
-  or 'list' keywords are allowed}
-
-  \item{duplicates}{a logical specifying whether the
-  returned character-vector may include duplicate terms.}
-}
-\value{
-  a character-vector specifying the response terms of the
-  formula
-}
-\description{
-  This method gets the response terms from a standard
-  formula
-}
-\author{
-  Matt Owen
-}
-
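A hypothetical call illustrating the behaviour documented above; the exact return value for a 'cbind' response is an assumption.

    ## getResponseTerms(cbind(y1, y2) ~ x1 + x2)
    ## [1] "y1" "y2"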
diff --git a/man/getResponseTerms.list.Rd b/man/getResponseTerms.list.Rd
deleted file mode 100644
index b29d5cc..0000000
--- a/man/getResponseTerms.list.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{getResponseTerms.list}
-\alias{getResponseTerms.list}
-\title{Get Response Terms from a List-style Formula}
-\usage{
-  \method{getResponseTerms}{list}(x, ...)
-}
-\arguments{
-  \item{x}{a list of formulae}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a character-vector specifying the response terms of the
-  formula
-}
-\description{
-  This method gets the response terms from a list-style
-  formula
-}
-\author{
-  Matt Owen
-}
-
diff --git a/man/has.zelig2.Rd b/man/has.zelig2.Rd
deleted file mode 100644
index f92531c..0000000
--- a/man/has.zelig2.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{has.zelig2}
-\alias{has.zelig2}
-\title{Whether an Arbitrary R-package has a Zelig2 Function within Its Namespace}
-\usage{
-  has.zelig2(pkg)
-}
-\arguments{
-  \item{pkg}{a character-string representing a package
-  name}
-}
-\value{
-  whether the package contains any zelig2-functions
-}
-\description{
-  Whether an Arbitrary R-package has a Zelig2 Function
-  within Its Namespace
-}
-\note{
-  This function is used primarily internally to determine
-  whether a package is contributing a function to the
-  Zelig software suite
-}
-
diff --git a/man/help.zelig.Rd b/man/help.zelig.Rd
deleted file mode 100644
index a32eaec..0000000
--- a/man/help.zelig.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{help.zelig}
-\alias{help.zelig}
-\title{Help system for Zelig models}
-\usage{
-  help.zelig(...)
-}
-\arguments{
-  \item{...}{the help files to look-up}
-}
-\value{
-  results of calling the specific help function
-}
-\description{
-  Help system for Zelig models
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/homerun.Rd b/man/homerun.Rd
index ddd05ad..e5dc78c 100644
--- a/man/homerun.Rd
+++ b/man/homerun.Rd
@@ -18,7 +18,7 @@
   }
 }
 
-\source{\url{http://www.amstat.org}}
+\source{\url{https://ww2.amstat.org/publications/jse/v6n3/datasets.simonoff.html}}
 
 \references{Simonoff, Jeffrey S. 1998. ``Move Over, Roger Maris: Breaking Baseball's Most Famous Record.'' \emph{Journal of Statistics Education} 6(3). Data used are a subset of the data in the article.}
 
diff --git a/man/ignore.Rd b/man/ignore.Rd
deleted file mode 100644
index 643a478..0000000
--- a/man/ignore.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{ignore}
-\alias{ignore}
-\title{Constructor for the 'ignore' class
-This class is included for future use, and is currently
-not used in any Zelig model. It is designed for use with
-zelig2* functions}
-\usage{
-  ignore(default = NULL, type = "no pass")
-}
-\arguments{
-  \item{default}{default value}
-
-  \item{type}{ignored parameter}
-}
-\value{
-  an 'ignore' object
-}
-\description{
-  Constructor for the 'ignore' class This class is included
-  for future use, and is currently not used in any Zelig
-  model. It is designed for use with zelig2* functions
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/is.formula.Rd b/man/is.formula.Rd
deleted file mode 100644
index d4d8c7e..0000000
--- a/man/is.formula.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{is.formula}
-\alias{is.formula}
-\title{Whether an Object is a Formula}
-\usage{
-  is.formula(x)
-}
-\arguments{
-  \item{x}{an object}
-}
-\value{
-  a logical specifying whether an object is a formula
-}
-\description{
-  This is a boolean-check to see whether an object is a
-  formula.
-}
-\note{
-  This will not be shared in the Zelig/ZeligFormulae
-  namespace.
-}
-\author{
-  Matt Owen
-}
-
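The check documented above is likely equivalent to a one-line class test; this sketch is an assumption, not the package source.

    is.formula.sketch <- function(x) inherits(x, "formula")

    is.formula.sketch(y ~ x)     # TRUE
    is.formula.sketch("y ~ x")   # FALSE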
diff --git a/man/is.qi.Rd b/man/is.qi.Rd
deleted file mode 100644
index d0e3d59..0000000
--- a/man/is.qi.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{is.qi}
-\alias{is.qi}
-\title{Test If Value is Interpretable as a QI}
-\usage{
-  is.qi(qi)
-}
-\arguments{
-  \item{qi}{a potential quantity of interest}
-}
-\value{
-  a logical specifying whether this value should or
-  should-not be output
-}
-\description{
-  Test If Value is Interpretable as a QI
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/is.valid.qi.list.Rd b/man/is.valid.qi.list.Rd
deleted file mode 100644
index c07fa6b..0000000
--- a/man/is.valid.qi.list.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{is.valid.qi.list}
-\alias{is.valid.qi.list}
-\title{Check If Object Is a List of Valid Quantities of Interest}
-\usage{
-  is.valid.qi.list(x)
-}
-\arguments{
-  \item{x}{an object to be tested}
-}
-\value{
-  TRUE or FALSE
-}
-\description{
-  Check If Object Is a List of Valid Quantities of Interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/is.zelig.compliant.Rd b/man/is.zelig.compliant.Rd
deleted file mode 100644
index 2f43505..0000000
--- a/man/is.zelig.compliant.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-\name{is.zelig.compliant}
-\alias{is.zelig.compliant}
-\title{Whether a R-Package Contains a 'Yes' in its DESCRIPTION File's 'Zelig' Field}
-\usage{
-  is.zelig.compliant(package = "")
-}
-\arguments{
-  \item{package}{a character-string specifying an installed
-  R-package}
-}
-\value{
-  whether the package's DESCRIPTION file specifies
-  Zelig-compliancy
-}
-\description{
-  Whether a R-Package Contains a 'Yes' in its DESCRIPTION
-  File's 'Zelig' Field
-}
-\note{
-  This function was used internally to determine whether an
-  R-package is Zelig compliant, but is now likely
-  deprecated.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  is.zelig.package
-}
-
diff --git a/man/is.zelig.package.Rd b/man/is.zelig.package.Rd
deleted file mode 100644
index 9db9839..0000000
--- a/man/is.zelig.package.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{is.zelig.package}
-\alias{is.zelig.package}
-\title{Whether an Installed R-Package Depends on Zelig}
-\usage{
-  is.zelig.package(package = "")
-}
-\arguments{
-  \item{package}{a character-string naming a package}
-}
-\value{
-  whether this package depends on Zelig
-}
-\description{
-  Whether an Installed R-Package Depends on Zelig
-}
-\note{
-  This function was used internally to determine whether an
-  R-package is Zelig compliant, but is now likely
-  deprecated. This test is useless if not paired with
-}
-
diff --git a/man/link.Rd b/man/link.Rd
deleted file mode 100644
index 14689f7..0000000
--- a/man/link.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{link}
-\alias{link}
-\title{Method for extracting the link function from 'parameters' objects}
-\usage{
-  link(param)
-}
-\arguments{
-  \item{param}{a 'parameters' object}
-}
-\value{
-  the link function specified by the `param' function for
-  the given Zelig model
-}
-\description{
-  Method for extracting the link function from 'parameters'
-  objects
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/linkinv.Rd b/man/linkinv.Rd
deleted file mode 100644
index 22c3c05..0000000
--- a/man/linkinv.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{linkinv}
-\alias{linkinv}
-\title{Method for extracting the inverse link function from 'parameters' objects}
-\usage{
-  linkinv(param)
-}
-\arguments{
-  \item{param}{a 'parameters' object}
-}
-\value{
-  the inverse link function specified by the 'param'
-  function for the given Zelig model
-}
-\description{
-  Returns the inverse link function of a ``parameters''
-  object. If the model's developer did not specify one (but
-  did specify a link function) this function returns a
-  numerical approximation of the link function.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/list.depth.Rd b/man/list.depth.Rd
deleted file mode 100644
index eec5413..0000000
--- a/man/list.depth.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{list.depth}
-\alias{list.depth}
-\title{Count the Depth of a List Object}
-\usage{
-  list.depth(obj)
-}
-\arguments{
-  \item{obj}{a vector or list object}
-}
-\description{
-  This function recursively computes the depth of a list
-  object. That is, it determines how many layers or levels
-  exist within the object.
-}
-\note{
-  This function is used internally by Zelig.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
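A recursive sketch of the depth count described above, assuming a non-list has depth 0 and each layer of nesting adds one; the function name is illustrative.

    list.depth.sketch <- function(obj) {
      if (!is.list(obj)) return(0L)       # atomic vectors: depth 0
      if (length(obj) == 0) return(1L)    # an empty list is still one layer
      1L + max(vapply(obj, list.depth.sketch, integer(1)))
    }

    list.depth.sketch(list(1, list(2, list(3))))   # 3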
diff --git a/man/list.zelig.dependent.packages.Rd b/man/list.zelig.dependent.packages.Rd
deleted file mode 100644
index 112c91f..0000000
--- a/man/list.zelig.dependent.packages.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{list.zelig.dependent.packages}
-\alias{list.zelig.dependent.packages}
-\title{Get a List of Packages Installed on the Current Machine that Depend on Zelig}
-\usage{
-  list.zelig.dependent.packages()
-}
-\value{
-  a character-vector of all zelig-dependent packages on the
-  current machine
-}
-\description{
-  Get a List of Packages Installed on the Current Machine
-  that Depend on Zelig
-}
-\note{
-  This function is used primarily internally to determine
-  whether a package is contributing a function to the
-  Zelig software suite
-}
-
diff --git a/man/list.zelig.models.Rd b/man/list.zelig.models.Rd
deleted file mode 100644
index 1af2286..0000000
--- a/man/list.zelig.models.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{list.zelig.models}
-\alias{list.zelig.models}
-\title{List Zelig Models Installed on the Current Machine}
-\usage{
-  list.zelig.models(with.namespace = TRUE)
-}
-\arguments{
-  \item{with.namespace}{a boolean specifying whether}
-}
-\value{
-  list of all zelig models
-}
-\description{
-  List Zelig Models Installed on the Current Machine
-}
-\note{
-  This list is not necessarily complete
-}
-
diff --git a/man/loadDependencies.Rd b/man/loadDependencies.Rd
deleted file mode 100644
index 2b0d3cd..0000000
--- a/man/loadDependencies.Rd
+++ /dev/null
@@ -1,36 +0,0 @@
-\name{loadDependencies}
-
-\alias{loadDependencies}
-\alias{load.dependencies}
-
-\title{Load External Dependencies Safely and Dynamically}
-
-\usage{
-  loadDependencies(..., character.only = FALSE)
-}
-
-\arguments{
-  \item{\ldots}{
-    A collection of packages to load. If ``character.only''=FALSE, these can be
-    entered symbolically (e.g. loadDependencies(MASS)). Otherwise, these
-    arguments are character-strings.
-  }
-
-  \item{character.only}{
-    A boolean specifying whether the arguments are strictly character-strings.
-  }
-}
-
-\value{
-  TRUE (invisibly) if successful. Otherwise the script is stopped.
-}
-
-\description{
-  ``loadDependencies'' is a helper function for loading external dependencies
-  at runtime.
-}
-
-\note{
-  This is used by Zelig developers to dynamically load ``dependent'' packages at
-  runtime.
-}
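A hedged sketch of the documented semantics -- attach each package, stop the script on the first failure, and return TRUE invisibly; the real implementation may differ in detail.

    loadDependencies.sketch <- function(..., character.only = FALSE) {
      pkgs <- if (character.only) c(...)
              else vapply(as.list(substitute(list(...)))[-1], deparse, character(1))
      for (p in pkgs)
        if (!require(p, character.only = TRUE))
          stop("cannot load package: ", p)   # halt, as documented
      invisible(TRUE)
    }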
diff --git a/man/make.parameters.Rd b/man/make.parameters.Rd
deleted file mode 100644
index c7662fd..0000000
--- a/man/make.parameters.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{make.parameters}
-\alias{make.parameters}
-\title{??? For use with cmvglm}
-\usage{
-  make.parameters(terms, shape = "vector",
-    ancillary = TRUE, eqns = NULL)
-}
-\arguments{
-  \item{terms}{???}
-
-  \item{shape}{???}
-
-  \item{ancillary}{???}
-
-  \item{eqns}{???}
-}
-\value{
-  ???
-}
-\description{
-  ??? For use with cmvglm
-}
-\author{
-  Kosuke Imai and Olivia Lau
-}
-
diff --git a/man/makeModelMatrix.Rd b/man/makeModelMatrix.Rd
deleted file mode 100644
index 8bf2e27..0000000
--- a/man/makeModelMatrix.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{makeModelMatrix}
-\alias{makeModelMatrix}
-\title{Make a Model Matrix from a Zelig-Style Formula}
-\usage{
-  makeModelMatrix(formula, data)
-}
-\arguments{
-  \item{formula}{a Zelig-style formula}
-
-  \item{data}{a \code{data.frame}}
-}
-\value{
-  a design (or model) matrix
-}
-\description{
-  This is a helper function that creates a
-  \code{model.matrix} like object of Zelig-style formulae.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/makeZeligObject.Rd b/man/makeZeligObject.Rd
deleted file mode 100644
index f6bb678..0000000
--- a/man/makeZeligObject.Rd
+++ /dev/null
@@ -1,37 +0,0 @@
-\name{makeZeligObject}
-\alias{makeZeligObject}
-\title{Make an Individual Zelig Object}
-\usage{
-  makeZeligObject(object, model, call, zelig_call, data,
-    label, env, package.name = NULL)
-}
-\arguments{
-  \item{object}{a fitted statistical model}
-
-  \item{model}{a character-string specifying the name of
-  the model}
-
-  \item{call}{The call that produced the fitted model}
-
-  \item{zelig_call}{The call made to the original zelig
-  function}
-
-  \item{data}{the data.frame used to fit the model}
-
-  \item{label}{a character-string or symbol used as a
-  human-readable label for the data-set}
-
-  \item{env}{an environment that contains all
-  variables needed to evaluate the call ``zelig_call''}
-
-  \item{package.name}{a character-string specifying the
-  name of the package that is the source of the model used
-  to fit this object}
-}
-\value{
-  A ``zelig'' object
-}
-\description{
-  Returns a ``zelig'' object with the proper specifications
-}
-
diff --git a/man/mi.Rd b/man/mi.Rd
deleted file mode 100644
index 856bf83..0000000
--- a/man/mi.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{mi}
-\alias{mi}
-\title{Bundle Data-sets for Multiple Imputation}
-\usage{
-  mi(...)
-}
-\arguments{
-  \item{...}{a set of \code{data.frame}'s}
-}
-\value{
-  an \code{almost.mi} object, which contains the important
-  internals of a valid, useful \code{mi} object
-}
-\description{
-  This object prepares data-sets for processing with
-  multiple imputation.
-}
-\note{
-  This function is largely identical to simply creating a
-  list object, with the exception that any unnamed
-  data-sets are automatically labeled via the
-  \code{substitute} function
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
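Hypothetical usage of the removed bundling helper, assuming imputed data frames imp1 and imp2 exist; per the note above, unnamed arguments are labeled automatically via substitute().

    ## bundled <- mi(imp1, imp2)   # data sets labeled "imp1" and "imp2"
    ## z.out <- zelig(y ~ x, model = "ls", data = bundled)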
diff --git a/man/mid.Rd b/man/mid.Rd
index 1bc0f83..3799a24 100644
--- a/man/mid.Rd
+++ b/man/mid.Rd
@@ -5,8 +5,7 @@
 \title{Militarized Interstate Disputes}
 
 \description{
-  A small sample from the militarized interstate disputes database,
-  available at \url{http://pss.la.psu.edu/MID_DATA.HTM}.  
+  A small sample from the militarized interstate disputes (MID) database.
 }
 
 \usage{data(mid)}
diff --git a/man/mix.Rd b/man/mix.Rd
deleted file mode 100644
index a96e5e7..0000000
--- a/man/mix.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{mix}
-\alias{mix}
-\title{Produce All Combinations of a Set of Lists}
-\usage{
-  mix(...)
-}
-\arguments{
-  \item{...}{a set of lists to mix together}
-}
-\value{
-  all the combinations of the lists with repetition
-}
-\description{
-  Produce All Combinations of a Set of Lists
-}
-\note{
-  This function is used internally by the 'mi' constructors
-  in order to produce the complete set of combinations of
-  data-frames and factors used to subset the data-frames.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
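A sketch of the combination logic in terms of expand.grid; the real 'mix' may return a differently shaped structure, so treat this as an assumption.

    mix.sketch <- function(...) {
      grid <- expand.grid(..., stringsAsFactors = FALSE)
      # one list entry per row, i.e. per combination (with repetition)
      lapply(seq_len(nrow(grid)), function(i) as.list(grid[i, , drop = FALSE]))
    }

    length(mix.sketch(a = 1:2, b = c("x", "y")))   # 4 combinations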
diff --git a/man/model.frame.multiple.Rd b/man/model.frame.multiple.Rd
deleted file mode 100644
index ca05376..0000000
--- a/man/model.frame.multiple.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{model.frame.multiple}
-\alias{model.frame.multiple}
-\title{Create Model Frame from \code{multiple} Object}
-\usage{
-  \method{model.frame}{multiple}(formula,data,eqn=NULL,...)
-}
-\arguments{
-  \item{formula}{an object of both type \code{formula} and
-  \code{multiple}}
-
-  \item{data}{a \code{data.frame}}
-
-  \item{eqn}{the number of equations in the formula}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a \code{model.frame} object
-}
-\description{
-  This method creates a \code{model.frame} from a
-  \code{multiple} object. This method will be deprecated as
-  the development of Zelig 4 progresses.
-}
-\author{
-  Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
-}
-
diff --git a/man/model.matrix.multiple.Rd b/man/model.matrix.multiple.Rd
deleted file mode 100644
index a8ff19e..0000000
--- a/man/model.matrix.multiple.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{model.matrix.multiple}
-\alias{model.matrix.multiple}
-\title{Create Design Matrix of a \code{multiple} Object}
-\usage{
-  \method{model.matrix}{multiple}(object,data,shape="compact",eqn=NULL,...)
-}
-\arguments{
-  \item{object}{an object of type \code{multiple}. This
-  represents a Zelig 3.5 formula}
-
-  \item{data}{a \code{data.frame}}
-
-  \item{shape}{a character-string specifying the shape of
-  the matrix}
-
-  \item{eqn}{an integer specifying the number of equations}
-
-  \item{...}{ignored parameters}
-}
-\description{
-  This method is used to generate a \code{model.matrix}
-  adhering to the specifications in the help document
-  "model.matrix".
-}
-\note{
-  This method is scheduled to be deprecated.
-}
-
diff --git a/man/model.matrix.parseFormula.Rd b/man/model.matrix.parseFormula.Rd
deleted file mode 100644
index 495ebf1..0000000
--- a/man/model.matrix.parseFormula.Rd
+++ /dev/null
@@ -1,32 +0,0 @@
-\name{model.matrix.parseFormula}
-\alias{model.matrix.parseFormula}
-\title{Construct Design Matrix from a Parsed, Zelig-style Formula}
-\usage{
-  \method{model.matrix}{parseFormula}(object, data = NULL,
-    ...)
-}
-\arguments{
-  \item{object}{a "parseFormula" object}
-
-  \item{data}{a "data.frame"}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a "model.matrix" specifying information relevant to a
-  statistical model
-}
-\description{
-  This method constructs a design matrix from a Zelig-style
-  formula. This matrix is commonly used in statistical
-  simulation, and is likely to be the relevant
-  form of a \code{setx} object.
-}
-\note{
-  This method is primarily used by the \code{setx}
-  function.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/multilevel.Rd b/man/multilevel.Rd
deleted file mode 100644
index 447c621..0000000
--- a/man/multilevel.Rd
+++ /dev/null
@@ -1,29 +0,0 @@
-\name{multilevel}
-\alias{multilevel}
-\title{Multilevel}
-\usage{
-  multilevel(tt, data, mode, eqn, ...)
-}
-\arguments{
-  \item{tt}{a terms object}
-
-  \item{data}{a \code{data.frame}}
-
-  \item{mode}{???}
-
-  \item{eqn}{an integer specifying the number of equations
-  in a model}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a list with the "terms" attribute specified
-}
-\description{
-  This function currently has no documentation, but is
-  essential in Zelig 3.5's implementation of formulae.
-}
-\author{
-  Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
-}
-
diff --git a/man/name.object.Rd b/man/name.object.Rd
deleted file mode 100644
index 29290d0..0000000
--- a/man/name.object.Rd
+++ /dev/null
@@ -1,30 +0,0 @@
-\name{name.object}
-\alias{name.object}
-\title{Name Elements of an Object}
-\usage{
-  name.object(obj, names)
-}
-\arguments{
-  \item{obj}{a vector or matrix}
-
-  \item{names}{a character-vector specifying names}
-}
-\value{
-  the original object, with a "colnames" or "names" equal
-  to the parameter "names". If "names" is larger than
-  "obj", the "names" parameter is truncated appropriately.
-  If it is smaller, then the latter part of "obj" is
-  replaced with a numbered generic column name.
-}
-\description{
-  Returns the object with its elements or columns named.
-}
-\note{
-  This method is used internally by Zelig to name the
-  columns and elements of matrices and vectors for
-  simulations and bootstrapped parameters.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/names.qi.Rd b/man/names.qi.Rd
deleted file mode 100644
index 0aa5459..0000000
--- a/man/names.qi.Rd
+++ /dev/null
@@ -1,29 +0,0 @@
-\name{names.qi}
-\alias{names.qi}
-\title{The Names of a 'qi' Object}
-\usage{
-  \method{names}{qi}(x)
-}
-\arguments{
-  \item{x}{a 'qi' object}
-}
-\value{
-  a character-vector containing the names of the Quantities
-  of Interest
-}
-\description{
-  Function to get the names of a 'qi' object. This function
-  does not entirely parallel the functionality of
-  traditional 'names' methods; this is because the \code{$}
-  operator has been overloaded to support a unique style of
-  value extraction. For technical details, please see the
-  source code.
-}
-\note{
-  No method exists to set the names of a 'qi' object, once
-  it is constructed. This will be a feature added later.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.Rd b/man/param.Rd
deleted file mode 100644
index 4a10e07..0000000
--- a/man/param.Rd
+++ /dev/null
@@ -1,67 +0,0 @@
-\name{param}
-\alias{param}
-\title{Generic Method for Simulating Ancillary/Auxiliary Parameters of Zelig
-  Models}
-\usage{
-  param(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a \code{zelig} object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{optional parameters which will likely be
-  ignored}
-}
-\value{
-  The main purpose of the \code{param} function is to
-  return a list of key-value pairs, specifying information
-  that should be shared between the \code{qi} function and
-  the fitted statistical model (produced by the
-  \code{zelig2} function). This list can contain the
-  following entries:
-
-  \item{\code{simulations}}{specifies a set of simulated
-  parameters used to describe the statistical model's
-  underlying distribution} \item{\code{alpha}}{specifies
-  the fixed (non-simulated) ancillary parameters used by
-  the statistical model's underlying distribution}
-  \item{\code{family}}{specifies a family object used to
-  implicitly define the \code{link} and \code{linkinv}
-  functions. That is, this specifies the "link" and
-  "inverse link" functions of generalized linear models}
-  \item{\code{link}}{specifies the \code{link} function to
-  be used. This parameter is largely unimportant compared
-  to the "inverse link" function}
-  \item{\code{linkinv}}{specifies the \code{linkinv}
-  function to be used.}
-}
-\description{
-  The \code{param} method is used by developers to specify
-  simulated and fixed ancillary parameters of the Zelig
-  statistical model. That is, this method is used between
-  the \code{zelig2} function and the \link{qi} as a helper
-  function that specifies all the necessary details needed
-  to simulate quantities of interest, given the fitted
-  statistical model produced by the \code{zelig2} function.
-}
-\note{
-  The 'param' function is a method meant to be overloaded
-  by Zelig Developers
-}
-\examples{
-param.some.model <- function (obj, num, ...) {
-  list(
-       simulations = NULL,
-       alpha = NULL,
-       link = NULL,
-       linkinv = NULL,
-       fam = NULL
-       )
-}
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.default.Rd b/man/param.default.Rd
deleted file mode 100644
index 7df1f96..0000000
--- a/man/param.default.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{param.default}
-\alias{param.default}
-\title{Default Method for ``param''}
-\usage{
-  \method{param}{default}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{ignored parameter}
-
-  \item{num}{ignored parameter}
-
-  \item{...}{ignored parameters}
-}
-\description{
-  If no \code{param} function is set for a Zelig model,
-  then this function will return NULL.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.exp.Rd b/man/param.exp.Rd
deleted file mode 100644
index 6f6e87c..0000000
--- a/man/param.exp.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.exp}
-\alias{param.exp}
-\title{Param Method for the \code{exp} Zelig Model}
-\usage{
-  \method{param}{exp}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the \code{exp} Zelig Model
-}
-\note{
-  This method is used by the \code{param} Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.gamma.Rd b/man/param.gamma.Rd
deleted file mode 100644
index 7ba8e0e..0000000
--- a/man/param.gamma.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{param.gamma}
-\alias{param.gamma}
-\title{param method for the `gamma' Zelig model}
-\usage{
-  \method{param}{gamma}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a `zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be cast as a `parameters' object
-}
-\description{
-  Return parameter estimates for the ``gamma'' GLM in
-  Zelig.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.logit.Rd b/man/param.logit.Rd
deleted file mode 100644
index 63957c9..0000000
--- a/man/param.logit.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.logit}
-\alias{param.logit}
-\title{Param Method for the \code{logit} Zelig Model}
-\usage{
-  \method{param}{logit}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the \code{logit} Zelig Model
-}
-\note{
-  This method is used by the \code{logit} Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.ls.Rd b/man/param.ls.Rd
deleted file mode 100644
index 28e2a10..0000000
--- a/man/param.ls.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.ls}
-\alias{param.ls}
-\title{Param Method for the 'ls' Zelig Model}
-\usage{
-  \method{param}{ls}(obj, num, \dots)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the 'ls' Zelig Model
-}
-\note{
-  This method currently returns via a deprecated style
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.negbinom.Rd b/man/param.negbinom.Rd
deleted file mode 100644
index 86c9f5a..0000000
--- a/man/param.negbinom.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.negbinom}
-\alias{param.negbinom}
-\title{Param Method for the 'negbinom' Zelig Model}
-\usage{
-  \method{param}{negbinom}(obj, num=1000, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the 'negbinom' Zelig Model
-}
-\note{
-  This method is used by the 'negbinom' Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.normal.Rd b/man/param.normal.Rd
deleted file mode 100644
index d918aec..0000000
--- a/man/param.normal.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.normal}
-\alias{param.normal}
-\title{Param Method for the 'normal' Zelig Model}
-\usage{
-  \method{param}{normal}(obj, num=1000, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the 'normal' Zelig Model
-}
-\note{
-  This method is used by the 'normal' Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.poisson.Rd b/man/param.poisson.Rd
deleted file mode 100644
index 0fb637f..0000000
--- a/man/param.poisson.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.poisson}
-\alias{param.poisson}
-\title{Param Method for the 'poisson' Zelig Model}
-\usage{
-  \method{param}{poisson}(obj, num=1000, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the 'poisson' Zelig Model
-}
-\note{
-  This method is used by the 'poisson' Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.probit.Rd b/man/param.probit.Rd
deleted file mode 100644
index 2237681..0000000
--- a/man/param.probit.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.probit}
-\alias{param.probit}
-\title{Param Method for the 'probit' Zelig Model}
-\usage{
-  \method{param}{probit}(obj, num=1000, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the 'probit' Zelig Model
-}
-\note{
-  This method is used by the 'probit' Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/param.relogit.Rd b/man/param.relogit.Rd
deleted file mode 100644
index c43dcea..0000000
--- a/man/param.relogit.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{param.relogit}
-\alias{param.relogit}
-\title{Estimate Parameters for the ``relogit'' Zelig Model}
-\usage{
-  \method{param}{relogit}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a zelig object containing the fitted model}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{...}{unspecified parameters}
-}
-\value{
-  a list specifying important parameters for the
-  ``relogit'' model
-}
-\description{
-  Returns parameter estimates, as well as the
-  link and inverse-link functions.
-}
-\note{
-  This method merely calls ``param.logit''.
-}
-
diff --git a/man/param.relogit2.Rd b/man/param.relogit2.Rd
deleted file mode 100644
index c0e44e0..0000000
--- a/man/param.relogit2.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{param.relogit2}
-\alias{param.relogit2}
-\title{Estimate Parameters for the ``relogit'' Zelig Model}
-\usage{
-  \method{param}{relogit2}(obj, num, x, ...)
-}
-\arguments{
-  \item{obj}{a zelig object containing the fitted model}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{x}{ideally we should be able to remove this
-  parameter}
-
-  \item{...}{unspecified parameters}
-}
-\value{
-  a list specifying important parameters for the
-  ``relogit'' model
-}
-\description{
-  Returns parameter estimates, as well as the
-  link and inverse-link functions.
-}
-
diff --git a/man/param.tobit.Rd b/man/param.tobit.Rd
deleted file mode 100644
index 8c13c29..0000000
--- a/man/param.tobit.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{param.tobit}
-\alias{param.tobit}
-\title{Param Method for the \code{tobit} Zelig Model}
-\usage{
-  \method{param}{tobit}(obj, num, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{num}{an integer specifying the number of
-  simulations to sample}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a list to be cast as a 'parameters' object
-}
-\description{
-  Param Method for the \code{tobit} Zelig Model
-}
-\note{
-  This method is used by the \code{tobit} Zelig model
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/parameters.Rd b/man/parameters.Rd
deleted file mode 100644
index 0211e84..0000000
--- a/man/parameters.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-\name{parameters}
-\alias{parameters}
-\title{Constructor for `parameters' class}
-\usage{
-  parameters(simulations, alpha, fam = NULL, link = NULL,
-    linkinv = NULL)
-}
-\arguments{
-  \item{simulations}{a vector or matrix containing
-  simulated values}
-
-  \item{alpha}{ancillary parameters for the Zelig
-  statistical model}
-
-  \item{fam}{a family object which implicitly specifies the
-  link and link-inverse functions for the}
-
-  \item{link}{the link function of the specified
-  statistical model.  The `linkinv' parameter is implicitly
-  defined by by the `link' parameter, when `linkinv' is
-  omitted}
-
-  \item{linkinv}{the inverse link function}
-}
-\value{
-  a `parameters' object
-}
-\description{
-  Constructor for `parameters' class
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/parse.formula.Rd b/man/parse.formula.Rd
deleted file mode 100644
index d60c7d9..0000000
--- a/man/parse.formula.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{parse.formula}
-\alias{parse.formula}
-\title{Parse Formulas for Zelig Models}
-\usage{
-  parse.formula(formula, model, data = NULL)
-}
-\arguments{
-  \item{formula}{a formula}
-
-  \item{model}{a Zelig model}
-
-  \item{data}{a data-frame}
-}
-\description{
-  Parse Formulas for Zelig Models
-}
-\note{
-  This is used typically in multinomial and multivariate
-  Zelig models
-}
-\author{
-  Kosuke Imai and Olivia Lau
-}
-
diff --git a/man/parseFormula.Rd b/man/parseFormula.Rd
deleted file mode 100644
index 8fd899c..0000000
--- a/man/parseFormula.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{parseFormula}
-\alias{parseFormula}
-\title{Parse Zelig-style Formulae}
-\usage{
-  parseFormula(obj, data = NULL)
-}
-\arguments{
-  \item{obj}{a list or formula}
-
-  \item{data}{the data set associated with the formula
-  object}
-}
-\value{
-  an object of type "parseFormula". This object has slots
-  specifying:
-}
-\description{
-  Zelig uses three distinct types of formulae. This method
-  is a re-design of the Zelig function
-  \code{parse.formula}.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/parseFormula.formula.Rd b/man/parseFormula.formula.Rd
deleted file mode 100644
index f4b4e89..0000000
--- a/man/parseFormula.formula.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{parseFormula.Formula}
-\alias{parseFormula.Formula}
-\title{Parse ``Formula''-style Zelig Formulae}
-\usage{
-  \method{parseFormula}{Formula}(obj, data=NULL)
-}
-\arguments{
-  \item{obj}{a list of formulae}
-
-  \item{data}{a data frame}
-}
-\value{
-  an object of type ``parseFormula''
-}
-\description{
-  This method parses a ``Formula''-style Zelig formula.
-  This is to support the ``Formula'' object. It seems like
-  it has the right idea when it comes to expressing
-  multiple responses.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/parseFormula.list.Rd b/man/parseFormula.list.Rd
deleted file mode 100644
index 17c2ca3..0000000
--- a/man/parseFormula.list.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{parseFormula.list}
-\alias{parseFormula.list}
-\title{Parse List-Style Zelig Formulae}
-\usage{
-  \method{parseFormula}{list}(obj, data=NULL)
-}
-\arguments{
-  \item{obj}{a list of formulae}
-
-  \item{data}{a data frame}
-}
-\value{
-  an object of type "parseFormula"
-}
-\description{
-  This method parses a list-style Zelig formula.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/plot-Zelig-ANY-method.Rd b/man/plot-Zelig-ANY-method.Rd
new file mode 100644
index 0000000..ab7770f
--- /dev/null
+++ b/man/plot-Zelig-ANY-method.Rd
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{methods}
+\name{plot,Zelig,ANY-method}
+\alias{plot,Zelig,ANY-method}
+\title{Plot method for Zelig objects}
+\usage{
+\S4method{plot}{Zelig,ANY}(x, y, ...)
+}
+\arguments{
+\item{x}{An Object of Class Zelig}
+
+\item{y}{unused}
+
+\item{...}{Additional parameters to be passed to plot}
+}
+\description{
+Plot method for Zelig objects
+}
+
diff --git a/man/plot.MI.sim.Rd b/man/plot.MI.sim.Rd
deleted file mode 100644
index c4aff50..0000000
--- a/man/plot.MI.sim.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{plot.MI.sim}
-\alias{plot.MI.sim}
-\title{Plot graphs of simulated multiply-imputed data}
-\usage{
-  \method{plot}{MI.sim}(x, ...)
-}
-\arguments{
-  \item{x}{A zelig `sim' object, with multiply imputed data}	
-  \item{...}{ignored parameters}
-}
-\value{
-  NULL (invisibly)
-}
-\description{
-  This function is currently unimplemented, and reserved
-  for future use.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/plot.ci.Rd b/man/plot.ci.Rd
deleted file mode 100644
index 4328d2a..0000000
--- a/man/plot.ci.Rd
+++ /dev/null
@@ -1,60 +0,0 @@
-\name{plot.ci}
-\alias{plot.ci}
-\title{Method for plotting pooled simulations by confidence intervals}
-\usage{
-  \method{plot}{ci}(x, qi="ev", var=NULL, ..., main = NULL, sub = NULL, xlab = NULL, 
-  ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1, legpos=NULL,
-   ci=c(80,95,99.9))
-}
-\arguments{
-  \item{x}{A `sim' object}
-
-  \item{qi}{a character-string specifying the quantity of
-  interest to plot: "ev" expected values, "pv" predicted values, "fd" first differences}
-  
-  \item{var}{The variable to be used on the x-axis}
-
-  \item{...}{Parameters to be passed to the `truehist'
-  function which is implicitly called for numeric
-  simulations}
-
-  \item{main}{A character-string, specifying the main title of the plot}
-
-  \item{sub}{A character-string, specifying the sub-title of the plot}
-
-  \item{xlab}{A character-string, specifying the label for the x-axis}
-
-  \item{ylab}{A character-string, specifying the label for the y-axis}
-
-  \item{xlim}{A vector of length 2, specifying the left-most and right-most values for the plot}
-
-  \item{ylim}{A vector of length 2, specifying the bottom-most and top-most values for the plot}
-
-  \item{legcol}{``legend color'', a valid color used for
-  plotting the line colors in the legend}
-
-  \item{col}{a valid vector of colors of at least length 3
-  to use to color the confidence intervals}
-
-  \item{leg}{``legend position'', an integer from 1 to 4,
-  specifying the position of the legend. 1 to 4 correspond
-  to ``SE'', ``SW'', ``NW'', and ``NE'' respectively}
-
-  \item{legpos}{``legend type'', exact coordinates and
-  sizes for legend. Overrides argument ``leg.type''}
-  
-  \item{ci}{A vector of length up to 3, specifying the three confidence interval levels to 
-  plot on the graph (where confidence is expressed on the scale 0-100).}
-}
-\value{
-  the current graphical parameters. This is subject to
-  change in future implementations of Zelig
-}
-\description{
-  Plot confidence intervals of simulated quantities of interest, across a range of a variable.
-}
-\author{
-  James Honaker, adapted by Matt Owen
-  \email{mowen at iq.harvard.edu}
-}
-
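A hypothetical end-to-end call under the Zelig 4 API this page documents; the model, variable, and data names are placeholders.

    ## z.out <- zelig(y ~ x, model = "ls", data = mydata)
    ## x.out <- setx(z.out, x = seq(1, 10))
    ## s.out <- sim(z.out, x = x.out)
    ## plot.ci(s.out, qi = "ev", var = "x", ci = c(80, 95, 99.9))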
diff --git a/man/plot.pooled.sim.Rd b/man/plot.pooled.sim.Rd
deleted file mode 100644
index 2761fed..0000000
--- a/man/plot.pooled.sim.Rd
+++ /dev/null
@@ -1,62 +0,0 @@
-\name{plot.pooled.sim}
-\alias{plot.pooled.sim}
-\title{Method for plotting pooled simulations by confidence intervals}
-\usage{
-  \method{plot}{pooled.sim}(x, qi="ev", var=NULL, ..., main = NULL, sub = NULL, 
-  xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1,
-   legpos=NULL, ci=c(80,95,99.9))
-}
-\arguments{
-  \item{x}{A `sim' object}
-
-  \item{qi}{a character-string specifying the quantity of
-  interest to plot}
-
-  \item{var}{The variable to be used on the x-axis. Default
-  is the variable across all the chosen values with
-  smallest nonzero variance}
-
-  \item{...}{Parameters to be passed to the `truehist'
-  function which is implicitly called for numeric
-  simulations}
-
-  \item{main}{A character-string, specifying the main title of the plot}
-
-  \item{sub}{A character-string, specifying the sub-title of the plot}
-
-  \item{xlab}{A character-string, specifying the label for the x-axis}
-
-  \item{ylab}{A character-string, specifying the label for the y-axis}
-
-  \item{xlim}{A vector of length 2, specifying the left-most and right-most values for the plot}
-
-  \item{ylim}{A vector of length 2, specifying the bottom-most and top-most values for the plot}
-
-  \item{legcol}{``legend color'', a valid color used for
-  plotting the line colors in the legend}
-
-  \item{col}{a valid vector of colors of at least length 3
-  to use to color the confidence intervals}
-
-  \item{leg}{``legend position'', an integer from 1 to 4,
-  specifying the position of the legend. 1 to 4 correspond
-  to ``SE'', ``SW'', ``NW'', and ``NE'' respectively}
-
-  \item{legpos}{``legend type'', exact coordinates and
-  sizes for legend. Overrides argument ``leg.type''}
-
-  \item{ci}{A numeric triple, specifying the three levels 
-  to plot confidence intervals for, scaled 0 to 100}
-}
-\value{
-  the current graphical parameters. This is subject to
-  change in future implementations of Zelig
-}
-\description{
-  Plot pooled simulated quantities of interest.
-}
-\author{
-  James Honaker, adapted by Matt Owen
-  \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/plot.sim.Rd b/man/plot.sim.Rd
deleted file mode 100644
index 1acbb82..0000000
--- a/man/plot.sim.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{plot.sim}
-\alias{plot.sim}
-\title{Method for plotting simulations}
-\usage{
-  \method{plot}{sim}(x, ...)
-}
-\arguments{
-  \item{x}{a `sim' object}
-
-  \item{...}{parameters to be passed to the `truehist'
-  function which is implicitly called for numeric
-  simulations}
-}
-\value{
-  nothing
-}
-\description{
-  Plot simulated quantities of interest.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/plot.simulations.Rd b/man/plot.simulations.Rd
deleted file mode 100644
index 1398b41..0000000
--- a/man/plot.simulations.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{plot.simulations}
-\alias{plot.simulations}
-\title{Plot Any Simulation from the Zelig Core Package}
-\usage{
-  plot.simulations(x, ...)
-}
-\arguments{
-  \item{x}{an object}
-
-  \item{...}{parameters passed to the ``plot'' and
-  ``barplot'' functions}
-}
-\value{
-  the original graphical parameters
-}
-\description{
-  Plots any simulation from the core package. In general,
-  this function can \emph{neatly} plot simulations
-  containing five of the popular ``quantities of interest''
-  - ``Expected Values: E(Y|X)'', ``Predicted Values: Y|X'',
-  ``Expected Values (for X1): E(Y|X1)'', ``Predicted Values
-  (for X1): Y|X1'' and ``First Differences: E(Y|X1) -
-  E(Y|X)''.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/predict-Zelig-method.Rd b/man/predict-Zelig-method.Rd
new file mode 100644
index 0000000..9986dd4
--- /dev/null
+++ b/man/predict-Zelig-method.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{methods}
+\name{predict,Zelig-method}
+\alias{predict,Zelig-method}
+\title{Method for getting predicted values from Zelig objects}
+\usage{
+\S4method{predict}{Zelig}(object, ...)
+}
+\arguments{
+\item{object}{An Object of Class Zelig}
+
+\item{...}{Additional parameters to be passed to predict}
+}
+\description{
+Method for getting predicted values from Zelig objects
+}
+
diff --git a/man/print.qi.Rd b/man/print.qi.Rd
deleted file mode 100644
index 95a0b77..0000000
--- a/man/print.qi.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{print.qi}
-\alias{print.qi}
-\title{Print a Quantity of Interest in Human-Readable Form}
-\usage{
-  \method{print}{qi}(x, ...)
-}
-\arguments{
-  \item{x}{a qi object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the object that was printed (invisibly)
-}
-\description{
-  Print simulated quantities of interest in a
-  human-readable form
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.qi.summarized.Rd b/man/print.qi.summarized.Rd
deleted file mode 100644
index ced2448..0000000
--- a/man/print.qi.summarized.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{print.qi.summarized}
-\alias{print.qi.summarized}
-\title{Print Method for Summarized Quantities of Interest}
-\usage{
-  \method{print}{qi.summarized}(x, \dots)
-}
-\arguments{
-  \item{x}{a 'summarized.qi' object}
-
-  \item{...}{parameters to be passed to the specific print
-  functions}
-}
-\value{
-  x (invisibly)
-}
-\description{
-  Print Method for Summarized Quantities of Interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  \link{special_print_MATRIX} and \link{special_print_LIST}
-}
-
diff --git a/man/print.setx.Rd b/man/print.setx.Rd
deleted file mode 100644
index 6a4c8f1..0000000
--- a/man/print.setx.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{print.setx}
-\alias{print.setx}
-\title{Print values of `setx' objects}
-\usage{
-  \method{print}{setx}(x, ...)
-}
-\arguments{
-  \item{x}{a `setx' object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the value of x (invisibly)
-}
-\description{
-  Print a ``setx'' object in human-readable form.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.setx.mi.Rd b/man/print.setx.mi.Rd
deleted file mode 100644
index f6fe655..0000000
--- a/man/print.setx.mi.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{print.setx.mi}
-\alias{print.setx.mi}
-\title{Print a Bundle of Data-sets}
-\usage{
-  \method{print}{setx.mi}(x, ...)
-}
-\arguments{
-  \item{x}{a \code{setx} object to print}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the \code{setx} object (invisibly)
-}
-\description{
-  Print a Bundle of Data-sets
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.sim.Rd b/man/print.sim.Rd
deleted file mode 100644
index 20106dd..0000000
--- a/man/print.sim.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{print.sim}
-\alias{print.sim}
-\title{Print values of `sim' objects}
-\usage{
-  \method{print}{sim}(x, ...)
-}
-\arguments{
-  \item{x}{a `sim' object (ignored)}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  NULL (invisibly)
-}
-\description{
-  This function is currently unimplemented, and included
-  for future development
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.summary.MCMCZelig.Rd b/man/print.summary.MCMCZelig.Rd
deleted file mode 100644
index 6e72e49..0000000
--- a/man/print.summary.MCMCZelig.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{print.summary.MCMCZelig}
-
-\alias{print.summary.MCMCZelig}
-
-\title{Print a Summary MCMCZelig Object}
-
-\usage{
-  \method{print}{summary.MCMCZelig}(x, digits=max(3, getOption("digits") - 3), ...)
-}
-
-\arguments{
-  \item{x}{an "MCMCZelig" object}
-
-  \item{digits}{a numeric specifying the precision of the
-  summary object}
-
-  \item{...}{ignored parameters}
-}
-
-\value{
-  a \code{summary.MCMCZelig} object
-}
-
-\description{
-  This method prints a summary object for \code{MCMCZelig}
-  objects
-}
diff --git a/man/print.summary.pooled.sim.Rd b/man/print.summary.pooled.sim.Rd
deleted file mode 100644
index b3b97c1..0000000
--- a/man/print.summary.pooled.sim.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{print.summary.pooled.sim}
-\alias{print.summary.pooled.sim}
-\title{Print a Summary of a Set of Pooled Simulated Interests}
-\usage{
-  \method{print}{summary.pooled.sim}(x, ...)
-}
-\arguments{
-  \item{x}{a ``summary.pooled.sim'' object, containing
-  summarized information about simulated quantities of
-  interest}
-
-  \item{...}{Optional parameters that will be passed onward
-  to ``print.matrix'' (the matrix printing function)}
-}
-\value{
-  a ``summary.pooled.sim'' object storing the quantities of
-  interest
-}
-\description{
-  Prints the summary information from a set of pooled
-  simulated interests. This method assumes that quantities
-  of interest are kept in a data type which can be used
-  with ``rbind''.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.summary.relogit.Rd b/man/print.summary.relogit.Rd
deleted file mode 100644
index d753d1b..0000000
--- a/man/print.summary.relogit.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{print.summary.relogit}
-\alias{print.summary.relogit}
-\title{Print Summary of a Rare-event Logistic Model}
-\usage{
-  \method{print}{summary.relogit}(x, digits = max(3,
-    getOption("digits") - 3), ...)
-}
-\arguments{
-  \item{x}{a ``summary.relogit'' object produced by the
-  ``summary'' method.}
-
-  \item{digits}{an integer specifying the number of digits
-  of precision to specify}
-
-  \item{...}{parameters passed forward to the ``print.glm''
-  function}
-}
-\value{
-  x (invisibly)
-}
-\description{
-  Prints the summary of a rare-event logistic regression model.
-}
-
diff --git a/man/print.summary.relogit2.Rd b/man/print.summary.relogit2.Rd
deleted file mode 100644
index eeef863..0000000
--- a/man/print.summary.relogit2.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{print.summary.relogit2}
-\alias{print.summary.relogit2}
-\title{Print Summary of a Rare-event Logistic Model}
-\usage{
-  \method{print}{summary.relogit2}(x, digits = max(3,
-    getOption("digits") - 3), ...)
-}
-\arguments{
-  \item{x}{the object to print}
-
-  \item{digits}{an integer specifying the number of digits
-  of precision}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  x (invisibly)
-}
-\description{
-  Prints the summary of a rare-event logistic regression model.
-}
-
diff --git a/man/print.summary.sim.Rd b/man/print.summary.sim.Rd
deleted file mode 100644
index 40a6434..0000000
--- a/man/print.summary.sim.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{print.summary.sim}
-\alias{print.summary.sim}
-\title{Print Values of a Summarized ``sim'' Object}
-\usage{
-  \method{print}{summary.sim}(x, ...)
-}
-\arguments{
-  \item{x}{a 'summary.sim' object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the value of the `summary.sim' object (invisibly)
-}
-\description{
-  Print values of simulated quantities of interest (stored
-  in a ``summary.sim'' object).
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.summarySim.MI.Rd b/man/print.summarySim.MI.Rd
deleted file mode 100644
index 3760575..0000000
--- a/man/print.summarySim.MI.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{print.summarySim.MI}
-\alias{print.summarySim.MI}
-\title{Print Multiply Imputed Simulations Summary}
-\usage{
-  \method{print}{summarySim.MI}(x, digits=3, ...)
-}
-\arguments{
-  \item{x}{a 'summarySim.MI' object}
-
-  \item{digits}{an integer specifying the number of digits
-  of precision to print}
-
-  \item{...}{ignored parameters}
-}
-\description{
-  Prints summary information about Multiply Imputed Fits
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/print.zelig.Rd b/man/print.zelig.Rd
deleted file mode 100644
index 8560a44..0000000
--- a/man/print.zelig.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{print.zelig}
-\alias{print.zelig}
-\title{Print values of ``zelig'' objects}
-\usage{
-  \method{print}{zelig}(x, ...)
-}
-\arguments{
-  \item{x}{a `zelig' object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the `zelig' object (invisibly)
-}
-\description{
-  Print the zelig object as a list
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/qi.Rd b/man/qi.Rd
deleted file mode 100644
index 5b9625e..0000000
--- a/man/qi.Rd
+++ /dev/null
@@ -1,61 +0,0 @@
-\name{qi}
-
-\alias{qi}
-\alias{qi.exp.Rd}
-\alias{qi.logit.Rd}
-\alias{qi.negbinom.Rd}
-\alias{qi.normal.survey.Rd}
-\alias{qi.poisson.survey.Rd}
-\alias{qi.relogit.Rd}
-\alias{qi.gamma.Rd}
-\alias{qi.ls.Rd}
-\alias{qi.normal.Rd}
-\alias{qi.poisson.Rd}
-\alias{qi.probit.Rd}
-\alias{qi.relogit2.Rd}
-\alias{qi.tobit.Rd}
-
-\title{Generic Method for Computing Quantities of Interest}
-\usage{
-  qi(obj, x = NULL, x1 = NULL, y = NULL, num, param = NULL)
-}
-\arguments{
-  \item{obj}{a \code{zelig} object}
-
-  \item{x}{a \code{setx} object or NULL}
-
-  \item{x1}{an optional \code{setx} object}
-
-  \item{y}{this parameter is reserved for simulating
-  average treatment effects, though this feature is
-  currently supported by only a handful of models}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{param}{a parameters object}
-}
-\value{
-  a list of key-value pairs pairing titles of
-  quantities of interest with their simulations
-}
-\description{
-  The \code{qi} function is used by developers to simulate
-  quantities of interest. This method, as a result, is the
-  most significant method of any Zelig statistical model.
-}
-\note{
-  Run \code{example(qi)} to see a trivial version of this method.
-}
-\examples{
-qi.some.model <- function(obj, x=NULL, x1=NULL, y=NULL, param=NULL) {
-  list(
-       "Expected Values: E(Y|X)" = NA,
-       "Predicted Values: Y|X"   = NA
-       )
-}
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
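The removed page above documents the developer-facing qi API: a qi.<model> method receives the fitted object, one or two setx scenarios, and a parameters object, and returns a titled list of simulation matrices. A minimal sketch of such a method under those Zelig 4 conventions (the model name is hypothetical; it assumes coef(param) returns a num-by-k matrix of simulated coefficients and that as.matrix() coerces the setx object to a design matrix):

  qi.mymodel <- function(obj, x = NULL, x1 = NULL, y = NULL,
                         num = 1000, param = NULL) {
    beta <- coef(param)                 # num x k draws (assumed API)
    ev <- beta %*% t(as.matrix(x))      # expected values, identity link
    # real methods typically also return predicted values and, when x1
    # is supplied, first differences
    list("Expected Values: E(Y|X)" = ev)
  }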
diff --git a/man/qi.exp.Rd b/man/qi.exp.Rd
deleted file mode 100644
index 6df220a..0000000
--- a/man/qi.exp.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-\name{qi.exp}
-\alias{qi.exp}
-\title{Compute quantities of interest for 'exp' Zelig models}
-\usage{
-  \method{qi}{exp}(obj, x=NULL, x1=NULL, y=NULL, num=1000,
-    param=NULL)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{x}{a 'setx' object or NULL}
-
-  \item{x1}{an optional 'setx' object}
-
-  \item{y}{this parameter is reserved for simulating
-  average treatment effects, though this feature is
-  currently supported by only a handful of models}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{param}{a parameters object}
-}
-\value{
-  a list of key-value pairs pairing titles of
-  quantities of interest with their simulations
-}
-\description{
-  Compute quantities of interest for 'exp' Zelig models
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/qi.plot.Rd b/man/qi.plot.Rd
new file mode 100644
index 0000000..df0593c
--- /dev/null
+++ b/man/qi.plot.Rd
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plots.R
+\name{qi.plot}
+\alias{qi.plot}
+\title{Default Plot Design For Zelig Model QI's}
+\usage{
+qi.plot(obj, ...)
+}
+\arguments{
+\item{obj}{A reference class zelig5 object}
+
+\item{...}{Parameters to be passed to the `truehist' function which is 
+implicitly called for numeric simulations}
+}
+\description{
+Default Plot Design For Zelig Model QI's
+}
+\author{
+James Honaker with panel layouts from Matt Owen
+}
+
diff --git a/man/qi.summarize.Rd b/man/qi.summarize.Rd
deleted file mode 100644
index fc047cd..0000000
--- a/man/qi.summarize.Rd
+++ /dev/null
@@ -1,36 +0,0 @@
-\name{qi.summarize}
-\alias{qi.summarize}
-\title{Constructor for QI Summarized Class
-This class takes an arbitrary number of the _same_ type of
-quantities of interest, labels them, then
-merges them into one simple printable block. In particular,
-this class determines which print function to use based on
-the type and size of data to be passed to the print function.}
-\usage{
-  qi.summarize(title, x, ...)
-}
-\arguments{
-  \item{title}{a character-string specifying the title of
-  the QI}
-
-  \item{x}{a list of summarized quantities of interest}
-
-  \item{...}{additional quantities of interest (the
-  parameter name that titles these will be used as the name
-  of the data.frame)}
-}
-\value{
-  the list of QI's (invisibly)
-}
-\description{
-  Constructor for QI Summarized Class This class takes an
-  arbitrary number of the _same_ type of quantities of
-  interest, labels them, then merges them into one simple
-  printable block. In particular, this class determines
-  which print function to use based on the type and
-  size of data to be passed to the print function.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/reduce.Rd b/man/reduce.Rd
new file mode 100644
index 0000000..5f5fcd2
--- /dev/null
+++ b/man/reduce.Rd
@@ -0,0 +1,30 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{reduce}
+\alias{reduce}
+\title{Calculate the reduced dataset to be used in \code{\link{setx}}}
+\usage{
+reduce(dataset, s, formula, data, avg = avg)
+}
+\arguments{
+\item{dataset}{Zelig object data, possibly split to deal with \code{by} argument}
+
+\item{s}{list of variables and their tentative \code{setx} values}
+
+\item{formula}{a simplified version of the Zelig object formula (typically with 1 on the lhs)}
+
+\item{data}{Zelig object data}
+
+\item{avg}{function of data transformations}
+}
+\value{
+a list of all the model variables either at their central tendency or their \code{setx} value
+}
+\description{
+This method is used internally
+}
+\author{
+Christine Choirat
+}
+\keyword{internal}
+
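For orientation, reduce() collapses each model variable to either its user-supplied setx value or a central tendency of the data. A toy illustration of that collapse (the central() helper and the mean/mode choice here are illustrative assumptions, not the package's exact rule):

  central <- function(v) {
    if (is.numeric(v)) mean(v)                  # numeric: mean
    else names(which.max(table(v)))             # categorical: modal value
  }
  data(turnout, package = "Zelig")              # example data shipped with Zelig
  sapply(turnout[, c("age", "educate", "race")], central)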
diff --git a/man/reduceMI.Rd b/man/reduceMI.Rd
deleted file mode 100644
index e042068..0000000
--- a/man/reduceMI.Rd
+++ /dev/null
@@ -1,24 +0,0 @@
-\name{reduceMI}
-\alias{reduceMI}
-\title{Reduce MI Formulas
-Take a formula in either reduced or structural form and return
-the most reduced form of that formula}
-\usage{
-  reduceMI(f)
-}
-\arguments{
-  \item{f}{a formula}
-}
-\description{
-  Reduce MI Formulas Take a formula in either reduced or
-  structural form and return the most reduced form of that
-  formula
-}
-\note{
-  This function is used primarily by 'zelig2' functions of
-  multivariate Zelig models
-}
-\author{
-  Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-}
-
diff --git a/man/relogit.Rd b/man/relogit.Rd
index 8d4751f..885e48a 100644
--- a/man/relogit.Rd
+++ b/man/relogit.Rd
@@ -1,27 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-relogit.R
 \name{relogit}
 \alias{relogit}
-\title{Fit a rare-event logistic model in Zelig}
+\title{Estimation function for rare events logit models}
 \usage{
-  relogit(formula, data = sys.parent(), tau = NULL,
-    bias.correct = TRUE, case.control = "prior", ...)
-}
-\arguments{
-  \item{formula}{a formula object}
-
-  \item{data}{...}
-
-  \item{tau}{...}
-
-  \item{bias.correct}{...}
-
-  \item{case.control}{...}
-
-  \item{...}{???}
-}
-\value{
-  a ``relogit'' ``glm'' object
+relogit(formula, data = sys.parent(), tau = NULL, bias.correct = TRUE,
+  case.control = "prior", ...)
 }
 \description{
-  Fits a rare-event (``relogit'') model.
+Estimation function for rare events logit models
 }
+\keyword{internal}
 
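Since relogit() is now marked internal, the supported entry point is zelig() with model = "relogit". A sketch following the rare events example long used in the Zelig documentation (assumes the mid data set shipped with the package; tau is the population proportion of events):

  library(Zelig)
  data(mid)
  z.out <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
                 data = mid, model = "relogit", tau = 1042 / 303772)
  summary(z.out)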
diff --git a/man/repl.Rd b/man/repl.Rd
deleted file mode 100644
index 2b26275..0000000
--- a/man/repl.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{repl}
-\alias{repl}
-\title{Generic Method for Replicating Data}
-\usage{
-  repl(object, ...)
-}
-\arguments{
-  \item{object}{a 'zelig' object}
-
-  \item{...}{parameters}
-}
-\value{
-  a replicated object
-}
-\description{
-  Generic Method for Replicating Data
-}
-\author{
-  Kosuke Imai and Olivia Lau \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/repl.default.Rd b/man/repl.default.Rd
deleted file mode 100644
index 02cbdbf..0000000
--- a/man/repl.default.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{repl.default}
-\alias{repl.default}
-\title{Default Method for Replicating Statistics}
-\usage{
-  \method{repl}{default}(object, data=NULL, ...)
-}
-\arguments{
-  \item{object}{an object to replicate}
-
-  \item{data}{a data.frame}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a replicated object
-}
-\description{
-  Replicate a simulation
-}
-\author{
-  Kosuke Imai and Olivia Lau \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/repl.sim.Rd b/man/repl.sim.Rd
deleted file mode 100644
index 8cac275..0000000
--- a/man/repl.sim.Rd
+++ /dev/null
@@ -1,45 +0,0 @@
-\name{repl.sim}
-\alias{repl.sim}
-\title{Method for Replicating Simulated Quantities of Interest}
-\usage{
-  \method{repl}{sim}(object, x=NULL, x1=NULL, y=NULL,
-    num=1000, prev = NULL, bootstrap = FALSE, boot.fn=NULL,
-    cond.data = NULL, ...)
-}
-\arguments{
-  \item{object}{a 'zelig' object}
-
-  \item{x}{a 'setx' object}
-
-  \item{x1}{a secondary 'setx' object used to perform
-  particular computations of quantities of interest}
-
-  \item{y}{a parameter reserved for the computation of
-  particular quantities of interest (average treatment
-  effects). Few models currently support this parameter}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{prev}{ignored}
-
-  \item{bootstrap}{ignored}
-
-  \item{boot.fn}{ignored}
-
-  \item{cond.data}{ignored}
-
-  \item{...}{special parameters which are reserved for
-  future versions of Zelig}
-}
-\value{
-  a 'sim' object storing the replicated quantities of
-  interest
-}
-\description{
-  Replicate simulated quantities of interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/replace.call.Rd b/man/replace.call.Rd
deleted file mode 100644
index e511dd6..0000000
--- a/man/replace.call.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{replace.call}
-\alias{replace.call}
-\title{Hook to Update the Zelig Call with the Appropriate Call Object}
-\usage{
-  replace.call(zobj, call1, call2)
-}
-\arguments{
-  \item{zobj}{a 'zelig' object}
-
-  \item{call1}{the original call to Zelig}
-
-  \item{call2}{the manufactured call to the model fitting
-  function}
-}
-\value{
-  the 'zelig' object with a modified 'call' slot
-}
-\description{
-  Hook to Update the Zelig Call with the Appropriate Call
-  Object
-}
-\note{
-  This function is used internally by Zelig, and currently
-  deprecated.
-}
-
diff --git a/man/robust.gee.hook.Rd b/man/robust.gee.hook.Rd
deleted file mode 100644
index e62d57b..0000000
--- a/man/robust.gee.hook.Rd
+++ /dev/null
@@ -1,32 +0,0 @@
-\name{robust.gee.hook}
-\alias{robust.gee.hook}
-\title{Classify Fitted Object as Naive or Robust}
-\usage{
-  robust.gee.hook(obj, Zall, Call, robust, ...)
-}
-\arguments{
-  \item{obj}{a \code{zelig} object}
-
-  \item{Zall}{the call made to the \code{zelig} function}
-
-  \item{Call}{the call made to the external model}
-
-  \item{robust}{a logical specifying whether to use the
-  naive or robust covariance matrix}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a \code{zelig} object with the additional class
-  \code{gee.robust} or \code{gee.naive}
-}
-\description{
-  This hook is run after the call to the external model. It
-  sets the class of the object (in addition to its other
-  designations) as 'gee.naive' or 'gee.robust' depending on
-  the value of the \code{robust} parameter.
-}
-\author{
-  Skyler
-}
-
diff --git a/man/robust.glm.hook.Rd b/man/robust.glm.hook.Rd
deleted file mode 100644
index ccdeb84..0000000
--- a/man/robust.glm.hook.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{robust.glm.hook}
-\alias{robust.glm.hook}
-\title{Hook for ``glm'' Models in Zelig}
-\usage{
-  robust.glm.hook(obj, zcall, call, robust = FALSE, ...)
-}
-\arguments{
-  \item{obj}{a zelig object}
-
-  \item{zcall}{the original call to the zelig model}
-
-  \item{call}{the call that will be evaluated to fit the model}
-
-  \item{robust}{a logical specifying whether or not to use
-  robust error estimates}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the fitted model object
-}
-\description{
-  Adds support for robust error-estimates in the Zelig
-  ``glm'' models.
-}
-
diff --git a/man/rocplot.Rd b/man/rocplot.Rd
index 6d388e6..fdc2c3d 100644
--- a/man/rocplot.Rd
+++ b/man/rocplot.Rd
@@ -1,80 +1,69 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plots.R
 \name{rocplot}
 \alias{rocplot}
 \title{Receiver Operator Characteristic Plots}
 \usage{
-  rocplot(y1, y2, fitted1, fitted2, cutoff = seq(from=0,
-    to=1, length=100), lty1="solid", lty2="dashed",
-    lwd1=par("lwd"), lwd2=par("lwd"), col1=par("col"),
-    col2=par("col"), main="ROC Curve",
-    xlab = "Proportion of 1's Correctly Predicted",
-    ylab="Proportion of 0's Correctly Predicted",
-    plot = TRUE, ... )
+rocplot(z1, z2,
+  cutoff = seq(from=0, to=1, length=100), lty1="solid",
+  lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
+  col1=par("col"), col2=par("col"),
+  main="ROC Curve",
+  xlab = "Proportion of 1's Correctly Predicted",
+  ylab="Proportion of 0's Correctly Predicted",
+  plot = TRUE,
+  ...
+)
 }
 \arguments{
-  \item{y1}{response variable for the first model}
+\item{z1}{first model}
 
-  \item{y2}{response variable for the second model}
+\item{z2}{second model}
 
-  \item{fitted1}{fitted values for the first model. These
-  values may represent either the in-sample or
-  out-of-sample fitted values}
+\item{cutoff}{A vector of cut-off values between 0 and 1, at which to
+evaluate the proportion of 0s and 1s correctly predicted by the first and
+second model.  By default, this is 100 increments between 0 and 1
+inclusive}
 
-  \item{fitted2}{fitted values for the second model}
+\item{lty1}{the line type of the first model (defaults to 'solid')}
 
-  \item{cutoff}{A vector of cut-off values between 0 and 1,
-  at which to evaluate the proportion of 0s and 1s
-  correctly predicted by the first and second model.  By
-  default, this is 100 increments between 0 and 1
-  inclusive}
+\item{lty2}{the line type of the second model (defaults to 'dashed')}
 
-  \item{lty1}{the line type of the first model (defaults to
-  'solid')}
+\item{lwd1}{the line width of the first model (defaults to 1)}
 
-  \item{lty2}{the line type of the second model (defaults
-  to 'dashed')}
+\item{lwd2}{the line width of the second model (defaults to 1)}
 
-  \item{lwd1}{the line width of the first model (defaults
-  to 1)}
+\item{col1}{the color of the first model (defaults to 'black')}
 
-  \item{lwd2}{the line width of the second model (defaults
-  to 1)}
+\item{col2}{the color of the second model (defaults to 'black')}
 
-  \item{col1}{the color of the first model (defaults to
-  'black')}
+\item{main}{a title for the plot (defaults to "ROC Curve")}
 
-  \item{col2}{the color of the second model (defaults to
-  'black')}
+\item{xlab}{a label for the X-axis}
 
-  \item{main}{a title for the plot (defaults to "ROC
-  Curve")}
+\item{ylab}{a label for the Y-axis}
 
-  \item{xlab}{a label for the X-axis}
+\item{plot}{whether to generate a plot to the selected device}
 
-  \item{ylab}{a label for the Y-axis}
-
-  \item{plot}{whether to generate a plot to the selected
-  device}
-
-  \item{\dots}{additional parameters to be passed to the
-  plot}
+\item{\dots}{additional parameters to be passed to the plot}
 }
 \value{
-  if plot is TRUE, rocplot simply generates a plot.
-  Otherwise, a list with the following is produced:
-  \item{roc1}{a matrix containing a vector of x-coordinates
-  and y-coordinates corresponding to the number of ones and
-  zeros correctly predicted for the first model.}
-  \item{roc2}{a matrix containing a vector of x-coordinates
-  and y-coordinates corresponding to the number of ones and
-  zeros correctly predicted for the second model.}
-  \item{area1}{the area under the first ROC curve,
-  calculated using Riemann sums.} \item{area2}{the area
-  under the second ROC curve, calculated using Riemann
-  sums.}
+if plot is TRUE, rocplot simply generates a plot. Otherwise, a list
+  with the following is produced:
+  \item{roc1}{a matrix containing a vector of x-coordinates and
+    y-coordinates corresponding to the number of ones and zeros correctly
+    predicted for the first model.}
+  \item{roc2}{a matrix containing a vector of x-coordinates and
+    y-coordinates corresponding to the number of ones and zeros correctly
+    predicted for the second model.}
+  \item{area1}{the area under the first ROC curve, calculated using
+    Riemann sums.}
+  \item{area2}{the area under the second ROC curve, calculated using
+    Riemann sums.}
 }
 \description{
-  The 'rocplot' command generates a receiver operator
-  characteristic plot to compare the in-sample (default) or
-  out-of-sample fit for two logit or probit regressions.
+The 'rocplot' command generates a receiver operator characteristic plot to
+compare the in-sample (default) or out-of-sample fit for two logit or probit
+regressions.
 }
 
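Under the new signature, rocplot() takes the two fitted models directly rather than response vectors and fitted values. A short sketch using the turnout data shipped with Zelig (the second specification is an arbitrary choice for comparison):

  library(Zelig)
  data(turnout)
  z1 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
  z2 <- zelig(vote ~ race + educate + age, model = "logit", data = turnout)
  rocplot(z1, z2)    # in-sample ROC comparison of the two specifications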
diff --git a/man/seatshare.Rd b/man/seatshare.Rd
new file mode 100644
index 0000000..d869fed
--- /dev/null
+++ b/man/seatshare.Rd
@@ -0,0 +1,24 @@
+\name{seatshare}
+
+\alias{seatshare}
+
+\title{Left Party Seat Share in 11 OECD Countries}
+
+\description{
+  This data set contains time-series data on the seat share of left-leaning parties in the lower legislative house of 11 OECD countries, as well as the level of unemployment.  Data follows the style used in Hibbs (1977).}
+
+\usage{data(seatshare)}
+
+\format{A table containing 4 variables ("country", "year", "unemp", "leftseat") and 384 observations split across 11 countries.}
+
+\source{OECD data and Mackie and Rose (1991), extended to further years.}
+
+\references{
+	Douglas A. Hibbs. (1977).  \emph{Political Parties and Macroeconomic Policy}. American Political Science Review 71(4):1467-1487.
+
+	Thomas T. Mackie and Richard Rose.  (1991).  \emph{The International Almanac of Electoral History}.  Macmillan: London.
+}
+
+\keyword{datasets}
+
+
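A quick way to inspect the new data set in base R (the country plotted is simply the first one present):

  library(Zelig)
  data(seatshare)
  str(seatshare)                               # country, year, unemp, leftseat
  first <- unique(seatshare$country)[1]
  plot(leftseat ~ year, type = "l",
       data = seatshare[seatshare$country == first, ])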
diff --git a/man/setfactor.Rd b/man/setfactor.Rd
new file mode 100644
index 0000000..63d6392
--- /dev/null
+++ b/man/setfactor.Rd
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{setfactor}
+\alias{setfactor}
+\title{Set new value of a factor variable, checking for existing levels}
+\usage{
+setfactor(fv, v)
+}
+\arguments{
+\item{fv}{factor variable}
+
+\item{v}{value}
+}
+\value{
+a factor variable with the value \code{v} and the same levels
+}
+\description{
+Set new value of a factor variable, checking for existing levels
+}
+\keyword{internal}
+
diff --git a/man/setval.Rd b/man/setval.Rd
new file mode 100644
index 0000000..7ec9555
--- /dev/null
+++ b/man/setval.Rd
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{setval}
+\alias{setval}
+\title{Set new value of a variable as appropriate to data type}
+\usage{
+setval(val, newval)
+}
+\arguments{
+\item{val}{old value}
+
+\item{newval}{new value}
+}
+\value{
+a variable of the same type with the value \code{newval}
+}
+\description{
+Set new value of a variable as appropriate to data type
+}
+\keyword{internal}
+
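setfactor() and setval() are internal, but the dispatch they document is simple: preserve the variable's type, and for factors the existing levels, when substituting a counterfactual value. An illustrative re-sketch of that logic, not the package's code:

  set.value.sketch <- function(val, newval) {
    if (is.factor(val)) {
      # setfactor: refuse values outside the existing levels
      stopifnot(newval %in% levels(val))
      factor(newval, levels = levels(val))
    } else if (is.numeric(val)) {
      as.numeric(newval)
    } else {
      newval
    }
  }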
diff --git a/man/setx.MI.Rd b/man/setx.MI.Rd
deleted file mode 100644
index e0c0b41..0000000
--- a/man/setx.MI.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-\name{setx.MI}
-\alias{setx.MI}
-\title{Set Explanatory Variables for Multiply Imputed Data-sets
-This function simply calls setx.default once for every fitted model
-within the 'zelig.MI' object}
-\usage{
-  \method{setx}{MI}(obj, ..., data=NULL)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{...}{user-defined values of specific variables for
-  overwriting the default values set by the function
-  \code{fn}}
-
-  \item{data}{a new data-frame}
-}
-\value{
-  a 'setx.mi' object used for computing Quantities of
-  Interest by the 'sim' method
-}
-\description{
-  Set Explanatory Variables for Multiply Imputed Data-sets
-  This function simply calls setx.default once for every
-  fitted model within the 'zelig.MI' object
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  \link{setx}
-}
-
diff --git a/man/setx.Rd b/man/setx.Rd
index ac33610..ded5b39 100644
--- a/man/setx.Rd
+++ b/man/setx.Rd
@@ -1,65 +1,64 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/wrappers.R
 \name{setx}
 \alias{setx}
 \title{Setting Explanatory Variable Values}
 \usage{
-  setx(obj, fn = NULL, data = NULL, cond = FALSE, ...)
+setx(obj, fn = NULL, data = NULL, cond = FALSE, ...)
 }
 \arguments{
-  \item{obj}{the saved output from zelig}
+\item{obj}{the saved output from zelig}
 
-  \item{fn}{a list of functions to apply to the data frame}
+\item{fn}{a list of functions to apply to the data frame}
 
-  \item{data}{a new data frame used to set the values of
-  explanatory variables. If data = NULL (the default), the
-  data frame called in zelig is used}
+\item{data}{a new data frame used to set the values of
+explanatory variables. If data = NULL (the default), the
+data frame called in zelig is used}
 
-  \item{cond}{a logical value indicating whether
-  unconditional (default) or conditional (choose \code{cond
-  = TRUE}) prediction should be performed.  If you choose
-  \code{cond = TRUE}, \code{setx} will coerce \code{fn =
-  NULL} and ignore the additional arguments in
-  \code{\dots}.  If \code{cond = TRUE} and \code{data =
-  NULL}, \code{setx} will prompt you for a data frame.}
+\item{cond}{a logical value indicating whether unconditional
+(default) or conditional (choose \code{cond = TRUE}) prediction
+should be performed.  If you choose \code{cond = TRUE}, \code{setx}
+will coerce \code{fn = NULL} and ignore the additional arguments in 
+\code{\dots}.  If \code{cond = TRUE} and \code{data = NULL},
+\code{setx} will prompt you for a data frame.}
 
-  \item{...}{user-defined values of specific variables for
-  overwriting the default values set by the function
-  \code{fn}.  For example, adding \code{var1 =
-  mean(data\$var1)} or \code{x1 = 12} explicitly sets the
-  value of \code{x1} to 12.  In addition, you may specify
-  one explanatory variable as a range of values, creating
-  one observation for every unique value in the range of
-  values}
+\item{...}{user-defined values of specific variables for overwriting the
+default values set by the function \code{fn}.  For example, adding
+\code{var1 = mean(data\$var1)} or \code{x1 = 12} explicitly sets the value
+of \code{x1} to 12.  In addition, you may specify one explanatory variable
+as a range of values, creating one observation for every unique value in
+the range of values}
 }
 \value{
-  For unconditional prediction, \code{x.out} is a model
-  matrix based on the specified values for the explanatory
-  variables.  For multiple analyses (i.e., when choosing
-  the \code{by} option in \code{\link{zelig}}, \code{setx}
-  returns the selected values calculated over the entire
-  data frame.  If you wish to calculate values over just
-  one subset of the data frame, the 5th subset for example,
-  you may use: \code{x.out <- setx(z.out[[5]])}
+For unconditional prediction, \code{x.out} is a model matrix based
+  on the specified values for the explanatory variables.  For multiple
+  analyses (i.e., when choosing the \code{by} option in \code{\link{zelig}}),
+  \code{setx} returns the selected values calculated over the entire
+  data frame.  If you wish to calculate values over just one subset of
+  the data frame, the 5th subset for example, you may use:  
+  \code{x.out <- setx(z.out[[5]])}
 }
 \description{
-  The \code{setx} command uses the variables identified in
-  the \code{formula} generated by \code{zelig} and sets the
-  values of the explanatory variables to the selected
-  values.  Use \code{setx} after \code{zelig} and before
-  \code{sim} to simulate quantities of interest.
+The \code{setx} command uses the variables identified in
+the \code{formula} generated by \code{zelig} and sets the values of
+the explanatory variables to the selected values.  Use \code{setx}
+after \code{zelig} and before \code{sim} to simulate quantities of
+interest.
 }
 \examples{
+
 # Unconditional prediction:
 data(turnout)
 z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
 x.out <- setx(z.out)
 s.out <- sim(z.out, x = x.out)
+
 }
 \author{
-  Matt Owen \email{mowen at iq.harvard.edu}, Olivia Lau and
-  Kosuke Imai
+Matt Owen \email{mowen at iq.harvard.edu}, Olivia Lau and Kosuke Imai
 }
 \seealso{
-  The full Zelig manual may be accessed online at
+The full Zelig manual may be accessed online at
   \url{http://gking.harvard.edu/zelig}
 }
 \keyword{file}
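Building on the example above, a sketch of the range behavior described under the '...' argument: one scenario per value of a variable, or a pair of scenarios for first differences in sim() (continues the turnout example, so z.out is assumed to exist):

  x.range <- setx(z.out, educate = 10:16)   # one observation per value
  x.low   <- setx(z.out, educate = 10)
  x.high  <- setx(z.out, educate = 16)
  s.out   <- sim(z.out, x = x.low, x1 = x.high)   # qi$fd: first differences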
diff --git a/man/setx.default.Rd b/man/setx.default.Rd
deleted file mode 100644
index beda3a6..0000000
--- a/man/setx.default.Rd
+++ /dev/null
@@ -1,31 +0,0 @@
-\name{setx.default}
-\alias{setx.default}
-\title{Set explanatory variables}
-\usage{
-  \method{setx}{default}(obj, fn=NULL, data=NULL,
-    cond=FALSE, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{fn}{a list of key-value pairs specifying which
-  function to apply to columns of each data type}
-
-  \item{data}{a data.frame}
-
-  \item{cond}{ignored}
-
-  \item{...}{parameters specifying what to explicitly set
-  each column as. This is used to produce counterfactuals}
-}
-\value{
-  a 'setx' object
-}
-\description{
-  Set explanatory variables
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai, and
-  Olivia Lau
-}
-
diff --git a/man/sim.MI.Rd b/man/sim.MI.Rd
deleted file mode 100644
index c55f64b..0000000
--- a/man/sim.MI.Rd
+++ /dev/null
@@ -1,38 +0,0 @@
-\name{sim.MI}
-\alias{sim.MI}
-\title{Simulate Multiply Imputed Data}
-\usage{
-  \method{sim}{MI}(obj, x=NULL, x1=NULL, y=NULL, num=1000,
-    ...)
-}
-\arguments{
-  \item{obj}{a 'zelig.MI' object containing several fits
-  for two or more subsetted data-frames}
-
-  \item{x}{a 'setx.mi' object containing explanatory
-  variables for each fitted model}
-
-  \item{x1}{a 'setx.mi' object containing explanatory
-  variables for each fitted model}
-
-  \item{y}{this feature is currently unimplemented}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a 'sim.MI' object with simulated quantities of interest
-  for each fitted model contained in 'obj'
-}
-\description{
-  Simulate Multiply Imputed Data
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-\seealso{
-  \link{sim}
-}
-
diff --git a/man/sim.Rd b/man/sim.Rd
index d10da7d..171e5a3 100644
--- a/man/sim.Rd
+++ b/man/sim.Rd
@@ -1,123 +1,108 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/wrappers.R
 \name{sim}
 \alias{sim}
-\title{Generic Method for Computing and Organizing Simulated Quantities of Interest
-Simulate quantities of interest from the estimated model
-output from \code{zelig()} given specified values of explanatory
-variables established in \code{setx()}.  For classical \emph{maximum
-likelihood} models, \code{sim()} uses asymptotic normal
-approximation to the log-likelihood.  For \emph{Bayesian models},
-Zelig simulates quantities of interest from the posterior density,
-whenever possible.  For \emph{robust Bayesian models}, simulations
-are drawn from the identified class of Bayesian posteriors.
-Alternatively, you may generate quantities of interest using
-bootstrapped parameters.}
+\title{Generic Method for Computing and Organizing Simulated Quantities of Interest}
 \usage{
-  sim(obj, x = NULL, x1 = NULL, y = NULL, num = 1000,
-    bootstrap = F, bootfn = NULL, cond.data = NULL, ...)
+sim(obj, x = NULL, x1 = NULL, y = NULL, num = 1000, bootstrap = F,
+  bootfn = NULL, cond.data = NULL, ...)
 }
 \arguments{
-  \item{obj}{the output object from zelig}
+\item{obj}{the output object from zelig}
 
-  \item{x}{values of explanatory variables used for
-  simulation, generated by setx}
+\item{x}{values of explanatory variables used for simulation,
+generated by setx}
 
-  \item{x1}{optional values of explanatory variables
-  (generated by a second call of setx) used to perform particular
-  computations of quantities of interest}
+\item{x1}{optional values of explanatory variables (generated by a
+second call of setx) used to perform particular computations of
+quantities of interest}
 
-  \item{y}{a parameter reserved for the computation of
-  particular quantities of interest (average treatment
-  effects). Few models currently support this parameter}
+\item{y}{a parameter reserved for the computation of particular
+quantities of interest (average treatment effects). Few
+models currently support this parameter}
 
-  \item{num}{an integer specifying the number of
-  simulations to compute}
+\item{num}{an integer specifying the number of simulations to compute}
 
-  \item{bootstrap}{currently unsupported}
+\item{bootstrap}{currently unsupported}
 
-  \item{bootfn}{currently unsupported}
+\item{bootfn}{currently unsupported}
 
-  \item{cond.data}{currently unsupported}
+\item{cond.data}{currently unsupported}
 
-  \item{...}{arguments reserved for future versions of Zelig}
+\item{...}{arguments reserved for future versions of Zelig}
 }
 \value{
-  The output stored in \code{s.out} varies by model.  Use
-  the \code{names} command to view the output stored in
-  \code{s.out}.  Common elements include: \item{x}{the
-  \code{\link{setx}} values for the explanatory variables,
-  used to calculate the quantities of interest (expected
-  values, predicted values, etc.). } \item{x1}{the optional
-  \code{\link{setx}} object used to simulate first
-  differences, and other model-specific quantities of
-  interest, such as risk-ratios.} \item{call}{the options
-  selected for \code{\link{sim}}, used to replicate
-  quantities of interest. } \item{zelig.call}{the original
-  command and options for \code{\link{zelig}}, used to
-  replicate analyses. } \item{num}{the number of
-  simulations requested. } \item{par}{the parameters
-  (coefficients, and additional model-specific parameters).
-  You may wish to use the same set of simulated parameters
-  to calculate quantities of interest rather than
-  simulating another set.} \item{qi\$ev}{simulations of the
-  expected values given the model and \code{x}. }
-  \item{qi\$pr}{simulations of the predicted values given
-  by the fitted values. } \item{qi\$fd}{simulations of the
-  first differences (or risk difference for binary models)
-  for the given \code{x} and \code{x1}.  The difference is
-  calculated by subtracting the expected values given
-  \code{x} from the expected values given \code{x1}.  (If
-  do not specify \code{x1}, you will not get first
-  differences or risk ratios.) } \item{qi\$rr}{simulations
-  of the risk ratios for binary and multinomial models.
-  See specific models for details.}
-  \item{qi\$ate.ev}{simulations of the average expected
-  treatment effect for the treatment group, using
-  conditional prediction. Let \eqn{t_i} be a binary
-  explanatory variable defining the treatment (\eqn{t_i=1})
-  and control (\eqn{t_i=0}) groups.  Then the average
-  expected treatment effect for the treatment group is
-  \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
-  E[Y_i(t_i=0)] \mid t_i=1 \,],} where \eqn{Y_i(t_i=1)} is
-  the value of the dependent variable for observation
-  \eqn{i} in the treatment group.  Variation in the
-  simulations is due to uncertainty in simulating
-  \eqn{E[Y_i(t_i=0)]}, the counterfactual expected value of
-  \eqn{Y_i} for observations in the treatment group, under
-  the assumption that everything stays the same except that
-  the treatment indicator is switched to \eqn{t_i=0}. }
-  \item{qi\$ate.pr}{simulations of the average predicted
-  treatment effect for the treatment group, using
-  conditional prediction. Let \eqn{t_i} be a binary
-  explanatory variable defining the treatment (\eqn{t_i=1})
-  and control (\eqn{t_i=0}) groups.  Then the average
-  predicted treatment effect for the treatment group is
-  \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
-  \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} where
-  \eqn{Y_i(t_i=1)} is the value of the dependent variable
-  for observation \eqn{i} in the treatment group.
-  Variation in the simulations is due to uncertainty in
-  simulating \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual
-  predicted value of \eqn{Y_i} for observations in the
-  treatment group, under the assumption that everything
-  stays the same except that the treatment indicator is
-  switched to \eqn{t_i=0}.}
+The output stored in \code{s.out} varies by model.  Use the
+ \code{names} command to view the output stored in \code{s.out}.
+ Common elements include: 
+ \item{x}{the \code{\link{setx}} values for the explanatory variables,
+   used to calculate the quantities of interest (expected values,
+   predicted values, etc.). }
+ \item{x1}{the optional \code{\link{setx}} object used to simulate
+   first differences, and other model-specific quantities of
+   interest, such as risk-ratios.}
+ \item{call}{the options selected for \code{\link{sim}}, used to
+   replicate quantities of interest. } 
+ \item{zelig.call}{the original command and options for
+   \code{\link{zelig}}, used to replicate analyses. }
+ \item{num}{the number of simulations requested. }
+ \item{par}{the parameters (coefficients, and additional
+   model-specific parameters).  You may wish to use the same set of
+   simulated parameters to calculate quantities of interest rather
+   than simulating another set.}
+ \item{qi\$ev}{simulations of the expected values given the
+   model and \code{x}. }
+ \item{qi\$pr}{simulations of the predicted values given by the
+   fitted values. }
+ \item{qi\$fd}{simulations of the first differences (or risk
+   difference for binary models) for the given \code{x} and \code{x1}.
+   The difference is calculated by subtracting the expected values
+   given \code{x} from the expected values given \code{x1}.  (If you do not
+   specify \code{x1}, you will not get first differences or risk
+   ratios.) }
+ \item{qi\$rr}{simulations of the risk ratios for binary and
+   multinomial models.  See specific models for details.}
+ \item{qi\$ate.ev}{simulations of the average expected
+   treatment effect for the treatment group, using conditional
+   prediction. Let \eqn{t_i} be a binary explanatory variable defining
+   the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
+   average expected treatment effect for the treatment group is
+   \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+     E[Y_i(t_i=0)] \mid t_i=1 \,],} 
+   where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
+   observation \eqn{i} in the treatment group.  Variation in the
+   simulations is due to uncertainty in simulating \eqn{E[Y_i(t_i=0)]},
+   the counterfactual expected value of \eqn{Y_i} for observations in the
+   treatment group, under the assumption that everything stays the
+   same except that the treatment indicator is switched to \eqn{t_i=0}. }
+ \item{qi\$ate.pr}{simulations of the average predicted
+   treatment effect for the treatment group, using conditional
+   prediction. Let \eqn{t_i} be a binary explanatory variable defining
+   the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
+   average predicted treatment effect for the treatment group is
+   \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+     \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} 
+   where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
+   observation \eqn{i} in the treatment group.  Variation in the
+   simulations is due to uncertainty in simulating
+   \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual predicted value of
+   \eqn{Y_i} for observations in the treatment group, under the
+   assumption that everything stays the same except that the
+   treatment indicator is switched to \eqn{t_i=0}.}
 }
 \description{
-  Generic Method for Computing and Organizing Simulated
-  Quantities of Interest Simulate quantities of interest
-  from the estimated model output from \code{zelig()} given
-  specified values of explanatory variables established in
-  \code{setx()}.  For classical \emph{maximum likelihood}
-  models, \code{sim()} uses asymptotic normal approximation
-  to the log-likelihood.  For \emph{Bayesian models}, Zelig
-  simulates quantities of interest from the posterior
-  density, whenever possible.  For \emph{robust Bayesian
-  models}, simulations are drawn from the identified class
-  of Bayesian posteriors. Alternatively, you may generate
-  quantities of interest using bootstrapped parameters.
+Simulate quantities of interest from the estimated model
+output from \code{zelig()} given specified values of explanatory
+variables established in \code{setx()}.  For classical \emph{maximum
+likelihood} models, \code{sim()} uses asymptotic normal
+approximation to the log-likelihood.  For \emph{Bayesian models},
+Zelig simulates quantities of interest from the posterior density,
+whenever possible.  For \emph{robust Bayesian models}, simulations
+are drawn from the identified class of Bayesian posteriors.
+Alternatively, you may generate quantities of interest using
+bootstrapped parameters.
 }
 \author{
-  Matt Owen \email{mowen at iq.harvard.edu}, Olivia Lau and
-  Kosuke Imai
+Matt Owen \email{mowen at iq.harvard.edu}, Olivia Lau and Kosuke Imai
 }
 
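Putting the three wrappers together, the workflow this page describes looks like the following (turnout example as on the setx page; per the Value section above, names(s.out) shows what a given model stores):

  library(Zelig)
  data(turnout)
  z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
  x.out <- setx(z.out)
  s.out <- sim(z.out, x = x.out, num = 1000)
  names(s.out)    # elements described under Value above
  summary(s.out)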
diff --git a/man/sim.default.Rd b/man/sim.default.Rd
deleted file mode 100644
index 92a7ee8..0000000
--- a/man/sim.default.Rd
+++ /dev/null
@@ -1,43 +0,0 @@
-\name{sim.default}
-\alias{sim.default}
-\title{Method for Simulating Quantities of Interest from 'zelig' Objects}
-\usage{
-  \method{sim}{default}(obj, x=NULL, x1=NULL, y=NULL,
-    num=1000, bootstrap = FALSE, bootfn=NULL, cond.data =
-    NULL, ...)
-}
-\arguments{
-  \item{obj}{a 'zelig' object}
-
-  \item{x}{a 'setx' object}
-
-  \item{x1}{a secondary 'setx' object used to perform
-  particular computations of quantities of interest}
-
-  \item{y}{a parameter reserved for the computation of
-  particular quantities of interest (average treatment
-  effects). Few models currently support this parameter}
-
-  \item{num}{an integer specifying the number of
-  simulations to compute}
-
-  \item{bootstrap}{ignored}
-
-  \item{bootfn}{ignored}
-
-  \item{cond.data}{ignored}
-
-  \item{...}{parameters to be passed to the boot function,
-  if one is supplied}
-}
-\value{
-  a 'sim' object storing the replicated quantities of
-  interest
-}
-\description{
-  Simulate quantities of interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/simacf.Rd b/man/simacf.Rd
new file mode 100644
index 0000000..bf20a69
--- /dev/null
+++ b/man/simacf.Rd
@@ -0,0 +1,13 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-arima.R
+\name{simacf}
+\alias{simacf}
+\title{Construct Autocorrelation Function from Zelig object and simulated parameters}
+\usage{
+simacf(coef, order, params, alpha = 0.5)
+}
+\description{
+Construct Autocorrelation Function from Zelig object and simulated parameters
+}
+\keyword{internal}
+
diff --git a/man/simulation.matrix.Rd b/man/simulation.matrix.Rd
deleted file mode 100644
index 8117fbb..0000000
--- a/man/simulation.matrix.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{simulation.matrix}
-\alias{simulation.matrix}
-\title{Get Simulations as a Matrix}
-\usage{
-  simulation.matrix(obj, which = NULL, ...)
-}
-\arguments{
-  \item{obj}{an object, typically a ``sim'' or
-  ``pooled.sim'' object.}
-
-  \item{which}{a character-vector specifying the
-  \emph{titles} of quantities of interest to extract}
-
-  \item{...}{additional parameters}
-}
-\value{
-  a simulation matrix
-}
-\description{
-  Returns an MxN matrix where N is the number of simulations
-  and M is the number of predicted values. Additionally, a
-  ``labels'' attribute is attached that produces a
-  human-readable identifier for each column.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/simulations.parameters.Rd b/man/simulations.parameters.Rd
deleted file mode 100644
index 5a15d39..0000000
--- a/man/simulations.parameters.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{simulations.parameters}
-\alias{simulations.parameters}
-\title{Return Simulations of Parameter Coefficients}
-\usage{
-  \method{simulations}{parameters}(object, ...)
-}
-\arguments{
-  \item{object}{a 'parameters' object}
-
-  \item{\dots}{ignored}
-}
-\value{
-  simulations, specified by the Zelig model, of the
-  ancillary parameters
-}
-\description{
-  Returns simulated parameters of coefficients for use in
-  statistical simulation. The values are set by the
-  model-fitting function and the developer of the qi.<model
-  name> method.
-}
-\note{
-  This function does not differ at all from coef.default
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/simulations.plot.Rd b/man/simulations.plot.Rd
index 6d7ec3e..e8179bc 100644
--- a/man/simulations.plot.Rd
+++ b/man/simulations.plot.Rd
@@ -1,46 +1,44 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plots.R
 \name{simulations.plot}
 \alias{simulations.plot}
 \title{Plot Quantities of Interest in a Zelig-fashion}
 \usage{
-  simulations.plot(y, y1=NULL, xlab="", ylab="", main="",
-    col=NULL, line.col=NULL, axisnames=TRUE)
+simulations.plot(y, y1=NULL, xlab="", ylab="", main="", col=NULL, line.col=NULL,
+  axisnames=TRUE)
 }
 \arguments{
-  \item{y}{A matrix or vector of simulated results
-  generated by Zelig, to be graphed.}
+\item{y}{A matrix or vector of simulated results generated by Zelig, to be
+graphed.}
 
-  \item{y1}{For comparison of two sets of simulated results
-  at different choices of covariates, this should be an
-  object of the same type and dimension as y.  If no
-  comparison is to be made, this should be NULL.}
+\item{y1}{For comparison of two sets of simulated results at different
+choices of covariates, this should be an object of the same type and
+dimension as y.  If no comparison is to be made, this should be NULL.}
 
-  \item{xlab}{Label for the x-axis.}
+\item{xlab}{Label for the x-axis.}
 
-  \item{ylab}{Label for the y-axis.}
+\item{ylab}{Label for the y-axis.}
 
-  \item{main}{Main plot title.}
+\item{main}{Main plot title.}
 
-  \item{col}{A vector of colors.  Colors will be used in
-  turn as the graph is built for main plot objects. For
-  nominal/categorical data, this colors renders as the bar
-  color, while for numeric data it renders as the
-  background color.}
+\item{col}{A vector of colors.  Colors will be used in turn as the graph is
+built for main plot objects. For nominal/categorical data, this color
+renders as the bar color, while for numeric data it renders as the background
+color.}
 
-  \item{line.col}{A vector of colors.  Colors will be used
-  in turn as the graph is built for line color shading of
-  plot objects.}
+\item{line.col}{A vector of colors.  Colors will be used in turn as the graph is
+built for line color shading of plot objects.}
 
-  \item{axisnames}{a character-vector, specifying the names
-  of the axes}
+\item{axisnames}{a character-vector, specifying the names of the axes}
 }
 \value{
-  nothing
+nothing
 }
 \description{
-  Various graph generation for different common types of
-  simulated results from Zelig
+Various graph generation for different common types of simulated results from
+Zelig
 }
 \author{
-  James Honaker
+James Honaker
 }
 
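simulations.plot() operates on raw draws, so it can be exercised without a fitted model; here synthetic vectors stand in for Zelig simulations (the values are arbitrary):

  y  <- rnorm(1000, mean = 0.70, sd = 0.04)   # e.g., E(Y|X) draws
  y1 <- rnorm(1000, mean = 0.55, sd = 0.04)   # a comparison scenario
  simulations.plot(y, y1, xlab = "Expected Value: E(Y|X)",
                   main = "Two covariate settings")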
diff --git a/man/special_print_LIST.Rd b/man/special_print_LIST.Rd
deleted file mode 100644
index f32e7fb..0000000
--- a/man/special_print_LIST.Rd
+++ /dev/null
@@ -1,26 +0,0 @@
-\name{special_print_LIST}
-\alias{.print.qi.summarized.LIST}
-\alias{special_print_LIST}
-\title{Method for Printing Summarized QI's in a List Form}
-\usage{
-  .print.qi.summarized.LIST(x, ...)
-}
-\arguments{
-  \item{x}{a 'summarized.qi' object}
-
-  \item{...}{additional parameters to be used by the
-  'print.matrix' method}
-}
-\value{
-  x (invisibly)
-}
-\description{
-  Method for Printing Summarized QI's in a List Form
-}
-\note{
-  This function is used internally by Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/special_print_MATRIX.Rd b/man/special_print_MATRIX.Rd
deleted file mode 100644
index 2b52a97..0000000
--- a/man/special_print_MATRIX.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{special_print_MATRIX}
-\alias{.print.qi.summarized.MATRIX}
-\alias{special_print_MATRIX}
-\title{Method for Printing Summarized QI's in a Matrix Form}
-\usage{
-  .print.qi.summarized.MATRIX(x, ...)
-}
-\arguments{
-  \item{x}{a 'summarized.qi' object}
-
-  \item{...}{additional parameters}
-}
-\value{
-  x (invisibly)
-}
-\description{
-  Method for Printing Summarized QI's in a Matrix Form
-}
-\note{
-  This function is used internally by Zelig
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/splitUp.Rd b/man/splitUp.Rd
deleted file mode 100644
index a770f20..0000000
--- a/man/splitUp.Rd
+++ /dev/null
@@ -1,33 +0,0 @@
-\name{splitUp}
-\alias{splitUp}
-\title{Split a List into Two Lists
-This function takes any list and splits it into two lists - one containing
-the values of arguments with specifically specified values and those without
-specified values.}
-\usage{
-  splitUp(args)
-}
-\arguments{
-  \item{args}{a list}
-}
-\value{
-  a list containing two entries: the key-value paired
-  entries (titled wordful) and the unkeyed entries (titled
-  wordless)
-}
-\description{
-  Split a List into Two Lists This function takes any
-  list and splits it into two lists - one containing the
-  values of arguments with specifically specified values
-  and those without specified values.
-}
-\note{
-  This function is a good candidate for deprecation
-}
-\examples{
-#list(wordful = list(x=1, y=2), wordless=list(2, "red"))
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/stat.Rd b/man/stat.Rd
new file mode 100644
index 0000000..25373c6
--- /dev/null
+++ b/man/stat.Rd
@@ -0,0 +1,24 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{stat}
+\alias{stat}
+\title{Pass Quantities of Interest to Appropriate Summary Function}
+\usage{
+stat(qi, num)
+}
+\arguments{
+\item{qi}{quantity of interest (e.g., estimated value or predicted value)}
+
+\item{num}{number of simulations}
+}
+\value{
+a formatted qi
+}
+\description{
+Pass Quantities of Interest to Appropriate Summary Function
+}
+\author{
+Christine Choirat
+}
+\keyword{internal}
+
diff --git a/man/statlevel.Rd b/man/statlevel.Rd
new file mode 100644
index 0000000..9572789
--- /dev/null
+++ b/man/statlevel.Rd
@@ -0,0 +1,24 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{statlevel}
+\alias{statlevel}
+\title{Describe Here}
+\usage{
+statlevel(qi, num)
+}
+\arguments{
+\item{qi}{quantity of interest in the discrete case}
+
+\item{num}{number of simulations}
+}
+\value{
+a formatted qi
+}
+\description{
+Describe Here
+}
+\author{
+Christine Choirat
+}
+\keyword{internal}
+
diff --git a/man/statmat.Rd b/man/statmat.Rd
new file mode 100644
index 0000000..705d25a
--- /dev/null
+++ b/man/statmat.Rd
@@ -0,0 +1,22 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{statmat}
+\alias{statmat}
+\title{Describe Here}
+\usage{
+statmat(qi)
+}
+\arguments{
+\item{qi}{quantity of interest in the discrete case}
+}
+\value{
+a formatted qi
+}
+\description{
+Describe Here
+}
+\author{
+Christine Choirat
+}
+\keyword{internal}
+
diff --git a/man/store.object.Rd b/man/store.object.Rd
deleted file mode 100644
index 2803fb5..0000000
--- a/man/store.object.Rd
+++ /dev/null
@@ -1,41 +0,0 @@
-\name{store.object}
-\alias{store.object}
-\title{Store Object in Environment with a Fake Name}
-\usage{
-  store.object(obj, envir, name = NULL, prefix = ".")
-}
-\arguments{
-  \item{obj}{any object}
-
-  \item{envir}{an environment object, which will contain
-  the object with the assigned name}
-
-  \item{name}{a character-string specifying the name that
-  the object will be stored as in the specified
-  environment}
-
-  \item{prefix}{a character string specifying the prefixes
-  to append to names that already have matches in the
-  destination environment}
-}
-\value{
-  a character-string specifying the name of the object in
-  the destination environment
-}
-\description{
-  This function takes the value of an object and stores it
-  within a specified environment. This is similar to simply
-  using the \code{assign} function, but will not overwrite
-  existing values in the specified environment. It
-  accomplishes this by appending a prefix to the name of
-  the variable until the name becomes unique.
-}
-\note{
-  This method does not correct invalid names. That is,
-  there is no test to determine whether the submitted name
-  is valid.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/structuralToReduced.Rd b/man/structuralToReduced.Rd
deleted file mode 100644
index 29e866d..0000000
--- a/man/structuralToReduced.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{structuralToReduced}
-\alias{structuralToReduced}
-\title{Transform a Multilevel Model's Structural Formulas Into Reduced Form}
-\usage{
-  structuralToReduced(f)
-}
-\arguments{
-  \item{f}{a list of formulas}
-}
-\value{
-  a formula in reduced form
-}
-\description{
-  Transform a Multilevel Model's Structural Formulas Into
-  Reduced Form
-}
-\author{
-  Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-}
-
diff --git a/man/summarize.Rd b/man/summarize.Rd
deleted file mode 100644
index 231bb8c..0000000
--- a/man/summarize.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{summarize}
-\alias{summarize}
-\title{Generic method for summarizing simulated quantities of interest}
-\usage{
-  summarize(obj)
-}
-\arguments{
-  \item{obj}{a \code{qi} object, storing simulations of
-  quantities of interest}
-}
-\value{
-  a \code{summarized.qi} object
-}
-\description{
-  Generic method for summarizing simulated quantities of
-  interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/summarize.default.Rd b/man/summarize.default.Rd
deleted file mode 100644
index 4b9d039..0000000
--- a/man/summarize.default.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{summarize.default}
-\alias{summarize.default}
-\title{Summarize Simulated Quantities of Interest}
-\usage{
-  \method{summarize}{default}(obj)
-}
-\arguments{
-  \item{obj}{a \code{qi} object, storing simulations of
-  quantities of interest}
-}
-\value{
-  a 'summarized.qi' object
-}
-\description{
-  Summarize Simulated Quantities of Interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/summary-Zelig-method.Rd b/man/summary-Zelig-method.Rd
new file mode 100644
index 0000000..6cbedba
--- /dev/null
+++ b/man/summary-Zelig-method.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{methods}
+\name{summary,Zelig-method}
+\alias{summary,Zelig-method}
+\title{Summary method for Zelig objects}
+\usage{
+\S4method{summary}{Zelig}(object, ...)
+}
+\arguments{
+\item{object}{An Object of Class Zelig}
+
+\item{...}{Additional parameters to be passed to summary}
+}
+\description{
+Summary method for Zelig objects
+}
+
diff --git a/man/summary.Arima.Rd b/man/summary.Arima.Rd
new file mode 100644
index 0000000..348c935
--- /dev/null
+++ b/man/summary.Arima.Rd
@@ -0,0 +1,20 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-timeseries.R
+\name{summary.Arima}
+\alias{summary.Arima}
+\title{Summary of an object of class Arima}
+\usage{
+\method{summary}{Arima}(object, ...)
+}
+\arguments{
+\item{object}{An object of class Arima}
+
+\item{...}{Additional parameters}
+}
+\value{
+The original object
+}
+\description{
+Summary of an object of class Arima
+}
+
diff --git a/man/summary.MI.Rd b/man/summary.MI.Rd
deleted file mode 100644
index f25cb79..0000000
--- a/man/summary.MI.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{summary.MI}
-\alias{summary.MI}
-\title{Summary of Multiply Imputed Statistical Models}
-\usage{
-  \method{summary}{MI}(object, subset = NULL, ...)
-}
-\arguments{
-  \item{object}{a set of fitted statistical models}
-
-  \item{subset}{an integer vector, specifying the indices of the data.frames to be used in the subset}
-
-  \item{...}{parameters to forward}
-}
-\value{
-  a list of summaries
-}
-\description{
-  Summary of multiply imputed statistical models
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/summary.MI.sim.Rd b/man/summary.MI.sim.Rd
deleted file mode 100644
index 61a8ae1..0000000
--- a/man/summary.MI.sim.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{summary.MI.sim}
-\alias{summary.MI.sim}
-\title{Method for summarizing simulations of multiply imputed quantities of interest}
-\usage{
-  \method{summary}{MI.sim}(object, ...)
-}
-\arguments{
-  \item{object}{a `MI.sim' object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a `summarized.MI.sim' object
-}
-\description{
-  Method for summarizing simulations of multiply imputed
-  quantities of interest
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/summary.Relogit2.Rd b/man/summary.Relogit2.Rd
deleted file mode 100644
index ae2e22b..0000000
--- a/man/summary.Relogit2.Rd
+++ /dev/null
@@ -1,18 +0,0 @@
-\name{summary.Relogit2}
-\alias{summary.Relogit2}
-\title{Summary for ``Relogit2'' Fitted Model}
-\usage{
-  \method{summary}{Relogit2}(object, ...)
-}
-\arguments{
-  \item{object}{a ``Relogit2'' object}
-
-  \item{...}{other parameters}
-}
-\value{
-  a ``summary.relogit2'' object
-}
-\description{
-  Summarize important components of the ``relogit'' model
-}
-
diff --git a/man/summary.glm.robust.Rd b/man/summary.glm.robust.Rd
deleted file mode 100644
index c9f4c26..0000000
--- a/man/summary.glm.robust.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{summary.glm.robust}
-\alias{summary.glm.robust}
-\title{Summary of Generalized Linear Model with Robust Error Estimates}
-\usage{
-  \method{summary}{glm.robust}(object, ...)
-}
-\arguments{
-  \item{object}{a ``glm.robust'' fitted model}
-
-  \item{...}{parameters to pass to the standard
-  ``summary.glm'' method}
-}
-\value{
-  an object of type ``summary.glm.robust'' and
-  ``summary.glm''
-}
-\description{
-  Returns a summary of a glm model with robust error
-  estimates. This only slightly differs from how the
-  standard GLMs behave.
-}
-
diff --git a/man/summary.pooled.sim.Rd b/man/summary.pooled.sim.Rd
deleted file mode 100644
index d03d810..0000000
--- a/man/summary.pooled.sim.Rd
+++ /dev/null
@@ -1,28 +0,0 @@
-\name{summary.pooled.sim}
-\alias{summary.pooled.sim}
-\title{Return a Summary of a Set of Pooled Simulated Interests}
-\usage{
-  \method{summary}{pooled.sim}(object, ...)
-}
-\arguments{
-  \item{object}{a ``pooled.sim'' object, containing
-  information about simulated quantities of interest}
-
-  \item{...}{Ignored parameters}
-}
-\value{
-  a ``summary.pooled.sim'' object storing the replicated
-  quantities of interest
-}
-\description{
-  Returns the summary information from a set of pooled
-  simulated interests. The object returned contains the
-  slots ``labels'', a character-vector specifying the
-  labels (explanatory variable titles) of the qi's,
-  ``titles'', a character vector specifying the names of
-  the quantities of interest.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/summary.relogit.Rd b/man/summary.relogit.Rd
deleted file mode 100644
index 5f5a412..0000000
--- a/man/summary.relogit.Rd
+++ /dev/null
@@ -1,18 +0,0 @@
-\name{summary.Relogit}
-\alias{summary.Relogit}
-\title{Summary for ``Relogit'' Fitted Model}
-\usage{
-  \method{summary}{Relogit}(object, ...)
-}
-\arguments{
-  \item{object}{a ``Relogit'' object}
-
-  \item{...}{other parameters}
-}
-\value{
-  a ``summary.relogit'' object
-}
-\description{
-  Summarize important components of the ``relogit'' model
-}
-
diff --git a/man/summary.sim.Rd b/man/summary.sim.Rd
deleted file mode 100644
index 804a9c6..0000000
--- a/man/summary.sim.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{summary.sim}
-\alias{summary.sim}
-\title{Method for summarizing simulations of quantities of interest}
-\usage{
-  \method{summary}{sim}(object, ...)
-}
-\arguments{
-  \item{object}{a 'sim' object}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  a 'summary.sim' object
-}
-\description{
-  Return a ``summary.sim'' object (typically for display)
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/summary.zelig.Rd b/man/summary.zelig.Rd
deleted file mode 100644
index e71209c..0000000
--- a/man/summary.zelig.Rd
+++ /dev/null
@@ -1,22 +0,0 @@
-\name{summary.zelig}
-\alias{summary.zelig}
-\title{Zelig Object Summaries}
-\usage{
-  \method{summary}{zelig}(object, ...)
-}
-\arguments{
-  \item{object}{a zelig object}
-
-  \item{...}{parameters forwarded to the generic summary
-  method}
-}
-\value{
-  the summary of the fitted model
-}
-\description{
-  Compute summary data for zelig objects
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/t.setx.Rd b/man/t.setx.Rd
deleted file mode 100644
index 95e87be..0000000
--- a/man/t.setx.Rd
+++ /dev/null
@@ -1,23 +0,0 @@
-\name{t.setx}
-\alias{t.setx}
-\title{Matrix Transpose of a ``setx'' Object}
-\usage{
-  \method{t}{setx}(x)
-}
-\arguments{
-  \item{x}{a `setx' object}
-}
-\value{
-  a transposed matrix
-}
-\description{
-  Returns a ``setx'' object as a column vector. If multiple
-  values have been set for each explanatory term, it
-  returns an NxM matrix, where `N' is the number of
-  explanatory terms and `M' is the number of values set for
-  each term.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
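A minimal sketch of the transpose behavior documented above, assuming the Zelig 4 API and the bundled turnout data (the variable choices are illustrative):

    library(Zelig)
    data(turnout)
    z <- zelig(vote ~ educate + income, model = "logit", data = turnout, cite = FALSE)

    # one value per term: t() returns a column vector
    t(setx(z, educate = 12))

    # several values per explanatory term: t() returns an N x M matrix
    # (N explanatory terms by M values per term)
    t(setx(z, educate = 10:15))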
diff --git a/man/table.levels.Rd b/man/table.levels.Rd
index 58d37f7..5da0ed6 100644
--- a/man/table.levels.Rd
+++ b/man/table.levels.Rd
@@ -1,3 +1,5 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
 \name{table.levels}
 \alias{table.levels}
 \title{Create a table, but ensure that the correct
@@ -5,24 +7,25 @@ columns exist. In particular, this allows for
 entries with zero as a value, which is not
 the default for standard tables}
 \usage{
-  table.levels(x, levels, ...)
+table.levels(x, levels, ...)
 }
 \arguments{
-  \item{x}{a vector}
+\item{x}{a vector}
 
-  \item{levels}{a vector of levels}
+\item{levels}{a vector of levels}
 
-  \item{...}{parameters for table}
+\item{...}{parameters for table}
 }
 \value{
-  a table
+a table
 }
 \description{
-  Create a table, but ensure that the correct columns
-  exist. In particular, this allows for entires with zero
-  as a value, which is not the default for standard tables
+Create a table, but ensure that the correct
+columns exist. In particular, this allows for
+entries with zero as a value, which is not
+the default for standard tables
 }
 \author{
-  Matt Owen \email{mowen at iq.harvard.edu}
+Matt Owen \email{mowen at iq.harvard.edu}
 }
 
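A short sketch of the zero-count behavior described above; whether table.levels is exported (versus needing the Zelig::: accessor) is an assumption:

    library(Zelig)
    x <- c("a", "a", "c")

    table(x)                                    # drops the unobserved level "b"
    table.levels(x, levels = c("a", "b", "c"))  # keeps "b" with a count of zero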
diff --git a/man/terms.multiple.Rd b/man/terms.multiple.Rd
deleted file mode 100644
index 15246a3..0000000
--- a/man/terms.multiple.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{terms.multiple}
-\alias{terms.multiple}
-\title{Extract Terms from a \code{multiple} Object}
-\usage{
-  \method{terms}{multiple}(x, data=NULL,...)
-}
-\arguments{
-  \item{x}{a Zelig v3.5 formula}
-
-  \item{data}{a \code{data.frame}}
-
-  \item{...}{ignored parameters}
-}
-\description{
-  Extracts terms from Zelig-3.5-style formulae. This
-  function is scheduled for removal.
-}
-\author{
-  Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
-}
-
diff --git a/man/terms.vglm.Rd b/man/terms.vglm.Rd
deleted file mode 100644
index 02ba4fa..0000000
--- a/man/terms.vglm.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{terms.vglm}
-\alias{terms.vglm}
-\title{Model Terms for 'vglm' Models}
-\usage{
-  \method{terms}{vglm}(x, ...)
-}
-\arguments{
-  \item{x}{a fitted model object from the VGAM library}
-
-  \item{...}{ignored parameters}
-}
-\value{
-  the model terms of this fitted model object
-}
-\description{
-  Model Terms for 'vglm' Models
-}
-\author{
-  Ferdinand Alimadhi, Kosuke Imai and Olivia Lau
-}
-
diff --git a/man/terms.zelig.Rd b/man/terms.zelig.Rd
deleted file mode 100644
index d6d6b3f..0000000
--- a/man/terms.zelig.Rd
+++ /dev/null
@@ -1,19 +0,0 @@
-\name{terms.zelig}
-\alias{terms.zelig}
-\title{Model Terms for a Zelig Object}
-\usage{
-  \method{terms}{zelig}(x, ...)
-}
-\arguments{
-  \item{x}{a \code{zelig} object}
-
-  \item{...}{forwarded parameters}
-}
-\value{
-  terms of the original fitted model
-}
-\description{
-  This method simply extracts the model terms for the
-  fitted model passed to the \code{zelig} function.
-}
-
diff --git a/man/termsFromFormula.Rd b/man/termsFromFormula.Rd
deleted file mode 100644
index dfc48b0..0000000
--- a/man/termsFromFormula.Rd
+++ /dev/null
@@ -1,17 +0,0 @@
-\name{termsFromFormula}
-\alias{termsFromFormula}
-\title{Extract Terms from Zelig-style Formulae}
-\usage{
-  termsFromFormula(obj)
-}
-\arguments{
-  \item{obj}{a Zelig-style formula}
-}
-\description{
-  This method is a sugary function to extract terms from
-  any type of Zelig-style formula.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/toBuildFormula.Rd b/man/toBuildFormula.Rd
deleted file mode 100644
index 3b2d1c0..0000000
--- a/man/toBuildFormula.Rd
+++ /dev/null
@@ -1,21 +0,0 @@
-\name{toBuildFormula}
-\alias{toBuildFormula}
-\title{Build Formula ???}
-\usage{
-  toBuildFormula(Xnames, sepp = "+")
-}
-\arguments{
-  \item{Xnames}{a character-vector}
-
-  \item{sepp}{a separator (???)}
-}
-\value{
-  a character-string
-}
-\description{
-  This function builds a formula
-}
-\author{
-  ???
-}
-
diff --git a/man/tolmerFormat.Rd b/man/tolmerFormat.Rd
deleted file mode 100644
index 89d2302..0000000
--- a/man/tolmerFormat.Rd
+++ /dev/null
@@ -1,25 +0,0 @@
-\name{tolmerFormat}
-\alias{tolmerFormat}
-\title{Convert a Formula into 'lmer' Representation from Reduced Form
-Take a formula in its reduced form and return it as an 'lmer' representation
-(from the lme4 package). This is basically removing the starting 'tag' from
-each term.}
-\usage{
-  tolmerFormat(f)
-}
-\arguments{
-  \item{f}{a formula in reduced form}
-}
-\value{
-  the 'lmer' representation of 'f'
-}
-\description{
-  Convert a Formula into 'lmer' Representation from Reduced
-  Form. Take a formula in its reduced form and return it as an
-  'lmer' representation (from the lme4 package). This is
-  basically removing the starting 'tag' from each term.
-}
-\author{
-  Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
-}
-
diff --git a/man/ucfirst.Rd b/man/ucfirst.Rd
deleted file mode 100644
index da86fdf..0000000
--- a/man/ucfirst.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{ucfirst}
-\alias{ucfirst}
-\title{Uppercase First Letter of a String}
-\usage{
-  ucfirst(str)
-}
-\arguments{
-  \item{str}{a vector of character-strings}
-}
-\value{
-  a vector of character strings
-}
-\description{
-  This method sets the first character of a string to its
-  uppercase, sets all other characters to lowercase.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
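The behavior described above, sketched; ucfirst was an internal Zelig helper, so calling it directly is illustrative only:

    ucfirst(c("gamma", "LOGIT", "poisson"))
    # per the description: "Gamma" "Logit" "Poisson" -- the first letter
    # upper-cased, the remainder lower-cased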
diff --git a/man/user.prompt.Rd b/man/user.prompt.Rd
deleted file mode 100644
index 83c6c61..0000000
--- a/man/user.prompt.Rd
+++ /dev/null
@@ -1,20 +0,0 @@
-\name{user.prompt}
-\alias{user.prompt}
-\title{Prompt User}
-\usage{
-  user.prompt(msg = NULL)
-}
-\arguments{
-  \item{msg}{a character-string, specifying a message to be
-  displayed}
-}
-\value{
-  This function is used for its side effects
-}
-\description{
-  Prompts user to hit enter
-}
-\note{
-  This function is primarily used by Zelig demo scripts
-}
-
diff --git a/man/vcov-Zelig-method.Rd b/man/vcov-Zelig-method.Rd
new file mode 100644
index 0000000..387d0cd
--- /dev/null
+++ b/man/vcov-Zelig-method.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-zelig.R
+\docType{methods}
+\name{vcov,Zelig-method}
+\alias{vcov,Zelig-method}
+\title{Variance-covariance method for Zelig objects}
+\usage{
+\S4method{vcov}{Zelig}(object, ...)
+}
+\arguments{
+\item{object}{An Object of Class Zelig}
+
+\item{...}{Additional parameters to be passed to vcov}
+}
+\description{
+Variance-covariance method for Zelig objects
+}
+
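The standard S4 usage pattern, sketched against the bundled turnout data:

    library(Zelig)
    data(turnout)
    z.out <- zelig(vote ~ educate + income, model = "logit",
                   data = turnout, cite = FALSE)

    # variance-covariance matrix of the estimated coefficients
    vcov(z.out)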
diff --git a/man/z.Rd b/man/z.Rd
deleted file mode 100644
index 6f0fa8e..0000000
--- a/man/z.Rd
+++ /dev/null
@@ -1,29 +0,0 @@
-\name{z}
-\alias{z}
-\title{Return value for a zelig2-function}
-\usage{
-  z(.function, ..., .hook = NULL)
-}
-\arguments{
-  \item{.function}{a function}
-
-  \item{...}{a set of parameters to be evaluated
-  symbolically}
-
-  \item{.hook}{a function to be applied after the external, model-fitting
-  function is called}
-}
-\value{
-  a ``z'' object which specifies how to evaluate the fitted
-  model
-}
-\description{
-  This is an API-function that bridges a model-fitting
-  function with a zelig interface.
-}
-\note{
-  This is used internally by Zelig-dependent packages to
-  instruct Zelig how to evaluate the function call to a
-  particular statistical model.
-}
-
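A hedged illustration of the bridge pattern this page documents: a hypothetical zelig2mymodel that instructs Zelig to fit via stats::glm. The model name, family, and argument set are invented for illustration only:

    zelig2mymodel <- function (formula, ..., data) {
      z(
        .function = glm,        # the external model-fitting function
        .hook     = NULL,       # no post-fit hook needed here
        formula   = formula,    # evaluated symbolically, per the description
        family    = binomial(),
        data      = data
        )
    }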
diff --git a/man/zelig.Rd b/man/zelig.Rd
index 3a28af1..b0da87b 100644
--- a/man/zelig.Rd
+++ b/man/zelig.Rd
@@ -1,71 +1,65 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/wrappers.R
 \name{zelig}
 \alias{zelig}
 \title{Estimating a Statistical Model}
 \usage{
-  zelig(formula, model, data, ..., by = NULL, cite = T)
+zelig(formula, model, data, ..., by = NULL, cite = TRUE)
 }
 \arguments{
-  \item{formula}{a symbolic representation of the model to
-  be estimated, in the form \code{y \~\, x1 + x2}, where
-  \code{y} is the dependent variable and \code{x1} and
-  \code{x2} are the explanatory variables, and \code{y},
-  \code{x1}, and \code{x2} are contained in the same
-  dataset.  (You may include more than two explanatory
-  variables, of course.)  The \code{+} symbol means
-  ``inclusion'' not ``addition.'' You may also include
-  interaction terms and main effects in the form
-  \code{x1*x2} without computing them in prior steps;
-  \code{I(x1*x2)} to include only the interaction term and
-  exclude the main effects; and quadratic terms in the form
-  \code{I(x1^2)}}
+\item{formula}{a symbolic representation of the model to be
+estimated, in the form \code{y \~\, x1 + x2}, where \code{y} is the
+dependent variable and \code{x1} and \code{x2} are the explanatory
+variables, and \code{y}, \code{x1}, and \code{x2} are contained in the
+same dataset.  (You may include more than two explanatory variables,
+of course.)  The \code{+} symbol means ``inclusion'' not
+``addition.''  You may also include interaction terms and main
+effects in the form \code{x1*x2} without computing them in prior
+steps; \code{I(x1*x2)} to include only the interaction term and
+exclude the main effects; and quadratic terms in the form
+\code{I(x1^2)}}
 
-  \item{model}{the name of a statistical model.  Type
-  \code{help.zelig("models")} to see a list of currently
-  supported models}
+\item{model}{the name of a statistical model.
+Type \code{help.zelig("models")} to see a list of currently supported
+models}
 
-  \item{data}{the name of a data frame containing the
-  variables referenced in the formula, or a list of
-  multiply imputed data frames each having the same
-  variable names and row numbers (created by \code{mi})}
+\item{data}{the name of a data frame containing the variables
+referenced in the formula, or a list of multiply imputed data frames
+each having the same variable names and row numbers (created by
+\code{mi})}
 
-  \item{...}{additional arguments passed to \code{zelig},
-  depending on the model to be estimated}
+\item{...}{additional arguments passed to \code{zelig},
+depending on the model to be estimated}
 
-  \item{by}{a factor variable contained in \code{data}.
-  Zelig will subset the data frame based on the levels in
-  the \code{by} variable, and estimate a model for each
-  subset.  This a particularly powerful option which will
-  allow you to save a considerable amount of effort.  For
-  example, to run the same model on all fifty states, you
-  could type: \code{z.out <- zelig(y ~ x1 + x2, data =
-  mydata, model = "ls", by = "state")} You may also use
-  \code{by} to run models using MatchIt subclass}
+\item{by}{a factor variable contained in \code{data}.  Zelig will subset
+the data frame based on the levels in the \code{by} variable, and
+estimate a model for each subset.  This is a particularly powerful option
+which will allow you to save a considerable amount of effort.  For
+example, to run the same model on all fifty states, you could type:
+\code{z.out <- zelig(y ~ x1 + x2, data = mydata, model = "ls", by = "state")}
+You may also use \code{by} to run models using MatchIt subclass}
 
-  \item{cite}{If is set to "TRUE" (default), the model
-  citation will be}
+\item{cite}{If set to "TRUE" (the default), the model citation will be printed}
 }
 \value{
-  Depending on the class of model selected, \code{zelig}
-  will return an object with elements including
-  \code{coefficients}, \code{residuals}, and \code{formula}
-  which may be summarized using \code{summary(z.out)} or
-  individually extracted using, for example,
-  \code{z.out\$coefficients}.  See the specific models
-  listed above for additional output values, or simply type
-  \code{names(z.out)}.
+Depending on the class of model selected, \code{zelig} will return
+  an object with elements including \code{coefficients}, \code{residuals},
+  and \code{formula} which may be summarized using
+  \code{summary(z.out)} or individually extracted using, for example,
+  \code{z.out\$coefficients}.  See the specific models listed above
+  for additional output values, or simply type \code{names(z.out)}.
 }
 \description{
-  The zelig command estimates a variety of statistical
-  models.  Use \code{zelig} output with \code{setx} and
-  \code{sim} to compute quantities of interest, such as
-  predicted probabilities, expected values, and first
-  differences, along with the associated measures of
-  uncertainty (standard errors and confidence intervals).
+The zelig command estimates a variety of statistical
+models.  Use \code{zelig} output with \code{setx} and \code{sim} to compute
+quantities of interest, such as predicted probabilities, expected values, and
+first differences, along with the associated measures of uncertainty
+(standard errors and confidence intervals).
 }
 \author{
-  Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai,
-  Olivia Lau, and Gary King Maintainer: Matt Owen
-  \email{mowen at iq.harvard.edu}
+Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai, Olivia Lau, and
+Gary King 
+Maintainer: Matt Owen \email{mowen at iq.harvard.edu}
 }
 \keyword{package}
 
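The by idiom from the argument description, written out against the bundled turnout data (mirroring the package's own test scripts):

    library(Zelig)
    data(turnout)

    # one logit fit per level of race
    z <- zelig(vote ~ educate + income, model = "logit",
               by = "race", data = turnout, cite = FALSE)

    x <- setx(z, educate = 12)
    s <- sim(z, x = x)
    summary(s)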
diff --git a/man/zelig.call.Rd b/man/zelig.call.Rd
deleted file mode 100644
index f98f2ba..0000000
--- a/man/zelig.call.Rd
+++ /dev/null
@@ -1,27 +0,0 @@
-\name{zelig.call}
-\alias{zelig.call}
-\title{Create Function Call}
-\usage{
-  zelig.call(Call, zelig2, remove = NULL)
-}
-\arguments{
-  \item{Call}{a \code{call} object, typically specifying
-  the original function call to \code{zelig}}
-
-  \item{zelig2}{the return-value of the \code{zelig2}
-  method}
-
-  \item{remove}{a list of character vectors specifying
-  which parameters to ignore from the original call to
-  \code{zelig}}
-}
-\value{
-  a function call used to fit the statistical model
-}
-\description{
-  Create Function Call
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
diff --git a/man/zelig.skeleton.Rd b/man/zelig.skeleton.Rd
deleted file mode 100644
index 79da88a..0000000
--- a/man/zelig.skeleton.Rd
+++ /dev/null
@@ -1,62 +0,0 @@
-\name{zelig.skeleton}
-\alias{zelig.skeleton}
-\title{Creates a Skeleton for a New Zelig package}
-\usage{
-  zelig.skeleton(pkg, models = c(),
-    author = "UNKNOWN AUTHOR", path = ".", force = FALSE,
-    email = "maintainer at software-project.org",
-    depends = c(), ..., .gitignore = TRUE,
-    .Rbuildignore = TRUE)
-}
-\arguments{
-  \item{pkg}{a character-string specifying the name of the
-  Zelig package}
-
-  \item{models}{a vector of strings specifying models to be
-  included in the package}
-
-  \item{author}{a vector of strings specifying contributors
-  to the package}
-
-  \item{path}{a character-string specifying the path to the
-  package}
-
-  \item{force}{a logical specifying whether to overwrite
-  files and create necessary directories}
-
-  \item{email}{a string specifying the email address of the
-  package's maintainer}
-
-  \item{depends}{a vector of strings specifying package
-  dependencies}
-
-  \item{...}{ignored parameters}
-
-  \item{.gitignore}{a logical specifying whether to include
-  a copy of a simple \code{.gitignore} in the appropriate
-  folders (\code{inst/doc} and the package root)}
-
-  \item{.Rbuildignore}{a logical specifying whether to
-  include a copy of a simple \code{.Rbuildignore} in the
-  appropriate folders (\code{inst/doc} and the package
-  root)}
-}
-\value{
-  nothing
-}
-\description{
-  'zelig.skeleton' generates the necessary files used to
-  create a Zelig package. Based on (and using) R's
-  'package.skeleton' it removes some of the monotony of
-  building statistical packages. In particular,
-  'zelig.skeleton' produces templates for the
-  \code{zelig2}, \code{describe}, \code{param}, and
-  \code{qi} methods. For more information about creating
-  these files on an individual basis, please refer to the
-  tech manuals, which are available by typing:
-  \code{?zelig2}, \code{?param}, or \code{?qi}.
-}
-\author{
-  Matt Owen \email{mowen at iq.harvard.edu}
-}
-
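A sketch of a typical call, writing into a temporary directory; the package and model names here are invented:

    library(Zelig)
    zelig.skeleton(
      "myzelig",                          # name of the new Zelig package
      models  = c("mymodel"),             # models to stub out
      author  = "A. Developer",
      email   = "maintainer@example.org",
      depends = c("MASS"),
      path    = tempdir(),
      force   = TRUE
      )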
diff --git a/man/zelig2-bayes.Rd b/man/zelig2-bayes.Rd
deleted file mode 100644
index dff7038..0000000
--- a/man/zelig2-bayes.Rd
+++ /dev/null
@@ -1,49 +0,0 @@
-\name{zelig2-bayes}
-
-\alias{zelig2factor.bayes}
-\alias{zelig2logit.bayes}
-\alias{zelig2mlogit.bayes}
-\alias{zelig2normal.bayes}
-\alias{zelig2oprobit.bayes}
-\alias{zelig2poisson.bayes}
-\alias{zelig2probit.bayes}
-
-\title{Zelig Bridge Functions to Bayesian Models}
-
-\usage{
-  zelig2factor.bayes(formula, factors = 2, burnin = 1000, mcmc = 20000, verbose = 0, ...,
-   data)
-  zelig2logit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
-  zelig2mlogit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
-  zelig2normal.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
-  zelig2oprobit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
-  zelig2poisson.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
-  zelig2probit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
-}
-
-\arguments{
-  \item{formula}{a formula}
-
-  \item{...}{additional parameters}
-
-  \item{data}{a data.frame}
-
-  \item{factors}{a list of factors}
-
-  \item{burnin}{a parameter corresponding to the 'burnin'
-  parameter for the MCMCprobit function}
-
-  \item{mcmc}{a parameter corresponding to the 'mcmc'
-  parameter for the MCMCprobit function}
-
-  \item{verbose}{a parameter corresponding to the 'verbose'
-  parameter for the MCMCprobit function}
-}
-
-\value{
-  a list specifying '.function'
-}
-
-\description{
-  Interface between Zelig and the bayesian models.
-}
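These bridges are invoked by zelig() itself, not by users; a user-level sketch, with data and covariates following the package's own test scripts:

    library(Zelig)
    data(turnout)

    # burnin, mcmc and verbose are forwarded through zelig2logit.bayes
    # to the MCMC fitting function
    z.out <- zelig(vote ~ race + educate, model = "logit.bayes",
                   burnin = 1000, mcmc = 10000, verbose = FALSE,
                   data = turnout)
    summary(z.out)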
diff --git a/man/zelig2-core.Rd b/man/zelig2-core.Rd
deleted file mode 100644
index 4f28639..0000000
--- a/man/zelig2-core.Rd
+++ /dev/null
@@ -1,69 +0,0 @@
-\name{zelig2-core}
-
-\alias{zelig2exp}
-\alias{zelig2gamma}
-\alias{zelig2logit}
-\alias{zelig2lognorm}
-\alias{zelig2ls}
-\alias{zelig2negbinom}
-\alias{zelig2normal}
-\alias{zelig2poisson}
-\alias{zelig2probit}
-\alias{zelig2relogit}
-\alias{zelig2tobit}
-\alias{zelig2twosls}
-
-\title{
-  Zelig to Basic GLM Fitting Functions
-}
-
-\usage{
-  zelig2exp(formula, ..., robust = FALSE, cluster = NULL, data)
-  zelig2gamma(formula, ..., data)
-  zelig2logit(formula, weights = NULL, robust = F, ..., data)
-  zelig2lognorm(formula, ..., robust = FALSE, cluster = NULL, data)
-  zelig2ls(formula, ..., data, weights = NULL)
-  zelig2negbinom(formula, weights = NULL, ..., data)
-  zelig2normal(formula, weights = NULL, ..., data)
-  zelig2poisson(formula, weights = NULL, ..., data)
-  zelig2probit(formula, weights = NULL, ..., data)
-  zelig2relogit(formula, ..., tau = NULL, bias.correct = NULL, case.control = NULL, data)
-  zelig2tobit(formula, ..., below = 0, above = Inf, robust = FALSE, cluster = NULL, data)
-  zelig2twosls(formula, ..., data)
-}
-
-\arguments{
-  \item{formula}{a formula}
-
-  \item{...}{additional parameters}
-
-  \item{weights}{a numeric vector}
-
-  \item{robust}{a boolean specifying whether to use robust
-  error estimates}
-
-  \item{cluster}{a vector describing the clustering of the
-  data}
-
-  \item{data}{a data.frame}
-
-  \item{tau}{...}
-
-  \item{bias.correct}{...}
-
-  \item{case.control}{...}
-
-  \item{below}{a numeric or infinite specifying a lower
-  boundary for censored responses}
-
-  \item{above}{a numeric or infinite specifying an upper
-  boundary for censored responses}
-}
-
-\value{
-  a list used by Zelig to call the model-fitting function
-}
-
-\description{
-  Interface between Zelig and Basic GLM Fitting Functions
-}
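Likewise called by zelig() internally; a user-level sketch of the censoring arguments, assuming the tobin data shipped with the survival package:

    library(Zelig)
    data(tobin, package = "survival")

    # below and above are forwarded through zelig2tobit to the
    # model-fitting function
    z <- zelig(durable ~ age + quant, model = "tobit",
               below = 0, above = Inf, data = tobin)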
diff --git a/man/zelig2-gee.Rd b/man/zelig2-gee.Rd
deleted file mode 100644
index 0106b60..0000000
--- a/man/zelig2-gee.Rd
+++ /dev/null
@@ -1,48 +0,0 @@
-\name{zelig2-gee}
-
-\alias{zelig2gamma.gee}
-\alias{zelig2logit.gee}
-\alias{zelig2normal.gee}
-\alias{zelig2poisson.gee}
-\alias{zelig2probit.gee}
-
-\title{Bridge between Zelig and the GEE Model Fitting Functions}
-
-\usage{
-  zelig2gamma.gee(formula, id, robust, ..., R, corstr = "independence", data)
-  zelig2logit.gee(formula, id, robust, ..., R, corstr = "independence", data)
-  zelig2normal.gee(formula, id, robust, ..., R, corstr = "independence", data)
-  zelig2poisson.gee(formula, id, robust, ..., R, corstr = "independence", data)
-  zelig2probit.gee(formula, id, robust, ..., R, corstr = "independence", data)
-}
-
-\arguments{
-  \item{formula}{a formula}
-
-  \item{id}{a character-string specifying the column of the
-  data-set to use for clustering}
-
-  \item{robust}{a logical specifying whether to robustly or
-  naively compute the covariance matrix. This parameter is
-  ignored in the \code{zelig2} method, and instead used in
-  the \code{robust.hook} function, which executes after the
-  call to the \code{gee} function}
-
-  \item{...}{ignored parameters}
-
-  \item{R}{a square-matrix specifying the correlation}
-
-  \item{corstr}{a character-string specifying the
-  correlation structure}
-
-  \item{data}{a data.frame}
-}
-
-\value{
-  a list specifying the call to the external model
-}
-
-\description{
-  Bridge between Zelig and the GEE Model Fitting Functions
-}
-
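A user-level sketch; the id column is named as a character string and corstr is one of the usual gee correlation structures (the toy data are invented):

    library(Zelig)

    set.seed(1)
    d <- data.frame(
      y  = rbinom(200, 1, 0.5),
      x  = rnorm(200),
      id = rep(1:50, each = 4)   # data sorted by cluster, as gee expects
      )

    z <- zelig(y ~ x, model = "logit.gee", id = "id",
               corstr = "exchangeable", data = d)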
diff --git a/man/zelig2-survey.Rd b/man/zelig2-survey.Rd
deleted file mode 100644
index 7abe49a..0000000
--- a/man/zelig2-survey.Rd
+++ /dev/null
@@ -1,148 +0,0 @@
-\name{zelig2-survey}
-
-\alias{zelig2gamma.survey}
-\alias{zelig2logit.survey}
-\alias{zelig2normal.survey}
-\alias{zelig2poisson.survey}
-\alias{zelig2probit.survey}
-
-\title{Interface between \code{Zelig} and \code{svyglm}}
-
-\usage{
-  zelig2gamma.survey(formula, weights = NULL, ids = NULL,
-    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
-    check.strata = !nest, repweights = NULL, type,
-    combined.weights = FALSE, rho = NULL,
-    bootstrap.average = NULL, scale = NULL, rscales = NULL,
-    fpctype = "fraction", return.replicates = FALSE,
-    na.action = "na.omit", start = NULL, etastart = NULL,
-    mustart = NULL, offset = NULL, model1 = TRUE,
-    method = "glm.fit", x = FALSE, y = TRUE,
-    contrasts = NULL, design = NULL, link = "inverse",
-    data, ...)
-
-  zelig2logit.survey(formula, weights = NULL, ids = NULL,
-    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
-    check.strata = !nest, repweights = NULL, type,
-    combined.weights = FALSE, rho = NULL,
-    bootstrap.average = NULL, scale = NULL, rscales = NULL,
-    fpctype = "fraction", return.replicates = FALSE,
-    na.action = "na.omit", start = NULL, etastart = NULL,
-    mustart = NULL, offset = NULL, model1 = TRUE,
-    method = "glm.fit", x = FALSE, y = TRUE,
-    contrasts = NULL, design = NULL, data)
-
-  zelig2normal.survey(formula, weights = NULL, ids = NULL,
-    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
-    check.strata = !nest, repweights = NULL, type,
-    combined.weights = FALSE, rho = NULL,
-    bootstrap.average = NULL, scale = NULL, rscales = NULL,
-    fpctype = "fraction", return.replicates = FALSE,
-    na.action = "na.omit", start = NULL, etastart = NULL,
-    mustart = NULL, offset = NULL, model1 = TRUE,
-    method = "glm.fit", x = FALSE, y = TRUE,
-    contrasts = NULL, design = NULL, data)
-
-  zelig2poisson.survey(formula, weights = NULL, ids = NULL,
-    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
-    check.strata = !nest, repweights = NULL, type,
-    combined.weights = FALSE, rho = NULL,
-    bootstrap.average = NULL, scale = NULL, rscales = NULL,
-    fpctype = "fraction", return.replicates = FALSE,
-    na.action = "na.omit", start = NULL, etastart = NULL,
-    mustart = NULL, offset = NULL, model1 = TRUE,
-    method = "glm.fit", x = FALSE, y = TRUE,
-    contrasts = NULL, design = NULL, data)
-
-  zelig2probit.survey(formula, weights = NULL, ids = NULL,
-    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
-    check.strata = !nest, repweights = NULL, type,
-    combined.weights = FALSE, rho = NULL,
-    bootstrap.average = NULL, scale = NULL, rscales = NULL,
-    fpctype = "fraction", return.replicates = FALSE,
-    na.action = "na.omit", start = NULL, etastart = NULL,
-    mustart = NULL, offset = NULL, model1 = TRUE,
-    method = "glm.fit", x = FALSE, y = TRUE,
-    contrasts = NULL, design = NULL, data)
-}
-
-\arguments{
-  \item{formula}{a \code{formula}}
-
-  \item{weights}{...}
-
-  \item{ids}{...}
-
-  \item{probs}{...}
-
-  \item{strata}{...}
-
-  \item{fpc}{...}
-
-  \item{nest}{...}
-
-  \item{check.strata}{...}
-
-  \item{repweights}{...}
-
-  \item{type}{...}
-
-  \item{combined.weights}{...}
-
-  \item{rho}{...}
-
-  \item{bootstrap.average}{...}
-
-  \item{scale}{...}
-
-  \item{rscales}{...}
-
-  \item{fpctype}{...}
-
-  \item{return.replicates}{...}
-
-  \item{na.action}{...}
-
-  \item{start}{...}
-
-  \item{etastart}{...}
-
-  \item{mustart}{...}
-
-  \item{offset}{...}
-
-  \item{model1}{...}
-
-  \item{method}{...}
-
-  \item{x}{...}
-
-  \item{y}{...}
-
-  \item{contrasts}{...}
-
-  \item{design}{...}
-
-  \item{link}{an object specifying a link function between the predictor and response variables}
-
-  \item{data}{a \code{data.frame}}
-
-  \item{\dots}{
-    Additional parameters passed to the ``gamma.survey'' model fitting function
-  }
-}
-\value{
-  a \code{list} used to construct parameters for the
-  \code{svyglm} function
-}
-\description{
-  Interface between \code{zelig} and \code{svyglm} for the
-  survey-weighted models
-}
-\note{
-  This manual file is largely incomplete, and needs a
-  significant amount of filling out. This, in itself, might
-  be motivation to divide this package into more models
-  with more specific function.
-}
-
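A heavily hedged sketch: the argument set mirrors survey::svydesign/svyglm, and the api data come from the survey package; whether every design argument round-trips through zelig() exactly this way is an assumption:

    library(Zelig)
    library(survey)
    data(api)

    # probability weights supplied as in svydesign
    z <- zelig(api00 ~ ell + meals, model = "normal.survey",
               weights = ~pw, data = apistrat)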
diff --git a/man/zelig2.Rd b/man/zelig2.Rd
deleted file mode 100644
index 7929bca..0000000
--- a/man/zelig2.Rd
+++ /dev/null
@@ -1,58 +0,0 @@
-\name{zelig2}
-\alias{zelig2}
-\title{Interface Between Zelig Models and External Functions}
-\value{
-  The main purpose of the \code{zelig2} function is to
-  return a list of key-value pairs, specifying how Zelig
-  should interface with the external method. This list has
-  the following format:
-
-  \item{\code{.function}}{specifies the name of the
-  external method to be called by the \code{zelig} function.
-  Subsequent parameters are evaluated as part of a function
-  call to the function named by this string.}
-  \item{\code{.hook}}{specifies the name of a hook function
-  as a string. The hook function is only evaluated on the
-  zelig object once the external method fits the statistical
-  model} \item{...}{any parameters aside from
-  \code{.function} and \code{.hook} are passed as part of the
-  function call to the external model}
-}
-\description{
-  The \code{zelig2} function acts as a simple interface
-  between a user's call to the \code{zelig} function and
-  the zelig functions subsequent call to the pre-existing
-  external model. The external model varies based on which
-  model is being called.
-}
-\note{
-  Writing \code{zelig2} functions is required of Zelig
-  developers. In particular, \code{zelig2} functions act as
-  an interface between external models (models not included
-  in the Zelig package) and the \code{zelig} function which
-  must use that model.
-
-  \code{zelig2} is not an actual function. Rather, it is a
-  naming convention: each model supplies its own
-  \code{zelig2<model name>} function.
-}
-\examples{
-zelig2some.model <- function (formula, weights, verbose, ..., data) {
-   list(
-        .function = 'some.other.method',
-        .hook = NULL,
-        formula = formula,
-        weights = 2 * weights,
-        data = data
-        )
- }
-
-## This \\code{zelig2} function equates the following function call:
-##  zelig(formula, weights = weights, verbose = TRUE, data = data, model="some.model")
-##
-## with:
-##  some.other.method(formula = formula, weights = 2 * weights, data=data)
-
-## Note that the 'verbose' parameter is ignored, since the
-## 'zelig2some.model' does not include the 'verbose' parameter in its return
-## value.
-}
-
diff --git a/man/zeligACFplot.Rd b/man/zeligACFplot.Rd
new file mode 100644
index 0000000..a4298e9
--- /dev/null
+++ b/man/zeligACFplot.Rd
@@ -0,0 +1,16 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/plots.R
+\name{zeligACFplot}
+\alias{zeligACFplot}
+\title{Plot Autocorrelation Function from Zelig QI object}
+\usage{
+zeligACFplot(z, omitzero = FALSE, barcol = "black", epsilon = 0.1,
+  col = NULL, main = "Autocorrelation Function", xlab = "Period",
+  ylab = "Correlation of Present Shock with Future Outcomes", ylim = NULL,
+  ...)
+}
+\description{
+Plot Autocorrelation Function from Zelig QI object
+}
+\keyword{internal}
+
diff --git a/man/zeligARMAbreakforecaster.Rd b/man/zeligARMAbreakforecaster.Rd
new file mode 100644
index 0000000..9600db6
--- /dev/null
+++ b/man/zeligARMAbreakforecaster.Rd
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-arima.R
+\name{zeligARMAbreakforecaster}
+\alias{zeligARMAbreakforecaster}
+\title{Construct Simulated Series with Internal Discontinuity in X}
+\usage{
+zeligARMAbreakforecaster(y.init = NULL, x, x1, simparam, order, sd, t1 = 5,
+  t2 = 10)
+}
+\description{
+Construct Simulated Series with Internal Discontinuity in X
+}
+\keyword{internal}
+
diff --git a/man/zeligARMAlongrun.Rd b/man/zeligARMAlongrun.Rd
new file mode 100644
index 0000000..35493ca
--- /dev/null
+++ b/man/zeligARMAlongrun.Rd
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-arima.R
+\name{zeligARMAlongrun}
+\alias{zeligARMAlongrun}
+\title{Calculate the Long Run Equilibrium for Fixed X}
+\usage{
+zeligARMAlongrun(y.init = NULL, x, simparam, order, sd, tol = NULL,
+  burnin = 20)
+}
+\description{
+Calculate the Long Run Equilibrium for Fixed X
+}
+\keyword{internal}
+
diff --git a/man/zeligARMAnextstep.Rd b/man/zeligARMAnextstep.Rd
new file mode 100644
index 0000000..69fb3fb
--- /dev/null
+++ b/man/zeligARMAnextstep.Rd
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-arima.R
+\name{zeligARMAnextstep}
+\alias{zeligARMAnextstep}
+\title{Construct Simulated Next Step in Dynamic Series}
+\usage{
+zeligARMAnextstep(yseries = NULL, xseries, wseries = NULL, beta,
+  ar = NULL, i = NULL, ma = NULL, sd)
+}
+\description{
+Construct Simulated Next Step in Dynamic Series
+}
+\keyword{internal}
+
diff --git a/man/zeligArimaWrapper.Rd b/man/zeligArimaWrapper.Rd
new file mode 100644
index 0000000..fed5a54
--- /dev/null
+++ b/man/zeligArimaWrapper.Rd
@@ -0,0 +1,14 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/model-arima.R
+\name{zeligArimaWrapper}
+\alias{zeligArimaWrapper}
+\title{Estimation wrapper function for arima models, to fit easily within the Zelig architecture}
+\usage{
+zeligArimaWrapper(formula, order = c(1, 0, 0), ..., include.mean = TRUE,
+  data)
+}
+\description{
+Estimation wrapper function for arima models, to fit easily within the Zelig architecture
+}
+\keyword{internal}
+
diff --git a/man/zeligBuildWeights.Rd b/man/zeligBuildWeights.Rd
deleted file mode 100644
index c45ee28..0000000
--- a/man/zeligBuildWeights.Rd
+++ /dev/null
@@ -1,47 +0,0 @@
-\name{zeligBuildWeights}
-\alias{zeligBuildWeights}
-\title{Developer Utility Function for Dealing with Observation Weighting}
-\usage{
-  zeligBuildWeights(weights=NULL, repweights=NULL, zeros="zeros", rebuild=FALSE,
-   allowweights=TRUE, allowrepweights=TRUE, data=NULL) 
-}
-\arguments{
-\item{weights}{A set of non-negative value weights.  Overrides repweights if defined.}
-\item{repweights}{A set of whole number (non-negative integer) weights.  Useful if 
-   weights are just for making copies of or deleting certain observations or for
-   frequency weights.}
-\item{zeros}{An option on how to deal with zero valued user supplied weights.
-   Default of "zero" allows zero weights, "epsilon" changes zeroes to 1e-08,
-   "remove" removes those observations from the dataset.}
-\item{rebuild}{An option to allow specified repweights to reconfigure the 
-   rows of the dataset to rebuild a corresponding dataset where every row is
-   of weight 1.  Useful if the analysis model does not accept weights.}  
-\item{allowweights}{Defines if weights are allowed in model.}
-\item{allowrepweights}{Defines if repweights are allowed in model.  Overridden if
-   \code{allowweights=TRUE}.}
-\item{data}{Dataset, required if weights are defined by variable name, or if
-     dataset is to be reconfigured (by \code{rebuild} or \code{zeros} options)}
-}
-\value{
-\item{weights}{A vector of weights of the structure defined by the developer and 
-      required by the analysis model.  Or NULL if certain checks fail.}
-\item{data}{A reconfigured dataset, if modified.}
-}
-\description{
-  The \code{zeligBuildWeights} utility allows developers
-  building models or modules for Zelig, to easily define
-  what types of weights can be set by the user and passed
-  to estimation function.  In some cases it can reconfigure
-  the dataset by duplication to functionally invoke discrete
-  weighting by replication of observations, when the estimator
-  itself can not utilize weights.
-}
-\author{
-  James Honaker \email{jhonaker at iq.harvard.edu}
-}
-\seealso{
-  The full Zelig developer manual may be accessed online at
-  \url{http://gking.harvard.edu/zelig}
-}
-\keyword{weights}
-
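A developer-side sketch of the rebuild path described above; the data and replication weights are invented:

    d <- data.frame(y = c(1.2, 3.4, 5.6), x = c(0, 1, 1))

    # drop row 1, keep row 2 once, duplicate row 3;
    # rebuild = TRUE returns a dataset in which every row has weight 1
    out <- zeligBuildWeights(repweights = c(0, 1, 2),
                             zeros = "remove", rebuild = TRUE, data = d)
    out$weights
    out$data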
diff --git a/man/zeligPlyrMutate.Rd b/man/zeligPlyrMutate.Rd
new file mode 100644
index 0000000..d26d08a
--- /dev/null
+++ b/man/zeligPlyrMutate.Rd
@@ -0,0 +1,13 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/utils.R
+\name{zeligPlyrMutate}
+\alias{zeligPlyrMutate}
+\title{Zelig Copy of plyr::mutate to avoid namespace conflict with dplyr}
+\usage{
+zeligPlyrMutate(.data, ...)
+}
+\description{
+Zelig Copy of plyr::mutate to avoid namespace conflict with dplyr
+}
+\keyword{internal}
+
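It behaves like plyr::mutate; a sketch (the function is tagged internal, so the Zelig::: accessor is an assumption):

    d <- data.frame(x = 1:3)

    # add a column computed from an existing one, as plyr::mutate would
    Zelig:::zeligPlyrMutate(d, y = x * 2)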
diff --git a/po/R-en.po b/po/R-en.po
deleted file mode 100644
index 95d07b9..0000000
--- a/po/R-en.po
+++ /dev/null
@@ -1,524 +0,0 @@
-msgid "describe canned"
-msgstr
-"describe.<<model name>> <- function () {\n"
-"package <- list(name=\"stats\",\n"
-"version=\".9\"\n"
-")\n"
-"\n"
-"# edit the below line to add a description\n"
-"# to this zelig model\n"
-"description <- \"a zelig model\"\n"
-"\n"
-"# edit the below with information about\n"
-"# the model that this zelig module is based on\n"
-"# the citation year may differ than the auto-generated one\n"
-"list(category = \"\",\n"
-"authors = \"<<author>>\",\n"
-"year = <<year>>,\n"
-"description = description,\n"
-"package = package,\n"
-"parameters=list(list())\n"
-")\n"
-"}"
-
-
-msgid "describe how-to"
-msgstr
-"# HOW-TO WRITE A DESCRIBE FUNCTION\n"
-"# ================================\n"
-"# 1. Fill in the \"description\" variable with a *short*\n"
-"#    description of the model. e.g. \"multinomial probit model\"\n"
-"# 2. Fill in the return-value for \"category\"\n"
-"# 3. Fill in the return-value for \"authors\" with either:\n"
-"#    i.  <<author name>>, or\n"
-"#    ii. c(<<author 1>>, <author 2>>, <<author 3>>, ...)\n"
-"# 4. Fill in the return-value for \"year\"\n"
-"# 5. <optional> Fill in the package variable"
-
-
-msgid "describe credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# .<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: describe.<<model>> generates citation information\n"
-"#       for the zelig model <<model>>"
-
-
-msgid "zelig2 canned"
-msgstr
-"zelig2<<model name>> <- function (formula, model, data, M, ...) {\n"
-"# this file acts as an interface to the original model\n"
-"# the return (\"mf\") is a function call that zelig will later invoke\n"
-"# in order to process the specified data set\n"
-"#\n"
-"# any parameters unnecessary to the model\n"
-"# should be set to NULL (e.g. mf$M, mf$robust, etc...)\n"
-"\n"
-"mf <- match.call(expand.dots=T)\n"
-"mf$M <- mf$robust <- NULL\n"
-"mf[[1]] <- <<model function>>\n"
-"mf$model <- NULL\n"
-"mf$data <- data\n"
-"mf$formula <- formula\n"
-"as.call(mf)\n"
-"}"
-
-
-msgid "zelig2 how-to"
-msgstr
-"# 0. the zelig2 function acts as an interface between\n"
-"#    the existing model and the zelig module that is\n"
-"#    being created.\n"
-"# 1. construct a call object containing all the parameters\n"
-"#    passed into \"zelig2<<model name>>\"\n"
-"# 2. remove all parameters that will not make sense within\n"
-"#    the original model (in the demo code, e.g. robust, M, etc.)\n"
-"# 3. re-assign the call object's first entry to the name of the\n"
-"#    model that must be called.\n"
-"#    This step is crucial, as it is how your model invokes the\n"
-"#    pre-existing model's name\n"
-"# 4. attach the data frame (mf$data <- data)\n"
-"# 5. return the call (as a call)\n"
-"#\n"
-"# NOTE: the returned value is going to be evaluated, and -\n"
-"#       as a result - call the pre-existing model.  Any\n"
-"#       parameters passed to the original zelig function\n"
-"#       will be forwarded to the model unless set to NULL\n"
-"#\n"
-"#\n"
-"# NOTE: THIS FUNCTION IS INVOKED BY THE ZELIG FUNCTION\n"
-"#       (NOT THE USER)\n"
-"#\n"
-"# call order: zelig -> zelig2<<model name>>\""
-
-
-msgid "zelig2 credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# zelig2<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: re-interprets parameters passed into zelig as\n"
-"#       legal parameters to pass into <<model function>>"
-
-
-msgid "zelig3 canned"
-msgstr
-"zelig3<<model name>> <- function (res, ...) {\n"
-"class(res) <- c(\"<<model name>>\", class(res))\n"
-"\n"
-"# give it a terms object\n"
-"if (is.null(res$terms))\n"
-"res$terms <- terms(res$call$formula)\n"
-"\n"
-"# return\n"
-"res\n"
-"}"
-
-
-msgid "zelig3 how-to"
-msgstr
-"# HOW-TO WRITE A ZELIG3 FUNCTION\n"
-"# ==============================\n"
-"# 0. zelig3 functions act as in-betweens between\n"
-"#    the zelig2 function and the zelig and sim function.\n"
-"#    That is, after the model runs its initial computations,\n"
-"#    it is often important to cast the result as an object of\n"
-"#    the class which it is named after.  This is crucial to\n"
-"#    ensure that setx invokes the correct method\n"
-"# 1. set the class of the res passed into with the line:\n"
-"#      class(res) <- class(\"<<model name>>\", class(res))\n"
-"# 2. return the obj (with class newly extended)\n"
-"#\n"
-"# NOTE: if the class \"<<model name>>\" is not added, the function\n"
-"#       setx.default will be used, which may have unexpected\n"
-"#       results"
-
-
-msgid "zelig3 credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# zelig3<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: not always necessary, but is useful to cast data\n"
-"#       the result of a zelig2* call into whatever data-type\n"
-"#       we want our setx function to work with"
-
-
-msgid "setx canned"
-msgstr
-"setx.<<model name>> <- function(obj, data=NULL, ...) {\n"
-"# send to default\n"
-"res <- setx.default(obj, ...)\n"
-"\n"
-"# cast as appropriate data-type, then return\n"
-"class(res) <- \"setx.<<model name>>\"\n"
-"\n"
-"# attach data frame here, if the model\n"
-"# requires sophisticated number-crunching\n"
-"# after setx is called\n"
-"# if not, remove the below line\n"
-"if (!is.null(data)) {\n"
-"res$data <- data\n"
-"}\n"
-"\n"
-"res\n"
-"}"
-
-
-msgid "setx how-to"
-msgstr
-"# HOW-TO WRITE A SETX FUNCTION\n"
-"# ============================\n"
-"# 0. For most purposes setx.default will compute\n"
-"#    correctly values of interest, which are needed\n"
-"#    to compute the quantities of interest.  However,\n"
-"#    some models will not provide data in the correct\n"
-"#    fashion, etc. (e.g. computing a covariance matrix\n"
-"#    of the explanatory variables may not make sense\n"
-"#    or be relevant for certain models)\n"
-"# 1. parameters are passed in as a zelig model and potentially\n"
-"#    a new data-set.  The new data-set is used in place of the\n"
-"#    original one that was passed into zelig.  This\n"
-"# 2. the result of the setx function should be of class\n"
-"#    \"setx.<<model name>>\"\n"
-"#    this is important, because it ensures that the correct\n"
-"#    qi function and sim function are called\n"
-"# 3. <optional> the data frame used to compute this setx may\n"
-"#    be attached with the line\n"
-"#      res$data <- data\n"
-"#    if the sim function needs to make further computations\n"
-"#    if this is not the case, please omit that line"
-
-
-msgid "setx credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# setx.<<model name>>\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: produces data based on the explanatory variables\n"
-"#       in the model (set by user, not developer)"
-
-
-msgid "sim canned"
-msgstr
-"sim.<<model name>> <- function(obj,\n"
-"x=NULL,\n"
-"x1=NULL,\n"
-"num=c(1000, 100),\n"
-"prev = NULL,\n"
-"cond.data = NULL, ...\n"
-") {\n"
-"# error-catching\n"
-"if (is.null(x))\n"
-"stop(\"Error: x cannot be NULL\")\n"
-"\n"
-"# simulate qi's for x\n"
-"# invoke qi.<model name>\n"
-"res <- qi.<<model name>>(obj, x=x, x1=x1)\n"
-"\n"
-"# change call name\n"
-"obj$call[[1]] <- as.name(\"sim\")\n"
-"\n"
-"\n"
-"# append\n"
-"res$call <- match.call(expand.dots=T)\n"
-"res$zelig.call <- obj$call\n"
-"res$par <- NA\n"
-"res$obj <- obj\n"
-"\n"
-"# change class so correct summary/print function\n"
-"# can be called\n"
-"class(res) <- \"sim.<<model name>>\"\n"
-"\n"
-"res\n"
-"}"
-
-
-msgid "sim how-to"
-msgstr
-"# HOW-TO WRITE A SIM FUNCTION\n"
-"# ===========================\n"
-"# 0. The sim function invokes the qi function.\n"
-"#    Then, returns the quantities of interests\n"
-"#    alongside a host of other relevant data, that\n"
-"#    is presented along with summary and print\n"
-"# 1. importantly, sim should always have the line:\n"
-"#      res <- qi(obj, x=x, x1=x1)\n"
-"#    this enesure that the qi's are computed with the\n"
-"#    exact parameters that enter the sim function itself\n"
-"# 2. the call to sim and the call should be returned along\n"
-"#    with the quantities of interest (qi.stat) and their\n"
-"#    titles (qi.name)\n"
-"# 3. the returned object should have class type:\n"
-"#     \"sim.<<model name>>:"
-
-
-msgid "sim credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# sim.<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: simulates quantities of interest, then arranges\n"
-"#       the data in an easily interprettable manner.\n"
-"#       invokes qi.<<model>>.R"
-
-
-msgid "sim.setx canned"
-msgstr
-"sim.setx.<<model name>> <- function (obj, x, ...) {\n"
-"# this function exists so that if\n"
-"# sim(obj) and sim(obj, x) should have radically\n"
-"# different behavior, we will be able to place them\n"
-"# in seperate files easily\n"
-"sim.<<model name>>(obj, x, ...)\n"
-"}"
-
-
-msgid "sim.setx how-to"
-msgstr
-"# HOW-TO WRITE A SIM.SETX FILE\n"
-"# ============================\n"
-"# 0. sim.setx functions offer alternative ways\n"
-"#    to simulate quantities of interest.  That is,\n"
-"#    sim functions are called in this fashion\n"
-"#      sim(zelig.out)\n"
-"#\n"
-"#    while sim.setx functs are called as:\n"
-"#      sim(zelig.out, x)\n"
-"#    or\n"
-"#      sim(zelig.out, x, x1)\n"
-"#\n"
-"#    this allows the developer to separate\n"
-"#    the different types of simulation algorithms\n"
-"#    that may exist for his or her model\n"
-"#\n"
-"#    if the model simulates quantities of interest\n"
-"#    identically to that of the standard sim function\n"
-"#    it should then only contain the line:\n"
-"#      sim.<<model name>>(obj, x, ...)\n"
-"#\n"
-"# 1. invoke qi with\n"
-"#      qi(obj, x=x, x=x1)\n"
-"#\n"
-"#    do relevant computation on the return quantities\n"
-"#    of interest, and attach relevant data that needs\n"
-"#    to be passed to print and summary functions"
-
-
-msgid "sim.setx credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# sim.setx.<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: simulates qi's when additional explanatory\n"
-"#       information is provided.  usually simply\n"
-"#       invokes the method specified in sim.<<model>>.R"
-
-
-msgid "summary.sim canned"
-msgstr
-"summary.sim.<<model name>> <- function(obj, ...) {\n"
-"# set important summary objects\n"
-"# zelig models always have qi.stat, and qi.name\n"
-"# elements\n"
-"res <- list(model=\"mprobit\",\n"
-"qi.stat   = obj$qi.stat,\n"
-"qi.name    = obj$qi.name,\n"
-"original   = obj$obj,\n"
-"call       = obj$call,\n"
-"zelig.call = obj$zelig.call\n"
-")\n"
-"\n"
-"# cast as class\n"
-"class(res) <- \"summary.sim.<<model name>>\"\n"
-"\n"
-"res\n"
-"}"
-
-
-msgid "summary.sim how-to"
-msgstr
-"# HOW TO WRITE A SUMMARY.SIM FUNCTION\n"
-"# ===================================\n"
-"# 0. summary.sim functions exclusively return\n"
-"#    a list of important data, *summarizing*\n"
-"#    important features of the result of the sim\n"
-"#    function\n"
-"# 1. like summaries of most objects, the result should\n"
-"#    contain a reference to the call that created it,\n"
-"#    information on the class-type, etc.:\n"
-"#      list(call = obj$call,\n"
-"#           zelig.call = obj$zelig.call,\n"
-"#           ...)\n"
-"# 2. importantly, summary.sim must return a qi.stat\n"
-"#    and a qi.name data object.  the indices of these\n"
-"#    objects must have the same values for consistency\n"
-"#    that is, names(qi.stat) == names(qi.name)\n"
-"#    the return should resemble:\n"
-"#      list(\n"
-"#           qi.stat    = obj$qi.stat,\n"
-"#           qi.name    = obj$qi.name,\n"
-"#           call       = obj$call,\n"
-"#           zelig.call = obj$zelig.call\n"
-"#           ...)"
-
-
-msgid "summary.sim credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# summary.sim.<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: returns a list of data summarizing the sim object\n"
-"#       should always include qi.stat, qi.name entry"
-
-
-msgid "print.summary.sim canned"
-msgstr
-"print.summary.sim.<<model name>> <- function (obj, digits=F, print.x=F, ...) {\n"
-"# prints typically have qi, and qi.names defined as part of the summary object\n"
-"if (is.null(obj$qi.stat) || is.null(obj$qi.name)) {\n"
-"stop(\"Error: \")\n"
-"}\n"
-"\n"
-"# warn if name lists do not match\n"
-"if (any(sort(names(obj$qi.stat)) != sort(names(obj$qi.name)))) {\n"
-"warning(\"warning: quantities of interest do not match its name list\")\n"
-"}\n"
-"\n"
-"print(obj$original)\n"
-"\n"
-"for (key in names(obj$qi.stat)) {\n"
-"# value\n"
-"val <- obj$qi.stat[[key]]\n"
-"\n"
-"# pass-by conditions\n"
-"if (is.na(val) || (is.list(val) && !length(val)) || is.null(val))\n"
-"next\n"
-"\n"
-"# print the title of the qi\n"
-"s <- gsub(\"\\\\s+$\", \"\", obj$qi.name[[key]])\n"
-"message(s)\n"
-"message(rep(\"=\", min(nchar(s), 30)))\n"
-"\n"
-"# print the qi (should be a simple data-type, such as matrix or float)\n"
-"print(val)\n"
-"\n"
-"# line-feed\n"
-"message()\n"
-"}\n"
-"\n"
-"# return invisibly\n"
-"invisible(obj)\n"
-"}"
-
-
-msgid "print.summary.sim how-to"
-msgstr
-"# HOW TO WRITE A PRINT.SUMMARY.SIM FUNCTION\n"
-"# =========================================\n"
-"# 0. print.summary functions typically display the result\n"
-"#    from a summary object (a list) in an organized fashion\n"
-"#    with various text-formatting.\n"
-"# 1. for most purpose the default print function (below) should\n"
-"#    work, however, various formatting, etc. can be added typically\n"
-"#    without any impact on the operation of the program"
-
-
-msgid "print.summary.sim credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# .<<model>>.R\n"
-"# auto-generated by zkeleton, written by Matt Owen\n"
-"# info: print.summary.sim.<<model>>.R outputs summary\n"
-"#       information from the zelig model <<model>>"
-
-
-msgid "qi canned"
-msgstr
-"# @obj:    zelig object\n"
-"# @simpar: parameters passed to the qi\n"
-"# return:  qi list (qi.stat) and qi.names list (qi.name)\n"
-"\n"
-"# NOTE THIS FILE MUST ALWAYS BE EDITED!!!!\n"
-"# IT IS THE MOST IMPORTANT COMPONENT TO\n"
-"# ANY ZELIG MODULE\n"
-"qi.<<model name>> <- function(obj, simpar=NULL, x, x1=NULL, y=NULL) {\n"
-"# initialize values that necessarily must be\n"
-"# returned.\n"
-"qi.stat <- list()\n"
-"qi.name <- list()\n"
-"\n"
-"\n"
-"# add entries to qi.stat and qi.name\n"
-"# in the end, names(qi.stat) should == names(qi.name)\n"
-"# so that printing can be handled by the auto-generated\n"
-"# function\n"
-"\n"
-"# ...\n"
-"\n"
-"\n"
-"# qi computation must be written by the developer,\n"
-"# as it is impossible to tell automatically what is\n"
-"# the statistic of interest (or how to compute it)\n"
-"\n"
-"# ...\n"
-"\n"
-"\n"
-"# compute the quantities of interest\n"
-"# of this model\n"
-"list(qi.stat=qi.stat,\n"
-"qi.name=qi.name\n"
-")\n"
-"}"
-
-
-msgid "qi how-to"
-msgstr
-"# HOW-TO WRITE A QI FILE\n"
-"# ======================\n"
-"# qi functions are the heart of any zelig module.\n"
-"# The qi function is passed information from the setx\n"
-"# function (via x, x1), parameters (simpar), and the\n"
-"# original zelig model (obj or object)\n"
-"# The developer (you) then writes the software that he/she\n"
-"# believes produces a significant quantity of interest.\n"
-"# The result should always be returned in the fashion\n"
-"# list(qi.stat=qi.stat\n"
-"#      qi.name=qi.name\n"
-"#     )\n"
-"# where qi.stat is a list of qi.stat and qi.name have the form\n"
-"# qi.stat <- list(qi.1 = <<qi.1>>,\n"
-"#                 qi.2 = <<qi.2>>,\n"
-"#                 qi.3 = <<qi.3>>,\n"
-"#                 ...\n"
-"#                 )\n"
-"#\n"
-"# qi.name <- list(qi.1 = <<qi.1 name>>,\n"
-"#                 qi.2 = <<qi.2 name>>,\n"
-"#                 qi.3 = <<qi.3 name>>,\n"
-"#                 ...\n"
-"#                 )\n"
-"#\n"
-"# qi.1, qi.2, etc. should be named in an easy to comprehend manner\n"
-"# the indices of qi.stat and qi.name (qi.1, qi.2, etc.) should match,\n"
-"# otherwise a warning will be displayed during the print stage"
-
-
-msgid "qi credit"
-msgstr
-"# @author: <<author>>\n"
-"# @date:   <<date>>\n"
-"# qi.<<model name>>, auto-generated by zkeleton, written by Matt Owen\n"
-"# ===========================\n"
-"# info: produced quantities of interest for zelig model <<model>>"
-
-
diff --git a/tests/MatchIt.R b/tests/MatchIt.R
deleted file mode 100644
index f0de776..0000000
--- a/tests/MatchIt.R
+++ /dev/null
@@ -1,19 +0,0 @@
-library(MatchIt)
-library(Zelig)
-
-data(lalonde)
-
-m <- matchit(
-             treat ~ age + educ + black + hispan + nodegree + married + re74 + re75,
-             data = lalonde,
-             method = "subclass",
-             subclass = 4
-             )
-
-z <- zelig(re78 ~ re74 + re75 + distance, 
-           data = match.data(m, "control"), 
-           model = "ls",
-           by = "subclass"
-           )
-
-# Fin.
diff --git a/tests/amelia.R b/tests/amelia.R
deleted file mode 100644
index 416049c..0000000
--- a/tests/amelia.R
+++ /dev/null
@@ -1,45 +0,0 @@
-library(Zelig)
-library(Amelia)
-
-# Create data set
-
-beta <- c(.3, -10)
-
-.x1 <- runif(1000, -5, 5)
-.x2 <- runif(1000, -2, 2)
-.x3 <- sample(1:4, 1000, TRUE)
-.y <- t(beta %*% rbind(.x1 + rnorm(1000, 0, 1.2), .x2 + rnorm(1000, 0, .1))) + 3 + rnorm(1000, 0, .3)
-
-data.set <- data.frame(y = .y, x1 = .x1, x2 = .x2, x3 = .x3)
-
-# Add missing data
-
-missing.data.percent <- .3
-missing.data.column <- "x1"
-missing.data.rows <- sample(1:nrow(data.set), round(missing.data.percent * nrow(data.set)))
-
-data.set[missing.data.rows, missing.data.column] <- NA
-
-# Impute
-
-imputed.data <- amelia(data.set)
-
-# Remove unused data sets
-
-rm(.y, .x1, .x2, .x3)
-
-# Print amelia obj
-
-imputed.data
-
-# Fit statistical model
-
-z <- zelig(y ~ x1 + x2, model = "ls", data = imputed.data)
-x <- setx(z)
-s <- sim(z, x)
-
-#
-
-summary(s)
-
-# Fin.
diff --git a/tests/by.R b/tests/by.R
deleted file mode 100644
index 3803d54..0000000
--- a/tests/by.R
+++ /dev/null
@@ -1,9 +0,0 @@
-library(Amelia)
-library(Zelig)
-
-data(turnout)
-
-z <- zelig(vote ~ educate + income, model = "logit", by = "race", data = turnout)
-x <- setx(z, educate = 4)
-s <- sim(z, x)
-summary(s)
diff --git a/tests/lognorm.R b/tests/lognorm.R
deleted file mode 100644
index 35926f5..0000000
--- a/tests/lognorm.R
+++ /dev/null
@@ -1,26 +0,0 @@
-library(Zelig)
-# Load the sample data:  
-data(coalition)
-
-# Estimate the model:
-user.prompt()
-z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "lognorm",
-               data = coalition)
-user.prompt()
-# View the regression output:  
-summary(z.out)
-
-# Set the baseline values (with the ruling coalition in the minority)
-# and the alternative values (with the ruling coalition in the majority)
-# for X:
-user.prompt()
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-# Simulate expected values qi$ev and first differences qi$fd:
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-summary(s.out)
-user.prompt()
-plot(s.out)
diff --git a/tests/mi.R b/tests/mi.R
deleted file mode 100644
index e71577e..0000000
--- a/tests/mi.R
+++ /dev/null
@@ -1,9 +0,0 @@
-library(Zelig)
-
-data(turnout)
-
-z <- zelig(vote ~ age, model = "logit", data = mi(turnout[1:10, ], turnout[100:110, ]))
-
-x <- setx(z, age = 90)
-
-s.out1 <- sim(z, x=x, num=20)
diff --git a/tests/mix.R b/tests/mix.R
deleted file mode 100644
index d672f0c..0000000
--- a/tests/mix.R
+++ /dev/null
@@ -1,28 +0,0 @@
-library(Zelig)
-
-# Examples of the internal mix() helper, which appears to build all
-# combinations of the supplied value lists (see the sketch after this file):
-# mix(list('a'))
-# mix(list('a', 'b', 'c'), list(1, 2, 3, 4))
-
-data(turnout)
-
-z1 <- zelig(vote ~ race, model = "logit", data = turnout)
-x1 <- setx(z1, race = "others")
-summary(x1)
-
-z2 <- zelig(vote ~ race, model = "logit", data = turnout)
-x2 <- setx(z2, race = c("white", "others"))
-summary(x2)
-
-z3 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
-x3 <- setx(z3, race = "others", educate = 10:15)
-class(x3)
-summary(x3)
-
-z4 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
-x4 <- setx(z4)
-summary(x4)
-
-# Fin.
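The commented mix() calls at the top of this file suggest that mix() builds every combination of the supplied value lists, which is also what the pooled setx() call for x3 produces. A rough base-R illustration of that behavior (an assumption about what mix() computes, not its actual implementation):

# All 3 x 4 = 12 combinations of two value lists:
expand.grid(first = c("a", "b", "c"), second = 1:4,
            stringsAsFactors = FALSE)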
diff --git a/tests/models-bayes.R b/tests/models-bayes.R
deleted file mode 100644
index 2169a54..0000000
--- a/tests/models-bayes.R
+++ /dev/null
@@ -1,135 +0,0 @@
-library(Zelig)
-
-data(turnout)
-data(mexico)
-data(macro)
-data(sanction)
-
-# mlogit.bayes
-# mlogit.bayes
-# mlogit.bayes
-
-z.out <- zelig(
-               vote88 ~ pristr + othcok + othsocok,
-               model = "mlogit.bayes", 
-               data = mexico
-               )
-x.out <- setx(z.out)
-
-s.out <- sim(z.out, x = x.out)
-
-summary(z.out)
-summary(s.out)
-
-# logit.bayes
-# logit.bayes
-# logit.bayes
-
-names(swiss) <- c("Fert","Agr","Exam","Educ","Cath","InfMort")
-
-z.out <- zelig(
-               vote ~ race + educate,
-               model = "logit.bayes",
-               verbose = FALSE,
-               data  = turnout
-               )
-summary(z.out)
-
-x.out <- setx(z.out)
-x1.out <- setx(z.out, educate = 5)
-
-s.out <- sim(z.out, x.out, x1.out)
-
-summary(s.out)
-
-# normal.bayes
-# normal.bayes
-# normal.bayes
-
-z.out <- zelig(
-               unem ~ gdp + capmob + trade,
-               model = "normal.bayes", 
-               data = macro,
-               verbose=TRUE
-               )
-
-x.out <- setx(z.out)
-x1.out <- setx(z.out, gdp = 10)
-
-s.out <- sim(z.out, x.out, x1.out)
-
-summary(z.out)
-summary(s.out)
-
-sanction$ncost <- factor(sanction$ncost, ordered = TRUE,
-                         levels = c("net gain", "little effect", 
-                         "modest loss", "major loss"))
-
-
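-# oprobit.bayes
-# oprobit.bayes
-# oprobit.bayes
-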
-z.out <- zelig(
-               ncost ~ mil + coop,
-               model = "oprobit.bayes",
-               data = sanction, verbose=FALSE
-               )
-
-x.out <- setx(z.out)
-x1.out <- setx(z.out, coop=3)
-
-s.out <- sim(z.out, x = x.out, num=10000)
-
-summary(z.out)
-summary(s.out)
-
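-# poisson.bayes
-# poisson.bayes
-# poisson.bayes
-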
-z.out <- zelig(
-               num ~ target + coop, 
-               model = "poisson.bayes",
-               data = sanction, 
-               verbose=TRUE
-               )
-
-x.out <- setx(z.out)
-x1.out <- setx(z.out, coop=3)
-
-s.out <- sim(z.out, x.out, x1.out)
-
-summary(z.out)
-summary(s.out)
-
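-# probit.bayes
-# probit.bayes
-# probit.bayes
-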
-z.out <- zelig(
-               vote ~ race + educate,
-               model = "probit.bayes",
-               verbose = FALSE,
-               data  = turnout
-               )
-
-x.out <- setx(z.out)
-x1.out <- setx(z.out, educate = 5)
-
-s.out <- sim(z.out, x.out, x1.out)
-
-summary(s.out)
-
-
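-# factor.bayes
-# factor.bayes
-# factor.bayes
-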
-z.out <- zelig(cbind(Agr,Exam,Educ,Cath,InfMort)~NULL, 
-               model="factor.bayes",
-               data=swiss, factors=2,
-               lambda.constraints=list(Exam=list(1,"+"),
-                                 Exam=list(2,"-"), Educ=c(2,0),
-                                 InfMort=c(1,0)),
-               verbose=TRUE, a0=1, b0=0.15,
-               burnin=5000, mcmc=10000)
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(coef(z.out))
-heidel.diag(coef(z.out))
-raftery.diag(coef(z.out))
-
-## summarizing the output
-summary(z.out)
-
-# setx() and sim() should fail for factor.bayes, since the model has
-# no explanatory variables to set:
-#setx(z.out)
-#sim(z.out)
-
-
-
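A note on the convergence checks above: geweke.diag() returns a z-score per parameter (values within roughly +/-1.96 are consistent with convergence), heidel.diag() runs stationarity and halfwidth tests, and raftery.diag() estimates the chain length needed for stable quantile estimates. A small sketch of acting on the Geweke scores (assumes z.out is the factor.bayes fit above; coda is attached along with the Bayesian models):

g <- geweke.diag(coef(z.out))
names(which(abs(g$z) > 1.96))  # parameters whose chains deserve a closer look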
diff --git a/tests/models-core.R b/tests/models-core.R
deleted file mode 100644
index eef2d6f..0000000
--- a/tests/models-core.R
+++ /dev/null
@@ -1,144 +0,0 @@
-library(Zelig)
-
-data(coalition)
-data(macro)
-data(mid)
-data(tobin)
-data(turnout)
-data(sanction)
-
-# exp
-# exp
-# exp
-
-z.out <- zelig(Surv(duration, ciep12) ~ invest + polar + numst2 + crisis, model = "exp", data = coalition[1:100,])
-
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-s.out <- sim(z.out, x = x.low, x1 = x.high, num = 10)
-
-plot(s.out)
-
-# gamma
-# gamma
-# gamma
-
-z <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
-
-x.low <- setx(z, numst2 = 0)
-x.high <- setx(z, numst2 = 1)
-
-s <- sim(z, x = x.low, x1 = x.high, num = 10)
-
-plot(s)
-
-# logit
-# logit
-# logit
-
-z <- zelig(vote ~ age*educate + race, model = "logit", data = turnout)
-
-x.high <- setx(z, educate = quantile(turnout$educate, probs = 0.75))
-x.low <- setx(z, educate = quantile(turnout$educate, probs = 0.25))
-
-s <- sim(z, x = x.low, x1 = x.high, num = 10)
-
-plot(s)
-
-# ls
-# ls
-# ls
-
-z <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
-
-x.high <- setx(z, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z, trade = quantile(macro$trade, 0.2))
-
-s <- sim(z, x = x.high, x1 = x.low, num = 10)
-
-plot(s)
-
-# negbinom
-# negbinom
-# negbinom
-
-z <- zelig(num ~ target + coop, model = "negbinom", data = sanction)
-
-x <- setx(z)
-
-s <- sim(z, x = x, num = 10)
-
-plot(s)
-
-# normal
-# normal
-# normal
-
-z <- zelig(unem ~ gdp + capmob + trade, model = "normal", data = macro)
-
-x.high <- setx(z, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z, trade = quantile(macro$trade, 0.2))
-
-s <- sim(z, x = x.high, x1 = x.low)
-
-plot(s)
-
-# poisson
-# poisson
-# poisson
-
-z <- zelig(num ~ target + coop, model = "poisson", data = sanction)
-
-x <- setx(z)
-
-s <- sim(z, x = x, num = 10)
-
-plot(s)
-
-# probit
-# probit
-# probit
-
-z <- zelig(vote ~ race + educate, model = "probit", data = turnout)
-
-x.low <- setx(z, educate = quantile(turnout$educate, probs = 0.25))
-x.high <- setx(z, educate = quantile(turnout$educate, probs = 0.75))
-
-s <- sim(z, x = x.low, x1 = x.high, num = 10)
-
-plot(s)
-
-# relogit
-# relogit
-# relogit
-
-z.out1 <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
-                data = mid, model = "relogit",
-                tau = 1042/303772)
-
-z.out2 <- zelig(
-                conflict ~ major + contig + power + maxdem + mindem + years,
-                data = mid,
-                model = "relogit",
-                tau = 1042/303772,
-                case.control = "weighting",
-                robust = TRUE
-                )
-
-x.out1 <- setx(z.out1)
-x.out2 <- setx(z.out2)
-
-s.out1 <- sim(z.out1, x = x.out1, num=10)
-s.out2 <- sim(z.out2, x = x.out2, num=10)
-
-plot(s.out1)
-plot(s.out2)
-
-# tobit
-# tobit
-# tobit
-
-z <- zelig(durable ~ age + quant, data = tobin, model = "tobit")
-x <- setx(z)
-s <- sim(z, x = x, num = 10)
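One argument in the relogit fits above is worth spelling out: tau is the population proportion of events, which the rare-events correction needs and which cannot be recovered from a case-control sample. Here:

tau <- 1042 / 303772  # population share of conflicts, roughly 0.0034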
diff --git a/tests/models-gee.R b/tests/models-gee.R
deleted file mode 100644
index 7065f09..0000000
--- a/tests/models-gee.R
+++ /dev/null
@@ -1,152 +0,0 @@
-library(Zelig)
-
-data(coalition)
-data(turnout)
-data(macro)
-data(sanction)
-
-
-cluster <- c(rep(c(1:62), 5), rep(c(63), 4))
-coalition$cluster <- cluster
-sorted.coalition <- coalition[order(coalition$cluster), ]
-
-z.out <- zelig(duration ~ fract + numst2,
-               id = "cluster",
-               model = "gamma.gee",
-               data = sorted.coalition,
-               corstr = "exchangeable"
-               )
-
-summary(z.out)
-
-#  Setting the explanatory variables at their default values
-#  (mode for factor variables and mean for non-factor variables),
-#  with numst2 set to 0 = minority government and 1 = majority government.
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-# Simulate quantities of interest
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-
-summary(s.out)
-
-# Generate a plot of quantities of interest:
-plot(s.out)
-
-
-
-##  Attaching the sample turnout dataset:
-
-turnout$cluster <- rep(c(1:200),10)
-sorted.turnout <- turnout[order(turnout$cluster),]
-
-z.out1 <- zelig(
-                vote ~ race + educate, model = "logit.gee",
-                id = "cluster",
-                data = sorted.turnout,
-                corstr = "stat_M_dep",
-                Mv = 3
-                )
-summary(z.out1)
-
-x.out1 <- setx(z.out1)
-s.out1 <- sim(z.out1, x = x.out1)
-
-summary(s.out1)
-plot(s.out1)
-
-
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
-
-summary(s.out2)
-plot(s.out2)
-
-#####  Example 3:  Fixed correlation structure
-
-##  User-defined correlation structure
-corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
-diag(corr.mat) <- 1 
-
-##  Generating empirical estimates:
-z.out2 <- zelig(vote ~ race + educate, model = "logit.gee", id = "cluster", 
-	data = sorted.turnout, robust = T, corstr = "fixed", R=corr.mat)
-
-##  Viewing the regression output:
-summary(z.out2)
-
-# NORMAL.GEE
-# NORMAL.GEE
-# NORMAL.GEE
-
-z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.gee", id = "country", data = macro, robust=TRUE, corstr="AR-M", Mv=1)
-summary(z.out)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-x.high <- setx(z.out, trade = quantile(macro$trade, 0.8))
-x.low <- setx(z.out, trade = quantile(macro$trade, 0.2))
-
-# Generate first differences for the effect of high versus low trade on
-# unemployment:
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-summary(s.out)
-
-# Generate a plot of quantities of interest:
-plot(s.out)
-
-# POISSON.GEE
-# POISSON.GEE
-# POISSON.GEE
-
-sanction$cluster <- c(rep(c(1:15), 5), rep(c(16), 3))
-sorted.sanction <- sanction[order(sanction$cluster), ]
-
-z.out <- zelig(num ~ target + coop, model = "poisson.gee", id = "cluster", data = sorted.sanction, robust = TRUE, corstr = "exchangeable")
-summary(z.out)
-
-x.out <- setx(z.out)
-s.out <- sim(z.out, x = x.out)
-
-summary(s.out)
-plot(s.out)
-
-# PROBIT.GEE
-# PROBIT.GEE
-# PROBIT.GEE
-
-turnout$cluster <- rep(c(1:200),10)
-
-z.out1 <- zelig(vote ~ race + educate, model = "probit.gee", id = "cluster", 
-	data = turnout, robust = T, corstr = "stat_M_dep", Mv=3)
-summary(z.out1)
-
-x.out1 <- setx(z.out1)
-s.out1 <- sim(z.out1, x = x.out1)
-
-plot(s.out1)
-
-x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
-
-s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
-
-summary(s.out2)
-
-plot(s.out2)
-
-#####  Example 3:  Fixed correlation structure
-
-##  User-defined correlation structure
-corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
-diag(corr.mat) <- 1 
-
-##  Generating empirical estimates:
-z.out2 <- zelig(vote ~ race + educate, model = "probit.gee", id = "cluster", 
-	data = turnout, robust = T, corstr = "fixed", R=corr.mat)
-
-##  Viewing the regression output:
-summary(z.out2)
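For the corstr = "fixed" fits above, the user-supplied R matrix must match the common cluster size, and the data must be sorted so each cluster's rows are contiguous; with turnout$cluster <- rep(1:200, 10), every cluster has exactly 10 members, matching the 10 x 10 corr.mat. A quick sanity check (sketch):

stopifnot(all(table(sorted.turnout$cluster) == nrow(corr.mat)))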
diff --git a/tests/models-survey.R b/tests/models-survey.R
deleted file mode 100644
index 0beb280..0000000
--- a/tests/models-survey.R
+++ /dev/null
@@ -1,326 +0,0 @@
-library(Zelig)
-
-data(api, package = 'survey')
-data(scd, package = 'survey')
-
-# gamma.survey (1 of 3)
-# gamma.survey (1 of 3)
-# gamma.survey (1 of 3)
-
-# TEST 1
-z.out1 <- zelig(
-                api00 ~ meals + yr.rnd,
-                model   = 'gamma.survey',  
-                weights = ~ pw,
-                data    = apistrat
-                )
-summary(z.out1)
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-x.low
-x.high
-
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-
-plot(s.out1)
-
-# gamma.survey (2 of 3)
-# gamma.survey (2 of 3)
-# gamma.survey (2 of 3)
-
-z.out2 <- zelig(
-                api00 ~ meals + yr.rnd,
-                model = "gamma.survey",  
-                strata=~stype,
-                fpc=~fpc,
-                data = apistrat
-                )
-
-summary(z.out2)
-
-jk1reps <- jk1weights(psu=apistrat$dnum)
-
-# gamma.survey (3 of 3)
-# gamma.survey (3 of 3)
-# gamma.survey (3 of 3)
-
-z.out3 <- zelig(
-                api00 ~ meals + yr.rnd,
-                model = "gamma.survey", 
-		data = apistrat,
-                repweights=jk1reps$weights,
-		type="JK1"
-                )
-
-summary(z.out3)
-
-x.low <- setx(z.out3, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out3, meals= quantile(apistrat$meals, 0.8))
-
-x.low
-x.high
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-
-
-plot(s.out3)
-
-# logit.survey (1 of 3)
-# logit.survey (1 of 3)
-# logit.survey (1 of 3)
-
-data(api, package="survey")
-
-
-# TEST 1
-z.out1 <- zelig(
-                yr.rnd ~ meals + mobility,
-                model = "logit.survey",
-                weights=~pw,
-                data = apistrat
-                )
-summary(z.out1)
-
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-# 
-x.low
-x.high
-
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-
-plot(s.out1)
-
-# logit.survey (2 of 3)
-# logit.survey (2 of 3)
-# logit.survey (2 of 3)
-
-z.out2 <- zelig(
-                yr.rnd ~ meals + mobility,
-                model = "logit.survey",
-                strata=~stype,
-                fpc=~fpc,
-                data = apistrat
-                )
-summary(z.out2)
-
-# logit.survey (3 of 3)
-# logit.survey (3 of 3)
-# logit.survey (3 of 3)
-
-data(scd)
-
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-
-BRRrep <- 2 * cbind(
-                  c(1,0,1,0,1,0),
-                  c(1,0,0,1,0,1),
-                  c(0,1,1,0,0,1),
-                  c(0,1,0,1,1,0)
-                  )
-
-
-z.out3 <- zelig(
-                formula=sued ~ arrests + alive,
-                model = "logit.survey",
-                repweights=BRRrep,
-                type="BRR",
-                data=scd
-                )
-
-summary(z.out3)
-
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-
-# normal.survey (1 of 3)
-# normal.survey (1 of 3)
-# normal.survey (1 of 3)
-
-z.out1 <- zelig(
-                api00 ~ meals + yr.rnd,
-                model = "normal.survey",  
-                weights=~pw,
-                data = apistrat
-                )
-
-summary(z.out1)
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-x.low
-x.high
-
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-
-plot(s.out1)
-
-z.out2 <- zelig(
-                api00 ~ meals + yr.rnd,
-                model = "normal.survey",  
-                strata=~stype,
-                fpc=~fpc,
-                data = apistrat
-                )
-
-summary(z.out2)
-
-# normal.survey (3 of 3)
-# normal.survey (3 of 3)
-# normal.survey (3 of 3)
-
-BRRrep <- 2 * cbind(
-                  c(1,0,1,0,1,0),
-                  c(1,0,0,1,0,1),
-                  c(0,1,1,0,0,1),
-                  c(0,1,0,1,1,0)
-                  )
-
-z.out3 <- zelig(
-                formula=alive ~ arrests,
-                model = "normal.survey", 
-                repweights=BRRrep,
-                type="BRR",
-                data=scd,
-                na.action=NULL
-                )
-
-summary(z.out3)
-
-x.min <- setx(z.out3, arrests = min(scd$arrests))
-x.max <- setx(z.out3, arrests = max(scd$arrests))
-
-x.min
-x.max
-
-s.out3 <- sim(z.out3, x=x.max, x1=x.min)
-
-plot(s.out3)
-
-data(api, package="survey")
-
-# TEST 1
-z.out1 <- zelig(enroll ~ api99 + yr.rnd, model = "poisson.survey", data = apistrat)
-summary(z.out1)
-
-x.low <- setx(z.out1, api99 = quantile(apistrat$api99, 0.2))
-x.high <- setx(z.out1, api99 = quantile(apistrat$api99, 0.8))
-
-x.low
-x.high
-
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-
-plot(s.out1)
-
-
-# TEST 2
-z.out2 <- zelig(
-                enroll ~ api99 + yr.rnd,
-                model = "poisson.survey",
-                data = apistrat, 
-                strata=~stype,
-                fpc=~fpc
-                )
-
-summary(z.out2)
-
-data(scd, package="survey")
-
-BRRrep <- 2 * cbind(
-                c(1,0,1,0,1,0),
-                c(1,0,0,1,0,1),
-                c(0,1,1,0,0,1),
-                c(0,1,0,1,1,0)
-                )
-
-z.out3 <- zelig(
-                alive ~ arrests,
-                model = "poisson.survey", 
-                repweights=BRRrep,
-                type="BRR",
-                data=scd
-                )
-
-summary(z.out3)
-
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-
-x.low
-x.high
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-
-plot(s.out3)
-
-data(api, package="survey")
-
-z.out1 <- zelig(
-                yr.rnd ~ meals + mobility,
-                model = "probit.survey",
-                weights=~pw,
-                data = apistrat
-                )
-
-summary(z.out1)
-
-x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
-
-x.low
-x.high
-
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-
-
-plot(s.out1)
-
-
-# TEST 2
-z.out2 <- zelig(
-                yr.rnd ~ meals + mobility,
-                model = "probit.survey",
-                strata=~stype,
-                fpc=~fpc,
-                data = apistrat
-                )
-
-summary(z.out2)
-
-
-data(scd)
-
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-
-BRRrep <- 2 * cbind(
-                c(1,0,1,0,1,0),
-                c(1,0,0,1,0,1),
-                c(0,1,1,0,0,1),
-                c(0,1,0,1,1,0)
-                )
-
-z.out3 <- zelig(
-                formula=sued ~ arrests + alive,
-                model = "probit.survey", 
-                repweights=BRRrep,
-                type="BRR",
-                data=scd
-                )
-
-summary(z.out3)
-
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-
-x.low
-x.high
-
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
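The BRRrep matrix used throughout this file encodes four balanced half-sample replicates over the six PSUs in scd: each replicate keeps half of the PSUs and doubles their weight, which is why the matrix is 2 times a 0/1 design. A quick check of that structure (sketch):

stopifnot(all(BRRrep %in% c(0, 2)),       # weights are doubled or dropped
          all(colSums(BRRrep > 0) == 3))  # each replicate keeps 3 of 6 PSUs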
diff --git a/tests/plot-ci.R b/tests/plot-ci.R
deleted file mode 100644
index f0b7133..0000000
--- a/tests/plot-ci.R
+++ /dev/null
@@ -1,30 +0,0 @@
-library(Zelig)
-
-data(turnout)
-
-par(mfrow=c(2, 2))
-
-z <- zelig(vote ~ income + educate, model="relogit", data=turnout)
-x <- setx(z, educate=2:8)
-x1 <- setx(z, educate=2:8, income = 10)
-s <- sim(z, x, x1)
-
-plot.ci(s, var="educate")
-
-z <- zelig(vote ~ income + educate, model="logit", data=turnout)
-x <- setx(z, educate=-5:5)
-s <- sim(z, x)
-
-plot.ci(s, var="educate", ylim = c(-2, 1))
-
-z <- zelig(vote ~ income + educate, model="logit", data=turnout)
-x <- setx(z, educate=-5:5)
-s <- sim(z, x)
-
-plot.ci(s, var="educate")
-
-z <- zelig(vote ~ income + educate, model="logit", data=turnout)
-x <- setx(z, educate=12)
-s <- sim(z, x)
-
-plot.ci(s)
diff --git a/tests/pooled.R b/tests/pooled.R
deleted file mode 100644
index 25053ee..0000000
--- a/tests/pooled.R
+++ /dev/null
@@ -1,11 +0,0 @@
-library(Zelig)
-
-data(turnout)
-
-z <- zelig(vote ~ race + educate + age, model = "logit", data = turnout)
-x <- setx(z, educate = 6:7, age = 17)
-s <- sim(z, x, num = 200)
-
-summary(s)
-
-plot(s)
diff --git a/tests/relogit.R b/tests/relogit.R
deleted file mode 100644
index a3abf62..0000000
--- a/tests/relogit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-library(Zelig)
-
-data(mid)
-
-z <- zelig(conflict ~ major + contig + power + maxdem + mindem + years, model = "relogit", tau = 1042/303772, data = mid)
-x <- setx(z)
-s <- sim(z, x)
-
-summary(s)
-
-plot(s)
-
-## weighting + bias correction + robust s.e.
-z <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
-           data = mid, model = "relogit", tau = 1042/303772,
-           case.control = "weighting", robust = TRUE)
-x <- setx(z)
-s <- sim(z, x)
-
-summary(s)
diff --git a/tests/summary.MI.R b/tests/summary.MI.R
deleted file mode 100644
index e49b63c..0000000
--- a/tests/summary.MI.R
+++ /dev/null
@@ -1,13 +0,0 @@
-library(Zelig)
-
-data(turnout)
-
-d1 <- turnout[1:500, ]
-d2 <- turnout[501:1000, ]
-d3 <- turnout[1001:2000, ]
-
-z <- zelig(vote ~ I(educate*income) + educate, model = "logit", data = mi(d1, d2, d3))
-
-summary(z, subset = c(1, 3))
-
-# Fin.
diff --git a/tests/testthat.R b/tests/testthat.R
new file mode 100755
index 0000000..4efd767
--- /dev/null
+++ b/tests/testthat.R
@@ -0,0 +1,5 @@
+library(testthat)
+library(Zelig)
+
+set.seed(123)
+test_check("Zelig")
\ No newline at end of file
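The new testthat files below all follow one pattern: instantiate a Zelig 5 reference-class model and run its built-in Monte Carlo unit test, which returns TRUE on success. To reproduce one interactively with the diagnostic plot, a sketch using the same mcunit() arguments as test-logit.R:

library(Zelig)
z <- zlogit$new()
z$mcunit(minx = -2, maxx = 2, plot = TRUE)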
diff --git a/tests/testthat/test-logit.R b/tests/testthat/test-logit.R
new file mode 100755
index 0000000..3e23ef3
--- /dev/null
+++ b/tests/testthat/test-logit.R
@@ -0,0 +1,3 @@
+z <- zlogit$new()
+test <- z$mcunit(minx=-2, maxx=2, plot=FALSE)
+expect_true(test)
diff --git a/tests/testthat/test-lognom.R b/tests/testthat/test-lognom.R
new file mode 100755
index 0000000..b500680
--- /dev/null
+++ b/tests/testthat/test-lognom.R
@@ -0,0 +1,3 @@
+z <- zlognorm$new()
+test <- z$mcunit(plot=FALSE)
+expect_true(test)
\ No newline at end of file
diff --git a/tests/testthat/test-ls.R b/tests/testthat/test-ls.R
new file mode 100755
index 0000000..d8d7aaf
--- /dev/null
+++ b/tests/testthat/test-ls.R
@@ -0,0 +1,3 @@
+z <- zls$new()
+test <- z$mcunit(plot=FALSE)
+expect_true(test)
\ No newline at end of file
diff --git a/tests/testthat/test-negbin.R b/tests/testthat/test-negbin.R
new file mode 100644
index 0000000..8f21ff1
--- /dev/null
+++ b/tests/testthat/test-negbin.R
@@ -0,0 +1,3 @@
+z <- znegbin$new()
+test <- z$mcunit(plot=FALSE)
+expect_true(test)
\ No newline at end of file
diff --git a/tests/testthat/test-poisson.R b/tests/testthat/test-poisson.R
new file mode 100644
index 0000000..2a3ffe2
--- /dev/null
+++ b/tests/testthat/test-poisson.R
@@ -0,0 +1,3 @@
+z <- zpoisson$new()
+test <- z$mcunit(minx=0, plot=FALSE)
+expect_true(test)
\ No newline at end of file
diff --git a/tests/testthat/test-probit.R b/tests/testthat/test-probit.R
new file mode 100644
index 0000000..75c223b
--- /dev/null
+++ b/tests/testthat/test-probit.R
@@ -0,0 +1,3 @@
+z <- zprobit$new()
+test <- z$mcunit(plot=FALSE)
+expect_true(test)
\ No newline at end of file
diff --git a/tests/twosls.R b/tests/twosls.R
deleted file mode 100644
index 0a65cee..0000000
--- a/tests/twosls.R
+++ /dev/null
@@ -1,22 +0,0 @@
-library(Zelig)
-
-data(klein)
-
-formula <- list(
-                mu1  = C ~ Wtot + P1,
-                mu2  = I ~ P + P1 + K1,
-                mu3  = Wp ~ X + X1 + Tm,
-                inst = ~ P1 + K1 + X1 + Tm + Wg + G
-                )
-
-z <- zelig(formula, model = "twosls", data = klein, cite = FALSE)
-
-x <- setx(z)
-x1 <- setx(z, Wtot = 60)
-
-s <- sim(z, x, x1)
-
-summary(s)
-
-# Plot
-plot(s)
diff --git a/vignettes/Zelig.bib b/vignettes/Zelig.bib
deleted file mode 100644
index 550d1cf..0000000
--- a/vignettes/Zelig.bib
+++ /dev/null
@@ -1,65 +0,0 @@
-@manual{ImaLauKin-gamma11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  { gamma: Gamma Regression for Continuous,
-             Positive Dependent Variables
-           },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
-
-@manual{ImaLauKin-logit11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  { logit: Logistic Regression for Dichotomous Dependent Variables },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
-
-@manual{ImaLauKin-ls11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  {
-            ls: Least Squares Regression for Continuous
-            Dependent Variables
-           },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
-
-
-@manual{ImaLauKin-negbinom11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  {
-            negbinom: Negative Binomial Regression
-            for Event Count Dependent Variables
-           },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
-
-
-@manual{ImaLauKin-normal11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  { normal: Normal Regression for Continuous Dependent Variables },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
-
-
-@manual{ImaLauKin-poisson11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  {
-            poisson: Poisson Regression for Event Count
-            Dependent Variables
-           },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
-
-@manual{ImaLauKin-probit11,
-  author = { Kosuke Imai and Olivia Lau and Gary King},
-  title =  {
-            probit: Probit Regression for
-            Dichotomous Dependent Variables
-           },
-  year =   2011,
-  url =    { http://gking.harvard.edu/zelig }
-}
diff --git a/vignettes/Zelig.sty b/vignettes/Zelig.sty
deleted file mode 100644
index 044d562..0000000
--- a/vignettes/Zelig.sty
+++ /dev/null
@@ -1,33 +0,0 @@
-\usepackage{hyperref}
-\usepackage{Sweave}
-
-\DefineVerbatimEnvironment{Code}{Verbatim}{
-  samepage=TRUE,
-  fontsize=\small
-}
-
-\newcommand{\CiteZelig}[0]{
-  To cite Zelig as a whole, please reference these two sources:
-
-  \begin{verse}
-    Kosuke Imai, Gary King, and Olivia Lau. 2007. ``Zelig: Everyone's
-    Statistical Software,'' \url{http://GKing.harvard.edu/zelig}.
-  \end{verse}
-
-  \begin{verse}
-  Imai, Kosuke, Gary King, and Olivia Lau. (2008). ``Toward A Common Framework for Statistical Analysis and Development.'' Journal of Computational and Graphical Statistics, Vol. 17, No. 4 (December), pp. 892-913. 
-  \end{verse}
-}
-
-
-
-\newcommand{\Sref}[1]{Section~\ref{#1}}
-\newcommand{\hlink}[2]{\href{#2}{#1}}
-\newcommand{\rvers}{2.5}
-\newcommand{\rwvers}{R-2.5.1}
-\newcommand{\fullrvers}{2.5.1}
-\newcommand{\code}[1]{{\tt #1}}
-
-
-
-\usepackage[all]{xy}
diff --git a/vignettes/gk.bib b/vignettes/gk.bib
deleted file mode 100644
index 0359e26..0000000
--- a/vignettes/gk.bib
+++ /dev/null
@@ -1,20130 +0,0 @@
-% A bibtex file for papers by or coauthored with Gary King
-%
-% To add references, first please CHECK that your entry doesn't already
-%      exist in this file and
-%      then add entries only at the end.
-%
-% Use these rules for the reference label:
-%
-% -if one author: use last name and last 2 digits of the year: Tobler79.
-% -if multiple authors, use 1st 3 letters of each of UP TO the first three
-%     authors and the last 2 digits of the year:  KinTomWit00.
-% -if necessary add lower-case letters for multiple entries in a year:  King02, King02b
-%     (the first one should NOT have an 'a' afterwards)
-%
-% -feel free to use the abbreviations at the start, or add to them.
-% -Use authors full names when known.
-%
-% please be sure to commit changes to CVS regularly as a number of
-% people are all using this at the same time.
-
-@STRING{ prq = "Political Research Quarterly"}
-@STRING{ apsr = "American Political Science Review"}
-@STRING{ ajps = "American Journal of Political Science"}
-@STRING{ jop = "Journal of Politics"}
-@STRING{ bjps = "British Journal of Political Science"}
-@STRING{ jleo = "Journal of Law, Economics, and Organization"}
-@STRING{ isa = "Paper presented at the annual meetings of the International Studies Association"}
-@STRING{ apsa = "Paper presented at the annual meetings of the American Political Science Association"}
-@STRING{ cp = "Comparative Politics"}
-@STRING{ io = "International Organization"}
-@STRING{ midwest = "Paper presented at the Annual Meeting of the Midwest Political Science Association"}
-@STRING{ mpsa = midwest}
-@STRING{ southern = "Paper presented at the Annual Meeting of the Southern Political Science Association"}
-@STRING{ icpsr = "Inter-University Consortium for Political and Social Research"}
-@STRING{ jasa = "Journal of the American Statistical Association"}
-@STRING{ lsq = "Legislative Studies Quarterly"}
-@STRING{ isq = "International Studies Quarterly"}
-@STRING{ tas = "The American Statistician"}
-@STRING{ jbes = "Journal of Business \& Economic Statistics"}
-@STRING{ joe = "Journal of Econometrics"}
-@STRING{ wp = "World Politics"}
-@STRING{ cup = "Cambridge University Press"}
-@STRING{ hup = "Harvard University Press"}
-@STRING{ ny = "New York"}
-@STRING{ sv = "Springer Verlag"}
-@STRING{ pup = "Princeton University Press"}
-@STRING{ ucp = "University of California Press"}
-@STRING{ ap = "Academic Press"}
-@STRING{ wb = "The World Bank"}
-@STRING{ eas = "Europe-Asia Studies"}
-@STRING{ jet = "Journal of Economic Theory"}
-@STRING{ jrssA = "Journal of the Royal Statistical Society, A"}
-@STRING{ jrssb = "Journal of the Royal Statistical Society, B"}
-@STRING{ poq = "Public Opinion Quarterly"}
-@STRING{ pnas = "Proceedings of the National Academy of Sciences"}
-@STRING{ ai = "Artificial Intelligence"}
-@STRING{ pa = "Political Analysis"}
-@STRING{ ps = "PS: Political Science and Politics"}
-@STRING{ smr = "Sociological Methods and Research"}
-@STRING{ sim = "Statistics in Medicine"}
-@STRING{ asr = "American Sociological Review"}
-@STRING{ bmj = "British Medical Journal"}
-@STRING{ lan = "Lancet"}
-@STRING{ dem = "Demography"}
-@STRING{ bull = "Bulletin of WHO"}
-@STRING{ ssm = "Social Science and Medicine"}
-@STRING{ mitai = "Artificial Intelligence Laboratory, Massachusetts Institute of Technology"}
-@STRING{ nc = "Neural Computation"}
-
-@article{AbaDruLeb02,
-  author = {Alberto Abadie and David Drukker and Jane Leber
-                  Herr and Guido W. Imbens},
-  title =	 {Implementing Matching Estimators for Average
-                  Treatment Effects in Stata},
-  journal =	 {The Stata Journal},
-  volume =	 1,
-  year =	 2002,
-  pages =	 {1--18},
-  number =	 1
-}
-
-@article{Bin83,
-    author  = {David A. Binder},
-    title   = {On the Variance of Asymptotically Normal Estimators from Complex Surveys},
-    year    = {1983},
-    journal = {International Statistical Review},
-    volume  = {51},
-    number  = {3},
-    pages   = {279--292}
-}
-
-@article{HorTho52,
-  author =	 {D. G. Horvitz and D.J. Thompson},
-  title =	 {A Generalization of Sampling without Replacement
-                  from a Finite Universe},
-  year =	 {1952},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {47},
-  pages =	 {663--685}
-}
-
-@article{AbaGelLev08,
-  author =	 {Kobi Abayomi and Andrew Gelman and Marc Levy},
-  title =	 {Diagnostics for Multivariate Imputations},
-  journal =	 {Applied Statistics},
-  volume =	 {57},
-  number =	 {3},
-  pages =	 {273--291},
-  year =	 {2008}
-}
-
-@misc{AbaImb05,
-  author =	 {Alberto Abadie and Guido Imbens},
-  title =	 {Estimation of the Conditional Variance in Paired
-                  Experiments},
-  year =	 2006,
-  howpublished = {KSG Working Paper},
-  note =
-                  {{http://ksghome.harvard.edu/\~{}.aabadie.academic.ksg/cve.pdf}}
-}
-
-
-@misc{AbaImb09,
-  author =	 {Alberto Abadie and Guido Imbens},
-  title =	 {A Martingale Representation for Matching Estimators},
-  year =	 2009,
-  howpublished = {IZA Discussion Papers number 4073},
-  note = {{http://ftp.iza.org/dp4073.pdf}}
-}
-
-
-
-@article{Imb00,
-  author =	 {Guido Imbens},
-  title = {The role of the propensity score in estimating dose-response functions},
-  year =	 {2000},
-  journal = {Biometrika},
-  pages = {706--710},
-  volume = {87},
-  issue = {3}
-}
-
-@article{AbaImb07b,
-  author =	 {Alberto Abadie and Guido Imbens},
-  title =	 {On the Failure of the Bootstrap for Matching Estimators},
-  year =	 {2007},
-  journal = {Econometrica},
-  pages = {1537--1557},
-  volume = {76},
-  issue = {6}
-}
-
-@article{AbaImb06,
-  author =	 {Abadie, Alberto and Imbens, Guido W.},
-  title =	 {Large Sample Properties of Matching Estimators for
-                  Average Treatment Effects},
-  journal =	 {Econometrica},
-  volume =	 {74},
-  year =	 {2006},
-  pages =	 {235--267},
-  number =	 {1}
-}
-
-@inproceedings{Abu-Mostafa92,
-	author={Y. Abu-Mostafa},
-	title={A Method for Learning from Hints},
-	booktitle={Advances in Neural Information Processing Systems 5},
-	year={1992},
-	publisher={Morgan Kaufmann Publishers},
-	address={San Mateo, CA},
-	editor={S. J. Hanson and Jack D. Cowan and C. Lee Giles}
-}
-
-@book{Achen86,
-	author={Christopher Achen},
-	title={Statistical Analysis of Quasi-experiments},
-	publisher={University of California Press},
-	year={1986},
-	address={Berkeley}
-}
-
-@techreport{AdaCoaRue00,
-	author={Michelle Adato and David Coady and Marie Ruel},
-	title={An Operations Evaluation of Progresa from the Perspective of Beneficiaries,
-		Promotoras, School Directors and Health Staff},
-	institution={International Food Policy Research Institute},
-	year={2000},
-	month={August},
-	type={Final Report},
-	address={2033 K Street, NW Washington, DC 20006}
-}
-
-@article{AdaGla05,
-	author={Adamic, L.A. and Glance, N.},
-	title={{The political blogosphere and the 2004 US election: divided they blog}},
-	journal={Proceedings of the 3rd international workshop on Link discovery},
-	year={2005},
-	pages={36--43},
-	publisher={ACM Press New York, NY, USA}
-}
-
-@article{AgoDyn04,
-	author={Roberto Agodini and Mark Dynarski},
-	title={Are experiments the only option? {A} look at dropout prevention programs},
-	journal={Review of Economics and Statistics},
-	volume= 86,
-	year= 2004,
-	pages={180-194},
-	month={February},
-	number= 1
-}
-
-@unpublished{AgrRajSri03,
-	author={Rakesh Agrawal and Sridhar Rajagopalan and Ramakrishnan Srikant and Yirong
-		Xu},
-	title={Mining Newsgroups Using Networks Arising from Social Behavior},
-	note={IBM Almaden Research Center, 650 Harry Rd., San Jose, CA 95120},
-	year={2003},
-	month={May}
-}
-
-@book{Aitchison86,
-	author={J. Aitchison},
-	title={The Statistical Analysis of Compositional Data},
-	publisher={Chapman and Hall},
-	year= 1986,
-	address={London}
-}
-
-@article{Albert88,
-	author={James H. Albert},
-	title={Computational Methods Using a Bayesian Hierarchical Generalized Linear Model},
-	journal={Journal of the American Statistical Association},
-	volume={83},
-	year={1988},
-	pages={1037-1044},
-	month={December},
-	number={404}
-}
-
-@article{AldMcK77,
-	author={John H. Aldrich and Richard D. McKelvey},
-	title={A Method of Scaling With Applications to the 1968 and 1972 Presidential
-		Elections},
-	journal= apsr,
-	volume= 71,
-	year= 1977,
-	pages={111-130},
-	month={March}
-}
-
-@article{AleTab90,
-	author={Alberto Alesina and Guido Tabellini},
-	title={A Positive Theory of Fiscal Deficits and Government Debt},
-	journal={The Review of Economic Studies},
-	volume={57},
-	year={1990},
-	pages={403-414},
-	month={July},
-	number={3}
-}
-
-@article{Alho00,
-	author={J. M. Alho},
-	title={Discussion},
-	journal={North American Actuarial Journal},
-	volume= 4,
-	year= 2000,
-	pages={91--93},
-	number= 1
-}
-
-@article{Alho92,
-	author={J. M. Alho},
-	title={{Comment on ``Modeling and Forecasting U.S. Mortality'' by R. Lee and L.
-		Carter}},
-	journal= jasa,
-	volume= 87,
-	year= 1992,
-	pages={673--674},
-	month={September},
-	number= 419
-}
-
-@article{AlSaCr76,
-	author={James Alt and Bo Sarlvik and Ivor Crewe},
-	title={Individual Differences Scaling and Group Attitude Structures: British Party
-		Imagery in 1974},
-	journal={Quality and Quantity},
-	volume= 10,
-	year= 1976,
-	pages={297--320},
-	month={October}
-}
-
-@book{AltGilMcD03,
-	author={Micah Altman and Jeff Gill and Michael P. McDonald},
-	title={Numerical Issues in Statistical Computing for the Social Scientist},
-	publisher={John Wiley and Sons},
-	year= 2003,
-	address={New York}
-}
-
-@article{Altman85,
-	author={Douglas G. Altman},
-	title={Comparability of Randomised Groups},
-	journal={The Statistician},
-	volume={34},
-	year={1985},
-	pages={125-136},
-	number={1}
-}
-
-@article{Altman98,
-	author={Douglas G. Altman and Jonathan J. Deeks and David L. Sackett},
-	title={Odds Ratios Should be Avoided When Events are Common},
-	journal={British Medical Journal},
-	volume= 317,
-	year= 1998,
-	pages= 1318,
-	month={Nov. 7}
-}
-
-@article{AltMcD03,
-	author={Micah Altman and Michael P. McDonald},
-	title={Replication with Attention to Numerical Accuracy},
-	journal={Political Analysis},
-	volume={11},
-	year={2003},
-	pages={302-307},
-	number={3}
-}
-
-@article{AltRub70,
-	author={Robert P. Althauser and Donald B. Rubin},
-	title={The computerized construction of a matched sample},
-	journal={American Journal of Sociology},
-	volume= 76,
-	year= 1970,
-	pages={325-346},
-	month={September}
-}
-
-@article{AlvBre95,
-	author={Michael R. Alvarez and John Brehm},
-	title={American Ambivalence Toward Abortion Policy: A Heteroskedastic probit Method
-		for Assessing Conflicting Values},
-	journal={American Journal of Political Science},
-	volume={39},
-	year={1995},
-	pages={1055-82},
-	month={November}
-}
-
-@article{AlvBre97,
-	author={Michael R. Alvarez and John Brehm},
-	title={Are Americans Ambivalent Towards Racial Policies},
-	journal={American Journal of Political Science},
-	volume={41},
-	year={1997},
-	pages={345-374},
-	month={April},
-	number={2}
-}
-
-@article{AlvGarLan91,
-	author={Michael R.\ Alvarez and Geoffrey Garrett and Peter Lange},
-	title={Government Partisanship, Labor Organization, and Macroeconomic Performance},
-	journal= apsr,
-	volume= 85,
-	year= 1991,
-	pages={539--556}
-}
-
-@article{AmoMccZim97,
-	author={A.F. Amos and D.J. McCarty and P. Zimmet},
-	title={The Rising Global Burden of Diabetes and its Complications: Estimates and
-		Projections to the Year 2010},
-	journal={Diabetic Medicine},
-	volume= 14,
-	year= 1997,
-	pages={S7--S85}
-}
-
-@book{AndBasHum83,
-	author={Andy B. Anderson and Alexander Basilevsky and Derek P.J. Hum},
-	title={Missing Data: A Review of the Literature},
-	publisher={Academic Press, Inc},
-	year={1983},
-	editor={Peter H. Rossi and James D. Writght and Andy B. Anderson}
-}
-
-@article{AndGib06,
-	author={Krister Andersson and Clark C. Gibson},
-	title={Decentralized Governance and Environmental Change: Local Institutional Moderation
-		of Deforestation in Bolivia},
-	journal={Journal of Policy Analysis and Management},
-	volume={26},
-	year={2006},
-	pages={99-123},
-	number={1}
-}
-
-@article{AndGreMcc05,
-	author={Richard G. Anderson and William H. Greene and B.D. McCullough and H.D. Vinod},
-	title={The Role of Data \& Program Code Archives in the Future of Economic Research},
-	year= 2005,
-	month={July},
-	note={Federal Reserve Bank of St. Louis Research Division}
-}
-
-@article{Andrews91,
-	author={Donald W.K. Andrews},
-	title={Heteroskedasticity and Autocorrelation Consistent Covariance Matrix Estimation},
-	journal={Econometrica},
-	volume={59},
-	year={1991},
-	pages={817--858},
-	month={May},
-	number={3}
-}
-
-@article{AndZom01,
-	author={A.S. Andreou and G.A. Zombanakis},
-	title={A Neural Network Measurement of Relative Military Security--The Case of
-		Greece and Cyprus},
-	journal={Defence and Peace Economics},
-	volume= 12,
-	year= 2001,
-	pages={303--324},
-	number= 4,
-	annote={have not read primary source. looks promising, given secondary source comments:
-		all input variables are financial and the output variable--relative security--is
-		population/demographics based. Arms race scenarios are simulated by increasing
-		and decreasing financial covariates.}
-}
-
-@article{AngAngFro94,
-	author={G. De Angelis and R. De Angelis and L. Frova and A. Verdecchia},
-	title={MIAMOD: A Computer Package to Estimate Chronic Disease Morbidity Using Mortality
-		and Survival Data},
-	journal={Computer Methods and Programs in Biomedicine},
-	volume= 44,
-	year= 1994,
-	pages={99--107}
-}
-
-@article{AngImb95,
-	author={Joshua D. Angrist and Guido W. Imbens},
-	title={Two-Stage Least Squares Estimation of Average Causal Effects in Models with Variable
-		Treatment Intensity},
-	journal={Journal of the American Statistical Association},
-	volume={90},
-	year={1995},
-	pages={431-442},
-	month={June},
-	number={430}
-}
-
-@article{AngImbRub96,
-	author={Angrist, Joshua D. and Imbens, Guido W. and Rubin, Donald B.},
-	title={Identification of Causal Effects Using Instrumental Variables (with discussion)},
-	journal={Journal of the American Statistical Association},
-	volume={91},
-	year={1996},
-	pages={444--455},
-	optnumber={434}
-}
-
-@article{Angress59,
-	author={Werner T. Angress},
-	title={The Political Role of the Peasantry in the Weimar Republic},
-	journal={The Review of Politics},
-	volume= 21,
-	year= 1959,
-	pages={530--549},
-	number= 3
-}
-
-@unpublished{AnkBlaCol99,
-  author =	 {Martha Anker and Robert E. Black and Christopher
-                  Coldham and Henry D. Kalter and Maria A. Quigley and
-                  David Ross and Robert W. Snow},
-  title =	 {A Standard Verbal Autopsy Method for Investigating
-                  Causes of Death in Infants and Children},
-  note =	 {World Health Organization, Department of
-                  communicable Disease Surveillance and Response},
-  year =	 {1999},
-  journal =	 {World Health Organization}
-}
-
-@book{Anker03,
-  author =	 {Martha Anker},
-  title =	 {Investigating Cause of Death During an Outbreak of
-                  Ebola Virus Haemorrhagic Fever: Draft Verbal Autopsy
-                  Instrument},
-  publisher =	 {World Health Organization},
-  year =	 2003,
-  address =	 {Geneva}
-}
-
-@article{Anker97,
-  author =	 {Martha Anker},
-  title =	 {The Effect of Misclassification Error on Reported
-                  Cause-Specific Mortality Fractions from Verbal
-                  Autopsy},
-  journal =	 {International Journal of Epidemiology},
-  volume =	 {26},
-  year =	 {1997},
-  pages =	 {1090-1096}
-}
-
-@article{AppBosGra96,
-  author =	 {A. Appels, et al},
-  title =	 {Self-Rated Health and Mortality in a Lithuanian and
-                  Dutch Population},
-  journal =	 {Social Science and Medicine},
-  volume =	 42,
-  year =	 1996,
-  pages =	 {{681-89}},
-  number =	 5
-}
-
-@techreport{Arendt03,
-  author =	 {Jacob N. Arendt},
-  title =	 {Social gradients in self-rated health in Denmark -
-                  gender differences and health risk factors in
-                  dynamic context},
-  institution =	 {AKF, Institute of Local Government Studies},
-  year =	 2003,
-  month =	 {May},
-  address =	 {Nyropsgade 37, 1602 Copenhagen V, Denmark}
-}
-
-@book{Arendt73,
-  author =	 {Arendt, Hannah},
-  title =	 {The Origins of Totalitarianism},
-  publisher =	 {Harcourt Brace Jovanovich},
-  year =	 1973,
-  address =	 {New York}
-}
-
-@incollection{Armstrong01,
-  author =	 {J. Scott Armstrong},
-  title =	 {Extrapolation of Time Series and Cross-Sectional
-                  Data},
-  booktitle =	 {Principles of Forecasting: A Handbook for
-                  Researchers and Practitioners},
-  publisher =	 {Kluwer},
-  year =	 2001,
-  editor =	 {J. Scott Armstrong},
-  pages =	 {217--243}
-}
-
-@unpublished{Ashworth01,
-  author =	 {Scott Ashworth},
-  title =	 {Reputational Dynamics and Congressional Careers},
-  note =	 {Harvard University},
-  year =	 2001,
-  annote =	 {introduce the single crossing property in political
-                  science}
-}
-
-@article{AssPocEno00,
-  author = {Susan F. Assmann and Stuart J. Pocock and Laura E. Enos
-                  and Linda E. Kasten},
-  title =	 {Subgroup analysis and other (mis)uses of baseline
-                  data in clinical trials},
-  journal =	 {The Lancet},
-  volume =	 {355},
-  year =	 {2000},
-  pages =	 {1064-1069},
-  month =	 {March}
-}
-
-@article{AusMam06,
-	author={Peter C. Austin and Muhammad M. Mamdani},
-	title={A comparison of propensity score methods: A case-study estimating the effectiveness
-		of post-AMI statin use},
-	journal={Statistics in Medicine},
-	volume={25},
-	year={2006},
-	pages={2084-2106}
-}
-
-@article{AusMamStu05,
-	author={Peter C. Austin and Muhammad M. Mamdani and Therese A. Stukel and Geoffrey
-		M. Anderson and Jack V. Tu},
-	title={The use of the propensity score for estimating treatment effects: {A}dministrative
-		versus clinical data},
-	journal={Statistics in Medicine},
-	volume={24},
-	year={2005},
-	pages={1563-1578}
-}
-
-@article{AvlSchDav98,
-	author={Kirsten Avlund and Kirsten Schultz-Larsen and Michael Davidson},
-	title={Tiredness in Daily Activities at Age 70 as a Predictor of Mortality During
-		the Next 10 Years},
-	journal={Journal of Clinical Epidemiology},
-	volume= 51,
-	year= 1998,
-	pages={{323-33}}
-}
-
-@article{BacKin04,
-	author={Bachrach, Christine A. and King, Roslind B.},
-	title={{Data Sharing and Duplication: Is There a Problem?}},
-	journal={Archives of Pediatric and Adolescent Medicine},
-	volume= 158,
-	year={2004},
-	month={September},
-	number= 9
-}
-
-@article{BagHopMas02,
-	author={A. Bagust and P.K. Hopkinson and L. Maslove and C.J. Currie},
-	title={The Projected Health Care Burden of Type 2 Diabetes in the UK from 2000
-		to 2060},
-	journal={Diabetic Medicine},
-	volume= 19,
-	year= 2002,
-	pages={1--5},
-	number= 4
-}
-
-@book{Balderston02,
-	author={Theo Balderston},
-	title={Economics and Politics in the Weimar Republic},
-	publisher={Cambridge University Press},
-	year= 2002,
-	address={Cambridge}
-}
-
-@article{BanBan92,
-	author={A.T. Bang and R.A. Bang and the SEARCH team},
-	title={Diagnosis of causes of childhood deaths in developing countries by verbal
-		autopsy: suggested criteria},
-	journal={Bulletin of the World Health Organization},
-	volume={70},
-	year={1992},
-	pages={499-507},
-	number={4}
-}
-
-@article{BaqBlaAri98,
-	author={A.H. Baqui and R.E. Black and S.E. Arifeen and K. Hill and S.N. Mitra and
-		A.Al Sabir},
-	title={Causes of childhood deaths in Bangladesh: results of a nationwide verbal
-		autopsy study},
-	journal={Bulletin of the World Health Organization},
-	volume={76},
-	year={1998},
-	pages={161},
-	number={2}
-}
-
-@article{BarFraHil03,
-	author={John Barnard and Constantine E. Frangakis and Jennifer L. Hill and Donald
-		B. Rubin},
-	title={{Principal Stratification Approach to Broken Randomized Experiments: A Case
-		Study of School Choice Vouchers in New York City.}},
-	journal={Journal of the American Statistical Association},
-	volume={98},
-	year={2003},
-	pages={299-324},
-	number={462}
-}
-
-@book{Barkai90,
-	author={Barkai, Avram},
-	title={Nazi Economics: Ideology, Theory, and Policy},
-	address={Oxford},
-	publisher={Berg Press},
-	year={1990}
-}
-
-@article{BarPonCor00,
-	author={Ivana C. H. C. Barr{\^e}to and L{\'i}gia Kerr Pontes and e Luciano Corr{\^e}a},
-	title={Vigil{\^a}ncia de {\'o}bitos infantis em sistemas locais de sa{\'u}de: avalia{\c{c}}{\~a}o da
-		aut{\'o}psia verbal e das informa{\c{c}}{\~o}es de agentes de sa{\'u}de},
-	journal={Rev Panam Salud Publica / Pan Am Journal of Public Health},
-	volume={7},
-	year={2000},
-	pages={303-312},
-	number={5}
-}
-
-@article{Bartels96,
-	author={Bartels, Larry M.},
-	title={Uninformed Votes: Information Effects in Presidential Elections},
-	journal={American Journal of Political Science},
-	volume= 40,
-	year= 1996,
-	pages={194--230}
-}
-
-@unpublished{Bartels98,
-	author={Larry Bartels},
-	title={Panel Attrition and Panel Conditioning in American National Election Studies},
-	note={Paper prepared for the 1998 meetings of the Society for Political Methodology,
-		San Diego},
-	year={1998}
-}
-
-@article{BasEst01,
-	author={S.A. Bashir and J. Esteve},
-	title={Projecting Cancer Incidence and Mortality Using Bayesian Age-Period-Cohort
-		Models},
-	journal={Journal of Epidemiology and Biostatistics},
-	volume= 6,
-	year= 2001,
-	pages={287--296},
-	number= 3
-}
-
-@unpublished{BatFerHab06,
-	author={Robert Bates and Karen Feree and James Habyarimana and Macartan Humphreys
-		and Smita Singh},
-	title={The Africa Research Program},
-	note={{http://africa.gov.harvard.edu}},
-	year= 2006
-}
-
-@article{Bath03,
-	author={Peter A. Bath, PhD},
-	title={Differences Between Older Men and Women in the Self-Rated Health-Mortality
-		Relationship},
-	journal={The Gerontologist},
-	volume= 43,
-	year= 2003,
-	pages={{387-95}}
-}
-
-@article{Baum88,
-	author={Lawrence Baum},
-	title={Measuring Policy Change in the U.S. Supreme Court},
-	journal= apsr,
-	volume= 82,
-	year= 1988,
-	pages={905--912},
-	month={September},
-	number= 3
-}
-
-@article{BeaMei89,
-	author={Michael L. Beach and Paul Meier},
-	title={Choosing Covariates in the Analysis of Clinical Trials},
-	journal={Controlled Clinical Trials},
-	volume={10},
-	year={1989},
-	pages={161S-175S}
-}
-
-@incollection{Bearce00,
-	author={David Bearce},
-	title={Economic Sanctions and Neural Networks: Forecasting Effectiveness and Reconsidering
-		Cooperation},
-	booktitle={Political Complexity: Non Linear Models of Politics},
-	publisher={University of Michigan Press},
-	year= 2000,
-	address={Ann Arbor, MI},
-	editor={Diana Richards},
-	pages={269--295},
-	annote={asks whether real-world forecasting needs make NN preferable to traditional
-		(and linear) analysis. Looks at effectiveness of sanctions, using about
-		100 quantitative cases first examined in 1980s. NNs are shown to forecast
-		twice as well as traditional methods.}
-}
-
-@book{BecChaWil88,
-	author={Richard A. Becker and John M. Chambers and Allan R. Wilks},
-	title={The New S Language},
-	publisher={Wadsworth},
-	year={1988},
-	address={New York}
-}
-
-@article{BecIch02,
-	author={Sascha O. Becker and Andrea Ichino},
-	title={Estimation of average treatment effects based on propensity scores},
-	journal={The Stata Journal},
-	volume= 2,
-	year= 2002,
-	pages={358-377},
-	number= 4
-}
-
-@article{BecJac98,
-	author={Nathaniel Beck and Simon Jackman},
-	title={Beyond Linearity by Default: Generalized Additive Models},
-	journal= ajps,
-	volume= 42,
-	year= 1998,
-	pages={596--627},
-	month={April},
-	number= 2
-}
-
-@article{BecKat95,
-	author={Nathaniel Beck and Jonathan Katz},
-	title={``What to Do (and Not to Do) with Time-Series-Cross-Section Data''},
-	journal= apsr,
-	volume= 89,
-	year= 1995,
-	pages={634--647}
-}
-
-@article{BecKat96,
-	author={Nathaniel Beck and Jonathan Katz},
-	title={Nuisance vs. Substance: Specifying and Estimating Time-Series-Cross-Section
-		Model},
-	journal= pa,
-	volume={VI},
-	year= 1996,
-	pages={1--36}
-}
-
-@article{BecKatTuc98,
-	author={Nathaniel Beck and Jonathan Katz and Richard Tucker},
-	title={Taking Time Seriously: Time-Series-Cross-Section Analysis with a Binary
-		Dependent Variable},
-	journal= apsr,
-	volume= 42,
-	year= 1998,
-	pages={1260-1288}
-}
-
-@article{BedChrJoh96,
-	author={Edward J. Bedrick and Ronald Christensen and Wesley Johnson},
-	title={A New Perspective on Priors for Generalized Linear Models},
-	journal={Journal of the American Statistical Association},
-	volume={91},
-	year={1996},
-	pages={1450-1460},
-	number={436}
-}
-
-@techreport{BehTod99,
-	author={Jere R. Behrman and Petra E. Todd},
-	title={Randomness in the Experimental Samples of Progresa (Education, Health, and
-		Nutrition Program)},
-	institution={International Food Policy Research Institute},
-	year={1999},
-	month={March},
-	type={Research Report},
-	address={2033 K Street, NW Washington, DC 20006}
-}
-
-@article{Bell97,
-	author={W.R. Bell},
-	title={{Comparing and Assessing Time Series Methods for Forecasting Age-Specific
-		Fertility and Mortality Rates}},
-	journal={Journal of Official Statistics},
-	volume= 13,
-	year= 1997,
-	pages={279--303},
-	number= 3
-}
-
-@article{Bello93,
-	author={Abdul Lateef Bello},
-	title={Choosing Among Imputation Techniques for Incomplete Multivariate Data: A
-		Simulation Study},
-	journal={Communications in Statistics A: Theory and Methods},
-	volume={22},
-	year={1993},
-	pages={853-877},
-	number={3}
-}
-
-@article{BelMon91,
-	author={W.R. Bell and B.C. Monsell},
-	title={Using Principal Components in time Series modeling and Forecasting of Age-Specific
-		Mortality Rates},
-	journal={Proceedings of the American Statistical Association, Social Statistics Section},
-	year= 1991,
-	pages={154--159}
-}
-
-@article{Beltrami1873,
-	author={E. Beltrami},
-	title={Sulle funzioni bilineari},
-	journal={Giornale di Matematiche ad Uso degli Studenti Delle Universit{\'a}},
-	volume= 11,
-	year= 1873,
-	pages={98--106},
-	note={{An English translation by D. Boley is available as University of Minnesota,
-		Department of Computer Science, Technical Report 90-37, 1990}}
-}
-
-@article{BenBluLus03,
-	author={Yael Benyamini, et al.},
-	title={Gender differences in the self-rated health-mortality association: Is it
-		poor self-rated health that predicts mortality or excellent self-rated
-		health that predicts survival?},
-	journal={The Gerontologist},
-	volume={43},
-	year={2003},
-	pages={{396-405}},
-	number={3}
-}
-
-@incollection{Bendix53,
-	author={Bendix, Reinhard},
-	title={Social Stratification and Political Power},
-	booktitle={Class Status and Power},
-	publisher={The Free Press},
-	year= 1953,
-	address={Glencoe, IL},
-	editor={Bendix, Reinhard and Lipset, Seymour Martin}
-}
-
-@article{BenHumEbe04,
-	author={Maureen Reindl Benjamins},
-	title={Self-Reported Health and Adult Mortality Risk: An Analysis of Cause Specific
-		Mortality},
-	journal={Social Science and Medicine (Forthcoming 2004)}
-}
-
-@article{benichou95,
-	author={J. Benichou and M. Gail},
-	title={Methods of Inference for Estimates of Absolute Risk Derived From Population-Based
-		Case-Control Studies},
-	journal={Biometrics},
-	volume= 51,
-	year= 1995,
-	pages={182-194}
-}
-
-@article{BenIdl99,
-	author={Yael Benyamini, and Ellen Idler},
-	title={Community Studies Reporting Association Between Self-Rated Health and Mortality},
-	journal={Research on Aging},
-	volume= 21,
-	year= 1999,
-	pages={{392-401}},
-	number= 3
-}
-
-@article{BenIdlLev00,
-	author={Yael Benyamini, Ellen Idler, Howard Leventhal, and Elaine A. Leventhal},
-	title={Positive-Affect and Function as Influences on Self-Assessments of Health:
-		Expanding our View Beyond Illness and Disability},
-	journal={Journal of Gerontology: Psychological Sciences},
-	volume={{55B}},
-	year= 2000,
-	pages={{P107-116}}
-}
-
-@article{BenLav03,
-	author={Kenneth Benoit and Michael Laver},
-	title={Estimating Irish party policy positions using computer wordscoring: the
-		2002 election - a research note},
-	journal={Irish Political Studies},
-	volume={18},
-	year={2003},
-	pages={97--107},
-	number={1}
-}
-
-@article{BenLevLev00,
-	author={Yael Benyamini, et al},
-	title={Gender Differences in Processing Information for Making Self-Assessments
-		of Health },
-	journal={American Psychosomatic Society},
-	volume= 62,
-	year= 2000,
-	pages={{354-64}},
-	number= 2
-}
-
-@article{BenLevLev99,
-	author={Yael Benyamini, Elaine A. Leventhal, and Howard Leventhal},
-	title={Self-Assessments of Health. What Do People Know that Predicts their Mortality?},
-	journal={Research on Aging},
-	volume= 21,
-	year= 1999,
-	pages={{477-500}},
-	month={{May}},
-	number= 3
-}
-
-@article{BenLip59,
-	author={Bendix, Reinhard and Lipset, Seymour Martin},
-	title={On the Social Structure of Western Societies: Some Reflections on Comparative
-		Analysis},
-	journal={Berkeley Journal of Sociology},
-	volume= 5,
-	year= 1959,
-	pages={1-15}
-}
-
-@article{BenSin99,
-	author={S.K. Benara and Padam Singh},
-	title={Validity of Causes of Infant Death by Verbal Autopsy},
-	journal={Indian Journal of Pediatrics},
-	volume={66},
-	year={1999},
-	pages={647-650}
-}
-
- at article{BerdeG47,
-	author={Berelson, B. and de Grazia, S.},
-	title={{Detecting Collaboration in Propaganda}},
-	journal={Public Opinion Quarterly},
-	volume={11},
-	year={1947},
-	pages={244--253},
-	number={2}
-}
-
- at proceedings{BerDegLin88,
-	editor={J. M. Bernardo and M.H. Degroot and D.V. Lindley and A.F.M. Smith},
-	title={Bayesian Statistics 3},
-	publisher={Clarendon Press},
-	address={Oxford},
-	year={1988},
-	organization={Proceedings of the Third Valencia International Meeting, June 1--5, 1987}
-}
-
- at article{Berenson04,
-	author={Robert Berenson},
-	title={The Medicare Chronic Care Improvement Program},
-	journal={The Urban Institute},
-	year={2004},
-	month={May},
-	note={{http://www.urban.org/url.cfm?ID=900714}}
-}
-
- at article{Berger04,
-	author={Vance W. Berger},
-	title={Selection Bias and Baseline Imbalances in Randomized Trials},
-	journal={Drug Information Journal},
-	volume={38},
-	year={2004},
-	pages={1-2}
-}
-
- at article{Berger05a,
-	author={Vance W. Berger},
-	title={Quantifying the Magnitude of Baseline Covariate Imbalances Resulting from Selection
-		Bias in Randomized Clinical Trials},
-	journal={Biometrical Journal},
-	volume={47},
-	year={2005},
-	pages={119-127},
-	number={2}
-}
-
- at book{Berger05b,
-	author={Vance W. Berger},
-	title={Selection Bias and Covariate Imbalances in Randomized Clinical Trials},
-	publisher={John Wiley \& Sons, Ltd.},
-	year={2005},
-	editor={Stephen Senn and Vic Barnett},
-	series={Statistics in Practice}
-}
-
- at article{Berger94,
-	author={James Berger},
-	title={An Overview of Robust Bayesian Analysis (With Discussion)},
-	journal={Test},
-	volume= 3,
-	year= 1994,
-	pages={5-124}
-}
-
- at article{BerHenSav77,
-	author={E. Berndt and D. Hendry and N.E. Savin},
-	title={Conflict Among Criteria for Testing Hypotheses in the Multivariate Linear
-		Regression Model},
-	journal={Econometrica},
-	volume={45},
-	year={1977},
-	pages={1263-1277}
-}
-
- at article{BerKinKon97,
-	author={Shulamit L. Bernard and Jean E. Kincade and Thomas R. Konrad and others},
-	title={Predicting Mortality from Community Surveys of Older Adults: The Importance
-		of Self-Rated Functional Ability},
-	journal={Journal of Gerontology: Social Sciences},
-	volume={{52B}},
-	year= 1997,
-	pages={{S155-63}}
-}
-
- at misc{BerKos03,
-	author={Erik Bergstralh and Jon Kosanke},
-	title={dist, gmatch, and vmatch: SAS Macros},
-	year= 2003,
-	howpublished={Mayo Clinic, Division of Biostatistics},
-	note={{http://mayoresearch.mayo.edu/mayo/research/biostat/sasmacros.cfm}}
-}
-
- at book{BerKreOve98,
-	author={Mark de Berg and Marc van Kreveld and Mark Overmars and Otfried Schwarzkopf},
-	title={Computational Geometry: Algorithms and Applications},
-	publisher={Springer},
-	year= 1998,
-	address={New York},
-	edition={2nd, revised edition}
-}
-
-
- at book{ImbRub10,
-	author={Guido Imbens and Donald Rubin},
-	title={Causal Inference},
-	year= {2010},
-	note={Unpublished manuscript}
-}
-
-
- at article{Bernstein32,
-	author={F. Bernstein},
-	title={{\"U}ber eine Methode, die soziologische und bev{\"o}lkerungsstatistische Gliederung
-		von Abstimmungen bei geheimen Wahlverfahren zu ermitteln},
-	journal={Allgemeines Statistisches Archiv},
-	volume= 22,
-	year= 1932,
-	pages={253--256}
-}
-
- at article{Besag74,
-	author={Julian Besag},
-	title={Spatial Interaction and the Statistical Analysis of Lattice Systems (With
-		Discussion)},
-	journal= jrssb,
-	volume= 36,
-	year= 1974,
-	pages={192-236}
-}
-
- at article{Besag75,
-	author={Julian Besag},
-	title={Statistical Analysis of Non-Lattice Data},
-	journal={The Statistician},
-	volume= 24,
-	year= 1975,
-	pages={179--195},
-	number= 3
-}
-
- at article{Besag83,
-	author={Julian E. Besag},
-	title={Discussion of paper by {P}. {S}witzer},
-	journal={Bull. Intern. Statist. Inst.},
-	volume= 50,
-	year= 1983,
-	pages={422-425},
-	number={Bk. 3}
-}
-
- at article{Besag86,
-	author={Julian Besag},
-	title={On the Statistical Analysis of Dirty Pictures},
-	journal={Journal of the Royal Statistical Society B},
-	volume={48},
-	year={1986},
-	pages={259--302},
-	number={3}
-}
-
- at article{Besancon05,
-	author={Marie L. Besancon},
-	title={Relative Resources: Inequality in Ethnic Wars, Revolutions, and Genocides},
-	abstract={Political scientists and economists have exhaustively examined the nexus
-		between economic inequality and political conflict (EI-PC nexus) in aggregated
-		civil wars. This article revisits the nexus and its related theories, empirically
-		and parsimoniously testing the effects of inequality on disaggregated intrastate
-		conflicts. The results buttress the notion that traditionally deprived
-		identity groups are more likely to engage in conflict under more economically
-		equal conditions, while class or revolutionary wars fall under the conditions
-		of greater economic inequality and war. Of the three types of conflicts
-		tested - ethnic conflicts, revolutions, and genocides - economic inequality
-		seems to have the most ambiguous bearing on genocides. Support follows
-		for recent findings that political and social equalities are of greater
-		importance in mitigating ethnic violence and that greed factors might exacerbate
-		violence in all civil conflicts, including genocides. The theoretical argument
-		proposes that the context within which intrastate violence takes place
-		affects the requisite level of relative resources needed for the escalation
-		of violence between groups. The results have policy implications for ethnically
-		divided states that are in the process of equalizing their income differential,
-		but neglect the substantial inclusion of all groups within the political
-		process and the distribution of public goods. The social contracts between
-		the governors and the governed then require careful crafting for a peaceful
-		coexistence of diverse identity groups.},
-	journal={The Journal of Peace Research},
-	volume={42},
-	year={2005},
-	pages={393-415},
-	month={July},
-	number={4}
-}
-
- at article{BesGreHigMen95,
-	author={Julian Besag and Peter Green and David Higdon and Kerrie Mengersen},
-	title={Bayesian Computation and Stochastic Systems (With Discussion)},
-	journal={Statistical Science},
-	volume= 10,
-	year= 1995,
-	pages={3-66},
-	number= 1
-}
-
- at article{BesHig99,
-	author={Julian Besag and David M. Higdon},
-	title={Bayesian Analysis of Agricultural Field Experiments (With Discussion)},
-	journal= jrssb,
-	volume= 61,
-	year= 1999,
-	pages={691-746},
-	number={4}
-}
-
- at article{BesKoo95,
-	author={Julian Besag and Charles Kooperberg},
-	title={On Conditional and Intrinsic Autoregressions},
-	journal={Biometrika},
-	volume={82},
-	year= 1995,
-	pages={733-746},
-	number={4}
-}
-
- at article{BesYorMol91,
-	author={Julian Besag and Jeremy York and Annie Molli{\'e}},
-	title={Bayesian Image Restoration with Two Applications in Spatial Statistics (With
-		Discussion)},
-	journal={Annals of the Institute of Statistical Mathematics},
-	volume= 43,
-	year= 1991,
-	pages={1-59},
-	number= 1
-}
-
- at article{Bicego97,
-	author={G. Bicego},
-	title={Estimating adult mortality rates in the context of the AIDS epidemic in
-		sub-Saharan Africa: analysis of DHS sibling histories},
-	journal={Health Transition Review},
-	volume= 7,
-	year= 1997,
-	pages={7--22},
-	number={S2}
-}
-
- at book{Biggs93,
-	author={N.L. Biggs},
-	title={Algebraic Graph Theory},
-	publisher={Cambridge University Press},
-	year= 1993,
-	address={Cambridge, UK},
-	edition={2nd}
-}
-
- at article{Billordo05a,
-	author={Libia Billordo},
-	title={Publishing in French Political Science Journals},
-	journal={French Politics},
-	volume={3},
-	year={2005},
-	pages={178-186},
-	number={2}
-}
-
- at article{Billordo05b,
-	author={Libia Billordo},
-	title={Methods Training in French Political Science},
-	journal={French Politics},
-	volume={3},
-	year={2005},
-	pages={352-357},
-	number={3}
-}
-
- at article{BinBreEar05,
-	author={J.B. Bingenheimer and R.T. Brennan and F.J. Earls},
-	title={Firearm violence exposure and serious violent behavior},
-	journal={Science},
-	volume={308},
-	year={2005},
-	pages={1323-1326},
-	month={May}
-}
-
- at book{BisFieHol75,
-	author={Y.M. M. Bishop and S.E. Fienberg and P.W. Holland},
-	title={Discrete Multivariate Analysis: Theory and Practice},
-	publisher={MIT Press},
-	year= 1975,
-	address={Cambridge, MA}
-}
-
- at book{Bishop95,
-	author={Christopher M. Bishop},
-	title={Neural Networks for Pattern Recognition},
-	publisher={Oxford University Press},
-	year= 1995,
-	address={Oxford}
-}
-
- at article{BisSteWil06,
-	author={Benjamin Bishin and Daniel Stevens and Christian Wilson},
-	title={{Character Counts: Honesty and Fairness in Election 2000}},
-	journal={Public Opinion Quarterly},
-	volume={70},
-	year={2006},
-	pages={235-248},
-	number={2}
-}
-
- at article{BjoKri99,
-	author={Jakob Bue Bjorner and Tage Sondergaard Kristensen},
-	title={Multi-item Scales for Measuring Global Self Rated Health},
-	journal={Research on Aging},
-	volume= 21,
-	year= 1999,
-	pages={{417-39}},
-	number= 3
-}
-
- at article{BlaGeo91,
-	author={R.C. Blattberg and E.I. George},
-	title={Shrinkage Estimation of Price and Promotional Elasticities: Seemingly Unrelated
-		Equations},
-	journal= jasa,
-	volume= 86,
-	year= 1991,
-	pages={304--315},
-	month={Jun},
-	number= 414
-}
-
- at article{BlaRas04,
-	author={Grant Blank and Karsten B. Rasmussen},
-	title={The Data Documentation Initiative: The Value and Significance of a Worldwide
-		Standard.},
-	journal={Social Science Computer Review},
-	volume={22},
-	year={2004},
-	pages={306-318},
-	number={3}
-}
-
- at article{BlaSmi04,
-	author={Dan A. Black and Jeffrey A. Smith},
-	title={How robust is the evidence on the effects of college quality? Evidence from
-		matching},
-	journal={Journal of Econometrics},
-	volume={121},
-	year={2004},
-	pages={99-124},
-	number={1}
-}
-
- at article{BloHilRic03,
-	author={Howard S. Bloom and Carolyn J. Hill and James A. Riccio},
-	title={Linking Program Implementation and Effectiveness: Lessons from a Pooled
-		Sample of Welfare-to-Work Experiments},
-	journal={Journal of Policy Analysis and Management},
-	volume={22},
-	year={2003},
-	pages={551-575},
-	number={4}
-}
-
- at book{Bloom05,
-	title={Learning More from Social Experiments},
-	publisher={Russell Sage Foundation},
-	year={2005},
-	editor={Howard S. Bloom},
-	address={New York}
-}
-
- at article{BloRicBla07,
-	author={Howard S. Bloom and Lashawn Richburg-Hayes and Alison Black},
-	title={Using Covariates to Improve Precision for Studies that Randomize Schools
-		to Evaluate Educational Interventions},
-	journal={Educational Evaluation and Policy Analysis},
-	year={2007}
-}
-
- at book{BLS03,
-	author={{Board on Life Sciences}},
-	title={Sharing Publication-Related Data and Materials: Responsibilities of Authorship
-		in the Life Sciences},
-	publisher={National Academies Press},
-	year= 2003,
-	address={Washington, D.C.}
-}
-
- at article{Blumer48,
-  author =	 {Herbert Blumer},
-  title =	 {Public Opinion and Public Opinion Polling},
-  journal =	 {American Sociological Review},
-  volume =	 {13},
-  year =	 {1948},
-  pages =	 {542-549},
-  month =	 {October},
-  number =	 {5}
-}
-
- at incollection{Bohm84,
-	author={Peter Bohm},
-	title={Are there Practicable Demand-Revealing Mechanisms?},
-	booktitle={Public Finance and the Quest for Efficiency},
-	publisher={Wayne State University Press},
-	year={1984},
-	address={Detroit},
-	editor={H. Hanusch},
-	pages={127-139}
-}
-
- at article{BonBarMee94,
-	author={Luc Bonneux and Jan J. Barendregt and Karin Meeter and Gouke J. Bonsel and
-		Paul J. van der Maas},
-	title={Estimating Clinical Morbidity Due to Ischemic Heart Disease and Congestive
-		Heart Failure: The Future Rise of Heart Failure},
-	journal={American Journal of Public Health},
-	volume= 84,
-	year= 1994,
-	pages={20-28}
-}
-
- at unpublished{BonBonJen01,
-	author={Doug Bond and Joe Bond and J. Craig Jenkins and Churl Oh and Charles Lewis
-		Taylor},
-	title={Integrated Data for Events Analysis (IDEA): An Event Form Typology for Automated
-		Events Data Development},
-	note={manuscript, Harvard University},
-	year= 2001
-}
-
- at article{Bongaarts89,
-	author={John Bongaarts},
-	title={A Model of the Spread of HIV Infection and the Demographic Impact of AIDS},
-	journal={Statistics in Medicine},
-	volume= 8,
-	year= 1989,
-	pages={103--120}
-}
-
- at techreport{BooMaiSmi02,
-	author={Heather Booth and John Maindonald and Len Smith},
-	title={{Age-Time Interactions in Mortality Projection: Applying Lee-Carter to Australia}},
-	institution={The Australian National University},
-	year= 2002,
-	month={August},
-	type={Working Papers in Demography}
-}
-
- at article{BooMaiSmi02b,
-	author={Heather Booth and John Maindonald and Len Smith},
-	title={Applying Lee-Carter Under Conditions of Variable Mortality Decline},
-	journal={Population Studies},
-	volume= 56,
-	year= 2002,
-	pages={325--336},
-	number= 3
-}
-
- at unpublished{BorBorRal01,
-	author={Roman Borisyuk and Galina Borisyuk and Colin Rallings and Michael Thrasher},
-	title={Forecasting the 2001 General Election Result: A Neural Network Approach},
-	note={{http://www.psa.ac.uk/spgrp/epop/forecasting\_genelect2001.htm}},
-	year={2001},
-	annote={authors try to forecast election results; they generate fitted values by
-		'predicting' the winners of past elections. they do break up their data
-		sets into training and test in the spirit of cross-validation; they also
-		compare their results to logit.}
-}
-
- at incollection{Borchardt91,
-  author =	 {Knut Borchardt},
-  title =	 {Economic Causes for the Collapse of the Weimar
-                  Republic},
-  booktitle =	 {Perspectives on Modern German Economic History and
-                  Policy},
-  publisher =	 {Cambridge University Press},
-  year =	 1991,
-  address =	 {New York},
-  editor =	 {Knut Borchardt},
-  pages =	 {161--184}
-}
-
- at article{borgan95,
-	author={{\O }rnulf Borgan and B. Langholz and L. Goldstein},
-	title={Methods for the Analysis of Sampled Cohort Data in the Cox Proportional
-		Hazard Model},
-	journal={The Annals of Statistics},
-	volume= 23,
-	year= 1995,
-	pages={1749-1778}
-}
-
- at article{BorMayTur04,
-	author={Robert Boruch and Henry May and Herbert Turner and Julia Lavenberg and Anthony
-		Petrosino and Dorothy de Moya and Jeremy Grimshaw and Ellen Foley },
-	title={Estimating the Effects of Interventions That are Deployed in Many Places:
-		Place-Randomized Trials},
-	journal={American Behavioral Scientist},
-	volume={47},
-	year={2004},
-	pages={608-633},
-	number={5}
-}
-
- at book{Boruch97,
-	author={Robert F. Boruch},
-	title={Randomized Experiments for Planning and Evaluation},
-	publisher={Sage Publications},
-	year={1997},
-	address={Thousand Oaks}
-}
-
- at article{BouChaWel01,
-	author={Andrew Boulle and Daniel Chandramohan and Peter Weller},
-	title={A Case Study of Using Artificial Neural Networks for Classifying Cause of
-		Death from Verbal Autopsy},
-	journal={International Journal of Epidemiology},
-	volume={30},
-	year={2001},
-	pages={515-520}
-}
-
- at unpublished{BowHan05,
-	author={Jake Bowers and Ben Hansen},
-	title={Attributing Effects to A Cluster Randomized Get-Out-The-Vote Campaign: An
-		Application of Randomization Inference Using Full Matching},
-	note={Departments of Political Science and Statistics: University of Michigan},
-	year={2005},
-	month={July}
-}
-
- at book{BoxHunHun78,
-	author={George E.P. Box and William G. Hunter and J. Stuart Hunter},
-	title={Statistics for Experimenters},
-	publisher={Wiley-Interscience},
-	year={1978},
-	address={New York}
-}
-
- at article{BoyHonNar01,
-	author={James P. Boyle and Amanda A. Honeycutt and K.M. Venkat Narayan and Thomas
-		J. Hoerger and Linda S. Geiss and Hong Chen and Theodore J. Thompson},
-	title={Projection of Diabetes Burden Through 2050},
-	journal={Diabetes Care},
-	volume= 24,
-	year= 2001,
-	pages={1936--1940},
-	number= 11
-}
-
- at article{BozBel87,
-	author={J.E. Bozik and W.R. Bell},
-	title={Forecasting Age Specific Fertility Using Principal Components},
-	journal={Proceedings of the American Statistical Association, Social Statistics Section},
-	year= 1987,
-	pages={396--401}
-}
-
- at article{bracken98,
-	author={Michael B. Bracken},
-	title={Avoidable Systematic Error in Estimating Treatment Effects Must not be Tolerated},
-	journal={British Medical Journal},
-	volume= 317,
-	year= 1998,
-	pages={1156},
-	month={October 24}
-}
-
- at article{Brady85,
-	author={Henry E. Brady},
-	title={The Perils of Survey Research: Inter-Personally Incomparable Responses},
-	journal={Political Methodology},
-	volume= 11,
-	year= 1985,
-	pages={269--290},
-	month={June},
-	number={3--4}
-}
-
- at article{Brady89,
-	author={Henry E. Brady},
-	title={Factor and Ideal Point Analysis for Interpersonally Incomparable Data},
-	journal={Psychometrika},
-	volume= 54,
-	year= 1989,
-	pages={181--202},
-	month={June},
-	number= 2
-}
-
- at inproceedings{BraHil73,
-	author={William Brass and Kenneth Hill},
-	title={Estimating Adult Mortality in Africa from Orphanhood},
-	booktitle={Proceedings of the International Population Conference Liege},
-	year= 1973,
-	organization={International Union for the Scientific Study of Population}
-}
-
- at article{BraTuc01,
-	author={Ted Brader and Joshua Tucker},
-	title={The Emergence of Mass Partisanship in Russia, 1993-96},
-	journal={American Journal of Political Science},
-	volume={45},
-	year={2001},
-	pages={69-83}
-}
-
- at book{BreDay80,
-	author={Norman E. Breslow and N.E. Day},
-	title={Statistical Methods in Cancer Research},
-	publisher={International Agency for Research on Cancer},
-	year= 1980,
-	address={Lyon}
-}
-
- at book{BreFriOls84,
-	author={Leo Breiman and Jerome H. Friedman and Richard A. Olshen and Charles J.
-		Stone},
-	title={Classification and Regression Trees},
-	publisher={Chapman \& Hall},
-	year={1984},
-	address={New York, New York}
-}
-
- at book{Brehm93,
-	author={John Brehm},
-	title={The Phantom Respondents: Opinion Surveys and Political Representation},
-	publisher={University of Michigan Press},
-	year={1993},
-	address={Ann Arbor}
-}
-
- at article{Breiman01,
-	author={Leo Breiman},
-	title={Statistical Modeling: The Two Cultures},
-	journal={Statistical Science},
-	volume={16},
-	year={2001},
-	pages={199-215},
-	month={August},
-	number={3}
-}
-
- at article{Breslow96,
-	author={Norman E. Breslow},
-	title={Statistics in Epidemiology: The Case-Control Study},
-	journal= jasa,
-	volume= 91,
-	year= 1996,
-	pages={14--28}
-}
-
- at unpublished{Breyer04,
-	author={L.A. Breyer},
-	title={The Dbacl Text Classifier},
-	note={laird at lbreyer.com},
-	year={2004},
-	month={June}
-}
-
- at article{BreZie92,
-	author={Hermann Brenner and Hartwig Ziegler},
-	title={Monitoring and Projecting Cancer Incidence in Saarland, Germany, Based on
-		Age-Cohort Analyses},
-	journal={Journal of Epidemiology and Community Health},
-	volume= 46,
-	year= 1992,
-	pages={15--20}
-}
-
- at book{BroDav91,
-	author={Peter J. Brockwell and Richard A. Davis},
-	title={Time Series: Theory and Methods},
-	publisher={Springer-Verlag},
-	year={1991},
-	edition={2nd}
-}
-
- at article{BroDenVer02,
-	author={N. Brouhns and M. Denuit and J. Vermunt},
-	title={A Poisson Log-bilinear Regression Approach to the Construction of Projected
-		Lifetables},
-	journal={Insurance: Mathematics and Economics},
-	volume= 31,
-	year= 2002,
-	pages={373--393}
-}
-
- at article{BroGra00,
-	author={Ron Brookmeyer and Sarah Gray},
-	title={Methods for Projecting the Incidence and Prevalence of Chronic Diseases
-		in Ageing Populations: Application to Alzheimer's Disease},
-	journal={Statistics in Medicine},
-	volume= 19,
-	year= 2000,
-	pages={1481--1493}
-}
-
- at article{BroHew00,
-	author={M. Brockerhoff and P. Hewett},
-	title={Inequality of child mortality among ethnic groups in sub-Saharan Africa},
-	journal= bull,
-	year={2000},
-	optnumber={1},
-	optvolume={78},
-	optpages={30--41}
-}
-
- at article{Bronnum-Hansen02,
-	author={Henrik Bronnum-Hansen},
-	title={Predicting the Effect of Prevention of Ischaemic Heart Disease},
-	journal={Scandinavian Journal of Public Health},
-	volume= 30,
-	year= 2002,
-	pages={5--11}
-}
-
- at article{Bronnum-Hansen99,
-	author={Henrik Bronnum-Hansen},
-	title={How Good is the Prevent Model for Estimating the Health Benefits of Prevention?},
-	journal={Journal of Epidemiology and Community Health},
-	volume= 53,
-	year= 1999,
-	pages={300--305}
-}
-
- at article{BroSchRot06,
-	author={M. Alan Brookhart and Sebastian Schneeweiss and Kenneth J. Rothman and Robert
-		J. Glynn and Jerry Avorn and Til Sturmer},
-	title={Variable Selection for Propensity Score Models},
-	journal={American Journal of Epidemiology},
-	volume={163},
-	year={2006},
-	pages={1149-1156},
-	month={April}
-}
-
- at book{Brown58,
-	author={Ralph Brown},
-	title={Loyalty and Security},
-	publisher={Yale University Press},
-	year= 1958,
-	address={New Haven, CT}
-}
-
- at article{Brown82,
-	author={Brown, Courtney},
-	title={The Nazi Vote: A National Ecological Study},
-	journal={American Political Science Review},
-	volume= 76,
-	year= 1982,
-	pages={285-302},
-	number= 2
-}
-
- at article{BruFal94,
-	author={Brustein, William and Falter, J{\"u}rgen W.},
-	title={The Sociology of Nazism: An Interest-Based Account},
-	journal={Rationality and Society},
-	volume= 6,
-	year= 1994,
-	pages={369-399},
-	number= 3
-}
-
- at book{Brustein96,
-	author={William Brustein},
-	title={The Logic of Evil: Social Origins of the Nazi Party, 1925-1933},
-	publisher={Yale University Press},
-	year= 1996
-}
-
- at article{BruUrd05,
-	author={Helge Brunborg and Henrik Urdal},
-	title={The Demography of Conflict and Violence: An Introduction},
-	abstract={The demography of armed conflict is an emerging field among demographers
-		and peace researchers alike. The articles in this special issue treat demography
-		as both a cause and a consequence of armed conflict, and they carry important
-		policy implications. A study of German-allied countries during World War
-		II addresses the role of refugees and territorial loss in paving the way
-		for genocide. Other articles focusing on the demographic causes of conflict
-		discuss highly contentious issues of whether economic and social inequality,
-		high population pressure on natural resources, and youth bulges and limited
-		migration opportunities can lead to different forms of armed conflict and
-		state failure. The articles on demographic responses to armed conflict
-		analyze the destructiveness of pre-industrial warfare, differences in short-
-		and long-term mortality trends after armed conflict, and migratory responses
-		in war. Another set of articles on demographic responses to war is published
-		simultaneously in the European Journal of Population.},
-	journal={The Journal of Peace Research},
-	volume={42},
-	year={2005},
-	pages={371-374},
-	month={July},
-	number={4}
-}
-
- at book{BSER02,
-	author={{Board on Earth Sciences and Resources}},
-	title={Geoscience Data and Collections: National Resources in Peril},
-	publisher={National Academies Press},
-	year= 2002,
-	address={Washington, D.C.}
-}
-
- at article{Buchheim03,
-	author={Christoph Buchheim},
-	title={Die Erholung von der Weltwirtschaftskrise 1932/33 in Deutschland},
-	journal={Jahrbuch f{\"u}r Wirtschaftsgeschichte},
-	year={2003},
-	pages={13-26},
-	number={1}
-}
-
- at article{BurFred01,
-	author={B Burstrom and P Fredlund},
-	title={Self-rated health: Is it as good a predictor of subsequent mortality among
-		adults in lower as well as in higher social classes?},
-	journal={Journal of Epidemiology and Community Health},
-	volume= 55,
-	year= 2001,
-	pages={{836-40}}
-}
-
- at article{Burgoon06,
-	author={Brian Burgoon},
-	title={On Welfare and Terror},
-	journal={Journal of Conflict Resolution},
-	volume={50},
-	year={2006},
-	pages={176-203},
-	month={April},
-	number={2}
-}
-
- at article{Burnham72,
-	author={Walter Dean Burnham},
-	title={Political Immunisation and Political Confessionalism: The United States
-		and Weimar Germany},
-	journal={Journal of Interdisciplinary History},
-	volume= 3,
-	year= 1972,
-	pages={1--30}
-}
-
- at article{Burtless95,
-	author={Gary Burtless},
-	title={The Case for Randomized Field Trials in Economic and Policy Research},
-	journal={The Journal of Economic Perspectives},
-	volume={9},
-	year={1995},
-	pages={63-84},
-	number={2}
-}
-
- at article{ButBurMit87,
-	author={J.S. Butler and others},
-	title={Measurement Error in Self-Reported Health Variables},
-	journal={The Review of Economics and Statistics},
-	volume= 69,
-	year= 1987,
-	pages={{644-50}}
-}
-
- at techreport{ButCar01,
-	author={C.T.\ Butts and K.M.\ Carley},
-	title={Multivariate Methods for Interstructural Analysis},
-	institution={CASOS working paper, Carnegie Mellon University},
-	year={2001}
-}
-
- at incollection{Butler51,
-	author={David E. Butler},
-	title={Appendix},
-	booktitle={The British General Election of 1950},
-	publisher={Macmillan},
-	year= 1951,
-	address={London},
-	editor={H.G. Nicholas}
-}
-
- at article{BuuEyrTenHop03,
-	author={S. van Buuren and S. Eyres and A. Tennant and M. Hopman-Rock},
-	title={Assessing comparability of dressing disability in different countries by
-		response conversion},
-	journal={European Journal of Public Health},
-	volume={13},
-	year={2003},
-	pages={15-19}
-}
-
- at article{ByaFotHuo06,
-  author =	 {Peter Byass and Edward Fottrell and Dao Lan Huong
-                  and Yemane Berhane and Tumani Corrah and Kathleen
-                  Kahn and Lulu Muhe and Do Duc Van},
-  title =	 {Refining a probabilistic model for interpreting
-                  verbal autopsy data},
-  journal =	 {Scandinavian Journal of Public Health},
-  volume =	 {34},
-  year =	 {2006},
-  pages =	 {26-31}
-}
-
- at article{CamJagHar03,
-	author={Michael J. Camasso and Radha Jagannathan and Carol Harvey and Mark Killingsworth},
-	title={The Use of Client Surveys to Gauge the Threat of Contamination in Welfare
-		Reform Experiments},
-	journal={Journal of Policy Analysis and Management},
-	volume={22},
-	year={2003},
-	pages={207-223},
-	number={2}
-}
-
- at book{CamTri98,
-	author={A.C. Cameron and P.K. Trivedi},
-	title={Regression Analysis of Count Data},
-	publisher={Cambridge University Press},
-	year={1998}
-}
-
- at article{Canner91,
-	author={Paul L. Canner},
-	title={Covariate Adjustment of Treatment Effects in Clinical Trials},
-	journal={Controlled Clinical Trials},
-	volume={12},
-	year={1991},
-	pages={359-366}
-}
-
- at book{Cantril65,
-	author={Hadley Cantril},
-	title={The Pattern of Human Concerns},
-	publisher={Rutgers University Press},
-	year= 1965,
-	address={New Brunswick, N.J.}
-}
-
- at article{CapAngFro95,
-	author={Riccardo Capocaccia and Roberta De Angelis and Luisa Frova and Milena Sant
-		and Eva Buiatti and Gemma Gatta and Andrea Micheli and Franco Berrino and
-		Alessandro Barchielli and Ettore Conti and Lorenzo Gafa and Arduino Verdecchia},
-	title={Estimation and Projections of Stomach Cancer Trends in Italy},
-	journal={Cancer Causes and Control},
-	volume= 6,
-	year= 1995,
-	pages={339--346}
-}
-
- at article{CapDeaFro97,
-	author={Riccardo Capocaccia and Roberta De Angelis and Luisa Frova and Gemma Gatta
-		and Milena Sant and Andrea Micheli and Franco Berrino and Ettore Conti
-		and Lorenzo Gafa and Luca Roncucci and Arduino Verdecchia},
-	title={Estimation and Projections of Colorectal Cancer Trends in Italy},
-	journal={International Journal of Epidemiology},
-	volume= 26,
-	year= 1997,
-	pages={924--932},
-	number= 5
-}
-
- at article{CapVerMic90,
-	author={Riccardo Capocaccia and Arduino Verdecchia and Andrea Micheli and Milena
-		Sant and Gemma Gatta and Franco Berrino},
-	title={Breast Cancer Incidence and Prevalence Estimated from Survival and Mortality},
-	journal={Cancer Causes and Control},
-	volume= 1,
-	year= 1990,
-	pages={23--29}
-}
-
- at article{CarCha70,
-	author={J.D. Carroll and J. J. Chang},
-	title={Analysis of Individual Differences in Multidimensional Scaling},
-	journal={Psychometrika},
-	volume= 35,
-	year= 1970,
-	pages={283--319},
-	month={September}
-}
-
- at article{CarGre97,
-	author={J. Douglas Carroll and Paul E. Green},
-	title={Psychometric Methods in Marketing Research: Part II, Multidimensional Scaling},
-	journal={Journal of Marketing Research},
-	volume={XXXIV},
-	year= 1997,
-	pages={193--204},
-	month={May}
-}
-
- at article{CarKucLom96,
-	author={Raymond J. Carroll and Helmut Kuchenhoff and F. Lombard and Leonard A. Stefanski},
-	title={Asymptotics for the SIMEX estimator in structural measurement error models},
-	journal={Journal of the American Statistical Association},
-	volume={91},
-	year={1996},
-	pages={242-250}
-}
-
- at book{CarLou00,
-	author={Bradley P. Carlin and Thomas A. Louis},
-	title={Bayes and Empirical Bayes Methods for Data Analysis},
-	publisher={CRC Press},
-	year= 2000,
-	edition={2nd}
-}
-
- at article{CarMacRup99,
-	author={Raymond J. Carroll and Jeffrey D. Maca and David Ruppert},
-	title={Nonparametric regression in the presence of measurement error},
-	journal={Biometrika},
-	volume={86},
-	year={1999},
-	pages={541-554},
-	number={3}
-}
-
- at article{Carpenter02,
-	author={Daniel Paul Carpenter},
-	title={Groups, the Media, Agency Waiting Costs, and FDA Drug Approval},
-	journal= ajps,
-	volume= 46,
-	year= 2002,
-	pages={490--505},
-	month={July},
-	number= 2
-}
-
- at techreport{CarPrs00,
-	author={Lawrence R. Carter and Alexia Prskawetz},
-	title={Examining Structural Shifts in Mortality using the Lee-Carter Method},
-	institution={Bundesinstitut f{\"u}r Bev{\"o}lkerungswissenschaften},
-	year= 2000,
-	address={Germany},
-	note={Demographische Voraussch{\"a}tzungen --- Abhandlungen des Arbeitskreises Bev{\"o}lkerungswissenschaftlicher
-		Methoden der Statistischen Woche}
-}
-
- at article{Carr07,
-	author={David Carr},
-	title={24-Hour Newspaper People},
-	journal={New York Times},
-	year= 2007,
-	month={15 January}
-}
-
- at article{CavTre94,
-	author={W.B. Cavnar and J.M. Trenkle},
-	title={{N-Gram-Based Text Categorization}},
-	journal={Proceedings of the Third Annual Symposium on Document Analysis and Information
-		Retrieval},
-	year={1994},
-	pages={161-175}
-}
-
- at article{Chafee19,
-	author={Zechariah Chafee},
-	title={Freedom of Speech in War Time},
-	journal= hlr,
-	volume= 32,
-	year={1919},
-	pages={932--??}
-}
-
- at book{Chafee41,
-	author={Zechariah Chafee},
-	title={Free Speech in the United States},
-	publisher= hup,
-	year= 1941,
-	address={Cambridge, MA}
-}
-
- at article{ChaMauRod94,
-	author={Daniel Chandramohan and Gillian H. Maude and Laura C. Rodrigues and Richard
-		J. Hayes},
-	title={Verbal Autopsies for Adult Deaths: Issues in their Development and Validation},
-	journal={International Journal of Epidemiology},
-	volume={23},
-	year={1994},
-	pages={213-222},
-	number={2}
-}
-
- at article{Chamberlain80,
-	author={Gary Chamberlain},
-	title={Analysis of Covariance with Qualitative Data},
-	journal={Review of Economic Studies},
-	volume={XLVII},
-	year= 1980,
-	pages={225-238}
-}
-
- at article{ChaRodMau98,
-	author={Daniel Chandramohan and Laura C. Rodrigues and Gillian H. Maude and Richard
-		Hayes},
-	title={The Validity of Verbal Autopsies for Assessing the Causes of Institutional
-		Maternal Death},
-	journal={Studies in Family Planning},
-	volume={29},
-	year={1998},
-	pages={414-422},
-	month={December},
-	number={4}
-}
-
- at article{Chase68,
-	author={G.R. Chase},
-	title={On the Efficiency of Matched Pairs in Bernoulli Trials},
-	journal={Biometrika},
-	volume={55},
-	year={1968},
-	pages={365-369},
-	month={July},
-	number={2}
-}
-
- at article{ChaSetQui01,
-	author={Daniel Chandramohan and Philip Setel and Maria Quigley},
-	title={Effect of misclassification of causes of death in verbal autopsy: can it
-		be adjusted?},
-	journal={International Journal of Epidemiology},
-	volume={30},
-	year={2001},
-	pages={509-514}
-}
-
- at article{ChaSolShi05,
-	author={Daniel Chandramohan and Nadia Soleman and Kenji Shibuya and John Porter},
-	title={Editorial: Ethical issues in the application of verbal autopsies in mortality
-		surveillance systems},
-	journal={Tropical Medicine and International Health},
-	volume={10},
-	year={2005},
-	pages={1087-1089},
-	month={November},
-	number={11}
-}
-
- at article{CheCumDum03,
-	author={Lee Cheng and others},
-	title={Health related quality of life in pregeriatric patients with chronic diseases
-		at urban, public supported clinics},
-	journal={Health and Quality of Life Outcomes},
-	volume= 1,
-	year= 2003,
-	pages={{1-8}},
-	month={October},
-	number= 63
-}
-
- at book{CheRon83,
-	author={G. Shabbir Cheema and Dennis A. Rondinelli},
-	title={Decentralization and Development: Policy Implementation in Developing Countries},
-	publisher={Sage Publications},
-	year={1983},
-	address={Beverly Hills, CA}
-}
-
- at article{Childers76,
-	author={Thomas Childers},
-	title={The Social Bases of the Nationalist Socialist Vote},
-	journal={Journal of Contemporary History},
-	volume= 11,
-	year= 1976,
-	pages={17-42}
-}
-
- at book{Childers83,
-	author={Childers, Thomas},
-	title={The Nazi Voter},
-	publisher={University of North Carolina Press},
-	year= 1983
-}
-
- at article{ChiLwa91,
-	author={J. Chin and S.K. Lwanga},
-	title={Estimation and Projection of Adult AIDS Cases: a Simple Epidemiological
-		Model},
-	journal={Bulletin of the World Health Organization},
-	volume= 69,
-	year= 1991,
-	pages={399--406},
-	number= 4
-}
-
- at article{ChiZamGra92,
-	author={J.D. Chiphangwi and T.P. Zamaere and W Graham and B. Duncan and T. Kenyon
-		and R. Chinyama},
-	title={Maternal mortality in the Thyolo district of southern Malawi},
-	journal={East African Medical Journal},
-	volume= 69,
-	year= 1992,
-	pages={675--679}
-}
-
- at article{Chochran53,
-	author={William G. Cochran},
-	title={Matching in Analytical Studies},
-	journal={American Journal of Public Health},
-	volume={43},
-	year={1953},
-	pages={684-691},
-	month={June}
-}
-
- at book{Christensen96,
-	author={Ronald Christensen},
-	title={Plane Answers to Complex Questions: The Theory of Linear Models},
-	publisher={Springer-Verlag New York},
-	year={1996},
-	edition={Second}
-}
-
- at article{Church75,
-	author={Thomas Church},
-	title={Conspiracy Doctrine and Speech Offenses: A Reexamination of Yates v. U.S.
-		from the Perspective of U.S. v. Spock},
-	journal={Cornell Law Review},
-	volume= 60,
-	year={1975},
-	pages={569--??}
-}
-
- at incollection{ClaBer92,
-	author={David G. Clayton and Luisa Bernardinelli},
-	title={Bayesian Methods For Mapping Disease Risk},
-	booktitle={Geographical and Environmental Epidemiology: Methods for Small-Area Studies},
-	publisher={Oxford University Press},
-	year= 1992,
-	address={Oxford},
-	editor={P. Elliott and J. Cuzick and D. English and R. Stern},
-	pages={205-220}
-}
-
- at article{ClaJanHob01,
-	author={W. Crawford Clark and Malvin N. Janal and Elaine K. Hoben and J. Douglas
-		Carroll},
-	title={How Separate are the Sensory, Emotional, and Motivational Dimensions of
-		Pain? A Multidimensional Scaling Analysis},
-	journal={Somatosensory and Motor Research},
-	volume= 18,
-	year= 2001,
-	pages={31-39},
-	number= 1
-}
-
- at article{ClaMarLie04,
-	author={Tim Clark and Sean Martin and Ted Liefeld},
-	title={Globally Distributed Object Identification for Biological Knowledgebases},
-	journal={Briefings in Bioinformatics},
-	volume={5},
-	year={2004},
-	pages={59-71},
-	month={March},
-	number={1}
-}
-
- at article{Clarkson00,
-	author={Douglas B. Clarkson},
-	title={A Random Effects Individual Difference Multidimensional Scaling Model},
-	journal={Computational Statistics and Data Analysis},
-	volume= 32,
-	year= 2000,
-	pages={337--347},
-	month={January}
-}
-
- at incollection{Clayton96,
-	author={David G. Clayton},
-	title={Generalized Linear Mixed Models},
-	booktitle={Markov Chain {M}onte {C}arlo in Practice},
-	publisher={Chapman \& Hall},
-	year= 1996,
-	address={London},
-	editor={W.R. Gilks and S. Richardson and D.J. Spiegelhalter},
-	pages={275-301}
-}
-
- at article{CleDev88,
-	author={W.S. Cleveland and S.J. Devlin},
-	title={Locally Weighted Regression: An Approach to Regression Analysis by Local
-		Fitting},
-	journal={Journal of the American Statistical Association},
-	volume={83},
-	year={1988},
-	pages={596-610}
-}
-
- at book{CleHen98,
-	author={M.P. Clements and D.F. Hendry},
-	title={{Forecasting Economic Time Series}},
-	publisher= cup,
-	year= 1998,
-	address={Cambridge, U.K.}
-}
-
- at misc{CliJacRiv00,
-	author={Joshua Clinton and Simon Jackman and Douglas Rivers},
-	title={The Statistical Analysis of Legislative Behavior: A Unified Approach},
-	year={2000},
-	howpublished={Paper presented at the Annual Meeting of the Political Methodology Society}
-}
-
- at unpublished{CliJacRiv02,
-	author={Joshua Clinton and Simon Jackman and Douglas Rivers},
-	title={The Statistical Analysis of Roll Call Data},
-	note={Stanford University},
-	year= 2002
-}
-
- at article{CloRubSch91,
-	author={Clifford C. Clogg and Donald B. Rubin and Nathaniel Schenker and Bradley
-		Schultz and Lynn Weidman},
-	title={Multiple Imputation of Industry and Occupation Codes in Census Public-Use
-		Samples Using Bayesian Logistic Regression},
-	journal={Journal of the American Statistical Association},
-	volume={86},
-	year={1991},
-	pages={68-78},
-	month={March},
-	number={413}
-}
-
- at book{CoaDem66,
-	author={Ansley J. Coale and Paul Demeny},
-	title={Regional Model Life Tables and Stable Populations},
-	publisher={Princeton University Press},
-	year= 1966,
-	address={Princeton, N.J.}
-}
-
- at book{CocCox57,
-	author={WG Cochran and GM Cox},
-	title={Experimental Designs},
-	publisher={Wiley},
-	year={1957},
-	address={New York}
-}
-
- at article{Cochran65,
-	author={William G. Cochran},
-	title={The Planning of Observational Studies of Human Populations},
-	journal={Journal of the Royal Statistical Society. Series A (General)},
-	volume= 128,
-	year= 1965,
-	pages={234-266},
-	number= 2
-}
-
- at article{Cochran68,
-	author={Cochran, William G.},
-	title={The effectiveness of adjustment by subclassification in removing bias in
-		observational studies},
-	journal={Biometrics},
-	volume={24},
-	year={1968},
-	pages={295-313}
-}
-
- at article{CocRub73,
-	author={Cochran, William G. and Rubin, Donald B.},
-	title={Controlling bias in observational studies: A review},
-	journal={Sankhya: The Indian Journal of Statistics, Series A},
-	volume={35, Part 4},
-	year={1973},
-	pages={417-466}
-}
-
- at article{ColMah93,
-  author =	 {David Collier and Mahon, Jr., James E.},
-  title =	 {Conceptual `Stretching' Revisited},
-  journal =	 apsr,
-  volume =	 87,
-  year =	 1993,
-  pages =	 {845-855},
-  month =	 {December},
-  number =	 4
-}
-
- at article{ColSul02,
-	author={James E. Coleman and Barry Sullivan},
-	title={Enduring and Empowering: The Bill of Rights in the Third Millennium},
-	journal={Law and Contemporary Problems},
-	volume= 65,
-	year={2002},
-	pages={1--??}
-}
-
- at book{Colton06,
-	author={Timothy Colton},
-	title={Transitional Citizens: Voters and What Influences Them in the New Russia},
-	publisher={Harvard University Press},
-	year={2006},
-	note={In press},
-	address={Cambridge, MA}
-}
-
- at article{ComMolGri01,
-	author={Campbell, M.K. and Mollison, J. and Grimshaw, J.M.},
-	title={{Cluster trials in implementation research: estimation of intracluster correlation
-		coefficients and sample size}},
-	journal={Statistics in Medicine},
-	volume={20},
-	year={2001},
-	pages={391--399},
-	number={3}
-}
-
- at unpublished{Congdon06,
-	author={Peter Congdon},
-	title={A Model Framework for Mortality and Health Data Classified by Age, Area,
-		and Time},
-	note={To be published in Biometrics. Peter Congdon, Dept. of Geography, Queen Mary
-		(University of London), Mile End Road, London E1 4NS, England, p.congdon at qmul.ac.uk},
-	year={2006}
-}
-
- at book{CooCam79,
-	author={Thomas D. Cook and Donald T. Campbell},
-	title={Quasi-Experimentation: Design and Analysis Issues for Field Settings},
-	publisher={Rand McNally College Publishing Company},
-	year={1979},
-	address={Chicago}
-}
-
- at article{CooSte94,
-	author={J. Cook and L. Stefanski},
-	title={Simulation-extrapolation estimation in parametric measurement error models},
-	journal={Journal of the American Statistical Association},
-	volume={89},
-	year={1994},
-	pages={1314-1328}
-}
-
- at book{CorCraFox94,
-	title={Transforming State-Society Relations in Mexico},
-	publisher={Center for U.S.-Mexican Studies},
-	year={1994},
-	editor={Wayne A. Cornelius and Ann L. Craig and Jonathan Fox},
-	address={University of California, San Diego},
-	series={U.S.-Mexico Contemporary Perspectives Series, 6}
-}
-
- at InCollection{Cornelius04,
-author = {Wayne A. Cornelius},
-title = {Mobilized Voting in the 2000 Elections: The Changing Efficacy of Vote Buying and Coercion in Mexican Electoral Politics},
-booktitle = {Mexico's Pivotal Democratic Election: Candidates, Voters, and the Presidential Campaign of 2000},
-OPTcrossref = {},
-OPTkey = {},
-OPTpages = {},
-publisher = {Stanford University Press},
-year = {2004},
-editor = {Jorge I. Dom\'{i}nguez and Chappell Lawson},
-OPTvolume = {},
-OPTnumber = {},
-OPTseries = {},
-OPTtype = {},
-OPTchapter = {},
-address = {Stanford and La Jolla, CA},
-OPTedition = {},
-OPTmonth = {},
-OPTnote = {},
-OPTannote = {}
-}
-
- at article{Cornfield51,
-	author={Jerome Cornfield},
-	title={A Method of Estimating Comparative Rates from Clinical Data: Application
-		to Cancer of the Lung, Breast and Cervix},
-	journal={Journal of the National Cancer Institute},
-	volume= 11,
-	year= 1951,
-	pages={1269-1275}
-}
-
- at article{CowBra96,
-	author={Mary Kathryn Cowles and Bradley P. Carlin},
-	title={Markov Chain Monte Carlo Convergence Diagnostics: A Comparative Review},
-	journal={Journal of the American Statistical Association},
-	volume={91},
-	year={1996},
-	pages={883-904},
-	month={June},
-	number={434}
-}
-
- at article{Cox52,
-	author={D.R. Cox},
-	title={Some Recent Work on Systematic Experimental Designs},
-	journal={Journal of the Royal Statistical Society. Series B (Methodological)},
-	volume={14},
-	year={1952},
-	pages={211-219},
-	number={2}
-}
-
- at book{Cox58,
-	author={David R. Cox},
-	title={Planning of Experiments},
-	publisher={John Wiley},
-	year= 1958,
-	address={New York}
-}
-
- at article{CoxSke92,
-	author={Brian Cox and D.C.G. Skegg},
-	title={Projections of Cervical Cancer Mortality and Incidence in New Zealand: The
-		Possible Impact of Screening},
-	journal={Journal of Epidemiology and Community Health},
-	volume= 46,
-	year= 1992,
-	pages={373--377}
-}
-
- at article{CraWoo05,
-	author={Richard Craggs and Mary McGee Wood},
-	title={{Evaluating Discourse and Dialogue Coding Schemes}},
-	journal={Computational Linguistics},
-	volume={31},
-	year={2005},
-	pages={289-295},
-	number={3}
-}
-
- at article{CroKen02,
-	author={Thomas F. Crossley and Steven Kennedy},
-	title={The reliability of self-assessed health status},
-	journal={Journal of Health Economics},
-	volume= 21,
-	year= 2002,
-	pages={{643-58}},
-	number= 4
-}
-
- at article{Crosnoe05,
-	author={Robert Crosnoe},
-	title={Double Disadvantage or Signs of Resilience? The Elementary School Contexts
-		of Children from Mexican Immigrant Families},
-	journal={American Educational Research Journal},
-	volume={42},
-	year={2005},
-	pages={269-303},
-	number={2}
-}
-
- at unpublished{CruHotImb06,
-  author =	 {Richard K. Crump and V. Joseph Hotz and Guido
-                  W. Imbens and Oscar Mitnik},
-  title =	 {Moving the Goalposts: Addressing Limited Overlap in
-                  Estimation of Average Treatment Effects by Changing
-                  the Estimand},
-  note =	 {Department of Economics, UC Berkeley},
-  year =	 {2006},
-  month =	 {September}
-}
-
- at article{CruHotImb09,
-  title =	 {{Dealing with limited overlap in estimation of
-                  average treatment effects}},
-  author =	 {Richard K. Crump and V. Joseph Hotz and Guido
-                  W. Imbens and Oscar Mitnik},
-  journal =	 {Biometrika},
-  volume =	 {96},
-  number =	 {1},
-  pages =	 {187-199},
-  year =	 {2009}
-}
-
-
- at article{CuaFor95,
-	author={C.M. Cuadras and J. Fortiana},
-	title={A Continuous Metric Scaling Solution for A Random Variable},
-	journal={Journal of Multivariate Analysis},
-	volume= 52,
-	year= 1995,
-	pages={1--14}
-}
-
- at article{CuaForOli97,
-	author={C.M. Cuadras and J. Fortiana and F. Oliva},
-	title={The Proximity of an Individual to a Population with Applications to Discriminant
-		Analysis},
-	journal={Journal of Classification},
-	volume= 14,
-	year= 1997,
-	pages={117-136}
-}
-
- at article{CumMcKWei03,
-	author={Peter Cummings and B. McKnight and NS Weiss},
-	title={Matched-pair cohort methods in traffic crash research},
-	journal={Accident Analysis and Prevention},
-	volume= 35,
-	year= 2003,
-	pages={131--141},
-	note={{http://depts.washington.edu/hiprc/about/topics/web/bike\_prevmat/}}
-}
-
- at article{CumRoy88,
-	author={W.G. Cumberland and R. M. Royall},
-	title={Does Simple Random Sampling Provide Adequate Balance?},
-	journal={Journal of the Royal Statistical Society. Series B (Methodological)},
-	volume={50},
-	year={1988},
-	pages={118-124},
-	number={1}
-}
-
- at article{Dagostino98,
-	author={Ralph B. {D'Agostino, Jr.}},
-	title={Propensity Score Methods for Bias Reduction in the Comparison of a Treatment
-		to a Non-randomized Control Group},
-	journal={Statistics in Medicine},
-	volume= 17,
-	year= 1998,
-	pages={2265--2281}
-}
-
- at article{DagRub00,
-	author={Ralph B. {D'Agostino, Jr.} and Donald B. Rubin},
-	title={Estimating and using propensity scores with partially missing data},
-	journal={Journal of the American Statistical Association},
-	volume= 95 ,
-	year= 2000,
-	pages={749-759}
-}
-
- at article{DalBecHuc98,
-	author={Russell J. Dalton and Paul A. Beck and Robert Huckfeldt},
-	title={Partisan Cues and the Media: Information Flows in the 1992 Presidential
-		election},
-	journal={American Political Science Review},
-	volume={92},
-	year={1998},
-	pages={111-126}
-}
-
- at article{DanGraStu96,
-	author={I. Danel and W. Graham and P Stupp and P. Castillo},
-	title={Applying the sisterhood method for estimating maternal mortality to a health
-		facility-based sample: a comparison with results from a household-based
-		sample},
-	journal={International Journal of Epidemiology},
-	volume= 25,
-	year= 1996,
-	pages={1017--1022}
-}
-
- at inbook{DanLaf05,
-	author={Isabel Danel and Gerard M. La Forgia},
-	title={Health Systems Innovation in Central America},
-	chapter={Contracting for Basic Health Care in Rural Guatemala - Comparison of the
-		Performance of Three Delivery Models},
-	year={2005},
-	publisher={The World Bank},
-	address={Washington, DC},
-	editor={Gerard M. La Forgia}
-}
-
- at unpublished{DasChe01,
-	author={Sanjiv R. Das and Mike Y. Chen},
-	title={Yahoo! for Amazon: Opinion Extraction from Small Talk on the Web},
-	note={Department of Finance Santa Clara University},
-	year={2001},
-	month={August}
-}
-
- at article{DasKleiKlei94,
-	author={Erik J. Dasbach and Ronald Klein and Barbara E. K. Klein and Scot E. Moss},
-	title={Self-Rated Health and Mortality in People with Diabetes},
-	journal={American Journal of Public Health},
-	volume= 84,
-	year= 1994,
-	pages={{1775-79}}
-}
-
- at article{DavAlbCoo03,
-	author={G.L. Davis and J.E. Albright and S.F. Cook and D.M. Rosenberg},
-	title={Projecting Future Complications of Chronic Hepatitis C in the United States},
-	journal={Liver Transplantation},
-	volume= 9,
-	year= 2003,
-	pages={331--338},
-	number= 4
-}
-
- at unpublished{DavLawPen03,
-	author={Kushal Dave and Steve Lawrence and David Pennock},
-	title={Mining the Peanut Gallery: Opinion Extraction and Semantic Classification
-		of Product Reviews},
-	note={Kushal Dave NEC Laboratories America, 4 Independence Way Princeton, NJ 08540,
-		Kushal at nec-labs.com},
-	year={2003},
-	month={May}
-}
-
- at article{DavManLai98,
-	author={Huw Talfryn Oakley Davies and Manouche Tavakoli and Iain Kinloch Crombie},
-	title={Authors' Reply},
-	journal={British Medical Journal},
-	volume= 317,
-	year= 1998,
-	pages={1156-7},
-	month={October 24}
-}
-
- at article{DavNeaWen98,
-	author={G. Davey-Smith and J.D. Neaton and D. Wentworth and R. Stamler and J. Stamler},
-	title={Mortality differences between black and white men in the USA: contribution
-		of income and other risk factors among men screened for the Multiple Risk
-		Factor Intervention Trial (MRFIT)},
-	journal= lan,
-	volume= 351,
-	year= 1998,
-	pages={934--939},
-	number= 9107
-}
-
- at article{DawFauMee89,
-	author={Robyn M. Dawes and David Faust and Paul E. Meehl},
-	title={Clinical Versus Actuarial Judgement},
-	journal={Science},
-	volume={243},
-	year={1989},
-	pages={1668-1674},
-	month={March},
-	number={4899}
-}
-
- at article{Dawid00,
-	author={Philip Dawid},
-	title={Causal Inference Without Counterfactuals (with discussion)},
-	journal= jasa,
-	volume= 95,
-	year= 2000,
-	pages={447-448}
-}
-
- at incollection{Dawid83,
-	author={A. P. Dawid},
-	title={Invariant Prior Distributions},
-	booktitle={Encyclopedia of Statistical Sciences},
-	publisher={Wiley-Interscience},
-	year= 1983,
-	editor={S. Kotz and S. Johnson and C.B. Read},
-	pages={228--236},
-	volume= 4
-}
-
- at inbook{DeaGro00,
-	author={Angus Deaton and Margaret Grosh},
-	title={Consumption},
-	chapter={5},
-	year={2000},
-	publisher={The World Bank},
-	pages={91-133},
-	volume={1},
-	series={Designing household survey questionnaires for developing countries: lessons
-		from fifteen years of the Living Standards Measurement Study}
-}
-
- at techreport{DeaPax04,
-	author={Angus Deaton and Christina Paxson},
-	title={Mortality, Income, and Income Inequality Over Time in Britain and the
-		United States},
-	institution={National Bureau of Economic Research},
-	year= 2004,
-	address={Cambridge, MA},
-	number= 8534,
-	note={{http://www.nber.org/papers/w8534}}
-}
-
- at unpublished{DebKee05,
-	author={Suzanna De Boef and Luke Keele},
-	title={Revisiting Dynamic Specification},
-	note={DeBoef: Dept. of Political Science; PA State University, State College,
-		PA 16802; 814-863-9402 sdeboef at psu.edu},
-	year={2005},
-	month={July}
-}
-
- at book{DeBoor78a,
-	author={C. de Boor},
-	title={A Practical Guide to Splines},
-	publisher={Springer-Verlag},
-	year={1978},
-	address={New York}
-}
-
- at article{DeeBat03,
-	author={Dorly J. H. Deeg and Peter A. Bath },
-	title={Self-rated health, gender, and mortality in older persons: Introduction
-		to a special section},
-	journal={The Gerontologist},
-	volume={43},
-	year={2003},
-	pages={{369-71}},
-	number={3}
-}
-
- at article{DeeKey04,
-	author={Thomas S. Dee and Benjamin J. Keys},
-	title={Does Merit Pay Reward Good Teachers? Evidence from a Randomized Experiment},
-	journal={Journal of Policy Analysis and Management},
-	volume={23},
-	year={2004},
-	pages={471-488},
-	number={3}
-}
-
- at article{Deeks98,
-	author={Jon Deeks},
-	title={Odds Ratio Should be Used Only in Case-Control Studies and Logistic Regression
-		Analyses},
-	journal={British Medical Journal},
-	volume= 317,
-	year= 1998,
-	pages={1155-6},
-	month={October 24}
-}
-
- at article{DeeZonMaa89,
-	author={Dorly J. H. Deeg and others},
-	title={Medical and Social Predictors of Longevity in the Elderly: Total Predictive
-		Value and Interdependence},
-	journal={Social Science and Medicine},
-	volume= 29,
-	year= 1989,
-	pages={{1271-80}},
-	number= 11
-}
-
- at article{Dehejia05,
-	author={Rajeev Dehejia},
-	title={Practical Propensity Score Matching: A Reply to Smith and Todd},
-	journal={Journal of Econometrics},
-	volume={125},
-	year={2005},
-	pages={355-364}
-}
-
- at article{DehWah02,
-	author={Rajeev H. Dehejia and Sadek Wahba},
-	title={Propensity Score Matching Methods for Non-Experimental Causal Studies},
-	journal={Review of Economics and Statistics},
-	volume={84},
-	year={2002},
-	pages={151-161},
-	number={1}
-}
-
- at article{DehWah99,
-	author={Rajeev H. Dehejia and Sadek Wahba},
-	title={Causal Effects in Nonexperimental Studies: Re-Evaluating the Evaluation
-		of Training Programs},
-	journal={Journal of the American Statistical Association},
-	volume={94},
-	year={1999},
-	pages={1053-62},
-	month={December},
-	number={448}
-}
-
- at article{deMGelGry03,
-	author={Scott de Marchi and Christopher F. Gelpi and Jeffrey D. Grynaviski},
-	title={Untangling Neural Nets},
-	journal= apsr,
-	year={2003},
-	note={Forthcoming}
-}
-
- at article{DemLaiRub77,
-	author={Arthur P. Dempster and N.M. Laird and D.B. Rubin},
-	title={Maximum Likelihood Estimation from Incomplete Data via the EM Algorithm},
-	journal={Journal of the Royal Statistical Society, Series B},
-	volume={39},
-	year={1977},
-	pages={1-38}
-}
-
- at book{DeMoivere1725,
-	title={Annuities on Lives},
-	year= 1725,
-	author={Abraham de Moivre},
-	address={London}
-}
-
- at book{Derthick79,
-	author={Martha Derthick},
-	title={Policymaking for Social Security},
-	publisher={The Brookings Institution},
-	year={1979},
-	address={Washington, DC}
-}
-
- at article{DeuBufPoy99,
-	author={Sylvie Deuffic and Laurent Buffat and Thierry Poynard and Alain-Jacques
-		Valleron},
-	title={Modeling the Hepatitis C Virus Epidemic in France},
-	journal={Hepatology},
-	volume= 29,
-	year= 1999,
-	pages={1596--1601},
-	number= 5
-}
-
- at book{DevLor93,
-	author={R. DeVore and G. Lorentz},
-	title={Constructive Approximation},
-	publisher={Springer-Verlag},
-	year= 1993,
-	address={New York}
-}
-
- at article{DewThuAnd86,
-	author={William G. Dewald and Jerry G. Thursby and Richard G. Anderson},
-	title={Replication in Empirical Economics: The Journal of Money, Credit and Banking
-		Project},
-	journal={American Economic Review},
-	volume={76},
-	year={1986},
-	pages={587-603},
-	month={September},
-	number={4}
-}
-
- at article{Diamond86,
-	author={Diamond, A.M.},
-	title={{What is a citation worth}},
-	journal={Journal of Human Resources},
-	volume={21},
-	year={1986},
-	pages={200--215},
-	number={2}
-}
-
- at misc{DiaSek05,
-	author={Alexis Diamond and Jasjeet Sekhon},
-	title={Genetic Matching for Estimating Causal Effects: A New Method of Achieving
-		Balance in Observational Studies},
-	year= 2005 ,
-	howpublished={{http://jsekhon.fas.harvard.edu/}}
-}
-
- at article{DieGodYu07,
-	author={Daniel Diermeier and Jean-Fran{\c{c}}ois Godbout and Bei Yu and Stefan Kaufmann},
-	title={Language and Ideology in Congress},
-	year={2007},
-	note={Corresponding author, d-diermeier at kellogg.northwestern.edu}
-}
-
- at article{DieMarKoe95,
-	author={Paula Diehr and Donald C. Martin and Thomas Koepsell and Allen Cheadle},
-	title={Breaking the Matches in a Paired t-Test for Community Interventions When
-		the Number of Pairs is Small},
-	journal={Statistics in Medicine},
-	volume={14},
-	year={1995},
-	pages={1491-1504}
-}
-
- at article{Dinh02,
-	author={Viet D. Dinh},
-	title={Freedom and Security After September 11},
-	journal={Harvard Journal of Law and Public Policy},
-	volume= 25,
-	year={2002},
-	pages={399--??}
-}
-
- at unpublished{DinMaz02,
-	author={L. Dini and G. Mazzini},
-	title={Opinion Classification Through Information Extraction},
-	note={Turin, Italy},
-	year={2002}
-}
-
- at article{DipGan04,
-	author={Thomas A. DiPrete and Markus Gangl},
-	title={Assessing Bias in the Estimation of Causal Effects: Rosenbaum Bounds on
-		Matching Estimators and Instrumental Variables Estimation with Imperfect
-		Instruments},
-	journal={Sociological Methodology},
-	volume={34},
-	year={2004},
-	pages={271-310},
-	month={December}
-}
-
- at article{DocWei03,
-	author={Henry V. Doctor and Alexander A. Weinreb},
-	title={Estimation of AIDS adult mortality by verbal autopsy in rural Malawi},
-	journal={AIDS},
-	volume={17},
-	year={2003},
-	pages={2509-2513}
-}
-
- at article{DonDon87,
-	author={Allan Donner and A. Donald},
-	title={Analysis of data arising from a stratified design with the cluster as unit of randomization},
-	journal={Statistics in Medicine},
-	volume={6},
-	year={1987},
-	pages={43-52}
-}
- at techreport{Dong04,
-	author={Lauren Bin Dong},
-	title={{The Behrens-Fisher Problem: An Empirical Likelihood Approach}},
-	institution={University of Victoria},
-	year= 2004,
-	key={Econometric Working Paper}
-}
-
- at article{DonHau89,
-	author={Allan Donner and W. Hauck},
-	title={Estimation of a common odds ratio in paired-cluster randomization designs},
-	journal={Statistics in Medicine},
-	volume={8},
-	year={1989},
-	pages={599-607}
-}
-
- at book{DonKla00,
-	author={Allan Donner and Neil Klar},
-	title={Design and Analysis of Cluster Randomization Trials in Health Research},
-	publisher={Arnold},
-	year={2000},
-	address={London}
-}
-
- at article{DonKla93,
-	author={Allan Donner and Neil Klar},
-	title={Confidence Interval Construction for Effect Measures Arising from Cluster
-		Randomization Trials},
-	journal={Journal of Clinical Epidemiology},
-	volume={46},
-	year={1993},
-	pages={123-131},
-	number={2}
-}
-
- at article{Donner87,
-	author={Allan Donner},
-	title={Statistical Methodology for Paired Cluster Designs},
-	journal={American Journal of Epidemiology},
-	volume={126},
-	year={1987},
-	pages={972-979},
-	number={5}
-}
-
- at article{Doorn98,
-	author={Carol van Doorn},
-	title={Spouse-Rated Limitations and Spouse-Rated Life Expectancy as Mortality Predictors},
-	journal={Journal of Gerontology: Social Sciences},
-	volume={53B},
-	year= 1998,
-	pages={S137-143},
-	number= 3
-}
-
- at techreport{DooTra90,
-	author={Fred Doolittle and Linda Traeger},
-	title={Implementing the National JTPA Study},
-	institution={Manpower Demonstration Research Corporation},
-	year={1990},
-	month={April},
-	address={New York}
-}
-
- at article{Dorsen89,
-	author={Norman Dorsen},
-	title={Here and There: Foreign Affairs and Civil Liberties},
-	journal= ajil,
-	volume= 83,
-	year={1989},
-	pages={840--??}
-}
-
- at article{DowMan90,
-	author={John E. Dowd and Kenneth G. Manton},
-	title={Forecasting Chronic Disease Risks in Developing Countries},
-	journal={International Journal of Epidemiology},
-	volume= 19,
-	year= 1990,
-	pages={1019--1036},
-	month={May},
-	number= 4
-}
-
- at book{Doyle06,
-	author={Sir Arthur Conan Doyle},
-	title={A Study in Scarlet},
-	publisher={Adamant Media Corporation},
-	year= 1888
-}
-
- at article{DoySam00,
-	author={Michael W. Doyle and Nicholas Sambanis},
-	title={International Peacebuilding},
-	journal= apsr,
-	volume= 94,
-	year= 2000,
-	pages={779--801},
-	month={December},
-	number= 4
-}
-
- at book{DozSch98,
-	title={Roads not Taken: Tales of Alternative History},
-	publisher={Del Rey},
-	year= 1998,
-	editor={Gardner Dozois and Stanley Schmidt},
-	address={New York}
-}
-
- at article{Drake93,
-	author={C. Drake},
-	title={Effects of misspecification of the propensity score on estimators of treatment
-		effects},
-	journal={Biometrics},
-	volume={49},
-	year={1993},
-	pages={1231-1236}
-}
-
- at unpublished{DreFar04,
-	author={Daniel W. Drezner and Henry Farrell},
-	title={The Power and Politics of Blogs},
-	note={American Political Science Association, Chicago, Illinois},
-	year={2004},
-	month={August}
-}
-
- at book{Dueve95,
-	author={Christian de Duve},
-	title={Vital Dust},
-	publisher={Basic Books},
-	year= 1995
-}
-
- at article{DunDav53,
-	author={Duncan, O. D. and Davis, B.},
-	title={An Alternative to Ecological Correlation},
-	journal={American Sociological Review},
-	volume= 18,
-	year= 1953,
-	pages={665-666}
-}
-
- at unpublished{DurRicWar03,
-	author={Stephen D. Durbin and J. Neal Richter and Doug Warner},
-	title={A System for Affective Rating of Texts},
-	note={RightNow Technologies, Bozeman, MT},
-	year={2003}
-}
-
- at article{Easterlin03,
-	author={Richard A. Easterlin},
-	title={Explaining happiness},
-	journal={PNAS},
-	volume={100},
-	year={2003},
-	pages={11176-11183},
-	month={September},
-	number={19}
-}
-
- at book{Edwards72,
-	author={A.W.F. Edwards},
-	title={Likelihood},
-	publisher={Cambridge University Press},
-	year= 1972,
-	address={New York}
-}
-
- at article{Edwards91,
-	author={Harry T. Edwards},
-	title={The Judicial Function and the Elusive Goal of Principled Decisionmaking},
-	journal={Wisconsin Law Review},
-	volume= 1991,
-	year={1991},
-	pages={837--??}
-}
-
- at article{Efron01,
-	author={Brad Efron},
-	title={[Statistical Modeling: The Two Cultures]: Comment},
-	journal={Statistical Science},
-	volume={16},
-	year={2001},
-	pages={218-219},
-	month={August},
-	number={3}
-}
-
- at article{Efron79,
-	author={B. Efron},
-	title={{Bootstrap methods: another look at the jackknife}},
-	journal={Annals of Statistics},
-	volume= 7,
-	year= 1979,
-	pages={1--26}
-}
-
- at book{Efron82,
-	author={B. Efron},
-	title={{The Jackknife, the Bootstrap, and Other Resampling Plans}},
-	publisher={SIAM},
-	year= 1982,
-	address={Philadelphia}
-}
-
- at article{Efron87,
-	author={B. Efron},
-	title={Empirical Bayes Confidence Intervals Based on Bootstrap Samples: Comment},
-	journal={Journal of the American Statistical Association},
-	volume={82},
-	year={1987},
-	pages={754},
-	month={September},
-	number={399}
-}
-
- at article{Efron94,
-	author={Bradley Efron},
-	title={Missing Data, Imputation, and the Bootstrap},
-	journal={Journal of the American Statistical Association},
-	volume={89},
-	year={1994},
-	pages={463-475},
-	month={June},
-	number={426}
-}
-
- at article{Efron94b,
-	author={Bradley Efron},
-	title={Missing Data, Imputation, and the Bootstrap: Rejoinder},
-	journal={Journal of the American Statistical Association},
-	volume={89},
-	year={1994},
-	pages={478-479},
-	month={June},
-	number={426}
-}
-
- at book{EfrTib93,
-	author={B. Efron and R. Tibshirani},
-	title={{An Introduction to the Bootstrap}},
-	publisher={Chapman and Hall},
-	year= 1993,
-	address={London}
-}
-
- at book{Einstein20,
-	author={Albert Einstein},
-	title={Relativity: The Special and General Theory},
-	publisher={Henry Holt},
-	year= 1920,
-	address={NY}
-}
-
- at article{EisLaz38,
-	author={P. Eisenberg and Paul F. Lazarsfeld},
-	title={The psychological effects of unemployment},
-	journal={Psychological Bulletin},
-	volume= 35,
-	year= 1938,
-	pages={358--390}
-}
-
- at article{ElbGilWu05,
-	author={Nabila El-Bassel and Louisa Gilbert and Elwin Wu and Hyun Go and Jennifer
-		Hill},
-	title={Relationship between drug abuse and intimate partner violence: A longitudinal
-		study among women receiving methadone},
-	journal={American Journal of Public Health},
-	volume={95},
-	year={2005},
-	pages={465-470},
-	month={March},
-	number={3}
-}
-
- at article{Elekes86,
-	author={G. Elekes},
-	title={A Geometric Inequality and the Complexity of Computing Volume},
-	journal={Discrete \& Computational Geometry},
-	volume={1},
-	year={1986},
-	pages={289-292}
-}
-
- at book{Elster00,
-	author={Jon Elster},
-	title={Ulysses unbound: studies in rationality, precommitment, and constraints},
-	publisher={Cambridge University Press},
-	year={2000},
-	address={New York}
-}
-
- at inbook{Elster79,
-	author={Jon Elster},
-	title={Ulysses and the Sirens: studies in rationality and irrationality},
-	chapter={II Imperfect Rationality: Ulysses and the Sirens},
-	year={1979},
-	publisher={Cambridge University Press},
-	pages={36 - 111},
-	address={Cambridge}
-}
-
- at article{Emerson68,
-	author={Thomas I. Emerson},
-	title={Freedom of Expression in Wartime},
-	journal={University of Pennsylvania Law Review},
-	volume= 116,
-	year={1968},
-	pages={975--1011}
-}
-
- at book{Emerson70,
-	author={Thomas I. Emerson},
-	title={The System of Freedom of Expression},
-	publisher={Vintage},
-	year= 1970,
-	address={New York}
-}
-
- at book{Enders04,
-	author={Walter Enders},
-	title={Applied Econometric Time Series},
-	publisher={Wiley},
-	year={2004},
-	edition={2nd}
-}
-
- at book{EneHin84,
-	author={James M. Enelow and Melvin J. Hinich},
-	title={The Spatial Theory of Voting: An Introduction},
-	publisher={Cambridge University Press},
-	year= 1984,
-	address={New York}
-}
-
- at article{EoPre92,
-	author={Irma T. Elo and Samuel H. Preston},
-	title={Effects of Early-Life Conditions on Adult Mortality: A Review},
-	journal={Population Index},
-	volume={58},
-	year={1992},
-	pages={186-212},
-	month={Summer},
-	number={2}
-}
-
- at article{EpsGonWei01,
-	author={S.A. Epstein and J.J. Gonzales and K. Weinfurt and B. Bockeloo and N. Yuan
-		and G. Chase},
-	title={Are Psychiatrists' Characteristics Related to How They Care for Depression
-		in the Medically Ill? Results from a National Case-Vignette Study},
-	journal={Psychosomatics},
-	volume= 42,
-	year= 2001,
-	pages={482--489},
-	month={Nov.--Dec.},
-	number= 6
-}
-
- at unpublished{EpsOha05,
-	author={David L. Epstein and Sharyn O'Halloran},
-	title={Higher-Order Markov Models},
-	note={Columbia University},
-	year={2005}
-}
-
- at book{EriMacSti02,
-	author={Robert S. Erikson and Michael B. MacKuen and James A. Stimson},
-	title={The Macro Polity},
-	publisher={Cambridge University Press},
-	year= 2002,
-	address={New York}
-}
-
- at article{EriUndElo01,
-	author={Ingeborg Eriksson and Anna-Lena Unden and Stig Elofsson},
-	title={Self-Rated Health. Comparisons Between Three Different Measures. Results
-		from a Population Study.},
-	journal={International Journal of Epidemiology},
-	volume= 30,
-	year= 2001,
-	pages={{326-33}}
-}
-
- at book{EstGolGur95,
-	author={Daniel C. Esty and Jack Goldstone and Ted Robert Gurr and Pamela T. Surko
-		and Alan N. Unger},
-	title={State Failure Task Force Report},
-	publisher={Science Applications International Corporation},
-	year= 1995,
-	address={McLean, Virginia}
-}
-
- at book{EstGolGur98,
-	author={Daniel C. Esty and Jack Goldstone and Ted Robert Gurr and Barbara Harff
-		and Pamela T.\ Surko and Alan N.\ Unger and Robert S. Chen },
-	title={The State Failure Task Force Report: Phase II Findings},
-	publisher={Science Applications International Corporation},
-	year= 1998,
-	address={McLean, Virginia}
-}
-
- at incollection{EstGolGur98b,
-	author={Daniel C. Esty and Jack Goldstone and Ted Robert Gurr and Barbara Harff
-		and Pamela T.\ Surko and Alan N.\ Unger and Robert S.\ Chen},
-	title={The State Failure Project: Early Warning Research for U.S. Foreign Policy
-		Planning},
-	booktitle={Preventive Measures: Building Risk Assessment and Crisis Early Warning Systems},
-	publisher={Rowman and Littlefield},
-	year={1998b},
-	address={Lanham, Maryland},
-	editor={John L. Davies and Ted Robert Gurr}
-}
-
- at article{EstGolGur99,
-	author={Daniel C. Esty and Jack Goldstone and Ted Robert Gurr and Barbara Harff
-		and Marc Levy and Geoffrey D.\ Dabelko and Pamela T.\ Surko and Alan N.\ Unger},
-	title={The State Failure Report: Phase II Findings},
-	journal={Environmental Change and Security},
-	volume= 5,
-	year= 1999,
-	month={Summer}
-}
-
- at article{EtaLehDia04,
-	author={Jean-Francois Etard and Jean-Yves Le Hesran and Aldiouma Diallo and Jean-Pierre
-		Diallo and Jean-Louis Ndiaye and Valerie Delaunay},
-	title={Childhood mortality and probable causes of death using verbal autopsy in
-		Niakhar, Senegal, 1989-2000},
-	journal={International Journal of Epidemiology},
-	volume={33},
-	year={2004},
-	pages={1286-1292}
-}
-
- at book{Eubank88,
-	author={R.L. Eubank},
-	title={Spline Smoothing and Nonparametric Regression},
-	publisher={Marcel Dekker},
-	year={1988},
-	volume={90},
-	address={Basel},
-	series={Statistics, textbooks and monographs}
-}
-
- at article{Eule87,
-	author={Julian N. Eule},
-	title={Temporal Limits on the Legislative Mandate: Entrenchment and Retroactivity},
-	journal={American Bar Foundation Research Journal},
-	volume={12},
-	year={1987},
-	pages={379-459},
-	number={2/3}
-}
-
- at book{Everitt05,
-	author={Brian Everitt},
-	title={An R and S-Plus Companion to Multivariate Analysis},
-	publisher={Springer-Verlag},
-	year={2005},
-	address={London}
-}
-
- at inproceedings{EzzJohKha95,
-	author={T. Ezzati-Rice and W. Johnson and M. Khare and R. Little and D. Rubin and
-		J. Schafer},
-	title={A Simulation Study to Evaluate the Performance of Model-Based Multiple Imputations
-		in NCHS Health Examination Surveys},
-	booktitle={Proceedings of the Annual Research Conference},
-	year={1995},
-	address={Washington, D.C.},
-	pages={257-266},
-	organization={Bureau of the Census}
-}
-
- at manual{FalHae89,
-	author={J{\"u}rgen W. Falter and Dirk H{\"a}nisch},
-	title={Wahl- und Sozialdaten der Kreise und Gemeinden des Deutschen Reiches von
-		1920 bis 1933},
-	organization={Zentralarchiv f{\"u}r Empirische Sozialforschung},
-	year= 1989,
-	address={Universit{\"a}t zu K{\"o}ln},
-	note={ZA number 8013}
-}
-
- at article{FalHan99,
-	author={J{\"u}rgen W. Falter and Dirk H{\"a}nisch},
-	title={Wahlerfolge und W{\"a}hlerschaft der NSDAP in {\"O}sterreich von 1927 bis 1932},
-	journal={Zeitgeschichte},
-	volume= 15,
-	year= 1988,
-	pages={223-244},
-	number= 6
-}
-
- at article{FalLohLin85,
-  author =	 {J{\"u}rgen W. Falter and Jan-Bernd Lohm{\"o}ller and
-                  Andreas Link and Johann de Rijke},
-  title =	 {Hat Arbeitslosigkeit tats{\"a}chlich den Aufstieg
-                  des Nationalsozialismus bewirkt?},
-  journal =	 {Jahrbuch f{\"u}r National{\"o}konomie und Statistik},
-  volume =	 200,
-  year =	 1985,
-  pages =	 {121-136},
-  number =	 2
-}
-
- at incollection{Falter90,
-	author={Falter, J{\"u}rgen},
-	title={The First German Volkspartei: The Social Foundations of the NSDAP},
-	booktitle={Elections, Parties and Political Traditions},
-	publisher={Berg},
-	year= 1990,
-	address={M{\"u}nchen},
-	editor={Rohe, K.}
-}
-
- at article{Falter90b,
-	author={Falter, J{\"u}rgen W.},
-	title={Arbeiter haben erheblich h{\"a}ufiger, Angestellte dagegen sehr viel seltener
-		NSDAP gew{\"a}hlt als wir lange Zeit angenommen haben},
-	journal={Geschichte und Gesellschaft},
-	volume= 16,
-	year= 1990,
-	pages={536-552},
-	number= 4
-}
-
- at book{Falter91,
-	author={Falter, J{\"u}rgen},
-	title={Hitlers W{\"a}hler},
-	publisher={Beck},
-	year= 1991,
-	address={M{\"u}nchen}
-}
-
- at article{FalZin88,
-	author={Falter, J{\"u}rgen W. and Zintl, Reinhard},
-	title={The Economic Crisis of the 1930s and the Nazi Vote},
-	journal={Journal of Interdisciplinary History},
-	volume= 19,
-	year= 1988,
-	pages={55-85},
-	number= 1
-}
-
- at article{FanFotBer06,
-	author={Mesganaw Fantahun and Edward Fottrell and Yemane Berhane and Stig Wall and
-		Ulf Hogberg and Peter Byass},
-	title={Assessing a new approach to verbal autopsy interpretation in a rural Ethiopian
-		community: the InterVA model},
-	journal={Bulletin of the World Health Organization},
-	volume={84},
-	year={2006},
-	pages={204-210},
-	month={March},
-	number={3}
-}
-
- at article{Fantahun98,
-	author={Mesganaw Fantahun},
-	title={Patterns of Childhood Mortality in Three Districts of North Gondar Administrative
-		Zone},
-	journal={Ethiopian Medical Journal},
-	volume={36},
-	year={1998},
-	pages={71-81},
-	number={2}
-}
-
- at inproceedings{Fay92,
-	author={Robert E. Fay},
-	title={When are Inferences from Multiple Imputation Valid?},
-	year={1992},
-	pages={354-365},
-	organization={Proceedings of Survey Research Methods Section of the American Statistical
-		Association}
-}
-
- at article{fearon91,
-	author={James D. Fearon},
-	title={Counterfactuals and Hypothesis Testing in Political Science},
-	journal={World Politics},
-	volume= 43,
-	year= 1991,
-	pages={169--195},
-	month={June},
-	number= 2
-}
-
- at article{Feeney01,
-	author={G. Feeney},
-	title={The Impact of HIV/AIDS on Adult Mortality in Zimbabwe},
-	journal={Population and Development Review},
-	volume= 27,
-	year= 2001,
-	pages={771--780},
-	number= 4
-}
-
- at unpublished{Fernandezval05,
-	author={Ivan Fernandez-Val},
-	title={Bias Correction in Panel Data Models with Individual Specific Parameters},
-	note={Boston University},
-	year={2005}
-}
-
- at techreport{FidFreGro92,
-	author={M. Fidrich and J. Frenk and J. Gromicho},
-	title={An efficient algorithm to check whether $0$ belongs to the convex hull of
-		a finite number of {$L_p$}-circles},
-	institution={Econometric Institute},
-	year={1992},
-	type={Report 9204/A},
-	address={Netherlands}
-}
-
- at book{FieMarStr85,
-	author={Stephen E. Fienberg and Margaret E. Martin and Miron L. Straf},
-	title={Sharing Research Data},
-	publisher={National Academy Press},
-	year={1985}
-}
-
- at article{Finkel01,
-	author={N.J. Finkel},
-	title={When Principles Collide in Hard Cases},
-	journal={Psychology, Public Policy, and Law},
-	volume= 7,
-	year= 2001,
-	pages={515--560},
-	month={September},
-	number= 3
-}
-
- at book{Fiorina81,
-	author={Morris P. Fiorina},
-	title={Retrospective Voting in American National Elections},
-	publisher={Yale University Press},
-	year= 1981,
-	address={New Haven}
-}
-
- at article{FisCobVen98,
-	author={Bonnie S. Fisher and Craig T. Cobane and Thomas M. Vander Ven and Francis
-		T. Cullen},
-	title={How Many Authors Does It Take to Publish an Article? Trends and Patterns
-		in Political Science},
-	journal={PS: Political Science and Politics},
-	volume= 31,
-	year= 1998,
-	pages={847--856},
-	number= 4
-}
-
- at article{Fish95,
-	author={Steven M. Fish},
-	title={The Advent of Multipartism in Russia, 1993-95},
-	journal={Post Soviet Affairs},
-	volume={11},
-	year={1995},
-	pages={340-383},
-	number={4}
-}
-
- at book{Fisher35,
-	author={Ronald A. Fisher},
-	title={The Design of Experiments},
-	publisher={Oliver and Boyd},
-	year= 1935,
-	address={London}
-}
-
- at article{Fisher24,
-	author={Ronald A. Fisher},
-	title={The conditions under which $\chi^2$ measures the discrepancy
-		between observation and hypothesis},
-	journal={Journal of the Royal Statistical Society},
-	volume={87},
-	year={1924},
-	pages={442-450}
-}
-
- at article{Pearson00,
-	author={Karl Pearson},
-	title={On a criterion that a given system of deviations from the
-	  probable in the case of a correlated system of variables is such
-	  that it can reasonably be supposed to have arisen
-      from random sampling},
-	journal={Philosophical Magazine},
-	volume={50},
-	year={1900},
-	pages={157-175}
-}
-
-
- at article{FlaBes82,
-	author={Brian R. Flay and J. Allen Best},
-	title={Overcoming Design Problems in Evaluating Health Behavior Programs},
-	journal={Evaluation \& The Health Professions},
-	volume={5},
-	year={1982},
-	pages={43-69},
-	month={March},
-	number={1}
-}
-
- at article{Foote58,
-	author={Richard J. Foote},
-	title={A Modified {D}oolittle Approach for Multiple and Partial Correlation and
-		Regression},
-	journal= jasa,
-	volume= 53,
-	year= 1958,
-	pages={133-143}
-}
-
- at article{ForNorAhm95,
-	author={Ian Ford and John Norrie and Susan Ahmadi},
-	title={Model Inconsistency, Illustrated by the Cox Proportional Hazards Model},
-	journal={Statistics in Medicine},
-	volume={14},
-	year={1995},
-	pages={735-746}
-}
-
- at book{Fortas68,
-	author={Abe Fortas},
-	title={Concerning Dissent and Civil Disobedience},
-	publisher={Signet},
-	year= 1968,
-	address={New York}
-}
-
- at article{Franklin89,
-	author={Charles H. Franklin},
-	title={Estimation across Data Sets: Two-Stage Auxiliary Instrumental Variables
-		Estimation},
-	journal={Political Analysis},
-	volume={1},
-	year={1989},
-	pages={1-23},
-	number={1}
-}
-
- at book{Franzese02,
-	author={R.J. Franzese},
-	title={Macroeconomic policies of developed democracies},
-	publisher={Cambridge University Press},
-	year={2002},
-	address={New York}
-}
-
- at unpublished{FraRub01,
-	author={Constantine E.\ Frangakis and Donald Rubin},
-	title={The Defining Role of Principal Effects in Comparing Treatments Using General
-		Post-Treatment Variables: From Surrogate Endpoints to Censoring by Death},
-	note={\url{http://biosun01.biostat.jhsph.edu/~cfrangak}}
-}
-
- at article{FraRub02,
-	author={Constantine E. Frangakis and Donald B. Rubin},
-	title={Principal stratification in causal inference},
-	journal={Biometrics},
-	volume= 58,
-	year= 2002,
-	pages={21-29}
-}
-
- at article{FraRubZho02,
-	author={Constantine E. Frangakis and Donald B. Rubin and Xiao-Hua Zhou},
-	title={Clustered Encouragement Designs with Individual Noncompliance: Bayesian
-		Inference with Randomization, and Application to Advance Directive Forms},
-	journal={Biostatistics},
-	volume={3},
-	year={2002},
-	pages={147-164},
-	number={2}
-}
-
- at article{FreChrKha05,
-	author={James V. Freeman and Parul Christian and Subarna K. Khatry and Ramesh K.
-		Adhikari and Steven C. LeClerq and Joanne Katz and Gary L. Darmstadt},
-	title={Evaluation of neonatal verbal autopsy using physician review versus algorithm-based
-		cause-of-death assignment in rural Nepal},
-	journal={Paediatric and Perinatal Epidemiology},
-	volume={19},
-	year={2005},
-	pages={323-331}
-}
-
- at article{Freedman08,
-  author =	 {David A. Freedman},
-  year =	 2008,
-  volume =	 40,
-  title =	 {On Regression Adjustments to Experimental Data},
-  journal =	 {Advances in Applied Mathematics},
-  pages =	 {180--193}
-}
-
- at article{Freese07,
-	author={Jeremy Freese},
-	title={Replication Standards for Quantitative Social Science: Why Not Sociology?},
-	journal={Sociological Methods and Research},
-	year={2007, forthcoming}
-}
-
- at article{FreGonGom06,
-	author={Julio Frenk and Eduardo Gonz{\'a}lez-Pier and Octavio G{\'o}mez-Dant{\'e}s and Miguel
-		A. Lezana and Felicia Marie Knaul},
-	title={Comprehensive reform to improve health system performance in Mexico},
-	journal={Lancet},
-	volume={368},
-	year={2006},
-	pages={1524-34},
-	month={October}
-}
-
- at article{FreKleOst98,
-	author={D.A. Freedman and S.P. Klein and M. Ostland and M.R. Roberts},
-	title={Review},
-	journal={Journal of the American Statistical Association},
-	volume= 93,
-	year= 1998,
-	pages={{1518-1522}}
-}
-
- at article{FreMil04,
-	author={Per G. Fredriksson and Daniel L. Millimet},
-	title={Comparative Politics and Environmental Taxation},
-	journal={Journal of Environmental Economics and Management},
-	volume={48},
-	year={2004},
-	pages={705-722}
-}
-
- at techreport{Frenk04,
-	author={Julio Frenk},
-	title={Fair Financing and Universal Social Protection: the structural reform of
-		the Mexican health system},
-	institution={Secretaria de Salud},
-	year={2004},
-	address={Mexico City}
-}
-
- at article{Frenk06,
-	author={Julio Frenk},
-	title={Bridging the divide: global lessons from evidence-based health policy in
-		Mexico},
-	journal={Lancet},
-	volume={368},
-	year={2006},
-	pages={954-61},
-	month={September}
-}
-
- at article{FreSepGom03,
-	author={Julio Frenk and Jamie Sep{\'u}lveda and Octavio G{\'o}mez-Dant{\'e}s and Felicia Knaul},
-	title={{Evidence-based health policy: three generations of reform in Mexico}},
-	journal={The Lancet},
-	volume={362},
-	year={2003},
-	pages={1667--1671},
-	number={9396}
-}
-
- at article{FreWec81,
-	author={Frey, Bruno and Weck, Hannelore},
-	title={Hat Arbeitslosigkeit den Aufstieg des Nationalsozialismus bewirkt?},
-	journal={Jahrbuch f{\"u}r National{\"o}konomie und Statistik},
-	volume= 196,
-	year= 1981,
-	pages={1-31}
-}
-
- at book{Friendly00,
-	author={Michael Friendly},
-	title={Visualizing Categorical Data},
-	publisher={SAS Institute},
-	year={2000}
-}
-
- at unpublished{FriHol05,
-	author={John N. Friedman and Richard T. Holden},
-	title={The Rising Incumbent Advantage: What's Gerrymandering Got to Do With It?},
-	note={Dept. of Economics, Harvard; rholden at fas.harvard.edu},
-	year={2005},
-	month={August}
-}
-
- at article{FriKroNew98,
-	author={Linda P. Fried and Richard A. Kronmal and Anne B. Newman and others},
-	title={Risk Factors for 5-Year Mortality in Older-Adults: The Cardiovascular Health
-		Study},
-	journal={Journal of the American Medical Association},
-	volume= 279,
-	year= 1998,
-	pages={{585-92}}
-}
-
- at book{Fritzsche98,
-	author={Peter Fritzsche},
-	title={Germans into Nazis},
-	publisher={Harvard University Press},
-	year= 1998,
-	address={Cambridge, MA}
-}
-
- at unpublished{Frolich02,
-	author={Markus Fr{\"o}lich},
-	title={What is the Value of Knowing the Propensity Score for Estimating Average
-		Treatment Effects?},
-	note={IZA Discussion Paper 548, University of St. Gallen},
-	year= 2002
-}
-
- at article{Frolich04,
-	author={Markus Fr{\"o}lich},
-	title={Finite Sample Properties of Propensity Score Matching and Weighting Estimators},
-	journal={Review of Economics and Statistics},
-	volume= 86,
-	year= 2004,
-	pages={77--90}
-}
-
- at article{FurLov01,
-	author={A. Furnham and J. Lovett},
-	title={The Perceived Efficacy and Risks of Complementary and Alternative Medicine
-		and Conventional Medicine: A Vignette Study},
-	journal={Journal of Applied Biobehavioral Research},
-	volume= 6,
-	year= 2001,
-	pages={39--63},
-	number= 1
-}
-
- at article{FylFor91,
-	author={Knut Fylkesnes and Olav Helge Forde},
-	title={The Tromso Study: Predictors of Self-Evaluated Health-Has Society Adopted
-		the Expanded Health Concept?},
-	journal={Social Science and Medicine},
-	volume= 32,
-	year= 1991,
-	pages={{141-46}},
-	number= 2
-}
-
- at book{GAD01,
-	author={{Government Actuary's Department}},
-	title={National Population Projections: Review of Methodology for Projecting Mortality},
-	publisher={National Statistics Direct, UK},
-	year= 2001,
-	address={London},
-	note={{http://www.statistics.gov.uk/}}
-}
-
- at article{GaiMarCar96,
-	author={Mitchell H. Gail and Steven D. Mark and Raymond J. Carroll and Sylvan B.
-		Green and David Pee},
-	title={On Design Considerations and Randomization-Based Inference for Community
-		Intervention Trials},
-	journal={Statistics in Medicine},
-	volume={15},
-	year={1996},
-	pages={1069-1092}
-}
-
- at article{GaiWiePia84,
-	author={M.H. Gail and S. Wieand and S. Piantadosi},
-	title={Biased Estimates of Treatment Effect in Randomized Experiments with Nonlinear
-		Regressions and Omitted Covariates},
-	journal={Biometrika},
-	volume={71},
-	year={1984},
-	pages={431-444},
-	month={December},
-	number={3}
-}
-
- at article{GajPet04,
-	author={Vendhan Gajalakshmi and Richard Peto},
-	title={Verbal autopsy of 80,000 adult deaths in Tamilnadu, South India},
-	journal={BMC Public Health},
-	volume={4},
-	year={2004},
-	month={October},
-	number={47}
-}
-
- at article{GajPetKan02,
-	author={Vendhan Gajalakshmi and Richard Peto and Santhanakrishnan Kanaka and Sivagurunathan
-		Balasubramanian},
-	title={Verbal autopsy of 48000 adult deaths attributable to medical causes in Chennai
-		(formerly Madras), India},
-	journal={BMC Public Health},
-	volume={2},
-	year={2002}
-}
-
- at article{GajPetKan02,
-	author={Vendhan Gajalakshmi and Richard Peto and Santhanakrishnan Kanaka and Sivagurunathan
-		Balasubramanian},
-	title={Verbal autopsy of 48000 adult deaths attributable to medical causes in Chennai
-		(formerly Madras), India},
-	journal={BMC Public Health},
-	volume={2},
-	year={2002}
-}
-
- at article{GakHogLop04,
-	author={Emmanuela Gakidou and Margaret Hogan and Alan D Lopez},
-	title={Adult Mortality: Time for a Reappraisal},
-	journal={International Journal of Epidemiology},
-	volume= 33,
-	year= 2004,
-	pages={710-717},
-	number= 4
-}
-
- at article{GakLozGon06,
-	author={Emmanuela Gakidou and Rafael Lozano and Eduardo Gonz{\'a}lez-Pier and Jesse
-		Abbott-Klafter and Jeremy T. Barofsky and Chloe Bryson-Cahn and Dennis
-		M. Feehan and Diana K. Lee and Hector Hern{\'a}ndez-Llamas and Christopher
-		J.L. Murray},
-	title={Assessing the effect of the 2001-06 Mexican health reform: an interim report
-		card},
-	journal={Lancet},
-	volume={368},
-	year={2006},
-	pages={1920-35},
-	month={November}
-}
-
- at book{Gamson92,
-	author={William A. Gamson},
-	title={Talking Politics},
-	publisher={Cambridge University Press},
-	year= 1992,
-	address={New York, NY}
-}
-
- at techreport{GAO94,
-	author={{U.S. General Accounting Office}},
-	title={Breast conservation versus mastectomy: patient survival in day-to-day medical
-		practice and randomized studies: report to the chairman, Subcommittee on
-		Human Resources and Intergovernmental Relations, Committee on Government
-		Operations, House of Representatives},
-	institution={U.S. General Accounting Office},
-	year= 1994,
-	address={Washington, DC},
-	number={Report GAO-PEMD-95-9}
-}
-
- at article{GarFri97,
-	author={M. Garenne and F. Friedberg},
-	title={Accuracy of indirect estimates of maternal mortality: a simulation model},
-	journal={Studies in Family Planning},
-	volume= 28,
-	year= 1997,
-	pages={132--142}
-}
-
- at book{Gasset32,
-	author={Ortega y Gasset, Jos{\'e}},
-	title={The Revolt of the Masses},
-	publisher={G. Allen \& Unwin},
-	year= 1932,
-	address={London}
-}
-
- at article{Gastwirth87,
-	author={J. Gastwirth},
-	title={The statistical precision of medical screening procedures: Application to
-		polygraph and AIDS antibodies test data},
-	journal={Statistical Science},
-	volume={2},
-	year={1987},
-	pages={213-222},
-	number={3}
-}
-
- at book{Geiger32,
-	author={Geiger, Theodor},
-	title={Die soziale Schichtung des deutschen Volkes},
-	publisher={Ferdinand Enke},
-	year={1932},
-	address={Stuttgart}
-}
-
- at book{GelCarSte03,
-	author={Andrew Gelman and J.B. Carlin and H.S. Stern and D.B. Rubin},
-	title={Bayesian Data Analysis, Second Edition},
-	publisher={Chapman \& Hall},
-	year= 2003
-}
-
- at book{GelCarSte95,
-	author={Andrew Gelman and J.B. Carlin and H.S. Stern and D.B. Rubin},
-	title={Bayesian Data Analysis},
-	publisher={Chapman and Hall},
-	year= 1995
-}
-
- at unpublished{GelGri00,
-	author={Christopher Gelpi and Joseph M. Grieco},
-	title={Democracy, Interdependence, and the Liberal Peace},
-	note={{Duke University, http://www.duke.edu/$\sim$gelpi/papers.htm}},
-	year={2000}
-}
-
- at book{GelHil07,
-	author={Andrew Gelman and Jennifer Hill},
-	title={Data Analysis Using Regression and Multilevel/Hierarchical Models},
-	publisher={Cambridge University Press},
-	year= 2007,
-	address={New York}
-}
-
- at unpublished{GelHua04,
-	author={Andrew Gelman and Zaiying Huang},
-	title={Estimating incumbency advantage and its variation, as an example of a before-after
-		study},
-	year={2004},
-	month={October}
-}
-
- at article{Gelman06,
-	author={Andrew Gelman},
-	title={Prior distributions for variance parameters in hierarchical models},
-	journal={Bayesian Analysis},
-	volume={1},
-	year={2006},
-	pages={515-533},
-	number={3}
-}
-
- at article{GelSmi90,
-	author={A.E. Gelfand and A.F.M. Smith},
-	title={Sampling-based approaches to calculating marginal densities},
-	journal={Journal of the American Statistical Association},
-	volume={85},
-	year={1990},
-	pages={398-409}
-}
-
- at article{GemGem84,
-	author={Stuart Geman and Donald Geman},
-	title={Stochastic Relaxation, {G}ibbs Distributions, and the {B}ayesian Restoration
-		of Images},
-	journal={I.E.E.E. Transactions: Pattern Analysis and Machine Intelligence},
-	volume= 6,
-	year= 1984,
-	pages={721-741}
-}
-
- at article{GerGre00,
-	author={Gerber, Alan S. and Green, Donald P.},
-	title={The Effects of Canvassing, Telephone Calls, and Direct Mail on Voter Turnout:
-		A Field Experiment},
-	journal={American Political Science Review},
-	volume= 94,
-	year= 2000,
-	pages={653--663},
-	month={September},
-	number= 3
-}
-
- at inbook{GerGreKap04,
-	author={Alan S. Gerber and Donald P. Green and Edward H. Kaplan},
-	title={The illusion of learning from observational research},
-	chapter={12},
-	year={2004},
-	publisher={Cambridge University Press},
-	pages={251-273},
-	address={Cambridge, United Kingdom},
-	editor={Ian Shapiro and Rogers M. Smith and Tarek E. Masoud}
-}
-
- at article{GerSchFra94,
-	author={Gerner, Deborah J. and Philip A. Schrodt and Ronald A. Francisco and Judith
-		L. Weddle},
-	title={{Machine Coding of Event Data Using Regional and International Sources}},
-	journal={International Studies Quarterly},
-	volume={38},
-	year={1994},
-	pages={91-119},
-	number={1}
-}
-
- at unpublished{Gertler00,
-	author={Paul J. Gertler},
-	title={Final Report: The Impact of PROGRESA on Health},
-	note={International Food Policy Research Institute},
-	year={2000},
-	month={November}
-}
-
- at article{Gertler06,
-	author={P. Gertler},
-	title={Do Conditional Cash Transfers Improve Child Health? Evidence from PROGRESA's
-		Control Randomized Experiment},
-	journal={The American Economic Review: Papers and Proceedings},
-	volume={94},
-	year={2006},
-	pages={336-42},
-	number={2}
-}
-
- at techreport{Geyer05,
-	author={Charles J. Geyer},
-	title={Le Cam Made Simple: Asymptotics of Maximum Likelihood without the LLN or
-		CLT or Sample Size Going to Infinity},
-	institution={University of Minnesota},
-	year={2005},
-	month={May},
-	address={Univ. MN, Twin Cities, School of Statistics}
-}
-
- at article{GhoHutRus03,
-	author={Hazem Ghobarah and Paul Huth and Bruce Russett},
-	title={Civil Wars Kill and Maim People--Long after the Shooting Stops},
-	journal= apsr,
-	volume= 97,
-	year= 2003,
-	pages={189--202},
-	month={May},
-	number= 2
-}
-
- at article{GiaPalCap01,
-	author={S. Giampaoli and L. Palmieri and R. Capocaccia and L. Pilotto and D. Vanuzzo},
-	title={Estimating Population-based Incidence and Prevalence of Major Coronary Events},
-	journal={International Journal of Epidemiology},
-	volume= 30,
-	year= 2001,
-	pages={S5--S10}
-}
-
- at article{Giles06,
-	author={Jim Giles},
-	title={The Trouble with Replication},
-	journal={Nature},
-	volume={442},
-	year={2006},
-	pages={344-347},
-	month={July}
-}
-
- at book{Gilksetal96,
-	title={Markov Chain Monte Carlo in Practice},
-	publisher={Chapman \& Hall},
-	year= 1996 ,
-	editor={W.R. Gilks and S. Richardson and D.J. Spiegelhalter}
-}
-
- at book{Gill02,
-	author={Jeff Gill},
-	title={Bayesian Methods for the Social and Behavioral Sciences},
-	publisher={Chapman and Hall},
-	year= 2002,
-	address={London}
-}
-
- at article{GilWal05,
-  author =	 {Jeff Gill and Lee Walker},
-  title =	 {Elicited Priors for Bayesian Model Specification in
-                  Political Science Research},
-  journal =	 {Journal of Politics},
-  volume =	 67,
-  year =	 2005,
-  pages =	 {841--872},
-  month =	 {August},
-  number =	 3
-}
-
- at unpublished{GimHus06,
-	author={James G. Gimpel and Laura Hussey},
-	title={State of the Journal Market 2006-2007: Political Science},
-	note={Univ. of MD, Dept of Gov 3140 Tydings Hall, College Park, MD 20742},
-	year={2006},
-	month={February}
-}
-
- at techreport{Girosi91,
-	author={F. Girosi},
-	title={Models of noise and robust estimates},
-	institution= mitai,
-	year={1991},
-	type={A.I. Memo},
-	number={1287},
-	note={ftp://publications.ai.mit.edu/ai-publications/pdf/AIM-1287.pdf}
-}
-
- at article{GlaLevMye03,
-	author={Steve Glazerman and Dan M. Levy and David Myers},
-	title={Nonexperimental versus experimental estimates of earnings impacts},
-	journal={The Annals of the American Academy of Political and Social Science},
-	volume= 589,
-	year= 2003,
-	pages={63-93},
-	month={September}
-}
-
- at article{GlaMayDec06,
-	author={Steven Glazerman and Daniel Mayer and Paul Decker},
-	title={Alternative Routes to Teaching: The Impacts of Teach for America on Student
-		Achievement and other Outcomes},
-	journal={Journal of Policy Analysis and Management},
-	volume={25},
-	year={2006},
-	pages={75-96},
-	number={1}
-}
-
- at article{Gleditsch02,
-	author={Kristian Skrede Gleditsch},
-	title={Expanded Trade and GDP Data},
-	journal={Journal of Conflict Resolution},
-	volume={46},
-	year={2002},
-	pages={712-724},
-	month={October},
-	number={5}
-}
-
- at article{GleJamRay03,
-	author={Nils Petter Gleditsch and Patrick James and James Lee Ray and Bruce Russett},
-	title={Editors' Joint Statement: Minimum Replication Standards for International
-		Relations Journals},
-	journal={International Studies Perspectives},
-	volume= 4,
-	year= 2003,
-	pages={105}
-}
-
- at article{GleMetStr03,
-	author={Nils Petter Gleditsch and Claire Metelits and Havard Strand},
-	title={Posting Your Data: Will You be Scooped or Will You be Famous?},
-	journal={International Studies Perspectives},
-	volume= 4,
-	year= 2003,
-	pages={89--97}
-}
-
- at unpublished{Globetti97,
-	author={Suzanne Globetti},
-	title={What We Know About 'Don't Knows': An Analysis of Seven Point Issue Placements},
-	note={Paper presentated at the annual meetings of the Political Methodology Society,
-		Columbus, OH},
-	year={1997}
-}
-
- at book{GoeZel86,
-	author={Prem K. Goel and Arnold Zellner},
-	title={Bayesian Inference and Decision Techniques: Essays in Honor of Bruno de
-		Finetti},
-	publisher={Elsevier Science Publishers B.V.},
-	year={1986},
-	volume={6},
-	editor={Prem K. Goel and Arnold Zellner}
-}
-
- at book{Goldberger91,
-	author={Arthur Goldberger},
-	title={A Course in Econometrics},
-	publisher={Harvard University Press},
-	year= 1991
-}
-
- at article{GolFraEri96,
-	author={Marthe Gold and Peter Franks and Pennifer Erickson},
-	title={Assessing the Health of the Nation. The Predictive Validity of a Preference-Based
-		Measure and Self-Rated Health},
-	journal={Medical Care},
-	volume= 34,
-	year= 1996,
-	pages={{163-77}},
-	number= 2
-}
-
- at article{GolHeaWah79,
-	author={G. Golub and M. Heath and G. Wahba},
-	title={Generalized cross validation as a method for choosing a good ridge parameter},
-	journal={Technometrics},
-	volume= 21,
-	year= 1979,
-	pages={215--224}
-}
-
- at article{GolIdn83,
-	author={D. Goldfarb and A. Idnani},
-	title={A Numerically Stable Dual Method for Solving Strictly Convex Quadratic Programs},
-	journal={Mathematical Programming},
-	volume={27},
-	year={1983},
-	pages={1-33}
-}
-
- at book{GolJudMil96,
-	author={Amos Golan and George Judge and Doug Miller},
-	title={Maximum Entropy Econometrics: Robust Estimation With Limited Data},
-	publisher={John Wiley and Sons},
-	year= 1996
-}
-
- at article{GolLan92,
-	author={Larry Goldstein and Bryan Langholz},
-	title={Asymptotic Theory for Nested Case-Control Sampling in the Cox Regression
-		Model},
-	journal={The Annals of Statistics},
-	volume= 20,
-	year= 1992,
-	pages={1903-1928},
-	number= 4
-}
-
- at article{GolPhiCox01,
-	author={Lee Goldman and Kathryn A. Phillips and Pamela Coxson and Paula A. Goldman
-		and Lawrence Williams and M.G. Myriam Hunink and Milton C. Weinstein},
-	title={The Effect of Risk Factor Reductions Between 1981-1990 on Coronary Heart
-		Disease Incidence, Prevalence, Mortality, and Cost},
-	journal={Journal of the American College of Cardiology},
-	volume={38},
-	year= 2001,
-	pages={1012--1017},
-	number= 4
-}
-
- at article{GolSchMcC02,
-	author={J. Goldie and L. Schwartz and A. McConnachie and J. Morrison},
-	title={The Impact of Three Years' Ethics Teaching, in an Integrated Medical Curriculum,
-		on Students' Proposed Behavior on Meeting Ethical Dilemmas},
-	journal={Medical Education},
-	volume= 36,
-	year= 2002,
-	pages={489--497},
-	month={May},
-	number= 5
-}
-
- at article{GomGarLop99,
-	author={Octavio G{\'o}mez-Dant{\'e}s and Francisco Garrido-Latorre and Sergio L{\'o}pez-Moreno
-		and Blanca Villa and Malaqu{\'i}as L{\'o}pez-Cervantes},
-	title={Assessment of the health program for the non-insured population},
-	journal={Revista de Sa{\'u}de P{\'u}blica},
-	volume={33},
-	year={1999},
-	pages={401-412},
-	number={4},
-	note={Evaluaci{\'o}n de programa de salud para poblaci{\'o}n no asegurada}
-}
-
- at article{Gompertz1825,
-	author={B. Gompertz},
-	title={On the Nature of the Function Expressive of the Law of Mortality},
-	journal={Philosophical Transactions},
-	volume= 27,
-	year= 1825,
-	pages={513--585}
-}
-
- at article{GonChaLev02,
-	author={Jeffrey S. Gonzales and Gretchen B. Chapman and Howard Leventhal},
-	title={Gender Differences in the Factors that Affect Self-Assessments of Health},
-	journal={Journal of Applied Behavioral Research},
-	volume= 7,
-	year= 2002,
-	pages={{133-55}}
-}
-
- at article{GooBlu96,
-	author={Jodi S. Goodman and Terry C. Blum},
-	title={Assessing the Non-random Sampling Effects of Subject Attrition in Longitudinal
-		Research},
-	journal={Journal of Management},
-	volume={22},
-	year={1996},
-	pages={627-652}
-}
-
- at article{Goodman53,
-	author={Goodman, Leo},
-	title={Ecological Regressions and the Behavior of Individuals},
-	journal={American Sociological Review},
-	volume= 18,
-	year= 1953,
-	pages={663-666}
-}
-
- at inbook{GooJaGoo03,
-	author={Mary-Jo DelVecchio Good and others},
-	title={The Culture of Medicine and Racial, Ethnic, and Class Disparities in Healthcare},
-	year={2003},
-	publisher={The National Academies Press},
-	pages={{594-625}}
-}
-
- at article{GosWadBel98,
-	author={Stephen C. Goss and Alice Wade and Felicitie Bell and Bernard Dussault},
-	title={Historical and Projected Mortality for Mexico, Canada, and the United States},
-	journal={North American Actuarial Journal},
-	volume= 4,
-	year= 1998,
-	pages={108--126},
-	number= 2
-}
-
- at article{Gower66,
-	author={J.C. Gower},
-	title={Some Distance Properties of Latent Root and Vector Methods Used in Multivariate
-		Analysis},
-	journal={Biometrika},
-	volume= 53,
-	year= 1966,
-	pages={325--338},
-	month={December},
-	number={3/4}
-}
-
- at article{Gower71,
-	author={J.C. Gower},
-	title={A General Coefficient of Similarity and Some of its Properties},
-	journal={Biometrics},
-	volume= 27,
-	year= 1971,
-	pages={857--872}
-}
-
- at article{GraBraSno89,
-	author={W. Graham and W. Brass and R.W. Snow},
-	title={Estimating Maternal Mortality: The Sisterhood Methods},
-	journal={Studies in Family Planning},
-	volume= 20,
-	year= 1989,
-	pages={125--135},
-	number= 3
-}
-
- at inbook{Graham94,
-	author={Carol Graham},
-	title={Mexico's Solidarity Program in Comparative Context: Demand-based Poverty
-		Alleviation Programs in Latin America, Africa and Eastern Europe},
-	chapter={15},
-	year={1994},
-	publisher={Center for U.S.-Mexican Studies},
-	pages={309-327},
-	series={U.S.-Mexico Contemporary Perspectives Series, 6},
-	address={University of California, San Diego}
-}
-
- at article{GraPioCha95,
-	author={Mark Grant and Zdzislaw Piotrowski and Rick Chappell},
-	title={Self-Reported Health and Survival in the Longitudinal Study of Aging, 1984-1986},
-	journal={Journal of Clinical Epidemiology},
-	volume= 48,
-	year= 1995,
-	pages={{375-87}},
-	number= 3
-}
-
- at inbook{GraSch06,
-	author={J.W. Graham and J.L. Schafer},
-	title={Statistical Strategies for Small Sample Research},
-	chapter={On the performance of Multiple Imputation for Multivariate Data with Small
-		Sample Size},
-	year={2006},
-	note={In press},
-	publisher={Sage},
-	address={Thousand Oaks},
-	editor={R. Hoyle}
-}
-
- at book{GraSmiBar90,
-	author={Ronald H. Gray and Gordon Smith and Peter Barss},
-	title={The Use of Verbal Autopsy Methods to Determine Selected Causes of Death
-		in Children},
-	publisher={International Union for the Scientific Study of Population},
-	year={1990},
-	address={Rue des augustins, 34 ; 4000 Liege (Belgium)},
-	month={February},
-	number={30}
-}
-
- at book{Graunt1662,
-	author={John Graunt},
-	title={Natural and Political Observations Mentioned in a Following Index, and Made
-		Upon the Bills of Mortality},
-	publisher={John Martyn and James Allestry},
-	year= 1662,
-	address={London}
-}
-
- at article{GreChr01,
-	author={Sander Greenland and Ronald Christensen},
-	title={Data Augmentation Priors for Bayesian and Semi-Bayes Analyses of Conditional-logistic
-		and Proportional-Hazards Regression},
-	journal={Statistics in Medicine},
-	volume={20},
-	year={2001},
-	pages={2421-2428}
-}
-
- at article{Greenland00,
-	author={Sander Greenland},
-	title={When should Epidemiologic Regressions Use Random Coefficients?},
-	journal={Biometrics},
-	volume={56},
-	year={2000},
-	pages={915-921},
-	month={September}
-}
-
- at article{Greenland01,
-	author={Sander Greenland},
-	title={Putting Background Information About Relative Risks into Conjugate Prior
-		Distributions},
-	journal={Biometrics},
-	volume={57},
-	year={2001},
-	pages={663-670},
-	month={September}
-}
-
- at article{Greenland03,
-	author={Sander Greenland},
-	title={Quantifying biases in causal models: classical confounding vs collider-stratification
-		bias},
-	journal={Epidemiology},
-	volume= 14,
-	year= 2003,
-	pages={300-306},
-	number= 3
-}
-
- at article{Greenland03b,
-	author={Sander Greenland},
-	title={Generalized Conjugate Priors for Bayesian Analysis of Risk and Survival
-		Regressions},
-	journal={Biometrics},
-	volume={59},
-	year={2003},
-	pages={92-99},
-	month={March}
-}
-
- at article{greenland81,
-	author={Sander Greenland},
-	title={Multivariate Estimation of Exposure-Specific Incidence From Case-Control
-		Studies},
-	journal={Journal of Chronic Disease},
-	volume= 34,
-	year= 1981,
-	pages={445-453}
-}
-
- at article{greenland82,
-	author={Sander Greenland},
-	title={On the Need for the Rare Disease Assumption in Case-Control Studies},
-	journal={American Journal of Epidemiology},
-	volume= 116,
-	year= 1982,
-	pages={547-553},
-	number= 3
-}
-
- at article{greenland87,
-	author={Sander Greenland},
-	title={Interpretation and Choice of Effect Measures in Epidemiologic Analysis},
-	journal={American Journal of Epidemiology},
-	volume= 125,
-	year= 1987,
-	pages={761-768},
-	number= 5
-}
-
- at article{greenland94,
-	author={Sander Greenland},
-	title={Modeling Risk Ratios from Matched Cohort Data: An Estimating Equation Approach},
-	journal={Applied Statistics},
-	volume= 43,
-	year= 1994,
-	pages={223-232},
-	number= 1
-}
-
- at incollection{GreGer01,
-	author={Donald P. Green and Alan Gerber},
-	title={Reclaiming the Experimental Tradition in Political Science},
-	booktitle={Political Science: State of the Discipline, III},
-	publisher={APSA},
-	year= 2001,
-	address={Washington, D.C.},
-	editor={Helen Milner and Ira Katznelson}
-}
-
- at incollection{GreGer02,
-	author={Donald P. Green and Alan S. Gerber},
-	title={Reclaiming the Experimental Tradition in Political Science},
-	booktitle={State of the Discipline},
-	publisher={W.W. Norton \& Company, Inc.},
-	year={2002},
-	address={New York},
-	editor={Helen Milner and Ira Katznelson},
-	pages={805-832},
-	volume={III}
-}
-
- at misc{GreGre03,
-	author={Grendar, Jr., M. and M. Grendar},
-	title={Maximum Probability/Entropy Translating of Contiguous Categorical Observations
-		into Frequencies},
-	year= 2003 ,
-	howpublished={Working paper, Institute of Mathematics and Computer Science, Mathematical
-		Institute of Slovak Academy of Sciences, Banska Bystrica}
-}
-
- at article{GreKimYoo01,
-	author={Donald P.\ Green and Soo Yeon Kim and David H.\ Yoon},
-	title={Dirty Pool},
-	journal= io,
-	volume= 55,
-	year= 2001,
-	pages={441--468},
-	month={Spring},
-	number= 2
-}
-
- at article{GreLuSil04,
-	author={Robert Greevy and Bo Lu and Jeffrey H. Silver and Paul Rosenbaum},
-	title={Optimal multivariate matching before randomization},
-	journal={Biostatistics},
-	volume={5},
-	year={2004},
-	pages={263-275},
-	number={2}
-}
-
- at article{GreMicRob06,
-	author={David H. Greenberg and Charles Michalopoulos and Philip K. Robins},
-	title={Do Experimental and Nonexperimental Evaluations give Different Answers about
-		the Effectiveness of Government-Funded Training Programs?},
-	journal={Journal of Policy Analysis and Management},
-	volume={25},
-	year={2006},
-	pages={523-552},
-	number={3}
-}
-
- at misc{Grenander83,
-	author={Ulf Grenander},
-	title={Tutorial in Pattern Theory},
-	year= 1983,
-	howpublished={Technical Report, Division of Applied Mathematics, Brown University}
-}
-
- at article{GrePeaRob99,
-	author={Sander Greenland and Judea Pearl and James M. Robins},
-	title={Causal Diagrams for Epidemiologic Research},
-	journal={Epidemiology},
-	volume= 10,
-	year= 1999,
-	pages={37--48},
-	month={January},
-	number= 1
-}
-
- at book{GreShr04,
-	author={David Greenberg and Mark Shroder},
-	title={The Digest of Social Experiments},
-	publisher={Urban Institute Press},
-	year={2004},
-	address={Washington, DC},
-	edition={Third}
-}
-
- at techreport{GriHilOdo01,
-	author={W. E. Griffiths and R. Carter Hill and C. J. O'Donnell},
-	title={{Including Prior Information in Probit Model Estimation}},
-	institution={Department of Economics},
-	year= 2001,
-	month={September},
-	type={Working Papers},
-	address={University of Melbourne},
-	number= 816
-}
-
- at book{Grindle04,
-	author={Merilee S. Grindle},
-	title={Despite the Odds},
-	publisher={Princeton University Press},
-	year={2004},
-	address={Princeton, NJ}
-}
-
- at unpublished{Grindle05,
-	author={Merilee S. Grindle},
-	title={Going Local: Decentralization, Democratization, and the Promise of Good
-		Governance},
-	note={Kennedy School of Government, Harvard University},
-	year={2005},
-	month={July}
-}
-
- at book{Grindle77,
-	author={Merilee Serrill Grindle},
-	title={Bureaucrats, Politicians, and Peasants in Mexico},
-	publisher={University of California Press},
-	year={1977},
-	address={Berkeley and Los Angeles, California}
-}
-
- at book{Grindle80,
-	author={Merilee S. Grindle},
-	title={Politics and Policy Implementation in the Third World},
-	publisher={Princeton University Press},
-	year={1980},
-	address={Princeton, NJ}
-}
-
- at article{GroBri99,
-	author={Wim Groot and Henriette Maassen van den Brink},
-	title={Job Satisfaction and Preference Drift},
-	journal={Economics Letters},
-	volume= 63,
-	year= 1999,
-	pages={363--367}
-}
-
- at article{GroLevSny99,
-	author={Tim Groseclose and Steven D. Levitt and James Snyder},
-	title={Comparing Interest Group Scores Across Time and Chambers: Adjusted ADA Scores
-		for the U.S. Congress},
-	journal= apsr,
-	volume= 93,
-	year= 1999,
-	pages={33--50},
-	month={March},
-	number= 1
-}
-
- at article{Groot00,
-	author={Wim Groot},
-	title={Adaptation and Scale of Reference Bias in Self-Assessments of Quality of
-		Life},
-	journal={Journal of Health Economics},
-	volume= 19,
-	year= 2000,
-	pages={403--420},
-	month={June}
-}
-
- at article{Grossman97,
-	author={Joel B. Grossman},
-	title={The Japanese American Cases and the Vagaries of Constitutional Adjudication
-		in Wartime: An Institutional Perspective},
-	journal={Hawaii Law Review},
-	volume= 19,
-	year={1997},
-	pages={649}
-}
-
- at article{GroZalLeb00,
-	author={William M. Grove and David H. Zald and Boyd S. Lebow and Beth E. Snitz and
-		Chad Nelson},
-	title={Clinical Versus Mechanical Prediction: A Meta-Analysis},
-	journal={Psychological Assessment},
-	volume={12},
-	year={2000},
-	pages={19-30},
-	number={1}
-}
-
- at unpublished{GruGuhKum05,
-	author={Daniel Gruhl and R. Guha and Ravi Kumar and Jasmine Novak and Andrew Tomkins},
-	title={The Predictive Power of Online Chatter},
-	note={Daniel Gruhl IBM Almaden Research Center, 650 Harry Rd. San Jose, CA 95120
-		dgruhl at us.ibm.com},
-	year={2005},
-	month={August}
-}
-
- at article{GuRos93,
-	author={X.S. Gu and Paul R. Rosenbaum},
-	title={Comparison of multivariate matching methods: structures, distances, and
-		algorithms},
-	journal={Journal of Computational and Graphical Statistics},
-	volume={2},
-	year={1993},
-	pages={405-420}
-}
-
- at article{GutVan98,
-	author={Sam Gutterman and Irwin T. Vanderhoof},
-	title={Forecasting Changes in Mortality: A Search for a Law of Causes and Effects},
-	journal={North American Actuarial Journal},
-	volume= 4,
-	year= 1998,
-	pages={135--138},
-	number= 2
-}
-
- at article{Gwatkin00,
-	author={Davidson R. Gwatkin},
-	title={Health Inequalities and the Health of the Poor},
-	journal= bull,
-	year={2000},
-	optnumber={1},
-	optvolume={78},
-	optpages={3--18}
-}
-
- at article{Gwatkin03,
-	author={Davidson Gwatkin},
-	title={How well do health programmes reach the poor?},
-	journal={Lancet},
-	volume={361},
-	year={2003},
-	pages={540-1},
-	month={February}
-}
-
- at article{HabBer95,
-	author={J. Haberland and K.E. Bergmann},
-	title={The Lee-Carter Model of the Prognosis of Mortality in Germany},
-	journal={Gesundheitswesen},
-	volume={57},
-	year={1995},
-	pages={674--679},
-	month={October},
-	number={10},
-	note={article in German},
-	annote={Lee-Carter model is applied to West Germany.}
-}
-
- at article{Haenisch89,
-	author={Dirk H{\"a}nisch},
-	title={Wahl- und Sozialdaten der Kreise und Gemeinden des Deutschen Reiches von
-		1920 bis 1933},
-	journal={Historical Social Research},
-	volume= 14,
-	year= 1989,
-	pages={39--67},
-	number= 1
-}
-
- at incollection{Hagtvet80,
-	author={Hagtvet, Bernt},
-	title={The Theory of Mass Society and the Collapse of the Weimar Republic},
-	booktitle={Who Were the Fascists},
-	publisher={Universitetsforlaget},
-	year= 1980,
-	address={Oslo},
-	editor={S. U. Larsen and others}
-}
-
- at article{hahn98,
-	author={Jinyong Hahn},
-	title={On the Role of the Propensity Score in Efficient Semiparametric Estimation
-		of Average Treatment Effects},
-	journal={Econometrica},
-	volume={66},
-	year={1998},
-	pages={315-31}
-}
-
- at unpublished{Hamermesh07,
-	author={Daniel Hamermesh},
-	title={Replication in Economics: Discussion Paper No. 2760},
-	note={{IZA Discussion Paper : http://www.iza.org/publications/dps/}},
-	year={2007},
-	month={April},
-	address={University of Texas at Austin, NBER and IZA; iza at iza.org}
-}
-
- at book{Hamilton83,
-	author={Hamilton, Richard},
-	title={Who Voted for Hitler?},
-	publisher={Princeton University Press},
-	year= 1983
-}
-
- at book{Hamilton94,
-	author={James Douglas Hamilton},
-	title={Time Series Analysis},
-	publisher={Princeton University Press},
-	year= 1994,
-	address={Princeton}
-}
-
- at book{Hammond24,
-	author={C.S. Hammond and Company},
-	title={[Map of] Germany},
-	publisher={C.S. Hammond and Company},
-	year= 1924,
-	address={New York}
-}
-
- at article{Hand06,
-	author={David J. Hand},
-	title={Classifier Technology and the Illusion of Progress},
-	journal={Statistical Science},
-	volume={21},
-	year={2006},
-	pages={1-14},
-	number={1}
-}
-
- at article{Hansen04,
-	author={Ben B. Hansen},
-	title={Full Matching in an Observational Study of Coaching for the {SAT}},
-	journal={Journal of the American Statistical Association},
-	volume={99},
-	year={2004},
-	pages={609--618},
-	number={467}
-}
-
- at misc{Hansen05,
-	author={Ben Hansen},
-	title={Optmatch: Software for Optimal Matching},
-	year= 2005,
-	note={{http://www.stat.lsa.umich.edu/\~{}bbh/optmatch.html}}
-}
-
- at techreport{Hansen06,
-	author={Ben Hansen},
-	title={Appraising Covariate Balance After Assignment to Treatment by Groups},
-	institution={Statistics Department, University of Michigan},
-	year= 2006,
-	month={{April}},
-	number= 436
-}
-
- at article{Harding03,
-	author={David J. Harding},
-	title={Counterfactual Models of Neighborhood Effects: The Effect of Neighborhood
-		Poverty on Dropping Out and Teenage Pregnancy},
-	journal={American Journal of Sociology},
-	volume={109},
-	year={2003},
-	pages={676-719},
-	month={November},
-	number={3}
-}
-
- at article{HarLis04,
-	author={Glenn W. Harrison and John A. List},
-	title={Field Experiments},
-	journal={Journal of Economic Literature},
-	volume={XLII},
-	year={2004},
-	pages={1009-1055}
-}
-
- at article{HarSie75,
-	author={H.O. Hartley and Sielken, Jr., R.L.},
-	title={A `Super-Population Viewpoint' for Finite Population Sampling},
-	journal={Biometrics},
-	volume={31},
-	year={1975},
-	pages={411-422},
-	month={June},
-	number={2}
-}
-
- at article{Hartung56,
-	author={Fritz Hartung},
-	title={Zur Geschichte der Weimarer Republik},
-	journal={Historische Zeitschrift},
-	volume= 181,
-	year= 1956,
-	pages={581--591},
-	number= 3
-}
-
- at book{Harvey91,
-	author={Andrew Harvey},
-	title={Forecasting, Structural Time Series Models and the Kalman Filter},
-	publisher={Cambridge University Press},
-	year= 1991
-}
-
- at book{Harville97,
-	author={David A. Harville},
-	title={Matrix Algebra from a Statistician's Perspective},
-	publisher={Springer},
-	year= 1997,
-	address={New York}
-}
-
- at unpublished{HasKanSta05,
-	author={Justine S. Hastings and Thomas J. Kane and Douglas O. Staiger},
-	title={Evaluating a School Choice Lottery: The Importance of Heterogeneous Treatment
-		Effects},
-	note={Hastings - Yale, Kane - Harvard GSE, Staiger, Dartmouth College},
-	year={2005},
-	month={November}
-}
-
- at unpublished{HasKanStai05b,
-	author={Justine S. Hastings and Thomas J. Kane and Douglas O. Staiger and Jeffrey
-		M. Weinstein},
-	title={Economic Outcomes and the Decision to Vote: The Effect of Randomized School
-		Admissions on Voter Participation},
-	note={Working paper 11794, National Bureau of Economic Research, 1050 Mass. Ave.,
-		Cambridge},
-	year={2005},
-	month={November}
-}
-
- at unpublished{HasKanStai05c,
-	author={Justine S. Hastings and Thomas J. Kane and Douglas O. Staiger},
-	title={Parental Preferences and School Competition: Evidence from a Public School
-		Choice Program},
-	note={National Bureau of Economic Research, 1050 Mass Ave. Camb. Working Paper
-		11805},
-	year={2005},
-	month={November}
-}
-
- at book{HasTib90,
-  author =	 {Hastie, Trevor J. and Tibshirani, Robert},
-  title =	 {Generalized Additive Models},
-  publisher =	 {Chapman Hall},
-  year =	 {1990},
-  address =	 {London}
-}
-
- at article{HauDutBeh99,
-  author =	 {Lene V. Hau and Z. Dutton and C.H. Behroozi and S.E.
-                  Harris},
-  title =	 {{Light Speed Reduction to 17 Metres per Second in an
-                  Ultracold Atomic Gas}},
-  journal =	 {Nature},
-  volume =	 {397},
-  year =	 {1999},
-  pages =	 {594-598},
-  number =	 {6720}
-}
-
- at article{HavNag05,
-	author={Amelia M. Haviland and Daniel S. Nagin},
-	title={Causal Inferences with Group Based Trajectory Models},
-	journal={Psychometrika},
-	volume={70},
-	year={2005},
-	pages={557-578},
-	month={September},
-	number={3}
-}
-
- at book{Hayes87,
-	author={Hayes, Peter},
-	title={Industry and Ideology},
-	publisher={Cambridge University Press},
-	year={1987}
-}
-
- at article{Haynes02,
-	author={R Brian Haynes},
-	title={What Kind of Evidence is it that Evidence-Based Medicine Advocates Want
-		Health Care Providers and Consumers to Pay Attention to?},
-	journal={BMC Health Services Research},
-	volume= 2,
-	year= 2002,
-	month={March},
-	number= 3,
-	note={{http://www.biomedcentral.com/1472-6963/2/3}}
-}
-
- at article{HaySchBla96,
-	author={Judith C. Hayes and David Schoenfield and Dan Blazer and Deborah T. Gold},
-	title={Global Self-Ratings of Health and Mortality: Hazards in North Carolina Piedmont},
-	journal={Journal of Clinical Epidemiology},
-	volume= 49,
-	year= 1996,
-	pages={{969-79}}
-}
-
- at article{Heberle43,
-	author={Heberle, R.},
-	title={The Political Movements among the Rural People in Schleswig-Holstein, 1918-1932},
-	journal={Journal of Politics},
-	volume= 5,
-	year= 1943,
-	pages={3-26}
-}
-
- at book{Heberle45,
-	author={Heberle, Rudolf},
-	title={From Democracy to Nazism},
-	publisher={Louisiana State University Press},
-	year= 1945,
-	address={Baton Rouge}
-}
-
- at article{HecHidTod97,
-	author={James J. Heckman and Hidehiko Ichimura and Petra Todd},
-	title={Matching as an econometric evaluation estimator: evidence from evaluating
-		a job training programme},
-	journal={Review of Economic Studies},
-	volume= 64,
-	year= 1997,
-	pages={605-654}
-}
-
- at article{HecIchSmi98,
-	author={James J. Heckman and Hidehiko Ichimura and Jeffrey Smith and Petra Todd},
-	title={Characterizing selection bias using experimental data},
-	journal={Econometrica},
-	volume= 66,
-	year= 1998,
-	pages={1017-1098},
-	number= 5
-}
-
- at article{HecIchTod97,
-  author =	 {James Heckman and H. Ichimura and P. Todd},
-  title =	 {Matching as an Econometric Evaluation Estimator:
-                  Evidence from Evaluating a Job Training Program},
-  journal =	 {Review of Economic Studies},
-  volume =	 64,
-  month =	 {October},
-  year =	 1997,
-  pages =	 {605--654}
-}
-
- at article{Heckman06,
-	author={James J. Heckman},
-	title={The Scientific Model of Causality},
-	journal={Sociological Methodology},
-	volume={35},
-	year={2006},
-	pages={1-98},
-	month={June},
-	number={1}
-}
-
- at article{Heckman06b,
-	author={James J. Heckman},
-	title={Rejoinder: Response to Sobel},
-	journal={Sociological Methodology},
-	volume={35},
-	year={2006},
-	pages={135-162},
-	month={June},
-	number={1}
-}
-
- at article{Heckman76,
-	author={James Heckman},
-	title={The Common Structure of Statistical Models of Truncation, Sample Selection
-		and Limited Dependent Variables, and Simple Estimator for Such Models},
-	journal={Annals of Economic and Social Measurement},
-	volume={5},
-	year={1976},
-	pages={475-492}
-}
-
- at incollection{Heckman92,
-	author={James J. Heckman},
-	title={Randomization and Social Policy Evaluation},
-	booktitle={Evaluating Welfare and Training Programs},
-	publisher={Harvard University Press},
-	year={1992},
-	editor={Charles F. Manski and Irwin Garfinkel}
-}
-
- at inbook{HecRob85,
-	author={J. Heckman and R. Robb},
-	title={Longitudinal Analysis of Labor Market Data},
-	chapter={Alternative Methods for Evaluating the Impacts of Interventions},
-	year= 1985 ,
-	publisher= cup,
-	editor={J. Heckman and B. Singer}
-}
-
- at article{HecSmi95,
-	author={James J. Heckman and Jeffrey A. Smith},
-	title={Assessing the Case for Social Experiments},
-	journal={The Journal of Economic Perspectives},
-	volume={9},
-	year={1995},
-	pages={85-110},
-	number={2}
-}
-
- at article{HecSny97,
-	author={James Heckman and James Snyder},
-	title={Linear Probability Models of the Demand for Attributes With an Empirical
-		Application to Estimating the Preferences of Legislators},
-	journal={Rand Journal of Economics},
-	volume= 28,
-	year= 1997,
-	pages={142--189},
-	month={special issue},
-	number= 0
-}
-
- at article{Heilbronner97,
-	author={Heilbronner, Oded and M{\"u}hlberger, Detlef},
-	title={The Achilles' Heel of German Catholicism: ``Who Voted for Hitler?'' Revisited},
-	journal={European History Quarterly},
-	volume= 27,
-	year= 1997,
-	pages={221-249},
-	number= 2
-}
-
- at article{HeiRub90,
-	author={Daniel F. Heitjan and Donald Rubin},
-	title={Inference from Coarse Data via Multiple Imputation with Application to Age
-		Heaping},
-	journal= jasa,
-	volume= 85,
-	year= 1990,
-	pages={304--314}
-}
-
- at article{Heitjan89,
-	author={Daniel F. Heitjan},
-	title={Inference from Grouped Continuous Data: A Review},
-	journal={Statistical Science},
-	volume={4},
-	year={1989},
-	pages={164-183}
-}
-
- at article{HelPol80,
-	author={L. Heligman and J. H. Pollard},
-	title={The Age Pattern of Mortality},
-	journal={Journal of the Institute of Actuaries},
-	volume= 107,
-	year= 1980,
-	pages={49--80}
-}
-
- at article{Henderson24,
-	author={R. Henderson},
-	title={A new method of graduation},
-	journal={Transactions of the Actuarial Society of America},
-	volume= 119,
-	year= 1924,
-	pages={457--526}
-}
-
- at unpublished{Herron98,
-	author={Michael C. Herron},
-	title={Voting, Abstention, and Individual Expectations in the 1992 Presidential
-		Election},
-	note={Presented at the Midwest Political Science Association conference, Chicago},
-	year={1998}
-}
-
- at article{HerSek04,
-	author={Michael C. Herron and Jasjeet S. Sekhon},
-	title={Black Candidates and Black Voters: Assessing the Impact of Candidate Race
-		on Uncounted Vote Rates},
-	journal={Journal of Politics},
-	volume= 66,
-	year= 2005,
-	month={forthcoming November},
-	number= 4
-}
-
- at article{Heymann02,
-	author={Philip B. Heymann},
-	title={Civil Liberties and Human Rights in the Aftermath of September 11},
-	journal={Harvard Journal of Law and Public Policy},
-	volume= 25,
-	year={2002},
-	pages={440--455}
-}
-
- at article{Hibbs82,
-	author={Douglas Hibbs},
-	title={Economic Outcomes and Political Support for British Governments Among the
-		Occupational Classes},
-	journal={American Political Science Review},
-	volume= 76,
-	year= 1982,
-	pages={259--279},
-	month={June}
-}
-
- at incollection{Hicks94,
-	author={Alexander M. Hicks},
-	title={{Introduction to Pooling}},
-	booktitle={{The Comparative Political Economy of the Welfare State}},
-	publisher={Cambridge University Press},
-	year= 1994,
-	address={New York},
-	editor={T. Janoski and A. Hicks}
-}
-
- at misc{HigYam00,
-	author={Dave Higdon and Steve Yamamoto},
-	title={Bayesian Image Analysis in Scanning Magnetoresistance Microscopy},
-	year= 2000,
-	howpublished={Discussion Paper \# 98-35, Institute of Statistics and Decision Sciences,
-		Duke University}
-}
-
- at article{HilButLor77,
-	author={Lewis E. Hill and Charles E. Butler and Stephen A. Lorenzen},
-	title={Inflation and the Destruction of Democracy: The Case of the Weimar Republic},
-	journal={Journal of Economic Issues},
-	volume= 11,
-	year= 1977,
-	pages={299-314},
-	number= 2
-}
-
- at unpublished{Hill04,
-	author={Jennifer Hill},
-	title={Reducing bias in treatment effect estimation in observational studies suffering
-		from missing data},
-	note={Columbia University Institute for Social and Economic Research and Policy
-		(ISERP) Working Paper 04-01},
-	year= 2004
-}
-
- at article{Hill87,
-	author={Joe R. Hill},
-	title={Empirical Bayes Confidence Intervals Based on Bootstrap Samples: Comment},
-	journal={Journal of the American Statistical Association},
-	volume={82},
-	year={1987},
-	pages={752-754},
-	month={September},
-	number={399}
-}
-
- at unpublished{HilPurWil07,
-	author={Dustin Hillard and Stephen Purpura and John Wilkerson},
-	title={Bill Titles as Proxies for Bill Content: A Case Study of the Distillation
-		of Meaning from a Political Corpus},
-	note={Prepared for delivery at 2007 annual Meeting of the Midwest Political Science
-		Association, Chicago, IL},
-	year={2007},
-	month={April},
-	address={Univ of WA hillard at u.washington.edu; JFK School of Gov't. stephen_purpura at ksg07.harvard.edu;
-		Univ of WA jwilker at u.washington.edu}
-}
-
- at article{HilRei06,
-	author={Jennifer Hill and Jerome P. Reiter},
-	title={Interval estimation for treatment effects using propensity score matching},
-	journal={Statistics in Medicine},
-	volume={25},
-	year={2006},
-	pages={2230-2256}
-}
-
- at incollection{HilReiZan04,
-	author={Jennifer Hill and J. Reiter and Elaine Zanutto},
-	title={A comparison of experimental and observational data analyses},
-	booktitle={Applied Bayesian Modeling and Causal Inference from an Incomplete-Data Perspective},
-	year= 2004 ,
-	editor={Andrew Gelman and Xiao-Li Meng}
-}
-
- at incollection{HilRubTho99,
-	author={Jennifer Hill and Donald B. Rubin and Neal Thomas},
-	title={The Design of the {N}ew {Y}ork {S}chool {C}hoice {S}cholarship {P}rogram
-		Evaluation},
-	booktitle={Research Designs: Inspired by the Work of Donald Campbell},
-	publisher={Sage},
-	year= 1999,
-	address={Thousand Oaks, CA},
-	editor={L. Bickman},
-	chapter= 7,
-	pages={155--180}
-}
-
- at article{HilTru77,
-	author={Kenneth Hill and J Trussell},
-	title={Further Developments in Indirect Mortality Estimation},
-	journal={Population Studies},
-	volume= 31,
-	year= 1977,
-	pages={313--334}
-}
-
- at article{HilWalBro05,
-	author={Jennifer L. Hill and Jane Waldfogel and Jeanne Brooks-Gunn and Wen-Jui Han},
-	title={Maternal Employment and Child Development: A Fresh Look Using Newer Methods},
-	journal={Developmental Psychology},
-	volume={41},
-	year={2005},
-	pages={833-850},
-	number={6}
-}
-
- at unpublished{Hindman07,
-	author={Matthew Hindman},
-	title={Voice, Equality, and the Internet},
-	note={Book Manuscript},
-	year={2007}
-}
-
- at book{HinMun94,
-	author={Melvin J. Hinich and Michael C. Munger},
-	title={Ideology and the Theory of Political Choice},
-	publisher={University of Michigan Press},
-	year={1994},
-	address={Ann Arbor}
-}
-
- at unpublished{HinTsiJoh03,
-	author={Matthew Hindman and Kostas Tsioutsiouliklis and Judy A. Johnson},
-	title={Googlearchy: How a Few Heavily-Linked Sites Dominate Politics on the Web},
-	note={Midwest Political Science Association, Chicago, Illinois},
-	year={2003},
-	month={April}
-}
-
- at article{HirImbRid03,
-  author =	 {Keisuke Hirano and Guido W. Imbens and Geert Ridder},
-  title =	 {Efficient Estimation of Average Treatment Effects
-                  Using the Estimated Propensity Score},
-  journal =	 {Econometrica},
-  volume =	 71,
-  year =	 2003,
-  pages =	 {1161--1189},
-  month =	 {July},
-  number =	 4
-}
-
- at article{HirImbRub00,
-  author =	 {Keisuke Hirano and Guido W. Imbens and Donald
-                  B. Rubin and Xiao-Hua Zhou},
-  title =	 {Assessing the effect of an influenza vaccine in an
-                  encouragement design},
-  journal =	 {Biostatistics},
-  volume =	 {1},
-  year =	 {2000},
-  pages =	 {69-88},
-  number =	 {1}
-}
-
- at article{HirRubZho00,
-	author={Keisuke Hirano and Guido W. Imbens and Donald B. Rubin and Xiao-Hua Zhou},
-	title={Assessing the effect of an influenza vaccine in an encouragement design},
-	journal={Biostatistics},
-	volume={1},
-	year={2000},
-	pages={69-88},
-	number={1}
-}
-
- at unpublished{Hiscox04,
-	author={Michael J. Hiscox},
-	title={Through a Glass and Darkly: Attitudes Toward International Trade and the
-		Curious Effects of Issue Framing},
-	note={{http://www.experimentcentral.org/data/data.php?pid=136}},
-	year= 2004,
-	address={Chicago},
-	organization={Annual meetings of the American Political Science Association}
-}
-
- at article{Ho91,
-	author={Ho, Suzanne C.},
-	title={Health and Social Predictors of Mortality in an Elderly Chinese Cohort},
-	journal={American Journal of Epidemiology},
-	volume= 133,
-	year= 1991,
-	pages={{209-21}},
-	number= 9,
-	keywords={aged, cohort studies, health status, mortality, social environment}
-}
-
- at article{HoeFesVan97,
-	author={Nancy Hoeymans and others},
-	title={Age, time, and cohort effects on functional status and self-rated health
-		in elderly men},
-	journal={American Journal of Public Health},
-	volume= 87,
-	year= 1997,
-	pages={{1620-25}},
-	number= 10
-}
-
- at article{HoeMadRaf99,
-	author={Jennifer A. Hoeting and David Madigan and Adrian E. Raftery and Chris T. Volinsky},
-	title={Bayesian Model Averaging: A Tutorial (with discussion)},
-	journal={Statistical Science},
-	volume= 14,
-	year= 1999,
-	pages={382--417},
-	number= 4,
-	note={{Corrected version at http://www.stat.washington.edu/www/research/online/hoeting1999.pdf}}
-}
-
- at article{HojSteAab99,
-	author={Lars Hoj and Jakob Stensballe and Peter Aaby},
-	title={Maternal mortality in Guinea-Bissau: the use of verbal autopsy in a multi-ethnic
-		population},
-	journal={International Journal of Epidemiology},
-	volume={28},
-	year={1999},
-	pages={70-76}
-}
-
- at article{holland86,
-	author={Paul W. Holland},
-	title={Statistics and Causal Inference},
-	journal={Journal of the American Statistical Association},
-	volume= 81,
-	year= 1986,
-	pages={945--960}
-}
-
- at inbook{Holmes95,
-	author={Stephen Holmes},
-	title={Passions and constraint: on the theory of liberal democracy},
-	chapter={5 Precommitment and the Paradox of Democracy},
-	year={1995},
-	publisher={University of Chicago Press},
-	pages={134--177},
-	address={Chicago}
-}
-
- at misc{HolMulKal00,
-	author={F.W. Hollmann and T.J. Mulder and J.E. Kallan},
-	title={{Methodology and Assumptions for the Population Projections of the United
-		States: 1999 to 2100}},
-	year= 2000 ,
-	howpublished={Working Paper 38, Population Division, U.S. Bureau of Census}
-}
-
- at article{HolQuiRap03,
-	author={Harry J. Holzer and John M. Quigley and Steven Raphael},
-	title={Public Transit and the Spatial Distribution of Minority Employment: Evidence
-		from a Natural Experiment},
-	journal={Journal of Policy Analysis and Management},
-	volume={22},
-	year={2003},
-	pages={415-441},
-	number={3}
-}
-
- at article{Holtfrerich84,
-  author =	 {Carl-Ludwig Holtfrerich},
-  title =	 {Zu hohe L{\"o}hne in der Weimarer Republik?
-                  Bemerkungen zur Borchardt-These},
-  journal =	 {Geschichte und Gesellschaft},
-  volume =	 10,
-  year =	 1984,
-  number =	 1
-}
-
- at book{HolWai93,
-	title={Differential Item Functioning},
-	publisher={Lawrence Erlbaum},
-	year= 1993,
-	editor={Paul W. Holland and Howard Wainer},
-	address={Hillsdale, N.J.}
-}
-
- at book{HomUga06,
-	title={Decentralizing Health Services in Mexico},
-	publisher={Center for U.S.-Mexican Studies, UCSD},
-	year={2006},
-	editor={N{\'u}ria Homedes and Antonio Ugalde},
-	address={La Jolla, California}
-}
-
- at book{HopVau02,
-	title={Paleodemography},
-	publisher={Cambridge University Press},
-	year={2002},
-	editor={Robert D. Hoppa and James W. Vaupel},
-	address={Cambridge, UK}
-}
-
- at article{Horowitz01,
-	author={Joel L. Horowitz},
-	title={{The Bootstrap}},
-	journal={Handbook of Econometrics},
-	volume={5},
-	year={2001},
-	pages={3159-3228},
-	publisher={Elsevier}
-}
-
- at article{Howell04,
-	author={William G. Howell},
-	title={Dynamic Selection Effects in Means-Tested, Urban School Voucher Programs},
-	journal={Journal of Policy Analysis and Management},
-	volume={23},
-	year={2004},
-	pages={225-250},
-	number={2}
-}
-
- at article{Hsieh85,
-	author={David A. Hsieh and Charles F. Manski and Daniel McFadden},
-	title={Estimation of Response Probabilities from Augmented Retrospective Observations},
-	journal={Journal of the American Statistical Association},
-	volume= 80,
-	year= 1985,
-	pages={651-652},
-	month={September},
-	number= 391
-}
-
- at book{HucSpr95,
-	author={R. Robert Huckfeldt and John Sprague},
-	title={Citizens, Politics, and Social Communication},
-	publisher={Cambridge University Press},
-	year= 1995,
-	address={New York, NY}
-}
-
- at article{HumRogEbe98,
-	author={R.A. Hummer and R.G. Rogers and I.W. Eberstein},
-	title={Sociodemographic Differentials in Adult Mortality: a Review of Analytic
-		Approaches},
-	journal={Population and Development Review},
-	year={1998},
-	optnumber={2},
-	optvolume={24},
-	optpages={553--578}
-}
-
- at article{IacPor08,
-  author =	 {Stefano M. Iacus and Giuseppe Porro},
-  title =	 {Random Recursive Partitioning: a matching method for
-                  the estimation of the average treatment effect},
-  journal =	 {Journal of Applied Econometrics},
-  volume =   {24},
-  pages =    {163-185},
-  year =	 {2009}
-}
-
- at article{IacPor06b,
-  author =	 {Stefano M. Iacus and Giuseppe Porro},
-  title =	 {Missing data imputation, matching and other applications
-              of Random Recursive Partitioning},
-  journal =  {Computational Statistics and Data Analysis},
-  volume =   {52},
-  number =   {2},
-  pages =    {773-789},			
-  year =	 {2007}
-}
-
- at article{IbrChe97,
-	author={Joseph G. Ibrahim and Ming-Hui Chen},
-	title={Predictive Variable Selection for the Multivariate Linear Model},
-	journal={Biometrics},
-	volume= 53,
-	year= 1997,
-	pages={465--478},
-	month={June}
-}
-
- at article{IdlAng90,
-	author={Ellen Idler and Ronald Angel},
-	title={Self-Rated Health and Mortality in the NHANES-I Epidemiologic Follow-Up
-		Study},
-	journal={American Journal of Public Health},
-	volume= 80,
-	year= 1990,
-	pages={{446-52}}
-}
-
- at article{IdlBen97,
-	author={Ellen L. Idler and Yael Benyamini},
-	title={Self-Rated Health and Mortality: A Review of Twenty-Seven Community Studies},
-	journal={Journal of Health and Social Behavior},
-	volume= 38,
-	year= 1997,
-	pages={{21-37}}
-}
-
- at article{Idler03,
-	author={Ellen L. Idler},
-	title={Discussion: Gender Differences in Self-Rated Health, in Mortality, and in
-		the Relationship Between the Two},
-	journal={The Gerontologist},
-	volume= 43,
-	year= 2003,
-	pages={{372-75}},
-	number= 4
-}
-
- at inbook{Idler92,
-	author={Ellen L. Idler},
-	title={Self-Assessed Health and Mortality: A Review of Studies},
-	chapter= 2,
-	year= 1992,
-	publisher={{John Wiley \& Sons, Ltd.}},
-	pages={{33-54}},
-	volume= 1,
-	journal={International Review of Health Psychology}
-}
-
- at article{IdlHudLev99,
-	author={Ellen L. Idler and Shawna V. Hudson and Howard Leventhal},
-	title={The Meanings of Self-Rated Health: A Qualitative and Quantitative Approach},
-	journal={Research on Aging},
-	volume= 21,
-	year= 1999,
-	pages={{458-76}},
-	month={{May}},
-	number= 3
-}
-
- at article{IdlKas91,
-	author={Ellen L. Idler and Stanislav Kasl},
-	title={Health Perceptions and Survival: Do Global Evaluations of Health Status
-		Really Predict Mortality?},
-	journal={Journal of Gerontology: Social Sciences},
-	volume= 46,
-	year= 1991,
-	pages={{S55-65}},
-	number= 2
-}
-
- at article{IdlKas95,
-	author={Ellen Idler and Stanislav Kasl},
-	title={Self-Ratings of Health: Do they Also Predict Change in Functional Ability?},
-	journal={Journal of Gerontology: Social Sciences},
-	volume={{50B}},
-	year= 1995,
-	pages={{S344-53}},
-	number= 6
-}
-
- at article{IdlKasLem90,
-	author={Ellen L. Idler and Stanislav V. Kasl and Jon H. Lemke},
-	title={Self-Evaluated Health and Mortality Among the Elderly in New Haven, Connecticut,
-		and Iowa and Washington Counties, Iowa, 1982-1986},
-	journal={American Journal of Epidemiology},
-	volume= 131,
-	year= 1990,
-	pages={{91-103}}
-}
-
- at article{IdlRusDav00,
-	author={Ellen Idler and Louise Russell and Diane Davis},
-	title={Survival, Functional Limitations, and Self-Rated Health in the NHANES I
-		Epidemiological Follow-Up Study, 1992},
-	journal={American Journal of Epidemiology},
-	volume= 152,
-	year= 2000,
-	pages={{874-83}},
-	number= 4
-}
-
- at article{IhaGen96,
-	author={Ross Ihaka and Robert Gentleman},
-	title={R: A Language for Data Analysis and Graphics},
-	journal={Journal of Computational and Graphical Statistics},
-	volume={5},
-	year={1996},
-	pages={299-314},
-	month={September},
-	number={3}
-}
-
- at article{ImaDyk04,
-	author={Kosuke Imai and David A. van Dyk},
-	title={Causal Inference with General Treatment Regimes: Generalizing
-		the Propensity Score},
-	journal= jasa,
-	volume= 99,
-	year= 2004,
-	pages={854--866},
-	month={September},
-	number= 467
-}
-
- at article{Imai05,
-	author={Kosuke Imai},
-	title={Do Get-Out-The-Vote Calls Reduce Turnout? The Importance of Statistical
-		Methods for Field Experiments},
-	journal= apsr,
-	volume={99},
-	year={2005},
-	pages={283--300},
-	month={May},
-	number={2}
-}
-
- at techreport{Imai07,
-	author={Imai, Kosuke},
-	title={Randomization-based Inference and Efficiency Analysis in Experiments under
-		the Matched-Pair Design},
-	institution={Department of Politics, Princeton University},
-	year={2007}
-}
-
- at article{Imbens00,
-	author={Guido W. Imbens},
-	title={The Role of the Propensity Score in Estimating Dose-Response Functions},
-	journal={Biometrika},
-	volume= 87,
-	year={2000},
-	pages={706-710},
-	number= 3
-}
-
- at article{Imbens03,
-	author={Guido W. Imbens},
-	title={Sensitivity to exogeneity assumptions in program evaluation},
-	journal={American Economic Review},
-	volume= 96,
-	year= 2003,
-	pages={126-132},
-	number= 2
-}
-
- at article{Imbens04,
-	author={Guido W. Imbens},
-	title={Nonparametric estimation of average treatment effects under exogeneity:
-		a review},
-	journal={Review of Economics and Statistics},
-	volume= 86,
-	year= 2004,
-	pages={4-29},
-	number= 1
-}
-
- at unpublished{ImbensNDb,
-	author={Imbens, Guido W. },
-	title={Semiparametric Estimation of Average Treatment Effects under Exogeneity:
-		A Review},
-	note={Manuscript, UC Berkeley},
-	year={2003}
-}
-
- at article{ImbRub97,
-	author={Guido W. Imbens and Donald B. Rubin},
-	title={Bayesian Inference for Causal Effects in Randomized Experiments with Noncompliance},
-	journal={The Annals of Statistics},
-	volume={25},
-	year={1997},
-	pages={305-327},
-	month={February},
-	number={1}
-}
-
- at unpublished{ImbRubND,
-	author={Guido W. Imbens and Donald B. Rubin},
-	title={Causal Inference},
-	note={Book Manuscript},
-	year= 2002
-}
-
- at book{IngMcC96,
-  author =	 {Jorge I. Dom{\'i}nguez and James A. McCann},
-  title =	 {Democratizing Mexico: Public Opinion and Electoral
-                  Choice},
-  address =	 {Baltimore},
-  publisher =	 {Johns Hopkins University Press},
-  year =	 {1996}
-}
-
- at book{Insua00,
-	author={David R. Insua and Fabrizio Ruggeri},
-	title={Bayesian Analysis},
-	publisher={Springer-Verlag},
-	year= 2000
-}
-
- at article{IrwJonMun96,
-	author={Julie R. Irwin and Lawrence E. Jones and David Mundo},
-	title={Risk Perception and Victim Perception: The Judgment of HIV Cases},
-	journal={Journal of Behavioral Decision Making},
-	volume= 9,
-	year= 1996,
-	pages={1--22}
-}
-
- at article{IslRahMah96,
-	author={M. Aminul Islam and M. Mujibur Rahman and D. Mahalanabis and A.K.S. Mahmudur
-		Rahman},
-	title={Death in a Diarrhoeal Cohort of Infants and Young Children Soon After Discharge
-		From Hospital: Risk Factors and Causes by Verbal Autopsy},
-	journal={Journal of Tropical Pediatrics},
-	volume={42},
-	year={1996},
-	pages={342-347},
-	month={December }
-}
-
- at misc{ISO97,
-	author={ISO},
-	title={The Dublin Core Metadata Element Set},
-	year={1997},
-	note={{http://www.collectionscanada.ca/iso/tc46sc9/standard/690-2e.htm}}
-}
-
- at article{Iversen01,
-	author={Iversen, Jr., Edwin S.},
-	title={Spatially Disaggregated Real Estate Indices},
-	journal={Journal of Business \& Economic Statistics},
-	volume={19},
-	year={2001},
-	pages={341--357},
-	month={July},
-	number={3}
-}
-
- at article{Jackman00,
-	author={Simon Jackman},
-	title={Estimation and Inference via Bayesian Simulation: An Introduction to Markov
-		Chain Monte Carlo},
-	journal={American Journal of Political Science},
-	volume={44},
-	year={2000},
-	pages={375-404},
-	month={April},
-	number={2}
-}
-
- at techreport{JagRob03,
-	author={Carol Jagger and Jean-Marie Robine},
-	title={The Health of Adults in the European Union},
-	institution={Press and Communication, Unit "Analysis and Public Opinion"},
-	year= 2003,
-	month={{June}}
-}
-
- at article{JagSpiCla93,
-	author={C. Jagger and N.A. Spiers and M. Clarke},
-	title={Factors Associated with Decline in Function, Institutionalization and Mortality
-		of Elderly People},
-	journal={Age and Ageing },
-	volume= 22,
-	year= 1993,
-	pages={{190-97}}
-}
-
- at incollection{James90,
-	author={Harold James},
-	title={Economic Reasons for the Collapse of the Weimar Republic},
-	booktitle={Weimar: Why Did German Democracy Fail?},
-	publisher={Weidenfeld and Nicolson},
-	year= 1990,
-	address={London},
-	pages={30-57}
-}
-
- at article{JeeGimSug98,
-	author={Sun Ha Jee and Il Soon Kim and Il Suh and Dongchun Shin and Lawrence J Appel},
-	title={Projected Mortality from Lung Cancer in South Korea, 1980-2004},
-	journal={International Journal of Epidemiology},
-	volume= 27,
-	year= 1998,
-	pages={365--369}
-}
-
- at inproceedings{JefBarRod01,
-	author={William H. Jefferys and Thomas G. Barnes and Raquel Rodrigues and James
-		O. Berger and Peter M{\"u}ller},
-	title={Model Selection for Cepheid Star Oscillations},
-	booktitle={Bayesian methods, with Applications to Science, Policy, and Official Statistics},
-	year={2001},
-	publisher={Official Publications of the European Communities, Luxembourg},
-	editor={E. George and P. Nanopoulos},
-	pages={253 --252}
-}
-
- at unpublished{JefBarRod06,
-	author={William H. Jefferys and Thomas G. Barnes and Raquel Rodrigues and James
-		O. Berger and Peter Muller},
-	title={Nonparametric Regression with Wavelet Based Priors: Efficient Posterior
-		Simulation for Unequally Spaced Data and Dependent Priors},
-	note={Jefferys, Barnes, Rodrigues: Univ of TX at Austin; Berger and Muller: Duke
-		Univ.},
-	year={2006}
-}
-
- at book{Jeffreys61,
-	author={H. Jeffreys},
-	title={Theory of Probability},
-	publisher={Clarendon Press},
-	year= 1961,
-	address={Oxford},
-	edition={3rd (1st edn., 1939)}
-}
-
- at book{JohAlb99,
-	author={Valen E. Johnson and James H. Albert},
-	title={Ordinal Data Modeling},
-	publisher={Springer},
-	year= 1999,
-	address={New York}
-}
-
- at article{Johnson01,
-	author={David H. Johnson},
-	title={Sharing Data: It's Time to End Psychology's Guild Approach},
-	journal={Observer (American Psychological Society)},
-	volume= 14,
-	year= 2001,
-	month={October},
-	number= 8,
-	note={{http://www.psychologicalscience.org/observer/1001/data.html}}
-}
-
- at inbook{Johnson96,
-	author={Wesley O. Johnson},
-	title={Essays in Honor of Seymour Geisser},
-	chapter={Predictive Influence in the Lognormal Survival Model},
-	year={1996},
-	publisher={Elsevier},
-	pages={104-121},
-	address={Amsterdam},
-	editor={J. Lee and A. Zellner and W. Johnson}
-}
-
- at article{Johnson98,
-	author={Timothy P. Johnson},
-	title={Approaches to Equivalence in Cross-Cultural and Cross-National Survey Research},
-	journal={ZUMA Nachrichten Spezial},
-	volume= 3,
-	year= 1998,
-	pages={1--40},
-	month={January}
-}
-
- at book{JonBau05,
-	author={Bryan D. Jones and Frank R. Baumgartner},
-	title={The Politics of Attention: How Government Prioritizes Problems},
-	publisher={University of Chicago Press},
-	year={2005},
-	address={Chicago, IL}
-}
-
- at article{JonDagGon04,
-	author={Alison Snow Jones and Ralph B. D'Agostino Jr. and Edward W. Gondolf and
-		Alex Heckert},
-	title={Assessing the Effect of Batterer Program Completion on Reassault Using Propensity
-		Scores},
-	journal={Journal of Interpersonal Violence},
-	volume={19},
-	year={2004},
-	pages={1002-1020},
-	month={September},
-	number={9}
-}
-
- at book{Jones88,
-  author =	 {Jones, Larry Eugene},
-  title =	 {German Liberalism and the Dissolution of the Weimar
-                  Party System},
-  publisher =	 {The University of North Carolina Press},
-  year =	 1988,
-  address =	 {Chapel Hill and London}
-}
-
- at unpublished{JonKimSta05,
-	author={Bryan D. Jones and Chang-Jin Kim and Richard Startz},
-	title={A Markov Switching Model of Congressional Partisan Regimes},
-	note={University of Washington, Center for American Politics and Public Policy,
-		Box 353530 Seattle, WA 98195-3530, bdjones at u.washington.edu},
-	year={2005}
-}
-
- at article{Jordan1874,
-	author={C. Jordan},
-	title={{M{\'e}moire sur les formes bilin{\'e}aires}},
-	journal={Comptes Rendus de l' Acad{\'e}mie des Sciences, Paris},
-	volume= 78,
-	year= 1874,
-	pages={614--617}
-}
-
- at article{JylGurFer98,
-	author={Maria Jylha and Jack Guralnik and Luigi Jokela and others},
-	title={Is self-rated health comparable across cultures and genders?},
-	journal={Journal of Gerontology: Social Sciences},
-	volume={{53B}},
-	year= 1998,
-	pages={{S144-52}}
-}
-
- at incollection{Kadane80,
-	author={Joseph B. Kadane},
-	title={Predictive and Structural Methods for Eliciting Prior Distributions},
-	booktitle={Bayesian Analysis in Econometrics and Statistics},
-	publisher={North-Holland},
-	year= 1980,
-	editor={Arnold Zellner}
-}
-
- at article{KadDicWin80,
-	author={Joseph B. Kadane and James M. Dickey and Robert L. Winkler and Wayne S.
-		Smith and Stephen C. Peters},
-	title={Interactive Elicitation of Opinion for a Normal Linear Model},
-	journal={Journal of the American Statistical Association},
-	volume={75},
-	year={1980},
-	pages={845-854},
-	month={December},
-	number={372}
-}
-
- at article{Kahn86,
-	author={Paul W. Kahn},
-	title={Gramm-Rudman and the Capacity of Congress to Control the Future},
-	journal={Hastings Constitutional Law Quarterly},
-	volume={13},
-	year={1986},
-	pages={185-231}
-}
-
- at article{KahSchSun98,
-	author={Daniel Kahneman and David Schkade and Cass R. Sunstein},
-	title={Shared Outrage and Erratic Awards: The Psychology of Punitive Damages},
-	journal={Journal of Risk and Uncertainty},
-	volume= 16,
-	year= 1998,
-	pages={49--86},
-	month={April}
-}
-
- at article{KahTolGar00,
-	author={Kathleen Kahn and Stephen M. Tollman and Michel Garenne and John S.S. Gear},
-	title={Validation and Application of Verbal Autopsies in a Rural Area of South Africa},
-	journal={Tropical Medicine and International Health},
-	volume={5},
-	year={2000},
-	pages={824-831},
-	number={11}
-}
-
- at article{KalGraBla90,
-	author={Henry D. Kalter and Ronald H. Gray and Robert E. Black and Socorro A. Gultiano},
-	title={Validation of Postmortem Interviews to Ascertain Selected Causes of Death
-		in Children},
-	journal={International Journal of Epidemiology},
-	volume={19},
-	year={1990},
-	pages={380-386},
-	number={2}
-}
-
- at article{KalHosBur99,
-	author={Henry D. Kalter and Munir Hossain and Gilbert Burnham and Naila Z. Khan
-		and Samir K. Saha and Md Anwar Ali and Robert E. Black},
-	title={Validation of caregiver interviews to diagnose common causes of severe neonatal
-		illness},
-	journal={Paediatric and Perinatal Epidemiology},
-	volume={13},
-	year={1999},
-	pages={99-113}
-}
-
- at article{Kallay84,
-	author={Michael Kallay},
-	title={The Complexity of Incremental Convex Hull Algorithms in $R^d$},
-	journal={Information Processing Letters},
-	volume={19},
-	year={1984},
-	pages={197},
-	number={4}
-}
-
- at article{Kallay86,
-	author={Michael Kallay},
-	title={Convex Hull Made Easy},
-	journal={Information Processing Letters},
-	volume={22},
-	year={1986},
-	month={March},
-	number={3}
-}
-
- at article{Kalter92,
-	author={Henry Kalter},
-	title={The Validation of interviews for estimating morbidity},
-	journal={Health Policy and Planning},
-	volume={7},
-	year={1992},
-	pages={30-39},
-	number={1}
-}
-
- at article{KapBarLus88,
-	author={George Kaplan and Vita Barell and Ayala Lusky},
-	title={Subjective State of Health and Survival in Elderly Adults},
-	journal={Journal of Gerontology: Social Sciences},
-	volume= 43,
-	year= 1988,
-	pages={{S114-20}}
-}
-
- at article{KapGolEve96,
-	author={George A. Kaplan and Debbie Goldberg and Susan Everson and others},
-	title={Perceived Health Status and Morbidity and Mortality: Evidence from the Kuopio
-		Ischaemic Heart Disease Risk Factor Study},
-	journal={International Journal of Epidemiology},
-	volume= 25,
-	year= 1996,
-	pages={{259-65}}
-}
-
- at article{Karaagaoglu99,
-	author={Ergun Karaagaoglu},
-	title={Estimation of the Prevalence of a Disease from Screening Tests},
-	journal={Tropical Journal of Medical Sciences},
-	volume={29},
-	year={1999},
-	pages={425-430}
-}
-
- at article{KasCarGel98,
-	author={Robert E. Kass and Bradley P. Carlin and Andrew Gelman and Radford M. Neal},
-	title={Markov chain Monte Carlo in Practice: A Roundtable Discussion},
-	journal={The American Statistician},
-	volume={52},
-	year={1998},
-	pages={93-100},
-	number={2}
-}
-
- at article{KasWas96,
-	author={Robert E. Kass and Larry Wasserman},
-	title={The Selection of Prior Distributions by Formal Rules},
-	journal={Journal of the American Statistical Association},
-	volume= 91,
-	year= 1996,
-	pages={1343--1370},
-	month={September},
-	number= 435
-}
-
- at article{KatTri02,
-	author={Neal K. Katyal and Laurence H. Tribe},
-	title={Waging War, Deciding Guilt: Trying the Military Tribunals},
-	journal= ylj,
-	volume= 111,
-	year={2002},
-	pages={1259--1310}
-}
-
- at article{Kawada03,
-	author={Tomoyuki Kawada},
-	title={Self-rated health and life prognosis},
-	journal={Archives of Medical Research},
-	volume= 34,
-	year= 2003,
-	pages={{343-47}}
-}
-
- at book{Kele72,
-	author={Kele, M.},
-	title={Nazis and Workers},
-	publisher={University of North Carolina Press},
-	year= 1972,
-	address={Chapel Hill}
-}
-
- at article{Kelly06,
-	author={Kevin Kelly},
-	title={{Scan this Book}},
-	journal={The New York Times Magazine},
-	year={2006},
-	month={October 11}
-}
-
- at article{KenStu50,
-	author={M.G. Kendall and A. Stuart},
-	title={The Law of the Cubic Proportion in election Results},
-	journal={The British journal of Sociology},
-	volume={1},
-	year={1950},
-	pages={183-196},
-	month={September},
-	number={3}
-}
-
- at book{Keyfitz68,
-	author={N. Keyfitz},
-	title={Introduction to the Mathematics of Population},
-	publisher={Addison Wesley},
-	year= 1968,
-	address={Reading, MA}
-}
-
- at article{Keyfitz82,
-	author={N. Keyfitz},
-	title={Choice of Function for mortality Analysis: Effective Forecasting Depends
-		on a Minimum Parameter Representation},
-	journal={Theoretical Population Biology},
-	volume= 21,
-	year= 1982,
-	pages={239--252}
-}
-
- at unpublished{KimHov04,
-	author={Soo-Min Kim and Eduard Hovy},
-	title={Determining the Sentiment of Opinions},
-	note={Soo-Min Kim Information Sciences Inst. Univ. of Southern Calif. 4676 Admiralty
-		Way, Marina del Rey, CA 90292-6695; skim at isi.edu},
-	year={2004}
-}
-
- at article{KimWah70,
-	author={G.S. Kimeldorf and G. Wahba},
-	title={A correspondence between {Bayesian} estimation on stochastic processes and
-		smoothing by splines},
-	journal={Ann. Math. Statist.},
-	volume={41},
-	year={1970},
-	pages={495--502},
-	number={2}
-}
-
- at article{KinAubHer98,
-	author={Hilary King and Ronald E. Aubert and William H. Herman},
-	title={Global Burden of Diabetes, 1995-2025},
-	journal={Diabetes Care},
-	volume= 21,
-	year= 1998,
-	pages={1414--1431}
-}
-
- at article{Kinder86,
-	author={Donald R. Kinder},
-	title={The Continuing American Dilemma: White Resistance to Racial Change 40 years
-		After Myrdal},
-	journal={Journal of Social Issues},
-	volume={42},
-	year={1986},
-	pages={151-71}
-}
-
- at book{KinPal93,
-	title={Experimental Foundations of Political Science},
-	publisher={University of Michigan Press},
-	year= 1993,
-	editor={Donald R. Kinder and Thomas R. Palfrey},
-	address={Ann Arbor}
-}
-
- at article{KinSea81,
-	author={Donald R. Kinder and David O. Sears},
-	title={Prejudice and Politics: Symbolic Racism Versus Racial Threats to the Good
-		Life},
-	journal={Journal of Personality and Social Psychology},
-	volume={40},
-	year={1981},
-	pages={414-31}
-}
-
- at article{Kirchgaessner85,
-	author={Gebhard Kirchg{\"a}ssner},
-	title={Rationality, Causality and the Relation between Economic Conditions and
-		the Popularity of Parties},
-	journal={European Economic Review},
-	volume= 28,
-	year= 1985,
-	pages={243-268},
-	month={June/July}
-}
-
- at article{Kish49,
-	author={Kish, Leslie},
-	title={{A Procedure for Objective Respondent Selection within the Household}},
-	journal={Journal of the American Statistical Association},
-	volume={44},
-	year={1949},
-	pages={380--387},
-	number={247}
-}
-
- at article{KlaDon97,
-	author={Neil Klar and Allan Donner},
-	title={The Merits of Matching in Community Intervention Trials: A Cautionary Tale},
-	journal={Statistics in Medicine},
-	volume={16},
-	year={1997},
-	pages={1753-1764},
-	number={15}
-}
-
- at article{KlaDon98,
-	author={Neil Klar and Allan Donner},
-	title={Authors' Reply: The Merits of Matching in Community Intervention Trials:
-		A Cautionary Tale},
-	journal={Statistics in Medicine},
-	volume={17},
-	year={1998},
-	pages={2151-2152}
-}
-
- at article{Klarman97,
-	author={Michael J. Klarman},
-	title={Majoritarian Judicial Review: The Entrenchment Problem},
-	journal={The Georgetown Law Journal},
-	volume={85},
-	year={1997},
-	pages={491-554}
-}
-
- at article{Klee80,
-	author={Victor Klee},
-	title={On the Complexity of d-Dimensional Voronoi Diagrams},
-	journal={Archiv der Mathematik},
-	volume={34},
-	year={1980},
-	pages={75--80}
-}
-
- at book{KliSmi99,
-	author={Philip A. Klinkner and Rogers M. Smith},
-	title={The Unsteady March: The Rise and Decline of Racial Equality in America},
-	publisher={?},
-	year= 1999
-}
-
- at article{KluBerBra06,
-	author={Klump, J. and Bertelmann, R. and Brase, J. and Diepenbroek, M. and Grobe,
-		H. and H{\"o}ck, H. and Lautenschlager, M. and Schindler, U. and Sens, I. and
-		W{\"a}chter, J.},
-	title={{Data publication in the open access initiative}},
-	journal={Data Science Journal},
-	volume={5},
-	year={2006},
-	pages={79--83},
-	number={0}
-}
-
- at article{Knorr-Held00,
-	author={Leonhard Knorr-Held},
-	title={Bayesian Modelling of Inseparable Space-Time Variation in Disease Risk},
-	journal={Statistics in Medicine},
-	volume= 19,
-	year= 2000,
-	pages={2555-2567}
-}
-
- at article{Koch02,
-	author={Koch, Jeffrey M.},
-	title={Gender Stereotypes and Citizens' Impressions of House Candidates' Ideological
-		Orientation},
-	journal={American Journal of Political Science},
-	volume= 46,
-	year= 2002,
-	pages={453--462}
-}
-
- at article{KohAlt05,
-	author={Isaac S. Kohane and Russ B. Altman},
-	title={Health-Information Altruists - A Potentially Critical Resource},
-	journal={New England Journal of Medicine},
-	volume={353},
-	year={2005},
-	pages={2074-2077},
-	month={November},
-	number={19}
-}
-
- at book{Kolb88,
-	author={Eberhard Kolb},
-	title={The Weimar Republic},
-	publisher={Unwin Hyman},
-	year= 1988,
-	address={London}
-}
-
- at article{KolBur91,
-	author={Kolbe, R.H. and Burnett, M.S.},
-	title={{Content-Analysis Research: An Examination of Applications with Directives
-		for Improving Research Reliability and Objectivity}},
-	journal={The Journal of Consumer Research},
-	volume={18},
-	year={1991},
-	pages={243--250},
-	number={2}
-}
-
- at unpublished{KolFinJos06,
-	author={Pranam Kolari and Tim Finin and Anupam Joshi},
-	title={{SVMs for the Blogosphere: Blog Identification and Splog Detection}},
-	note={American Association for Artificial Intelligence Spring Symposium on Computational
-		Approaches to Analyzing Weblogs},
-	year={2006}
-}
-
- at article{KolVliKap00,
-	author={H. Koivumaa-Honkanen and others},
-	title={Self-Reported Life Satisfaction and 20-Year Mortality in Healthy Finnish
-		Adults},
-	journal={American Journal of Epidemiology},
-	volume= 152,
-	year= 2000,
-	pages={{983-91}}
-}
-
- at article{KonDes01,
-	author={M.M. Konstantareas and N. Desbois},
-	title={Preschoolers' Perceptions of the Unfairness of Maternal Disciplinary Practices},
-	journal={Child Abuse \& Neglect},
-	volume= 25,
-	year= 2001,
-	pages={473--488},
-	month={April},
-	number= 4
-}
-
- at article{KooVanBon94,
-	author={Marc A. Koopmanschap and Leona Van Roijen and Luc Bonneux and Jan J. Barendregt},
-	title={Current and Future Costs of Cancer},
-	journal={European Journal of Cancer},
-	volume={30A},
-	year= 1994,
-	pages={60--65},
-	number= 1
-}
-
- at unpublished{KopSch05,
-	author={Moshe Koppel and Jonathan Schler},
-	title={The Importance of Neutral Examples for Learning Sentiment},
-	note={Dept. of Computer Science Bar-Ilan University, Ramat-Gan Israel koppel,schlerj at cs.biu.ac.il},
-	year={2005}
-}
-
- at article{KorJorLet99,
-	author={A.E. Korten and A.F. Jorm and Z. Jiao and others},
-	title={Health, Cognitive and psychosocial factors as predictors of mortality in
-		an elderly community sample},
-	journal={Journal of Epidemiology and Community Health},
-	volume= 53,
-	year= 1999,
-	pages={{83-8}}
-}
-
- at book{Kornhauser59,
-	author={Kornhauser, W.},
-	title={The Politics of Mass Society},
-	publisher={The Free Press},
-	year= 1959,
-	address={New York}
-}
-
- at article{KorWilGou03,
-	author={Eline L. Korenromp and Brian G. Williams and Eleanor Gouws and Christopher
-		Dye and Robert W. Snow},
-	title={Measurement of trends in childhood malaria mortality in Africa: an assessment
-		of progress toward targets based on verbal autopsy},
-	journal={The Lancet Infectious Diseases},
-	volume={3},
-	year={2003},
-	pages={349-58}
-}
-
- at book{Koshar86,
-	author={Koshar, R.},
-	title={Social Life, Local Politics, and Nazism},
-	publisher={University of North Carolina Press},
-	year= 1986,
-	address={Chapel Hill}
-}
-
- at article{KosHeiZak05,
-	author={Michael Kosfeld and Markus Heinrichs and Paul J. Zak and Urs Fischbacher
-		and Ernst Fehr},
-	title={Oxytocin Increases Trust in Humans},
-	journal={Nature},
-	volume={435},
-	year={2005},
-	pages={673-676},
-	month={June}
-}
-
- at article{KraJay94,
-	author={Neal M. Krause and Gina M. Jay},
-	title={What do Global Self-Rated Health Items Measure?},
-	journal={Medical Care},
-	volume= 32,
-	year= 1994,
-	pages={{930-42}}
-}
-
- at article{KraSha84,
-	author={M.S. Kramer and S.H. Shapiro},
-	title={{Scientific challenges in the application of randomized trials}},
-	journal={Journal of the American Medical Association},
-	volume= 252,
-	year= 1984 ,
-	pages={2739-45},
-	number= 19
-}
-
- at article{KriBacRob07,
-	author={Samuel Krislov and Charles Backstrom and Leonard Robins},
-	title={When Texans Gerrymander: Much Power, Continuous Politics, Little Law}
-}
-
- at book{Krippendorff04,
-	author={Krippendorff, Klaus},
-	title={{Content Analysis: An Introduction to Its Methodology}},
-	publisher={Sage},
-	address={Thousand Oaks, CA},
-	year={2004}
-}
-
- at article{Kruedener85,
-	author={J{\"u}rgen von Kruedener},
-	title={Die {\"U}berforderung der Weimarer Republic als Sozialstaat},
-	journal={Geschichte und Gesellschaft},
-	volume= 1,
-	year= 1985,
-	pages={358--376},
-	number= 3
-}
-
- at article{Krueger90,
-	author={Anne O. Krueger},
-	title={Government Failures in Development},
-	journal={The Journal of Economic Perspectives},
-	volume={4},
-	year={1990},
-	pages={9-23},
-	number={3}
-}
-
- at article{Krueger99,
-	author={Alan Krueger},
-	title={Experimental Estimates of Education Production Functions},
-	journal={Quarterly Journal of Economics},
-	volume={114},
-	year={1999},
-	pages={497-532},
-	month={May},
-	number={2}
-}
-
- at article{KrzWys86,
-	author={Michal Krzyzanowski and Miroslaw Wysocki},
-	title={The Relation of Thirteen-Year Mortality to Ventilatory Impairment and Other
-		Respiratory Symptoms: The Cracow Study},
-	journal={International Journal of Epidemiology},
-	volume= 15,
-	year= 1986,
-	pages={{56-64}},
-	number= 1
-}
-
- at article{KucMwaLes06,
-	author={Helmut K{\"u}chenohoff and Samuel M. Mwalili and Emmanuel Lassaffre},
-	title={A General Method for Dealing with Misclassification in Regression: The Misclassification
-		SIMEX},
-	journal={Biometrics},
-	volume={62},
-	year={2006},
-	pages={85-96},
-	month={March}
-}
-
- at article{KulLei51,
-	author={S. Kullback and R.A. Leibler},
-	title={On Information and Sufficiency},
-	journal={Annals of Mathematical Statistics},
-	volume= 22,
-	year= 1951,
-	pages={79--86},
-	month={March},
-	number= 1
-}
-
- at unpublished{KumAll04,
-	author={Giridhar Kumaran and James Allan},
-	title={Text Classification and Named Entities for New Event Detection},
-	note={Center for Intelligent Information Retrieval, Department of Computer Science,
-		Univ of MA, Amherst},
-	year={2004},
-	month={July}
-}
-
- at article{KunGeuvan95,
-	author={Anton Kunst and J.J. Geurts and J. van den Berg},
-	title={International variation in socioeconomic inequalities in self reported health},
-	journal={Journal of Epidemiology and Community Health},
-	year={1995},
-	optnumber={2},
-	optvolume={49},
-	optpages={117--123}
-}
-
- at article{KunGroMac98,
-	author={A.E. Kunst and F. Groenhof and J.P. Mackenbach and {EU Working Group on Socioeconomic Inequalities in Health}},
-	title={Occupational class and cause specific mortality in middle aged men in 11
-		European countries: comparison of population based studies. EU Working
-		Group on Socioeconomic Inequalities in Health },
-	journal= bmj,
-	year={1998},
-	optvolume={316},
-	optpages={1636--1642}
-}
-
- at article{KunGroMac98b,
-	author={Anton Kunst and F. Groenhof and Johan Mackenbach},
-	title={Mortality by occupational class among men 30--64 years in 11 European countries},
-	journal= ssm,
-	volume= 46,
-	year= 1998,
-	pages={1459-1476},
-	number= 11
-}
-
- at article{Kunsch87,
-	author={Hans R. K{\"u}nsch},
-	title={Intrinsic Autoregressions and Related Models on the Two-Dimensional Lattice},
-	journal={Biometrika},
-	volume= 74,
-	year= 1987,
-	pages={517-524},
-	number= 3
-}
-
- at article{Kuo01,
-	author={Yen-Hong Kuo},
-	title={Extrapolation of Association Between Two Variables in Four General Medical
-		Journals},
-	year= 2001 ,
-	month={September},
-	note={Fourth International Congress on Peer Review in Biomedical Publication},
-	key={Barcelona, Spain}
-}
-
- at book{kvart86,
-	author={Igal Kvart},
-	title={A Theory of Counterfactuals},
-	publisher={Hackett Publishing Company},
-	address={Indianapolis},
-	year= 1986
-}
-
- at article{KwoShuHov06,
-	author={Namhee Kwon and Stuart W. Shulman and Eduard Hovy},
-	title={{Collective Text Analysis for eRulemaking}},
-	journal={7th Annual International Conference on Digital Government Research},
-	year={2006}
-}
-
- at incollection{LagRus02,
-	author={Monica Lagazio and Bruce Russett},
-	title={A Neural Network Analysis of Militarized International Disputes, 1885-1992:
-		Temporal Stability and Causal Complexity},
-	booktitle={The Scourge of War: New Extensions on an Old Problem},
-	publisher={University of Michigan Press},
-	year={2002},
-	address={Ann Arbor},
-	editor={Paul Diehl},
-	optpages={269--295},
-	optannote={This paper fits a model for Cold War militarized disputes and assess how
-		well the Cold War model fits pre-Cold War data (so-called 'post-diction').
-		So it gets fitted values. Also, runs test and training, and discusses merits
-		of neural net approach.}
-}
-
- at article{Lahlrl03,
-	author={P. Lahiri},
-	title={On the Impact of Bootstrapping in Survey Sampling and Small Area Estimation},
-	journal={Statistical Science},
-	volume= 18,
-	year= 2003,
-	pages={199-210},
-	number= 2
-}
-
- at article{LaiLou87,
-	author={Nan M. Laird and Thomas A. Louis},
-	title={Empirical Bayes Confidence Intervals Based on Bootstrap Samples},
-	journal={Journal of the American Statistical Association},
-	volume={82},
-	year={1987},
-	pages={739-750},
-	month={September},
-	number={399}
-}
-
- at article{LaiLou87b,
-	author={Nan M. Laird and Thomas A. Louis},
-	title={Empirical Bayes Confidence Intervals Based on Bootstrap Samples: Rejoinder},
-	journal={Journal of the American Statistical Association},
-	volume={82},
-	year={1987},
-	pages={756-757},
-	month={September},
-	number={399}
-}
-
- at unpublished{Lakin05,
-	author={Jason Lakin},
-	title={Letting the Outsiders in: Democratization and Health Reform in Mexico},
-	note={American Political Science Association},
-	year= 2005 ,
-	address={Washington D.C.}
-}
-
- at article{Lalonde86,
-	author={Robert Lalonde},
-	title={Evaluating the Econometric Evaluations of Training Programs},
-	journal={American Economic Review},
-	volume={76},
-	year={1986},
-	pages={604-620}
-}
-
- at article{Landers05,
-	author={John Landers},
-	title={The Destructiveness of Pre-Industrial Warfare: Political and Technological
-		Determinants},
-	journal={Journal of Peace Research},
-	volume={42},
-	year={2005},
-	pages={455-470},
-	month={July},
-	number={4}
-}
-
- at article{langholz91,
-	author={B. Langholz and D.C. Thomas},
-	title={Efficiency of Cohort Sampling Designs: Some Surprising Results},
-	journal={Biometrics},
-	volume= 47,
-	year= 1991,
-	pages={1563-1571}
-}
-
- at article{langholz96,
-	author={Bryan Langholz and Larry Goldstein},
-	title={Risk Set Sampling in Epidemiologic Cohort Studies},
-	journal={Statistical Science},
-	volume= 11,
-	year= 1996,
-	pages={35-53},
-	number= 1
-}
-
- at article{langholz97,
-	author={Bryan Langholz and {\O}rnulf Borgan},
-	title={Estimation of Absolute Risk from Nested Case-Control Data},
-	journal={Biometrics},
-	volume= 53,
-	year= 1997,
-	pages={767-774},
-	month={June}
-}
-
- at article{LaPalombara68,
-	author={Joseph LaPalombara},
-	title={Macrotheories and Microapplications in Comparative Politics: A Widening
-		Chasm},
-	journal= cp,
-	year= 1968,
-	pages={52--78},
-	month={October}
-}
-
- at book{Laplace1820,
-	author={P.S. Laplace},
-	title={Philosophical Essays on Probabilities},
-	publisher={Dover},
-	year={1951, original: 1820},
-	address={New York}
-}
-
- at article{LaRBanJar79,
-	author={Asenath LaRue and Lew Bank and Lissy Jarvik and Monte Hetland},
-	title={Health in Old Age: How Do Physicians' Ratings and Self-Ratings Compare?},
-	journal={Journal of Gerontology},
-	volume= 34,
-	year= 1979,
-	pages={{687-91}}
-}
-
- at article{Lassen05,
-	author={David Dreyer Lassen},
-	title={The Effect of Information on Voter Turnout: Evidence from a Natural Experiment},
-	journal={American Journal of Political Science},
-	volume={49},
-	year={2005},
-	pages={103-118},
-	month={January},
-	number={1}
-}
-
- at article{Lau97,
-	author={Tai-Shing Lau},
-	title={The Latent Class Model for Multiple Binary Screening Tests},
-	journal={Statistics in Medicine},
-	volume={16},
-	year={1997},
-	pages={2283-2295}
-}
-
- at article{LauIbr95,
-	author={Purushottam W. Laud and Joseph G. Ibrahim},
-	title={Predictive Model Selection},
-	journal= jrssb,
-	volume= 57,
-	year= 1995,
-	pages={247--262},
-	number= 1
-}
-
- at article{LauIbr96,
-	author={Purushottam W. Laud and Joseph G. Ibrahim},
-	title={Predictive Specification of Prior Model Probabilities in Variable Selection},
-	journal={Biometrika},
-	volume= 83,
-	year= 1996,
-	pages={267--274},
-	number= 2
-}
-
- at article{LauSmiSta00,
-	author={Jennifer L. Lauby and Philip J. Smith and Michael Stark and Bobbie Person
-		and Janet Adams},
-	title={A Community-Level HIV Prevention Intervention for Inner-City Women: Results
-		of the Women and Infants Demonstration Projects},
-	journal={American Journal of Public Health},
-	volume={90},
-	year={2000},
-	pages={216-222},
-	month={February},
-	number={2}
-}
-
- at article{LavBenGar03,
-	author={Michael Laver and Kenneth Benoit and John Garry},
-	title={{Extracting Policy Positions from Political Texts Using Words as Data}},
-	journal={American Political Science Review},
-	volume={97},
-	year={2003},
-	pages={311-331},
-	number={2}
-}
-
- at book{Leamer78,
-	author={Edward Leamer},
-	title={Specification Searches},
-	publisher={Wiley},
-	year= 1978,
-	address={New York}
-}
-
- at article{Lebow00,
-	author={Richard Ned Lebow},
-	title={What's so Different About a Counterfactual?},
-	journal= wp,
-	volume= 52,
-	year= 2000,
-	pages={550--85},
-	month={July}
-}
-
- at misc{Lechner00,
-	author={Michael Lechner},
-	title={A note on the common support problem in applied evaluation studies},
-	year= 2000,
-	howpublished={{http://www.siaw.unisg.ch/lechner}},
-	note={University of St. Gallen}
-}
-
- at incollection{Lechner99,
-	author={Michael Lechner},
-	title={Identification and Estimation of Causal Effects of Multiple Treatments under the Conditional Independence Assumption},
-	booktitle={Econometric Evaluation of Labour Market Policies},
-	publisher={Physica},
-	address={Heidelberg},
-	editor={Lechner, M. and Pfeiffer, F.},
-	year= 2001,
-	pages={43--58}
-}
-
- at article{LedBre59,
-	author={S. Ledermann and J. Breas},
-	title={{Les Dimensions de la Mortalit{\'e}}},
-	journal={Population},
-	volume= 14,
-	year= 1959,
-	pages={637--682},
-	note={[in French]}
-}
-
- at article{Lee00,
-	author={Ronald D. Lee},
-	title={The Lee-Carter Method for Forecasting Mortality, with Various Extensions
-		and Applications},
-	journal={North American Actuarial Journal},
-	volume= 4,
-	year= 2000,
-	pages={80--93},
-	number= 1
-}
-
- at article{Lee00a,
-	author={Ronald D. Lee},
-	title={{Long-Term Projections and the US Social Security System}},
-	journal={Population and Development Review},
-	volume= 26,
-	year= 2000,
-	pages={137--143},
-	month={March},
-	number= 1
-}
-
- at article{Lee93,
-	author={Ronald D. Lee},
-	title={{Modeling and Forecasting the Time Series of US Fertility: Age Patterns,
-		Range, and Ultimate Level}},
-	journal={International Journal of Forecasting},
-	volume= 9,
-	year= 1993,
-	pages={187--202}
-}
-
- at article{LeeCar92,
-  author =	 {Ronald D. Lee and Lawrence R. Carter},
-  title =	 {{Modeling and Forecasting U.S. Mortality}},
-  journal =	 jasa,
-  volume =	 87,
-  year =	 1992,
-  month =	 {September},
-  pages =	 {659--675},
-  number =	 419
-}
-
- at article{LeeCar92b,
-	author={Ronald D. Lee and Lawrence R. Carter},
-	title={Rejoinder},
-	journal= jasa,
-	volume= 87,
-	year= 1992,
-	pages={674--675},
-	month={September},
-	number= 419
-}
-
- at article{LeeCarTul95,
-	author={Ronald D. Lee and Lawrence Carter and S. Tuljapurkar},
-	title={Disaggregation in Population Forecasting: Do We Need It? And How to Do it
-		Simply},
-	journal={Mathematical Population Studies},
-	volume={5},
-	year={1995},
-	pages={217--234},
-	month={July},
-	number={3},
-	annote={Authors describe a model for reducing the dimensionality of the forecasting
-		problem by modeling the evolution over time of the age schedules of vital
-		rates, reducing the problem to forecasting a single parameter for fertility
-		and another for mortality. Authors also show how one can fit the model
-		more simply and prepare integrated forecasts for a collection of regions,
-		and discuss alternate approaches to forecasting the estimated indices of
-		fertility and mortality, including state-space methods.}
-}
-
- at misc{LeeHigBiFerWes00,
-	author={Herbert Lee and David Higdon and Zhuoxin Bi and Marco Ferreira and Mike
-		West},
-	title={Markov Random Field Models for High-Dimensional Parameters in Simulations
-		of Fluid Flow in Porous Media},
-	year= 2000,
-	howpublished={Discussion Paper \#00-35, Institute of Statistics and Decision Sciences,
-		Duke University}
-}
-
- at book{LeeJohZel96,
-	author={Jack C. Lee and Wesley O. Johnson and Arnold Zellner},
-	title={Modelling and Prediction: Honoring Seymour Geisser},
-	publisher={Springer},
-	year={1996},
-	editor={Jack C. Lee and Wesley O. Johnson and Arnold Zellner}
-}
-
- at article{LeeMil01,
-	author={Ronald D. Lee and Timothy Miller},
-	title={Evaluating the Performance of the Lee-Carter Approach to Modeling and Forecasting
-		Mortality},
-	journal={Demography},
-	volume= 38,
-	year= 2001,
-	pages={537--549},
-	month={November},
-	number= 4
-}
-
- at article{LeeRof94,
-	author={Ronald D. Lee and R. Rofman},
-	title={Modeling and Projecting Mortality in Chile},
-	journal={Notas Poblacion},
-	volume={22},
-	year={1994},
-	pages={183--213},
-	month={Jun},
-	number={59},
-	annote={Authors extend the Lee-Carter method to deal with various problems of incomplete
-		data common in Third World populations, and then apply the method to forecast
-		mortality in Chile.}
-}
-
- at article{LeeSki99,
-	author={Ronald D. Lee and Jonathan Skinner},
-	title={Will Aging Baby Boomers Bust the Federal Budget?},
-	journal={Journal of Economic Perspectives},
-	volume= 13,
-	year= 1999,
-	pages={117--140},
-	month={Winter},
-	number= 1
-}
-
- at article{LeeTul94,
-	author={Ronald D. Lee and S. Tuljapurkar},
-	title={{Stochastic Population Projections for the U.S.: Beyond High, Medium and
-		Low}},
-	journal= jasa,
-	volume= 89,
-	year= 1994,
-	pages={1175--1189},
-	month={December},
-	number= 428
-}
-
- at article{LeeTul98,
-	author={Ronald D. Lee and S. Tuljapurkar},
-	title={{Uncertain Demographic Futures and Social Security Finances}},
-	journal={American Economic Review: Papers and Proceedings},
-	year= 1998,
-	pages={237--241},
-	month={May}
-}
-
- at incollection{LeeTul98a,
-	author={Ronald D. Lee and S. Tuljapurkar},
-	title={{Stochastic Forecasts for Social Security}},
-	booktitle={Frontiers in the Economics of Aging},
-	publisher={University of Chicago Press},
-	year= 1998,
-	address={Chicago},
-	editor={David Wise},
-	pages={393--420}
-}
-
- at techreport{LenFox06,
-	author={Amanda Lenhart and Susannah Fox},
-	title={{Bloggers: A Portrait of the Internet's New Storytellers}},
-	institution={Pew Internet and American Life Project},
-	year= 2006,
-	note={{http://207.21.232.103/pdfs/PIP\%20Bloggers\%20Report\%20July\%2019\%202006.pdf}}
-}
-
- at book{LenHsu99,
-	author={T. Leonard and J.S.J. Hsu},
-	title={Bayesian Methods},
-	publisher={Cambridge University Press},
-	year= 1999,
-	address={Cambridge}
-}
-
- at unpublished{LeuSia03,
-	author={E. Leuven and B. Sianesi},
-	title={psmatch2},
-	note={{Stata module to perform full {M}ahalanobis and propensity score matching,
-		common support graphing, and covariate imbalance testing. Available at:
-		http://www1.fee.uva.nl/scholar/mdw/leuven/stata}},
-	year= 2003
-}
-
- at misc{LeuSia04,
-	author={Edwin Leuven and Barbara Sianesi},
-	title={PSMATCH2: Stata module to perform full Mahalanobis and propensity score
-		matching, common support graphing, and covariate imbalance testing},
-	year= 2004,
-	howpublished={EconPapers},
-	note={{http://econpapers.repec.org/software/bocbocode/S432001.htm}}
-}
-
- at article{LeuTanLue97,
-	author={Kai-Kuen Leung and Li-Yu Tang and Bee-Horng Lue},
-	title={Self-Rated Health and Mortality in Chinese Institutional Elderly Persons},
-	journal={Journal of Clinical Epidemiology},
-	volume= 50,
-	year= 1997,
-	pages={1107--1116},
-	number= 10
-}
-
- at book{Levinson01,
-	author={Sanford Levinson},
-	title={What is the Constitution's Role in Wartime: Why Free Speech and Other Rights
-		Are Not as Safe as You Might Think},
-	publisher={?},
-	year= 2001
-}
-
- at article{LevKas70,
-	author={P.S. Levy and E. H. Kass},
-	title={A three population model for sequential screening for Bacteriuria},
-	journal={American Journal of Epidemiology},
-	volume={91},
-	year={1970},
-	pages={148-154}
-}
-
- at article{Lewis01,
-	author={Jeffrey B. Lewis},
-	title={Estimating Voter Preference Distributions from Individual-Level Voting Data},
-	journal={Political Analysis},
-	volume= 9,
-	year= 2001,
-	pages={275-297},
-	month={June},
-	number= 3
-}
-
- at article{Lewis03,
-	author={Anthony Lewis},
-	title={Marbury v. Madison v. Ashcroft},
-	journal={New York Times},
-	year={2003},
-	month={February 24},
-	pages={A17}
-}
-
- at incollection{Lewis05,
-	author={Maureen Lewis},
-	title={Improving Efficiency and Impact in Health Care Services: Lessons from Central
-		America},
-	booktitle={Health Systems Innovation in Central America},
-	publisher={The World Bank},
-	year= 2005,
-	address={Washington, D.C.},
-	editor={Gerard M. La Forgia}
-}
-
- at book{lewis73,
-	author={David K. Lewis},
-	title={Counterfactuals},
-	publisher={Harvard University Press},
-	year= 1973,
-	address={Cambridge, MA}
-}
-
- at article{Lewis99b,
-	author={John A. Lewis},
-	title={Statistical Principles for Clinical Trials (ICH E9): An Introductory Note
-		on an International Guideline},
-	journal={Statistics in Medicine},
-	volume={18},
-	year={1999},
-	pages={1903-1904}
-}
-
- at article{LewLev89,
-	author={Robert A. Lew and Paul S. Levy},
-	title={Estimation of Prevalence on the Basis of Screening Tests},
-	journal={Statistics in Medicine},
-	volume={8},
-	year={1989},
-	pages={1225-1230}
-}
-
- at book{Li95,
-	author={S.Z. Li},
-	title={Markov Random Field Modeling in Computer Vision},
-	publisher={Springer-Verlag},
-	year= 1995
-}
-
- at article{LicLipDan92,
-	author={Allen S. Lichter and Marc E. Lippman and David N. Danforth Jr and Teresa
-		d'Angelo and Seth M. Steinberg and Ernest deMoss and Harold D. MacDonald
-		and Cheryl M. Reichert and Maria Merino and Sandra M. Swain and Kenneth
-		Cowan and Lynn H. Gerber and Judith L. Bader and Peggie A. Findlay and
-		Wendy Schain and Catherine R. Gorrell and Karen Straus and Steven A. Rosenberg
-		and Eli Glatstein},
-	title={Mastectomy Versus Breast-Conserving Therapy in the Treatment of Stage I
-		and II Carcinoma of the Breast: A Randomized Trial at the National Cancer
-		Institute},
-	journal={Journal of Clinical Oncology},
-	volume= 10,
-	year= 1992,
-	pages={976-983},
-	month= June ,
-	number= 6
-}
-
- at article{LilPer94,
-	author={David E. Lilienfeld and Daniel P. Perl},
-	title={Projected Neurodegenerative Disease Mortality among Minorities in the United
-		States},
-	journal={Neuroepidemiology},
-	volume= 13,
-	year= 1994,
-	pages={179--186}
-}
-
- at article{LinCutZwi02,
-	author={Shin Lin and David L. Cutler and Michael E. Zwick and Aravinda Chakravarti},
-	title={Haplotype Inference in Random Population Samples},
-	journal={American Journal of Human Genetics},
-	volume= 71,
-	year= 2002,
-	pages={1129--1137}
-}
-
- at book{LinHam97,
-	title={Handbook of Modern Item Response Theory},
-	publisher={Springer},
-	year= 1997,
-	editor={Wim Van Der Linden and Ronald K. Hambleton},
-	address={New York}
-}
-
- at article{LinLin80,
-	author={Bernard S. Linn and Margaret W. Linn},
-	title={Objective and self-assessed health in the old and very old},
-	journal={Social Science and Medicine},
-	volume={14A},
-	year= 1980,
-	pages={311--315}
-}
-
- at article{LinPekWan05,
-	author={Peter K. Lindenauer and Penelope Pekow and Kaijun Wang and Dheeresh K. Mamidi
-		and Benjamin Gutierrez and Evan M. Benjamin},
-	title={Perioperative beta-blocker therapy and mortality after major noncardiac
-		surgery},
-	journal={New England Journal of Medicine},
-	volume={353},
-	year={2005},
-	pages={349-361},
-	month={July},
-	number={4}
-}
-
- at article{LinSmi72,
-	author={D. V. Lindley and A. F. M. Smith},
-	title={{B}ayes Estimates for the Linear Model},
-	journal={Journal of the Royal Statistical Society B},
-	volume= 34,
-	year= 1972,
-	pages={1-41},
-	number= 1
-}
-
- at book{Lipset63,
-	author={Lipset, Seymour Martin},
-	title={Political Man: The Social Bases of Politics},
-	publisher={Anchor Books},
-	year= 1963,
-	address={Garden City, NY}
-}
-
- at article{LisMilFre03,
-	author={John A. List and Daniel L. Millimet and Per G. Fredriksson and W. Warren
-		McHone},
-	title={Effects of Environmental Regulations on Manufacturing Plant Births: Evidence
-		from a Propensity Score Matching Estimator},
-	journal={The Review of Economics and Statistics},
-	volume={85},
-	year={2003},
-	pages={944-952},
-	month={November},
-	number={4}
-}
-
- at article{LitAn04,
-	author={Roderick Little and Hyonggin An},
-	title={Robust Likelihood-Based Analysis of Multivariate Data with Missing Values},
-	journal={Statistica Sinica},
-	volume={14},
-	year={2004},
-	pages={949-968}
-}
-
- at book{LitRub02,
-	author={Roderick J.A. Little and Donald B. Rubin},
-	title={Statistical Analysis with Missing Data, 2nd Edition},
-	publisher={John Wiley and Sons},
-	year={2002},
-	address={New York, New York}
-}
-
- at article{LitRub89,
-	author={Roderick J. Little and Donald Rubin},
-	title={The Analysis of Social Science Data with Missing Values},
-	journal={Sociological Methods and Research},
-	volume={18},
-	year={1989},
-	pages={292-326}
-}
-
- at inbook{LitSch95,
-	author={Roderick J. Little and N. Schenker},
-	title={Handbook of Statistical Modeling for the Social and Behavioral Sciences},
-	chapter={Missing Data},
-	year={1995},
-	pages={39-75}
-}
-
- at unpublished{Little05,
-	author={Roderick Little},
-	title={Calibrated Bayes: A Bayes/Frequentist Roadmap},
-	note={University of Michigan},
-	year={2005},
-	month={September}
-}
-
- at article{Little92,
-	author={Roderick J. Little},
-	title={Regression with Missing X's: A Review},
-	journal={Journal of the American Statistical Association},
-	volume={87},
-	year={1992},
-	pages={1227-1237}
-}
-
- at unpublished{LiuHuChe05,
-	author={Bing Liu and Minqing Hu and Junsheng Cheng},
-	title={Opinion Observer: Analyzing and Comparing Opinions on the Web},
-	note={Bing Liu Dept of Computer Science; Univ of Illinois at Chicago, 851 south
-		Morgan St. Chicago, IL 60607-7053, liub at cs.uic.edu},
-	year={2005},
-	month={May}
-}
-
- at article{LiuWonKon94,
-	author={J. Liu and W.H. Wong and A. Kong},
-	title={Covariance Structure of the Gibbs Sampler with Applications to the Comparisons
-		of Estimators and Augmentation Schemes},
-	journal={Biometrika},
-	volume={81},
-	year={1994},
-	pages={27-40}
-}
-
- at article{LiWen05,
-	author={Quan Li and Ming Wen},
-	title={The Immediate and Lingering Effects of Armed Conflict on Adult Mortality:
-		A Time-Series Cross-National Analysis},
-	abstract={This research investigates the effect of armed conflict on adult mortality
-		across countries and over time. Theoretical mechanisms are specified for
-		how military violence influences adult mortality, both immediately and
-		over time after conflict. The effects of aggregate conflict, interstate
-		and intrastate conflicts, and conflict severity are explored. The Heckman
-		selection model is applied to account for the conflict-induced missing
-		data problem. A pooled analysis across 84 countries for the period from
-		1961 to 1998 provides broad empirical support for the proposed theoretical
-		expectations across both genders. This study confirms the importance of
-		both the immediate and the lingering effect of military conflict on the
-		mortality of the working-age population. The immediate effect of civil
-		conflict is much stronger than that of the interstate conflict, while the
-		reverse applies to the lingering effect. Both the immediate and the lingering
-		effects of severe conflict are much stronger than those of minor conflict.
-		While men tend to suffer higher mortality immediately from intrastate conflict
-		and severe conflict, women in the long run experience as much mortality
-		owing to the lingering effects of these conflicts. The mortality data show
-		a strong data selection bias caused by military conflict. The research
-		findings highlight the imperative for negotiating peace. Preventing a contest
-		from escalating into a severe conflict can produce noticeable gains in
-		saved human lives.},
-	journal={Journal of Peace Research},
-	volume={42},
-	year={2005},
-	pages={471-492},
-	month={July},
-	number={4}
-}
-
- at article{LoeFulKag03,
-	author={Susanna Loeb and Bruce Fuller and Sharon Lynn Kagan and Bidemi Carrol},
-	title={How Welfare Reform Affects Young Children: Experimental Findings from Connecticut
-		- A Research Note},
-	journal={Journal of Policy Analysis and Management},
-	volume={22},
-	year={2003},
-	pages={537-550},
-	number={4}
-}
-
- at article{Londregan00,
-	author={John Londregan},
-	title={Estimating Legislator's Preferred Points},
-	journal={Political Analysis},
-	volume= 8,
-	year= 2000,
-	pages={21--34},
-	month={Winter},
-	number= 1
-}
-
- at book{Londregan00b,
-	author={John Londregan},
-	title={Legislative Institutions and Ideology in Chile},
-	publisher={Cambridge University Press},
-	year= 2000,
-	address={New York}
-}
-
- at book{LonFre06,
-	author={J. Scott Long and Jeremy Freese},
-	title={Regression Models for Categorical Dependent Variables Using Stata},
-	publisher={Stata Press},
-	year={2006},
-	address={College Station, TX}
-}
-
- at article{LooBee46,
-	author={Loomis, C. P. and Beegle, J. A.},
-	title={The Spread of German Nazism in Rural Areas},
-	journal= asr,
-	volume= 11,
-	year= 1946,
-	pages={724-734}
-}
-
- at book{LopAhmGui00,
-  author =	 {Alan Lopez and O. Ahmed and M. Guillot and
-                  B.D. Ferguson and J.A. Salomon and C.J.L. Murray and
-                  K.H. Hill},
-  title =	 {World Mortality in 2000: Life Tables for 191
-                  Countries},
-  publisher =	 {World Health Organization},
-  year =	 2000,
-  address =	 {Geneva}
-}
-
- at article{LowAltFer98,
-	author={Robert Lowry and James E. Alt and Karen Ferree},
-	title={Fiscal Policy Outcomes and Electoral Accountability in the American States},
-	journal={American Political Science Review},
-	volume= 92,
-	year= 1998,
-	pages={759-777},
-	month={December}
-}
-
- at article{Lowenstein2006,
-	author={Daniel H. Lowenstein},
-	title={Vieth's Gap: Has the Supreme Court Gone from Bad to Worse on Partisan Gerrymandering?},
-	journal={Cornell Journal of Law and Public Policy},
-	volume={N:X},
-	year={2006},
-	pages={101-130},
-	month={January}
-}
-
- at article{LozSolGak06,
-  author =	 {Rafael Lozano and Patricia Soliz and Emmanuela
-                  Gakidou and Jesse Abbott-Klafter and Dennis
-                  M. Feehan and Cecilia Vidal and Juan Pablo Ortiz and
-                  Christopher J.L. Murray},
-  title =	 {Benchmarking of performance of Mexican States with
-                  Effective Coverage},
-  journal =	 {The Lancet},
-  volume =	 {368},
-  year =	 {2006},
-  pages =	 {1729-1741},
-  month =	 {November}
-}
-
- at article{lubin94,
-  author =	 {J.H. Lubin and M.H. Gail},
-  title =	 {Sampling Strategies in Nested Case-Control Studies},
-  journal =	 {Environmental Health Perspectives},
-  volume =	 102,
-  year =	 1994,
-  pages =	 {47-51},
-  number =	 {suppl 8}
-}
-
- at article{Lubkemann05,
-  author =	 {Stephen C. Lubkemann},
-  title =	 {Migratory Coping in Wartime Mozambique: An
-                  Anthropology of Violence and Displacement in
-                  Fragmented Wars},
-  journal =	 {Journal of Peace Research},
-  volume =	 {42},
-  year =	 {2005},
-  pages =	 {493-508},
-  month =	 {July},
-  number =	 {4}
-}
-
- at article{LumHea99,
-  author =	 {Thomas Lumley and Patrick Heagerty},
-  title =	 {Weighted Empirical Adaptive Variance Estimators for
-                  Correlated Data Regression},
-  journal =	 jrssb,
-  volume =	 {61},
-  year =	 {1999},
-  pages =	 {459--477},
-  number =	 {2}
-}
-
- at article{LunSmi05,
-  author =	 {Jennifer Hickes Lundquist and Herbert L. Smith},
-  title =	 {Family Formation Among Women in the U.S. Military:
-                  Evidence from the NLSY},
-  journal =	 {Journal of Marriage and Family},
-  volume =	 {67},
-  year =	 {2005},
-  pages =	 {1-13},
-  month =	 {February}
-}
-
- at article{LuRos04,
-	author={Bo Lu and Paul R. Rosenbaum},
-	title={Optimal Pair Matching With Two Control Groups},
-	journal={Journal of Computational and Graphical Statistics},
-	volume={13},
-	year={2004},
-	pages={422-434},
-	number={2}
-}
-
- at inbook{Lustig94,
-	author={Nora Lustig},
-	title={Solidarity as a Strategy of Poverty Alleviation},
-	chapter={5},
-	year={1994},
-	publisher={Center for U.S.-Mexican Studies},
-	pages={79-96},
-	series={U.S.-Mexico Contemporary Perspectives Series, 6},
-	address={University of California, San Diego}
-}
-
- at article{LuZanHor01,
-  author =	 {Bo Lu and Elaine Zanutto and Robert Hornik and Paul
-                  R. Rosenbaum},
-  title =	 {Matching With Doses in an Observational Study of a
-                  Media Campaign Against Drug Abuse},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {96},
-  year =	 {2001},
-  pages =	 {1245-1253},
-  month =	 {December},
-  number =	 {456}
-}
-
- at techreport{LymVar03,
-  author =	 {Peter Lyman and Hal R. Varian},
-  title =	 {{How much information 2003}},
-  institution =	 {University of California},
-  year =	 2003,
-  note =
-                  {{http://www2.sims.berkeley.edu/research/projects/how-much-info-2003/}}
-}
-
- at article{Lynch03,
-  author =	 {Lynch, C.A.},
-  title =	 {{Institutional Repositories: Essential
-                  Infrastructure For Scholarship In The Digital Age}},
-  journal =	 {portal: Libraries and the Academy},
-  volume =	 {3},
-  year =	 {2003},
-  pages =	 {327--336},
-  number =	 {2}
-}
-
- at article{LynMcc92,
-  author =	 {Henry S. Lynn and Charles E. McCulloch},
-  title =	 {When Does it Pay to Break the Matches for Analysis
-                  of a Matched-Pairs Design?},
-  journal =	 {Biometrics},
-  volume =	 {48},
-  year =	 {1992},
-  pages =	 {397-409},
-  month =	 {June}
-}
-
- at article{MacKunGro99,
-  author =	 {J.P. Mackenbach and A.E. Kunst and F. Groenhof and
-                  J.K. Borgan and G. Costa and F. Faggiano },
-  title =	 {Socioeconomic inequalities in mortality among women
-                  and among men: an international study},
-  journal =	 {American Journal of Public Health},
-  volume =	 89,
-  year =	 1999,
-  pages =	 {1800-1806},
-  number =	 12
-}
-
- at book{Macridis55,
-	author={Roy C. Macridis},
-	title={The Study of Comparative Government},
-	publisher={Doubleday and Co.},
-	year= 1955,
-	address={New York}
-}
-
- at article{MacRivJur06,
-  author =	 {Ellen J. MacKenzie and Frederick P. Rivara and
-                  Gregory J. Jurkovich and Avery B. Nathens and
-                  Katherine P. Frey and Brian L. Egleston and David
-                  S. Salkever and Daniel O Scharfstein},
-  title =	 {A National Evaluation of the Effect of Trauma-Center
-                  Care on Mortality},
-  journal =	 {New England Journal of Medicine},
-  volume =	 {354},
-  year =	 {2006},
-  pages =	 {366-378},
-  month =	 {January}
-}
-
- at article{MadDou64,
-  author =	 {George L. Maddox and Elizabeth B. Douglas},
-  title =	 {Self-Assessments of Health: A Longitudinal Study of
-                  Elderly Subjects},
-  journal =	 {Journal of Health and Social Behavior},
-  volume =	 14,
-  year =	 1973,
-  pages =	 {87--93}
-}
-
- at article{MadNel92,
-  author =	 {W.R. Madych and S.A. Nelson},
-  title =	 {Bounds on Multivariate Polynomials and Exponential
-                  Error Estimates for Multiquadric Interpolation},
-  journal =	 {Journal of Approximation Theory},
-  volume =	 70,
-  year =	 1992,
-  pages =	 {94--114}
-}
-
- at article{Malaker86,
-	author={CR Malaker},
-	title={Estimation of Adult Mortality in India: 1971--1981},
-	journal={Demography India},
-	volume= 15,
-	year= 1986,
-	pages={126--136}
-}
-
- at article{ManKarMar03,
-  author =	 {Kristiina Manderbacka and others},
-  title =	 {The Effect of Point of Reference on the Association
-                  Between Self-Rated Health and Mortality},
-  journal =	 {Social Science and Medicine},
-  volume =	 56,
-  year =	 2003,
-  pages =	 {1447--1452}
-}
-
- at book{ManSch99,
-	author={Christopher D. Manning and Hinrich Sch{\"u}tze},
-	title={Foundations of Statistical Natural Language Processing},
-	publisher={MIT Press},
-	year={1999},
-	address={Cambridge, MA}
-}
-
- at book{Manski05,
-	author={Charles F. Manski},
-	title={Social Choice with Partial Knowledge of Treatment Response},
-	publisher={Princeton University Press},
-	year={2005},
-	series={Econometric Institute Lectures}
-}
-
- at article{Manski77,
-	author={Charles F. Manski},
-	title={The Estimation of Choice Probabilities from Choice Based Samples},
-	journal={Econometrica},
-	volume= 45,
-	year= 1977,
-	pages={1977--88},
-	month={November},
-	number= 8
-}
-
- at article{Manski90,
-  author =	 {Charles F. Manski},
-  title =	 {The Use of Intentions Data to Predict Behavior: A
-                  Best-Case Analysis},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {85},
-  year =	 {1990},
-  pages =	 {934-940},
-  month =	 {December},
-  number =	 {412}
-}
-
- at book{Manski95,
-	author={Charles F. Manski},
-	title={Identification Problems in the Social Sciences},
-	publisher={Harvard University Press},
-	year= 1995
-}
-
- at inproceedings{manski99,
-  author =	 {Charles F. Manski},
-  title =	 {Nonparametric Identification Under Response-Based
-                  Sampling},
-  booktitle =	 {Nonlinear Statistical Inference: Essays in Honor of
-                  Takeshi Amemiya},
-  year =	 1999 ,
-  publisher =	 cup,
-  editor =	 {C. Hsiao and K. Morimune and J. Powell}
-}
-
- at article{ManSzoKoh01,
-	author={Kenneth D. Mandl and Peter Szolovits and Isaac S. Kohane},
-	title={Public standards and patients' control: how to keep electronic medical records
-		accessible but private},
-	journal={British Medical Journal},
-	volume={322},
-	year={2001},
-	pages={283-287},
-	month={February},
-	number={7281},
-	publisher={British Medical Journal}
-}
-
- at article{mantel73,
-	author={N. Mantel},
-	title={Synthetic Retrospective Studies and Related Topics},
-	journal={Biometrics},
-	volume= 29,
-	year= 1973,
-	pages={479--486}
-}
-
-
-	author={Dennis T. Mangano and Julia C. Tudor and Cynthia Dietzel},
-	title={The Risk Associated with Aprotinin in Cardiac Surgery},
-	journal={The New England Journal of Medicine},
-	volume={354},
-	year={2006},
-	pages={353-365},
-	month={January},
-	number={4}
-}
-
- at article{ManRao04,
-  title =	 {{Community-Based and -Driven Development: A Critical
-                  Review}},
-  author =	 {Mansuri, G. and Rao, V.},
-  journal =	 {The World Bank Research Observer},
-  volume =	 {19},
-  number =	 {1},
-  pages =	 {1-39},
-  year =	 {2004}
-}
-
- at article{MarCamFay91,
-  author =	 {Elizabeth A. Martin and Pamela C. Campanelli and
-                  Robert E. Fay},
-  title =	 {An Application of Rasch Analysis to Questionnaire
-                  Design: Using Vignettes to Study the Meaning of
-                  `Work' in the Current Population Survey},
-  journal =	 {The Statistician},
-  volume =	 40,
-  year =	 1991,
-  pages =	 {265--276},
-  month =	 {September},
-  number =	 3
-}
-
- at article{March57,
-	author={James G. March},
-	title={Party Legislative Representation as a Function of Election Results},
-	journal={Public Opinion Quarterly},
-	volume={21},
-	year={1957-1958},
-	pages={521-542},
-	month={Winter},
-	number={4}
-}
-
- at article{Marcus00,
-	author={George E. Marcus},
-	title={{Emotions in Politics}},
-	journal={Annual Review of Political Science},
-	volume={3},
-	year={2000},
-	pages={221-50}
-}
-
- at article{Marcus88,
-	author={George E. Marcus},
-	title={{The Structure of Emotional Response: The 1984 Presidential Candidates}},
-	journal={American Political Science Review},
-	volume={82},
-	year={1988},
-	pages={737-761},
-	number={3}
-}
-
- at article{MarDiePer93,
-	author={Donald C. Martin and Paula Diehr and Edward B. Perrin and Thomas D. Koepsell},
-	title={The Effect of Matching on the Power of Randomized Community Intervention
-		Studies},
-	journal={Statistics in Medicine},
-	volume={12},
-	year={1993},
-	pages={329-338}
-}
-
- at article{MarHusLob95,
-	author={David Marsh and Khatidja Husein and Melvyn Lobo and Mehboob Ali Shah and
-		Stephen Luby},
-	title={Verbal autopsy in Karachi slums: comparing single and multiple cause of
-		child deaths},
-	journal={Health Policy and Planning},
-	volume={10},
-	year={1995},
-	pages={395-403},
-	number={4}
-}
-
- at book{MarKenBib79,
-	author={K. V. Mardia and J. T. Kent and J. M. Bibby},
-	title={Multivariate Analysis},
-	publisher= ap,
-	year={1979},
-	address={London}
-}
-
- at article{MarMajRas93,
-	author={David Marsh and Nuzhat Majit and Zeba Rasmussen and Khalid Mateen and Arif
-		Amin Khan},
-	title={Cause-Specific Child Mortality In A Mountainous Community In Pakistan By
-		Verbal Autopsy},
-	journal={Journal of the Pakistan Medical Association},
-	volume={43},
-	year={1993},
-	pages={226-229},
-	month={November},
-	number={11}
-}
-
- at misc{MarQui01,
-	author={Andrew D. Martin and Kevin M. Quinn},
-	title={The Dimensions of {S}upreme {C}ourt Decision Making: Again Revisiting {T}he
-		{J}udicial {M}ind},
-	year= 2001,
-	howpublished={Paper presented at the Annual Meeting of the Midwest Political Science Association}
-}
-
- at manual{MarQui05,
-	author={Andrew D. Martin and Kevin M. Quinn},
-	title={MCMCpack: Markov chain Monte Carlo (MCMC) Package},
-	year={2005},
-	url={{http://mcmcpack.wustl.edu}}
-}
-
- at article{MarSadFik03,
-	author={David R. Marsh and Salim Sadruddin and Fariyal F. Fikree and Chitra Krishnan
-		and Gary L. Darmstadt},
-	title={Validation of verbal autopsy to determine the cause of 137 neonatal deaths
-		in Karachi, Pakistan},
-	journal={Paediatric and Perinatal Epidemiology},
-	volume={17},
-	year={2003},
-	pages={132-142}
-}
-
- at article{Marshall91,
-	author={R.J. Marshall},
-	title={Mapping Disease and Mortality Rates using Empirical Bayes Estimators},
-	journal={Applied Statistics},
-	volume= 40,
-	year= 1991,
-	pages={283--294},
-	number= 2
-}
-
- at article{MarSmiSta91,
-	author={M.G. Marmot and G.D. Smith and S. Stansfeld and C. Patel and F. North and
-		J. Head and I. White and E. Brunner and A. Feeney},
-	title={Health inequalities among British civil servants: the Whitehall II study},
-	journal= lan,
-	year={1991},
-	optvolume={337},
-	optpages={1387--1393}
-}
-
- at book{Martin92,
-	author={Lisa Martin},
-	title={Coercive Cooperation: Explaining Multilateral Economic Sanctions},
-	publisher={Princeton University Press},
-	year={1992},
-	note={Please inquire with Lisa Martin before publishing results from these data,
-		as this dataset includes errors that have since been corrected.}
-}
-
- at article{MatFatIno05,
-  author =	 {Colin D. Mathers and Doris Ma Fat and Mie Inoue and
-                  Chalapati Rao and Alan Lopez},
-  title =	 {Counting the dead and what they died from: an
-                  assessment of the global status of cause of death
-                  data},
-  journal =	 {Bulletin of the World Health Organization},
-  volume =	 83,
-  year =	 2005,
-  pages =	 {171--177},
-  month =	 {March},
-  number =	 3
-}
-
- at unpublished{MatLopSte03,
-	author={Colin D. Mathers and Alan Lopez and Claudia Stein and Doris Ma Fat and Chalapati
-		Rao and Mie Inoue and Kenji Shibuya and Niels Tomijima and Christina Bernard
-		and Hongyi Xu},
-	title={Deaths and Disease Burden by Cause: Global Burden of Disease Estimates for
-		2001 by World Bank Country Groups},
-	note={Evidence and Information for Policy, World Health Organization, Geneva and
-		School of Population Health, University of Queensland, Brisbane, Australia},
-	year={2003}
-}
-
- at unpublished{MatSteFat02,
-	author={Colin D. Mathers and Claudia Stein and Doris Ma Fat and Chalapati Rao and
-		Mie Inoue and Niels Tomijima and Christina Bernard and Alan D. Lopez and
-		Christopher J.L. Murray},
-	title={Global Burden of Disease 2000: Version 2 Methods and Results},
-	note={Global Programme on Evidence for Health Policy Discussion Paper No. 50 World
-		Health Organization},
-	year={2002},
-	month={October}
-}
-
- at article{MauRos97,
-	author={Gillian H. Maude and David A. Ross},
-	title={The Effect of Different Sensitivity, Specificity and Cause-Specific Mortality
-		Fractions on the Estimation of Differences in Cause-Specific Mortality
-		Rates in Children from Studies Using Verbal Autopsies},
-	journal={International Journal of Epidemiology},
-	volume={26},
-	year={1997},
-	pages={1097-1106},
-	number={5}
-}
-
- at techreport{Mayaud01,
-	author={Philippe Mayaud},
-	title={AIDS-Related Research Projects},
-	institution={The London School of Hygiene \& Tropical Medicine},
-	year={2001},
-	address={London School of Hygiene \& Tropical Medicine www.ishtm.ac.uk}
-}
-
- at book{McCNel89,
-	author={Peter McCullagh and James A. Nelder},
-	title={Generalized Linear Models},
-	publisher={Chapman \& Hall},
-	year={1989},
-	series={Monographs on Statistics and Applied Probability},
-	edition={2nd},
-	number={37}
-}
-
- at inbook{McConahay86,
-	author={John B. McConahay},
-	title={Prejudice, Discrimination, and Racism: Theory and Research},
-	chapter={Modern Racism, Ambivalence, and the Modern Racism Scale},
-	year={1986},
-	publisher={Academic Press},
-	address={New York},
-	editor={J. Dovidio and S.L. Gaertner}
-}
-
- at article{MccRidMor04,
-	author={Daniel F. McCaffrey and Greg Ridgeway and Andrew R. Morral},
-	title={Propensity Score Estimation With Boosted Regression for Evaluating Causal
-		Effects in Observational Studies},
-	journal={Psychological Methods},
-	volume={9},
-	year={2004},
-	pages={403-425},
-	number={4}
-}
-
- at article{McCShaWan94,
-	author={John McCallum and Bruce Shadbolt and Dong Wang},
-	title={Self-Rated Health and Survival: A 7-year Follow-Up Study of Australian Elderly},
-	journal={American Journal of Public Health},
-	volume= 84,
-	year= 1994,
-	pages={1100--1105}
-}
-
- at unpublished{McDonald06a,
-	author={Michael P. McDonald},
-	title={Seats to Votes Ratios in the United States},
-	note={George Mason University, Dept of Public and International Affairs 4400 University
-		Dr., 3-F4 Fairfax, VA 22030-4444 (703)-993-4191 mmcdon at gmu.edu},
-	year={2006}
-}
-
- at unpublished{McDonald06b,
-	author={Michael D. McDonald},
-	title={A Standard for Detecting and Remedying Gerrymanders},
-	note={Department of Political Science, Binghamton University- SUNY, Binghamton
-		NY 13902-6000, (617) 777-2946, mdmcd at binghamton.edu},
-	year={2006}
-}
-
- at article{McKibbin69,
-	author={Ross McKibbin},
-	title={The Myth of the Unemployed: Who did Vote for the Nazis?},
-	journal={Australian Journal of Politics and History},
-	volume= 15,
-	year= 1969,
-	pages={25--40},
-	number= 2
-}
-
- at book{McLThr97,
-	author={Geoffrey J. McLachlan and Thriyambakam Krishnan},
-	title={The EM Algorithm and Extensions},
-	publisher={Wiley},
-	year={1997},
-	address={New York}
-}
-
- at article{McNown92,
-	author={Robert McNown},
-	title={Comment},
-	journal= jasa,
-	volume= 87,
-	year= 1992,
-	pages={671--672},
-	number= 419
-}
-
- at article{McNRog89,
-	author={Robert McNown and Andrei Rogers},
-	title={Forecasting Mortality: A Parameterized Time Series Approach},
-	journal={Demography},
-	volume= 26,
-	year= 1989,
-	pages={645--660},
-	number= 4
-}
-
- at article{McNRog92,
-	author={Robert McNown and Andrei Rogers},
-	title={Forecasting Cause-Specific Mortality Using Time Series Methods},
-	journal={International Journal of Forecasting},
-	volume= 8,
-	year= 1992,
-	pages={413--432}
-}
-
- at unpublished{McqLasLai06,
-	author={Matthew B. McQueen and Jessica Lasky-Su and Nan M. Laird and Christoph Lange},
-	title={Screening and Testing using the Same Data Set: A Testing Strategy for Genome-Wide
-		Association Studies for Case-Control and Case-Cohort Designs},
-	year={2006}
-}
-
- at article{Mead92,
-	author={A. Mead},
-	title={Review of the Development of Multidimensional Scaling Methods},
-	journal={The Statistician},
-	volume= 41,
-	year= 1992,
-	pages={27--39},
-	month={April},
-	number= 1
-}
-
- at article{MebSek04,
-	author={Walter Mebane and Jasjeet Sekhon},
-	title={Robust Estimation and Outlier Detection in Overdispersed Multinomial Models
-		of Count Data},
-	journal= ajps,
-	volume= 48,
-	year= 2004,
-	pages={391--410},
-	month={April}
-}
-
- at article{MeiKarPar04,
-	author={Bettina Meinow and others},
-	title={The effect of the duration of follow-up in mortality analysis: The temporal
-		pattern of different predictors},
-	journal={Journal of Gerontology: Social Sciences},
-	volume={59B},
-	year={2004},
-	pages={S181--S189},
-	number={3}
-}
-
- at article{Meng94,
-	author={Xiao-Li Meng},
-	title={Multiple-Imputation Inferences with Uncongenial Sources of Input},
-	journal={Statistical Science},
-	volume={9},
-	year={1994},
-	pages={538-573},
-	number={4}
-}
-
- at article{Meng94b,
-	author={X.L. Meng},
-	title={Posterior Predictive p-Values},
-	journal={Annals of Statistics},
-	volume={22},
-	year={1994},
-	pages={1142-1160},
-	number={3}
-}
-
- at article{MenRom03,
-	author={Xiao-Li Meng and Martin Romero},
-	title={Discussion: Efficiency and Self-Efficiency with Multiple Imputation Inference},
-	journal={International Statistical Review},
-	volume={71},
-	year={2003},
-	pages={607-618},
-	number={3}
-}
-
- at article{MenRub92,
-	author={X.L. Meng and Donald Rubin},
-	title={Performing Likelihood Ratio Tests with Multiply-imputed Data Sets},
-	journal={Biometrika},
-	volume={79},
-	year={1992},
-	pages={103-111}
-}
-
- at article{Metetal53,
-	author={N. Metropolis and A. W. Rosenbluth and M. N. Rosenbluth and A. H. Teller
-		and E. Teller},
-	title={Equation of State Calculations by Fast Computing Machines},
-	journal={Journal of Chemical Physics},
-	volume={21},
-	year= 1953,
-	pages={1087-1092}
-}
-
- at article{MicBloHil04,
-	author={Charles Michalopoulos and Howard S. Bloom and Carolyn J. Hill},
-	title={Can propensity-score methods match the findings from a random assignment
-		evaluation of mandatory welfare-to-work programs?},
-	journal={Review of Economics and Statistics},
-	volume= 86,
-	year= 2004,
-	pages={156-179},
-	number= 1
-}
-
- at article{Midlarsky05,
-	author={Manus I. Midlarsky},
-	title={The Demographics of Genocide: Refugees and Territorial Loss in the Mass
-		Murder of European Jewry},
-	abstract={This study seeks to distinguish between instances where genocide occurred
-		and others where it might have been expected to occur but did not. Territorial
-		loss, a corollary refugee influx, and a resulting contraction of socio-economic
-		space are suggested to provide that distinction. Four analytic perspectives
-		based on emotional reactions, class envy, prospect theory, and territoriality
-		indicate the critical importance of loss. The theory is examined in the
-		context of the mass murder of European Jewry including, of course, Germany
-		and Austria, and all European German allies that allowed an indigenous
-		genocidal impulse, willingness to comply with German genocidal policies,
-		or an ability to resist German pressures for Jewish deportation. Three
-		instances of perpetrating states - Italy, Vichy France, and Romania - emerge
-		from the analysis. The latter two governments willingly collaborated with
-		the Germans in victimizing their own Jewish citizenry, while Italy was
-		on a genocidal path just prior to the German occupation. All five states
-		mentioned above were found to experience considerable territorial loss
-		and a contraction of socio-economic space. Bulgaria and Finland, on the
-		other hand, actually expanded their borders at the start of the war and
-		saved virtually all of their Jewish citizens. The importance of loss is
-		demonstrated not only cross-sectionally, in the comparison between the
-		five victimizers, on the one hand, and Bulgaria and Finland, on the other,
-		but also diachronically, in the changing behavior over time of the genocidal
-		and perpetrating states.},
-	journal={Journal of Peace Research},
-	volume={42},
-	year={2005},
-	pages={375-391},
-	month={July},
-	number={4}
-}
-
- at article{Mierendorff30,
-	author={Carl Mierendorff},
-	title={Gesicht und Charakter der nationalsozialistischen Bewegung},
-	journal={Gesellschaft},
-	volume= 7,
-	year= 1930,
-	pages={489--540},
-	number= 1
-}
-
- at article{MiiVuoOja97,
-	author={Seppo Miilunpalo and Ilkka Vuori and Pekka Oja and Matti Pasanen and Helka Urponen},
-	title={Self Rated Health as a Health Measure: The Predictive Value of Self-Reported
-		Health Status on the Use of Physician Services and on Mortality in the
-		Working Age Population},
-	journal={Journal of Clinical Epidemiology},
-	volume= 50,
-	year= 1997,
-	pages={517--528}
-}
-
- at misc{MikSuc05,
-	author={Gerome Miklau and Dan Suciu},
-	title={Managing Integrity for Data Exchanged on the Web},
-	year= 2005,
-	month={16--17 June},
-	howpublished={Eighth International Workshop on the Web and Databases, Baltimore},
-	note={{http://webdb2005.uhasselt.be/papers/1-3.pdf}}
-}
-
- at article{Miller01,
-	author={Tim Miller},
-	title={Increasing Longevity and Medicare Expenditures},
-	journal={Demography},
-	volume= 38,
-	year= 2001,
-	pages={215--226},
-	month={May},
-	number= 2
-}
-
- at inbook{MilReiHes98,
-	author={Arthur H. Miller and William M. Reisinger and Vicki L. Hesli},
-	title={Elections and Voters in Post-Communist Russia},
-	chapter={Leader Popularity and Party Development in Post-Soviet Russia},
-	year={1998},
-	pages={100-135},
-	publisher={Edward Elgar},
-	address={London},
-	editor={Matthew Wyman and Stephen White and Sarah Oates}
-}
-
- at article{MilRob89,
-	author={Miller, Abraham H. and Robbins, James S.},
-	title={Who Did Vote for Hitler? A Reanalysis of the Lipset/Bendix Controversy},
-	journal={Polity},
-	volume= 21,
-	year= 1989,
-	pages={655-677},
-	number= 4
-}
-
- at article{MilSha94,
-	author={Paul Milgrom and Chris Shannon},
-	title={Monotone Comparative Statics},
-	journal={Econometrica},
-	volume= 62,
-	year= 1994,
-	pages={157--180},
-	month={January},
-	number= 1,
-	annote={introduction of the single crossing property as a way to ensure monotonicity}
-}
-
- at article{MinRos00,
-	author={Ming, K. and Rosenbaum, Paul R.},
-	title={Substantial gains in bias reduction from matching with a variable number
-		of controls},
-	journal={Biometrics},
-	volume={56},
-	year={2000},
-	pages={118-124}
-}
-
- at article{MoeWal01,
-	author={Karl Ove Moene and Michael Wallerstein},
-	title={Inequality, Social Insurance, and Redistribution},
-	journal={American Political Science Review},
-	volume={95},
-	year={2001},
-	pages={859-874},
-	month={December},
-	number={4}
-}
-
- at article{MoeWal03,
-	author={Karl Ove Moene and Michael Wallerstein},
-	title={Earnings Inequality and Welfare Spending: A Disaggregated Analysis},
-	journal={World Politics},
-	volume={55},
-	year={2003},
-	pages={485-516},
-	month={July}
-}
-
- at book{Mommsen89,
-	author={Hans Mommsen},
-	title={Die verspielte Freiheit --- der Weg der Republik von Weimar in den Untergang,
-		1918 bis 1933},
-	publisher={Propyl{\"a}en},
-	year= 1989
-}
-
- at techreport{MonLopGel99,
-	author={Manuel Montes-y-Gomez and Aurelio Lopez-Lopez and Alexander F. Gelbukh and
-		Grigori Sidorov and Adolfo Guzman-Arenas},
-	title={Text Mining: New Techniques and Applications},
-	institution={Center for computing Research of the National Polytechnic Institute, Mexico
-		City},
-	year={1999},
-	month={August},
-	address={CIC, IPN, Laboratorio de Lenguaje Natural, Av. Juan de Dios Batiz, Mexico
-		DF.},
-	volume={34}
-}
-
- at techreport{MonLopGel99b,
-	author={Manuel Montes y Gomez and Aurelio Lopez Lopez and Alexander F. Gelbukh},
-	title={Text Mining as a Social Thermometer},
-	institution={Centro de Investigaci{\'o}n en Computaci{\'o}n},
-	year={1999},
-	address={CIC, IPN Laboratorio de Lenguaje Natural. Ave. Juan de Dios Batiz, Mexico
-		DF}
-}
-
- at book{MonSam04,
-	title={Decentralization and Democracy in Latin America},
-	publisher={University of Notre Dame Press},
-	year={2004},
-	editor={Alfred P. Montero and David J. Samuels},
-	address={Notre Dame, Indiana}
-}
-
- at book{Montgomery2001,
-	author={Douglas C. Montgomery},
-	title={Design and Analysis of Experiments},
-	publisher={Wiley},
-	year={2001},
-	address={New York},
-	edition={5th}
-}
-
- at article{MorBlaTom03,
-	author={Saul S. Morris and Robert E. Black and Lana Tomaskovic},
-	title={Predicting the distribution of under-five deaths by cause in countries without
-		adequate vital registration systems},
-	journal={International Journal of Epidemiology},
-	volume={32},
-	year={2003},
-	pages={1041-1051}
-}
-
- at book{Morozov84,
-	author={V.A. Morozov},
-	title={Methods for solving incorrectly posed problems},
-	publisher={Springer-Verlag},
-	year= 1984 ,
-	address={Berlin}
-}
-
- at article{MorSpi99,
-	author={Mary J. Morrissey and Donna Spiegelman},
-	title={Matrix Methods for Estimating Odds Ratios with Misclassified Exposure Data:
-		Extensions and Comparisons},
-	journal={Biometrics},
-	volume={55},
-	year={1999},
-	pages={338-344},
-	month={June}
-}
-
- at unpublished{MorYamTat02,
-	author={Satoshi Morinaga and Kenji Yamanishi and Kenji Tateishi and Toshikazu Fukushima},
-	title={Mining Product Reputations on the Web},
-	note={Satoshi Morinaga and Kenji Yamanishi NEC Corp. 4-1-1 Miyazaki Miyamae Kawasaki
-		Kanagawa 216-8555 Japan Tel: 81-44-856-2143; morinaga at cw.jp.nec.com},
-	year={2002}
-}
-
- at article{MosSha82,
-	author={Jana M. Mossey and Evelyn Shapiro},
-	title={Self-Rated Health: A Predictor of Mortality Among the Elderly},
-	journal={American Journal of Public Health},
-	volume= 72,
-	year= 1982,
-	pages={800--808}
-}
-
- at article{moynihan00,
-	author={Ray Moynihan and Lisa Bero and Dennis Ross-Degnan and David Henry and Kirby
-		Lee and Judy Watkins and Connie Mah and Stephen B. Soumerai},
-	title={Coverage by the News Media of the Benefits and Risks of Medications},
-	journal={New England Journal of Medicine},
-	volume= 342,
-	year= 2000,
-	pages={1645-1650},
-	month={June 1},
-	number= 22
-}
-
- at article{MugLac02,
-	author={Anthony Mughan and Dean Lacy},
-	title={Economic Performance, Job Insecurity, and Electoral Choice},
-	journal={British Journal of Political Science},
-	volume= 32,
-	year= 2002,
-	pages={513--533}
-}
-
- at book{MurEva03,
-	title={Health Systems Performance Assessment: Debates, Methods and Empiricism},
-	publisher={World Health Organization},
-	year= 2003,
-	editor={Christopher J.L. Murray and David B. Evans},
-	address={Geneva}
-}
-
- at book{MurLop96,
-	title={The Global Burden of Disease},
-	publisher={Harvard University Press and WHO},
-	year= 1996 ,
-	editor={Christopher J.L.\ Murray and Alan D.\ Lopez}
-}
-
- at article{MurLop97,
-	author={Christopher J. L. Murray and Alan D. Lopez},
-	title={Mortality by Cause for Eight Regions of the World: Global Burden of Disease
-		Study},
-	journal={The Lancet},
-	volume={349},
-	year={1997},
-	pages={1269-1276}
-}
-
- at article{murphy69,
-	author={George G. S. Murphy},
-	title={On Counterfactual Propositions},
-	journal={History and Theory},
-	volume= 9,
-	year= 1969,
-	pages={14-38}
-}
-
- at book{Murphy72,
-	author={Paul L. Murphy},
-	title={The Meaning of Freedom of Speech},
-	publisher={Greenwood},
-	year= 1972,
-	address={Westport, CT}
-}
-
- at book{Murray98,
-	author={Murray, David M.},
-	title={Design and Analysis of Group-Randomized Trials},
-	publisher={Oxford UP},
-	year={1998},
-	address={New York}
-}
-
- at book{Mutz98,
-	author={Diana C. Mutz},
-	title={Impersonal Influence: How Perceptions of Mass Collectives Affect Political
-		Attitudes},
-	publisher={Cambridge University Press},
-	year={1998},
-	address={New York, NY}
-}
-
- at article{MwaLesDec05,
-	author={Samuel M. Mwalili and Emmanuel Lesaffre and Dominique Declerck},
-	title={A Bayesian ordinal logistic regression model to correct for interobserver
-		measurement error in a geographical oral health study},
-	journal={Applied Statistics},
-	volume={54},
-	year={2005},
-	pages={77-93}
-}
-
- at article{Myerson04,
-  author =	 {Roger Myerson},
-  year =	 {2004},
-  title =	 {Political Economics and the Weimar Disaster},
-  journal =	 {Journal of Institutional and Theoretical Economics},
-  pages =	 {187-209},
-  volume =       {160}
-}
-
- at article{Nagler91,
-	author={Jonathan Nagler},
-	title={{The Effect of Registration Laws and Education on U. S. Voter Turnout}},
-	journal= apsr,
-	volume={85},
-	year={1991},
-	pages={1393--1405},
-	number={4}
-}
-
- at unpublished{NasYi03,
-	author={Tetsuya Nasukawa and Jeonghee Yi},
-	title={Sentiment Analysis: Capturing Favorability Using Natural Language Processing},
-	note={Tetsuya Nasukawa IBM Research, Tokyo Research Laboratory 1623-14 Shimotsuruma,
-		Yamato-shi, Kanagawa-ken, 242-8502, Japan; nasukawa at jp.ibm.cop},
-	year={2003},
-	month={October}
-}
-
- at book{National02,
-	author={NIPSSR},
-	title={Population Projections for Japan (January, 2002)},
-	publisher={National Institute of Population and Social Security Research},
-	year= 2002,
-	annote={Life tables for Japan are constructed using the Lee-Carter method.}
-}
-
- at book{Neuendorf02,
-	author={Neuendorf, K.A.},
-	title={{The Content Analysis Guidebook}},
-	publisher={Sage Publications},
-	year={2002},
-	address={Thousand Oaks, CA}
-}
-
- at article{Neuhaus02,
-	author={John M. Neuhaus},
-	title={Analysis of Clustered and Longitudinal Binary Data Subject to Response Misclassification},
-	journal={Biometrics},
-	volume={58},
-	year={2002},
-	pages={675-683},
-	month={September}
-}
-
- at article{Neuhaus99,
-	author={John M. Neuhaus},
-	title={Bias and efficiency loss due to misclassified responses in binary regression},
-	journal={Biometrika},
-	volume={86},
-	year={1999},
-	pages={843-855},
-	number={4}
-}
-
- at article{neutra78,
-	author={Raymond R. Neutra and Margaret E. Drolette},
-	title={Estimating Exposure-Specific Disease Rates from Case-Control Studies Using
-		Bayes Theorem},
-	journal={American Journal of Epidemiology},
-	volume= 108,
-	year= 1978,
-	pages={214-222},
-	number= 3
-}
-
- at article{Neyman23,
-	author={J. Neyman},
-	title={On the application of probability theory to agricultural experiments. Essay
-		on Principles. Section 9},
-	journal={Statistical Science},
-	volume={5},
-	year={1923},
-	pages={465-480},
-	note={Translated in 1990, with discussion}
-}
-
- at article{Neyman23b,
-	author={J. Neyman},
-	title={Statistical Problems in Agricultural Experiments},
-	journal={Supplement to the Journal of the Royal Statistical Society},
-	volume= 2,
-	year= 1923,
-	pages={107--180},
-	number= 2
-}
-
- at article{Nickerson05,
-	author={David W. Nickerson},
-	title={Scalable Protocols Offer Efficient Design for Field Experiments},
-	journal={Political Analysis},
-	volume={13},
-	year={2005},
-	pages={233-252}
-}
-
- at article{NieFet86,
-	author={Richard G. Niemi and Patrick Fett},
-	title={The Swing Ratio: An Explanation and an Assessment},
-	journal={Legislative Studies Quarterly},
-	volume={11},
-	year={1986},
-	pages={75-90},
-	month={February},
-	number={1}
-}
-
- at misc{NISO01,
-	author={NISO},
-	title={The Dublin Core Metadata Element Set},
-	year={2001},
-	note={{http://www.niso.org/standards/resources/Z39-85.pdf}}
-}
-
- at article{NiyGirPog98,
-  author =	 {P. Niyogi and F. Girosi and T. Poggio},
-  title =	 {Incorporating Prior Information in Machine Learning
-                  by Creating Virtual Examples},
-  journal =	 {Proceedings of the IEEE},
-  volume =	 {86},
-  year =	 {1998},
-  pages =	 {2196--2209},
-  number =	 {11}
-}
-
- at book{Noakes71,
-	author={Noakes, J.},
-	title={The Nazi Party in Lower Saxony},
-	publisher={Oxford University Press},
-	year= 1971,
-	address={New York}
-}
-
- at article{NovReaRau06,
-	author={Scott P. Novak and Sean F. Reardon and Stephen W. Raudenbush and Stephen
-		L. Buka},
-	title={Retail tobacco outlet density and youth cigarette smoking: A propensity-modeling
-		approach},
-	journal={American Journal of Public Health},
-	volume={96},
-	year={2006},
-	pages={670-676},
-	month={April},
-	number={4}
-}
-
- at article{nurminen95,
-	author={Markku Nurminen},
-	title={To Use or Not to Use the Odds Ratio in Epidemiologic Analysis},
-	journal={European Journal of Epidemiology},
-	volume= 11,
-	year= 1995,
-	pages={365--371}
-}
-
- at article{NybPetGai03,
-	author={Hanne Nybo and others},
-	title={Predictors of Mortality in 2249 Nonagenarians-The Danish 1905 Cohort Study},
-	journal={Journal of the American Geriatrics Society},
-	volume= 51,
-	year= 2003,
-	pages={1365--1373}
-}
-
- at booklet{OetParHym03,
-	title={The Not So Short Introduction to \LaTeXe},
-	author={Tobias Oetiker and Hubert Partl and Irene Hyna and Elisabeth Schlegl},
-	year= 2003,
-	note={{Available at http://www.ctan.org/tex-archive/info/lshort/english/lshort.pdf}}
-}
-
- at article{OhaWooMoo90,
-	author={A. O'Hagan and E.G. Woodward and L.C. Moodaley},
-	title={Practical Bayesian Analysis of a Simple Logistic Regression: Predicting
-		Corneal Transplants},
-	journal={Statistics in Medicine},
-	volume={9},
-	year={1990},
-	pages={1091-1101}
-}
-
- at book{Ohr97a,
-  author =	 {Dieter Ohr},
-  title =	 {Nationalsozialistische Propaganda und Weimarer
-                  Wahlen: empirische Analysen zur Wirkung von
-                  NSDAP-Versammlungen},
-  publisher =	 {Westdeutscher Verlag},
-  year =	 1997,
-  address =	 {Opladen}
-}
-
- at article{Ohr97b,
-	author={Dieter Ohr},
-	title={Nationalsozialistische Versammlungspropaganda und Wahlerfolg der NSDAP:
-		eine kausale Beziehung?},
-	journal={Historical Social Research},
-	volume= 22,
-	year= 1997,
-	pages={106--127},
-	number={3/4}
-}
-
- at article{OkoDev01,
-	author={Ike S. Okosun and G.E. Alan Dever},
-	title={Verbal Autopsy: A Necessary Solution for the Paucity of Mortality Data in
-		the Less-Developed Countries},
-	journal={Ethnicity and Disease},
-	volume={11},
-	year={2001},
-	pages={575-577}
-}
-
- at article{OloFliAns94,
-	author={John O'Loughlin and Colin Flint and Luc Anselin},
-	title={The Geography of the Nazi Vote: Context, Confession, and Class in the Reichstag
-		Election of 1930},
-	journal={Annals of the Association of American Geographers},
-	volume= 84,
-	year= 1994,
-	pages={351-380}
-}
-
- at article{O'Loughlin02,
-	author={John O'Loughlin},
-	title={The Electoral Geography of Weimar Germany},
-	journal={Political Analysis},
-	volume= 10,
-	year= 2002,
-	pages={217--243},
-	number= 3
-}
-
- at article{Oman85,
-	author={Samuel D. Oman},
-	title={Specifying a Prior Distribution in Structured Regression Problems},
-	journal= jasa,
-	volume= 80,
-	year= 1985,
-	pages={190--195},
-	month={March},
-	number= 389
-}
-
- at article{OneRus97,
-	author={John R. Oneal and Bruce Russett},
-	title={The Classical Liberals Were Right: Democracy, Interdependence, and Conflict,
-		1950-1985},
-	journal= isq,
-	volume= 41,
-	year= 1997,
-	pages={267--293},
-	month={June},
-	number= 2
-}
-
- at inproceedings{OrcWoo72,
-	author={T. Orchard and M.A. Woodbury},
-	title={A Missing Information Principle: Theory and Applications},
-	booktitle={Proceedings of the 6th Berkeley Symposium on Mathematical Statistics and
-		Probability},
-	year={1972},
-	publisher={University of California Press},
-	address={Berkeley},
-	pages={697-715}
-}
-
- at book{ORourke98,
-	author={Joseph O'Rourke},
-	title={Computational Geometry in C},
-	publisher={Cambridge University Press},
-	year= 1998,
-	address={New York}
-}
-
- at article{OSS44,
-	author={OSS},
-	title={Greater Germany --- Kreis Boundaries},
-	journal={OSS Map 6289},
-	year= 1944,
-	month={July 1}
-}
-
- at article{OveMag92,
-	author={John E. Overall and Kevin N. Magee},
-	title={Directional Baseline Differences and Type I Error Probabilities in Randomized
-		Clinical Trials},
-	journal={Journal of Biopharmaceutical Statistics},
-	volume={2},
-	year={1992},
-	pages={189-203},
-	number={2}
-}
-
- at article{PacPacDuk90,
-	author={Sara Pacque-Margolis and Michel Pacque and Zwannah Dukuly and John Boateng
-		and Hugh R. Taylor},
-	title={Application of the Verbal Autopsy During A Clinical Trial},
-	journal={Social Science \& Medicine},
-	volume={31},
-	year={1990},
-	pages={585-591},
-	number={5}
-}
-
- at article{PalPet03,
-	author={Ted Palmer and Anthony Petrosino},
-	title={The ``Experimenting Agency'': The California Youth Authority Research Division},
-	journal={Evaluation Review},
-	volume={22},
-	year={2003},
-	pages={228-266},
-	month={June},
-	number={3}
-}
-
- at article{PalPoo87,
-	author={Thomas R. Palfrey and Keith T. Poole},
-	title={The Relationship Between Information, Ideology, and Voter Behavior},
-	journal= ajps,
-	volume= 31,
-	year= 1987,
-	pages={511-530},
-	month={August},
-	number= 3
-}
-
- at article{Palvi41,
-	author={Palyi, Melchior},
-	title={Economic Foundations of the German Totalitarian State},
-	journal={American Journal of Sociology},
-	volume= 46,
-	year= 1941,
-	pages={469-486},
-	number= 4
-}
-
- at inproceedings{PanLee05,
-	author={Bo Pang and Lillian Lee},
-	title={Seeing stars: Exploiting class relationships for sentiment categorization
-		with respect to rating scales},
-	booktitle={Proceedings of the ACL},
-	year= 2005 ,
-	pages={115--124}
-}
-
- at article{PanLeeVai02,
-	author={Bo Pang and Lillian Lee and Shivakumar Vaithyanathan},
-	title={{Thumbs Up? Sentiment Classification using Machine Learning Techniques}},
-	journal={Proceedings of the Conference on Empirical Methods in Natural Language Processing},
-	year={2002},
-	pages={79-86}
-}
-
- at misc{Parsons00,
-	author={Lori S. Parsons},
-	title={Using SAS Software to Perform a Case-Control Match on Propensity Score in
-		an Observational Study},
-	year= 2000,
-	note={{http://www2.sas.com/proceedings/sugi25/25/po/25p225.pdf}},
-	booktitle={SUGI 25},
-	volume={225-25}
-}
-
- at misc{Parsons01,
-	author={Lori S. Parsons},
-	title={Reducing Bias in a Propensity Score Matched-Pair Sample Using Greedy Matching
-		Techniques},
-	year= 2001,
-	note={{http://www2.sas.com/proceedings/sugi26/p214-26.pdf}},
-	booktitle={SUGI 26},
-	volume={214-26}
-}
-
- at article{ParThoNor92,
-	author={Marti G. Parker and Mats Thorslund and Marie-Louise Nordstrom},
-	title={Predictors of Mortality for the Oldest Old. A 4-year Follow-up of Community
-		Based Elderly in Sweden},
-	journal={Archives of Gerontology and Geriatrics},
-	volume= 14,
-	year= 1992 ,
-	pages={227--237}
-}
-
- at article{Paskin05,
-	author={Norman Paskin},
-	title={Digital Object Identifiers for Scientific Data},
-	journal={Data Science Journal},
-	volume={28},
-	year={2005},
-	pages={12-20},
-	month={April},
-	note={{http://www.doi.org/topics/050428CODATAarticleDSJ.pdf}}
-}
-
- at inproceedings{Passchier80,
-	author={N. Passchier},
-	title={The Electoral Geography of the Nazi Landslide},
-	booktitle={Who Were the Fascists?},
-	year= 1980,
-	publisher={Universitetsforlaget},
-	address={Bergen},
-	editor={S.U. Larsen and B. Hagtvet and J.P. Myklebust},
-	pages={283--300}
-}
-
- at book{Paul90,
-	author={Paul, G.},
-	title={Aufstand der Bilder},
-	publisher={Dietz},
-	year= 1990,
-	address={Bonn}
-}
-
- at article{PeaLucGla02,
-	author={J.W. Peabody and J. Luck and P. Glassman and S. Jain},
-	title={Measuring What We Want to Measure: Using Vignettes in Clinical Education},
-	journal={Journal of Internal Medicine},
-	volume={17, Suppl.},
-	year= 2002,
-	pages={232--232},
-	month={April},
-	number= 1
-}
-
- at article{Pearce70,
-	author={S.C. Pearce},
-	title={The Efficiency of Block Designs in General},
-	journal={Biometrika},
-	volume={57},
-	year={1970},
-	pages={339-346},
-	month={August},
-	number={2}
-}
-
- at book{pearl00,
-	author={Judea Pearl},
-	title={Causality: Models, Reasoning, and Inference},
-	publisher= cup,
-	year= 2000
-}
-
- at article{PecBeeSan02,
-	author={Mark Peceny and Caroline C. Beer and Shannon Sanchez-Terry},
-	title={Dictatorial Peace?},
-	journal= apsr,
-	volume= 96,
-	year= 2002,
-	pages={15--26},
-	number= 1
-}
-
- at article{PelAsp1996,
-	author={Markku Peltonen and Kjell Asplund},
-	title={Age-Period-Cohort Effects on Stroke Mortality in Sweden 1969-1993 and Forecasts
-		Up to the Year 2003},
-	journal={Stroke},
-	volume= 27,
-	year= 1996,
-	pages={1981-1985},
-	number= 11
-}
-
- at article{PelAsp96,
-	author={Markku Peltonen and Kjell Asplund},
-	title={Age-Period-Cohort Effects on Stroke Mortality in Sweden 1969-1993 and Forecasts
-		Up to the Year 2003},
-	journal={Stroke},
-	volume={27},
-	year= 1996,
-	pages={1981--1985}
-}
-
- at article{Perkins00,
-	author={Susan M. Perkins and Wanzhu Tu and Michael G. Underhill and Xiao-Hua Zhou
-		and Michael D. Murray},
-	title={The use of propensity scores in pharmacoepidemiological research},
-	journal={Pharmacoepidemiology and drug safety},
-	volume= 9,
-	year= 2000,
-	pages={93-101}
-}
-
- at article{Permutt90,
-	author={Thomas Permutt},
-	title={Testing for Imbalance of Covariates in Controlled Experiments},
-	journal={Statistics in Medicine},
-	volume={9},
-	year={1990},
-	pages={1455-1462}
-}
-
- at article{PerTuUnd00,
-	author={Susan M. Perkins and Wanzhu Tu and Michael G. Underhill and Xiao-Hua Zhou
-		and Michael D. Murray},
-	title={The use of propensity scores in pharmacoepidemiologic research},
-	journal={Pharmacoepidemiology and Drug Safety},
-	volume={9},
-	year={2000},
-	pages={93-101}
-}
-
- at article{PerWilLev02,
-	author={Thomas T. Perls and John Wilmoth and Robin Levenson and Maureen Drinkwater
-		and Melissa Cohen and Hazel Bogan and Erin Joyce and Stephanie Brewster
-		and Louis Kunkel and Annibale Puca},
-	title={Life-long Sustained Mortality Advantage of Siblings of Centenarians},
-	journal= pnas,
-	volume= 99,
-	year= 2002,
-	pages={8442--8447},
-	month={June 11},
-	number= 12
-}
-
- at article{PetRoeMul06,
-	author={E.D. Peterson and M.T. Roe and J. Mulgund and E.R. DeLong and B.L. Lytle
-		and R.G. Brindis and S.C. Smith Jr. and C.V. Pollack Jr. and L.K. Newby
-		and R.A. Harrington and W.B. Gibler and E.M. Ohman},
-	title={Association between hospital process performance and outcomes among patients
-		with acute coronary syndromes},
-	journal={Journal of the American Medical Association},
-	volume={295},
-	year={2006},
-	pages={1912-1920},
-	month={April}
-}
-
- at article{PijFesKro93,
-	author={Loek T. J. Pijls and Edith J.M. Feskens and Daan Kromhout},
-	title={Self-Rated Health, Mortality, and Chronic Diseases in Elderly Men: The Zutphen
-		Study, 1985-1990},
-	journal={American Journal of Epidemiology},
-	volume= 138,
-	year= 1993,
-	pages={840--848},
-	number= 10
-}
-
- at article{PiqMacHic02,
-	author={Alex R. Piquero and Randall Macintosh and Matthew Hickman},
-	title={The Validity of a Self-Reported Delinquency Scale: Comparisons Across Gender,
-		Age, Race, and Place of Residence},
-	journal= smr,
-	volume= 30,
-	year= 2002,
-	pages={492--529},
-	month={May},
-	number= 4
-}
-
- at book{Placket81,
-	author={R. L. Plackett},
-	title={The Analysis of Categorical Data},
-	publisher={Macmillan},
-	year= 1981,
-	address={New York},
-	edition={2nd}
-}
-
- at book{Plackett81,
-	author={R.L. Plackett},
-	title={{The Analysis of Categorical Data}},
-	publisher={Griffin},
-	year= 1981,
-	address={London}
-}
-
- at manual{PluBesCowVin05,
-	author={Martyn Plummer and Nicky Best and Kate Cowles and Karen Vines},
-	title={coda: Output analysis and diagnostics for MCMC},
-	year={2005},
-	url={{http://www-fis.iarc.fr/coda/}}
-}
-
- at article{PocAssEno02,
-	author={Stuart J. Pocock and Susan E. Assmann and Laura E. Enos and Linda E. Kasten},
-	title={Subgroup analysis, covariate adjustment and baseline comparisons in clinical
-		trial reporting: current practice and problems},
-	journal={Statistics in Medicine},
-	volume={21},
-	year={2002},
-	pages={2917-2930}
-}
-
- at article{PogGamLit88,
-	author={T. Poggio and E.B.\ Gamble and J.J.\ Little},
-	title={Parallel Integration of Vision Modules},
-	journal={Science},
-	volume= 242,
-	year= 1988,
-	pages={436--440},
-	month={21 October}
-}
-
- at unpublished{Pole06a,
-  author =	 {Antoinette Pole},
-  title =	 {Congressional Blogging: Advertising, Credit
-                  Claiming, \& Position Taking},
-  note =	 {Presented at the 2006 annual meeting of the American
-                  Political Science Association, Philadelphia, PA},
-  address =	 {Antoinette_Pole at brown.edu}
-}
-
- at article{Pole06b,
-	author={Antoinette Pole},
-	title={Black Bloggers and the Blogosphere},
-	journal={International Journal of Technology, Knowledge and Society},
-	volume={2},
-	year={2006},
-	number={6}
-}
-
- at unpublished{Pole07,
-	author={Antoinette Pole},
-	title={Do Blogs Matter? Elite Political Bloggers in American Politics},
-	address={Antoinette_Pole at brown.edu}
-}
-
- at article{Pollock44,
-	author={Pollock, James K.},
-	title={An Areal Study of the German Electorate, 1930-1933},
-	journal={American Political Science Review},
-	volume= 38,
-	year= 1944,
-	pages={89-95}
-}
-
- at unpublished{PolMck07,
-	author={Antoinette Pole and Laura McKenna},
-	title={Blogging Alone? Political Participation and the Blogosphere},
-	address={Antoinette_Pole at brown.edu},
-	organization={Brown University}
-}
-
- at article{PooDan85,
-	author={Keith Poole and R. Steven Daniels},
-	title={Ideology, Party, and Voting in the U.S. Congress, 1959--1980},
-	journal= apsr,
-	volume= 79,
-	year= 1985,
-	pages={373-399},
-	month={June}
-}
-
- at article{Poole98,
-	author={Keith T. Poole},
-	title={Recovering a Basic Space From a Set of Issue Scales},
-	journal= ajps,
-	volume= 42,
-	year= 1998,
-	pages={954--993},
-	month={July},
-	number= 3
-}
-
- at article{PooRos91,
-	author={Keith Poole and Howard Rosenthal},
-	title={Patterns of Congressional Voting},
-	journal= ajps,
-	volume= 35,
-	year= 1991,
-	pages={228--278},
-	month={February}
-}
-
- at book{Popkin94,
-	author={Samuel Popkin},
-	title={The Reasoning Voter: Communication and Persuasion in Presidential Campaigns},
-	publisher={University of Chicago Press},
-	year= 1994,
-	address={Chicago}
-}
-
- at article{Porter80,
-	author={Porter, M. F.},
-	title={{An algorithm for suffix stripping}},
-	journal={Program},
-	volume={14},
-	year={1980},
-	pages={130-137},
-	number={3}
-}
-
- at article{PosVer02,
-	author={Eric A. Posner and Adrian Vermeule},
-	title={Legislative Entrenchment: A Reappraisal},
-	journal={The Yale Law Journal},
-	volume={111},
-	year={2002},
-	pages={1665-1705},
-	month={May},
-	number={7}
-}
-
- at article{Powell01,
-	author={G.N. Powell},
-	title={Workplace Romances between Senior-level Executives and Lower-Level Employees},
-	journal={Human Relations},
-	volume= 54,
-	year= 2001,
-	pages={1519--1544},
-	month={November},
-	number= 11
-}
-
- at article{PraAit54,
-	author={Prais, S. J. and Aitchison, J.},
-	title={The Grouping of Observations in Regression Analysis},
-	journal={Revue de l'Institut International de Statistique},
-	volume= 22,
-	year= 1954,
-	pages={1-22}
-}
-
- at book{PreHeuGui01,
-	author={Samuel H. Preston and Patrick Heuveline and Michel Guillot},
-	title={Demography: Measuring and Modeling Population Processes},
-	publisher={Blackwell},
-	year= 2001,
-	address={Oxford, England}
-}
-
- at article{prentice78,
-	author={R.L. Prentice and N.E. Breslow},
-	title={Retrospective Studies and Failure-time Models},
-	journal={Biometrika},
-	volume= 65,
-	year= 1978,
-	pages={153--155}
-}
-
- at article{prentice79,
-	author={R.L. Prentice and R. Pyke},
-	title={Logistic Disease Incidence Models and Case-control Studies},
-	journal={Biometrika},
-	volume= 63,
-	year= 1979,
-	pages={403--411}
-}
-
- at article{prentice86,
-	author={R.L. Prentice},
-	title={A Case-Cohort Design for Epidemiological Studies and Disease Prevention
-		Trials},
-	journal={Biometrika},
-	volume= 73,
-	year= 1986,
-	pages={1--11}
-}
-
- at incollection{Preston91,
-	author={Samuel H. Preston},
-	title={Demographic Change in the United States, 1970--2050},
-	booktitle={Forecasting the Health of Elderly Populations},
-	publisher={Springer-Verlag},
-	year= 1991,
-	address={New York},
-	editor={K.G. Manton and B.H. Singer and R.M. Suzman},
-	pages={51--77}
-}
-
- at incollection{Preston93,
-	author={Samuel H. Preston},
-	title={Demographic Change in the United States, 1970--2050},
-	booktitle={Demography and Retirement: The Twenty-First Century},
-	publisher={Praeger Publishers},
-	year= 1993,
-	address={New York},
-	editor={A.M. Rappaport and Sylvester Scheiber},
-	pages={19-48}
-}
-
- at article{Prinz86,
-	author={Michael Prinz},
-	title={Der unerw{\"u}nschte Stand: Lage und Status der Angestellten im `Dritten Reich'},
-	journal={Historische Zeitschrift},
-	volume= 242,
-	year= 1986,
-	pages={327--359},
-	number= 2
-}
-
- at article{Prinz89,
-	author={Michael Prinz},
-	title={Angestellte und Nationalsozialismus},
-	journal={Geschichte und Gesellschaft},
-	volume= 15,
-	year= 1989,
-	pages={552--562},
-	number= 4
-}
-
- at book{Prinz91,
-	author={Michael Prinz and Rainer Zitelmann},
-	title={Nationalsozialismus und Modernisierung},
-	publisher={Wissenschaftliche Buchgesellschaft},
-	address={Darmstadt},
-	year={1991}
-}
-
- at book{PrzAlvChe00,
-	author={Adam Przeworski and Michael E. Alvarez and Jose Antonio Cheibub and Fernando
-		Limongi},
-	title={Democracy and Development: Political Institutions and Well-Being in the World,
-		1950-1990},
-	publisher={Cambridge University Press},
-	year={2000},
-	address={Cambridge, UK}
-}
-
- at incollection{Przeworski05,
-	author={Adam Przeworski},
-	title={Is the Science of Comparative Politics Possible?},
-	booktitle={Oxford Handbook of Comparative Politics},
-	publisher={Oxford University Press},
-	year={2005},
-	month={August},
-	address={New York},
-	editor={Carles Boix and Susan C. Stokes}
-}
-
- at article{PrzTeu66,
-	author={Adam Przeworski and Henry Teune},
-	title={Equivalence in Cross-National Research},
-	journal={Public Opinion Quarterly},
-	volume= 30,
-	year={1966--1967},
-	pages={551--568},
-	month={Winter}
-}
-
- at article{PurHil06,
-	author={Purpura, Stephen and Dustin Hillard},
-	title={{Automated Classification of Congressional Legislation}},
-	journal={Proceedings of the International Conference on Digital Government Research},
-	year={2006}
-}
-
- at book{Putnam00,
-	author={Robert D. Putnam},
-	title={Bowling Alone: The Collapse and Revival of American Community},
-	publisher={Simon and Schuster},
-	year= 2000,
-	address={New York}
-}
-
- at article{Quandt72,
-	author={Richard Quandt},
-	title={Methods of Estimating Switching Regressions},
-	journal= jasa,
-	volume= 67,
-	year= 1972,
-	pages={306-310},
-	number= 338
-}
-
- at article{Quandt74,
-	author={Richard E. Quandt},
-	title={A Stochastic Model of Elections in Two-Party Systems},
-	journal={Journal of the American Statistical Association},
-	volume={69},
-	year={1974},
-	pages={315-324},
-	month={June},
-	number={346}
-}
-
- at incollection{Qui04,
-	author={Kevin Quinn},
-	title={Ecological Inference in the Presence of Temporal Dependence},
-	booktitle={Ecological Inference: New Methodological Strategies},
-	publisher={Cambridge University Press},
-	year= 2004,
-	address={New York},
-	editor={Gary King and Ori Rosen and Martin A. Tanner}
-}
-
- at article{QuiArmSno96,
-  author =	 {M.A. Quigley and J.R.M. Armstrong Schellenberg and
-                  R.W. Snow},
-  title =	 {Algorithms for verbal autopsies: a validation study
-                  in Kenyan children},
-  journal =	 {Bulletin of the World Health Organization},
-  volume =	 {74},
-  year =	 {1996},
-  pages =	 {147-154},
-  number =	 {2}
-}
-
- at article{QuiChaSet00,
-	author={Maria A. Quigley and Daniel Chandramohan and Philip Setel and Fred Binka
-		and Laura C. Rodrigues},
-	title={Validity of data-derived algorithms for ascertaining causes of adult death
-		in two African sites using verbal autopsy},
-	journal={Tropical Medicine and International Health},
-	volume={5},
-	year={2000},
-	pages={33-39},
-	month={January},
-	number={1}
-}
-
- at article{Quigley05,
-	author={Maria A. Quigley},
-	title={Commentary: Verbal Autopsies - from small-scale studies to mortality surveillance
-		systems},
-	journal={International Journal of Epidemiology},
-	volume={34},
-	year={2005},
-	pages={1087-1088}
-}
-
- at misc{QuiMonCol06,
-  author =	 {Quinn, K.M. and Monroe, B.L. and Colaresi, M. and
-                  Crespin, M.H. and Radev, D.R.},
-  title =	 {{How To Analyze Political Attention With Minimal
-                  Assumptions And Costs}},
-  year =	 {2006},
-  howpublished = {Annual Meeting of the Society for Political
-                  Methodology}
-}
-
- at misc{Quinn00,
-  author =	 {Kevin M. Quinn},
-  title =	 {Flexible Prior Specifications for Factor Analytic
-                  Models with an Application to {A}merican Political
-                  Ideology},
-  year =	 2000,
-  howpublished = {Paper presented at the Annual Meeting of the Midwest
-                  Political Science Association}
-}
-
- at manual{R08,
-	author={{R Development Core Team}},
-	title={R: A Language and Environment for Statistical Computing},
-	organization={R Foundation for Statistical Computing},
-	year={2008},
-	address={Vienna, Austria},
-	note={{ISBN} 3-900051-07-0},
-	url={{http://www.R-project.org}}
-}
-
- at article{Rabban83,
-	author={David Rabban},
-	title={The Emergence of Modern First Amendment Doctrine},
-	journal={University of Chicago Law Review},
-	volume= 50,
-	year={1983},
-	pages={1207--??}
-}
-
- at article{RagGri95,
-	author={T.E. Raghunathan and J.E. Grizzle},
-	title={A Split Questionnaire Survey Design},
-	journal={Journal of the American Statistical Association},
-	volume={90},
-	year={1995},
-	pages={54-63}
-}
-
- at article{Ragsdale91,
-	author={Lyn Ragsdale},
-	title={{Strong Feelings: Emotional Responses to Presidents}},
-	journal={Political Behavior},
-	volume={13},
-	year={1991},
-	pages={33-65},
-	number={1}
-}
-
- at article{RahJos98,
-	author={Elham Rahme and Lawrence Joseph},
-	title={Estimating the Prevalence of a Rare Disease: Adjusted Maximum Likelihood},
-	journal={The Statistician},
-	volume={47},
-	year={1998},
-	pages={149-158},
-	number={1}
-}
-
- at article{RakMorHir91,
-	author={William Rakowski and Vincent Mor and Jeffrey Hiris},
-	title={The Association of Self-Rated Health with Two-Year Mortality in a Sample
-		of Well Elderly},
-	journal={Journal of Aging and Health},
-	volume= 3,
-	year= 1991,
-	pages={527--545}
-}
-
- at article{Rao96,
-	author={J.N.K. Rao},
-	title={On Variance Estimation with Imputed Survey Data},
-	journal={Journal of the American Statistical Association},
-	volume={91},
-	year={1996},
-	pages={499-506}
-}
-
- at article{RauMarSpy06,
-	author={Stephen W. Raudenbush and Andres Martinez and Jessaca Spybrook},
-	title={Strategies for Improving Precision in Group-Randomized Experiments},
-	journal={Educational Evaluation and Policy Analysis},
-	year={2007},
-	address={University of Chicago, University of Michigan}
-}
-
- at book{Recipes87,
-	author={William H. Press and Saul Teukolsky and William T. Vetterling and Brian
-		P. Flannery},
-	title={Numerical Recipes: the Art of Scientific Computing},
-	publisher={Cambridge University Press},
-	year={1987},
-	address={Cambridge}
-}
-
- at article{ReeQui97,
-	author={Barnaby C. Reeves and Maria Quigley},
-	title={A Review of Data-Derived Methods for Assigning Causes of Death from Verbal
-		Autopsy Data},
-	journal={International Journal of Epidemiology},
-	volume={26},
-	year={1997},
-	pages={1080-1089},
-	number={5}
-}
-
- at book{Rehnquist98,
-	author={William H. Rehnquist},
-	title={All the Laws But One: Civil Liberties in Wartime},
-	publisher={Knopf},
-	year= 1998,
-	address={New York}
-}
-
- at article{ReuLi03,
-	author={Rafael Reuveny and Quan Li},
-	title={The Joint Democracy-Dyadic Conflict Nexus: A Simultaneous Equations Model},
-	journal= isq,
-	volume= 47,
-	year= 2003,
-	pages={325--346},
-	month={September},
-	number= 3
-}
-
- at article{RieSch93,
-	author={Arthur van Riel and Arthur Schram},
-	title={Weimar Economic Decline, Nazi Economic Recovery, and the Stabilization of
-		Political Dictatorship},
-	journal={Journal of Economic History},
-	volume= 53,
-	year= 1993,
-	pages={71--105},
-	number= 1
-}
-
- at book{Riffenburgh98,
-	author={R. H. Riffenburgh},
-	title={Statistics in Medicine},
-	publisher={Academic Press},
-	year={1998},
-	address={San Diego}
-}
-
- at proceedings{RilWie03,
-	editor={Ellen Riloff and Janyce Wiebe},
-	title={Learning Extraction Patterns for Subjective Expressions},
-	publisher={Conference on Empirical Methods in Natural Language Processing},
-	year={2003},
-	address={Ellen Riloff, School of Computing, University of UT, Salt Lake City, UT
-		84112; riloff at cs.utah.edu}
-}
-
- at proceedings{RilWieWil03,
-	editor={Ellen Riloff and Janyce Wiebe and Theresa Wilson},
-	title={Learning Subjective Nouns Using Extraction Pattern Bootstrapping},
-	publisher={Seventh CoNLL Conf. Edmonton},
-	year={2003},
-	month={May-June}
-}
-
- at book{Ripley96,
-	author={Brian Ripley},
-	title={Pattern Recognition and Neural Networks},
-	publisher={Cambridge University Press},
-	year={1996}
-}
-
- at article{Ritschl03,
-  author =	 {Ritschl, Albrecht},
-  title =	 {Hat das Dritte Reich wirklich eine ordentliche
-                  Besch{\"a}ftigungspolitik betrieben?},
-  journal =	 {Jahrbuch f{\"u}r Wirtschaftsgeschichte},
-  year =	 {2003},
-  pages =	 {125--140}
-}
-
- at article{Ritschl90,
-  author =	 {Albrecht Ritschl},
-  title =	 {Zu hohe L{\"o}hne in der Weimarer Republik? Eine
-                  Auseinandersetzung mit Holtfrerichs Berechnungen zur
-                  Lohnposition der Arbeiterschaft 1925-1932},
-  journal =	 {Geschichte und Gesellschaft},
-  volume =	 {16},
-  year =	 1990,
-  pages =	 {375--402}
-}
-
- at article{Robins95,
-  author =	 {J.M. Robins},
-  title =	 {Discussion of ``Causal diagrams in empirical research'' by J. Pearl},
-  journal =	 {Biometrika},
-  volume =	 82,
-  year =	 1995,
-  pages =	 {387--394}
-}
-
- at article{Robins86,
-  author =	 {J.M. Robins and M.H. Gail and J.H. Lubin},
-  title =	 {More on Biased Selection of Controls for
-                  Case-Control Analyses of Cohort Studies},
-  journal =	 {Biometrics},
-  volume =	 42,
-  year =	 1986,
-  pages =	 {293--299}
-}
-
- at incollection{Robins99,
-	author={James M. Robins},
-	title={Marginal Structural Models Versus Structural Nested Models as Tools for
-		Causal Inference},
-	booktitle={Statistical Models in Epidemiology: The Environment and Clinical Trials},
-	publisher={Springer-Verlag},
-	year= 1999,
-	address={New York},
-	editor={M.E. Halloran and D. Berry},
-	pages={95-134},
-	volume= 116
-}
-
- at article{Robins99b,
-	author={James M. Robins},
-	title={Association, Causation, and Marginal Structural Models},
-	journal={Synthese},
-	volume= 121,
-	year={1999},
-	pages={151--179}
-}
-
- at article{RobJew91,
-	author={Laurence D. Robinson and Nicholas P. Jewell},
-	title={Some Surprising Results about Covariate Adjustment in Logistic Regression
-		Models},
-	journal={International Statistical Review},
-	volume={59},
-	year={1991},
-	pages={227-240},
-	month={August},
-	number={2}
-}
-
- at article{RobMar99,
-	author={Noah Jamie Robinson and Ravai Marindo},
-	title={Current Estimates of and Future Projections for Adult Deaths Attributed
-		to HIV Infection in Zimbabwe},
-	journal={Journal of Acquired Immune Deficiency Syndromes and Human Retrovirology},
-	volume= 20,
-	year= 1999,
-	pages={187--194}
-}
-
- at article{RobRot01,
-	author={James M. Robins and Andrea Rotnitzky},
-	title={Comment on Peter J. Bickel and Jaimyoung Kwon, `Inference for semiparametric
-		models: Some questions and an answer'},
-	journal={Statistica Sinica},
-	volume= 11,
-	year= 2001,
-	pages={920--936},
-	number= 4,
-	annote={on double robustness}
-}
-
- at article{RobRot03,
-	author={James M. Robins and Andrea Rotnitzky},
-	title={Inverse Probability Weighting Estimation in Survival Analysis},
-	journal={Encyclopedia of Biostatistics},
-	year= 2003,
-	note={forthcoming}
-}
-
- at article{RobRot95,
-	author={J. Robins and A. Rotnitzky},
-	title={Semiparametric efficiency in multivariate regression models with missing
-		data},
-	journal={Journal of the American Statistical Association},
-	volume= 90,
-	year= 1995 ,
-	pages={122-129}
-}
-
- at article{RobWan00,
-	author={James Robins and Naisyin Wang},
-	title={Inference for Imputation Estimators},
-	journal={Biometrika},
-	volume={87},
-	year={2000},
-	pages={113-124}
-}
-
- at article{Rogers86,
-	author={Andrei Rogers},
-	title={Parameterized Multistate Population Dynamics and Projections},
-	journal= jasa,
-	volume= 81,
-	year= 1986,
-	pages={48--61}
-}
-
- at article{RogRay99,
-	author={Andrei Rogers and James Raymer},
-	title={Fitting Observed Demographic Rates with the Multiexponential Model Schedule:
-		An Assessment of Two Estimation Programs},
-	journal={Applied Regional Science Conference},
-	volume= 11,
-	year= 1999,
-	pages={1--10},
-	number= 1
-}
-
- at book{Rokeach73,
-	author={Milton Rokeach},
-	title={The Nature of Human Values},
-	publisher={Free Press},
-	year= 1973,
-	address={New York}
-}
-
- at book{Rokeach79,
-	author={Rokeach, M.},
-	title={{Understanding Human Values: Individual and Societal}},
-	publisher={Free Press},
-	year={1979},
-	address={New York}
-}
-
- at article{RonVanCha98,
-	author={Carine Ronsmans and Anne Marie Vanneste and Jyotshamoy Chakraborty and Jeroen
-		Van Ginneken},
-	title={A comparison of Three Verbal Autopsy Methods to Ascertain Levels and Causes
-		of Maternal Deaths in Matlab, Bangladesh},
-	journal={International Journal of Epidemiology},
-	volume={27},
-	year={1998},
-	pages={660-666}
-}
-
- at book{RoseAckerman99,
-  title={Corruption and Government: Causes, Consequences, and Reform},
-  author={Rose-Ackerman, Susan},
-  year={1999},
-  publisher={Cambridge University Press},
-  address={Cambridge, UK}
-}
-
- at book{Rosenbaum02,
-	author={Rosenbaum, Paul R.},
-	title={Observational Studies},
-	edition={2nd},
-	publisher={Springer Verlag},
-	year={2002},
-	address={New York, NY}
-}
-
- at article{Rosenbaum05,
-	author={Paul R. Rosenbaum},
-	title={An exact distribution-free test comparing two multivariate distributions
-		based on adjacency},
-	journal={Journal of the Royal Statistical Society B},
-	volume={67},
-	year={2005},
-	pages={515-530}
-}
-
- at article{Rosenbaum05b,
-	author={Paul R. Rosenbaum},
-	title={Heterogeneity and Causality: Unit Heterogeneity and Design Sensitivity in
-		Observational Studies},
-	journal={The American Statistician},
-	volume={59},
-	year={2005},
-	pages={147-152},
-	month={May},
-	number={2}
-}
-
- at article{Rosenbaum84,
-	author={Paul Rosenbaum},
-	title={The Consequences of Adjusting for a Concomitant Variable That Has Been Affected
-		by the Treatment},
-	journal= jrssA,
-	volume= 147,
-	year= 1984,
-	pages={656--666},
-	number= 5
-}
-
- at article{Rosenbaum86,
-	author={Paul R. Rosenbaum},
-	title={Dropping out of high school in the {U}nited {S}tates: an observational study},
-	journal={Journal of Educational Statistics},
-	volume= 11,
-	year= 1986,
-	pages={207-224}
-}
-
- at article{Rosenbaum89,
-	author={Rosenbaum, Paul R.},
-	title={Optimal matching for observational studies},
-	journal={Journal of the American Statistical Association},
-	volume= 84,
-	year= 1989,
-	pages={{1024--1032}},
-	keywords={Network; Computation}
-}
-
- at article{Rosenbaum91,
-	author={Paul R. Rosenbaum},
-	title={A Characterization of Optimal Designs for Observational Studies},
-	journal={Journal of the Royal Statistical Society, Series B},
-	volume={53},
-	year={1991},
-	pages={597-610},
-	number={3}
-}
-
- at article{Rosenbaum91b,
-	author={Paul R. Rosenbaum},
-	title={Sensitivity analysis for matched case-control studies},
-	journal={Biometrics},
-	volume= 47,
-	year= 1991,
-	pages={87-100},
-	number= 1
-}
-
- at article{Rosenbaum99,
-	author={Paul R. Rosenbaum},
-	title={Choice as an alternative to control in observational studies},
-	journal={Statistical Science},
-	volume= 14,
-	year= 1999,
-	pages={259-304},
-	number= 3,
-	note={With discussion and rejoinder.}
-}
-
- at book{RosHan93,
-	author={Steven J.\ Rosenstone and John M.\ Hansen},
-	title={Mobilization, Participation, and Democracy in America},
-	publisher={Macmillan},
-	year= 1993
-}
-
- at article{RosHen78,
-	author={Bernard Rosner and Charles H. Hennekens},
-	title={Analytic Methods in Matched Pair Epidemiological Studies},
-	journal={International Journal of Epidemiology},
-	volume={7},
-	year={1978},
-	pages={367-372},
-	number={4}
-}
-
- at book{RosNoc83,
-	title={Measuring Social Judgements: The Factorial Survey Approach},
-	publisher={Sage},
-	year= 1983,
-	editor={P. H. Rossi and S. L. Nock},
-	address={Beverly Hills, CA}
-}
-
- at article{RosRub83,
-	author={Paul R. Rosenbaum and Donald B.\ Rubin},
-	title={The Central Role of the Propensity Score in Observational Studies for Causal
-		Effects},
-	journal={Biometrika},
-	volume= 70,
-	year= 1983,
-	pages={41--55}
-}
-
- at article{RosRub83b,
-	author={Paul R. Rosenbaum and Donald B. Rubin},
-	title={Assessing sensitivity to an unobserved binary covariate in an observational
-		study with binary outcome},
-	journal={Journal of the Royal Statistical Society Series B},
-	volume= 45,
-	year= 1983,
-	pages={212-218},
-	number= 2
-}
-
- at article{RosRub84,
-  author =	 {Paul R. Rosenbaum and Donald B. Rubin},
-  title =	 {Reducing Bias in Observational Studies Using
-                  Subclassification on the Propensity Score},
-  journal =	 jasa,
-  volume =	 79,
-  year =	 1984,
-  pages =	 {515--524}
-}
-
- at article{RosRub85,
-	author={Rosenbaum, Paul R. and Rubin, Donald B.},
-	title={Constructing a Control Group Using Multivariate Matched Sampling Methods
-		That Incorporate the Propensity Score},
-	journal={The American Statistician},
-	volume={39},
-	year={1985},
-	pages={33-38}
-}
-
- at article{RosRub85b,
-	author={Rosenbaum, P.R. and Rubin, D.B.},
-	title={The Bias Due to Incomplete Matching},
-	journal={Biometrics},
-	volume={41},
-	year={1985},
-	pages={103--116},
-	number={1}
-}
-
- at article{RosSil01,
-	author={Paul R. Rosenbaum and J.H. Silber},
-	title={Matching and Thick Description in an Observational Study of Mortality After
-		Surgery},
-	journal={Biostatistics},
-	volume= 2,
-	year= 2001,
-	pages={{217--232}}
-}
-
- at article{Rothman77,
-	author={Kenneth J. Rothman},
-	title={Epidemiologic Methods in Clinical Trials},
-	journal={Cancer},
-	volume={39},
-	year={1977},
-	pages={1771-1775}
-}
-
- at book{rothman98,
-	author={Kenneth J. Rothman and Sander Greenland},
-	title={Modern Epidemiology},
-	publisher={Lippincott-Raven},
-	year= 1998,
-	address={Philadelphia},
-	edition={2nd}
-}
-
- at article{RouSto96,
-	author={L. Roussos and W. Stout},
-	title={A Multidimensionality-based DIF Analysis Paradigm},
-	journal={Applied Psychological Measurement},
-	volume= 20,
-	year= 1996,
-	pages={355--371}
-}
-
- at article{Rowe05,
-	author={Alexander K Rowe},
-	title={Should verbal autopsy results for malaria be adjusted to improve validity?},
-	journal={International Journal of Epidemiology},
-	volume={34},
-	year={2005},
-	pages={712--713},
-	number={3}
-}
-
- at article{Roy51,
-	author={A.D. Roy},
-	title={Some Thoughts on the Distribution of Earnings},
-	journal={Oxford Economic Papers},
-	volume= 3,
-	year= 1951,
-	pages={135--146}
-}
-
- at article{RoyCum85,
-	author={Richard M. Royall and William G. Cumberland},
-	title={Conditional Coverage Properties of Finite Population Confidence Intervals},
-	journal={Journal of the American Statistical Association},
-	volume={80},
-	year={1985},
-	pages={355-359},
-	month={June},
-	number={390}
-}
-
- at article{RubDudVan06,
-	author={Daniel Rubin and Sandrine Dudoit and Mark van der Laan},
-	title={A Method to Increase the Power of Multiple Testing Procedures Through Sample
-		Splitting},
-	journal={Statistical Applications in Genetics and Molecular Biology},
-	volume={5},
-	year={2006},
-	number={1}
-}
-
- at article{Rubin01,
-	author={Donald B. Rubin},
-	title={Using propensity scores to help design observational studies: Application
-		to the tobacco litigation},
-	journal={Health Services \& Outcomes Research Methodology},
-	volume={2},
-	year={2001},
-	pages={169-188},
-	month={December},
-	number={3-4}
-}
-
- at unpublished{Rubin04,
-	author={Donald B. Rubin},
-	title={Discussion of ``{P}rinciples for modeling propensity scores in medical research:
-		a systematic literature review"},
-	note={Forthcoming in {\it Pharmacoepidemiology and Drug Safety}. Referenced paper
-		by Weitzen, Lapane, Toledano, Hume, Mor},
-	year= 2004
-}
-
- at book{Rubin06,
-	author={Donald B. Rubin},
-	title={Matched Sampling for Causal Effects},
-	publisher={Cambridge University Press},
-	year= 2006 ,
-	address={Cambridge, England}
-}
-
- at article{Rubin80,
-	author={Rubin, Donald B.},
-	title={Comments on ``Randomization Analysis of Experimental Data: The Fisher Randomization Test'', by D. Basu},
-	journal={Journal of the American Statistical Association},
-	volume={75},
-	year={1980},
-	pages={591-593}
-}
-
- at article{Rubin73,
-	author={Rubin, Donald B.},
-	title={Matching to remove bias in observational studies},
-	journal={Biometrics},
-	volume={29},
-	year={1973},
-	pages={159-184}
-}
-
- at article{Rubin73b,
-	author={Rubin, Donald B.},
-	title={The use of matched sampling and regression adjustment to remove bias in
-		observational studies},
-	journal={Biometrics},
-	volume={29},
-	year={1973},
-	pages={185-203}
-}
-
- at article{rubin74,
-	author={Donald B. Rubin},
-	title={Estimating Causal Effects of Treatments in Randomized and Nonrandomized
-		Studies},
-	journal={Journal of Educational Psychology},
-	volume= 66,
-	year= 1974,
-	pages={688--701}
-}
-
- at article{Rubin76,
-	author={Donald Rubin},
-	title={Inference and Missing Data},
-	journal={Biometrika},
-	volume={63},
-	year={1976},
-	pages={581-592}
-}
-
- at article{Rubin77,
-	author={Donald Rubin},
-	title={Formalizing Subjective Notions about the Effect of Nonrespondents in Sample
-		Surveys},
-	journal={Journal of the American Statistical Association},
-	volume={72},
-	year={1977},
-	pages={538-543},
-	month={September},
-	number={359}
-}
-
- at article{Rubin77b,
-	author={Donald B. Rubin},
-	title={Assignment to Treatment Group on the Basis of a Covariate},
-	journal={Journal of Educational Statistics},
-	volume={2},
-	year={1977},
-	pages={1--26},
-	number={1}
-}
-
- at article{Rubin78,
-	author={Donald B. Rubin},
-	title={Bayesian inference for causal effects: The role of randomization},
-	journal={The Annals of Statistics},
-	volume={6},
-	year={1978},
-	pages={34-58}
-}
-
- at article{Rubin79,
-	author={Donald B. Rubin},
-	title={Using Multivariate Matched Sampling and Regression Adjustment to Control
-		Bias in Observational Studies},
-	journal= jasa,
-	volume={74},
-	year={1979},
-	pages={318--328}
-}
-
- at book{Rubin87,
-	author={Donald B. Rubin},
-	title={Multiple Imputation for Nonresponse in Surveys},
-	publisher={John Wiley},
-	year={1987},
-	address={New York}
-}
-
- at article{Rubin87b,
-	author={Donald Rubin},
-	title={A Noniterative sampling/importance resampling alternative to the data augmentation
-		algorithm for creating a few imputations when fractions of missing information
-		are modest: the SIR Algorithm, Discussion of Tanner and Wong},
-	journal={Journal of the American Statistical Association},
-	volume={82},
-	year={1987},
-	pages={543-546}
-}
-
- at article{Rubin91,
-	author={Donald B. Rubin},
-	title={Practical implications of modes of statistical inference for causal effects
-		and the critical role of the assignment mechanism},
-	journal={Biometrics},
-	volume= 47,
-	year= 1991,
-	pages={1213-1234}
-}
-
- at article{Rubin94,
-	author={Donald B. Rubin},
-	title={Missing Data, Imputation, and the Bootstrap: Comment},
-	journal={Journal of the American Statistical Association},
-	volume={89},
-	year={1994},
-	pages={475-478},
-	month={Jun},
-	number={426}
-}
-
- at article{Rubin96,
-	author={Donald B. Rubin},
-	title={Multiple Imputation after 18+ Years},
-	journal= jasa,
-	volume= 91,
-	year= 1996,
-	pages={473--489},
-	number= 434
-}
-
- at article{Rubin97,
-	author={Donald B. Rubin},
-	title={Estimating causal effects from large data sets using propensity scores},
-	journal={Annals of Internal Medicine},
-	volume= 127,
-	year= 1997,
-	pages={757-763}
-}
-
- at article{RubSch86,
-	author={Donald Rubin and Nathaniel Schenker},
-	title={Multiple Imputation for Interval Estimation for Simple Random Samples with
-		Ignorable Nonresponse},
-	journal= jasa,
-	volume= 81,
-	year= 1986,
-	pages={366-374},
-	number= 394
-}
-
- at article{RubSch87,
-	author={Donald B. Rubin and Nathaniel Schenker},
-	title={Logit-Based Interval Estimation from Binomial Data Using the Jeffreys Prior},
-	journal={Sociological Methodology},
-	volume={17},
-	year={1987},
-	pages={131-144}
-}
-
- at inproceedings{RubSch90,
-	author={D.B. Rubin and J.L. Schafer},
-	title={Efficiently Creating Multiple Imputations for Incomplete Multivariate Normal
-		Data},
-	booktitle={Proceedings of the Statistical Computing Section of the American Statistical
-		Association},
-	year={1990},
-	pages={83-88}
-}
-
- at article{RubStu06,
-	author={Donald B. Rubin and Elizabeth A. Stuart},
-	title={Affinely invariant matching methods with discriminant mixtures of proportional
-		ellipsoidally symmetric distributions},
-	journal={Annals of Statistics},
-	volume= 34,
-	year={2006},
-	pages={1814-1826},
-	number= 4
-}
-
- at article{RubTho00,
-	author={Donald B. Rubin and Neal Thomas},
-	title={Combining propensity score matching with additional adjustments for prognostic
-		covariates},
-	journal={Journal of the American Statistical Association},
-	volume= 95,
-	year={2000},
-	pages={573-585}
-}
-
- at article{RubTho92,
-	author={Donald B. Rubin and Neal Thomas},
-	title={Characterizing the Effect of Matching Using Linear Propensity Score Methods
-		With Normal Distributions},
-	journal={Biometrika},
-	volume={79},
-	year={1992},
-	pages={797-809}
-}
-
- at article{RubTho96,
-	author={Donald B. Rubin and Neal Thomas},
-	title={Matching Using Estimated Propensity Scores, Relating Theory to Practice},
-	journal={Biometrics},
-	volume={52},
-	year={1996},
-	pages={249-264}
-}
-
- at unpublished{RugKimMar03,
-  author =	 {Theodore W. Ruger and Pauline T. Kim and Andrew
-                  D. Martin and Kevin M. Quinn},
-  title =	 {The Supreme Court Forecasting Project: Legal and
-                  Political Science Approaches to Predicting Supreme
-                  Court Decision-Making},
-  note =	 {Washington University in St. Louis},
-  year =	 2003
-}
-
- at book{Rule88,
-	author={Rule, J. B.},
-	title={Theories of Civil Violence},
-	publisher={University of California Press},
-	year= 1988,
-	address={Berkeley}
-}
-
- at article{RusOneBer03,
-	author={Bruce Russett and John Oneal and Michael L. Berbaum},
-	title={Causes of Peace: Democracy, Interdependence, and International Organizations,
-		1885--1992},
-	journal= isq,
-	volume= 47,
-	year= 2003,
-	pages={371--393},
-	month={September},
-	number= 3
-}
-
- at article{SabCanGib05,
-	author={Marc S. Sabatine and Christopher P. Cannon and C. Michael Gibson and Jose
-		L. Lopez-Sendon and Gilles Montalescot and Pierre Theroux and Basil S.
-		Lewis and Sabina A. Murphy and Carolyn H. McCabe and Eugene Braunwald},
-	title={Effect of Clopidogrel Pretreatment Before Percutaneous Coronary Intervention
-		in Patients with ST-Elevation Myocardial Infarction},
-	journal={Journal of the American Medical Association},
-	volume={294},
-	year={2005},
-	pages={1224-1232},
-	month={September},
-	number={10}
-}
-
- at article{sackett96,
-	author={D. Sackett and J. Deeks and D. Altman},
-	title={Down with Odds Ratios},
-	journal={Evidence-Based Medicine},
-	volume= 1,
-	year= 1996,
-	pages={164--166},
-	number= 6
-}
-
- at book{Saldern79,
-	author={Adelheid Saldern},
-	title={Mittelstand im `Dritten Reich': Handwerker -- Einzelh{\"a}ndler -- Bauern},
-	publisher={Campus},
-	year= 1979,
-	address={Frankfurt}
-}
-
- at article{SalWeiHam02,
-	author={Joshua A. Salomon and Milton C. Weinstein and James K. Hammitt and Sue J.
-		Goldie},
-	title={Empirically Calibrated Model of Hepatitis C Virus Infection in the United
-		States},
-	journal={American Journal of Epidemiology},
-	volume= 156,
-	year= 2002,
-	pages={761--773}
-}
-
- at article{Sambanis01,
-	author={Nicholas Sambanis},
-	title={Do Ethnic and Nonethnic Civil Wars Have the Same Causes?},
-	journal={Journal of Conflict Resolution},
-	volume={45},
-	year={2001},
-	pages={259--282},
-	month={June},
-	number={3}
-}
-
- at article{SamDoy07,
-	author={Nicholas Sambanis and Michael W. Doyle},
-	title={No Easy Choices: Estimating the Effects of United Nations Peacekeeping (Response
-		to King and Zeng)},
-	journal= isq,
-	year= 2007,
-	month={October}
-}
-
- at article{SanKliDun06,
-	author={Lisa Sanbonmatsu and Jeffrey R. Kling and Greg J. Duncan and Jeanne Brooks-Gunn},
-	title={Neighborhoods and Academic Achievement: Results From the Moving to Opportunity
-		Experiment},
-	journal={National Bureau of Economic Research, Working Paper Series},
-	year={2006},
-	month={January},
-	number={Working Paper 11909},
-	note={{http://www.nber.org/papers/w11909}}
-}
-
- at article{SanNaiWhi02,
-	author={H. Babad and C. Sanderson and B. Naidoo and I. White and D. Wang},
-	title={The Development of a Simulation Model of Primary Prevention Strategies for
-		Coronary Heart Disease},
-	journal={Health Care Management Science},
-	volume= 5,
-	year= 2002,
-	pages={269--274},
-	number= 4
-}
-
- at article{SanRedHan96,
-	author={Robert Sanson-Fisher and Sally Redman and Lynne Hancock and Stephen Halpin
-		and Philip Clarke and Margot Schofield and Robert Burton and Michael Hensley
-		and Robert Gibberd and Alexander Reid and Raoul Walsh and Afaf Girgis and
-		Louise Burton and Ann McClintock and Robert Carter and Allan Donner and
-		Sylvan Green},
-	title={Developing methodologies for evaluating community-wide health promotion},
-	journal={Health Promotion International},
-	volume={11},
-	year={1996},
-	pages={227-236},
-	number={3}
-}
-
- at article{Sartori70,
-	author={Giovanni Sartori},
-	title={Concept Misformation in Comparative Politics},
-	journal= apsr,
-	volume= 64,
-	year= 1970,
-	pages={1033--1053},
-	month={December},
-	number= 4
-}
-
- at article{Schaback96,
-	author={R. Schaback},
-	title={Approximation by Radial Basis Functions with Finitely Many Centers},
-	journal={Constructive Approximation},
-	volume= 12,
-	year= 1996,
-	pages={331--340}
-}
-
- at book{Schafer97,
-	author={Joseph L. Schafer},
-	title={Analysis of incomplete multivariate data},
-	publisher={Chapman \& Hall},
-	year={1997},
-	address={London}
-}
-
- at book{Schattschneider60,
-  title =	 {{The Semisovereign People}},
-  author =	 {Schattschneider, E.E.},
-  year =	 {1960},
-  address =	 {New York},
-  publisher =	 {Holt, Rinehart and Winston}
-}
-
- at article{SchBuc03,
-	author={M. Schneider and J. Buckley},
-	title={Making the grade: comparing DC charter schools to other DC public schools},
-	journal={Educational Evaluation and Policy Analysis},
-	volume={25},
-	year={2003},
-	pages={203-215},
-	number={2}
-}
-
- at article{SchGer00,
-  author =	 {Philip Schrodt and Deborah J.\ Gerner},
-  title =	 {Cluster-Based Early Warning Indicators for Political
-                  Change in the Contemporary Levant},
-  journal =	 apsr,
-  volume =	 94,
-  year =	 2000,
-  pages =	 {803--818},
-  number =	 4
-}
-
- at article{Schieder93,
-	author={Wolfgang Schieder},
-	title={Die NSDAP vor 1933},
-	journal={Geschichte und Gesellschaft},
-	volume= 19,
-	year= 1993,
-	pages={141--154},
-	number= 1
-}
-
- at inproceedings{SchKhaEzz93,
-	author={Joseph L. Schafer and Meena Khare and Trena M. Ezzati-Rice},
-	title={Multiple Imputation of Missing Data in NHANES III},
-	booktitle={Proceedings of the Annual Research Conference},
-	year={1993},
-	pages={459-487},
-	organization={Washington, D.C., Bureau of the Census}
-}
-
- at article{SchMalBla94,
-	author={David E. Schoenfeld and others},
-	title={Self-Rated Health and Mortality in High Functioning Elderly -- A Closer Look
-		at Healthy Individuals: MacArthur Field Study of Successful Aging},
-	journal={Journal of Gerontology},
-	volume= 49,
-	year= 1994,
-	pages={M109--113},
-	number= 3
-}
-
- at article{Schneider04,
-	author={Schneider, B.},
-	title={{Building a Scientific Community: The Need for Replication}},
-	journal={The Teachers College Record},
-	volume={106},
-	year={2004},
-	pages={1471--1483},
-	number={7}
-}
-
- at techreport{Schochet03,
-	author={Peter Schochet and Sheena McConnell and John Burghardt},
-	title={National Job Corps Study: Findings Using Administrative Earnings Records
-		Data. Final Report},
-	institution={Mathematica Policy Research, Inc.},
-	year={2003},
-	month={October},
-	address={Princeton, NJ}
-}
-
- at book{Schoenbaum80,
-  author =	 {Schoenbaum, David},
-  title =	 {Hitler's Social Revolution},
-  address =	 {New York},
-  publisher =	 {Norton},
-  year =	 {1980}
-}
-
- at article{Schoenberg46,
-	author={I.J. Schoenberg},
-	title={Contributions to the problem of approximation of equidistant data by analytic
-		functions, Part A: On the problem of smoothing of graduation, a first class
-		of analytic approximation formulae},
-	journal={Quart. Appl. Math.},
-	volume={4},
-	year={1946},
-	pages={45--99}
-}
-
- at article{SchOls98,
-	author={Joseph L. Schafer and Maren K. Olsen},
-	title={Multiple Imputation for Multivariate Missing-Data Problems: A Data Analyst's
-		Perspective},
-	journal={Multivariate Behavioral Research},
-	volume={33},
-	year={1998},
-	pages={545-571},
-	number={4}
-}
-
- at article{SchSin00,
-	author={Robert E. Schapire and Yoram Singer},
-	title={BoosTexter: A Boosting-based System for Text Categorization},
-	journal={Machine Learning},
-	volume={39},
-	year={2000},
-	pages={135-168},
-	number={2/3}
-}
-
- at article{SchSla01,
-	author={Kenneth Scheve and Matthew Slaughter},
-	title={Labor Market Competition and Individual Preferences over Immigration Policy},
-	journal={Review of Economics and Statistics},
-	volume={83},
-	year={2001},
-	pages={133-145},
-	month={February},
-	number={1},
-	note={Sample data include only the first five of ten multiply imputed data sets.}
-}
-
- at article{Schuessler99,
-	author={Alexander A. Schuessler},
-	title={Ecological Inference},
-	journal={Proceedings of the National Academy of Sciences},
-	volume= 96,
-	year= 1999,
-	pages={10578-10581},
-	month={September 14},
-	number= 19,
-	note={{http://www.pnas.org/cgi/content/full/96/19/10578}}
-}
-
- at book{Schumaker81,
-	author={L.L. Schumaker},
-	title={Spline functions: basic theory},
-	publisher={John Wiley and Sons },
-	year= 1981 ,
-	address={New York}
-}
-
- at article{Schwarz99,
-	author={Norbert Schwarz},
-	title={Self-Reports: How the Questions Shape the Answers},
-	journal={American Psychologist},
-	volume= 54,
-	year= 1999,
-	pages={93--105},
-	number= 2
-}
-
- at article{SchWolWel99,
-	author={Lisa M. Schwartz and Steven Woloshin and Gilbert H. Welch},
-	title={Misunderstanding About the Effects of Race and Sex on Physicians' Referrals
-		for Cardiac Catheterization},
-	journal={New England Journal of Medicine},
-	volume= 341,
-	year= 1999,
-	pages={279-283},
-	number= 4
-}
-
- at article{ScoMacCor97,
-	author={William K. Scott and others},
-	title={Functional Health Status as a Predictor of Mortality in Men and Women Over
-		65.},
-	journal={Journal of Clinical Epidemiology},
-	volume= 50,
-	year= 1997,
-	pages={291--296},
-	number= 3
-}
-
- at article{SegBurLoo03,
-	author={O. Segura and A. Burdorf and C. Looman},
-	title={Update of Predictions of Mortality from Pleural Mesothelioma in the Netherlands},
-	journal={Occupational and Environmental Medicine},
-	volume= 60,
-	year= 2003,
-	pages={50--55},
-	number= 1
-}
-
- at article{Seidel91,
-	author={Raimund Seidel},
-	title={Small-Dimensional Linear Programming and Convex Hulls Made Easy},
-	journal={Discrete \& Computational Geometry},
-	volume={6},
-	year={1991},
-	pages={{423-434}},
-	issue={5}
-}
-
- at inbook{SeiMue94,
-	title={Modelling the AIDS Epidemic: Planning, Policy, and Prediction},
-	chapter={Viral Load and Sexual Risk: Epidemiologic and Policy Implications for HIV/AIDS},
-	year={1994},
-	publisher={Raven Press},
-	pages={461--480},
-	address={New York},
-	altauthor={S.T. Seitz and G.E. Mueller},
-	alteditor={E.H. Kaplan and M.L. Brandeau}
-}
-
- at article{Sekhon08,
-  author =	 {Jasjeet S. Sekhon},
-  title =	 {Multivariate and Propensity Score Matching Software
-                  with Automated Balance Optimization: The Matching
-                  Package for R},
-  journal =	 {Journal of Statistical Software},
-  year =	 {2008}
-}
-
- at unpublished{Sekhon04b,
-	author={Jasjeet S. Sekhon},
-	title={The Varying Role of Voter Information across Democratic Societies},
-	note={{http://jsekhon.fas.harvard.edu/papers/SekhonInformation.pdf}},
-	year= 2004
-}
-
- at article{SekMeb98,
-	author={Jasjeet Singh Sekhon and Mebane, Jr., Walter R.},
-	title={Genetic Optimization Using Derivatives: Theory and Application to Nonlinear
-		Models},
-	journal= pa,
-	volume= 7,
-	year= 1998,
-	pages={187--210},
-	note={{http://jsekhon.fas.harvard.edu/genoud/genoud.pdf}}
-}
-
- at article{Sen02,
-	author={Amartya Sen},
-	title={Health: Perception Versus Observation},
-	journal={BMJ},
-	volume= 324,
-	year= 2002,
-	pages={860-861},
-	month={13 April}
-}
-
- at article{Senn00,
-	author={Stephen Senn},
-	title={Consensus and Controversy in Pharmaceutical Statistics},
-	journal={The Statistician},
-	volume={49},
-	year={2000},
-	pages={135-176},
-	number={2}
-}
-
- at article{Senn04,
-	author={Stephen Senn},
-	title={Controversies concerning randomization and additivity in clinical trials},
-	journal={Statistics in Medicine},
-	volume={23},
-	year={2004},
-	pages={3729-3753}
-}
-
- at article{Senn04b,
-	author={Stephen Senn},
-	title={Unbalanced Claims for Balance},
-	journal={2004},
-	volume={13},
-	year={2004},
-	pages={14-16},
-	month={June},
-	number={6}
-}
-
- at article{Senn05,
-	author={Stephen Senn},
-	title={Quantifying the magnitude of baseline covariate imbalances resulting from
-		selection bias in randomized clinical trials - Comment},
-	journal={Biometrical Journal},
-	volume={47},
-	year={2005},
-	pages={133-135},
-	number={2}
-}
-
- at article{Senn89,
-	author={S. J. Senn},
-	title={Covariate Imbalance and Random Allocation in Clinical Trials},
-	journal={Statistics in Medicine},
-	volume={8},
-	year={1989},
-	pages={467-475}
-}
-
- at article{Senn93,
-	author={Stephen Senn},
-	title={Baseline Distribution and Conditional Size},
-	journal={Journal of Biopharmaceutical Statistics},
-	volume={3},
-	year={1993},
-	pages={265-270},
-	number={2}
-}
-
- at article{Senn94,
-	author={S.J. Senn},
-	title={Testing for Baseline Balance in Clinical Trials},
-	journal={Statistics in Medicine},
-	volume={13},
-	year={1994},
-	pages={1715-1726}
-}
-
- at article{Senn96,
-	author={Stephen Senn},
-	title={Baseline Balance and Conditional Size: A Reply to Overall et al.},
-	journal={Journal of Biopharmaceutical Statistics},
-	volume={6},
-	year={1996},
-	pages={201-210},
-	number={2}
-}
-
- at article{SetSanRao05,
-	author={Philip W. Setel and Osman Sankoh and Chalapati Rao and Victoria A. Velkoff
-		and Colin Mathers and Yang Gonghuan and Yusuf Hemed and Prabhat Jha and
-		Alan D. Lopez},
-	title={Sample registration of vital events with verbal autopsy: a renewed commitment
-		to measuring and monitoring vital statistics},
-	journal={Bulletin of the World Health Organization},
-	volume={83},
-	year={2005},
-	pages={611-617}
-}
-
- at article{SetSanVel05,
-	author={Philip W. Setel and O. Sankoh and V.A. Velkoff and C. Mathers and Y. Gonghuan
-		and others},
-	title={Sample registration of vital events with verbal autopsy: a renewed commitment
-		to measuring and monitoring vital statistics},
-	journal={Bulletin of the World Health Organization},
-	volume= 83,
-	year= 2005,
-	pages={611-617}
-}
-
- at article{SetWhiHem06,
-	author={Philip W. Setel and David R. Whiting and Yusuf Hemed and Daniel Chandramohan
-		and Lara J Wolfson and K.G.M.M. Alberti and Alan Lopez},
-	title={Validity of verbal autopsy procedures for determining causes of death in
-		Tanzania},
-	journal={Tropical Medicine and International Health},
-	volume= 11,
-	year={2006},
-	pages={681--696},
-	number= 5
-}
-
- at article{ShaBarrCra02,
-	author={Bruce Shadbolt and Jane Barresi and Paul Craft},
-	title={Self Rated Health as a Predictor of Survival Among Patients with Advanced
-		Cancer},
-	journal={Journal of Clinical Oncology},
-	volume= 20,
-	year= 2002,
-	pages={2514--2519},
-	month={{May 15}},
-	number= 10
-}
-
- at article{ShaDavWeg92,
-	author={Said Shahtahmasebi and Richard Davies and G. Clare Wenger},
-	title={A Longitudinal Analysis of Factors Related to Survival in Old Age},
-	journal={The Gerontologist},
-	volume= 32,
-	year= 1992,
-	pages={404--413},
-	number= 3
-}
-
- at article{Shahidullah95,
-	author={M. Shahidullah},
-	title={The sisterhood method of estimating maternal mortality: the Matlab experience},
-	journal={Studies in Family Planning},
-	volume= 26,
-	year= 1995,
-	pages={101--106}
-}
-
- at unpublished{Shalev05,
-	author={Michael Shalev},
-	title={Limits and Alternatives to Multiple Regression in Comparative Research},
-	note={Dept. of Sociology \& Anthropology and Department of Political Science;
-		The Hebrew University of Jerusalem; Israel 91905},
-	year={2005},
-	month={July}
-}
-
- at book{Shannon49,
-	author={Claude E. Shannon},
-	title={The Mathematical Theory of Communication},
-	publisher={University of Illinois Press},
-	year= 1949,
-	address={Urbana-Champaign}
-}
-
- at article{ShaSit96,
-	author={Jun Shao and Randy R. Sitter},
-	title={Bootstrap for Imputed Survey Data},
-	journal={Journal of the American Statistical Association},
-	volume={91},
-	year={1996},
-	pages={1278-1288},
-	month={September},
-	number={435}
-}
-
- at article{Sherman00,
-	author={Robert P. Sherman},
-	title={Tests of Certain Types of Ignorable Nonresponse in Surveys Subject to Item
-		Nonresponse or Attrition},
-	journal={American Journal of Political Science},
-	volume={44},
-	year={2000},
-	pages={356-368},
-	number={2}
-}
-
- at article{SheSto93,
-	author={R. Shealy and W. Stout},
-	title={A Model-Based Standardization Approach That Separates True Bias/DIF From
-		Group Ability Differences and Detects Test Bias/DIF as Well as Item Bias/DIF},
-	journal={Psychometrika},
-	volume= 58,
-	year= 1993,
-	pages={159--194},
-	month={June},
-	number= 2
-}
-
- at article{ShiLitPot06,
-	author={M.H. Shishehbor and D. Litaker and C.E. Pothier and M.S. Lauer},
-	title={Association of socioeconomic status with functional capacity, heart rate
-		recovery, and all-cause mortality},
-	journal={Journal of the American Medical Association},
-	volume={295},
-	year={2006},
-	pages={784-792},
-	month={February}
-}
-
- at article{ShiSmiDra89,
-	author={M.J. Shipley and P.G. Smith and M. Dramaix},
-	title={Calculation of Power for Matched Pair Studies When Randomization is by Group},
-	journal={International Journal of Epidemiology},
-	volume={18},
-	year={1989},
-	pages={457-461},
-	number={2}
-}
-
- at article{ShiTes93,
-	author={T. Shiferaw and F. Tessema},
-	title={Maternal mortality in rural communities of Illubabor, Southwestern Ethiopia:
-		as estimated by the `sisterhood method'},
-	journal={Ethiopian Medical Journal},
-	volume= 31,
-	year= 1993,
-	pages={239--249}
-}
-
- at article{Shively72,
-	author={Shively, W. Phillips},
-	title={Party Identification and Voting Choice and Voting Stability: The Weimar
-		Case},
-	journal= apsr,
-	volume= 66,
-	year= 1972,
-	pages={1203-1225}
-}
-
- at article{Sianesi04,
-	author={Barbara Sianesi},
-	title={An evaluation of the {S}wedish system of active labor market programs in
-		the 1990's},
-	journal={Review of Economics and Statistics},
-	volume= 86,
-	year= 2004,
-	pages={133-155},
-	number= 1
-}
-
- at article{SibFleHil01,
-	author={A.M. Sibai and A. Fletcher and M. Hills and O. Campbell},
-	title={Non-communicable disease mortality rates using the verbal autopsy in a cohort
-		of middle aged and older populations in Beirut during wartime, 1983-93},
-	journal={Journal of Epidemiology and Community Health},
-	volume={55},
-	year={2001},
-	pages={271-276}
-}
-
- at article{Signorino99,
-	author={Curtis Signorino},
-	title={Strategic Interaction and the Statistical Analysis of International Conflict},
-	journal= apsr,
-	volume= 93,
-	year= 1999,
-	pages={279-298},
-	number= 2
-}
-
- at article{SigYil03,
-	author={Curtis Signorino and Kuzey Yilmaz},
-	title={Strategic Misspecification in Discrete Choice Models},
-	journal= ajps,
-	year={2003},
-	month={July},
-	note={{http://www.rochester.edu/College/PSC/signorino/papers/Signo00.pdf}}
-}
-
- at article{SimHop05,
-	author={Beth A. Simmons and Daniel J. Hopkins},
-	title={The Constraining Power of International Treaties: Theory and Methods},
-	journal={American Political Science Review},
-	volume={99},
-	year={2005},
-	pages={623-631},
-	month={November},
-	number={4}
-}
-
- at article{SimXeo04,
-	author={Simon, Adam F. and Michael Xenos},
-	title={{Dimensional Reduction of Word-Frequency Data as a Substitute for Intersubjective
-		Content Analysis}},
-	journal={Political Analysis},
-	volume={12},
-	year={2004},
-	pages={63-75},
-	number={1}
-}
-
- at unpublished{Singh00,
-	author={R. Singh},
-	title={Estimation of Adult Mortality from Widowhood Data for India and its Major
-		States},
-	note={Mumbai, India: International Institute for Population Sciences},
-	year={2000}
-}
-
- at inproceedings{Singh02,
-	author={Abhishek Singh},
-	title={Forecasting Mortality in India},
-	organization={Ninth International Conference of Forum for Interdisciplinary Mathematics
-		on Statistics, Combinatorics and Related Areas, University of Allahabad},
-	note={Atlas Document \#cakd-39},
-	annote={Lee-Carter is used to forecast mortality in India from 2000-2015.}
-}
-
- at article{Sisson05,
-	author={Scott A. Sisson},
-	title={Transdimensional Markov Chains: A Decade of Progress and Future Perspectives},
-	journal= jasa,
-	volume= 100,
-	year= 2005,
-	pages={1077--1089},
-	month={September},
-	number= 471
-}
-
- at inproceedings{Sivamurthy87,
-	author={M. Sivamurthy},
-	title={{Principal Components Representation of ASFR: Model of Fertility Estimation
-		and Projection}},
-	booktitle={CDC Research Monograph},
-	year= 1987 ,
-	address={Cairo Demographic Center},
-	pages={655--693}
-}
-
- at article{Skalaban92,
-	author={Andrew Skalaban},
-	title={Interstate Competition and State Strategies to Deregulate Interstate Banking
-		1982-1988},
-	journal={Journal of Politics},
-	volume={54},
-	year={1992},
-	pages={793-809},
-	month={August},
-	number={3}
-}
-
- at InCollection{Skocpol91,
-  author =	 {Theda Skocpol},
-  title =	 {Targeting Within Universalism: Politically Viable
-                  Policies to Combat Poverty in the United States},
-  booktitle =	 {The Urban Underclass},
-  pages =	 {411-436},
-  publisher =	 {Brookings Institution},
-  editor =	 {Christopher Jencks and Paul Peterson},
-  address =	 {Washington, D.C.},
-  year =	 {1991}
-}
-
- at techreport{Skoufias05,
-	author={E. Skoufias},
-	title={PROGRESA and Its Impacts on the Welfare of Rural Households in Mexico},
-	institution={International Food Policy Research Institute},
-	year={2005},
-	address={Washington}
-}
-
- at unpublished{Small05,
-	author={Dylan S. Small},
-	title={Sensitivity Analysis for Limited Information Linear Simultaneous Equations
-		Models With Overidentifying Restrictions},
-	note={Dept. of Statistics, The Wharton School of the University of Pennsylvania,
-		Philadelphia, PA 19104-6340, dmall at wharton.upenn.edu},
-	year={2005},
-	month={September}
-}
-
- at book{SmiMor91,
-	author={P.J. Smith and R.H. Morrow},
-	title={Methods for field trials of interventions against tropical diseases: a 'toolbox'},
-	publisher={Oxford University Press},
-	year={1991},
-	address={Oxford}
-}
-
- at incollection{Smith03,
-	author={Tom W. Smith},
-	title={Developing Comparable Questions in Cross-National Surveys},
-	booktitle={Cross-Cultural Survey Methods},
-	editor={Janet A. Harkness and Fons J. R. van de Vijver and Peter Ph. Mohler},
-	publisher={John Wiley and Sons},
-	year={2003},
-	address={Hoboken, NJ}
-}
-
- at article{Smith97,
-	author={Smith, H.L.},
-	title={Matching With Multiple Controls to Estimate Treatment Effects in Observational
-		Studies},
-	journal={Sociological Methodology},
-	volume={27},
-	year={1997},
-	pages={325--353},
-	number={1}
-}
-
- at article{SmiTod05,
-	author={Jeffrey A. Smith and Petra E. Todd},
-	title={Does matching overcome LaLonde's critique of nonexperimental estimators?},
-	journal={Journal of Econometrics},
-	volume= 125,
-	year= 2005,
-	pages={305-353},
-	month={March-April},
-	number={1-2}
-}
-
- at article{SmiTod05b,
-	author={Jeffrey Smith and Petra Todd},
-	title={Rejoinder},
-	journal={Journal of Econometrics},
-	volume={125},
-	year={2005},
-	pages={365-375}
-}
-
- at book{SneCoc80,
-	author={George W. Snedecor and William G. Cochran},
-	title={Statistical Methods},
-	publisher={Iowa State University Press},
-	year={1980},
-	address={Ames, IA},
-	edition={7th}
-}
-
- at book{SniCar97,
-	author={Paul Sniderman and Edward Carmines},
-	title={Reaching Beyond Race},
-	publisher={Harvard University Press},
-	year= 1997,
-	address={Cambridge, MA}
-}
-
- at article{SniGro96,
-	author={Paul M. Sniderman and Douglas B. Grob},
-	title={Innovations in Experimental Design in Attitude Surveys},
-	journal={Annual Review of Sociology},
-	volume= 22,
-	year= 1996,
-	pages={377-399},
-	month={August}
-}
-
- at article{Sobel06,
-	author={Michael E. Sobel},
-	title={Discussion: 'The Scientific Model of Causality'},
-	journal={Sociological Methodology},
-	volume={35},
-	year={2006},
-	pages={99-133},
-	month={June},
-	number={1}
-}
-
- at article{Sobolev38,
-	author={S.L. Sobolev},
-	title={On a theorem of functional analysis},
-	journal={Math. Sbornik},
-	volume= 45,
-	year= 1938,
-	pages={471--496},
-	note={Russian original;AMS Transl. (2),34(1963),39--68}
-}
-
- at book{SolChaShi05,
-	author={Nadia Soleman and Daniel Chandramohan and Kenji Shibuya},
-	title={WHO Technical Consultation on Verbal Autopsy Tools},
-	publisher={World Health Organization},
-	address={Geneva},
-	year= 2005,
-	note={{http://www.who.int/healthinfo/statistics/mort\_verbalautopsy.pdf}}
-}
-
- at article{SolChaShi06,
-	author={Nadia Soleman and Daniel Chandramohan and Kenji Shibuya},
-	title={Verbal autopsy: current practices and challenges},
-	journal={Bulletin of the World Health Organization},
-	volume={84},
-	year={2006},
-	pages={239-245},
-	month={March},
-	number={3}
-}
-
- at article{SolWil92,
-	author={Patricia J. Solomon and Susan R. Wilson},
-	title={Predicting AIDS Deaths and Prevalence in Australia},
-	journal={The Medical Journal of Australia},
-	volume= 157,
-	year= 1992,
-	pages={121--125}
-}
-
- at article{SomDjuLoe86,
-	author={Alfred Sommer and Edi Djunaedi and A.A. Loeden and Ignatius Tarwotjo and
-		Keith P. West, Jr. and Robert Tilden and Lisa Mele},
-	title={Impact of Vitamin A Supplementation on Childhood Mortality},
-	journal={The Lancet},
-	volume={1},
-	year={1986},
-	pages={1169-1173}
-}
-
- at article{SomZeg91,
-	author={Alfred Sommer and Scott L. Zeger},
-	title={On Estimating Efficacy from Clinical Trials},
-	journal={Statistics in Medicine},
-	volume={10},
-	year={1991},
-	pages={45-52},
-	number={1}
-}
-
- at article{Song01,
-	author={Juwon Song and Thomas R. Belin and Martha B. Lee and Xingyu Gao and Mary
-		Jane Rotheram-Borus},
-	title={Handling baseline differences and missing items in a longitudinal study
-		of {HIV} risk among runaway youths},
-	journal={Health Services and Outcomes Research Methodology},
-	volume= 2,
-	year= 2001,
-	pages={317-329}
-}
-
- at article{Sorenson88,
-	author={Kirsten Hjort Sorensen},
-	title={State of Health and its Association with Death Among Old People at Three-Year
-		Follow-Up},
-	journal={Danish Medical Bulletin},
-	volume= 35,
-	year= 1988,
-	pages={597-600}
-}
-
- at book{Sowa99,
-	author={J. F. Sowa},
-	title={Knowledge Representation: Logical, Philosophical and Computational Foundations},
-	publisher={Brooks Cole},
-	year= 1999
-}
-
- at unpublished{SpeSun01,
-	author={P. Speckman and D. Sun},
-	title={{Bayesian Nonparametric Regression and Autoregression Priors}},
-	note={{www.stat.missouri.edu/\textasciitilde speckman/report/bnpreg.ps}},
-	year= 2001
-}
-
- at article{SpiJagCla96,
-	author={Nicola Spiers and Carol Jagger and Michael Clarke},
-	title={Physical Function and Perceived Health: Cohort Differences and Interrelationships
-		in Older People},
-	journal={Journal of Gerontology: Social Sciences},
-	volume={51B},
-	year= 1996,
-	pages={S226-33}
-}
-
- at techreport{Sroka06,
-	author={T. Neil Sroka},
-	title={Understanding the Political Influence of Blogs},
-	institution={Graduate School of Political Management, George Washington University},
-	year={2006},
-	month={April},
-	address={The Institute for Politics, Democracy, \& the Internet, Graduate School of
-		Political Management, George Washington U, 805 21st St., NW Suite 401,
-		Washington, DC 20052}
-}
-
- at article{Stachura93,
-	author={Stachura, Peter D.},
-	title={National Socialism and the German Proletariat, 1925-1935: Old Myths and
-		New Perspectives},
-	journal={The Historical Journal},
-	volume= 36,
-	year= 1993,
-	pages={701-718},
-	number= 3
-}
-
- at article{StaNouHil00,
-	author={C. Stanton and A. Noureddine and K. Hill},
-	title={An Assessment of DHS Maternal Mortality Indicators},
-	journal={Studies in Family Planning},
-	volume= 31,
-	year= 2000,
-	pages={111--123}
-}
-
- at inbook{StaSeiWay91,
-	author={E.A. Stanley and S.T. Seitz and P.O. Way and P.D. Johnson and T.F. Curry},
-	title={The AIDS Epidemic and its Demographic Consequences},
-	chapter={The iwgAIDS Model for the Heterosexual Spread of HIV and the Demographic
-		Impacts of the AIDS Epidemic},
-	year= 1991,
-	publisher={United Nations and World Health Organization},
-	pages={119--136},
-	series={ST/ESA/SER.A/119},
-	address={New York}
-}
-
- at article{SteCoo95,
-	author={L.A. Stefanski and J.R. Cook},
-	title={Simulation-Extrapolation: The Measurement Error Jackknife},
-	journal={Journal of the American Statistical Association},
-	volume={90},
-	year={1995},
-	pages={1247-1256},
-	month={December},
-	number={432}
-}
-
- at book{Steele04,
-	author={J. Michael Steele},
-	title={The Cauchy-Schwarz Master Class},
-	publisher={Cambridge University Press},
-	year={2004}
-}
-
- at article{Stefanski92,
-	author={Leonard A. Stefanski},
-	title={Monotone Likelihood Ratio of a ``Faulty-Inspection'' Distribution},
-	journal={The American Statistician},
-	volume={46},
-	year={1992},
-	pages={110-114},
-	month={May},
-	number={2}
-}
-
- at article{SteNap00,
-	author={Anita L. Stewart and Anna Napoles-Springer},
-	title={Health-Related Quality of Life Assessments in Diverse Population Groups
-		in the United States},
-	journal={Medical Care},
-	volume= 38,
-	year= 2000,
-	pages={II-102 -- II-124},
-	month={September},
-	number= 9
-}
-
- at article{Stephan32,
-  author =	 {Stephan, Werner},
-  title =	 {Grenzen des nationalsozialistischen
-                  Vormarsches. Eine Analyse der Wahlziffern seit der
-                  Reichstagswahl 1930},
-  journal =	 {Zeitschrift f{\"u}r Politik},
-  volume =	 21,
-  year =	 1932,
-  pages =	 {570-578}
-}
-
- at article{Stephan32b,
-  author =	 {Stephan, Werner},
-  title =	 {Die Parteien nach den grossen
-                  Fr{\"u}hjahrswahlk{\"a}mpfen. Eine Analyse der
-                  Wahlziffern des Jahres 1932},
-  journal =	 {Zeitschrift f{\"u}r Politik},
-  volume =	 22,
-  year =	 1932,
-  pages =	 {110-118}
-}
-
- at article{Stephan33,
-  author =	 {Stephan, Werner},
-  title =	 {Die Reichstagswahlen vom 31. Juli 1932},
-  journal =	 {Zeitschrift f{\"u}r Politik},
-  volume =	 22,
-  year =	 1933,
-  pages =	 {353-360}
-}
-
- at article{Sterk03,
-	author={Stewart E. Sterk},
-	title={Retrenchment on Entrenchment},
-	journal={The George Washington Law Review},
-	volume={71},
-	year={2003},
-	pages={231-254},
-	month={April},
-	number={2}
-}
-
- at techreport{Stewart92,
-	author={G.W. Stewart},
-	title={{On the Early History of the Singular Value Decomposition}},
-	institution={University of Maryland, College Park},
-	year= 1992,
-	type={Institute for Advanced Computer Studies},
-	number={TR-92-31}
-}
-
- at article{Stimson85,
-	author={James A.\ Stimson},
-	title={Regression Models in Space and Time: A Statistical Essay},
-	journal= ajps,
-	volume= 29,
-	year= 1985,
-	pages={914--947}
-}
-
- at book{Stockman86,
-	author={David A. Stockman},
-	title={The Triumph of Politics: How the Reagan Revolution Failed},
-	publisher={Harper \& Row, Publishers},
-	year={1986},
-	address={New York}
-}
-
- at article{StoDorKoz89,
-	author={M.J. Stones and Brenda Dornan and Albert Kozma},
-	title={The prediction of mortality in elderly institution residents},
-	journal={Journal of Gerontology: Psychological Sciences},
-	volume= 44,
-	year= 1989,
-	pages={P72-79}
-}
-
- at article{Stogbauer01,
-	author={Christian St{\"o}gbauer},
-	title={The Radicalisation of the German Electorate: Swinging to the Right and the
-		Left in the Twilight of the Weimar Republic},
-	journal={European Review of Economic History},
-	volume= 5,
-	year= 2001,
-	pages={251--280}
-}
-
- at article{Stone74,
-	author={Stone, M.},
-	title={Cross-Validatory Choice and Assessment of Statistical Prediction},
-	journal= jrssb,
-	volume= 36,
-	year= 1974,
-	pages={111-33},
-	number= 2
-}
-
- at article{StoRel90,
-	author={Ross M. Stolzenberg and Daniel A. Relles},
-	title={Theory Testing in a World of Constrained Research Design: The Significance
-		of Heckman's Censored Sampling Bias Correction for Nonexperimental Research},
-	journal={Sociological Methods and Research},
-	volume={18},
-	year={1990},
-	pages={395-415},
-	month={May}
-}
-
- at article{StoWat03,
-	author={James H. Stock and Mark W. Watson},
-	title={Forecasting Output and Inflation: The Role of Asset Prices},
-	journal={Journal of Economic Literature},
-	year={2003},
-	number={3},
-	volume={41},
-	month={September}
-}
-
- at article{StoWay98,
-	author={John Stover and Peter Way},
-	title={Projecting the Impact of AIDS on Mortality},
-	journal={AIDS},
-	volume= 12,
-	year={1998},
-	pages={S29--S39},
-	number={supplement 1}
-}
-
- at book{Strang88,
-	author={G. Strang},
-	title={{Linear Algebra and Its Applications}},
-	publisher={Saunders},
-	year= 1988
-}
-
- at phdthesis{Stuart04,
-	author={Stuart, Elizabeth A.},
-	title={Matching methods for estimating causal effects using multiple control groups},
-	school={Department of Statistics, Harvard University},
-	year= 2004
-}
-
- at article{StuRub07,
-	author={Elizabeth A. Stuart and Donald B. Rubin},
-	title={Matching with multiple control groups with adjustment for group differences},
-	journal={Journal of Educational and Behavioral Statistics},
-	year= 2007,
-	note={Forthcoming}
-}
-
- at article{SucJor90,
-	author={L. Suchman and B. Jordan},
-	title={Interactional Troubles in Face to Face Survey Interviews (With Comments
-		and Rejoinder)},
-	journal= jasa,
-	volume= 85,
-	year= 1990,
-	pages={232--253},
-	month={March},
-	number= 409
-}
-
- at article{Sunetal00,
-	author={D. Sun and R. Tsutakawa and H. Kim and Z. He},
-	title={{Spatio-temporal Interaction with Disease Mapping}},
-	journal={Statistics in Medicine},
-	volume= 19,
-	year= 2000,
-	pages={2015--2035}
-}
-
- at manual{SunReiLan03,
-	author={S. Sun and S. Reilly and L. Lannom and J. Petrone},
-	title={Handle System Protocol (ver 2.1) Specification },
-	organization={RFC 3652 (Informational)},
-	year={2003},
-	note={{http://www.ietf.org/rfc/rfc3652.txt}}
-}
-
- at article{Super04,
-	author={Nora Super},
-	title={Medicare's Chronic Care Improvement Pilot Program: What is its Potential?},
-	journal={National Health Policy Forum Issue Brief},
-	year={2004},
-	pages={1-20},
-	month={May},
-	number={797},
-	note={The George Washington University, Washington, DC},
-	institution={National Health Policy Forum}
-}
-
- at incollection{Tabeau01,
-	author={Ewa Tabeau},
-	title={A Review of Demographic Forecasting Models for Mortality},
-	booktitle={Forecasting Mortality in Developed Countries},
-	publisher={Kluwer Academic Publishers},
-	year= 2001,
-	address={The Netherlands},
-	editor={Ewa Tabeau and Anneke van den Berg Jeths and Christopher Heathcote},
-	chapter= 1,
-	pages={1--32}
-}
-
- at article{TabEkaHuiBos98,
-	author={Ewa Tabeau and Peter Ekamper and Corina Huisman and Alinda Bosch},
-	title={Improving Overall Mortality Forecasts by Analysing Cause-of-Death, Period
-		and Cohort Effects in Trends},
-	journal={European Journal of Population},
-	volume={15},
-	year={1999},
-	pages={153-183}
-}
-
- at book{TabJetHea03,
-	editor={Ewa Tabeau and Anneke van den Berg Jeths and Christopher Heathcote},
-	title={Forecasting Mortality in Developed Countries: Insights from a Statistical,
-		Demographic and Epidemiological Perspective},
-	publisher={Kluwer Academic Publishers},
-	year={2001},
-	address={The Netherlands}
-}
-
- at book{Takeshi85,
-	author={Takeshi Amemiya},
-	title={Advanced Econometrics},
-	publisher={Harvard University Press},
-	year={1985},
-	address={Cambridge}
-}
-
- at book{Tally00,
-	author={Steve Tally},
-	title={Almost America: From the Colonists to Clinton, A ``What If'' History of
-		the U.S.},
-	publisher={Quill},
-	year= 2000,
-	address={New York}
-}
-
- at article{TanKumIke03,
-	author={J. Tanaka and H. Kumada and K. Ikeda and K. Chayama and M. Mizui and K.
-		Hino and K. Katayama and J. Kumagai and Y. Komiya and Y. Miyakawa and H.
-		Yoshizawa},
-	title={Natural histories of Hepatitis C Virus Infection in Men and Women Simulated
-		by the Markov Model},
-	journal={Journal of Medical Virology},
-	volume= 70,
-	year= 2003,
-	pages={378--386},
-	number= 3
-}
-
- at book{Tanner96,
-	author={Martin A. Tanner},
-	title={Tools for Statistical Inference: Methods for the Exploration of Posterior
-		Distributions and Likelihood Functions},
-	publisher={Springer-Verlag},
-	year= 1996,
-	address={New York}
-}
-
- at article{TanWon87,
-	author={M.A. Tanner and W.H. Wong},
-	title={The Calculation of Posterior Distributions by Data Augmentation},
-	journal={Journal of the American Statistical Association},
-	volume={82},
-	year={1987},
-	pages={528-540},
-	month={June}
-}
-
- at article{Taylor92,
-	author={G. Taylor},
-	title={A Bayesian interpretation of Whittaker-Henderson graduation},
-	journal={Insurance: Mathematics and Economics},
-	volume= 11,
-	year= 1992,
-	pages={7--16}
-}
-
- at book{Tendler97,
-	author={Judith Tendler},
-	title={Good Government in the Tropics},
-	publisher={The Johns Hopkins University Press},
-	year={1997},
-	address={Baltimore}
-}
-
- at article{TerKleOce00,
-	author={Jeanne A. Teresi and Marjorie Kleinman and Katja Ocepek-Welikson},
-	title={Modern Psychometric Methods for Detection of Differential Item Functioning:
-		Application to Cognitive Assessment Measures},
-	journal= sim,
-	volume= 19,
-	year= 2000,
-	pages={1651--1683}
-}
-
- at book{TetBel96,
-	title={Counterfactual Thought Experiments in World Politics},
-	publisher={Princeton University Press},
-	year= 1996,
-	editor={Philip E. Tetlock and A. Belkin},
-	address={Princeton}
-}
-
- at article{TetLeb01,
-	author={Philip E. Tetlock and Richard Ned Lebow},
-	title={Poking Counterfactual Holes in Covering Laws: Cognitive Styles and Historical
-		Reasoning},
-	journal= apsr,
-	volume= 95,
-	year= 2001,
-	month={December},
-	number= 4
-}
-
- at book{TetLebPar00,
-	title={Unmaking the West: Counterfactual Explorations of Alternative Histories},
-	publisher={Columbia University Press},
-	year= 2000,
-	editor={Philip E. Tetlock and Ned R. Lebow and G. Parker},
-	address={New York}
-}
-
- at article{tetlock99,
-	author={Philip E. Tetlock},
-	title={Theory-Driven Reasoning About Plausible Pasts and Probable Futures in World
-		Politics: Are we Prisoners of our Preconceptions?},
-	journal= ajps,
-	volume= 43,
-	year= 1999,
-	pages={335-366},
-	month={April},
-	number= 2
-}
-
- at book{Thisted88,
-	author={Ronald A. Thisted},
-	title={Elements of Statistical Computing: Numerical Computation},
-	publisher={Chapman and Hall},
-	year= 1988,
-	address={Florida}
-}
-
- at inproceedings{ThiSteWai93,
-	author={David Thissen and Lynn Steinberg and Howard Wainer},
-	title={Detection of Differential Item Functioning Using the Parameters of the Item
-		Response Models},
-	booktitle={Differential Item Functioning},
-	crossref={HolWai93}
-}
-
- at article{Thompson05,
-	author={Dennis F. Thompson},
-	title={{Democracy in Time: Popular Sovereignty and Temporal Representation}},
-	journal={Constellations},
-	volume= 12,
-	year= 2005 ,
-	pages={245-261},
-	month={June},
-	number= 2
-}
-
- at article{Thompson98,
-	author={Simon G. Thompson},
-	title={Letters to the Editor: The Merits of Matching in Community Intervention
-		Trials: A Cautionary Tale},
-	journal={Statistics in Medicine},
-	volume={17},
-	year={1998},
-	pages={2147-2151}
-}
-
- at inproceedings{ThoPanLee06,
-	author={Matt Thomas and Bo Pang and Lillian Lee},
-	title={Get out the vote: Determining support or opposition from Congressional floor-debate
-		transcripts},
-	booktitle={Proceedings of EMNLP},
-	year= 2006,
-	pages={327--335},
-	note={{http://www.cs.cornell.edu/home/llee/papers/tpl-convote.home.html}}
-}
-
- at article{ThoSyl82,
-	author={Stuart J. Thorson and Donald A. Sylvan},
-	title={Counterfactuals and the Cuban Missile Crisis},
-	journal={International Studies Quarterly},
-	volume= 26,
-	year= 1982,
-	pages={539--571},
-	number= 4
-}
-
- at book{Thurstone59,
-	author={L.L. Thurstone},
-	title={The Measurement of Values},
-	publisher={University of Chicago Press},
-	year= 1959,
-	address={Chicago}
-}
-
- at book{TikArs77,
-	author={A. N. Tikhonov and V. Y. Arsenin},
-	title={Solutions of Ill-posed Problems},
-	publisher={W. H. Winston},
-	year= 1977 ,
-	address={Washington, D.C.}
-}
-
- at article{Tikhonov63,
-	author={A. N. Tikhonov},
-	title={Solution of incorrectly formulated problems and the regularization method},
-	journal={Soviet Math. Dokl.},
-	volume={4},
-	year= 1963 ,
-	pages={1035--1038}
-}
-
- at article{Timaeus86,
-	author={Ian Timaeus},
-	title={An Assessment of Methods for Estimating Adult Mortality from Two Sets of
-		Data on Maternal Orphanhood},
-	journal={Demography},
-	volume= 23,
-	year= 1986,
-	pages={435--450}
-}
-
- at article{Timaeus91,
-	author={Ian Timaeus},
-	title={Measurement of Adult Mortality in Developing Countries: A Comparative Review},
-	journal={Population Index},
-	volume= 57,
-	year= 1991,
-	pages={552-568},
-	number= 4
-}
-
- at article{Timaeus91b,
-	author={Ian Timaeus},
-	title={Estimation of Adult Mortality from Orphanhood Before and Since Marriage},
-	journal={Population Studies},
-	volume= 45,
-	year={1991},
-	pages={455--472}
-}
-
- at article{Timpone98,
-	author={Richard J. Timpone},
-	title={Structure, Behavior, and Voter Turnout in the United States},
-	journal={American Political Science Review},
-	volume={92},
-	year={1998},
-	pages={145-158},
-	month={March},
-	number={1}
-}
-
- at incollection{TimZabAli01,
-	author={Ian M. Timaeus and Basia Zaba and Mohammed Ali},
-	title={Estimation of Adult Mortality from Data on Adult Siblings},
-	booktitle={Brass Tacks: Essays in Medical Demography},
-	publisher={Athlone},
-	year= 2001,
-	editor={B. Zaba and J. Blacker},
-	pages={43--66}
-}
-
- at incollection{Tobler79,
-	author={Waldo Tobler},
-	title={Cellular Geography},
-	booktitle={Philosophy in Geography},
-	publisher={Reidel},
-	address={Dordrecht},
-	year= 1979,
-	editor={S.\ Gale and G.\ Olssen}
-}
-
- at article{TodDefOde94,
-  author =	 {J.E. Todd and A. De Francisco and T.J.D. O'Dempsey
-                  and B.M. Greenwood},
-  title =	 {The limitations of verbal autopsy in a
-                  malaria-endemic region},
-  journal =	 {Annals of Tropical Paediatrics},
-  volume =	 {14},
-  year =	 {1994},
-  pages =	 {31-36}
-}
-
- at book{Torgerson58,
-  author =	 {Warren S. Torgerson},
-  title =	 {Theory and Methods of Scaling},
-  publisher =	 {Wiley and Sons},
-  year =	 1958,
-  address =	 {New York}
-}
-
- at incollection{TorRauHer93,
-  author =	 {Hege Torp and O. Rauum and E. Hernaes and
-                  H. Goldstein},
-  title =	 {The First Norwegian Experiment},
-  booktitle =	 {Measuring Labour Market Measures: Evaluating the
-                  Effects of Active Labour Market Policies},
-  publisher =	 {Ministry of Labour},
-  year =	 {1993},
-  address =	 {Copenhagen, Denmark},
-  editor =	 {K. Jensen and Per Kongshoj Madsen}
-}
-
- at article{TruRod90,
-  author =	 {J. Trussell and G. Rodriguez},
-  title =	 {A Note on the Sisterhood Estimator of Maternal
-                  Mortality},
-  journal =	 {Studies in Family Planning},
-  volume =	 21,
-  year =	 1990,
-  pages =	 {344--346},
-  month =	 {Nov-Dec},
-  number =	 6
-}
-
- at article{TsuLin86,
-  author =	 {Robert K. Tsutakawa and Hsin Ying Lin},
-  title =	 {Bayesian Estimation of Item Response Curves},
-  journal =	 {Psychometrika},
-  volume =	 {51},
-  year =	 {1986},
-  pages =	 {251-267},
-  month =	 {June},
-  number =	 {2}
-}
-
- at article{TsuMinKey94,
-  author =	 {Ichiro Tsuji and others},
-  title =	 {The Predictive Power of Self-Rated Health,
-                  Activities of Daily Living, and Ambulatory Activity
-                  for Cause Specific Mortality among the Elderly: A
-                  Three-year Follow-up in Urban Japan},
-  journal =	 {Journal of the American Geriatric Society},
-  volume =	 42,
-  year =	 1994,
-  pages =	 {153-56}
-}
-
- at techreport{Tsutakawa75,
-  author =	 {Robert K. Tsutakawa},
-  title =	 {Bayesian Inference for Bioassay},
-  institution =	 {University of Missouri - Columbia},
-  year =	 {1975},
-  month =	 {August},
-  number =	 {52}
-}
-
- at article{Tsutakawa84,
-  author =	 {Robert K. Tsutakawa},
-  title =	 {Estimation of Two-Parameter Logistic Item Response
-                  Curves},
-  journal =	 {Journal of Educational Statistics},
-  volume =	 {9},
-  year =	 {1984},
-  pages =	 {263-276},
-  number =	 {4}
-}
-
- at article{Tsutakawa92,
-  author =	 {Robert K. Tsutakawa},
-  title =	 {Moments Under Conjugate Distributions in Bioassay},
-  journal =	 {Statistics \& Probability Letters},
-  volume =	 {15},
-  year =	 {1992},
-  pages =	 {229-233},
-  month =	 {October}
-}
-
- at article{Tsutakawa92b,
-  author =	 {Robert K. Tsutakawa},
-  title =	 {Prior Distribution for Item Response Curves},
-  journal =	 {British Journal of Mathematical and Statistical
-                  Psychology},
-  volume =	 {45},
-  year =	 {1992},
-  pages =	 {51-74}
-}
-
- at article{TulBoe98,
-  author =	 {Shripad Tuljapurkar and Carl Boe},
-  title =	 {Mortality Change and Forecasting: How Much and How
-                  Little Do We Know?},
-  journal =	 {North American Actuarial Journal},
-  volume =	 {2},
-  year =	 {1998},
-  number =	 {4},
-  annote =	 {This paper makes a critical assessment of knowledge
-                  about mortality change and the potential of existing
-                  work to contribute to the development of useful
-                  forecasts in Canada, Mexico, and the United
-                  States. Methods of forecasting are reviewed,
-                  including the scenario method used by the US Social
-                  Security Administration and the time series method
-                  of Lee and Carter.}
-}
-
- at article{TulLiBoe00,
-  author =	 {S. Tuljapurkar and N. Li and C. Boe},
-  title =	 {A Universal Pattern of Mortality Decline in the {G7}
-                  Countries},
-  journal =	 {Nature},
-  volume =	 405,
-  year =	 2000,
-  pages =	 {789--792},
-  month =	 {June}
-}
-
- at article{tumbarello98,
-  author =	 {M. Tumbarello and E. Tacconelli and K. de Gaetano
-                  and F. Ardit and T. Pirronti and R. Claudia and
-                  L. Ortona},
-  title =	 {Bacterial Pneumonia in HIV-Infected Patients:
-                  Analysis of Risk Factors and Prognostic Indicators},
-  journal =	 {Journal of Acquired Immune Deficiency Syndromes and
-                  Human Retrovirology},
-  volume =	 18,
-  year =	 1998,
-  pages =	 {39-45}
-}
-
- at unpublished{TurLit02,
-  author =	 {P.D. Turney and M.L. Littman},
-  title =	 {Unsupervised Learning of Semantic Orientation},
-  note =	 {National Research Council Canada},
-  year =	 {2002},
-  month =	 {May}
-}
-
- at article{TurMat01,
-  author =	 {G. Turrell and Colin Mathers},
-  title =	 {Socioeconomic inequalities in all-cause and
-                  specific-cause mortality in Australia: 1985--1987
-                  and 1995--1997},
-  journal =	 {International Journal of Epidemiology},
-  volume =	 30,
-  year =	 2001,
-  pages =	 {231--239},
-  number =	 2
-}
-
- at book{Turner85,
-  author =	 {Turner, Henry Ashby},
-  title =	 {German big business and the rise of Hitler},
-  publisher =	 {Oxford University Press},
-  year =	 {1985}
-}
-
- at inproceedings{Turney02,
-  author =	 {Peter D. Turney},
-  title =	 {Thumbs Up or Thumbs Down? Semantic Orientation
-                  Applied to Unsupervised Classification of Reviews},
-  booktitle =	 {Proceedings of the 40th Annual Meeting of the
-                  Association for Computational Linguistics},
-  year =	 {2002},
-  month =	 {July},
-  organization = {Institute for Information Technology},
-  address =	 {National Research Council of Canada, Ottawa,
-                  Ontario, Canada K1A0R6}
-}
-
- at article{Urdal05,
-  author =	 {Henrik Urdal},
-  title =	 {People vs. Malthus: Population Pressure,
-                  Environmental Degradation, and Armed Conflict
-                  Revisited},
-  journal =	 {Journal of Peace Research},
-  volume =	 {42},
-  year =	 {2005},
-  pages =	 {417-434},
-  month =	 {July},
-  number =	 {4}
-}
-
- at book{UttLock02,
-  title =	 {American Political Scientists: a Dictionary},
-  publisher =	 {Greenwood Press},
-  year =	 {2002},
-  editor =	 {Glenn H. Utter and Charles Lockhart},
-  address =	 {Westport, Conn},
-  edition =	 {2nd}
-}
-
- at book{Valentine64,
-  author =	 {Frederick A. Valentine},
-  title =	 {Convex Sets},
-  publisher =	 {McGraw-Hill},
-  year =	 1964,
-  address =	 {New York}
-}
-
- at article{VanCor99,
-  author =	 {Marina Vannucci and Fabio Corradi},
-  title =	 {Covariance structure of wavelet coefficients: theory
-                  and models in a Bayesian perspective},
-  journal =	 {Journal of the Royal Statistical Society B},
-  volume =	 {61},
-  year =	 {1999},
-  pages =	 {971-986},
-  number =	 {Part 4}
-}
-
- at book{Vandeth98,
-  author =	 {Jan W. van Deth},
-  title =	 {Comparative Politics: The Problem of Equivalence},
-  publisher =	 {Routledge},
-  year =	 {1998},
-  editor =	 {Jan W. van Deth},
-  address =	 {London}
-}
-
- at article{vanDoorslaer97,
-  author =	 {Eddy van Doorslaer},
-  title =	 {Income-related Inequalities in Health: Some
-                  International Comparisons},
-  journal =	 {Journal of Health Economics},
-  year =	 {1997},
-  number =	 {1},
-  volume =	 {16},
-  pages =	 {93--112}
-}
-
- at article{VanWissen01,
-  author =	 {Leo J.G. van Wissen},
-  title =	 {Demography of the Firm: A Useful Metaphor?},
-  journal =	 {European Journal of Population},
-  volume =	 {18},
-  year =	 {2002},
-  pages =	 {263-279}
-}
-
- at book{Vapnik95,
-	author={Vladimir N. Vapnik},
-	title={The Nature of Statistical Learning Theory},
-	publisher={Springer},
-	year= 1995,
-	address={New York}
-}
-
- at book{Vapnik98,
-	author={Vladimir N. Vapnik},
-	title={Statistical Learning Theory},
-	publisher={Wiley},
-	year= 1998 ,
-	address={New York}
-}
-
- at book{VenRip02,
-	author={William N. Venables and Brian D. Ripley},
-	title={Modern Applied Statistics with S},
-	publisher={Springer-Verlag},
-	year={2002},
-	edition={4th}
-}
-
- at article{VerAngCap02,
-	author={Arduino Verdecchia and Giovanni De Angelis and Riccardo Capocaccia},
-	title={Estimation and Projections of Cancer Prevalence from Cancer Registry Data},
-	journal={Statistics in Medicine},
-	volume= 21,
-	year= 2002,
-	pages={3511--3526}
-}
-
- at article{VerCapEgi89,
-	author={A. Verdecchia and R. Capocaccia and V. Egidi and A. Golini},
-	title={A Method for the Estimation of Chronic Disease Morbidity and Trends from
-		Mortality Data},
-	journal={Statistics in Medicine},
-	volume= 8,
-	year= 1989,
-	pages={201--216}
-}
-
- at article{Verrall93,
-	author={R.J. Verrall},
-	title={A state space formulation of Whittaker graduation, with extensions},
-	journal={Insurance: Mathematics and Economics},
-	volume= 13,
-	year= 1993,
-	pages={7--14}
-}
-
- at article{VerSch77,
-	author={Sidney Verba and Kay Lehman Schlozman},
-	title={Unemployment, Class Consciousness, and Radical Politics: What Didn't Happen
-		in the Thirties},
-	journal={Journal of Politics},
-	volume= 39,
-	year= 1977,
-	pages={291--323},
-	number= 2
-}
-
- at book{VerSchBra95,
-	author={Sidney Verba and Kay Lehman Schlozman and Henry E. Brady},
-	title={Voice and Equality: Civic Volunteerism in American Politics},
-	publisher={Harvard University Press},
-	year= 1995,
-	address={Cambridge, MA}
-}
-
- at article{Villalonga04,
-	author={Belen Villalonga},
-	title={Does Diversification Cause the ``Diversification Discount''?},
-	journal={Financial Management},
-	volume={33},
-	year={2004},
-	pages={5-27},
-	number={2}
-}
-
- at article{Voth03,
-  author =	 {Voth, Hans-Joachim},
-  title =	 {With a Bang, not a Whimper: Pricking Germany's Stock
-                  Market Bubble in 1927 and the Slide into Depression},
-  journal =	 {Journal of Economic History},
-  volume =	 63,
-  year =	 2003,
-  pages =	 {65--99},
-  number =	 1
-}
-
- at article{Voth95,
-  author =	 {Voth, Hans-Joachim},
-  title =	 {Did High Wages or High Interest Rates Bring Down the
-                  Weimar Republic? A Cointegration Model of Investment
-                  in Germany, 1925-1930},
-  journal =	 {Journal of Economic History},
-  volume =	 55,
-  year =	 1995,
-  pages =	 {801--821},
-  month =	 {December},
-  number =	 4
-}
-
- at article{WacWei82,
-	author={Wacholder, S. and Weinberg, C.R.},
-	title={{Paired versus Two-Sample Design for a Clinical Trial of Treatments with
-		Dichotomous Outcome: Power Considerations}},
-	journal={Biometrics},
-	volume={38},
-	year={1982},
-	pages={801--812},
-	number={3}
-}
-
- at article{Wagstaff00,
-	author={Adam Wagstaff},
-	title={Socioeconomic inequalities in Child Mortality: Comparisons Across Nine Developing
-		Countries},
-	journal= bull,
-	year={2000},
-	number={1},
-	volume={78},
-	pages={19--29}
-}
-
- at article{Wahba75,
-	author={G. Wahba},
-	title={Smoothing noisy data by spline functions},
-	journal={Numer. Math},
-	volume= 24,
-	year= 1975,
-	pages={383--393}
-}
-
- at article{Wahba77,
-	author={G. Wahba},
-	title={Practical approximate solutions to linear operator equations when the data
-		are noisy},
-	journal={SIAM J. Numer. Anal.},
-	volume={14},
-	year={1977}
-}
-
- at article{Wahba78,
-	author={G. Wahba},
-	title={{Improper Priors, Spline Smoothing and the Problem of Guarding Against Model
-		Errors in Regression}},
-	journal={Journal of the Royal Statistical Society B},
-	volume= 40,
-	year= 1978,
-	pages={364--372},
-	number= 3
-}
-
- at incollection{Wahba79,
-	author={G. Wahba},
-	title={Smoothing and ill-posed problems},
-	booktitle={Solution methods for integral equations and applications},
-	publisher={Plenum Press},
-	year= 1979,
-	address={New York},
-	editor={M. Golberg},
-	pages={183--194}
-}
-
- at inproceedings{Wahba80,
-	author={G. Wahba},
-	title={Spline bases, regularization, and generalized cross-validation for solving
-		approximation problems with large quantities of noisy data},
-	booktitle={Proceedings of the International Conference on Approximation theory in honour
-		of George Lorenz},
-	year={1980},
-	month={January 8--10},
-	publisher={Academic Press},
-	address={Austin, TX},
-	editor={J. Ward and E. Cheney}
-}
-
- at incollection{Wahba80a,
-	author={G. Wahba},
-	title={Spline bases, regularization, and generalized cross-validation for solving
-		approximation problems with large quantities of noisy data},
-	booktitle={Approximation theory III},
-	publisher={Academic Press},
-	year= 1980,
-	address={New York},
-	editor={W. Cheney},
-	pages={905--912}
-}
-
- at article{Wahba85,
-	author={G. Wahba},
-	title={A comparison of {GCV} and {GML} for choosing the smoothing parameter in
-		the generalized splines smoothing problem},
-	journal={The Annals of Statistics},
-	volume= 13,
-	year= 1985,
-	pages={1378--1402}
-}
-
- at book{Wahba90,
-	author={G. Wahba},
-	title={Spline Models for Observational Data},
-	publisher={{Series in Applied Mathematics, Vol. 59, SIAM}},
-	year= 1990 ,
-	address={Philadelphia}
-}
-
- at techreport{WahLinZha99,
-	author={G. Wahba and Y. Lin and H. Zhang},
-	title={Generalized Approximate Cross Validation for SVM, or, another way to look
-		at margin-like quantities},
-	institution={Department of Statistics, University of Wisconsin},
-	year={1999},
-	type={Tech. Report},
-	number={1006}
-}
-
- at misc{Wakefield01,
-	author={Jon Wakefield},
-	title={Ecological Inference for $2 \times 2$ Tables},
-	year= 2001,
-	howpublished={Working Paper \# 12, Center for Statistics and the Social Sciences, University
-		of Washington}
-}
-
- at article{WalCarXiaGel97,
-	author={Lance A. Waller and Bradley P. Carlin and Hong Xia and Alan E. Gelfand},
-	title={Hierarchical Spatio-Temporal Mapping of Disease Rates},
-	journal= jasa,
-	volume= 92,
-	year= 1997,
-	pages={607-617}
-}
-
- at article{WalDon94,
-	author={G.E.L. Walraven and P.W.J. van Dongen},
-	title={Assessment of maternal mortality in Tanzania},
-	journal={British Journal of Obstetrics and Gynaecology},
-	volume= 101,
-	year= 1994,
-	pages={414--417}
-}
-
- at book{Waldron99,
-	author={Jeremy Waldron},
-	title={Law and disagreement},
-	publisher={Oxford University Press},
-	year={1999},
-	address={New York}
-}
-
- at article{Waletal97,
-	author={L.A. Waller and B.P. Carlin and H. Xia and A.E. Gelfand},
-	title={{Hierarchical Spatio-Temporal Mapping of Disease Rates}},
-	journal= jasa,
-	volume= 92,
-	year= 1997,
-	pages={607--617},
-	number= 438
-}
-
- at techreport{WalHogHam06,
-	author={Robert Walker and Lesley Hoggart and Gayle Hamilton and Susan Blank},
-	title={Making random assignment happen: Evidence from the UK Employment Retention
-		and Advancement (ERA) demonstration},
-	institution={Department for Work and Pensions, Corporate Document Services},
-	year={2006},
-	month={March},
-	type={research report},
-	note={ISBN 1 84 123981 X, Research Report 330}
-}
-
- at article{WanRob98,
-	author={Naisyin Wang and James Robins},
-	title={Large-sample theory for parametric multiple imputation procedures},
-	journal={Biometrika},
-	volume={85},
-	year={1998},
-	pages={935-948}
-}
-
- at article{WanSchAvo05,
-	author={Philip S. Wang and Sebastian Schneeweiss and Jerry Avorn and Michael A.
-		Fischer and Helen Mogun and Daniel H. Solomon and M. Alan Brookhart},
-	title={Risk of Death in Elderly Users of Conventional vs. Atypical Antipsychotic
-		Medications},
-	journal={New England Journal of Medicine},
-	volume={353},
-	year={2005},
-	pages={2335-2341},
-	month={December}
-}
-
- at article{WanSha91,
-	author={Goya Wannamethee and A. G. Shaper },
-	title={Self-assessment of Health Status and Mortality in Middle Aged British Men},
-	journal={International Journal of Epidemiology},
-	volume= 20,
-	year= 1991,
-	pages={239-45},
-	number= 1
-}
-
- at unpublished{WanYanMa06,
-	author={L Wang and G. Yang and J Ma and C Rao and X Wan and AD Lopez},
-	title={Evaluation of the quality of cause of death statistics in rural China using
-		verbal autopsies},
-	year={2006},
-	note={Journal of Epidemiology and Community Health}
-}
-
- at book{WapBerBra40,
-	author={Waples, D. and Berelson, B. and Bradshaw, F.R.},
-	title={{What Reading Does to People: A Summary of Evidence on the Social Effects
-		of Reading and a Statement of Problems for Research}},
-	publisher={The University of Chicago Press},
-	year={1940}
-}
-
- at article{Ware05,
-	author={Helen Ware},
-	title={Demography, Migration and Conflict in the Pacific},
-	abstract={This article explores the relationships between demography and internal
-		conflict in the Pacific Island countries, focusing on the three subregions
-		Polynesia, Micronesia and Melanesia. These countries confront distinctive
-		challenges and opportunities because of their unique cultures and non-militarized
-		status, combined with very small size and remote locations. The use of
-		the MIRAB model of island economies based on migration, remittances, aid
-		and bureaucracy is extended to examine its impact on social cohesion and
-		the avoidance of internal conflict. For Polynesia, MIRAB is found to be
-		a sustainable development strategy. Continuous emigration from Polynesia
-		serves to reduce population pressure and communal tensions. Further, remittance
-		income supports the Polynesian economies, and this also reduces the potential
-		for conflict. For Micronesia, except Kiribati and Nauru, migration access
-		to the USA is assured. In contrast, for the Melanesian countries, there
-		is minimal emigration, rapid population growth and considerable intercommunal
-		tension, which has resulted in several coups and one 'failed state'. Demographic
-		pressure created by rapid population growth results in a lack of employment
-		opportunities for youths (who provide the majority of participators in
-		civil unrest and conflicts) rather than in direct pressure on land and
-		other natural resources.},
-	journal={Journal of Peace Research},
-	volume={42},
-	year={2005},
-	pages={435-454},
-	month={July},
-	number={4}
-}
-
- at unpublished{WarSivCao05,
-	author={Michael D. Ward and Randolph M. Siverson and Xun Cao},
-	title={Everybody Out of the Pool!},
-	note={Michael Ward, Dept of Political Science, Univ of WA, Seattle mdw at u.washington.edu},
-	year={2005},
-	month={August}
-}
-
- at techreport{WasRoe06,
-	author={Larry Wasserman and Kathryn Roeder},
-	title={Weighted Hypothesis Testing},
-	institution={Carnegie Mellon University},
-	year={2006},
-	month={April}
-}
-
- at article{Weibe04,
-	author={Janyce M. Wiebe},
-	title={Tracking Point of View in Narrative},
-	journal={Computational Linguistics},
-	volume={20},
-	year={1994},
-	pages={233-287},
-	number={2}
-}
-
- at article{WeiCoxWil87,
-	author={Milton C. Weinstein and Pamela G. Coxson and Lawrence W. Williams and Theodore
-		M. Pass and William B. Stason and Lee Goldman},
-	title={Forecasting Coronary Heart Disease Incidence, Mortality, and Cost: The Coronary
-		Heart Disease Policy Model},
-	journal={American Journal of Public Health},
-	volume= 77,
-	year= 1987,
-	pages={1417--1426},
-	number= 11
-}
-
- at book{Weiss86,
-	author={Noel S. Weiss},
-	title={Clinical Epidemiology: The Study of the Outcome of Illness},
-	publisher={Oxford University Press},
-	year={1986},
-	volume={11},
-	address={New York},
-	series={Monographs in Epidemiology and Biostatistics}
-}
-
- at article{WeiTan90,
-	author={Greg C. Wei and Martin A. Tanner},
-	title={A Monte Carlo Implementation of the EM Algorithm and the Poor Man's Data
-		Augmentation Algorithms},
-	journal={Journal of the American Statistical Association},
-	volume={85},
-	year={1990},
-	pages={699-704},
-	month={September}
-}
-
- at article{WeiWanIbr97,
-	author={Robert E. Weiss and Yan Wang and Joseph G. Ibrahim},
-	title={Predictive Model Selection for Repeated Measures Random Effects Models Using
-		Bayes Factors},
-	journal={Biometrics},
-	volume= 53,
-	year= 1997,
-	pages={592--602},
-	month={June}
-}
-
- at article{Wellhofer03,
-  title =	 {{Democracy and Fascism: Class, Civil Society, and
-                  Rational Choice in Italy}},
-  author =	 {E. Spencer Wellhofer},
-  journal =	 {American Political Science Review},
-  volume =	 {97},
-  number =	 {01},
-  pages =	 {91--106},
-  year =	 {2003}
-}
-
- at article{Werner00,
-	author={Suzanne Werner},
-	title={The Effects of Political Similarity on the Onset of Militarized Disputes,
-		1816-1985},
-	journal= prq,
-	volume= 53,
-	year= 2000,
-	pages={343--374},
-	month={June}
-}
-
- at article{Wernette1977,
-	author={Wernette, Dee Richard},
-	title={Quantitative Methods in Studying Political Mobilization in Late Weimar Germany},
-	journal={Historical Methods Newsletter},
-	volume= 10,
-	year= 1977,
-	pages={97-101}
-}
-
- at book{WesHar97,
-	author={Mike West and Jeff Harrison},
-	title={Bayesian Forecasting and Dynamic Linear Models},
-	publisher={Springer},
-	year= 1997,
-	address={New York}
-}
-
- at article{WesHarMig85,
-	author={Mike West and P. Jeff Harrison and Helio S. Migon},
-	title={Dynamic Generalized Linear Models and Bayesian Forecasting},
-	journal={Journal of the American Statistical Association},
-	volume={80},
-	year={1985},
-	pages={73-83},
-	month={March},
-	number={389}
-}
-
- at article{Western95,
-	author={Bruce Western},
-	title={{Concepts and Suggestions for Robust Regression Analysis}},
-	journal={American Journal of Political Science},
-	volume={39},
-	year={1995},
-	pages={786--817},
-	number={3}
-}
-
- at article{Western98,
-	author={Bruce Western},
-	title={{Causal Heterogeneity in Comparative Research: a Bayesian Hierarchical Modelling
-		Approach}},
-	journal={American Journal of Political Science},
-	volume= 42,
-	year= 1998,
-	pages={1233--1259},
-	month={October},
-	number= 4
-}
-
- at article{WhiEva96,
-	author={Stephen Whitefield and Geoffrey Evans},
-	title={Support for Democracy and Political Opposition in Russia 1993-95},
-	journal={Post Soviet Affairs},
-	volume={12},
-	year={1996},
-	pages={218-52},
-	number={3}
-}
-
- at book{WhiRosMcA97,
-	author={Stephen White and Richard Rose and Ian McAllister},
-	title={How Russia Votes},
-	publisher={Chatham House Publishers, Inc.},
-	year={1997},
-	address={Chatham, NJ}
-}
-
- at unpublished{WhiSetCha06,
-	author={David R. Whiting and Philip W. Setel and Daniel Chandramohan and Lara J.
-		Wolfson and Yusuf Hemed and Alan D. Lopez},
-	title={Estimating Cause-Specific Mortality from Community- and Facility-Based Data
-		Sources in Tanzania: Options and Implications for Mortality Burden Estimates},
-	note={Whiting, MEASURE Evaluation, Carolina Population Center, Univ. of NC at
-		Chapel Hill, Dept of Medicine, School of Clinical Medical Sciences, Univ
-		of Newcastle upon Tyne England; david.whiting at ncl.ac.uk},
-	year={2006}
-}
-
- at article{Whitbeck05,
-	author={Caroline Whitbeck},
-	title={The Responsible Collection, Retention, Sharing, and Interpretation of Data},
-	journal={Online Ethics Center for Engineering and Science},
-	year= 2005,
-	note={{http://onlineethics.org/reseth/mod/data.html}}
-}
-
- at article{White02,
-	author={Kevin M. White},
-	title={Longevity Advances in High-Income Countries, 1955-96},
-	journal={Population and Development Review},
-	volume= 28,
-	year= 2002,
-	pages={59--76},
-	month={March},
-	number= 1
-}
-
- at book{White82,
-	author={Halbert L. White},
-	title={Asymptotic Theory For Econometricians},
-	publisher={Academic Press},
-	year= 1984,
-	address={New York}
-}
-
- at book{White92,
-	author={Halbert H. White},
-	title={Artificial Neural Networks, Approximation and Learning Theory},
-	publisher={Blackwell},
-	year= 1992,
-	address={Cambridge, MA}
-}
-
- at article{Whittaker23,
-	author={E. T. Whittaker},
-	title={On a New Method of Graduation},
-	journal={Proceedings of the Edinburgh Mathematical Society},
-	volume= 41,
-	year= 1923,
-	pages={63--75}
-}
-
- at article{WidKub96,
-	author={Widmer, G. and Kubat, M.},
-	title={{Learning in the presence of concept drift and hidden contexts}},
-	journal={Machine Learning},
-	volume={23},
-	year={1996},
-	pages={69--101},
-	number={1},
-	publisher={Springer}
-}
-
- at unpublished{WieWilBel01,
-	author={Janyce Wiebe and Theresa Wilson and Matthew Bell},
-	title={Identifying Collocations for Recognizing Opinions},
-	note={University of Pittsburgh wiebe, twilson, mbell at cs.pitt.edu},
-	year={2001},
-	month={April}
-}
-
- at article{WilBerNob01,
-	author={B.P. Will and J.M. Berthelot and K.M. Nobrega and W. Flanagan and W.K. Evans},
-	title={Canada's Population Health Model (POHEM): A Tool for Performing Economic
-		Evaluations of Cancer Control Interventions},
-	journal={European Journal of Cancer},
-	volume= 37,
-	year= 2001,
-	pages={1797--1804}
-}
-
- at article{WilGouBos02,
-	author={Brian G. Williams and Eleanor Gouws and Cynthia Boschi-Pinto and Jennifer
-		Bryce and Christopher Dye},
-	title={Estimates of world-wide distribution of child deaths from acute respiratory
-		infections},
-	journal={The Lancet Infectious Diseases},
-	volume={2},
-	year={2002},
-	pages={25-32},
-	month={January}
-}
-
- at article{WilHol07,
-	author={Elizabeth Ty Wilde and Robinson Hollister},
-	title={How Close is Close Enough? Evaluating Propensity Score Matching Using Data
-		from a Class-Size Reduction Experiment},
-	journal={Journal of Policy Analysis and Management},
-	volume={26},
-	year={2007},
-	number={3}
-}
-
- at techreport{Wilmoth93,
-	author={John Wilmoth},
-	title={{Computational Methods for Fitting and Extrapolating the Lee-Carter Model
-		of Mortality Change}},
-	institution={Department of Demography, University of California, Berkeley},
-	year= 1993
-}
-
- at incollection{Wilmoth96,
-	author={John R. Wilmoth},
-	title={Mortality Projections for Japan: A Comparison of Four Methods},
-	booktitle={Health and Mortality Among Elderly Populations},
-	publisher={Oxford University Press},
-	year= 1996,
-	address={Oxford},
-	editor={G. Caselli and Alan Lopez},
-	pages={266-287}
-}
-
- at article{Wilmoth98,
-	author={John Wilmoth},
-	title={The Future of Human Longevity: A Demographer's Perspective},
-	journal={Science},
-	volume= 280,
-	year= 1998,
-	pages={395--397},
-	month={April 17},
-	number= 5362
-}
-
- at article{Wilmoth98b,
-	author={John Wilmoth},
-	title={Is the Pace of Japanese Mortality Decline Converging Towards International
-		Trends?},
-	journal={Population and Development Review},
-	volume= 24,
-	year= 1998,
-	pages={593--600},
-	number= 3
-}
-
- at article{WinMar92,
-	author={Christopher Winship and Robert D. Mare},
-	title={Models for Sample Selection Bias},
-	journal={Annual Review of Sociology},
-	volume={18},
-	year={1992},
-	pages={327-50}
-}
-
- at article{WinMor99,
-	author={Christopher Winship and Stephen L. Morgan},
-	title={The Estimation of Causal Effects from Observational Data},
-	journal={Annual Review of Sociology},
-	volume= 25,
-	year= 1999,
-	pages={659--707}
-}
-
- at article{WinRad94,
-	author={Christopher Winship and Larry Radbill},
-	title={Sampling Weights and Regression Analysis},
-	journal={Sociological Methods and Research},
-	volume={23},
-	year={1994},
-	pages={230-257},
-	month={November},
-	number={2}
-}
-
- at unpublished{WinSob00,
-	author={Christopher Winship and Michael Sobel},
-	title={Causal Inference in Sociological Studies},
-	note={Harvard University},
-	year= 2000
-}
-
- at article{WirLin94,
-	author={D.N. Wirawan and M. Linnan},
-	title={The Bali indirect maternal mortality study},
-	journal={Studies in Family Planning},
-	volume= 25,
-	year= 1994,
-	pages={304--309}
-}
-
- at incollection{WolCalJoh94,
-  author =	 {F.D. Wolinsky and C.M. Callahan and R.J. Johnson},
-  title =	 {Subjective Health Status and Mortality in the
-                  Elderly},
-  booktitle =	 {Facts and Research in Gerontology},
-  year =	 1994,
-  publisher =	 {Springer Publishing Company},
-  address =	 {New York},
-  pages =	 {13-28}
-}
-
- at article{WolFir02,
-  author =	 {Rory Wolfe and David Firth},
-  title =	 {Modelling Subjective Use of an Ordinal Response Scale
-                  in a Many Period Crossover Experiment},
-  journal =	 {Applied Statistics},
-  volume =	 51,
-  year =	 2002,
-  pages =	 {245--255},
-  month =	 {April},
-  number =	 2
-}
-
- at article{WolJoh92,
-  author =	 {Fredric Wolinsky and Robert Johnson},
-  title =	 {Perceived Health Status and Mortality Among Older
-                  Men and Women},
-  journal =	 {Journal of Gerontology: Social Sciences},
-  volume =	 47,
-  year =	 1992,
-  pages =	 {S304-12}
-}
-
- at article{WolJohStu95,
-  author =	 {Fredric D. Wolinsky and Robert L. Johnson and
-                  Timothy E. Stump},
-  title =	 {The Risk of Mortality among Older Adults over an
-                  Eight-Year Period},
-  journal =	 {The Gerontologist},
-  volume =	 35,
-  year =	 1995,
-  pages =	 {150-61}
-}
-
- at article{WonBenKof98,
-  author =	 {John B. Wong and William G. Bennett and Raymond
-                  S. Koff and Stephen G. Pauker},
-  title =	 {Pretreatment Evaluation of Chronic Hepatitis C:
-                  Risks, Benefits, and Costs},
-  journal =	 {Journal of the American Medical Association},
-  volume =	 280,
-  year =	 1998,
-  pages =	 {2088--2093}
-}
-
- at article{WonMcqMch00,
-  author =	 {John B. Wong and Geraldine M. McQuillan and John
-                  G. McHutchison and Thierry Poynard},
-  title =	 {Estimating Future Hepatitis C Morbidity, Mortality,
-                  and Costs in the United States},
-  journal =	 {American Journal of Public Health},
-  volume =	 90,
-  year =	 2000,
-  pages =	 {1562--1569},
-  number =	 10
-}
-
- at book{WraPet96,
-  author =	 {Richard Wrangham and Dale Peterson},
-  title =	 {Demonic Males},
-  publisher =	 {Houghton Mifflin},
-  year =	 1996
-}
-
- at book{WuHam00,
-  author =	 {Chien-Fu Wu and Michael Hamada},
-  title =	 {Experiments: Planning, Analyzing, and Parameter
-                  Design Optimization},
-  publisher =	 {Wiley-Interscience},
-  year =	 {2000},
-  address =	 {New York}
-}
-
- at article{WuSch93,
-  author =	 {Z. Wu and R. Schaback},
-  title =	 {Local Error Estimates for Radial Basis Function
-                  Interpolation of Scattered Data},
-  journal =	 {IMA Journal of Numerical Analysis},
-  volume =	 13,
-  year =	 1993,
-  pages =	 {13--27}
-}
-
- at article{YanRaoMa05,
-  author =	 {Gonghuan Yang and Chalapati Rao and Jiemin Ma and
-                  Lijun Wang and Xia Wan and Guillermo Dubrovsky and
-                  Alan D. Lopez},
-  title =	 {Validation of verbal autopsy procedures for adult
-                  deaths in China},
-  journal =	 {International Journal of Epidemiology},
-  year =	 {2005},
-  month =	 {September},
-  note =	 {Advance Access published 9/6/05
-                  doi:10.1093/ije/dyi181}
-}
-
- at article{YeeHas03,
-  author =	 {Yee, T. W. and Hastie, T. J.},
-  title =	 {Reduced-rank vector generalized linear models},
-  journal =	 {Statistical Modelling},
-  volume =	 3,
-  year =	 {2003},
-  pages =	 {15--41},
-  number =	 1
-}
-
- at article{YeeWil96,
-  author =	 {T.W. Yee and C.J. Wild},
-  title =	 {Vector Generalized Additive Models},
-  journal =	 {Journal of the Royal Statistical Society. Series B
-                  (Methodological)},
-  volume =	 {58},
-  year =	 {1996},
-  pages =	 {481--493},
-  number =	 {3}
-}
-
- at article{Yoo02,
-  author =	 {Thomas W. Joo},
-  title =	 {Presumed Disloyal: Executive Power, Judicial
-                  Deference, and the Construction of Race Before and
-                  After September 11},
-  journal =	 {Columbia Human Rights Law Review},
-  volume =	 34,
-  year =	 {2002},
-  pages =	 {1--??}
-}
-
- at Article{YacYac06,
-  author =	 {Jason Webb Yackee and Susan Webb Yackee},
-  title =	 {A Bias Towards Business? Assessing Interest Group
-                  Influence on the U.S. Bureaucracy},
-  journal =	 {Journal of Politics},
-  year =	 {2006},
-  volume =	 {68},
-  number =	 {1},
-  pages =	 {128-169},
-}
-
- at article{YosMagBos03,
-  author =	 {Hirokazu Yoshikawa and Katherine A. Magnuson and
-                  Johannes M. Bos and Jo Ann Hsueh},
-  title =	 {Effects of Earnings-Supplement Policies on Adult
-                  Economic and Middle-Childhood Outcomes Differ for
-                  the `Hardest to Employ'},
-  journal =	 {Child Development},
-  volume =	 {74},
-  year =	 {2003},
-  pages =	 {1500-1521},
-  month =	 {September/October},
-  number =	 {5}
-}
-
- at article{YuKeaSly98,
-	author={Elena S. H. Yu and Yin M. Kean and Donal J. Slyman and others},
-	title={Self Perceived Health and 5-Year Mortality Risks among the Elderly in Shanghai,
-		China},
-	journal={American Journal of Epidemiology},
-	volume= 147,
-	year= 1998,
-	pages={{880-90}}
-}
-
- at article{yule12,
-	author={G.U. Yule},
-	title={On the Methods of Measuring the Association Between Two Attributes},
-	journal={Journal of the Royal Statistical Society},
-	volume= 75,
-	year= 1912,
-	pages={579--642}
-}
-
- at book{Zaba86,
-	author={Basia Zaba},
-	title={Measurement of Emigration Using Indirect Techniques: Manual for the Collection
-		and Analysis of Data on Residence of Relatives},
-	publisher={Ordina Editions},
-	address={Li\`ege, Belgium},
-	year={1986},
-	note={Working Group on the Methodology for the Study of International Migration}
-}
-
- at article{ZabDav96,
-	author={Basia Zaba and Patricia H. David},
-	title={Fertility and the Distribution of Child Mortality Risk Among Women: An Illustrative
-		Analysis},
-	journal={Population Studies},
-	volume= 50,
-	year= 1996,
-	pages={263--278}
-}
-
- at book{Zaller92,
-	author={John R. Zaller},
-	title={The Nature and Origins of Mass Opinion},
-	publisher={Cambridge University Press},
-	year= 1992,
-	address={New York, NY}
-}
-
- at article{ZamRouOrh01,
-	author={Asad Zaman and Peter J. Rousseeuw and Mehmet Orhan},
-	title={{Econometric applications of high-breakdown robust regression techniques}},
-	journal={Economics Letters},
-	volume={71},
-	year={2001},
-	pages={1--8}
-}
-
- at article{Zeileis04,
-	author={Achim Zeileis},
-	title={Econometric Computing with HC and HAC Covariance Matrix Estimators},
-	journal={Journal of Statistical Software},
-	volume={11},
-	year={2004},
-	pages={1--17},
-	number={10}
-}
-
- at article{Zellner62,
-	author={A. Zellner},
-	title={An Efficient Method of Estimating Seemingly Unrelated Regressions and Tests
-		for Aggregation Bias},
-	journal= jasa,
-	volume= 57,
-	year= 1962,
-	pages={348--368},
-	month={June},
-	number= 298
-}
-
- at incollection{Zeng00,
-	author={Langche Zeng},
-	title={Neural Network Models for Political Analysis},
-	booktitle={Political Complexity: Nonlinear Models of Politics},
-	publisher={University of Michigan Press},
-	year= 2000,
-	editor={Diana Richards},
-	pages={239--268}
-}
-
- at article{Zeng99,
-	author={Langche Zeng},
-	title={Classification and Prediction with Neural Network Models},
-	journal= smr,
-	volume= 27,
-	year= 1999,
-	pages={499--524},
-	month={May},
-	number= 4
-}
-
- at article{zhang98,
-	author={Jun Zhang and Kai F. Yu},
-	title={What's the Relative Risk? A Method of Correcting the Odds Ratio in Cohort
-		Studies of Common Outcomes},
-	journal={Journal of the American Medical Association},
-	volume= 280,
-	year= 1998,
-	pages={1690--1691},
-	number= 19
-}
-
- at article{Zhao04,
-	author={Zhong Zhao},
-	title={Using matching to estimate treatment effects: data requirements, matching
-		metrics, and {M}onte {C}arlo evidence},
-	journal={Review of Economics and Statistics},
-	volume= 86,
-	year= 2004,
-	pages={91-107},
-	number= 1
-}
-
- at article{zocchetti97,
-	author={Carlo Zocchetti and Dario Consonni and Pier Bertazzi},
-	title={Relationship Between Prevalence Rate Ratios and Odds Ratios in Cross-sectional
-		Studies},
-	journal={International Journal of Epidemiology},
-	volume= 26,
-	year= 1997,
-	pages={220--23},
-	number= 1
-}
-
- at article{Zorn01,
-	author={Christopher Zorn},
-	title={Generalized Estimating Equation Models for Correlated Data: A Review with
-		Applications},
-	journal= ajps,
-	volume= 45,
-	year= 2001,
-	pages={470--490},
-	month={April}
-}
-
- at article{ZouTepSaa00,
-	author={Shimian Zou and Martin Tepper and Susie El Saadany},
-	title={Prediction of Hepatitis C Burden in Canada},
-	journal={Canadian Journal of Gastroenterology},
-	volume= 14,
-	year= 2000,
-	pages={575--580},
-	month={July/August},
-	number= 7
-}
-
- at article{KnaArrMen06,
-  author =	 {Felicia Marie Knaul and H{\'e}ctor Arreola-Ornelas and
-                  Oscar M{\'e}ndez-Carniado and Chloe Bryson-Cahn and Jeremy
-                  Barofsky and Rachel Maguire and Martha Miranda},
-  title =	 {Evidence is good for your health system: policy
-                  reform to remedy catastrophic and impoverishing
-                  health spending in Mexico},
-  journal =	 {Lancet},
-  volume =	 {368},
-  year =	 {2006},
-  pages =	 {1828-41},
-  month =	 {November}
-}
-
- at article{KnaFre05,
-  author =	 {Felicia Marie Knaul and Julio Frenk},
-  title =	 {Health Insurance in Mexico: Achieving Universal
-                  Coverage Through Structural Reform},
-  journal =	 {Health Affairs},
-  volume =	 {24},
-  number =	 {6},
-  year =	 {2005},
-  pages =	 {1828-41},
-  month =	 {November}
-}
-
- at book{Turner79,
-	author={Henry Turner},
-	title={German Big Business and the Rise of Hitler},
-	publisher={Oxford University Press},
-	year= 1979,
-	address={New York}
-}
-
- at article{CamElbAlt04,
-	author={Campbell, M. and Elbourne, D. and Altman, D.},
-	title={{CONSORT statement: extension to cluster randomised trials}},
-	journal={BMJ},
-	volume={328},
-	year={2004},
-	pages={702--708},
-	number={7441}
-}
-
- at proceedings{HigGre06,
-	editor={JPT Higgins and S. Green},
-	title={Cochrane Handbook for Systematic Reviews of Interventions 4.2.5 [updated
-		September 2006]},
-	publisher={John Wiley and Sons},
-	year= 2006,
-	address={Chichester, UK},
-	series={The Cochrane Library},
-	number= 4
-}
-
- at article{Campbell04,
-	author={Michael J Campbell},
-	title={Editorial: Extending CONSORT to include cluster trials},
-	journal={BMJ},
-	volume= 328,
-	year= 2004,
-	pages={654-655},
-	month={March},
-	note={{http://www.bmj.com/cgi/content/full/328/7441/654\%20?q=y}}
-}
-
- at techreport{MRC02,
-	author={{Medical Research Council}},
-	title={Cluster Randomized Trials: Methodological and Ethical Considerations},
-	institution={MRC Clinical Trials Series},
-	year= 2002,
-	note={{http://www.mrc.ac.uk/Utilities/Documentrecord/index.htm?d=MRC002406}}
-}
-
- at article{Cornfield78,
-	author={Jerome Cornfield},
-	title={Randomization by Group: a Formal Analysis},
-	journal={American Journal of Epidemiology},
-	volume={108},
-	year={1978},
-	pages={100},
-	number={2}
-}
-
- at book{Wood06,
-	author={Simon N. Wood},
-	title={Generalized Additive Models: An Introduction with R},
-	publisher={CRC Press},
-	year= 2006,
-	address={London}
-}
-
- at article{Wood04,
-	author={Simon N. Wood},
-	title={Stable and efficient multiple smoothing parameter estimation for generalized
-		additive models},
-	journal= jasa,
-	volume= 99,
-	year= 2004,
-	pages={673--686}
-}
-
- at article{Wood00,
-	author={Simon N. Wood},
-	title={Modelling and Smoothing Parameter Estimation with Multiple Quadratic Penalties},
-	journal={Journal of the Royal Statistical Society, Series B},
-	volume={62},
-	year={2000},
-	pages={413-428},
-	number={2}
-}
-
- at manual{HamHen05,
-	author={Jeff Hamann and Arne Henningsen},
-	title={systemfit: Simultaneous Equation Systems in R},
-	year={2005},
-	url={{http://www.systemfit.org}}
-}
-
- at article{BenYek01,
-	author={Yoav Benjamini and Daniel Yekutieli},
-	title={The Control of the False Discovery Rate in Multiple Testing under Dependency},
-	journal={The Annals of Statistics},
-	volume={29},
-	year={2001},
-	pages={1165-1188},
-	month={August},
-	number={4}
-}
-
- at article{BenHoc95,
-	author={Yoav Benjamini and Yosef Hochberg},
-	title={Controlling the False Discovery Rate: A Practical and Powerful Approach to
-		Multiple Testing},
-	journal={Journal of the Royal Statistical Society, Series B},
-	volume={57},
-	year={1995},
-	pages={289-300},
-	number={1}
-}
-
- at techreport{WWC06,
-	author={{What Works Clearinghouse}},
-	title={Evidence Standards for Reviewing Studies},
-	institution={Institute for Educational Sciences},
-	year= 2006,
-	note={{http://www.whatworks.ed.gov/reviewprocess/standards.html}}
-}
-
- at article{RauMarSpy07,
-	author={Stephen W. Raudenbush and Andres Martinez and Jessaca Spybrook},
-	title={Strategies for Improving Precision in Group-Randomized Experiments},
-	journal={Educational Evaluation and Policy Analysis},
-	volume= 29,
-	year= 2007,
-	pages={5--29}
-}
-
- at article{HorKle07,
-	author={Nicholas J. Horton and Ken P. Kleinman},
-	title={Much Ado About Nothing: A Comparison of Missing Data Methods and Software
-		to Fit Incomplete Data Regression Models},
-	journal={The American Statistician},
-	volume= 61,
-	year= 2007,
-	pages={79--90},
-	month={February},
-	number= 1
-}
-
- at book{GwaWagYaz05,
-	title={Reaching the Poor},
-	publisher={The World Bank},
-	year= 2005,
-	editor={Davidson R. Gwatkin and Adam Wagstaff and Abdo S. Yazbeck},
-	address={Washington, D.C.}
-}
-
- at book{wdr04,
-	title={Making Services Work for Poor People: World Development Report, 2004},
-	publisher={Oxford University Press and the World Bank},
-	year= 2003,
-	editor={{World Bank}},
-	address={Washington, D.C.}
-}
-
- at article{BauLak03,
-	author={Matthew A. Baum and David A. Lake},
-	title={The Political Economy of Growth: Democracy and Human Capital},
-	journal={American Journal of Political Science},
-	volume={47},
-	year={2003},
-	pages={333-347},
-	month={April},
-	number={2}
-}
-
-
- at book{Lee02,
-        author={Taeku Lee},
-	title={Mobilizing Public Opinion: Black Insurgency and Racial Attitudes in the Civil Rights Era},
-	publisher={University of Chicago Press},
-	year={2002},
-	address={Chicago, IL}
-}
-
- at book{Herbst93,
-        author={Susan Herbst},
-	title={Numbered Voices: How Opinion Polling Has Shaped American Politics},
-	publisher={University of Chicago Press},
-	year={1993},
-	address={Chicago, IL}
-}
-
- at book{Ginsberg86,
-  title =	 {The Captive Public: How Mass Opinion Promotes State
-                  Power},
-  author =	 {Benjamin Ginsberg},
-  year =	 {1986},
-  publisher =	 {Basic Books},
-  address =	 {New York, NY}
-}
-
- at article{Blumer48,
-  title =	 {Public Opinion and Public Opinion Polling},
-  author =	 {Herbert Blumer},
-  journal =	 {American Sociological Review},
-  volume =	 {13},
-  number =	 {5},
-  pages =	 {542--549},
-  year =	 {1948}
-}
-
- at article{Converse87,
-  title =	 {{Changing Conceptions of Public Opinion in the
-                  Political Process}},
-  author =	 {Philip E. Converse},
-  journal =	 {The Public Opinion Quarterly},
-  volume =	 {51},
-  pages =	 {12--24},
-  year =	 {1987}
-}
-
- at article{LakBau01,
-  author =	 {David A. Lake and Matthew A. Baum},
-  title =	 {The Invisible Hand of Democracy: Political Control
-                  and the Provision of Public Services},
-  journal =	 {Comparative Political Studies},
-  volume =	 {34},
-  year =	 {2001},
-  pages =	 {587-621},
-  month =	 {August},
-  number =	 {6}
-}
-
- at article{IveSos06,
-  author =	 {Torben Iversen and David Soskice},
-  title =	 {Electoral Institutions and the Politics of
-                  Coalitions: Why Some Democracies Redistribute More
-                  Than Others},
-  journal =	 apsr,
-  volume =	 {100},
-  year =	 {2006},
-  pages =	 {165-181},
-  month =	 {May},
-  number =	 {2}
-}
-
- at article{Timmons05,
-  author =	 {Jeffrey F. Timmons},
-  title =	 {The Fiscal Contract: States, Taxes, and Public
-                  Services},
-  journal =	 {World Politics},
-  volume =	 {57},
-  year =	 {2005},
-  pages =	 {530-567},
-  month =	 {July},
-  number =	 {4}
-}
-
- at article{Ross06,
-	author={Michael Ross},
-	title={Is Democracy Good for the Poor?},
-	journal= ajps,
-	volume={50},
-	year={2006},
-	pages={860-874},
-	month={October},
-	number={4}
-}
-
- at unpublished{Spence07,
-	author={Matthew J. Spence},
-	title={Do Governments Spend More to Compensate for Openness?},
-	note={Working paper.},
-	year= 2007
-}
-
-
- at article{Rodrik98,
-	author={Dani Rodrik},
-	title={Why Do More Open Economies Have Bigger Governments?},
-	journal={Journal of Political Economy},
-	volume={106},
-	year={1998},
-	pages={997-1032},
-	month={October},
-	number={5}
-}
-
- at article{Fearon05,
-	author={James D. Fearon},
-	title={Primary Commodity Exports and Civil War},
-	journal={Journal of Conflict Resolution},
-	volume={49},
-	year={2005},
-	pages={483-507},
-	month={August},
-	number={4}
-}
-
- at article{ColHoe04,
-	author={Paul Collier and Anke Hoeffler},
-	title={Greed and Grievance in Civil War},
-	journal={Oxford Economic Papers},
-	volume={56},
-	year={2004},
-	pages={563-595},
-	month={October},
-	number={4}
-}
-
- at article{FeaLai03,
-  author =	 {James D. Fearon and David D. Laitin},
-  title =	 {Ethnicity, Insurgency, and Civil War},
-  journal =	 apsr,
-  volume =	 {97},
-  year =	 {2003},
-  pages =	 {75-90},
-  month =	 {February},
-  number =	 {1}
-}
-
- at article{Marinov05,
-  author =	 {Nikolay Marinov},
-  title =	 {Do Economic Sanctions Destabilize Country Leaders?},
-  journal =	 ajps,
-  volume =	 {49},
-  year =	 {2005},
-  pages =	 {564-576},
-  month =	 {July},
-  number =	 {3}
-}
-
- at book{Barro97,
-  author =	 {Robert J. Barro},
-  title =	 {Determinants of Economic Growth},
-  publisher =	 {MIT Press},
-  year =	 1997,
-  address =	 {Cambridge}
-}
-
- at article{ChaLin01,
-  author =	 {Chih-Chung Chang and Chih-Jen Lin},
-  title =	 {{LIBSVM}: a library for support vector machines},
-  year =	 {2001},
-  note =	 {{http://www.csie.ntu.edu.tw/~cjlin/libsvm}}
-}
-
- at article{BosLarGie03,
-  author =	 {Thomas J. Bossert and Osvaldo Larra{\~n}aga and Ursula
-                  Giedion and Jos\'e Jesus Arbelaez and Diana M. Bowser},
-  title =	 {Descentralizaci{\'o}n y distribuci{\'o}n equitativa
-                  de los recursos: evidencia obtenida en Colombia y
-                  Chile},
-  journal =	 {Bulletin of the World Health Organization},
-  volume =	 81,
-  year =	 2003,
-  pages =	 {95-100},
-  number =	 2
-}
-
- at article{Bowyer04,
-  author =	 {Tim Bowyer},
-  title =	 {Popular participation and the State: democratizing
-                  the health sector in rural Peru},
-  journal =	 {International Journal of Health Planning and
-                  Management},
-  volume =	 19,
-  year =	 2004,
-  pages =	 {131-161},
-}
- at article{CohPet97,
-  author =	 {John M. Cohen and Stephen B. Peterson},
-  title =	 {Administrative Decentralization: A New Framework for
-                  Improved Governance, Accountability, and
-                  Performance},
-  journal =	 {CID Development Discussion Paper 582},
-  year =	 1997,
-  pages =	 {1-37},
-  month =	 {July},
-}
- at book{DjuMac75,
-  title =	 {Alternative Approaches to Meeting Basic Health Needs
-                  in Developing Countries},
-  publisher =	 {World Health Organization},
-  year =	 1975,
-  editor =	 {V. Djukanovic and E.P. Mach},
-  address =	 {Geneva},
-}
-
- at book{Frenk95,
-  title =	 {Health and the Economy: Proposals for Progress in
-                  the Mexican Health System},
-  publisher =	 {Funsalud},
-  year =	 1995,
-  editor =	 {Julio Frenk},
-  address =	 {Mexico, D.F.},
-}
-
- at article{GaiKul02,
-  author =	 {Raghav Gaiha and Vani Kulkarni},
-  title =	 {Panchayats, Communities, and the Rural Poor in
-                  India},
-  journal =	 {Journal of Asian and African Studies},
-  volume =	 37,
-  year =	 2002,
-  pages =	 {131-161},
-}
- at article{GonLeyAta89,
-  author =	 {Miguel Gonz\'alez-Block and Ren\'e Leyva and Oscar
-                  Zapata and Ricardo Loewe and Javier Alag\'on},
-  title =	 {Health Services Decentralisation in Mexico:
-                  Formulation, Implementation and Results of Policy},
-  journal =	 {Health Policy and Planning},
-  year =	 1989,
-  pages =	 {301-315},
-  volume =	 4,
-  month =	 {July}
-}
-
- at book{KauNel04,
-  title =	 {Crucial Needs, Weak Incentives: social sector
-                  reform, democratization and globalization in Latin
-                  America},
-  publisher =	 {Wilson Center Press},
-  year =	 2004,
-  editor =	 {Robert R. Kaufman and Joan M. Nelson},
-  address =	 {Washington}
-}
-
- at article{Lloyd-Sherlock00,
-	author={Peter Lloyd-Sherlock},
-	title={Failing the needy: public social spending in Latin America},
-        journal={Journal of International Development},
-	year= 2000,
-	pages={101-119},
-        volume= 12,
-        month={July},
-}
- at article{LonFre97,
-	author={Juan Luis Londo\~no and Julio Frenk},
-	title={Structured Pluralism: towards an innovative model for health system reform in Latin America},
-        journal={Health Policy},
-	year=1997,
-	pages={1-36},
-        volume= 41,
-        month={July},
-}
-
- at inbook{Lustig94,
-	author={Nora Lustig},
-	title={Solidarity as a strategy of poverty alleviation},
-	year={1994},
-	publisher={Center for U.S.-Mexican Studies},
-	address={University of California, San Diego},
-        editor={Wayne Cornelius and Ann Craig and Jonathan Fox},
-}
-
- at article{Prudhomme95,
-	author={R\'emy Prud'homme},
-	title={The Dangers of Decentralization},
-        journal={The World Bank Research Observer},
-	year=1995,
-        volume= 10,
-        month={August},
-        pages={201-220},
-        number= 2,
-}
-
- at article{RawSheVan04,
-  author =	 {Laura Rawlings and Lynne Sherburne-Benz and Julie
-                  Van Domelen},
-  title =	 {Evaluating Social Funds: A Cross-Country Analysis of
-                  Community Investments},
-  journal =	 {World Bank Regional and Sectoral Studies},
-  year =	 2004,
-}
-
- at article{Shah97,
-  author =	 {Anwar Shah},
-  title =	 {Fostering Responsive and Accountable Governance:
-                  Lessons from Decentralization Experience},
-  publisher =	 {World Bank},
-  year =	 1997,
-  address =	 {Washington, DC},
-  annote =
-                  {{http://www1.worldbank.org/wbiep/decentralization/library3/shah.pdf}},
-}
-
- at article{Smoke01,
-  author =	 {Paul Smoke},
-  title =	 {Fiscal Decentralization in Developing Countries: A
-                  Review of Current Concepts and Practice},
-  journal =	 {Governance and Human Rights Programme Paper No. 2},
-  year =	 2001,
-  month =	 {February},
-  publisher =	 {UNRISD},
-  address =	 {Geneva},
-}
-
- at book{Snyder01,
-       author={Richard Snyder},
-       title={Politics after Neoliberalism},
-       publisher={Cambridge University Press},
-       address={Cambridge},
-       year=2001,
-}
-
- at book{Weyland04,
-  title =	 {Learning From Foreign Models in Latin American
-                  Policy Reform},
-  editor =	 {Kurt Weyland},
-  publisher =	 {Woodrow Wilson Center Press},
-  year =	 2004,
-  address =	 {Washington DC},
-}
-
- at book{Weyland96,
-  author =	 {Kurt Weyland},
-  title =	 {Democracy Without Equity: Failures of Reform in
-                  Brazil},
-  address =	 {Pittsburgh},
-  publisher =	 {University of Pittsburgh Press},
-  year =	 1996,
-}
- at unpublished{Wallach06,
-	author={Hanna M. Wallach},
-	title={Topic Modeling: Beyond Bag-of-Words},
-	note={{http://www.icml2006.org/icml_documents/camera-ready/123_Topic_Modeling_Beyon.pdf}},
-	year= 2006,
-}
- at unpublished{WanMccWei07,
-  author =	 {Xuerui Wang and Andrew McCallum and Xing Wei},
-  title =	 {Topical N-grams: Phrase and Topic Discovery, with an
-                  Application to Information Retrieval},
-  note =
-                  {{http://www.cs.umass.edu/%7Exuerui/papers/ngram_tr.pdf}},
-  year =	 2007,
-}
- at unpublished{GriSteBle04,
-  author =	 {Thomas L. Griffiths and Mark Steyvers and David
-                  M. Blei and Joshua B. Tenenbaum},
-  title =	 {Integrating Topics and Syntax},
-  note =
-                  {{http://books.nips.cc/papers/files/nips17/NIPS2004_0642.pdf}},
-  year =	 2004,
-}
- at unpublished{ScoMat99,
-  author =	 {Sam Scott and Stan Matwin},
-  title =	 {Feature Engineering for Text Classification},
-  note =
-                  {{http://www.ldv.uni-trier.de/ldvpage/naumann/textklassifikation/Textklassifikation/scott99feature.pdf}},
-  year =	 1999,
-}
- at unpublished{Sabastiani02,
-  author =	 {Fabrizio Sebastiani},
-  title =	 {Machine Learning in Automated Text Categorisation},
-  note =
-                  {{http://www.math.tau.ac.il/%7Eshimsh/Text_Domain/ACMCS00.pdf}},
-  year =	 2002,
-}
- at unpublished{BekAll03,
-  author =	 {Ron Bekkerman and James Allan},
-  title =	 {Using Bigrams in Text Categorization},
-  note =	 {{http://ciir.cs.umass.edu/pubfiles/ir-408.pdf}},
-  year =	 2003,
-}
- at unpublished{MosBas04,
-  author =	 {Alessandro Moschitti and Roberto Basili},
-  title =	 {Complex Linguistic Features for Text
-                  Classification: A Comprehensive Study},
-  note =
-                  {{http://dit.unitn.it/\~moschitt/articles/ECIR2004.pdf}},
-  year =	 2004,
-}
-
- at article{Manor95,
-  title =	 {{Democratic Decentralization in Africa and Asia}},
-  author =	 {Manor, J.},
-  journal =	 {IDS Bulletin},
-  volume =	 {26},
-  number =	 {2},
-  pages =	 {81--88},
-  year =	 {1995}
-}
-
- at article{BarMoo90,
-  title =	 {{Capture and Governance at Local and National
-                  Levels}},
-  author =	 {Bardhan, P. and Mookherjee, D.},
-  journal =	 {The American Economic Review},
-  volume =	 {90},
-  number =	 {2},
-  pages =	 {135--139},
-  year =	 {2000}
-}
-
- at book{SavLevBir06,
-  author =	 {William D. Savedoff and Ruth Levine and Nancy
-                  Birdsall},
-  title =	 {When Will We Ever Learn? Improving Lives Through
-                  Impact Evaluation},
-  publisher =	 {Center for Global Development},
-  year =	 2006,
-  note =
-                  {{http://www.cgdev.org/section/initiatives/\_active/evalgap}}
-}
-
- at article{GonGutSte06,
-  title =	 {Priority setting for health interventions in
-                  Mexico's System of Social Protection in Health},
-  author =	 {Gonz{\'a}lez-Pier, E. and Guti{\'e}rrez-Delgado,
-                  C. and Stevens, G. and Barraza-Llor{\'e}ns, M. and
-                  Porras-Condey, R. and Carvalho, N. and Loncich,
-                  K. and Dias, R.H. and Kulkarni, S. and Casey, A. and
-                  others},
-  journal =	 {The Lancet},
-  volume =	 {368},
-  number =	 {9547},
-  pages =	 {1608--1618},
-  year =	 {2006},
-  publisher =	 {Elsevier}
-}
-
- at article{DonKla94,
-  author =	 {Allan Donner and Neil Klar},
-  title =	 {Cluster Randomization Trials in Epidemiology: Theory
-                  and Application},
-  journal =	 {Journal of Statistical Planning and Inference},
-  volume =	 {42},
-  year =	 {1994},
-  pages =	 {37-56}
-}
-
- at article{BleLaf07,
-  author =	 {David M. Blei and John D. Lafferty},
-  title =	 {A Correlated Topic Model of Science},
-  journal =	 {The Annals of Applied Statistics},
-  volume =	 {1},
-  year =	 {2007},
-  pages =	 {17-35},
-  number =	 {1}
-}
-
- at article{BleNgJor03,
-  author =	 {David M. Blei and Andrew Y. Ng and Michael
-                  I. Jordan},
-  title =	 {Latent Dirichlet Allocation},
-  journal =	 {Journal of Machine Learning Research},
-  volume =	 {3},
-  year =	 {2003},
-  pages =	 {993-1022}
-}
-
- at inproceedings{GolZhu06,
-  author =	 {Andrew B. Goldberg and Xiaojin Zhu},
-  title =	 {Seeing Stars When There Aren't Many Stars:
-                  Graph-Based Semi-Supervised Learning for Sentiment
-                  Categorization},
-  booktitle =	 {HLT-NAACL 2006 Workshop on Textgraphs: Graph-based
-                  Algorithms for Natural Language Processing},
-  year =	 {2006},
-  address =	 {New York, NY},
-  url =		 {{http://www.cs.wisc.edu/\~jerryzhu/pub/sslsa.pdf}}
-}
-
- at inproceedings{Turney02,
-  author =	 {Peter Turney},
-  title =	 {Thumbs Up or Thumbs Down? Semantic Orientation
-                  Applied to Unsupervised Classification of Reviews},
-  booktitle =	 {Proceedings of ACL-02, 40th Annual Meeting of the
-                  Association for Computational Linguistics},
-  year =	 {2002},
-  pages =	 {417-424},
-  address =	 {Philadelphia, US},
-  url =		 {{http://www.aclweb.org/anthology/P02-1053.pdf}}
-}
-
- at inproceedings{YuHat03,
-  author =	 {Hong Yu and Vasileios Hatzivassiloglou},
-  title =	 {Towards Answering Opinion Questions: Separating
-                  Facts from Opinions and Identifying the Polarity of
-                  Opinion Sentences},
-  booktitle =	 {Proceedings of EMNLP-03, 8th Conference on Empirical
-                  Methods in Natural Language Processing},
-  year =	 {2003},
-  editor =	 {Michael Collins and Mark Steedman},
-  pages =	 {129-136},
-  address =	 {Sapporo, JP},
-  url =		 {{http://www.aclweb.org/anthology/W03-1017.pdf}}
-}
-
- at inproceedings{PopEtz05,
-  author =	 {Ana-Maria Popescu and Oren Etzioni},
-  title =	 {Extracting Product Features and Opinions from
-                  Reviews},
-  booktitle =	 {Proceedings of HLT-EMNLP-05, the Human Language
-                  Technology Conference / Conference on Empirical
-                  Methods in Natural Language Processing},
-  year =	 {2005},
-  pages =	 {339-346},
-  address =	 {Vancouver, CA},
-  url =		 {{http://www.acl.ldc.upenn.edu/H/H05/H05-1043.pdf}}
-}
-
- at inproceedings{BalPea94,
-  author =	 {Alexander Balke and Judea Pearl},
-  title =	 {Counterfactual Probabilities: Computational Methods,
-                  Bounds and Applications},
-  booktitle =	 {Proceedings of the Conference on Uncertainty in
-                  Artificial Intelligence (UAI-94)},
-  year =	 {1994},
-  month =	 {July},
-  address =	 {Seattle, WA},
-}
-
-
- at manual{Bates07,
-  author =	 {Douglas Bates},
-  title =	 {lme4: Fit linear and generalized linear
-                  mixed-effects models},
-  year =	 {2007},
-}
-
-
-
- at book{PinBat00,
-  author =	 {Jose C. Pinheiro and Douglas M. Bates},
-  title =	 {Mixed-Effects Models in S and S-PLUS},
-  publisher =	 {Springer},
-  year =	 {2000},
-  address =	 {New York}
-}
-
- at BOOK{BoxJon04,
-  AUTHOR =	 {Janet M. Box-Steffensmeier and Bradford S. Jones},
-  TITLE =	 {Event History Modeling: A Guide for Social
-                  Scientists},
-  PUBLISHER =	 {Cambridge University Press},
-  YEAR =	 {2004},
-}
-
- at BOOK{Huber81,
-  AUTHOR =	 {Peter J. Huber},
-  TITLE =	 {Robust Statistics},
-  PUBLISHER =	 {Wiley},
-  YEAR =	 {1981},
-}
-
- at ARTICLE{White80,
-  AUTHOR =       {Halbert White},
-  TITLE =        {A Heteroskedasticity-Consistent Covariance Matrix Estimator and a Direct Test for Heteroskedasticity},
-  JOURNAL =      {Econometrica},
-  YEAR =         {1980},
-  volume =       {48},
-  number =       {4},
-  pages =        {817--838},
-}
-
- at BOOK{TheGra00,
-  AUTHOR =	 {Terry M. Therneau and Patricia M. Grambsch},
-  TITLE =	 {Modeling Survival Data: Extending the Cox Model},
-  PUBLISHER =	 {Springer},
-  YEAR =	 {2000},
-}
-
- at book{Schoenhoven72,
-    author={Klaus Sch\"onhoven},
-    title={Die Bayerische Volkspartei 1924-1932},
-    publisher={Droste},
-    year= 1972,
-    address={D\"usseldorf}
-}
-
- at article{Geiger30,
-    author={Theodor Geiger},
-    title={Panik im Mittelstand},
-    journal={Die Arbeit},
-    year={1930},
-    pages={637-654},
-    number={10}
-}
-
- at article{Borchardt79,
-  author =	 {Knut Borchardt},
-  title =	 {Zwangslagen und Handlungsspielr\"aume in der
-                  gro\ss{}en Wirtschaftskrise der fr\"uhen
-                  drei\ss{}iger Jahre: Zur Revision des
-                  \"uberlieferten Geschichtsbildes},
-  journal =	 {Jahrbuch der Bayerischen Akademie der
-                  Wissenschaften},
-  year =	 {1979},
-  pages =	 {87-132}
-}
-
- at book{Kruedener90,
-  author =	 {J{\"u}rgen von Kruedener},
-  title =	 {Economic Crisis and Political Collapse: The Weimar
-                  Republic},
-  publisher =	 {Berg},
-  year =	 1990,
-  address =	 {Oxford}
-}
-
- at book{Barkai77,
-  author =	 {Avraham Barkai},
-  title =	 {Das Wirtschaftssystem des Nationalsozialismus},
-  publisher =	 {Berend von Nottbeck},
-  year =	 1977,
-  address =	 {K\"oln}
-}
-
- at book{Lipset60,
-  author =	 {Seymour Lipset},
-  title =	 {Political Man: The Social Bases of Politics},
-  publisher =	 {Johns Hopkins University Press},
-  address =	 {Baltimore},
-  year =	 1960
-}
-
- at article{Temin91,
-  author =	 {Temin, Peter},
-  title =	 {Soviet and Nazi Planning in the 1930s},
-  journal =	 {Economic History Review},
-  year =	 {1991},
-  volume =	 {44},
-  pages =	 {573-593}
-}
-
- at article{Palyi41,
-    author={Palyi, Melchior},
-    title={Economic Foundations of the German Totalitarian State},
-    journal={American Journal of Sociology},
-    volume= 46,
-    year= 1941,
-    pages={469-486},
-    number= 4
-}
-
- at article{Abelshauser99,
-    author={Abelshauser, Werner},
-    title={Kriegswirtschaft und Wirtschaftswunder},
-    journal={Vierteljahrshefte f\"ur Zeitgeschichte},
-    year={1999},
-    pages={503-38}
-}
-
- at book{Kretschmar33,
-    author={Hans Kretschmar},
-    title={Deutsche Agrarprogramme der Nachkriegszeit},
-    publisher={Junker und D\"unnhaupt},
-    year= 1933,
-    address={Berlin}
-}
-
- at book{Ruppert92,
-    author={Karsten Ruppert},
-    title={Im Dienst am Staat von Weimar: Das Zentrum als regierende Partei in der Weimarer Demokratie 1923-1930},
-    publisher={Droste},
-    year= 1992,
-    address={D\"usseldorf}
-}
-
- at article{BucSch06,
-  title =	 {{The Role of Private Property in the Nazi Economy:
-                  The Case of Industry}},
-  author =	 {Christoph Buchheim and Jonas Scherner},
-  journal =	 {The Journal of Economic History},
-  volume =	 {66},
-  number =	 {02},
-  pages =	 {390--416},
-  year =	 {2006},
-  publisher =	 {Cambridge University Press}
-}
-
- at book{Hemmer35,
-  title =	 {{Die unsichtbaren Arbeitslosen: Statistische
-                  Methoden, Soziale Tatsachen}},
-  author =	 {Hemmer, W.},
-  publisher =	 {Bernhard Sporn, Buchdruckerei und Verlagsanstalt},
-  address =	 {Zeulenroda},
-  year =	 {1935}
-}
-
- at book{Plum72,
-  author =	 {Plum, G\"unter},
-  title =	 {Gesellschaftsstruktur und politisches Bewusstsein in
-                  einer katholischen Region 1928-1933: Untersuchung am
-                  Beispiel des Regierungsbezirks Aachen},
-  publisher =	 {Deutsche Verlags-Anstalt},
-  year =	 1972,
-  address =	 {Stuttgart}
-}
-
- at Article{HerTre07,
-  author =	 {Claudia Herrera and Jesus Trevi{\~n}o},
-  title =	 {Aplaza Calderón hasta 2030 la meta sobre un sistema
-                  universal de salud},
-  journal =	 {La Jornada},
-  year =	 2007,
-  month =	 {October 6}
-}
-
- at Article{Frenk05,
-  author =	 {Julio Frenk},
-  title =	 {Sistema de Protecci\'{o}n Social en Salud, Elementos
-                  conceptuales, financieros, y operativos},
-  journal =	 {Secretaria de Salud},
-  year =	 2005,
-  note =	 {Mexico City}
-}
-
- at InCollection{Ritschl92,
-  author =	 {Albrecht Ritschl},
-  title =	 {Die Wirtschaftspolitik des Dritten Reichs: Ein
-                  {\"U}berblick},
-  booktitle =	 {Deutschland 1933-1945. Neue Studien zur
-                  nationalsozialistischen Herrschaft},
-  year =	 1992,
-  editor =	 {Karl-Dietrich Bracher and M. Funke and
-                  H.-A. Jacobsen},
-  address =	 {D\"{u}sseldorf},
-  publisher =	 {Droste}
-}
-
- at Book{James86,
-  author =	 {Harold James},
-  title =	 {The German Slump: Politics and Economics, 1924-1936},
-  publisher =	 {Clarendon Press},
-  year =	 1986,
-  address =	 {Oxford}
-}
- at inbook{Brady04b,
-	author={Henry E. Brady},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Doing Good and Doing Better: How Far Does the Quantitative Template Get Us?},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{Munck04,
-	author={Gerardo L. Munck},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Tools for Qualitative Research},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{Rogowski04,
-	author={Ronald Rogowski},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={How Inference in the Social (but Not the Physical) Sciences Neglects Theoretical Anomaly},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{Bartels04,
-	author={Larry M. Bartels},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Some Unfulfilled Promises of Quantitative Imperialism},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{BraColSea04,
-	author={Henry E. Brady and David Collier and Jason Seawright},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Refocusing the Discussion of Methodology},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{ColBraSea04,
-	author={David Collier and Henry E. Brady and Jason Seawright},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Critiques, Responses, and Trade-Offs: Drawing Together the Debate},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{ColBraSea04b,
-	author={David Collier and Henry E. Brady and Jason Seawright},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Sources of Leverage in Causal Inference: Toward an Alternative View of Methodology},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at inbook{ColMahSea04,
-	author={David Collier and James Mahoney and Jason Seawright},
-	title={Rethinking Social Inquiry: Diverse Tools, Shared Standards},
-	chapter={Claiming Too Much: Warnings about Selection Bias},
-	year={2004},
-	publisher={Rowman and Littlefield},
-	address={Lanham, MD},
-	editor={H.E. Brady and D. Collier}
-}
- at article{Brady04,
-  title =	 {Symposium: Two Paths to a Science of Politics},
-  author =	 {Henry E. Brady},
-  journal =	 {Perspectives on Politics},
-  volume =	 {2},
-  pages =	 {295--300},
-  year =	 {2004}
-}
- at article{Fischer98,
-  title =	 {Beyond Empiricism: Policy Inquiry in Postpositivist Perspective},
-  author =	 {Frank Fischer},
-  journal =	 {Policy Studies Journal},
-  volume =	 {26},
-  number =       {1},
-  pages =	 {129--146},
-  year =	 {1998}
-}
- at article{Wong02,
-  title =	 {Did how we learn affect what we learn? Methodological bias, multimethod research and the case of economic development},
-  author =	 {Wilson Wong},
-  journal =	 {The Social Science Journal},
-  volume =	 {39},
-  pages =	 {247--264},
-  year =	 {2002}
-}
- at article{Tilly01,
-  title =	 {Mechanisms in Political Processes},
-  author =	 {Charles Tilly},
-  journal =	 {Annual Review of Political Science},
-  volume =	 {4},
-  pages =	 {21--41},
-  year =	 {2001}
-}
- at article{CarPan05,
-  title =	 {TQCA: A Technique for Adding Temporality to
-                  Qualitative Comparative Analysis},
-  author =	 {Neal Caren and Aaron Panofsky},
-  journal =	 {Sociological Methods and Research},
-  volume =	 {34},
-  pages =	 {147},
-  year =	 {2005}
-}
- at article{Ebbinghaus05,
-  title =	 {When Less is More: Selection Problems in Large-N
-                  and Small-N Cross-National Comparisons},
-  author =	 {Bernhard Ebbinghaus},
-  journal =	 {International Sociology},
-  volume =	 {20},
-  number =	 {2},
-  Month =	 {June},
-  pages =	 {133--152},
-  year =	 {2005}
-}
- at article{Tarrow95,
-  title =	 {Bridging the Quantitative-Qualitative Divide in
-                  Political Science},
-  author =	 {Sidney Tarrow},
-  journal =	 {American Political Science Review},
-  volume =	 {89},
-  number =	 {2},
-  Month =	 {June},
-  pages =	 {471--474},
-  year =	 {1995}
-}
- at article{Tickner05,
-  title =	 {What Is Your Research Program? Some Feminist Answers
-                  to International Relations Methodological Questions},
-  author =	 {J. Ann Tickner},
-  journal =	 {International Studies Quarterly},
-  volume =	 {49},
-  pages =	 {1--21},
-  year =	 {2005}
-}
- at article{Ragin97,
-  title =	 {Turning the Tables: How Case-Oriented Research
-                  Challenges Variable-Oriented Research},
-  author =	 {Charles C. Ragin},
-  journal =	 {Comparative Social Research},
-  volume =	 {16},
-  pages =	 {27--42},
-  year =	 {1997}
-}
- at article{Abelson01,
-  author =	 {Julia Abelson},
-  title =	 {Understanding the Role of Contextual Influences on
-                  Local Health-Care Decision Making: Case Study Results
-                  from Ontario, Canada},
-  journal =	 {Social Science & Medicine},
-  volume =	 {53},
-  year =	 {2001},
-  pages =	 {777--793}
-}
- at article{Achen05,
-  author =	 {Christopher H. Achen},
-  title =	 {Two Cheers for Charles Ragin},
-  journal =	 {Studies in Comparative International Development},
-  volume =	 {40},
-  year =	 {2005},
-  pages =	 {27--32},
-  month =	 {Spring},
-  number =	 {1}
-}
- at article{Adler97,
-  author =	 {Emanuel Adler},
-  title =	 {Seizing the Middle Ground: Constructivism in World
-                  Politics},
-  journal =	 {European Journal of International Relations},
-  volume =	 {3},
-  year =	 {1997},
-  pages =	 {319-363},
-  number =	 {3}
-}
- at article{Agrawal01,
-  author =	 {Arun Agrawal},
-  title =	 {Common Property Institutions and Sustainable
-                  Governance of Resources},
-  journal =	 {World Development},
-  volume =	 {29},
-  year =	 {2001},
-  pages =	 {1649--1672},
-  number =	 {10}
-}
- at article{Agrawal03,
-  author =	 {Arun Agrawal},
-  title =	 {Sustainable Governance of Common-Pool Resources:
-                  Context, Methods, and Politics},
-  journal =	 {Annual Review of Anthropology},
-  volume =	 {32},
-  year =	 {2003},
-  pages =	 {243--262}
-}
- at article{AmeHalYou99,
-  author =	 {Edwin Amenta and Drew Halfmann and Michael P. Young},
-  title =	 {The Strategies and Contexts of Social
-                  Protest: Political Mediation and the Impact of the
-                  Townsend Movement in California},
-  journal =	 {Mobilization: An International Journal},
-  volume =	 {4},
-  year =	 {1999},
-  pages =	 {1--23},
-  number =	 {1}
-}
- at unpublished{Andersen03,
-  author =	 {Svein S. Andersen},
-  title =	 {On a Clear Day You Can See the EU. Case Study
-                  Methodology in EU Research},
-  note =
-                  {{http://www.arena.uio.no/publications/working-papers2003/papers/03_16.xml}},
-  year =	 2003,
-}
- at unpublished{AndWet01,
-  author =	 {Steinar Andresen and Joergen Wettestad},
-  title =	 {Case studies of the effectiveness of international
-                  environmental regimes: Balancing textbook ideals and
-                  feasibility concerns},
-  note =	 {{http://www.fni.no/doc&.pdf/rapp1901.pdf}},
-  year =	 {2001}
-}
- at article{AspSch00,
-  author =	 {Mark D. Aspinwall and Gerald Schneider},
-  title =	 {Same Menu, Separate Tables: The Institutionalist
-                  Turn in Political Science and the Study of European
-                  Integration},
-  journal =	 {European Journal of Political Research},
-  volume =	 {38},
-  year =	 {2000},
-  pages =	 {1--36}
-}
- at article{Bartle00,
-  author =	 {John Bartle},
-  title =	 {Political Awareness, Opinion Constraining and the
-                  Stability of Ideological Positions},
-  journal =	 {Political Studies},
-  volume =	 {48},
-  year =	 {2000},
-  pages =	 {467--484}
-}
- at article{Bartle03,
-  author =	 {John Bartle},
-  title =	 {Partisanship, Performance and Personality: Competing
-                  and Complementary Characterizations of the 2001
-                  British General Election},
-  journal =	 {Party Politics},
-  volume =	 {9},
-  year =	 {2003},
-  pages =	 {317--345},
-  number =	 {3}
-}
- at article{Beck06,
-  author =	 {Nathaniel Beck},
-  title =	 {Causal Process ``Observation'': Oxymoron or Old
-                  Wine},
-}
- at article{Bellin00,
-  author =	 {Eva Bellin},
-  title =	 {Contingent Democrats: Industrialists, Labor, and
-                  Democratization in Late-Developing Countries},
-  journal =	 {World Politics},
-  volume =	 {52},
-  year =	 {2000},
-  pages =	 {175-205},
-  month =	 {January}
-}
- at article{BelMacTha01,
-  author =	 {Duncan S.A. Bell and Paul K. MacDonald and Bradley
-                  A. Thayer},
-  title =	 {Start the Evolution Without Us},
-  journal =	 {International Security},
-  volume =	 {26},
-  year =	 {2001},
-  pages =	 {187--198},
-  month =	 {Summer},
-  number =	 {1}
-}
- at article{BerLebSte00,
-  author =	 {Steven Bernstein and Richard Ned Lebow and Janice
-                  Gross Stein and Steven Weber},
-  title =	 {God Gave Physics the Easy Problems: Adapting Social
-                  Science to an Unpredictable World},
-  journal =	 {European Journal of International Relations},
-  volume =	 {6},
-  year =	 {2000},
-  pages =	 {43--76},
-  number =	 {1}
-}
- at article{Berman97,
-  author =	 {Sheri Berman},
-  title =	 {Civil Society and the Collapse of the Weimar
-                  Republic},
-  journal =	 {World Politics},
-  volume =	 {49},
-  year =	 {1997},
-  pages =	 {401--429},
-  number =	 {3}
-}
- at article{BlaDob98,
-  author =	 {Andr\'{e} Blais and Agnieszka Dobrzynska},
-  title =	 {Turnout in electoral democracies},
-  journal =	 {European Journal of Political Research},
-  volume =	 {33},
-  year =	 {1998},
-  pages =	 {239--261},
-}
- at article{BogCatKel00,
-  author =	 {Laura M. Bogart and Sheryl L. Catz and Jeffrey
-                  A. Kelly and Michelle L. Gray-Bernhardt and Barbara
-                  R. Hartmann and Laura L. Otto-Salaj and Kristin
-                  L. Hackl and Frederick R. Bloom},
-  title =	 {Psychosocial Issues in the Era of New AIDS
-                  Treatments from the Perspective of Persons Living
-                  with HIV},
-  journal =	 {Journal of Health Psychology},
-  volume =	 {5},
-  year =	 {2000},
-  pages =	 {500--516},
-  number =	 {4}
-}
-
- at article{Bostrom03,
-  author =	 {Magnus Bostr{\"o}m},
-  title =	 {How State-Dependent is a Non-State-Driven
-                  Rule-Making Project? The Case of Forest
-                  Certification in Sweden},
-  journal =	 {Journal of Environmental Policy & Planning},
-  volume =	 {5},
-  year =	 {2003},
-  pages =	 {165--180},
-  number =	 {2}
-}
-
- at article{Brady07,
-  author =	 {Henry E. Brady},
-  title =	 {Using a Simple Model of Decision-Making to Select
-                  and Understand Cases}
-}
- at article{BraLanHal00,
-  author =	 {Paul Brace and Laura Langer and Melinda Gann Hall},
-  title =	 {Measuring the Preferences of State Supreme Court
-                  Judges},
-  journal =	 {The Journal of Politics},
-  volume =	 {62},
-  year =	 {2000},
-  pages =	 {387--413},
-  month =	 {May},
-  number =	 {2}
-}
- at article{BraOhr99,
-  author =	 {Ann P. Branch and Jakob C. Ohrgaard},
-  title =	 {Trapped in the Supranational-Intergovernmental
-                  Dichotomy: A Response to Stone Sweet and Sandholtz},
-  journal =	 {Journal of European Public Policy},
-  volume =	 {6},
-  year =	 {1999},
-  pages =	 {123--143},
-  number =	 {1}
-}
- at unpublished{Braumoeller99,
-  author =	 {Bear F. Braumoeller},
-  title =	 {Statistical Estimation in the Presence of Multiple
-                  Causal Paths},
-  note =	 {paper prepared for the annual meeting of the Midwest
-                  Political Science Association, Chicago, IL, April
-                  15-17, 1999.},
-  year =	 {1999}
-}
- at article{Braumoeller03,
-  author =	 {Bear F. Braumoeller},
-  title =	 {Causal Complexity and the Study of Politics},
-  journal =	 {Political Analysis},
-  volume =	 {11},
-  year =	 {2003},
-  pages =	 {209-233}
-}
- at article{Brecher99,
-  author =	 {Michael Brecher},
-  title =	 {International Studies in the Twentieth Century and
-                  Beyond: Flawed Dichotomies, Synthesis, Cumulation:
-                  ISA Presidential Address},
-  journal =	 {International Studies Quarterly},
-  volume =	 {43},
-  year =	 {1999},
-  pages =	 {213-264},
-  month =	 {June},
-  number =	 {2}
-}
- at article{BreKerPet03,
-  author =	 {Mark D. Brewer and Rogan Kersh and R. Eric Petersen},
-  title =	 {Assessing Conventional Wisdom about Religion and
-                  Politics: A Preliminary View from the Pews},
-  journal =	 {Journal for the Scientific Study of Religion},
-  volume =	 {42},
-  year =	 {2003},
-  pages =	 {125--136},
-}
- at article{Burden05,
-  author =	 {Barry C. Burden},
-  title =	 {Ralph Nader's Campaign Strategy in the 2000
-                  U.S. Presidential Election},
-  journal =	 {American Politics Research},
-  volume =	 {33},
-  year =	 {2005},
-  pages =	 {672--699},
-  month =	 {September},
-  number =	 {5}
-}
- at article{Buthe02,
-  author =	 {Tim B{\"u}the},
-  title =	 {Taking Temporality Seriously: Modeling History and
-                  the Use of Narratives as Evidence},
-  journal =	 {American Political Science Review},
-  volume =	 {96},
-  year =	 {2002},
-  pages =	 {481--493},
-  month =	 {September},
-  number =	 {3}
-}
-
- at article{CamLapRie00,
-  author =	 {Charles Cameron and John S. Lapinski and Charles
-                  R. Riemann},
-  title =	 {Testing Formal Theories of Political Rhetoric},
-  journal =	 {The Journal of Politics},
-  volume =	 {62},
-  year =	 {2000},
-  pages =	 {187--205},
-  month =	 {February},
-  number =	 {1}
-}
- at article{Caporaso95,
-  author =	 {James A. Caporaso},
-  title =	 {Review: Research Design, Falsification, and the
-                  Qualitative-Quantitative Divide},
-  journal =	 {American Political Science Review},
-  volume =	 {89},
-  year =	 {1995},
-  pages =	 {457--460},
-  month =	 {June},
-  number =	 {2}
-}
- at article{ChiRot03,
-  author =	 {Fang-Yi Chiou and Lawrence S. Rothenberg},
-  title =	 {When Pivotal Politics Meets Partisan Politics},
-  journal =	 {American Journal of Political Science},
-  volume =	 {47},
-  year =	 {2003},
-  pages =	 {503--522},
-  month =	 {July},
-  number =	 {3}
-}
- at article{ClaGilGol06,
-  author =	 {William Roberts Clark and Michael J. Gilligan and
-                  Matt Golder},
-  title =	 {A Simple Multivariate Test for Asymmetric
-                  Hypotheses},
-  journal =	 {Political Analysis},
-  volume =	 {14},
-  year =	 {2006},
-  pages =	 {311--331}
-}
- at article{Clarke05,
-  author =	 {Kevin A. Clarke},
-  title =	 {The Phantom Menace: Omitted Variable Bias in
-                  Econometric Research},
-  journal =	 {Conflict Management and Peace Science},
-  volume =	 {22},
-  year =	 {2005},
-  pages =	 {341--352},
-  number =	 {4}
-}
- at article{Collier95,
-  author =	 {David Collier},
-  title =	 {Review: Translating Quantitative Methods for
-                  Qualitative Researchers: the Case of Selection Bias},
-  journal =	 {American Political Science Review},
-  volume =	 {89},
-  year =	 {1995},
-  pages =	 {461--466},
-  month =	 {June},
-  number =	 {2}
-}
- at article{Denrell03,
-  author =	 {Jerker Denrell},
-  title =	 {Vicarious Learning, Undersampling of Failure, and
-                  the Myths of Management},
-  journal =	 {Organization Science},
-  volume =	 {14},
-  year =	 {2003},
-  pages =	 {227--243},
-  month =	 {May-June},
-  number =	 {3}
-}
- at article{DerBou04,
-  author =	 {Mark de Rond and Hamid Bouchikhi},
-  title =	 {On the Dialectics of Strategic Alliances},
-  journal =	 {Organization Science},
-  volume =	 {15},
-  year =	 {2004},
-  pages =	 {56-69},
-  month =	 {January-February},
-  number =	 {1}
-}
- at article{DesFinHen00,
-  author =	 {Laura Desimone and Matia Finn-Stevenson and
-                  Christopher Henrich},
-  title =	 {Whole School Reform in a Low-Income African
-                  American Community: The Effects of the CoZi Model on
-                  Teachers, Parents, and Students},
-  journal =	 {Urban Education},
-  volume =	 {35},
-  year =	 {2000},
-  pages =	 {269}
-}
- at article{DeSoysa02,
-  author =	 {Indra de Soysa},
-  title =	 {Ecoviolence: Shrinking Pie, or Honey Pot?},
-  journal =	 {Global Environmental Politics},
-  volume =	 {2},
-  year =	 {2002},
-  pages =	 {1--34},
-  month =	 {November},
-  number =	 {4}
-}
- at article{DeSoysa02b,
-  author =	 {Indra De Soysa},
-  title =	 {Paradise Is a Bazaar? Greed, Creed and Governance in
-                  Civil War, 1989-99},
-  journal =	 {Journal of Peace Research},
-  volume =	 {39},
-  year =	 {2002},
-  pages =	 {395--416},
-  number =	 {4}
-}
- at article{DicLev99,
-  author =	 {Jonathan M. DiCicco and Jack S. Levy},
-  title =	 {Power Shifts and Problem Shifts},
-  journal =	 {Journal of Conflict Resolution},
-  volume =	 {43},
-  year =	 {1999},
-  pages =	 {675--704},
-  month =	 {December},
-  number =	 {6}
-}
- at article{Dorussen01,
-  author =	 {Han Dorussen},
-  title =	 {Mixing Carrots with Sticks: Evaluating the
-                  Effectiveness of Positive Incentives},
-  journal =	 {Journal of Peace Research},
-  volume =	 {38},
-  year =	 {2001},
-  pages =	 {251--262},
-  number =	 {2}
-}
- at article{Dowding01,
-  author =	 {Keith Dowding},
-  title =	 {There Must Be End to Confusion: Policy Networks,
-                  Intellectual Fatigue, and the Need for Political
-                  Science Methods Courses in British Universities},
-  journal =	 {Political Studies},
-  volume =	 {49},
-  year =	 {2001},
-  pages =	 {89--105}
-}
- at article{Druckman04,
-  author =	 {James N. Druckman},
-  title =	 {Political Preference Formation: Competition,
-                  Deliberation, and the (Ir)relevance of Framing
-                  Effects},
-  journal =	 {American Political Science Review},
-  volume =	 {98},
-  year =	 {2004},
-  pages =	 {671--686},
-  month =	 {November},
-  number =	 {4}
-}
- at article{Elgie04,
-  author =	 {Robert Elgie},
-  title =	 {Semi-Presidentialism: Concepts, Consequences and
-                  Contesting Explanations},
-  journal =	 {Political Studies Review},
-  volume =	 {2},
-  year =	 {2004},
-  pages =	 {314--330}
-}
- at article{ElmElm02,
-  author =	 {Colin Elman and Miriam Fendius Elman},
-  title =	 {How Not to Be Lakatos Intolerant: Appraising
-                  Progress in IR Research},
-  journal =	 {International Studies Quarterly},
-  volume =	 {46},
-  year =	 {2002},
-  pages =	 {231--262}
-}
- at article{BerLor99,
-  author =	 {Bernard I. Finel and Kristin M. Lord},
-  title =	 {The Surprising Logic of Transparency},
-  journal =	 {International Studies Quarterly},
-  volume =	 {43},
-  year =	 {1999},
-  pages =	 {315--339},
-  month =	 {June},
-  number =	 {2}
-}
- at article{Forster98,
-  author =	 {Anthony Forster},
-  title =	 {Britain and the Negotiation of the Maastricht
-                  Treaty: A Critique of Liberal Intergovernmentalism},
-  journal =	 {Journal of Common Market Studies},
-  volume =	 {36},
-  year =	 {1998},
-  pages =	 {347--368},
-  month =	 {September},
-  number =	 {3}
-}
- at article{Fricke03,
-  author =	 {Tom Fricke},
-  title =	 {Culture and Causality: An Anthropological Comment},
-  journal =	 {Population and Development Review},
-  volume =	 {29},
-  year =	 {2003},
-  pages =	 {470--479},
-  month =	 {September},
-  number =	 {3}
-}
- at article{From02,
-  author =	 {Johan From},
-  title =	 {Decision-making in a complex environment: A
-                  sociological institutionalist analysis of
-                  competition policy decision-making in the European
-                  Commission},
-  journal =	 {Journal of European Public Policy},
-  volume =	 {9},
-  year =	 {2002},
-  pages =	 {219--237},
-  number =	 {2}
-}
- at article{Galaz05,
-  author =	 {Victor Galaz},
-  title =	 {Social-ecological Resilience and Social Conflict:
-                  Institutions and Strategic Adaptation in Swedish
-                  Water Management},
-  journal =	 {Ambio},
-  volume =	 {34},
-  year =	 {2005},
-  pages =	 {567--572},
-  month =	 {November},
-  number =	 {7}
-}
-
- at Article{MadHofKup07,
-  author =	 {Temina Madon and Karen J. Hofman and Linda Kupfer
-                  and Roger I. Glass},
-  title =	 {Implementation Science},
-  journal =	 {Science},
-  year =	 {2007},
-  volume =	 {318},
-  pages =	 {1728--1729},
-  month =	 {14 December}
-}
-
- at Article{Steele05,
-  author = 	 {J. Michael Steele},
-  title = 	 {Darrell Huff and Fifty Years of \emph{How to Lie
-                  With Statistics}},
-  journal = 	 {Statistical Science},
-  year = 	 2005,
-  volume =	 20,
-  number =	 3,
-  pages =	 {205--209}
-}
-
- at book{Huff54,
-  title =	 {{How to Lie With Statistics}},
-  author =	 {Darrell Huff},
-  year =	 {1954},
-  address =	 {New York},
-  publisher =	 {WW Norton \& Company}
-}
-
- at article{HeiRub91,
-  title =	 {{Ignorability and Coarse Data}},
-  author =	 {Heitjan, D.F. and Rubin, D.B.},
-  journal =	 {The Annals of Statistics},
-  volume =	 {19},
-  number =	 {4},
-  pages =	 {2244--2253},
-  year =	 {1991}
-}
-
- at Article{Izenman91,
-  author = 	 {Alan Julian Izenman},
-  title = 	 {Recent developments in nonparametric density estimation},
-  journal = 	 {Journal of the American Statistical Association},
-  year = 	 1991,
-  volume =	 86,
-  number =	 413,
-  pages =	 {205--224}
-}
-
- at article{Gartzke99,
-  title =	 {War is in the Error Term},
-  author =	 {Erik Gartzke},
-  journal =	 {International Organization},
-  volume =	 {53},
-  year =	 {1999},
-  pages =	 {567--587},
-  month =	 {Summer},
-  number =	 {3}
-}
-
- at article{GauLie04,
-  author =	 {Varun Gauri and Evan S. Lieberman},
-  title =	 {Institutions, Social Boundaries, and Epidemics:
-                  Explaining Government AIDS Policies in Brazil and
-                  South Africa}
-}
-
- at article{GelGri01,
-  author =	 {Christopher Gelpi and Joseph M. Grieco},
-  title =	 {Democracy, Leadership Tenure, and the Targeting of
-                  Militarized Challenges, 1918--1992},
-  journal =	 {Journal of Conflict Resolution},
-  volume =	 {45},
-  year =	 {2001},
-  pages =	 {794--817},
-  month =	 {December},
-  number =	 {6}
-}
-
- at article{GerBar03,
-  author =	 {John Gerring and Paul A. Barresi},
-  title =	 {Putting Ordinary Language to Work: A Min-Max
-                  Strategy of Concept Formation in the Social
-                  Sciences},
-  journal =	 {Journal of Theoretical Politics},
-  volume =	 {15},
-  year =	 {2003},
-  pages =	 {201--232},
-  number =	 {2}
-}
-
- at article{GerGreKap03,
-  author =	 {Alan S. Gerber and Donald P. Green and Edward
-                  H. Kaplan},
-  title =	 {The Illusion of Learning from Observational
-                  Research},
-  year =	 {2003}
-}
-
- at article{GerMcD07,
-  author =	 {John Gerring and Rose McDermott},
-  title =	 {An Experimental Template for Case Study Research},
-  journal =	 {American Journal of Political Science},
-  volume =	 {51},
-  year =	 {2007},
-  pages =	 {688--701},
-  month =	 {July},
-  number =	 {3}
-}
-
- at article{Gerring04,
-  author =	 {John Gerring},
-  title =	 {What Is a Case Study and What Is It Good for?},
-  journal =	 {American Political Science Review},
-  volume =	 {98},
-  year =	 {2004},
-  pages =	 {341--354},
-  month =	 {May},
-  number =	 {2}
-}
-
- at article{Gerring05,
-  author =	 {John Gerring},
-  title =	 {A Unified Framework for the Social Sciences},
-  journal =	 {Journal of Theoretical Politics},
-  volume =	 {17},
-  year =	 {2005},
-  pages =	 {163--198},
-  number =	 {2}
-}
-
- at article{Gilardi01,
-  author =	 {Fabrizio Gilardi},
-  title =	 {Policy Credibility and Delegation of Regulatory
-                  Competencies to Independent Agencies: A Comparative
-                  Empirical Consideration},
-  year =	 {2001}
-}
-
- at article{Gilardi02,
-  author =	 {Fabrizio Gilardi},
-  title =	 {Policy Credibility and Delegation to Independent
-                  Regulatory Agencies: A Comparative Empirical
-                  Analysis},
-  journal =	 {Journal of European Public Policy},
-  volume =	 {9},
-  year =	 {2002},
-  pages =	 {873--893},
-  month =	 {December},
-  number =	 {6}
-}
-
- at article{Glaser02,
-  author =	 {Barney G. Glaser},
-  title =	 {Conceptualization: On Theory and Theorizing Using
-                  Grounded Theory},
-  journal =	 {International Journal of Qualitative Methods},
-  volume =	 {1},
-  year =	 {2002},
-  note =	 {{Article 3 from http://www.ualberta.ca/~ijqm/}},
-  month =	 {Spring},
-  number =	 {2}
-}
-
- at article{GoeLev05,
-  author =	 {Gary Goertz and Jack S. Levy},
-  title =	 {Causal Explanations, Necessary Conditions, and Case
-                  Studies: World War I and the End of the Cold War}
-}
-
- at article{Goerzen05,
-  author =	 {Anthony Goerzen},
-  title =	 {Managing Alliance Networks: Emerging Practices of
-                  Multinational Corporations},
-  journal =	 {Academy of Management Executive},
-  volume =	 {19},
-  year =	 {2005},
-  pages =	 {94--107},
-  number =	 {2}
-}
-
- at article{Golder03,
-  author =	 {Matt Golder},
-  title =	 {Explaining Variation in the Success of Extreme Right
-                  Parties in Western Europe},
-  journal =	 {Comparative Political Studies},
-  volume =	 {36},
-  year =	 {2003},
-  pages =	 {432--466},
-  month =	 {May},
-  number =	 {4}
-}
- at article{Grendstad99,
-  author =	 {Gunnar Grendstad},
-  title =	 {A Political Cultural Map of Europe. A Survey Approach},
-  journal =	 {GeoJournal},
-  volume =	 {47},
-  year =	 {1999},
-  pages =	 {463--475}
-}
-
- at article{Grigorian05,
-  author =	 {Arman Grigorian},
-  title =	 {Third-Party Intervention and Escalation in Kosovo:
-                  Does Moral Hazard Explain it?},
-  journal =	 {Ethnopolitics},
-  volume =	 {4},
-  year =	 {2005},
-  pages =	 {195--213},
-  number =	 {2}
-}
-
- at article{Guzzini01,
-  author =	 {Stefano Guzzini},
-  title =	 {The Significance and Roles of Teaching Theory in
-                  International Relations},
-  journal =	 {Journal of International Relations and Development},
-  volume =	 {4},
-  year =	 {2001},
-  pages =	 {98--117},
-  number =	 {2}
-}
-
- at article{Gwako97,
-  author =	 {Edwins Laban Moogi Gwako},
-  title =	 {Conjugal Power in Rural Kenya Families: Its
-                  Influence on Women's Decisions About Family Size and
-                  Family Planning Practices},
-  journal =	 {Sex Roles},
-  volume =	 {36},
-  year =	 {1997},
-  pages =	 {127--147},
-  month =	 {February},
-  number =	 {3/4}
-}
-
- at article{Hansen98,
-  author =	 {Kenneth N. Hansen},
-  title =	 {Identifying Facets of Democratic Administration: The
-                  Empirical Referents of Discourse},
-  journal =	 {Administration \& Society},
-  volume =	 {30},
-  year =	 {1998},
-  pages =	 {443--461},
-  month =	 {September},
-  number =	 {4}
-}
-
- at article{Harcourt00,
-  author =	 {Bernard E. Harcourt},
-  title =	 {After the ``Social Meaning Turn'': Implications for Research Design and Methods of Proof in Contemporary Criminal Law Policy Analysis},
-  journal =	 {Law \& Society Review},
-  volume =	 {34},
-  year =	 {2000},
-  pages =	 {179--211},
-  number =	 {1}
-}
-
- at article{Haverland03,
-  author =	 {Markus Haverland},
-  title =	 {Methodological Issues in Europeanisation Research:
-                  the `No Variation' problem},
-  note =	 {Prepared for presentation at the section
-                  `Europeanisation: Challenges of a New Research
-                  Agenda', panel `Europeanisation: Concepts and
-                  Methods', ECPR Conference, Marburg, 18--21 September,
-                  2003},
-  year =	 {2003},
-  month =	 {September}
-}
-
- at article{Haverland06,
-  author =	 {Markus Haverland},
-  title =	 {Does the EU Cause Domestic Developments? Improving
-                  Case Selection in Europeanisation Research},
-  journal =	 {West European Politics},
-  volume =	 {29},
-  year =	 {2006},
-  pages =	 {134--146},
-  month =	 {January},
-  number =	 {1}
-}
-
- at article{Hawkins04,
-  author =	 {Darren Hawkins},
-  title =	 {Explaining Costly International Institutions:
-                  Persuasion and Enforceable Human Rights Norms},
-  journal =	 {International Studies Quarterly},
-  volume =	 {48},
-  year =	 {2004},
-  pages =	 {779--804}
-}
-
- at article{Hay04,
-  author =	 {Colin Hay},
-  title =	 {Theory, Stylized Heuristic or Self-Fulfilling
-                  Prophecy? The Status of Rational Choice Theory in
-                  Public Administration},
-  journal =	 {Public Administration},
-  volume =	 {82},
-  year =	 {2004},
-  pages =	 {39--62},
-  number =	 {1}
-}
-
- at article{HelHer01,
-  author =	 {Gunther Hellmann and Benjamin Herborth},
-  title =	 {Democratic Peace and Militarized Interstate Disputes
-                  in the Transatlantic Community},
-  note =	 {Paper prepared for presentation at the 42nd Annual
-                  Convention of the International Studies Association
-                  in Chicago, 21--25 February 2001},
-  year =	 {2001}
-}
-
- at article{HelMul03,
-  author =	 {Gunther Hellmann and Harald M\"{u}ller},
-  title =	 {Editing (I)nternational (R)elations: A Changing
-                  World},
-  journal =	 {Journal of International Relations and Development},
-  volume =	 {6},
-  year =	 {2003},
-  pages =	 {372--389},
-  month =	 {December},
-  number =	 {4}
-}
- at article{HesLea97,
-  author =	 {Frederick M. Hess and David L. Leal},
-  title =	 {Minority Teachers, Minority Students, and College Matriculation: A New Look at the Role-Modeling Hypothesis},
-  journal =	 {Policy Studies Journal},
-  volume =	 {25},
-  year =	 {1997},
-  pages =	 {235--248},
-  number =	 {2}
-}
- at article{HesLea99,
-  author =	 {Frederick M. Hess and David L. Leal},
-  title =	 {Computer-Assisted Learning in Urban Classrooms: The
-                  Impact of Politics, Race, and Class},
-  journal =	 {Urban Education},
-  volume =	 {34},
-  year =	 {1999},
-  pages =	 {370}
-}
- at article{Hess99,
-  author =	 {Frederick M. Hess},
-  title =	 {A Political Explanation of Policy Selection: The
-                  Case of Urban School Reform},
-  journal =	 {Policy Studies Journal},
-  volume =	 {27},
-  year =	 {1999},
-  pages =	 {459--473},
-  number =	 {3}
-}
- at article{Hite03,
-  author =	 {Julie M. Hite},
-  title =	 {Patterns of Multidimensionality Among Embedded Network Ties: A Typology of Relational Embeddedness in Emerging Entrepreneurial Firms},
-  journal =	 {Strategic Organization},
-  volume =	 {1},
-  year =	 {2003},
-  pages =	 {9--49},
-  number =	 {1}
-}
- at article{Hite05,
-  author =	 {Julie M. Hite},
-  title =	 {Evolutionary Processes and Paths of Relationally
-                  Embedded Network Ties in Emerging Entrepreneurial
-                  Firms},
-  journal =	 {Entrepreneurship, Theory \& Practice},
-  volume =	 {29},
-  year =	 {2005},
-  pages =	 {113--144},
-  month =	 {January},
-  number =	 {1}
-}
- at article{HodHar03,
-  author =	 {Matthew Hoddie and Caroline Hartzell},
-  title =	 {Civil War Settlements and the Implementation of
-                  Military Power-Sharing Arrangements},
-  journal =	 {Journal Of Peace Research},
-  volume =	 {40},
-  year =	 {2003},
-  pages =	 {303--320},
-  number =	 {3}
-}
- at article{HofOca01,
-  author =	 {Andrew J. Hoffman and William Ocasio},
-  title =	 {Not All Events Are Attended Equally: Toward a
-                  Middle-Range Theory of Industry Attention to
-                  External Events},
-  journal =	 {Organization Science},
-  volume =	 {12},
-  year =	 {2001},
-  pages =	 {414--434},
-  month =	 {July-August},
-  number =	 {4}
-}
- at article{Hooghe97,
-  author =	 {Liesbet Hooghe},
-  title =	 {Serving `Europe' - Political Orientations of Senior
-                  Commission Officials},
-  journal =	 {European Integration Online Papers},
-  volume =	 {1},
-  year =	 {1997},
-  note =	 {{http://eiop.or.at/eiop/texte/1997-008a.htm}},
-  month =	 {April},
-  number =	 {8}
-}
- at article{HowPerVil04,
-  author =	 {Susan E. Howell and Huey L. Perry and Matthew Vile},
-  title =	 {Black Cities / White Cities: Evaluating the Police},
-  journal =	 {Political Behavior},
-  volume =	 {26},
-  year =	 {2004},
-  pages =	 {45--68},
-  month =	 {March},
-  number =	 {1}
-}
- at article{JacLan04,
-  author =	 {Karen Jacobsen and Loren B. Landau},
-  title =	 {The Dual Imperative in Refugee Research: Some Methodological and Ethical Considerations in Social Science Research on Forced Migration},
-  journal =	 {Disasters},
-  volume =	 {27},
-  year =	 {2003},
-  pages =	 {185--206},
-  number =	 {3}
-}
- at article{JacPhaSwy04,
-  author =	 {Dirk Jacobs and Karen Phalet and Marc Swyngedouw},
-  title =	 {Associational Membership and Political Involvement Among Ethnic Minority Groups in Brussels},
-  journal =	 {Journal of Ethnic and Migration Studies},
-  volume =	 {30},
-  year =	 {2004},
-  pages =	 {543--559},
-  number =	 {3}
-}
- at article{JacTil04,
-  author =	 {Dirk Jacobs and Jean Tillie},
-  title =	 {Introduction: Social Capital and Political Integration of Migrants},
-  journal =	 {Journal of Ethnic and Migration Studies},
-  volume =	 {30},
-  year =	 {2004},
-  pages =	 {419--427},
-  number =	 {3}
-}
- at article{Johnson04,
-  author =	 {Craig Johnson},
-  title =	 {Uncommon Ground: The `Poverty of History' in Common Property Discourse},
-  journal =	 {Development and Change},
-  volume =	 {35},
-  year =	 {2004},
-  pages =	 {407--433},
-  number =	 {3}
-}
- at article{JonSte97,
-  author =	 {Bradford S. Jones and Marco R. Steenbergen},
-  title =	 {Modeling Multilevel Data Structures},
-  year =	 {1997},
-  note =	 {Annual Meetings of the Political Methodology
-                  Society}
-}
- at article{KaaBea99,
-  author =	 {Juliet Kaarbo and Ryan K. Beasley},
-  title =	 {A Practical Guide to the Comparative Case Study
-                  Method in Political Psychology},
-  journal =	 {Political Psychology},
-  volume =	 {20},
-  year =	 {1999},
-  pages =	 {369--391},
-  number =	 {2}
-}
- at article{KatVomMah05,
-  author =	 {Aaron Katz and Matthias vom Hau and James Mahoney},
-  title =	 {Explaining the Great Reversal in Spanish America:
-                  Fuzzy-Set Analysis Versus Regression Analysis},
-  journal =	 {Sociological Methods \& Research},
-  volume =	 {33},
-  year =	 {2005},
-  pages =	 {539--573}
-}
-
- at Book{Kenneally07,
-  author =	 {Christine Kenneally},
-  title = 	 {The First Word: The Search for the Origins of Language},
-  publisher = 	 {Viking},
-  year = 	 2007,
-  address =	 {New York}
-}
-
- at book{Coombs65,
-  title =	 {A Theory of Data},
-  author =	 {C.H. Coombs},
-  year =	 {1965},
-  address =	 {New York},
-  publisher =	 {Wiley}
-}
-
- at article{CEFP06,
-  author =	 {Centro de Estudios de las Finanzas P{\'u}blicas,
-                  C{\'a}mara de Diputados},
-  title =	 {Gasto en el Sector Salud},
-  journal =	 {Nota Informativa},
-  volume =	 {64},
-  month =	 {September},
-  year =	 {2006},
-}
-
- at Book{Ayres07,
-  author =	 {Ian Ayres},
-  title =	 {Supercrunchers},
-  publisher =	 {Random House},
-  year =	 2007,
-  address =	 {New York}
-}
-
- at Article{Ross06b,
-  author =	 {Philip E. Ross},
-  title =	 {The Expert Mind},
-  journal =	 {Scientific American},
-  year =	 {2006},
-  month =	 {August},
-  note =	 {{http://www.sciam.com/article.cfm?id=the-expert-mind}}
-}
-
- at article{RubTho92,
-  author =	 {Donald B. Rubin and Neal Thomas},
-  title =	 {Affinely Invariant Matching Methods with Ellipsoidal
-                  Distributions},
-  journal =	 {Annals of Statistics},
-  volume =	 {20},
-  number =	 {2},
-  year =	 {1992},
-  pages =	 {1079--1093}
-}
-
- at article{Rubin76b,
-  author =	 {Donald B. Rubin},
-  title =	 {Multivariate Matching Methods that are Equal
-                  Percent Bias Reducing, II: Maximums on Bias
-                  Reduction for Fixed Sample Sizes},
-  journal =	 {Biometrics},
-  volume =	 {32},
-  year =	 {1976},
-  pages =	 {121--132},
-}
-
- at book{RosRub02,
-  author =	 {P.R. Rosenbaum},
-  title =	 {Observational Studies},
-  publisher =	 {Springer},
-  year =	 {2002},
-  address =	 {New York}
-}
-
- at article{RacineLi09,
-  author =	 {J.S. Racine  and Q. Li},
-  title =	 {Efficient Estimation of Average Treatment Effects With Mixed Categorical and Continuous Data},
-  journal =	 {Journal of Business and Economic Statistics},
-  volume =	 {27},
-  number =    {2},
-  year =	 {2009},
-  pages =	 {203--223}
-}
-
- at article{Popoviciu35,
-  author =	 {T. Popoviciu},
-  title =	 {Sur Les \'Equations Alg\'ebriques Ayant Toutes Leurs
-                  Racines R\'eelles},
-  journal =	 {Mathematica},
-  volume =	 {9},
-  year =	 {1935},
-  pages =	 {129--145}
-}
-
- at article{Rubin76c,
-  title =	 {{Multivariate Matching Methods That are Equal
-                  Percent Bias Reducing, I: Some Examples}},
-  author =	 {Donald B. Rubin},
-  journal =	 {Biometrics},
-  volume =	 {32},
-  number =	 {1},
-  pages =	 {109--120},
-  year =	 {1976}
-}
-
- at InCollection{Coleridge1789,
-  author = 	 {Samuel Taylor Coleridge},
-  title = 	 {The Rime of the Ancyent Marinere},
-  booktitle = 	 {Lyrical Ballads},
-  publisher =	 {Routledge},
-  year =	 {1798 (1991)},
-  editor =	 {W. Wordsworth and S. T. Coleridge},
-  address =	 {London}
-}
-
- at book{MieBer07,
-  title =	 {{Permutation Methods: A Distance Function Approach}},
-  author =	 {Mielke, P.W. and Berry, K.J.},
-  year =	 {2007},
-  address =	 {New York},
-  publisher =	 {Springer}
-}
-
- at Article{ShiShi07,
-  author = 	 {H. Shimazaki and S. Shinomoto},
-  title = 	 {A Method for Selecting the Bin Size of a Time Histogram},
-  journal = 	 {Neural Computation},
-  year = 	 2007,
-  volume =	 19,
-  number =	 6,
-  pages =	 {1503--1527}
-}
- at inbook{Ragin04,
-  author =	 {Charles C. Ragin},
-  title =	 {Rethinking Social Inquiry},
-  chapter =	 {Turning the Tables: How Case-Oriented Research
-                  Challenges Variable-Oriented Research},
-  year =	 {2004},
-  publisher =	 {Rowman and Littlefield Publishers, Inc.},
-  address =	 {Lanham, MD},
-  editor =	 {Henry E. Brady and David Collier}
-}
-
- at unpublished{Ragin07,
-  author =	 {Charles C. Ragin},
-  title =	 {Qualitative Comparative Analysis and Fuzzy Sets},
-  note =	 {Presented at the American Political Science
-                  Association conference, Chicago},
-  year =	 {2007}
-}
-
- at article{BatChe04,
-  title =	 {{The Impact of Measurement Error on Evaluation
-                  Methods Based on Strong Ignorability}},
-  author =	 {Battistin, E. and Chesher, A.},
-  journal =	 {Institute for Fiscal Studies, London},
-  year =	 {2004}
-}
-
- at book{Agresti90,
-  title =	 {{Categorical data analysis}},
-  author =	 {Agresti, A.},
-  year =	 {1990},
-  address =	 {New York},
-  publisher = {John Wiley \& Sons, Inc.}
-}
-
- at book{Scott92,
-  title =	 {{Multivariate density estimation. Theory, practice and visualization}},
-  author =	 {Scott, D.W.},
-  year =	 {1992},
-  address =	 {New York},
-  publisher = {John Wiley \& Sons, Inc.}
-}
-
- at article{FDiac81,
-  title =	 {{On the histogram as a density estimator: $L_2$
-                  theory}},
-  author =	 {Freedman, D. and Diaconis, P.},
-  journal =	 {Probability Theory and Related Fields},
-  year =	 {1981},
-  volume =	 {57},
-  pages =	 {453--476}
-}
-
- at article{FreDue07,
-  title =	 {Clustering by Passing Messages Between Data Points},
-  author =	 {BJ Frey and D Dueck},
-  journal =	 {Science},
-  volume =	 {315},
-  number =	 {5814},
-  pages =	 {972},
-  year =	 {2007}
-}
-
- at Article{Ruben02,
-  author =	 {Harold Ruben},
-  title =	 {A simple conservative and robust solution of the
-                  Behrens-Fisher problem},
-  journal =	 {Sankhya},
-  year =	 {2002},
-  volume =	 {64},
-  number =	 {1},
-  pages =	 {139--155}
-}
-
- at Article{FawChaHer93,
-  author =	 {WW Fawzi and TC Chalmers and MG Herrera and F
-                  Mosteller},
-  title =	 {Vitamin A Supplementation and Child Mortality},
-  journal =	 {Journal of the American Medical Association},
-  year =	 {1993},
-  volume =	 {269},
-  number =	 {7},
-  pages =	 {898--903}
-}
-
- at Article{LavDurKoc05,
-  author =	 {LM LaVange and TA Durham and GG Koch},
-  title =	 {Randomization-based Nonparametric Methods for the
-                  Analysis of Multicentre Trials},
-  journal =	 {Statistical Methods in Medical Research},
-  year =	 {2005},
-  volume =	 {14},
-  number =	 {3},
-  pages =	 {281--301}
-}
-
- at Article{Little04,
-  author =	 {RJ Little},
-  title =	 {To Model or Not to Model? Competing Modes of
-                  Inference for Finite Population Sampling},
-  journal =	 {Journal of the American Statistical Association},
-  year =	 {2004},
-  volume =	 {99},
-  pages =	 {546--556}
-}
-
- at Article{LitRub00,
-  author =	 {RJ Little and DB Rubin},
-  title =	 {Causal Effects in Clinical and Epidemiological
-                  Studies Via Potential Outcomes: Concepts and
-                  Analytical Approaches},
-  journal =	 {Annual Review of Public Health},
-  year =	 {2000},
-  volume =	 {21},
-  pages =	 {121--145}
-}
-
- at Article{MalGre02,
-  author =	 {G Maldonado and S Greenland},
-  title =	 {Estimating Causal Effects},
-  journal =	 {International Journal of Epidemiology},
-  year =	 {2002},
-  volume =	 {31},
-  pages =	 {422--429}
-}
- at Article{DasNewVel03,
-  author =	 {Mitali Das and Whitney K. Newey and Francis Vella},
-  title =	 {Nonparametric Estimation of Sample Selection Models},
-  journal =	 {Review of Economic Studies},
-  year =	 {2003},
-  volume =	 {70},
-  pages =	 {33--58}
-}
- at Article{AbaDruHer01,
-  author =	 {Alberto Abadie and David Drukker and Jane Leber Herr
-                  and Guido W. Imbens},
-  title =	 {Implementing Matching Estimators for Average
-                  Treatment Effects in Stata},
-  journal =	 {The Stata Journal},
-  year =	 {2004},
-  volume =	 {4},
-  number =	 {3},
-  pages =	 {290--311}
-}
- at Article{DunAuMil03,
-  author =	 {Joel Dunning and JKK Au and RWJ Millner and AJ
-                  Levine},
-  title =	 {Derivation and Validation of a Clinical Scoring
-                  System to Predict the Need for an Intra-Aortic
-                  Balloon Pump in Patients Undergoing Adult Cardiac
-                  Surgery},
-  journal =	 {Interactive Cardiovascular and Thoracic Surgery},
-  year =	 {2003},
-  volume =	 {2},
-  pages =	 {639--643}
-}
-
-
-
- at Article{Rubin90,
-  author =	 {DB Rubin},
-  title =	 {On the Application of Probability Theory to
-                  Agricultural Experiments. Essay on
-                  Principles. Section 9. Comment: Neyman (1923) and
-                  Causal Inference in Experiments and Observational
-                  Studies},
-  journal =	 {Statistical Science},
-  year =	 {1990},
-  volume =	 {5},
-  number =	 {4},
-  pages =	 {472--480}
-}
- at book{HasTibFri01,
-  title =	 {{The Elements of Statistical Learning: Data Mining,
-                  Inference, and Prediction}},
-  author =	 {Trevor Hastie and Robert Tibshirani and Jerome
-                  Friedman},
-  year =	 {2001},
-  address =	 {New York},
-  publisher =	 {Springer}
-}
- at book{HasTibFri09,
-  title =	 {{The Elements of Statistical Learning: Data Mining,
-                  Inference, and Prediction, 2nd Ed}},
-  author =	 {Trevor Hastie and Robert Tibshirani and Jerome
-                  Friedman},
-  year =	 {2009},
-  address =	 {New York},
-  publisher =	 {Springer}
-}
- at Article{WesMukCha00,
-  title =	 {{Feature selection for SVMs}},
-  author =	 {J. Weston and S. Mukherjee and O. Chapelle and
-                  M. Pontil and T. Poggio and V. Vapnik},
-  journal =	 {Advances in Neural Information Processing Systems},
-  volume =	 {13},
-  pages =	 {668--674},
-  year =	 {2000}
-}
- at Article{HsuChaLin03,
-  title =	 {A practical guide to support vector classification},
-  author =	 {C.W. Hsu and C.C. Chang and C.J. Lin},
-  journal =	 {National Taiwan University, Technical Report, July},
-  year =	 {2003}
-}
-
- at Article{BraGroMil02,
-  title =	 {Feature Selection Using Linear Support Vector
-                  Machines},
-  author =	 {Janez Brank and Marko Grobelnik and Natasa
-                  Milic-Frayling and Dunja Mladenic},
-  journal =	 {Microsoft Research, Technical Report},
-  year =	 {2002}
-}
- at Article{HilPurWil08,
-  title =	 {{Computer Assisted Topic Classification for Mixed
-                  Methods Social Science Research}},
-  author =	 {Dustin Hillard and Stephen Purpura and John
-                  Wilkerson},
-  journal =	 {Journal of Information Technology and Politics},
-  year =	 {2008},
-  volume =	 {4},
-  number =	 {4}
-}
-
-
- at Incollection{Joachims98,
-  author =	 {Thorsten Joachims},
-  editor =	 {Claire N\'{e}dellec and C\'{e}line Rouveirol},
-  booktitle =	 {Machine Learning ECML-98},
-  title =	 {Text Categorization with Support Vector Machines:
-                  Learning with Many Relevant Features},
-  publisher =	 {Springer},
-  year =	 1998,
-  volume =	 1398,
-  pages =	 {127--142}
-}
-
- at article{AnaAlkLav08,
-  title =	 {The Role of Fish Oil in Arrhythmia Prevention},
-  author =	 {Rishi G. Anand and Mohi Alkadri and Carl J. Lavie
-                  and Richard V. Milani},
-  journal =	 {Journal of Cardiopulmonary Rehabilitation and
-                  Prevention},
-  year =	 {2008},
-  volume =	 {28},
-  pages =	 {92--98}
-}
- at article{RosRosSil07,
-  title =	 {{Minimum Distance Matched Sampling With Fine Balance
-                  in an Observational Study of Treatment for Ovarian
-                  Cancer}},
-  author =	 {Rosenbaum, P.R. and Ross, R.N. and Silber, J.H.},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {102},
-  number =	 {477},
-  pages =	 {75--83},
-  year =	 {2007}
-}
- at article{AbaGar03,
-  title =	 {{The Economic Costs of Conflict: A Case Study of the
-                  Basque Country}},
-  author =	 {Abadie, A. and Gardeazabal, J.},
-  journal =	 {American Economic Review},
-  volume =	 {93},
-  number =	 {1},
-  pages =	 {113--132},
-  year =	 {2003}
-}
-
- at Article{IacPor08b,
-  author =	 {Stefano M. Iacus and Giuseppe Porro},
-  title =	 {Invariant and Metric Free Proximities for Data
-                  Matching: An R Package},
-  journal =	 {Journal of Statistical Software},
-  year =	 2008,
-  volume =	 25,
-  number =	 11,
-  pages =	 {1--22}
-}
-
- at Article{IacPor07,
-  author =	 {Stefano M. Iacus and Giuseppe Porro},
-  title =	 {Missing data imputation, matching and other
-                  applications of random recursive partitioning},
-  journal =	 {Computational Statistics and Data Analysis},
-  year =	 2007,
-  volume =	 52,
-  number =	 2,
-  pages =	 {773--789}
-}
-
- at article{StuGre08,
-  title =	 {Using Full Matching to Estimate Causal Effects in
-                  Nonexperimental Studies: Examining the Relationship
-                  Between Adolescent Marijuana Use and Adult Outcomes},
-  author =	 {Elizabeth A. Stuart and Kerry M. Green},
-  journal =	 {Developmental Psychology},
-  volume =	 {44},
-  number =	 {2},
-  pages =	 {395--406},
-  year =	 {2008}
-}
-
- at PhdThesis{Moore08,
-  author =	 {Ryan T. Moore},
-  title =	 {Political Analysis and Statistical Applications for
-                  Social Policy Research},
-  school =	 {Harvard University},
-  year =	 {2008},
-  OPTaddress =	 {},
-  month =	 {May},
-}
-
- at Article{Moore08b,
-  author =	 {Ryan T. Moore},
-  title =	 {blockTools: Blocking, Assignment, and Diagnosing
-                  Interference in Randomized Experiments},
-  journal =	 { },
-  year =	 {2008},
-  note =
-                  {{http://www.people.fas.harvard.edu/$\sim$rtmoore/software.blockTools.htm}}
-}
-
- at Incollection{NigOroOla03,
-  author =	 {Gustavo Nigenda and Emanuel Orozco and Gustavo
-                  Olaiz},
-  editor =	 {Felicia Knaul and Gustavo Nigenda},
-  booktitle =	 {Caleidoscopio de la Salud},
-  title =	 {La Importancia de los Medicamentos en la Operaci{\'o}n
-                  del Seguro Popular de Salud},
-  publisher =	 {Funsalud},
-  pages =	 {263--273},
-  year =	 {2003}
-}
-
- at article{ImaVan04,
-  title =	 {{Causal inference with general treatment regimes:
-                  Generalizing the propensity score}},
-  author =	 {Imai, K. and van Dyk, D.A.},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {99},
-  number =	 {467},
-  pages =	 {854--866},
-  year =	 {2004}
-}
- at Book{MorWin07,
-  author =	 {Stephen L. Morgan and Christopher Winship},
-  title =	 {Counterfactuals and Causal Inference: Methods and
-                  Principles for Social Research},
-  publisher =	 {Cambridge University Press},
-  year =	 2007,
-  address =	 {Cambridge}
-}
-
-
- at Article{AbaImb07,
-  author =	 {Alberto Abadie and Guido W. Imbens},
-  title =	 {Bias-Corrected Matching Estimators for Average
-                  Treatment Effects},
-  journal =	 { },
-  year =	 {2007},
-  OPTkey =	 {},
-  OPTvolume =	 {},
-  OPTnumber =	 {},
-  OPTpages =	 {},
-  OPTmonth =	 {},
-  note =	 {{http://ksghome.harvard.edu/~aabadie/research.html}},
-  OPTannote =	 {}
-}
- at Book{Jevons1874,
-  author =	 {W. Stanley Jevons},
-  title =	 {The Principles of Science: A Treatise on Logic and
-                  the Scientific Method},
-  publisher =	 {Macmillan and Co.},
-  year =	 1874,
-  address =	 {New York}
-}
-
- at Book{Twain1883,
-  author =	 {Mark Twain},
-  title = 	 {Life on the Mississippi},
-  publisher = 	 {Chatto and Windus},
-  year = 	 1883,
-  address =	 {London}
-}
-
- at Article{SamMic08,
-  author =	 {Nicholas Sambanis and Alexander Michaelides},
-  title =	 {A Comment on Diagnostic Tools for Counterfactual
-                  Inference},
-  journal =	 {Political Analysis},
-  year =	 {2008},
-  OPTkey =	 {},
-  OPTvolume =	 {17},
-  OPTnumber =	 {1},
-  OPTpages =	 {},
-  OPTmonth =	 {},
-  OPTnote =	 {},
-  OPTannote =	 {}
-}
-
- at Article{McAfee02,
-  author =	 {R. Preston McAfee},
-  title =	 {Coarse Matching},
-  journal =	 {Econometrica},
-  year =	 {2002},
-  volume =	 {70},
-  number =	 {5},
-  pages =	 {2025--2034}
-}
-
- at Article{Mielke85,
-  author =	 {Paul W. Mielke Jr.},
-  title =	 {Geometric Concerns Pertaining to Applications of
-                  Statistical Tests in the Atmospheric Sciences},
-  journal =	 {Journal of the Atmospheric Sciences},
-  year =	 {1985},
-  volume =	 {42},
-  number =	 {12},
-  pages =	 {1209--1212}
-}
-
- at Book{Greene08,
-  author =	 {William H. Greene},
-  title =	 {Econometric Analysis, 6th Edn.},
-  publisher =	 {Prentice Hall},
-  year =	 2008,
-  address =	 {New York}
-}
-
- at Article{LeeMil02,
-  author =	 {Ronald Lee and Timothy Miller},
-  title =	 {An Approach to Forecasting Health Expenditures, with
-                  Application to the U.S. Medicare System},
-  journal =	 {Health Services Research},
-  year =	 {2002},
-  volume =	 {37},
-  number =	 {5},
-  pages =	 {1365--1386}
-}
-
- at Article{LubBeeBak95,
-  author =	 {James Lubitz and James Beebe and Colin Baker},
-  title =	 {Longevity and Medicare Expenditures},
-  journal =	 {New England Journal of Medicine},
-  year =	 {1995},
-  volume =	 {332},
-  number =	 {15},
-  pages =	 {999--1003}
-}
-
- at Article{McKusick99,
-  author =	 {David McKusick},
-  title =	 {Demographic Issues in Medicare
-                  Reform: Birthrates, Death Rates, and an Aging
-                  Population All Affect Medicare's Financing},
-  journal =	 {Health Affairs},
-  year =	 {1999},
-  volume =	 {18},
-  number =	 {1},
-  pages =	 {194--207}
-}
-
- at Article{Miller01,
-  author =	 {Tim Miller},
-  title =	 {Increasing Longevity and Medicare Expenditures},
-  journal =	 {Demography},
-  year =	 {2001},
-  volume =	 {38},
-  number =	 {2},
-  pages =	 {215--226}
-}
-
- at unpublished{Caldis08,
-  author =	 {Todd G. Caldis},
-  title =	 {The Long-Term Projection Assumptions for Medicare
-                  and Aggregate National Health Expenditures},
-  note =
-                  {{http://www.cms.hhs.gov/ReportsTrustFunds/downloads/projectionmethodology.pdf}},
-  year =	 {2008}
-}
- at Article{Hansen08,
-  author = 	 {Ben Hansen},
-  title = 	 {The Prognostic Analogue of the Propensity Score},
-  journal = 	 {Biometrika},
-  year = 	 2008,
-  volume =	 95,
-  number =	 2,
-  pages =	 {481--488}
-}
-
- at unpublished{GalSmiBla08,
-  author =	 {Jose Galdo and Jeffrey Smith and Dan Black},
-  title =	 {Bandwidth Selection and the Estimation of Treatment
-                  Effects with Unbalanced Data},
-  note =	 {University of Michigan},
-  year =	 {2008}
-}
- at Article{Pronin08,
-  author = 	 {Emily Pronin},
-  title = 	 {How We See Ourselves and How We See Others},
-  journal = 	 {Science},
-  year = 	 2008,
-  volume =	 320,
-  pages =	 {1170--1180}
-}
- at article{GraSci04,
-  title =	 {{Puzzles, Proverbs, and Omega Matrices: The
-                  Scientific and Social Significance of Empirical
-                  Implications of Theoretical Models (EITM)}},
-  author =	 {Granato, Jim and Scioli, Frank},
-  journal =	 {Perspectives on Politics},
-  volume =	 {2},
-  number =	 {02},
-  pages =	 {313--323},
-  year =	 {2004},
-  publisher =	 {Cambridge Univ Press}
-}
- at book{GeoBen05,
-  title =	 {{Case Studies and Theory Development in the Social
-                  Sciences}},
-  author =	 {George, A.L. and Bennett, A.},
-  year =	 {2005},
-  publisher =	 {MIT Press}
-}
-
- at article{MarQuiRug04,
-  title =	 {{Competing Approaches to Predicting Supreme Court
-                  Decision Making}},
-  author =	 {Martin, A.D. and Quinn, K.M. and Ruger, T.W. and
-                  Kim, P.T.},
-  journal =	 {Perspectives on Politics},
-  volume =	 {2},
-  number =	 {04},
-  pages =	 {761--767},
-  year =	 {2004}
-}
-
- at Article{Grove05,
-  author = 	 {William M. Grove},
-  title = 	 {Clinical Versus Statistical Prediction: The
-                  Contribution of Paul E. Meehl},
-  journal = 	 {Journal of Clinical Psychology},
-  year = 	 2005,
-  volume =	 61,
-  number =	 10,
-  pages =	 {1233--1243}
-}
-
- at proceedings{DjeSmi08,
-  title =	 {Heterogeneous Impacts in PROGRESA},
-  address =	 {Bonn, Germany},
-  author =	 {Habiba Djebbari and Jeffrey Smith},
-  organization = {IZA},
-  year =	 {2008}
-}
- at Book{Meehl54,
-  author =	 {Paul E. Meehl},
-  title =	 {Clinical Versus Statistical Prediction: A
-                  Theoretical Analysis and a Review of the Evidence},
-  publisher =	 {University of Minnesota Press},
-  year =	 1954,
-  address =	 {Minneapolis}
-}
-
- at Book{Rosenstone83,
-  title =	 {Forecasting Presidential Elections},
-  author =	 {S.J. Rosenstone},
-  year =	 {1983},
-  publisher =	 {Yale University Press},
-  address =	 {New Haven}
-}
- at Article{AdcCol01,
-  author =	 {Robert Adcock and David Collier},
-  title =	 {Measurement Validity: A Shared Standard for
-                  Qualitative and Quantitative Research},
-  journal =	 {American Political Science Review},
-  year =	 {2001},
-  volume =	 {95},
-  number =	 {3},
-  month =	 {September},
-  pages =	 {529--546}
-}
-
- at Article{BatGreLev00,
-  author =	 {R.H. Bates and A. Greif and M. Levi and
-                  J.L. Rosenthal and B.R. Weingast},
-  title =	 {The Analytic Narrative Project},
-  journal =	 {American Political Science Review},
-  year =	 {2000},
-  volume =	 {94},
-  number =	 {3},
-  pages =	 {696--702}
-}
-
- at Article{Carpenter00,
-  author = 	 {Daniel P. Carpenter},
-  title = 	 {What is the Marginal Value of Analytic Narratives?},
-  journal = 	 {Social Science History},
-  year = 	 {2000},
-  volume =	{24},
-  number={4},
-  pages =	 {653--668}
-}
-
- at Article{Skocpol00,
-  author =	 {Theda Skocpol},
-  title =	 {Commentary: Theory Tackles History},
-  journal =	 {Social Science History},
-  year =	 {2000},
-  volume =	 {24},
-  number =	 {4},
-  pages =	 {677--684}
-}
-
- at article{Parikh00,
-  author =	 {Sunita Parikh},
-  title =	 {The Strategic Value of Analytic Narratives},
-  journal =	 {Social Science History},
-  volume =	 {24},
-  year =	 {2000},
-  pages =	 {677--684},
-  number =	 {4}
-}
- at article{Mahoney00b,
-  author =	 {James Mahoney},
-  title =	 {Path Dependence in Historical Sociology},
-  journal =	 {Theory and Society},
-  volume =	 {29},
-  year =	 {2000},
-  pages =	 {507--548}
-}
-
- at article{Pierson00,
-  author =	 {Paul Pierson},
-  title =	 {Increasing Returns, Path Dependence, and the Study
-                  of Politics},
-  journal =	 {American Political Science Review},
-  year =	 {2000},
-  pages =	 {251--268},
-  month =	 {June}
-}
- at article{Mahoney99,
-  author =	 {James Mahoney},
-  title =	 {Nominal, Ordinal, and Narrative Appraisal in
-                  Macro-Causal Analysis},
-  journal =	 {American Journal of Sociology},
-  volume =	 {104},
-  year =	 {1999},
-  pages =	 {1154--1196},
-  month =	 {January},
-  number =	 {4}
-}
- at article{Pierson00b,
-  author =	 {Paul Pierson},
-  title =	 {Not Just What, but When: Timing and Sequence in
-                  Political Processes},
-  journal =	 {Studies in American Political Development},
-  volume =	 {14},
-  year =	 {2000},
-  pages =	 {72--92},
-  month =	 {Spring}
-}
-
- at article{ColLev77,
-  author =	 {David Collier and Steven Levitsky},
-  title =	 {Democracy with Adjectives: Conceptual Innovation in
-                  Comparative Research},
-  journal =	 {World Politics},
-  volume =	 {49},
-  year =	 {1997},
-  pages =	 {430--451},
-  month =	 {April},
-  number =	 {3}
-}
-
- at article{Elman05,
-  author =	 {Colin Elman},
-  title =	 {Explanatory Typologies in Qualitative Studies of
-                  International Politics},
-  journal =	 {International Organization},
-  volume =	 {59},
-  year =	 {2005},
-  pages =	 {293--326},
-  month =	 {Spring},
-  number =	 {2}
-}
-
- at article{Lustick96,
-  author =	 {Ian S. Lustick},
-  title =	 {History, Historiography, and Political Science:
-                  Multiple Historical Records and the Problem of
-                  Selection Bias},
-  journal =	 {American Political Science Review},
-  volume =	 {90},
-  year =	 {1996},
-  pages =	 {605--618},
-  month =	 {September},
-  number =	 {3}
-}
-
- at Article{Campbell05,
-  author =	 {James E. Campbell},
-  title =	 {Introduction: Assessments of the 2004 Presidential
-                  Vote Forecasts},
-  journal =	 {PS: Political Science \& Politics},
-  year =	 {2005},
-  volume =	 {38},
-  pages =	 {23--24}
-}
-
- at article{Lieberman05,
-  author =	 {Evan S. Lieberman},
-  title =	 {Nested Analysis as a Mixed-Method Strategy for
-                  Comparative Research},
-  journal =	 {American Political Science Review},
-  volume =	 {99},
-  year =	 {2005},
-  pages =	 {435--452},
-  month =	 {August},
-  number =	 {3}
-}
-
- at techreport{Heckman08,
-  author =	 {James J. Heckman},
-  title =	 {Econometric Causality},
-  institution =	 {National Bureau of Economic Research},
-  year =	 2008,
-  address =	 {Cambridge, MA},
-  number =	 13934,
-  note =	 {{http://www.nber.org/papers/w13934}}
-}
-
- at article{Hall06,
-  author =	 {Peter A. Hall},
-  title =	 {Systematic Process Analysis: When and How to Use It},
-  journal =	 {European Management Review},
-  volume =	 {3},
-  year =	 {2006},
-  pages =	 {24--31},
-  month =	 {Spring},
-  number =	 {1}
-}
-
- at article{Converse87,
-  title =	 {Changing Conceptions of Public Opinion in the
-                  Political Process},
-  author =	 {Philip E. Converse},
-  journal =	 {The Public Opinion Quarterly},
-  volume =	 {51},
-  pages =	 {12--24},
-  year =	 {1987}
-}
-
- at article{Mahoney08,
-  author =	 {James Mahoney},
-  title =	 {Toward a Unified Theory of Causality},
-  journal =	 {Comparative Political Studies},
-  volume =	 {41},
-  year =	 {2008},
-  pages =	 {412--436},
-  month =	 {April/May},
-  number =	 {4/5}
-}
-
- at article{HerHei04,
-  title =	 {The Distribution of R\&D Subsidies and Its Effect on
-                  the Final Outcome of Innovation Policy},
-  author =	 {Liliana Herrera and Joost Heijs},
-  journal =	 {DRUID Summer Conference 2004 on Industrial Dynamics,
-                  Innovation and Development, Elsinore, Denmark, June
-                  14--16},
-  year =	 {2004}
-}
-
- at article{AbaDruHer01,
-  title =	 {Implementing Matching Estimators for Average
-                  Treatment Effects in Stata},
-  author =	 {Alberto Abadie and David Drukker and Jane Leber Herr
-                  and Guido W. Imbens},
-  journal =	 {The Stata Journal},
-  volume =	 {1},
-  year =	 {2001},
-  pages =	 {1--18},
-  number =	 {1}
-}
-
- at unpublished{BryCapLuc02,
-  title =	 {Why So Unhappy? The Effect of Union Membership on
-                  Job Satisfaction},
-  author =	 {Alex Bryson and Lorenzo Cappellari and Claudio
-                  Lucifora},
-  year =	 {2002},
-  note =	 {Policy Studies Institute and Centre for Economic
-                  Performance}
-}
-
-
- at Book{GlaStr99,
-  author =	 {Barney G. Glaser and Anselm L. Strauss},
-  title =	 {The Discovery of Grounded Theory: Strategies for
-                  Qualitative Research},
-  publisher =	 {Aldine De Gruyter},
-  year =	 {1999},
-  address =	 {New York}
-}
-
- at Book{Tetlock05,
-  author =	 {Philip E. Tetlock},
-  title =	 {Expert Political Judgment: How Good Is It? How Can
-                  We Know?},
-  publisher =	 {Princeton University Press},
-  year =	 {2005},
-  address =	 {Princeton}
-}
-
- at Book{Hammersley00,
-  author =	 {Martyn Hammersley},
-  title =	 {Taking Sides in Social Research: Essays on
-                  Partisanship and Bias},
-  publisher =	 {Routledge},
-  year =	 {2000},
-  address =	 {London and New York}
-}
-
- at book{Little91,
-  title =	 {{Varieties of Social Explanation: An Introduction to
-                  the Philosophy of Social Science}},
-  author =	 {Little, Daniel},
-  year =	 {1991},
-  publisher =	 {Westview Press}
-}
-
- at Book{Gerring07,
-  author =	 {John Gerring},
-  title = 	 {Case Study Research: Principles and Practices},
-  publisher = 	 {Cambridge University Press},
-  year = 	 2007,
-  address =	 {New York}
-}
-
- at article{Goldthorpe01,
-  title =	 {{Causation, Statistics, and Sociology}},
-  author =	 {Goldthorpe, J.H.},
-  journal =	 {European Sociological Review},
-  volume =	 {17},
-  number =	 {1},
-  pages =	 {1--20},
-  year =	 {2001}
-}
-
- at book{Gill08,
-  title =	 {{Bayesian Methods: A Social and Behavioral Sciences
-                  Approach, 2nd edition}},
-  author =	 {Jeff Gill},
-  year =	 {2008},
-  publisher =	 {Chapman \& Hall/CRC}
-}
-
- at book{Elster89,
-  title =	 {Nuts and Bolts for the Social Sciences},
-  author =	 {Jon Elster},
-  year =	 {1989},
-  publisher =	 {Cambridge University Press},
-  address =	 {Cambridge, New York}
-}
-
- at book{Hausman98,
-  author =	 {Daniel M. Hausman},
-  title =	 {Causal Asymmetries},
-  publisher =	 {Cambridge University Press},
-  year =	 {1998},
-  address =	 {Cambridge UK, New York}
-}
-
- at book{Collins98,
-  author =	 {Randall Collins},
-  title =	 {The Sociology of Philosophies: A Global Theory of
-                  Intellectual Change},
-  publisher =	 {Belknap Press of Harvard University Press},
-  year =	 {1998},
-  address =	 {Cambridge, MA}
-}
-
- at InCollection{Hall09,
-  author = 	 {Peter A. Hall},
-  title = 	 {Path Dependence},
-  booktitle = 	 {The Future of Political Science: 100 Perspectives},
-  pages =	 { },
-  publisher =	 {Routledge},
-  year =	 {2009, forthcoming},
-  editor =	 {Gary King and Kay Scholzman and Norman Nie}
-}
-
- at article{MahGoe06,
-  title =	 {{A Tale of Two Cultures: Contrasting Quantitative
-                  and Qualitative Research}},
-  author =	 {James Mahoney and Gary Goertz},
-  journal =	 {Political Analysis},
-  volume =	 {14},
-  number =	 {3},
-  pages =	 {227--249},
-  year =	 {2006}
-}
-
- at techreport{Duneier08,
-  author = 	 {Mitchell Duneier},
-  title = 	 {How Not to Lie with Ethnography},
-  institution =  {Princeton University},
-  year = 	 {2008},
-  OPTkey = 	 {},
-  OPTvolume = 	 {},
-  OPTnumber = 	 {},
-  OPTpages = 	 {},
-  OPTmonth = 	 {},
-  OPTnote = 	 {},
-  OPTannote = 	 {}
-}
-
- at Article{BraGoe00,
-  author = 	 {Bear Braumoeller and Gary Goertz},
-  title = 	 {The Methodology of Necessary Conditions},
-  journal = 	 {American Journal of Political Science},
-  year = 	 2000,
-  volume =	 44,
-  number =	 4,
-  pages =	 {844--858},
-  month =	 {October}
-}
-
- at techreport{GlyQui08,
-  author =	 {Adam Glynn and Kevin Quinn},
-  title =	 {Non-parametric Mechanisms and Causal Modeling},
-  institution =	 {Harvard},
-  year =	 2008
-}
-
- at InCollection{Goertz03,
-  author =	 {Gary Goertz},
-  title =	 {The Substantive Importance of Necessary Condition
-                  Hypotheses},
-  booktitle =	 {Necessary Conditions: Theory, Methodology, and
-                  Applications},
-  publisher =	 {Rowman \& Littlefield},
-  year =	 2003,
-  editor =	 {Gary Goertz and Harvey Starr},
-  address =	 {Lanham, MD}
-}
-
- at book{RosAllMcc05,
-  author =	 {Peter E. Rossi and Greg M. Allenby and Robert
-                  McCulloch},
-  title =	 {Bayesian Statistics and Marketing},
-  publisher =	 {John Wiley \& Sons, Ltd},
-  year =	 2005,
-  address =	 {West Sussex, England}
-}
-
- at Article{Starfield91,
-  author =	 {Starfield, B.},
-  title =	 {Primary care and health: {A} cross-national
-                  comparison},
-  journal =	 {The Journal of the American Medical Association},
-  year =	 {1991},
-  volume =	 {266},
-  number =	 {16},
-  pages =	 {2268--2271},
-}
-
-
- at Article{WarMur82,
-  author =	 {Warner, Kenneth and Murt, Hillary},
-  title =	 {Impact of the antismoking campaign on smoking
-                  prevalence: {A} cohort analysis},
-  journal =	 {Journal of Public Health Policy},
-  year =	 1982,
-  volume =	 3,
-  number =	 4,
-  pages =	 {374--390}
-}
-
- at Article{WilDeeLun00,
-  author =	 {Wilmoth, J.R. and Deegan, L.J. and Lundstr\"{o}m,
-                  H. and Horiuchi, S.},
-  title =	 {Increase of maximum life-span in {S}weden,
-                  1861-1999},
-  journal =	 {Science},
-  year =	 2000,
-  volume =	 289,
-  pages =	 {2366--2368}
-}
-
- at Article{Waldron91,
-  author =	 {Waldron, Ingrid},
-  title =	 {Patterns and causes of gender differences in
-                  smoking},
-  journal =	 {Social Science and Medicine},
-  year =	 1991,
-  volume =	 32,
-  number =	 9,
-  pages =	 {989--1005}
-}
-
- at Article{DolPetBor04,
-  author =	 {Doll, Richard and Peto, Richard and Boreham, Jillian
-                  and Sutherland, Isabelle},
-  title =	 {Mortality in relation to smoking: 50 years'
-                  observations on male {B}ritish doctors},
-  journal =	 {British Medical Journal},
-  year =	 {2004},
-  volume =	 {328},
-  pages =	 {1519--1527}
-}
-
- at Article{Doll99,
-  author =	 {Doll, Richard},
-  title =	 {Tobacco: A Medical History},
-  journal =	 {Journal of Urban Health: {B}ulletin of the {New York
-                  Academy of Medicine}},
-  year =	 1999,
-  volume =	 76,
-  number =	 3,
-  pages =	 {989--1005}
-}
-
- at Article{RouBhoPar98,
-  author =	 {Routh, Hirak Behari and Bhowmik, Kazal Rekha and
-                  Parish, Jennifer and Parish, Lawrence},
-  title =	 {Historical Aspects of Tobacco Use and Smoking},
-  journal =	 {Clinics in Dermatology},
-  year =	 1998,
-  volume =	 16,
-  pages =	 {539--544}
-}
-
- at Article{LeeTul97,
-  author =	 {Lee, Ronald and Tuljapurkar, Shripad},
-  title =	 {Death and taxes: Longer life, consumption, and
-                  Social Security},
-  journal =	 {Demography},
-  year =	 1997,
-  volume =	 34,
-  number =	 1,
-  pages =	 {67--81},
-  month =	 {June}
-}
-
- at TechReport{SSAHist07,
-  author =	 {{Social Security Administration Historian's Office}},
-  title =	 {Social Security: A Brief History},
-  institution =	 {Social Security Administration},
-  year =	 2007,
-  number =	 {21-059},
-  month =	 {October}
-}
-
- at Article{Schneider99,
-  author = 	 {Schneider, Edward},
-  title = 	 {Aging in the Third Millennium},
-  journal = 	 {Science},
-  year = 	 1999,
-  volume = 	 283,
-  number = 	 5403,
-  pages = 	 {796--797}
-}
-
- at Article{FrePla07,
-  author =	 {Freedland, Stephen and Platz, Elizabeth},
-  title =	 {Obesity and prostate cancer: making sense out of
-                  apparently conflicting data},
-  journal =	 {Epidemiologic Reviews},
-  year =	 2007,
-  volume =	 29,
-  number =	 1,
-  pages =	 {88--97}
-}
-
- at Article{LitWhiKri07,
-  author =	 {Littman, Alyson and White, Emily and Kristal, Alan},
-  title =	 {Anthropometrics and prostate cancer risk},
-  journal =	 {American Journal of Epidemiology},
-  year =	 2007,
-  volume =	 165,
-  number =	 11,
-  pages =	 {1271--1279}
-}
-
-
- at Article{YanKelHe07,
-  author =	 {Yang, Wenjie and Kelly, Tanika and He, Jiang},
-  title =	 {Genetic Epidemiology of Obesity},
-  journal =	 {Epidemiologic Reviews},
-  year =	 2007,
-  volume =	 29,
-  number =	 1,
-  pages =	 {49--61}
-}
-
- at Article{RisHelKne90,
-  author =	 {Rissanen, Aila and Heli\"{o}vaara, Markku and Knekt,
-                  Paul and Reunanen, Antti and Aromaa, Arpo and
-                  Maatela, Jouni},
-  title =	 {Risk of disability and mortality due to overweight in
-                  a {F}innish population},
-  journal =	 {British Medical Journal},
-  year =	 1990,
-  volume =	 301,
-  pages =	 {835--837}
-}
-
- at Article{HeiEriEll00,
-  author =	 {Heitmann, BL and Erikson, H and Ellsinger, B-M and
-                  Mikkelsen, KL and Larsson, B},
-  title =	 {Mortality associated with body fat, fat-free mass
-                  and body mass index among 60-year-old {S}wedish men
-                  $-$ a 22-year follow-up. {T}he study of men born in
-                  $1913$},
-  journal =	 {International Journal of Obesity},
-  year =	 2000,
-  volume =	 24,
-  pages =	 {33--37}
-}
-
- at Article{CorMonSom06,
-  author =	 {Romero-Corral, Abel and Montori, Victor and Somers,
-                  Virend and Korinek, Josef and Thomas, Randal and
-                  Allison, Thomas and Mookadam, Farouk and
-                  Lopez-Jimenez, Francisco},
-  title =	 {Association of bodyweight with total mortality and
-                  with cardiovascular events in coronary artery
-                  disease: a systematic review of cohort studies},
-  journal =	 {The Lancet},
-  year =	 2006,
-  volume =	 368,
-  pages =	 {666--678},
-  month =	 {August}
-}
-
- at Article{FleGraWil07,
-  author =	 {Flegal, Katherine and Graubard, Barry and
-                  Williamson, David and Gail, Mitchell},
-  title =	 {Cause-specific excess deaths associated with
-                  underweight, overweight, and obesity},
-  journal =	 {Journal of the American Medical Association},
-  year =	 2007,
-  volume =	 298,
-  number =	 17,
-  pages =	 {2028--2037}
-}
-
- at Article{AdaSchHar06,
-  author =	 {Adams, Kenneth and Schatzkin, Arthur and Harris,
-                  Tamara and Kipnis, Victor and Mouw, Traci and
-                  Ballard-Barbash, Rachel and Hollenbeck, Albert and
-                  Leitzmann, Michael},
-  title =	 {Overweight, obesity, and mortality in a large
-                  prospective cohort of persons $50$ to $71$ years
-                  old},
-  journal =	 {New England Journal of Medicine},
-  year =	 2006,
-  volume =	 355,
-  number =	 8,
-  pages =	 {763--778}
-}
-
- at Article{SukSacBod03,
-  author =	 {Suk, Seung-Han and Sacco, Ralph and Boden-Albala,
-                  Bernadette and Cheun, Jian and Pittman, John and
-                  Elkind, Mitchell and Paik, Myunghee},
-  title =	 {Abdominal obesity and the risk of ischemic stroke:
-                  The {N}orthern {M}anhattan {S}troke {S}tudy},
-  journal =	 {Stroke},
-  year =	 2003,
-  volume =	 34,
-  number =	 7,
-  pages =	 {1586--1592},
-  month =	 {July}
-}
-
- at Book{Quetelet1842,
-  author =	 {Qu\'{e}telet, Lambert Adolphe Jacques},
-  title =	 {A treatise on man and the development of his
-                  faculties},
-  publisher =	 {William and Robert Chambers},
-  year =	 1842,
-  address =	 {Edinburgh},
-}
-
- at TechReport{ssa07,
-  author =	 {{The Board of Trustees, Federal Old-Age and
-                  Survivors Insurance and Federal Disability Insurance
-                  Trust Funds}},
-  title =	 {The 2007 annual report of the board of trustees of
-                  the federal old-age and survivors insurance and
-                  federal disability insurance trust funds},
-  institution =	 {Social Security Administration},
-  year =	 2007
-}
-
- at TechReport{ssa04,
-  author =	 {{The Board of Trustees, Federal Old-Age and
-                  Survivors Insurance and Federal Disability Insurance
-                  Trust Funds}},
-  title =	 {The 2004 annual report of the board of trustees of
-                  the federal old-age and survivors insurance and
-                  federal disability insurance trust funds},
-  institution =	 {Social Security Administration},
-  year =	 2004
-}
-
- at TechReport{ssa06,
-  author =	 {{The Board of Trustees, Federal Old-Age and
-                  Survivors Insurance and Federal Disability Insurance
-                  Trust Funds}},
-  title =	 {The 2006 annual report of the board of trustees of
-                  the federal old-age and survivors insurance and
-                  federal disability insurance trust funds},
-  institution =	 {Social Security Administration},
-  year =	 2006
-}
-
- at TechReport{USG:05,
-  author =	 {{United States Government}},
-  title =	 {Budget of the United States Government, Fiscal Year
-                  $2006$},
-  institution =	 {US Government Printing Office},
-  year =	 {2005}
-}
-
- at TechReport{USG:09,
-  author =	 {{United States Government}},
-  title =	 {Budget of the United States Government, Fiscal Year
-                  $2009$},
-  institution =	 {US Government Printing Office},
-  year =	 {2009}
-}
-
- at TechReport{ShoSunBun87,
-  author =	 {Shoven, John and Sundberg, Jeffrey and Bunker, John},
-  title =	 {The social security cost of smoking},
-  institution =	 {National Bureau of Economic Research},
-  year =	 1987,
-  type =	 {Working Paper Series},
-  number =	 2234
-}
-
- at TechReport{Gravelle98,
-  author =	 {Gravelle, Jane},
-  title =	 {The proposed tobacco settlement: who pays for the
-                  health costs of smoking?},
-  institution =	 {Library of Congress, Congressional Research Service},
-  year =	 1998,
-  number =	 {97-1053 E}
-}
-
- at Article{HaeShiMil56,
-  author =	 {Haenszel, W and Shimkin, MB and Miller, HP},
-  title =	 {Tobacco smoking patterns in the {U}nited {S}tates},
-  journal =	 {Public Health Monograph},
-  year =	 1956,
-  volume =	 45,
-  pages =	 {1--105}
-}
-
- at Article{WalLyeBra91,
-  author =	 {Waldron, I and Lye, D and Brandon, A},
-  title =	 {Gender differences in teenage smoking},
-  journal =	 {Women Health},
-  year =	 1991,
-  volume =	 17,
-  pages =	 {63--87}
-}
-
- at Article{Ferrence88,
-  author =	 {Ferrence, R},
-  title =	 {Sex differences in cigarette smoking in {C}anada,
-                  1900-1978: {A} reconstructed cohort study},
-  journal =	 {Canadian Journal of Public Health},
-  year =	 1988,
-  volume =	 79,
-  pages =	 {160--165}
-}
-
- at Article{Elkind85,
-  author =	 {Elkind, A},
-  title =	 {The social definition of women's smoking behavior},
-  journal =	 {Social Science and Medicine},
-  year =	 1985,
-  volume =	 20,
-  pages =	 {1269--1278}
-}
-
- at Article{LynBro01,
-  author =	 {Lynch, Scott and Brown, Scott},
-  title =	 {Reconsidering mortality compression and
-                  deceleration: {A}n alternative model of mortality
-                  rates},
-  journal =	 {Demography},
-  year =	 2001,
-  volume =	 38,
-  number =	 1,
-  pages =	 {79-95}
-}
-
-@TechReport{Holmer08,
-  author =	 {Holmer, Martin},
-  title =	 {SSASIM Guide},
-  institution =	 {Policy Simulation Group},
-  year =	 2008,
-  month =	 {March}
-}
-
-@Article{MeySab00,
-  author =	 {Meyerson, Noah and Sabelhaus, John},
-  title =	 {Uncertainty in Social Security Trust Fund
-                  Projections},
-  journal =	 {National Tax Journal},
-  year =	 2000,
-  volume =	 53,
-  number =	 3,
-  pages =	 {515-529}
-}
-
-@Article{TulLiBoe00,
-  author =	 {Tuljapurkar, Shripad and Li, Nan and Boe, Carl},
-  title =	 {A universal pattern of mortality decline in the $G7$
-                  countries},
-  journal =	 {Nature},
-  year =	 2000,
-  volume =	 405,
-  pages =	 {789-792}
-}
-
-@Article{Boyer47,
-  author =	 {Boyer, Carl},
-  title =	 {Note on an early graph of statistical data {(Huygens
-                  1669)}},
-  journal =	 {Isis},
-  year =	 1947,
-  volume =	 37,
-  number =	 {3$/$4},
-  pages =	 {148-149}
-}
-
-@Book{Vollgraff50,
-  editor =	 {Johan Adriaan Vollgraff},
-  title =	 {Oeuvres compl\`{e}tes de Christiaan
-                  Huygens. Publi\'{e}es par la Soci\'{e}t\'{e}
-                  hollandaise des sciences},
-  publisher =	 {La Haye: M. Nijhoff},
-  year =	 1950
-}
-
-@TechReport{Wilmoth03,
-  author =	 {Wilmoth, John R.},
-  title =	 {Overview and Discussion of the Social Security
-                  Mortality Projections},
-  institution =	 {Technical Panel on Assumptions and Methods, Social Security Advisory Board},
-  year =	 2003,
-}
-
-@TechReport{SSAB07,
-  author =	 {{Social Security Advisory Board Technical Panel}},
-  title =	 {2007 Technical Panel on Assumptions and Methods},
-  institution =	 {Social Security Advisory Board},
-  year =	 2007,
-}
-
-@TechReport{SSAB99,
-  author =	 {{Social Security Advisory Board Technical Panel}},
-  title =	 {1999 Technical Panel on Assumptions and Methods},
-  institution =	 {Social Security Advisory Board},
-  year =	 1999,
-}
-
-@TechReport{SSAB94,
-  author =	 {{Social Security Advisory Board Technical Panel}},
-  title =	 {1994 Technical Panel on Assumptions and Methods},
-  institution =	 {Social Security Advisory Board},
-  year =	 1994,
-}
-
-@TechReport{SSAB91,
-  author =	 {{Social Security Advisory Board Technical Panel}},
-  title =	 {1991 Technical Panel on Assumptions and Methods},
-  institution =	 {Social Security Advisory Board},
-  year =	 1991,
-}
-
-@Article{Alho90,
-  author =	 {Alho, Juha},
-  title =	 {Effects of targets and aggregation on the
-                  propagation of error in mortality forecasts},
-  journal =	 {Mathematical Population Studies},
-  year =	 1990,
-  volume =	 2,
-  pages =	 {209-227}
-}
-
-
-@Article{CoaKis86,
-  author =	 {Coale, Ansley and Kisker, Ellen},
-  title =	 {Mortality crossover: {R}eality or bad data},
-  journal =	 {Population Studies},
-  year =	 1986,
-  volume =	 40,
-  number =	 3,
-  pages =	 {389-401}
-}
-
-@Article{Parascandola04,
-  author =	 {Parascandola, Mark},
-  title =	 {Skepticism, statistical methods, and the cigarette:
-                  {A} historical analysis of a methodological debate},
-  journal =	 {Perspectives in Biology and Medicine},
-  year =	 2004,
-  volume =	 47,
-  number =	 2,
-  pages =	 {244-261}
-}
-
-@TechReport{Ball73,
-  author =	 {Ball, Robert},
-  title =	 {Hearings before the {S}pecial {C}ommittee on
-                  {Aging}},
-  institution =	 {U.S. Senate, Ninety-Third Congress},
-  year =	 1973,
-  number =	 {Part 1},
-  month =	 {January},
-  note =	 {SUDOC:Y4.Ag4:So1/2/pt.1}
-}
-
-@Article{Olshansky05,
-  author =	 {Olshansky, S. Jay and Passaro, Douglas and Hershow,
-                  Ronald and Layden, Jennifer and Carnes, Bruce and
-                  Brody, Jacob and Hayflick, Leonard and Butler,
-                  Robert and Allison, David and Ludwig, David},
-  title =	 {A potential decline in life expectancy in the
-                  {U}nited {S}tates in the 21st century},
-  journal =	 {The New England Journal of Medicine},
-  year =	 2005,
-  volume =	 352,
-  number =	 11,
-  pages =	 {1138-1145}
-}
-
-@TechReport{ssa117,
-  author =	 {Cheng, Anthony and Miller, Michael and Morris,
-                  Michael and Schultz, Jason and Skirvin, J. Patrick
-                  and Walder, Danielle},
-  title =	 {A stochastic model of the long-range financial
-                  status of the {OASDI} program},
-  institution =	 {Social Security Administration, Office of the Chief
-                  Actuary},
-  year =	 2004,
-  note =	 {Actuarial Study No. 117}
-}
-
-
-@TechReport{ssa116,
-  author =	 {Bell, Felicitie and Miller, Michael},
-  title =	 {Life tables for the {United States} social security
-                  area 1900-2100},
-  institution =	 {Social Security Administration, Office of the Chief
-                  Actuary},
-  year =	 2002,
-  note =	 {Actuarial Study No. 116}
-}
-
-@TechReport{ssa120,
-  author =	 {Bell, Felicitie and Miller, Michael},
-  title =	 {Life tables for the {United States} social security
-                  area 1900-2100},
-  institution =	 {Social Security Administration, Office of the Chief
-                  Actuary},
-  year =	 2005,
-  note =	 {Actuarial Study No. 120}
-}
-
-
-@Article{Warner78,
-  author =	 {Warner, Kenneth},
-  title =	 {Possible increases in the underreporting of
-                  cigarette consumption},
-  journal =	 {Journal of the American Statistical Association},
-  year =	 1978,
-  volume =	 73,
-  number =	 362,
-  pages =	 {314-318}
-}
-
-@Article{CanFisBak65,
-  author =	 {Cannell, C.F. and Fisher, G. and Bakker, T.},
-  title =	 {Reporting of hospitalization in the health interview
-                  survey},
-  journal =	 {Vital and Health Statistics},
-  year =	 1965,
-  volume =	 2,
-  number =	 6,
-  pages =	 {i-71}
-}
-
-@Article{Halley93,
-  author =	 {Halley, Edmund},
-  title =	 {An estimate of the degrees of mortality of mankind,
-                  drawn from curious tables of the births and funerals
-                  at the city of Breslaw; with an attempt to ascertain
-                  the price of annuities upon lives},
-  journal =	 {Philosophical Transactions of the Royal Society of
-                  London},
-  year =	 1693,
-  volume =	 17,
-  pages =	 {596-610}
-}
-
-
-@Article{Wright86,
-  author =	 {Wright, VB},
-  title =	 {Will quitting smoking help Medicare solve its
-                  financial problems?},
-  journal =	 {Inquiry},
-  year =	 1986,
-  volume =	 23,
-  number =	 1,
-  pages =	 {76-82}
-}
-
-@TechReport{HMD08,
-  author =	 {{University of California, Berkeley (USA)} and {Max
-                  Planck Institute for Demographic Research
-                  (Germany)}},
-  title =	 {Human Mortality Database},
-  year =	 2008,
-  institution =	 {\url{http://www.mortality.org}},
-  note =	 {data downloaded on April 7, 2008}
-}
-
-@Article{SasIch02,
-  author =	 {Sascha O.\ Becker and Andrea Ichino},
-  title =	 {Estimation of average treatment effects based on
-                  propensity scores},
-  journal =	 {The Stata Journal},
-  year =	 2002,
-  volume =	 2,
-  number =	 4,
-  pages =	 {358--377}
-}
-
-@Article{JacAdaMul99,
-  author =	 {Jacobs, David R., Jr and Adachi, Hisashi and Mulder,
-                  Ina and Kromhout, Daan and Menotti, Alessandro and
-                  Nissinen, Aulikki and Blackburn, Henry},
-  title =	 {Cigarette Smoking and Mortality Risk:
-                  Twenty-five-Year Follow-up of the Seven Countries
-                  Study},
-  journal =	 {Archives of Internal Medicine},
-  year =	 1999,
-  volume =	 159,
-  number =	 7,
-  pages =	 {733-740}
-}
-
-@Article{PeeBarWil03,
-  author =	 {Peeters, Anna and Barendregt, Jan and Willekens,
-                  Frans and Mackenbach, Johan and Al Mamun, Abdullah
-                  and Bonneux, Luc},
-  title =	 {Obesity in Adulthood and its Consequences for Life
-                  Expectancy: A Life-Table Analysis},
-  journal =	 {Annals of Internal Medicine},
-  year =	 2003,
-  volume =	 138,
-  pages =	 {24-32}
-}
-
-@Article{LynSmi05,
-  author =	 {Lynch, John and Smith, George Davey},
-  title =	 {A Life Course Approach to Chronic Disease
-                  Epidemiology},
-  journal =	 {Annual Review of Public Health},
-  year =	 2005,
-  volume =	 26,
-  number =	 1,
-  pages =	 {1-35},
-}
-
-@Article{Peace85,
-  author =	 {Peace, L.R.},
-  title =	 {A Time Correlation Between Cigarette Smoking and
-                  Lung Cancer},
-  journal =	 {The Statistician},
-  year =	 1985,
-  volume =	 34,
-  number =	 4,
-  pages =	 {371-381}
-}
-
-
-@Article{Sturm02,
-  author =	 {Sturm, Roland},
-  title =	 {The Effects of Obesity, Smoking, and Drinking on
-                  Medical Problems and Cost},
-  journal =	 {Health Affairs},
-  year =	 2002,
-  pages =	 {245-253},
-  month =	 {March/April}
-}
-
-@TechReport{Gutterman08,
-  author =	 {Gutterman, Sam},
-  title =	 {Human Behavior: An Impediment to Future Mortality
-                  Improvement, A Focus on Obesity and Related Matters},
-  institution =	 {Society of Actuaries},
-  year =	 2008,
-  note =	 {Living to 100 and Beyond Symposium}
-}
-
-@Article{BakOlsSor07,
-  author =	 {Baker, Jennifer and Olsen, Lina and
-                  S{\o}rensen, Thorkild},
-  title =	 {Childhood Body-Mass Index and the Risk of Coronary
-                  Heart Disease in Adulthood},
-  journal =	 {New England Journal of Medicine},
-  year =	 2007,
-  volume =	 357,
-  number =	 23,
-  pages =	 {2329-2337}
-}
-
-@Article{LeuSia03,
-  title =	 {{PSMATCH2: Stata module to perform full Mahalanobis
-                  and propensity score matching, common support
-                  graphing, and covariate imbalance testing}},
-  author =	 {Leuven, Edwin and Sianesi, Barbara},
-  journal =	 {Statistical Software Components},
-  year =	 2003
-}
-
-@Article{Kalton68,
-  author = 	 {G. Kalton},
-  title = 	 {Standardization: A Technique to Control for Extraneous Variables},
-  journal = 	 {Applied Statistics},
-  year = 	 1968,
-  volume =	 17,
-  number =	 2,
-  pages =	 {118--136}
-}
-
-@Article{Wei82,
-  title =	 {Interval estimation of location difference with
-                  incomplete data},
-  author =	 {Wei, L. J.},
-  journal =	 {Biometrika},
-  volume =	 {69},
-  number =	 {1},
-  pages =	 {249--251},
-  year =	 {1982}
-}
-
-@article{Moulton04,
-  title =	 {Covariate-based constrained randomization of
-                  group-randomized trials},
-  author =	 {Moulton, L.H.},
-  journal =	 {Clinical Trials},
-  volume =	 {1},
-  number =	 {3},
-  pages =	 {297},
-  year =	 {2004}
-}
-
-@Unpublished{Wand07,
-  author =	 {Jonathan Wand},
-  title =	 {Credible Comparisons Using Interpersonally
-                  Incomparable Data: Ranking self-evaluations relative
-                  to anchoring vignettes or other common survey
-                  questions},
-  year =	 {2007},
-  note =	 {http://wand.stanford.edu},
-}
-
-
-@Unpublished{Buckley08,
-  author =	 {Jack Buckley},
-  title =	 {Survey Context Effects in Anchoring Vignettes},
-  year =	 {2008},
-  note =	 {http://polmeth.wustl.edu/workingpapers.php},
-}
-
-@Book{GrzymalaBusse07,
-  author =	 {Anna Grzymala-Busse},
-  title =	 {Rebuilding Leviathan: Party Competition and State
-                  Exploitation in Post-Communist Democracies},
-  publisher =	 {Cambridge University Press},
-  address =	 {New York},
-  year =	 {2007}
-}
-
-@Unpublished{SoeDelHar07,
-  title =	 {{Validating the Use of Vignettes for Subjective
-                  Threshold Scales}},
-  author =	 {Arthur Van Soest and Liam Delaney and Colm P. Harmon
-                  and Arie Kapteyn and James P. Smith},
-  year =	 {2007},
-  note =	 {UCD Geary Institute Working Paper}
-}
-
-@article{JavRip07,
-  title =	 {{An ``Unfolding'' Latent Variable Model for Likert
-                  Attitude Data: Drawing Inferences Adjusted for
-                  Response Style}},
-  author =	 {Kristin N. Javaras and Brian D. Ripley},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {102},
-  number =	 {478},
-  pages =	 {454--463},
-  year =	 {2007},
-  publisher =	 {American Statistical Association}
-}
-
-@article{KapSmiSoe07,
-  title =	 {{Vignettes and Self-Reports of Work Disability in
-                  the United States and the Netherlands}},
-  author =	 {Arie Kapteyn and James P. Smith and Arthur Soest},
-  journal =	 {American Economic Review},
-  volume =	 {97},
-  number =	 {1},
-  pages =	 {461--473},
-  year =	 {2007},
-  publisher =	 {American Economic Association Publications}
-}
-
-@article{Bowling05,
-  title =	 {{Just one question: If one question works, why ask
-                  several?}},
-  author =	 {Ann Bowling},
-  journal =	 {Journal of Epidemiology and Community Health},
-  volume =	 {59},
-  number =	 {5},
-  pages =	 {342},
-  year =	 {2005}
-}
-
-@article{DamVasSzw05,
-  title =	 {{Perception of health state and the use of vignettes
-                  to calibrate for socioeconomic status: results of
-                  the World Health Survey in Brazil, 2003}},
-  author =	 {Damacena, G.N. and Vasconcellos, M.T.L. and
-                  Szwarcwald, C.L.},
-  journal =	 {Cadernos de Sa{\'u}de P{\'u}blica},
-  volume =	 {21},
-  pages =	 {65--77},
-  year =	 {2005},
-  publisher =	 {SciELO Brasil}
-}
-
-
-@article{HseTan07,
-  title =	 {{Sun and Water: On a Modulus-Based Measurement of
-                  Happiness}},
-  author =	 {Christopher K. Hsee and Judy Ningyu Tang},
-  journal =	 {Emotion},
-  volume =	 {7},
-  pages =	 {213--218},
-  year =	 {2007}
-}
-
-@article{SalTanMur04,
-  title =	 {{Comparability of self rated health: cross sectional
-                  multi-country survey using anchoring vignettes}},
-  author =	 {Joshua A. Salomon and Ajay Tandon and Christopher
-                  J.L. Murray},
-  journal =	 {British Medical Journal},
-  volume =	 {328},
-  number =	 {7434},
-  pages =	 {258},
-  year =	 {2004},
-  publisher =	 {Br Med Assoc}
-}
-
-@unpublished{AbaDiaHai09,
-  title =	 {Synthetic Control Methods for Comparative Case
-                  Studies: Estimating the Effect of California's
-                  Tobacco Control Program},
-  author =	 {Alberto Abadie and Alexis Diamond and Jens
-                  Hainmueller},
-  journal =	 {Journal of the American Statistical Association},
-  year =	 {2009, forthcoming}
-}
-
-@Article{AbaDiaHai09b,
-  author =	 {Alberto Abadie and Alexis Diamond and Jens
-                  Hainmueller},
-  title = 	 {Synth: An R Package for Synthetic Control Methods in
-                  Comparative Case Studies},
-  journal = 	 {Journal of Statistical Software},
-  year = 	 {2009, forthcoming}
-}
-
-@article{LozSolGak07,
-  title =	 {{Benchmarking of performance of Mexican states with
-                  effective coverage}},
-  author =	 {Rafael Lozano and Patricia Soliz and Emmanuela
-                  Gakidou and Jesse Abbott-Klafter and Dennis
-                  M. Feehan and Cecilia M. Vidal and Juan Pablo Ortiz
-                  and Christopher J. L. Murray},
-  journal =	 {Salud P{\'u}blica de M{\'e}xico},
-  volume =	 {49},
-  pages =	 {53--69},
-  year =	 {2007},
-  publisher =	 {SciELO Public Health}
-}
-
-
-@article{MarPol95,
-  title =	 {{Diagnostics for Redesigning Survey Questionnaires:
-                  Measuring Work in the Current Population Survey}},
-  author =	 {Elizabeth Martin and Anne E. Polivka},
-  journal =	 {Public Opinion Quarterly},
-  volume =	 {59},
-  number =	 {4},
-  pages =	 {547--567},
-  year =	 {1995},
-  publisher =	 {AAPOR}
-}
-
-@article{Tourangeau04,
-  title =	 {{Survey Research and Societal Change}},
-  author =	 {Roger Tourangeau},
-  journal =	 {Annual Review of Psychology},
-  volume =	 {55},
-  number =	 {1},
-  pages =	 {775--801},
-  year =	 {2004},
-  publisher =	 {Annual Reviews}
-}
-
-@conference{GerWelKel96,
-  title =	 {{Who lives here? The use of vignettes in household
-                  roster research}},
-  year =	 {1996},
-  author =	 {Eleanor R. Gerber and Tracy R. Wellens and Catherine
-                  Keeley},
-  booktitle =	 {Proceedings of the Section on Survey Research
-                  Methods, American Statistical Association},
-  pages =	 {962--967}
-}
-
-
-@InCollection{Tourangeau91,
-  author =	 {Roger Tourangeau},
-  title =	 {Context Effects on Responses to Attitude Questions:
-                  Attitudes as Memory Structures},
-  booktitle =	 {Contextual Effects in Social and Psychological
-                  Research},
-  pages =	 {35-48},
-  publisher =	 {Springer-Verlag},
-  year =	 {1991},
-  editor =	 {Norbert Schwarz and Seymour Sudman},
-  address =	 {New York}
-}
-
-@InCollection{Smith91,
-  author =	 {Tom Smith},
-  title =	 {Thoughts on the Nature of Context Effects},
-  booktitle =	 {Contextual Effects in Social and Psychological
-                  Research},
-  pages =	 {163-186},
-  publisher =	 {Springer-Verlag},
-  year =	 {1991},
-  editor =	 {Norbert Schwarz and Seymour Sudman},
-  address =	 {New York}
-}
-
-
-@article{NieCraMat91,
-  title =	 {{Measuring Internal Political Efficacy in the 1988
-                  National Election Study}},
-  author =	 {Richard G. Niemi and Stephen C. Craig and Franco
-                  Mattei},
-  journal =	 {American Political Science Review},
-  volume =	 {85},
-  number =	 {4},
-  pages =	 {1407--1413},
-  year =	 {1991},
-  publisher =	 {JSTOR}
-}
-
-@article{CraNieSil90,
-  title =	 {{Political efficacy and trust: A report on the NES
-                  pilot study items}},
-  author =	 {Stephen C. Craig and Richard G. Niemi and Glenn
-                  E. Silver},
-  journal =	 {Political Behavior},
-  volume =	 {12},
-  number =	 {3},
-  pages =	 {289--314},
-  year =	 {1990},
-  publisher =	 {Springer}
-}
-
-@article{SteKorCar92,
-  title =	 {{Arenas and Attitudes: A Note on Political Efficacy
-                  in a Federal System}},
-  author =	 {Marianne C. Stewart and Allan Kornberg and Harold
-                  D. Clarke and Alan Acock},
-  journal =	 {Journal of Politics},
-  volume =	 {54},
-  number =	 {1},
-  pages =	 {179--196},
-  year =	 {1992},
-  publisher =	 {JSTOR}
-}
-
-@book{Oliver01,
-  title =	 {{Democracy in Suburbia}},
-  author =	 {J. Eric Oliver},
-  year =	 {2001},
-  publisher =	 {Princeton University Press},
-  address =	 {Princeton, NJ}
-}
-
-@InCollection{Strack91,
-  author =	 {Fritz Strack},
-  title =	 {'Order Effects' in Survey Research: Activation and
-                  Information Functions of Preceding Questions},
-  booktitle =	 {Contextual Effects in Social and Psychological
-                  Research},
-  pages =	 {23-34},
-  publisher =	 {Springer-Verlag},
-  year =	 {1991},
-  editor =	 {Norbert Schwarz and Seymour Sudman},
-  address =	 {New York},
-}
-
-@book{Weisberg96,
-  title =	 {{An introduction to survey research, polling, and
-                  data analysis}},
-  author =	 {Herbert F. Weisberg and Jon A. Krosnick and Bruce
-                  D. Bowen},
-  year =	 {1996},
-  publisher =	 {Sage Publications},
-  address =	 {Thousand Oaks, CA}
-}
-
-@book{Fowler95,
-  title =	 {{Improving Survey Questions: Design and Evaluation}},
-  author =	 {Floyd J. Fowler},
-  year =	 {1995},
-  publisher =	 {Sage Publications},
-  address =	 {Thousand Oaks, CA}
-}
-
-@InCollection{SchHipNoe91,
-  author =	 {Norbert Schwarz and Hans-J. Hippler and Elisabeth
-                  Noelle-Neumann},
-  title =	 {A Cognitive Model of Response-Order Effects},
-  booktitle =	 {Contextual Effects in Social and Psychological
-                  Research},
-  pages =	 {187-202},
-  publisher =	 {Springer-Verlag},
-  year =	 {1991},
-  editor =	 {Norbert Schwarz and Seymour Sudman},
-  address =	 {New York},
-}
-
-
-@article{SchStrMai91,
-  title =	 {{Assimilation and Contrast Effects in Part-Whole
-                  Question Sequences: A Conversational Logic
-                  Analysis}},
-  author =	 {Norbert Schwarz and Fritz Strack and Hans-Peter Mai},
-  journal =	 {Public Opinion Quarterly},
-  volume =	 {55},
-  number =	 {1},
-  pages =	 {3--23},
-  year =	 {1991},
-  publisher =	 {AAPOR}
-}
-
-@article{WilKe95,
-  title =	 {{Part-Whole Question Order Effects: Views of
-                  Rurality}},
-  author =	 {Fern K. Willits and Bin Ke},
-  journal =	 {Public Opinion Quarterly},
-  volume =	 {59},
-  number =	 {3},
-  pages =	 {392--403},
-  year =	 {1995},
-  publisher =	 {AAPOR}
-}
-
-@article{MccObr88,
-  title =	 {{Question-Order Effects on the Determinants of
-                  Subjective Well-Being}},
-  author =	 {McKee J. McClendon and David J. O'Brien},
-  journal =	 {Public Opinion Quarterly},
-  volume =	 {52},
-  number =	 {3},
-  pages =	 {351--364},
-  year =	 {1988},
-  publisher =	 {AAPOR}
-}
-
-@article{Finkel87,
-  title =	 {{The Effects of Participation on Political Efficacy
-                  and Political Support: Evidence from a West German
-                  Panel}},
-  author =	 {Steven E. Finkel},
-  journal =	 {Journal of Politics},
-  volume =	 {49},
-  number =	 {2},
-  pages =	 {441--464},
-  year =	 {1987}
-}
-
-@book{SchPre96,
-  title =	 {{Questions and Answers in Attitude Surveys:
-                  Experiments on Question Form, Wording, and Context}},
-  author =	 {Howard Schuman and Stanley Presser},
-  year =	 {1996},
-  publisher =	 {Sage},
-  address =	 {Thousand Oaks, CA}
-}
-
-
-@article{Krosnick99,
-  title =	 {{Survey Research}},
-  author =	 {Jon A. Krosnick},
-  journal =	 {Annual Review of Psychology},
-  volume =	 {50},
-  number =	 {1},
-  pages =	 {537--567},
-  year =	 {1999},
-  publisher =	 {Annual Reviews}
-}
-
-@book{SudBraSch96,
-  title =	 {{Thinking about Answers: The Application of
-                  Cognitive Processes to Survey Methodology}},
-  author =	 {Seymour Sudman and Norman M. Bradburn and Norbert
-                  Schwarz},
-  year =	 {1996},
-  publisher =	 {Jossey-Bass Publishers},
-  address =	 {San Francisco, CA}
-}
-
-@article{KriWilMos97,
-  title =	 {{Measuring Social Class in U.S. Public Health Research:
-                  Concepts, Methodologies, and Guidelines}},
-  author =	 {N. Krieger and D.R. Williams and M.E. Moss},
-  journal =	 {Annual Reviews in Public Health},
-  volume =	 {18},
-  number =	 {1},
-  pages =	 {341--378},
-  year =	 {1997},
-  publisher =	 {Annual Reviews}
-}
-
-@book{Payne51,
-  title =	 {{The Art of Asking Questions}},
-  author =	 {Stanley L. Payne},
-  publisher =	 {Princeton University Press},
-  address =	 {Princeton, NJ},
-  year =	 {1951}
-}
-
-@InCollection{Bradburn83,
-  author =	 {Norman M. Bradburn},
-  title =	 {Response Effects},
-  booktitle =	 {Handbook of Survey Research},
-  pages =	 {},
-  publisher =	 {Academic Press},
-  year =	 {1983},
-  address =	 {New York, NY},
-  editor =	 {Peter H. Rossi and James D. Wright and Andy
-                  B. Anderson},
-}
-
-
-@Article{Robins08,
-  author =	 {James M. Robins},
-  title =	 {Causal models for estimating the effects of weight
-                  gain on mortality},
-  journal =	 {International Journal of Obesity},
-  year =	 2008,
-  volume =	 32,
-  pages =	 {s15--s41}
-}
-
-
-@Article{OepVau02,
-  author =	 {Oeppen, Jim and Vaupel, James},
-  title =	 {Broken Limits to Life Expectancy},
-  journal =	 {Science},
-  year =	 2002,
-  volume =	 296,
-  pages =	 {1029-1031},
-  month =	 {May}
-}
-
-@Article{HorWil98,
-  author =	 {Horiuchi, Shiro and Wilmoth, John},
-  title =	 {Deceleration in the Age Pattern of Mortality at
-                  Older Ages},
-  journal =	 {Demography},
-  year =	 1998,
-  volume =	 35,
-  number =	 4,
-  pages =	 {391-412},
-  month =	 {November}
-}
-
-@Article{PreWan06,
-  author =	 {Preston, Samuel and Wang, Haidong},
-  title =	 {Sex Mortality Differences in the United States: The
-                  Role of Cohort Smoking Patterns},
-  journal =	 {Demography},
-  year =	 2006,
-  volume =	 43,
-  number =	 4,
-  pages =	 {631-646},
-  month =	 {November}
-}
-
-@article{Finkel85,
-  title =	 {{Reciprocal Effects of Participation and Political
-                  Efficacy: A Panel Analysis}},
-  author =	 {Steven E. Finkel},
-  journal =	 {American Journal of Political Science},
-  volume =	 {29},
-  number =	 {4},
-  pages =	 {891--913},
-  year =	 {1985}
-}
-
-@article{DipGla99,
-  title =	 {{Incentives and Social Capital: Are Homeowners
-                  Better Citizens?}},
-  author =	 {Denise DiPasquale and Edward L. Glaeser},
-  journal =	 {Journal of Urban Economics},
-  volume =	 {45},
-  number =	 {2},
-  pages =	 {354--384},
-  year =	 {1999},
-  publisher =	 {Elsevier}
-}
-
-@book{ColRai78,
-  title =	 {{Social Standing in America: New Dimensions of
-                  Class}},
-  author =	 {Richard P. Coleman and Lee Rainwater},
-  publisher =	 {Basic Books},
-  address =	 {New York, NY},
-  year =	 {1978}
-}
-
-@article{GruSor98,
-  title =	 {{Can Class Analysis Be Salvaged?}},
-  author =	 {David Grusky and Jesper B. Sorensen},
-  journal =	 {American Journal of Sociology},
-  volume =	 {103},
-  number =	 {5},
-  pages =	 {1187--1234},
-  year =	 {1998},
-  publisher =	 {UChicago Press}
-}
-
-
-@article{Sorensen00,
-  title =	 {{Toward a Sounder Basis for Class Analysis}},
-  author =	 {Aage B. Sorensen},
-  journal =	 {American Journal of Sociology},
-  volume =	 {105},
-  number =	 {6},
-  pages =	 {1523--1558},
-  year =	 {2000},
-  publisher =	 {University of Chicago Press}
-}
-
-
-@book{JacJac91,
-  title =	 {{Class Awareness in the United States}},
-  author =	 {Mary R. Jackman and Robert W. Jackman},
-  publisher =	 {University of California Press},
-  year =	 {1991},
-  address =	 {Berkeley, CA}
-}
-
-
-@article{Fischer03,
-  title =	 {{The Relative Importance of Income and Race in
-                  Determining Residential Outcomes in US Urban Areas,
-                  1970-2000}},
-  author =	 {Mary J. Fischer},
-  journal =	 {Urban Affairs Review},
-  volume =	 {38},
-  number =	 {5},
-  pages =	 {669-696},
-  year =	 {2003}
-}
-
-@article{SchHipDeu85,
-  title =	 {{Response Scales: Effects of Category Range on
-                  Reported Behavior and Comparative Judgments}},
-  author =	 {Norbert Schwarz and Hans J. Hippler and Bridget
-                  Deutsch and Fritz Strack},
-  journal =	 {Public Opinion Quarterly},
-  volume =	 {49},
-  number =	 {3},
-  pages =	 {388--395},
-  year =	 {1985},
-  publisher =	 {The Trustees of Columbia University}
-}
-
-@article{JavPopLal08,
-  title =	 {{Co-occurrence of Binge Eating Disorder with
-                  Psychiatric and Medical Disorders.}},
-  author =	 {Kristin Javaras and Harrison Pope and Justine
-                  Lalonde and Jacqueline Roberts and Yael Nillni and
-                  Nan Laird and Cynthia Bulik and Scott Crow and Susan
-                  McElroy and B. Timothy Walsh and others},
-  journal =	 {The Journal of Clinical Psychiatry},
-  volume =	 {69},
-  number =	 {2},
-  pages =	 {266-273},
-  year =	 {2008},
-  publisher =	 {J Clin Psychiatry}
-}
-
-@article{SinAdlMar03,
-  title =	 {{Subjective social status: its determinants and its
-                  association with measures of ill-health in the
-                  Whitehall II study}},
-  author =	 {Archana Singh-Manoux and Nancy E. Adler and Michael
-                  G. Marmot},
-  journal =	 {Social Science and Medicine},
-  volume =	 {56},
-  number =	 {6},
-  pages =	 {1321--1333},
-  year =	 {2003},
-  publisher =	 {Elsevier}
-}
-
-
-@Article{RogHumKru05,
-  author = 	 {Rogers, Richard and Hummer, Robert and Krueger, Patrick and Pampel, Fred},
-  title = 	 {Mortality Attributable to Cigarette Smoking in the United States},
-  journal = 	 {Population and Development Review},
-  year = 	 2005,
-  volume =	 31,
-  number = 	 2,
-  pages = 	 {259-292},
-  month = 	 {June}
-}
-
-@Article{MehCha08,
-  author = 	 {Mehta, Neil and Chang, Virginia},
-  title = 	 {Mortality Attributable to Obesity Among Middle-Aged Adults in the United States},
-  journal = 	 {Demography},
-  year = 	 {Forthcoming}
-}
-
-@Article{StuRinAnd04,
-  author = 	 {Sturm, Roland and Ringel, Jeanne and Andreyeva, Tatiana},
-  title = 	 {Increasing Obesity Rates and Disability Trends},
-  journal = 	 {Health Affairs},
-  year = 	 2004,
-  volume = 	 23,
-  number = 	 2,
-  pages = 	 {199-205}
-}
-
-@Book{SloSmiTay03,
-  author = 	 {Sloan, Frank and Smith, Kerry and Taylor, Donald},
-    title = 	 {The Smoking Puzzle: Information, Risk, Perception, and Choice},
-  publisher = 	 {Harvard University Press},
-  year = 	 2003,
-  address = 	 {Cambridge, Mass.}
-}
-
-@Article{RyaZweOra92,
-  author = 	 {Ryan, James and Zwerling, Craig and Orav, Endel John},
-  title = 	 {Occupational Risks Associated with Cigarette Smoking{:} A Prospective Study},
-  journal = 	 {American Journal of Public Health},
-  year = 	 1992,
-  volume = 	 82,
-  number = 	 1,
-  pages = 	 {29-32},
-}
-
-
-@Article{LevGusVel97,
-  author = 	 {Levine, Phillip and Gustafson, Tara and Velenchik, Ann},
-  title = 	 {More Bad News for Smokers? The Effects of Cigarette Smoking on Wages},
-  journal = 	 {Industrial and Labor Relations Review},
-  year = 	 1997,
-  volume = 	 50,
-  number = 	 3,
-  pages = 	 {493-509}
-}
-
-@Book{SloOstPic04,
-  author = 	 {Sloan, Frank and Ostermann, Jan and Picone, Gabriel and Conover, Christopher and Taylor, Donald},
-    title = 	 {The Price of Smoking},
-  publisher = 	 {The MIT Press},
-  year = 	 2004,
-  address = 	 {Cambridge, Mass.}
-}
-
-@Article{DolPetWhe94,
-  author = 	 {Doll, Richard and Peto, Richard and Wheatley, Keith and Gray, Richard and Sutherland, Isabelle},
-  title = 	 {Mortality in Relation to Smoking: 40 Years' Observations on Male British Doctors},
-  journal = 	 {British Medical Journal},
-  year = 	 1994,
-  volume = 	 309,
-  number = 	 6959,
-  pages = 	 {901-911}
-}
-
-@book{Mayhew91,
-  title={{Divided We Govern: Party Control, Lawmaking and Investigations}},
-  author={David R. Mayhew},
-  year={1991},
-  publisher={Yale University Press},
-  address={New Haven, CT}
-}
-
-@article{Sebastiani02,
-  title =	 {{Machine learning in automated text categorization}},
-  author =	 {Fabrizio Sebastiani},
-  journal =	 {ACM Computing Surveys (CSUR)},
-  volume =	 {34},
-  number =	 {1},
-  pages =	 {1--47},
-  year =	 {2002},
-  publisher =	 {ACM New York, NY, USA}
-}
-
-@article{PanLee08,
-  title={{Opinion Mining and Sentiment Analysis}},
-  author={Bo Pang and Lillian Lee},
-  journal={Foundations and Trends in Information Retrieval},
-  volume={2},
-  number={1},
-  pages={1--135},
-  year={2008}
-}
-
-
-@Book{Rudalevige02,
-author = {Andrew Rudalevige},
-title = {Managing the President's Program},
-publisher = {Princeton University Press},
-year = {2002},
-address = {Princeton, NJ}
-}
-
-@Book{Kellstedt03,
-author = {Paul M. Kellstedt},
-title = {The Mass Media and the Dynamics of American Racial Attitudes},
-publisher = {Cambridge University Press},
-year = {2003},
-address = {New York, NY}
-}
-
-@Book{Gilens99,
-author = {Martin Gilens},
-title = {Why Americans Hate Welfare},
-publisher = {University of Chicago Press},
-year = {1999},
-address = {Chicago, IL}
-}
-
-@unpublished{NovRau08,
-  title =	 {The Intergenerational Transfer of Public Pension
-                  Promises},
-  author =	 {Robert Novy-Marx and Joshua D. Rauh},
-  journal =	 {Working Paper},
-  year =	 {2008},
-  note =	 {http://www.nber.org/papers/w14343}
-}
-
-@Article{Helmond08,
-  author =	 {Anne Helmond},
-  title =	 {How Many Blogs Are There? Is Someone Still
-                  Counting?},
-  journal =	 {The Blog Herald},
-  year =	 2008,
-  number =	 {2/11},
-  note =	
-                  {http://www.blogherald.com/2008/02/11/how-many-blogs-are-there-is-someone-still-counting/}
-}
-
-@book{Mendelberg01,
-  title={{The Race Card: Campaign Strategy, Implicit Messages, and the Norm of Equality}},
-  author={Tali Mendelberg},
-  year={2001},
-  publisher={Princeton University Press},
-  address={Princeton, NJ}
-}
-
-@book{Gerring98,
-  title={{Party Ideologies in America, 1828-1996}},
-  author={John Gerring},
-  year={1998},
-  publisher={Cambridge University Press},
-  address={New York}
-}
-
-@book{Thompson02,
-  title={{Sampling}},
-  author={Steven K. Thompson},
-  year={2002},
-  publisher={John Wiley and Sons},
-  address={New York}
-}
-
-@book{HilShi08,
-  title={{The Persuadable Voter: Wedge Issues in Presidential Campaigns}},
-  author={Sunshine Hillygus and Todd G. Shields},
-  year={2008},
-  publisher={Princeton University Press},
-  address={Princeton, NJ}
-}
-
-@book{BraSudWan04,
-  title =	 {{Asking Questions: The Definitive Guide to
-                  Questionnaire Design}},
-  author =	 {Norman M. Bradburn and Seymour Sudman and Brian
-                  Wansink},
-  year =	 {2004},
-  publisher =	 {Jossey-Bass},
-  address =	 {San Francisco}
-}
-
-@book{ConPre86,
-  title =	 {{Survey Questions: Handcrafting the Standardized
-                  Questionnaire}},
-  author =	 {Jean M. Converse and Stanley Presser},
-  year =	 {1986},
-  publisher =	 {Sage Publications},
-  address =	 {Thousand Oaks, CA}
-}
-
-@book{GroCouLep04,
-  title =	 {{Survey Methodology}},
-  author =	 {Robert M. Groves and Mick P. Couper and James
-                  M. Lepkowski and Eleanor Singer and Roger
-                  Tourangeau},
-  year =	 {2004},
-  publisher =	 {Wiley},
-  address =	 {Hoboken, NJ}
-}
-
-@article{KriJoh08,
-  title =	 {{New evidence on cross-country differences in job
-                  satisfaction using anchoring vignettes}},
-  author =	 {Nicolai Kristensen and Edvard Johansson},
-  journal =	 {Labour Economics},
-  volume =	 {15},
-  number =	 {1},
-  pages =	 {96--117},
-  year =	 {2008}
-}
-
-@Article{GupKriPoz08,
-  author =	 {Nabanita Datta Gupta and Nicolai Kristensen and
-                  Dario Pozzoli},
-  title =	 {External Validation of the Use of Vignettes in
-                  Cross-Country Health Studies},
-  journal =	 { },
-  year =	 2008,
-  note =	 {Danish National Centre for Social Research}
-}
-
-@article{ColRosQui00,
-  title =	 {{Prospective validation of a standardized
-                  questionnaire for estimating childhood mortality and
-                  morbidity due to pneumonia and diarrhoea}},
-  author =	 {Coldham, C. and Ross, D. and Quigley, M. and Segura,
-                  Z. and Chandramohan, D.},
-  journal =	 {Tropical Medicine \& International Health},
-  volume =	 {5},
-  number =	 {2},
-  pages =	 {134--144},
-  year =	 {2000}
-}
-
-@Article{MorDaw06,
-  author =	 {Morera, Osvaldo and Dawes, Robyn},
-  title =	 {Clinical and Statistical Prediction After 50 Years:
-                  A Dedication to Paul Meehl},
-  journal =	 {Journal of Behavioral Decision Making},
-  year =	 2006,
-  volume =	 19,
-  pages =	 {409-412}
-}
-
-@article{OlsHayCar02,
-  author =	 {Olshansky, S. Jay and Hayflick, Leonard and Carnes,
-                  Bruce A.},
-  title =	 {{Position Statement on Human Aging}},
-  journal =	 {Journal of Gerontology Series A: Biological Sciences and Medical Sciences},
-  volume =	 {57},
-  number =	 {8},
-  pages =	 {B292-297},
-  year =	 {2002}
-}
-
-@article{Fries80,
-  author =	 {Fries, JF},
-  title =	 {{Aging, natural death, and the compression of
-                  morbidity}},
-  journal =	 {New England Journal of Medicine},
-  volume =	 {303},
-  number =	 {3},
-  pages =	 {130-135},
-  year =	 {1980}
-}
-
-@article{WarCarHaw08,
-  author =	 {Wardle, Jane and Carnell, Susan and Haworth, Claire
-                  MA and Plomin, Robert},
-  title =	 {{Evidence for a strong genetic influence on
-                  childhood adiposity despite the force of the
-                  obesogenic environment}},
-  journal =	 {American Journal of Clinical Nutrition},
-  volume =	 {87},
-  number =	 {2},
-  pages =	 {398-404},
-  year =	 {2008}
-}
-
-@Article{MokForBow01,
-  author =	 {Mokdad, Ali and Ford, Earl and Bowman, Barbara and
-                  Dietz, William and Vinicor, Frank and Bales,
-                  Virginia and Marks, James},
-  title =	 {Prevalence of Obesity, Diabetes, and Obesity-Related
-                  Health Risk Factors, 2001},
-  journal =	 {Journal of the American Medical Association},
-  year =	 2003,
-  volume =	 289,
-  pages =	 {76-79}
-}
-
-@article{FonRedWan03,
-  author =	 {Fontaine, Kevin R. and Redden, David T. and Wang,
-                  Chenxi and Westfall, Andrew O. and Allison, David
-                  B.},
-  title =	 {{Years of Life Lost Due to Obesity}},
-  journal =	 {Journal of the American Medical Association},
-  year =	 2003,
-  volume =	 {289},
-  number =	 {2},
-  pages =	 {187-193}
-}
-
-@article{AllWanRed03,
-  author =	 {Allison, David B. and Wang, Chenxi and Redden, David
-                  T. and Westfall, Andrew O. and Fontaine, Kevin
-                  R.},
-  title =	 {{Obesity and Years of Life Lost---Reply}},
-  journal =	 {Journal of the American Medical Association},
-  year =	 2003,
-  volume =	 {289},
-  number =	 {14},
-  pages =	 {1777-1778}
-}
-
-@article{WesAraOls04,
-  author =	 {Wessel, Timothy R. and Arant, Christopher B. and
-                  Olson, Marian B. and Johnson, B. Delia and Reis,
-                  Steven E. and Sharaf, Barry L. and Shaw, Leslee
-                  J. and Handberg, Eileen and Sopko, George and
-                  Kelsey, Sheryl F. and Pepine, Carl J. and Bairey
-                  Merz, C. Noel},
-  title =	 {{Relationship of Physical Fitness vs Body Mass Index
-                  With Coronary Artery Disease and Cardiovascular
-                  Events in Women}},
-  journal =	 {Journal of the American Medical Association},
-  volume =	 {292},
-  number =	 {10},
-  pages =	 {1179-1187},
-  year =	 {2004}
-}
-
-@article{BlaChu04,
-  author =	 {Blair, Steven N. and Church, Tim S.},
-  title =	 {{The Fitness, Obesity, and Health Equation: Is
-                  Physical Activity the Common Denominator?}},
-  journal =	 {Journal of the American Medical Association},
-  volume =	 {292},
-  number =	 {10},
-  pages =	 {1232-1234},
-  year =	 {2004}
-}
-
-@Article{FreSigRaj06,
-  author =	 {Freedman, Michal and Sigurdson, Alice and Rajaraman,
-                  Preetha and Doody, Michele and Linet, Martha and
-                  Ron, Elaine},
-  title =	 {The Mortality Risk of Smoking and Obesity Combined},
-  journal =	 {American Journal of Preventive Medicine},
-  year =	 2006,
-  volume =	 31,
-  number =	 5,
-  pages =	 {355-362}
-}
-
-@Article{GreCheCad05,
-  author =	 {Gregg, Edward and Cheng, Yiling and Cadwell, Betsy
-                  and Imperatore, Giuseppina and Williams, Desmond and
-                  Flegal, Katherine and Narayan, Venkat and
-                  Williamson, David},
-  title =	 {Secular Trends in Cardiovascular Disease Risk
-                  Factors According to Body Mass Index in US Adults},
-  journal =	 {Journal of the American Medical Association},
-  year =	 2005,
-  volume =	 293,
-  number =	 15,
-  pages =	 {1868-1874}
-}
-
-@Article{MurLopFee07,
-  author =	 {Christopher J.L.\ Murray and Alan D. Lopez and
-                  Dennis M. Feehan and Shanon T. Peter and Gonghuan
-                  Yang},
-  title =	 {Validation of the Symptom Pattern Method for
-                  Analyzing Verbal Autopsy Data},
-  journal =	 {PLoS Medicine},
-  year =	 2007,
-  volume =	 4,
-  number =	 11,
-  pages =	 {1739--1753},
-  month =	 {November}
-}
-
-@Article{CurDurEil04,
-  author = 	 {Currie, Iain and Durban, Maria and Eilers, Paul},
-  title = 	 {Smoothing and Forecasting Mortality Rates},
-  journal = 	 {Statistical Modelling},
-  year = 	 2004,
-  volume =	 4,
-  pages =	 {279-298}
-}
-
-@Article{KirCur09,
-  author = 	 {Kirkby, James and Currie, Iain },
-  title = 	 {Smooth Models of Mortality with Period Shocks},
-  journal = 	 {Statistical Modelling},
-  year = 	 {Forthcoming}
-}
-
-@Article{INDEPTH03,
-  author =	 {{INDEPTH Network}},
-  title =	 {Standardised Verbal Autopsy Questionnaire},
-  journal =	 { },
-  note =	 {{http://indepth-network.org}},
-  year =	 2003
-}
-
-@Article{SetRaoHem06,
-  author =	 {PW Setel and C Rao and Y Hemed and DR Whiting and G
-                  Yang et al.},
-  title =	 {Core Verbal Autopsy Procedures with Comparative
-                  Validation Results from Two Countries},
-  journal =	 {PLoS Medicine},
-  year =	 2006,
-  volume =	 3,
-  number =	 8,
-  pages =	 {e268},
-  note =	 {doi:10.1371/journal.pmed.0030268}
-}
-
-@Book{WHO07,
-  author =	 {{World Health Organization}},
-  title =	 {Verbal Autopsy Standards: Ascertaining and
-                  Attributing Causes of Death},
-  publisher =	 {World Health Organization},
-  address =	 {Geneva},
-  year =	 2007
-}
-
-@Article{ThaKalBaq08,
-  author =	 {N Thatte and H D Kalter and A H Baqui and E M
-                  Williams and G L Darmstadt},
-  title =	 {Ascertaining causes of neonatal deaths using verbal
-                  autopsy: current methods and challenges},
-  journal =	 {Journal of Perinatology},
-  year =	 2008,
-  pages =	 {1--8},
-  month =	 {December}
-}
-
-@TechReport{LeeAndTul03,
-  author = 	 {Lee, Ronald and Anderson, Michael and Tuljapurkar, Shripad},
-  title = 	 {Stochastic Forecasts of the Social Security Trust Fund},
-  institution =  {Center for the Economics and Demography of Aging},
-  year = 	 2003,
-  type =	 {2003-0005CL}
-}
-
-@TechReport{Holmer03,
-  author =	 {Holmer, Martin},
-  title =	 {Methods for Stochastic Trust Fund Projection},
-  institution =	 {Policy Simulation Group},
-  year =	 2003,
-  month =	 {January}
-}
-
-@TechReport{CBO01,
-  author = 	 {{Congressional Budget Office}},
-  title = 	 {Uncertainty in Social Security's Long-Term Finances: A Stochastic Analysis},
-  institution =  {Congressional Budget Office},
-  year = 	 2001,
-  month =	 {December}
-}
-
-@TechReport{BurMan03,
-  author = 	 {Burdick, Clark and Manchester, Joyce},
-  title = 	 {Stochastic Models of the Social Security Trust Funds},
-  institution =  {Division of Economic Research, Social Security Administration},
-  year = 	 2003,
-  type =	 {Research and Statistics Note},
-  number =	 {2003-01}
-}
-
-@Manual{Harrell08,
-  title =	 {Hmisc: Harrell Miscellaneous},
-  author =	 {Frank E Harrell Jr and with contributions from many
-                  other users.},
-  year =	 {2008},
-  note =	 {R package version 3.5-2,
-                  http://biostat.mc.vanderbilt.edu/s/Hmisc}
-}
-
-@article{Heckman90,
-  title =	 {{Varieties of selection bias}},
-  author =	 {Heckman, James},
-  volume =	 {80},
-  number =	 {2},
-  journal =	 {The American Economic Review},
-  pages =	 {313--318},
-  year =	 {1990}
-}
-
-@article{TheGol61,
-  title={{On pure and mixed statistical estimation in economics}},
-  author={Theil, H. and Goldberger, AS},
-  journal={International Economic Review},
-  volume={2},
-  pages={65--78},
-  year={1961}
-}
-@Book{Hsiao03,
-  author =	 {C. Hsiao},
-  title = 	 {Analysis of Panel Data},
-  publisher = 	 {Cambridge University Press},
-  year = 	 2003,
-  address =	 {New York}
-}
-
-@article{BanDhiGho05,
-  title=	{{Clustering on the Unit Hypersphere Using von Mises-Fisher Distributions}},
-  author=	{Arindam Banerjee and Inderjit Dhillon and Joydeep Ghosh and Suvrit Sra},
-  journal=	{Journal of Machine Learning Research},
-  volume=	{6},
-  pages=	{1345-1382},
-  year=		{2005}
-}
-
-@article{BleJor06,
-  title=	{{Variational Inference for Dirichlet Process Mixtures}},
-  author=	{David Blei and Michael Jordan},
-  journal=	{Bayesian Analysis},
-  volume=	{1},
-  number = 	{1},
-  pages=	{121--144},
-  year=		{2006}
-}
-
-@article{Cowan00,
-  title=	{{The Magical Number 4 in Short Term Memory: A Reconsideration of Mental Storage Capacity}},
-  author=	{Nelson Cowan},
-  journal=	{Behavioral and Brain Sciences},
-  volume=	{24},
-  pages=	{87--185},
-  year=		{2000}
-}
-
-@article{Dhillon01,
-  title=	{{Co-clustering Documents and Words Using Bipartite Spectral Graph Partitioning}},
-  author=	{Inderjit Dhillon},
-  journal=	{Proceedings of the Seventh ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
-  pages=	{89--98},
-  year=		{2001}
-}
-
-@article{HoPep02,
-  title=	{{Simple Explanation of the No Free Lunch Theorem and Its Implications}},
-  author=	{Y Ho and D Pepyne},
-  journal=	{Journal of Optimization Theory and Applications},
-  volume=	{115},
-  number = 	{3},
-  pages=	{549--570},
-  year=		{2002}
-}
-
-@MISC{JonWilBau09,
-  author =	 {Bryan Jones and John Wilkerson and Frank
-                  Baumgartner},
-  title =	 {{The Policy Agendas Project}},
-  year =	 2009,
-  note =	 {http://www.policyagendas.org}
-}
-
-
-@article{JorGhaJaa99,
-  title=	{{An Introduction to Variational Methods for Graphical Models}},
-  author=	{Michael Jordan and Zoubin Ghahramani and Tommi Jaakkola and Lawrence Saul},
-  journal=	{Machine Learning},
-  volume=	{37},
-  pages=	{183--233},
-  year=		{1999}
-}
-
-@Book{KauRou90,
-  author =	 {Leonard Kaufman and Peter Rousseeuw},
-  title = 	 {Finding Groups in Data: An Introduction to Cluster Analysis},
-  publisher = 	 {Wiley},
-  year = 	 {1990},
-  address =	 {New York}
-}
-
-@Book{Kohonen01,
-  author =	 {Teuvo Kohonen},
-  title = 	 {Self-Organizing Maps},
-  publisher = 	 {Springer},
-  year = 	 {2001},
-  address =	 {New York}
-}
-
-@MISC{Lewis99,
- author = 	{David Lewis},
- title = 	{{Reuters-21578 Text Categorization Test Collection, Distribution 1.0}},
- year = 	{1999}
-}
-
-@Book{Mackay03,
-  author =	 {David MacKay},
-  title = 	 {Information Theory, Inference, and Learning Algorithms},
-  publisher = 	 {Cambridge University Press},
-  year = 	 {2003},
-  address =	 {Cambridge}
-}
-
-@article{Meila07,
-  title=	{{Comparing Clusterings: An Information Based Distance}},
-  author=	{Marina Meila},
-  journal=	{Journal of Multivariate Analysis},
-  volume=	{98},
-  number = 	{5},
-  pages=	{873--895},
-  year=		{2007}
-}
-
-@article{Miller56,
-  title=	{{The Magical Number Seven, Plus or Minus Two: Some Limits on Our Capacity for Processing Information}},
-  author=	{George Miller},
-  journal=	{Psychological Review},
-  volume=	{63},
-  pages=	{81--97},
-  year=		{1956}
-}
-
-@article{NgJorWei02,
-  title=	{{On Spectral Clustering: Analysis and an Algorithm}},
-  author=	{Andrew Ng and Michael Jordan and Yair Weiss},
-  journal=	{Advances in Neural Information Processing Systems 14: Proceedings of the 2002 Conference},
-  year=		{2002}
-}
-
-@article{Sammon69,
-  title=	{{A Nonlinear Mapping for Data Structure Analysis}},
-  author=	{John Sammon},
-  journal=	{IEEE Transactions on Computers},
-  volume=	{18},
-  number = 	{5},
-  pages=	{401--409},
-  year=		{1969}
-}
-
-@article{ShiMal00,
-  title=	{{Normalized Cuts and Image Segmentation}},
-  author=	{J Shi and J Malik},
-  journal=	{IEEE Transactions on Pattern Analysis and Machine Intelligence},
-  volume=	{22},
-  number = 	{8},
-  pages=	{888--905},
-  year=		{2000}
-}
-
-
-@Book{Simon57,
-  author =	 {Herbert Simon},
-  title = 	 {Models of Man},
-  publisher = 	 {Wiley},
-  year = 	 {1957},
-  address =	 {New York}
-}
-
-@article{StrGro02,
-  title=	{{Cluster Ensembles: A Knowledge Reuse Framework for Combining Multiple Partitions}},
-  author=	{Alexander Strehl and Joydeep Ghosh},
-  journal=	{Journal of Machine Learning Research},
-  volume=	{3},
-  pages=	{583--617},
-  year=		{2002}
-}
-
-@article{vonLuxburg07,
-  title=	{{A Tutorial on Spectral Clustering}},
-  author=	{Ulrike von Luxburg},
-  journal=	{Statistics and Computing},
-  volume=	{17},
-  number = 	{4},
-  pages=	{395--416},
-  year=		{2007}
-}
-
-@Book{Watanabe69,
-  title =	 {Knowing and Guessing: A Quantitative Study of Inference and Information},
-  author = 	 {Satosi Watanabe},
-  publisher = 	 {Wiley},
-  year = 	 {1969},
-  address =	 {New York}
-}
-
-@article{WolMac97,
-  title=	{{No Free Lunch Theorems for Optimization}},
-  author=	{DH Wolpert and WG Macready},
-  journal=	{IEEE Transactions on Evolutionary Computation},
-  volume=	{1},
-  number = 	{1},
-  pages=	{67--82},
-  year=		{1997}
-}
-
-@Book{Bailey94,
-  author =	 {Kenneth D. Bailey},
-  title =	 {Typologies and taxonomies: an introduction to
-                  classification techniques},
-  publisher =	 {Sage},
-  year =	 1994,
-  address =	 {Beverly Hills}
-}
-
-@article{Spivey08,
-  title =	 {{A generalized recurrence for Bell numbers}},
-  author =	 {Spivey, M.Z.},
-  journal =	 {J. Integer Sequences},
-  volume =	 {11},
-  year =	 {2008}
-}
-
-@article{FraRaf02,
-  title =	 {{Model-based clustering, discriminant analysis, and
-                  density estimation}},
-  author =	 {Fraley, C. and Raftery, A.E.},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {97},
-  number =	 {458},
-  pages =	 {611--631},
-  year =	 {2002}
-}
-
-@Book{GanMaWu07,
-  author =	 {Guojun Gan and Chaoqun Ma and Jianhong Wu},
-  title = 	 {Data Clustering: Theory, Algorithms, and Applications},
-  publisher = 	 {SIAM},
-  year = 	 2007,
-  address =	 {Philadelphia}
-}
-
-@Article{DiaGoeHol08,
-  author = 	 {Persi Diaconis and Sharad Goel and Susan Holmes},
-  title = 	 {Horseshoes in multidimensional scaling and local kernel methods},
-  journal = 	 {Annals of Applied Statistics},
-  year = 	 2008,
-  volume =	 2,
-  number =	 3,
-  pages =	 {777--807}
-}
-
-@article{Almon65,
-  title =	 {{The distributed lag between capital appropriations
-                  and expenditures}},
-  author =	 {Almon, Shirley},
-  journal =	 {Econometrica: Journal of the Econometric Society},
-  pages =	 {178--196},
-  year =	 {1965},
-  publisher =	 {The Econometric Society}
-}
-@Book{ManRagSch08,
-  author =	 {Christopher D. Manning and Prabhakar Raghavan and
-                  Hinrich Sch{\"u}tze},
-  title =	 {Introduction to Information Retrieval},
-  publisher =	 {Cambridge University Press},
-  year =	 2008,
-  address =	 {NY}
-}
-
-@book{Mayhew74,
-  title =	 {{Congress: The Electoral Connection}},
-  author =	 {Mayhew, D.},
-  publisher =	 {Yale University Press},
-  address =	 {New Haven},
-  year =	 {1974}
-}
-
-@Book{Fiorina89,
-  author =	 {Morris Fiorina},
-  title =	 {Congress, Keystone of the Washington Establishment},
-  publisher =	 {Yale University Press},
-  year =	 {1989},
-  address =	 {New Haven}
-}
-
-@article{EulKar77,
-  title =	 {{The Puzzle of Representation: Specifying Components
-                  of Responsiveness}},
-  author =	 {Heinz Eulau and Paul Karps},
-  journal =	 {Legislative Studies Quarterly},
-  volume =	 {2},
-  number =	 {3},
-  pages =	 {233-254},
-  year =	 {1977}
-}
-
-@article{Yiannakis82,
-  title =	 {{House Members' Communication Styles: Newsletters and
-                  Press Releases}},
-  author =	 {Diane Evans Yiannakis},
-  journal =	 {Journal of Politics},
-  volume =	 {44},
-  number =	 {4},
-  pages =	 {1049-1071},
-  year =	 {1982}
-}
-
-@article{MonColQui08,
-  title =	 {{Fightin' Words: Lexical Feature Selection and
-                  Evaluation for Identifying the Content of Political
-                  Conflict}},
-  author =	 {Burt Monroe and Michael Colaresi and Kevin Quinn},
-  journal =	 {Political Analysis},
-  volume =	 {16},
-  number =	 {4},
-  pages =	 {372-403},
-  year =	 {2008}
-}
-
-@Book{TayCri04,
-  author =	 {John Shawe-Taylor and Nello Cristianini},
-  title =	 {Kernel Methods for Pattern Analysis},
-  publisher =	 {Cambridge University Press},
-  year =	 {2004},
-  address =	 {Cambridge}
-}
-@Misc{Billington07,
-  author =	 {James H. Billington},
-  title =	 {Testimony to Congress (House Subcommittee on
-                  Legislative Branch)},
-  howpublished =
-                  {http://www.loc.gov/about/welcome/speeches/digital/digitalage.html},
-  month =	 {20 March},
-  year =	 2007
-}
-
-@book{AbeLedLew08,
-  title =	 {{Blown to bits: your life, liberty, and happiness
-                  after the digital explosion}},
-  author =	 {Abelson, H. and Ledeen, K. and Lewis, H.},
-  year =	 {2008},
-  publisher =	 {Addison-Wesley Professional}
-}
-@Article{GilCas09,
-  author = 	 {Jeff Gill and George Casella},
-  title = 	 {Nonparametric Priors for Ordinal Bayesian Social
-                  Science Models: Specification and Estimation},
-  journal = 	 {Journal of the American Statistical Association},
-  year = 	 2009,
-  volume =	 104,
-  number =	 486,
-  pages =	 {1--12},
-  month =	 {June}
-}
-
-@article{SchGer97,
-  title={{Empirical indicators of crisis phase in the Middle East, 1979-1995}},
-  author={Schrodt, P.A. and Gerner, D.J.},
-  journal={Journal of Conflict Resolution},
-  pages={529--552},
-  year={1997},
-  publisher={Sage Publications}
-}
-
-@article{Guttman50,
-  title =	 {{The problem of attitude and opinion measurement}},
-  author =	 {Guttman, L.},
-  journal =	 {Measurement and prediction},
-  volume =	 {4},
-  year =	 {1950}
-}
-
-
-@article{MilKub05,
-  title =	{{Why the move to free trade? Democracy and trade policy in the developing countries}},
-  author =	{Helen Milner and Keiko Kubota},
-  journal = 	{International Organization},
-  volume = 	{59},
-  number = 	{1},
-  pages = 	{107--143},
-  year = 	{2005}
-}
-
-
-@Book{GutTho96,
-  author =	 {Amy Gutmann and Dennis Thompson},
-  title =	 {Democracy and Disagreement},
-  publisher =	 {Harvard University Press},
-  year =	 {1996},
-  address =	 {Cambridge, MA}
-}
-
-@Unpublished{MikLavBen08,
-  author =	 {Slava Mikhaylov and Michael Laver and Kenneth
-                  Benoit},
-  title =	 {Coder Reliability and Misclassification in
-                  Comparative Manifesto Project Codings},
-  note =	 {Paper presented at the Midwest Political Science
-                  Association, Chicago},
-  month =	 {April},
-  year =	 2008
-}
-
-@conference{CarElhNgu06,
-  title={{Meta clustering}},
-  author={Rich Caruana and Mohamed Elhawary and Nam Nguyen and Casey Smith},
-  booktitle={ICDM'06. Sixth International Conference on Data Mining},
-  pages={107--118},
-  year={2006}
-}
-
-@conference{CarNgu07,
-  title={{Consensus clustering}},
-  author={Rich Caruana and Nam Nguyen},
-  booktitle={ICDM'07. Seventh International Conference on Data Mining},
-  year={2007}
-}
-
-@conference{FerBro03,
-  title={Random Projection for High Dimensional Data Clustering: A Cluster Ensemble Approach},
-  author={Xiaoli Fern and Carla Brodley},
-  booktitle={Proceedings of the Twentieth International Conference on Machine Learning},
-  year={2003}
-}
-
-
-@conference{LawTopJai04,
-  title={Multi-objective Data Clustering},
-  author={Martin Law and Alexander Topchy and Anil Jain},
-  booktitle={IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
-  year={2004}
-}
-
-
-@conference{BaeBai06,
-  title={A Novel Approach for the Extraction of an Alternate Clustering of High Quality and High Dissimilarity},
-  author={Eric Bae and James Bailey},
-  booktitle={Proceedings of the IEEE International Conference on Data Mining},
-  year={2006}
-}
-
-
-@conference{FreJai02,
-  title={Data Clustering using Evidence Accumulation},
-  author={Ana Fred and Anil Jain},
-  booktitle={Proceedings of the 16th International Conference on Pattern Recognition},
-  year={2002}
-}
-
-@conference{GioManTsa05,
-  title={Clustering aggregation},
-  author={A Gionis and H Mannila and P Tsaparas},
-  booktitle={Proceedings of the 21st International Conference on Data Engineering},
-  year={2005}
-}
-
-@conference{TopJaiPun03,
-  title={Combining Multiple Weak Clusterings},
-  author={A Topchy and AK Jain and W Punch},
-  booktitle={Proceedings IEEE International Conference on Data Mining},
-  year={2003}
-}
-
-@conference{TopJaiPun03b,
-  title={A Mixture Model for Clustering Ensembles},
-  author={A Topchy and AK Jain and W Punch},
-  booktitle={Proceedings SIAM International Conference on Data Mining},
-  year={2004}
-}
-
-@conference{Kleinberg03,
-  title={An Impossibility Theorem for Clustering},
-  author={Jon Kleinberg},
-  booktitle={Advances in Neural Information Processing Systems 15: Proceedings of the 2002 Conference},
-  pages={463-470},
-  year={2003},
-}
-
-
-
- at article{Achen78,
-  title={Measuring Representation},
-  author={Christopher H. Achen},
-  journal={American Journal of Political Science},
-  pages={475--510},
-  year={1978},
-}
-
-
- at INCOLLECTION{LazBar65,
-   author= {Paul Lazarsfeld and Allen Barton},
-   title = {Qualitative Measurement in the Social Sciences: Classification, Typologies, and Indices},
-   booktitle = {The Policy Sciences},
-   publisher = {Stanford University Press},
-   year = {1965},
-   editor = {Daniel Lerner and Harold Lasswell},
-}
-
- at article{Pitman97,
-  title={Some Probabilistic Aspects of Set Partitions},
-  author={Jim Pitman},
-  journal={The American Mathematical Monthly},
-  pages={201--209},
-  year={1997},
-}
-
-
- at Article{ZhaSma09,
-  author = 	 {Kai Zhang and Dylan S.\ Small},
-  title = 	 {Comment: The Essential Role of Pair Matching in
-                  Cluster-Randomized Experiments, with Application to
-                  the Mexican Universal Health Insurance Program},
-  journal = 	 {Statistical Science},
-  year = 	 {2009, forthcoming},
-  OPTkey = 	 {},
-  OPTvolume = 	 {},
-  OPTnumber = 	 {},
-  OPTpages = 	 {},
-  OPTmonth = 	 {},
-  OPTnote = 	 {},
-  OPTannote = 	 {}
-}
-
- at Article{HilSco09,
-  author = 	 {Jennifer Hill and Marc Scott},
-  title = 	 {Discussion of `The Essential Role of Pair Matching'},
-  journal = 	 {Statistical Science},
-  year = 	 {2009},
-  OPTkey = 	 {},
-  OPTvolume = 	 {},
-  OPTnumber = 	 {},
-  OPTpages = 	 {},
-  OPTmonth = 	 {},
-  OPTnote = 	 {},
-  OPTannote = 	 {}
-}
-
- at article{Imbens09,
-  title =	 {{Better LATE Than Nothing: Some Comments on Deaton
-                  (2009) and Heckman and Urzua (2009)}},
-  author =	 {Imbens, G.W.},
-  journal =	 {NBER Working Paper},
-  year =	 {2009}
-}
-
- at article{Ashenfelter78,
-  title =	 {{Estimating the effect of training programs on
-                  earnings}},
-  author =	 {Ashenfelter, Orley},
-  journal =	 {The Review of Economics and Statistics},
-  pages =	 {47--57},
-  year =	 {1978}
-}
-
- at article{Grimmer10,
-  author =	 {Justin Grimmer},
-  title =	 {A Bayesian Hierarchical Topic Model for Political
-                  Texts: Measuring Expressed Agendas in Senate Press
-                  Releases},
-  year =	 {2010},
-  journal =	 {Political Analysis},
-}
-
- at article{TehJorBea06,
-  title =	 {Hierarchical Dirichlet Processes},
-  author =	 { Y Teh and M Jordan and M Beal and D Blei},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {101},
-  number =	 {476},
-  pages =	 {1566--1581},
-  year =	 {2006},
-}
-
- at article{MilSto63,
-  title =	 {{Constituency influence in Congress}},
-  author =	 {Miller, W.E. and Stokes, D.E.},
-  journal =	 {The American Political Science Review},
-  pages =	 {45--56},
-  year =	 {1963},
-  publisher =	 {The American Political Science Association}
-}
-
- at book{Pitkin72,
-  title =	 {{The Concept of Representation}},
-  author =	 {Pitkin, Hanna F.},
-  year =	 {1972},
-  publisher =	 {University of California Press}
-}
-
- at Article{HorCoa82,
-  author = 	 {Horiuchi, Shiro and Coale, Ansley},
-  title = 	 {A Simple Equation for Estimating the Expectation of Life at Old Ages},
-  journal = 	 {Population Studies},
-  year = 	 1982,
-  volume =	 36,
-  number =	 2,
-  pages =	 {317-326}
-}
-
- at book{Ayres08,
-  title={{Super crunchers: why thinking-by-numbers is the new way to be smart}},
-  author={Ayres, Ian},
-  year={2008},
-  publisher={Bantam}
-}
-
- at Article{Wilmoth05,
-  author = 	 {Wilmoth, John},
-  title = 	 {Some methodological issues in mortality projection, based on an analysis of the US Social Security System},
-  journal = 	 {Genus},
-  year = 	 2005,
-  volume =	 61,
-  number =	 1,
-  pages =	 {179-211}
-}
-
- at TechReport{Romig08,
-  author = 	 {Romig, Kathleen},
-  title = 	 {Social Security: What Would Happen If the Trust Funds Ran Out?},
-  institution =  {Congressional Research Service},
-  year = 	 2008,
-  number =	 {RL33514}
-}
-
- at TechReport{SweNic08,
-  author = 	 {Swendiman, Kathleen and Nicola, Thomas},
-  title = 	 {Social Security Reform: Legal Analysis of Social Security Benefit Entitlement Issues},
-  institution =  {Congressional Research Service},
-  year = 	 2008,
-  number =	 {RL32822}
-}
-
- at Unpublished{BelSon09,
-  author = 	 {Beltr\'{a}n-S\'{a}nchez, Hiram and Soneji, Samir},
-  title = 	 {A Unifying Approach for Assessing Changes in Life Expectancy Associated with Changes in Mortality: The Case of Violent Deaths},
-  note = 	 {},
-  OPTkey = 	 {},
-  OPTmonth = 	 {},
-  year = 	 {2009},
-  OPTannote = 	 {}
-}
-
- at Article{Olshansky88,
-  author = 	 {Olshansky, S. Jay},
-  title = 	 {On Forecasting Mortality},
-  journal = 	 {The Milbank Quarterly},
-  year = 	 1988,
-  volume =	 66,
-  number =	 3,
-  pages =	 {482-530}
-}
-
-
- at TechReport{ChaWad05,
-  author = 	 {Chaplain, Chris and Wade, Alice},
-  title = 	 {Estimated OASDI Long-Range Financial Effects of Several Provisions Requested by the Social Security Advisory Board},
-  institution =  {Social Security Administration},
-  year = 	 2005,
-  note =	 {http://www.ssa.gov/OACT/solvency/provisions/index.html}
-}
-
- at article{Armstrong67,
-  title =	 {{Derivation of theory by means of factor analysis or
-                  Tom Swift and his electric factor analysis machine}},
-  author =	 {Armstrong, J.S.},
-  journal =	 {American Statistician},
-  pages =	 {17--21},
-  year =	 {1967},
-  publisher =	 {American Statistical Association}
-}
-
- at article{ImbAng94,
-  title={{Identification and estimation of local average treatment effects}},
-  author={Imbens, G.W. and Angrist, J.D.},
-  journal={Econometrica},
-  volume={62},
-  number={2},
-  pages={467--475},
-  year={1994}
-}
-
- at article{Little08,
-  author={Roderick Little},
-  title={Calibrated Bayes: A Bayes/Frequentist Roadmap},
-  journal={American Statistician},
-  volume={60},
-  number={1},
-  pages={1--11},
-  year={2008}
-}
-
- at book{McLThr08,
-	author={Geoffrey J. McLachlan and Thriyambakam Krishnan},
-	title={The EM Algorithm and Extensions, Second Edition},
-    year={2008},
-	publisher={New York: Wiley}
-}
-
- at article{Little95,
-  author={Roderick Little},
-  title={Modeling the Drop-Out Mechanism in Repeated-Measures Studies},
-  journal={Journal of the American Statistical Association},
-  volume={90},
-  number={431},
-  pages={1112--1121},
-  year={1995}
-}
-
- at book{MolVer05,
-  author={Geert Molenberghs and Geert Verbeke},
-  title={Models for Discrete Longitudinal Data},
-  year={2005},
-  publisher={New York: Wiley}
-}
-
- at article{DavShaSch01,
-  author={Adam Davey and Michael J.\ Shanahan and Joseph L.\ Schafer},
-  title={Correcting for selective nonresponse in the National Longitudinal Survey of Youth using multiple imputation},
-  journal={The Journal of Human Resources},
-  volume={36},
-  number={3},
-  pages={500--519},
-  year={2001}
-}
-
- at article{KacRagSch08,
-  author={Niko A. Kaciroti and Trivellore E. Raghunathan and M. Anthony Schork and Noreen M. Clark},
-  title={A Bayesian model for longitudinal count data with non-ignorable dropout},
-  journal={Journal of the Royal Statistical Society Series C-Applied Statistics},
-  volume={57},
-  number={5},
-  pages={521-534},
-  year={2008}
-}
-
- at article{SteCutRos09,
-  title =	 {{Forecasting the Effects of Obesity and Smoking on
-                  US Life Expectancy}},
-  author =	 {Stewart, S.T. and Cutler, D.M. and Rosen, A.B.},
-  journal =	 {The New England Journal of Medicine},
-  volume =	 {361},
-  number =	 {23},
-  pages =	 {2252-2260},
-  year =	 {2009}
-}
- at Article{ByaDamOue09,
-  AUTHOR =	 {Byass, Peter and D'Ambruoso, Lucia and Ouedraogo,
-                  Moctar and Qomariyah, S Nurul},
-  TITLE =	 {Assessing the repeatability of verbal autopsy for
-                  determining cause of death: two case studies among
-                  women of reproductive age in Burkina Faso and
-                  Indonesia},
-  JOURNAL =	 {Population Health Metrics},
-  VOLUME =	 {7},
-  YEAR =	 {2009},
-  NUMBER =	 {1},
-  PAGES =	 {6},
-  URL =		 {http://www.pophealthmetrics.com/content/7/1/6}
-}
-
- at article{Traxler97,
-  title =	 {{An Algorithm for Adaptive Mesh Refinement in N Dimensions}},
-  author =	 {Traxler, S.T.},
-  journal =	 {Computing},
-  volume =	 {59},
-  number =	 {1},
-  pages =	 {115-137},
-  year =	 {1997}
-}
-
- at TechReport{BelMil05,
-  author = 	 {Bell, Felicitie and Miller, Michael},
-  title = 	 {Life Tables for the United States Social Security Area 1900-2100},
-  institution =  {Social Security Administration Office of the Chief Actuary},
-  year = 	 2005,
-  number =	 {Actuarial Study No. 120}
-}
-
-
- at Article{TweCutRos09,
-  author = 	 {Stewart, S.T. and Cutler, D.M. and Rosen, A.B.},
-  title = 	 {Forecasting the Effects of Obesity and Smoking on Life Expectancy},
-  journal = 	 {The New England Journal of Medicine},
-  year = 	 {2009},
-  OPTkey = 	 {},
-  OPTvolume = 	 {361},
-  OPTnumber = 	 {23},
-  OPTpages = 	 {2252-2260},
-  OPTmonth = 	 {},
-  OPTnote = 	 {},
-  OPTannote = 	 {}
-}
-
- at article{Guolo08,
-	author = {Guolo, Annamaria},
-	title = {{Robust techniques for measurement error correction: a review.}},
-	journal = {{Statistical Methods in Medical Research}},
-	volume = {{17}},
-	number = {{6}},
-	pages = {555-80},
-	year = {{2008}},
-	doi = {{10.1177/0962280207081318}},
-}
-
- at Article{GrePal90,
-	author = {Green, Donald Philip and Palmquist, Bradley},
-	title = {{Of Artifacts and Partisan Instability}},
-	journal = {{American Journal of Political Science}},
-	volume = {{34}},
-	number = {{3}},
-	pages = {872},
-	year = {{1990}},
-	doi = {{10.2307/2111402}},
-}
-
- at Article{WilWil70,
-	author = {Wiley, David E. and Wiley, James A.},
-	title = {{The Estimation of Measurement Error in Panel Data}},
-	journal = {{American Sociological Review}},
-	volume = {{35}},
-	number = {{1}},
-	pages = {112},
-	year = {{1970}},
-	doi = {{10.2307/2093858}},
-}
-
- at article{Stefanski00,
-	author = {Stefanski, L. A.},
-	title = {{Measurement Error Models}},
-	journal = {{Journal of the American Statistical Association}},
-	volume = {{95}},
-	number = {{452}},
-	pages = {1353--1358},
-	year = {{2000}}
-}
-
- at article{BroVal96,
-	author = {Brownstone, David and Valletta, Robert G.},
-	title = {{Modeling Earnings Measurement Error: A Multiple Imputation Approach}},
-	journal = {{Review of Economics and Statistics}},
-	volume = {{78}},
-	number = {{4}},
-	pages = {705-717},
-	year = {{1996}},
-	tags = "measurement error, statistics"
-}
- at article{FreMidCar08,
-	pmid = {{18680172}},
-	author = {Freedman, Laurence S and Midthune, Douglas and Carroll, Raymond J and Kipnis, Victor},
-	title = {{A comparison of regression calibration, moment reconstruction and imputation for adjusting for covariate measurement error in regression.}},
-	journal = {{Stat Med}},
-	volume = {{27}},
-	number = {{25}},
-	pages = {5195-216},
-	year = {{2008}},
-	doi = {{10.1002/sim.3361}},
-	tags = "measurement error, statistics"
-}
-
- at article{ColChuGre06,
-	author = {Cole, Stephen R and Chu, Haitao and Greenland, Sander},
-	title = {{Multiple-imputation for measurement-error correction.}},
-	journal = {{International Journal of Epidemiology}},
-	volume = {{35}},
-	number = {{4}},
-	pages = {1074-81},
-	year = {{2006}},
-	doi = {{10.1093/ije/dyl097}},
-	tags = "measurement error, statistics"
-}
-
- at article{CasTuf03,
-	author={Casper, Gretchen and Claudiu Tufis},
-	title={Correlation Versus Interchangeability: The Limited Robustness of Empirical Findings on Democracy Using Highly Correlated Data Sets},
-	journal={Political Analysis},
-	volume={11},
-	year={2003},
-	pages={196-203},
-	number={2}
-}
- 
- at Article{OlsGolZheRow09,
-  author = 	 {Olshansky, S. Jay and Goldman, Dana and Zheng, Yuhui and Rowe, John},
-  title = 	 {Aging in America in the Twenty-first Century: Demographic Forecasts from the MacArthur Foundation Research Network on an Aging Society},
-  journal = 	 {Milbank Quarterly},
-  year = 	 2009,
-  volume =	 87,
-  number =	 4,
-  pages =	 {842-862}
-}
-
- at TechReport{SSAHist10,
-  author =	 {{Social Security Administration Historian's Office}},
-  title = 	 {Historical Background and Development of Social Security},
-  institution =  {Social Security Administration},
-  year = 	 2010,
-  note =	 {http://www.ssa.gov/history}
-}
-
- at Article{KanLauThaVau94,
-  author = 	 {Kannisto, Vaino and Lauritsen, Jens and Thatcher, A. Roger and Vaupel, James},
-  title = 	 {Reductions in Mortality at Advanced Ages: Several Decades of Evidence from 27 Countries},
-  journal = 	 {Population and Development Review},
-  year = 	 1994,
-  volume =	 20,
-  number =	 4,
-  pages =	 {793-810}
-}
-
- at Article{HucPluSpr93,
-  author =	 {Robert Huckfeldt and Eric Plutzer and John Sprague},
-  title =	 {Alternative Contexts of Political Behavior:
-                  Churches, Neighborhoods, and Individuals},
-  journal =	 {Journal of Politics},
-  year =	 1993,
-  volume =	 55,
-  number =	 2,
-  pages =	 {365--381},
-  month =	 {May}
-}
-
- at Article{KatKat10,
-  author = {Jonathan N. Katz and Gabriel Katz},
-  title = {{Correcting for Survey Misreports Using Auxiliary Information with an Application to Estimating Turnout}},
-  journal = {{American Journal of Political Science}},
-  year = 2010,
-  volume = 54,
-  number = 3,
-  pages = {{815--835}},
-}
-
- at Article{ImaYam10,
-  author =	 {Kosuke Imai and Teppei Yamamoto},
-  title =	 {Causal Inference with Differential Measurement
-                  Error: Nonparametric Identification and Sensitivity
-                  Analysis},
-  journal =	 {American Journal of Political Science},
-  year =	 2010,
-  volume =	 54,
-  number =	 2,
-  month =	 {April},
-  pages =	 {{543--560}}
-}
-
- at conference{BitSmiKra06,
-  title =	 {Statistically dual distributions in statistical
-                  inference},
-  author =	 {Bityukov, SI and Smirnova, VV and Krasnikov, NV and
-                  Taperechkina, VA},
-  booktitle =	 {Statistical Problems in Particle Physics,
-                  Astrophysics and Cosmology: proceedings of
-                  PHYSTAT05, Oxford, UK, 12-15 September 2005},
-  pages =	 {102--105},
-  year =	 {2006},
-  note =	 {http://arxiv.org/abs/math/0411462v2}
-}
-
- at Article{Rubin10,
-  author = 	 {Donald B. Rubin},
-  title = 	 {On the Limitations of Comparative Effectiveness Research},
-  journal = 	 {Statistics in Medicine},
-  year = 	 2010,
-  volume =	 29,
-  number =	 19,
-  pages =	 {1991-1995},
-  month =	 {August}
-}
-
- at Article{TunBenMcC10,
-  author =	 {Sean R. Tunis and Joshua Benner and Mark McClellan},
-  title =	 {Comparative effectiveness research: Policy context,
-                  methods development and research infrastructure},
-  journal =	 {Statistics in Medicine},
-  year =	 2010,
-  volume =	 29,
-  number =	 19,
-  pages =	 {1964-1976},
-  month =	 {August}
-}
-
- at Article{Rubin08,
-  author = 	 {Donald B. Rubin},
-  title = 	 {For Objective Causal Inference, Design Trumps Analysis},
-  journal = 	 {Annals of Applied Statistics},
-  year = 	 2008,
-  volume =	 2,
-  number =	 3,
-  pages =	 {808--840}
-}
-
- at Article{Rubin08b,
-  author =	 {Donald B. Rubin},
-  title =	 {Comment: The Design and Analysis of Gold Standard
-                  Randomized Experiments},
-  journal =	 {Journal of the American Statistical Association},
-  year =	 2008,
-  volume =	 103,
-  number =	 484,
-  pages =	 {1350--1353}
-}
- at Article{Austin09,
-  author =	 {Peter C. Austin},
-  title =	 {Some Methods of Propensity-Score Matching had
-                  Superior Performance to Others: Results of an
-                  Empirical Investigation and Monte Carlo simulations},
-  journal =	 {Biometrical Journal},
-  year =	 2009,
-  volume =	 51,
-  number =	 1,
-  pages =	 {171-184},
-  month =	 {February}
-}
-
- at Article{Stuart10,
-  author = 	 {Elizabeth A. Stuart},
-  title = 	 {Matching Methods for Causal Inference: A Review and
-                  a Look Forward},
-  journal = 	 {Statistical Science},
-  year = 	 2010,
-  volume =	 25,
-  number =	 1,
-  pages =	 {1--21}
-}
-
- at Article{Rubin80b,
-  author = 	 {Donald B. Rubin},
-  title = 	 {Bias Reduction using Mahalanobis Metric Matching},
-  journal = 	 {Biometrics},
-  year = 	 1980,
-  volume =	 36,
-  pages =	 {293--298}
-}
-
- at InCollection{StuRub07b,
-  author =	 {Elizabeth A. Stuart and Donald B. Rubin},
-  title =	 {Best practices in quasi-experimental designs:
-                  Matching methods for causal inference},
-  booktitle =	 {Best Practices in Quantitative Methods},
-  pages =	 {155--176},
-  publisher =	 {Sage},
-  year =	 2007,
-  editor =	 {Jason Osborne},
-  address =	 {New York}
-}
-
- at Article{Wilmoth05a,
-  author = 	 {Wilmoth, John},
-  title = 	 {On the Relationship Between Period and Cohort Mortality},
-  journal = 	 {Demographic Research},
-  year = 	 2005,
-  volume =	 13,
-  number =	 11,
-  pages =	 {231-280}
-}
-
- at Article{Guillot03,
-  author = 	 {Guillot, Michel},
-  title = 	 {The Cross-Sectional Average Length of Life (CAL): A Cross-Sectional Mortality Measure That Reflects the Experience of Cohorts},
-  journal = 	 {Population Studies},
-  year = 	 2003,
-  volume =	 57,
-  number =	 1,
-  pages =	 {41-54}
-}
-
- at Article{BonFee03,
-  author = 	 {Bongaarts, John and Feeney, Griffith},
-  title = 	 {Estimating Mean Lifetime},
-  journal = 	 {Proceedings of the National Academy of Sciences},
-  year = 	 2003,
-  volume =	 100,
-  number =	 23,
-  pages =	 {13127-13133}
-}
-
- at Article{Wilmoth95,
-  author = 	 {Wilmoth, John},
-  title = 	 {Are Mortality Projections Always More Pessimistic When Disaggregated by Cause of Death?},
-  journal = 	 {Mathematical Population Studies},
-  year = 	 1995,
-  volume =	 5,
-  number =	 4,
-  pages =	 {293-319}
-}
-
- at Article{Cawley04,
-  author = 	 {Cawley, John},
-  title = 	 {The Impact of Obesity on Wages},
-  journal = 	 {Journal of Human Resources},
-  year = 	 2004,
-  volume =	 39,
-  number =	 2,
-  pages =	 {451-474}
-}
-
- at Article{WolDagKan98,
-  author = 	 {Wolf, Philip and D\'Agostino, Ralph and Kannel, William and Bonita, Ruth and Belanger, Albert},
-  title = 	 {Cigarette Smoking as a Risk Factor for Stroke.  The Framingham Study.},
-  journal = 	 {Journal of the American Medical Association},
-  year = 	 1988,
-  volume =	 259,
-  number =	 7,
-  pages =	 {1025-1029}
-}
-
- at Article{BurGouBra03,
-  author = 	 {Burns, Paul and Gough, Stephan and Bradbury, Andrew},
-  title = 	 {Management of Peripheral Arterial Disease in Primary Care},
-  journal = 	 {British Medical Journal},
-  year = 	 2003,
-  volume =	 326,
-  pages =	 {584-588}
-}
-
- at TechReport{ReiSar08,
-  author = 	 {Reichmuth, Wolfgang and Sarferaz, Samad},
-  title = 	 {Bayesian Demographic Modeling and Forecasting: An Application to U.S.\ Mortality},
-  institution =  {Humboldt University},
-  year = 	 2008,
-  type =	 {SFB 649},
-  note =	 {Discussion Paper 2008-052}
-}
-
- at Article{WanPre09,
-  author = 	 {Wang, Haidong and Preston, Samuel},
-  title = 	 {Forecasting United States Mortality using Cohort Smoking Histories},
-  journal = 	 {Proceedings of the National Academy of Sciences},
-  year = 	 2009,
-  volume =	 106,
-  number =	 2,
-  pages =	 {393-398}
-}
-
- at Article{Platt05,
-  title =	 {Fastmap, MetricMap, and Landmark MDS are all
-                  Nystr{\"o}m algorithms},
-  author =	 {Platt, J.C.},
-  journal =	 {Proceedings of the 10th International Workshop on
-                  Artificial Intelligence and Statistics},
-  pages =	 {261--268},
-  year =	 {2005},
-}
-
- at Article{DesTen03,
-  title = {Global Versus Local Methods in Nonlinear Dimensionality Reduction},
-  author = {de Silva, V. and Tenenbaum, J.B.},
-  journal = {Proceedings of Neural Information Processing Systems},
-  volume = {15},
-  pages = {721-728},
-  year = {2003},
-}
-
- at article{DuFabGun99,
-  title =	 {{Centroidal Voronoi tessellations: applications and
-                  algorithms}},
-  author =	 {Du, Q. and Faber, V. and Gunzburger, M.},
-  journal =	 {SIAM Review},
-  pages =	 {637--676},
-  year =	 {1999}
-}
- at Article{CroMcCBur08,
-  author =	 {Jerry Cromwell and Nancy McCall and Joe Burton},
-  title =	 {Evaluation of Medicare Health Support Chronic
-                  Disease Pilot Program},
-  journal =	 {Health Care Financing Review},
-  year =	 2008,
-  volume =	 30,
-  number =	 1,
-  pages =	 {47--60}
-}
-
- at Article{McCCroUra08,
-  author =	 {Nancy McCall and Jerry Cromwell and Carol Urato and
-                  Donna Rabiner},
-  title =	 {Evaluation of Phase I of the Medicare Health Support
-                  Pilot Program Under Traditional Fee-for-Service
-                  Medicare: 18-Month Interim Analysis},
-  journal =	 {Report to Congress},
-  year =	 2008,
-  month =	 {October},
-  note =	 {CMS Contract No. 500-00-0022}
-}
-
- at Article{Foote09,
-  author =	 {Sandra M. Foote},
-  title =	 {Next Steps: How Can Medicare Accelerate The Pace Of
-                  Improving Chronic Care?},
-  journal =	 {Health Affairs},
-  year =	 2009,
-  volume =	 28,
-  number =	 1,
-  pages =	 {99--102},
-  note =	 {http://content.healthaffairs.org/cgi/reprint/28/1/99}
-}
-
- at article{GhoSch03,
-  title =	 {Multiple edit/multiple imputation for multivariate
-                  continuous data},
-  author =	 {Ghosh-Dastidar, B. and Schafer, J.L.},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {98},
-  number =	 {464},
-  pages =	 {807--817},
-  issn =	 {0162-1459},
-  year =	 {2003}
-}
-
-
- at article{ThoOgdGal10,
-  title =	 {{Chronic conditions account for rise in Medicare
-                  spending from 1987 to 2006}},
-  author =	 {Thorpe, K.E. and Ogden, L.L. and Galactionova, K.},
-  journal =	 {Health Affairs},
-  month =	 {April},
-  volume =	 29,
-  number =	 4,
-  year =	 {2010}
-}
-
- at Article{Weintraub95,
-  author =	 {Hal Weintraub and others},
-  title =	 {Through the Glass Lightly},
-  journal =	 {Science},
-  year =	 1995,
-  volume =	 267,
-  pages =	 {1609--1618},
-  month =	 {17 March}
-}
-
- at article{Forgy65,
-author = {EW Forgy},
-title = {Cluster Analysis of Multivariate Data: Efficiency vs Interpretability of Classifications},
-journal = {Biometrics},
-year = {1965},
-volume = {21},
-OPTpages = {768-769},
-}
-
- at article{GatGev89,
-author = {I Gath and AB Geva},
-title = {Unsupervised Optimal Fuzzy Clustering},
-journal = {IEEE Transactions On Pattern Analysis and Machine Intelligence},
-year = {1989},
-volume = {11},
-number = {7},
-pages = {773-780},
-}
-
- at Article{CueGorMat97,
-author = {JA Cuesta-Albertos and A Gordaliza and C Matran},
-title = {Trimmed K-Means: An Attempt to Robustify Quantizers},
-journal = {Annals of Statistics},
-year = {1997},
-volume = {25},
-pages =	 {553-576},
-}
-
- at TechReport{ZhaHsuDay99,
-author = {Bin Zhang and Meichun Hsu and Umeshwar Dayal},
-title = {K-Harmonic Means: A Data Clustering Algorithm},
-institution = {HP Laboratories},
-year = {1999},
-number = {HPL-1999-124},
-}
-
- at conference{Karayiannis94,
-  title={MECA: Maximum Entropy Clustering Algorithm},
-  author={NB Karayiannis},
-  booktitle={The 3rd IEEE International Conference on Fuzzy Systems},
-  pages={630--635},
-  year={1994}
-}
-
-
- at article{McQuitty66,
-author = {LL McQuitty},
-title = {Similarity Analysis by Reciprocal Pairs for Discrete and Continuous Data},
-journal = {Educational and Psychological Measurement},
-year = {1966},
-volume = {26},
-pages = {825-831},
-}
-
-
- at article{Fraley98,
-author = {Chris Fraley},
-title = {Algorithms for Model-Based Gaussian Hierarchical Clustering},
-journal = {SIAM Journal on Scientific Computing},
-year = {1998},
-volume = {20},
-number = {1},
-pages = {270-281},
-}
-
- at article{KoyGraRam05,
-author = {M Koyuturk and A Grama and N Ramakrishnan},
-title = {Compression, Clustering, and Pattern Discovery in Very High-Dimensional Discrete-Attribute Data Sets},
-journal = {IEEE Transactions On Knowledge and Data Engineering},
-year = {2005},
-volume = {17},
-number = {4},
-}
-
- at article{GuhRasShi00,
-author = {S Guha and R Rastogi and K Shim},
-title = {ROCK: A Robust Clustering Algorithm for Categorical Attributes},
-journal = {Information Systems},
-year = {2000},
-volume = {25},
-number = {5},
-}
-
- at article{HeyKruYoo99,
-author = {LJ Heyer and S Kruglyak and S Yooseph},
-title = {Exploring Expression Data: Identification and Analysis of Coexpressed Genes},
-journal = {Genome Research},
-year = {1999},
-volume = {9},
-pages = {1106-1115},
-}
-
-
- at article{BroPihDatDat08,
-author = {G Brock and V Pihur and S Datta and S Datta},
-title = {clValid: An R Package for Cluster Validation},
-journal = {Journal of Statistical Software},
-year = {2008},
-volume = {25},
-number = {4},
-}
-
-
- at article{MeiShi01,
-author = {M Meila and J Shi},
-title = {A Random Walks View of Spectral Segmentation},
-journal = {8th International Workshop on Artificial Intelligence and Statistics (AISTATS)},
-year = {2001},
-}
-
- at article{DhiMalMod03,
-author = {Inderjit Dhillon and Subramanyam Mallela and Dharmendra Modha},
-title = {Information Theoretic Co-Clustering},
-journal = {Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
-year = {2003},
-volume = {9},
-}
-
- at article{Lerman91,
-author = {IC Lerman},
-title = {Foundations of the Likelihood Linkage Analysis Classification Method},
-journal = {Applied Stochastic Models and Data Analysis},
-year = {1991},
-volume = {7},
-pages = {63-76},
-}
-
- at article{WanQiuZam07,
-author = {S Wang and W Qiu and RH Zamar},
-title = {CLUES: A Non-Parametric Clustering Method Based on Local Shrinking},
-journal = {Computational Statistics \& Data Analysis},
-year = {2007},
-volume = {52},
-number = {1},
-pages = {286-298},
-}
-
- at misc{Leisch99,
-author = {Friedrich Leisch},
-title = {Bagged Clustering},
-howpublished = {Working Paper 51, Adaptive Information Systems and Modelling in Economics and Management Science},
-month = {August},
-year = {1999},
-}
-
- at article{Rajesh96,
-author = {Dave Rajesh},
-title = {Fuzzy Shell-Clustering and Applications to Circle Detection in Digital Images},
-journal = {International Journal of General Systems},
-year = {1996},
-volume = {16},
-pages = {343-355},
-}
-
- at Book{Kullback59,
-author = {Solomon Kullback},
-title = {Information Theory and Statistics},
-publisher = {Dover Publications},
-year = {1959},
-}
-
- at book{KauRou90,
-author = {L Kaufman and PJ Rousseeuw},
-title = {Finding Groups in Data: An Introduction to Cluster Analysis},
-publisher = {Wiley},
-year = {1990},
-}
- at Article{Washington08,
-  author =	 {Ebonya L. Washington},
-  title =	 {Female Socialization: How Daughters Affect Their
-                  Legislator Fathers' Voting on Women's Issues},
-  journal =	 {American Economic Review},
-  year =	 2008,
-  volume =	 98,
-  number =	 1,
-  pages =	 {311-332}
-}
-
- at Article{Muller59,
-  author = 	 {M.E. Muller},
-  title = 	 {A Note on a Method for Generating Points Uniformly on N-Dimensional Spheres},
-  journal = 	 {Comm. Assoc. Comput. Mach.},
-  year = 	 1959,
-  volume =	 2,
-  pages =	 {19-20},
-  month =	 {April}
-}
-
diff --git a/vignettes/gkpubs.bib b/vignettes/gkpubs.bib
deleted file mode 100644
index e9b910d..0000000
--- a/vignettes/gkpubs.bib
+++ /dev/null
@@ -1,2259 +0,0 @@
-% A bibtex-format file for papers by or coauthored with Gary King
-%
-% rules used for abbreviations:
-%
-% -if one author: use last name and last 2 digits of the year: King99.
-% -if multiple authors, use 1st 3 letters of each of UP TO the first three
-%     authors and the last 2 digits of the year:  KinTomWit00.
-% -if necessary add lower-case letters for multiple entries in a year:  
-%     King02, King02b (the first one should NOT have an 'a' afterwards)
-% -No string abbreviations are used
-%
-% entries are in separate sections (books, articles, software, data)
-% in reverse chronological order
-%
-% copies of all papers, articles, data, and software, and some books, 
-% are available at http://gking.harvard.edu/
-
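As a minimal sketch of the abbreviation rules above (not part of the original file), the key construction could be written in R, the language of this package; make_key is a hypothetical helper, surnames and year are assumed to be already parsed out of an entry, and the lower-case letters that disambiguate multiple same-year entries are left out:

    # Hypothetical helper illustrating the key convention documented above.
    make_key <- function(surnames, year) {
      yy <- sprintf("%02d", year %% 100)          # last 2 digits of the year
      if (length(surnames) == 1) {
        return(paste0(surnames, yy))              # one author: full last name
      }
      # multiple authors: first 3 letters of each of up to the first 3 authors
      stems <- substr(surnames[seq_len(min(3, length(surnames)))], 1, 3)
      paste0(paste(stems, collapse = ""), yy)
    }
    # make_key("King", 1999)                          # "King99"
    # make_key(c("King", "Tomz", "Wittenberg"), 2000) # "KinTomWit00"
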
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Books
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
- at book{KinSchNie09,
-  editor =	 {Gary King and Kay Schlozman and Norman Nie},
-  title =	 {The Future of Political Science: 100 Perspectives},
-  publisher =	 {Routledge Press},
-  address =	 {New York},
-  year =	 {2009}
-}
-
- at book{GirKin08,
-  author =	 {Federico Girosi and Gary King},
-  title =	 {Demographic Forecasting},
-  publisher =	 {Princeton University Press},
-  year =	 {2008},
-  address =	 {Princeton},
-  note =	 {{http://gking.harvard.edu/files/smooth/}}
-}
-
- at book{KinRosTan04,
-  editor =	 {Gary King and Ori Rosen and Martin A. Tanner},
-  title =	 {Ecological Inference: New Methodological Strategies},
-  publisher =	 {Cambridge University Press},
-  year =	 {2004},
-  address =	 {New York},
-  note =         {{http://gking.harvard.edu/files/abs/ecinf04-abs.shtml}}
-}
-
- at book{King97,
-  author =	 {Gary King},
-  title =	 {A Solution to the Ecological Inference Problem:
-                  Reconstructing Individual Behavior from Aggregate
-                  Data},
-  publisher =	 {Princeton University Press},
-  year =	 {1997},
-  address =	 {Princeton},
-  note =	 {{http://gking.harvard.edu/eicamera/kinroot.html}}
-}
-
- at book{KinKeoVer94,
-  author =	 {Gary King and Robert O. Keohane and Sidney Verba},
-  title =	 {Designing Social Inquiry: Scientific Inference in
-                  Qualitative Research},
-  publisher =	 {Princeton University Press},
-  year =	 {1994},
-  address =	 {Princeton},
-  note =	 {{http://www.pupress.princeton.edu/titles/5458.html}}
-}
-
- at book{King89,
-  author =	 {Gary King},
-  title =	 {Unifying Political Methodology: The Likelihood
-                  Theory of Statistical Inference},
-  publisher =	 {University of Michigan Press},
-  year =	 1989,
-  address =	 {Ann Arbor}
-}
-
- at book{KinRag88,
-  author =	 {Gary King and Lyn Ragsdale},
-  title =	 {The Elusive Executive: Discovering Statistical
-                  Patterns in the Presidency},
-  publisher =	 {Congressional Quarterly Press},
-  year =	 {1988},
-  address =	 {Washington, D.C}
-}
-
- at book{BraHarKin89,
-  author =	 {Paul Brace and Christine Harrington and Gary King},
-  title =	 {The Presidency in American Politics},
-  publisher =	 {New York University Press},
-  year =	 {1989},
-  address =	 {New York and London}
-}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Articles
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
- at Article{SonKin11,
-  author =	 {Samir Soneji and Gary King},
-  title =	 {Statistical Security for Social Security},
-  journal =	 {Demography},
-  year =	 2011,
-  note =	 {{http://gking.harvard.edu/files/abs/ssc-abs.shtml}}
-}
-
- at Article{KinNieCob11b,
-  author =	 {Gary King and Richard Nielsen and Carter Coberley
-                  and James Pope and Aaron Wells},
-  title =	 {Avoiding Randomization Failure in Program
-                  Evaluation},
-  journal =	 {Population Health Management},
-  year =	 2011,
-  volume =	 14,
-  number =	 1,
-  pages =	 {S11-S22},
-  note =	 {{http://gking.harvard.edu/gking/files/mhs.pdf}}
-}
-
- at Article{KinNieCob11,
-  author =	 {Gary King and Richard Nielsen and Carter Coberley
-                  and James Pope and Aaron Wells},
-  title =	 {Comparative Effectiveness of Matching Methods for
-                  Causal Inference},
-  journal =	 { },
-  year =	 {2011},
-  OPTkey =	 {},
-  OPTvolume =	 {},
-  OPTnumber =	 {},
-  OPTpages =	 {},
-  OPTmonth =	 {},
-  OPTnote =	 {{http://gking.harvard.edu/files/abs/psparadox-abs.shtml}},
-  OPTannote =	 {}
-}
-
- at Article{SteKinShi10,
-  author =	 {Gretchen Stevens and Gary King and Kenji Shibuya},
-  title =	 {Deaths From Heart Failure: Using Coarsened Exact
-                  Matching to Correct Cause of Death Statistics},
-  journal =	 {Population Health Metrics},
-  year =	 2010,
-  volume =	 8,
-  number =	 6,
-  note =	 {{http://gking.harvard.edu/files/abs/heartfcem-abs.shtml}}
-}
-
- at Article{GriKin10,
-  author =	 {Justin Grimmer and Gary King},
-  title =	 {Quantitative Discovery from Qualitative Information:
-                  A General-Purpose Document Clustering Methodology},
-  journal =	 { },
-  year =	 {2010},
-  note =	 {{http://gking.harvard.edu/files/abs/discov-abs.shtml}}
-}
-
- at Article{GriKin10b,
-  author =	 {Justin Grimmer and Gary King},
-  title =	 {A General Purpose Computer-Assisted Document
-                  Clustering Methodology},
-  journal =	 { },
-  year =	 {2010},
-  note =
-                  {{http://gking.harvard.edu/files/abs/discovm-abs.shtml}}
-}
-
- at Article{IacKinPor11,
-  author =	 {Stefano M. Iacus and Gary King and Giuseppe Porro},
-  title =	 {Multivariate Matching Methods That are Monotonic
-                  Imbalance Bounding},
-  journal =	 {Journal of the American Statistical Association},
-  year =	 {In press},
-  note =	 {{http://gking.harvard.edu/files/abs/cem-math-abs.shtml}}
-}
-
- at article{HopKin10,
-  author =	 {Daniel Hopkins and Gary King},
-  title =	 {Improving Anchoring Vignettes: Designing Surveys to
-                  Correct Interpersonal Incomparability},
-  journal =	 {Public Opinion Quarterly},
-  year =	 {2010},
-  pages =	 {1-22},
-  note =         {{http://gking.harvard.edu/files/abs/implement-abs.shtml}}
-}
-
- at Article{KinLuShi10,
-  author = 	 {Gary King and Ying Lu and Kenji Shibuya},
-  title = 	 {Designing Verbal Autopsy Studies},
-  journal = 	 {Population Health Metrics},
-  year = 	 2010,
-  volume =	 8,
-  number =	 19,
-  note =	 {http://gking.harvard.edu/files/abs/desva-abs.shtml}
-}
-
- at article{LazPenAda09,
-  author =	 {Lazer, David and Pentland, Alex and Adamic, Lada and
-                  Aral, Sinan and Barabasi, Albert-Laszlo and Brewer,
-                  Devon and Christakis, Nicholas and Contractor,
-                  Noshir and Fowler, James and Gutmann, Myron and
-                  Jebara, Tony and King, Gary and Macy, Michael and
-                  Roy, Deb and Van Alstyne, Marshall},
-  title =	 {{Computational Social Science}},
-  journal =	 {Science},
-  volume =	 {323},
-  number =	 {5915},
-  pages =	 {721-723},
-  year =	 {2009},
-  note =         {{http://gking.harvard.edu/files/abs/LazPenAda09-abs.shtml}}
-}
-
- at Article{AbrBolGut09,
-  author =	 {Mark Abrahamson and Kenneth A. Bollen and Myron
-                  Gutmann and Gary King and Amy M. Pienta},
-  title =	 {Preserving Data for Long Term Analyses},
-  journal =	 {Historical Social Research},
-  year =	 {2009},
-  OPTkey =	 {},
-  OPTvolume =	 {},
-  OPTnumber =	 {},
-  OPTpages =	 {},
-  OPTmonth =	 {Summer, forthcoming},
-  OPTnote =	 {},
-  OPTannote =	 {}
-}
-
- at article{KinGakIma09,
-  Author =	 {Gary King and Emmanuela Gakidou and Kosuke Imai and
-                  Jason Lakin and Ryan T. Moore and Clayton Nall and
-                  Nirmala Ravishankar and Manett Vargas and Martha
-                  Mar{\'i}a T{\'e}llez-Rojo and Juan Eugenio
-                  Hern{\'a}ndez {\'A}vila and Mauricio Hern{\'a}ndez
-                  {\'A}vila and H{\'e}ctor Hern{\'a}ndez Llamas},
-  title =	 {Public Policy for the Poor? A Randomised Assessment
-                  of the Mexican Universal Health Insurance Programme},
-  journal =	 {The Lancet},
-  volume =	 {373},
-  year =	 {2009},
-  note =	 {{http://gking.harvard.edu/files/abs/spi-abs.shtml}}
-}
-
-
- at Article{KinSon09,
-  author =	 {Gary King and Samir Soneji},
-  title =	 {The Future of Death in America},
-  journal =	 {},
-  year =	 2009,
-  note =	 {{http://gking.harvard.edu/files/abs/mort-abs.shtml}} 
-}
-
- at Article{IacKinPor11b,
-  author =	 {Stefano M. Iacus and Gary King and Giuseppe Porro},
-  title =	 {Causal Inference Without Balance Checking: Coarsened
-                  Exact Matching},
-  journal =	 {Political Analysis},
-  year =	 {2011, in press},
-  note =	 {{http://gking.harvard.edu/files/abs/cem-plus-abs.shtml}}
-}
-
- at article{ImaKinStu08,
-  author =	 {Kosuke Imai and Gary King and Elizabeth Stuart},
-  title =	 {Misunderstandings Among Experimentalists and
-                  Observationalists about Causal Inference},
-  journal =	 {Journal of the Royal Statistical Society, {S}eries
-                  {A}},
-  volume =	 {171, part 2},
-  year =	 {2008},
-  pages =	 {481--502},
-  note =
-                  {{http://gking.harvard.edu/files/abs/matchse-abs.shtml}}
-}
-
- at article{uImaKinStu08,
-  author =	 {Kosuke Imai and Gary King and Elizabeth Stuart},
-  title =	 {Misunderstandings Among Experimentalists and
-                  Observationalists about Causal Inference},
-  journal =	 {Journal of the Royal Statistical Society, {S}eries
-                  {A}},
-  volume =	 {171, part 2},
-  year =	 {2008},
-  pages =	 {481--502}
-}
-
- at article{KinLu08,
-  author =	 {Gary King and Ying Lu},
-  title =	 {Verbal Autopsy Methods with Multiple Causes of
-                  Death},
-  journal =	 {Statistical Science},
-  volume =	 {23},
-  number =	 {1},
-  year =	 {2008},
-  pages =	 {78--91},
-  note =	 {{http://gking.harvard.edu/files/abs/vamc-abs.shtml}}
-}
-
- at article{AltKin07,
-  author =	 {Micah Altman and Gary King},
-  title =	 {A Proposed Standard for the Scholarly Citation of
-                  Quantitative Data },
-  journal =	 {D-Lib Magazine},
-  volume =	 {13},
-  year =	 {2007},
-  month =	 {March / April},
-  number =	 {3/4},
-  note =	 {{http://gking.harvard.edu/files/abs/cite-abs.shtml}}
-}
-
- at article{GroKin07,
-  author =	 {Bernard Grofman and Gary King},
-  title =	 {The Future of Partisan Symmetry as a Judicial Test
-                  for Partisan Gerrymandering after LULAC v. Perry},
-  journal =	 {Election Law Journal},
-  volume =	 {6},
-  year =	 {2007},
-  pages =	 {2-35},
-  month =	 {January},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/jp-abs.shtml}}
-}
-
- at article{uHoImaKin07,
-  author =	 {Daniel Ho and Kosuke Imai and Gary King and
-                  Elizabeth Stuart},
-  title =	 {Matching as Nonparametric Preprocessing for Reducing
-                  Model Dependence in Parametric Causal Inference},
-  journal =	 {Political Analysis},
-  year =	 {2007},
-  volume =	 {15},
-  pages =	 {199--236}
-}
-
- at article{HoImaKin07,
-  author =	 {Daniel Ho and Kosuke Imai and Gary King and
-                  Elizabeth Stuart},
-  title =	 {Matching as Nonparametric Preprocessing for Reducing
-                  Model Dependence in Parametric Causal Inference},
-  journal =	 {Political Analysis},
-  year =	 {2007},
-  volume =	 {15},
-  pages =	 {199--236},
-  note =	 {{http://gking.harvard.edu/files/abs/matchp-abs.shtml}}
-}
-
- at article{HopKin10b,
-  author =	 {Daniel Hopkins and Gary King},
-  title =	 {A Method of Automated Nonparametric Content Analysis
-                  for Social Science},
-  journal =	 {American Journal of Political Science},
-  year =	 {2010},
-  volume =	 {54},
-  number =	 {1},
-  month =	 {January},
-  pages =	 {229--247},
-  note =	 {http://gking.harvard.edu/files/abs/words-abs.shtml}
-}
-
- at article{ImaKinLau07,
-  author =	 {Kosuke Imai and Gary King and Olivia Lau},
-  title =	 {Toward A Common Framework for Statistical Analysis
-                  and Development},
-  journal =	 {Journal of Computational and Graphical Statistics},
-  volume =	 17,
-  number =	 4,
-  pages =	 {1--22},
-  year =	 2008,
-  note =	 {{http://gking.harvard.edu/files/abs/z-abs.shtml}}
-}
-
- at article{ImaKinNal09d,
-  author =	 {Kosuke Imai and Gary King and Clayton Nall},
-  title =	 {Matched Pairs and the Future of Cluster-Randomized
-                  Experiments: A Rejoinder},
-  journal =	 {Statistical Science},
-  volume =	 {24},
-  number =	 {1},
-  pages =	 {64--72},
-  year =	 {2009},
-  note =
-                  {{http://gking.harvard.edu/files/abs/cluster-abs.shtml}}
-}
-
- at article{ImaKinNal09,
-  author =	 {Kosuke Imai and Gary King and Clayton Nall},
-  title =	 {The Essential Role of Pair Matching in
-                  Cluster-Randomized Experiments, with Application to
-                  the Mexican Universal Health Insurance Evaluation},
-  journal =	 {Statistical Science},
-  volume =	 {24},
-  number =	 {1},
-  pages =	 {29--53},
-  year =	 {2009},
-  note =
-                  {{http://gking.harvard.edu/files/abs/cluster-abs.shtml}}
-}
-
- at article{uImaKinNal09,
-  author =	 {Kosuke Imai and Gary King and Clayton Nall},
-  title =	 {The Essential Role of Pair Matching in
-                  Cluster-Randomized Experiments, with Application to
-                  the Mexican Universal Health Insurance Evaluation},
-  journal =	 {Statistical Science},
-  volume =	 {24},
-  number =	 {1},
-  pages =	 {29--53},
-  year =	 {2009}
-}
-
- at article{King07,
-  author =	 {Gary King},
-  title =	 {An Introduction to the Dataverse Network as an
-                  Infrastructure for Data Sharing},
-  journal =	 {Sociological Methods and Research},
-  year =	 {2007},
-  volume =	 {36},
-  number =	 {2},
-  pages =	 {173--199},
-  note =	 {{http://gking.harvard.edu/files/abs/dvn-abs.shtml}}
-}
-
- at inbook{King09b,
-  author =	 {Gary King},
-  chapter =	 {The Changing Evidence Base of Political Science
-                  Research},
-  title =	 {The Future of Political Science: 100 Perspectives},
-  publisher =	 {Routledge Press},
-  address =	 {New York},
-  year =	 {2009, forthcoming},
-  editor =	 {Gary King and Kay Schlozman and Norman Nie},
-  note =	 {{http://gking.harvard.edu/files/abs/evbase-abs.shtml}}
-}
-
- at inbook{King09c,
-  author =	 {Gary King and Kay Schlozman and Norman Nie},
-  chapter =	 {An Introduction to the Future of Political Science},
-  title =	 {The Future of Political Science: 100 Perspectives},
-  publisher =	 {Routledge Press},
-  address =	 {New York},
-  year =	 {2009, forthcoming},
-  editor =	 {Gary King and Kay Schlozman and Norman Nie}
-}
-
- at article{KinGakRav07,
-  Author =	 {Gary King and Emmanuela Gakidou and Nirmala
-                  Ravishankar and Ryan T. Moore and Jason Lakin and
-                  Manett Vargas and Martha Mar{\'i}a T{\'e}llez-Rojo
-                  and Juan Eugenio Hern{\'a}ndez {\'A}vila and
-                  Mauricio Hern{\'a}ndez {\'A}vila and H{\'e}ctor
-                  Hern{\'a}ndez Llamas},
-  title =	 {A `Politically Robust' Experimental Design for
-                  Public Policy Evaluation, with Application to the
-                  Mexican Universal Health Insurance Program},
-  journal =	 {Journal of Policy Analysis and Management},
-  volume =	 {26},
-  year =	 {2007},
-  pages =	 {479-506},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/spd-abs.shtml}}
-}
-
- at article{KinWan07,
-  author =	 {Gary King and Jonathan Wand},
-  title =	 {Comparing Incomparable Survey Responses: New Tools
-                  for Anchoring Vignettes},
-  journal =	 {Political Analysis},
-  volume =	 {15},
-  year =	 {2007},
-  pages =	 {46-66},
-  month =	 {Winter},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/c-abs.shtml}}
-}
-
- at article{KinZen07,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {When Can History Be Our Guide? The Pitfalls of
-                  Counterfactual Inference},
-  journal =	 {International Studies Quarterly},
-  year =	 {2007},
-  pages =	 {183-210},
-  month =	 {March},
-  note =         {{http://gking.harvard.edu/files/abs/counterf-abs.shtml}}
-}
-
- at article{uKinZen07,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {When Can History Be Our Guide? The Pitfalls of
-                  Counterfactual Inference},
-  journal =	 {International Studies Quarterly},
-  year =	 {2007},
-  pages =	 {183-210},
-  month =	 {March}
-}
-
- at article{KinZen07b,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Detecting Model Dependence in Statistical Inference:
-                  A Response},
-  journal =	 {International Studies Quarterly},
-  volume =	 {51},
-  year =	 {2007},
-  pages =	 {231-241},
-  month =	 {March},
-  note =         {{http://gking.harvard.edu/files/abs/counterf-abs.shtml}}
-}
-
- at article{WanKinLau07,
-  author =	 {Jonathan Wand and Gary King and Olivia Lau},
-  title =	 {Anchors: Software for Anchoring Vignettes Data},
-  journal =	 {Journal of Statistical Software},
-  year =	 {2007, forthcoming}
-}
-
- at inbook{EpsHoKin06,
-  author =	 {Lee Epstein and Daniel E. Ho and Gary King and
-                  Jeffrey A. Segal},
-  title =	 {Principles and Practice in American Politics:
-                  Classic and Contemporary Readings},
-  chapter =	 {The Effect of War on the Supreme Court},
-  year =	 {2006},
-  publisher =	 {Congressional Quarterly Press},
-  edition =	 {3rd},
-  address =	 {Washington, D.C.},
-  editor =	 {Samuel Kernell and Steven S. Smith},
-  note =	 {{http://gking.harvard.edu/files/abs/crisis-abs.shtml}}
-}
-
- at article{GakKin06,
-  author =	 {Emmanuela Gakidou and Gary King},
-  title =	 {Death by Survey: Estimating Adult Mortality without
-                  Selection Bias from Sibling Survival Data},
-  journal =	 {Demography},
-  volume =	 43,
-  year =	 2006,
-  pages =	 {569--585},
-  month =	 {August},
-  number =	 3,
-  note =         {{http://gking.harvard.edu/files/abs/deathbys-abs.shtml}}
-}
-
- at article{HonKin10,
-  author =	 {James Honaker and Gary King},
-  title =	 {What to do About Missing Values in Time Series
-                  Cross-Section Data},
-  journal =	 {American Journal of Political Science},
-  year =	 {2010},
-  volume =	 {54},
-  number =	 {2},
-  month =	 {April},
-  pages =	 {561--581},
-  note =	 {{http://gking.harvard.edu/files/abs/pr-abs.shtml}}
-}
-
- at article{King06,
-  author =	 {Gary King},
-  title =	 {{Publication, Publication}},
-  journal =	 {PS: Political Science and Politics},
-  volume =	 {39},
-  year =	 {2006},
-  pages =	 {119--125},
-  month =	 {January},
-  number =	 {01},
-  note =
-                  {{http://gking.harvard.edu/files/abs/paperspub-abs.shtml}}
-}
-
- at inbook{KinRosTan06,
-  author =	 {Gary King and Ori Rosen and Martin Tanner},
-  title =	 {The New Palgrave Dictionary of Economics},
-  chapter =	 {Ecological Inference},
-  year =	 {2006},
-  edition =	 {2nd},
-  editor =	 {Larry Blume and Steven N. Durlauf},
-  note =
-                  {{http://gking.harvard.edu/files/abs/newintro-abs.shtml}}
-}
-
- at article{KinZen06,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {The Dangers of Extreme Counterfactuals},
-  journal =	 {Political Analysis},
-  volume =	 {14},
-  year =	 2006,
-  pages =	 {131--159},
-  number =	 {2},
-  note =
-                  {{http://gking.harvard.edu/files/abs/counterft-abs.shtml}}
-}
-
- at article{GirKin07,
-  author =	 {Federico Girosi and Gary King},
-  title =	 {Understanding the Lee-Carter Mortality Forecasting
-                  Method},
-  year =	 2007,
-  note =	 {{http://gking.harvard.edu/files/abs/lc-abs.shtml}}
-}
-
- at article{EpsHoKin05,
-  author =	 {Lee Epstein and Daniel E. Ho and Gary King and
-                  Jeffrey A. Segal},
-  title =	 {The Supreme Court During Crisis: How War Affects
-                  only Non-War Cases},
-  journal =	 {New York University Law Review},
-  volume =	 {80},
-  year =	 {2005},
-  pages =	 {1--116},
-  month =	 {April},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/crisis-abs.shtml}}
-}
-
- at article{StoKinZen05,
-  author =	 {Heather Stoll and Gary King and Langche Zeng},
-  title =	 {WhatIf: Software for Evaluating Counterfactuals},
-  journal =	 {Journal of Statistical Software},
-  volume =	 {15},
-  year =	 {2005},
-  number =	 {4},
-  note =	 {{http://www.jstatsoft.org/index.php?vol=15}}
-}
-
- at article{BecKinZen04,
-  author =	 {Nathaniel Beck and Gary King and Langche Zeng},
-  title =	 {Theory and Evidence in International Conflict:
-                  Response to de Marchi, Gelpi, and Grynaviski},
-  journal =	 {American Political Science Review},
-  volume =	 {98},
-  year =	 {2004},
-  pages =	 {379-389},
-  month =	 {May},
-  number =	 {2},
-  note =
-                  {{http://gking.harvard.edu/files/abs/toe-resp-abs.shtml}}
-}
-
- at inbook{GelKatKin04,
-  author =	 {Andrew Gelman and Jonathan Katz and Gary King},
-  title =	 {Rethinking the Vote: The Politics and Prospects of
-                  American Electoral Reform},
-  chapter =	 {Chapter 5, Empirically Evaluating the Electoral
-                  College},
-  year =	 {2004},
-  publisher =	 {Oxford University Press},
-  pages =	 {75-88},
-  address =	 {New York},
-  editor =	 {Ann N. Crigler and Marion R. Just and Edward
-                  J. McCaffery},
-  note =
-                  {{http://gking.harvard.edu/files/abs/rethink-abs.shtml}}
-}
-
- at article{GilKin04,
-  author =	 {Jeff Gill and Gary King},
-  title =	 {What to do When Your Hessian is Not Invertible:
-                  Alternatives to Model Respecification in Nonlinear
-                  Estimation},
-  journal =	 {Sociological Methods and Research},
-  volume =	 {32},
-  year =	 {2004},
-  pages =	 {54-87},
-  month =	 {August},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/help-abs.shtml}}
-}
-
- at article{ImaKin04,
-  author =	 {Kosuke Imai and Gary King},
-  title =	 {Did Illegal Overseas Absentee Ballots Decide the
-                  2000 U.S. Presidential Election?},
-  journal =	 {Perspectives on Politics},
-  volume =	 {2},
-  year =	 {2004},
-  pages =	 {537--549},
-  month =	 {September},
-  number =	 {3},
-  note =
-                  {{http://gking.harvard.edu/files/abs/ballots-abs.shtml}}
-}
-
- at article{King04,
-  author =	 {Gary King},
-  title =	 {EI: A Program for Ecological Inference},
-  journal =	 {Journal of Statistical Software},
-  volume =	 {11},
-  year =	 {2004},
-  number =	 {7}
-}
-
- at article{King04b,
-  author =	 {Gary King},
-  title =	 {Finding New Information for Ecological Inference
-                  Models: A Comment on Jon Wakefield, "Ecological
-                  Inference in 2 X 2 Tables"},
-  journal =	 {Journal of the Royal Statistical Society, Series A},
-  volume =	 {167},
-  year =	 {2004},
-  pages =	 {437}
-}
-
- at inbook{KinRosTan04b,
-  author =	 {Gary King and Ori Rosen and Martin Tanner},
-  title =	 {Ecological Inference: New Methodological Strategies},
-  chapter =	 {Information in Ecological Inference: An
-                  Introduction},
-  year =	 {2004},
-  publisher =	 {Cambridge University Press},
-  address =	 {New York},
-  editor =	 {Gary King and Ori Rosen and Martin Tanner}
-}
-
- at inbook{KinZen04,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Encyclopedia of Biopharmaceutical Statistics},
-  chapter =	 {Inference in Case-Control Studies},
-  year =	 {2004},
-  publisher =	 {Marcel Dekker},
-  edition =	 {2nd},
-  address =	 {New York},
-  editor =	 {Shein-Chung Chow},
-  note =	 {{http://gking.harvard.edu/files/abs/1s-enc-abs.shtml}}
-}
-
- at article{AdoKin03,
-  author =	 {Christopher Adolph and Gary King},
-  title =	 {Analyzing Second Stage Ecological Regressions},
-  journal =	 {Political Analysis},
-  volume =	 {11},
-  year =	 {2003},
-  pages =	 {65-76},
-  month =	 {Winter},
-  number =	 {1}
-}
-
- at article{AdoKinHer03,
-  author =	 {Christopher Adolph and Gary King, with Michael
-                  C. Herron and Kenneth W. Shotts},
-  title =	 {A Consensus on Second Stage Analyses in Ecological
-                  Inference Models},
-  journal =	 {Political Analysis},
-  volume =	 {11},
-  year =	 {2003},
-  pages =	 {86--94},
-  month =	 {Winter},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/akhs-abs.shtml}}
-}
-
- at article{EpsKin03,
-  author =	 {Lee Epstein and Gary King},
-  title =	 {Building An Infrastructure for Empirical Research in
-                  the Law [with comments from four law school deans]},
-  journal =	 {Journal of Legal Education},
-  volume =	 {53},
-  year =	 {2003},
-  pages =	 {311--320},
-  number =	 {311},
-  note =	 {{http://gking.harvard.edu/files/abs/infra-abs.shtml}}
-}
-
- at inbook{GakKin03,
-  author =	 {Emmanuela Gakidou and Gary King},
-  title =	 {Health Systems Performance Assessment: Debates,
-                  Methods and Empiricism},
-  chapter =	 {Chapter 36, Determinants of Inequality in Child
-                  Survival: Results from 39 Countries},
-  publisher =	 {World Health Organization},
-  pages =	 {497-502},
-  address =	 {Geneva},
-  year =	 {2003},
-  editor =	 {Christopher Murray and David B. Evans}
-}
-
- at inbook{GilKin03,
-  author =	 {Jeff Gill and Gary King},
-  title =	 {Numerical Issues in Statistical Computing for the
-                  Social Scientist},
-  chapter =	 {Chapter 6, Numerical Issues Involved in Inverting
-                  Hessian Matrices},
-  year =	 {2003},
-  publisher =	 {John Wiley and Sons, Inc.},
-  pages =	 {143-176},
-  address =	 {Hoboken, NJ},
-  editor =	 {Micah Altman and Jeff Gill and Michael P. McDonald}
-}
-
- at article{King03,
-  author =	 {Gary King},
-  title =	 {The Future of Replication},
-  journal =	 {International Studies Perspectives},
-  volume =	 {4},
-  year =	 {2003},
-  pages =	 {443--499},
-  month =	 {February},
-  number =	 1,
-  note =
-                  {{http://gking.harvard.edu/files/abs/replvdc-abs.shtml}}
-}
-
- at article{KinLow03,
-  author =	 {Gary King and Will Lowe},
-  title =	 {An Automated Information Extraction Tool For
-                  International Conflict Data with Performance as Good
-                  as Human Coders: A Rare Events Evaluation Design},
-  journal =	 {International Organization},
-  volume =	 {57},
-  year =	 {2003},
-  pages =	 {617-642},
-  month =	 {July},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/infoex-abs.shtml}}
-}
-
- at article{KinMurSal04,
-  author =	 {Gary King and Christopher J.L. Murray and Joshua
-                  A. Salomon and Ajay Tandon},
-  title =	 {Enhancing the Validity and Cross-cultural
-                  Comparability of Measurement in Survey Research},
-  journal =	 {American Political Science Review},
-  volume =	 {98},
-  year =	 {2004},
-  pages =	 {191--207},
-  month =	 {February},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/vign-abs.shtml}}
-}
-
- at article{KinRosTan07,
-  author =	 {Gary King and Ori Rosen and Martin Tanner and
-                  Alexander F. Wagner},
-  title =	 {Ordinary Economic Voting Behavior in the
-                  Extraordinary Election of Adolf Hitler},
-  year =	 {2007},
-  note =	 {{http://gking.harvard.edu/files/abs/naziV-abs.shtml}}
-}
-
- at inbook{KinZen03,
-  author =	 {Gary King and Langche Zeng},
-  chapter =	 {Inference in Case-Control Studies},
-  title =	 {Encyclopedia of Biopharmaceutical Statistics},
-  year =	 {2003},
-  publisher =	 {Marcel Dekker},
-  edition =	 {2nd},
-  address =	 {New York},
-  editor =	 {Shein-Chung Chow}
-}
-
- at article{LowKin03,
-  author =	 {Will Lowe and Gary King },
-  title =	 {Some Statistical Methods for Evaluating Information
-                  Extraction Systems},
-  journal =	 {Proceedings of the 10th Conference of the European
-                  Chapter of the Association for Computational
-                  Linguistics},
-  year =	 {2003},
-  pages =	 {19-26}
-}
-
- at article{TomKinZen03,
-  author =	 {Michael Tomz and Gary King and Langche Zeng},
-  title =	 {ReLogit: Rare Events Logistic Regression},
-  journal =	 {Journal of Statistical Software},
-  volume =	 {8},
-  year =	 {2003},
-  number =	 {2},
-  note =	 {{http://gking.harvard.edu/stats.shtml#relogit}}
-}
-
- at article{TomWitKin03,
-  author =	 {Michael Tomz and Jason Wittenberg and Gary King},
-  title =	 {CLARIFY: Software for Interpreting and Presenting
-                  Statistical Results},
-  journal =	 {Journal of Statistical Software},
-  volume =	 {8},
-  year =	 {2003},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/stats.shtml}}
-}
-
- at article{EpsKin02,
-  author =	 {Lee Epstein and Gary King},
-  title =	 {The Rules of Inference},
-  journal =	 {University of Chicago Law Review},
-  volume =	 {69},
-  year =	 {2002},
-  pages =	 {1--209},
-  month =	 {Winter},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/rules-abs.shtml}}
-}
-
-@article{EpsKin02b,
-  author =	 {Lee Epstein and Gary King},
-  title =	 {Empirical Research and The Goals of Legal
-                  Scholarship: A Response},
-  journal =	 {University of Chicago Law Review},
-  volume =	 {69},
-  year =	 {2002},
-  pages =	 {1--209},
-  month =	 {Winter},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/rules-abs.shtml}}
-}
-
-@article{GakKin02,
-  author =	 {Emmanuela Gakidou and Gary King},
-  title =	 {Measuring Total Health Inequality: Adding Individual
-                  Variation to Group-Level Differences},
-  journal =	 {BioMed Central: International Journal for Equity in
-                  Health},
-  volume =	 {1},
-  year =	 2002,
-  month =	 {August},
-  number =	 3,
-  note =	 {{http://gking.harvard.edu/files/abs/ebb-abs.shtml}}
-}
-
-@article{HonKinKat02,
-  author =	 {James Honaker and Gary King and Jonathan N. Katz},
-  title =	 {A Fast, Easy, and Efficient Estimator for Multiparty
-                  Electoral Data},
-  journal =	 {Political Analysis},
-  volume =	 {10},
-  year =	 {2002},
-  pages =	 {84--100},
-  month =	 {Winter},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/trip-abs.shtml}}
-}
-
-@article{King02b,
-  author =	 {Gary King},
-  title =	 {Isolating Spatial Autocorrelation, Aggregation Bias,
-                  and Distributional Violations in Ecological
-                  Inference},
-  journal =	 {Political Analysis},
-  volume =	 {10},
-  year =	 {2002},
-  pages =	 {298--300},
-  month =	 {Summer},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/ac-abs.shtml}}
-}
-
-@article{KinMur02,
-  author =	 {Gary King and Christopher J.L. Murray},
-  title =	 {Rethinking Human Security},
-  journal =	 {Political Science Quarterly},
-  volume =	 {116},
-  year =	 {2002},
-  pages =	 {585--610},
-  month =	 {Winter},
-  number =	 {4},
-  note =	 {{http://gking.harvard.edu/files/abs/hs-abs.shtml}}
-}
-
-@article{KinZen02,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Improving Forecasts of State Failure},
-  journal =	 {World Politics},
-  volume =	 53,
-  year =	 2002,
-  pages =	 {623--658},
-  month =	 {July},
-  number =	 4 ,
-  note =	 {{http://gking.harvard.edu/files/abs/civil-abs.shtml}}
-}
-
-@article{KinZen02b,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Estimating Risk and Rate Levels, Ratios, and
-                  Differences in Case-Control Studies},
-  journal =	 {Statistics in Medicine},
-  volume =	 21,
-  year =	 2002,
-  pages =	 {1409--1427},
-  note =	 {{http://gking.harvard.edu/files/abs/1s-abs.shtml}}
-}
-
-@article{MurKinLop02,
-  author =	 {Christopher J.L. Murray and Gary King and Alan
-                  D. Lopez and Niels Tomijima and Etienne Krug},
-  title =	 {Armed Conflict as a Public Health Problem},
-  journal =	 {BMJ (British Medical Journal)},
-  volume =	 {324},
-  year =	 {2002},
-  pages =	 {346--349},
-  month =	 {February 9},
-  note =
-                  {{http://gking.harvard.edu/files/abs/armedph-abs.shtml}}
-}
-
-@article{AltAndDig01a,
-  author =	 {Micah Altman and Leonid Andreev and Mark Diggory and
-                  Gary King and Daniel L. Kiskis and Elizabeth Kolster
-                  and M. Krot and Sidney Verba},
-  title =	 {A Digital Library for the Dissemination and
-                  Replication of Quantitative Social Science Research:
-                  The Virtual Data Center},
-  journal =	 {Social Science Computer Review},
-  volume =	 19,
-  year =	 2001,
-  pages =	 {458--470},
-  month =	 {Winter},
-  number =	 4,
-  note =
-                  {{http://gking.harvard.edu/files/abs/vdcwhitepaper-abs.shtml}}
-}
-
-@article{AltAndDig01b,
-  author =	 {Micah Altman and Leonid Andreev and Mark Diggory and
-                  Gary King and Daniel L. Kiskis and Elizabeth Kolster
-                  and M. Krot and Sidney Verba},
-  title =	 {An Overview of the Virtual Data Center Project and
-                  Software},
-  journal =	 {JCDL '01: First Joint Conference on Digital
-                  Libraries},
-  year =	 2001,
-  pages =	 {203--204},
-  note =	 {{http://gking.harvard.edu/files/abs/jcdl01-abs.shtml}}
-}
-
-@article{AltKinSig01,
-  author =	 {James E. Alt and Gary King and Curtis Signorino},
-  title =	 {Aggregation Among Binary, Count, and Duration
-                  Models: Estimating the Same Quantities from
-                  Different Levels of Data},
-  journal =	 {Political Analysis},
-  volume =	 {9},
-  year =	 {2001},
-  pages =	 {21--44},
-  month =	 {Winter},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/abcd-abs.shtml}}
-}
-
-@article{King01,
-  author =	 {Gary King},
-  title =	 {Proper Nouns and Methodological Propriety: Pooling
-                  Dyads in International Relations Data},
-  journal =	 {International Organization},
-  volume =	 {55},
-  year =	 {2001},
-  pages =	 {497--507},
-  month =	 {Fall},
-  number =	 {2},
-  note =	 {{http://gking.harvard.edu/files/abs/pool-abs.shtml}}
-}
-
-@article{KinHonJos01,
-  author =	 {Gary King and James Honaker and Anne Joseph and
-                  Kenneth Scheve},
-  title =	 {Analyzing Incomplete Political Science Data: An
-                  Alternative Algorithm for Multiple Imputation},
-  journal =	 {American Political Science Review},
-  volume =	 95,
-  year =	 2001,
-  pages =	 {49--69},
-  month =	 {March},
-  number =	 1 ,
-  note =	 {{http://gking.harvard.edu/files/abs/evil-abs.shtml}}
-}
-
-@article{KinZen01,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Logistic Regression in Rare Events Data},
-  journal =	 {Political Analysis},
-  volume =	 9,
-  year =	 2001,
-  pages =	 {137--163},
-  month =	 {Spring},
-  number =	 2 ,
-  note =	 {{http://gking.harvard.edu/files/abs/0s-abs.shtml}}
-}
-
-@article{KinZen01b,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Explaining Rare Events in International Relations},
-  journal =	 {International Organization},
-  volume =	 55,
-  year =	 2001,
-  pages =	 {693--715},
-  month =	 {Summer},
-  number =	 3 ,
-  note =	 {{http://gking.harvard.edu/files/abs/baby0s-abs.shtml}}
-}
-
-@article{RosJiaKin01,
-  author =	 {Ori Rosen and Wenxin Jiang and Gary King and Martin
-                  A. Tanner},
-  title =	 {Bayesian and Frequentist Inference for Ecological
-                  Inference: The $R \times C$ Case},
-  journal =	 {Statistica Neerlandica},
-  volume =	 55,
-  year =	 2001,
-  pages =	 {134--156},
-  number =	 2 ,
-  note =	 {{http://gking.harvard.edu/files/abs/rosen-abs.shtml}}
-}
-
-@article{BecKinZen00,
-  author =	 {Nathaniel Beck and Gary King and Langche Zeng},
-  title =	 {Improving Quantitative Studies of International
-                  Conflict},
-  journal =	 {American Political Science Review},
-  volume =	 94,
-  year =	 2000,
-  pages =	 {21--36},
-  month =	 {March},
-  number =	 1 ,
-  note =	 {{http://gking.harvard.edu/files/abs/improv-abs.shtml}}
-}
-
-@article{King00,
-  author =	 {Gary King},
-  title =	 {Geography, Statistics, and Ecological Inference},
-  journal =	 {Annals of the Association of American Geographers},
-  volume =	 {90},
-  year =	 {2000},
-  pages =	 {601--606},
-  month =	 {September},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/geog-abs.shtml}}
-}
-
-@article{KinTomWit00,
-  author =	 {Gary King and Michael Tomz and Jason Wittenberg},
-  title =	 {Making the Most of Statistical Analyses: Improving
-                  Interpretation and Presentation},
-  journal =	 {American Journal of Political Science},
-  volume =	 44,
-  year =	 2000,
-  pages =	 {341--355},
-  month =	 {April},
-  number =	 2,
-  note =	 {{http://gking.harvard.edu/files/abs/making-abs.shtml}}
-}
-
-@article{GelKinLiu99,
-  author =	 {Andrew Gelman and Gary King and Chuanhai Liu},
-  title =	 {Not Asked and Not Answered: Multiple Imputation for
-                  Multiple Surveys},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 93,
-  year =	 1999,
-  pages =	 {846--857},
-  month =	 {September},
-  number =	 433 ,
-  note =	 {{http://gking.harvard.edu/files/abs/not-abs.shtml}}
-}
-
-@article{GelKinLiu99b,
-  author =	 {Andrew Gelman and Gary King and Chuanhai Liu},
-  title =	 {Rejoinder},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 93,
-  year =	 1999,
-  pages =	 {869--874},
-  month =	 {September},
-  number =	 433 ,
-  note =	 {{http://gking.harvard.edu/files/abs/not-abs.shtml}}
-}
-
-@article{KatKin99,
-  author =	 {Jonathan Katz and Gary King},
-  title =	 {A Statistical Model for Multiparty Electoral Data},
-  journal =	 {American Political Science Review},
-  volume =	 93,
-  year =	 1999,
-  pages =	 {15--32},
-  month =	 {March},
-  number =	 {1},
-  note =
-                  {{http://gking.harvard.edu/files/abs/multiparty-abs.shtml}}
-}
-
-@article{King99,
-  author =	 {Gary King},
-  title =	 {The Future of Ecological Inference Research: A Reply
-                  to Freedman et al.},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {94},
-  year =	 {1999},
-  pages =	 {352--355},
-  month =	 {March},
-  number =	 {445},
-  note =	 {{http://gking.harvard.edu/files/abs/reply-abs.shtml}}
-}
-
-@article{KinLav99,
-  author =	 {Gary King and Michael Laver},
-  title =	 {Many Publications, but Still No Evidence},
-  journal =	 {Electoral Studies},
-  volume =	 {18},
-  year =	 {1999},
-  pages =	 {597--598},
-  month =	 {December},
-  number =	 {4},
-  note =
-                  {{http://gking.harvard.edu/files/abs/manypub-abs.shtml}}
-}
-
-@article{KinRosTan99,
-  author =	 {Gary King and Ori Rosen and Martin A. Tanner},
-  title =	 {Binomial-Beta Hierarchical Models for Ecological
-                  Inference},
-  journal =	 {Sociological Methods and Research},
-  volume =	 28,
-  year =	 1999,
-  pages =	 {61--90},
-  month =	 {August},
-  number =	 1 ,
-  note =	 {{http://gking.harvard.edu/files/abs/binom-abs.shtml}}
-}
-
-@article{LewKin99,
-  author =	 {Jeffrey Lewis and Gary King},
-  title =	 {No Evidence on Directional vs. Proximity Voting},
-  journal =	 {Political Analysis},
-  volume =	 {8},
-  year =	 {1999},
-  pages =	 {21--33},
-  month =	 {August},
-  number =	 {1},
-  note =
-                  {{http://gking.harvard.edu/files/abs/spatial-abs.shtml}}
-}
-
-@article{GelKinBos98,
-  author =	 {Andrew Gelman and Gary King and John Boscardin},
-  title =	 {Estimating the Probability of Events that Have Never
-                  Occurred: When Is Your Vote Decisive?},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {93},
-  year =	 {1998},
-  pages =	 {1--9},
-  month =	 {March},
-  number =	 {441},
-  note =
-                  {{http://gking.harvard.edu/files/abs/estimatprob-abs.shtml}}
-}
-
-@article{KinPal98,
-  author =	 {Gary King and Bradley Palmquist},
-  title =	 {The Record of American Democracy, 1984-1990},
-  journal =	 {Sociological Methods and Research},
-  volume =	 {26},
-  year =	 {1998},
-  pages =	 {424--427},
-  month =	 {February},
-  number =	 {3},
-  note =	 {{http://www.hmdc.harvard.edu/ROAD/}}
-}
-
-@article{BenKin96,
-  author =	 {Kenneth Benoit and Gary King},
-  title =	 {A Preview of EI and EzI: Programs for Ecological
-                  Inference},
-  journal =	 {Social Science Computer Review},
-  volume =	 {14},
-  year =	 {1996},
-  pages =	 {433--438},
-  month =	 {Winter},
-  number =	 {4},
-  note =
-                  {{http://gking.harvard.edu/files/abs/preview-abs.shtml}}
-}
-
-@inbook{GelKin96,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Advantages of Conflictual Redistricting},
-  year =	 {1996},
-  publisher =	 {Dartmouth Publishing Company},
-  pages =	 {207--218},
-  address =	 {Aldershot, England},
-  editor =	 {Iain McLean and David Butler},
-  note =	 {{http://gking.harvard.edu/files/abs/advant-abs.shtml}},
-  journal =	 {Fixing the Boundary: Defining and Redefining
-                  Single-Member Electoral Districts}
-}
-
-@inbook{KinBruGel96,
-  author =	 {Gary King and John Bruce and Andrew Gelman},
-  title =	 {Racial Fairness in Legislative Redistricting},
-  year =	 {1996},
-  publisher =	 {Princeton University Press},
-  editor =	 {Paul E. Peterson},
-  note =	 {{http://gking.harvard.edu/files/abs/racial-abs.shtml}},
-  journal =	 {Classifying by Race}
-}
-
-@article{King96,
-  author =	 {Gary King},
-  title =	 {Why Context Should Not Count},
-  journal =	 {Political Geography },
-  volume =	 {15},
-  year =	 {1996},
-  pages =	 {159--164},
-  month =	 {February},
-  number =	 {2},
-  note =	 {{http://gking.harvard.edu/files/abs/contxt-abs.shtml}}
-}
-
-@article{KinSig96,
-  author =	 {Gary King and Curtis S. Signorino},
-  title =	 {The Generalization in the Generalized Event Count
-                  Model},
-  journal =	 {Political Analysis},
-  volume =	 6,
-  year =	 1996,
-  pages =	 {225--252},
-  note =
-                  {{http://gking.harvard.edu/files/abs/generaliz-abs.shtml}}
-}
-
-@article{King95,
-  author =	 {Gary King},
-  title =	 {Replication, Replication},
-  journal =	 {PS: Political Science and Politics},
-  volume =	 {28},
-  year =	 1995,
-  pages =	 {443--499},
-  month =	 {September},
-  number =	 3,
-  note =
-                  {{http://gking.harvard.edu/files/abs/replication-abs.shtml}}
-}
-
-@article{King95b,
-  author =	 {Gary King},
-  title =	 {A Revised Proposal, Proposal},
-  journal =	 {PS: Political Science and Politics},
-  volume =	 {28},
-  year =	 1995,
-  pages =	 {494--499},
-  month =	 {September},
-  number =	 3,
-  note =
-                  {{http://gking.harvard.edu/files/abs/replication-abs.shtml}}
-}
-
-@article{KinKeoVer95,
-  author =	 {Gary King and Robert O. Keohane and Sidney Verba},
-  title =	 {The Importance of Research Design in Political
-                  Science},
-  journal =	 {American Political Science Review},
-  volume =	 {89},
-  year =	 {1995},
-  pages =	 {454--481},
-  month =	 {June},
-  number =	 {2},
-  note =
-                  {{http://gking.harvard.edu/files/abs/kkvresp-abs.shtml}}
-}
-
-@article{VosGelKin95,
-  author =	 {D. Steven Voss and Andrew Gelman and Gary King},
-  title =	 {Pre-Election Survey Methodology: Details From Nine
-                  Polling Organizations, 1988 and 1992},
-  journal =	 {Public Opinion Quarterly},
-  volume =	 {59},
-  year =	 {1995},
-  pages =	 {98--132},
-  month =	 {Spring},
-  number =	 {1},
-  note =
-                  {{http://gking.harvard.edu/files/abs/preelection-abs.shtml}}
-}
-
-@article{WinSigKin95,
-  author =	 {Rainer Winkelmann and Curtis Signorino and Gary
-                  King},
-  title =	 {A Correction for an Underdispersed Event Count
-                  Probability Distribution},
-  journal =	 {Political Analysis},
-  year =	 {1995},
-  pages =	 {215--228},
-  note =
-                  {{http://gking.harvard.edu/files/abs/correction-abs.shtml}}
-}
-
-@article{AltKin94,
-  author =	 {James E. Alt and Gary King},
-  title =	 {Transfers of Governmental Power: The Meaning of Time
-                  Dependence},
-  journal =	 {Comparative Political Studies},
-  volume =	 {27},
-  year =	 {1994},
-  pages =	 {190--210},
-  month =	 {July},
-  number =	 {2},
-  note =
-                  {{http://gking.harvard.edu/files/abs/transfers-abs.shtml}}
-}
-
-@article{GelKin94,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {A Unified Method of Evaluating Electoral Systems and
-                  Redistricting Plans},
-  journal =	 {American Journal of Political Science},
-  volume =	 38,
-  year =	 1994,
-  pages =	 {514--554},
-  month =	 {May},
-  number =	 2,
-  note =
-                  {{http://gking.harvard.edu/files/abs/writeit-abs.shtml}}
-}
-
-@article{GelKin94b,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Enhancing Democracy Through Legislative
-                  Redistricting},
-  journal =	 {American Political Science Review},
-  volume =	 {88},
-  year =	 {1994},
-  pages =	 {541--559},
-  month =	 {September},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/red-abs.shtml}}
-}
-
-@incollection{GelKin94c,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Party Competition and Media Messages in
-                  U.S. Presidential Election Campaigns},
-  booktitle =	 {The Parties Respond: Changes in the American Party
-                  System},
-  publisher =	 {Westview Press},
-  year =	 1994,
-  address =	 {Boulder, Colorado},
-  editor =	 {L. Sandy Maisel},
-  pages =	 {255--295},
-  note =
-                  {{http://gking.harvard.edu/files/abs/partycomp-abs.shtml}}
-}
-
-@article{GelKin93,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Why are American Presidential Election Campaign
-                  Polls so Variable when Votes are so Predictable?},
-  journal =	 {British Journal of Political Science},
-  volume =	 23,
-  year =	 1993,
-  pages =	 {409--451},
-  month =	 {October},
-  number =	 1,
-  note =
-                  {{http://gking.harvard.edu/files/abs/variable-abs.shtml}}
-}
-
-@inbook{King93,
-  author =	 {Gary King},
-  title =	 {The Methodology of Presidential Research},
-  year =	 {1993},
-  publisher =	 {University of Pittsburgh},
-  pages =	 {387--412},
-  address =	 {Pittsburgh},
-  editor =	 {George Edwards III and John H. Kessel and Bert
-                  A. Rockman},
-  note =
-                  {{http://gking.harvard.edu/files/abs/methpres-abs.shtml}},
-  journal =	 {Researching the Presidency: Vital Questions, New
-                  Approaches}
-}
-
-@article{KingBruGil93,
-  author =	 {Gary King and John M. Bruce and Michael Gilligan},
-  title =	 {The Science of Political Science Graduate
-                  Admissions},
-  journal =	 {PS: Political Science and Politics},
-  volume =	 {26},
-  year =	 {1993},
-  pages =	 {772--778},
-  month =	 {December},
-  number =	 {4},
-  note =	 {{http://gking.harvard.edu/files/abs/admis-abs.shtml}}
-}
-
-@article{KinLav93,
-  author =	 {Gary King and Michael Laver},
-  title =	 {On Party Platforms, Mandates, and Government
-                  Spending},
-  journal =	 {American Political Science Review},
-  volume =	 {87},
-  year =	 {1993},
-  pages =	 {744--750},
-  month =	 {September},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/hoff-abs.shtml}}
-}
-
-@article{KinWal93,
-  author =	 {Gary King and Daniel J. Walsh},
-  title =	 {Good Research and Bad Research: Extending Zimiles's
-                  Criticism},
-  journal =	 {Early Childhood Research Quarterly},
-  volume =	 {8},
-  year =	 {1993},
-  pages =	 {397--401},
-  month =	 {September},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/good-abs.shtml}}
-}
-
-@article{King91,
-  author =	 {Gary King},
-  title =	 {'Truth' is Stranger than Prediction, More
-                  Questionable Than Causal Inference},
-  journal =	 {American Journal of Political Science},
-  volume =	 {35},
-  year =	 {1991},
-  pages =	 {1047--1053},
-  month =	 {November},
-  number =	 {4},
-  note =	 {{http://gking.harvard.edu/files/abs/truth-abs.shtml}}
-}
-
-@article{King91b,
-  author =	 {Gary King},
-  title =	 {Constituency Service and Incumbency Advantage},
-  journal =	 {British Journal of Political Science},
-  volume =	 {21},
-  year =	 {1991},
-  pages =	 {119--128},
-  month =	 {January},
-  number =	 {1},
-  note =
-                  {{http://gking.harvard.edu/files/abs/constit-abs.shtml}}
-}
-
-@article{King91c,
-  author =	 {Gary King},
-  title =	 {On Political Methodology},
-  journal =	 {Political Analysis},
-  volume =	 {2},
-  year =	 {1991},
-  pages =	 {1--30},
-  note =
-                  {{http://gking.harvard.edu/files/abs/polmeth-abs.shtml}}
-}
-
-@article{King91d,
-  author =	 {Gary King},
-  title =	 {Stochastic Variation: A Comment on Lewis-Beck and
-                  Skalaban's `The R-Square'},
-  journal =	 {Political Analysis},
-  volume =	 {2},
-  year =	 {1991},
-  pages =	 {185--200},
-  note =	 {{http://gking.harvard.edu/files/abs/stoch-abs.shtml}}
-}
-
-@article{King91e,
-  author =	 {Gary King},
-  title =	 {Calculating Standard Errors of Predicted Values
-                  based on Nonlinear Functional Forms},
-  journal =	 {The Political Methodologist},
-  volume =	 {4},
-  year =	 {1991},
-  month =	 {Fall},
-  number =	 {2}
-}
-
-@article{KinGel91,
-  author =	 {Gary King and Andrew Gelman},
-  title =	 {Systemic Consequences of Incumbency Advantage in the
-                  U.S. House},
-  journal =	 {American Journal of Political Science},
-  volume =	 35,
-  year =	 1991,
-  pages =	 {110--138},
-  month =	 {February},
-  number =	 1 ,
-  note =
-                  {{http://gking.harvard.edu/files/abs/sysconseq-abs.shtml}}
-}
-
-@article{AnsKin90,
-  author =	 {Stephen Ansolabehere and Gary King},
-  title =	 {Measuring the Consequences of Delegate Selection
-                  Rules in Presidential Nominations},
-  journal =	 {Journal of Politics},
-  volume =	 {52},
-  year =	 {1990},
-  pages =	 {609--621},
-  month =	 {May},
-  number =	 {2},
-  note =	 {{http://gking.harvard.edu/files/abs/pri-abs.shtml}}
-}
-
-@article{GelKin90,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Estimating the Electoral Consequences of Legislative
-                  Redistricting},
-  journal =	 {Journal of the American Statistical Association},
-  volume =	 {85},
-  year =	 {1990},
-  pages =	 {274--282},
-  month =	 {June},
-  number =	 {410},
-  note =	 {{http://gking.harvard.edu/files/abs/svstat-abs.shtml}}
-}
-
-@article{GelKin90b,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Estimating Incumbency Advantage Without Bias},
-  journal =	 {American Journal of Political Science},
-  volume =	 {34},
-  year =	 {1990},
-  pages =	 {1142--1164},
-  month =	 {November},
-  number =	 {4},
-  note =	 {{http://gking.harvard.edu/files/abs/inc-abs.shtml}}
-}
-
-@article{KinAltBur90,
-  author =	 {Gary King and James Alt and Nancy Burns and Michael
-                  Laver},
-  title =	 {A Unified Model of Cabinet Dissolution in
-                  Parliamentary Democracies},
-  journal =	 {American Journal of Political Science},
-  volume =	 {34},
-  year =	 {1990},
-  pages =	 {846--871},
-  month =	 {August},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/coal-abs.shtml}}
-}
-
-@article{King90,
-  author =	 {Gary King},
-  title =	 {Electoral Responsiveness and Partisan Bias in
-                  Multiparty Democracies},
-  journal =	 {Legislative Studies Quarterly},
-  volume =	 {15},
-  year =	 {1990},
-  pages =	 {159--181},
-  month =	 {May},
-  number =	 {2},
-  note =
-                  {{http://gking.harvard.edu/files/abs/electresp-abs.shtml}}
-}
-
-@article{GelKin89,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Electoral Responsiveness in U.S. Congressional
-                  Elections, 1946-1986},
-  journal =	 {Proceedings of the Social Statistics Section,
-                  American Statistical Association},
-  year =	 {1989},
-  pages =	 {208}
-}
-
-@article{King89b,
-  author =	 {Gary King},
-  title =	 {Representation Through Legislative Redistricting: A
-                  Stochastic Model},
-  journal =	 {American Journal of Political Science},
-  volume =	 {33},
-  year =	 {1989},
-  pages =	 {787--824},
-  month =	 {November},
-  number =	 {4},
-  note =
-                  {{http://gking.harvard.edu/files/abs/repstoch-abs.shtml}}
-}
-
-@article{King89c,
-  author =	 {Gary King},
-  title =	 {Event Count Models for International Relations:
-                  Generalizations and Applications},
-  journal =	 {International Studies Quarterly},
-  volume =	 {33},
-  year =	 {1989},
-  pages =	 {123--147},
-  month =	 {June},
-  number =	 {2},
-  note =	 {{http://gking.harvard.edu/files/abs/ISQ33-abs.shtml}}
-}
-
-@article{King89d,
-  author =	 {Gary King},
-  title =	 {Variance Specification in Event Count Models: From
-                  Restrictive Assumptions to a Generalized Estimator},
-  journal =	 {American Journal of Political Science},
-  volume =	 33,
-  year =	 1989,
-  pages =	 {762--784},
-  month =	 {August},
-  number =	 3 ,
-  note =
-                  {{http://gking.harvard.edu/files/abs/varspecec-abs.shtml}}
-}
-
-@article{King89e,
-  author =	 {Gary King},
-  title =	 {A Seemingly Unrelated Poisson Regression Model},
-  journal =	 {Sociological Methods and Research},
-  volume =	 {17},
-  year =	 {1989},
-  pages =	 {235--255},
-  month =	 {February},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/SMR17-abs.shtml}}
-}
-
-@article{King88,
-  author =	 {Gary King},
-  title =	 {Statistical Models for Political Science Event
-                  Counts: Bias in Conventional Procedures and Evidence
-                  for The Exponential Poisson Regression Model},
-  journal =	 {American Journal of Political Science},
-  volume =	 32,
-  year =	 1988,
-  pages =	 {838--863},
-  month =	 {August},
-  number =	 3 ,
-  note =	 {{http://gking.harvard.edu/files/abs/epr-abs.shtml}}
-}
-
-@article{BroKin87,
-  author =	 {Robert X Browning and Gary King},
-  title =	 {Seats, Votes, and Gerrymandering: Measuring Bias and
-                  Representation in Legislative Redistricting},
-  journal =	 {Law and Policy},
-  volume =	 {9},
-  year =	 {1987},
-  pages =	 {305--322},
-  month =	 {July},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/LP9-abs.shtml}}
-}
-
-@article{KinBro87,
-  author =	 {Gary King and Robert X Browning},
-  title =	 {Democratic Representation and Partisan Bias in
-                  Congressional Elections},
-  journal =	 {American Political Science Review},
-  volume =	 {81},
-  year =	 {1987},
-  pages =	 {1252--1273},
-  month =	 {December},
-  number =	 {4},
-  note =	 {{http://gking.harvard.edu/files/abs/sv-abs.shtml}}
-}
-
-@article{King87,
-  author =	 {Gary King},
-  title =	 {Presidential Appointments to the Supreme Court:
-                  Adding Systematic Explanation to Probabilistic
-                  Description},
-  journal =	 {American Politics Quarterly},
-  volume =	 {15},
-  year =	 {1987},
-  pages =	 {373--386},
-  month =	 {July},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/sct-abs.shtml}}
-}
-
-@article{King86,
-  author =	 {Gary King},
-  title =	 {How Not to Lie With Statistics: Avoiding Common
-                  Mistakes in Quantitative Political Science},
-  journal =	 {American Journal of Political Science},
-  volume =	 {30},
-  year =	 {1986},
-  pages =	 {666--687},
-  month =	 {August},
-  number =	 {3},
-  note =	 {{http://gking.harvard.edu/files/abs/mist-abs.shtml}}
-}
-
-@article{King86b,
-  author =	 {Gary King},
-  title =	 {The Significance of Roll Calls in Voting Bodies: A
-                  Model and Statistical Estimation},
-  journal =	 {Social Science Research},
-  volume =	 {15},
-  year =	 {1986},
-  pages =	 {135--152},
-  month =	 {June},
-  note =	 {{http://gking.harvard.edu/files/abs/SSR15-abs.shtml}}
-}
-
-@article{King86c,
-  author =	 {Gary King},
-  title =	 {Political Parties and Foreign Policy: A
-                  Structuralist Approach},
-  journal =	 {Political Psychology},
-  volume =	 {7},
-  year =	 {1986},
-  pages =	 {83--101},
-  month =	 {March},
-  number =	 {1},
-  note =	 {{http://gking.harvard.edu/files/abs/PP7-abs.shtml}}
-}
-
-@article{KinMer86,
-  author =	 {Gary King and Richard Merelman},
-  title =	 {The Development of Political Activists: A Model of
-                  Early Learning},
-  journal =	 {Social Science Quarterly},
-  volume =	 {67},
-  year =	 {1986},
-  pages =	 {473--490},
-  month =	 {September},
-  number =	 {3},
-  note =
-                  {{http://gking.harvard.edu/files/abs/poliactiv-abs.shtml}}
-}
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Data
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-@Article{IacKinPor11b,
-  author =	 {Stefano M. Iacus and Gary King and Giuseppe Porro},
-  title =	 {Replication data for: Causal Inference Without
-                  Balance Checking: Coarsened Exact Matching},
-  journal =	 { },
-  year =	 2011,
-  note =	 {http://hdl.handle.net/1902.1/15601 Murray Research
-                  Archive [Distributor] V1 [Version]}
-}
-
-@article{HopKin09b,
-  author =	 {Daniel Hopkins and Gary King},
-  title =	 {Replication Data for: A Method of Automated
-                  Nonparametric Content Analysis for Social Science},
-  journal =	 { },
-  year =	 2009,
-  note =	 {\underline{UNF:3:xlE5stLgKvpeMvxzlLxzEQ==}
-                  hdl:1902.1/12898 Murray Research Archive [Distributor]}
-}
-
-
-@article{KinGakIma09b,
-  Author =	 {Gary King and Emmanuela Gakidou and Kosuke Imai and
-                  Jason Lakin and Clayton Nall and Ryan T. Moore and
-                  Nirmala Ravishankar and Manett Vargas and Martha
-                  Mar{\'i}a T{\'e}llez-Rojo and Juan Eugenio
-                  Hern{\'a}ndez {\'A}vila and Mauricio Hern{\'a}ndez
-                  {\'A}vila and H{\'e}ctor Hern{\'a}ndez Llamas},
-  title =	 {Replication Data for: Public Policy for the Poor? A
-                  Randomized Ten-Month Evaluation of the Mexican
-                  Universal Health Insurance Program},
-  journal =	 { },
-  year =	 {2009},
-  note =	 {\underline{hdl:1902.1/11044}
-                  UNF:3:jeUN9XODtYUp2iUbe8gWZQ== Murray Research
-                  Archive [Distributor]}
-}
-
-@article{ImaKinNal09c,
-  author =	 {Kosuke Imai and Gary King and Clayton Nall},
-  title =	 {Replication Data for: The Essential Role of Pair-Matching in
-                  Cluster-Randomized Experiments, with Application to
-                  the Mexican Universal Health Insurance Evaluation: Rejoinder},
-  journal =	 { },
-  year =	 {2009},
-  note =	 {\underline{hdl:1902.1/12730}
-                  UNF:3:CKs4T0iVYxP36LQSMgAkuw== Murray Research
-                  Archive [Distributor]}
-}
-
-@article{ImaKinNal09b,
-  author =	 {Kosuke Imai and Gary King and Clayton Nall},
-  title =	 {Replication Data for: The Essential Role of Pair Matching in
-                  Cluster-Randomized Experiments, with Application to
-                  the Mexican Universal Health Insurance Evaluation},
-  journal =	 { },
-  year =	 {2009},
-  note =	 {\underline{hdl:1902.1/11047}
-                  UNF:3:jeUN9XODtYUp2iUbe8gWZQ== Murray Research
-                  Archive [Distributor]}
-}
-
-@Article{KinZen08,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication data for: Empirical vs. Theoretical
-                  Claims about Extreme Counterfactuals: A Response},
-  journal =	 { },
-  year =	 2008,
-  note =	 {\underline{hdl:1902.1/11903}, Murray Research
-                  Archive [Distributor]}
-}
-
-@article{GakKin06b,
-  author =	 {Emmanuela Gakidou and Gary King},
-  title =	 {Replication data for: Death by Survey: Estimating
-                  Adult Mortality without Selection Bias from Sibling
-                  Survival Data},
-  year =	 2006,
-  note =	 {{\underline{hdl:1902.1/ZMESWNECZW}
-                  Murray Research Archive [Distributor]}}
-}
-
-@article{GirKin06,
-  author =	 {Federico Girosi and Gary King},
-  title =	 {Cause of Death Data},
-  year =	 {2006},
-  note =	 {{\underline{hdl:1902.1/UOVMCPSWOL}
-                  UNF:3:9JU+SmVyHgwRhAKclQ85Cg== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{HoImaKin06,
-  author =	 {Daniel E. Ho and Kosuke Imai and Gary King and
-                  Elizabeth A. Stuart},
-  title =	 {Replication Data Set for: Matching as Nonparametric
-                  Preprocessing for Reducing Model Dependence in
-                  Parametric Causal Inference},
-  year =	 2006,
-  note =	 {{\underline{hdl:1902.1/YVDZEQIYDS}
-                  Murray Research Archive [distributor]}}
-}
-
-@article{KinAlt06,
-  author =	 {Gary King and James E. Alt and Nancy Burns and
-                  Michael Laver},
-  title =	 {Replication data for: A Unified Model of Cabinet
-                  Dissolution in Parliamentary Democracies},
-  year =	 {2006},
-  note =	 {{\underline{hdl:1902.1/RMPXNUSBBS}
-                  UNF:3:lfKIeFJKgejkOzXEY1i6lw== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KinZen06b,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication Data Set for: When Can History be Our
-                  Guide? The Pitfalls of Counterfactual Inference},
-  year =	 2006,
-  note =	 {{\underline{hdl:1902.1/DXRXCFAWPK}
-                  Murray Research Archive [distributor]}}
-}
-
-@article{KinZen06c,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication data for: Detecting Model Dependence in
-                  Statistical Inference: A Response},
-  year =	 {2006},
-  note =	 {{\underline{hdl:1902.1/FGSRBXXIYT}
-                  UNF:3:K4/CgnMYDMV6izc5RVOZTA== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KinZen06d,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication data for: When Can History be Our Guide?
-                  The Pitfalls of Counterfactual Inference},
-  year =	 {2006},
-  note =	 {{\underline{hdl:1902.1/DXRXCFAWPK}
-                  UNF:3:DaYlT6QSX9r0D50ye+tXpA== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KinZen06e,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication data for: The Dangers of Extreme
-                  Counterfactuals },
-  year =	 {2006},
-  note =	 {{\underline{hdl:1902.1/UTVMBVNGMX}
-                  UNF:3:ytKKNjK+yR8Pq3H0RcV6eg== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{EpsHoKin05b,
-  author =	 {Lee Epstein and Daniel E. Ho and Gary King and
-                  Jeffrey A. Segal},
-  title =	 {Replication data for: The Supreme Court During
-                  Crisis: How War Affects Only Nonwar Cases},
-  year =	 {2005},
-  note =	 {{\underline{hdl:1902.1/RESUDVYWPE}
-                  UNF:3:ZmbzFbfqogNM0Gb6CcV52A== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{BecKinZen04b,
-  author =	 {Nathaniel Beck and Gary King and Langche Zeng},
-  title =	 {Replication data for: Gelpi and Grynaviski},
-  year =	 {2004},
-  note =	 {{\underline{hdl:1902.1/LAAYCJJGDS}
-                  UNF:3:N0bEAswAlPPVXCxPOZYyqw== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{King03b,
-  author =	 {Gary King},
-  title =	 {10 Million International Dyadic Events},
-  year =	 {2003},
-  note =	 {{\underline{hdl:1902.1/FYXLAWZRIA}
-                  UNF:3:um06qkr/1tAwpS4roUqAiw== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KinZen01c,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication data for: Explaining Rare Events in
-                  International Relations},
-  year =	 {2001},
-  note =	 {\underline{hdl:1902.1/OUCBSJKXIC}
-                  UNF:3:vyct3c8fMCdWOdp03NUhaA== Murray Research
-                  Archive [Distributor]}
-}
-
-@article{KinZen01d,
-  author =	 {Gary King and Langche Zeng},
-  title =	 {Replication data for: Improving Forecasts of State
-                  Failure},
-  year =	 {2001},
-  note =	 {{\underline{hdl:1902.1/RPQIODIANR}
-                  UNF:3:CEsbEgPxbxExfYuh2NWwWQ== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{BecKinZen00b,
-  author =	 {Nathaniel Beck and Gary King and Langche Zeng},
-  title =	 {Replication data for: Improving Quantitative Studies
-                  of International Conflict: A Conjecture},
-  year =	 {2000},
-  note =	 {{\underline{hdl:1902.1/SZKONDGOMF}
-                  UNF:3:rYRDzT8dCJ/BR7V9u8fObA== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KinTomWit00b,
-  author =	 {Gary King and Michael Tomz and Jason Wittenberg},
-  title =	 {Replication data for: Making the Most of Statistical
-                  Analyses: Improving Interpretation and Presentation},
-  year =	 {2000},
-  note =	 {{\underline{hdl:1902.1/QTCABXZZRQ}
-                  UNF:3:1VaLflZ/LfB+AISX+hBm1w== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KatKin99b,
-  author =	 {Jonathan Katz and Gary King},
-  title =	 {Replication data for: A Statistical Model of
-                  Multiparty Electoral Data},
-  year =	 {1999},
-  note =	 {{\underline{hdl:1902.1/QIGTWZYTLZ}
-                  UNF:3:gwGcKylle0BKJTGv3Zv4OA== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{GelKinBos98b,
-  author =	 {Andrew Gelman and Gary King and John Boscardin},
-  title =	 {Replication data for: Estimating the Probability of
-                  Events that have Never Occurred: When is your Vote
-                  Decisive},
-  year =	 {1998},
-  note =	 {{\underline{hdl:1902.1/NOLXXTUHNZ}
-                  UNF:3:ORDulVH6qEb4lsCyDn5W3A== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{King97b,
-  author =	 {Gary King},
-  title =	 {Replication data for: A Solution to the Ecological
-                  Inference Problem: Reconstructing Individual
-                  Behavior from Aggregate Data},
-  year =	 {1997},
-  note =	 {{\underline{hdl:1902.1/LWMMKUTYXS}
-                  UNF:3:DRWozWd89+vNLO7lY2AHbg== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{GelKin94d,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Replication data for: Enhancing Democracy Through
-                  Legislative Redistricting},
-  year =	 {1994},
-  note =	 {{\underline{hdl:1902.1/BNCOWNVERH}
-                  UNF:3:ZXahi7PBFxLRb46sVKOAuQ== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{GelKin94e,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Replication data for: Unified Methods of Evaluating
-                  Electoral Systems and Redistricting Plans: United
-                  States House of Representatives and Ohio State
-                  Legislature},
-  year =	 {1994},
-  note =	 {{\underline{hdl:1902.1/JWFTSFKOBK}
-                  UNF:3:Fi01DWj4Sx+0ZEOEo4TOXA== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{King94,
-  author =	 {Gary King},
-  title =	 {Elections to the United States House of
-                  Representatives, 1898-1992},
-  year =	 {1994},
-  note =	 {{\underline{hdl:1902.1/TQDSSPRDDZ}
-                  UNF:3:tD8SznMFjKIxWxOqTQaamQ== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{GelKin93b,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {Replication data for: Why Are American Presidential
-                  Election Campaign Polls so Variable When Votes are
-                  so Predictable?},
-  year =	 {1993},
-  note =	 {{\underline{hdl:1902.1/SBBXEUSSCW}
-                  Murray Research Archive [Distributor]}}
-}
-
-@article{KinLav93b,
-  author =	 {Gary King and Michael Laver},
-  title =	 {Replication data for: On Party Platforms, Mandates,
-                  and Government Spending},
-  year =	 {1993},
-  note =	 {{\underline{hdl:1902.1/XEHYCJAWQD}
-                  UNF:3:cwNXuRQ/6Lp72obLkttmGg== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{King91g,
-  author =	 {Gary King},
-  title =	 {Replication data for: Constituency Service and
-                  Incumbency Advantage},
-  year =	 {1991},
-  note =	 {{\underline{hdl:1902.1/JTMXGSZXIZ}
-                  UNF:3:IE4ZSAs8ZzUK+fRXNbVvGw== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{King91f,
-  author =	 {Gary King},
-  title =	 {Replication data for: On Political Methodology},
-  year =	 {1991},
-  note =	 {{\underline{hdl:1902.1/KHTLSQXAEJ}
-                  Murray Research Archive [Distributor]}}
-}
-
-@article{AnsKin90b,
-  author =	 {Stephen Ansolabehere and Gary King},
-  title =	 {Replication data for: Measuring the Consequences of
-                  Delegate Selection Rules in Presidential
-                  Nominations},
-  year =	 {1990},
-  note =	 {{\underline{hdl:1902.1/BUJXCEPXQK}
-                  UNF:3:OdFPcQcvfO5hc3WJ5ty8vQ== Murray Research
-                  Archive [Distributor]}}
-}
-
-@article{KinBen86,
-  author =	 {Gary King and Gerald Benjamin},
-  title =	 {Replication data for: The Stability of Partisan
-                  Identification in the U.S. House of Representatives,
-                  1789-1984},
-  year =	 {1986},
-  note =	 {{\underline{hdl:1902.1/HINHTJQYFO}
-                  Murray Research Archive [Distributor]}}
-}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Software
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-@Article{IacKinPor09b,
-  author =	 {Stefano M. Iacus and Gary King and Giuseppe Porro},
-  title =	 {CEM: Coarsened Exact Matching Software},
-  journal =	 {Journal of Statistical Software},
-  volume =	 30,
-  number =	 9,
-  year =	 2009,
-  note =	 {{http://gking.harvard.edu/cem}}
-}
-
-@article{WanKinLau07,
-  author =	 {Jonathan Wand and Gary King and Olivia Lau},
-  title =	 {Anchors: Software for Anchoring Vignettes Data},
-  journal =	 {Journal of Statistical Software},
-  year =	 {2007, forthcoming}
-}
-
-@Article{HonKinBLa10,
-  author =	 {James Honaker and Gary King and Matthew Blackwell},
-  title =	 {Amelia II: A Program for Missing Data},
-  year =	 2010,
-  note =	 {{http://gking.harvard.edu/amelia}}
-}
-
-@article{ImaKinLau06,
-  author =	 {Kosuke Imai and Gary King and Olivia Lau},
-  title =	 {Zelig: Everyone's Statistical Software},
-  year =	 2006,
-  note =	 {{http://gking.harvard.edu/zelig}}
-}
-
-@article{TomWitKin05,
-  author =	 {Michael Tomz and Jason Wittenberg and Gary King},
-  title =	 {CLARIFY: Software for Interpreting and Presenting
-                  Statistical Results},
-  year =	 {1998-2005},
-  note =	 {{http://gking.harvard.edu/stats.shtml#clarify}}
-}
-
-@article{HonJosKin98,
-  author =	 {James Honaker and Anne Joseph and Gary King and
-                  Kenneth Scheve and Naunihal Singh},
-  title =	 {AMELIA: A Program for Missing Data},
-  year =	 {1998-2002},
-  note =	 {{http://gking.harvard.edu/amelia}}
-}
-
-@article{King98,
-  author =	 {Gary King},
-  title =	 {MAXLIK, a set of Gauss programs, annotated for
-                  pedagogical purposes, to implement the maximum
-                  likelihood models in Unifying Political Methodology:
-                  The Likelihood Theory of Statistical Inference},
-  year =	 {1998},
-  note =	 {{http://gking.harvard.edu/stats.shtml#maxlik}}
-}
-
-@article{King96b,
-  author =	 {Gary King},
-  title =	 {EI: Program for Ecological Inference},
-  year =	 {1996-2003},
-  note =	 {{http://gking.harvard.edu/stats.shtml#ei}}
-}
-
-@article{GelKin92,
-  author =	 {Andrew Gelman and Gary King},
-  title =	 {JudgeIt: A Program for Evaluating Electoral Systems
-                  and Redistricting Plans},
-  year =	 {1992-2002},
-  note =	 {{http://gking.harvard.edu/stats.shtml#judgeit}}
-}
-
-@article{HoImaKin07a,
-  author =	 {Daniel E. Ho and Kosuke Imai and Gary King and
-                  Elizabeth A. Stuart},
-  title =	 {MatchIt: Nonparametric Preprocessing for Parametric
-                  Causal Inference},
-  year =	 {Forthcoming},
-  journal =	 {Journal of Statistical Software},
-  note =	 {{http://gking.harvard.edu/matchit}}
-}
-
-@InCollection{Gelman04,
-  author = 	 {Andrew Gelman},
-  title = 	 {Treatment Effects in Before-After Data},
-  booktitle = 	 {Applied Bayesian Modeling and Causal Inference from
-                  an Incomplete Data Perspective},
-  publisher =	 {Wiley},
-  year =	 2004,
-  editor =	 {Andrew Gelman and Xiao-Li Meng},
-  chapter =	 18,
-  address =	 {London}
-}
-

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-zelig.git


