[r-cran-zelig] 83/102: Import Upstream version 4.1-3

Andreas Tille tille at debian.org
Sun Jan 8 17:00:17 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-zelig.

commit 1af63bcc0e7d6167f2551da86aed53edfebcb228
Author: Andreas Tille <tille at debian.org>
Date:   Sun Jan 8 09:40:30 2017 +0100

    Import Upstream version 4.1-3
---
 ANNOUNCEMENT.txt                               |   58 +
 CHANGES                                        |   26 +
 LICENSE => COPYING                             |    0
 DESCRIPTION                                    |   25 +-
 LICENSE                                        |  293 +----
 MD5                                            |  995 +++++++----------
 NAMESPACE                                      |  449 +++++---
 R/Diff.R                                       |    5 -
 R/GetObject.R                                  |   10 +
 R/GetSlot.R                                    |   12 +
 R/GetSlot.zelig.R                              |   38 +
 R/MCMChook.R                                   |   75 ++
 R/MIsimulation.R                               |   58 -
 R/MLutils.R                                    |   53 +-
 R/Zelig-package.R                              |   37 +
 R/arima.wrap.R                                 |    7 -
 R/as.dataframe.setx.R                          |   15 +
 R/as.matrix.pooled.setx.R                      |   32 +
 R/as.matrix.setx.R                             |   26 +
 R/as.parameters.R                              |  102 ++
 R/as.qi.R                                      |  247 +++++
 R/as.summarized.R                              |   29 +
 R/as.summarized.list.R                         |   12 +
 R/attach.env.R                                 |   55 +
 R/bootfn.default.R                             |  258 ++++-
 R/bootstrap.R                                  |   35 +
 R/bootstrap.gamma.R                            |   17 +
 R/bootstrap.negbinom.R                         |   17 +
 R/bootstrap.normal.R                           |   23 +
 R/callToString.R                               |   10 +
 R/callnetlm.R                                  |   62 --
 R/callnetlogit.R                               |   53 -
 R/callsystemfit.R                              |    8 -
 R/categories.R                                 |   10 -
 R/check.describe.R                             |   50 -
 R/chopitcode.R                                 |  245 -----
 R/class.ind.R                                  |   11 -
 R/cluster.formula.R                            |   22 +
 R/cmvglm.R                                     |    8 +
 R/coef.BetaReg.R                               |    2 -
 R/common-methods.R                             |   18 +
 R/current.packages.R                           |   66 --
 R/describe.R                                   |    7 +
 R/describe.aov.R                               |   19 -
 R/describe.arima.R                             |   21 -
 R/describe.blogit.R                            |   20 -
 R/describe.bprobit.R                           |   20 -
 R/describe.chopit.R                            |   31 -
 R/describe.coxph.R                             |   19 -
 R/describe.default.R                           |   31 +-
 R/describe.ei.RxC.R                            |   20 -
 R/describe.ei.dynamic.R                        |   20 -
 R/describe.ei.hier.R                           |   20 -
 R/describe.exp.R                               |   18 -
 R/describe.factor.bayes.R                      |   19 -
 R/describe.factor.mix.R                        |   21 -
 R/describe.factor.ord.R                        |   20 -
 R/describe.gam.logit.R                         |   18 -
 R/describe.gam.normal.R                        |   17 -
 R/describe.gam.poisson.R                       |   17 -
 R/describe.gam.probit.R                        |   17 -
 R/describe.gamma.R                             |   17 -
 R/describe.gamma.gee.R                         |   17 -
 R/describe.gamma.mixed.R                       |   32 -
 R/describe.gamma.survey.R                      |   17 -
 R/describe.irt1d.R                             |   19 -
 R/describe.irtkd.R                             |   19 -
 R/describe.lm.mixed.R                          |   32 -
 R/describe.logit.R                             |   18 -
 R/describe.logit.bayes.R                       |   17 -
 R/describe.logit.gee.R                         |   17 -
 R/describe.logit.mixed.R                       |   31 -
 R/describe.logit.survey.R                      |   17 -
 R/describe.lognorm.R                           |   19 -
 R/describe.ls.R                                |   17 -
 R/describe.mlogit.R                            |   19 -
 R/describe.mlogit.bayes.R                      |   19 -
 R/describe.negbin.R                            |   17 -
 R/describe.netcloglog.R                        |   17 -
 R/describe.netgamma.R                          |   18 -
 R/describe.netlogit.R                          |   17 -
 R/describe.netls.R                             |   17 -
 R/describe.netnormal.R                         |   17 -
 R/describe.netpoisson.R                        |   18 -
 R/describe.netprobit.R                         |   18 -
 R/describe.normal.R                            |   17 -
 R/describe.normal.bayes.R                      |   17 -
 R/describe.normal.gee.R                        |   17 -
 R/describe.normal.survey.R                     |   17 -
 R/describe.ologit.R                            |   19 -
 R/describe.oprobit.R                           |   22 -
 R/describe.oprobit.bayes.R                     |   19 -
 R/describe.poisson.R                           |   17 -
 R/describe.poisson.bayes.R                     |   17 -
 R/describe.poisson.gee.R                       |   17 -
 R/describe.poisson.mixed.R                     |   31 -
 R/describe.poisson.survey.R                    |   17 -
 R/describe.probit.R                            |   18 -
 R/describe.probit.bayes.R                      |   17 -
 R/describe.probit.gee.R                        |   17 -
 R/describe.probit.mixed.R                      |   32 -
 R/describe.probit.survey.R                     |   17 -
 R/describe.quantile.R                          |   17 -
 R/describe.relogit.R                           |   17 -
 R/describe.rq.R                                |   17 -
 R/describe.sur.R                               |   16 -
 R/describe.threesls.R                          |   20 -
 R/describe.tobit.R                             |   17 -
 R/describe.tobit.bayes.R                       |   17 -
 R/describe.twosls.R                            |   20 -
 R/describe.weibull.R                           |   19 -
 R/describe.zelig.R                             |   13 +
 R/description.R                                |  155 +++
 R/dims.R                                       |    6 -
 R/eiRxC.R                                      |  220 ----
 R/exp.R                                        |  140 +++
 R/factor.bayes.R                               |   55 +
 R/formula.vglm.R                               |    3 -
 R/gamma.R                                      |  131 +++
 R/gamma.gee.R                                  |  161 +++
 R/gamma.survey.R                               |  176 +++
 R/get.R                                        |   43 -
 R/get.package.R                                |   94 ++
 R/getPredictorTerms.R                          |   45 +
 R/getResponseTerms.R                           |   10 +
 R/getResponseTerms.formula.R                   |  130 +++
 R/getResponseTerms.list.R                      |   29 +
 R/gsource.R                                    |   15 -
 R/help.zelig.R                                 |    7 +-
 R/ignore.R                                     |   22 +
 R/is.formula.R                                 |    9 +
 R/lag.eps.R                                    |    3 -
 R/lag.y.R                                      |    3 -
 R/list.depth.R                                 |   35 +
 R/load.first.R                                 |   32 -
 R/logit.R                                      |  152 +++
 R/logit.bayes.R                                |   89 ++
 R/logit.gee.R                                  |   98 ++
 R/logit.survey.R                               |  191 ++++
 R/lognorm.R                                    |  107 ++
 R/ls.R                                         |   94 ++
 R/make.parameters.R                            |    8 +
 R/makeModelMatrix.R                            |   39 +
 R/mcmcei.R                                     |   15 -
 R/mi.R                                         |   28 +-
 R/mlogit.bayes.R                               |  103 ++
 R/model.end.R                                  |   17 -
 R/model.frame.gamF.R                           |    5 -
 R/model.frame.multiple.R                       |   12 +
 R/model.matrix.multiple.R                      |   14 +
 R/model.matrix.parseFormula.R                  |   32 +
 R/model.warnings.R                             |   71 ++
 R/multi.R                                      |   26 -
 R/multi.dataset.R                              |  144 +++
 R/multipleUtil.R                               |   27 +-
 R/names.ZeligS4vglm.R                          |    4 -
 R/names.relogit.R                              |    6 -
 R/names.summary.vglm.R                         |    4 -
 R/names.summary.zelig.R                        |   11 -
 R/names.summary.zelig.relogit.R                |   11 -
 R/names.zelig.R                                |   11 -
 R/names.zelig.relogit.R                        |   11 -
 R/negbinom.R                                   |  119 ++
 R/netbinom.R                                   |  242 -----
 R/netgamma.R                                   |  242 -----
 R/netlogit.zelig.R                             |  200 ----
 R/netnormal.R                                  |  242 -----
 R/netpoisson.R                                 |  242 -----
 R/network.R                                    |   17 -
 R/normal.R                                     |  122 +++
 R/normal.bayes.R                               |   90 ++
 R/normal.gee.R                                 |   71 ++
 R/normal.survey.R                              |  161 +++
 R/oprobit.bayes.R                              |  140 +++
 R/packageConflicts.R                           |    7 -
 R/param.MCMCZelig.R                            |   14 -
 R/param.R                                      |   60 +-
 R/param.default.R                              |    7 -
 R/param.eiRxC.R                                |   12 -
 R/param.gam.R                                  |   27 -
 R/param.gee.R                                  |   12 -
 R/param.glm.R                                  |   46 -
 R/param.lm.R                                   |   18 -
 R/param.mixed.R                                |   42 -
 R/param.multinom.R                             |   12 -
 R/param.multiple.R                             |   19 -
 R/param.netglm.R                               |   42 -
 R/param.netlm.R                                |   16 -
 R/param.polr.R                                 |   13 -
 R/param.relogit.R                              |   38 -
 R/param.rq.R                                   |   12 -
 R/param.survreg.R                              |   19 -
 R/param.svyglm.R                               |   46 -
 R/param.vglm.R                                 |    7 -
 R/param.zaov.R                                 |   46 -
 R/parameters.R                                 |  132 +++
 R/parse.formula.R                              |    7 +
 R/parse.par.R                                  |   50 -
 R/parseFormula.R                               |  120 ++
 R/plot.ci.R                                    |   81 --
 R/plot.surv.R                                  |  100 --
 R/plot.zelig.R                                 |   15 -
 R/plot.zelig.arima.R                           |  172 ---
 R/plot.zelig.blogit.R                          |   36 -
 R/plot.zelig.bprobit.R                         |   36 -
 R/plot.zelig.coxph.R                           |   30 -
 R/plot.zelig.default.R                         |   14 -
 R/plot.zelig.gamma.gee.R                       |   14 -
 R/plot.zelig.logit.R                           |   20 -
 R/plot.zelig.logit.gam.R                       |   22 -
 R/plot.zelig.logit.gee.R                       |   17 -
 R/plot.zelig.logit.survey.R                    |   20 -
 R/plot.zelig.mlogit.R                          |   49 -
 R/plot.zelig.negbin.R                          |   13 -
 R/plot.zelig.netcloglog.R                      |   20 -
 R/plot.zelig.netlogit.R                        |   20 -
 R/plot.zelig.netpoisson.R                      |   16 -
 R/plot.zelig.netprobit.R                       |   20 -
 R/plot.zelig.normal.gee.R                      |   12 -
 R/plot.zelig.ologit.R                          |   49 -
 R/plot.zelig.oprobit.R                         |   49 -
 R/plot.zelig.poisson.R                         |   16 -
 R/plot.zelig.poisson.gam.R                     |   18 -
 R/plot.zelig.poisson.gee.R                     |   15 -
 R/plot.zelig.poisson.survey.R                  |   16 -
 R/plot.zelig.probit.R                          |   20 -
 R/plot.zelig.probit.gam.R                      |   22 -
 R/plot.zelig.probit.gee.R                      |   20 -
 R/plot.zelig.probit.survey.R                   |   20 -
 R/plot.zelig.relogit.R                         |   42 -
 R/plot.zeliglist.R                             |   21 -
 R/plots.R                                      |  673 ++++++++++++
 R/poisson.R                                    |  116 ++
 R/poisson.bayes.R                              |   90 ++
 R/poisson.gee.R                                |   71 ++
 R/poisson.survey.R                             |  155 +++
 R/print.BetaReg.R                              |   18 -
 R/print.R                                      |  411 +++++++
 R/print.arimaSummary.R                         |   11 -
 R/print.coxhazard.R                            |    8 -
 R/print.eiRxC.R                                |    6 -
 R/print.names.relogit.R                        |   10 -
 R/print.names.zelig.R                          |    3 -
 R/print.relogit.R                              |   11 -
 R/print.relogit2.R                             |    7 -
 R/print.summary.MCMCZelig.R                    |   13 -
 R/print.summary.MI.R                           |   28 -
 R/print.summary.glm.robust.R                   |   12 -
 R/print.summary.lm.robust.R                    |   72 --
 R/print.summary.relogit.R                      |   14 -
 R/print.summary.relogit2.R                     |    6 -
 R/print.summary.strata.R                       |   16 -
 R/print.summary.zelig.R                        |   68 --
 R/print.summary.zelig.strata.R                 |   14 -
 R/print.zaovlist.R                             |   12 -
 R/print.zelig.R                                |   12 -
 R/probit.R                                     |   73 ++
 R/probit.bayes.R                               |   48 +
 R/probit.gee.R                                 |   71 ++
 R/probit.survey.R                              |  101 ++
 R/put.start.R                                  |   17 -
 R/qi.BetaReg.R                                 |   31 -
 R/qi.MCMCZelig.R                               |  293 -----
 R/qi.R                                         |   38 +-
 R/qi.aov.R                                     |   10 -
 R/qi.coxph.R                                   |  241 -----
 R/qi.eiRxC.R                                   |   15 -
 R/qi.gam.R                                     |   82 --
 R/qi.gee.R                                     |   27 -
 R/qi.glm.R                                     |   82 --
 R/qi.lm.R                                      |   31 -
 R/qi.mixed.R                                   |  171 ---
 R/qi.multinom.R                                |   35 -
 R/qi.multiple.R                                |   71 --
 R/qi.netglm.R                                  |   81 --
 R/qi.netlm.R                                   |   26 -
 R/qi.netlogit.R                                |   62 --
 R/qi.polr.R                                    |  107 --
 R/qi.relogit.R                                 |   79 --
 R/qi.rq.R                                      |   51 -
 R/qi.summarized.R                              |  103 ++
 R/qi.survreg.R                                 |  112 --
 R/qi.svyglm.R                                  |   82 --
 R/qi.vglm.R                                    |  202 ----
 R/qi.zmlm.R                                    |   71 --
 R/relogit.R                                    |  293 ++++-
 R/repl.R                                       |   81 +-
 R/repl.default.R                               |    5 -
 R/repl.zelig.R                                 |   21 -
 R/robust.glm.hook.R                            |   32 +
 R/robust.hook.R                                |   20 +
 R/rocplot.R                                    |   43 -
 R/set.start.R                                  |   15 -
 R/setx.Arima.R                                 |   54 -
 R/setx.MI.R                                    |   43 -
 R/setx.R                                       |  311 +++++-
 R/setx.coxph.R                                 |  182 ----
 R/setx.default.R                               |  253 -----
 R/setx.eiRxC.R                                 |   17 -
 R/setx.gam.R                                   |  236 ----
 R/setx.netglm.R                                |  196 ----
 R/setx.noX.R                                   |   24 -
 R/setx.relogit2.R                              |    7 -
 R/setx.rq.R                                    |    8 -
 R/setx.rq.process.R                            |    3 -
 R/setx.rqs.R                                   |   12 -
 R/setx.strata.R                                |   47 -
 R/setx.zaovlist.R                              |    9 -
 R/sim.MI.R                                     |   40 +
 R/sim.R                                        |   96 +-
 R/sim.cond.R                                   |   63 --
 R/sim.counter.R                                |   21 -
 R/sim.coxph.R                                  |   55 -
 R/sim.default.R                                |  286 ++++-
 R/sim.eiRxC.R                                  |   59 -
 R/sim.netglm.R                                 |   46 -
 R/sim.setx.MI.R                                |   77 --
 R/sim.setx.rqs.R                               |   20 -
 R/sim.setx.strata.R                            |   40 -
 R/sim.setxArima.R                              |  217 ----
 R/sim.zaovlist.R                               |  119 --
 R/simulation.matrix.R                          |  116 ++
 R/simulations.plot.R                           |  186 ++++
 R/summarize.R                                  |  147 ++-
 R/summarize.array.R                            |   24 -
 R/summarize.coxhazard.R                        |    3 -
 R/summarize.default.R                          |   13 -
 R/summarize.ei.R                               |   24 -
 R/summarize.matrix.R                           |   25 -
 R/summary.Arima.R                              |    4 -
 R/summary.BetaReg.R                            |   10 -
 R/summary.MCMCZelig.R                          |   19 -
 R/summary.MI.R                                 |   49 -
 R/summary.R                                    |  404 +++++++
 R/summary.coxph.naive.R                        |   12 -
 R/summary.coxph.robust.R                       |   15 -
 R/summary.eiRxC.R                              |   13 -
 R/summary.gee.naive.R                          |   12 -
 R/summary.gee.robust.R                         |   12 -
 R/summary.glm.robust.R                         |   32 -
 R/summary.lm.robust.R                          |   26 -
 R/summary.netglm.R                             |   74 --
 R/summary.relogit.R                            |   34 -
 R/summary.relogit2.R                           |   21 -
 R/summary.setx.R                               |    8 -
 R/summary.setx.cond.R                          |   23 -
 R/summary.strata.R                             |   27 -
 R/summary.vglm.R                               |    4 -
 R/summary.zaov.R                               |   26 -
 R/summary.zelig.R                              |   71 --
 R/summary.zelig.arima.R                        |   57 -
 R/summary.zelig.rqs.strata.R                   |    8 -
 R/summary.zelig.strata.R                       |   84 --
 R/summary.zeliglist.R                          |   33 -
 R/t.setx.R                                     |   14 +
 R/terms.R                                      |  235 ++++
 R/terms.multiple.R                             |  204 ----
 R/terms.vglm.R                                 |    2 -
 R/terms.zaov.R                                 |   13 -
 R/termsFromFormula.R                           |   15 +
 R/ternaryplot.R                                |   89 --
 R/ternarypoints.R                              |   10 -
 R/tobit.R                                      |  143 +++
 R/twosls.R                                     |  279 +++++
 R/user.prompt.R                                |   16 +-
 R/vcov.BetaReg.R                               |    2 -
 R/vcov.R                                       |   17 +
 R/vcov.eiRxC.R                                 |    2 -
 R/vcov.gee.naive.R                             |    6 -
 R/vcov.gee.robust.R                            |    4 -
 R/vcov.glm.robust.R                            |    4 -
 R/vcov.lm.robust.R                             |    4 -
 R/vcov.netglm.R                                |    5 -
 R/vcov.netlm.R                                 |    4 -
 R/vcov.netlogit.R                              |    5 -
 R/vcov.relogit.R                               |    2 -
 R/vcov.survreg.R                               |    1 -
 R/vcov.zmlm.R                                  |    4 -
 R/vdc.R                                        |  259 -----
 R/vignettesMenu.R                              |   28 -
 R/z.R                                          |   53 +
 R/zelig.R                                      |  326 +++++-
 R/zelig.citation.R                             |   68 --
 R/zelig.default.R                              |  135 ---
 R/zelig.skeleton.R                             |  134 +++
 R/zelig2.R                                     |   49 +
 R/zelig2MCMC.R                                 |  392 -------
 R/zelig2aov.R                                  |   11 -
 R/zelig2arima.R                                |   70 --
 R/zelig2blogit.R                               |   16 -
 R/zelig2bprobit.R                              |   16 -
 R/zelig2coxph.R                                |   24 -
 R/zelig2ei.RxC.R                               |    7 -
 R/zelig2exp.R                                  |   16 -
 R/zelig2gam.logit.R                            |    9 -
 R/zelig2gam.normal.R                           |    8 -
 R/zelig2gam.poisson.R                          |    9 -
 R/zelig2gam.probit.R                           |    9 -
 R/zelig2gamma.R                                |   10 -
 R/zelig2gamma.gee.R                            |   15 -
 R/zelig2gamma.mixed.R                          |   17 -
 R/zelig2gamma.survey.R                         |   50 -
 R/zelig2lm.mixed.R                             |   17 -
 R/zelig2logit.R                                |   10 -
 R/zelig2logit.gee.R                            |   15 -
 R/zelig2logit.mixed.R                          |   17 -
 R/zelig2logit.survey.R                         |   50 -
 R/zelig2lognorm.R                              |   19 -
 R/zelig2ls.R                                   |   10 -
 R/zelig2mlogit.R                               |   14 -
 R/zelig2negbin.R                               |    8 -
 R/zelig2netcloglog.R                           |    8 -
 R/zelig2netgamma.R                             |    8 -
 R/zelig2netlogit.R                             |    8 -
 R/zelig2netls.R                                |    7 -
 R/zelig2netnormal.R                            |    7 -
 R/zelig2netpoisson.R                           |    7 -
 R/zelig2netprobit.R                            |   12 -
 R/zelig2normal.R                               |   10 -
 R/zelig2normal.gee.R                           |   15 -
 R/zelig2normal.survey.R                        |   48 -
 R/zelig2ologit.R                               |   10 -
 R/zelig2oprobit.R                              |   10 -
 R/zelig2poisson.R                              |   10 -
 R/zelig2poisson.gee.R                          |   15 -
 R/zelig2poisson.mixed.R                        |   17 -
 R/zelig2poisson.survey.R                       |   50 -
 R/zelig2probit.R                               |   10 -
 R/zelig2probit.gee.R                           |   15 -
 R/zelig2probit.mixed.R                         |   19 -
 R/zelig2probit.survey.R                        |   50 -
 R/zelig2quantile.R                             |   20 -
 R/zelig2relogit.R                              |   12 -
 R/zelig2rq.R                                   |   20 -
 R/zelig2sur.R                                  |   11 -
 R/zelig2threesls.R                             |   20 -
 R/zelig2tobit.R                                |   52 -
 R/zelig2twosls.R                               |   23 -
 R/zelig2weibull.R                              |   18 -
 R/zelig3MCMC.R                                 |  136 ---
 R/zelig3aov.R                                  |    9 -
 R/zelig3coxph.R                                |   15 -
 R/zelig3gee.R                                  |   16 -
 R/zelig3glm.R                                  |   22 -
 R/zelig3ls.R                                   |   22 -
 R/zelig3mixed.R                                |    8 -
 R/zelig3ologit.R                               |   12 -
 R/zelig3oprobit.R                              |   12 -
 R/zelig3quantile.R                             |   26 -
 R/zelig3relogit.R                              |   51 -
 R/zelig3rq.R                                   |   26 -
 R/zelig4gee.R                                  |   46 -
 R/zelig4glm.R                                  |   75 --
 R/zvcClient.R                                  | 1114 -------------------
 R/zzz.R                                        | 1381 ++++++++++++++++++++++++
 README                                         |   33 +-
 RELEASE_NOTES                                  |   88 ++
 data/friendship.RData                          |  Bin 2781 -> 2781 bytes
 data/sna.ex.RData                              |  Bin 23681 -> 23681 bytes
 demo/00Index                                   |   72 +-
 demo/Zelig.HelloWorld.R                        |  173 +++
 demo/aov.R                                     |   47 -
 demo/arima.R                                   |   75 --
 demo/bivariate.probit.R                        |  112 --
 demo/blogit.R                                  |   89 --
 demo/bprobit.R                                 |   90 --
 demo/chopit.R                                  |   36 -
 demo/cloglog.net.R                             |   24 -
 demo/conditional.R                             |   13 -
 demo/coxph.R                                   |   90 --
 demo/ei.RxC.R                                  |   70 --
 demo/ei.dynamic.R                              |   55 -
 demo/ei.hier.R                                 |   55 -
 demo/exp.R                                     |   36 +-
 demo/factor.bayes.R                            |    6 +-
 demo/factor.mix.R                              |   27 -
 demo/factor.ord.R                              |   28 -
 demo/gamma.R                                   |   25 +-
 demo/gamma.mixed.R                             |   26 -
 demo/gamma.negvalues.R                         |   35 -
 demo/gamma.net.R                               |   23 -
 demo/irt1d.R                                   |   32 -
 demo/irtkd.R                                   |   34 -
 demo/logit.R                                   |   64 +-
 demo/logit.bayes.R                             |    6 +-
 demo/logit.gam.R                               |   62 --
 demo/logit.gee.R                               |    0
 demo/logit.mixed.R                             |   23 -
 demo/logit.net.R                               |   29 -
 demo/lognorm.R                                 |    1 +
 demo/ls.R                                      |   42 +-
 demo/ls.mixed.R                                |   23 -
 demo/ls.net.R                                  |   31 -
 demo/match.R                                   |  115 --
 demo/mi.R                                      |   33 +-
 demo/mlogit.R                                  |   49 -
 demo/mlogit.bayes.R                            |    4 +-
 demo/negbin.R                                  |   13 -
 demo/negbinom.R                                |   26 +
 demo/normal.R                                  |   45 +-
 demo/normal.bayes.R                            |    6 +-
 demo/normal.gam.R                              |   76 --
 demo/normal.net.R                              |   24 -
 demo/normal.regression.R                       |   99 --
 demo/ologit.R                                  |   67 --
 demo/oprobit.R                                 |   62 --
 demo/oprobit.bayes.R                           |    4 +-
 demo/poisson.R                                 |   24 +-
 demo/poisson.bayes.R                           |    6 +-
 demo/poisson.gam.R                             |   62 --
 demo/poisson.mixed.R                           |   21 -
 demo/poisson.net.R                             |   24 -
 demo/probit.R                                  |   58 +-
 demo/probit.bayes.R                            |    6 +-
 demo/probit.gam.R                              |   62 --
 demo/probit.mixed.R                            |   23 -
 demo/probit.net.R                              |   23 -
 demo/quantile.R                                |   83 --
 demo/relogit.R                                 |   22 -
 demo/repl.R                                    |   39 -
 demo/robust.R                                  |   66 --
 demo/roc.R                                     |    8 -
 demo/strata.R                                  |   27 -
 demo/sur.R                                     |   19 -
 demo/threesls.R                                |   53 -
 demo/tobit.R                                   |   47 -
 demo/tobit.bayes.R                             |   59 -
 demo/twosls.R                                  |   55 +-
 demo/vertci.R                                  |   42 -
 demo/weibull.R                                 |   39 -
 inst/doc/.latex2html-init                      |  244 -----
 inst/doc/bl.pdf                                |  Bin 71412 -> 0 bytes
 inst/doc/blogit.Rnw                            |  349 ------
 inst/doc/blogit.pdf                            |  Bin 71413 -> 0 bytes
 inst/doc/bprobit.Rnw                           |  385 -------
 inst/doc/bprobit.pdf                           |  Bin 74302 -> 0 bytes
 inst/doc/gamma.Rnw                             |  252 -----
 inst/doc/gamma.mixed.Rnw                       |  181 ----
 inst/doc/gamma.mixed.pdf                       |  Bin 60666 -> 0 bytes
 inst/doc/gamma.pdf                             |  Bin 68204 -> 193871 bytes
 inst/doc/gamma.survey.Rnw                      |  501 ---------
 inst/doc/gamma.survey.pdf                      |  Bin 105136 -> 0 bytes
 inst/doc/logit.Rnw                             |  299 -----
 inst/doc/logit.mixed.Rnw                       |  185 ----
 inst/doc/logit.mixed.pdf                       |  Bin 59478 -> 0 bytes
 inst/doc/logit.pdf                             |  Bin 77878 -> 205148 bytes
 inst/doc/logit.survey.Rnw                      |  520 ---------
 inst/doc/logit.survey.pdf                      |  Bin 105806 -> 0 bytes
 inst/doc/ls.Rnw                                |  288 -----
 inst/doc/ls.mixed.Rnw                          |  187 ----
 inst/doc/ls.mixed.pdf                          |  Bin 71244 -> 0 bytes
 inst/doc/ls.pdf                                |  Bin 88933 -> 192727 bytes
 inst/doc/manual-bayes.pdf                      |  Bin 0 -> 289737 bytes
 inst/doc/manual-gee.pdf                        |  Bin 0 -> 358169 bytes
 inst/doc/manual.pdf                            |  Bin 0 -> 420060 bytes
 inst/doc/negbin.Rnw                            |  254 -----
 inst/doc/negbin.pdf                            |  Bin 72083 -> 0 bytes
 inst/doc/negbinom.pdf                          |  Bin 0 -> 203459 bytes
 inst/doc/normal.pdf                            |  Bin 0 -> 191988 bytes
 inst/doc/normal.survey.Rnw                     |  511 ---------
 inst/doc/normal.survey.pdf                     |  Bin 102498 -> 0 bytes
 inst/doc/parse.formula.pdf                     |  Bin 0 -> 77954 bytes
 inst/doc/poisson.Rnw                           |  241 -----
 inst/doc/poisson.mixed.Rnw                     |  178 ---
 inst/doc/poisson.mixed.pdf                     |  Bin 67231 -> 0 bytes
 inst/doc/poisson.pdf                           |  Bin 69042 -> 198613 bytes
 inst/doc/poisson.survey.Rnw                    |  513 ---------
 inst/doc/poisson.survey.pdf                    |  Bin 99405 -> 0 bytes
 inst/doc/probit.Rnw                            |  241 -----
 inst/doc/probit.mixed.Rnw                      |  185 ----
 inst/doc/probit.mixed.pdf                      |  Bin 59552 -> 0 bytes
 inst/doc/probit.pdf                            |  Bin 64337 -> 170007 bytes
 inst/doc/probit.survey.Rnw                     |  521 ---------
 inst/doc/probit.survey.pdf                     |  Bin 104894 -> 0 bytes
 inst/doc/twosls.pdf                            |  Bin 0 -> 235468 bytes
 inst/doc/weibull.Rnw                           |  292 -----
 inst/doc/weibull.pdf                           |  Bin 73907 -> 0 bytes
 inst/po/en/LC_MESSAGES/R-Zelig.mo              |    0
 inst/templates/DESCRIPTION                     |   11 +
 inst/templates/PACKAGE.R                       |   20 +
 inst/templates/ZELIG.README                    |    0
 inst/templates/describe.R                      |   10 +
 inst/templates/param.R                         |   13 +
 inst/templates/qi.R                            |   16 +
 inst/templates/zelig2.R                        |   14 +
 inst/zideal/zvcServer.R                        | 1292 ----------------------
 man/GetObject.Rd                               |   19 +
 man/GetSlot.Rd                                 |   26 +
 man/GetSlot.zelig.Rd                           |   30 +
 man/MCMChook.Rd                                |   35 +
 man/Max.Rd                                     |   21 +
 man/McmcHookFactor.Rd                          |   34 +
 man/Median.Rd                                  |   21 +
 man/Min.Rd                                     |   21 +
 man/Mode.Rd                                    |   20 +
 man/TexCite.Rd                                 |   17 +
 man/Zelig-package.Rd                           |  180 +--
 man/ZeligDescribeModel.Rd                      |   23 +
 man/ZeligListModels.Rd                         |   27 +
 man/ZeligListTitles.Rd                         |   13 +
 man/alpha.Rd                                   |   21 +
 man/as.bootlist.Rd                             |   25 +
 man/as.bootvector.Rd                           |   30 +
 man/as.data.frame.setx.Rd                      |   31 +
 man/as.description.Rd                          |   28 +
 man/as.description.description.Rd              |   21 +
 man/as.description.list.Rd                     |   21 +
 man/as.matrix.pooled.setx.Rd                   |   33 +
 man/as.matrix.setx.Rd                          |   33 +
 man/as.parameters.Rd                           |   43 +
 man/as.parameters.default.Rd                   |   27 +
 man/as.parameters.list.Rd                      |   28 +
 man/as.parameters.parameters.Rd                |   25 +
 man/as.qi.Rd                                   |   40 +
 man/as.qi.default.Rd                           |   19 +
 man/as.qi.list.Rd                              |   31 +
 man/as.qi.qi.Rd                                |   19 +
 man/as.summarized.Rd                           |   28 +
 man/as.summarized.list.Rd                      |   23 +
 man/as.summarized.summarized.qi.Rd             |   21 +
 man/attach.env.Rd                              |   40 +
 man/bootfn.default.Rd                          |   34 +
 man/bootstrap.Rd                               |   33 +
 man/bootstrap.default.Rd                       |   26 +
 man/bootstrap.gamma.Rd                         |   26 +
 man/bootstrap.negbinom.Rd                      |   26 +
 man/bootstrap.normal.Rd                        |   29 +
 man/callToString.Rd                            |   22 +
 man/cite.Rd                                    |   19 +
 man/cluster.formula.Rd                         |   19 +
 man/cmvglm.Rd                                  |   25 +
 man/coef.parameters.Rd                         |   28 +
 man/combine.Rd                                 |   24 +
 man/constructDataFrame.Rd                      |   24 +
 man/constructDesignMatrix.Rd                   |   22 +
 man/current.packages.Rd                        |   37 -
 man/depends.on.zelig.Rd                        |   24 +
 man/describe.Rd                                |   19 +
 man/describe.default.Rd                        |   25 +
 man/describe.exp.Rd                            |   19 +
 man/describe.gamma.Rd                          |   19 +
 man/describe.logit.Rd                          |   19 +
 man/describe.ls.Rd                             |   22 +
 man/describe.negbinom.Rd                       |   22 +
 man/describe.normal.Rd                         |   19 +
 man/describe.poisson.Rd                        |   19 +
 man/describe.probit.Rd                         |   19 +
 man/describe.tobit.Rd                          |   19 +
 man/describe.zelig.Rd                          |   28 +
 man/description.Rd                             |   37 +
 man/dims.Rd                                    |   38 -
 man/find.match.Rd                              |   35 +
 man/get.package.Rd                             |   26 +
 man/getPredictorTerms.Rd                       |   26 +
 man/getResponseTerms.Formula-not-formula.Rd    |   31 +
 man/getResponseTerms.Rd                        |   20 +
 man/getResponseTerms.formula.Rd                |   30 +
 man/getResponseTerms.list.Rd                   |   23 +
 man/gsource.Rd                                 |   51 -
 man/has.zelig2.Rd                              |   23 +
 man/help.zelig.Rd                              |   42 +-
 man/ignore.Rd                                  |   26 +
 man/is.formula.Rd                              |   24 +
 man/is.qi.Rd                                   |   20 +
 man/is.valid.qi.list.Rd                        |   19 +
 man/is.zelig.compliant.Rd                      |   30 +
 man/is.zelig.package.Rd                        |   21 +
 man/link.Rd                                    |   21 +
 man/linkinv.Rd                                 |   23 +
 man/list.depth.Rd                              |   21 +
 man/list.zelig.dependent.packages.Rd           |   20 +
 man/list.zelig.models.Rd                       |   19 +
 man/loadDependencies.Rd                        |   36 +
 man/make.parameters.Rd                         |   26 +
 man/makeModelMatrix.Rd                         |   22 +
 man/makeZeligObject.Rd                         |   37 +
 man/mi.Rd                                      |   43 +-
 man/mix.Rd                                     |   24 +
 man/model.end.Rd                               |   40 -
 man/model.frame.multiple.Rd                    |   69 +-
 man/model.matrix.multiple.Rd                   |   97 +-
 man/model.matrix.parseFormula.Rd               |   32 +
 man/multilevel.Rd                              |   29 +
 man/name.object.Rd                             |   30 +
 man/names.qi.Rd                                |   29 +
 man/network.Rd                                 |   39 -
 man/param.Rd                                   |   67 ++
 man/param.default.Rd                           |   21 +
 man/param.exp.Rd                               |   27 +
 man/param.gamma.Rd                             |   25 +
 man/param.logit.Rd                             |   27 +
 man/param.ls.Rd                                |   27 +
 man/param.negbinom.Rd                          |   27 +
 man/param.normal.Rd                            |   27 +
 man/param.poisson.Rd                           |   27 +
 man/param.probit.Rd                            |   27 +
 man/param.relogit.Rd                           |   26 +
 man/param.relogit2.Rd                          |   26 +
 man/param.tobit.Rd                             |   27 +
 man/parameters.Rd                              |   34 +
 man/parse.formula.Rd                           |   90 +-
 man/parse.par.Rd                               |   82 --
 man/parseFormula.Rd                            |   25 +
 man/parseFormula.formula.Rd                    |   24 +
 man/parseFormula.list.Rd                       |   21 +
 man/plot.MI.sim.Rd                             |   20 +
 man/plot.ci.Rd                                 |  105 +-
 man/plot.pooled.sim.Rd                         |   57 +
 man/plot.sim.Rd                                |   23 +
 man/plot.simulations.Rd                        |   28 +
 man/plot.surv.Rd                               |   66 --
 man/plot.zelig.Rd                              |   49 -
 man/print.qi.Rd                                |   22 +
 man/print.qi.summarized.Rd                     |   25 +
 man/print.setx.Rd                              |   21 +
 man/print.setx.mi.Rd                           |   21 +
 man/print.sim.Rd                               |   22 +
 man/print.summary.MCMCZelig.Rd                 |   27 +
 man/print.summary.pooled.sim.Rd                |   28 +
 man/print.summary.relogit.Rd                   |   24 +
 man/print.summary.relogit2.Rd                  |   22 +
 man/print.summary.sim.Rd                       |   22 +
 man/print.summarySim.MI.Rd                     |   21 +
 man/print.zelig.Rd                             |   21 +
 man/put.start.Rd                               |   34 -
 man/qi.Rd                                      |   61 ++
 man/qi.exp.Rd                                  |   34 +
 man/qi.summarize.Rd                            |   36 +
 man/reduceMI.Rd                                |   24 +
 man/relogit.Rd                                 |   27 +
 man/repl.Rd                                    |   86 +-
 man/repl.default.Rd                            |   23 +
 man/repl.sim.Rd                                |   45 +
 man/replace.call.Rd                            |   26 +
 man/robust.gee.hook.Rd                         |   32 +
 man/robust.glm.hook.Rd                         |   26 +
 man/rocplot.Rd                                 |  139 ++-
 man/set.start.Rd                               |   40 -
 man/setx.MI.Rd                                 |   33 +
 man/setx.Rd                                    |  148 +--
 man/setx.default.Rd                            |   31 +
 man/sim.MI.Rd                                  |   38 +
 man/sim.Rd                                     |  240 ++--
 man/sim.default.Rd                             |   43 +
 man/simulation.matrix.Rd                       |   28 +
 man/simulations.parameters.Rd                  |   28 +
 man/simulations.plot.Rd                        |   46 +
 man/special_print_LIST.Rd                      |   26 +
 man/special_print_MATRIX.Rd                    |   25 +
 man/splitUp.Rd                                 |   33 +
 man/store.object.Rd                            |   41 +
 man/structuralToReduced.Rd                     |   20 +
 man/summarize.Rd                               |   21 +
 man/summarize.default.Rd                       |   20 +
 man/summary.MI.Rd                              |   23 +
 man/summary.MI.sim.Rd                          |   22 +
 man/summary.Relogit2.Rd                        |   18 +
 man/summary.glm.robust.Rd                      |   22 +
 man/summary.pooled.sim.Rd                      |   28 +
 man/summary.relogit.Rd                         |   18 +
 man/summary.sim.Rd                             |   21 +
 man/summary.zelig.Rd                           |   58 +-
 man/t.setx.Rd                                  |   23 +
 man/table.levels.Rd                            |   28 +
 man/terms.multiple.Rd                          |   21 +
 man/terms.vglm.Rd                              |   21 +
 man/terms.zelig.Rd                             |   19 +
 man/termsFromFormula.Rd                        |   17 +
 man/ternaryplot.Rd                             |   74 --
 man/ternarypoints.Rd                           |   48 -
 man/toBuildFormula.Rd                          |   21 +
 man/tolmerFormat.Rd                            |   25 +
 man/ucfirst.Rd                                 |   20 +
 man/user.prompt.Rd                             |   31 +-
 man/z.Rd                                       |   29 +
 man/zelig.Rd                                   |  139 +--
 man/zelig.call.Rd                              |   27 +
 man/zelig.skeleton.Rd                          |   62 ++
 man/zelig2-bayes.Rd                            |   48 +
 man/zelig2-core.Rd                             |   69 ++
 man/zelig2-gee.Rd                              |   48 +
 man/zelig2-survey.Rd                           |  148 +++
 man/zelig2.Rd                                  |   58 +
 man/zeligDepStatus.Rd                          |   57 -
 man/zeligDepUpdate.Rd                          |   72 --
 man/zeligVDC.Rd                                |   83 --
 messages/templates/en/describe.canned          |   21 +
 messages/templates/en/describe.credit          |    7 +
 messages/templates/en/describe.how.to          |   11 +
 messages/templates/en/print.summary.sim.canned |   37 +
 messages/templates/en/print.summary.sim.credit |    7 +
 messages/templates/en/print.summary.sim.how.to |    9 +
 messages/templates/en/qi.canned                |   36 +
 messages/templates/en/qi.credit                |    6 +
 messages/templates/en/qi.how.to                |   29 +
 messages/templates/en/setx.canned              |   18 +
 messages/templates/en/setx.credit              |    7 +
 messages/templates/en/setx.how.to              |   22 +
 messages/templates/en/sim.canned               |   32 +
 messages/templates/en/sim.credit               |    8 +
 messages/templates/en/sim.how.to               |   16 +
 messages/templates/en/sim.setx.canned          |    8 +
 messages/templates/en/sim.setx.credit          |    8 +
 messages/templates/en/sim.setx.how.to          |   28 +
 messages/templates/en/summary.sim.canned       |   18 +
 messages/templates/en/summary.sim.credit       |    7 +
 messages/templates/en/summary.sim.how.to       |   24 +
 messages/templates/en/zelig2.canned            |   17 +
 messages/templates/en/zelig2.credit            |    7 +
 messages/templates/en/zelig2.how.to            |   25 +
 messages/templates/en/zelig3.canned            |   11 +
 messages/templates/en/zelig3.credit            |    8 +
 messages/templates/en/zelig3.how.to            |   16 +
 po/R-en.po                                     |  524 +++++++++
 tests/MatchIt.R                                |   19 +
 tests/amelia.R                                 |   45 +
 tests/by.R                                     |    9 +
 tests/lognorm.R                                |   26 +
 tests/mi.R                                     |    9 +
 tests/mix.R                                    |   28 +
 tests/models-bayes.R                           |  135 +++
 tests/models-core.R                            |  144 +++
 tests/models-gee.R                             |  152 +++
 tests/models-survey.R                          |  326 ++++++
 tests/plot-ci.R                                |   35 +
 tests/pooled.R                                 |   11 +
 tests/relogit.R                                |   20 +
 tests/summary.MI.R                             |   13 +
 tests/twosls.R                                 |   22 +
 texput.log                                     |   20 -
 vignettes/Rd.sty                               |  397 -------
 vignettes/Sweave.sty                           |    8 +-
 vignettes/Zelig.bib                            |   65 ++
 vignettes/Zelig.sty                            |   33 +
 vignettes/blogit.Rnw                           |  349 ------
 vignettes/bprobit.Rnw                          |  385 -------
 vignettes/citeZelig.tex                        |    8 -
 vignettes/cites/aov.tex                        |    4 -
 vignettes/cites/arima.tex                      |    4 -
 vignettes/cites/blogit.tex                     |    4 -
 vignettes/cites/bprobit.tex                    |    4 -
 vignettes/cites/chopit.tex                     |    4 -
 vignettes/cites/cloglog.net.tex                |    4 -
 vignettes/cites/coxph.tex                      |    4 -
 vignettes/cites/ei.RxC.tex                     |    4 -
 vignettes/cites/ei.dynamic.tex                 |    4 -
 vignettes/cites/ei.hier.tex                    |    4 -
 vignettes/cites/exp.tex                        |    4 -
 vignettes/cites/factor.bayes.tex               |    4 -
 vignettes/cites/factor.mix.tex                 |    4 -
 vignettes/cites/factor.ord.tex                 |    4 -
 vignettes/cites/gamma.gee.tex                  |    4 -
 vignettes/cites/gamma.mixed.tex                |    4 -
 vignettes/cites/gamma.net.tex                  |    4 -
 vignettes/cites/gamma.survey.tex               |    4 -
 vignettes/cites/gamma.tex                      |    4 -
 vignettes/cites/irt1d.tex                      |    4 -
 vignettes/cites/irtkd.tex                      |    4 -
 vignettes/cites/logit.bayes.tex                |    4 -
 vignettes/cites/logit.gam.tex                  |    4 -
 vignettes/cites/logit.gee.tex                  |    4 -
 vignettes/cites/logit.mixed.tex                |    4 -
 vignettes/cites/logit.net.tex                  |    4 -
 vignettes/cites/logit.survey.tex               |    4 -
 vignettes/cites/logit.tex                      |    4 -
 vignettes/cites/lognorm.tex                    |    4 -
 vignettes/cites/ls.mixed.tex                   |    4 -
 vignettes/cites/ls.net.tex                     |    4 -
 vignettes/cites/ls.tex                         |    4 -
 vignettes/cites/mlogit.bayes.tex               |    4 -
 vignettes/cites/mlogit.tex                     |    4 -
 vignettes/cites/mprobit.tex                    |    4 -
 vignettes/cites/negbin.tex                     |    4 -
 vignettes/cites/normal.bayes.tex               |    4 -
 vignettes/cites/normal.gam.tex                 |    4 -
 vignettes/cites/normal.gee.tex                 |    4 -
 vignettes/cites/normal.net.tex                 |    4 -
 vignettes/cites/normal.survey.tex              |    4 -
 vignettes/cites/normal.tex                     |    4 -
 vignettes/cites/ologit.tex                     |    4 -
 vignettes/cites/oprobit.bayes.tex              |    4 -
 vignettes/cites/oprobit.tex                    |    4 -
 vignettes/cites/poisson.bayes.tex              |    4 -
 vignettes/cites/poisson.gam.tex                |    4 -
 vignettes/cites/poisson.gee.tex                |    4 -
 vignettes/cites/poisson.mixed.tex              |    4 -
 vignettes/cites/poisson.net.tex                |    4 -
 vignettes/cites/poisson.survey.tex             |    4 -
 vignettes/cites/poisson.tex                    |    4 -
 vignettes/cites/probit.bayes.tex               |    4 -
 vignettes/cites/probit.gam.tex                 |    4 -
 vignettes/cites/probit.gee.tex                 |    4 -
 vignettes/cites/probit.mixed.tex               |    4 -
 vignettes/cites/probit.net.tex                 |    4 -
 vignettes/cites/probit.survey.tex              |    4 -
 vignettes/cites/probit.tex                     |    4 -
 vignettes/cites/quantile.tex                   |    4 -
 vignettes/cites/relogit.tex                    |    4 -
 vignettes/cites/rq.tex                         |    4 -
 vignettes/cites/sur.tex                        |    4 -
 vignettes/cites/threesls.tex                   |    4 -
 vignettes/cites/tobit.bayes.tex                |    4 -
 vignettes/cites/tobit.tex                      |    4 -
 vignettes/cites/twosls.tex                     |    4 -
 vignettes/cites/weibull.tex                    |    4 -
 vignettes/gamma.Rnw                            |  252 -----
 vignettes/gamma.mixed.Rnw                      |  181 ----
 vignettes/gamma.survey.Rnw                     |  501 ---------
 vignettes/gk.bib                               |   36 +-
 vignettes/gkpubs.bib                           |   39 +-
 vignettes/html.sty                             |  200 ----
 vignettes/logit.Rnw                            |  299 -----
 vignettes/logit.mixed.Rnw                      |  185 ----
 vignettes/logit.survey.Rnw                     |  520 ---------
 vignettes/ls.Rnw                               |  288 -----
 vignettes/ls.mixed.Rnw                         |  187 ----
 vignettes/natbib.sty                           |  724 -------------
 vignettes/negbin.Rnw                           |  254 -----
 vignettes/normal.survey.Rnw                    |  511 ---------
 vignettes/otherworks.bib                       |    8 -
 vignettes/poisson.Rnw                          |  241 -----
 vignettes/poisson.mixed.Rnw                    |  178 ---
 vignettes/poisson.survey.Rnw                   |  513 ---------
 vignettes/probit.Rnw                           |  241 -----
 vignettes/probit.mixed.Rnw                     |  185 ----
 vignettes/probit.survey.Rnw                    |  521 ---------
 vignettes/upquote.sty                          |   76 --
 vignettes/weibull.Rnw                          |  292 -----
 vignettes/zinput.tex                           |   38 -
 929 files changed, 19823 insertions(+), 34975 deletions(-)

diff --git a/ANNOUNCEMENT.txt b/ANNOUNCEMENT.txt
new file mode 100644
index 0000000..d7cde0c
--- /dev/null
+++ b/ANNOUNCEMENT.txt
@@ -0,0 +1,58 @@
+The Zelig core team is pleased to announce the alpha release of Zelig 4.
+
+Designated as the "Developer Update", Zelig 4 offers a wide-range of improvements to ease the process of adding new statistical models to the already extensive Zelig software suite. Significantly, this release is packaged with a brand-new API, geared towards reducing the complexity and length of Zelig's development functions - the zelig2, param and qi methods. In addition to this, Zelig now brandishes a package-creator (zelig.skeleton) that operates in the same vein as R's core function  [...]
+
+In addition to changes in the development toolkit, Zelig has now been split across 13 distinct packages. This change has been made to refine the scope of Zelig and its add-ons. In particular, this restructuring of Zelig into a full software suite allows developers to contribute, develop and repair add-on packages without tinkering with the Zelig API and core functionality. 
+
+While this release's prime focus has been improving the developer toolkit and restructuring the software suite, Zelig 4 offers an end-user experience completely identical to previous versions. That is, zelig's basic functions - zelig, setx and sim - ostensibly remain unchanged in functionality for available statistical models.
+
+For full details concerning changes between Zelig 3.5 and Zelig 4, please refer to:
+ http://zeligdev.github.com/
+
+
+New Features
+------------
+
+Some of the new available features are:
+
+A revised developer API. The primary developer methods - zelig2, param and sim - have been reimplemented to use a sleeker, simpler API. For information, please read the Zelig developer's manual found here:
+  http://zeligdev.github.com/files/booklet.pdf
+
+The core package has been restructured and minimized. In particular, Zelig core now contains only code essential to its operation, while all non-essential tasks have been made into specific R-packages. For a complete list of official Zelig packages, please refer to:
+  https://github.com/zeligdev
+
+Development tools for contributors have been added to the core package. In particular, the "zelig.skeleton" function is packaged within Zelig-core in order to facilitate the rapid development of new Zelig packages.
+
+The Zelig software suite has grown to include a total of 7 R-packages. This change offers a simple and easy method for ensuring that development and bug-fixing within any particular Zelig add-on will leave the remainder of the Zelig software suite unchanged.
+
+A hook API has been integrated into the core package, in order to reduce the necessity to directly alter the zelig, setx and sim methods.
+
+Roxygen-compliant documentation has become standard in all Zelig packages. This offers an easy way to manage Rd documentation, dependencies and exports from within the R code itself. That is, documentation is more tightly paired with the actual R code. For more information about Roxygen, please refer to:
+  http://roxygen.org/
+
+
+GitHub
+------
+
+Zelig is now on GitHub! Fork an add-on package or contribute bug-finds today!
+
+For a full listing of official packages and their repositories, please see:
+ https://github.com/zeligdev
+
+
+Links
+-----
+
+The following comprises a list of relevant information for Zelig 4:
+ * Website: http://zeligdev.github.com/
+ * Package Repositories: https://github.com/zeligdev/
+ * Installation Guide: http://zeligdev.github.com/files/zelig.pdf
+ * Zelig Manual: http://zeligdev.github.com/#install
+ * Available and Missing Add-on Packages: http://zeligdev.github.com/#models
+
+
+Questions
+---------
+
+For any particular questions on developing new Zelig models, please send all mail to:
+  zelig at lists.gking.harvard.edu
diff --git a/CHANGES b/CHANGES
new file mode 100644
index 0000000..78adab7
--- /dev/null
+++ b/CHANGES
@@ -0,0 +1,26 @@
+4.0-6:
+ * Improving parse.formula function
+ * Renamed 'parse.formula' to 'parseFormula', to avoid name confusion and
+   adhere to common naming standards in Zelig.
+
+4.0-5 (August 25th, 2011):
+ * Removed dependency on 'iterators' package
+ * Updated NAMESPACE and DESCRIPTION files correspondingly
+ * Restructured internals of 'zelig' function
+ * Added CHANGES file to summarize detail-oriented code changes
+ * Print methods for multiply-imputed data-sets have been improved
+ * Print methods for setx objects have been improved
+ * zelig now returns a specially constructed list when handling multiply-
+   imputed data-sets
+ * The 'state' variable has been added to the the zelig and MI objects.
+   This variable stores shared information between an "MI" (zelig-list) object
+   and all its children.
+ * Beautified output of print.setx
+ * Added terms.zelig as a method with documentation
+ * Added a 'formula' index to the 'zelig' class. This contains a formula
+   formula identical that contained in the result index (within the zelig 
+   object)
+ * Added "old-formula" variable to the "state" attribute (this attribute is an
+   environment. That is, the attribute "state", which is an environment, now
+   contains a variable titled "old-formula". This specifies the original
+   formula submitted to the 'zelig' function
diff --git a/LICENSE b/COPYING
similarity index 100%
copy from LICENSE
copy to COPYING
diff --git a/DESCRIPTION b/DESCRIPTION
index c76e6a6..d92f3e1 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,11 +1,10 @@
 Package: Zelig
-Version: 3.5.5
-Date: 2010-01-20
+Maintainer: Matt Owen <mowen at iq.harvard.edu>
+License: GPL (>= 2)
 Title: Everyone's Statistical Software
-Author: Kosuke Imai <kimai at Princeton.Edu>, Gary King
-        <king at harvard.edu>, Olivia Lau <olivia.lau at post.harvard.edu>
-Maintainer: Kosuke Imai <kimai at Princeton.Edu>
-Depends: R (>= 2.10), MASS, boot, stats
+Author: Matt Owen <mowen at iq.harvard.edu>, Kosuke Imai
+        <kimai at Princeton.Edu>, Gary King <king at harvard.edu>, Olivia Lau
+        <olau at fas.harvard.edu>
 Description: Zelig is an easy-to-use program that can estimate, and
         help interpret the results of, an enormous range of statistical
         models. It literally is ``everyone's statistical software''
@@ -19,11 +18,13 @@ Description: Zelig is an easy-to-use program that can estimate, and
         program Clarify (for Stata) that takes the raw output of
         existing statistical procedures and translates them into
         quantities of direct interest.
-License: GPL (>= 2)
+Version: 4.1-3
 URL: http://gking.harvard.edu/zelig
-Suggests: VGAM (>= 0.8-4), MCMCpack (>= 0.8-2), mvtnorm, survival,
-        sandwich (>= 2.1-0), zoo (>= 1.5-0), coda, nnet, sna, gee,
-        systemfit, mgcv, lme4, anchors (>= 2.0), survey, quantreg
+Date: 2013-01-30
+Depends: R (>= 2.14), boot, MASS, methods, sandwich
+Suggests: Amelia, mvtnorm, Formula, gee, survey, survival, systemfit,
+        MatchIt, MCMCpack, coda
+Packaged: 2013-02-19 15:27:31 UTC; matt
+NeedsCompilation: no
 Repository: CRAN
-Date/Publication: 2012-04-05 14:52:42
-Packaged: 2012-04-05 14:16:18 UTC; matt
+Date/Publication: 2013-02-19 17:45:30
diff --git a/LICENSE b/LICENSE
index 727ef8f..0851d70 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,278 +1,27 @@
-		    GNU GENERAL PUBLIC LICENSE
-		       Version 2, June 1991
+Zelig: Everyone's Statistical Software (Zelig)
+Copyright (C) 2004.  Kosuke Imai, Gary King and Olivia Lau.
 
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
-                          675 Mass Ave, Cambridge, MA 02139, USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
 
-			    Preamble
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
 
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.)  You can apply it to
-your programs, too.
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
+Please contact:
 
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
+Kosuke Imai
+Corwin Hall 041                 
+Department of Politics   
+Princeton University                
+Princeton, NJ 08544            
+Email: kimai at princeton.edu   
 
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
-
-  We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
-  Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software.  If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
-  Finally, any free program is threatened constantly by software
-patents.  We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary.  To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-		    GNU GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License.  The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language.  (Hereinafter, translation is included without limitation in
-the term "modification".)  Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
-  1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
-  2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) You must cause the modified files to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    b) You must cause any work that you distribute or publish, that in
-    whole or in part contains or is derived from the Program or any
-    part thereof, to be licensed as a whole at no charge to all third
-    parties under the terms of this License.
-
-    c) If the modified program normally reads commands interactively
-    when run, you must cause it, when started running for such
-    interactive use in the most ordinary way, to print or display an
-    announcement including an appropriate copyright notice and a
-    notice that there is no warranty (or else, saying that you provide
-    a warranty) and that users may redistribute the program under
-    these conditions, and telling the user how to view a copy of this
-    License.  (Exception: if the Program itself is interactive but
-    does not normally print such an announcement, your work based on
-    the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
-    a) Accompany it with the complete corresponding machine-readable
-    source code, which must be distributed under the terms of Sections
-    1 and 2 above on a medium customarily used for software interchange; or,
-
-    b) Accompany it with a written offer, valid for at least three
-    years, to give any third party, for a charge no more than your
-    cost of physically performing source distribution, a complete
-    machine-readable copy of the corresponding source code, to be
-    distributed under the terms of Sections 1 and 2 above on a medium
-    customarily used for software interchange; or,
-
-    c) Accompany it with the information you received as to the offer
-    to distribute corresponding source code.  (This alternative is
-    allowed only for noncommercial distribution and only if you
-    received the program in object code or executable form with such
-    an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it.  For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable.  However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-

-  4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License.  Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
-  5. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Program or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
-  6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
-  7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded.  In such case, this License incorporates
-the limitation as if written in the body of this License.
-
-  9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation.  If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
-  10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission.  For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this.  Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
-			    NO WARRANTY
-
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
+With any problems or questions.
diff --git a/MD5 b/MD5
index 85e4b30..58ff8fe 100644
--- a/MD5
+++ b/MD5
@@ -1,373 +1,115 @@
-b6ddadaa5f6015ce93ca6eac877314dc *DESCRIPTION
-263b03fca1747f78c6f7dc74e8d028b3 *LICENSE
-2351a9a37ad7e41fabcda87b615046ee *NAMESPACE
-ae85f50505301e5f0b9efee181727057 *R/Diff.R
-8961b9386f03c88201f4ceda407b7dfe *R/MIsimulation.R
-04dfbc41a5261259d8c9b537b1b723cb *R/MLutils.R
-fa5c359506942e3a1cbb3beacb8057cf *R/arima.wrap.R
-8de59a869d75a1ba4fed23a121238287 *R/bootfn.default.R
-f94dde81de7f42c541d8be6568f8f8b8 *R/callnetlm.R
-313b092ed955bec5ae67cf59fbd07fec *R/callnetlogit.R
-5f9fcafde9d686f9b0dc07d992c571a6 *R/callsystemfit.R
-3016db007d18b969edef61513203774a *R/categories.R
-6bae06ed4eb740d2337c118f5a0f368e *R/check.describe.R
-6c59bccccd5b5a36fbca465339f21cc4 *R/chopitcode.R
-f0b7fb2ff52a34219e00d3eeed842d4e *R/class.ind.R
-06709750fb0f1c98b74fd6ce58a780bd *R/cmvglm.R
-8e62c3cce5c4a10ac21376e1fc0fd19b *R/coef.BetaReg.R
-1b0a58126dfbc60389b9096ea1a32a27 *R/current.packages.R
-262fa2316d9be9db068662dfa29ddb05 *R/describe.aov.R
-4a7ba6b71788f4011490207cc6ea7bd6 *R/describe.arima.R
-6f5b356ca49656d085a26c7952771227 *R/describe.blogit.R
-d32f38f3f38cf020c98ae1d4bdd3f06c *R/describe.bprobit.R
-1438369104dd69dffa010607d125a100 *R/describe.chopit.R
-e4db0e6d557785932a77f5d2d1c424c9 *R/describe.coxph.R
-03e80132b61934f1faaf3dc1d6c4c37d *R/describe.default.R
-1fc53590977948ae9dbc05672d4bf659 *R/describe.ei.RxC.R
-57f0bff06c3d14dc15738f0f0db55f91 *R/describe.ei.dynamic.R
-62e54557533701e61f548e827e745e1d *R/describe.ei.hier.R
-280aaf42f5ffd6ec84630ec0d6aaa14d *R/describe.exp.R
-3af2634e4bd36198791ee4bb742c624a *R/describe.factor.bayes.R
-8b825f8ee6841f64d68d167e83c236c5 *R/describe.factor.mix.R
-5ced19c02140a73d3b20bc191b15abdc *R/describe.factor.ord.R
-610833eee0ff3f6f4bdee6727644734e *R/describe.gam.logit.R
-6388abca0bb8dbc5a9ae1f52d6e64c69 *R/describe.gam.normal.R
-3abae74830c15dd40b9a25fdd22a7b2c *R/describe.gam.poisson.R
-72f366a2f427651bc6daa495d6bb14eb *R/describe.gam.probit.R
-4968551432f4f4fe2dca4b293e3baa6e *R/describe.gamma.R
-6f5ba9cb0c4cf9a9c945a1a4c68f1d0e *R/describe.gamma.gee.R
-2edb2ae85026593cd748e870194b5d7a *R/describe.gamma.mixed.R
-2133293c9ff6c596c6caca9da49a8b65 *R/describe.gamma.survey.R
-9a21d716d0c29ea78244861b148013ef *R/describe.irt1d.R
-ce157e9a2db48e18c112589817cda999 *R/describe.irtkd.R
-a8761e0ffb12c91385b11ffeb0ae3991 *R/describe.lm.mixed.R
-6502a7a82fec59822b47dbc391d5c539 *R/describe.logit.R
-6a6e633438189bac45b09326dca58552 *R/describe.logit.bayes.R
-5a104d18a06996a46dcd901243406e49 *R/describe.logit.gee.R
-f1b3c92b3ed8a58bc625f30c0f717684 *R/describe.logit.mixed.R
-6269cf086f17dc2249c4d1b742626e53 *R/describe.logit.survey.R
-45cdc49d6317982af0845d65237c2a60 *R/describe.lognorm.R
-d639624fe728662d5b56821658d5be8d *R/describe.ls.R
-d9bd8db4d04fd9b39273cdbb34ed353b *R/describe.mlogit.R
-01337c058abba9980e4d348d1e2c3033 *R/describe.mlogit.bayes.R
-231b8e4f71f86821405121bf5e90213d *R/describe.negbin.R
-adc64b3b6955f5946ad9d1993bd68397 *R/describe.netcloglog.R
-ece9ab1ac612582627bd51c6c076d14c *R/describe.netgamma.R
-e77ce8dbbc1247a85e1cf65ad0f5170a *R/describe.netlogit.R
-0a8f1b7eacd75b5ac20e741f61ff8acd *R/describe.netls.R
-8459ed02d05e036ae16acb967dc3e6c5 *R/describe.netnormal.R
-3e1572ebe8220a7c149b7e3bebaf2eb9 *R/describe.netpoisson.R
-1bf14e0e750c5ccc5cfd54a82673027d *R/describe.netprobit.R
-042451a2965d989b9b360a4dd383407d *R/describe.normal.R
-f9da5877dcb605de97d1a049d28960f8 *R/describe.normal.bayes.R
-d02b4eeb90305eefb56ca4da26a1fb4f *R/describe.normal.gee.R
-c446cb95a51ec5c877357dd14188f3e3 *R/describe.normal.survey.R
-095c0448b91b0973f9b01ad23abd53d2 *R/describe.ologit.R
-97185ce7600657648f1b0965f6d6604d *R/describe.oprobit.R
-83334246178f06650721371b94db1aa6 *R/describe.oprobit.bayes.R
-21f312e2ac28447ff7f4cac7e719b326 *R/describe.poisson.R
-4ecdfc1b9a6cdb891602f8734efa3eee *R/describe.poisson.bayes.R
-872e5c7ced05d264a24e6695582d8b93 *R/describe.poisson.gee.R
-5ac060237f33674f08e259f5367148db *R/describe.poisson.mixed.R
-1fed9bacf98ec9274e3071e6c1959f93 *R/describe.poisson.survey.R
-929282112a5a720da8b6fd3fae57d08b *R/describe.probit.R
-9c0797dab765e505c38401f67d867ac5 *R/describe.probit.bayes.R
-c83e537943d0365bb38f927ea8bcdae9 *R/describe.probit.gee.R
-6ac9a46d26c714641335e8e0f8f12383 *R/describe.probit.mixed.R
-1f4cbc58174a59281470e284cc234415 *R/describe.probit.survey.R
-5111d7bf5fbc1342a18b328833081ccd *R/describe.quantile.R
-3c7e4e178e4003a3136b2516de0c8e0a *R/describe.relogit.R
-0bd9b8d57bd85c55926cf6ce66acab80 *R/describe.rq.R
-0a972fe76898c047cf7c9fc20e8c8386 *R/describe.sur.R
-68659368e2a2a62bacc94d6955f80ee3 *R/describe.threesls.R
-de631e62a66a814517b416165708a46e *R/describe.tobit.R
-2adc39192e852866acee6df8eaf36094 *R/describe.tobit.bayes.R
-896890c98ceeae65ebe0b7e35979cfb1 *R/describe.twosls.R
-dac15d55c5e64b2eae40da3df2c13e65 *R/describe.weibull.R
-fa4014a8c7f4614100d54dd8ceb13292 *R/dims.R
-9e56673ceddbd7962723bce816a270b0 *R/eiRxC.R
-03c4ad6c68f42ea6ffe0c3772dc0ee6d *R/formula.vglm.R
-30c8114373c28a9eac38552c3fb19873 *R/get.R
-36cea99682b7b142f6cd5dba7e04a4b6 *R/gsource.R
-a7d08032d3ab97220c3d92c0f5ad5426 *R/help.zelig.R
-fc3c8af929812f932c037a55f82a8b2e *R/lag.eps.R
-194a5c8900105034e95de29bfb141cbc *R/lag.y.R
-5bf5e31a0bed2f31e468c91534aa76e6 *R/load.first.R
-5c431a3314726f08161d44b96caba10f *R/make.parameters.R
-85a1ff417a2a530fa9b6d5bae4c39573 *R/mcmcei.R
-60d2342449a0bfac2c929a7260291d75 *R/mi.R
-48ec69ad18715d200878eb5743303b16 *R/model.end.R
-9f300e3776fbb2959afcaec950313da1 *R/model.frame.gamF.R
-0155fc0efad55668dfdfde7d6c529911 *R/model.frame.multiple.R
-5c50fba844bcd6c8b6f28e2d4c21ce83 *R/model.matrix.multiple.R
-6ca4768f7ca37d64af8baac3e312f179 *R/multi.R
-3bbf5508e2055a53e75471c2f689ad20 *R/multipleUtil.R
-9e8b90407d21bf99324a6ee22ea506d8 *R/names.ZeligS4vglm.R
-415529fc54e4844044b50dec04141685 *R/names.relogit.R
-d4a3749da41cf46ba73f898448495e51 *R/names.summary.vglm.R
-6057a7b5a5a4b938e8e64fb42dbc0c7a *R/names.summary.zelig.R
-e00754a440bb56c0422c86fde10a851d *R/names.summary.zelig.relogit.R
-952780980e0f8a0d06d0505fab9a94dc *R/names.zelig.R
-f726f843cd68d7d073d6a6099e04c295 *R/names.zelig.relogit.R
-0011b56677932faca3577253bc00e5ae *R/netbinom.R
-457068e1f46653f9686e4ef2daf7822c *R/netgamma.R
-0b0cb1d6ef2707dc431416c8cfa8f56f *R/netlogit.zelig.R
-8cc54b9074d994bb535961662e8707d3 *R/netnormal.R
-5edea9d0bd2d16d27ac135d23a669220 *R/netpoisson.R
-0d72c9d09f1abaaff95e68a249f64311 *R/network.R
-6c864b30e5796b92c4021b0f598a9b71 *R/packageConflicts.R
-fdd8a31ff2d8902e96d696c78adfb965 *R/param.MCMCZelig.R
-e2e9499c987efc5fc6879792ae2ddd13 *R/param.R
-7ec161f995746d105abe2ff7e0bcc731 *R/param.default.R
-58a76c5fbf5e3408a541b11d62c6342a *R/param.eiRxC.R
-81556d3eecbb6c627cd5b35e30dbb4b6 *R/param.gam.R
-89f224e60eb5b1f5579dc6826d6bc038 *R/param.gee.R
-1899c490861e9ae5cb2684ea82144523 *R/param.glm.R
-2d5655afacd747172cfe95c70dfaef11 *R/param.lm.R
-6466b22d5ece733726dee2bebfb3ffec *R/param.mixed.R
-da6557a5923d4ae785d82ea374a62891 *R/param.multinom.R
-40d8d580dc84f0cf0f127d82c8984813 *R/param.multiple.R
-cd9c14d16b06f0099036256b1a60e4c2 *R/param.netglm.R
-fa6485bdd403bb540c3be7b736f5fe75 *R/param.netlm.R
-86cf85f9ab6d81a2e6a55ce04af36371 *R/param.polr.R
-e1d25dcba0e42ed7bdf8bbb568754f16 *R/param.relogit.R
-8d521769595fd9d99864a033f937ba1d *R/param.rq.R
-b8d7c228e07e078f6b66e14a72b82a18 *R/param.survreg.R
-f88366b38d570224fa8c3c654b792c49 *R/param.svyglm.R
-3fd6f8ecb14af7d2f5641c42f6d11f5a *R/param.vglm.R
-559ce00a39d5771d5b8d5b2def489537 *R/param.zaov.R
-c1d46098e430d1093cfcc6d4a849000c *R/parse.formula.R
-601354c59d98146eee83d263a3961932 *R/parse.par.R
-08cc9f49793acc1e675a69151731fb25 *R/plot.ci.R
-b5ee785f9a2ebee84465be41dd68ebc4 *R/plot.surv.R
-f4ff47b148f8b5e35f9d0fa2dd5592a8 *R/plot.zelig.R
-4d9da15fca8b361295c0dd06ec79bd94 *R/plot.zelig.arima.R
-62394b1abf685ed5d2742f1f589e08cd *R/plot.zelig.blogit.R
-d0e90de189439d5f1b9d5c8dbc88abf0 *R/plot.zelig.bprobit.R
-f5e3041b613a9ea54412fc59692dc168 *R/plot.zelig.coxph.R
-141cdc99980d51a01f50eac22c207a89 *R/plot.zelig.default.R
-fd3ee59096811d6e198537c8281ea2f1 *R/plot.zelig.gamma.gee.R
-cf05761fdee04e7c70dfe8245305c083 *R/plot.zelig.logit.R
-889c105e10c21986c607bec80566ce83 *R/plot.zelig.logit.gam.R
-aaf24d1867b052a3d08e0822c2086aa4 *R/plot.zelig.logit.gee.R
-14345f1bd35b2c1348e8fd550dd3742e *R/plot.zelig.logit.survey.R
-38d52abfce61d4e0f69a1aa37369a944 *R/plot.zelig.mlogit.R
-a6a86f80965485d2ffff44db0c0fe3cc *R/plot.zelig.negbin.R
-35b178e89e9e7e1c036c94c873815737 *R/plot.zelig.netcloglog.R
-c285aba7878ec124b4dba17c99549923 *R/plot.zelig.netlogit.R
-9452f7ec2970b703ab7f9f23e2a94723 *R/plot.zelig.netpoisson.R
-5b45ec9fd039bcb0c5200ad015c8f76a *R/plot.zelig.netprobit.R
-dee2f8d3101aa90019d31d2db6e41679 *R/plot.zelig.normal.gee.R
-bb15bed69ac5f956b07384a89b04a6f1 *R/plot.zelig.ologit.R
-c5a0fcbbac35ab0bfe3d938d1bf402fe *R/plot.zelig.oprobit.R
-7c793d4e0860fc75feb78e9536a11757 *R/plot.zelig.poisson.R
-80d961e7f39a72052afbbf3a4e847d49 *R/plot.zelig.poisson.gam.R
-ff924b51603c0f8d16fba6bffa19ce64 *R/plot.zelig.poisson.gee.R
-20ff8071e0211519f3d5a407685f7b25 *R/plot.zelig.poisson.survey.R
-58c333ed3bff90a74d5365f8d67687d8 *R/plot.zelig.probit.R
-dccb9f255ce4bfc48e211a79c75db878 *R/plot.zelig.probit.gam.R
-45ad7413c4ff249df421a6a136d11e13 *R/plot.zelig.probit.gee.R
-c0cdd4944dbe54986763e2e89f5dd745 *R/plot.zelig.probit.survey.R
-77c5172fa5c8c55925ccbd0cb68e3966 *R/plot.zelig.relogit.R
-99e6de5f7e03553902092635d028e287 *R/plot.zeliglist.R
-00891f8c27d556ce2ffcddf5c1c70d6b *R/print.BetaReg.R
-73b42f8e7cb991f32a0c0e747b4cd7dc *R/print.arimaSummary.R
-b1daf173c0102cbed85b559e1f19fb17 *R/print.coxhazard.R
-81ed67885199183474ec70b04cdec583 *R/print.eiRxC.R
-64176e2bde078793a71558724545132e *R/print.names.relogit.R
-838541afaa3a572ae3168170306f046f *R/print.names.zelig.R
-47fac67dbd5f6623ce1a4f398606bbe2 *R/print.relogit.R
-efea07bba6dfabe8539ad3b58757943b *R/print.relogit2.R
-f7453d329629d14b597a8ceb3afc6896 *R/print.summary.MCMCZelig.R
-4e30fa67cac1c1e9be8f042cf03d5dae *R/print.summary.MI.R
-fa80678bcf11de8295de7aca9a56d172 *R/print.summary.glm.robust.R
-ec4e66e0edec148bbf3a24e424ec5cf4 *R/print.summary.lm.robust.R
-c5d515d80ac7ab5cec338a31e6b78dc6 *R/print.summary.relogit.R
-9971da14c0aa41613bea9b6cda753bf7 *R/print.summary.relogit2.R
-613478de10f7a634d43ebca4e3b82d13 *R/print.summary.strata.R
-9764dad4308f0aa280174ca8ba91ebc8 *R/print.summary.zelig.R
-e77fd00514535355b0f2de821d9055e4 *R/print.summary.zelig.strata.R
-aa6f6f3d4773e0859510922915656470 *R/print.zaovlist.R
-64294efe3599577754ecff4a4a1a5e47 *R/print.zelig.R
-462f24605b658a22cb6b3c778f8ff784 *R/put.start.R
-370f268971517637496be5629bee8ec8 *R/qi.BetaReg.R
-65811c09b2a2005c72c092c7f74f5f41 *R/qi.MCMCZelig.R
-3e8d6d5a62f77af049aed1548db44e87 *R/qi.R
-304d7b3f89a04d35b1c552f9d1828ede *R/qi.aov.R
-063a5228b94f40cc8909eb16506dae8a *R/qi.coxph.R
-e9d254b8fcb3d2299fb5716d387a4059 *R/qi.eiRxC.R
-36779badce5247db46257253ad43cf55 *R/qi.gam.R
-e6d1c311142e11531c39dcd4e758ec1f *R/qi.gee.R
-b0be30e6922e2b11ff6f469715b21d6d *R/qi.glm.R
-e8707624bf12450089ef7d88606ec752 *R/qi.lm.R
-677eef04b7d1d2df7b599cff941093b0 *R/qi.mixed.R
-d06d3ee287d6f52c831ab9e08c893a00 *R/qi.multinom.R
-4999e9bc5abdf6f4cab339af6f5a86ac *R/qi.multiple.R
-a971f7c0274a4edf88ff3dd01f0caf20 *R/qi.netglm.R
-0def1e3956d4e3e77923dca52c213dad *R/qi.netlm.R
-16c996253cbc0b8f4bd29cd074268a21 *R/qi.netlogit.R
-053b0645a96ab8044133b502aa116ac0 *R/qi.polr.R
-53b2b75df9b8b4c604113fd5f9f437c5 *R/qi.relogit.R
-b37eb30a4b6bdd7a11181dfb57b1703f *R/qi.rq.R
-ea9702a1046f8b3877018d3d8669981a *R/qi.survreg.R
-c559839dd3459ed09e51ed284d41d77f *R/qi.svyglm.R
-009e11e2e090367bbc49e6f1a41c62e8 *R/qi.vglm.R
-cf809672b03267759d7fea8d275dac28 *R/qi.zmlm.R
-9040fbbb0d49e7b156e94747b8dcdc89 *R/relogit.R
-abd92d525debe26e3bab6e7c614b3d72 *R/repl.R
-9aa896450749e8b0457b56e8232f52e1 *R/repl.default.R
-6db84ec5edf38df140edbffd62a187b2 *R/repl.zelig.R
-35d3083983644d5b7a005bed942f3341 *R/rocplot.R
-3d17f67ae15be34ac58dce6a2a5f23c8 *R/set.start.R
-933676a4f572d8b0d7847941ccac1edd *R/setx.Arima.R
-2d9d5afa108ad33b38e8c4e8ba2823c2 *R/setx.MI.R
-179386042d6ac9be58daf0e1b9578e9c *R/setx.R
-0b0945a43d2227737ca7b28766abb33d *R/setx.coxph.R
-9f2b70e4abd4899635abf311aa288337 *R/setx.default.R
-603278ae5505c686f178c3bc02aa5853 *R/setx.eiRxC.R
-0661515e1390dc4f108a467816ff5c39 *R/setx.gam.R
-cc2270bffa546468a1ff4dc910bb4c4a *R/setx.netglm.R
-5a6d61ebe2d9784682e1d5c9fea39d2f *R/setx.noX.R
-308a1353a89c8dc0edb867c51a1b7ffe *R/setx.relogit2.R
-0c18d92604ee78fdb0d600d77b96250e *R/setx.rq.R
-c56e1a503effa803ea3d2438121fe769 *R/setx.rq.process.R
-84d7a9539cdfd8d4082b3f590bf66f0a *R/setx.rqs.R
-5e5c47fe3e1e24eeb73bf03d14c7da0e *R/setx.strata.R
-bfa03d6f18ff9d42663b27cbf79a76f6 *R/setx.zaovlist.R
-06305a39b3fa8df37b7d317a65f71c2e *R/sim.R
-0f42e08e7c78b12eee5483989b0527a2 *R/sim.cond.R
-42d4e18c6063c0aba5edfab40ca52207 *R/sim.counter.R
-9b22cb457549b9ef5b01c06e0a8b3484 *R/sim.coxph.R
-efbb3b0ff69ceef56beaacf9680d5085 *R/sim.default.R
-0f3bfb6c0bf5c4c0c869756b5c2ddba4 *R/sim.eiRxC.R
-d0c6ada53e5be37964230b29b407f97e *R/sim.netglm.R
-7db402d36571cee787a3f438073dbb16 *R/sim.setx.MI.R
-ebc56c2443c844e849709647a32e740a *R/sim.setx.rqs.R
-b9e5954669dadbb7acdca22252e7c23b *R/sim.setx.strata.R
-7fc1a5d8126fad382c4d7f9aa7158441 *R/sim.setxArima.R
-2c6b6ca617e976236898cda87e349de6 *R/sim.zaovlist.R
-2080adb989f20bed977ec2a62770097c *R/summarize.R
-9e1a411fbe6b66558c5c4f814f01cc66 *R/summarize.array.R
-6e9a234b2f69e700e5caad162d908ebc *R/summarize.coxhazard.R
-2e9d303c153bd5d36145055a39ef8c81 *R/summarize.default.R
-16d2087c91b9dbe9f6accd56ed5e3a4b *R/summarize.ei.R
-501286da9eaa0c06f765fabfd60e7e63 *R/summarize.matrix.R
-b0de8071c35df59ff4728e5a64e3ee81 *R/summary.Arima.R
-e6add4cc95ba5dbef745a1fe0606c166 *R/summary.BetaReg.R
-fbe4009393d827f183e70987c5b45ca9 *R/summary.MCMCZelig.R
-dc76aabc248be845680cea3c9d4952e9 *R/summary.MI.R
-d2f41c9edc8d5a5959326729a9602591 *R/summary.coxph.naive.R
-23865701f1dea5cd201bc10caee63647 *R/summary.coxph.robust.R
-0922201335c9f5fcb8a323401802cc08 *R/summary.eiRxC.R
-4a81e75d917455761559f75cbeaa58c7 *R/summary.gee.naive.R
-cb45e53958629a280445ad4b1ee018e1 *R/summary.gee.robust.R
-92a16646896fce8d56431dae2c424fd5 *R/summary.glm.robust.R
-79e4dfbb0bddebd167d10e6edf10f883 *R/summary.lm.robust.R
-b857739bc107fa37b2a46350232a1006 *R/summary.netglm.R
-18c13986882700f1bfde1567e58c26b2 *R/summary.relogit.R
-7b89bbc43d03467a00de766bdf54d71d *R/summary.relogit2.R
-975cc39c85ad6392360b5ce41d6d5e9c *R/summary.setx.R
-9dd27ab115806e8da22b47af581673e2 *R/summary.setx.cond.R
-18f342dc0ec797aff2166b66a97601ed *R/summary.strata.R
-6edb72cd498a4e9a2a7cc1d7b61dbb0b *R/summary.vglm.R
-237938f9bd7b5c2b23a2f55c45a51144 *R/summary.zaov.R
-9cc2576a164ca4c6c6464de489570b71 *R/summary.zelig.R
-9a4a99edd7302d7cb230b9c93a607377 *R/summary.zelig.arima.R
-b79c6b312c977c9e4ed693e734b6be2a *R/summary.zelig.rqs.strata.R
-0dc7eea9f48998767864a32dc6c4b7dc *R/summary.zelig.strata.R
-ede61f1965fa111c9a2b2cedf11f1718 *R/summary.zeliglist.R
-6e436d06aad48b0889c4a6c92f49cb86 *R/terms.multiple.R
-0421040f294dd52c489cd855857101ac *R/terms.vglm.R
-5c2debd19ee281aa33f772a7ab2b2d7d *R/terms.zaov.R
-28cc6ec0229a23e4f89405a0fbf798d9 *R/ternaryplot.R
-5aa43747ba728cd8ec609469c6bec6c2 *R/ternarypoints.R
-950b76a8b5b742dc4714ce93e730ff5f *R/user.prompt.R
-4cc31b0804add48f7259279fa3b48169 *R/vcov.BetaReg.R
-3c79094600c8c2df091d953bf48bd2b7 *R/vcov.eiRxC.R
-4716423935c70356702588c81a36a8d5 *R/vcov.gee.naive.R
-e7c8ec4245149c6b45cf3e474d455b4f *R/vcov.gee.robust.R
-1a9bb1fb37ce6614bcc399166403f843 *R/vcov.glm.robust.R
-a3824fca223e4ab976bfacd7f794d376 *R/vcov.lm.robust.R
-045ea5536539edf33e50ddc916d03c4b *R/vcov.netglm.R
-971239a3c88d2759e30c78b0bc45a6b1 *R/vcov.netlm.R
-367a7bc3607ba2737b1c6a054a356997 *R/vcov.netlogit.R
-3082563fae3baa332f785c0ed4797809 *R/vcov.relogit.R
-94192e7b6d129ad7cb0cdbad5b0cd72b *R/vcov.survreg.R
-d88e88d84b9ddc3d87f9f50582952a18 *R/vcov.zmlm.R
-e45b5c015701ae7589d4539fd196eabf *R/vdc.R
-0a4773037603a6a214801a8b526b88d6 *R/vignettesMenu.R
-3a36af058f18c42bc16d32065717d6c6 *R/zelig.R
-124b771feafb7fb0b031a4756b1eaf39 *R/zelig.citation.R
-2fd6a88cf14fb63785a712d8347397f4 *R/zelig.default.R
-0cf852879e73a7336f5df0386cfb40e3 *R/zelig2MCMC.R
-8b5dddcc0b566f9f38903b0138367703 *R/zelig2aov.R
-d9d718be662ced141178e955d378497a *R/zelig2arima.R
-07930a719efde10454f86c2559d815dd *R/zelig2blogit.R
-3a19fd7d70e2f804edc9ab8c75334a6d *R/zelig2bprobit.R
-2696ca6eda1dfb35e2eaff84aadb602b *R/zelig2coxph.R
-4e2f04d10a930232ed3188ef1d7b52b8 *R/zelig2ei.RxC.R
-aa84976cad9b5f71bd4b9ce806d13994 *R/zelig2exp.R
-a3f7cf32afcf210fdc0a2b7a8312b330 *R/zelig2gam.logit.R
-57da82c020c8ba32ae0c6da682147de8 *R/zelig2gam.normal.R
-0078022400f882dd12426b7f6a4e1e79 *R/zelig2gam.poisson.R
-63c739800caf3ff45a8b4ca78773d9d8 *R/zelig2gam.probit.R
-ea110f72ead9c3ee22c3b5bf07e751d8 *R/zelig2gamma.R
-dda0aaf9c0a9d310777490ae85903069 *R/zelig2gamma.gee.R
-cec944f3318d59db41916f11cd8d3899 *R/zelig2gamma.mixed.R
-e3ae32cf40ff0b2425c32739429240ef *R/zelig2gamma.survey.R
-02b3a8bfa46be8e7bd15fd08fad70200 *R/zelig2lm.mixed.R
-968fe413d371ea244994def7b302a99b *R/zelig2logit.R
-d4adb6ebeffee929870601979e9ac576 *R/zelig2logit.gee.R
-05d76df93f65f0d234a466e67e5a0a6b *R/zelig2logit.mixed.R
-5b1e6a0c278ad66051b20da260860ba9 *R/zelig2logit.survey.R
-0f73a9c81d59aee4b84cc8609a3ae553 *R/zelig2lognorm.R
-5d8bfb60037568348af34315420fc0bc *R/zelig2ls.R
-d7f354f5cd564fbccedbff73581f040e *R/zelig2mlogit.R
-c559e7399e6bef7bb7ff6bc824c9599f *R/zelig2negbin.R
-70638bf33ef803e07c08e178eb40b7ef *R/zelig2netcloglog.R
-4b50acd4b32295945bd2ca75b84c1de7 *R/zelig2netgamma.R
-96d1f3f639fd078e13e840db5de6d70d *R/zelig2netlogit.R
-6d22c505a925549e67b02cacb6a19cfb *R/zelig2netls.R
-59ba9d83f2abd2126c341d72f8168ef4 *R/zelig2netnormal.R
-75ec5b6d46fe1f5b07918dd287aab8e0 *R/zelig2netpoisson.R
-1be160521e3ca3eccf65ed06524e41db *R/zelig2netprobit.R
-0121c8b815fbc52f6caee6dc9c66aa8f *R/zelig2normal.R
-8f9d6ef6a9204d538b3f6b691b977820 *R/zelig2normal.gee.R
-d419b1f147b79d41e81095230166737d *R/zelig2normal.survey.R
-1c36103fbb0c6c7743c260fc89baa3f1 *R/zelig2ologit.R
-b957a77e0f8d99bc69727a3f069ea95a *R/zelig2oprobit.R
-08453bd42bacf6855a94fe136151632b *R/zelig2poisson.R
-3fbddf7a27cf2aa69626bb6930657675 *R/zelig2poisson.gee.R
-fbed33de17c1615246244419d919fee6 *R/zelig2poisson.mixed.R
-1ca5b5a05860c68e630fb8033199bb8a *R/zelig2poisson.survey.R
-14e805dc0106cede129738fd52dca1e3 *R/zelig2probit.R
-0c2da231dd98f982064f594ceb9507dd *R/zelig2probit.gee.R
-6274b34250d19e0956f2efb51e356921 *R/zelig2probit.mixed.R
-a076f551ec904138a81dce0c15680401 *R/zelig2probit.survey.R
-447cf212401028779e9afd6c88ecb4da *R/zelig2quantile.R
-0fd2360c03cf5d9a13a0ba384b1ce2a7 *R/zelig2relogit.R
-1d1db9aab3017bb07589d238ecdc1271 *R/zelig2rq.R
-9f20cde0749782fbce2925160faa3750 *R/zelig2sur.R
-b52a28d72bd769b2421491e994004c9f *R/zelig2threesls.R
-f03226e0ec7ecc17067a0eaf82d4afa4 *R/zelig2tobit.R
-129a7414b476fd006d4165a5e5d4020e *R/zelig2twosls.R
-2f73627d2237d19d33dc386585a3b3b5 *R/zelig2weibull.R
-bb6144b99eeb4faeba8e7ea9ca6c30bb *R/zelig3MCMC.R
-1a59bab36431f3a301e5fc46b04fc139 *R/zelig3aov.R
-6469fd98fcdce98a37f7b244c71d8a0c *R/zelig3coxph.R
-8e9e18cf03e8c0d1850f53ee314adb35 *R/zelig3gee.R
-850b214afa03097c97ce6e27093ffdb2 *R/zelig3glm.R
-0ba6a3631a14970a6cb635d6b042335e *R/zelig3ls.R
-d66d5f63f92bd81a577c530d69e418f7 *R/zelig3mixed.R
-8da64b5d4d059ea64c5c6d3f9d0582d6 *R/zelig3ologit.R
-3439adef4f77ea995a654e31b83dc176 *R/zelig3oprobit.R
-cb243061594cad748c849e9f9a05c0ab *R/zelig3quantile.R
-034600abb7425f205c3e41abcbbb3eb6 *R/zelig3relogit.R
-b6a0cbf09b68e36da2597fe7acfefa90 *R/zelig3rq.R
-0e3e245ed294593fa3bf795dd26d88ac *R/zelig4gee.R
-8ad4b85f819d62a07e8af7ee3c892854 *R/zelig4glm.R
-8947ffd5142aa42635dced332fd711cd *R/zvcClient.R
-1d6510b98dd742df2c155de4270d437e *README
+c88f9b6c55ad2dc2952f1c49580f213a *ANNOUNCEMENT.txt
+577c21becf5e9c56c5fe0e528be7424d *CHANGES
+263b03fca1747f78c6f7dc74e8d028b3 *COPYING
+3a44baa19c8a2dbc509fdb05a2aba818 *DESCRIPTION
+9e0405e31c184eac9aaea3ba6a398cac *LICENSE
+d96a9940470102df83f6a88746758730 *NAMESPACE
+b087530253b66e2f221a18045b8b2797 *R/GetObject.R
+04b1a37fe9c0ea566ee393326e1b1e1d *R/GetSlot.R
+fec78b51d14f64c8fd150423b150ea6c *R/GetSlot.zelig.R
+dced6b0eac63aed97fb5a4dda097b21e *R/MCMChook.R
+9b6ed3cf40cf3c85d9f0027d16b2d0a2 *R/MLutils.R
+922284ab917d3c8020c260774dd310c6 *R/Zelig-package.R
+38ff46f6c8894a6edb307ab769c1df47 *R/as.dataframe.setx.R
+8c993d074f1cdb9f04ac32d7c2059cf0 *R/as.matrix.pooled.setx.R
+9fb636319620cbc85fafa03eb71c1564 *R/as.matrix.setx.R
+e0953e2dca569c7777227d6ca1c575a7 *R/as.parameters.R
+af7d00382f5752ed4d43da5987251fd3 *R/as.qi.R
+a069447d1fef9f59f5c5baa13868d4cc *R/as.summarized.R
+fce511662ec23766b6c8008f9487ea9b *R/as.summarized.list.R
+281ca3ae7ed98ff52d143c443d967cf9 *R/attach.env.R
+e7185eb25d4973cead42e1c8e2d078b5 *R/bootfn.default.R
+57a7b009cdd02bac27d8de7eec9b7579 *R/bootstrap.R
+f3baaa4332c8a63ee08391380c8610b6 *R/bootstrap.gamma.R
+b15d34bc142939e4187892e1f87aaa67 *R/bootstrap.negbinom.R
+1e627353517c14db57aaa418fbe47927 *R/bootstrap.normal.R
+18dcd166adb42d5394871285446aace1 *R/callToString.R
+bbb80a37728924859ec04c51e4012f8f *R/cluster.formula.R
+034ffe0bb6847bae195f0a4b4b622cfa *R/cmvglm.R
+605fd97789b918b1551ef1ba115c21d9 *R/common-methods.R
+3baba2387850a5790bed8a37791e5351 *R/describe.R
+25c13325bf3130035c20b7aae04e3cb4 *R/describe.default.R
+7725517c5014cb8bf5f61d158e90ec32 *R/describe.zelig.R
+86cd18c299b633531983e9b3c6d47c80 *R/description.R
+9dbbfc42a46347db9838de2c55255c45 *R/exp.R
+c2df466056cad5fcdb6b69094966df03 *R/factor.bayes.R
+122238465b2d50bc98c3472166c8083f *R/gamma.R
+d5b500d351bf6bf0504d5e0e0f91e8cb *R/gamma.gee.R
+e240ebf5f63cfea783259426b4439b8e *R/gamma.survey.R
+d4d37567c7a668594f16aa1fb2b87524 *R/get.package.R
+2d4d275c23474b79dd5a44433c2a1fe9 *R/getPredictorTerms.R
+2b0ecd0287f4a0261e715ee86d195e28 *R/getResponseTerms.R
+3e72a999f3dd2e3bab609a66cd0b7f6a *R/getResponseTerms.formula.R
+8c60d36664b0f5b65460c8038899aa02 *R/getResponseTerms.list.R
+23bebcebf65e49089e37367ab0d4d1e2 *R/help.zelig.R
+1a9c2fdc0277364a211a9b051a734ed9 *R/ignore.R
+da6bd58b3374044577642c068d6bd45f *R/is.formula.R
+5f1230fa11c859fc902f2c026521a20e *R/list.depth.R
+884b01095b353823da0ecc4f9ec0e803 *R/logit.R
+390532016c7c3259b80f261771917c72 *R/logit.bayes.R
+d8f4ccc940d87ceeca7a9d49872d8147 *R/logit.gee.R
+8afab5bb4e93510fa4d2889813314109 *R/logit.survey.R
+7fb40166a123fdfcc57f95d53626e1df *R/lognorm.R
+a392f5cc85177a05e18aebf1411c3e85 *R/ls.R
+1be0c5070cd5bbeae803a4e4c0ea9782 *R/make.parameters.R
+722f70b4ab267ed63c12775d26d97210 *R/makeModelMatrix.R
+f80e2219a6824b74d020fa3d73c5bcf2 *R/mi.R
+c6dba740ff5b5b45b10d08ee84f02a81 *R/mlogit.bayes.R
+47a223326c0d43f7682ee7ee09c11a8c *R/model.frame.multiple.R
+057b9fe88d7a95248d344659c747c741 *R/model.matrix.multiple.R
+e335ed7e0bf3ce1218bfa659edabfd98 *R/model.matrix.parseFormula.R
+e3e1629fa2bdd28a9a4606501723437e *R/model.warnings.R
+e8eeed1cda2ed6af7b4513f171ee99cd *R/multi.dataset.R
+a2338a7f2a569d976a7431c516a55ce4 *R/multipleUtil.R
+d41d8cd98f00b204e9800998ecf8427e *R/names.relogit.R
+d6be216b0cbda6ee2addc69a1a5ef1a0 *R/negbinom.R
+bde1114a0a7f7dd4686d69fd6a66fc6f *R/normal.R
+c9ea43bfb803fdf16569fa19bb23bd9e *R/normal.bayes.R
+be1a168ba1761809c8e7c3e2fa45a662 *R/normal.gee.R
+c532712f4284ce882a4fb8ef708a2161 *R/normal.survey.R
+b419598efacb4f6db0a75c41387cb3bc *R/oprobit.bayes.R
+5b8d60db6ebb46faf99d0ace8d8cdee1 *R/param.R
+246f95bbf2338c3d83b3f573e3d74613 *R/parameters.R
+4256c2a803299e053aef257d076cae8a *R/parse.formula.R
+80ca8d82671353a2c58694af079fd50a *R/parseFormula.R
+cde7d94aeb8f6828a62c33ef0479ddc1 *R/plots.R
+995511490db5fd0770d8bf1f797e337a *R/poisson.R
+06f7988988376b2fcbe80d550b41546b *R/poisson.bayes.R
+d365262df07215b227411820d0a7b6c6 *R/poisson.gee.R
+490a754a436a5aea141d57b6028e84e1 *R/poisson.survey.R
+dcd083536fd798f90d12ab1bbdae8656 *R/print.R
+91e476019d62c0d863dbf20f0d25774a *R/probit.R
+0810aa237d64f6108655e2ce6cc96dbe *R/probit.bayes.R
+b99bcb6128cee77d276e7842835f6cce *R/probit.gee.R
+9146c4b6f8823d638fbd9edfd6d49359 *R/probit.survey.R
+94221b6f88ee82df95fd82560b91cf74 *R/qi.R
+457528566b2e4e2d4555fe6d0763ac71 *R/qi.summarized.R
+9d6a844d7dd8e981c54873c6d9e73b1f *R/relogit.R
+2e86f2447930d4d4c8bec285e5d99fdd *R/repl.R
+8c9d7d6f667dbd1685020090536e8472 *R/robust.glm.hook.R
+b5aa8503dcb438182d3c4abe2116cf66 *R/robust.hook.R
+90a390fe2a8e84fb283a73648a6c5faf *R/setx.R
+d416988e52e2280773bf198011e22fd6 *R/sim.MI.R
+07784710c5dc5b989f320184ddfa70fc *R/sim.R
+d647a3af95e422c6aa48a30d7f79120f *R/sim.default.R
+273efcd0bd95588cb76835a799f913c8 *R/simulation.matrix.R
+56b3aedc50e3d42f9a37506c06077162 *R/simulations.plot.R
+13d14dc0c552b55857c56d10ba43cb3a *R/summarize.R
+9545402c696aa3c3c8328bc539e90f37 *R/summary.R
+5f82bb886519200bfb8ef9c6b7e2c3b2 *R/t.setx.R
+26a1ddd198da5b23b007bd5154add412 *R/terms.R
+aa36e193d4ac3b6a77eaf39d36c6b109 *R/termsFromFormula.R
+60a72741864b42dc622f729d7f103235 *R/tobit.R
+a63434f6d1e10509df1a2a2d916d68ac *R/twosls.R
+ad6237fff8d120414edbb13e090eb5b2 *R/user.prompt.R
+b1547c95d94d113b0b2b759c0aa52222 *R/vcov.R
+6b22678c08fed5bb69c63a89018c8e49 *R/z.R
+7f404f13a7309f2edabeb2c5f6ab2cbf *R/zelig.R
+b35cd3e934f074488d96ba7cc8f49535 *R/zelig.skeleton.R
+0b2980cf9cc2abfd8d935a4847d4c2f3 *R/zelig2.R
+e315be1ee65f7a5477ebe9d1d16668cb *R/zzz.R
+5dde81c7d95c4d5ee7c2d6112a56ed45 *README
+a05f058ca6b0ddd1afa188a2bb8f127a *RELEASE_NOTES
 d8568ae5389248b670f8c744a6393bc5 *data/MatchIt.url.tab.gz
 42bfcc353eae9f5f6903d5018fd21f17 *data/PErisk.txt.bz2
 118d8ee31df10e8303e70d33299afde0 *data/SupremeCourt.txt.gz
@@ -380,7 +122,7 @@ b7e99eba34328eb8666a65215d295aec *data/Weimar.txt.gz
 809c9dc00afa3a9a2fac98c6a5beb07a *data/eidat.txt.gz
 9d604cbab664c61aecb2029b22e1ff09 *data/free1.tab.bz2
 9d604cbab664c61aecb2029b22e1ff09 *data/free2.tab.bz2
-8a804a508d4072b59169fa765027b222 *data/friendship.RData
+0eeb37557be8d7c6f5dce71515bfded6 *data/friendship.RData
 66063f43a7ab713fe9902234fff20336 *data/grunfeld.txt.gz
 350bdb7fcd6af5111de558dc4d95dbdc *data/hoff.tab.gz
 21e5c0751ad485d6f448f45b327f8296 *data/homerun.txt.xz
@@ -397,282 +139,321 @@ d4d8ae34bc9283fb37eed1e4823b7530 *data/mexico.tab.bz2
 f0f226b3b844530f36498e42c75b1703 *data/mid.tab.bz2
 c10afea1fb3a11e9aa8b6c5717bc6e2f *data/newpainters.txt.bz2
 839ca4b77a23441da065a7db969b8882 *data/sanction.tab.bz2
-20fd7a4d8c7fadd8a73600461a977613 *data/sna.ex.RData
+46599e63d6b42df79b65cfc3ac6c69c3 *data/sna.ex.RData
 bc8dca44e8c9f5b07da91bc0a10cb26a *data/swiss.txt.bz2
 6ac34a147ed09bb7cbc393d35382cd85 *data/tobin.txt.gz
 b7ffde3c50f11549db21d920a600605d *data/turnout.tab.bz2
 6346b190eb5375586fbbb87cc8926124 *data/voteincome.txt.bz2
-a69b8111377b38549f6e67fb66c0d429 *demo/00Index
-0a2104a1bb37b09d03685448cdb638b1 *demo/aov.R
-584d8ea03b71bb53feb9114fd7d4a3b4 *demo/arima.R
-d8781023da0b2398cdbe69b4c9863d0a *demo/bivariate.probit.R
-e01d09628076099d4ed8b3f74754d309 *demo/blogit.R
-5312df0f046d778705be8976f5716fc0 *demo/bprobit.R
-972422d9d21067128ecaee0c9b731cda *demo/chopit.R
-98d9fdb73e47639144b31516adc778e7 *demo/cloglog.net.R
-2f4ea65ae6adffc3d842f2fdbf6966f7 *demo/conditional.R
-6a3d6bad65a46b63b9e9eb4ab0f73965 *demo/coxph.R
-7a395dbe8026f9150d1ca3767aadece3 *demo/ei.RxC.R
-97538dcb65194cfa684037a7481766be *demo/ei.dynamic.R
-7fdadf9615392cdcd00188ce73d47a24 *demo/ei.hier.R
-8c23725e6ab488c19f71ecfbaa6b216f *demo/exp.R
-8aadadecd1459e4e310bb08bf55dcaad *demo/factor.bayes.R
-3a15d4e12d5da3d69e88b9a8390c6133 *demo/factor.mix.R
-af5c6aa74bb60b45732817361695eec7 *demo/factor.ord.R
-d51bab519203111c8e0de709fafb6373 *demo/gamma.R
+537e28750ec399d8224790b94a000ca7 *demo/00Index
+3b0efd341facce5bf27391cfb0e10d79 *demo/Zelig.HelloWorld.R
+6c5015399f7e0b91ac107fcbd790ce33 *demo/exp.R
+dbc45d1cfb23ef9e0741e13a985aae74 *demo/factor.bayes.R
+9a1be8041e1ec3b22b6ebfb641abf783 *demo/gamma.R
 5b05bfcdc9d10a518c4ede23e2c44400 *demo/gamma.gee.R
-6c5cff1cfafeacc4ebe3f0daab8d135c *demo/gamma.mixed.R
-bc32012351f1cd66fe38be5aadcf3eca *demo/gamma.negvalues.R
-ee37989c688fe0bd4cb205786141bb5d *demo/gamma.net.R
 c3be56905783df81f90f20f61c5de12c *demo/gamma.survey.R
-69783a9455f50768772b5968451bfc81 *demo/irt1d.R
-915c83c62d4e88b3f577374c3f5e3a7d *demo/irtkd.R
-0b07b575b7521c1758afede64653f82f *demo/logit.R
-e0f28e71aeca37bc61afcae88b4bb1cf *demo/logit.bayes.R
-e3a48c84b82d166ab3c78426d9d2369e *demo/logit.gam.R
+4507c6194f692d249751fc798c8d08cf *demo/logit.R
+81c44823cfeccee63ed0614756972e06 *demo/logit.bayes.R
 47e45df8683896d5da1fda6e536e2a7f *demo/logit.gee.R
-6583e64e212436d1e8126e47865576da *demo/logit.mixed.R
-7b44d58feec1491248d1a9aeae9a9203 *demo/logit.net.R
 49456e875d48127913cbb4965e3b5f9c *demo/logit.survey.R
-686b39ac4bb05fc8301ba9ad295cae90 *demo/lognorm.R
-55bcf4b6b51ec23fdf7b43f7130a3d05 *demo/ls.R
-7c91c0651e5c200498421891a8e139b9 *demo/ls.mixed.R
-ecdbc958194e34c6086e3761a6fdccc8 *demo/ls.net.R
-c8dd3822b9dfc5b1eb46dffd082ecc37 *demo/match.R
-a2fa83e1f19ffdd91b28442dec11e6a9 *demo/mi.R
-b609f25a7733a0c45c2e00545ab07cac *demo/mlogit.R
-dbc3be6d7c60d2303b183de2c4b1a7e7 *demo/mlogit.bayes.R
-331a40e40c034840ff1ebaf9ad42ee67 *demo/negbin.R
-93dbe4f72040fd5ba5d4727b8b51e6c0 *demo/normal.R
-7d7e0dff6df24b0577056c23689336c3 *demo/normal.bayes.R
-7f9e4ad81c1bfa01989dc7baa785ac69 *demo/normal.gam.R
+588688ad2bf33680472aca01c1928149 *demo/lognorm.R
+483314f54b7b8a8414a6c7c070952ce9 *demo/ls.R
+34a1e173102a8e3580402fd97af5f516 *demo/mi.R
+f5861bcbf70c2d7c2c70d81d86d39af1 *demo/mlogit.bayes.R
+7db69d65a9c2c46773eeb7a4b1f3a9de *demo/negbinom.R
+14c8b85bca57a7cb1451bc3dfb10e3fa *demo/normal.R
+495061a7c0b681610c99c1b0dcc28fdd *demo/normal.bayes.R
 4384f1f9cac3718c31713dcb589c1b4d *demo/normal.gee.R
-978b197a0ba5e1a62b324feecfc5ccc9 *demo/normal.net.R
-4fa5116b2eecc94265c47de86d3f4990 *demo/normal.regression.R
 445550b68a8fa873ae2e3221921dac62 *demo/normal.survey.R
-05b4bc464cb63cc287561bc5f13c1e00 *demo/ologit.R
-f1ff85890a84890af161771bbe8a429f *demo/oprobit.R
-34d666c0947b7132403bf9eb26097a32 *demo/oprobit.bayes.R
-8611475f37da0ddebcafe5993d85fbe4 *demo/poisson.R
-1989f2d2e752d48ab8dd58a9e5b34a5e *demo/poisson.bayes.R
-d4baec358d0012bab3def592f5a092c8 *demo/poisson.gam.R
+ab9b753894065112812022419b4194e2 *demo/oprobit.bayes.R
+dd0649acf889bb4d92ff4a9ac0f1f94c *demo/poisson.R
+4f8d7638e1a58166b74ba97c4b78753f *demo/poisson.bayes.R
 637f75deedcde3780f381c550cb0007f *demo/poisson.gee.R
-b961a4909bff4221175728be0fe61a50 *demo/poisson.mixed.R
-6534d7c74a48d58efe3a86cd9b50c073 *demo/poisson.net.R
 c92a8fead665a2caa59d7c1dce82c0c5 *demo/poisson.survey.R
-20732a920f202ec32d255bc6f14d6565 *demo/probit.R
-88c95d5252fdb4ee61ab7a95dc4ff66e *demo/probit.bayes.R
-c3b0d240e042913296ec1b70fd423dac *demo/probit.gam.R
+c87c01971457e57842e3052a22ad607b *demo/probit.R
+0e63f5a84bc11793b58e3c10b366081b *demo/probit.bayes.R
 3063e1bb3f9c31ea1cc0cffd4d93d075 *demo/probit.gee.R
-518ee3c0ff5d6c014578c16a5c027c54 *demo/probit.mixed.R
-12f9f4b41b7a5a3f8119d6a585233382 *demo/probit.net.R
 1b1c5bd37ec746d04910dafd9e9d67e8 *demo/probit.survey.R
-32039a7446d68fbed58d33bc976deb24 *demo/quantile.R
-5c4e6f86ee87a0fecd2d6e6071165860 *demo/relogit.R
-59c6037d364484ff1ca25a75b0dfacd7 *demo/repl.R
-c35540f65de7d70cbd1bef171f0373e6 *demo/robust.R
-96339d70796ab1c4128608b9140773e4 *demo/roc.R
-2a35e85caa8dc184da0b340ca882dde6 *demo/strata.R
-b8dca6a21e86d3458c628dd1f2a29018 *demo/sur.R
-5d5c850851f7d959e58809b378a10600 *demo/threesls.R
-a3e43ed11bd94225eb380a75f1c2c89c *demo/tobit.R
-09fd4d7ddf8806a64068e4b3a36439e7 *demo/tobit.bayes.R
-5fdc7680d369c7b11e606d871de90ea7 *demo/twosls.R
-151d97fb4c5b0a62e5817968f621d2b1 *demo/vertci.R
-5c6e39ed19a5af686049c8e83c0cf010 *demo/weibull.R
-93e3fbd0fa9d6b17c1c2ed7a4f55a2f8 *inst/doc/bl.pdf
-6677cef9e3e708ef3ecfbf0a9b839e96 *inst/doc/blogit.Rnw
-c05213839669b8bd6d77118889fea787 *inst/doc/blogit.pdf
-f07675eb32ac1761e435db6d45516688 *inst/doc/bprobit.Rnw
-ce81c20529ca5043c04a5b6180578d42 *inst/doc/bprobit.pdf
-742add5d0228aab286b222dde3b28a7d *inst/doc/gamma.Rnw
-d223c4e70a16479ef98bb0dcd5f14fdd *inst/doc/gamma.mixed.Rnw
-241361d1db756299d15e59768a9ff5c3 *inst/doc/gamma.mixed.pdf
-51033c2aac6c07326af24d7e8388bc3c *inst/doc/gamma.pdf
-2ee5e77373e7b02d98108dd14bcb5005 *inst/doc/gamma.survey.Rnw
-8d6dd63e73fa13aee0307b186c79df98 *inst/doc/gamma.survey.pdf
-f4fd2d1deb0f074cced6c27067c27186 *inst/doc/logit.Rnw
-1217b462c81ab60b22d1f4f65842fa30 *inst/doc/logit.mixed.Rnw
-bdc6bb4eb4918f344271af0c71827e9d *inst/doc/logit.mixed.pdf
-6a428130dde1e943e468d98525ddad68 *inst/doc/logit.pdf
-72c3362a5b9b7a58d2a4f3f26c9b7f59 *inst/doc/logit.survey.Rnw
-9e620a2d0f53523b2444ebcbb0dafe57 *inst/doc/logit.survey.pdf
-517464a2d454cca633eee2b1e0c79df9 *inst/doc/ls.Rnw
-27e7da6633f43e6169c51c01e23e3b89 *inst/doc/ls.mixed.Rnw
-95486e14efee0fde0e0a17c05e9a71f2 *inst/doc/ls.mixed.pdf
-690ad37ab438b5424abc2916d3f3984d *inst/doc/ls.pdf
-502fc45e7102035a74ba2f4d9d333d42 *inst/doc/negbin.Rnw
-8d3fd05351c89a6ae7c32ec22d56fab9 *inst/doc/negbin.pdf
-9cadb6aa754caa65862fdccb6c50debd *inst/doc/normal.survey.Rnw
-93959f8c09985abb005ab5c39d52e417 *inst/doc/normal.survey.pdf
-db085b2c8ce9806225ad9ed85984f6cb *inst/doc/poisson.Rnw
-778a5c55f5642258161e6d95ac8d93f5 *inst/doc/poisson.mixed.Rnw
-7b96df9555565a6c72db8d4d6bb0dcce *inst/doc/poisson.mixed.pdf
-4f65960facfb2e4fbcc54883a0761c2b *inst/doc/poisson.pdf
-bb83664df4042dd371185419ae1bbdff *inst/doc/poisson.survey.Rnw
-ab42614398c96adbf742f9ea8111e76a *inst/doc/poisson.survey.pdf
-807069ec17b311f463d6a41d08b1fa8e *inst/doc/probit.Rnw
-e019e8363b3a427482351ce3d4a9bb09 *inst/doc/probit.mixed.Rnw
-040716ce7ac0036c0cec31b455100026 *inst/doc/probit.mixed.pdf
-162aa53a2c1e9297b675627c118f05e2 *inst/doc/probit.pdf
-dbbaa61346e4423b2ac3146c5de6c801 *inst/doc/probit.survey.Rnw
-e2b66996b36b6a072b3b7c4d0d7c81f3 *inst/doc/probit.survey.pdf
-5396e5c8145c9640d728a30ef647fc33 *inst/doc/weibull.Rnw
-af0aaace7fc1a1d214bcb29bbba7ccf9 *inst/doc/weibull.pdf
-eb2c0db5c37c79d9463760d4edacce34 *inst/zideal/zvcServer.R
+7e02b50c97835c7f3a20fc51b10a5fbf *demo/relogit.R
+fb3e86404eb48271f47eb48c0dfe0db5 *demo/twosls.R
+5820f9f84b9e8e0394dcb295d069d3bd *inst/doc/gamma.pdf
+ed7aa0c5990772ff01ce921f04fae9dd *inst/doc/logit.pdf
+5f5e87631da75b783d443a38c8c287ca *inst/doc/ls.pdf
+259741ae49d1e7e83d5721e85f4973e4 *inst/doc/manual-bayes.pdf
+f269ba201f2537636445ed3d86b6e6de *inst/doc/manual-gee.pdf
+41d545c82736ade872175bd3ebc4db20 *inst/doc/manual.pdf
+b110ff07eda2be42ed1430efcb0c0051 *inst/doc/negbinom.pdf
+285a6c7e33ab81edb9c7721bd233364e *inst/doc/normal.pdf
+33df7df4e1113c852bffda80a0529056 *inst/doc/parse.formula.pdf
+a235b51b2668e7905bb97fca42a0d879 *inst/doc/poisson.pdf
+d8439c38a20251d17224b157326b7d8a *inst/doc/probit.pdf
+55fea654c712696b18b603540d4e781e *inst/doc/twosls.pdf
+d41d8cd98f00b204e9800998ecf8427e *inst/po/en/LC_MESSAGES/R-Zelig.mo
+bbc5b26d487010691036f3a2626e03c5 *inst/templates/DESCRIPTION
+1f675b08283031c5ed15607ae39eb3b8 *inst/templates/PACKAGE.R
+d41d8cd98f00b204e9800998ecf8427e *inst/templates/ZELIG.README
+fc182c4100f4789333fd2dd78bf7f92c *inst/templates/describe.R
+f17c6c109624641719be76a9e5ba5ede *inst/templates/param.R
+2aed8671075ebf1457a96da96879b28e *inst/templates/qi.R
+530c754f2afa6b440664b9b2cc040c75 *inst/templates/zelig2.R
+c9bd6c94c6336ebd9598acec18f97bc0 *man/GetObject.Rd
+4a74d5cdef2fbd4d6bfe97305cceac6c *man/GetSlot.Rd
+453e3d46105fc8199620512e0b6c4e82 *man/GetSlot.zelig.Rd
+784832952993d1e953cef1faf1335554 *man/MCMChook.Rd
 6dffb5b20df0d6fa72529763c7f62a27 *man/MatchIt.url.Rd
+0c3084f4758abddde8685ff64c411db2 *man/Max.Rd
+f5e18a14c6b0d61846a0121daafb4b7c *man/McmcHookFactor.Rd
+3d559d57f5f1960561ab873c81549f89 *man/Median.Rd
+e356125658c18d9ce162080fc443e79c *man/Min.Rd
+9ad339b46e6e3653d3ee823eea2830d7 *man/Mode.Rd
 0641d8ba40e205316b2d2fbe0fb5eaf5 *man/PErisk.Rd
 58172f8c13fe9864a8ac2e26fbd391de *man/SupremeCourt.Rd
+5a035cf4a11e64ae120b7dc2f9700008 *man/TexCite.Rd
 fe15364db9b4e80c56722eec238629e7 *man/Weimar.Rd
-2c721ec323b2f82eff190063790b9db7 *man/Zelig-package.Rd
+643e1e6e1be099a7018af34911cd710f *man/Zelig-package.Rd
 8ded77c2eb2977abe80918da28c0782a *man/Zelig.url.Rd
+233f3a62ca4cd15cbd9bcfa16a979881 *man/ZeligDescribeModel.Rd
+aead67c0c6da91ab1d7f19af2824d037 *man/ZeligListModels.Rd
+6fa7bfa9d92779c30481d6f113bde54a *man/ZeligListTitles.Rd
+e1a3a7386d920fa043fb322abe8756fe *man/alpha.Rd
 7e5422c7821d99df3cd21a9e789c5cb6 *man/approval.Rd
+b0e49b8c8af1a58c1ffec7f9c5fb85da *man/as.bootlist.Rd
+98c5b6a6e86079e6dc3a840d6294ed3f *man/as.bootvector.Rd
+c6197a492a799f5028bbfaffeae74cff *man/as.data.frame.setx.Rd
+cb25845ba9fdb44eef75f01afc59305e *man/as.description.Rd
+68fed7a45cb4432bdf9f66da04d5d7b6 *man/as.description.description.Rd
+ea835225f64f0b57d3ff642d5883d119 *man/as.description.list.Rd
+738db6158fd3beabb5ced5403beb98ba *man/as.matrix.pooled.setx.Rd
+851f39fc7c78240e2e69df1d737da0a8 *man/as.matrix.setx.Rd
+10958eb02ef264ad5a639432294b848c *man/as.parameters.Rd
+b6d87e40368e8f26413112fe647d3b4f *man/as.parameters.default.Rd
+2f5a002c1dc83ddc03c1bfae46fab8e8 *man/as.parameters.list.Rd
+c6d321f1daca4d0a333c8e590b8c4d36 *man/as.parameters.parameters.Rd
+b45437817da882d869e3017a3ccefc3a *man/as.qi.Rd
+10f4fcd3618643b9339a8288b4ad1406 *man/as.qi.default.Rd
+b64e11467fc8952a22b1cc95d1601f10 *man/as.qi.list.Rd
+5d9f612735bf8a60c2c2f49f736d5217 *man/as.qi.qi.Rd
+3cf9ae08fd13f68ebf7c0efaafe31365 *man/as.summarized.Rd
+862925d5cde1fc83b59f74a0752668d6 *man/as.summarized.list.Rd
+2c47c7167bc70c1fcf7d8b96a2d2b0f9 *man/as.summarized.summarized.qi.Rd
+6828b0d881bc787ab5d08665770916ec *man/attach.env.Rd
 83d85754bfcbadc14cfe8dc538007d0b *man/bivariate.Rd
+41d681b024e1156e65dbf19ef732b68d *man/bootfn.default.Rd
+45ab871f55572cfe62b1e5954a2460a8 *man/bootstrap.Rd
+4c937f3a46fa2c4cd17636a6629cf107 *man/bootstrap.default.Rd
+06b3b50467d814f0232240357c683547 *man/bootstrap.gamma.Rd
+42617ae2cf1b45be1c70f2657db9a934 *man/bootstrap.negbinom.Rd
+70d4bf51840d417a42756b30be007553 *man/bootstrap.normal.Rd
+524cb5ea071b054abed5c4d4958c06dd *man/callToString.Rd
+5a0f6a763f1b4e93bfb0c3675cf1f5f4 *man/cite.Rd
+294f05247a62c832331330d9263fcee7 *man/cluster.formula.Rd
+f57c88e9649b4188a10635c6833bc33c *man/cmvglm.Rd
 3b01d1373c2b9f311a70f150f8a3e7cf *man/coalition.Rd
 d9588301df675d5e63882097e8130ea2 *man/coalition2.Rd
-d2ee9a455edc259788bbf0feac32751f *man/current.packages.Rd
-2425cb4f749bd955249fb9383cde6b87 *man/dims.Rd
+6b1d516559cd05f32dc64b14a40ff321 *man/coef.parameters.Rd
+f60d8fa916719234a99bcfc58fa44977 *man/combine.Rd
+ba3a632f7ec6a5f903ebfd1465448cb7 *man/constructDataFrame.Rd
+55a88929afcdbc4d351ea8500bc795ec *man/constructDesignMatrix.Rd
+4b09bd9ab242c0b67e5626e0b7d32da2 *man/depends.on.zelig.Rd
+98315ff01f7c1ecd2ad1c7cc96ebea1d *man/describe.Rd
+34336df2b30c26705d57728707ef48fd *man/describe.default.Rd
+637599adac074b6ceb1e63711e39e7ac *man/describe.exp.Rd
+4d646e38b5d6d52b162fffd1ef152c9a *man/describe.gamma.Rd
+f142f11f4df7402bcfd27882facb9465 *man/describe.logit.Rd
+8681bc0f95fbf3cc724fe45a3888f12c *man/describe.ls.Rd
+a3d647c361183857fdab12c3465b2b2e *man/describe.negbinom.Rd
+120b7375c8097e1cf5b8daf24aaeb164 *man/describe.normal.Rd
+39e04467b04c947a7647acf3283f2a40 *man/describe.poisson.Rd
+644d8e676e7735a8043189b78a70523c *man/describe.probit.Rd
+16d54cde09a2ada7394b2c02435b1287 *man/describe.tobit.Rd
+87c6fd1b4f212d2592c112e0682f8590 *man/describe.zelig.Rd
+6f08d366da6bc44fdd951a705e8115f1 *man/description.Rd
 11ad69ed866d12262602fc3b786091d4 *man/eidat.Rd
+73a2f7194321c4edeb5d69c878c37167 *man/find.match.Rd
 d8e4df6b181afc5e10fee0d774850c90 *man/free1.Rd
 788c8364b3a5ff56275ed6f1de9b7790 *man/free2.Rd
 f573c879f54e045383611015e7d5495c *man/friendship.Rd
+3c0993ec2cccedfa85723963fd153611 *man/get.package.Rd
+2d06d33e4f904368f053bb261437a862 *man/getPredictorTerms.Rd
+465423b551f5a638a20bd99550f3c157 *man/getResponseTerms.Formula-not-formula.Rd
+221b400f09d18267827cc6d864d81f5e *man/getResponseTerms.Rd
+cbc0c02ce6572fc96d8d2c8713baed62 *man/getResponseTerms.formula.Rd
+9becd5adc4ce12ee099cdfbb41a87712 *man/getResponseTerms.list.Rd
 1f77e073ad9ed30b57064d284fe0f2a6 *man/grunfeld.Rd
-92fc5e7d8a9651c08737916d6a5efd66 *man/gsource.Rd
-5ab15428e24e7c85f06a9822585aee74 *man/help.zelig.Rd
+face801c31d1dc268b6289a1ea5aa8c0 *man/has.zelig2.Rd
+e9d755c538423b59f86ae485fd9f615f *man/help.zelig.Rd
 2c288905c76033d24c8f7f57b0d65d40 *man/hoff.Rd
 5f0c67b452fcfdfb90eb29a5d8ed1097 *man/homerun.Rd
+065cad3e06bc5b280ad685677abb0d74 *man/ignore.Rd
 20131069ca801fde8baa331de4b7657e *man/immigration.Rd
+bd950ad3a6dd8c54ad6655872c7dfb69 *man/is.formula.Rd
+338f8a5835bea2f84b7fa6dcf0af657e *man/is.qi.Rd
+b681dcd3ebf33d9c5ceeb51ef40c826f *man/is.valid.qi.list.Rd
+4b386091dbdb2f05991417274ba37d1f *man/is.zelig.compliant.Rd
+c7ee6bc2ceeb30482f0340167d36a9f7 *man/is.zelig.package.Rd
 81c4ba627b9e0c72a52277a18b8baa7a *man/klein.Rd
 e01f00d574aa52df6ae5c03e836c79b3 *man/kmenta.Rd
+9a85bd994b7c1306c6721151f15566de *man/link.Rd
+897e4e2473be3f9de1c597c3270069f0 *man/linkinv.Rd
+581ff0fd47c5280102e0c32ac3cb188e *man/list.depth.Rd
+b0a27dc8fbd7472a907ce1efcd5d61d8 *man/list.zelig.dependent.packages.Rd
+c67df5f8da39b1d03d9057a70d525a6b *man/list.zelig.models.Rd
+631d28c57a183d19abc2c3393469d7de *man/loadDependencies.Rd
 58bda9cf87e4f5c413a73aedc14bb159 *man/macro.Rd
+2ba5cbca95a93f318d474f6b3fb69832 *man/make.parameters.Rd
+6b04dd54072499f51a6d907887b6ff41 *man/makeModelMatrix.Rd
+7f77974ebd56cb8c3cb286a7a871c42c *man/makeZeligObject.Rd
 f9c9396da5c2e9ab849dd470df38c0f5 *man/mexico.Rd
-1f3024cf3abc9f634720dbd59bf3e7a3 *man/mi.Rd
+0d2a6b5e4916ff0c691c773a91e5644a *man/mi.Rd
 485a9a9403ecf50f15440f191801f2a2 *man/mid.Rd
-5c4acfa6bee4217da3a97f463d5c1157 *man/model.end.Rd
-fc83d043b489cc12287251ea2868384e *man/model.frame.multiple.Rd
-8a074f02af32919fa827d9b52278903a *man/model.matrix.multiple.Rd
-e070d9233e7d0902e612765fe23a1509 *man/network.Rd
+67fd27df704501a7488a7354343b9c8d *man/mix.Rd
+f2989d1582d56b7ed47a09fd179936ff *man/model.frame.multiple.Rd
+71b500e88dc689d6991e0267994c7c20 *man/model.matrix.multiple.Rd
+78206eb5459fe64390498ae12548b4b1 *man/model.matrix.parseFormula.Rd
+5254acd8bc4fef34301afc991fc07252 *man/multilevel.Rd
+d1c1a887fa9678ca30d132892ef762de *man/name.object.Rd
+d7cda1a9c4a73cdc91befb37c36a8901 *man/names.qi.Rd
 d7905236f8793559d3c452facbc3ea4c *man/newpainters.Rd
-04876336edd6e4d2b6b1afca391eb377 *man/parse.formula.Rd
-c3516227ef7f53127740307d16e32354 *man/parse.par.Rd
-36682fc3640f908651b4b20c22bb8b8f *man/plot.ci.Rd
-0d8c8703c2bbdcaf8102af2d34fe723f *man/plot.surv.Rd
-73074658ea340774b1fcc0856b7809e1 *man/plot.zelig.Rd
-6319e7329a0d552a676abc60069ba74e *man/put.start.Rd
-98f1b4b0f9adfcd06941908b8803d95e *man/repl.Rd
-7456039e1bbb2c453b73972465f50723 *man/rocplot.Rd
+e04b2f0a71aa447253375a12f195a3ea *man/param.Rd
+803d9a00ea8f7e3d8a75ab6feae27931 *man/param.default.Rd
+cd29ee9bdf3ea6256769153fde38869c *man/param.exp.Rd
+f76209fa73c1b36644ac56b233d4122f *man/param.gamma.Rd
+c94a6c248d1fc6a88ce7ed8063277651 *man/param.logit.Rd
+1b5d754ef9e96e292000f42271c183b9 *man/param.ls.Rd
+55f5a3a524678ffe2af11db1ea84ccd2 *man/param.negbinom.Rd
+01e5d1cd2e766188a57c2fd87f4bf91a *man/param.normal.Rd
+90b8bc399c84ae33dfd3b1a365044852 *man/param.poisson.Rd
+4ceab9c1912859dd59a9cb8b1e1d11ce *man/param.probit.Rd
+39b0c8be9d995f7642b1f9519c5efc82 *man/param.relogit.Rd
+3e6d072f5a4059bdad5873ff32017eca *man/param.relogit2.Rd
+d0b92597318ad87473d4d10c7cfce53c *man/param.tobit.Rd
+c2ca077a4c40ab85f6f3a248d1a0acf7 *man/parameters.Rd
+50e3c177cba2d0c0b122d85c43cc09b4 *man/parse.formula.Rd
+4b0740eaeb69d90ba468ef6575bf3563 *man/parseFormula.Rd
+7d20becde042ea0facb79e610cb71122 *man/parseFormula.formula.Rd
+821d71ca6ae7206b242590dedaa6864a *man/parseFormula.list.Rd
+c3210f27d600cbb6ca83d62774f0db22 *man/plot.MI.sim.Rd
+26f1eab54546cb1060dd2c89a82b90af *man/plot.ci.Rd
+4a1d6609b4d85d998b3dbe7f0e204b55 *man/plot.pooled.sim.Rd
+34507a7ed081d653c8e5509d0c959a58 *man/plot.sim.Rd
+d0c103e40b38d54a4108a9c6013104aa *man/plot.simulations.Rd
+ac17ad9be163a17d0a5d7b30350d4b76 *man/print.qi.Rd
+367b0c6d18525152b27cb1013a3f9555 *man/print.qi.summarized.Rd
+b27ee0bc8c9491f75c953ca27fc24d7b *man/print.setx.Rd
+df13b5983368d41f3612043d95b38a35 *man/print.setx.mi.Rd
+b5072e4e129ba0b28c7f5c6ea47dcf2e *man/print.sim.Rd
+ef5ee63ca6e4f23c25a63914ca16feec *man/print.summary.MCMCZelig.Rd
+fa80a23aae29624ac831bb90f32c14ef *man/print.summary.pooled.sim.Rd
+ec8c2c06c81608e34f09fc5b7ed5653c *man/print.summary.relogit.Rd
+e66e81ef463297415de8ade84e242dc5 *man/print.summary.relogit2.Rd
+203891c1e2c0576052d2da6717399bb5 *man/print.summary.sim.Rd
+c7eb506e8c71f699afbc00d1c1b4fe7f *man/print.summarySim.MI.Rd
+6b28ce03dca907fa837480069fa56bad *man/print.zelig.Rd
+680cd1c79176cf28ef6c5a1dcca245f5 *man/qi.Rd
+b6bdef3602275edb000eb71e64d1ca59 *man/qi.exp.Rd
+2546cd1df4831fe7c1fb368f9d09ae53 *man/qi.summarize.Rd
+dde5d2eb226a14bbaf9541386b4407ce *man/reduceMI.Rd
+30941e963f829a38020b64a094212803 *man/relogit.Rd
+64db643c8032b1362cac56cdc9b98e26 *man/repl.Rd
+656fef44714f9e5f2cb63e39f9572368 *man/repl.default.Rd
+aa0c4a9184cb6a5f34d67827c0a64af6 *man/repl.sim.Rd
+a496bcce7e71378d22cd0938bf7563f7 *man/replace.call.Rd
+d46cf72bf76964907d1e15cee9a583c7 *man/robust.gee.hook.Rd
+9727c64c5b8d6e24045d78d689c5dbf7 *man/robust.glm.hook.Rd
+194900341a4145076a510bd4b3b69b2e *man/rocplot.Rd
 685e8fe4738e2aad2ad73d7f2388570b *man/sanction.Rd
-98d8a6b58feaaeb41e81ceba3354ba86 *man/set.start.Rd
-029e561888ce4f2ca7b00697babe8bea *man/setx.Rd
-43793d75503f0f7178e924e538b83054 *man/sim.Rd
+2443219ee36a1d7f1a995adfbb03eca2 *man/setx.MI.Rd
+fbca11d6a833ef32c79001dc7660f534 *man/setx.Rd
+35ae732054417b4dd15561df6eea76c2 *man/setx.default.Rd
+b828c382fe49b52e0768d3c8f58246fe *man/sim.MI.Rd
+c1506f57a058d26b1dcafaf9a5329e93 *man/sim.Rd
+0f8cd4ff64927ac5c040605c19eff20f *man/sim.default.Rd
+77cc07e347939e579b117c93ee9acd3b *man/simulation.matrix.Rd
+fdaa2a66e1a6f52bab44d95d13ffceb3 *man/simulations.parameters.Rd
+ed6b11c524a1bdf7164c42721bc23f8c *man/simulations.plot.Rd
 1eab2cf2e0c82ea67d16c8ad05571a9c *man/sna.ex.Rd
-db0de00b4c3c31d0d9481cf7305610e6 *man/summary.zelig.Rd
+54d6dd5011a574c16c7f494eae45fc48 *man/special_print_LIST.Rd
+781ec28f6c60ee7aaece1571725a3922 *man/special_print_MATRIX.Rd
+7a064c38895979a1f9562891031c82fd *man/splitUp.Rd
+a84301cb431946f8887d01cc756ef364 *man/store.object.Rd
+4f966930f5b6422521bb408266b1d154 *man/structuralToReduced.Rd
+64a34068369970e69c9fb556d3aed895 *man/summarize.Rd
+a87af5212ad0e40d71379862d6dc2551 *man/summarize.default.Rd
+3879f433725da0db03d1cb6600e1028f *man/summary.MI.Rd
+288c2380bbb272af77d89d70ec648644 *man/summary.MI.sim.Rd
+0e1ad76e17a9597f7218d3863cc60c1d *man/summary.Relogit2.Rd
+2ba6219325aee97b57e05e13d1a61e21 *man/summary.glm.robust.Rd
+ee86a5246f90b4ed876b026442cac539 *man/summary.pooled.sim.Rd
+1bd5a6763e3d675293bd4449a43d0746 *man/summary.relogit.Rd
+df0d723d1afa54ac3ee04f2379c9b43d *man/summary.sim.Rd
+6459266f8831aec535e4b81000b45d83 *man/summary.zelig.Rd
 ca14c12e0087b1815d741b09dba0f5cc *man/swiss.Rd
-c139174e0ca254d0d1c83f73c38cde32 *man/ternaryplot.Rd
-3d56aeb81459f4e83e30c9ff1939e9bc *man/ternarypoints.Rd
+29cd4b01a20aedd254d64c8bddf6f481 *man/t.setx.Rd
+e03c72941cd05a1a87ec1e96bf4e7a2f *man/table.levels.Rd
+2b62155d275a1850ce6834c70b92b2b6 *man/terms.multiple.Rd
+5c3cd23a97d6f35d4e9cbd649875a14d *man/terms.vglm.Rd
+0cd8cf800eb4b6de6fdc253078af0f56 *man/terms.zelig.Rd
+77f7851d9f7813d81f8e461fd42c7015 *man/termsFromFormula.Rd
+6ff4e69066aedfcd7882e397d91b1dfa *man/toBuildFormula.Rd
 a75e0696550ade6ffe2e44144e98d75b *man/tobin.Rd
+b0c4b0f3838717ea98f43da5fe4f8b25 *man/tolmerFormat.Rd
 f7b42178326db13f916c540e911d3864 *man/turnout.Rd
-d4332f7d61229f03db890e3f4a73888f *man/user.prompt.Rd
+54d6ad9e9be6c4abc531fd18c4d0b07a *man/ucfirst.Rd
+69c49f3e5d195827788453416aad89f0 *man/user.prompt.Rd
 01c9c5b45b38c6240e5a1831e153390c *man/voteincome.Rd
-f77967992bb18cd5342dd7c036f4dda6 *man/zelig.Rd
-fe47f4ba8b9f64b979862d827505b6e4 *man/zeligDepStatus.Rd
-b829b4736326c656a15d6e12ecf1a404 *man/zeligDepUpdate.Rd
-7cdf4957fb501d5ba923a63a6c3cf98d *man/zeligVDC.Rd
-2cc098d4ef79558426bea0f0a2d54d64 *texput.log
-a9028638f2cdd0a55a71158e9d25cca8 *vignettes/Rd.sty
-697b50193d60e36cd3ae4f2a18752ab1 *vignettes/Sweave.sty
-6677cef9e3e708ef3ecfbf0a9b839e96 *vignettes/blogit.Rnw
-f07675eb32ac1761e435db6d45516688 *vignettes/bprobit.Rnw
-f728ed7010b23a0c4e59d29fa3aec6d1 *vignettes/citeZelig.tex
-3058d80d7cdfb13deff41c903f7f1e9d *vignettes/cites/aov.tex
-e64a4dd2bc2e0b6827fec4139cf0cd55 *vignettes/cites/arima.tex
-89f5ce81b6e7cce92dfbc08599c20a9c *vignettes/cites/blogit.tex
-e6d32ff785a440f867b30adee445e9cb *vignettes/cites/bprobit.tex
-16601c8ea800a1fc0ff9a6c28a4281e7 *vignettes/cites/chopit.tex
-20349c4bbe700f9bbc1a5d5bfded1bb7 *vignettes/cites/cloglog.net.tex
-12be2f50bf207a10381fc92091da736e *vignettes/cites/coxph.tex
-93d91c41e03f2a64ba01bcd447ba8fc1 *vignettes/cites/ei.RxC.tex
-8638052de110800fc2ab940a159031aa *vignettes/cites/ei.dynamic.tex
-da6ecefbc08f39c29de48750a30163ca *vignettes/cites/ei.hier.tex
-ca09fc3ba1d8f3acca8cda6bb24bdb61 *vignettes/cites/exp.tex
-b12e4170027072e20a6eb1473b20aefb *vignettes/cites/factor.bayes.tex
-ab1dcbf290cdc7cb812f5a78a7d602ea *vignettes/cites/factor.mix.tex
-25098f26e2b3feac42d0df7639cd486b *vignettes/cites/factor.ord.tex
-06dd16e7cbdfd29b55ab4b670cd45b95 *vignettes/cites/gamma.gee.tex
-a27103d57e45b5a1cb7c1da0ffbed1cf *vignettes/cites/gamma.mixed.tex
-c86cf97a3a73ba4a34f45adbbbec43d5 *vignettes/cites/gamma.net.tex
-833920595668740fa843181b7ca0e487 *vignettes/cites/gamma.survey.tex
-580687dc2a2474dd6eb953870851b8f4 *vignettes/cites/gamma.tex
-4f85f878063567375b2854378c2673b8 *vignettes/cites/irt1d.tex
-ef74baea8bb2355c4a521e8ca32725c9 *vignettes/cites/irtkd.tex
-5eb5eeaee918da537efdc45796761d8b *vignettes/cites/logit.bayes.tex
-57f12251a685046dfc576f641c4e726c *vignettes/cites/logit.gam.tex
-32928aeb4400ef38a80e390c4a1dd7d3 *vignettes/cites/logit.gee.tex
-132e2bb843784c07624bd367eab22b3b *vignettes/cites/logit.mixed.tex
-94b47b39baa18d44ad2f791cb4fdefe1 *vignettes/cites/logit.net.tex
-18f144d8f6705754a34d57cd61a706a3 *vignettes/cites/logit.survey.tex
-f6aaed68a794b341f85b2c5bbfc70ea7 *vignettes/cites/logit.tex
-d9a60fd1c1944254a6a042605aa8ac5c *vignettes/cites/lognorm.tex
-cd105b3f760766caf983e83b2ea5d0d5 *vignettes/cites/ls.mixed.tex
-4ece2e45d169e486cbce7897945e7f0d *vignettes/cites/ls.net.tex
-656500e06a963c44526910e4076da52e *vignettes/cites/ls.tex
-4cf5d5b7032862a8a3ffdfdc626e368e *vignettes/cites/mlogit.bayes.tex
-968dfe27810467a091e95e132ee862bc *vignettes/cites/mlogit.tex
-9a67e4358e4d75cd8ec1a4cf74a4ea79 *vignettes/cites/mprobit.tex
-a6de1c1bc67e15463f2ceb4e4ce28400 *vignettes/cites/negbin.tex
-d7263c62a4bf792a8b4ccf53eec58e8d *vignettes/cites/normal.bayes.tex
-331209ccdfdf613c53688ca8918bd9fd *vignettes/cites/normal.gam.tex
-d4bd1089e046cc4d0255176a819b2dec *vignettes/cites/normal.gee.tex
-255f6d50b319e901287d5515ca9d5246 *vignettes/cites/normal.net.tex
-b6b5c06eef351d7554836853e1e730cf *vignettes/cites/normal.survey.tex
-180dde93520e2c1fb6d361dbbd698054 *vignettes/cites/normal.tex
-21a7d3e7d9e915f78cd378e35c80e375 *vignettes/cites/ologit.tex
-8550307f454ae593ff3c8a387b1148cd *vignettes/cites/oprobit.bayes.tex
-262b9a4e185edcb78c30e325a8fd71a7 *vignettes/cites/oprobit.tex
-40015bbcbaa7d4a8be89968ca3af1381 *vignettes/cites/poisson.bayes.tex
-3d3afac86b0e500718d63521dac146a9 *vignettes/cites/poisson.gam.tex
-ef4d6acbc3c217ecdce3b5636cc22963 *vignettes/cites/poisson.gee.tex
-ba912b0ee2ea6ebe7ca5aab979b07088 *vignettes/cites/poisson.mixed.tex
-5ccb0f5a7794a14fe3064a439d51dc6b *vignettes/cites/poisson.net.tex
-ee8a39bf07acf69f5fa61d26c3eda55d *vignettes/cites/poisson.survey.tex
-3724680edf42d2f90086b8017ece4d93 *vignettes/cites/poisson.tex
-1ac74a17711066834a8b0f99dffb790e *vignettes/cites/probit.bayes.tex
-118c21dc1fcaf5dfa35ad42d1d8cf1b1 *vignettes/cites/probit.gam.tex
-11c6632768f239a65075a52b8359a8cb *vignettes/cites/probit.gee.tex
-a4f5c9e9083eb525cfea9729ea2b64e6 *vignettes/cites/probit.mixed.tex
-3cec896392095df56168ed26c90eef8d *vignettes/cites/probit.net.tex
-755cc2ef8967290e4d5beb8c6ada62ac *vignettes/cites/probit.survey.tex
-38426a69403a2ae684ecb3827f27b71a *vignettes/cites/probit.tex
-a84a96c7ea42b8eaea2e4be7269c6e7e *vignettes/cites/quantile.tex
-2a35c1fd06f131c075f29941ec1a4359 *vignettes/cites/relogit.tex
-efd8416307a41f79e23e03cfffe3bf31 *vignettes/cites/rq.tex
-ca4bbea23dc7b7aaf9a194ebfa864285 *vignettes/cites/sur.tex
-aab970e7b09216a10341945c93ab28af *vignettes/cites/threesls.tex
-6ada63e6aa911e28d19bfc5b3826e1dd *vignettes/cites/tobit.bayes.tex
-f26036a70c1b4ca43080f9408a4a2b93 *vignettes/cites/tobit.tex
-7a441d3da40e451d6425331e83e35382 *vignettes/cites/twosls.tex
-7cfba10d059a7400adb175769bd1de80 *vignettes/cites/weibull.tex
-742add5d0228aab286b222dde3b28a7d *vignettes/gamma.Rnw
-d223c4e70a16479ef98bb0dcd5f14fdd *vignettes/gamma.mixed.Rnw
-2ee5e77373e7b02d98108dd14bcb5005 *vignettes/gamma.survey.Rnw
-a04d2eefca9a62fde4d6fd446728f8db *vignettes/gk.bib
-f7c02e422740f8c02ea577287a8ed5b4 *vignettes/gkpubs.bib
-23b86f37e9012e3f7adbf9f59626026e *vignettes/html.sty
-f4fd2d1deb0f074cced6c27067c27186 *vignettes/logit.Rnw
-1217b462c81ab60b22d1f4f65842fa30 *vignettes/logit.mixed.Rnw
-72c3362a5b9b7a58d2a4f3f26c9b7f59 *vignettes/logit.survey.Rnw
-517464a2d454cca633eee2b1e0c79df9 *vignettes/ls.Rnw
-27e7da6633f43e6169c51c01e23e3b89 *vignettes/ls.mixed.Rnw
-115cd866e54f531a975d2a3455d53c3c *vignettes/natbib.sty
-502fc45e7102035a74ba2f4d9d333d42 *vignettes/negbin.Rnw
-9cadb6aa754caa65862fdccb6c50debd *vignettes/normal.survey.Rnw
-ab42408ebe1eeeb382e0b0ef7fc73cb5 *vignettes/otherworks.bib
-db085b2c8ce9806225ad9ed85984f6cb *vignettes/poisson.Rnw
-778a5c55f5642258161e6d95ac8d93f5 *vignettes/poisson.mixed.Rnw
-bb83664df4042dd371185419ae1bbdff *vignettes/poisson.survey.Rnw
-807069ec17b311f463d6a41d08b1fa8e *vignettes/probit.Rnw
-e019e8363b3a427482351ce3d4a9bb09 *vignettes/probit.mixed.Rnw
-dbbaa61346e4423b2ac3146c5de6c801 *vignettes/probit.survey.Rnw
-4218031f2d3d06b74984dc52aa76f07f *vignettes/upquote.sty
-5396e5c8145c9640d728a30ef647fc33 *vignettes/weibull.Rnw
-416e4497ac73bd802a8221c813d10a3a *vignettes/zinput.tex
+08bb4fc168852c1af1dfe900a99e416e *man/z.Rd
+d5a57f78acdf7e3665275691b7d51d0d *man/zelig.Rd
+db5d3d911b778db51d0ed336355060d4 *man/zelig.call.Rd
+bf3bf749ecafeb03ebaf312ce24e8751 *man/zelig.skeleton.Rd
+be529b1f9b10b5c3d68cb979d45bdd39 *man/zelig2-bayes.Rd
+982fbf939e18d3a501c1a4edd9660e71 *man/zelig2-core.Rd
+142acdbd460032c48b2fa9ab46ae9ae2 *man/zelig2-gee.Rd
+92798192843543bd475f7a1127abebcd *man/zelig2-survey.Rd
+0582491d8b2073639b1202c677d839ce *man/zelig2.Rd
+36994842c5d7884aba86e3d95fbc43e1 *messages/templates/en/describe.canned
+a0a76b93dfe9e586f0b688bc659261ef *messages/templates/en/describe.credit
+4c8b1a164cb8da4c0ea293372cade421 *messages/templates/en/describe.how.to
+8db4827ad74792f26bbe060cd14820ff *messages/templates/en/print.summary.sim.canned
+f4052924e1f3288279bd4659c02f97a4 *messages/templates/en/print.summary.sim.credit
+623bf05a3c4f85aa2379356d4ed7081d *messages/templates/en/print.summary.sim.how.to
+d68992839f91eae7dce929c5ae781e8a *messages/templates/en/qi.canned
+5a980a6bafeabeb5fe863583359ecca4 *messages/templates/en/qi.credit
+0ada437f5601cc33a880eb62a23fa583 *messages/templates/en/qi.how.to
+f199ffb1cf8bc7ea6a8074534f434394 *messages/templates/en/setx.canned
+5c27f3897543b513c4091afa64eeee12 *messages/templates/en/setx.credit
+b430d08a9319c1b43541e1f37cb1f275 *messages/templates/en/setx.how.to
+d954c9ee1c99878e0a379efff180b220 *messages/templates/en/sim.canned
+0b40e165c3945b5e697f2d0a89601f4b *messages/templates/en/sim.credit
+3b0c2a1e26e1a38f3215293f4fbda7af *messages/templates/en/sim.how.to
+831acd1b1ca307efab6d0f831acc9d05 *messages/templates/en/sim.setx.canned
+fcd8fac6c542aa0a8cedcc47bab55b34 *messages/templates/en/sim.setx.credit
+203a243137f35276b3fc288b482aa5ee *messages/templates/en/sim.setx.how.to
+b2782dc79b45fd6eda9db50f70cdb29e *messages/templates/en/summary.sim.canned
+8e6cd4b40a9979cb804dc15699e393be *messages/templates/en/summary.sim.credit
+e4525acde6c3329d73d02475f692ebe0 *messages/templates/en/summary.sim.how.to
+28263ff0964a540f7f874bc7093935c9 *messages/templates/en/zelig2.canned
+83cecaa2f5045a8b024ef67feb0e723a *messages/templates/en/zelig2.credit
+56431b0570f88212688cac62ffa94b99 *messages/templates/en/zelig2.how.to
+9d20e1354ec8f17a50ed9d3cda0124df *messages/templates/en/zelig3.canned
+d5f01072ed9a039255d95ae4bcaeb329 *messages/templates/en/zelig3.credit
+99abfaa353de1f242ecf2af83e8cf6af *messages/templates/en/zelig3.how.to
+438bb2e5447a9d566fbcae4657974c34 *po/R-en.po
+247f0490337165f500224fd5eab1de8b *tests/MatchIt.R
+7104b3beb85e184be2193da209497f77 *tests/amelia.R
+b47aea86fa83382112dfa9e531d4fabc *tests/by.R
+9ce2df193b74ae47a82024f81a35bf50 *tests/lognorm.R
+04720577fdbcc28544b396b55164efe9 *tests/mi.R
+c0458e644bb50ace45e4b89f3adc235a *tests/mix.R
+04b6f9b189a9fb6e4dbdfd959794521c *tests/models-bayes.R
+4712575f3142cbe8894db95db0393f87 *tests/models-core.R
+b64512358b907c88cab39c8808ddd62f *tests/models-gee.R
+7f82704b0c25112224ac2cdd89ebfaf9 *tests/models-survey.R
+ca8d9096a7a0622f6ef3af428653d90f *tests/plot-ci.R
+cff6618ed3f2a58687d22c115ab745af *tests/pooled.R
+32522f6db5aa52a087c7fed252054cd5 *tests/relogit.R
+4f5bf07089b9425a121ab6cdae418c6a *tests/summary.MI.R
+bbb4157a80472a791a3fa21a06eaf2a2 *tests/twosls.R
+de1aeda81e8c8ab6ded25df5ae2dbfab *vignettes/Sweave.sty
+55c2ecd46b3b0d9576b9aa82d0545124 *vignettes/Zelig.bib
+fa7f97b865034c25ca528ebfe02e0d0f *vignettes/Zelig.sty
+c9a0058c2df7ec58641689e43e66e9fc *vignettes/gk.bib
+a35da60f3f139a9a7cd6749353eb430f *vignettes/gkpubs.bib
diff --git a/NAMESPACE b/NAMESPACE
index 4e4fa6c..52b82ec 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,174 +1,275 @@
-importFrom(MASS, mvrnorm)
-importFrom(boot, boot)
-importFrom(stats, glm)
-importFrom(stats, family)
-importFrom(stats, binomial)
-importFrom(stats, gaussian)
-importFrom(stats, Gamma)
-importFrom(stats, inverse.gaussian)
-importFrom(stats, poisson)
-importFrom(stats, quasi)
-importFrom(stats, quasibinomial)
-importFrom(stats, quasipoisson)
-
-
-export( current.packages, 
-        dims, 
-        gsource,
-        help.zelig,
-        network,
-        mi,
-	model.end,
-        model.matrix.multiple, 
-        model.frame.multiple,
-	parse.formula, 
-	parse.par,
-        plot.ci,
-        plot.surv,
-        plot.zelig,
-	put.start,
-        repl,
-        rocplot, 
-	set.start,
-	setx,
-	sim,
-        ternaryplot,
-	ternarypoints, 
-	user.prompt,
-        zelig,
-        zeligDepStatus,
-        zeligDepUpdate,
-        zeligListModels,
-        zeligInstalledModels,
-        zeligModelDependency, 
-        zeligGetSpecial,
-	zeligDescribeModelXML
-       )
-	
-#S3method("$", ZeligS4vglm)
-#S3method("$", summary.ZeligS4vglm)
-#S3method("$<-", ZeligS4vglm)
-#S3method("$<-", summary.ZeligS4vglm)
-S3method(coef, chopit)
-S3method(formula, vglm)
-S3method(model.matrix, multiple)
-S3method(model.frame, multiple)
-S3method(names, relogit)
-S3method(names, summary.vglm)
-S3method(names, summary.zelig.relogit)
-S3method(names, ZeligS4vglm)
-S3method(names, zelig)
-S3method(names, zelig.relogit)
-S3method(plot, zelig)
-S3method(plot, zelig.arima)
-S3method(plot, zeliglist)
-S3method(plot.zelig, blogit)
-S3method(plot.zelig, bprobit)
-S3method(plot.zelig, coxph)
-S3method(plot.zelig, logit.gam)
-S3method(plot.zelig, poisson.gam)
-S3method(plot.zelig, probit.gam)
-S3method(plot.zelig, logit)
-S3method(plot.zelig, logit.gee)
-S3method(plot.zelig, gamma.gee)
-S3method(plot.zelig, poisson.gee)
-S3method(plot.zelig, probit.gee)
-S3method(plot.zelig, normal.gee)
-S3method(plot.zelig, mlogit)
-S3method(plot.zelig, negbin)
-S3method(plot.zelig, cloglog.net)
-S3method(plot.zelig, logit.net)
-S3method(plot.zelig, poisson.net)
-S3method(plot.zelig, probit.net)
-S3method(plot.zelig, ologit)
-S3method(plot.zelig, oprobit)
-S3method(plot.zelig, probit)
-S3method(plot.zelig, poisson)
-S3method(plot.zelig, relogit)
-S3method(plot.zelig, logit.survey)
-S3method(plot.zelig, probit.survey)
-S3method(plot.zelig, poisson.survey)
-S3method(print, arimaSummary)
-S3method(print, BetaReg)
-S3method(print, coxhazard)
-S3method(print, names.relogit)
-S3method(print, names.zelig)
-S3method(print, relogit)
-S3method(print, relogit2)
-S3method(print, summary.relogit)
-S3method(print, summary.relogit2)
-S3method(print, summary.MI)
-S3method(print, summary.strata)
-S3method(print, summary.zelig.strata)
-S3method(print, summary.lm.robust)
-S3method(print, summary.glm.robust)
-S3method(print, summary.MCMCZelig)
-S3method(print, summary.zelig)
-S3method(print, eiRxC)
-S3method(print, zelig)
-S3method(qi, chopit)
-S3method(repl, default)
-S3method(repl, zelig)
-S3method(sim, setxArima)
-S3method(sim, default)
-S3method(sim, cond)
-S3method(sim, coxph)
-S3method(sim, eiRxC)
-S3method(sim, counter)
-S3method(sim, setx.MI)
-S3method(sim, setx.rqs)
-S3method(sim, setx.strata)
-S3method(sim, zaovlist)
-S3method(sim, eiRxC)
-S3method(setx, Arima)
-S3method(setx, coxph)
-S3method(setx, default)
-S3method(setx, eiRxC)
-S3method(setx, gam)
-S3method(setx, noX)
-S3method(setx, MI)
-S3method(setx, netglm)
-S3method(setx, rq)
-S3method(setx, rq.process)
-S3method(setx, rqs)
-S3method(setx, strata)
-S3method(setx, relogit2)
-S3method(setx, zaovlist)
-S3method(summarize, array)
-S3method(summarize, ei)
-S3method(summarize, coxhazard)
-S3method(summary, Arima)
-S3method(summary, zelig.arima)
-S3method(summary, BetaReg)
-S3method(summary, coxph.naive)
-S3method(summary, coxph.robust)
-S3method(summary, MI)
-S3method(summary, netglm)
-S3method(summary, strata)   
-S3method(summary, relogit)   
-S3method(summary, relogit2)   
-S3method(summary, setx.cond)
-S3method(summary, setx)   
-S3method(summary, zelig.strata)
-S3method(summary, zelig.rqs.strata)
-S3method(summary, MCMCZelig)   
-S3method(summary, zelig)   
-S3method(summary, vglm)
-S3method(summary, lm.robust)   
-S3method(summary, glm.robust)   
-S3method(summary, eiRxC)
-S3method(summary, zaovlist)
-S3method(summary, zmaov)
-S3method(terms, vglm)
-S3method(summary, zmlm)
-S3method(terms, multiple)
-S3method(vcov, BetaReg)
-S3method(vcov, eiRxC)
-S3method(vcov, relogit)
-S3method(vcov, lm.robust)
-S3method(vcov, glm.robust)
-S3method(vcov, zmlm)
-S3method(vcov, chopit)
-S3method(zelig, default)
-
-
+export(.getRandAndFixedTerms)
+export(.reduceFurther)
+export(.ZeligModelCategories)
+export(alpha)
+export(as.description)
+export(as.qi.default)
+export(as.qi.list)
+export(as.qi.qi)
+export(as.summarized.list)
+export(as.summarized.summarized.qi)
+export(as.summarized)
+export(bootstrap)
+export(cite)
+export(cmvglm)
+export(coef.parameters)
+export(combine)
+export(depends.on.zelig)
+export(describe.default)
+export(describe.gamma)
+export(describe.logit)
+export(describe.ls)
+export(describe.negbinom)
+export(describe.normal)
+export(describe.poisson)
+export(describe.probit)
+export(describe)
+export(description)
+export(GetObject)
+export(getPredictorTerms)
+export(getResponseTerms)
+export(GetSlot.zelig)
+export(GetSlot)
+export(has.zelig2)
+export(help.zelig)
+export(ignore)
+export(link)
+export(linkinv)
+export(loadDependencies)
+export(make.parameters)
+export(Max)
+export(MCMChook)
+export(McmcHookFactor)
+export(Median)
+export(mi)
+export(Min)
+export(mix)
+export(Mode)
+export(param.default)
+export(param)
+export(parameters)
+export(parse.formula)
+export(parseFormula)
+export(plot.ci)
+export(print.setx)
+export(print.zelig)
+export(qi)
+export(reduceMI)
+export(relogit)
+export(repl)
+export(robust.glm.hook)
+export(robust.gee.hook)
+export(rocplot)
+export(setx)
+export(sim)
+export(simulation.matrix)
+export(simulations.parameters)
+export(splitUp)
+export(structuralToReduced)
+export(summarize)
+export(summary.MI.sim)
+export(summary.sim)
+export(summary.zelig)
+export(termsFromFormula)
+export(TexCite)
+export(tolmerFormat)
+export(user.prompt)
+export(z)
+export(zelig.call)
+export(zelig.skeleton)
+export(zelig)
+export(zelig2exp)
+export(zelig2factor.bayes)
+export(zelig2gamma.gee)
+export(zelig2gamma.survey)
+export(zelig2gamma)
+export(zelig2logit.bayes)
+export(zelig2logit.gee)
+export(zelig2logit.survey)
+export(zelig2logit)
+export(zelig2lognorm)
+export(zelig2ls)
+export(zelig2mlogit.bayes)
+export(zelig2negbinom)
+export(zelig2normal.bayes)
+export(zelig2normal.gee)
+export(zelig2normal.survey)
+export(zelig2normal)
+export(zelig2oprobit.bayes)
+export(zelig2poisson.bayes)
+export(zelig2poisson.gee)
+export(zelig2poisson.survey)
+export(zelig2poisson)
+export(zelig2probit.bayes)
+export(zelig2probit.gee)
+export(zelig2probit.survey)
+export(zelig2probit)
+export(zelig2relogit)
+export(zelig2tobit)
+export(zelig2twosls)
+export(ZeligDescribeModel)
+export(ZeligListModels)
+export(ZeligListTitles)
+S3method("[[",qi)
+S3method("[[",zelig)
+S3method(as.data.frame,setx)
+S3method(as.description,description)
+S3method(as.description,list)
+S3method(as.matrix,pooled.setx)
+S3method(as.matrix,setx)
+S3method(bootstrap,default)
+S3method(bootstrap,gamma)
+S3method(bootstrap,negbinom)
+S3method(bootstrap,normal)
+S3method(coef,parameters)
+S3method(coef,zelig)
+S3method(describe,default)
+S3method(describe,exp)
+S3method(describe,factor.bayes)
+S3method(describe,gamma.gee)
+S3method(describe,gamma.survey)
+S3method(describe,logit.bayes)
+S3method(describe,logit.gee)
+S3method(describe,logit.survey)
+S3method(describe,logit)
+S3method(describe,lognorm)
+S3method(describe,ls)
+S3method(describe,mlogit.bayes)
+S3method(describe,negbinom)
+S3method(describe,normal.bayes)
+S3method(describe,normal.gee)
+S3method(describe,normal.survey)
+S3method(describe,normal)
+S3method(describe,oprobit.bayes)
+S3method(describe,poisson.bayes)
+S3method(describe,poisson.gee)
+S3method(describe,poisson.survey)
+S3method(describe,poisson)
+S3method(describe,probit.bayes)
+S3method(describe,probit.gee)
+S3method(describe,probit.survey)
+S3method(describe,relogit)
+S3method(describe,tobit)
+S3method(describe,twosls)
+S3method(describe,zelig)
+S3method(getResponseTerms,formula)
+S3method(getResponseTerms,Formula)
+S3method(getResponseTerms,list)
+S3method(logLik,zelig)
+S3method(model.frame,multiple)
+S3method(model.matrix,multiple)
+S3method(model.matrix,parseFormula)
+S3method(names,qi)
+S3method(param,default)
+S3method(param,exp)
+S3method(param,factor.bayes)
+S3method(param,gamma.gee)
+S3method(param,gamma.survey)
+S3method(param,gamma)
+S3method(param,logit.bayes)
+S3method(param,logit.gee)
+S3method(param,logit.survey)
+S3method(param,logit)
+S3method(param,lognorm)
+S3method(param,ls)
+S3method(param,mlogit.bayes)
+S3method(param,negbinom)
+S3method(param,normal.bayes)
+S3method(param,normal.gee)
+S3method(param,normal.survey)
+S3method(param,oprobit.bayes)
+S3method(param,poisson.bayes)
+S3method(param,poisson.gee)
+S3method(param,poisson.survey)
+S3method(param,probit.bayes)
+S3method(param,probit.gee)
+S3method(param,probit.survey)
+S3method(param,relogit)
+S3method(param,relogit2)
+S3method(param,tobit)
+S3method(param,twosls)
+S3method(parseFormula,formula)
+S3method(parseFormula,Formula)
+S3method(parseFormula,list)
+S3method(plot,MI.sim)
+S3method(plot,pooled.sim)
+S3method(plot,sim.cloglog.net)
+S3method(plot,sim.gamma.gee)
+S3method(plot,sim.logit.gee)
+S3method(plot,sim.normal.gee)
+S3method(plot,sim.poisson.gee)
+S3method(plot,sim.probit.gee)
+S3method(plot,sim.twosls)
+S3method(plot,sim)
+S3method(plot,zelig)
+S3method(print,qi.summarized)
+S3method(print,qi)
+S3method(print,setx.mi)
+S3method(print,setx)
+S3method(print,sim)
+S3method(print,summary.MCMCZelig)
+S3method(print,summary.pooled.sim)
+S3method(print,summary.relogit)
+S3method(print,summary.relogit2)
+S3method(print,summary.setx)
+S3method(print,summary.sim)
+S3method(print,summary.MCMCZelig)
+S3method(print,summaryMI)
+S3method(print,summarySim.MI)
+S3method(print,zelig)
+S3method(qi,exp)
+S3method(qi,gamma.gee)
+S3method(qi,gamma.survey)
+S3method(qi,gamma)
+S3method(qi,logit.bayes)
+S3method(qi,logit.gee)
+S3method(qi,logit.survey)
+S3method(qi,logit)
+S3method(qi,lognorm)
+S3method(qi,ls)
+S3method(qi,mlogit.bayes)
+S3method(qi,negbinom)
+S3method(qi,normal.bayes)
+S3method(qi,normal.gee)
+S3method(qi,normal.survey)
+S3method(qi,normal)
+S3method(qi,oprobit.bayes)
+S3method(qi,poisson.gee)
+S3method(qi,poisson.survey)
+S3method(qi,poisson)
+S3method(qi,probit.bayes)
+S3method(qi,probit.gee)
+S3method(qi,probit.survey)
+S3method(qi,probit)
+S3method(qi,relogit)
+S3method(qi,relogit2)
+S3method(qi,tobit)
+S3method(qi,twosls)
+S3method(repl,default)
+S3method(repl,sim)
+S3method(setx,default)
+S3method(setx,MI)
+S3method(sim,default)
+S3method(sim,MI)
+S3method(simulation.matrix,pooled.sim)
+S3method(simulation.matrix,sim)
+S3method(summarize,default)
+S3method(summary,glm.robust)
+S3method(summary,MCMCZelig)
+S3method(summary,MI.sim)
+S3method(summary,MI)
+S3method(summary,pooled.sim)
+S3method(summary,Relogit)
+S3method(summary,Relogit2)
+S3method(summary,setx)
+S3method(summary,sim)
+S3method(summary,zelig)
+S3method(t,setx)
+S3method(terms,multiple)
+S3method(terms,vglm)
+S3method(terms,zelig)
+S3method(vcov,gee.naive)
+S3method(vcov,gee.robust)
+S3method(vcov,glm.robust)
+S3method(vcov,Relogit)
+S3method(vcov,zelig)
diff --git a/R/Diff.R b/R/Diff.R
deleted file mode 100644
index 9d4b58d..0000000
--- a/R/Diff.R
+++ /dev/null
@@ -1,5 +0,0 @@
-Diff <- function(Y, d, ds=NULL, per=NULL){
-  mc <- match.call()
-  name <- as.name(mc[[2]])
-  list(name=name, d=d, ds=ds, per=per)
-}
diff --git a/R/GetObject.R b/R/GetObject.R
new file mode 100644
index 0000000..da78da3
--- /dev/null
+++ b/R/GetObject.R
@@ -0,0 +1,10 @@
+#' Extract the fitted model object from the Zelig object
+#'
+#' @param obj an object of type `zelig'
+#' @return the fitted model object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+GetObject <- function(obj) {
+  if (inherits(obj, 'zelig'))
+    obj$result
+}
diff --git a/R/GetSlot.R b/R/GetSlot.R
new file mode 100644
index 0000000..4f68ec8
--- /dev/null
+++ b/R/GetSlot.R
@@ -0,0 +1,12 @@
+#' Generic method for extracting variables from both
+#' S3 and S4 fitted model object
+#'
+#' @param obj an object of type `zelig'
+#' @param key a character-string specifying the name
+#'            of the variable to extract
+#' @param ... typically ignored parameters
+#' @return the value of that extracted object or NULL
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+GetSlot <- function(obj, key, ...)
+  UseMethod("GetSlot")
diff --git a/R/GetSlot.zelig.R b/R/GetSlot.zelig.R
new file mode 100644
index 0000000..b9e99a9
--- /dev/null
+++ b/R/GetSlot.zelig.R
@@ -0,0 +1,38 @@
+#' Return a Value from a \code{zelig} Fitted Model
+#'
+#' Returns a value from the result of a model fitting function
+#' @usage \method{GetSlot}{zelig}(obj, key, ...)
+#' @note This function is primarily used by Zelig developers within \code{qi}
+#'   functions
+#' @param obj a \code{zelig} object
+#' @param key a character-string specifying the which value to extract from
+#'   the fitted model object  
+#' @param ... subsequent values to extract from the fitted model object
+#' @return values of the specified keys
+#' @export
+#' @author Matt Owen \emph{mowen@@iq.harvard.edu}
+GetSlot.zelig <- function(obj, key, ...) {
+  # expand dots
+  dots <- list(...)
+
+  # error-catching
+  if (!all(sapply(dots, is.character)))
+    stop("all dot parameters must be characters")
+
+  # get result of zelig object
+  obj <- obj$result
+  #
+  res <- obj[[key]]
+
+  for (key in dots) {
+    # 
+    res <- try(res[[key]], silent=T)
+
+    # if it doesn't exist, then NULL
+    if (inherits(res, "try-error"))
+      return(NULL)
+  }
+
+  # return
+  res
+}
diff --git a/R/MCMChook.R b/R/MCMChook.R
new file mode 100644
index 0000000..b7e67f8
--- /dev/null
+++ b/R/MCMChook.R
@@ -0,0 +1,75 @@
+#' Hook to Clean-up MCMC Objects
+#'
+#' This method gives valid methods to the resulting MCMC object so that it can
+#' be used with Zelig.
+#' @note This function is used internally by the ZeligBayesian package.
+#' @param obj the fitted model object (in this case a \code{mcmc} object.
+#' @param model.call the call made to the external model
+#' @param zelig.call the actual call to zelig itself
+#' @param seed a seed for the MCMC algorithm
+#' @param ... ignored parameters
+#' @return an object useable by Zelig
+#' @author Olivia Lau, Kosuke Imai, Gary King and Matt Owen
+#' @export
+MCMChook <- function (obj, model.call, zelig.call, seed=NULL, ..., data = NULL) {
+
+  # Create a new object
+  res <- list()
+
+  attr(obj, "call") <- NULL
+
+  # Add the bare necessities for a zelig object
+  res$coefficients <- obj
+  res$formula <- zelig.call$formula
+  res$data <- data
+  res$model <- model.frame(eval(res$formula), data = data)
+  res$terms <- attr(res$model, "terms")
+  res$call <- model.call
+
+  # Ensure that a "seed" element exists
+  res$seed <- if (is.null(seed))
+    NA
+  else
+    seed
+
+  class(res) <- "MCMCZelig"
+
+  res
+}
+
+#' Hook to Clean-up MCMC Factor Object
+#'
+#' This method gives valid methods to the resulting MCMC object so that it can
+#' be used with Zelig.
+#' @note This function is used internally by the ZeligBayesian package.
+#' @param obj the fitted model object (in this case a \code{mcmc} object.
+#' @param model.call the call made to the external model
+#' @param zelig.call the actual call to zelig itself
+#' @param seed a seed for the MCMC algorithm
+#' @param ... ignored parameters
+#' @return an object useable by Zelig
+#' @author Olivia Lau, Kosuke Imai, Gary King and Matt Owen
+#' @export
+McmcHookFactor <- function (obj, model.call, zelig.call, seed = NULL, ...) {
+
+  out <- list()
+
+  out$coefficients <- obj
+  out$formula <- zelig.call$formula
+  out$data <- zelig.call$data
+  out$model <- model.frame(eval(out$formula), eval(out$data))
+  out$terms <- attr(out$model, "terms")
+  out$call <- model.call
+
+  # Factors have no intercept term?
+  attr(out$terms,"intercept") <- 0
+
+  if (is.null(zelig.call$seed))
+    out$seed <- NA
+  else
+    out$seed <- zelig.call$seed
+
+  class(out) <- "MCMCZelig"
+
+  out
+}
diff --git a/R/MIsimulation.R b/R/MIsimulation.R
deleted file mode 100644
index c74b6bd..0000000
--- a/R/MIsimulation.R
+++ /dev/null
@@ -1,58 +0,0 @@
-MIsimulation <- function (object, num = c(1000, 100), prev = NULL, 
-                          bootstrap = FALSE, bootfn = NULL, x = NULL, x1 = NULL, ...)  {
-  M <- length(object)
-  simpar <- simqi <- res <- list()
-  fn <- paste("zelig4", getzelig(object[[1]]), sep = "")
-  if (is.null(prev)) {
-    numM <- round(num/M)
-    if (!bootstrap) {
-      for (i in 1:M){
-        simpar[[i]] <- param(object[[i]], num = numM, bootstrap =
-                             bootstrap)
-  	  if(exists(fn)){
-	  	if(any(class(x)=="setx.cond")){
-		  xvar <- as.matrix(cbind(1,x[[1]][,2:ncol(x[[1]])]))
-		  for (h in 2:M)
-      		xvar <- as.matrix(rbind(xvar, as.matrix(cbind(1,x[[h]][,2:ncol(x[[1]])]))))
-              
-		  simpar[[i]] <- do.call(fn, list(object=object[[i]], simpar=simpar[[i]], x=xvar, x1=NULL, bootstrap=bootstrap, bootfn=bootfn))       
-		}
-		else
-    	  	  simpar[[i]] <- do.call(fn, list(object=object[[i]], simpar=simpar[[i]], x=x, x1=x1, bootstrap=bootstrap, bootfn=bootfn))      
-	  }
-	}
-    }
-    else {
-      tt <- terms(object[[1]])
-      if (is.null(bootfn)) 
-        bootfn <- bootfn.default
-      for (i in 1:M) {
-        dta <- eval(object[[i]]$data, sys.parent())
-        res <- boot(dta, bootfn, R = num, object = object[[i]],...)
-        colnames(res$t) <- names(res$t0)
-        simpar[[i]] <- res$t
-	  if(exists(fn)){
-	  	if(any(class(x)=="setx.cond")){
-		  xvar <- as.matrix(cbind(1,x[[1]][,2:ncol(x[[1]])]))
-		  for (h in 2:M)
-      		xvar <- as.matrix(rbind(xvar, as.matrix(cbind(1,x[[h]][,2:ncol(x[[1]])]))))
-              
-		  simpar[[i]] <- do.call(fn, list(object=object[[i]], simpar=simpar[[i]], x=xvar, x1=NULL, bootstrap=bootstrap, bootfn=bootfn, dta=dta))       
-		}
-		else
-    	  	  simpar[[i]] <- do.call(fn, list(object=object[[i]], simpar=simpar[[i]], x=x, x1=x1, bootstrap=bootstrap, bootfn=bootfn, dta=dta))      
-	  }
-	}
-    }
-    params <- as.matrix(simpar[[1]])
-    for (j in 2:M)
-      params <- rbind(params, as.matrix(simpar[[j]]))
-  }
-  else {
-    if (bootstrap) 
-      stop("Error: Choosing 'bootstrap = TRUE' generates new parameters.  \nIf you wish to use previously generated parameters, \nplease specify only 'prev'.")
-    else params <- prev
-  }
-  params
-}
-
diff --git a/R/MLutils.R b/R/MLutils.R
index e245c91..dfcd766 100644
--- a/R/MLutils.R
+++ b/R/MLutils.R
@@ -1,8 +1,11 @@
-###
-## Take a formula in any of the reduced form or in a structural form
-## and returns the most reduced form of that formula
-## 
-
+#' Reduce MI Formulas
+#' Take a formula in any of the reduced form or in a structural form and return
+#' the most reduced form of that formula
+#' @note This formula is used primarily by 'zelig2' functions of multivariate
+#'   Zelig models
+#' @param f a formula
+#' @export
+#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
 reduceMI <-function(f){
         if(class(f)=="list")
           f <- structuralToReduced(f)
@@ -10,13 +13,12 @@ reduceMI <-function(f){
 }
 
 
-##
-# Transform the multilevel's structural form formulas into
-# reduced form
-# input: formula in structural form ( a list of formulas)
-# output: formula in reduced form
-# possible bugs: What if class(f) is multiple and not list??
-
+#' Transform the Multilevel's Structural Formulas Into Reduced Form
+#' @param f a list of formulas
+#' @return a formula in reduced form
+#' @export
+#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
+# possible bug: what if class(f) is 'multiple' and not a list?
 structuralToReduced <- function(f){
 
         ## input should be a list
@@ -51,13 +53,14 @@ structuralToReduced <- function(f){
         return(res)
 }
 
-###
-##  take a formula in the reduced form and return it in
-##  lmer representation (basically remove starting "tag"
-##  of each term)
-##  possible errors: What if input is not in reduced form?
-##                   Maybe call reduceMI first??
-
+#' Convert a Formula into 'lmer' Representation from Reduced Form
+#' Take a formula in its reducd from and return it as a 'lmer' representation
+#' (from the lme4 package). This is basically removing the starting 'tag' from
+#' each term.
+#' @param f a formula in reduced form
+#' @return the 'lmer' representation of 'f'
+#' @export
+#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
 tolmerFormat<-function(f){
         lhs <- f[[2]]
         tt <- terms(f, specials="tag")
@@ -71,11 +74,12 @@ tolmerFormat<-function(f){
         return(res)
 }
 
-
-###
-## given a formula in a reduced from, output the most reduced one
-##
-
+#' Further Reduce Formulas in Reduced Form
+#' Given a formula in a reduced form, output the most reduced one.
+#' @param f a formula in reduced form
+#' @return an even-more reduced formula
+#' @export
+#' @author Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
 .reduceFurther <- function(f){
         
         if(length(f)!=3)
@@ -251,6 +255,7 @@ return (c(src %w/o% src[index],dest))
 #         random = ~ 1 + Days)
 #
 
+#' @export
 .getRandAndFixedTerms <- function (fml){
         f <- function(x){
                 as.formula(paste("~",paste(x, collapse = "+")))
diff --git a/R/Zelig-package.R b/R/Zelig-package.R
new file mode 100644
index 0000000..4c06a82
--- /dev/null
+++ b/R/Zelig-package.R
@@ -0,0 +1,37 @@
+#' Zelig: Everyone's Statistical Software
+#'
+#' Zelig is an easy-to-use program that can estimate, and
+#' help interpret the results of, an enormous range of statistical models. It
+#' literally is ``everyone's statistical software'' because Zelig's simple
+#' unified framework incorporates everyone else's (R) code. We also hope it will 
+#' become ``everyone's statistical software'' for applications and teaching,
+#' and so have designed Zelig so that anyone can easily use it or add their
+#' programs to it.  Zelig also comes with infrastructure that facilitates the
+#' use of any existing method, such as by allowing multiply imputed data for
+#' any model, and mimicking the program Clarify (for Stata) that takes the raw
+#' output of existing statistical procedures and translates them into
+#' quantities of direct interest.
+#' 
+#' \tabular{ll}{
+#' Package: \tab Zelig\cr
+#' Version: \tab 4.1-2\cr
+#' Date: \tab 2013-01-11\cr
+#' Depends: \tab R (>= 2.14), boot, MASS, methods, sandwich, survival\cr
+#' Suggests: \tab mvtnorm, Formula \cr
+#' License: \tab GPL version 2 or newer\cr
+#' URL: \tab http://gking.harvard.edu/zelig\cr
+#' }
+#'
+#' @name Zelig-package
+#' @aliases Zelig
+#' @docType package
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, Olivia Lau,
+#'   and Gary King 
+#' @keywords package
+#' @seealso zelig setx sim
+NULL
+
+# SUPER SECRET VARIABLES...
+# These squelch "R CMD CHECK" issues for dynamically (though constantly added)
+# local variables to the "bootstrap", "param" and "qi" functions.
+.call <- .fitted <- .object <- NULL
diff --git a/R/arima.wrap.R b/R/arima.wrap.R
deleted file mode 100644
index 82c2591..0000000
--- a/R/arima.wrap.R
+++ /dev/null
@@ -1,7 +0,0 @@
-arima.wrap <- function(formula, order, x, xreg=NULL, data, M, ...){
-  t<- terms(formula)
-	out<-arima(x=x, xreg=xreg, order=order, ...)
-  out$terms<- t
-  class(out)<- c(class(out), "Arima")
-  return(out)
-} 
diff --git a/R/as.dataframe.setx.R b/R/as.dataframe.setx.R
new file mode 100644
index 0000000..7cca4ad
--- /dev/null
+++ b/R/as.dataframe.setx.R
@@ -0,0 +1,15 @@
+#' Coerce a \code{setx} Object into a \code{data.frame}
+#' @usage \method{as.data.frame}{setx}(x, row.names=NULL, optional=FALSE, ...)
+#' @note In subsequent versions of Zelig, this version is expected to undergo
+#'   minor modifications.
+#' @param x a \code{setx} object
+#' @param row.names ignored parameter
+#' @param optional ignored parameter
+#' @param ... ignored parameters
+#' @return the \code{setx} object interpreted as a \code{data.frame}. The
+#'   column-names of the resulting \code{data.frame} are specified by the names
+#'   of the \code{setx} object. The row-names are typically unlabeled.
+#' @S3method as.data.frame setx
+as.data.frame.setx <- function (x, row.names=NULL, optional=FALSE, ...) {
+  x$matrix
+}
diff --git a/R/as.matrix.pooled.setx.R b/R/as.matrix.pooled.setx.R
new file mode 100644
index 0000000..66438f5
--- /dev/null
+++ b/R/as.matrix.pooled.setx.R
@@ -0,0 +1,32 @@
+#' Convert a ``pooled.setx'' Object to a Matrix
+#'
+#' The setx object is, in its most basic form, a list of column names and values
+#' specified for each of these column names. This function simply converts the
+#' key-value pairs of column-name and specified value into a matrix.
+#'
+#' @note This method allows basic matrix arithmetic operations on data objects,
+#' which mirror values stored within setx objects. In many scenarios,
+#' simulations require matrix-multiplication, etc. to be performed on a
+#' data-set. This function facilitates that need.
+#' 
+#' @usage \method{as.matrix}{pooled.setx}(x, ...)
+#' @S3method as.matrix pooled.setx
+#' @param x a setx object
+#' @param ... ignored parameters
+#' @return a matrix containing columns and rows corresponding to the explanatory
+#' variables specified in the call to the 'setx' function
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.matrix.pooled.setx <- function(x, ...) {
+  big.matrix <- NULL
+  for (label in names(x)) {
+    small.matrix <- as.matrix(x[[label]])
+    big.matrix <- rbind(big.matrix, small.matrix)
+  }
+
+  rownames(big.matrix) <- names(x)
+  attr(big.matrix, "labels") <- names(x)
+  attr(big.matrix, "which") <- 1:nrow(big.matrix)
+  names(attr(big.matrix, "which")) <- names(x)
+
+  big.matrix
+}
diff --git a/R/as.matrix.setx.R b/R/as.matrix.setx.R
new file mode 100644
index 0000000..f58645b
--- /dev/null
+++ b/R/as.matrix.setx.R
@@ -0,0 +1,26 @@
+#' Convert a 'setx' Object to a Matrix
+#'
+#' The setx object is, in its most basic form, a list of column names and values
+#' specified for each of these column names. This function simply converts the
+#' key-value pairs of column-name and specified value into a matrix.
+#'
+#' @note This method allows basic matrix arithmetic operations on data objects,
+#' which mirror values stored within setx objects. In many scenarios,
+#' simulations require matrix-multiplication, etc. to be performed on a
+#' data-set. This function facilitates that need.
+#' 
+#' @usage \method{as.matrix}{setx}(x, ...)
+#' @S3method as.matrix setx
+#' @param x a setx object
+#' @param ... ignored parameters
+#' @return a matrix containing columns and rows corresponding to the explanatory
+#' variables specified in the call to the 'setx' function
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.matrix.setx <- function(x, ...) {
+  if (!is.null(x$matrix))
+    #as.matrix(x$updated[, x$explan])
+    x$matrix
+  
+  else
+    stop("unspecified error")
+}
diff --git a/R/as.parameters.R b/R/as.parameters.R
new file mode 100644
index 0000000..38254da
--- /dev/null
+++ b/R/as.parameters.R
@@ -0,0 +1,102 @@
+#' Generic Method for Converting Objects into 'parameters'
+#'
+#' Converts list-style objects into Parameter lists primarily used by the 'qi'
+#' methods. These list-style objects may contain keys specifying: 'link' (the 
+#' link function of a statistical model), 'linkinv' (the inverse-link
+#' function), 'family' (an object of 'family' class used to specify the model's
+#' classification), 'alpha' (a vector of ancillary parameters), and 'simulations'
+#' (a vector of simulated draws from the model's underlying distribution).
+#'
+#' @note Only three scenarios may exist - converting 'parameters' to
+#'   'parameters', 'list' to 'parameters', and vectors to 'parameters'. The
+#'   third in particular is needed only for backwards compatibility, and support
+#'   will likely be deprecated.
+#'
+#'   Furthermore, this function should be exclusively used implicitly and
+#'   by Zelig.
+#' 
+#' @param params the object to be casted
+#' @param ... parameters reserved for future revisions
+#' @return an object of type `parameters'
+#' @seealso as.parameters.list as.parameters.parameters, as.parameters.default
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.parameters <- function(params, ...)
+  UseMethod("as.parameters")
+
+
+#' list -> parameters
+#'
+#' The list may contain: 'link', 'linkinv', 'family', 'alpha', and
+#' 'simulations' keys.
+#'
+#' @param params a list object
+#' @param num an integer specifying the number of simulations
+#'        to be taken
+#' @param ... ignored parameters
+#' @return an object of type `parameters'
+#' @seealso as.parameters
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.parameters.list <- function(params, num=NULL, ...) {
+ #
+  coefficients <- if ("simulations" %in% names(params))
+    params$simulations
+  else if (num < length(params))
+    params[1:num]
+  else
+    params[[1]]
+
+  # Extract alpha parameters from Zelig
+  alpha <- if ("alpha" %in% names(params))
+    params$alpha
+  else if (num < length(params))
+    tail(params, -num)
+
+  # link function
+  if ("link" %in% names(params))
+    link <- params$link
+
+  # link-inverse function
+  if ("linkinv" %in% names(params))
+    linkinv <- params$linkinv
+
+  # family object, has both a link and link-inverse
+  fam <- if ("family" %in% names(params))
+    params$family
+  else if ("fam" %in% names(params))
+    params$fam
+  else
+    NULL
+
+  # Return
+  parameters(coefficients, alpha, fam=fam, link=link, linkinv=linkinv)
+}
+
+#' parameters -> parameters
+#' This is merely an identity function when casting 'parameters' objects into
+#' 'parameters'.
+#' @param params a parameters object
+#' @param ... ignored parameters
+#' @return the same parameter object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.parameters.parameters <- function(params, ...)
+  params
+
+#' ??? -> parameters
+#' @note This function should be deprecated.
+#' @param params any non-supported data-type
+#' @param num an integer specifying the number of simulations to compute
+#' @param ... ignored
+#' @return the object passed in
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.parameters.default <- function(params, num=NULL, ...) {
+  if (!missing(num)) {
+    alpha <- if (num < nrow(params))
+      tail(params, -num)
+
+    #
+    parameters(simulations=head(params, num), alpha=alpha)
+  }
+  
+  else
+    parameters(simulations=params, alpha=NULL)
+}
diff --git a/R/as.qi.R b/R/as.qi.R
new file mode 100644
index 0000000..4832ebd
--- /dev/null
+++ b/R/as.qi.R
@@ -0,0 +1,247 @@
+#' Generic Method for Converting Various Objects into 'qi' Objects
+#' 'qi' objects are list-style objects used by the 'summarize' function to 
+#' compute simple summaries about the simulated data. For readability and
+#' and simplicity purposes, the 'qi' function typically returns a list of
+#' named simulations. This list is converted internally by Zelig into a 'qi'
+#' object so that several methods can be easily applied to the Quantities of
+#' Interest: plot, summarize, and print
+#' @note These functions are primarily used internally by Zelig and should not
+#'   be used in the Global namespace.
+#' @param s the object to be casted
+#' @return an object of type `qi'
+#' @seealso as.qi.default as.qi.qi as.qi.list
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.qi <- function(s)
+  UseMethod("as.qi")
+
+
+#' ??? -> qi
+#'
+#' @param s any unsupported object
+#' @return an object of type `qi'
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.qi.default <- function(s)
+  stop("as.qi does not yet support this data-type")
+
+
+#' qi -> qi
+#'
+#' @param s an object of type `qi'
+#' @return s an object of type `qi'
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.qi.qi <- function(s)
+  s
+
+
+#' list -> qi
+#' This function has a lot of room to go wrong. It tries to detect whether the
+#' zelig model is old-style or new-style (as of 4/4/2011). Eventually this
+#' feature should be phased out.
+#' @note This method has peculiar behavior when the list contains only two
+#' elements. The crucial fix is to simply remove the portion of code which
+#' intentionally implements this peculiar behavior.
+#' @param s a list
+#' @return an object of type `qi'
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.qi.list <- function(s) {
+  #q <- list(titles=list(), stats=list())
+  titles <- list()
+  stats <- list()
+
+  # divide the list into ones with/without keys
+  keys <- splitUp(s)
+
+  fail.names <- paste("qi", 1:length(s), sep="")
+  success.names <- unlist(Map(.acronym, names(s), fail=''))
+  success.names <- .number.list(success.names)
+
+  # create new environment
+  env <- new.env()
+
+  # iterator
+  k <- 1
+
+  long  <- list()
+  short <- list()
+  stats <- list()
+
+  # add the named entries
+  for (title in names(keys$wordful)) {
+    key <- if (regexpr("^[a-zA-Z]", success.names[k]) != -1)
+      success.names[k]
+    else
+      ''
+
+    stats[[key]] <- keys$wordful[[title]]
+    long[[title]] <- key
+    #attr(stats, title) <- key
+
+    # increment counter
+    k <- k + 1
+  }
+
+  attr(stats, ".index") <- long
+
+  q <- stats
+
+  # cast as `qi' object, and return
+  class(q) <- "qi"
+
+  q    
+}
+
+
+#' Print a Quantity of Interest in Human-Readable Form
+#'
+#' Print simulated quantities of interest in a human-readable form
+#' 
+#' @usage \method{print}{qi}(x, ...)
+#' @S3method print qi
+#' @param x a qi object
+#' @param ... ignored parameters
+#' @return the object that was printed (invisibly)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+print.qi <- function(x, ...) {
+  self <- x
+
+  # error-catching
+  if (length(self$titles) != length(self$stats))
+    stop("corrupted object!  titles and stats length mismatch")
+
+  qi.length <- length(self)
+
+  # iterate through
+  for (k in 1:qi.length) {
+    # output title
+    message(self$titles[[k]])
+
+    # output qi
+    print(self$stats[[k]])
+
+    # just to prevent extra end-line
+    if (k != qi.length)
+      message()
+  }
+
+  invisible(x)
+}
+
+
+#' The Names of a 'qi' Object
+#' 
+#' Function to get the names of a 'qi' object. This function does not entirely
+#' parallel the functionality of traditional 'names' methods; this is because
+#' the \code{$} operator has been overloaded to support a unique style of value
+#' extraction. For technical details, please see the source code.
+#' @note No method exists to set the names of a 'qi' object, once it is 
+#' constructed. This will be a feature added later.
+#' @usage \method{names}{qi}(x)
+#' @S3method names qi
+#' @param x a 'qi' object
+#' @return a character-vector containing the names of the Quantities of
+#' Interest
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+names.qi <- function(x) {
+  names(attr(x, ".index"))
+}
+
+
+#' Convert a Vector of Character Strings into Acronyms
+#' This function will convert a vector of character strings into their
+#' appropriately titled acronym forms. That is, the two Quantity of Interest
+#' titles:
+#' \itemize{
+#'    \item "Expected Values (for X): E(Y|X)"
+#'    \item "Expected Values (for X1): E(Y|X1)"
+#' }
+#' The result will be: "ev1" and "ev2". That is, the acronym will not contain
+#' information kept in parentheses or after a colon. 
+#' @note This function currently includes prepositions as parts of acronyms
+#' @param str a vector of character strings to convert into acronymns
+#' @param fail a result to produce upon failure
+#' @return a vector of character-strings
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.acronym <- function(str, fail=str) {
+  ignored.words <- c(
+                     "in", "for", "by",
+                     "the", "a", "an"
+                     )
+  
+  # remove all text after colon
+  # remove trailing whitespace
+  # remove leading whitespace
+  # remove parenthetical statements
+  # remove non-alphanumerics
+  reduced <- sub(':.*$', '', str)
+  reduced <- sub('\\s+$', '', reduced, perl=TRUE)
+  reduced <- sub('^\\s+', '', reduced, perl=TRUE)
+  reduced <- gsub('\\(.*?\\)', '', reduced, perl=TRUE)
+  
+  # if we get an empty string, return whatever the fail value is
+  if (nchar(reduced) < 1)
+    return(fail)
+
+  # splitted is not a word, I know
+  #  1. split the reduced string into non-whitespace characters
+  #  2. take the first letter of each
+  #  3. put into lowercase
+  splitted <- unlist(strsplit(reduced, '\\s+'))
+
+  # remove ignored words
+##   splitted <- Filter(
+##                      function (char) regexpr(
+##                      splitted
+##                      )
+  
+  splitted <- substr(splitted, 1, 1)
+  splitted <- tolower(splitted)
+
+  # remove all non-letters
+  acronym <- Filter(
+                    function (char)
+                    regexpr('^[a-zA-Z]$', char, perl=TRUE),
+                    splitted
+                    )
+
+  # paste together, and return
+  paste(acronym, sep="", collapse="")
+}
+
+
+#' Append Numbers to Identically Valued Strings
+#' This function ensures that vectors of strings are uniquely named.
+#' @note This function is used in tandem with '.acronym' to correctly produce
+#'   short-names for quantities of interest.
+#' @param vec a vector of character-string
+#' @return a vector of character-strings of shorter length. Duplicate hits on
+#'   short-titled names append a number to the end. E.g.: the character vector
+#'   if vec equals c('ev', 'ev', 'pr'), then the result will be:
+#'   c('ev1', 'ev2', 'pr')
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.number.list <- function(vec) {
+  if (!is.character(vec)) {
+    warning()
+    return(vec)
+  }
+
+  final.list <- c()
+  unique.vec <- unique(vec)
+
+  for (k in 1:length(vec)) {
+    val <- vec[k]
+
+    hits <- sum(val == vec[1:k])
+    total.hits <- sum(val == vec)
+
+    final.list[names(vec)[k]] <- if (total.hits > 1)
+      paste(val, hits, sep="")
+    else
+      val
+  }
+
+  # return
+  final.list
+}
diff --git a/R/as.summarized.R b/R/as.summarized.R
new file mode 100644
index 0000000..3d1e46f
--- /dev/null
+++ b/R/as.summarized.R
@@ -0,0 +1,29 @@
+#' Generic Method for Casting Objects as 'summarized' Objects
+#' 
+#' This function is particularly for use by the 'summarize' method, which
+#' summarizes the simulations taken from the 'qi' method. The generic function
+#' 'summary' when applied to a Zelig Simulation implicitly uses this function.
+#' 
+#' @note This is made available on the Global namespace as a matter of potential
+#' future compliancy.
+#' @param x an object
+#' @param ... unspecified parameters
+#' @return a 'summarized.qi' object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.summarized <- function(x, ...) {
+  UseMethod("as.summarized")
+}
+
+#' summarized.qi -> summarized.qi
+#' 
+#' Identity operation on ``summarized.qi'' objects
+#' @usage \method{as.summarized}{summarized.qi}(x, ...)
+#' @param x an object of type 'summarized.qi'
+#' @param ... ignored parameters
+#' @return the same 'summarized.qi' object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.summarized.summarized.qi <- function(x, ...) {
+  x
+}
diff --git a/R/as.summarized.list.R b/R/as.summarized.list.R
new file mode 100644
index 0000000..68d4eda
--- /dev/null
+++ b/R/as.summarized.list.R
@@ -0,0 +1,12 @@
+#' list -> summarized.qi
+#' Convert a list into a ``summarized.qi'' object
+#' @usage \method{as.summarized}{list}(x, ...)
+#' @param x a list
+#' @param ... ignored parameters
+#' @return a ``summarized.qi'' object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.summarized.list <- function(x, ...) {
+  class(x) <- "summarized.qi"
+  x
+}
diff --git a/R/attach.env.R b/R/attach.env.R
new file mode 100644
index 0000000..01a916b
--- /dev/null
+++ b/R/attach.env.R
@@ -0,0 +1,55 @@
+#' Attach Variables to a Function
+#'
+#' Returns a function, specified by the user, with the variables of a specified
+#' environment attached. This, in essence, allows programmers to write functions
+#' that have forms of private memory. This makes the function behave similarly
+#' to an object.
+#' 
+#' @note This function is used by Zelig to ensure that particular method calls -
+#' param, qi, bootstrap - will contain the private variables: ``.fitted'',
+#' ``.model'', ``.call'' and ``.env'' which respectively contain the fitted
+#' model object, the name of the zelig model being invoked, the original call
+#' to the model-fitting function and the environment in which to call the
+#' function call.
+#'
+#' @param f a function which will be modified
+#' @param env an environment variable which will be attached to the function
+#' being returned
+#' @param ... arbitrary key-value paired parameters which will be assigned to
+#' the environment of the function being returned
+#' @return the original function ``f'' with a different environment attached to
+#' it.
+#'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+attach.env <- function (f, env = NULL, ...) {
+
+  # Ensure that a valid environment is passed in
+  if (is.null(env))
+    env <- new.env()
+
+
+  # Expand dot parameters
+  dots <- list(...)
+
+  # Ensure that "env" is a valid environment
+  if (is.null(env))
+    env <- new.env()
+
+  else if (!inherits(env, "environment")) {
+    warning('Environment "env" is not a valid environment variable. ',
+            'A default environment will be applied to "f" instead.')
+    env <- new.env()
+  }
+
+  if (length(dots)) {
+    # Add variables to the newly created environment
+    for (key in names(dots))
+      assign(key, dots[[key]], env)
+  }
+
+  # Modify the default environment of the function
+  environment(f) <- env
+
+  # Return the modified function
+  f
+}
diff --git a/R/bootfn.default.R b/R/bootfn.default.R
index 6dcbef3..b71a005 100644
--- a/R/bootfn.default.R
+++ b/R/bootfn.default.R
@@ -1,13 +1,249 @@
-bootfn.default <- function(data, i, object) {
+#' Default Boot-strapping procedure
+#' 
+#' The default procedure for extracting bootstrap information. Note that this
+#' method re-fits the data and resamples the data frequently. This is a good
+#' candidate for fixing-up.
+#'
+#' @param data a data.frame
+#' @param i an integer or character-string specifying the index of the row to
+#' be used in the bootstrapping procedure.
+#' @param object the fitted model object
+#' @param bootstrapfn a function used to bootstrap the object
+#' @param num an integer specifying the number of samples to simulate
+#' @param ... unspecified parameters
+#' @return a list of parameters
+bootfn.default <- function(data, i, object, bootstrapfn=NULL, num, ...) {
+
+  # This is mostly here to squelch R-check notes, however sloppy programming
+  # can potentially prevent the ".model" variable from being defined in the
+  # attached environment. To make sense of this line, see the "sim.default"
+  # function where an environment (containing the variable ".model") is
+  # explicitly attached to the boot function
+  if (!exists(".model"))
+    .model <- "default"
+
+  # Get a random sample of the data set
   d <- data[i,]
-  object$call$data <- d
-  l <- length(param(object, bootstrap = TRUE))
-  fit <- eval(object$call, sys.parent())
-  l1 <- length(param(fit, bootstrap = TRUE))
-  while (l > l1) {
-    object$call$data <- data[sample(nrow(data), replace=TRUE),]
-    fit <- eval(object$call, sys.parent())
-    l1 <- length(param(fit, bootstrap = TRUE))
-  }
-  param(fit, bootstrap = TRUE)
+
+  # Extract the call object
+  # Naming it "jeez" because it's really hard to find names for call objects
+  # that are meaningful and not reserved for other functions
+  jeez <- .call
+
+  # Replace the data frame with an appropriate one
+  jeez$data <- d
+
+  .env <- if (exists('.env'))
+    .env
+  else
+    NULL
+
+  # Fit the model
+  fit <- eval(jeez)
+
+  # If "bootstrapfn" is unspecified, then we try to search its appropriate value
+  # down
+  if (is.null(bootstrapfn))
+    bootstrapfn <- getS3method("bootstrap", .model, TRUE)
+
+  # If bootstrap search came up sour, get default
+  if (is.null(bootstrapfn))
+    bootstrapfn <- Zelig:::bootstrap.default
+
+  # Attach the ".num" private variable
+  bootstrapfn <- attach.env(bootstrapfn, NULL, .num = num, .fitted = object)
+
+  # Get a result
+  res <- bootstrapfn(fit)
+
+  # Return vectorized bootstrap simulation to "boot" function
+  as.bootvector(res)$vector
+}
+
+#' Convert Boot Object to a Vector
+#'
+#' Receives a list with 2 slots as its input, and returns a vector of the two
+#' smashed together along with the offsets used to reverse-construct the object.
+#'
+#' @note This method is used internally by Zelig to allow an intuitive,
+#' ``param''-like API for bootstrapping.
+#'
+#' @param obj a list with two slots: ``alpha'' and ``beta''. Respectively, these
+#' represent bootstrap samples for ancillary parameters and systematic
+#' component of the bootstrapped GLM.
+#' @return a list containing the resulting vector, as well as an object used to
+#' reverse-build the list (``obj'') from the resulting call to ``bootstrap''.
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.bootvector <- function (obj) {
+
+  # If this fails, something is really wrong.
+  a <- obj$alpha
+  b <- obj$beta
+
+  # Error-checking
+  if (!(is.vector(a) || is.null(a)))
+    stop('The "alpha" slot of "obj" must be a vector or NULL.')
+
+  if (!(is.vector(b)))
+    stop('The "beta" slot of "obj" must be a vector')
+
+  # Return
+  list(
+       # For antiquity, beta should be placed before alpha. This is partially
+       # because alpha is not always specified.
+       vector = c(b, a),
+
+       # The respective lengths of each vector
+       lengths = c(beta = length(b), alpha = length(a)),
+
+       # Names
+       names = list(beta = names(b), alpha = names(a))
+       )
+}
+
+#' Convert of Vector of Bootstrapped Parameters to a List-style Boot Object
+#'
+#' This inverts the ``as.bootvector'' function, and returns a list containing
+#' the slots ``alpha'' and ``beta''.
+#'
+#' @param bootstraps ...
+#' @param lengths ...
+#' @param names a character-vector specifying the names of the boot terms
+#' @return ...
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.bootlist <- function (bootstraps, lengths, names) {
+
+  # Error-checking. "bootstraps" and "lengths" must:
+  #  1. "bootstraps" must be a matrix && have at least 1 value
+  #  2. "lengths" must be a vector
+  #  3. The sum of all the lengths must perfectly add up to the number of
+  #     columns in bootstraps
+  if (!is.matrix(bootstraps) && ncol(bootstraps) > 0 && nrow(bootstraps) > 0)
+    stop('The parameter "bootstraps" must be a matrix')
+
+  if (!is.vector(lengths))
+    stop('The parameter "lengths" must be a vector.')
+
+  if (sum(lengths) != ncol(bootstraps))
+    stop('The parameters "bootstraps" and "lengths" must be ',
+         'the same length.'
+         )
+
+  # Actual work begins here. This could be made more general, but if there's
+  # more info besides "alpha" and "beta", it's not very much like a bootstrap...
+  # In the future, we might need to add support for "link", "inverse link" and
+  # "family" slots, but there is overlap here with the "param" method.
+
+  # Note that to make sense of the below, it has to be understood that the
+  # canonical form of these bootstrapped values is:
+  # (beta, alpha)
+  # where "beta" is several columns of systematic parameters and
+  # "alpha" is several columns of ancillary parameters
+  a <- b <- NULL
+
+  # If beta is 0-sized, then we should ignore it
+  if (lengths[["beta"]] > 0) {
+    # Extract the "beta" portion of "bootstraps". These values should represent
+    # the systematic parameters
+    b <- bootstraps[ , 1:lengths[["beta"]] ]
+
+    # Change the column names of the system's parameter (beta) simulations
+    b <- name.object(b, names$beta)
+  }
+
+  # Note that 1 + 1:2 is 2:3, so that this statement offsets subsetting by the
+  # length of "a". 
+  if (lengths[["alpha"]] > 0) {
+    # Extract several columns from "bootstraps". These values should represent
+    # the model's ancillary parameters
+    a <- bootstraps[ , lengths[["beta"]] + 1:lengths[["alpha"]] ]
+
+    # Change the column names of the ancillary parameter (alpha) simulations
+    a <- name.object(a, names$alpha)
+  }
+
+  # Return the appropriate
+  list(alpha = a, beta = b)
+}
+
+#' Name Elements of an Object
+#'
+#' Returns an object
+#' @note This method is used internally by Zelig to name the columns and
+#' elements of matrices and vectors for simulations and bootstrapped parameters.
+#' @param obj a vector or matrix
+#' @param names a character-vector specifying names
+#' @return the original object, with a "colnames" or "names" equal to the
+#' parameter "names". If "names" is larger than "obj", the "names" parameter
+#' is truncated appropriately. If it is smaller, then the latter part of "obj"
+#' is replaced with a numbered generic column name
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+name.object <- function (obj, names) {
+
+  # Handle the special case, which shouldn't really happen...
+  if (is.null(names)) {
+    if (is.matrix(obj))
+      colnames(obj) <- NULL
+    else if (is.vector(obj))
+      names(obj) <- NULL
+    return(obj)
+  }
+
+  # Get the length of names
+  names.len <- length(names)
+
+  # Get the 'length' of the object, regardless of whether it is a vector or
+  # matrix. Note that in our case, length is equivalent to "ncol" if the
+  # object is a matrix
+  obj.len <- if (is.matrix(obj))
+    ncol(obj)
+  else if (is.vector(obj))
+    length(obj)
+  else {
+    # Warn the user. This might be necessary, but it helps debug for
+    # developers. Ideally this case never crops up in well-made Zelig models
+    warning('"name.object" ignores objects that are not matrices or vectors')
+
+    # Bail out of the function
+    return(obj)
+  }
+
+  # Ensure that names is the exact length of "obj" by
+  if (names.len < obj.len) {
+    # Create vector equal in size to the length of the object being named
+    temp <- paste(rep("col", obj.len), 1:obj.len, sep = "")
+
+    # Replace default values (col1, col2, ... colN) with the value that
+    # *should* there in a perfect world, where there is never any glitchy code
+    temp[1:names.len] <- names
+
+    # Replace "names" with the newly constructed, appropriately size, vector
+    # of names
+    names <- temp
+  }
+
+  # Truncate the "names" parameter if it is too large
+  else if (names.len > obj.len) {
+    # Warn the user. This is probably only useful/meaningful to developers. 
+    # This case should not crop up in well made Zelig models.
+    warning('"names.object" is truncating the names parameter, because it ',
+            'is larger than "obj" the object of the function.')
+
+    # Truncate "names"
+    names <- names[1:obj.len]
+  }
+
+  # After all the prep work, finally name the object
+  if (is.matrix(obj))
+    colnames(obj) <- names
+
+  else if (is.vector(obj))
+    names(obj) <- names
+
+  else
+    warning('"obj" must be a matrix or a vector. ',
+            'Returning the "obj" untouched.')
+
+  # Return modified object
+  obj
 }
diff --git a/R/bootstrap.R b/R/bootstrap.R
new file mode 100644
index 0000000..a455338
--- /dev/null
+++ b/R/bootstrap.R
@@ -0,0 +1,35 @@
+#' Generic Method for ``bootstrap''
+#'
+#' This method is intended to be overridden by statistical models that would like
+#' to support statistical bootstrapping.
+#' @note This method has private memory storage and can reference the objects:
+#' ``.fitted'', ``.data'', ``.call'', ``.env'', despite having no declaration in
+#' the argument list.
+#' @param obj a fitted model object that will be used to produce boot-strapped
+#' parameters. This object usually inherits the class ``glm'' or ``lm'' object
+#' @param ... unspecified parameters
+#' @return a list with the ``alpha'' and ``beta'' slots set. Note that ``alpha''
+#' corresponds to ancillary parameters and ``beta'' corresponds to systematic
+#' components of the model
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+bootstrap <- function (obj, ...)
+  UseMethod("bootstrap")
+
+#' Produce Boot-strapped Parameters for a Statistical Model
+#'
+#' This method is a fallback for bootstrapping models that do not have a defined
+#' ``bootstrap'' method. For most models, this default is sufficient, so long as
+#' the model follows the usual convention that ``coef(obj)'' returns the
+#' systematic parameters of a fitted model.
+#' @usage \method{bootstrap}{default}(obj, ...)
+#' @S3method bootstrap default
+#' @param obj a fitted model object. This is typically of type ``glm'' or ``lm''
+#' @param ... unspecified parameters
+#' @return a list with the ``alpha'' and ``beta'' slots set
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+bootstrap.default <- function (obj, ...)
+  list(
+       alpha = NULL,
+       beta = coef(obj)
+       )
diff --git a/R/bootstrap.gamma.R b/R/bootstrap.gamma.R
new file mode 100644
index 0000000..f3c52a9
--- /dev/null
+++ b/R/bootstrap.gamma.R
@@ -0,0 +1,17 @@
+#' Bootstrap Parameters for Zelig ``gamma'' GLM
+#'
+#' Returns bootstrapped parameter estimates for a ``gamma'' GLM.
+#' @usage \method{bootstrap}{gamma}(obj, ...)
+#' @S3method bootstrap gamma
+#' @param obj a ``zelig'' object that will be used to produce boot-strapped
+#' parameters
+#' @param ... extra parameters to be passed to the ``boot'' method. These are
+#' typically ignored, but is included for further expansion.
+#' @return a list containing information concerning link, link-inverses, etc.
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+bootstrap.gamma <- function (obj, ...) {
+  list(
+       alpha = gamma.shape(.fitted)$alpha,
+       beta = coef(.fitted)
+       )
+}
diff --git a/R/bootstrap.negbinom.R b/R/bootstrap.negbinom.R
new file mode 100644
index 0000000..699ab79
--- /dev/null
+++ b/R/bootstrap.negbinom.R
@@ -0,0 +1,17 @@
+#' Bootstrap Parameters for Zelig ``negbinom'' GLM
+#'
+#' Returns bootstrapped parameter estimates for a negative-binomial GLM.
+#' @usage \method{bootstrap}{negbinom}(obj, ...)
+#' @S3method bootstrap negbinom
+#' @param obj a ``zelig'' object that will be used to produce boot-strapped
+#' parameters
+#' @param ... extra parameters to be passed to the ``boot'' method. These are
+#' typically ignored, but is included for further expansion.
+#' @return a list containing information concerning link, link-inverses, etc.
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+bootstrap.negbinom <- function (obj, ...) {
+  list(
+       alpha = .fitted$theta,
+       beta = coef(.fitted)
+       )
+}
diff --git a/R/bootstrap.normal.R b/R/bootstrap.normal.R
new file mode 100644
index 0000000..3809401
--- /dev/null
+++ b/R/bootstrap.normal.R
@@ -0,0 +1,23 @@
+#' Bootstrap Parameters for Zelig ``normal'' GLM
+#'
+#' Returns bootstrapped parameter estimates for a Gaussian GLM.
+#' @usage \method{bootstrap}{normal}(obj, num, ...)
+#' @S3method bootstrap normal
+#' @param obj a ``zelig'' object that will be used to produce boot-strapped
+#' parameters
+#' @param num an integer specifying the number of simulations to produce
+#' @param ... extra parameters to be passed to the ``boot'' method. These are
+#' typically ignored, but is included for further expansion.
+#' @return a list containing information concerning link, link-inverses, etc.
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+bootstrap.normal <- function (obj, num, ...) {
+
+  degrees.freedom <- obj[["df.residual"]]
+  sig2 <- summary(obj)$dispersion
+  alpha <- sqrt(degrees.freedom * sig2 / rchisq(20, degrees.freedom))
+
+  list(
+       alpha = alpha,
+       beta = coef(obj)
+       )
+}
diff --git a/R/callToString.R b/R/callToString.R
new file mode 100644
index 0000000..16e5d88
--- /dev/null
+++ b/R/callToString.R
@@ -0,0 +1,10 @@
+#' Convert \code{call} Object to a String
+#'
+#' This method concerts \code{call} objects into a simple, intuitive 
+#' human-readable form.
+#' @param x a \code{call} object
+#' @param ... ignored parameters
+#' @return a character-string representing the \code{call} object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+callToString <- function (x, ...)
+  as.character(as.expression(x))
diff --git a/R/callnetlm.R b/R/callnetlm.R
deleted file mode 100644
index 82173c5..0000000
--- a/R/callnetlm.R
+++ /dev/null
@@ -1,62 +0,0 @@
-callnetlm <- function (formula, data, ...) 
-{
-    Terms <- terms(formula)
-    intercept.value <- attr(Terms, "intercept") 
-    if (intercept.value > 0){
-    	intercept = TRUE
-    	}
-    if (intercept.value == 0){
-    	intercept = FALSE
-    	} 
-	if (missing(data)) 
-			data <- environment(formula)
-	cl <- match.call()
-	mf <- match.call(expand.dots = FALSE)
-    m <- match(c("formula", "data"), names(mf), 0)
-    mf <- mf[c(1, m)]
-    mf$drop.unused.levels <- TRUE
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame())
-    mt <- attr(mf, "terms")
-	D <- model.frame(formula, data = data)
-	y <- D[[1]]
-	#x.array.names <- as.list(for(i in 2:length(D)) {names(D[[i]])})
-	x <- array(dim=c((length(D) - 1), nrow(y), ncol(y)))
-	for(i in 2:length(D)) {
-		x[i - 1,,] <- D[[i]]	}
-	if (intercept == TRUE){
-	fit <- netlm(y, x, intercept=TRUE,...)
-		}
-	if (intercept == FALSE){
-	fit <- netlm(y, x, intercept=FALSE, ...)
-		}
-    fit$names <- names(mf[2:length(mf)])
-	#fit$names <- names(mf[2:stackcount(mf)])  
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-	mm <- model.matrix(mt, mf, contrasts)
-	fit$contrasts <- attr(x, "contrasts")
-	fit$call <- cl
-	fit$terms <- mt
-	fit$model <- mf
-	fit$mm <- mm
-	fit$x <- x
-	fit$y <- y
-	fit$xlevels <- .getXlevels(mt, mf)
-	fit <- c(fit, list(call = call, formula = formula, terms = mt, 
-	data = data, xlevels = .getXlevels(mt, mf)))
-	new.data <- as.data.frame(as.vector(data[,1]))
-	for(i in 2:ncol(data)){
-	new.data <- cbind(new.data, as.vector(data[,i])) } 
-	names(new.data) <- names(data)
-	fit$zelig.data <- new.data
-	so <- summary.lm(fit)
-	fit$sigma <- so$sigma
-	fit$r.squared <- so$r.squared
-	fit$adj.r.squared <- so$adj.r.squared
-	fit$cov.unscaled <- so$cov.unscaled
-	fit$mod.coefficients <- so$coefficients
-	class(fit) <- "netlm"
-    return(fit)
-}
diff --git a/R/callnetlogit.R b/R/callnetlogit.R
deleted file mode 100644
index 41890b7..0000000
--- a/R/callnetlogit.R
+++ /dev/null
@@ -1,53 +0,0 @@
-calllogit.net <- function(formula, data, family = binomial(link=logit), ...)
-{
-    Terms <- terms(formula)
-    intercept.value <- attr(Terms, "intercept") 
-    if (intercept.value > 0){
-    	intercept = TRUE
-    	}
-    if (intercept.value == 0){
-    	intercept = FALSE
-    	} 
-	if (missing(data)) 
-			data <- environment(formula)		
-	mf <- match.call(expand.dots = FALSE)	
-    m <- match(c("formula", "data", "weights"), names(mf), 0)
-    mf <- mf[c(1, m)]
-    mf$drop.unused.levels <- TRUE
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame())
-    mt <- attr(mf, "terms")
-	D <- model.frame(formula, data = data)
-	y <- D[[1]]
-	#x.array.names <- as.list(for(i in 2:length(D)) {names(D[[i]])})
-	x <- array(dim=c((length(D) - 1), nrow(y), ncol(y)))
-	for(i in 2:length(D)) {
-		x[i - 1,,] <- D[[i]]	}
-	if (intercept == TRUE){
-	fit <- logit.net.zelig(y, x, intercept=TRUE,...)
-		}
-	if (intercept == FALSE){
-	fit <- logit.net.zelig(y, x, intercept=FALSE, ...)
-		}
-    fit$names <- names(mf[2:length(mf)])
-	#fit$names <- names(mf[2:stackcount(mf)])  # paste("x", 1:(nx - intercept), sep = "")
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-	fit$xlevels <- .getXlevels(mt, mf)
-	fit <- c(fit, list(call = call, formula = formula, terms = mt, 
-	data = data, xlevels = .getXlevels(mt, mf)))
-		new.data <- as.data.frame(as.vector(data[,1]))
-	for(i in 2:ncol(data)){
-	new.data <- cbind(new.data, as.vector(data[,i])) } 
-	names(new.data) <- names(data)
-	fit$zelig.data <- new.data
-	fit$family <- family
-	fit$rank <- fit$df.model
-	so <- summary.glm(fit)
-	fit$mod.coefficients <- so$coefficients
-	fit$cov.unscaled <- so$cov.unscaled
-	fit$cov.scaled <- so$cov.scaled
-    class(fit) <- "logit.net"
-    return(fit)
-}
diff --git a/R/callsystemfit.R b/R/callsystemfit.R
deleted file mode 100644
index 438b194..0000000
--- a/R/callsystemfit.R
+++ /dev/null
@@ -1,8 +0,0 @@
-callsystemfit<-function(formula,data,method,inst=NULL,...){
-        out<-systemfit(data=data,formula=formula,method=method,inst=inst,...)
-        class(formula)<-c("multiple","list")
-        t<-terms(formula)
-        attr(out,"terms")<-t
-        class(out)<-c("multiple",class(out))
-        return (out)
-}
diff --git a/R/categories.R b/R/categories.R
deleted file mode 100644
index a339d99..0000000
--- a/R/categories.R
+++ /dev/null
@@ -1,10 +0,0 @@
-categories <-function(){
-list(continuous="Models for Continuous Dependent Variables",
-     dichotomous="Models for Dichotomous Dependent Variables",
-     ordinal="Models for Ordinal Dependent Variables",
-     bounded="Models for Continous Bounded Dependent Variables",
-     multinomial="Multinomial Choice Models",
-     count="Event Count Models",
-     mixed="Models for Mixed Dependent Variables",
-     ei="Ecological Inference Models")
-}
diff --git a/R/check.describe.R b/R/check.describe.R
deleted file mode 100644
index c612263..0000000
--- a/R/check.describe.R
+++ /dev/null
@@ -1,50 +0,0 @@
-check.describe<-function(mymodel){
-  firstLvlNames<-c("category","description","package","parameters")
-  fn <- paste("describe.", mymodel, sep = "")
-  if (!exists(fn))
-    stop("Nothing to check ... The function describe.",mymodel,"does not exist")
-  z<-do.call(fn,list())
-
-  #any extra name in the list??
-  whiche<-which(!(names(z) %in% firstLvlNames))
-  if (length(whiche)!=0){
-    tmpstr<-names(z)[[whiche[[1]]]]
-    if(length(whiche)>1)
-    for(i in 2:length(whiche))
-      tmpstr<-paste(tmpstr,names(z)[[whiche[[i]]]],sep=",")
-    stop ("Unknown names in your list: ",tmpstr)
-  }
-  errmsg<-" is missing. It's required ..."
-  if(is.null(z$category))
-    stop("\"category\"", errmsg)
-  else
-    if(!(z$category %in% names(categories())))
-      stop("unknown category \"",z$category, "\"")
-  if(is.null(z$parameters)) stop("\"parameters\"",errmsg)
-
-  for (i in length(z$parameters)){
-    eqns<-z$parameters[[i]]$equations
-  if(is.null(eqns))  stop("\"equations\"",errmsg)
-  if(length(eqns)!=2) stop("equations must be an vector of length 2")
-  if(!(eqns[[2]] <= 999 || !(is.finite(eqns[[2]]))) ) stop("The maximum number of equations for each paramter should be <=999 or \"Inf\"..")
-
-    tags<-z$parameters[[i]]$tagsAllowed
-    if (is.null(tags)) stop ("\"tagsAllowed\"",errmsg)
-    if(!is.logical(tags)) stop("\"tagsAllowed\" must have a logical value (\"TRUE\" or \"FALSE\")")
-
-        dep<-z$parameters[[i]]$depVar
-    if (is.null(dep)) stop ("\"depVar\"",errmsg)
-    if(!is.logical(dep)) stop("\"depVar\" must have a logical value (\"TRUE\" or \"FALSE\")")
-
-        exp<-z$parameters[[i]]$expVar
-    if (is.null(exp)) stop ("\"expVar\"",errmsg)
-    if(!is.logical(exp)) stop("\"expVar\" must have a logical value (\"TRUE\" or \"FALSE\")")
-
-        tags<-z$parameters[[i]]$tagsAllowed
-    if (is.null(tags)) stop ("\"tagsAllowed\"",errmsg)
-    if(!is.logical(tags)) stop("\"tagsAllowed\" must have a logical value (\"TRUE\" or \"FALSE\")")
-  }
-  
-  cat("Congratulation, your function \"",fn, "\" passed this test\n")
-}
-
diff --git a/R/chopitcode.R b/R/chopitcode.R
deleted file mode 100644
index a55e0fa..0000000
--- a/R/chopitcode.R
+++ /dev/null
@@ -1,245 +0,0 @@
-## Need to register the following in the NAMESPACE
-##    S3method(coef, chopit)
-##    S3method(vcov, chopit)
-##    S3method(qi, chopit)
-
-coef.chopit <- function(object, ...) {
-  object$chopit.optim$par
-}
-
-vcov.chopit <- function(object, ...) {
-  solve(object$chopit.hess)
-} 
-
-print.chopit <- function(x, ...) {
-  cat("Use summary() for more information.\n")
-}
-
-zelig2chopit <- function(formula, model, data, M, ...) {
-  require(anchors)
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- NULL
-  mf$M <- mf$robust <- NULL
-  mf[[1]] <- prechop
-  as.call(mf)
-}
-
-prechop <- function(formula, data, ...) {
-  "%w/o%" <- function(x,y) x[!x %in% y]
-  ## Setting up complete formula
-  if (is.list(formula)) { 
-    fs <- sapply(formula$self, deparse, width.cutoff = 500)
-    fv <- sapply(formula$vign, deparse, width.cutoff = 500)
-    ft <- sapply(formula$tau, deparse, width.cutoff = 500)
-  } else { stop("formula must be a list of formula objects with names `self', `vign', and `tau'.") }
-  s2 <- unlist(strsplit(fs[2], "cbind(", fixed = TRUE))
-  s2 <- unlist(strsplit(s2, ",", fixed = TRUE))
-  s2 <- unlist(strsplit(s2, ")", fixed = TRUE))
-  v2 <- unlist(strsplit(fv[2], "cbind(", fixed = TRUE))
-  v2 <- unlist(strsplit(v2, ",", fixed = TRUE))
-  v2 <- unlist(strsplit(v2, ")", fixed = TRUE))
-  xs <- all.vars(formula$self) %w/o% s2
-  xt <- all.vars(formula$tau)
-  xx <- unique(c(xt, xs))
-  f1 <- as.formula(paste("~", paste(s2, collapse = "+"), "+",
-                         paste(xs, collapse = "+")))
-  f2a <- as.formula(paste("~", paste(xt, collapse = "+")))
-  f2b <- as.formula(paste("~", paste(v2, collapse = "+")))
-  f2c <- as.formula(paste("~", paste(v2, collapse = "+"), "+",
-                          paste(xt, collapse = "+")))
-
-  ## Setting up complete data frame
-  if (is.list(data) & !is.data.frame(data)) {
-    if (all( c("self", "vign") %in% names(data))) {
-      DD <- model.frame(formula$self, data = data$self)
-      D1 <- model.frame(f1, data = data$self)
-      D2a <- rownames(model.frame(f2a, data = data$vign))
-      D2b <- model.frame(f2b, data = data$vign, na.action = NULL)
-    } else if (length(data) == 2){
-      DD <- model.frame(as.formula(formula[[1]]), data = data[[1]])
-      D1 <- model.frame(f1, data = data[[1]])
-      D2a <- rownames(model.frame(f2a, data = data[[2]]))
-      D2b <- model.frame(f2b, data = data[[2]], na.action = NULL)
-    } else if (length(data) > 2) { stop("data cannot have more than two data frames.") } 
-    ridx <- names(which(apply(!apply(D2b, 1, is.na), 2, all)))
-    tmp <- data$vign[ridx %in% c(rownames(data$vign), D2a), c(names(D2b), xx)]
-    D2 <- model.frame(f2c, data = tmp)
-    remove(tmp)
-    idx1 <- names(D1)
-    idx2 <- names(D2)
-    miss1 <- idx1[!(idx1 %in% idx2)]
-    miss2 <- idx2[!(idx2 %in% idx1)]
-    for (i in miss1) { D2[[i]] <- rep(NA, length = nrow(D2)) }
-    for (i in miss2) { D1[[i]] <- rep(NA, length = nrow(D1)) } 
-    data <- rbind(D1, D2)
-  } else if (is.data.frame(data)) {
-    D1 <- data <- data
-  }
-  else { stop("data must be either a data frame or list of data frames") } 
-  out <- chopit(formula, data = data, ...)
-  out$par <- out$chopit.optim$par
-  out$value <- out$chopit.optim$value
-  out$counts <- out$chopit.optim$counts
-  out$convergence <- out$chopit.optim$convergence
-  out$message <- out$chopit.optim$message
-  out$formula <- list()
-  out$formula$self <- as.formula(formula$self)
-  out$formula$tau <- as.formula(formula$tau)
-  out$formula$vign <- as.formula(formula$vign)
-  out$data <- D1
-  tt <- attr(D1, "terms")
-  tt[[3]] <- all.vars(tt)[1]
-  attr(tt, "factors") <-  attr(tt, "factors")[,-1]
-  attr(tt, "response") <- 1
-  attr(out, "terms") <- out$terms <- attr(out$data, "terms") <- tt
-#  ttt <- attr(tt,"dataClasses")
-#  ttt[ttt == "factor"] <- "character"
-#  attr(tt, "dataClasses") <- ttt
-
-  out
-}
-
-qi.chopit <- function(object, simpar, x, x1 = NULL, y = NULL) {
-
-  getcut <- function(x) x[1]
-  getpars <- function(x) x[2]
-  
-  getsimpars <- function(simpar, k, name) {
-    idx <- match(name, names(k))
-    if (idx == 1) { k1 <- 1
-    } else k1 <- sum(k[1:(idx - 1)]) + 1
-    k2 <- sum(k[1:idx])
-    if ((k2 - k1) > 0) {
-      return(simpar[, k1:k2])
-    } else return(NULL)
-  }
-
-#  keeptau <- function(xx, yy, mu, sigma) {
-#     out <- NULL
-#    out[one] <- pnorm(xx[yy[one]], mean = mu[one], sd = sigma)
-#    out[last] <- 1 - pnorm(xx[(yy[last]-1)], mean = mu[last], sd = sigma)
-#    out[inter] <- pnorm(xx[yy[inter]], mean = mu[inter], sd = sigma) - 
-#      pnorm(xx[(yy[inter]-1)], mean = mu[inter], sd = sigma)
-#    out
-#  }
-
-  makepr <- function(mu, xx, nt, y, control) {
-    tau <- xx[1:nt]
-    sigma <- xx[nt+1]
-    switch(as.character(control), "1" = pnorm((tau[1] - mu) / sigma),
-           "2" = pnorm((tau[y] - mu) / sigma) - pnorm((tau[y-1] - mu) / sigma),
-           "3" = 1 - pnorm((tau[nt] - mu) / sigma))
-  }
-
-  fn <- function(mu, xxx, nt, control, y, psd) {
-    pm <- xxx[(nt + 2)]
-    p1 <- makepr(mu = mu, xx = xxx[1:(nt+1)], nt = nt, control = control,
-                 y = y)
-    mu * p1 * dnorm((mu - pm) / psd)
-  }
-
-  fint <- function(xxx, nt, control, psd, y) {
-    integrate(fn, lower = -Inf, upper = Inf,
-              xxx = xxx, nt = nt, control = control,
-              psd = psd, y = y)$value
-  }
-  
-  num <- nrow(simpar)
-  labs <- object$chopit.parm$labels
-  ests <- object$chopit.parm$estimated
-  for (i in names(labs)) labs[[i]] <- labs[[i]][ests[[i]]]
-  k <- sapply(labs, length)
-  colnames(simpar) <- unlist(labs)
-
-  gpars <- getsimpars(simpar, k, name = "gamma")
-  lnse.re <- getsimpars(simpar, k, name = "lnse.re")
-  if (is.null(lnse.re)) { omega <- 0 } else { omega <- exp(lnse.re) }
-  lnse.self <- getsimpars(simpar, k, name = "lnse.self")
-  if (is.null(lnse.self)) { sigma <- 1 } else { sigma <- exp(lnse.self)}
-  ## Only need lnse.vign = sigmaj for distn in footnote 8, p.209 (APSR)
-  ##  lnse.vign <- getsimpars(simpar, k, name = "lnse.vign")
-  ##  if (is.na(match("lnse.vign.vign1", colnames(lnse.vign)))) {
-  ##    lnse.vign <- cbind(lnse.vign1 = rep(0, num), lnse.vign, rep(1, num))
-  ##    colnames(lnse.vign)[ncol(lnse.vign)] <-
-  ##      paste("lnse.vign.vign", ncol(lnse.vign), sep = "")
-  ##  }
-  ## sigmaj <- exp(lnse.vign)  
-  ## theta1 <- getsimpars(simpar, k, name = "theta1")
-  beta <- getsimpars(simpar, k, name = "beta")
-
-  fmls <- object$formula
-  if (length(names(fmls)) == 3) {
-    ft <- fmls$tau
-    fs <- fmls$self
-  } else {
-    ft <- fmls[[3]]
-    fs <- fmls[[1]]
-  }
-  
-  vid <- sort(ncol(vcov(object)) - (0:(length(labs$beta) - 1)))
-  Vbeta <- vcov(object)[vid,vid]
-  X <- as.matrix(x)[,-1, drop = FALSE]
-  ev <- X %*% t(beta)
-  qi <- list(ev = t(ev))
-  qi.name <- list(ev = "Expected value: E(mu|x)")
-  if (!is.null(x1)) {
-    X1 <- as.matrix(x1)[,-1, drop = FALSE]
-    ev1 <- X1 %*% t(beta)
-    qi$fd <- t(ev1 - ev)
-    qi.name$fd <- "First Differences in Expected Value: E(mu|x1) - E(mu|x)"
-  }
-  if (!is.null(y)) {
-    pmean <- X %*% t(beta)
-    pvar <- diag(X %*% Vbeta %*% t(X) + omega^2)
-    tmp <- strsplit(unlist(strsplit(labs$gamma, "gamma1.", fixed = TRUE)),
-                    ".", fixed = TRUE)
-    cuts <- na.omit(unique(sapply(tmp, getcut)))
-    pars <- na.omit(unique(sapply(tmp, getpars)))
-    gamma <- array(NA, dim = c(num, length(pars), length(cuts)),
-                    dimnames = list(NULL, pars, cuts))
-    for (i in cuts) {
-      tmp <- paste("gamma1", i, pars, sep = ".")
-      idx <- match(tmp, labs$gamma)
-      gamma[,,i] <- simpar[,idx]
-    }
-    
-    check <- identical(deparse(ft[[length(ft)]], width.cutoff = 500),
-                       deparse(fs[[3]], width.cutoff = 500))
-    checkInt <- "(Intercept)" %in% pars
-    if (check) {
-      V <- X
-    } else {
-      xt <- all.vars(ft)
-      xidx <- NULL
-      for (ii in 1:length(xt)) xidx <- c(xidx, grep(xt[ii], colnames(X)))
-      V <- X[, xidx, drop = FALSE]
-    }
-    if (checkInt) V <- cbind("(Intercept)" = rep(1, nrow(V)), V)
-   
-    tau <- array(NA, c(num, length(cuts), nrow(x)),
-                 dimnames = list(NULL, cuts, rownames(x)))
-    for (i in 1:num) tau[i,,] <- V %*% drop(gamma[i,,])
-    tau[,2:ncol(tau),] <- exp(tau[,2:ncol(tau),])
-    tau1 <- aperm(apply(tau, c(1,3), cumsum), c(2, 1, 3))
-
-    control <- rep(2, length(y))
-    control[which(y == 1)] <- 1
-    control[which(y == (ncol(tau) + 1))] <- 3
-    if (length(sigma) == 1) sigma <- rep(sigma, num)
-
-    cev <- matrix(NA, nrow = num, ncol = nrow(X),
-                 dimnames = list(NULL, rownames(X)))
-
-    for (i in 1:length(y)) {
-      if ((i %% 10) == 0)
-        cat("Calculating E(mu|X,Y) for observation", i, "of", length(y), "\n")
-      tmp <- cbind(tau[,,i], sigma, pmean[i,])
-      cev[,i] <- apply(tmp, 1, fint, psd = sqrt(pvar[i]), 
-                      control = as.character(control[i]), nt = dim(tau)[2],
-                      y = y[i])
-    }
-    qi$cev <- cev
-    qi.name$cev <- "Conditional Expected Value:  E(mu|X, Y)"
-  }
-  list(qi = qi, qi.name = qi.name)
-}
diff --git a/R/class.ind.R b/R/class.ind.R
deleted file mode 100644
index 9c11983..0000000
--- a/R/class.ind.R
+++ /dev/null
@@ -1,11 +0,0 @@
-#borrow from library(nnet)
-class.ind<-function (cl, levels.data=NULL) 
-{
-    n <- length(cl)
-    cl <- as.factor(cl)
-    levels(cl)<-levels.data
-    x <- matrix(0, n, length(levels(cl)))
-    x[(1:n) + n * (unclass(cl) - 1)] <- 1
-    dimnames(x) <- list(names(cl), levels(cl))
-    x
-}
diff --git a/R/cluster.formula.R b/R/cluster.formula.R
new file mode 100644
index 0000000..2fbf487
--- /dev/null
+++ b/R/cluster.formula.R
@@ -0,0 +1,22 @@
+#' Generate Formulae that Consider Clustering
+#'
+#' This method is used internally by the "Zelig" Package to interpret
+#' clustering.
+#' @param formula a formula object
+#' @param cluster a vector
+#' @return a formula object describing clustering
+cluster.formula <- function (formula, cluster) { 
+
+  # Convert LHS of formula to a string
+  lhs <- deparse(formula[[2]])
+
+  cluster.part <- if (is.null(cluster))
+    # NULL values require
+    sprintf("cluster(1:nrow(%s))", lhs)
+
+  else
+    # Otherwise we trust user input
+    sprintf("cluster(%s)", cluster)
+
+  update(formula, paste(". ~ .", cluster.part, sep=" + "))
+}
diff --git a/R/cmvglm.R b/R/cmvglm.R
index f3c4f3c..cf3f16f 100644
--- a/R/cmvglm.R
+++ b/R/cmvglm.R
@@ -1,3 +1,11 @@
+#' cmvglm
+#' @param formula a formula
+#' @param model the names of the Zelig model
+#' @param ndim the number of dimensions in the statistical model
+#' @param data a data-frame
+#' @param fact ???
+#' @author Kosuke Imai and Olivia Lau
+#' @export
 cmvglm <- function(formula, model, ndim,data=NULL, fact=NULL){
 
   toBuildFormula<-function(Xnames,sepp="+"){
diff --git a/R/coef.BetaReg.R b/R/coef.BetaReg.R
deleted file mode 100644
index f67524e..0000000
--- a/R/coef.BetaReg.R
+++ /dev/null
@@ -1,2 +0,0 @@
-coef.BetaReg <- function(object)
-  object$coef
diff --git a/R/common-methods.R b/R/common-methods.R
new file mode 100644
index 0000000..bea7155
--- /dev/null
+++ b/R/common-methods.R
@@ -0,0 +1,18 @@
+# This file is a quick-hack to fix a mistake placed in Zelig Core on Oct. 1st.
+# The issue in Zelig should be fixed by November `12. :(
+
+#' @S3method coef zelig
+coef.zelig <- function (object, ...)
+  coef(object$result, ...)
+
+#' @S3method logLik zelig
+logLik.zelig <- function (object, ...)
+  logLik(object$result, ...)
+
+#' @S3method plot zelig
+plot.zelig <- function (x, ...)
+  plot(x$result, ...)
+
+#' @S3method vcov zelig
+vcov.zelig <- function (object, ...)
+  vcov(object$result, ...)
diff --git a/R/current.packages.R b/R/current.packages.R
deleted file mode 100644
index 24bdd62..0000000
--- a/R/current.packages.R
+++ /dev/null
@@ -1,66 +0,0 @@
-current.packages <- function(package){
-
-  required.packages <- function(pack) { 
-    mylib <- dirname(system.file(package = pack))
-    description <- packageDescription(pack, lib.loc = mylib)       
-    depends <- description$Depends
-    if (!is.null(depends)) {
-      depends <- strsplit(depends, ", ")[[1]]
-      Rdepends <- pmatch("R (", depends)
-      if (is.na(Rdepends)) {
-        Rdepends <- pmatch("R(", depends)
-        if (is.na(Rdepends))
-          Rdepends <- match("R", depends)
-      }
-      if (!is.na(Rdepends)) 
-        depends <- depends[-Rdepends]
-    }
-    suggests <- description$Suggests
-    if (!is.null(suggests)) 
-      suggests <- strsplit(suggests, ", ")[[1]]
-    total <- c(depends, suggests)
-    if (!is.null(total)) 
-      total <- unlist(strsplit(total, "\n"))
-    if (!is.null(total))
-      total <- unlist(strsplit(total, ","))
-    if (!is.null(total)) {
-      conditions <- grep(")", total)
-      if (length(conditions) > 0) { 
-        for (i in conditions) 
-          total[i] <- strsplit(total[i], " \\(")[[1]][1]
-      }
-      return(total)
-    }
-    else
-      return(NULL)
-  }
-  old <- packages <- required.packages(package)
-
-  check.start <- 1
-  check.end <- length(packages)-1
-  while(check.end < length(packages)) {
-    check.end <- length(packages)
-    for (i in check.start:check.end)
-      packages <- c(packages, required.packages(packages[i]))
-    check.start <- check.end+1
-    packages <- na.omit(unique(packages))
-  }
-
-  ver <- array(NA, length(packages) + 1)
-  for (i in 1:length(packages)) {
-    mylib <- dirname(system.file(package = packages[i]))
-    if (sum(!is.na(packageDescription(packages[i], lib.loc = mylib))))
-      ver[i+1] <- packageDescription(packages[i], lib.loc = mylib)$Ver
-    else
-      stop()
-    names(ver)[i+1] <- packages[i]
-  }
-  ver[1] <- paste(paste(paste(R.Version()$major, R.Version()$minor, sep = "."),
-                        R.Version()$status, sep = " "),
-                  R.Version()$svn, sep = " svn: ")
-  names(ver)[1] <- "R"
-  vv <- as.matrix(ver)
-  colnames(vv) <- "Version"
-  noquote(vv)
-}
-
diff --git a/R/describe.R b/R/describe.R
new file mode 100644
index 0000000..4f4d8b0
--- /dev/null
+++ b/R/describe.R
@@ -0,0 +1,7 @@
+#' Method to describe a model to Zelig
+#' @param ... parameters which are typically ignored
+#' @return a list to be processed by `as.description'
+#' @export describe
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+describe <- function(...)
+  UseMethod("describe")
diff --git a/R/describe.aov.R b/R/describe.aov.R
deleted file mode 100644
index 1210381..0000000
--- a/R/describe.aov.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.aov<-function(){
-category <- "continuous"
-description  <- "Fit an Analysis of Variance Model"
-authors <- c()
-year <- 2007
-package <-list(	name 	="stats",
-		version	="2.5.0"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-                    tagsAllowed=FALSE,
-                    depVar=TRUE,
-                    expVar=TRUE
-                    ##specialFunction="cbind",
-                    ##varInSpecialFunction= c(2,Inf)
-                    )
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.arima.R b/R/describe.arima.R
deleted file mode 100644
index efff81d..0000000
--- a/R/describe.arima.R
+++ /dev/null
@@ -1,21 +0,0 @@
-describe.arima<- function(){
-category<- "continuous"
-description <- "Arima models for Time Series Data"
-authors <- c("Justin Grimmer")
-year <- 2007
-package <-list( name    ="stats",
-                version ="0.1"
-                )
-mu<- list(equations=c(1,1),
-	tagsAllowed=FALSE,
-	depVar=TRUE,
-	expVar=TRUE,
-	specialFunction="Diff",
-	varInSpecialFunction= 4)
-sigma2<- list(equations=c(1,1),
-	tagsAllowed=FALSE,
-	depVar=FALSE,
-	expVar=FALSE)
-pars<- list(mu=mu, sigma2=sigma2)
-model<- list(category = category, authors = authors, year = year,description=description,package=package, parameters=pars)
-}
diff --git a/R/describe.blogit.R b/R/describe.blogit.R
deleted file mode 100644
index 47fccc7..0000000
--- a/R/describe.blogit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.blogit<-function(){
-category <- "dichotomous"
-description  <- "Bivariate Logistic Regression for Dichotomous Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="VGAM",
-		version	="0.6"
-		)
-parameters<-list(mu="mu",phi="phi")
-parameters$mu<-list(equations=c(2,2),
-			tagsAllowed=TRUE,
-			depVar=TRUE,
-			expVar=TRUE)
-			
-parameters$phi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=FALSE,
-			expVar=TRUE)
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.bprobit.R b/R/describe.bprobit.R
deleted file mode 100644
index db9076c..0000000
--- a/R/describe.bprobit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.bprobit<-function(){
-category <- "dichotomous"
-description  <- "Bivariate Probit Regression for Dichotomous Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="VGAM",
-		version	="0.6"
-		)
-parameters<-list(mu="mu", rho="rho")
-parameters$mu<-list(equations=c(2,2),
-			tagsAllowed=TRUE,
-			depVar=TRUE,
-			expVar=TRUE)
-			
-parameters$rho<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=FALSE,
-			expVar=TRUE)
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.chopit.R b/R/describe.chopit.R
deleted file mode 100644
index b03b9b0..0000000
--- a/R/describe.chopit.R
+++ /dev/null
@@ -1,31 +0,0 @@
-describe.chopit <- function(){
-  category <- "ordinal"
-  description  <- "Compound Hierarchical Ordinal Probit Regression for Survey Vignettes"
-  authors <- c()
-  year <- 2007
-  
-  package <-list(name    = "anchors",
-                 version = "2.0",
-                 CRAN    = "http://wand.stanford.edu/R/CRAN")
-
-  parameters <- list()
-  parameters$self <- list(equations = c(1,1),
-                          tagsAllowed = FALSE,
-                          depVar = TRUE,
-                          expVar = TRUE)
-
-  parameters$vign <- list(equations = c(1,1),
-                          tagsAllowed = FALSE,
-                          depVar = TRUE,
-                          expVar = FALSE)
-
-  parameters$tau <- list(equations = c(1,1),
-                         tagsAllowed = FALSE,
-                         depVar = FALSE,
-                         expVar = TRUE)
-
-  list(category = category, authors = authors, year = year,
-       description = description,
-       package = package,
-       parameters = parameters)
-}
diff --git a/R/describe.coxph.R b/R/describe.coxph.R
deleted file mode 100644
index 675252a..0000000
--- a/R/describe.coxph.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.coxph<-function(){
-  category <- "bounded"
-  description  <- "Cox Proportional Hazard Regression for Duration Dependent Variables"
-  authors <- c("Patrick Lam")
-  year <- 2007
-  package <- list(name ="survival",
-		version	="2.34"
-		)
-  parameters <- list()
-  parameters$mu<-list(equations=c(1,1),
-                      tagsAllowed=FALSE,
-                      depVar=TRUE,
-                      expVar=TRUE,
-                      specialFunction="Surv",
-                      varInSpecialFunction=c(2,2)
-		)
-			
-  list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.default.R b/R/describe.default.R
index f279123..4589ff9 100644
--- a/R/describe.default.R
+++ b/R/describe.default.R
@@ -1,15 +1,18 @@
-describe.default<-function(){
-category <- "Dichotomous"
-description  <- "A statistical model"
-
-authors <- c("an author")
-year <- "a year"
-
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE)
-			
-list(category = category, authors = authors, year = year,description=description,parameters=parameters)
+#' Default describe function for an arbitrary model
+#' This method exists solely as a backup when an author does not contribute a
+#' 'describe' function for their model
+#' @usage \method{describe}{default}(...)
+#' @S3method describe default
+#' @param ... dummy parameters purely to cast the correct object. That is, the
+#'   parameters of the function should not
+#'            BE referenced specifically
+#' @return a list to be processed by \code{as.description}
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+describe.default <- function(...) {
+  warning("The 'describe' method for this function is unspecified")
+  list(
+       authors = "Unknown Author",
+       year    = as.numeric(format(Sys.Date(), "%Y"))
+       )
 }
diff --git a/R/describe.ei.RxC.R b/R/describe.ei.RxC.R
deleted file mode 100644
index c92dac1..0000000
--- a/R/describe.ei.RxC.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.ei.RxC <- function(){
-category <- "ei"
-description  <- "Hierarchical Multinomial-Dirichlet Ecological Inference Model for R x C Tables"
-authors <- c("Jason Wittenberg", "Ferdinand Alimadhi","Badri Narayan Bhaskar","Olivia Lau")
-year <- 2007
-
-package <-list( name    ="stats",
-                version ="0.1"
-                )
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(2,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.ei.dynamic.R b/R/describe.ei.dynamic.R
deleted file mode 100644
index 2d1d36d..0000000
--- a/R/describe.ei.dynamic.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.ei.dynamic <- function(){
-category <- "ei"
-description  <- "Quinn's Dynamic Ecological Inference Model"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <- list (
-	name="MCMCpack",
-	version="0.8-2"
-	)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(2,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.ei.hier.R b/R/describe.ei.hier.R
deleted file mode 100644
index 2ffcedf..0000000
--- a/R/describe.ei.hier.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.ei.hier <- function(){
-category <- "ei"
-description  <- "Hierarchical Ecological Inference Model for  2 x 2 Tables"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <- list (
-	name="MCMCpack",
-	version="0.8-2"
-	)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(2,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.exp.R b/R/describe.exp.R
deleted file mode 100644
index e66ba75..0000000
--- a/R/describe.exp.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.exp<-function(){
-category <- "bounded"
-description  <- "Exponential Regression for Duration Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="survival",
-		version	="2.0"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="Surv",
-			varInSpecialFunction=c(2,2))
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.factor.bayes.R b/R/describe.factor.bayes.R
deleted file mode 100644
index 63024e1..0000000
--- a/R/describe.factor.bayes.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.factor.bayes<-function(){
-category <- "continuous"
-description  <- "Bayesian Factor Analysis"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-                    tagsAllowed=FALSE,
-                    depVar=TRUE,
-                    expVar=FALSE,
-                    specialFunction="cbind",
-                    varInSpecialFunction=c(3,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.factor.mix.R b/R/describe.factor.mix.R
deleted file mode 100644
index 12fd76d..0000000
--- a/R/describe.factor.mix.R
+++ /dev/null
@@ -1,21 +0,0 @@
-describe.factor.mix<-function(){
-category <- "mixed"
-description  <- "Mixed Data Factor Analysis"
-
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=FALSE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(2,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.factor.ord.R b/R/describe.factor.ord.R
deleted file mode 100644
index dd4b0df..0000000
--- a/R/describe.factor.ord.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.factor.ord<-function(){
-category <- "ordinal"
-description  <- "Ordinal Data Factor Analysis"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=FALSE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(2,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.gam.logit.R b/R/describe.gam.logit.R
deleted file mode 100644
index 1e747f4..0000000
--- a/R/describe.gam.logit.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.logit.gam<-function(){
-category <- "dichotomous"
-description  <- "Generalized Additive Model for Dichotomous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-
-package <-list(	name 	="mgcv",
-		version	="0.1"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.gam.normal.R b/R/describe.gam.normal.R
deleted file mode 100644
index 8313b08..0000000
--- a/R/describe.gam.normal.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.normal.gam<-function(){
-category <- "continuous"
-description  <- "Generalized Additive Model for Continuous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="mgcv",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.gam.poisson.R b/R/describe.gam.poisson.R
deleted file mode 100644
index d68608e..0000000
--- a/R/describe.gam.poisson.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.poisson.gam<-function(){
-category <- "count"
-description  <- "Generalized Additive Model for Event Count Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="mgcv",
-		version	="0.1"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.gam.probit.R b/R/describe.gam.probit.R
deleted file mode 100644
index b53480b..0000000
--- a/R/describe.gam.probit.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.probit.gam<-function(){
-category <- "dichotomous"
-description  <- "Generalized Additive Model for Dichotomous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="mgcv",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.gamma.R b/R/describe.gamma.R
deleted file mode 100644
index 729b56a..0000000
--- a/R/describe.gamma.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.gamma<-function(){
-category <- "bounded"
-description  <- "Gamma Regression for Continuous, Positive Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.gamma.gee.R b/R/describe.gamma.gee.R
deleted file mode 100644
index a5badcf..0000000
--- a/R/describe.gamma.gee.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.gamma.gee<-function(){
-  category <- "bounded"
-  description  <- "General Estimating Equation for Gamma Regression"
-  authors <- c("Patrick Lam")
-  year <- 2007
-  package <- list(name ="gee",
-		version	="4.13-12"
-		)
-  lambda <- list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-  list(category = category, authors = authors, year = year,description=description,package=package,parameters=list(lambda=lambda))
-}
-
diff --git a/R/describe.gamma.mixed.R b/R/describe.gamma.mixed.R
deleted file mode 100644
index 7600178..0000000
--- a/R/describe.gamma.mixed.R
+++ /dev/null
@@ -1,32 +0,0 @@
-describe.gamma.mixed <- function(){
-  category <- "bounded"
-  description  <- "Mixed effects gamma model"
-  authors <- c("Delia Bailey","Ferdinand Alimadhi")
-  year <- 2007
-  
-  parameters <- list(mu="mu", delta="delta", sigma2="sigma2")
-
-  parameters$mu <- list(equations=c(1,1),
-                        tagsAllowed=TRUE,
-                        depVar=TRUE,
-                        expVar=TRUE)
-
-  parameters$delta <- list(equations=c(1,2),
-                           tagsAllowed=TRUE,
-                           depVar=FALSE,
-                           expVar=TRUE)
-
-  parameters$sigma2 <- list(equations=c(1,1),
-                            tagsAllowed=FALSE,
-                            depVar=FALSE,
-                            expVar=FALSE)
-
-  # Does Zelig need all dependencies here?
-  # e.g., lme4 depends on Matrix and lattice
-  package <- list(name="lme4", version="0.99875-9")
-  
-
-  list(category = category, authors = authors, year = year, description = description, package=package, parameters=parameters)
-}
-
-  
diff --git a/R/describe.gamma.survey.R b/R/describe.gamma.survey.R
deleted file mode 100644
index 0689f22..0000000
--- a/R/describe.gamma.survey.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.gamma.survey<-function(){
-category <- "bounded"
-description  <- "Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables"
-authors <- c("Nicholas Carnes")
-year <- 2008
-package <-list(	name 	="survey",
-		version	="3.6-13"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.irt1d.R b/R/describe.irt1d.R
deleted file mode 100644
index bd8917d..0000000
--- a/R/describe.irt1d.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.irt1d<-function(){
-category <- "dichotomous"
-description  <- "One Dimensional Item Response Model"
-authors <- c("Ben Goodrich","Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=FALSE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(1,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.irtkd.R b/R/describe.irtkd.R
deleted file mode 100644
index 690e3c6..0000000
--- a/R/describe.irtkd.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.irtkd<-function(){
-category <- "dichotomous"
-description  <- "K-Dimensional Item Response Model"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=FALSE,
-			specialFunction="cbind",
-			varInSpecialFunction=c(2,Inf)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.lm.mixed.R b/R/describe.lm.mixed.R
deleted file mode 100644
index a29d760..0000000
--- a/R/describe.lm.mixed.R
+++ /dev/null
@@ -1,32 +0,0 @@
-describe.ls.mixed <- function(){
-  category <- "continuous"
-  description  <- "Mixed effects linear model"
-
-  authors <- c("Delia Bailey","Ferdinand Alimadhi")
-  year <- 2007
-  parameters <- list(mu="mu", delta="delta", sigma2="sigma2")
-
-  parameters$mu <- list(equations=c(1,1),
-                        tagsAllowed=TRUE,
-                        depVar=TRUE,
-                        expVar=TRUE)
-
-  parameters$delta <- list(equations=c(1,2),
-                           tagsAllowed=TRUE,
-                           depVar=FALSE,
-                           expVar=TRUE)
-
-  parameters$sigma2 <- list(equations=c(1,1),
-                            tagsAllowed=FALSE,
-                            depVar=FALSE,
-                            expVar=FALSE)
-
-  # Does Zelig need all dependencies here?
-  # e.g., lme4 depends on Matrix and lattice
-  package <- list(name="lme4", version="0.99875-9")
-  
-
-  list(category = category, authors = authors, year = year, package=package, description = description, parameters=parameters)
-}
-
-  
diff --git a/R/describe.logit.R b/R/describe.logit.R
deleted file mode 100644
index f911762..0000000
--- a/R/describe.logit.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.logit<-function(){
-category <- "dichotomous"
-description  <- "Logistic Regression for Dichotomous Dependent Variables"
-authors <- c()
-year <- 2008
-
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.logit.bayes.R b/R/describe.logit.bayes.R
deleted file mode 100644
index 380ba33..0000000
--- a/R/describe.logit.bayes.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.logit.bayes<-function(){
-category <- "dichotomous"
-description  <- "Bayesian Logistic Regression for Dichotomous Dependent Variables"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.logit.gee.R b/R/describe.logit.gee.R
deleted file mode 100644
index f085590..0000000
--- a/R/describe.logit.gee.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.logit.gee<-function(){
-  category <- "dichotomous"
-  description  <- "General Estimating Equation for Logistic Regression"
-  authors <- c("Patrick Lam")
-  year <- 2007
-  package <- list(name ="gee",
-		version	="4.13-12"
-		)
-  pi <- list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-  list(category = category, authors = authors, year = year,description=description,package=package,parameters=list(pi=pi))
-}
-
diff --git a/R/describe.logit.mixed.R b/R/describe.logit.mixed.R
deleted file mode 100644
index 08bc760..0000000
--- a/R/describe.logit.mixed.R
+++ /dev/null
@@ -1,31 +0,0 @@
-describe.logit.mixed <- function(){
-  category <- "dichotomous"
-  description  <- "Mixed effects logistic model"
-  authors <- c("Delia Bailey", "Ferdinand Alimadhi")
-  year <- 2007
-  parameters <- list(mu="mu", delta="delta", sigma2="sigma2")
-
-  parameters$mu <- list(equations=c(1,1),
-                        tagsAllowed=TRUE,
-                        depVar=TRUE,
-                        expVar=TRUE)
-
-  parameters$delta <- list(equations=c(1,2),
-                           tagsAllowed=TRUE,
-                           depVar=FALSE,
-                           expVar=TRUE)
-
-  parameters$sigma2 <- list(equations=c(1,1),
-                            tagsAllowed=FALSE,
-                            depVar=FALSE,
-                            expVar=FALSE)
-
-  # Does Zelig need all dependencies here?
-  # e.g., lme4 depends on Matrix and lattice
-  package <- list(name="lme4", version="0.99875-9")
-  
-
-  list(category = category, authors = authors, year = year, package=package,description = description, parameters=parameters)
-}
-
-  
diff --git a/R/describe.logit.survey.R b/R/describe.logit.survey.R
deleted file mode 100644
index 5d04a7b..0000000
--- a/R/describe.logit.survey.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.logit.survey<-function(){
-category <- "dichotomous"
-description  <- "Survey-Weighted Logistic Regression for Dichotomous Dependent Variables"
-authors <- c("Nicholas Carnes")
-year <- 2007
-package <-list(	name 	="survey",
-		version	="3.6-13"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.lognorm.R b/R/describe.lognorm.R
deleted file mode 100644
index 25466e9..0000000
--- a/R/describe.lognorm.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.lognorm<-function(){
-category <- "bounded"
-description  <- "Log-Normal Regression for Duration Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="survival",
-		version	="2.2"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="Surv",
-			varInSpecialFunction=c(2,2)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.ls.R b/R/describe.ls.R
deleted file mode 100644
index 73f4972..0000000
--- a/R/describe.ls.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.ls<-function(){
-category <- "continuous"
-description  <- "Least Squares Regression for Continuous Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.mlogit.R b/R/describe.mlogit.R
deleted file mode 100644
index 71255ad..0000000
--- a/R/describe.mlogit.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.mlogit<-function(){
-category <- "multinomial"
-description  <- "Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values"
-authors <- c()
-year <- 2007
-package <-list(	name 	="VGAM",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,Inf),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="as.factor",
-			varInSpecialFunction=c(1,1)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.mlogit.bayes.R b/R/describe.mlogit.bayes.R
deleted file mode 100644
index 46d0247..0000000
--- a/R/describe.mlogit.bayes.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.mlogit.bayes<-function(){
-category <- "multinomial"
-description  <- "Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="as.factor",
-			varInSpecialFunction=c(1,1)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.negbin.R b/R/describe.negbin.R
deleted file mode 100644
index 1b21708..0000000
--- a/R/describe.negbin.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.negbin<-function(){
-category <- "count"
-description  <- "Negative Binomial Regression for Event Count Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="MASS",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netcloglog.R b/R/describe.netcloglog.R
deleted file mode 100644
index d7b21f7..0000000
--- a/R/describe.netcloglog.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.cloglog.net<-function(){
-category <- "dichotomous"
-description  <- "Social Network Complementary Log Log Regression for Dichotomous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="sna",
-		version	="1.4"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netgamma.R b/R/describe.netgamma.R
deleted file mode 100644
index 1da6fb9..0000000
--- a/R/describe.netgamma.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.gamma.net<-function(){
-category <- "bounded"
-description  <- "Social Network Gamma Regression for Continuous, Positive Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-
-package <-list(	name 	="sna",
-		version	="1.4"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netlogit.R b/R/describe.netlogit.R
deleted file mode 100644
index d4051bf..0000000
--- a/R/describe.netlogit.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.logit.net<-function(){
-category <- "dichotomous"
-description  <- "Social Network Logistic Regression for Dichotomous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="sna",
-		version	="1.4"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netls.R b/R/describe.netls.R
deleted file mode 100644
index e50d8d7..0000000
--- a/R/describe.netls.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.ls.net<-function(){
-category <- "continuous"
-description  <- "Social Network Least Squares Regression for Continuous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="sna",
-		version	="1.4"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netnormal.R b/R/describe.netnormal.R
deleted file mode 100644
index 2058d63..0000000
--- a/R/describe.netnormal.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.normal.net<-function(){
-category <- "continuous"
-description  <- "Social Network Normal Regression for Continuous Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="sna",
-		version	="1.4"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netpoisson.R b/R/describe.netpoisson.R
deleted file mode 100644
index 9dc7f22..0000000
--- a/R/describe.netpoisson.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.poisson.net<-function(){
-category <- "count"
-description  <- "Social Network Poisson Regression for Event Count Dependent Variables"
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-
-package <-list(	name 	="sna",
-		version	="1.4"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.netprobit.R b/R/describe.netprobit.R
deleted file mode 100644
index 8903402..0000000
--- a/R/describe.netprobit.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.probit.net<-function(){
-category <- "dichotomous"
-description  <- "Social Network Probit Regression for Dichotomous Dependent Variables"
-
-authors <- c("Skyler J. Cranmer")
-year <- 2007
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.normal.R b/R/describe.normal.R
deleted file mode 100644
index 5e52b57..0000000
--- a/R/describe.normal.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.normal<-function(){
-category <- "continuous"
-description  <- "Normal Regression for Continuous Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.normal.bayes.R b/R/describe.normal.bayes.R
deleted file mode 100644
index ab6670f..0000000
--- a/R/describe.normal.bayes.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.normal.bayes<-function(){
-category <- "continuous"
-description  <- "Bayesian Normal Linear Regression"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.normal.gee.R b/R/describe.normal.gee.R
deleted file mode 100644
index 51b74e1..0000000
--- a/R/describe.normal.gee.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.normal.gee<-function(){
-  category <- "continuous"
-  description  <- "General Estimating Equation for Normal Regression"
-  authors <- c("Patrick Lam")
-  year <- 2007
-  package <- list(name ="gee",
-		version	="4.13-12"
-		)
-  mu <- list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-  list(category = category, authors = authors, year = year,description=description,package=package,parameters=list(mu=mu))
-}
-
diff --git a/R/describe.normal.survey.R b/R/describe.normal.survey.R
deleted file mode 100644
index 713c9e6..0000000
--- a/R/describe.normal.survey.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.normal.survey<-function(){
-category <- "continuous"
-description  <- "Survey-Weighted Normal Regression for Continuous Dependent Variables"
-authors <- c("Nicholas Carnes")
-year <- 2008
-package <-list(name 	="survey",
-		version	="3.6-13"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.ologit.R b/R/describe.ologit.R
deleted file mode 100644
index 37b86b6..0000000
--- a/R/describe.ologit.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.ologit<-function(){
-category <- "ordinal"
-description  <- "Ordinal Logistic Regression for Ordered Categorical Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="MASS",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="as.factor",
-			varInSpecialFunction=c(1,1)
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.oprobit.R b/R/describe.oprobit.R
deleted file mode 100644
index 8808d6e..0000000
--- a/R/describe.oprobit.R
+++ /dev/null
@@ -1,22 +0,0 @@
-describe.oprobit<-function(){
-
-category <- "ordinal"
-description  <- "Ordinal Probit Regression for Ordered Categorical Dependent Variables"
-authors <- c()
-year <- 2007
-
-package <-list(	name 	="MASS",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="as.factor",
-			varInSpecialFunction=c(1,1)
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-
-}
diff --git a/R/describe.oprobit.bayes.R b/R/describe.oprobit.bayes.R
deleted file mode 100644
index 0707879..0000000
--- a/R/describe.oprobit.bayes.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.oprobit.bayes<-function(){
-category <- "ordinal"
-description  <- "Bayesian Ordered Probit Regression"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="as.factor",
-			varInSpecialFunction=c(1,1)
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.poisson.R b/R/describe.poisson.R
deleted file mode 100644
index 3cc0c98..0000000
--- a/R/describe.poisson.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.poisson<-function(){
-category <- "count"
-description  <- "Poisson Regression for Event Count Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.poisson.bayes.R b/R/describe.poisson.bayes.R
deleted file mode 100644
index e4ee362..0000000
--- a/R/describe.poisson.bayes.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.poisson.bayes<-function(){
-category <- "count"
-description  <- "Bayesian Poisson Regression"
-authors <- c("Ben Goodrich", "Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.poisson.gee.R b/R/describe.poisson.gee.R
deleted file mode 100644
index f140a74..0000000
--- a/R/describe.poisson.gee.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.poisson.gee<-function(){
-  category <- "count"
-  description  <- "General Estimating Equation for Poisson Regression"
-  authors <- c("Patrick Lam")
-  year <- 2007
-  package <- list(name ="gee",
-		version	="4.13-12"
-		)
-  lambda <- list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-  list(category = category, authors = authors, year = year,description=description,package=package,parameters=list(lambda=lambda))
-}
-
diff --git a/R/describe.poisson.mixed.R b/R/describe.poisson.mixed.R
deleted file mode 100644
index fe0f7f4..0000000
--- a/R/describe.poisson.mixed.R
+++ /dev/null
@@ -1,31 +0,0 @@
-describe.poisson.mixed <- function(){
-  category <- "count"
-  description  <- "Mixed effects poisson model"
-  authors <- c("Delia Bailey", "Ferdinand Alimadhi")
-  year <- 2007
-  parameters <- list(mu="mu", delta="delta", sigma2="sigma2")
-
-  parameters$mu <- list(equations=c(1,1),
-                        tagsAllowed=TRUE,
-                        depVar=TRUE,
-                        expVar=TRUE)
-
-  parameters$delta <- list(equations=c(1,2),
-                           tagsAllowed=TRUE,
-                           depVar=FALSE,
-                           expVar=TRUE)
-
-  parameters$sigma2 <- list(equations=c(1,1),
-                            tagsAllowed=FALSE,
-                            depVar=FALSE,
-                            expVar=FALSE)
-
-  # Does Zelig need all dependencies here?
-  # e.g., lme4 depends on Matrix and lattice
-  package <- list(name="lme4", version="0.99875-9")
-  
-
-  list(category = category, authors = authors, year = year, package=package, description = description, parameters=parameters)
-}
-
-  
diff --git a/R/describe.poisson.survey.R b/R/describe.poisson.survey.R
deleted file mode 100644
index d6c549a..0000000
--- a/R/describe.poisson.survey.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.poisson.survey<-function(){
-category <- "count"
-description  <- "Survey-Weighted Poisson Regression for Event Count Dependent Variables"
-authors <- c("Nicholas Carnes")
-year <- 2008
-package <-list(	name 	="survey",
-		version	="3.6-13"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.probit.R b/R/describe.probit.R
deleted file mode 100644
index 2d90e25..0000000
--- a/R/describe.probit.R
+++ /dev/null
@@ -1,18 +0,0 @@
-describe.probit<-function(){
-category <- "dichotomous"
-description  <- "Probit Regression for Dichotomous Dependent Variables"
-authors <- c()
-year <- 2007
-
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.probit.bayes.R b/R/describe.probit.bayes.R
deleted file mode 100644
index aa09909..0000000
--- a/R/describe.probit.bayes.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.probit.bayes<-function(){
-category <- "dichotomous"
-description  <- "Bayesian Probit Regression for Dichotomous Dependent Variables"
-authors <- c("Ben Goodrich","Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.probit.gee.R b/R/describe.probit.gee.R
deleted file mode 100644
index ceafd9c..0000000
--- a/R/describe.probit.gee.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.probit.gee<-function(){
-  category <- "dichotomous"
-  description  <- "General Estimating Equation for Probit Regression"
-  authors <- c("Patrick Lam")
-  year <- 2007
-  package <- list(name ="gee",
-		version	="4.13-12"
-		)
-  pi <- list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-  list(category = category, authors = authors, year = year,description=description,package=package,parameters=list(pi=pi))
-}
-
diff --git a/R/describe.probit.mixed.R b/R/describe.probit.mixed.R
deleted file mode 100644
index c3cb44c..0000000
--- a/R/describe.probit.mixed.R
+++ /dev/null
@@ -1,32 +0,0 @@
-describe.probit.mixed <- function(){
-  category <- "dichotomous"
-  description  <- "Mixed effects probit model"
-  authors <- c("Delia Bailey", "Ferdinand Alimadhi")
-  year <- 2007
-  
-  parameters <- list(mu="mu", delta="delta", sigma2="sigma2")
-
-  parameters$mu <- list(equations=c(1,1),
-                        tagsAllowed=TRUE,
-                        depVar=TRUE,
-                        expVar=TRUE)
-
-  parameters$delta <- list(equations=c(1,2),
-                           tagsAllowed=TRUE,
-                           depVar=FALSE,
-                           expVar=TRUE)
-
-  parameters$sigma2 <- list(equations=c(1,1),
-                            tagsAllowed=FALSE,
-                            depVar=FALSE,
-                            expVar=FALSE)
-
-  # Does Zelig need all dependencies here?
-  # e.g., lme4 depends on Matrix and lattice
-  package <- list(name="lme4", version="0.99875-9")
-  
-
-  list(category = category, authors = authors, year = year, package=package, description = description, parameters=parameters)
-}
-
-  
diff --git a/R/describe.probit.survey.R b/R/describe.probit.survey.R
deleted file mode 100644
index 17a1fa6..0000000
--- a/R/describe.probit.survey.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.probit.survey<-function(){
-category <- "dichotomous"
-description  <- "Survey-Weighted Probit Regression for Dichotomous Dependent Variables"
-authors <- c("Nicholas Carnes")
-year <- 2008
-package <-list(	name 	="survey",
-		version	="3.6-13"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.quantile.R b/R/describe.quantile.R
deleted file mode 100644
index 493a112..0000000
--- a/R/describe.quantile.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.quantile <- function(){
-	category <- "continuous"
-	description <- "Quantile Regression for Continuous Dependent Variables"
-	authors <- c("Alexander D'Amour")
-	year <- 2008
-	package <- list( name		="quantreg",
-					 version	="4.26"
-				   )
-	parameters <- list(xi="xi")
-	parameters$xi <- list(equations=c(1,1),
-					tagsAllowed=FALSE,
-					depVar=TRUE,
-					expVar=TRUE
-					)
-
-	list(category = category, authors = authors, year = year, description = description, package = package, parameters=parameters)
-}
diff --git a/R/describe.relogit.R b/R/describe.relogit.R
deleted file mode 100644
index 2481797..0000000
--- a/R/describe.relogit.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.relogit<-function(){
-category <- "dichotomous"
-description  <- "Rare Events Logistic Regression for Dichotomous Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="stats",
-		version	="0.1"
-		)
-parameters<-list(pi="pi")
-parameters$pi<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.rq.R b/R/describe.rq.R
deleted file mode 100644
index aef5c34..0000000
--- a/R/describe.rq.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.rq <- function(){
-	category <- "continuous"
-	description <- "Quantile Regression for Continuous Dependent Variables"
-	authors <- c("Alexander D'Amour")
-	year <- 2008
-	package <- list( name		="quantreg",
-					 version	="4.26"
-				   )
-	parameters <- list(xi="xi")
-	parameters$xi <- list(equations=c(1,1),
-					tagsAllowed=FALSE,
-					depVar=TRUE,
-					expVar=TRUE
-					)
-
-	list(category = category, authors = authors, year = year, description = description, package = package, parameters=parameters)
-}
diff --git a/R/describe.sur.R b/R/describe.sur.R
deleted file mode 100644
index 5e08b0f..0000000
--- a/R/describe.sur.R
+++ /dev/null
@@ -1,16 +0,0 @@
-describe.sur<-function(){
-  category <- "continuous"
-description  <- "Seemingly Unrelated Regression"
-authors <- c("Ferdinand Alimadhi","Ying Lu", "Elena Villalon")
-year <- 2007
-package <-list(	name 	="systemfit",
-		version	="0.8"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(2,Inf),
-			tagsAllowed=TRUE,
-			depVar=TRUE,
-			expVar=TRUE)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.threesls.R b/R/describe.threesls.R
deleted file mode 100644
index 481c01f..0000000
--- a/R/describe.threesls.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.threesls<-function(){
-category <- "continuous"
-description  <- "Three Stage Least Squares"
-authors <- c("Ferdinand Alimadhi","Ying Lu", "Elena Villalon")
-year <- 2007
-package <-list(	name 	="systemfit",
-		version	="0.8"
-		)
-parameters<-list(mu="mu", inst="inst")
-parameters$mu<-list(equations=c(2,Inf),
-			tagsAllowed=TRUE,
-			depVar=TRUE,
-			expVar=TRUE)
-			
-parameters$inst<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=FALSE,
-			expVar=TRUE)
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.tobit.R b/R/describe.tobit.R
deleted file mode 100644
index 3d18ae9..0000000
--- a/R/describe.tobit.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.tobit<-function(){
-category <- "continuous"
-description  <- "Linear regression for Left-Censored Dependent Variable"
-authors <- c()
-year <- 2007
-package <-list(	name 	="survival",
-		version	="2.2"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.tobit.bayes.R b/R/describe.tobit.bayes.R
deleted file mode 100644
index 17ef27b..0000000
--- a/R/describe.tobit.bayes.R
+++ /dev/null
@@ -1,17 +0,0 @@
-describe.tobit.bayes<-function(){
-category <- "continuous"
-description  <- "Bayesian Linear Regression for a Censored Dependent Variable"
-authors <- c("Ben Goodrich","Ying Lu")
-year <- 2007
-package <-list(	name 	="MCMCpack",
-		version	="0.6"
-		)
-parameters<-list(mu="mu")
-parameters$mu<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE
-			)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.twosls.R b/R/describe.twosls.R
deleted file mode 100644
index 3c73a52..0000000
--- a/R/describe.twosls.R
+++ /dev/null
@@ -1,20 +0,0 @@
-describe.twosls<-function(){
-category <- "continuous"
-description  <- "Two Stage Least Squares"
-authors <- c("Ferdinand Alimadhi","Ying Lu", "Elena Villalon")
-year <- 2007
-package <-list(	name 	="systemfit",
-		version	="0.8"
-		)
-parameters<-list(mu="mu", inst="inst")
-parameters$mu<-list(equations=c(2,Inf),
-			tagsAllowed=TRUE,
-			depVar=TRUE,
-			expVar=TRUE)
-			
-parameters$inst<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=FALSE,
-			expVar=TRUE)
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.weibull.R b/R/describe.weibull.R
deleted file mode 100644
index 7c6e760..0000000
--- a/R/describe.weibull.R
+++ /dev/null
@@ -1,19 +0,0 @@
-describe.weibull<-function(){
-category <- "bounded"
-description  <- "Weibull Regression for Duration Dependent Variables"
-authors <- c()
-year <- 2007
-package <-list(	name 	="survival",
-		version	="2.2"
-		)
-parameters<-list(lambda="lambda")
-parameters$lambda<-list(equations=c(1,1),
-			tagsAllowed=FALSE,
-			depVar=TRUE,
-			expVar=TRUE,
-			specialFunction="Surv",
-			varInSpecialFunction=c(2,2)
-		)
-			
-list(category = category, authors = authors, year = year,description=description,package=package,parameters=parameters)
-}
diff --git a/R/describe.zelig.R b/R/describe.zelig.R
new file mode 100644
index 0000000..c89227a
--- /dev/null
+++ b/R/describe.zelig.R
@@ -0,0 +1,13 @@
+#' Get Description Object Used to Cite this Zelig Model
+#' @note This function should be reevaluated in design, since 'description'
+#' objects are exclusively used internally. In particular, this method would
+#' be more useful to users as a 'cite' method.
+#' @usage \method{describe}{zelig}(object, ...)
+#' @S3method describe zelig
+#' @param object a 'zelig' object
+#' @param ... ignored parameters
+#' @return a 'description' object used internally to produce citation text
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+describe.zelig <- function(object, ...) {
+  append(list(model=object$name), NextMethod("describe"))
+}
diff --git a/R/description.R b/R/description.R
new file mode 100644
index 0000000..02720d4
--- /dev/null
+++ b/R/description.R
@@ -0,0 +1,155 @@
+#' Constructor for the 'description' class
+#'
+#' @param authors a character-vector of author names
+#' @param year a numeric specifying the year
+#' @param model a character-string specifying model name
+#' @param text a character-string specifying the title of the model. This
+#'   typically includes more exact information than 'model'. E.g., for the
+#'   'logit' the title 'Logistic Regression for Dichotomous Variables' would be
+#'   a suitable text parameter.
+#' @param url a character-string specifying the model's software page
+#' @param category deprecated until data-verse bindings are reevaluated
+#' @return an object of type 'description'
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+description <- function(authors=c("Kosuke Imai", "Gary King", "Olivia Lau"),
+                        year=NULL, model="", text="", url="",
+                        category = NULL) {
+  # error-catching
+  if (!is.character(authors))
+    author <- "Kosuke Imai, Gary King, and Olivia Lau"
+
+  else if (length(authors) > 1) {
+    # collapse author names if it is a character-vector bigger than 1
+    authors <- paste(paste(head(authors, -1), collapse=", "),
+                     ", and ",
+                     tail(authors, 1),
+                     sep = ""
+                     )
+  }
+
+  if (!is.numeric(year))
+    year <- as.numeric(format(Sys.Date(), "%Y"))
+
+  if (!is.character(model) || length(model) != 1) {
+    print(model)
+    stop("model must be a character-string")
+  }
+
+  if (length(text) > 1)
+    stop("text must be a character-vector of length 1")
+
+  if (is.null(url))
+    url <- "http://gking.harvard.edu/zelig"
+
+  if (!is.character(category))
+    category <- ""
+
+  else if (length(url) > 1 || !is.character(url))
+    stop("url must be a character-vector of length 1")
+
+  # double back-up, even though this should be impossible now
+  authors <- ifelse(nchar(authors) > 0, authors, "NAMELESS AUTHOR")
+  year <- ifelse(!is.null(year), year, "UNKNOWN YEAR")
+  model <- ifelse(nchar(model) > 0, model, "UNNAMED MODEL")
+
+  # construct object
+  self <- list(authors = authors,
+               year    = year,
+               model   = model,
+               text    = text,
+               url     = url
+               )
+  class(self) <- "description"
+  self
+}
+
+
+#' Citation information for a 'description' object
+#' @param descr an object of type 'description'
+#' @return a character-string giving citation info
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+cite <- function(descr) {
+  #
+  if (inherits(descr, "list"))
+    descr <- as.description(descr)
+  else if (!inherits(descr, "description"))
+    descr <- description()
+
+  # 
+  url <- "http://gking.harvard.edu/zelig"
+
+  title <- if (is.null(descr$text))
+    descr$model
+  else
+    paste(descr$model, ": ", descr$text, sep="")
+
+  # quote
+  title <- paste('"', title, '"', sep="")
+
+  # construct string.  This should be done much more elegantly
+  # and with localization
+  str <- "How to cite this model in Zelig:\n  "
+  str <- paste(str, descr$authors, ". ", descr$year, ".\n  ", title, sep="")
+  str <- paste(str, "\n  in Kosuke Imai, Gary King, and Olivia Lau, ", sep="")
+  str <- paste(str, "\"Zelig: Everyone's Statistical Software,\"", sep="")
+  str <- paste(str, "\n  ", url, "\n", sep="")
+  str
+}
+
+
+#' Generic Method for Casting 'description' Objects
+#' 
+#' Convert the result of a call to the 'describe' method into an object 
+#' parseable by Zelig. Currently conversions only exist for lists and 
+#' description objects.
+#' @param descr an object to cast an object of type 'description'
+#' @param ... parameters which are reserved for future Zelig revisions
+#' @return an object of type 'description'
+#' @export
+#' @seealso as.description.description as.description.list
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.description <- function(descr, ...)
+  UseMethod("as.description")
+
+
+#' description -> description
+#'
+#' Identity operation on a description object.
+#' @S3method as.description description
+#' @usage \method{as.description}{description}(descr, ...)
+#' @param descr an object of type 'description'
+#' @param ... ignored
+#' @return the same object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.description.description <- function(descr, ...)
+  descr
+
+
+#' list -> description
+#'
+#' Convert list into a description object.
+#' @usage \method{as.description}{list}(descr, ...)
+#' @S3method as.description list
+#' @param descr a list
+#' @param ... ignored
+#' @return an object of type 'description'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+as.description.list <- function(descr, ...) {
+
+  text <- if (!is.null(descr$text))
+    descr$text
+  else if (!is.null(descr$description))
+    descr$description
+  else
+    NULL
+  
+  description(authors = descr$authors,
+              year    = descr$year,
+              model   = descr$model,
+              text    = text,
+              url     = descr$url,
+              category= descr$category
+              )
+}
diff --git a/R/dims.R b/R/dims.R
deleted file mode 100644
index 6cbae0a..0000000
--- a/R/dims.R
+++ /dev/null
@@ -1,6 +0,0 @@
-dims <- function(x) {
-  if (is.vector(x)) 
-    return(length(x))
-  else
-    return(dim(x))
-}
diff --git a/R/eiRxC.R b/R/eiRxC.R
deleted file mode 100644
index d90546b..0000000
--- a/R/eiRxC.R
+++ /dev/null
@@ -1,220 +0,0 @@
-callparamseiestim <- function(formula,data, covar = NULL,
-                              const = 0.001, parSeed = NULL, ...){
-  
-
-  tmp <- cmei(formula=formula, data=data, covar=covar)
-  data <- tmp$data
-  nR<-tmp$nR
-  nC <- tmp$nC
-
- if(!(is.null(covar))){
-  covar<-tmp$covar
-}
-
-  mf <- match.call()
-  t <- terms(mf$formula)
-  out <- paramsei.estim(data=data, covar=covar, nR=nR, nC=nC, const=const, 
-                        parSeed=parSeed)
-  out$terms <- t
-
-  return (out)
-}
-
-# CALL.DIFP
-# Calculates penalty for given parameters
-# p     - parameter vector R x (C-1)
-# mx    - Column marginals
-# my    - row marginals
-# nR    - number of rows
-# nC    - number of columns
-# nP    - number of precincts
-# const - weight for penalty
-# covar
-
-call.difp <- function(par, mx, my, covar, nR, nC, nP, const){
-  g <- par[1:(nR*(nC-1))]
-  if(is.numeric(covar)) {  
-    d <- par[(nR*(nC-1)+1):(2*nR*(nC-1))]
-    gamma <- array(0, dim = c(nR, nC-1, nP))
-    diff <- 0
-    for (i in 1:nP) {
-      gamma[,,i] <- matrix(g + covar[i]*d, nrow = nR, ncol = nC-1, byrow = TRUE)
-      expo <- exp(gamma[,,i]) 
-      if (nC != 2) 
-        ebeta <- exp(gamma[,,i]) / (1 + apply(exp(gamma[,,i]), 1, sum))
-      else 
-        ebeta <- exp(gamma[,,i]) / (1 + exp(gamma[,,i]))
-      yhat <- mx[i,] %*% ebeta
-      diff <- diff + sum((yhat - my[i, -nC])^2) + (const*sum(gamma[,,i]^2))
-    }
-  }
-  else {
-    d <- array(0, dim = nR * (nC-1))
-    gamma <- matrix(g, nrow = nR, ncol = nC-1, byrow = TRUE)
-    ebeta <- exp(gamma)/(1 + apply(exp(gamma), 1, sum))
-    diff <- sum((mx %*% ebeta - my[, -nC])^2) + (const * sum(gamma^2))
-  }
-
-  ## Trap bad values
-  if (is.null(diff))
-    diff <- 9999999;
-  if (!is.finite(diff))
-    diff <- 9999999;
-  
-  diff
-}
-
-
-# Penalized Least Square Minimizer
-# PARAMS.ESTIM
-# Estimates parameters minimizing the penalized least squares criterion
-# x       - index (optional, for bootstrapping)
-# data    - marginals (optionally with covariates)
-# nR      - number of rows
-# nC      - number of columns
-# const   - weight for penalty
-# parSeed - Seed for parameters (optional)
-
-paramsei.estim <- function(data, covar=NULL, nR, nC, const=0.001, 
-                           parSeed=NULL) {
-  
-  normalizedata <- function(dt){
-    s <- apply(dt,1,sum)
-    return (dt/s) 
-  }
-
-  mc <- match.call(expand.dots = TRUE)
-  #print(colnames(data))
-
-  socialnames <- colnames(data[,1:nR])
-  mx <- as.matrix(data[,1:nR])
-  mx <- normalizedata(mx)
-  for (i in 1:nrow(mx)){
-    if (round(sum(mx[i,]), digits=4) != 1){
-      stop ("The Sum of each row marginal (\"social classes\") should be 1. It seems like you have a problem in the row nr. ", i)
-    }
-  }
-  partiesnames <- colnames(data[,(nR+1):(nR+nC)])
-  my <- as.matrix(data[,(nR+1):(nR+nC)])
-  my <- normalizedata(my)
-  for(i in 1:nrow(my)){
-    if (round(sum(my[i,]),digits=4)!= 1){
-      stop ("The Sum of each column marginal (\"parties\") should be 1. It seems like you have a problem in the row nr. ", i)
-    }
-  }
-  nP <- nrow(data)
-
-  coef.names<-c()
-  for(i in 1:(nR)){
-    for(j in 1:(nC-1)){
-      coef.names <- c(coef.names, paste(socialnames[[i]], ".",
-                                        partiesnames[[j]], sep = ""))
-    }
-  }
-  if(!is.null(covar)){
-    if(is.null(parSeed)) parSeed = rnorm(2*nR*(nC-1))
-    for (i in 1:(nR*(nC-1)))
-      coef.names<-c(coef.names,paste("delta",i,sep=""))
-    names(parSeed)<-coef.names
-  }
-  else {
-    if(is.null(parSeed))  parSeed = rnorm(nR*(nC-1))
-    names(parSeed)<-coef.names
-  }
-
-  fit <- optim(parSeed, fn = call.difp, method="L-BFGS-B", hessian=TRUE,
-               covar = covar, nR = nR, nC = nC, nP = nP, mx = mx,
-               my = my, const = const)
-  fit$coefficients <- fit$par
-  fit$cov1<-diag(1/diag(fit$hessian))
-  fit$vcov <- solve(fit$hessian)
-  fit$hessian<-fit$hessian
-  fit$terms <- attr(all.vars, "terms")
-  fit$call <- mc
-  fit$contrasts <- attr(all.vars, "contrasts")
-  fit$xlevels <- attr(all.vars, "xlev")
-  fit$levels <- attr(all.vars, "lev")
-  fit$dims <- c(nR, nC, nP)
-  fit$covar <- covar
-  fit$socialnames <- socialnames
-  fit$partiesnames <- partiesnames
-  class(fit) <- "eiRxC"
-  return(fit)
-}
-
-
-# Calculate Fractions
-# CALC.FRACTIONS
-# Calculate fractions from the parameters
-# p     - parameters
-# nR    - number of rows
-# nC    - number of columns
-# covar - (Optional) Vector of covariates
-
-calc.fractions <- function(object, simpar) {
-  nR <- object$dims[1]
-  nC <- object$dims[2]
-  d <- array(0, dim = nR*(nC-1))
-  if (!is.null(object$covar)) {
-    covar<-as.matrix(object$covar)
-    nP <- nrow(covar)
-    ests <- array(0, dim = c(nR, nC, nP),
-                  dimnames=list(object$socialnames,
-                    object$partiesnames,
-                    c(1:nP)))
-    d <-object$coefficients[(nR*(nC-1)+1):(2*nR*(nC-1))]
-    for(i in 1:nP) {
-      estsTmp<- array(0, dim = c(nR, nC, nrow(simpar)))
-      for(j in 1:nrow(simpar)){
-        g <- simpar[j,1:(nR*(nC-1))]
-        p.exp <- exp(g + d*covar[i])
-        p.matrix <- matrix(p.exp, nrow = nR, byrow = TRUE)
-   
-        p.sums <- apply(p.matrix, 1, sum)
-        p.sums <- p.sums + 1
-        p.less <- p.matrix/p.sums
-        estsTmp[,,j] <- cbind(p.less, 1 - apply(p.less, 1, sum))
-      }
-      ests[,,i]<-apply(estsTmp,c(1,2),mean)
-    } 
-  }
-  else {
-    ests <- array(0, dim = c(nR, nC, nrow(simpar)),
-                  dimnames=list(object$socialnames,
-                    object$partiesnames,
-                    c(1:nrow(simpar))))
-    for(i in 1:nrow(simpar)){
-      g<-simpar[i,1:(nR*(nC-1))]
-      p.matrix <- matrix(exp(g), nrow = nR, byrow = TRUE)
-      p.sums <- apply(p.matrix, 1, sum)
-      p.sums <- p.sums + 1
-      p.less <- p.matrix / p.sums
-      ests[,,i] <- cbind(p.less, 1 - apply(p.less, 1, sum))
-    }
-  }
-  return (ests)
-}
-
-
-cmei <- function(formula, data, covar = NULL, ...){
-  if (is.null(rownames(data))) {
-    rownames(data) <- 1:nrow(data)
-    assign(data, as.character(data), envir = .GlobalEnv)
-  }
-  res<-NULL
-  myVars<-all.vars(formula[[2]])
-  mxVars<-all.vars(formula[[3]])
-  allVars<-c(mxVars,myVars)
-  nR<-length(mxVars)
-  nC <-length(myVars)
-  if(!(is.null(covar))){
-    covar <- model.frame(covar, data)
-    res$covar<-as.matrix(covar)
-  }
-  else
-    res$covar<-NULL
-  res$data<-data[,allVars]
-  res$nR<-nR
-  res$nC<-nC
-  return(res)
-}
diff --git a/R/exp.R b/R/exp.R
new file mode 100644
index 0000000..d3ca1bc
--- /dev/null
+++ b/R/exp.R
@@ -0,0 +1,140 @@
+#' Interface between the Zelig Model exp and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param ... additional parameters
+#' @param robust a boolean specifying whether to use robust error estimates
+#' @param cluster a vector describing the clustering of the data
+#' @param data a data.frame 
+#' @return a list specifying '.function'
+#' @export
+zelig2exp <- function (formula, ..., robust = FALSE, cluster = NULL, data) {
+
+  loadDependencies("survival")
+
+  if (!(is.null(cluster) || robust))
+    stop("If cluster is specified, then `robust` must be TRUE")
+
+  # Add cluster term
+  if (robust || !is.null(cluster))
+    formula <- cluster.formula(formula, cluster)
+
+  # Return
+  z(
+    .function = "survreg",
+    formula = formula,
+    dist = "exponential",
+    robust = robust,
+    data = data,
+    ...
+    )
+}
+
+
+stratify.rqs <- function (obj) {
+  x <- vector("list", length(obj$tau))
+
+  for(i in 1:length(obj$tau)) {
+    xi <- obj
+
+    xi$coefficients <- xi$coefficients[, i]
+    xi$residuals <- xi$residuals[, i]
+    xi$tau <- xi$tau[i]
+    class(xi) <- "rq"
+
+    x[[i]] <- xi 
+  }
+
+  names(x) <- obj$tau
+  x
+}
+#' Param Method for the \code{exp} Zelig Model
+#' @note This method is used by the \code{param} Zelig model
+#' @usage \method{param}{exp}(obj, num, ...)
+#' @S3method param exp
+#' @param obj a 'zelig' object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... ignored parameters
+#' @return a list to be cast as a 'parameters' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.exp <- function(obj, num=1000, ...) {
+  cov <- vcov(.object)
+  mu <- coef(.object)
+
+  # Return
+  list(
+       coef = mvrnorm(num, mu=mu, Sigma=cov),
+       linkinv = survreg.distributions[["exponential"]]$itrans
+       )
+}
+#' Compute quantities of interest for 'exp' Zelig models
+#' @usage \method{qi}{exp}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi exp
+#' @param obj a 'zelig' object
+#' @param x a 'setx' object or NULL
+#' @param x1 an optional 'setx' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#' though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of
+#' interest with their simulations
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.exp <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+
+  linkinv <- linkinv(param)
+
+  # Compute Expected Values for the "exp" Regression
+  # @param simulations 
+  # @param x
+  # @return a matrix
+  compute.ev <- function (simulations, x) {
+
+    if (is.null(x) || is.na(x))
+      # If there are missing explanatory variables, ignore them
+      return(NA)
+
+    # Compute eta, which is the "flattened" prediction.
+    # This value must be *inverted* to be restored to the true "observed" value
+    eta <- simulations %*% t(x)
+
+    # Return as a matrix, since this should be a vector at this point.
+    as.matrix(apply(eta, 2, linkinv))
+  }
+
+
+  # Compute Predicted Values
+  compute.pv <- function (ev, param) {
+    rexp(length(ev), rate = 1/ev)
+  }
+
+
+  # Compute expected values for X and X1
+  ev1 <- compute.ev(coef(param), x)
+  ev2 <- compute.ev(coef(param), x1)
+
+  # Compute Predicted values for X and X1
+  pr1 <- compute.pv(ev1, x)
+  pr2 <- compute.pv(ev2, x1)
+
+  # Return quantities of Interest
+  list("Expected Values: E(Y|X)"  = ev1,
+       "Expected Values: E(Y|X1)" = ev2,
+       "Predicted Values: Y|X"    = pr1,
+       "Predicted Values: Y|X1"   = pr2,
+       "First Differences: E(Y|X1) - E(Y|X)" = ev2 - ev1
+       )
+}
+#' Describe a ``exp'' model to Zelig
+#' @usage \method{describe}{exp}(...)
+#' @S3method describe exp
+#' @param ... ignored parameters
+#' @return a list to be processed by `as.description'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+describe.exp <- function(...) {
+  list(
+       authors = c("Olivia Lau", "Kosuke Imai", "Gary King"),
+       year = 2011,
+       category = "bounded",
+       text = "Exponential Regression for Duration Dependent Variables"
+       )
+}
diff --git a/R/factor.bayes.R b/R/factor.bayes.R
new file mode 100644
index 0000000..cf55d4b
--- /dev/null
+++ b/R/factor.bayes.R
@@ -0,0 +1,55 @@
+#' @export
+zelig2factor.bayes <- function (
+                                formula, 
+                                factors = 2,
+                                burnin = 1000, mcmc = 20000, 
+                                verbose=0, 
+                                ..., 
+                                data
+                                ) {
+
+  loadDependencies("MCMCpack", "coda")
+
+  if (missing(verbose))
+    verbose <- round((mcmc + burnin)/10)
+
+  if (factors < 2)
+    stop("Number of factors needs to be at least 2")
+
+  x <- as.matrix(model.response(model.frame(formula, data=data, na.action=NULL)))
+
+  list(
+       .function = "MCMCfactanal",
+       .hook = "McmcHookFactor",
+
+       formula = formula,
+       x = x,
+       burnin = burnin,
+       mcmc   = mcmc,
+       verbose= verbose,
+       data   = data,
+       factors = factors,
+       ...
+       )
+}
+
+#' @S3method param factor.bayes
+param.factor.bayes <- function (...) {
+}
+
+#' @S3method qi factor.bayes
+qi.factor.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+  stop('There is no qi function for the "factor.bayes" model')
+  list(
+       "Expected Value: E(Y|X)" = NA
+       )
+}
+
+#' @S3method describe factor.bayes
+describe.factor.bayes <- function(...) {
+  list(
+       authors = c("Ben Goodrich", "Ying Lu"),
+       text = "Bayesian Factor Analysis",
+       year = 2013
+       )
+}
diff --git a/R/formula.vglm.R b/R/formula.vglm.R
deleted file mode 100644
index 4d61970..0000000
--- a/R/formula.vglm.R
+++ /dev/null
@@ -1,3 +0,0 @@
-formula.vglm <- function(x, ...) {
-  x at call$formula
-}
diff --git a/R/gamma.R b/R/gamma.R
new file mode 100644
index 0000000..4b6e86b
--- /dev/null
+++ b/R/gamma.R
@@ -0,0 +1,131 @@
+#' Interface between gamma model and Zelig
+#' This function is exclusively for use by the `zelig' function
+#' @param formula a formula
+#' @param ... ignored parameters
+#' @param data a data.frame
+#' @return a list to be coerced into a zelig.call object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+zelig2gamma <- function(formula, ..., data)
+  z(
+    glm,
+    # .hook = "robust.glm.hook",
+
+    formula = formula,
+    family  = Gamma(),
+    model   = F,
+    data    = data
+    )
+#' param method for the `gamma' Zelig model
+#'
+#' Return parameter estimates for the ``gamma'' GLM in Zelig.
+#' @usage \method{param}{gamma}(obj, num, ...)
+#' @S3method param gamma
+#' @param obj a `zelig' object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... ignored parameters
+#' @return a list to be cast as a `parameters' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.gamma <- function(obj, num = 1000, ...) {
+  # Extract shape parameters, which will be used to simulate the ancillary
+  # parameters
+  shape <- gamma.shape(.object)
+
+  # Simulate ancillary parameters
+  alpha <- rnorm(n=num, mean=shape$alpha, sd=shape$SE)
+
+  #
+  list(
+       simulations  = mvrnorm(n=num, mu=coef(.object), Sigma=vcov(.object)),
+       alpha = alpha,
+       family = Gamma()
+       )
+}
+#' Compute quantities of interest for 'gamma' Zelig models
+#' @usage \method{qi}{gamma}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi gamma
+#' @param obj a \code{zelig} object
+#' @param x a 'setx' object or NULL
+#' @param x1 an optional 'setx' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#' though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of
+#' interest with their simulations
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.gamma <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
+  # Get parameters
+  shape <- gamma.shape(.fitted)
+  alpha <- rnorm(num, mean = shape$alpha, sd = shape$SE)
+  coef <- coef(param)
+
+
+  # Compute eta
+  eta <- coef %*% t(x)
+
+  # Compute theta (apply inverse)
+  theta <- matrix(1/eta, nrow = nrow(coef))
+
+  ev <- theta
+  pr <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
+
+  # Default to not available
+  ev1 <- pr1 <- fd <- NA
+
+  # Compute predicted values
+  for (i in 1:nrow(ev))
+    pr[i,] <- rgamma(
+                     ncol(ev),
+                     shape = alpha[i],
+                     scale = theta[i,]/alpha[i]
+                     )
+
+  # if x1 is not NULL, run more simulations
+  # ...
+
+  if (!is.null(x1)) {
+
+    eta1 <- coef %*% t(x1)
+    ev1 <- theta1 <- matrix(1/eta1, nrow = nrow(coef))
+    pr1 <- matrix(NA, nrow = nrow(theta1), ncol = ncol(theta1))
+
+    for (i in 1:nrow(ev1))
+      pr1[i, ] <- rgamma(ncol(ev1), shape = alpha[i], scale = theta1[i,]/alpha[i])
+
+    fd <- ev1 - ev
+  }
+
+  # Return
+  list("Expected Values: E(Y|X)"  = ev,
+       "Expected Values: E(Y|X1)" = ev1,
+       "Predicted Values: Y|X"    = pr,
+       "Predicted Values: Y|X1"   = pr1,
+       "First Differences: E(Y|X1) - E(Y|X)" = fd
+       )
+}
+#' Describe the \code{gamma} model to Zelig
+#' @usage \method{describe}{gamma}(...)
+#' @S3method describe default
+#' @param ... ignored parameters
+#' @return a list of important information
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+describe.gamma <- function(...) {
+  # parameters object
+  parameters <- list(lambda = list(
+                       equations = c(1, 1),
+                       tags.allowed = FALSE,
+                       dep.var = TRUE,
+                       exp.var = TRUE
+                       )
+                     )
+
+  # return list
+  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+       year     = 2007,
+       category = "bounded",
+       parameters = parameters,
+       text = "Gamma Regression for Continuous, Positive Dependent Variables"
+       )
+}
diff --git a/R/gamma.gee.R b/R/gamma.gee.R
new file mode 100644
index 0000000..a3ea284
--- /dev/null
+++ b/R/gamma.gee.R
@@ -0,0 +1,161 @@
+#' Interface between the Zelig Model gamma.gee and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param id a character-string specifying the column of the data-set to use
+#'   for clustering
+#' @param robust a logical specifying whether to robustly or naively compute
+#'   the covariance matrix. This parameter is ignore in the \code{zelig2}
+#'   method, and instead used in the \code{robust.hook} function, which
+#'   executes after the call to the \code{gee} function
+#' @param ... ignored parameters
+#' @param R a square-matrix specifying the correlation
+#' @param corstr a character-string specifying the correlation structure
+#' @param data a data.frame 
+#' @return a list specifying the call to the external model
+#' @export
+zelig2gamma.gee <- function (formula, id, robust = FALSE, ..., R = NULL, corstr = "independence", data) {
+
+  loadDependencies("gee")
+
+  if (corstr == "fixed" && is.null(R))
+    stop("R must be defined")
+
+  # if id is a valid column-name in data, then we just need to extract the
+  # column and re-order the data.frame and cluster information
+  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
+    id <- data[, id]
+    data <- data[order(id), ]
+    id <- sort(id)
+  }
+
+  z(
+    .function = gee,
+    .hook = robust.gee.hook,
+
+    formula = formula,
+    id = id,
+    corstr = corstr,
+    family  = Gamma,
+    data = data,
+    ...
+    )
+}
+
+#' @S3method param gamma.gee
+param.gamma.gee <- function(obj, num=1000, ...) {
+
+  # Extract means to compute maximum likelihood
+  mu <- coef(.fitted)
+
+  # Extract covariance matrix to compute maximum likelihood
+  Sigma <- .fitted$naive.variance
+
+
+  #
+  list(
+       coef = mvrnorm(num, mu, Sigma),
+       fam = Gamma()
+       )
+}
+
+#' @S3method qi gamma.gee
+qi.gamma.gee <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+
+  coef <- coef(param)
+  inverse <- linkinv(param)
+
+  eta1 <- coef %*% t(x)
+  ev1 <- theta1 <- matrix(inverse(eta1), nrow=num)
+
+  # default to NA
+  ev2 <- fd <- NA
+
+  if (!is.null(x1)) {
+    eta2 <- coef %*% t(x1)
+    ev2 <- theta1 <- matrix(inverse(eta2), nrow=num)
+
+    fd <- ev2 - ev1
+  }
+
+  list(
+       "Expected Values (for x): E(Y|X)"   = ev1,
+       "Expected Values (for x1): E(Y|X1)" = ev2,
+       "First Differences: E(Y|X1) - E(Y|X)" = fd
+       )
+}
+
+#' @S3method describe gamma.gee
+describe.gamma.gee <- function(...) {
+  list(
+       authors = "Patrick Lam",
+       text = "General Estimating Equation for Gamma Regression",
+       year = 2011
+       )
+}
+
+# Remove Negative Simulations from Gamma GEE Parameter Simulations
+# @param object a \code{zelig} object
+# @param x a \code{setx} object
+# @param x1 a \code{setx} object
+# @param bootstrap a logical specifying whether the model is using a boot function
+# @param bootfn the boot function
+# @param data a data.frame used to simulated parameters
+# @param param the original \code{param} object
+# @param num an integer specifying the number of simulations to produce
+clean.up.gamma.gee <- function(object, x, x1=NULL,
+                            bootstrap = FALSE, bootfn = NULL,
+                            data = NULL,
+                            param, num = 1000) {
+  coef <- coef(param)
+  eta <- coef %*% t(x)
+
+  if(!is.null(x1))
+    eta1 <- coef %*% t(x1)
+  else
+    eta1 <- NULL
+
+  # define good.parameters (???)
+  good.params <- function(par, x, x1=NULL) {
+    eta <- par %*% t(x)
+    if(!is.null(x1)) {
+      eta1 <- par %*% t(x1)
+      pos <- which(eta>0 & eta1>0)
+    }
+    else {
+      pos <- which(apply(eta > 0,1,all))
+    }
+
+    matrix(par[pos,], nrow=length(pos), ncol=ncol(par))
+  }
+
+
+
+      if(length(which(apply(eta<=0,1,any)))>0 | (!is.null(eta1) & any(eta1<=0))){
+              warning(paste("Negative expected values in simulations.  Rejection sampling method used."))
+              sum.neg <- length(which(apply(eta<=0,1,any)))
+              coef <- good.params(par=coef, x=x, x1=x1)
+              counter <- 1
+              while(sum.neg > 0){
+                      if(!bootstrap)
+                              new.coef <- matrix(mvrnorm(sum.neg, mu = coef(object), Sigma = vcov(object)), nrow=sum.neg)
+			#else
+			#	new.coef <- matrix(boot(data, bootfn, R = sum.neg, object = object)$t, nrow=sum.neg)
+				
+			new.coef <- good.params(par=new.coef, x=x, x1=x1)
+			coef <- rbind(coef, new.coef)	
+			sum.neg <- num - nrow(coef)
+			counter <- counter + 1
+			if(counter==200)
+				warning(paste("Suitable parameters not found after 200 iterations of rejection sampling.  Iterations will continue, but choosing another x is suggested for non-conditional prediction models."))
+			if(counter==2000)
+				stop("Rejection sampling stopped after 2000 iterations.  Please choose another x value.")
+		}
+	}
+
+  #
+  list(
+       coefficients=coef,
+       fam=Gamma(),
+       linkinv = Gamma()$linkinv
+       )
+}
diff --git a/R/gamma.survey.R b/R/gamma.survey.R
new file mode 100644
index 0000000..e972497
--- /dev/null
+++ b/R/gamma.survey.R
@@ -0,0 +1,176 @@
+#' @export
+zelig2gamma.survey <- function(
+                               formula,
+                               weights=NULL, 
+                               ids=NULL,
+                               probs=NULL,
+                               strata = NULL,  
+                               fpc = NULL,
+                               nest = FALSE,
+                               check.strata = !nest,
+                               repweights = NULL,
+                               type,
+                               combined.weights = FALSE,
+                               rho = NULL,
+                               bootstrap.average = NULL, 
+                               scale = NULL,
+                               rscales = NULL,
+                               fpctype = "fraction",
+                               return.replicates=FALSE,
+                               na.action = "na.omit",
+                               start = NULL,
+                               etastart = NULL, 
+                               mustart = NULL,
+                               offset = NULL, 	      		
+                               model1 = TRUE,
+                               method = "glm.fit",
+                               x = FALSE,
+                               y = TRUE,
+                               contrasts = NULL,
+                               design = NULL,
+                               link = "inverse",
+                               data,
+                               ...
+                               ) {
+
+  loadDependencies("survey")
+
+  if (is.null(ids))
+    ids <- ~1
+
+  # the following lines designate the design
+  # NOTE: nothing truly special goes on here;
+  #       the below just makes sure the design is created correctly
+  #       for whether or not the replication weights are set
+  design <- if (is.null(repweights)) {
+    svydesign(
+              data=data,
+              ids=ids,
+              probs=probs,
+              strata=strata,
+              fpc=fpc,
+              nest=nest,
+              check.strata=check.strata,
+              weights=weights
+              )
+  }
+
+  else {
+    # Using the "z" function stores this implicitly in a namespace
+    .survey.prob.weights <- weights
+    
+    # 
+    svrepdesign(
+                data=data,
+                repweights=repweights, 	
+                type=type,
+                weights=weights,
+                combined.weights=combined.weights, 
+                rho=rho,
+                bootstrap.average=bootstrap.average,
+                scale=scale,
+                rscales=rscales,
+                fpctype=fpctype,
+                fpc=fpc
+                )
+  }
+
+  z(.function = svyglm,
+    formula = formula,
+    design  = design,
+    family  = Gamma()
+    )
+}
+
+#' @S3method param gamma.survey
+param.gamma.survey <- function(obj, num=1000, ...) {
+  shape <- gamma.shape(.fitted)
+
+  list(
+       # .fitted is the fitted model object
+       simulations = mvrnorm(num, coef(.fitted), vcov(.fitted)),
+       alpha = rnorm(num, shape$alpha, shape$SE),
+       fam   = Gamma()
+       )
+}
+
+#' @S3method qi gamma.survey
+qi.gamma.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
+  model <- GetObject(obj)
+
+  coef <- coef(param)
+  alpha <- alpha(param)
+
+  eta <- coef %*% t(x)
+
+  link.inverse <- linkinv(param)
+
+  theta <- matrix(link.inverse(eta), nrow=nrow(coef))
+
+  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))
+
+  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
+
+
+  ev <- theta
+
+  for (i in 1:nrow(ev)) {
+    pr[i,] <- rgamma(
+                     n     = length(ev[i,]),
+                     shape = alpha[i],
+                     scale = theta[i,]/alpha[i]
+                     )
+  }
+
+
+  # ensure these are no-show
+  pr1 <- ev1 <- fd <- NA
+
+  
+  # if x1 is available
+  if (!is.null(x1)) {
+    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)), nrow(coef))
+    fd <- ev1-ev
+  }
+
+
+  # ensure these are no-show
+  att.pr <- att.ev <- NA
+
+
+  # I have no clue if this even works
+  if (!is.null(y)) {
+
+    yvar <- matrix(
+                   rep(y, nrow(param)),
+                   nrow = nrow(param),
+                   byrow = TRUE
+                   )
+    
+    tmp.ev <- yvar - ev
+    tmp.pr <- yvar - pr
+
+    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(param))
+    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(param))
+  }
+
+
+  list(
+       "Expected Values: E(Y|X)" = ev,
+       "Expected Values for (X1): E(Y|X1)" = ev1,
+       "Predicted Values: Y|X" = pr,
+       "Predicted Values (for X1): Y|X1" = pr1,
+       "First Differences E(Y|X1)-E(Y|X)" = fd,
+       "Average Treatment Effect: Y-EV" = att.ev,
+       "Average Treatment Effect: Y-PR" = att.pr
+       )
+}
+
+#' @S3method describe gamma.survey
+describe.gamma.survey <- function(...) {
+  list(
+       authors = "Nicholas Carnes",
+       year = 2008,
+       description = "Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables"
+       )
+}
diff --git a/R/get.R b/R/get.R
deleted file mode 100644
index 7a2f96e..0000000
--- a/R/get.R
+++ /dev/null
@@ -1,43 +0,0 @@
-getzelig <- function(x) {
-  if (zeligS4check(x)) return(eval(x at call$model))
-  else return(eval(x$call$model))
-}
-
-getcall <- function(x) {
-  if (zeligS4check(x)) {
-     return(x at call)
-  } else {
-      return(x$call)
-  }
-}
-
-getcoef <- function(x) {
-  if (zeligS4check(x)) {
-    if ("coef3" %in% slotNames(x)) {
-        return(x at coef3)
-    } else {
-        return(x at coef)
-    }
-  } else {
-      return(x$coef)
-  }
-}
-
-getdata <- function(x) {
-  if (zeligS4check(x)) {
-    if ("data" %in% slotNames(x)) {
-      return(x at data)
-    } else if ("model" %in% slotNames(x)){
-       return(x at model)
-    } else {
-       return(NULL)
-    }
-  } else {
-      return(x$zelig.data)
-  }
-}
-
-
-zeligS4check <- function(obj){
-   return(isS4(obj))
-}
diff --git a/R/get.package.R b/R/get.package.R
new file mode 100644
index 0000000..b2bc1fb
--- /dev/null
+++ b/R/get.package.R
@@ -0,0 +1,94 @@
+#' Find the Zelig package that a particular model belong to
+#'
+#' This method is used to help transition Zelig v3.5 users to Zelig v4
+#' @param model a character-string specifying a Zelig model
+#' @param quiet a logical indicating whether to display messages and warnings
+#' @param ... ignored parameters
+#' @return NA or a character-string specifying the name of the package which 
+#' contains a specific model
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+get.package <- function (model, quiet=TRUE, ...) {
+
+  # Bad variable-types return NULL
+  if (is.null(model))
+    return(NA)
+
+  else if (!is.character(model))
+    return(NA)
+
+  else if (length(model) != 1)
+    return (NA)
+
+  # Create list (auto-generated using another script).
+  # This is a copy-and-paste of the list below
+  descr <- c(
+    gamma = "Zelig",
+    logit = "Zelig",
+    ls = "Zelig",
+    negbinom = "Zelig",
+    normal = "Zelig",
+    poisson = "Zelig",
+    probit = "Zelig",
+
+    gamma.gee = "Zelig",
+    logit.gee = "Zelig",
+    normal.gee = "Zelig",
+    poisson.gee = "Zelig",
+    probit.gee = "Zelig",
+
+    factor.bayes = "Zelig",
+    logit.bayes = "Zelig",
+    mlogit.bayes = "Zelig",
+    normal.bayes = "Zelig",
+    oprobit.bayes = "Zelig",
+    poisson.bayes = "Zelig",
+    probit.bayes = "Zelig",
+
+    aov = "Zelig",
+    sur = "Zelig",
+    twosls = "Zelig",
+    threesls = "Zelig",
+
+    blogit = "ZeligChoice",
+    bprobit = "ZeligChoice",
+    mlogit = "ZeligChoice",
+    mprobit = "ZeligChoice",
+    ologit = "ZeligChoice",
+    oprobit = "ZeligChoice",
+
+
+
+    logit.gam = "ZeligGAM",
+    normal.gam = "ZeligGAM",
+    poisson.gam = "ZeligGAM",
+    probit.gam = "ZeligGAM",
+
+    gamma.mixed = "ZeligMultilevel",
+    logit.mixed = "ZeligMultilevel",
+    ls.mixed = "ZeligMultilevel",
+    normal.mixed = "ZeligMultilevel",
+    poisson.mixed = "ZeligMultilevel",
+    probit.mixed = "ZeligMultilevel",
+
+    gamma.survey = "ZeligSurvey",
+    logit.survey = "ZeligSurvey",
+    normal.survey = "ZeligSurvey",
+    poisson.survey = "ZeligSurvey",
+    probit.survey = "ZeligSurvey",
+
+    cloglog.net = "ZeligNetwork",
+    gamma.net = "ZeligNetwork",
+    logit.net = "ZeligNetwork",
+    ls.net = "ZeligNetwork",
+    negbinom.net = "ZeligNetwork",
+    normal.net = "ZeligNetwork",
+    poisson.net = "ZeligNetwork",
+    probit.net = "ZeligNetwork"
+  )
+
+  if (model %in% names(descr))
+    descr[[model]]
+
+  else
+    NA
+}
diff --git a/R/getPredictorTerms.R b/R/getPredictorTerms.R
new file mode 100644
index 0000000..f7e9723
--- /dev/null
+++ b/R/getPredictorTerms.R
@@ -0,0 +1,45 @@
+#' Get Predictor Terms from Zelig-style Formulae
+#'
+#' This function extracts the predictor terms from a Zelig-style object.
+#' @note This function is used exclusively in the development of Zelig-core.
+#' @param x a Zelig-style formula ('formula' or 'list')
+#' @param ... ignored parameters
+#' @return a character-vector or NA
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+getPredictorTerms <- function (x, ...) {
+  # The following functions are unsafe for general input, so they are being
+  # kept as nested functions.
+
+  # Extract "predictor" terms from a formula
+  # @param x a formula
+  # @param ... ignored parameters
+  # @return a character-vector specifying the predictor terms of the formula
+  # @author Matt Owen
+  extractFromFormula <- function (form, ...) {
+    TERMS <- terms(form)
+    attr(TERMS, "term.labels")
+  }
+
+  # Extract "predictor" terms from a list of formulae
+  # @param x a list
+  # @param ... ignored parameters
+  # @return a character-vector specifying the predictor terms of each formula
+  # @author Matt Owen
+  extractFromList <- function (x, ...) {
+    as.vector(unlist(Map(extractFromFormula, x)))
+  }
+
+  # Beginning of work for function
+  if (is.list(x))
+    unique(extractFromList(x))
+
+  else if ("formula" %in% class(x))
+    unique(extractFromFormula(x))
+
+  else {
+    warning("The model formula must either ",
+            "be a list of formula to work properly")
+    NA
+  }
+}
diff --git a/R/getResponseTerms.R b/R/getResponseTerms.R
new file mode 100644
index 0000000..0c83a8a
--- /dev/null
+++ b/R/getResponseTerms.R
@@ -0,0 +1,10 @@
+#' Get Response Terms from a Zelig-style Formula
+#'
+#' This method acquires the response variables from Zelig-style input.
+#' @param x a formula or list of formulae
+#' @param ... ignored parameters
+#' @return a character-vector specifying the response terms in this formula
+#' @export
+getResponseTerms <- function (x, ...) {
+  UseMethod("getResponseTerms")
+}
diff --git a/R/getResponseTerms.formula.R b/R/getResponseTerms.formula.R
new file mode 100644
index 0000000..20eeaa2
--- /dev/null
+++ b/R/getResponseTerms.formula.R
@@ -0,0 +1,130 @@
+#' Get Response Terms from a Standard Formula
+#'
+#' This method gets the response terms from a standard formula
+#' @usage
+#' \method{getResponseTerms}{formula}(x, ..., single.only=FALSE, duplicates=TRUE)
+#' @param x a formula
+#' @param ... ignored parameters
+#' @param single.only a logical specifying whether 'cbind' or 'list' keywords
+#' are allowed
+#' @param duplicates a logical specifying whether the returned character-vector
+#' will only return duplicates.
+#' @return a character-vector specifying the response terms of the formula
+#' @S3method getResponseTerms formula
+#' @author Matt Owen
+getResponseTerms.formula <- function (x, ..., single.only=FALSE, duplicates=TRUE)
+{
+  # Handle formulae that are missing a left- or right-hand side
+  handle.formula.err <- function (e) {
+    message("\n\n")
+    message("The formula ", x, " seems to have no dependent variables")
+    stop("The formula for the ")
+  }
+
+  rhs <- tryCatch(x[[3]], error = handle.formula.err)
+  lhs <- tryCatch(x[[2]], error = handle.formula.err)
+
+  # Response terms are always specified in the left-hand side of the equation
+  if (is.name(lhs)) {
+    # If the lhs is a name, this implies it's a single variable with no function
+    # applied to it. Thus, it's a term.
+    return(tryCatch(
+                    callToString(lhs),
+                    error = function (e) as.character(lhs)
+           ))
+  }
+
+  # Otherwise, it is either a function being applied or the keywords "cbind" or
+  # "list"
+  op <- callToString(lhs[[1]])
+
+  if (op %in% c("cbind", "list")) {
+
+    if (single.only) {
+      # If only single outcome response terms are allowed, then 'cbind' and
+      # 'list' cannot be used.
+      warning("'cbind' and 'list' may not be used ",
+              "in this formula specification.")
+      return(vector("character", 0))
+    }
+
+    # If it is one of the keywords, we extract these terms individually
+    lis <- as.list(lhs[-1])
+    lis <- unlist(Map(callToString, lis))
+
+    if (!duplicates)
+      # If duplicates flag is FALSE, remove all duplicate entries
+      lis <- unique(lis)
+
+    # Remove all empty strings and return
+    Filter(nchar, lis)
+  }
+
+  else {
+    # Otherwise, we can treat them as one single term. That is the formula:
+    #   x + y ~ 1
+    # will have a single response term:
+    #   x + y
+    callToString(lhs)
+  }
+}
+
+
+
+#' Get Response Terms from a ``Formula'' Object
+#'
+#' This method gets the response terms from a ``Formula'' Object
+#' @rdname getResponseTerms.Formula-not-formula
+#' @aliases getResponse.Formula
+#' @usage
+#' \method{getResponseTerms}{Formula}(x, ..., single.only=FALSE, duplicates=TRUE)
+#' @param x a formula
+#' @param ... ignored parameters
+#' @param single.only a logical specifying whether 'cbind' or 'list' keywords
+#' are allowed
+#' @param duplicates a logical specifying whether the returned character-vector
+#' will only return duplicates.
+#' @return a character-vector specifying the response terms of the formula
+#' @S3method getResponseTerms Formula
+#' @author Matt Owen
+getResponseTerms.Formula <- function (x, ..., single.only=FALSE, duplicates=TRUE)
+{
+  # Create an empty list
+  list.formula <- list()
+
+  # This loop goes through all the list response and predictor terms and 
+  # creates a "Zelig-style" list based on it. This is so we can extract response
+  # and predictor terms with "getResponseTerms" and "getPredictorTerms" in a
+  # manageable way!
+  for (resp in attr(x, "lhs")) {
+    # Iterate through all response variables
+
+    for (pred in attr(x, "rhs")) {
+      # Iterate through all predictor variables
+
+      # Append response variable and predictor terms
+      # "ccc" is probably going to be convention for a call object in Zelig
+      # models since "CALL", "call", "Call" all seem too similar to "call".
+      # And we need to break
+      ccc <- call("~", resp, pred)
+
+      # Cast from a "call" object to a "formula" object
+      ccc <- as.formula(ccc)
+
+      # Append to list
+      list.formula <- append(list.formula, ccc)
+
+    }
+  }
+  
+  # Important to send 'single.only'/'duplicates' into this function
+  resp <- getResponseTerms(list.formula, ..., single.only, duplicates)
+
+  # Apply unique only if 'duplicates' is FALSE
+  # This ensures the list has the expected properties
+  if (duplicates)
+    resp
+
+  else
+    unique(resp)
+}
diff --git a/R/getResponseTerms.list.R b/R/getResponseTerms.list.R
new file mode 100644
index 0000000..8233f33
--- /dev/null
+++ b/R/getResponseTerms.list.R
@@ -0,0 +1,29 @@
+#' Get Response Terms from a List-style Formula
+#'
+#' This method gets the response terms from a standard formula
+#' @usage \method{getResponseTerms}{list}(x, ...)
+#' @param x a list of formulae
+#' @param ... ignored parameters
+#' @return a character-vector specifying the response terms of the formula
+#' @S3method getResponseTerms list
+#' @author Matt Owen
+getResponseTerms.list <- function (x, ...) {
+  if (! all(unlist(Map(is.formula, x)))) {
+    # If not all the elements are formulae, then we should strip them from 'x'
+    warning("All non-formula will be removed from this list.")
+
+    x <- Filter(is.formula, x)
+  }
+
+  if (length(x) == 0)
+    # Zero-sized lists will have no available response terms, and should thus
+    # return a zero-length character vector. Note this is intended to ensure
+    # the result of 'getResponseTerms' is always a character-string.
+    vector("character", 0)
+
+  else
+    # Get response terms of each element of 'x',
+    # then transform the list into a vector, which should always be flat, since
+    # getResponseTerms should always return a character-string
+    unique(unlist(Map(getResponseTerms, x, single.only=TRUE)))
+}
diff --git a/R/gsource.R b/R/gsource.R
deleted file mode 100644
index b4d8e7c..0000000
--- a/R/gsource.R
+++ /dev/null
@@ -1,15 +0,0 @@
-gsource <- function(var.names= NULL, variables) {
-  if (is.null(var.names)) {
-    cat(variables, file = ".Rtmp.dat", sep = "\n")
-    out <- read.table(".Rtmp.dat")
-  }
-  else {
-    cat(var.names, variables, file = ".Rtmp.dat", sep = "\n")
-    out <- read.table(".Rtmp.dat", header = TRUE)
-  }
-  unlink(".Rtmp.dat")
-  return(as.data.frame(out))
-}
-
-
-
diff --git a/R/help.zelig.R b/R/help.zelig.R
index 70f1064..577a550 100644
--- a/R/help.zelig.R
+++ b/R/help.zelig.R
@@ -1,5 +1,9 @@
+#' Help system for Zelig models
+#' @param ... the help files to look-up
+#' @return results of calling the specific help function
+#' @export
+#' @author Matt Owen \emph{mowen@@iq.harvard.edu}
 help.zelig <- function (...)  {
-
         driver  <- match.call()
         driver  <- as.character(driver)
         name <- NULL
@@ -59,4 +63,3 @@ help.zelig <- function (...)  {
         ##message("Not valid input...Showing package description")
         do.call("help", c(list("Zelig"), list(package="Zelig")), envir=parent.frame())
 }
-
diff --git a/R/ignore.R b/R/ignore.R
new file mode 100644
index 0000000..a795b8c
--- /dev/null
+++ b/R/ignore.R
@@ -0,0 +1,22 @@
+#' Constructor for the 'ignore' class
+#' This class is included for future use, and is currently
+#' not used in any Zelig model. It is designed for use with
+#' zelig2* functions
+#' @param default default value
+#' @param type ignored parameter
+#' @return an 'ignore' object
+#' @export
+#' @author Matt Owen \emph{mowen@@iq.harvard.edu}
+ignore <- function (default = NULL, type = "no pass") {
+
+  self <- default
+  class(self) <- "ignore"
+
+  # store information, set class, and return
+  self <- list(
+               default = default,
+               type    = type
+               )
+  class(self) <- "ignore"
+  self
+}
diff --git a/R/is.formula.R b/R/is.formula.R
new file mode 100644
index 0000000..2b1c1b8
--- /dev/null
+++ b/R/is.formula.R
@@ -0,0 +1,9 @@
+#' Whether an Object is a Formula
+#' 
+#' This is a boolean-check to see whether an object is a formula.
+#' @note This will not be shared in the Zelig/ZeligFormulae namespace.
+#' @param x an object
+#' @return a logical specifying whether an object is a formula
+#' @author Matt Owen
+is.formula <- function (x)
+  "formula" %in% class(x)
diff --git a/R/lag.eps.R b/R/lag.eps.R
deleted file mode 100644
index 80a23eb..0000000
--- a/R/lag.eps.R
+++ /dev/null
@@ -1,3 +0,0 @@
-lag.eps<- function(q, qs=NULL){
-  list(q=q, qs=qs, ep=TRUE, y=FALSE)
-}
diff --git a/R/lag.y.R b/R/lag.y.R
deleted file mode 100644
index 27774ec..0000000
--- a/R/lag.y.R
+++ /dev/null
@@ -1,3 +0,0 @@
-lag.y <- function(p, ps=NULL){
-  list(p=p, ps = ps, ep=FALSE, y=TRUE)
-}
diff --git a/R/list.depth.R b/R/list.depth.R
new file mode 100644
index 0000000..a0ba415
--- /dev/null
+++ b/R/list.depth.R
@@ -0,0 +1,35 @@
+#' Count the Depth of a List Object
+#'
+#' This function recursively computes the depth of a list object. That is, it
+#' determines how many layers or levels exist within the object.
+#' @note This function is used internally by Zelig.
+#' @param obj a vector or list object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+list.depth <- function (obj) {
+
+  # Stop-recursing conditions
+
+  if (length(obj) == 0)
+    return(0)
+
+  else if (is.atomic(obj))
+    # Atomic vectors can only have one level
+    return(1)
+
+  else if (!is.list(obj))
+    # If the object is not a list, then we have the option whether to compute
+    # the depth of its elements.
+    return(1)
+
+  # Produce a list of integers, specifying the depth of each element
+  results <- Map(list.depth, obj)
+
+  # Ensure that the result is a non-list
+  results <- unlist(results)
+
+  # Find the maximum, ensuring that the value is neither negative nor -Inf
+  max.depth <- max(results, 0)
+
+  # Add one for the level that we are on
+  1 + max.depth
+}
diff --git a/R/load.first.R b/R/load.first.R
deleted file mode 100644
index 9195c3d..0000000
--- a/R/load.first.R
+++ /dev/null
@@ -1,32 +0,0 @@
-.onAttach <- function(...) {
-  mylib <- dirname(system.file(package = "Zelig"))
-  ver <- packageDescription("Zelig", lib.loc = mylib)$Version
-  builddate <- packageDescription("Zelig", lib.loc = mylib)$Date
-
-  packageStartupMessage(
-    paste(
-          "## \n##  Zelig (Version ", ver, ", built: ", builddate, ")\n",
-          sep = ""
-          )
-  )
-  packageStartupMessage(
-      "##  Please refer to http://gking.harvard.edu/zelig for full\n",
-      "##  documentation or help.zelig() for help with commands and\n",
-      "##  models supported by Zelig.\n##\n\n", sep="")
-  packageStartupMessage(
-      "##  Zelig project citations:\n",
-      "##    Kosuke Imai, Gary King, and Olivia Lau. (2009).\n", 
-      "##    ``Zelig: Everyone's Statistical Software,''\n", 
-      "##    http://gking.harvard.edu/zelig.\n",
-      "##  and\n",
-      "##    Kosuke Imai, Gary King, and Olivia Lau. (2008).\n", 
-      "##    ``Toward A Common Framework for Statistical Analysis\n", 
-      "##    and Development,'' Journal of Computational and\n",
-      "##    Graphical Statistics, Vol. 17, No. 4 (December)\n", 
-      "##    pp. 892-913. \n\n",
-      "##  To cite individual Zelig models, please use the citation format printed with\n",
-      "##  each model run and in the documentation.\n##\n", sep="")
-
-  ## add viggnettes menu
-  addVigs2WinMenu("Zelig")
-}
diff --git a/R/logit.R b/R/logit.R
new file mode 100644
index 0000000..a1d25ea
--- /dev/null
+++ b/R/logit.R
@@ -0,0 +1,152 @@
#' Interface between logit model and Zelig
#'
#' This function is exclusively for use by the `zelig' function
#' @param formula a formula
#' @param weights a numeric vector
#' @param robust a boolean (logical) specifying whether robust error estimates
#' should be used. NOTE(review): this argument is accepted but never forwarded
#' to \code{glm}; confirm whether robust estimation is handled elsewhere
#' @param ... ignored parameters
#' @param data a data.frame
#' @return a list to be coerced into a zelig.call object
#' @export
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
zelig2logit <- function(formula, weights=NULL, robust = FALSE, ..., data) {
  # Copy `weights` under a new name so the symbol handed to glm's `weights`
  # argument does not collide with this function's formal during lazy
  # evaluation of the constructed call.
  w <- weights
  z(
    glm,
    formula = formula,
    weights = w,
    family  = binomial(link="logit"),
    # Use FALSE rather than F: T/F are ordinary variables that user code can
    # reassign, while TRUE/FALSE are reserved words.
    model   = FALSE,
    data    = data
    )
}
+
#' Param Method for the \code{logit} Zelig Model
#' @note This method is used by the \code{logit} Zelig model
#' @usage \method{param}{logit}(obj, num, ...)
#' @S3method param logit
#' @param obj a 'zelig' object
#' @param num an integer specifying the number of simulations to sample
#' @param ... ignored parameters
#' @return a list to be cast as a 'parameters' object
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
param.logit <- function(obj, num, ...) {
  # Draw `num` coefficient vectors from the asymptotic multivariate-normal
  # sampling distribution of the fitted coefficients.
  # Bug fix: the original referenced `.object`, which is not defined in this
  # function's scope; the method's argument is `obj`.
  list(
       simulations = mvrnorm(n=num, mu=coef(obj), Sigma=vcov(obj)),
       alpha       = NULL,
       fam = binomial(link="logit")
       )
}
+
#' Compute quantities of interest for 'logit' Zelig models
#' @usage \method{qi}{logit}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
#' @S3method qi logit
#' @param obj a 'zelig' object
#' @param x a 'setx' object or NULL
#' @param x1 an optional 'setx' object
#' @param y this parameter is reserved for simulating average treatment effects,
#' though this feature is currently supported by only a handful of models
#' @param num an integer specifying the number of simulations to compute
#' @param param a parameters object
#' @return a list of key-value pairs specifying pairing titles of quantities of
#' interest with their simulations
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
qi.logit <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {

  # Compute expected values: apply the inverse link to the linear predictor
  # formed from each simulated coefficient vector and the covariate profile.
  compute.ev <- function(obj, x=NULL, num=1000, param=NULL) {
    if (is.null(x))
      return(NA)

    coef <- coef(param)
    link.inverse <- linkinv(param)

    eta <- coef %*% t(x)
    # NOTE(review): Filter() flattens the matrix `eta` into a vector while
    # dropping NA entries; the matrix() calls below rebuild its shape.
    eta <- Filter(function (y) !is.na(y), eta)

    # `theta` is only consulted for its column count on the next line.
    theta <- matrix(link.inverse(eta), nrow = nrow(coef))

    ev <- matrix(link.inverse(eta), ncol=ncol(theta))

    ev
  }

  # Simulate quantities of interest for "x"
  ev1 <- compute.ev(obj, x, num, param)
  pr1 <- matrix(nrow=nrow(ev1), ncol=ncol(ev1))

  # Simulate the quantities of interest for "x1"
  ev2 <- compute.ev(obj, x1, num, param)
  pr2 <- fd <- NA

  
  # Produce 0 or 1 (FALSE/TRUE) results for "x": one Bernoulli draw per
  # simulated expected value, stored as the characters "0"/"1"
  for (i in 1:ncol(ev1))
    pr1[,i] <- as.character(rbinom(length(ev1[,i]), 1, ev1[,i]))

  # Produce 0 or 1 (FALSE/TRUE) results for "x1" and compute first-differences
  if (!is.null(x1)) {
    pr2 <- matrix(nrow=nrow(ev2), ncol=ncol(ev2))

    for (i in 1:ncol(ev2))
      pr2[,i] <- as.character(rbinom(length(ev2[,i]), 1, ev2[,i]))

    # This is the computation of the first difference...
    fd <- ev2 - ev1
  }

  # Ensure that the correct levels are passed along.
  # NOTE(review): this attaches a "levels" attribute to character matrices
  # (or to the scalar NA when x1 is absent); presumably downstream summary
  # code reads that attribute -- confirm.
  levels(pr1) <- levels(pr2) <- c('0', '1')

  # return
  list("Expected Values: E(Y|X)"  = ev1,
       "Expected Values: E(Y|X1)" = ev2,
       "Predicted Values: Y|X"    = pr1,
       "Predicted Values: Y|X1"   = pr2,
       "First Differences: E(Y|X1) - E(Y|X)" = fd
       )
}
+
# Module-level twin of the helper nested inside qi.logit: expected values
# from simulated coefficients and a covariate profile.
# @param obj unused here
# @param x a 'setx'-style object coercible by t(), or NULL
# @param num unused here
# @param param a 'parameters' object providing coef() and linkinv()
# @return a matrix of expected values, or NA when x is absent
.compute.ev <- function (obj, x=NULL, num=1000, param=NULL) {

  # Nothing to simulate without covariates.
  if (is.null(x))
    return(NA)

  sims <- coef(param)
  inverse.link <- linkinv(param)

  # Linear predictor: one row per simulated coefficient vector.
  lin.pred <- sims %*% t(x)

  # First pass fixes the row count; the second reshapes by that column count
  # (mirrors the original two-step construction exactly).
  shaped <- matrix(inverse.link(lin.pred), nrow = nrow(sims))
  matrix(inverse.link(lin.pred), ncol = ncol(shaped))
}
+
#' Describe a `logit' model to Zelig
#' @usage \method{describe}{logit}(...)
#' @S3method describe logit
#' @param ... ignored parameters
#' @return a list to be processed by `as.description'
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
#' @export
describe.logit <- function(...) {
  # Single systematic component ("pi"): one equation, tags not supported,
  # with both dependent and explanatory variables.
  pi.spec <- list(
    equations    = c(1, 1),
    tags.allowed = FALSE,
    dep.var      = TRUE,
    exp.var      = TRUE
  )

  # Citation and classification metadata consumed by `as.description'.
  list(
    authors    = c("Kosuke Imai", "Gary King", "Olivia Lau"),
    year       = 2008,
    category   = "dichotomous",
    parameters = list(pi = pi.spec),
    text       = "Logistic Regression for Dichotomous Dependent Variables"
  )
}
diff --git a/R/logit.bayes.R b/R/logit.bayes.R
new file mode 100644
index 0000000..387458f
--- /dev/null
+++ b/R/logit.bayes.R
@@ -0,0 +1,89 @@
#' Interface between the logit.bayes model and MCMCpack's MCMClogit
#' @export
zelig2logit.bayes <- function (formula, burnin = 1000, mcmc = 10000,
                               verbose = 0, ..., data) {

  # MCMClogit and its convergence diagnostics live in these packages.
  loadDependencies("MCMCpack", "coda")

  # When the caller gives no verbosity, report roughly ten times per chain.
  if (missing(verbose))
    verbose <- round((mcmc + burnin) / 10)

  # Describe the external call; "MCMChook" post-processes the sampler output.
  list(.function = "MCMClogit",
       .hook     = "MCMChook",
       formula   = formula,
       data      = data,
       burnin    = burnin,
       mcmc      = mcmc,
       verbose   = verbose,
       # All remaining arguments pass straight through to MCMClogit.
       ...)
}
+
+
#' @S3method param logit.bayes
param.logit.bayes <- function(obj, num=1000, ...) {
  # The MCMC fit already contains posterior draws, so the coefficients are
  # passed through untouched; only the link family is attached.
  posterior.draws <- coef(obj)
  list(coef = posterior.draws,
       fam  = binomial(link = "logit"))
}
+
#' @S3method qi logit.bayes
qi.logit.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {

  # Delegate to the shared helper, which yields both expected and predicted
  # values for a single covariate profile.
  qi.x  <- logit.ev(x, param)
  qi.x1 <- logit.ev(x1, param)

  # Assemble the labeled quantities of interest.
  list(
       "Expected Value: E(Y|X)" = qi.x$ev,
       "Predicted Value: Y|X" = qi.x$pv,
       "Expected Value (for X1): E(Y|X1)" = qi.x1$ev,
       "Predicted Value (for X1): Y|X1" = qi.x1$pv,
       "First Differences: E(Y|X1)-E(Y|X)" = qi.x1$ev - qi.x$ev
       )
}
+
# Helper for the logit.bayes model: compute expected values and Bernoulli
# predicted values for one covariate profile.
# @param x a 'setx' object coercible by t(), or NA/NULL when absent
# @param param a 'parameters' object providing coef() and linkinv()
# @return a list with `ev` (expected values) and `pv` (predicted values,
#   stored as the characters "0"/"1")
logit.ev <- function (x, param) {
  # If either of the parameters are invalid,
  # Then return NA for both qi's
  # NOTE(review): is.na(x) inside `if` assumes x is scalar NA or an object
  # whose is.na() is length-1; a multi-element x would warn (error in recent
  # R versions) -- confirm what callers pass.
  if (is.null(x) || is.na(x) || is.null(param))
    return(list(ev=NA, pv=NA))

  # Extract inverse-link and simulated parameters (respectively)
  inv <- linkinv(param)
  eta <- coef(param) %*% t(x)

  # Give matrix identical rows/columns to the simulated parameters
  ev <- pv <- matrix(NA, nrow(eta), ncol(eta))
  dimnames(ev) <- dimnames(pv) <- dimnames(eta)

  # Compute Expected Values
  ev <- inv(eta)

  # Compute Predicted Values: one Bernoulli draw per expected value
  for (i in 1:ncol(ev)) 
    pv[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i])) 

  # Return
  list(ev=ev, pv=pv)
}
+
#' @S3method describe logit.bayes
describe.logit.bayes <- function(...) {
  # Citation metadata consumed by `as.description'.
  model.authors <- c("Ben Goodrich", "Ying Lu")
  list(
       authors = model.authors,
       text = "Bayesian Logistic Regression for Dichotomous Dependent Variables",
       year = 2013
       )
}
diff --git a/R/logit.gee.R b/R/logit.gee.R
new file mode 100644
index 0000000..aea9640
--- /dev/null
+++ b/R/logit.gee.R
@@ -0,0 +1,98 @@
#' General Estimating Equation for Logit Regression
#' @param formula a formula
#' @param id a character-string specifying the column of the data-set to use
#' for clustering
#' @param robust a logical specifying whether to robustly or naively compute
#' the covariance matrix. This parameter is ignored in the \code{zelig2}
#' method, and instead used in the \code{robust.hook} function, which
#' executes after the call to the \code{gee} function
#' @param ... ignored parameters
#' @param R a square-matrix specifying the correlation
#' @param corstr a character-string specifying the correlation structure
#' @param data a data.frame 
#' @return a list specifying the call to the external model
#' @export zelig2logit.gee
#' @name logit.gee
#' @aliases zelig2logit.gee
zelig2logit.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {

  loadDependencies("gee")

  # A fixed correlation structure is meaningless without the matrix itself.
  if (corstr == "fixed" && is.null(R))
    stop("R must be defined")

  # When `id` names a column of `data`, extract that column and sort rows by
  # cluster so that gee() sees each cluster as a contiguous block.
  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
    cluster.values <- data[, id]
    data <- data[order(cluster.values), ]
    id <- sort(cluster.values)
  }

  # robust.gee.hook runs after gee() and applies the robust/naive choice.
  z(.function = gee,
    .hook     = robust.gee.hook,
    formula   = formula,
    id        = id,
    corstr    = corstr,
    family    = binomial(link="logit"),
    data      = data,
    R         = R,
    ...)
}
+
+
#' @S3method param logit.gee
param.logit.gee <- function(obj, num=1000, ...) {
  # Sample coefficient vectors from the asymptotic multivariate-normal
  # distribution centered at the point estimates, using the fitted
  # covariance matrix.
  point.estimates <- coef(obj)
  covariance <- vcov(obj)

  list(coef = mvrnorm(num, point.estimates, covariance),
       fam  = binomial(link = "logit"))
}
+
+
#' @S3method qi logit.gee
qi.logit.gee <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {

  sims <- coef(param)
  inverse <- linkinv(param)

  # Expected values at the first covariate profile: inverse link applied to
  # the linear predictor of every simulated coefficient vector.
  ev1 <- matrix(inverse(sims %*% t(x)), nrow = num)

  # Quantities involving x1 default to NA when x1 is absent.
  ev2 <- fd <- rr <- NA

  if (!is.null(x1)) {
    ev2 <- matrix(inverse(sims %*% t(x1)), nrow = num)
    fd <- ev2 - ev1  # first differences
    rr <- ev2 / ev1  # risk ratios
  }

  list(
       "Expected Values (for x): E(Y|X)"   = ev1,
       "Expected Values (for x1): E(Y|X1)" = ev2,
       "First Differences: E(Y|X1) - E(Y|X)" = fd,
       "Risk Ratios: E(Y|X1)/E(Y|X)" = rr
       )
}
+
+
#' @S3method describe logit.gee
describe.logit.gee <- function(...) {
  # Citation metadata consumed by `as.description'.
  list(authors = "Patrick Lam",
       text    = "General Estimating Equation for Logistic Regression",
       year    = 2011)
}
diff --git a/R/logit.survey.R b/R/logit.survey.R
new file mode 100644
index 0000000..88eafe0
--- /dev/null
+++ b/R/logit.survey.R
@@ -0,0 +1,191 @@
#' Interface between the logit.survey model and the survey package's
#' \code{svyglm}: builds a survey design object (from either probability
#' weights or replication weights) and returns the call specification.
#' NOTE(review): several formals (na.action, start, etastart, mustart,
#' offset, model1, method, x, y, contrasts, return.replicates, design) are
#' accepted but never forwarded anywhere in this body -- confirm whether
#' they are consumed elsewhere or are vestigial.
#' @export
zelig2logit.survey <- function(
                               formula,
                               weights=NULL, 
                               ids=NULL,
                               probs=NULL,
                               strata = NULL,  
                               fpc=NULL,
                               nest = FALSE,
                               check.strata = !nest,
                               repweights = NULL,
                               type,
                               combined.weights=FALSE,
                               rho = NULL,
                               bootstrap.average=NULL, 
                               scale=NULL,
                               rscales=NULL,
                               fpctype="fraction",
                               return.replicates=FALSE,
                               na.action="na.omit",
                               start=NULL,
                               etastart=NULL, 
                               mustart=NULL,
                               offset=NULL, 	      		
                               model1=TRUE,
                               method="glm.fit",
                               x=FALSE,
                               y=TRUE,
                               contrasts=NULL,
                               design=NULL,
                               data
                               ) {

  loadDependencies("survey")

  # Default to independent sampling (no clustering).
  if (is.null(ids))
    ids <- ~1

  # the following lines designate the design
  # NOTE: nothing truly special goes on here;
  #       the below just makes sure the design is created correctly
  #       for whether or not the replication weights are set
  design <- if (is.null(repweights))
    svydesign(
              data=data,
              ids=ids,
              probs=probs,
              strata=strata,
              fpc=fpc,
              nest=nest,
              check.strata=check.strata,
              weights=weights
              )

  else {
    # NOTE(review): `.survey.prob.weights` is assigned in this function's
    # local scope and never read afterwards; it looks like it was meant to
    # be visible to bootstrap code elsewhere -- confirm.
    .survey.prob.weights <- weights
    
    svrepdesign(
                data=data,
                repweights=repweights, 	
                type=type,
                weights=weights,
                combined.weights=combined.weights, 
                rho=rho,
                bootstrap.average=bootstrap.average,
                scale=scale,
                rscales=rscales,
                fpctype=fpctype,
                fpc=fpc
                )
  }

  # we cannot plug in family=Gamma yet because of weird issues
  # with glm. Uncomment the below lines for an explanation:

  ## fails:
  # test <- Gauss
  # svyglm(formula=formula, design=design, family=test)

  ## works:
  # svyglm(formula=formula, design=design, family=Gauss)

  # this is because of how glm is written (it evaluates the
  # family variable as a function in the parent.frame)

  z(.function = svyglm,
    formula = formula,
    design  = design,
    family  = quasibinomial(link="logit")
    )
}
+
#' @S3method param logit.survey
param.logit.survey <- function(obj, num=1000, ...) {
  # Simulate coefficient vectors from the asymptotic multivariate-normal
  # distribution of the survey-weighted fit.
  mu <- coef(obj)
  Sigma <- vcov(obj)

  list(simulations = mvrnorm(num, mu, Sigma),
       alpha = NULL,
       fam   = binomial(link = "logit"))
}
+
#' @S3method qi logit.survey
# Quantities of interest for the survey-weighted logit model: expected and
# predicted values for x (and optionally x1), first differences, risk
# ratios, and average treatment effects when observed outcomes y are given.
qi.logit.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {

  model <- GetObject(obj)

  coef <- coef(param)
  alpha <- alpha(param)

  # Linear predictor: one row per simulated coefficient vector.
  eta <- coef %*% t(x)

  link.inverse <- linkinv(param)

  # Success probabilities on the response scale.
  theta <- matrix(link.inverse(eta), nrow=nrow(coef))

  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))

  dimnames(pr) <- dimnames(ev) <- dimnames(theta)

  # Expected values are the simulated probabilities themselves.
  ev <- theta

  # Predicted values: one Bernoulli draw per simulated probability, stored
  # as the characters "0"/"1".
  for (k in 1:ncol(theta)) {
    pr[,k] <- rbinom(length(ev[,k]), 1, ev[,k])
    pr[,k] <- as.character(pr[,k])
  }

  # NOTE(review): this stores a "levels" attribute on a character matrix;
  # presumably downstream summary code reads it -- confirm.
  levels(pr) <- c("0", "1")
  
  # NOTE(review): y[,1] assumes y is a matrix or data.frame; a bare vector
  # would error here -- confirm what callers pass.
  if (!is.null(y) && NCOL(y))
    y <- y[,1]


  # Quantities involving x1 default to NA.
  pr1 <- ev1 <- fd <- rr <- NA

  
  if (!is.null(x1)) {
    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)),
                            nrow = nrow(coef)
                            )


    pr1 <- matrix(NA, nrow=nrow(theta), ncol(theta))

    for (k in 1:ncol(theta)) {
      pr1[,k] <- rbinom(length(ev1[,k]), 1, ev1[,k])
      pr1[,k] <- as.character(pr1[,k])
    }

    levels(pr1) <- c("0", "1")
    
    # First differences and risk ratios of the two expected-value matrices.
    fd <- ev1-ev
    rr <- ev1/ev
  }


  att.ev <- att.pr <- NA

  # Average treatment effects: observed outcomes minus simulated expected or
  # predicted values, averaged across observations per simulation.
  if (!is.null(y)) {

    # NOTE(review): matrix() fills column-wise; verify the rows of yvar each
    # reproduce y as intended when length(y) != nrow(coef).
    yvar <- matrix(rep(y, nrow(coef)),
                   nrow = nrow(coef)
                   )

    tmp.ev <- yvar - ev
    tmp.pr <- yvar - as.integer(pr)

    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow=nrow(coef))
    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow=nrow(coef))
  }

  list(
       "Expected Values: E(Y|X)" = ev,
       "Expected Values (for X1): E(Y|X1)" = ev1,
       "Predicted Values: Y|X" = pr,
       "Predicted Values (for X1): Y|X1" = pr1,
       "First Differences: E(Y|X1) - E(Y|X)" = fd,
       "Risk Ratios: P(Y=1|X1)/P(Y=0|X)" = rr,
       "Average Treatment Effect: Y - EV" = att.ev,
       "Average Treatment Effect: Y - PR" = att.pr
       )
}
+
#' @S3method describe logit.survey
describe.logit.survey <- function(...) {
  # Bug fix: the description previously read "Logitistic Regression for
  # Continuous, Positive Dependent Variables" -- a misspelling plus a
  # copy/paste of another model's support; a logit models dichotomous
  # outcomes.
  list(
       authors = "Nicholas Carnes",
       year = 2008,
       description = "Survey-Weighted Logistic Regression for Dichotomous Dependent Variables"
       )
}
diff --git a/R/lognorm.R b/R/lognorm.R
new file mode 100644
index 0000000..e569e3d
--- /dev/null
+++ b/R/lognorm.R
@@ -0,0 +1,107 @@
#' Interface between the Zelig Model lognorm and 
#' the Pre-existing Model-fitting Method
#' @param formula a formula
#' @param ... additional parameters passed to \code{survreg}
#' @param robust a logical specifying whether to compute robust (cluster)
#'   standard errors; must be TRUE whenever \code{cluster} is supplied
#' @param cluster an optional clustering variable (or NULL); when given, it
#'   is folded into the formula via \code{cluster.formula}
#' @param data a data.frame 
#' @return a list specifying '.function'
#' @export
zelig2lognorm <- function (formula, ..., robust = FALSE, cluster = NULL, data) {

  loadDependencies("survival")

  # A cluster specification only makes sense with robust standard errors.
  if (!(is.null(cluster) || robust))
    stop("If cluster is specified, then `robust` must be TRUE")

  # Add cluster term
  if (robust || !is.null(cluster))
    formula <- cluster.formula(formula, cluster)

  # Return
  list(
       .function = "survreg",
       formula = formula,
       dist = "lognormal",
       robust = robust,
       data = data,
       ...
       )
}
+
#' @S3method param lognorm
param.lognorm <- function(obj, num=1000, ...) {

  # Point estimates of the regression coefficients.
  point.est <- coef(obj)
  k <- length(point.est)

  # Simulate coefficients jointly with the log of the survreg scale
  # parameter (appended as the last component), using the full covariance
  # matrix of the fit.
  draws <- mvrnorm(num, c(point.est, log(obj$result$scale)), vcov(obj))

  # Split the draws back into coefficients and ancillary (scale) columns;
  # the inverse link is the lognormal "itrans" from survreg.distributions.
  list(
       coef = as.matrix(draws[, 1:k]),
       alpha = as.matrix(draws[, -(1:k)]),
       linkinv = survreg.distributions[["lognormal"]]$itrans
       )
}
+
#' @S3method qi lognorm
# Quantities of interest for the log-normal duration model: expected values
# at x and x1, and their first differences.
qi.lognorm <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {

  linkinv <- linkinv(param)
  alpha <- alpha(param)
  beta <- coef(param)

  # Compute expected values for "lognorm" regression
  #
  # This function is nested within qi.lognorm for code-clarity and because it
  # will not be used by any other function
  # @param coef a matrix of simulated coefficient draws
  # @param alpha sim.scale: simulated log-scale draws
  # @param x a 'setx' object, or NULL/NA when the profile is absent
  # @return a matrix of expected values, or NA
  compute.ev <- function (coef, alpha, x) {
    # NOTE(review): is.na(x) inside `if` assumes x is scalar NA or an object
    # with a length-1 is.na() -- confirm what callers pass.
    if (is.null(x) || is.na(x))
      # If there are missing explanatory variables, ignore them
      return(NA)

    # Compute eta
    # This value must be *inverted* to be restored to the true "observed" value
    eta <- coef %*% t(x)

    # Apply inverse link function
    theta <- as.matrix(apply(eta, 2, linkinv))

    # Copied from qi.survreg in Zelig v3.5:
    # log-normal mean, exp(mu + sigma^2 / 2), with sigma = exp(alpha)
    ev <- exp(log(theta) + 0.5*(exp(alpha))^2)
    dimnames(ev) <- dimnames(theta)

    # Return
    as.matrix(ev)
  }

  # Compute expected values for X and X1
  ev1 <- compute.ev(beta, alpha, x)
  ev2 <- compute.ev(beta, alpha, x1)


  list(
       "Expected Value: E(Y|X)" = ev1,
       "Expected Value: E(Y|X1)" = ev2,
       "First Differences: E(Y|X1) - E(Y|X)" = ev2 - ev1
       )
}
+
#' @S3method describe lognorm
describe.lognorm <- function(...) {
  # Citation metadata consumed by `as.description'.
  model.authors <- c("Matthew Owen", "Olivia Lau", "Kosuke Imai", "Gary King")
  list(
       authors = model.authors,
       text = "Log-Normal Regression for Duration Dependent Variables",
       year = 2007
       )
}
diff --git a/R/ls.R b/R/ls.R
new file mode 100644
index 0000000..e6536be
--- /dev/null
+++ b/R/ls.R
@@ -0,0 +1,94 @@
#' Interface between ls model and Zelig
#' This function is exclusively for use by the `zelig' function
#' @param formula a formula
#' @param ... ignored parameters
#' @param data a data.frame
#' @param weights a numeric vector
#' @return a list to be coerced into a zelig.call object
#' @export
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
zelig2ls <- function(formula, ..., data, weights=NULL)
  z(
    lm,
    formula = formula,
    weights = weights,
    # Use FALSE rather than F: T/F are ordinary variables that user code can
    # reassign, while TRUE/FALSE are reserved words.
    model   = FALSE,
    data    = data
    )
#' Param Method for the 'ls' Zelig Model
#' @note This method currently returns via a deprecated style: a bare matrix
#'   of simulations rather than a 'parameters' list
#' @usage \method{param}{ls}(obj, num, \dots)
#' @S3method param ls
#' @param obj a 'zelig' object
#' @param num an integer specifying the number of simulations to sample
#' @param ... ignored parameters
#' @return a matrix of simulated coefficients, cast as a 'parameters' object
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
param.ls <- function(obj, num, ...) {
  # Bug fix: the original referenced `.object`, which is not defined in this
  # function's scope; the method's argument is `obj`.
  mvrnorm(n=num, mu=coef(obj), Sigma=vcov(obj))
}
#' Compute quantities of interest for 'ls' Zelig models
#' @usage \method{qi}{ls}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
#' @S3method qi ls
#' @param obj a \code{zelig} object
#' @param x a 'setx' object or NULL
#' @param x1 an optional 'setx' object
#' @param y this parameter is reserved for simulating average treatment effects,
#'   though this feature is currently supported by only a handful of models
#' @param num an integer specifying the number of simulations to compute
#' @param param a parameters object
#' @return a list of key-value pairs specifying pairing titles of quantities of
#'   interest with their simulations
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
qi.ls <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
  # The least-squares model cannot simulate without covariates.
  if (missing(x))
    stop("x cannot be missing while computing the `ls' model")

  # Simulated coefficient draws.
  sims <- coef(param)

  # For a linear model the expected value is the linear predictor itself,
  # and the predicted value coincides with the expected value.
  expected <- sims %*% t(x)

  # Quantities involving x1 default to NA.
  expected1 <- diffs <- NA

  if (!is.null(x1)) {
    expected1 <- sims %*% t(x1)
    diffs <- expected1 - expected
  }

  list("Expected Values: E(Y|X)"  = expected,
       "Expected Values: E(Y|X1)" = expected1,
       "Predicted Values: Y|X"    = expected,
       "Predicted Values: Y|X1"   = expected1,
       "First Differences: E(Y|X1) - E(Y|X)" = diffs
       )
}
#' Describe a \code{ls} model to Zelig
#' @note \code{ls} stands for "least squares fit"
#' @usage \method{describe}{ls}(...)
#' @S3method describe ls
#' @param ... ignored parameters
#' @return a list to be processed by \code{as.description}
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
#' @export
describe.ls <- function(...){
  # Single systematic component ("mu"): one equation, tags not supported,
  # with both dependent and explanatory variables.
  mu.spec <- list(
    equations    = c(1, 1),
    tags.allowed = FALSE,
    dep.vars     = TRUE,
    exp.vars     = TRUE
  )

  # Citation and classification metadata consumed by `as.description'.
  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
       year     = 2007,
       category = "continuous",
       parameters = list(mu = mu.spec),
       text = "Least Squares Regression for Continuous Dependent Variables"
       )
}
diff --git a/R/make.parameters.R b/R/make.parameters.R
index 29a9924..4e06884 100644
--- a/R/make.parameters.R
+++ b/R/make.parameters.R
@@ -1,3 +1,11 @@
+#' ??? For use with cmvglm
+#' @param terms ???
+#' @param shape ???
+#' @param ancillary ???
+#' @param eqns ???
+#' @return ???
+#' @export
+#' @author Kosuke Imai and Olivia Lau
 make.parameters <- function(terms, shape = "vector", ancillary = TRUE,eqns=NULL) {
   if (!shape %in% c("matrix", "vector"))
     stop("not a valid 'shape' for parameters.  Choose from \"matrix\" or \"vector\".")
diff --git a/R/makeModelMatrix.R b/R/makeModelMatrix.R
new file mode 100644
index 0000000..d6f54c7
--- /dev/null
+++ b/R/makeModelMatrix.R
@@ -0,0 +1,39 @@
#' Make a Model Matrix from a Zelig-Style Formula
#' 
#' This is a helper function that creates a \code{model.matrix} like object
#' of Zelig-style formulae.
#' @param formula a Zelig-style formula (a single formula or a list of them)
#' @param data a \code{data.frame}
#' @return a design (or model) matrix, or NULL when no data is supplied
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
makeModelMatrix <- function (formula, data) {

  # No data means no design matrix.
  if (missing(data) || is.null(data))
    return(NULL)

  # (Dedicated handling for "Formula"-class objects was sketched here but
  # never implemented.)

  if (is.list(formula)) {
    # Multiple-equation case: build each equation's design matrix, bind the
    # columns, drop duplicate column names, and transpose the result.
    combined <- NULL
    for (single.formula in formula)
      combined <- cbind(combined, model.matrix(single.formula, data))

    t(as.matrix(combined[, unique(colnames(combined))]))
  }
  else {
    # Ordinary formula: delegate directly to the default machinery.
    model.matrix(formula, data)
  }
}
+
+
#
# Placeholder for a formula-specific model-matrix builder.
# NOTE(review): the body is empty, so this returns NULL for any input; it
# appears to be unfinished scaffolding for makeModelMatrix above.
#
makeModelMatrixFromFormula <- function (formula, data) {

}
diff --git a/R/mcmcei.R b/R/mcmcei.R
deleted file mode 100644
index 9b11704..0000000
--- a/R/mcmcei.R
+++ /dev/null
@@ -1,15 +0,0 @@
-mcmcei <- function(formula, data, ...) {
-  #if (is.null(rownames(data))) {
-  #  rownames(data) <-1:nrow(data)
-  # assign(data, as.character(mc$data), env = .GlobalEnv)
-  #}
-  res <- NULL
-  vars <- model.frame(formula, data)
-  vars <- as.matrix(vars)
-  res$c0 <- vars[,1]
-  res$c1 <- vars[,2]
-  res$r0 <- vars[,3]
-  res$r1 <- vars[,4]
-  res<-as.data.frame(res)
- res
-}
diff --git a/R/mi.R b/R/mi.R
index 0489647..4beffc4 100644
--- a/R/mi.R
+++ b/R/mi.R
@@ -1,5 +1,25 @@
-mi <- function(...) {
-  res <- list(...)
-  class(res) <- c("mi", "list")
-  return(res)
#' Bundle Data-sets for Multiple Imputation
#' 
#' This object prepares data-sets for processing with multiple imputation.
#' @note This function is largely identical to simply creating a list object;
#'   any argument that is not a \code{data.frame} is silently dropped
#' @param ... a set of \code{data.frame}'s
#' @return a list containing the \code{data.frame} arguments, in order, with
#'   non-\code{data.frame} arguments removed
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
#' @export
mi <- function (...) {

  # Keep only the data.frame arguments (names are preserved).
  # Bug fix: the previous implementation iterated `length(x):1`, which
  # evaluates to 0:1 when called with no arguments and then failed with a
  # subscript-out-of-bounds error; Filter handles the empty case cleanly.
  Filter(is.data.frame, list(...))
}
diff --git a/R/mlogit.bayes.R b/R/mlogit.bayes.R
new file mode 100644
index 0000000..404a6a5
--- /dev/null
+++ b/R/mlogit.bayes.R
@@ -0,0 +1,103 @@
#' Interface between the mlogit.bayes model and MCMCpack's MCMCmnl
#' @export
zelig2mlogit.bayes <- function (formula, burnin = 1000, mcmc = 10000,
                                verbose = 0, ..., data) {

  # MCMCmnl and its convergence diagnostics live in these packages.
  loadDependencies("MCMCpack", "coda")

  # NOTE(review): unlike zelig2logit.bayes, no default `verbose` is derived
  # from the chain length here -- confirm whether that is intentional.

  # Describe the external call; "MCMChook" post-processes the sampler output.
  list(.function = "MCMCmnl",
       .hook     = "MCMChook",
       formula   = formula,
       data      = data,
       burnin    = burnin,
       mcmc      = mcmc,
       verbose   = verbose,
       # All remaining arguments pass straight through to MCMCmnl.
       ...)
}
+
#' @S3method param mlogit.bayes
param.mlogit.bayes <- function(obj, num=1000, ...) {
  # The posterior draws are used directly; no inverse link is attached
  # because the multinomial probabilities are computed downstream.
  posterior.draws <- coef(obj)
  list(coef = posterior.draws,
       linkinv = NULL)
}
+
#' @S3method qi mlogit.bayes
qi.mlogit.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {

  # Bug fix: the helper was previously invoked with `.fitted`, a name that
  # is not defined anywhere in this function; the fitted model is the `obj`
  # argument.
  res1 <- compute.mlogit.bayes(obj, x, y, num, param)
  res2 <- compute.mlogit.bayes(obj, x1, y, num, param)

  # Assemble the labeled quantities of interest.
  list(
       "Expected Value: E(Y|X)" = res1$ev,
       "Predicted Value: Y|X"   = res1$pv,
       "Expected Value (for X1): E(Y|X1)" = res2$ev,
       "Predicted Value (for X1): Y|X1"   = res2$pv,
       "First Differences"   = res2$ev - res1$ev
       )
}
+
# Helper for the mlogit.bayes model: compute expected category probabilities
# and predicted categories for one covariate profile.
# @param obj the fitted model object
# @param x a 'setx' object with a $matrix component, or NULL/NA
# @param y unused in this body
# @param num unused in this body; draws come from coef(obj)
# @param param a 'parameters' object (only checked for NULL here)
# @return a list with `ev` (draws x category x profile-row array of
#   probabilities) and `pv` (predicted categories as characters)
compute.mlogit.bayes <- function (obj, x, y, num, param) {
  # If either of the parameters are invalid,
  # Then return NA for both qi's
  if (is.null(x) || is.na(x) || is.null(param))
    return(list(ev=NA, pv=NA))

  # Observed response; its table determines the number of outcome categories.
  resp <- model.response(model.frame(obj))

  level <- length(table(resp))
  # Number of design-matrix columns.
  # NOTE(review): eval(obj) before model.matrix looks odd -- confirm why the
  # object is re-evaluated here.
  p <- dim(model.matrix(eval(obj),data=obj$data))[2]
  coef <- coef(obj)
  # eta[s, j, k]: linear predictor for draw s, category j, profile row k.
  eta <- array(NA, c(nrow(coef),level, nrow(x$matrix)))



  # Category 1 is the reference category: its linear predictor is zero.
  eta[, 1, ] <- matrix(0, nrow(coef), nrow(x$matrix))

  for (j in 2:level) {
    # Select the coefficient columns belonging to category j.
    ind <- (1:p)*(level-1)-(level-j)
    eta[,j,]<- coef[,ind]%*%t(x)
  }

  # Multinomial-logit probabilities: exp(eta) normalized within each draw.
  eta<-exp(eta)
  ev <- array(NA, c(nrow(coef), level, nrow(x$matrix)))
  pr <- matrix(NA, nrow(coef), nrow(x$matrix))
  colnames(ev) <- rep(NA, level)

  for (k in 1:nrow(x$matrix)) {
    for (j in 1:level)
      ev[,j,k] <- eta[,j,k]/rowSums(eta[,,k])
  }

  # Label the category dimension P(Y=1), P(Y=2), ...
  for (j in 1:level) {
    colnames(ev)[j] <- paste("P(Y=", j, ")", sep="")
  }

  # Draw one predicted category per simulation from the probabilities.
  # NOTE(review): `pr` is reassigned wholesale on every pass through this
  # loop, so only the draws for the final profile row survive -- confirm
  # whether results should instead be accumulated per row k.
  for (k in 1:nrow(x$matrix)) {             
    probs <- as.matrix(ev[,,k])
    temp <- apply(probs, 1, FUN=rmultinom, n=1, size=1)
    temp <- as.matrix(t(temp)%*%(1:nrow(temp)))
    pr <- apply(temp,2,as.character)
  }
  list(ev = ev, pv = pr)
}
+
#' @S3method describe mlogit.bayes
describe.mlogit.bayes <- function(...) {
  # Citation metadata consumed by `as.description'.
  model.authors <- c("Ben Goodrich", "Ying Lu")
  list(
       authors = model.authors,
       text = "Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values",
       year = 2013
       )
}
diff --git a/R/model.end.R b/R/model.end.R
deleted file mode 100644
index bf0aaba..0000000
--- a/R/model.end.R
+++ /dev/null
@@ -1,17 +0,0 @@
-model.end <- function(res, mf) {
-
-  res$variance <- -solve(res$hessian)
-  res$hessian <- NULL
-
-  colnames(res$variance) <- rownames(res$variance) <- names(res$par)
-  res$coefficients <- res$par
-  res$par <- NULL
-
-  res$terms <- attr(mf, "terms")
-
-  attr(res, "na.message") <- attr(mf, "na.message") 
-  if (!is.null(attr(mf, "na.action"))) 
-    res$na.action <- attr(mf, "na.action") 
-
-  res
-}
diff --git a/R/model.frame.gamF.R b/R/model.frame.gamF.R
deleted file mode 100644
index 975d567..0000000
--- a/R/model.frame.gamF.R
+++ /dev/null
@@ -1,5 +0,0 @@
-model.frame.gamF <- function(formula, data, ...){
-		gp <- interpret.gam(formula)
-		ff<- gp$fake.formula
-		return(model.frame.default(ff, data))
-	}
\ No newline at end of file
diff --git a/R/model.frame.multiple.R b/R/model.frame.multiple.R
index 85f44d0..f97ed00 100644
--- a/R/model.frame.multiple.R
+++ b/R/model.frame.multiple.R
@@ -1,3 +1,15 @@
+#' Create Model Frame from \code{multiple} Object
+#'
+#' This method creates a \code{model.frame} from a \code{multiple} object. This
+#' method will be deprecated as the development of Zelig 4 progresses.
+#' @usage \method{model.frame}{multiple}(formula,data,eqn=NULL,...)
+#' @S3method model.frame multiple
+#' @param formula an object of both type \code{formula} and \code{multiple}
+#' @param data a \code{data.frame}
+#' @param eqn the number of equations in the formula
+#' @param ... ignored parameters
+#' @return a \code{model.frame} object
+#' @author Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
 model.frame.multiple <- function (formula,data,eqn=NULL,...){
   if(class(formula)[[1]]=="terms"){
     terms <-formula
diff --git a/R/model.matrix.multiple.R b/R/model.matrix.multiple.R
index ced938c..22f740d 100644
--- a/R/model.matrix.multiple.R
+++ b/R/model.matrix.multiple.R
@@ -1,3 +1,17 @@
+#' Create Design Matrix of a \code{multiple} Object
+#'
+#' This method is used to generate a \code{model.matrix} adhering to the
+#' specifications in the help document "model.matrix".
+#' @usage
+#' \method{model.matrix}{multiple}(object,data,shape="compact",eqn=NULL,...)
+#' @note This method is scheduled to be deprecated.
+#' @param object an object of type \code{multiple}. This represents a Zelig 3.5
+#' formula
+#' @param data a \code{data.frame}
+#' @param shape a character-string specifying the shape of the matrix
+#' @param eqn an integer specifying the number of equations
+#' @param ... ignored parameters
+#' @S3method model.matrix multiple
 model.matrix.multiple <- function (object,data,shape="compact",eqn=NULL,...){
   
   intersect <- function(x, y) y[match(x, y, nomatch = 0)]
diff --git a/R/model.matrix.parseFormula.R b/R/model.matrix.parseFormula.R
new file mode 100644
index 0000000..487a607
--- /dev/null
+++ b/R/model.matrix.parseFormula.R
@@ -0,0 +1,32 @@
+#' Construct Design Matrix from a Parsed, Zelig-style Formula
+#'
+#' This method constructs a design matrix from a Zelig-style formula. This
+#' matrix is commonly used in statistical simulation, and will likely be
+#' relevant as the canonical form of a \code{setx} object.
+#' @usage \method{model.matrix}{parseFormula}(object, data = NULL, ...)
+#' @note This method is primarily used by the \code{setx} function.
+#' @param object a "parseFormula" object
+#' @param data a "data.frame"
+#' @param ... ignored parameters
+#' @return a "model.matrix" specifying information relevant to a statistical
+#' model
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @S3method model.matrix parseFormula
+model.matrix.parseFormula <- function (object, data = NULL, ...) {
+
+#   if (is.null(object$model.matrix))
+#     # Note that if data is NULL, then "makeModelMatrix" will return NULL
+#     makeModelMatrix(formula(object), data)
+# 
+#   else if (!missing(data))
+#     # If data is supplied, recompute the model matrix
+#     makeModelMatrix(formula(object), data)
+# 
+#   else
+#     # Otherwise use the previous stored value (which still might be NULL)
+#     object$model.matrix
+
+
+  makeModelMatrix(formula(object), data)
+
+}
diff --git a/R/model.warnings.R b/R/model.warnings.R
new file mode 100644
index 0000000..65cc787
--- /dev/null
+++ b/R/model.warnings.R
@@ -0,0 +1,71 @@
+# This code is rough looking. It needs to be made more elegant
+# but R doesn't really support block quotes
+model.warnings <- function (model) {
+  # Get appropriate Zelig package
+  pkg <- get.package(model)
+
+  # Get the zelig2* interface function corresponding to the model name
+  zelig2 <- paste("zelig2", as.character(model), sep="")
+  zelig2 <- tryCatch(
+                     { get(zelig2, mode="function"); 1 },
+                     error = function (e) NA
+                     )
+
+  #
+  #
+  #
+  if (is.na(zelig2) && is.na(pkg)) {
+
+      msg <- '
+
+** The model "%s" is not available with the currently loaded packages,
+** and is not an official Zelig package.
+** The model\'s name may be a typo.
+
+'
+    message(sprintf(msg, model))
+
+  }
+
+
+  else if (is.na(zelig2) && !is.na(pkg)) {
+
+    if (pkg %in% .packages(TRUE)) {
+      # The package is available on the system
+
+      msg <- '
+
+** The model "%s" is not available with the currently loaded packages,
+** however it *is* installed on your system.
+**
+** To load this model\'s package, please type:
+library("%s")
+
+'
+      message(sprintf(msg, model, pkg))
+    }
+
+    #
+    #
+    #
+    else {
+      # Else... the package is not available on the system
+
+      repos <- "http://r.iq.harvard.edu/"
+      msg <- '
+
+** The model "%s" is not installed on your system,
+** however it *is* available for download from Harvard.
+**
+** To install and load this model\'s package, please type:
+install.packages("%s", repos="%s", type="source")
+library("%s")
+
+'
+      message(sprintf(msg, model, pkg, repos, pkg))
+    }
+  }
+
+
+  invisible()
+}
diff --git a/R/multi.R b/R/multi.R
deleted file mode 100644
index 22b2b05..0000000
--- a/R/multi.R
+++ /dev/null
@@ -1,26 +0,0 @@
-multi<-function(...){
-  res<-list()
-  mf<-match.call(expand.dots=TRUE)
-  for(i in 2:length(mf)){
-    leveli<-eval(mf[[i]])
-    levelnamei<-names(mf)[[i]]
-    dta<-leveli[[2]]
-    if(class(dta)!="MI")
-      if(is.data.frame(dta[[1]])){
-        for(j in 1:length(dta)){
-          newlevelname<-paste(levelnamei,j,sep="")
-          res[[newlevelname]]<-list(formula=NULL, data=NULL)
-          res[[newlevelname]][[1]]<-leveli[[1]]
-          res[[newlevelname]][[2]]<-leveli[[2]][[j]]
-        }
-      }
-      else{
-        res[[levelnamei]]<-leveli
-        names(res[[levelnamei]])<-c("formula","data")
-      }
-}
-  class(res)<-c("multi", class(res))
-return(res)
-}
-
-
diff --git a/R/multi.dataset.R b/R/multi.dataset.R
new file mode 100644
index 0000000..9e31cdb
--- /dev/null
+++ b/R/multi.dataset.R
@@ -0,0 +1,144 @@
+# Make a ``multi.dataset'' Object
+# @param datasets a list containing data.frames
+# @param labels a character vector labeling indices of the dataset
+make.multi.dataset <- function (datasets, labels=NULL) {
+  md <- datasets
+
+  if (!missing(labels))
+    names(md) <- labels
+
+  # Set super important attributes
+  #attr(md, "something") <- "red"
+  class(md) <- "multi.dataset"
+
+  # Return object
+  md
+}
+
+# Multiple Dataset Object
+multi.dataset <- function (obj, ...) {
+  UseMethod("multi.dataset")
+}
+
+# Create a Multiple Dataset Object from a data.frame
+# @param obj a data.frame to convert
+# @return a ``multi.dataset'' object
+multi.dataset.data.frame <- function (obj, ...) {
+  # Place inside a list and label according to the name from the function call
+  label <- as.character(as.expression(substitute(obj)))
+  make.multi.dataset(list(obj), label)
+}
+
+# Create a Multiple Dataset Object from a list of data.frames
+# @param obj a list of data.frame's
+# @return a ``multi.dataset'' object
+multi.dataset.list <- function (obj, ...) {
+
+  # Iterate backwards through list, so that we can remove elements
+  for (k in length(obj):1) {
+    if (!is.data.frame(obj[[k]])) {
+      warning('"obj" contains an element that is not a data.frame... removing.')
+      obj[[k]] <- NULL
+    }
+  }
+
+  LABELS <- names(obj)
+
+  # If there are no labels, or they are uneven
+  if (is.null(LABELS) || length(LABELS) != length(obj))
+    LABELS <- paste("data-set-", 1:length(obj), sep = "")
+
+  # Otherwise, we have a nice matching of labels, but we might still have some
+  # that are empty
+  else {
+    for (k in 1:length(LABELS)) {
+      lab <- LABELS[k]
+
+      if (is.na(lab) || is.null(lab) || (is.character(lab) && nchar(lab) == 0))
+        LABELS[k] <- paste("data-set-", k, sep = "")
+    }
+  }
+
+  # Return object
+  make.multi.dataset(obj, LABELS)
+}
+
+# Create a Multiple Dataset Object from an Amelia output object
+# @param obj an ``amelia'' object containing multiply imputed data sets
+# @return a ``multi.dataset'' object
+multi.dataset.amelia <- function (obj, ...) {
+  data.frames <- obj$imputations
+  class(data.frames) <- NULL
+  make.multi.dataset(data.frames, names(data.frames))
+}
+
+# Divide a Data Frame or Matrix Into Subsets
+# @param obj a data.frame or matrix to be split into subsets, divided by the
+# categorical variable
+# @param by a character-string, specifying the column to subset
+# @return a list containing the subsetted data sets. The names of the list
+# correspond to the values of the column used for subsetting
+divide <- function (obj, by) {
+
+  # Ensure that "obj" is valid (a data.frame or matrix)
+  if (!is.data.frame(obj) && !is.matrix(obj)) {
+    warning('"obj" is not a data.frame or matrix')
+    return(list(obj))
+  }
+
+  # Ensure that "by" is valid (a character-string)
+  if (!is.character(by) && length(by) == 1) {
+    warning('"by" is not a character-string')
+    return(list(obj))
+  }
+
+  # Ensure that "by" is a column in "obj"
+  if (! by %in% colnames(obj)) {
+    warning('"by" is not a valid column of "obj"')
+    return(list(obj))
+  }
+
+  # Get the set of possible values
+  column.levels <-if (is.factor(obj[, by]))
+    levels(obj[, by])
+  else
+    unique(obj[, by])
+
+
+  # A list used to store each individual data.frame
+  res <- list()
+
+  # Iterate through all possible values and store each subset in a separate
+  # entry in the list
+  for (val in column.levels) {
+    # Determine which rows match this value
+    hits <- obj[, by] == val
+
+    # Store data set temporarily in a local value
+    data.set <- obj[hits, ]
+
+    # Assign levels to the column. This adds levels to string data.
+    levels(data.set[, by]) <- column.levels
+
+    # Store data set in list
+    res[[val]] <- data.set
+  }
+
+  # Return list
+  res
+}
+
+# Print a ``multi.dataset'' Object
+# @param x a multi.dataset object, essentially a list of data.frames
+# @param ... parameters to pass to the print.data.frame object
+# @return x (invisibly)
+print.multi.dataset <- function (x, ...) {
+  for (key in names(x)) {
+    cat("label =", key, "\n")
+    print(x[[key]], ...)
+    cat("\n")
+  }
+
+  # Return printed object (invisibly)
+  invisible(x)
+}
diff --git a/R/multipleUtil.R b/R/multipleUtil.R
index 0535b23..0c0ddc2 100644
--- a/R/multipleUtil.R
+++ b/R/multipleUtil.R
@@ -1,6 +1,13 @@
-  toBuildFormula<-function(Xnames,sepp="+"){
-    lng<-length(Xnames)
-    rhs<-NULL
+#' Build Formula ???
+#' 
+#' This function builds a formula
+#' @param Xnames a character-vector
+#' @param sepp a separator placed between terms (defaults to "+")
+#' @return a character-string
+#' @author ???
+toBuildFormula<-function(Xnames,sepp="+"){
+  lng<-length(Xnames)
+  rhs<-NULL
     if (lng!=0){
       if(lng==1){
         rhs=Xnames
@@ -15,8 +22,18 @@
     return (rhs)
   }
 
-#mode=1 model.matrix
-#mode=2 model.frame
+
+#' Multilevel
+#' 
+#' This function currently has no documentation, but is essential in Zelig 3.5's
+#' implementation of formulae.
+#' @param tt a terms object
+#' @param data a \code{data.frame}
+#' @param mode ???
+#' @param eqn an integer specifying the number of equations in a model
+#' @param ... ignored parameters
+#' @return a list with the "terms" attribute specified
+#' @author Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
 multilevel<-function(tt,data,mode,eqn,...){
   if(!(mode %in% c(1,2)))
     stop("Wrong mode argument")
diff --git a/R/names.ZeligS4vglm.R b/R/names.ZeligS4vglm.R
deleted file mode 100644
index 419de77..0000000
--- a/R/names.ZeligS4vglm.R
+++ /dev/null
@@ -1,4 +0,0 @@
-names.ZeligS4vglm <- function(x) {
-  cat("object is a S4 object.  Use '@' to extract slots.\n")
-  slotNames(x)
-}
diff --git a/R/names.relogit.R b/R/names.relogit.R
index ae2f4f1..e69de29 100644
--- a/R/names.relogit.R
+++ b/R/names.relogit.R
@@ -1,6 +0,0 @@
-names.relogit <- function(x){
-  res <- list(default=names(unclass(x)),
-            estimate = names(x$lower.estimate), tau = x$tau)
-  class(res) <- "names.relogit"
-  res
-}
diff --git a/R/names.summary.vglm.R b/R/names.summary.vglm.R
deleted file mode 100644
index 6590fd8..0000000
--- a/R/names.summary.vglm.R
+++ /dev/null
@@ -1,4 +0,0 @@
-names.summary.vglm <- function(x){
-  cat("object is a S4 object.  Use '@' to extract slots.\n")
-  slotNames(x)
-}
diff --git a/R/names.summary.zelig.R b/R/names.summary.zelig.R
deleted file mode 100644
index 12564bb..0000000
--- a/R/names.summary.zelig.R
+++ /dev/null
@@ -1,11 +0,0 @@
-names.summary.zelig <- function(x) {
-  res <- names(unclass(x))[1:4]
-  qi.names <- names(x$qi.stats)
-  for (i in 1:length(qi.names)) {
-    qi.names[i] <- paste("qi.stats$", qi.names[i], sep = "")
-  }
-  res <- c(res, qi.names)
-  res <- list(default = res)
-  class(res)<-"names.zelig"
-  res
-}
diff --git a/R/names.summary.zelig.relogit.R b/R/names.summary.zelig.relogit.R
deleted file mode 100644
index 134aef0..0000000
--- a/R/names.summary.zelig.relogit.R
+++ /dev/null
@@ -1,11 +0,0 @@
-names.summary.zelig.relogit <- function(x) {
-  res <- names(unclass(x))[1:5]
-  qi.names <- names(x$qi.stats)
-  for (i in 1:length(qi.names)) {
-    qi.names[i] <- paste("qi.stats$", qi.names[i], sep = "")
-  }
-  res <- c(res, qi.names)
-  res <- list(default = res)
-  class(res)<-"names.zelig"
-  res
-}
diff --git a/R/names.zelig.R b/R/names.zelig.R
deleted file mode 100644
index c32e165..0000000
--- a/R/names.zelig.R
+++ /dev/null
@@ -1,11 +0,0 @@
-names.zelig <- function (x) {
-   res <- names(unclass(x))[1:5]
-   qi.names <- names(x$qi)
-   for (i in 1:length(qi.names)) {
-     qi.names[i] <- paste("qi$", qi.names[i], sep = "")
-  }
-  res <- c(res, qi.names)
-  res <- list(default = res)
-  class(res) <- "names.zelig"
-  res
-}
diff --git a/R/names.zelig.relogit.R b/R/names.zelig.relogit.R
deleted file mode 100644
index 095fde2..0000000
--- a/R/names.zelig.relogit.R
+++ /dev/null
@@ -1,11 +0,0 @@
-names.zelig.relogit <- function(x) {
-   res <- names(unclass(x))[1:5]
-   qi.names <- names(x$qi)
-   for (i in 1:length(qi.names)) {
-     qi.names[i] <- paste("qi$", qi.names[i], sep = "")
-  }
-  res <- c(res, qi.names)
-  res <- list(default = res)
-  class(res) <- "names.zelig"
-  res
-}
diff --git a/R/negbinom.R b/R/negbinom.R
new file mode 100644
index 0000000..9a875b1
--- /dev/null
+++ b/R/negbinom.R
@@ -0,0 +1,119 @@
+#' Interface between negbinom model and Zelig
+#' This function is exclusively for use by the `zelig' function
+#' @param formula a formula
+#' @param weights a numeric vector
+#' @param ... ignored parameters
+#' @param data a data.frame
+#' @return a list to be coerced into a zelig.call object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+zelig2negbinom <- function(formula, weights=NULL, ..., data)
+  z(
+    .function = "glm.nb",
+    .hook = "robust.glm.hook",
+
+    weights = weights,
+    formula = formula,
+    data    = data
+    )
+#' Param Method for the 'negbinom' Zelig Model
+#' @note This method is used by the 'negbinom' Zelig model
+#' @usage \method{param}{negbinom}(obj, num=1000, ...)
+#' @S3method param negbinom
+#' @param obj a 'zelig' object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... ignored
+#' @return a list to be cast as a 'parameters' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.negbinom <- function(obj, num=1000, ...) {
+  list(
+       simulations = mvrnorm(num, mu=coef(.fitted), Sigma=vcov(.fitted)),
+       alpha = .fitted$theta,
+       link = function (e) e,
+       linkinv = function (e) e
+       )
+}
+#' Compute quantities of interest for 'negbinom' Zelig models
+#' @usage \method{qi}{negbinom}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi negbinom
+#' @param obj a 'zelig' object
+#' @param x a 'setx' object or NULL
+#' @param x1 an optional 'setx' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#'   though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of
+#'   interest with their simulations
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.negbinom <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+  #
+  coef <- coef(param)
+  alpha <- alpha(param)
+
+  # get inverse function
+  inverse <- obj[["family", "linkinv"]]
+
+  #
+  eta <- coef %*% t(x)
+  theta <- matrix(inverse(eta), nrow=nrow(coef))
+
+  # ...
+  ev <- theta
+  pr <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
+
+  # default values
+  ev1 <- pr1 <- fd <- NA
+
+  #
+  for (i in 1:ncol(ev))
+    pr[,i] <- rnegbin(nrow(ev), mu = ev[i,], theta = alpha[i])
+
+
+  if (!is.null(x1)) {
+
+    # quantities of interest
+    results <- qi(obj, x1, num=num)
+
+    # pass values over
+    ev1 <- results[["Expected Values: E(Y|X)"]]
+    pr1 <- results[["Predicted Values: Y|X"]]
+
+    # compute first differences
+    fd <- ev1 - ev
+  }
+
+  # Return quantities of interest, paired off with their titles
+  list("Expected Values: E(Y|X)"  = ev,
+       "Expected Values: E(Y|X1)" = ev1,
+       "Predicted Values: Y|X"    = pr,
+       "Predicted Values: Y|X1"   = pr1,
+       "First Differences: E(Y|X1) - E(Y|X)" = fd
+       )
+}
+#' Describe the \code{negbinom} model to Zelig
+#' @note \code{negbinom} stands for "negative binomial"
+#' @usage \method{describe}{negbinom}(...)
+#' @S3method describe negbinom
+#' @param ... ignored parameters
+#' @return a list to be processed by \code{as.description}
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+describe.negbinom <- function(...) {
+  # parameters object
+  parameters <- list(pi = list(
+                       equations = c(1, 1),
+                       tags.allowed = FALSE,
+                       dep.var = TRUE,
+                       exp.var = TRUE
+                       )
+                     )
+
+  # return list
+  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+       year     = 2008,
+       category = "count",
+       parameters = parameters,
+       text = "Negative Binomial Regression for Event Count Dependent Variables"
+       )
+}
diff --git a/R/netbinom.R b/R/netbinom.R
deleted file mode 100644
index 28ec796..0000000
--- a/R/netbinom.R
+++ /dev/null
@@ -1,242 +0,0 @@
-netbinom <- function (formula, data, LF="logit", family=binomial(link=LF), ..., mode = "digraph", diag = FALSE, 
-    nullhyp = c("qap", "qapspp", "qapy", "qapx", "qapallx", "cugtie", 
-        "cugden", "cuguman", "classical"), tol = 1e-07, reps = 1000) 
-{	
-    gfit <- function(glist, mode, diag) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        glm.fit(x[!mis, ], y[!mis], family = binomial(link=LF), intercept = FALSE)
-    }
-    gfitlm <- function(glist, mode, diag, tol) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        list(qr(x[!mis, ], tol = tol), y[!mis])
-    }
-   call <- match.call()
-   Terms <- terms(formula)
-    intercept.value <- attr(Terms, "intercept") 
-    if (intercept.value > 0){
-    	intercept = TRUE
-    	}
-    if (intercept.value == 0){
-    	intercept = FALSE
-    	} 
-   if (missing(data)) 
-		data <- environment(formula)		
-	mf <- match.call(expand.dots = FALSE)	
-    m <- match(c("formula", "data", "weights"), names(mf), 0)
-    mf <- mf[c(1, m)]
-    mf$drop.unused.levels <- TRUE
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame())
-    mt <- attr(mf, "terms")
-	D <- model.frame(formula, data = data)
-	y <- D[[1]]
-	#x.array.names <- as.list(for(i in 2:length(D)) {names(D[[i]])})
-	x <- array(dim=c((length(D) - 1), nrow(y), ncol(y)))
-	for(i in 2:length(D)) {
-		x[i - 1,,] <- D[[i]]	}
- 
-    
-    
-    y <- as.sociomatrix.sna(y)
-    x <- as.sociomatrix.sna(x)
-    if (is.list(y) || ((length(dim(y)) > 2) && (dim(y)[1] > 1))) 
-        stop("y must be a single graph.")
-    if (length(dim(y)) > 2) 
-        y <- y[1, , ]
-    if (is.list(x) || (dim(x)[2] != dim(y)[2])) 
-        stop("Homogeneous graph orders required.")
-    nx <- stackcount(x) + intercept
-    n <- dim(y)[2]
-    g <- list(y)
-    if (intercept) 
-        g[[2]] <- matrix(1, n, n)
-    if (nx - intercept == 1) 
-        g[[2 + intercept]] <- x
-    else for (i in 1:(nx - intercept)) g[[i + 1 + intercept]] <- x[i, 
-        , ]
-    if (any(sapply(lapply(g, is.na), any))) 
-        warning("Missing data supplied to poisson.net; this may pose problems for certain null hypotheses.  Hope you know what you're doing....")
-    fit.base <- gfit(g, mode = mode, diag = diag)
-    fit <- list()
-    fit$coefficients <- fit.base$coefficients
-    fit$fitted.values <- fit.base$fitted.values
-    fit$residuals <- fit.base$residuals
-    fit$linear.predictors <- fit.base$linear.predictors
-    fit$n <- length(fit.base$y)
-    fit$df.model <- fit.base$rank
-    fit$df.residual <- fit.base$df.residual
-    fit$deviance <- fit.base$deviance
-    fit$null.deviance <- fit.base$null.deviance
-    fit$df.null <- fit.base$df.null
-	######
-	fit$weights <- fit.base$weights
-	fit$boundary <- fit.base$boundary
-	fit$converged <- fit.base$converged
-	fit$contrasts <- fit.base$contrasts
-	fit$control <- fit.base$control
-	fit$effects <- fit.base$effects
-	fit$iter <- fit.base$iter
-	fit$model <- fit.base$model
-	fit$offset <- fit.base$offset
-	fit$prior.weights <- fit.base$prior.weights
-	fit$R <- fit.base$R
-	fit$y <- fit.base$y
-	#fit$summary <- summary(fit.base)
-	######
-    fit$aic <- fit.base$aic
-    fit$bic <- fit$deviance + fit$df.model * log(fit$n)
-    fit$qr <- fit.base$qr
-    #fit$ctable <- table(as.numeric(fit$fitted.values >= 0.5), 
-    #    fit.base$y, dnn = c("Predicted", "Actual"))
-    #if (NROW(fit$ctable) == 1) {
-    #    if (rownames(fit$ctable) == "0") 
-    #        fit$ctable <- rbind(fit$ctable, c(0, 0))
-    #    else fit$ctable <- rbind(c(0, 0), fit$ctable)
-    #    rownames(fit$ctable) <- c("0", "1")
-    #}
-    nullhyp <- match.arg(nullhyp)
-    if ((nullhyp %in% c("qap", "qapspp")) && (nx == 1)) 
-        nullhyp <- "qapy"
-    if (nullhyp == "classical") {
-        cvm <- chol2inv(fit$qr$qr)
-        se <- sqrt(diag(cvm))
-        tval <- fit$coefficients/se
-        fit$dist <- NULL
-        fit$pleeq <- pt(tval, fit$df.residual)
-        fit$pgreq <- pt(tval, fit$df.residual, lower.tail = FALSE)
-        fit$pgreqabs <- 2 * pt(abs(tval), fit$df.residual, lower.tail = FALSE)
-    }
-    else if (nullhyp %in% c("cugtie", "cugden", "cuguman")) {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- switch(nullhyp, cugtie <- rgraph(n, 
-                  mode = mode, diag = diag, replace = FALSE, 
-                  tielist = g[[i + 1]]), cugden <- rgraph(n, 
-                  tprob = gden(g[[i + 1]], mode = mode, diag = diag), 
-                  mode = mode, diag = diag), cuguman <- (function(dc, 
-                  n) {
-                  rguman(1, n, mut = x[1], asym = x[2], null = x[3], 
-                    method = "exact")
-                })(dyad.census(g[[i + 1]]), n))
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapy") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            gr[[1]] <- rmperm(g[[1]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapx") {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- rmperm(gr[[i + 1]])
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapallx") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            for (j in 1:nx) gr[[1 + j]] <- rmperm(g[[1 + j]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if ((nullhyp == "qap") || (nullhyp == "qapspp")) {
-        xsel <- matrix(TRUE, n, n)
-        if (!diag) 
-            diag(xsel) <- FALSE
-        if (mode == "graph") 
-            xsel[upper.tri(xsel)] <- FALSE
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            xfit <- gfitlm(g[1 + c(i, (1:nx)[-i])], mode = mode, 
-                diag = diag, tol = tol)
-            xres <- g[[1 + i]]
-            xres[xsel] <- qr.resid(xfit[[1]], xfit[[2]])
-            if (mode == "graph") 
-                xres[upper.tri(xres)] <- t(xres)[upper.tri(xres)]
-            for (j in 1:reps) repdist[j, i] <- gfit(c(g[-(1 + 
-                i)], list(rmperm(xres))), mode = mode, diag = diag)$coef[nx]
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    fit$nullhyp <- nullhyp
-    fit$names <- names(mf[2:length(mf)])
-	#fit$names <- names(mf[2:stackcount(mf)])  # paste("x", 1:(nx - intercept), sep = "")
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-	fit$xlevels <- .getXlevels(mt, mf)
-	fit <- c(fit, list(call = call, formula = formula, terms = mt, 
-	data = data, xlevels = .getXlevels(mt, mf)))
-		new.data <- as.data.frame(as.vector(data[,1]))
-	for(i in 2:ncol(data)){
-	new.data <- cbind(new.data, as.vector(data[,i])) } 
-	names(new.data) <- names(data)
-	fit$data <- new.data
-	fit$family <- family
-	fit$rank <- fit$df.model
-	so <- summary.glm(fit)
-	fit$mod.coefficients <- so$coefficients
-	fit$cov.unscaled <- so$cov.unscaled
-	fit$cov.scaled <- so$cov.scaled
-    class(fit) <- c("netglm")
-    return(fit)
-}
diff --git a/R/netgamma.R b/R/netgamma.R
deleted file mode 100644
index f9db840..0000000
--- a/R/netgamma.R
+++ /dev/null
@@ -1,242 +0,0 @@
-gamma.net <- function (formula, data, LF= "inverse", family=Gamma(link=LF), ..., mode = "digraph", diag = FALSE, 
-    nullhyp = c("qap", "qapspp", "qapy", "qapx", "qapallx", "cugtie", 
-        "cugden", "cuguman", "classical"), tol = 1e-07, reps = 1000) 
-{	
-    gfit <- function(glist, mode, diag) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        glm.fit(x[!mis, ], y[!mis], family = Gamma(link=LF), intercept = FALSE)
-    }
-    gfitlm <- function(glist, mode, diag, tol) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        list(qr(x[!mis, ], tol = tol), y[!mis])
-    }
-   call <- match.call()
-   Terms <- terms(formula)
-    intercept.value <- attr(Terms, "intercept") 
-    if (intercept.value > 0){
-    	intercept = TRUE
-    	}
-    if (intercept.value == 0){
-    	intercept = FALSE
-    	} 
-   if (missing(data)) 
-		data <- environment(formula)		
-	mf <- match.call(expand.dots = FALSE)	
-    m <- match(c("formula", "data", "weights"), names(mf), 0)
-    mf <- mf[c(1, m)]
-    mf$drop.unused.levels <- TRUE
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame())
-    mt <- attr(mf, "terms")
-	D <- model.frame(formula, data = data)
-	y <- D[[1]]
-	#x.array.names <- as.list(for(i in 2:length(D)) {names(D[[i]])})
-	x <- array(dim=c((length(D) - 1), nrow(y), ncol(y)))
-	for(i in 2:length(D)) {
-		x[i - 1,,] <- D[[i]]	}
- 
-    
-    
-    y <- as.sociomatrix.sna(y)
-    x <- as.sociomatrix.sna(x)
-    if (is.list(y) || ((length(dim(y)) > 2) && (dim(y)[1] > 1))) 
-        stop("y must be a single graph.")
-    if (length(dim(y)) > 2) 
-        y <- y[1, , ]
-    if (is.list(x) || (dim(x)[2] != dim(y)[2])) 
-        stop("Homogeneous graph orders required.")
-    nx <- stackcount(x) + intercept
-    n <- dim(y)[2]
-    g <- list(y)
-    if (intercept) 
-        g[[2]] <- matrix(1, n, n)
-    if (nx - intercept == 1) 
-        g[[2 + intercept]] <- x
-    else for (i in 1:(nx - intercept)) g[[i + 1 + intercept]] <- x[i, 
-        , ]
-    if (any(sapply(lapply(g, is.na), any))) 
-        warning("Missing data supplied to poisson.net; this may pose problems for certain null hypotheses.  Hope you know what you're doing....")
-    fit.base <- gfit(g, mode = mode, diag = diag)
-    fit <- list()
-    fit$coefficients <- fit.base$coefficients
-    fit$fitted.values <- fit.base$fitted.values
-    fit$residuals <- fit.base$residuals
-    fit$linear.predictors <- fit.base$linear.predictors
-    fit$n <- length(fit.base$y)
-    fit$df.model <- fit.base$rank
-    fit$df.residual <- fit.base$df.residual
-    fit$deviance <- fit.base$deviance
-    fit$null.deviance <- fit.base$null.deviance
-    fit$df.null <- fit.base$df.null
-	######
-	fit$weights <- fit.base$weights
-	fit$boundary <- fit.base$boundary
-	fit$converged <- fit.base$converged
-	fit$contrasts <- fit.base$contrasts
-	fit$control <- fit.base$control
-	fit$effects <- fit.base$effects
-	fit$iter <- fit.base$iter
-	fit$model <- fit.base$model
-	fit$offset <- fit.base$offset
-	fit$prior.weights <- fit.base$prior.weights
-	fit$R <- fit.base$R
-	fit$y <- fit.base$y
-	#fit$summary <- summary(fit.base)
-	######
-    fit$aic <- fit.base$aic
-    fit$bic <- fit$deviance + fit$df.model * log(fit$n)
-    fit$qr <- fit.base$qr
-    #fit$ctable <- table(as.numeric(fit$fitted.values >= 0.5), 
-    #    fit.base$y, dnn = c("Predicted", "Actual"))
-    #if (NROW(fit$ctable) == 1) {
-    #    if (rownames(fit$ctable) == "0") 
-    #        fit$ctable <- rbind(fit$ctable, c(0, 0))
-    #    else fit$ctable <- rbind(c(0, 0), fit$ctable)
-    #    rownames(fit$ctable) <- c("0", "1")
-    #}
-    nullhyp <- match.arg(nullhyp)
-    if ((nullhyp %in% c("qap", "qapspp")) && (nx == 1)) 
-        nullhyp <- "qapy"
-    if (nullhyp == "classical") {
-        cvm <- chol2inv(fit$qr$qr)
-        se <- sqrt(diag(cvm))
-        tval <- fit$coefficients/se
-        fit$dist <- NULL
-        fit$pleeq <- pt(tval, fit$df.residual)
-        fit$pgreq <- pt(tval, fit$df.residual, lower.tail = FALSE)
-        fit$pgreqabs <- 2 * pt(abs(tval), fit$df.residual, lower.tail = FALSE)
-    }
-    else if (nullhyp %in% c("cugtie", "cugden", "cuguman")) {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- switch(nullhyp, cugtie <- rgraph(n, 
-                  mode = mode, diag = diag, replace = FALSE, 
-                  tielist = g[[i + 1]]), cugden <- rgraph(n, 
-                  tprob = gden(g[[i + 1]], mode = mode, diag = diag), 
-                  mode = mode, diag = diag), cuguman <- (function(dc, 
-                  n) {
-                  rguman(1, n, mut = x[1], asym = x[2], null = x[3], 
-                    method = "exact")
-                })(dyad.census(g[[i + 1]]), n))
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapy") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            gr[[1]] <- rmperm(g[[1]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapx") {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- rmperm(gr[[i + 1]])
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapallx") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            for (j in 1:nx) gr[[1 + j]] <- rmperm(g[[1 + j]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if ((nullhyp == "qap") || (nullhyp == "qapspp")) {
-        xsel <- matrix(TRUE, n, n)
-        if (!diag) 
-            diag(xsel) <- FALSE
-        if (mode == "graph") 
-            xsel[upper.tri(xsel)] <- FALSE
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            xfit <- gfitlm(g[1 + c(i, (1:nx)[-i])], mode = mode, 
-                diag = diag, tol = tol)
-            xres <- g[[1 + i]]
-            xres[xsel] <- qr.resid(xfit[[1]], xfit[[2]])
-            if (mode == "graph") 
-                xres[upper.tri(xres)] <- t(xres)[upper.tri(xres)]
-            for (j in 1:reps) repdist[j, i] <- gfit(c(g[-(1 + 
-                i)], list(rmperm(xres))), mode = mode, diag = diag)$coef[nx]
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    fit$nullhyp <- nullhyp
-    fit$names <- names(mf[2:length(mf)])
-	#fit$names <- names(mf[2:stackcount(mf)])  # paste("x", 1:(nx - intercept), sep = "")
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-	fit$xlevels <- .getXlevels(mt, mf)
-	fit <- c(fit, list(call = call, formula = formula, terms = mt, 
-	data = data, xlevels = .getXlevels(mt, mf)))
-		new.data <- as.data.frame(as.vector(data[,1]))
-	for(i in 2:ncol(data)){
-	new.data <- cbind(new.data, as.vector(data[,i])) } 
-	names(new.data) <- names(data)
-	fit$data <- new.data
-	fit$family <- family
-	fit$rank <- fit$df.model
-	so <- summary.glm(fit)
-	fit$mod.coefficients <- so$coefficients
-	fit$cov.unscaled <- so$cov.unscaled
-	fit$cov.scaled <- so$cov.scaled
-    class(fit) <- c("netglm")
-    return(fit)
-}
diff --git a/R/netlogit.zelig.R b/R/netlogit.zelig.R
deleted file mode 100644
index 73dc9b5..0000000
--- a/R/netlogit.zelig.R
+++ /dev/null
@@ -1,200 +0,0 @@
-logit.net.zelig <- function (y, x, intercept = TRUE, mode = "digraph", diag = FALSE, 
-    nullhyp = c("qap", "qapspp", "qapy", "qapx", "qapallx", "cugtie", 
-        "cugden", "cuguman", "classical"), tol = 1e-07, reps = 1000) 
-{
-    gfit <- function(glist, mode, diag) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        glm.fit(x[!mis, ], y[!mis], family = binomial(), intercept = FALSE)
-    }
-    gfitlm <- function(glist, mode, diag, tol) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        list(qr(x[!mis, ], tol = tol), y[!mis])
-    }
-    y <- as.sociomatrix.sna(y)
-    x <- as.sociomatrix.sna(x)
-    if (is.list(y) || ((length(dim(y)) > 2) && (dim(y)[1] > 1))) 
-        stop("y must be a single graph in logit.net.")
-    if (length(dim(y)) > 2) 
-        y <- y[1, , ]
-    if (is.list(x) || (dim(x)[2] != dim(y)[2])) 
-        stop("Homogeneous graph orders required in logit.net.")
-    nx <- stackcount(x) + intercept
-    n <- dim(y)[2]
-    g <- list(y)
-    if (intercept) 
-        g[[2]] <- matrix(1, n, n)
-    if (nx - intercept == 1) 
-        g[[2 + intercept]] <- x
-    else for (i in 1:(nx - intercept)) g[[i + 1 + intercept]] <- x[i, 
-        , ]
-    if (any(sapply(lapply(g, is.na), any))) 
-        warning("Missing data supplied to logit.net; this may pose problems for certain null hypotheses.  Hope you know what you're doing....")
-    fit.base <- gfit(g, mode = mode, diag = diag)
-    fit <- list()
-    fit$coefficients <- fit.base$coefficients
-    fit$fitted.values <- fit.base$fitted.values
-    fit$residuals <- fit.base$residuals
-    fit$linear.predictors <- fit.base$linear.predictors
-    fit$n <- length(fit.base$y)
-    fit$df.model <- fit.base$rank
-    fit$df.residual <- fit.base$df.residual
-    fit$deviance <- fit.base$deviance
-    fit$null.deviance <- fit.base$null.deviance
-    fit$df.null <- fit.base$df.null
-	######
-	fit$weights <- fit.base$weights
-	fit$boundary <- fit.base$boundary
-	fit$converged <- fit.base$converged
-	fit$contrasts <- fit.base$contrasts
-	fit$control <- fit.base$control
-	fit$effects <- fit.base$effects
-	fit$iter <- fit.base$iter
-	fit$model <- fit.base$model
-	fit$offset <- fit.base$offset
-	fit$prior.weights <- fit.base$prior.weights
-	fit$R <- fit.base$R
-	fit$y <- fit.base$y
-	#fit$summary <- summary(fit.base)
-	######
-    fit$aic <- fit.base$aic
-    fit$bic <- fit$deviance + fit$df.model * log(fit$n)
-    fit$qr <- fit.base$qr
-    fit$ctable <- table(as.numeric(fit$fitted.values >= 0.5), 
-        fit.base$y, dnn = c("Predicted", "Actual"))
-    if (NROW(fit$ctable) == 1) {
-        if (rownames(fit$ctable) == "0") 
-            fit$ctable <- rbind(fit$ctable, c(0, 0))
-        else fit$ctable <- rbind(c(0, 0), fit$ctable)
-        rownames(fit$ctable) <- c("0", "1")
-    }
-    nullhyp <- match.arg(nullhyp)
-    if ((nullhyp %in% c("qap", "qapspp")) && (nx == 1)) 
-        nullhyp <- "qapy"
-    if (nullhyp == "classical") {
-        cvm <- chol2inv(fit$qr$qr)
-        se <- sqrt(diag(cvm))
-        tval <- fit$coefficients/se
-        fit$dist <- NULL
-        fit$pleeq <- pt(tval, fit$df.residual)
-        fit$pgreq <- pt(tval, fit$df.residual, lower.tail = FALSE)
-        fit$pgreqabs <- 2 * pt(abs(tval), fit$df.residual, lower.tail = FALSE)
-    }
-    else if (nullhyp %in% c("cugtie", "cugden", "cuguman")) {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- switch(nullhyp, cugtie <- rgraph(n, 
-                  mode = mode, diag = diag, replace = FALSE, 
-                  tielist = g[[i + 1]]), cugden <- rgraph(n, 
-                  tprob = gden(g[[i + 1]], mode = mode, diag = diag), 
-                  mode = mode, diag = diag), cuguman <- (function(dc, 
-                  n) {
-                  rguman(1, n, mut = x[1], asym = x[2], null = x[3], 
-                    method = "exact")
-                })(dyad.census(g[[i + 1]]), n))
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapy") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            gr[[1]] <- rmperm(g[[1]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapx") {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- rmperm(gr[[i + 1]])
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapallx") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            for (j in 1:nx) gr[[1 + j]] <- rmperm(g[[1 + j]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if ((nullhyp == "qap") || (nullhyp == "qapspp")) {
-        xsel <- matrix(TRUE, n, n)
-        if (!diag) 
-            diag(xsel) <- FALSE
-        if (mode == "graph") 
-            xsel[upper.tri(xsel)] <- FALSE
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            xfit <- gfitlm(g[1 + c(i, (1:nx)[-i])], mode = mode, 
-                diag = diag, tol = tol)
-            xres <- g[[1 + i]]
-            xres[xsel] <- qr.resid(xfit[[1]], xfit[[2]])
-            if (mode == "graph") 
-                xres[upper.tri(xres)] <- t(xres)[upper.tri(xres)]
-            for (j in 1:reps) repdist[j, i] <- gfit(c(g[-(1 + 
-                i)], list(rmperm(xres))), mode = mode, diag = diag)$coef[nx]
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    fit$nullhyp <- nullhyp
-    fit$names <- paste("x", 1:(nx - intercept), sep = "")
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-    class(fit) <- "logit.net"
-    fit
-}
diff --git a/R/netnormal.R b/R/netnormal.R
deleted file mode 100644
index 431d474..0000000
--- a/R/netnormal.R
+++ /dev/null
@@ -1,242 +0,0 @@
-normal.net <- function (formula, data, LF= "identity", family=gaussian(link=LF), ..., mode = "digraph", diag = FALSE, 
-    nullhyp = c("qap", "qapspp", "qapy", "qapx", "qapallx", "cugtie", 
-        "cugden", "cuguman", "classical"), tol = 1e-07, reps = 1000) 
-{	
-    gfit <- function(glist, mode, diag) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        glm.fit(x[!mis, ], y[!mis], family = gaussian(link=LF), intercept = FALSE)
-    }
-    gfitlm <- function(glist, mode, diag, tol) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        list(qr(x[!mis, ], tol = tol), y[!mis])
-    }
-   call <- match.call()
-   Terms <- terms(formula)
-    intercept.value <- attr(Terms, "intercept") 
-    if (intercept.value > 0){
-    	intercept = TRUE
-    	}
-    if (intercept.value == 0){
-    	intercept = FALSE
-    	} 
-   if (missing(data)) 
-		data <- environment(formula)		
-	mf <- match.call(expand.dots = FALSE)	
-    m <- match(c("formula", "data", "weights"), names(mf), 0)
-    mf <- mf[c(1, m)]
-    mf$drop.unused.levels <- TRUE
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame())
-    mt <- attr(mf, "terms")
-	D <- model.frame(formula, data = data)
-	y <- D[[1]]
-	#x.array.names <- as.list(for(i in 2:length(D)) {names(D[[i]])})
-	x <- array(dim=c((length(D) - 1), nrow(y), ncol(y)))
-	for(i in 2:length(D)) {
-		x[i - 1,,] <- D[[i]]	}
- 
-    
-    
-    y <- as.sociomatrix.sna(y)
-    x <- as.sociomatrix.sna(x)
-    if (is.list(y) || ((length(dim(y)) > 2) && (dim(y)[1] > 1))) 
-        stop("y must be a single graph.")
-    if (length(dim(y)) > 2) 
-        y <- y[1, , ]
-    if (is.list(x) || (dim(x)[2] != dim(y)[2])) 
-        stop("Homogeneous graph orders required.")
-    nx <- stackcount(x) + intercept
-    n <- dim(y)[2]
-    g <- list(y)
-    if (intercept) 
-        g[[2]] <- matrix(1, n, n)
-    if (nx - intercept == 1) 
-        g[[2 + intercept]] <- x
-    else for (i in 1:(nx - intercept)) g[[i + 1 + intercept]] <- x[i, 
-        , ]
-    if (any(sapply(lapply(g, is.na), any))) 
-        warning("Missing data supplied to poisson.net; this may pose problems for certain null hypotheses.  Hope you know what you're doing....")
-    fit.base <- gfit(g, mode = mode, diag = diag)
-    fit <- list()
-    fit$coefficients <- fit.base$coefficients
-    fit$fitted.values <- fit.base$fitted.values
-    fit$residuals <- fit.base$residuals
-    fit$linear.predictors <- fit.base$linear.predictors
-    fit$n <- length(fit.base$y)
-    fit$df.model <- fit.base$rank
-    fit$df.residual <- fit.base$df.residual
-    fit$deviance <- fit.base$deviance
-    fit$null.deviance <- fit.base$null.deviance
-    fit$df.null <- fit.base$df.null
-	######
-	fit$weights <- fit.base$weights
-	fit$boundary <- fit.base$boundary
-	fit$converged <- fit.base$converged
-	fit$contrasts <- fit.base$contrasts
-	fit$control <- fit.base$control
-	fit$effects <- fit.base$effects
-	fit$iter <- fit.base$iter
-	fit$model <- fit.base$model
-	fit$offset <- fit.base$offset
-	fit$prior.weights <- fit.base$prior.weights
-	fit$R <- fit.base$R
-	fit$y <- fit.base$y
-	#fit$summary <- summary(fit.base)
-	######
-    fit$aic <- fit.base$aic
-    fit$bic <- fit$deviance + fit$df.model * log(fit$n)
-    fit$qr <- fit.base$qr
-    #fit$ctable <- table(as.numeric(fit$fitted.values >= 0.5), 
-    #    fit.base$y, dnn = c("Predicted", "Actual"))
-    #if (NROW(fit$ctable) == 1) {
-    #    if (rownames(fit$ctable) == "0") 
-    #        fit$ctable <- rbind(fit$ctable, c(0, 0))
-    #    else fit$ctable <- rbind(c(0, 0), fit$ctable)
-    #    rownames(fit$ctable) <- c("0", "1")
-    #}
-    nullhyp <- match.arg(nullhyp)
-    if ((nullhyp %in% c("qap", "qapspp")) && (nx == 1)) 
-        nullhyp <- "qapy"
-    if (nullhyp == "classical") {
-        cvm <- chol2inv(fit$qr$qr)
-        se <- sqrt(diag(cvm))
-        tval <- fit$coefficients/se
-        fit$dist <- NULL
-        fit$pleeq <- pt(tval, fit$df.residual)
-        fit$pgreq <- pt(tval, fit$df.residual, lower.tail = FALSE)
-        fit$pgreqabs <- 2 * pt(abs(tval), fit$df.residual, lower.tail = FALSE)
-    }
-    else if (nullhyp %in% c("cugtie", "cugden", "cuguman")) {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- switch(nullhyp, cugtie <- rgraph(n, 
-                  mode = mode, diag = diag, replace = FALSE, 
-                  tielist = g[[i + 1]]), cugden <- rgraph(n, 
-                  tprob = gden(g[[i + 1]], mode = mode, diag = diag), 
-                  mode = mode, diag = diag), cuguman <- (function(dc, 
-                  n) {
-                  rguman(1, n, mut = x[1], asym = x[2], null = x[3], 
-                    method = "exact")
-                })(dyad.census(g[[i + 1]]), n))
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapy") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            gr[[1]] <- rmperm(g[[1]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapx") {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- rmperm(gr[[i + 1]])
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapallx") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            for (j in 1:nx) gr[[1 + j]] <- rmperm(g[[1 + j]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if ((nullhyp == "qap") || (nullhyp == "qapspp")) {
-        xsel <- matrix(TRUE, n, n)
-        if (!diag) 
-            diag(xsel) <- FALSE
-        if (mode == "graph") 
-            xsel[upper.tri(xsel)] <- FALSE
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            xfit <- gfitlm(g[1 + c(i, (1:nx)[-i])], mode = mode, 
-                diag = diag, tol = tol)
-            xres <- g[[1 + i]]
-            xres[xsel] <- qr.resid(xfit[[1]], xfit[[2]])
-            if (mode == "graph") 
-                xres[upper.tri(xres)] <- t(xres)[upper.tri(xres)]
-            for (j in 1:reps) repdist[j, i] <- gfit(c(g[-(1 + 
-                i)], list(rmperm(xres))), mode = mode, diag = diag)$coef[nx]
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    fit$nullhyp <- nullhyp
-    fit$names <- names(mf[2:length(mf)]) 
-	#fit$names <- names(mf[2:stackcount(mf)])  # paste("x", 1:(nx - intercept), sep = "")
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-	fit$xlevels <- .getXlevels(mt, mf)
-	fit <- c(fit, list(call = call, formula = formula, terms = mt, 
-	data = data, xlevels = .getXlevels(mt, mf)))
-		new.data <- as.data.frame(as.vector(data[,1]))
-	for(i in 2:ncol(data)){
-	new.data <- cbind(new.data, as.vector(data[,i])) } 
-	names(new.data) <- names(data)
-	fit$data <- new.data
-	fit$family <- family
-	fit$rank <- fit$df.model
-	so <- summary.glm(fit)
-	fit$mod.coefficients <- so$coefficients
-	fit$cov.unscaled <- so$cov.unscaled
-	fit$cov.scaled <- so$cov.scaled
-    class(fit) <- c("netglm")
-    return(fit)
-}
diff --git a/R/netpoisson.R b/R/netpoisson.R
deleted file mode 100644
index 64c0b49..0000000
--- a/R/netpoisson.R
+++ /dev/null
@@ -1,242 +0,0 @@
-poisson.net <- function (formula, data, LF="log", family=poisson(link=LF), ..., mode = "digraph", diag = FALSE, 
-    nullhyp = c("qap", "qapspp", "qapy", "qapx", "qapallx", "cugtie", 
-        "cugden", "cuguman", "classical"), tol = 1e-07, reps = 1000) 
-{	
-    gfit <- function(glist, mode, diag) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        glm.fit(x[!mis, ], y[!mis], family = poisson(link=LF), intercept = FALSE)
-    }
-    gfitlm <- function(glist, mode, diag, tol) {
-        y <- gvectorize(glist[[1]], mode = mode, diag = diag, 
-            censor.as.na = TRUE)
-        x <- vector()
-        for (i in 2:length(glist)) x <- cbind(x, gvectorize(glist[[i]], 
-            mode = mode, diag = diag, censor.as.na = TRUE))
-        if (!is.matrix(x)) 
-            x <- matrix(x, ncol = 1)
-        mis <- is.na(y) | apply(is.na(x), 1, any)
-        list(qr(x[!mis, ], tol = tol), y[!mis])
-    }
-   call <- match.call()
-   Terms <- terms(formula)
-    intercept.value <- attr(Terms, "intercept") 
-    if (intercept.value > 0){
-    	intercept = TRUE
-    	}
-    if (intercept.value == 0){
-    	intercept = FALSE
-    	} 
-   if (missing(data)) 
-		data <- environment(formula)		
-	mf <- match.call(expand.dots = FALSE)	
-    m <- match(c("formula", "data", "weights"), names(mf), 0)
-    mf <- mf[c(1, m)]
-    mf$drop.unused.levels <- TRUE
-    mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame())
-    mt <- attr(mf, "terms")
-	D <- model.frame(formula, data = data)
-	y <- D[[1]]
-	#x.array.names <- as.list(for(i in 2:length(D)) {names(D[[i]])})
-	x <- array(dim=c((length(D) - 1), nrow(y), ncol(y)))
-	for(i in 2:length(D)) {
-		x[i - 1,,] <- D[[i]]	}
- 
-    
-    
-    y <- as.sociomatrix.sna(y)
-    x <- as.sociomatrix.sna(x)
-    if (is.list(y) || ((length(dim(y)) > 2) && (dim(y)[1] > 1))) 
-        stop("y must be a single graph.")
-    if (length(dim(y)) > 2) 
-        y <- y[1, , ]
-    if (is.list(x) || (dim(x)[2] != dim(y)[2])) 
-        stop("Homogeneous graph orders required.")
-    nx <- stackcount(x) + intercept
-    n <- dim(y)[2]
-    g <- list(y)
-    if (intercept) 
-        g[[2]] <- matrix(1, n, n)
-    if (nx - intercept == 1) 
-        g[[2 + intercept]] <- x
-    else for (i in 1:(nx - intercept)) g[[i + 1 + intercept]] <- x[i, 
-        , ]
-    if (any(sapply(lapply(g, is.na), any))) 
-        warning("Missing data supplied to poisson.net; this may pose problems for certain null hypotheses.  Hope you know what you're doing....")
-    fit.base <- gfit(g, mode = mode, diag = diag)
-    fit <- list()
-    fit$coefficients <- fit.base$coefficients
-    fit$fitted.values <- fit.base$fitted.values
-    fit$residuals <- fit.base$residuals
-    fit$linear.predictors <- fit.base$linear.predictors
-    fit$n <- length(fit.base$y)
-    fit$df.model <- fit.base$rank
-    fit$df.residual <- fit.base$df.residual
-    fit$deviance <- fit.base$deviance
-    fit$null.deviance <- fit.base$null.deviance
-    fit$df.null <- fit.base$df.null
-	######
-	fit$weights <- fit.base$weights
-	fit$boundary <- fit.base$boundary
-	fit$converged <- fit.base$converged
-	fit$contrasts <- fit.base$contrasts
-	fit$control <- fit.base$control
-	fit$effects <- fit.base$effects
-	fit$iter <- fit.base$iter
-	fit$model <- fit.base$model
-	fit$offset <- fit.base$offset
-	fit$prior.weights <- fit.base$prior.weights
-	fit$R <- fit.base$R
-	fit$y <- fit.base$y
-	#fit$summary <- summary(fit.base)
-	######
-    fit$aic <- fit.base$aic
-    fit$bic <- fit$deviance + fit$df.model * log(fit$n)
-    fit$qr <- fit.base$qr
-    #fit$ctable <- table(as.numeric(fit$fitted.values >= 0.5), 
-    #    fit.base$y, dnn = c("Predicted", "Actual"))
-    #if (NROW(fit$ctable) == 1) {
-    #    if (rownames(fit$ctable) == "0") 
-    #        fit$ctable <- rbind(fit$ctable, c(0, 0))
-    #    else fit$ctable <- rbind(c(0, 0), fit$ctable)
-    #    rownames(fit$ctable) <- c("0", "1")
-    #}
-    nullhyp <- match.arg(nullhyp)
-    if ((nullhyp %in% c("qap", "qapspp")) && (nx == 1)) 
-        nullhyp <- "qapy"
-    if (nullhyp == "classical") {
-        cvm <- chol2inv(fit$qr$qr)
-        se <- sqrt(diag(cvm))
-        tval <- fit$coefficients/se
-        fit$dist <- NULL
-        fit$pleeq <- pt(tval, fit$df.residual)
-        fit$pgreq <- pt(tval, fit$df.residual, lower.tail = FALSE)
-        fit$pgreqabs <- 2 * pt(abs(tval), fit$df.residual, lower.tail = FALSE)
-    }
-    else if (nullhyp %in% c("cugtie", "cugden", "cuguman")) {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- switch(nullhyp, cugtie <- rgraph(n, 
-                  mode = mode, diag = diag, replace = FALSE, 
-                  tielist = g[[i + 1]]), cugden <- rgraph(n, 
-                  tprob = gden(g[[i + 1]], mode = mode, diag = diag), 
-                  mode = mode, diag = diag), cuguman <- (function(dc, 
-                  n) {
-                  rguman(1, n, mut = x[1], asym = x[2], null = x[3], 
-                    method = "exact")
-                })(dyad.census(g[[i + 1]]), n))
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapy") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            gr[[1]] <- rmperm(g[[1]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapx") {
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            gr <- g
-            for (j in 1:reps) {
-                gr[[i + 1]] <- rmperm(gr[[i + 1]])
-                repdist[j, i] <- gfit(gr, mode = mode, diag = diag)$coef[i]
-            }
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if (nullhyp == "qapallx") {
-        repdist <- matrix(0, reps, nx)
-        gr <- g
-        for (i in 1:reps) {
-            for (j in 1:nx) gr[[1 + j]] <- rmperm(g[[1 + j]])
-            repdist[i, ] <- gfit(gr, mode = mode, diag = diag)$coef
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    else if ((nullhyp == "qap") || (nullhyp == "qapspp")) {
-        xsel <- matrix(TRUE, n, n)
-        if (!diag) 
-            diag(xsel) <- FALSE
-        if (mode == "graph") 
-            xsel[upper.tri(xsel)] <- FALSE
-        repdist <- matrix(0, reps, nx)
-        for (i in 1:nx) {
-            xfit <- gfitlm(g[1 + c(i, (1:nx)[-i])], mode = mode, 
-                diag = diag, tol = tol)
-            xres <- g[[1 + i]]
-            xres[xsel] <- qr.resid(xfit[[1]], xfit[[2]])
-            if (mode == "graph") 
-                xres[upper.tri(xres)] <- t(xres)[upper.tri(xres)]
-            for (j in 1:reps) repdist[j, i] <- gfit(c(g[-(1 + 
-                i)], list(rmperm(xres))), mode = mode, diag = diag)$coef[nx]
-        }
-        fit$dist <- repdist
-        fit$pleeq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            "<="), 2, mean)
-        fit$pgreq <- apply(sweep(fit$dist, 2, fit$coefficients, 
-            ">="), 2, mean)
-        fit$pgreqabs <- apply(sweep(abs(fit$dist), 2, abs(fit$coefficients), 
-            ">="), 2, mean)
-    }
-    fit$nullhyp <- nullhyp
-    fit$names <- names(mf[2:length(mf)])
-	#fit$names <- names(mf[2:stackcount(mf)])  # paste("x", 1:(nx - intercept), sep = "")
-    if (intercept) 
-        fit$names <- c("(intercept)", fit$names)
-    fit$intercept <- intercept
-	fit$xlevels <- .getXlevels(mt, mf)
-	fit <- c(fit, list(call = call, formula = formula, terms = mt, 
-	data = data, xlevels = .getXlevels(mt, mf)))
-		new.data <- as.data.frame(as.vector(data[,1]))
-	for(i in 2:ncol(data)){
-	new.data <- cbind(new.data, as.vector(data[,i])) } 
-	names(new.data) <- names(data)
-	fit$data <- new.data
-	fit$family <- family
-	fit$rank <- fit$df.model
-	so <- summary.glm(fit)
-	fit$mod.coefficients <- so$coefficients
-	fit$cov.unscaled <- so$cov.unscaled
-	fit$cov.scaled <- so$cov.scaled
-    class(fit) <- c("netglm")
-    return(fit)
-}
diff --git a/R/network.R b/R/network.R
deleted file mode 100644
index f764717..0000000
--- a/R/network.R
+++ /dev/null
@@ -1,17 +0,0 @@
-network <- function(...){
-	cl <- match.call()
-	datanames <- list()
-	for(i in 2: length(cl)){ 
-		datanames[[i-1]] <- cl[[i]]
-		} 
-	newdata <- list(...)
-	names(newdata) <- datanames
-	for(i in 1: length(newdata)){
-			newdata[[i]] <- as.matrix(newdata[[i]])
-			if(any(dimnames(newdata[[i]])[[2]] == "V1")){
-				dimnames(newdata[[i]]) <- list(NULL, NULL)
-				}
-			}	
-	class(newdata) <- "data.frame"
-	return(newdata)	
-	}
diff --git a/R/normal.R b/R/normal.R
new file mode 100644
index 0000000..7a08a37
--- /dev/null
+++ b/R/normal.R
@@ -0,0 +1,122 @@
+#' Interface between normal model and Zelig
+#' This function is exclusively for use by the `zelig' function
+#' @param formula a formula
+#' @param weights a numeric vector
+#' @param ... ignored parameters
+#' @param data a data.frame
+#' @return a list to be coerced into a zelig.call object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+zelig2normal <- function(formula, weights=NULL, ..., data)
+  z(
+    glm,
+    # .hook = "robust.glm.hook",
+    formula = formula,
+    weights = weights,
+    family  = gaussian,
+    model   = F,
+    data    = data
+    )
+#' Param Method for the 'normal' Zelig Model
+#' @note This method is used by the 'normal' Zelig model
+#' @usage \method{param}{normal}(obj, num=1000, ...)
+#' @S3method param negbinom
+#' @param obj a 'zelig' object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... ignored
+#' @return a list to be cast as a 'parameters' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.normal <- function(obj, num=1000, ...) {
+  degrees.freedom <- .fitted$df.residual
+  sig2 <- summary(.fitted)$dispersion
+
+  list(
+       simulations = mvrnorm(n=num, mu=coef(.fitted), Sigma=vcov(.fitted)),
+       alpha = sqrt(degrees.freedom * sig2 / rchisq(num, degrees.freedom)),
+       link = function (x) x,
+       linkinv = function (x) x
+       )
+}
+#' Compute quantities of interest for 'normal' Zelig models
+#' @usage \method{qi}{normal}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi normal
+#' @param obj a 'zelig' object
+#' @param x a 'setx' object or NULL
+#' @param x1 an optional 'setx' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#'   though this feature is currentlysupported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of
+#'   interest with their simulations
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.normal <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
+  # get `num` samples from the underlying distribution
+  coef <- coef(param)
+  alpha <- alpha(param)
+
+  # theta = eta, because inverse of 
+  # normal models' link function is
+  # the identity
+  theta <- matrix(coef %*% t(x), nrow=nrow(coef))
+
+  #
+  pr <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
+
+  #
+  ev <- theta
+  ev1 <- pr1 <- fd <- NA
+  
+  for (i in 1:nrow(ev))
+    pr[i,] <- rnorm(ncol(ev), mean = ev[i,], sd = alpha[i])
+
+
+  # if x1 is not NULL, run more simultations
+  # ...
+
+  if (!is.null(x1)) {
+
+    # quantities of interest
+    lis1 <- qi(obj, x1, num=num, param=param)
+
+    # pass values over
+    ev1 <- lis1[[1]]
+    pr1 <- lis1[[3]]
+
+    # compute first differences
+    fd <- ev1 - ev
+  }
+
+  # return
+  list("Expected Values: E(Y|X)"  = ev,
+       "Expected Values: E(Y|X1)" = ev1,
+       "Predicted Values: Y|X"    = pr,
+       "Predicted Values: Y|X1"   = pr1,
+       "First Differences: E(Y|X1) - E(Y|X)" = fd
+       )
+}
+#' Describe the \code{normal} model to Zelig
+#' @usage \method{describe}{normal}(...)
+#' @S3method describe normal
+#' @param ... ignored parameters
+#' @return a list to be processed by `as.description'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+describe.normal <- function(...) {
+  # parameters object
+  parameters <- list(pi = list(
+                       equations = c(1, 1),
+                       tags.allowed = FALSE,
+                       dep.var = TRUE,
+                       exp.var = TRUE
+                       )
+                     )
+
+  # return list
+  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+       year     = 2008,
+       category = "continuous",
+       parameters = parameters,
+       text = "Normal Regression for Continuous Dependent Variables"
+       )
+}
diff --git a/R/normal.bayes.R b/R/normal.bayes.R
new file mode 100644
index 0000000..fde4514
--- /dev/null
+++ b/R/normal.bayes.R
@@ -0,0 +1,90 @@
+#' Interface between the Zelig Model normal.bayes and the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param ... additional parameters
+#' @param data a data.frame 
+#' @return a list specifying '.function'
+#' @export
+zelig2normal.bayes <- function (
+                               formula, 
+                               burnin = 1000, mcmc = 10000, 
+                               verbose = 0, 
+                               ..., 
+                               data
+                               ) {
+
+  loadDependencies("MCMCpack", "coda")
+
+  list(
+       .function = "MCMCregress",
+       .hook = "MCMChook",
+
+       formula = formula,
+       data   = data,
+       burnin = burnin,
+       mcmc   = mcmc,
+       verbose= verbose,
+
+       # Most parameters can be simply passed forward
+       ...
+       )
+}
+
+#' @S3method param normal.bayes
+param.normal.bayes <- function(obj, num=1000, ...) {
+  list(
+       coef = coef(obj),
+       linkinv = gaussian()
+       )
+}
+
+#' @S3method qi normal.bayes
+qi.normal.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+  normal.ev <- function (x, param) {
+    # If either of the parameters are invalid,
+    # Then return NA for both qi's
+    if (is.null(x) || is.na(x) || is.null(param))
+      return(list(ev=NA, pv=NA))
+
+    # Extract simulated parameters and get column names
+    coef <- coef(param)
+    cols <- colnames(coef)
+
+    # Place the simulated variances in their own vector
+    sigma2 <- coef[, ncol(coef)]
+
+    # Remove the "sigma2" (variance) parameter which should already be placed
+    # in the simulated parameters
+    cols <- cols[ ! "sigma2" == cols ]
+    
+    #
+    coef <- coef[, cols]
+
+    #
+    ev <- coef %*% t(x)
+    pv <- rnorm(nrow(ev), ev, sqrt(sigma2))
+
+    #
+    list(ev = ev, pv = pv)
+  }
+
+  res1 <- normal.ev(x, param)
+  res2 <- normal.ev(x1, param)
+
+  list(
+       "Expected Value: E(Y|X)" = res1$ev,
+       "Predicted Value: Y|X" = res1$pv,
+       "Expected Value (for X1): E(Y|X1)" = res2$ev,
+       "Predicted Value (for X1): Y|X1" = res2$pv,
+       "First Differences: E(Y|X1) - E(Y|X)" = res2$ev - res1$ev
+       )
+}
+
+
+#' @S3method describe normal.bayes
+describe.normal.bayes <- function(...) {
+  list(
+       authors = c("Ben Goodrich", "Ying Lu"),
+       text = "Bayesian Normal Linear Regression",
+       year = 2013
+       )
+}
diff --git a/R/normal.gee.R b/R/normal.gee.R
new file mode 100644
index 0000000..19fb0a4
--- /dev/null
+++ b/R/normal.gee.R
@@ -0,0 +1,71 @@
+#' Interface between the Zelig Model normal.gee and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param id a character-string specifying the column of the data-set to use
+#'   for clustering
+#' @param robust a logical specifying whether to robustly or naively compute
+#'   the covariance matrix. This parameter is ignored in the \code{zelig2}
+#'   method, and instead used in the \code{robust.hook} function, which
+#'   executes after the call to the \code{gee} function
+#' @param ... ignored parameters
+#' @param R a square-matrix specifying the correlation
+#' @param corstr a character-string specifying the correlation structure
+#' @param data a data.frame 
+#' @return a list specifying the call to the external model
+#' @export
+zelig2normal.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {
+
+  loadDependencies("gee")
+
+  if (corstr == "fixed" && is.null(R))
+    stop("R must be defined")
+
+  # if id is a valid column-name in data, then we just need to extract the
+  # column and re-order the data.frame and cluster information
+  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
+    id <- data[, id]
+    data <- data[order(id), ]
+    id <- sort(id)
+  }
+
+  z(
+    .function = gee,
+    .hook = robust.gee.hook,
+
+    formula = formula,
+    id = id,
+    corstr = corstr,
+    family  = gaussian(),
+    R = R,
+    data = data,
+    ...
+    )
+}
+
+#' @S3method param normal.gee
+param.normal.gee <- function(obj, num=1000, ...) {
+
+  # Extract means to compute maximum likelihood
+  mu <- coef(obj)
+
+  # Extract covariance matrix to compute maximum likelihood
+  Sigma <- vcov(obj)
+
+  #
+  list(
+       coef = mvrnorm(num, mu, Sigma),
+       linkinv = function (x) x
+       )
+}
+
+#' @S3method qi normal.gee
+qi.normal.gee <- qi.gamma.gee
+
+#' @S3method describe normal.gee
+describe.normal.gee <- function(...) {
+  list(
+       authors = "Patrick Lam",
+       text = "General Estimating Equation for Normal Regression",
+       year = 2011
+       )
+}
diff --git a/R/normal.survey.R b/R/normal.survey.R
new file mode 100644
index 0000000..5f37d5e
--- /dev/null
+++ b/R/normal.survey.R
@@ -0,0 +1,161 @@
+#' @export
+zelig2normal.survey <- function(
+                               formula,
+                               weights=NULL, 
+                               ids=NULL,
+                               probs=NULL,
+                               strata = NULL,  
+                               fpc=NULL,
+                               nest = FALSE,
+                               check.strata = !nest,
+                               repweights = NULL,
+                               type,
+                               combined.weights=FALSE,
+                               rho = NULL,
+                               bootstrap.average=NULL, 
+                               scale=NULL,
+                               rscales=NULL,
+                               fpctype="fraction",
+                               return.replicates=FALSE,
+                               na.action="na.omit",
+                               start=NULL,
+                               etastart=NULL, 
+                               mustart=NULL,
+                               offset=NULL, 	      		
+                               model1=TRUE,
+                               method="glm.fit",
+                               x=FALSE,
+                               y=TRUE,
+                               contrasts=NULL,
+                               design=NULL,
+                               data
+                               ) {
+
+  loadDependencies("survey")
+
+  if (is.null(ids))
+    ids <- ~1
+
+  # the following lines designate the design
+  # NOTE: nothing truly special goes on here;
+  #       the below just makes sure the design is created correctly
+  #       for whether or not the replication weights are set
+  design <- if (is.null(repweights))
+    svydesign(
+              data=data,
+              ids=ids,
+              probs=probs,
+              strata=strata,
+              fpc=fpc,
+              nest=nest,
+              check.strata=check.strata,
+              weights=weights
+              )
+
+  else {
+    .survey.prob.weights <- weights
+    
+    svrepdesign(
+                data=data,
+                repweights=repweights, 	
+                type=type,
+                weights=weights,
+                combined.weights=combined.weights, 
+                rho=rho,
+                bootstrap.average=bootstrap.average,
+                scale=scale,
+                rscales=rscales,
+                fpctype=fpctype,
+                fpc=fpc
+                )
+  }
+
+  
+  z(.function = svyglm,
+    formula = formula,
+    design  = design
+    )
+}
+
+  
+#' @S3method param normal.survey
+param.normal.survey <- function(obj, num=1000, ...) {
+  df <- obj$result$df.residual
+  sig2 <- summary(obj)$dispersion
+  
+  list(
+       simulations = mvrnorm(num, coef(obj), vcov(obj)),
+       alpha = sqrt(df*sig2/rchisq(num, df=df)),
+
+       # note: assignment of link and link-inverse are
+       #       implicit when the family is assigned
+       fam   = gaussian()
+       )
+}
+#' @S3method qi normal.survey
+qi.normal.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
+  model <- GetObject(obj)
+
+  coef <- coef(param)
+  alpha <- alpha(param)
+
+  eta <- coef %*% t(x)
+
+  link.inverse <- linkinv(param)
+
+  theta <- matrix(link.inverse(eta), nrow=nrow(coef))
+
+  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))
+
+  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
+
+
+  ev <- theta
+
+
+  for (k in 1:nrow(ev))
+    pr[k, ] <- rnorm(length(ev[k, ]), ev[k,], alpha[k])
+
+
+
+  ev1 <- pr1 <- fd <- NA
+
+  if (!is.null(x1)) {
+    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)),
+                            nrow = nrow(coef)
+                            )
+
+    fd <- ev1-ev
+  }
+
+  att.ev <- att.pr <- NA
+
+  if (!is.null(y)) {
+    yvar <- matrix(rep(y, nrow(coef)), nrow=nrow(coef), byrow=TRUE)
+
+    tmp.ev <- yvar - ev
+    tmp.pr <- yvar - pr
+
+    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow=nrow(coef))
+    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow=nrow(coef))
+  }
+
+
+  list(
+       "Expected Values: E(Y|X)" = ev,
+       "Expected Values for (X1): E(Y|X1)" = ev1,
+       "Predicted Values: Y|X" = pr,
+       "Predicted Values (for X1): Y|X1" = pr1,
+       "First Differences E(Y|X1)-E(Y|X)" = fd,
+       "Average Treatment Effect: Y-EV" = att.ev,
+       "Average Treatment Effect: Y-PR" = att.pr
+       )
+}
+#' @S3method describe normal.survey
+describe.normal.survey <- function(...) {
+  list(
+       authors = "Nicholas Carnes",
+       year = 2008,
+       description = "Survey-Weighted Normal Regression for Continuous, Positive Dependent Variables"
+       )
+}
diff --git a/R/oprobit.bayes.R b/R/oprobit.bayes.R
new file mode 100644
index 0000000..354b77f
--- /dev/null
+++ b/R/oprobit.bayes.R
@@ -0,0 +1,140 @@
+#' @export
+zelig2oprobit.bayes <- function (
+                               formula, 
+                               burnin = 1000, mcmc = 10000, 
+                               verbose=0, 
+                               ..., 
+                               data
+                               ) {
+
+  loadDependencies("MCMCpack", "coda")
+
+  if (missing(verbose))
+    verbose <- round((mcmc + burnin)/10)
+
+  list(
+       .function = "MCMCoprobit",
+       .hook = "MCMChook",
+
+       formula = formula,
+       data   = data,
+       burnin = burnin,
+       mcmc   = mcmc,
+       verbose= verbose,
+
+       # Most parameters can be simply passed forward
+       ...
+       )
+}
+#' @S3method param oprobit.bayes
+param.oprobit.bayes <- function(obj, num=1000, ...) {
+
+  # Produce the model matrix in order to get all terms (explicit and implicit)
+  # from the regression model.
+  mat <- model.matrix(obj$result, data=obj$data)
+
+  # Response Terms
+  p <- ncol(mat)
+
+  # All coefficients
+  coefficients <- coef(obj)
+
+  # Coefficients for predictor variables
+  beta <- coefficients[, 1:p]
+
+  # Middle values of "gamma" matrix
+  mid.gamma <- coefficients[, -(1:p)]
+
+  # ...
+  level <- ncol(coefficients) - p + 2
+
+
+  # Initialize the "gamma" parameters
+  gamma <- matrix(NA, nrow(coefficients), level + 1)
+
+  # The first, second and last values are fixed
+  gamma[, 1] <- -Inf
+  gamma[, 2] <- 0
+  gamma[, ncol(gamma)] <- Inf
+
+  # All others are determined by the coef-matrix (now stored in mid.gamma)
+  if (ncol(gamma) > 3)
+    gamma[, 3:(ncol(gamma)-1)] <- mid.gamma
+
+  # return
+  list(
+       simulations = beta,
+       alpha   = gamma,
+       linkinv = NULL
+       )
+}
+#' @S3method qi oprobit.bayes
+qi.oprobit.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
+{
+  labels <- levels(model.response(model.frame(obj$result)))
+
+  res1 <- compute.oprobit.bayes(x, param, labels)
+  res2 <- compute.oprobit.bayes(x1, param, labels)
+
+  # 
+  list(
+       "Expected Value: E(Y|X)" = res1$ev,
+       "Predicted Value: Y|X"   = res1$pv,
+       "Expected Value (for X1): E(Y|X1)" = res2$ev,
+       "Predicted Value (for X1): Y|X1"   = res2$pv,
+       "First Differences: E(Y|X1) - E(Y|X)" = res2$ev - res1$ev
+       )
+}
+# Helper function used to generate expected values
+compute.oprobit.bayes <- function (x, param, labels) {
+  # If either of the parameters are invalid,
+  # Then return NA for both qi's
+  if (is.null(x) || is.na(x) || is.null(param))
+    return(list(ev=NA, pv=NA))
+
+
+  # Extract simulated parameters
+  beta <- coef(param)
+  gamma <- alpha(param)
+
+  # x is implicitly cast into a matrix
+  eta <- beta %*% t(x)
+
+  # **TODO: Sort out sizes of matrices for these things.
+  ev <- array(NA, c(nrow(eta), ncol(gamma) - 1, ncol(eta)))
+  pv <- matrix(NA, nrow(eta), ncol(eta))
+
+  # Compute Expected Values
+  # ***********************
+  # Note that the inverse link function is:
+  #   pnorm(gamma[, j+1]-eta) - pnorm(gamma[, j]-eta)
+  for (j in 1:(ncol(gamma)-1)) {
+    ev[, j, ] <- pnorm(gamma[, j+1]-eta) - pnorm(gamma[, j]-eta)
+  }
+
+  colnames(ev) <- labels
+
+
+  # Compute Predicted Values
+  # ************************
+  for (j in 1:nrow(pv)) {
+    mu <- eta[j, ]
+    pv[j, ] <- as.character(cut(mu, gamma[j, ], labels=labels))
+  }
+
+
+  # **TODO: Update summarize to work with at most 3-dimensional arrays
+  ev <- ev[, , 1]
+
+
+  # Return
+  list(ev = ev, pv = pv)
+}
+#' @S3method describe oprobit.bayes
+describe.oprobit.bayes <- function(...) {
+  list(
+       text = "Bayesian Probit Regression for Dichotomous Dependent Variables",
+       authors = c("Ben Goodrich", "Ying Lu"),
+       year = 2013
+       )
+}
diff --git a/R/packageConflicts.R b/R/packageConflicts.R
deleted file mode 100644
index dc2ed6c..0000000
--- a/R/packageConflicts.R
+++ /dev/null
@@ -1,7 +0,0 @@
-packageConflicts <- function(str) {
-  paths <- search()
-  str <- paste("package:", str, sep = "")
-  if (str %in% paths)
-    do.call(detach, list(str))
-  return(invisible(0))
-}
diff --git a/R/param.MCMCZelig.R b/R/param.MCMCZelig.R
deleted file mode 100644
index 007ee15..0000000
--- a/R/param.MCMCZelig.R
+++ /dev/null
@@ -1,14 +0,0 @@
-param.MCMCZelig <- function(object, num = NULL, bootstrap = FALSE) {
-  if (bootstrap) 
-    stop("For the class of MCMC models, no need to use Bootstrap method.")
-   else 
-    res <- object$coefficients
-
-
-  res
-  
-}
-
-
-
-
diff --git a/R/param.R b/R/param.R
index 989e180..942a621 100644
--- a/R/param.R
+++ b/R/param.R
@@ -1,2 +1,60 @@
-param<-function(object, ...)
+#' The \code{param} method is used by developers to specify simulated and fixed
+#' ancillary parameters of the Zelig statistical model. That is, this method
+#' is used between the \link{zelig2} function and the \link{qi}
+#' as a helper function that specifies all the necessary details needed to 
+#' simulate quantities of interest, given the fitted statistical model produced
+#' by the \code{zelig2} function.
+#'
+#' @title Generic Method for Simulating Ancillary/Auxiliary Parameters of Zelig
+#'   Models
+#' @note The 'param' function is a method meant to be overloaded by Zelig
+#'   Developers
+#' @param obj a \code{zelig} object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... optional parameters which will likely be ignored
+#' @return
+#'   The main purpose of the \code{param} function is to return a list of 
+#'   key-value pairs, specifying information that should be shared between
+#'   the \code{qi} function and the fitted statistical model (produced by the
+#'   \code{zelig2} function). This list can contain the following entries:
+#'
+#'   \item{\code{simulations}}{specifies a set of simulated parameters used to
+#'     describe the statistical model's underlying distribution}
+#'   \item{\code{alpha}}{specifies the fixed (non-simulated) ancillary
+#'     parameters used by the statistical model's underlying distribution}
+#'   \item{\code{family}}{specifies a family object used to implicitly define
+#'     the \code{link} and \code{linkinv} functions. That is, this specifies
+#'     the "link" and "inverse link" functions of generalized linear models}
+#'   \item{\code{link}}{specifies the \code{link} function to be used. This 
+#'     parameter is largely unimportant compared to the "inverse link"
+#'     function}
+#'   \item{\code{linkinv}}{specifies the \code{linkinv} function to be used.}
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @examples
+#' param.some.model <- function (obj, num, ...) {
+#'   list(
+#'        simulations = NULL,
+#'        alpha = NULL,
+#'        link = NULL,
+#'        linkinv = NULL,
+#'        fam = NULL
+#'        )
+#' }
+param <- function (obj, num, ...)
   UseMethod("param")
+
+
+#' Default Method for ``param''
+#'
+#' If no \code{param} function is set for a Zelig model, then this function will
+#' return NULL.
+#' @usage \method{param}{default}(obj, num, ...)
+#' @S3method param default
+#' @param obj ignored parameter
+#' @param num ignored parameter
+#' @param ... ignored parameters
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.default <- function (obj, num, ...)
+  list()
diff --git a/R/param.default.R b/R/param.default.R
deleted file mode 100644
index 89b15ac..0000000
--- a/R/param.default.R
+++ /dev/null
@@ -1,7 +0,0 @@
-param.default <- function(object, num, bootstrap = FALSE) {
-  if (!bootstrap)
-    res <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-  else
-    res <- coef(object)
-  res
-}
diff --git a/R/param.eiRxC.R b/R/param.eiRxC.R
deleted file mode 100644
index 6d7b09b..0000000
--- a/R/param.eiRxC.R
+++ /dev/null
@@ -1,12 +0,0 @@
-param.eiRxC <- function(object, num = NULL, bootstrap = FALSE) {
-  if (!bootstrap) 
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-  
-  else 
-    coef <- coef(object)
-  coef
-}
-
-
-
-
diff --git a/R/param.gam.R b/R/param.gam.R
deleted file mode 100644
index 9953fd3..0000000
--- a/R/param.gam.R
+++ /dev/null
@@ -1,27 +0,0 @@
-param.gam <- function(object, num = NULL, bootstrap = FALSE) {
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-    if (getzelig(object) == "normal.gam") {
-      df <- object$df.residual
-      sig2 <- summary(object)$dispersion
-      alpha <- sqrt(df*sig2/rchisq(num, df=df))
-      res <- cbind(coef, alpha)
-    }
-    else if (getzelig(object) == "gamma.gam")  {
-      rate <- gamma.shape(object) 
-      alpha <- rnorm(num, mean = rate$alpha, sd = rate$SE)
-      res <- cbind(coef, alpha)
-    }
-    else if (getzelig(object) == "negbin.gam") {
-      alpha <- object$theta
-      res <- cbind(coef, c(alpha))
-    }
-    else
-      res <- coef
-  }
-  res
-}
-
-
-
-
diff --git a/R/param.gee.R b/R/param.gee.R
deleted file mode 100644
index 7ea4675..0000000
--- a/R/param.gee.R
+++ /dev/null
@@ -1,12 +0,0 @@
-param.gee <- function(object, num = NULL, bootstrap = FALSE){
-  model <- getzelig(object)
-  if (!bootstrap) {
-    res <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-  }
-  else {
-    res <- coef(object)
-  }
-  res
-}
-
-
diff --git a/R/param.glm.R b/R/param.glm.R
deleted file mode 100644
index 4ca72da..0000000
--- a/R/param.glm.R
+++ /dev/null
@@ -1,46 +0,0 @@
-param.glm <- function(object, num = NULL, bootstrap = FALSE){
-  model <- getzelig(object)
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-    if (model == "normal") {
-      df <- object$df.residual
-      sig2 <- summary(object)$dispersion
-      alpha <- sqrt(df*sig2/rchisq(num, df=df))
-      res <- cbind(coef, alpha)
-    }
-    else if (model == "gamma")  {
-      shape <- gamma.shape(object)
-      alpha <- rnorm(num, mean = shape$alpha, sd = shape$SE)
-      res <- cbind(coef, alpha)
-    }
-    else if (model == "negbin") {
-      alpha <- object$theta
-      res <- cbind(coef, c(alpha))
-    }
-    else
-      res <- coef
-  }
-  else {
-    coef <- coef(object)
-    if (object$family$family == "gaussian") {
-      alpha <- sum(object$residuals^2)/object$df.residual
-      res <- c(coef, alpha)
-    }
-    else if (object$family$family == "Gamma") {
-      alpha <- gamma.shape(object)$alpha
-      res <- c(coef, alpha)
-    }
-    else if (object$family$family == "neg.bin") {
-      alpha <- object$theta
-      res <- c(coef, alpha)
-    }
-    else
-      res <- coef
-  }
-  res
-}
-
-
-
-
-
diff --git a/R/param.lm.R b/R/param.lm.R
deleted file mode 100644
index 17d8082..0000000
--- a/R/param.lm.R
+++ /dev/null
@@ -1,18 +0,0 @@
-param.lm <-function(object, num, bootstrap = FALSE) {
-#  if (num < 1) num <- 1
- 
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-    df <- object$df.residual
-    sig2 <- summary.lm(object)$sigma^2
-    alpha <- sqrt(df*sig2/rchisq(num, df=df))
-    res <- cbind(matrix(coef, nrow=num), alpha)
-    #res <- cbind(coef, alpha)
-  }
-  else {
-    coef <- coef(object)
-    alpha <- summary.lm(object)$sigma
-    res <- c(coef, alpha)
-  }
-  res
-}
diff --git a/R/param.mixed.R b/R/param.mixed.R
deleted file mode 100644
index 6ef4275..0000000
--- a/R/param.mixed.R
+++ /dev/null
@@ -1,42 +0,0 @@
-## modified by delia 09/22/08
-################################
-
-param.mer <- function(object, num, bootstrap=FALSE){
-	fixed <- fixef(object)
-	vars <- ranef(object, postVar=TRUE)
-	gammas <- NULL
-	n.G <- length(vars)
-	
-	if (!bootstrap){
-		object <- selectMethod("summary", "mer")(object)
-		# sample fixed effects
-		betasF <- NULL
-		vcov.fix <- vcov(object)
-		if (length(fixed) > 0){
-			betasF <- mvrnorm(num, fixed, vcov.fix)
-		}
-		# sample random effects
-		for (m in 1:n.G){
-			vars.m <- attr(vars[[m]], "postVar")
-			V.beta <- VarCorr(object)[[m]]
-			J <- dim(vars.m)[1]
-			gammas[[m]] <- mvrnorm(num, rep(0, J), V.beta)
-		}
-	}
-	else {
-		object <- summary(object)
-		# set fixed effects
-		betasF <- fixed
-		# sample random effects
-		for (m in 1:n.G){
-			V.beta <- VarCorr(object)[[m]]
-			gammas[[m]]<- mvrnorm(1, 0, V.beta)
-		}
-	}
-	
-	names(gammas) <- names(vars)
-	betas <- betasF
-	scale <- object at sigma
-	
-	list(betas=betas, gammas=gammas, scale=scale)
-}
\ No newline at end of file
diff --git a/R/param.multinom.R b/R/param.multinom.R
deleted file mode 100644
index c968e30..0000000
--- a/R/param.multinom.R
+++ /dev/null
@@ -1,12 +0,0 @@
-param.multinom <- function(object, num, bootstrap = FALSE) {
-  k <- length(object$lev)
-  coef <- NULL
-  tmp <- coef(object)
-  for (i in 1:(k-1))
-    coef <- c(coef, tmp[i,])
-  if (!bootstrap) 
-    sim.coef <- mvrnorm(num, mu=coef, Sigma=vcov(object))
-  else
-    sim.coef <- coef
-  sim.coef
-}
diff --git a/R/param.multiple.R b/R/param.multiple.R
deleted file mode 100644
index 045f64e..0000000
--- a/R/param.multiple.R
+++ /dev/null
@@ -1,19 +0,0 @@
-param.multiple <- function(object, num = NULL, bootstrap = FALSE) {
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-    if (getzelig(object) %in% c("sur","2sls","w2sls","3sls")) {
-      res <- coef
-    }
-    else
-      res <- coef
-  }
-  else {
-    coef <- coef(object)
-      res <- coef
-  }
-  res
-}
-
-
-
-
diff --git a/R/param.netglm.R b/R/param.netglm.R
deleted file mode 100644
index d119de6..0000000
--- a/R/param.netglm.R
+++ /dev/null
@@ -1,42 +0,0 @@
-param.netglm <- function(object, num = NULL, bootstrap = FALSE, x = NULL) {
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-    if (getzelig(object) == "normal.net") {
-      df <- object$df.residual
-      sig2 <- summary(object)$dispersion
-      alpha <- sqrt(df*sig2/rchisq(num, df=df))
-      res <- cbind(coef, alpha)
-    }
-    else if (getzelig(object) == "gamma.net")  {
-      class(object) <- c("glm","lm")
-	  rate <- gamma.shape(object) 
-	  class(object) <- "netglm"
-      alpha <- rnorm(num, mean = rate$alpha, sd = rate$SE)
-      res <- cbind(coef, alpha)
-    }
-    else if (getzelig(object) == "netnegbin") {
-      alpha <- object$theta
-      res <- cbind(coef, c(alpha))
-    }
-    else
-      res <- coef
-  }
-  else {
-    coef <- coef(object)
-    if (object$family$family == "normal.net") {
-      alpha <- sum(object$residuals^2)/length(object$residuals)
-      res <- c(coef, alpha)
-    }
-    else if (object$family$family == "gamma.net") {
-      alpha <- gamma.dispersion(object)
-      res <- c(coef, alpha)
-    }
-    else if (object$family$family == "netnegbin") {
-      alpha <- object$theta
-      res <- c(coef, alpha)
-    }
-    else
-      res <- coef
-  }
-  res
-}
diff --git a/R/param.netlm.R b/R/param.netlm.R
deleted file mode 100644
index 7a6f1cc..0000000
--- a/R/param.netlm.R
+++ /dev/null
@@ -1,16 +0,0 @@
-param.netlm <- function(object, num, bootstrap = FALSE) {
-#  if (num < 1) num <- 1
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov.netlm(object))
-    df <- object$df.residual
-    sig2 <- summary(object)$sigma^2
-    alpha <- sqrt(df*sig2/rchisq(num, df=df))
-    res <- cbind(coef, alpha)
-  }
-  else {
-    coef <- coef(object)
-    alpha <- summary(object)$sigma
-    res <- c(coef, alpha)
-  }
-  res
-}
\ No newline at end of file
diff --git a/R/param.polr.R b/R/param.polr.R
deleted file mode 100644
index 5202ec9..0000000
--- a/R/param.polr.R
+++ /dev/null
@@ -1,13 +0,0 @@
-param.polr <- function(object, num, bootstrap=FALSE) {
-  num <- 100
-  coef <- object$coefficients
-  zeta <- object$zeta
-  k <- length(coef)
-  if (!bootstrap) {
-    theta <- zeta
-    res <- matrix(mvrnorm(num, mu=c(coef,theta), Sigma=vcov(object)),nrow=num)
-  }
-  else
-    res <- c(coef, zeta)
-  res
-}
diff --git a/R/param.relogit.R b/R/param.relogit.R
deleted file mode 100644
index ef8af98..0000000
--- a/R/param.relogit.R
+++ /dev/null
@@ -1,38 +0,0 @@
-param.relogit <- function(object, num, x, bootstrap = FALSE, bootfn = NULL) {
-  if ("relogit2" %in% class(object)) {
-    pping <- function(tmp0, tmp1, num, bootstrap, x) {
-      par0 <- param.relogit(tmp0, num=num, x=x, bootstrap=bootstrap)
-      par1 <- param.relogit(tmp1, num=num, x=x, bootstrap=bootstrap)
-      P00 <- as.matrix(qi.relogit(tmp0, par0, x=x)$qi$ev)
-      P10 <- as.matrix(qi.relogit(tmp1, par1, x=x)$qi$ev)
-      test <- P00[,1] < P10[,1]
-      par0 <- as.matrix(par0[test,])
-      par1 <- as.matrix(par1[test,])
-      list(par0 = par0, par1 = par1)
-    }
-    tmp0 <- object$lower.estimate
-    tmp1 <- object$upper.estimate
-    tmp <- pping(tmp0, tmp1, num = num, bootstrap=bootstrap, x=x)
-    par0 <- tmp$par0
-    par1 <- tmp$par1
-    while (nrow(par0) < num) {
-      tmp <- pping(tmp0, tmp1, num=num, bootstrap=bootstrap, x=x)
-      par0 <- rbind(par0, tmp$par0)
-      par1 <- rbind(par1, tmp$par1)
-    }
-    if (nrow(par0) > num) {
-      par0 <- par0[1:num,]
-      par1 <- par1[1:num,]
-    }
-    par0 <- as.matrix(par0)
-    par1 <- as.matrix(par1)
-    rownames(par0) <- 1:nrow(par0)
-    rownames(par1) <- 1:nrow(par1)
-    return(list(par0 = par0, par1 = par1))    
-  } else {
-    if (!bootstrap) 
-      return(mvrnorm(num, mu = coef(object), Sigma = vcov(object)))
-    else
-      return(coef(object))
-  }
-}
diff --git a/R/param.rq.R b/R/param.rq.R
deleted file mode 100644
index cc7e1a3..0000000
--- a/R/param.rq.R
+++ /dev/null
@@ -1,12 +0,0 @@
-param.rq <- function(object, num, bootstrap=FALSE){
-    if (!bootstrap){
-        rq.sum <- summary.rq(object, covariance=TRUE, se=object$se)
-        coef <- mvrnorm(num, mu=object$coef, Sigma=rq.sum$cov)
-    }
-
-    else {
-        coef <- object$coef
-    }
-    res <- coef
-    res
-}
diff --git a/R/param.survreg.R b/R/param.survreg.R
deleted file mode 100644
index 277b2d4..0000000
--- a/R/param.survreg.R
+++ /dev/null
@@ -1,19 +0,0 @@
-param.survreg <- function(object, num, bootstrap = FALSE) {
-  cov <- vcov(object)
-  coef <- getcoef(object)
-  log.scale <- log(object$scale)
-  k <- length(coef)
-  if(!bootstrap) {
-    if(ncol(cov)==k)
-      res <- mvrnorm(num, mu=coef, Sigma=cov)
-    else 
-      res <- mvrnorm(num, mu=c(coef, log.scale), Sigma=cov)
-  }
-  else {
-    if (ncol(cov) == k)
-      res <- c(coef)
-    else
-      res <- c(coef, log.scale)
-  }
-  res
-}
diff --git a/R/param.svyglm.R b/R/param.svyglm.R
deleted file mode 100644
index de50d5e..0000000
--- a/R/param.svyglm.R
+++ /dev/null
@@ -1,46 +0,0 @@
-param.svyglm <- function(object, num = NULL, bootstrap = FALSE){
-  model <- getzelig(object)
-  if (!bootstrap) {
-    coef <- mvrnorm(num, mu=coef(object), Sigma=vcov(object))
-    if (model == "normal.survey") {			
-      df <- object$df.residual
-      sig2 <- summary(object)$dispersion
-      alpha <- sqrt(df*sig2/rchisq(num, df=df))
-      res <- cbind(coef, alpha)
-    }
-    else if (model== "gamma.survey")  {		
-      shape <- gamma.shape(object)
-      alpha <- rnorm(num, mean = shape$alpha, sd = shape$SE)
-      res <- cbind(coef, alpha)
-    }
-    else if (model == "negbin") {
-      alpha <- object$theta
-      res <- cbind(coef, c(alpha))
-    }
-    else
-      res <- coef
-  }
-  else {
-    coef <- coef(object)
-    if (object$family$family == "gaussian") {
-      alpha <- sum(object$residuals^2)/object$df.residual
-      res <- c(coef, alpha)
-    }
-    else if (object$family$family == "Gamma") {
-      alpha <- gamma.shape(object)$alpha
-      res <- c(coef, alpha)
-    }
-    else if (object$family$family == "neg.bin") {
-      alpha <- object$theta
-      res <- c(coef, alpha)
-    }
-    else
-      res <- coef
-  }
-  res
-}
-
-
-
-
-
diff --git a/R/param.vglm.R b/R/param.vglm.R
deleted file mode 100644
index 99c987b..0000000
--- a/R/param.vglm.R
+++ /dev/null
@@ -1,7 +0,0 @@
-param.vglm <- function(object, num, bootstrap = FALSE) {
-  cov <- vcov(object)
-  res <- object at coefficients
-  if (!bootstrap) 
-    res <- mvrnorm(num, mu=res, Sigma=cov)
-  res
-}
diff --git a/R/param.zaov.R b/R/param.zaov.R
deleted file mode 100644
index a7ef225..0000000
--- a/R/param.zaov.R
+++ /dev/null
@@ -1,46 +0,0 @@
-param.mlm <-function(object, num, bootstrap = FALSE) {
-        mu <- coef(object)
-        depVars <- colnames(mu)
-        sigma <- vcov(object)
-        mu <- matrix(mu, ncol=1)
-        
-        
-        if (!bootstrap) {
-                coef <- mvrnorm(num, mu=mu, Sigma=sigma)
-                res <- append.sigma(object, coef,num,bootstrap)
-                
-        } else {
-                coef <- coef(object)
-                res <- append.sigma(object, coef,num,bootstrap)
-                
-        }
-  
-        res
-}
- 
-append.sigma <- function(object, coef,num, bootstrap){
-        smobj <- summary.zmlm(object)
-        df <- object$df.residual
-        mat <- as.matrix(coef)
-        for(n in 1:length(smobj)){
-                resp <- smobj[[n]]
-                nm <- names(smobj)[n]
-
-                if(!bootstrap){
-                        sig2 <- resp$sigma^2
-                        alpha <- sqrt(df*sig2/rchisq(num, df=df))
-                        mat <- cbind(mat, alpha)
-                        nc <- ncol(mat)
-                        del <- sub("([[:alpha:]+])[[:space:]+](.*)","\\1", nm)
-                        cnm <- sub(paste(del, "[[:space:]+](.*)", sep=""),"\\1", nm)
-                        cnm <- paste(cnm,":alpha",sep="")
-                        colnames(mat)[nc] <- cnm
-                        
-                }else{
-                        alpha <- resp$sigma
-                        mat <- cbind(mat, alpha)
-                }
-                
-        }
-        return(mat)
-}
diff --git a/R/parameters.R b/R/parameters.R
new file mode 100644
index 0000000..c5f9807
--- /dev/null
+++ b/R/parameters.R
@@ -0,0 +1,132 @@
+#' Constructor for `parameters' class
+#'
+#'
+#' @param simulations a vector or matrix containing simulated values
+#' @param alpha ancillary parameters for the Zelig statistical model
+#' @param fam a family object which implicitly specifies the link
+#'            and link-inverse functions for the 
+#' @param link the link function of the specified statistical model.
+#'             The `linkinv' parameter is implicitly defined by
+#'             by the `link' parameter, when `linkinv' is omitted
+#' @param linkinv the inverse link function
+#' @return a `parameters' object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+parameters <- function(simulations,
+                       alpha,
+                       fam=NULL,
+                       link=NULL,
+                       linkinv=NULL
+                       )
+{
+  if (is.function(fam))
+    fam <- fam()
+
+  #
+  if (!missing(fam) && isS4(fam)) {
+    link <- fam at link
+    linkinv <- fam at inverse
+  }
+  else if (!missing(fam) && inherits(fam, "family")) {
+    link <- fam$linkfun
+    linkinv <- fam$linkinv
+  }
+  else if (missing(link)) {
+    #warning("no link function")
+  }
+
+  else if (missing(linkinv)) {
+    #warning("no inverse link function")
+    linkinv <- .NumInverse(link)
+  }
+
+  # Construct object
+  p <- list(coefficients = simulations,
+            alpha = alpha,
+            link = link,
+            linkinv = linkinv
+            )
+
+  # cast, and return
+  class(p) <- "parameters"
+  p  
+}
+
+
+#' Extract ancillary parameters from
+#' `parameters' objects
+#'
+#' @param param a `parameters' object
+#' @return the ancillary parameters \emph{specified} for
+#'         the statistical model
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+alpha <- function(param)
+  param$alpha
+
+
+#' Return Simulations of Parameter Coefficients
+#'
+#' Returns simulated parameters of coefficients for use in statistical 
+#' simulation. The values are set by the model-fitting function and the 
+#' developer of the qi.<model name> method.
+#'
+#' @note This function may not differ at all from coef.default
+#' @usage \method{coef}{parameters}(object, ...)
+#' @S3method coef parameters
+#' @param object a 'parameters' object
+#' @param \dots ignored
+#' @return simulations, specified by the Zelig model, of
+#'         the ancillary parameters
+#' @export 
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+coef.parameters <- function(object, ...) {
+  object$coefficients
+}
+  
+#' Return Simulations of Parameter Coefficients
+#'
+#' Returns simulated parameters of coefficients for use in statistical 
+#' simulation. The values are set by the model-fitting function and the 
+#' developer of the qi.<model name> method.
+#'
+#' @note This function does not differ at all from coef.default
+#' @usage \method{simulations}{parameters}(object, ...)
+#' @S3method coef parameters
+#' @param object a 'parameters' object
+#' @param \dots ignored
+#' @return simulations, specified by the Zelig model, of
+#'         the ancillary parameters
+#' @export 
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+simulations.parameters <- function(object, ...)
+  object$coefficients
+
+
+#' Method for extracting the link function from 'parameters' objects
+#' @param param a 'parameters' object
+#' @return the link function specified by the `param' function for the given 
+#' Zelig model
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+link <- function(param)
+  param$link
+
+
+#' Method for extracting the inverse link function from 'parameters' objects
+#'
+#' Returns the inverse link function of a ``parameters'' object. If the
+#' model's developer did not specify one (but did specify a link function) this
+#' function returns a numerical approximation of the link function.
+#' @param param a 'parameters' object
+#' @return the inverse link function specified by the 'param' function for the
+#' given Zelig model
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+linkinv <- function(param) {
+  if (is.null(param$linkinv))
+    .NumInverse(param$link)
+
+  else
+    param$linkinv
+}
diff --git a/R/parse.formula.R b/R/parse.formula.R
index e261911..f3c7fe7 100644
--- a/R/parse.formula.R
+++ b/R/parse.formula.R
@@ -1,3 +1,10 @@
+#' Parse Formulas for Zelig Models
+#' @note This is used typically in multinomial and multivariate Zelig models
+#' @param formula a formula
+#' @param model a Zelid model
+#' @param data a data-frame
+#' @export
+#' @author Kosuke Imai and Olivia Lau
 parse.formula<-function( formula, model,data=NULL){
         if(class(formula)[[1]]=="multiple")
           return(formula)
diff --git a/R/parse.par.R b/R/parse.par.R
deleted file mode 100644
index 97c5ca4..0000000
--- a/R/parse.par.R
+++ /dev/null
@@ -1,50 +0,0 @@
-parse.par <- function(par, terms,shape = "matrix", eqn=NULL) {
-  "%w/o%" <- function(x,y) x[!x %in% y]
-  if (is.null(shape)) {
-    if (any(class(terms) == "multiple"))
-      shape <- "matrix"
-    else
-      shape <- "vector"
-  }
-  if(is.null(eqn))
-    eqn<-attr(terms,"systEqns")
-  if (!shape %in% c("matrix", "vector"))
-    stop("not a valid 'shape' for parameters.  Choose from \"matrix\" or \"vector\".")
-  if (any(class(terms) == "multiple")) {
-    allidx <- make.parameters(terms = terms, shape = "vector")
-    idx <- make.parameters(terms = terms,eqns=eqn, shape = "vector")
-    mat <- t(make.parameters(terms = terms,eqns=eqn, shape = "matrix"))
-    if(length(par)==length(allidx))
-      par.names<-allidx
-    else
-      par.names<-idx
-    ancil <-attr(terms,"ancilEqns")
-    syst<-attr(terms,"systEqns")
-    if (length(syst) == 1)
-      shape <- "vector"
-    if (any(eqn %in% ancil)) {
-      if (any(eqn %in% syst)) {
-        stop("  eqn cannot include both systematic and ancillary \n  parameters at the same time.")
-      }
-      else
-        ret.val <- par[par.names %in% idx]
-    }
-    else { ## if eqn to be returned is a systematic component
-      subs<-mat
-      out <- matrix(0, nrow = nrow(subs), ncol = ncol(subs),
-                    dimnames = dimnames(subs))
-      for(i in 1:nrow(out))
-        for(j in 1:ncol(out))
-          if(!is.na(subs[i,j]))
-            out[i,j]<-par[par.names %in% subs[i,j]]
-      if (shape == "matrix") 
-        ret.val <- t(out)
-      else {
-        ret.val <- par[par.names %in% idx]
-      }
-    }
-  }
-  ret.val
-}
-
-
diff --git a/R/parseFormula.R b/R/parseFormula.R
new file mode 100644
index 0000000..a3bbf5e
--- /dev/null
+++ b/R/parseFormula.R
@@ -0,0 +1,120 @@
+#' Parse Zelig-style Formulae
+#'
+#' Zelig uses three distinct types of formulae. This method is a re-design
+#' of the Zelig function \code{parse.formula}.
+#' @param obj a list or formula
+#' @param data the data set associated with the formula object
+#' @return an object of type "parseFormula". This object has slots specifying:
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+parseFormula <- function (obj, data=NULL) {
+  UseMethod("parseFormula")
+}
+
+
+
+#' Parse Standard Formulae
+#'
+#' This method parses a formula-style Zelig formula
+#' @usage \method{parseFormula}{formula}(obj, data=NULL)
+#' @param obj a formula
+#' @param data a data frame
+#' @return an object of type "parseFormula"
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @S3method parseFormula formula
+parseFormula.formula <- function (obj, data=NULL) {
+
+  # Extract terms
+  TERMS <- terms(obj)
+
+  #
+  MODEL.MATRIX <- tryCatch(model.matrix(obj, data), error = function (e) NULL)
+
+  # Build the object
+  res <- list(
+              formula = obj,
+              terms = TERMS,
+              response = getResponseTerms(obj),
+              predictor = getPredictorTerms(obj),
+              model.matrix = MODEL.MATRIX
+              )
+
+  # Return
+  class(res) <- "parseFormula"
+  res
+}
+
+
+
+#' Parse List-Style Zelig Formulae
+#'
+#' This method parses a list-style Zelig formula.
+#' @usage \method{parseFormula}{list}(obj, data=NULL)
+#' @param obj a list of formulae
+#' @param data a data frame
+#' @return an object of type "parseFormula"
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @S3method parseFormula list
+parseFormula.list <- function (obj, data=NULL) {
+
+  # Extract terms (and place in a list)
+  TERMS <- Map(terms, obj)
+
+  # 
+  MODEL.MATRIX <- makeModelMatrix(obj, data)
+
+  # Build the object
+  res <- list(
+              formula = obj,
+              terms = TERMS,
+              response = getResponseTerms(obj),
+              predictor = getPredictorTerms(obj),
+              model.matrix = MODEL.MATRIX
+              )
+
+  # Return
+  class(res) <- "parseFormula"
+  res
+}
+
+
+
+#' Parse ``Formula''-style Zelig Formulae
+#'
+#' This method parses a ``Formula''-style Zelig formula. This is to support the
+#' ``Formula'' object. It seems like it has the right idea when it comes to 
+#' expressing multiple responses.
+#' @usage \method{parseFormula}{Formula}(obj, data=NULL)
+#' @param obj a list of formulae
+#' @param data a data frame
+#' @return an object of type ``parseFormula''
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @S3method parseFormula Formula
+parseFormula.Formula <- function (obj, data=NULL) {
+
+
+  message("parseFormula.Formula")
+  # Create the actual object
+  res <- list(
+              # Remember the source class
+              source.class = class(obj),
+
+              # Store the original copy of the formula
+              formula = obj,
+
+              # Store the terms
+              terms   = terms(obj),
+
+              # Use Zelig-style methods to get the responseTerms
+              response  = getResponseTerms(obj),
+              predictor = getPredictorTerms(obj),
+
+              # Create the design matrix from the ``Formula'' package
+              model.matrix = NULL
+              )
+
+
+  # Return
+  class(res) <- "parseFormula"
+  res
+}
diff --git a/R/plot.ci.R b/R/plot.ci.R
deleted file mode 100644
index 45a9d23..0000000
--- a/R/plot.ci.R
+++ /dev/null
@@ -1,81 +0,0 @@
-plot.ci <- function(x, CI=95, qi = "ev", main = "",
-                    ylab = NULL, xlab = NULL, xlim = NULL,
-                    ylim = NULL, col = c("red", "blue"), ...) {
-  "%w/o%" <- function(x,y) x[!x %in% y] #--  x without y
-  if (class(x) != "zelig")
-    stop(" plot.ci() works only for sim() output.")
-  if (!(x$zelig.call$model) %in% 
-        c("ls", "logit", "probit", "exp", "gamma", "lognorm",
-          "weibull", "normal", "poisson", "tobit", "relogit",
-          "negbin", "logit.bayes", "probit.bayes",
-          "poisson.bayes", "normal.bayes", "tobit.bayes",
-          "ls.mixed", "logit.mixed", "probit.mixed",
-          "gamma.mixed", "poisson.mixed",
-          "logit.gam", "gamma.gee", "normal.gam", "poisson.gam",
-          "probit.gam", "logit.gee", "normal.gee",
-          "poisson.gee", "probit.gee"))
-    stop("\n  plot.ci() is valid only for non-categorical, univariate response models.")
-  cip <- c((100-CI)/200, 1-(100-CI)/200)
-  summarize <- function(z, cip){
-    res <- NULL
-    res <- cbind(res, apply(z, 2, quantile, prob=cip[1]))
-    res <- cbind(res, apply(z, 2, quantile, prob=cip[2]))
-    res
-  }
-  vv <- apply(x$x, 2, unique)
-  idx <- sapply(vv, length)
-  cidx <- which(idx > 1)
-  if (!is.null(x$x1)) { 
-    vv1 <- apply(x$x1, 2, unique)
-    idx1 <- sapply(vv1, length)
-    cidx1 <- which(idx1 > 1)
-    if (!identical(names(idx), names(idx1)))
-      stop("variables in x and x1 do not match.")
-    ## Checking for one dimension of variation, including interaction terms
-    if (length(cidx) > length(cidx1)) { 
-      tmp <- names(idx)[cidx %w/o% cidx1]
-      tmp1 <- names(idx)[cidx[cidx %in% cidx1]]
-    }
-    else { 
-      tmp <- names(idx1)[cidx1 %w/o% cidx]
-      tmp1 <- names(idx1)[cidx1[cidx1 %in% cidx]]
-    }
-  check <- grep(tmp1, tmp)
-  if (length(check) != length(tmp)) 
-    stop("x and x1 vary on more than one dimension.")
-  }
-  var <- vv[[cidx[1]]]
-  q <- pmatch(qi, names(x$qi))
-  qofi <- x$qi[[q]]
-  sum.qi <- summarize(qofi, cip)
-  if (!is.null(x$x1) && qi == "ev") {
-    fd <- x$qi$fd
-    ev1 <- fd + qofi
-    sum.qi1 <- summarize(ev1, cip)
-  }
-  else sum.qi1 <- NULL
-  if (is.null(ylab))  ylab <- x$qi.name[[q]]
-  if (is.null(xlab))  xlab <- paste("Range of", colnames(x$x)[cidx[1]])
-  if (is.null(ylim)) {
-    if (is.null(sum.qi1))  ylim <- c(min(sum.qi), max(sum.qi))
-    else  ylim <- c(min(sum.qi, sum.qi1), max(sum.qi, sum.qi1))
-  }
-  if (is.null(xlim))  xlim <- c(min(var), max(var))
-  plot.default(var, type = "n", ylab = ylab, main = main, xlab = xlab, 
-               xlim = xlim, ylim = ylim)
-  for (i in 1:length(var)) {
-    lines(c(var[i], var[i]), c(sum.qi[i,1], sum.qi[i,2]), col = col[1], ...)
-    if (!is.null(x$x1) && qi == "ev")
-      lines(c(var[i], var[i]), c(sum.qi1[i,1], sum.qi1[i,2]), col = col[2], ...)
-  }
-}
-
-
-
-
-
-
-
-
-
-
diff --git a/R/plot.surv.R b/R/plot.surv.R
deleted file mode 100644
index 516ff03..0000000
--- a/R/plot.surv.R
+++ /dev/null
@@ -1,100 +0,0 @@
-plot.surv <- function(x,duration,censor,type="line",
-plotcensor=TRUE,plottimes=FALSE,int=c(0.025,0.975),...) {	
-	
-		s.out <- x
-		nobj <- length(s.out)
-		for (s in 1:nobj) {
-		if (s==1) {
-			survival <- s.out[[s]]$qi$survival
-			survest <- c(1,apply(survival,2,mean))
-	
-			survest.lb <- apply(survival,2,function(X){quantile(X,int[1])})
-	
-			survest.ub <- apply(survival,2,function(X){quantile(X,int[2])})
-	
-			times <- as.numeric(colnames(survival))
-	
-			#plot(stepfun(times,survest),xlim=c(0,max(duration)),xlab=xlab,ylab=ylab,main=title)
-			plot(times,survest[-1],type="n",xlim=c(0,max(duration)),
-					...)
-			ntimes <- length(times)-1
-			for (t in 1:ntimes){
-				u <- survest.ub[t]
-				l <- survest.lb[t]
-			if (type=="poly") {
-				x <- c(times[t],times[t+1],times[t+1],times[t])
-				y <- c(survest.ub[t],survest.ub[t+1],survest.lb[t+1],survest.lb[t])
-				polygon(x,y,density=100,col="grey")
-				}
-			if (type=="line") {
-					segments(times[t],u,times[t],l,col="grey",lwd=.5)
-				}
-			}
-			if (type=="line"){
-					t <- length(times)
-					u <- survest.ub[t]
-					l <- survest.lb[t]
-					segments(times[t],u,times[t],l,col="grey",lwd=.5)
-			}	
-
-		}
-		if (s>1){
-			survival <- s.out[[s]]$qi$survival
-			survest <- c(1,apply(survival,2,mean))
-	
-			survest.lb <- apply(survival,2,function(X){quantile(X,int[1])})
-	
-			survest.ub <- apply(survival,2,function(X){quantile(X,int[2])})
-	
-			times <- as.numeric(colnames(survival))
-	
-			#lines(times,survest[-1],type="s")
-	
-			ntimes <- length(times)-1
-			for (t in 1:ntimes){
-				u <- survest.ub[t]
-				l <- survest.lb[t]
-				
-				if (type=="poly") {
-				x <- c(times[t],times[t+1],times[t+1],times[t])
-				y <- c(survest.ub[t],survest.ub[t+1],survest.lb[t+1],survest.lb[t])
-				polygon(x,y,density=100,col="grey")
-				}
-				if (type=="line") {
-					segments(times[t],u,times[t],l,col="grey",lwd=.5)
-				}
-
-			}
-			if (type=="line"){
-					t <- length(times)
-					u <- survest.ub[t]
-					l <- survest.lb[t]
-					segments(times[t],u,times[t],l,col="grey",lwd=.5)
-			}	
-
-			
-			}
-
-				
-		
-	}
-	
-	for (s in 1:nobj) {
-			survival <- s.out[[s]]$qi$survival
-			survest <- c(1,apply(survival,2,mean))
-	
-			survest.lb <- apply(survival,2,function(X){quantile(X,0.025)})
-	
-			survest.ub <- apply(survival,2,function(X){quantile(X,0.975)})
-	
-			times <- as.numeric(colnames(survival))			
-			if (plottimes){lines(stepfun(times,survest),lty=s)}
-			else {lines(times,survest[-1],type="s",lty=s)}
-			}
-		if(plotcensor){
-				rug(duration[censor==0])
-			}
-	}
-
-
-
diff --git a/R/plot.zelig.R b/R/plot.zelig.R
deleted file mode 100644
index 4c39aa2..0000000
--- a/R/plot.zelig.R
+++ /dev/null
@@ -1,15 +0,0 @@
-plot.zelig <- function(x, xlab = "", user.par = FALSE, ...) {
-  if (dim(x$x)[1] > 1) 
-      plot.ci(x, xlab = "", ...)
-  else {
-    class(x) <- x$zelig.call$model
-    if (exists(paste("plot.zelig", x$zelig.call$model, sep = ".")))
-      UseMethod("plot.zelig", x)
-    else{
-      res <- try(plot.zelig.default(x, xlab = xlab, user.par = user.par, ...), silent=F)
-      if(class(res) =="try-error")
-        message("No plot generated for model ", class(x)[1]) 
-    }
-  
-  }
-}
diff --git a/R/plot.zelig.arima.R b/R/plot.zelig.arima.R
deleted file mode 100644
index c723ef8..0000000
--- a/R/plot.zelig.arima.R
+++ /dev/null
@@ -1,172 +0,0 @@
-
-plot.zelig.arima <- function(x, xlab="", user.par=FALSE, pred.se=TRUE,
-                             col=c("blue", "red", "green3", "black"),
-                             lty=2, ...){
-  if (length(col) > 4) col <- rep(col, 4)[1:4]
-  k <- length(x$qi)
-  if (k==3){
-    if (is.null(x$qi$t.eff) & !user.par){
-      par(mfrow=c(1,1))
-    }	
-    if (!is.null(x$qi$t.eff) & !user.par){
-      par(mfrow=c(2,1))
-    }
-    if (user.par){
-      par <- par(no.readonly = TRUE) 
-    }
-    ev.mean <- apply(x$qi$ev, 2, "mean")
-    ev.quant <- apply(x$qi$ev, 2, "quantile", c(0.025, 0.975))
-    if (pred.se){
-      ev.total.up <- x$qi$ev + 1.96*x$qi$se
-      ev.total.down <- x$qi$ev - 1.96*x$qi$se
-      ev.total.up <- apply(ev.total.up, 2, "mean")
-      ev.total.down <- apply(ev.total.down, 2, "mean")
-      min.plot <- min(ev.quant[1,], ev.total.down, x$t.series[1:x$min.time])
-      max.plot <- max(ev.quant[2,], ev.total.up, x$t.series[1:x$min.time])
-    }
-    if (!pred.se){
-      min.plot <- min(ev.quant[1,], x$t.series[1:x$min.time])
-      max.plot <- max(ev.quant[2,], x$t.series[1:x$min.time])
-    }
-    if (length(ev.mean)>1){
-      ts.plot(x$t.series[1:x$min.time],
-              ylim=c(min.plot, max.plot),
-              col=col[4],ylab="Time Series Value",
-              xlim=c(1, x$min.time + ncol(x$qi$ev)))
-      lines(ev.mean~c((x$min.time+1):
-                      (x$min.time + ncol(x$qi$ev))),
-            col=col[1])
-      lines(ev.quant[2,]~c((x$min.time+1):
-                           (x$min.time + ncol(x$qi$ev))),
-            lty=lty, col=col[1])
-      lines(ev.quant[1,]~c((x$min.time+1):
-                           (x$min.time + ncol(x$qi$ev))),
-            lty=lty, col=col[1])
-      if (pred.se){
-        lines(ev.total.up~c((x$min.time+1):
-                            (x$min.time + ncol(x$qi$ev))),
-              lty=lty, col= col[3])
-        lines(ev.total.down~c((x$min.time+1):
-                              (x$min.time + ncol(x$qi$ev))),
-              lty=lty, col=col[3])
-      }
-      lines(c(x$min.time, x$min.time + 1),
-            c(x$t.series[x$min.time], ev.mean[1]), col=col[1])
-      abline(v=x$min.time, lty=1, col=col[2])
-      if(!is.null(x$qi$t.eff)){
-        teff.mean<- apply(x$qi$t.eff, 2, "mean")
-        teff.quant<- apply(x$qi$t.eff, 2, "quantile", c(0.025, 0.975))
-        ts.plot(teff.mean, xlab="Time After Counterfactual",
-                ylab="Difference", main="Y - E[Y|X]",
-                ylim=c(min(teff.quant[1,]), max(teff.quant[2,])), col=col[1])
-        lines(teff.quant[2,], lty=lty, col=col[1])
-        lines(teff.quant[1,], lty=lty, col=col[1])
-      }
-    }
-    if (length(ev.mean)==1){
-      ts.plot(x$t.series[1:x$min.time],
-              ylim=c(min.plot, max.plot),
-              col=col[4],ylab="Time Series Value",
-              xlim=c(1, x$min.time + ncol(x$qi$ev)), gpars=...)
-      points(x$min.time + 1, ev.mean)
-      if (pred.se){
-        arrows(x$min.time + 1 , ev.total.down,
-               x$min.time + 1, ev.total.up, col=col[2],
-               code=3, length=0.1, angle=90)
-      }
-      arrows(x$min.time + 1, ev.quant[1,], x$min.time + 1,
-             ev.quant[2,], col=col[1], code=3, length=0.1, angle=90)
-      lines(c(x$min.time, x$min.time + 1),
-            c(x$t.series[x$min.time], ev.mean[1]), col=col[1])
-      if(!is.null(x$qi$t.eff)){
-        teff.mean<- apply(x$qi$t.eff, 2, "mean")
-        teff.quant<- apply(x$qi$t.eff, 2, "quantile", c(0.025, 0.975))
-        plot(density(x$qi$t.eff), ylab="Density",
-             xlab="Difference", main="Y-E[Y|X]", ...)
-      }
-    }
-  }
-  if (k==4){
-    par(mfrow=c(2,1))
-    ev.mean<- apply(x$qi$ev, 2, "mean")
-    ev.quant<- apply(x$qi$ev, 2, "quantile", c(0.025, 0.975))
-    if(pred.se){
-      ev.total.up<- x$qi$ev + 1.96*x$qi$se
-      ev.total.down<- x$qi$ev - 1.96*x$qi$se
-      ev.total.up<- apply(ev.total.up, 2, "mean")
-      ev.total.down<- apply(ev.total.down, 2, "mean")
-      min.plot<- min(ev.quant[1,], x$t.series[1:x$min.time], ev.total.down)
-      max.plot<- max(ev.quant[2,], x$t.series[1:x$min.time], ev.total.up)
-    }
-    if(!pred.se){
-      min.plot<- min(ev.quant[1,], x$t.series[1:x$min.time])
-      max.plot<- max(ev.quant[2,], x$t.series[1:x$min.time])
-    }
-    if(length(ev.mean)>1){
-      ts.plot(x$t.series[1:x$min.time],
-              ylim=c(min.plot, max.plot),
-              col=col[4], ylab="Time Series Value",
-              xlim=c(1, x$min.time + ncol(x$qi$ev)), gpars=...)
-      lines(ev.mean~c((x$min.time+1):
-                      (x$min.time + ncol(x$qi$ev))), col=col[1])
-      lines(ev.quant[2,]~c((x$min.time+1):
-                           (x$min.time + ncol(x$qi$ev))),
-            lty=lty, col=col[1])
-      lines(ev.quant[1,]~c((x$min.time+1):
-                           (x$min.time + ncol(x$qi$ev))),
-            lty=lty, col=col[1])
-      if(pred.se){
-        lines(ev.total.up~c((x$min.time+1):
-                            (x$min.time + ncol(x$qi$ev))),
-              lty=lty, col= col[3])
-        lines(ev.total.down~c((x$min.time+1):
-                              (x$min.time + ncol(x$qi$ev))),
-              lty=lty, col=col[3])
-      }
-      lines(c(x$min.time, x$min.time + 1),
-            c(x$t.series[x$min.time], ev.mean[1]))
-      abline(v=x$min.time, lty=1, col=col[2])
-    }
-    if(length(ev.mean)==1){
-      ts.plot(x$t.series[1:x$min.time], ylim=c(min.plot, max.plot),
-              ylab="Time Series Value", col=col[4],
-              xlim=c(1, x$min.time + ncol(x$qi$ev)), gpars=...)
-      points(x$min.time + 1, ev.mean)
-      if(pred.se){
-        arrows(x$min.time + 1, ev.total.down, x$min.time + 1,
-               ev.total.up, col=col[2], code=3, length=0.1, angle=90)
-      }
-      arrows(x$min.time + 1, ev.quant[1,], x$min.time + 1, ev.quant[2,],
-             col=col[1], code=3, length=0.1, angle=90)
-      lines(c(x$min.time, x$min.time + 1),
-            c(x$t.series[x$min.time], ev.mean[1]), col=col[1])
-    }
-    fd.mean<- apply(x$qi$fd, 2, "mean")
-    fd.quant<- apply(x$qi$fd, 2, "quantile", c(0.025, 0.975))
-    if(length(fd.mean)>1){
-      ts.plot(fd.mean, col=col[1], ylim=c(min(fd.quant[1,]), max(fd.quant[2,])),
-              main="E[Y|X1] - E[Y|X]", xlab="Time From First Counterfactual",
-              ylab="Difference", gpars=...)
-      lines(fd.quant[2,], lty=lty, col=col[1])
-      lines(fd.quant[1,], lty=lty, col=col[1])
-    }
-    if(length(fd.mean)==1){
-      par(mfrow=c(2,1))
-      ts.plot(x$t.series[1:x$min.time], ylim=c(min.plot, max.plot),
-              ylab="Time Series Value", col=col[4],
-              xlim=c(1, x$min.time + ncol(x$qi$ev)), gpars=...)
-      points(x$min.time + 1, ev.mean)
-      if(pred.se){
-        arrows(x$min.time + 1, ev.total.down, x$min.time + 1, ev.total.up,
-               col=col[2], code=3, length=0.1, angle=90)
-      }
-      arrows(x$min.time + 1, ev.quant[1,], x$min.time + 1, ev.quant[2,],
-             col=col[1], code=3, length=0.1, angle=90)
-      lines(c(x$min.time, x$min.time + 1),
-            c(x$t.series[x$min.time], ev.mean[1]), col=col[1])
-      plot(density(x$qi$fd), main="E[Y|X1] - E[Y|X]",
-           xlab="Difference in Value", ylab="Density", ...)
-    }
-  }
-}
-
diff --git a/R/plot.zelig.blogit.R b/R/plot.zelig.blogit.R
deleted file mode 100644
index 693d669..0000000
--- a/R/plot.zelig.blogit.R
+++ /dev/null
@@ -1,36 +0,0 @@
-plot.zelig.blogit <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par)
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if ("rr" %in% names(x$qi)) {
-    x$qi$rr <- x$qi.name$rr <- NULL
-    k <- k - 1
-  }
-  main <- x$qi.name
-  par(mfrow = c(k, 1))
-  for (i in 1:k) {
-    qi <- as.matrix(x$qi[[i]])
-    if (names(x$qi)[i] == "pr") {
-      total <- sum(as.integer(qi))
-      y00 <- 100 * sum(as.integer(qi[,1]))/total
-      y01 <- 100 * sum(as.integer(qi[,2]))/total
-      y10 <- 100 * sum(as.integer(qi[,3]))/total
-      y11 <- 100 * sum(as.integer(qi[,4]))/total
-      xmax <- max(y00, y01, y10, y11)
-      labels <- c("(0,0)", "(0,1)","(1,0)", "(1,1)")
-      barplot(c(y00, y01, y10, y11), horiz = TRUE, col = alt.col,
-              names.arg = labels, xpd = TRUE, main = main[[i]],
-              xlim = c(0, min(100, 1.25*xmax)),
-              xlab = "Percentage of Simulations")
-    }
-    else if (is.numeric(qi[1])) {
-      y1 <- qi[, 3] + qi[, 4]
-      y2 <- qi[, 2] + qi[, 4]
-      contour(kde2d(y1, y2), xlab = "Pr(Y1 = 1)", 
-              ylab = "Pr(Y2 = 1)", main = main[[i]], ...)
-    }
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.bprobit.R b/R/plot.zelig.bprobit.R
deleted file mode 100644
index 70b7ae0..0000000
--- a/R/plot.zelig.bprobit.R
+++ /dev/null
@@ -1,36 +0,0 @@
-plot.zelig.bprobit <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par)
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if ("rr" %in% names(x$qi)) {
-    x$qi$rr <- x$qi.name$rr <- NULL
-    k <- k - 1
-  }
-  main <- x$qi.name
-  par(mfrow = c(k, 1))
-  for (i in 1:k) {
-    qi <- as.matrix(x$qi[[i]])
-    if (names(x$qi)[i] == "pr") { 
-      total <- sum(as.integer(qi))
-      y00 <- 100 * sum(as.integer(qi[,1]))/total
-      y01 <- 100 * sum(as.integer(qi[,2]))/total
-      y10 <- 100 * sum(as.integer(qi[,3]))/total
-      y11 <- 100 * sum(as.integer(qi[,4]))/total
-      xmax <- max(y00, y01, y10, y11)
-      labels <- c("(0,0)", "(0,1)","(1,0)", "(1,1)")
-      barplot(c(y00, y01, y10, y11), horiz = TRUE, col = alt.col,
-              names.arg = labels, xpd = TRUE, main = main[[i]],
-              xlim = c(0, min(100, 1.25*xmax)),
-              xlab = "Percentage of Simulations")
-    }
-    else if (is.numeric(qi[1])) {
-      y1 <- qi[, 3] + qi[, 4]
-      y2 <- qi[, 2] + qi[, 4]
-      contour(kde2d(y1, y2), xlab = "Pr(Y1 = 1)", 
-              ylab = "Pr(Y2 = 1)", main = main[[i]], ...)
-    }
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.coxph.R b/R/plot.zelig.coxph.R
deleted file mode 100644
index 1b2bf26..0000000
--- a/R/plot.zelig.coxph.R
+++ /dev/null
@@ -1,30 +0,0 @@
-plot.zelig.coxph <- function(x, xlab = "", user.par = FALSE, alt.col = "red", alt.lty = "dashed", CI = 95, ...) {
-  s <- summary(x, CI=CI)
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if(!is.null(x$qi$hr)){
-  	hr <- x$qi$hr
-  	plot(density(hr), main = x$qi.name$hr, xlab = xlab, ...)
-  	s$qi.stats$hr <- x$qi.name$hr <- NULL
-	j <- k-1
-  }
-  else{
-	j <- k
-  }
-  for (i in 1:(j-1)) {
-    qi <- as.vector(s$qi.stats[[i]][,1])
-    time <- as.numeric(rownames(s$qi.stats[[i]]))
-    ci.lower <- as.vector(s$qi.stats[[i]][,3])
-    ci.upper <- as.vector(s$qi.stats[[i]][,4])
-    plot(y = qi, x = time, main = x$qi.name[[i]], xlab = "time", ylab = names(x$qi.name)[i], type = "s", ...)
-    lines(y = ci.lower, x = time, type = "s", lty = alt.lty, col = alt.col)
-    lines(y = ci.upper, x = time, type = "s", lty = alt.lty, col = alt.col)
-  }
-  haz <- as.vector(s$qi.stats[[j]])
-  time <- as.numeric(rownames(s$qi.stats[[j-1]]))
-  plot(y = haz, x = time, main = x$qi.name[[j]], xlab = "time", ylab = names(x$qi.name)[j], type = "s")
-  par(op)
-}
diff --git a/R/plot.zelig.default.R b/R/plot.zelig.default.R
deleted file mode 100644
index 5019d7e..0000000
--- a/R/plot.zelig.default.R
+++ /dev/null
@@ -1,14 +0,0 @@
-plot.zelig.default <- function(x, xlab = "", user.par = FALSE, ...) {
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, dims(x$qi[[1]])[2]))
-  for (i in 1:k) {
-    for (j in 1:dims(x$qi[[i]])[2]){
-      qi <- as.vector((x$qi[[i]])[,j])
-      plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-    }
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.gamma.gee.R b/R/plot.zelig.gamma.gee.R
deleted file mode 100644
index a407567..0000000
--- a/R/plot.zelig.gamma.gee.R
+++ /dev/null
@@ -1,14 +0,0 @@
-plot.zelig.gamma.gee <- function(x, xlab = "", user.par = FALSE, ...) {
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  for (i in 1:k) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
-
-
diff --git a/R/plot.zelig.logit.R b/R/plot.zelig.logit.R
deleted file mode 100644
index 35032aa..0000000
--- a/R/plot.zelig.logit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.logit <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.logit.gam.R b/R/plot.zelig.logit.gam.R
deleted file mode 100644
index b83dc11..0000000
--- a/R/plot.zelig.logit.gam.R
+++ /dev/null
@@ -1,22 +0,0 @@
-plot.zelig.logit.gam <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  if(length(unique(x$qi$pr)) > 1){par(mfrow = c(k, 1))}
-  if(length(unique(x$qi$pr)) == 1){par(mfrow = c(k-1, 1))}
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  if(length(unique(x$qi$pr)) > 1){
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))}
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.logit.gee.R b/R/plot.zelig.logit.gee.R
deleted file mode 100644
index 0aef3c4..0000000
--- a/R/plot.zelig.logit.gee.R
+++ /dev/null
@@ -1,17 +0,0 @@
-plot.zelig.logit.gee <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-    par(mfrow = c(k, 1))
-  if (k > 1){
-    for (i in 1:k) {
-      qi <- as.vector(x$qi[[i]])
-      plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-    }
-  }
-  else{
-    plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.logit.survey.R b/R/plot.zelig.logit.survey.R
deleted file mode 100644
index 25d7d25..0000000
--- a/R/plot.zelig.logit.survey.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.logit.survey <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.mlogit.R b/R/plot.zelig.mlogit.R
deleted file mode 100644
index 99d25a1..0000000
--- a/R/plot.zelig.mlogit.R
+++ /dev/null
@@ -1,49 +0,0 @@
-plot.zelig.mlogit <- function(x, xlab = "", user.par = FALSE, alt.col = NULL, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par)
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if (!is.null(x$qi$rr)) {
-    k <- k - 1
-    x$qi$rr <- x$qi.name$rr <- NULL
-  }
-  par(mfrow = c(k,1))
-  pr <- x$qi$pr
-  spr <- array()
-  lev <- sort(unique(pr))
-  K <- length(lev)
-  if (is.null(alt.col))
-    alt.col <- rainbow(K)
-  total <- length(pr)
-  for (i in 1:K)
-    spr[i] <- 100 * sum(as.character(pr) == lev[i])/total
-  xmax <- max(spr)
-  labels <- paste("Y=", lev, sep = "")
-  barplot(spr, horiz = TRUE, col = alt.col, names.arg = labels,
-          las = 1, main = x$qi.name$pr,
-          xlim = c(0, min(100, 1.15*xmax)),
-          xlab = "Percentage of Simulations")
-  x$qi$pr <- x$qi.name$pr <- NULL
-  main <- x$qi.name
-  for (i in 1:(k-1)) {
-    qi <- x$qi[[i]]
-    if (length(dim(qi)) == 3 && dim(qi)[3] == 1)
-      qi <- qi[,,1]
-    dens <- list()
-    xmax <- ymax <- array()
-    for (j in 1:ncol(qi)) {
-      dens[[j]] <- density(qi[,j])
-      xmax[j] <- max(dens[[j]]$x)
-      ymax[j] <- max(dens[[j]]$y)
-    }
-    plot(dens[[1]], col = alt.col[1],
-         xlim = c(min(min(qi), 0), max(xmax)),
-         xlab = xlab, main = "",
-         ylim = c(0, max(ymax)), ...)
-    for (j in 2:ncol(qi)) 
-      lines(dens[[j]], col = alt.col[j])
-    title(main = x$qi.name[[i]][1])
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.negbin.R b/R/plot.zelig.negbin.R
deleted file mode 100644
index 240a56c..0000000
--- a/R/plot.zelig.negbin.R
+++ /dev/null
@@ -1,13 +0,0 @@
-plot.zelig.negbin <- function(x, xlab = "", user.par = FALSE, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  truehist(x$qi$pr, main = x$qi.name$pr, x0 = -0.25,
-           xlab = xlab, ylab = "Probability", ...)
-  plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  if (k == 3)
-    plot(density(x$qi$fd), main = x$qi$fd, xlab = xlab, ...)
-  par(op)
-}
diff --git a/R/plot.zelig.netcloglog.R b/R/plot.zelig.netcloglog.R
deleted file mode 100644
index 0315d39..0000000
--- a/R/plot.zelig.netcloglog.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.cloglog.net <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.netlogit.R b/R/plot.zelig.netlogit.R
deleted file mode 100644
index 000d3e2..0000000
--- a/R/plot.zelig.netlogit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.logit.net <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.netpoisson.R b/R/plot.zelig.netpoisson.R
deleted file mode 100644
index 163c484..0000000
--- a/R/plot.zelig.netpoisson.R
+++ /dev/null
@@ -1,16 +0,0 @@
-plot.zelig.poisson.net <- function(x, xlab = "", user.par = FALSE, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  truehist(x$qi$pr, main = x$qi.name$pr, x0 = -0.25,
-           xlab = xlab, ylab = "Probability", ...)
-  plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  if (k > 2) {
-    for (i in 3:k)
-      plot(density(x$qi[[i]]), main = x$qi.name[[i]],
-           xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.netprobit.R b/R/plot.zelig.netprobit.R
deleted file mode 100644
index 78c0ef0..0000000
--- a/R/plot.zelig.netprobit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.probit.net <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.normal.gee.R b/R/plot.zelig.normal.gee.R
deleted file mode 100644
index a3f7d5a..0000000
--- a/R/plot.zelig.normal.gee.R
+++ /dev/null
@@ -1,12 +0,0 @@
-plot.zelig.normal.gee <- function(x, xlab = "", user.par = FALSE, ...) {
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  for (i in 1:k) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.ologit.R b/R/plot.zelig.ologit.R
deleted file mode 100644
index d80eafe..0000000
--- a/R/plot.zelig.ologit.R
+++ /dev/null
@@ -1,49 +0,0 @@
-plot.zelig.ologit <- function(x, xlab = "", user.par = FALSE, alt.col = NULL, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par)
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if (!is.null(x$qi$rr)) {
-    k <- k - 1
-    x$qi$rr <- x$qi.name$rr <- NULL
-  }
-  par(mfrow = c(k,1))
-  pr <- x$qi$pr
-  spr <- array()
-  lev <- sort(unique(pr))
-  K <- length(lev)
-  if (is.null(alt.col))
-    alt.col <- rainbow(K)
-  total <- length(pr)
-  for (i in 1:K)
-    spr[i] <- 100 * sum(as.character(pr) == lev[i])/total
-  xmax <- max(spr)
-  labels <- paste("Y=", lev, sep = "")
-  barplot(spr, horiz = TRUE, col = alt.col, names.arg = labels,
-          las = 1, main = x$qi.name$pr,
-          xlim = c(0, min(100, 1.15*xmax)),
-          xlab = "Percentage of Simulations")
-  x$qi$pr <- x$qi.name$pr <- NULL
-  main <- x$qi.name
-  for (i in 1:(k-1)) {
-    qi <- x$qi[[i]]
-    if (length(dim(qi)) == 3 && dim(qi)[3] == 1)
-      qi <- qi[,,1]
-    dens <- list()
-    xmax <- ymax <- array()
-    for (j in 1:ncol(qi)) {
-      dens[[j]] <- density(qi[,j])
-      xmax[j] <- max(dens[[j]]$x)
-      ymax[j] <- max(dens[[j]]$y)
-    }
-    plot(dens[[1]], col = alt.col[1],
-         xlim = c(min(min(qi), 0), max(xmax)),
-         xlab = xlab, main = "",
-         ylim = c(0, max(ymax)), ...)
-    for (j in 2:ncol(qi)) 
-      lines(dens[[j]], col = alt.col[j])
-    title(main = x$qi.name[[i]][1])
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.oprobit.R b/R/plot.zelig.oprobit.R
deleted file mode 100644
index 6943175..0000000
--- a/R/plot.zelig.oprobit.R
+++ /dev/null
@@ -1,49 +0,0 @@
-plot.zelig.oprobit <- function(x, xlab = "", user.par = FALSE, alt.col = NULL, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par)
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if (!is.null(x$qi$rr)) {
-    k <- k - 1
-    x$qi$rr <- x$qi.name$rr <- NULL
-  }
-  par(mfrow = c(k,1))
-  pr <- x$qi$pr
-  spr <- array()
-  lev <- sort(unique(pr))
-  K <- length(lev)
-  if (is.null(alt.col))
-    alt.col <- rainbow(K)
-  total <- length(pr)
-  for (i in 1:K)
-    spr[i] <- 100 * sum(as.character(pr) == lev[i])/total
-  xmax <- max(spr)
-  labels <- paste("Y=", lev, sep = "")
-  barplot(spr, horiz = TRUE, col = alt.col, names.arg = labels,
-          las = 1, main = x$qi.name$pr,
-          xlim = c(0, min(100, 1.15*xmax)),
-          xlab = "Percentage of Simulations")
-  x$qi$pr <- x$qi.name$pr <- NULL
-  main <- x$qi.name
-  for (i in 1:(k-1)) {
-    qi <- x$qi[[i]]
-    if (length(dim(qi)) == 3 && dim(qi)[3] == 1)
-      qi <- qi[,,1]
-    dens <- list()
-    xmax <- ymax <- array()
-    for (j in 1:ncol(qi)) {
-      dens[[j]] <- density(qi[,j])
-      xmax[j] <- max(dens[[j]]$x)
-      ymax[j] <- max(dens[[j]]$y)
-    }
-    plot(dens[[1]], col = alt.col[1],
-         xlim = c(min(min(qi), 0), max(xmax)),
-         xlab = xlab, main = "",
-         ylim = c(0, max(ymax)), ...)
-    for (j in 2:ncol(qi)) 
-      lines(dens[[j]], col = alt.col[j])
-    title(main = x$qi.name[[i]][1])
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.poisson.R b/R/plot.zelig.poisson.R
deleted file mode 100644
index cb5b861..0000000
--- a/R/plot.zelig.poisson.R
+++ /dev/null
@@ -1,16 +0,0 @@
-plot.zelig.poisson <- function(x, xlab = "", user.par = FALSE, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  truehist(x$qi$pr, main = x$qi.name$pr, x0 = -0.25,
-           xlab = xlab, ylab = "Probability", ...)
-  plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  if (k > 2) {
-    for (i in 3:k)
-      plot(density(x$qi[[i]]), main = x$qi.name[[i]],
-           xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.poisson.gam.R b/R/plot.zelig.poisson.gam.R
deleted file mode 100644
index 0b51f92..0000000
--- a/R/plot.zelig.poisson.gam.R
+++ /dev/null
@@ -1,18 +0,0 @@
-plot.zelig.poisson.gam <- function(x, xlab = "", user.par = FALSE, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  if(length(unique(x$qi$pr)) > 1){par(mfrow = c(k, 1))}
-  if(length(unique(x$qi$pr)) == 1){par(mfrow = c(k-1, 1))}
-  if(length(unique(x$qi$pr)) > 1){
-  truehist(x$qi$pr, main = x$qi.name$pr, x0 = -0.25,
-           xlab = xlab, ylab = "Probability", ...)}
-  plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  if (k > 2) {
-    for (i in 3:k)
-      plot(density(x$qi[[i]]), main = x$qi.name[[i]],
-           xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.poisson.gee.R b/R/plot.zelig.poisson.gee.R
deleted file mode 100644
index 06c03b1..0000000
--- a/R/plot.zelig.poisson.gee.R
+++ /dev/null
@@ -1,15 +0,0 @@
-plot.zelig.poisson.gee <- function(x, xlab = "", user.par = FALSE, ...) {
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  if (k > 2) {
-    for (i in 3:k)
-      plot(density(x$qi[[i]]), main = x$qi.name[[i]],
-           xlab = xlab, ...)
-  }
-  par(op)
-}
-
diff --git a/R/plot.zelig.poisson.survey.R b/R/plot.zelig.poisson.survey.R
deleted file mode 100644
index a5acdb7..0000000
--- a/R/plot.zelig.poisson.survey.R
+++ /dev/null
@@ -1,16 +0,0 @@
-plot.zelig.poisson.survey <- function(x, xlab = "", user.par = FALSE, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  truehist(x$qi$pr, main = x$qi.name$pr, x0 = -0.25,
-           xlab = xlab, ylab = "Probability", ...)
-  plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  if (k > 2) {
-    for (i in 3:k)
-      plot(density(x$qi[[i]]), main = x$qi.name[[i]],
-           xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.probit.R b/R/plot.zelig.probit.R
deleted file mode 100644
index 4bd6ed9..0000000
--- a/R/plot.zelig.probit.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.probit <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.probit.gam.R b/R/plot.zelig.probit.gam.R
deleted file mode 100644
index 2aafc0f..0000000
--- a/R/plot.zelig.probit.gam.R
+++ /dev/null
@@ -1,22 +0,0 @@
-plot.zelig.probit.gam <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  if(length(unique(x$qi$pr)) > 1){par(mfrow = c(k, 1))}
-  if(length(unique(x$qi$pr)) == 1){par(mfrow = c(k-1, 1))}
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  if(length(unique(x$qi$pr)) > 1){
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))}
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.probit.gee.R b/R/plot.zelig.probit.gee.R
deleted file mode 100644
index 3f77ce9..0000000
--- a/R/plot.zelig.probit.gee.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.probit.gee <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if (k > 1){
-    for (i in 1:k) {
-      qi <- as.vector(x$qi[[i]])
-      plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-    }
-  }
-  else{
-    plot(density(x$qi$ev), main = x$qi.name$ev, xlab = xlab, ...) 
-  }
-  par(op)
-}
-
-
-
diff --git a/R/plot.zelig.probit.survey.R b/R/plot.zelig.probit.survey.R
deleted file mode 100644
index d6129b4..0000000
--- a/R/plot.zelig.probit.survey.R
+++ /dev/null
@@ -1,20 +0,0 @@
-plot.zelig.probit.survey <- function(x, xlab = "", user.par = FALSE, alt.col = "red", ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  pr <- x$qi$pr
-  y0 <- 100 * sum(pr == 0)/length(pr)
-  y1 <- 100 * sum(pr == 1)/length(pr)
-  barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-          names.arg = c("Y = 0", "Y = 1"),
-          xlab = "Percentage of Simulations",
-          main = x$qi.name$pr, xlim = c(0, 100))
-  x$qi$pr <- x$qi.name$pr <- NULL
-  for (i in 1:(k-1)) {
-    qi <- as.vector(x$qi[[i]])
-    plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-  }
-  par(op)
-}
diff --git a/R/plot.zelig.relogit.R b/R/plot.zelig.relogit.R
deleted file mode 100644
index 7414a09..0000000
--- a/R/plot.zelig.relogit.R
+++ /dev/null
@@ -1,42 +0,0 @@
-plot.zelig.relogit <- function(x, xlab ="", user.par = FALSE, alt.col = "red",
-                               ylab = NULL, samples = 100, ...){
-  k <- length(x$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(k, 1))
-  if (dim(x$qi[[1]])[2] == 1) {
-    pr <- x$qi$pr
-    y0 <- 100 * sum(pr == 0)/length(pr)
-    y1 <- 100 * sum(pr == 1)/length(pr)
-    barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
-            names.arg = c("Y = 0", "Y = 1"),
-            xlab = "Percentage of Simulations",
-            main = x$qi.name$pr, xlim = c(0, 100))
-    x$qi$pr <- x$qi.name$pr <- NULL
-    for (i in 1:(k-1)) {
-      qi <- as.vector(x$qi[[i]])
-      plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
-    }    
-  }
-  else {
-    for (i in 1:k) {
-      qi <- x$qi[[i]]
-      main <- as.character(x$qi.name[i])
-      if (is.null(rownames(qi)))
-        rownames(qi) <- 1:dim(qi)[1]
-      idx <- as.integer(sample(rownames(qi), 100))
-      tmp <- qi[idx,,1]
-      xlim <- c(min(qi[,1,1]), max(qi[,2,1]))
-      if (is.null(ylab))
-        ylab <- paste("Observations (n = ", samples, ")", sep = "")
-      plot(xlim, type = "n", xlab = xlab, ylab = ylab,
-           main = main, ylim = c(0, 100), xlim = xlim, ...)
-      for (j in 1:nrow(tmp))
-        lines(c(tmp[j,1], tmp[j,2]), c(j,j), col = alt.col)
-      abline(v = mean(qi[,1,1]))
-      abline(v = mean(qi[,2,1]))
-    }
-  }
-  par(op)
-}
diff --git a/R/plot.zeliglist.R b/R/plot.zeliglist.R
deleted file mode 100644
index 6cf5efe..0000000
--- a/R/plot.zeliglist.R
+++ /dev/null
@@ -1,21 +0,0 @@
-plot.zeliglist <- function(x, xlab = "", user.par = FALSE, ...) {
- 
-  kk <- length(x)
-  j  <- dims(x[[1]]$qi)
-  op <- par(no.readonly = TRUE)
-  if (!user.par) 
-    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
-  par(mfrow = c(kk, j))
- k <- length(x[[1]]$qi)
-  for(i in 1:k){
-    for(n in 1:kk){
-      xx <- x[[n]]
-      class(xx) <- xx$zelig.call$model
-      qi <- as.vector(xx$qi[[i]])
-     plot(density(qi), main=xx$qi.name[[i]], xlab = xlab, ...)
-     
-    
-    }
-  }
-
-}
diff --git a/R/plots.R b/R/plots.R
new file mode 100644
index 0000000..94d2a2b
--- /dev/null
+++ b/R/plots.R
@@ -0,0 +1,673 @@
+#' @S3method plot sim.gamma.gee
+plot.sim.gamma.gee <- function (x, ...) {
+
+  # store device settings
+  original.par <- par(no.readonly=TRUE)
+
+  if (is.null(x$x))
+    return()
+
+  panels <- if (is.null(x$x1)) {
+    palette <- rep("black", 3)
+    matrix(1, nrow=1, ncol=1)
+    # How the layout window will look:
+    # +---+
+    # | 1 |
+    # +---+
+  }
+
+  else {
+    palette <- c('red', 'navy', 'black')
+    matrix(c(1, 2, 3, 3), nrow=2, ncol=2, byrow=TRUE)
+    # How the layout window will look:
+    # +-------+
+    # | 1 | 2 |
+    # +-------+
+    # |   3   |
+    # +-------+
+  }
+
+  layout(panels)
+
+  # extract quantities of interest
+  ev1 <- x$qi$ev1
+  ev2 <- x$qi$ev2
+  fd <- x$qi$fd
+
+  # Plot ev1
+  .plot.density(ev1, "Expected Values (for X): E(Y|X)", palette[1])
+
+  if (!is.null(x$x1)) {
+    .plot.density(ev2, "Expected Values (for X1): E(Y|X1)", palette[2])
+    .plot.density(fd, "First Differences: E(Y|X1) - E(Y|X)", palette[3])
+  }
+    
+  # return plotting device
+  par(original.par)
+}
+
+#' @S3method plot sim.normal.gee
+plot.sim.normal.gee <- plot.sim.gamma.gee
+
+#' @S3method plot sim.poisson.gee
+plot.sim.poisson.gee <- plot.sim.gamma.gee
+
+#' @S3method plot sim.logit.gee
+plot.sim.logit.gee <- function (x, ...) {
+
+  # store device settings
+  original.par <- par(no.readonly=TRUE)
+
+  if (is.null(x$x))
+    return()
+
+  panels <- if (is.null(x$x1)) {
+    palette <- rep("black", 4)
+    matrix(1, nrow=1, ncol=1)
+    # How the layout window will look:
+    # +---+
+    # | 1 |
+    # +---+
+  }
+
+  else {
+    palette <- c('red', 'navy', 'black', 'black')
+    matrix(c(1, 2, 3, 3, 4, 4), nrow=3, ncol=2, byrow=TRUE)
+    # How the layout window will look:
+    # +-------+
+    # | 1 | 2 |
+    # +-------+
+    # |   3   |
+    # +-------+
+    # |   4   |
+    # +-------+
+  }
+
+  layout(panels)
+
+  # extract quantities of interest
+  ev1 <- x$qi$ev1
+  ev2 <- x$qi$ev2
+  fd <- x$qi$fd
+  rr <- x$qi$rr
+
+  # Plot ev1
+  .plot.density(ev1, "Expected Values (for X): E(Y|X)", palette[1])
+  .plot.density(ev2, "Expected Values (for X1): E(Y|X1)", palette[2])
+  .plot.density(fd, "First Differences: E(Y|X1) - E(Y|X)", palette[3])
+  .plot.density(rr, "Risk Ratios: E(Y|X1)/E(Y|X)", palette[4])
+    
+  # return plotting device
+  par(original.par)
+}
+
+#' @S3method plot sim.probit.gee
+plot.sim.probit.gee <- plot.sim.logit.gee
+
+# Plot Density Graphs for GEE Quantities of Interest
+# @param x a vector containing quantities of interest
+# @param main the main title of the plot
+# @param col the color of the line-plot
+.plot.density <- function (x, main, col) {
+  if (all(is.na(x)))
+    return()
+
+  density <- density(x)
+  plot(density(x), main = main, col = col)
+}
+#' Plot graphs of simulated multiply-imputed data
+#'
+#' This function is currently unimplemented, and reserved for future use.
+#'
+#' @usage \method{plot}{MI.sim}(...)
+#' @S3method plot MI.sim
+#' @param ... ignored parameters
+#' @return NULL (invisibly)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+plot.MI.sim <- function(...) {
+  warning("Zelig currently does not support plots of mutiply imputed data")
+  invisible(NULL)
+}
+#' Method for plotting pooled simulations by confidence intervals
+#'
+#' Plot confidence intervals of pooled simulated values.
+#' 
+#' @param x A `sim' object
+#' @param qi a character-string specifying the quantity of interest to plot
+#' @param var The variable to be used on the x-axis. Default is the variable
+#' across all the chosen values with smallest nonzero variance
+#' @param ... Parameters to be passed to the `truehist' function which is 
+#' implicitly called for numeric simulations
+#' @param main a character-string specifying the main heading of the plot
+#' @param sub a character-string specifying the sub heading of the plot
+#' @param xlab a character-string specifying the label for the x-axis
+#' @param ylab a character-string specifying the label for the y-axis
+#' @param legcol ``legend color'', an valid color used for plotting the line
+#' colors in the legend
+#' @param col a valid vector of colors of at least length 3 to use to color the
+#' confidence intervals
+#' @param leg ``legend position'', an integer from 1 to 4, specifying the
+#' position of the legend. 1 to 4 correspond to ``SE'', ``SW'', ``NW'', and
+#' ``NE'' respectively
+#' @param legpos ``legend type'', exact coordinates and sizes for legend.
+#' Overrides argment ``leg.type''
+#' @return the current graphical parameters. This is subject to change in future
+#' implementations of Zelig
+#' @author James Honaker, adapted by Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export plot.ci
+#' @usage \method{plot}{ci}(x, qi="ev", var=NULL, ..., legcol="gray20", col=NULL, leg=1, legpos=NULL)
+plot.ci <- function(x, qi="ev", var=NULL, ..., main = NULL, sub = NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1, legpos=NULL) {
+
+  if (! "pooled.sim" %in% class(x)) {
+    something <- list(x=x)
+    class(something) <- "pooled.sim"
+    attr(something, "titles") <- x$titles
+    x <- something
+  }
+
+  xmatrix<-matrix(NA,nrow=length(x),ncol=length(x[[1]]$x$data))
+
+  for(i in 1:length(x)){
+    xmatrix[i,]<-as.matrix(x[[i]]$x$data)
+  }
+
+  if (length(x) == 1 && is.null(var)) {
+    warning("Must specify the `var` parameter when plotting the confidence interval of an unvarying model. Plotting nothing.")
+    return(invisible(FALSE))
+  }
+
+  if (is.null(var)) {
+    each.var <- apply(xmatrix,2,sd) 
+    flag <- each.var>0
+    min.var<-min(each.var[flag])
+    var.seq<-1:ncol(xmatrix)
+    position<-var.seq[each.var==min.var]  
+    position<-min(position)
+    xseq<-xmatrix[,position]
+    return()
+    xname<-names(x[[1]]$x$data[position])
+  } else {
+
+    if(is.numeric(var)){
+      position<-var
+    }else if(is.character(var)){
+      position<-grep(var,names(x[[1]]$x$data))
+    }
+    xseq<-xmatrix[,position]
+    xname<-names(x[[1]]$x$data[position])
+  }
+
+
+  if(qi=="pv"){
+    ev<-simulation.matrix(x, "Predicted Values: Y|X")
+  }else{
+    ev<-simulation.matrix(x, "Expected Values: E(Y|X)")
+  }
+
+
+  # Define functions to compute confidence intervals
+  ci.upper <- function (x, alpha) {
+    pos <- max(round((1-alpha)*length(x)), 1)
+    return(sort(x)[pos])
+  }
+
+  ci.lower <- function (x, alpha) {
+    pos<-max(round(alpha*length(x)), 1)
+    return(sort(x)[pos])
+  }
+
+  #
+  k<-ncol(ev)
+  n<-nrow(ev)
+
+  #
+  if(is.null(col)){
+    myblue1<-rgb( 100, 149, 237, alpha=50, maxColorValue=255)
+    myblue2<-rgb( 152, 245, 255, alpha=50, maxColorValue=255)
+    myblue3<-rgb( 191, 239, 255, alpha=70, maxColorValue=255)
+    col<-c(myblue1,myblue2,myblue3)
+  }
+  history<-matrix(NA, nrow=k,ncol=8)
+  for (i in 1:k) {
+    v <- c(
+           xseq[i],
+           median(ev[,i]),
+
+           ci.upper(ev[,i],0.8),
+           ci.lower(ev[,i],0.8),
+
+           ci.upper(ev[,i],0.95),
+           ci.lower(ev[,i],0.95),
+
+           ci.upper(ev[,i],0.999),
+           ci.lower(ev[,i],0.999)
+           )
+
+    history[i, ] <- v
+  }
+  if (k == 1) {
+    left <- c(
+           xseq[1]-.5,
+           median(ev[,1]),
+
+           ci.upper(ev[,1],0.8),
+           ci.lower(ev[,1],0.8),
+
+           ci.upper(ev[,1],0.95),
+           ci.lower(ev[,1],0.95),
+
+           ci.upper(ev[,1],0.999),
+           ci.lower(ev[,1],0.999)
+           )
+    right <- c(
+           xseq[1]+.5,
+           median(ev[,1]),
+
+           ci.upper(ev[,1],0.8),
+           ci.lower(ev[,1],0.8),
+
+           ci.upper(ev[,1],0.95),
+           ci.lower(ev[,1],0.95),
+
+           ci.upper(ev[,1],0.999),
+           ci.lower(ev[,1],0.999)
+           )
+    v <- c(
+           xseq[1],
+           median(ev[,1]),
+
+           ci.upper(ev[,1],0.8),
+           ci.lower(ev[,1],0.8),
+
+           ci.upper(ev[,1],0.95),
+           ci.lower(ev[,1],0.95),
+
+           ci.upper(ev[,1],0.999),
+           ci.lower(ev[,1],0.999)
+           )
+    k <- 3
+    history <- rbind(left, v, right)
+  }
+
+  # Specify x-axis length
+  all.xlim <- if (is.null(xlim))
+    c(min(history[, 1]),max(history[, 1]))
+  else
+    xlim
+
+  # Specify y-axis length
+  all.ylim <-if (is.null(ylim))
+    c(min(history[, -1]), max(history[, -1]))
+  else
+    ylim
+
+  # Define xlabel
+  if (is.null(xlab))
+    xlab <- paste("Range of",xname)
+
+  if (is.null(ylab))
+    ylab <- "Expected Values: E(Y|X)"
+
+  ## This is the plot
+
+  par(bty="n")
+
+  plot(x=history[, 1], y=history[, 2], type="l", xlim=all.xlim, ylim=all.ylim, main = main, sub = sub, xlab=xlab, ylab=ylab)
+
+  polygon(c(history[,1],history[k:1,1]),c(history[,5],history[k:1,6]),col=col[2],border="gray90")
+  polygon(c(history[,1],history[k:1,1]),c(history[,3],history[k:1,4]),col=col[1],border="gray60")
+  polygon(c(history[,1],history[k:1,1]),c(history[,7],history[k:1,8]),col=col[3],border="white")
+
+  ## This is the legend
+
+  if(is.null(legpos)){
+    if(leg==1){
+      legpos<-c(.91,.04,.2,.05)
+    }else if(leg==2){
+      legpos<-c(.09,.04,.2,.05)
+    }else if(leg==3){
+      legpos<-c(.09,.04,.8,.05)
+    }else{
+      legpos<-c(.91,.04,.8,.05)
+    }
+  }
+
+  lx<-min(all.xlim)+ legpos[1]*(max(all.xlim)- min(all.xlim))
+  hx<-min(all.xlim)+ (legpos[1]+legpos[2])*(max(all.xlim)- min(all.xlim))
+
+  deltax<-(hx-lx)*.1
+
+  my<-min(all.ylim) +legpos[3]*min(max(all.ylim) - min(all.ylim))
+  dy<-legpos[4]*(max(all.ylim) - min(all.ylim))
+
+
+  lines(c(hx+deltax,hx+2*deltax,hx+2*deltax,hx+deltax),c(my+3*dy,my+3*dy,my-3*dy,my-3*dy),col=legcol)
+  lines(c(hx+3*deltax,hx+4*deltax,hx+4*deltax,hx+3*deltax),c(my+1*dy,my+1*dy,my-1*dy,my-1*dy),col=legcol)
+  lines(c(lx-deltax,lx-2*deltax,lx-2*deltax,lx-deltax),c(my+2*dy,my+2*dy,my-2*dy,my-2*dy),col=legcol)
+  lines(c(lx-5*deltax,lx),c(my,my),col="white",lwd=3)
+  lines(c(lx-5*deltax,lx),c(my,my),col=legcol)
+  lines(c(lx,hx),c(my,my))
+
+  polygon(c(lx,lx,hx,hx),c(my-2*dy,my+2*dy,my+2*dy,my-2*dy),col=col[2],border="gray90")
+  polygon(c(lx,lx,hx,hx),c(my-1*dy,my+1*dy,my+1*dy,my-1*dy),col=col[1],border="gray60")
+  polygon(c(lx,lx,hx,hx),c(my-3*dy,my+3*dy,my+3*dy,my-3*dy),col=col[3],border="white")
+
+  text(lx,my,labels="median",pos=2,cex=0.5,col=legcol)
+  text(lx,my+2*dy,labels="ci95",pos=2,cex=0.5,col=legcol)
+  text(hx,my+1*dy,labels="ci80",pos=4,cex=0.5,col=legcol)
+  text(hx,my+3*dy,labels="ci99.9",pos=4,cex=0.5,col=legcol)
+}
+
+#' Method for plotting pooled simulations by confidence intervals
+#'
+#' Plot pooled simulated quantities of interest.
+#' @usage \method{plot}{pooled.sim}(x, qi="ev", var=NULL,  ...,  legcol="gray20", col=NULL, leg=1, legpos=NULL)
+#' @S3method plot pooled.sim
+#' @param x A `sim' object
+#' @param qi a character-string specifying the quantity of interest to plot
+#' @param var The variable to be used on the x-axis. Default is the variable
+#' across all the chosen values with smallest nonzero variance
+#' @param ... Parameters to be passed to the `truehist' function which is 
+#' implicitly called for numeric simulations
+#' @param legcol ``legend color'', an valid color used for plotting the line
+#' colors in the legend
+#' @param col a valid vector of colors of at least length 3 to use to color the
+#' confidence intervals
+#' @param leg ``legend position'', an integer from 1 to 4, specifying the
+#' position of the legend. 1 to 4 correspond to ``SE'', ``SW'', ``NW'', and
+#' ``NE'' respectively
+#' @param legpos ``legend type'', exact coordinates and sizes for legend.
+#' Overrides argment ``leg.type''
+#' @return the current graphical parameters. This is subject to change in future
+#' implementations of Zelig
+#' @author James Honaker, adapted by Matt Owen \email{mowen@@iq.harvard.edu}
+plot.pooled.sim <- plot.ci
#' Method for plotting simulations
#'
#' Plot simulated quantities of interest.
#' @usage \method{plot}{sim}(x, ...)
#' @S3method plot sim
#' @param x a `sim' object
#' @param ... parameters to be passed to the `truehist' function which is 
#' implicitly called for numeric simulations
#' @return nothing
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
plot.sim <- function (x, ...) {

  # Locate the namespace of the package that supplied the model; fall back
  # to the global environment if the namespace cannot be loaded.
  env <- tryCatch(
    asNamespace(x$package.name),
    error = function (e) { 
      warning("")
      globalenv()
    }
    )

  # If the model-supplying package defines its own "plot.simulations",
  # delegate all plotting to it.
  if (exists("plot.simulations", envir = env, mode="function")) {
    # Get the simulation, because we know it exists
    .plotter <- get("plot.simulations", envir = env, mode="function")

    # Pass to a temporary variable to improve the visibility of the traceback
    # if there is an error
    res <- .plotter(x, ...)

    # Return object (whatever it is)
    return(invisible(res))
  }

  # Otherwise we just use this fall-back
  old.par <- par(no.readonly = TRUE)

  # The number of quantities of interest determines the panel layout:
  # two columns, as many rows as needed, one color per qi.
  total.qis <- length(names(x$qi))
  palette <- rainbow(total.qis)
  total.cols <- 2
  total.rows <- ceiling(total.qis/total.cols)

  # When the qi count is odd, repeat the last panel index so the layout
  # matrix is completely filled.
  # BUG FIX: an earlier `vals <- ifelse(...)` line was removed -- `ifelse`
  # is vectorized and would have returned only the first element; its
  # (wrong) result was immediately overwritten by the `if` below anyway.
  vals <- if (total.qis %% 2) {
    c(1:total.qis, total.qis)
  }
  else {
    1:total.qis
  }

  # Construct layout
  layout(matrix(vals, total.rows, total.cols, byrow=TRUE))

  # Plot each quantity of interest in its own panel
  k <- 1
  for (title in names(x$qi)) {
    simulations.plot(x$qi[[title]], main = title, col = palette[k], line.col = "black")
    k <- k + 1
  }

  # Restore and return the previous graphical parameters
  return(par(old.par))
}
+
#' Method for plotting simulations of the cloglog.net model
#'
#' Identical fall-back logic to \code{plot.sim}: delegate to a
#' package-supplied \code{plot.simulations} when available, otherwise lay
#' out one panel per quantity of interest.
#' @S3method plot sim.cloglog.net
#' @param x a `sim.cloglog.net' object
#' @param ... parameters passed to the implicitly-called plotting routines
#' @return the previous graphical parameters
plot.sim.cloglog.net <- function (x, ...) {

  # Locate the namespace of the package that supplied the model; fall back
  # to the global environment if the namespace cannot be loaded.
  env <- tryCatch(
    asNamespace(x$package.name),
    error = function (e) { 
      warning("")
      globalenv()
    }
  )

  # If the model-supplying package defines its own "plot.simulations",
  # delegate all plotting to it.
  if (exists("plot.simulations", envir = env, mode="function")) {
    # Get the simulation, because we know it exists
    .plotter <- get("plot.simulations", envir = env, mode="function")

    # Pass to a temporary variable to improve the visibility of the traceback
    # if there is an error
    res <- .plotter(x, ...)

    # Return object (whatever it is)
    return(invisible(res))
  }

  # Otherwise we just use this fall-back
  old.par <- par(no.readonly = TRUE)

  # Two-column layout with one color per quantity of interest
  total.qis <- length(names(x$qi))
  palette <- rainbow(total.qis)
  total.cols <- 2
  total.rows <- ceiling(total.qis/total.cols)

  # When the qi count is odd, repeat the last panel index so the layout
  # matrix is completely filled.
  # BUG FIX: a redundant, vectorized `ifelse(...)` assignment (whose value
  # was wrong and immediately overwritten) was removed.
  vals <- if (total.qis %% 2) {
    c(1:total.qis, total.qis)
  }
  else {
    1:total.qis
  }

  # Construct layout
  layout(matrix(vals, total.rows, total.cols, byrow=TRUE))

  # Plot each quantity of interest in its own panel
  k <- 1
  for (title in names(x$qi)) {
    simulations.plot(x$qi[[title]], main = title, col = palette[k], line.col = "black")
    k <- k + 1
  }

  # Restore and return the previous graphical parameters
  return(par(old.par))
}
+
+
#' Plot Any Simulation from the Zelig Core Package
#'
#' Plots any simulation from the core package. In general, this function can
#' \emph{neatly} plot simulations containing five of the popular ``quantities
#' of interest'' - ``Expected Values: E(Y|X)'', ``Predicted Values: Y|X'',
#' ``Expected Values (for X1): E(Y|X1)'', ``Predicted Values (for X1): Y|X1''
#' and ``First Differences: E(Y|X1) - E(Y|X)''.
#' @param x an object
#' @param ... parameters passed to the ``plot'' and ``barplot'' functions
#' @return the original graphical parameters
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
plot.simulations <- function (x, ...) {
  # Save old state
  old.par <- par(no.readonly=T)

  # Quantities of Interest
  qi <- x$qi

  # Define Relevant quantity of interest titles that have special properties
  ev.titles <- c('Expected Values: E(Y|X)', 'Expected Values: E(Y|X1)')
  pv.titles <- c('Predicted Values: Y|X', 'Predicted Values: Y|X1')

  # Determine whether two "Expected Values" qi's exist
  both.ev.exist <- all(ev.titles %in% names(qi))
  # Determine whether two "Predicted Values" qi's exist
  both.pv.exist <- all(pv.titles %in% names(qi))

  # Color of x should always be this pretty blue
  color.x <- rgb(242, 122, 94, maxColorValue=255)
  color.x1 <- rgb(100, 149, 237, maxColorValue=255)

  # This mixes the above two colors, and converts the result into hexadecimal
  color.mixed <- rgb(t(round((col2rgb(color.x) + col2rgb(color.x1))/2)), maxColorValue=255)

  if (is.null(x$x)) {
    # No covariate profile was set: nothing to plot, restore and return
    return(par(old.par))
  }
  else if (is.null(x$x1) || is.na(x$x1)) {
    # Only X was set: two stacked panels (EV and PV).
    # NOTE(review): is.na() on a non-scalar x$x1 yields a vector; this
    # presumably relies on x$x1 being scalar NA when unset -- confirm.
    panels <- matrix(1:2, 2, 1)

    # The plotting device:
    # +--------+
    # |   1    |
    # +--------+
    # |   2    |
    # +--------+
  }
  else {

    # Both X and X1 were set: four small panels plus a wide FD panel, with
    # one or two extra comparison panels depending on which qi pairs exist.
    panels <- matrix(c(1:5, 5), ncol=2, nrow=3, byrow = TRUE)

    panels <- if (xor(both.ev.exist, both.pv.exist))
      rbind(panels, c(6, 6))
    else if (both.ev.exist && both.pv.exist)
      rbind(panels, c(6, 7))
    else
      panels


    # the plotting device:
    #
    # +-----------+    +-----------+
    # |  1  |  2  |    |  1  |  2  |
    # +-----+-----+    +-----+-----+
    # |  3  |  4  |    |  3  |  4  |
    # +-----+-----+ OR +-----+-----+
    # |     5     |    |     5     |
    # +-----------+    +-----------+
    # |  6  |  7  |    |     6     |
    # +-----+-----+    +-----+-----+
  }

  # Apply the chosen panel arrangement
  layout(panels)

  # Canonical qi titles used as list keys below
  titles <- list(
    ev  = "Expected Values: E(Y|X)",
    ev1 = "Expected Values: E(Y|X1)",
    pv  = "Predicted Values: Y|X",
    pv1 = "Predicted Values: Y|X1",
    fd  = "First Differences: E(Y|X1) - E(Y|X)"
    )
  
  # Plot each simulation
  simulations.plot(qi[[titles$pv]], main = titles$pv, col = color.x, line.col = "black")
  simulations.plot(qi[[titles$pv1]], main = titles$pv1, col = color.x1, line.col = "black")
  simulations.plot(qi[[titles$ev]], main = titles$ev, col = color.x, line.col = "black")
  simulations.plot(qi[[titles$ev1]], main = titles$ev1, col = color.x1, line.col = "black")
  simulations.plot(qi[[titles$fd]], main = titles$fd, col = color.mixed, line.col = "black")

  if (both.pv.exist) {
    simulations.plot(
      qi[["Predicted Values: Y|X"]],
      qi[["Predicted Values: Y|X1"]],
      main = "Comparison of Y|X and Y|X1",
      # Note that we are adding transparency to this
      col = paste(c(color.x, color.x1), "80", sep=""),
      line.col = "black")
  }

  if (both.ev.exist) {
    simulations.plot(
      qi[["Expected Values: E(Y|X)"]],
      qi[["Expected Values: E(Y|X1)"]],
      main = "Comparison of E(Y|X) and E(Y|X1)",
      # Note that we are adding transparency to this
      col = paste(c(color.x, color.x1), "80", sep=""),
      line.col = "black")
  }

  # Restore old state
  par(old.par)

  # Return old parameter invisibly
  invisible(old.par)
}
#' Plot Simulated Quantities of Interest for a Rare-events Logit Model
#'
#' For scalar (single-column) quantities of interest, the predicted 0/1
#' outcomes are shown as a horizontal bar plot of percentages and the
#' remaining qi's as density plots; otherwise each qi is displayed as a
#' sample of simulated intervals.
#' @param x a simulation object produced by a relogit model
#' @param xlab label for the x-axis
#' @param user.par if FALSE (default), set margins/ticks before plotting
#' @param alt.col accent color for bars and interval lines
#' @param ylab y-axis label; defaults to "Observations (n = <samples>)"
#' @param samples number of simulated intervals to display per panel
#' @param ... further arguments passed to \code{plot}
#' @return NULL; graphical parameters are restored on exit
plot.zelig.relogit <- function(x, xlab ="", user.par = FALSE, alt.col = "red",
                               ylab = NULL, samples = 100, ...){
  k <- length(x$qi)
  op <- par(no.readonly = TRUE)
  if (!user.par) 
    par(mar = c(4,4,2,1), tcl = -0.25, mgp = c(2, 0.6, 0))
  par(mfrow = c(k, 1))
  if (dim(x$qi[[1]])[2] == 1) {
    # Scalar qi's: summarize predicted binary outcomes as percentages
    pr <- x$qi$pr
    y0 <- 100 * sum(pr == 0)/length(pr)
    y1 <- 100 * sum(pr == 1)/length(pr)
    barplot(c(y0, y1), horiz = TRUE, col = alt.col, las = 1,
            names.arg = c("Y = 0", "Y = 1"),
            xlab = "Percentage of Simulations",
            main = x$qi.name$pr, xlim = c(0, 100))
    x$qi$pr <- x$qi.name$pr <- NULL
    for (i in 1:(k-1)) {
      qi <- as.vector(x$qi[[i]])
      plot(density(qi), main = x$qi.name[[i]], xlab = xlab, ...)
    }    
  }
  else {
    for (i in 1:k) {
      qi <- x$qi[[i]]
      main <- as.character(x$qi.name[i])
      if (is.null(rownames(qi)))
        rownames(qi) <- 1:dim(qi)[1]
      # BUG FIX: the subsample size and the y-axis range were hard-coded to
      # 100, silently ignoring the `samples` argument (which was only used
      # in the axis label).
      idx <- as.integer(sample(rownames(qi), samples))
      tmp <- qi[idx,,1]
      xlim <- c(min(qi[,1,1]), max(qi[,2,1]))
      if (is.null(ylab))
        ylab <- paste("Observations (n = ", samples, ")", sep = "")
      plot(xlim, type = "n", xlab = xlab, ylab = ylab,
           main = main, ylim = c(0, samples), xlim = xlim, ...)
      # Draw one horizontal interval per sampled simulation
      for (j in 1:nrow(tmp))
        lines(c(tmp[j,1], tmp[j,2]), c(j,j), col = alt.col)
      # Vertical reference lines at the mean interval endpoints
      abline(v = mean(qi[,1,1]))
      abline(v = mean(qi[,2,1]))
    }
  }
  par(op)
}
diff --git a/R/poisson.R b/R/poisson.R
new file mode 100644
index 0000000..7ae2f40
--- /dev/null
+++ b/R/poisson.R
@@ -0,0 +1,116 @@
#' Interface between poisson model and Zelig
#'
#' This function is exclusively for use by the `zelig' function
#' @param formula a formula
#' @param weights a numeric vector
#' @param ... ignored parameters
#' @param data a data.frame
#' @return a list to be coerced into a zelig.call object
#' @export
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
zelig2poisson <- function(formula, weights=NULL, ..., data) {
  # Delegate the fit to stats::glm with a Poisson family.  `model = FALSE`
  # keeps the (potentially large) model frame out of the returned fit.
  z(
    glm,
    # .hook = "robust.glm.hook",
    formula = formula,
    weights = weights,
    family  = poisson(),
    model   = FALSE,  # spelled out: `F` is an ordinary, shadowable binding
    data    = data
    )
}
#' Param Method for the 'poisson' Zelig Model
#' @note This method is used by the 'poisson' Zelig model
#' @usage \method{param}{poisson}(obj, num=1000, ...)
#' @S3method param poisson
#' @param obj a 'zelig' object
#' @param num an integer specifying the number of simulations to sample
#' @param ... ignored
#' @return a list to be cast as a 'parameters' object
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
param.poisson <- function (obj, num=1000, ...) {
  # BUG FIX: the @S3method tag read "param negbinom", which would register
  # this function as the negative-binomial method instead of the poisson one.
  # NOTE(review): `.fitted` is not a parameter of this function -- it is
  # presumably injected into the evaluation environment by Zelig's dispatch
  # machinery (see attach.env.R); confirm before changing to `obj`.
  list(
       simulations = mvrnorm(num, mu=coef(.fitted), Sigma=vcov(.fitted)),
       fam = poisson()
       )
}
#' Compute quantities of interest for 'poisson' Zelig models
#' @usage \method{qi}{poisson}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
#' @S3method qi poisson
#' @param obj a 'zelig' object
#' @param x a 'setx' object or NULL
#' @param x1 an optional 'setx' object
#' @param y this parameter is reserved for simulating average treatment effects,
#'   though this feature is currently supported by only a handful of models
#' @param num an integer specifying the number of simulations to compute
#' @param param a parameters object
#' @return a list of key-value pairs specifying pairing titles of quantities of
#'   interest with their simulations
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
qi.poisson <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
  # Simulated coefficient draws (one row per simulation)
  coef <- coef(param)

  # get inverse function
  inverse <- linkinv(param)

  # Linear predictor for every simulation/profile pair, mapped through the
  # inverse link to obtain the Poisson rate
  eta <- coef %*% t(x)
  theta <- matrix(inverse(eta), nrow=nrow(coef))

  # Expected values are the simulated rates; predicted values are drawn below
  ev <- theta
  pr <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))

  # default values
  ev1 <- pr1 <- fd <- NA

  # One Poisson draw per simulated rate, column (profile) by column
  for (i in 1:ncol(ev))
    pr[,i] <- rpois(nrow(ev), lambda = ev[,i])


  if (!is.null(x1)) {

    # Recurse on the alternative covariate profile to obtain its qi's
    results <- qi(obj, x1, num=num, param=param)

    # pass values over
    ev1 <- results[["Expected Values: E(Y|X)"]]
    pr1 <- results[["Predicted Values: Y|X"]]

    # compute first differences
    fd <- ev1 - ev
  }

  # Return quantities of interest
  list("Expected Values: E(Y|X)"  = ev,
       "Expected Values: E(Y|X1)" = ev1,
       "Predicted Values: Y|X"    = pr,
       "Predicted Values: Y|X1"   = pr1,
       "First Differences: E(Y|X1) - E(Y|X)" = fd
       )
}
#' Describe the `poisson' model to Zelig
#' @usage \method{describe}{poisson}(...)
#' @S3method describe poisson
#' @param ... ignored parameters
#' @return a list to be processed by `as.description'
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
describe.poisson <- function(...) {
  # The model has a single systematic parameter: the Poisson rate lambda
  lambda.spec <- list(
    equations = c(1, 1),
    tags.allowed = FALSE,
    dep.vars = TRUE,
    exp.vars = TRUE
  )

  # Static metadata consumed by as.description()
  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
       year     = 2007,
       category = "count",
       parameters = list(lambda = lambda.spec),
       text = "Poisson Regression for Event Count Dependent Variables")
}
diff --git a/R/poisson.bayes.R b/R/poisson.bayes.R
new file mode 100644
index 0000000..afc5589
--- /dev/null
+++ b/R/poisson.bayes.R
@@ -0,0 +1,90 @@
#' Interface between the Zelig Model poisson.bayes and the Pre-existing Model-fitting Method
#' @param formula a formula
#' @param burnin number of burn-in iterations for the MCMC sampler
#' @param mcmc number of posterior draws to keep
#' @param verbose an integer controlling how often sampler progress is
#'   reported; when omitted it defaults to every tenth of the total iterations
#' @param ... additonal parameters
#' @param data a data.frame 
#' @return a list specifying '.function'
#' @export
zelig2poisson.bayes <- function (
                               formula, 
                               burnin = 1000, mcmc = 10000, 
                               verbose = 0, 
                               ..., 
                               data
                               ) {

  loadDependencies("MCMCpack", "coda")

  # NOTE(review): because `verbose` also has a default of 0, this branch
  # fires exactly when the caller omits the argument, so the declared
  # default is effectively never used -- confirm this is intentional.
  if (missing(verbose))
    verbose <- round((mcmc + burnin)/10)

  # Describe the external call: MCMCpack's MCMCpoisson, post-processed by
  # the MCMChook to normalize the returned chain.
  list(
       .function = "MCMCpoisson",
       .hook = "MCMChook",

       formula = formula,
       data   = data,
       burnin = burnin,
       mcmc   = mcmc,
       verbose= verbose,

       # Most parameters can be simply passed forward
       ...
       )
}
+
#' Param Method for the 'poisson.bayes' Zelig Model
#' @S3method param poisson.bayes
#' @param obj a 'zelig' object fitted by MCMCpoisson
#' @param num number of simulations (unused here: the posterior draws are
#'   taken directly from the fitted object)
#' @param ... ignored
#' @return a list to be cast as a 'parameters' object
param.poisson.bayes <- function(obj, num=1000, ...) {
  # The MCMC chain already contains the coefficient draws; the Poisson
  # family supplies the link and link-inverse.
  params <- list()
  params$coef <- coef(obj)
  params$fam <- poisson()
  params
}
+
#' Compute quantities of interest for 'poisson.bayes' Zelig models
#' @S3method qi poisson.bayes
#' @param obj a 'zelig' object
#' @param x a 'setx' object or NULL
#' @param x1 an optional 'setx' object
#' @param y reserved for average-treatment-effect simulation (unused here)
#' @param num number of simulations
#' @param param a parameters object
#' @return a named list of simulated quantities of interest
qi.poisson.bayes <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
{
  # BUG FIX: the roxygen tag read "@S3method qi normal.bayes", which would
  # register this function as the normal.bayes method; it belongs to
  # poisson.bayes.

  # Simulate expected/predicted values at both covariate profiles
  res1 <- poisson.ev(x, param)
  res2 <- poisson.ev(x1, param)

  list(
       "Expected Value: E(Y|X)" = res1$ev,
       "Predicted Value: Y|X" = res1$pv,
       "Expected Value (for X1): E(Y|X1)" = res2$ev,
       "Predicted Value (for X1): Y|X1" = res2$pv,
       "First Differences: E(Y|X1) - E(Y|X)" = res2$ev - res1$ev
       )
}
+
#' Simulate Expected and Predicted Values for a Poisson Model
#'
#' Shared helper used by the poisson.bayes qi method: maps simulated
#' coefficients through the link-inverse at a covariate profile, then draws
#' Poisson variates at each simulated rate.
#' @param x a 'setx'-style design matrix (or NULL/NA when the profile is unset)
#' @param param a parameters object providing coef() and linkinv()
#' @return a list with elements `ev` (expected values) and `pv` (predicted
#'   values); both NA when the inputs are unusable
poisson.ev <- function (x, param) {
  # If either of the parameters are invalid,
  # Then return NA for both qi's
  # NOTE(review): is.na(x) on a non-scalar object yields a vector; this
  # presumably relies on x being scalar NA when unset -- confirm.
  if (is.null(x) || is.na(x) || is.null(param))
    return(list(ev=NA, pv=NA))

  # Extract inverse-link and simulated parameters (respectively)
  inv <- linkinv(param)
  eta <- coef(param) %*% t(x)

  # Give matrix identical rows/columns to the simulated parameters
  ev <- pv <- matrix(NA, nrow(eta), ncol(eta))
  dimnames(ev) <- dimnames(pv) <- dimnames(eta)

  # Compute Expected Values
  ev <- inv(eta)

  # Compute Predicted Values: one Poisson draw per simulated rate,
  # column (covariate profile) by column
  for (i in 1:ncol(ev))
    pv[, i] <- rpois(length(ev[, i]), ev[, i])

  list(ev=ev, pv=pv)
}
+
#' Describe the 'poisson.bayes' model to Zelig
#' @S3method describe poisson.bayes
#' @param ... ignored parameters
#' @return a list to be processed by 'as.description'
describe.poisson.bayes <- function(...) {
  # Static metadata consumed by as.description()
  model.authors <- c("Ben Goodrich", "Ying Lu")

  list(description = "Bayesian Poisson Regression",
       authors = model.authors,
       year = 2013)
}
diff --git a/R/poisson.gee.R b/R/poisson.gee.R
new file mode 100644
index 0000000..5545da9
--- /dev/null
+++ b/R/poisson.gee.R
@@ -0,0 +1,71 @@
#' Interface between the Zelig Model poisson.gee and 
#' the Pre-existing Model-fitting Method
#' @param formula a formula
#' @param id a character-string specifying the column of the data-set to use
#'   for clustering
#' @param robust a logical specifying whether to robustly or naively compute
#'   the covariance matrix. This parameter is ignored in the \code{zelig2}
#'   method, and instead used in the \code{robust.hook} function, which
#'   executes after the call to the \code{gee} function
#' @param ... ignored parameters
#' @param R a square-matrix specifying the correlation
#' @param corstr a character-string specifying the correlation structure
#' @param data a data.frame 
#' @return a list specifying the call to the external model
#' @export
zelig2poisson.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {

  loadDependencies("gee")

  # A fixed correlation structure requires an explicit correlation matrix
  if (corstr == "fixed" && is.null(R))
    stop("R must be defined")

  # if id is a valid column-name in data, then we just need to extract the
  # column and re-order the data.frame and cluster information
  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
    id <- data[, id]
    data <- data[order(id), ]
    id <- sort(id)
  }

  # NOTE(review): `.hook` is passed here as the function object, while other
  # models (e.g. poisson.bayes) pass the hook by name as a string -- confirm
  # the dispatch machinery accepts both forms.
  z(
    .function = gee,
    .hook = robust.gee.hook,

    formula = formula,
    id = id,
    corstr = corstr,
    family  = poisson(),
    data = data,
    R = R,
    ...
    )
}
+
#' Param Method for the 'poisson.gee' Zelig Model
#' @S3method param poisson.gee
#' @param obj a 'zelig' object fitted via gee
#' @param num number of coefficient vectors to simulate
#' @param ... ignored
#' @return a list to be cast as a 'parameters' object
param.poisson.gee <- function(obj, num=1000, ...) {

  # Extract means to compute maximum likelihood
  mu <- coef(obj)

  # Extract covariance matrix to compute maximum likelihood
  Sigma <- vcov(obj)

  # BUG FIX: the family was `Gamma()` -- an apparent copy-paste from
  # gamma.gee.  The shared qi code computes expected values through this
  # family's link-inverse, and Gamma's default (inverse) link is wrong for
  # a Poisson GEE; the poisson family (log link) is the correct one.
  list(
       coef = mvrnorm(num, mu, Sigma),
       fam = poisson()
       )
}
+
#' @S3method qi poisson.gee
# The GEE quantity-of-interest logic is family-agnostic: it works entirely
# through the link-inverse stored in the parameters object, so the gamma.gee
# implementation is reused verbatim for the Poisson case.
qi.poisson.gee <- qi.gamma.gee
+
#' Describe the 'poisson.gee' model to Zelig
#' @S3method describe poisson.gee
#' @param ... ignored parameters
#' @return a list to be processed by 'as.description'
describe.poisson.gee <- function(...) {
  # Static metadata consumed by as.description()
  info <- list()
  info$authors <- "Patrick Lam"
  info$text <- "General Estimating Equation for Poisson Regression"
  info$year <- 2011
  info
}
diff --git a/R/poisson.survey.R b/R/poisson.survey.R
new file mode 100644
index 0000000..8f995e4
--- /dev/null
+++ b/R/poisson.survey.R
@@ -0,0 +1,155 @@
#' Interface between the Zelig model poisson.survey and svyglm
#'
#' Builds a survey design -- replicate-weight based when \code{repweights}
#' is supplied, probability-sample based otherwise -- and returns a call to
#' \code{survey::svyglm} with a Poisson family.
#' @export
zelig2poisson.survey <- function(
                               formula,
                               weights=NULL, 
                               ids=NULL,
                               probs=NULL,
                               strata = NULL,  
                               fpc=NULL,
                               nest = FALSE,
                               check.strata = !nest,
                               repweights = NULL,
                               type,
                               combined.weights=FALSE,
                               rho = NULL,
                               bootstrap.average=NULL, 
                               scale=NULL,
                               rscales=NULL,
                               fpctype="fraction",
                               return.replicates=FALSE,
                               na.action="na.omit",
                               start=NULL,
                               etastart=NULL, 
                               mustart=NULL,
                               offset=NULL, 	      		
                               model1=TRUE,
                               method="glm.fit",
                               x=FALSE,
                               y=TRUE,
                               contrasts=NULL,
                               design=NULL,
                               data
                               ) {
  loadDependencies("survey")

  # Default to sampling without clustering
  if (is.null(ids))
    ids <- ~1

  # the following lines designate the design
  # NOTE: nothing truly special goes on here;
  #       the below just makes sure the design is created correctly
  #       for whether or not the replication weights are set
  # NOTE(review): many accepted arguments (na.action, start, etastart,
  # mustart, offset, model1, method, x, y, contrasts, return.replicates,
  # and the incoming `design`) are never forwarded -- confirm intended.
  design <- if (is.null(repweights))
    svydesign(
              data=data,
              ids=ids,
              probs=probs,
              strata=strata,
              fpc=fpc,
              nest=nest,
              check.strata=check.strata,
              weights=weights
              )

  else {
    # NOTE(review): this assignment is local to the function and is not used
    # afterwards -- presumably it was meant to set a variable consumed
    # elsewhere (e.g. via <<- or an enclosing environment); confirm.
    .survey.prob.weights <- weights
    svrepdesign(
                data=data,
                repweights=repweights, 	
                type=type,
                weights=weights,
                combined.weights=combined.weights, 
                rho=rho,
                bootstrap.average=bootstrap.average,
                scale=scale,
                rscales=rscales,
                fpctype=fpctype,
                fpc=fpc
                )
  }

  # Describe the external call: survey-weighted Poisson GLM
  z(.function = svyglm,
    formula = formula,
    design  = design,
    family  = poisson()
    )
}
#' Param Method for the 'poisson.survey' Zelig Model
#' @S3method param poisson.survey
#' @param obj a 'zelig' object fitted via svyglm
#' @param num number of coefficient vectors to simulate
#' @param ... ignored
#' @return a list to be cast as a 'parameters' object
param.poisson.survey <- function(obj, num=1000, ...) {
  # Draw `num` coefficient vectors from the multivariate normal implied by
  # the fitted model's point estimates and covariance matrix.
  draws <- mvrnorm(num, coef(obj), vcov(obj))

  # note: assignment of link and link-inverse are implicit when the
  #       family is assigned
  list(simulations = draws,
       alpha = NULL,
       fam = poisson())
}
#' Compute quantities of interest for 'poisson.survey' Zelig models
#' @S3method qi poisson.survey
#' @param obj a 'zelig' object
#' @param x a 'setx' object
#' @param x1 an optional 'setx' object
#' @param y observed outcomes, used to compute average treatment effects
#' @param num number of simulations
#' @param param a parameters object
#' @return a named list of simulated quantities of interest
qi.poisson.survey <- function(obj, x, x1=NULL, y=NULL, num=1000, param=NULL) {
  model <- GetObject(obj)

  # Simulated coefficients and ancillary parameter
  coef <- coef(param)
  alpha <- alpha(param)

  # Linear predictor for every simulation/profile pair
  eta <- coef %*% t(x)

  link.inverse <- linkinv(param)

  # Poisson rate for each simulation
  theta <- matrix(link.inverse(eta), nrow=nrow(coef))

  pr <- ev <- matrix(NA, nrow=nrow(theta), ncol(theta))

  dimnames(pr) <- dimnames(ev) <- dimnames(theta)

  # Expected values are the simulated rates themselves
  ev <- theta

  # Predicted values: one Poisson draw per simulated rate, row by row
  for (k in 1:nrow(ev))
    pr[k, ] <- rpois(length(ev[k, ]), lambda=ev[k, ])


  # Defaults when no alternative profile is supplied
  ev1 <- pr1 <- fd <- NA

  if (!is.null(x1)) {
    # NOTE(review): only expected values are computed for X1; `pr1` stays NA
    # even when x1 is supplied -- confirm this omission is intentional.
    ev1 <- theta1 <- matrix(link.inverse(coef %*% t(x1)),
                            nrow = nrow(coef)
                            )

    fd <- ev1-ev
  }

  att.ev <- att.pr <- NA

  # Average treatment effects on the treated, when observed y is supplied
  if (!is.null(y)) {
    yvar <- matrix(rep(y, nrow(coef)), nrow=nrow(coef), byrow=TRUE)

    tmp.ev <- yvar - ev
    tmp.pr <- yvar - pr

    att.ev <- matrix(apply(tmp.ev, 1, mean), nrow=nrow(coef))
    att.pr <- matrix(apply(tmp.pr, 1, mean), nrow=nrow(coef))
  }

  # NOTE(review): these qi labels differ in wording/punctuation from the
  # other poisson qi methods (e.g. "Expected Values for (X1)") -- confirm.
  list(
       "Expected Values: E(Y|X)" = ev,
       "Expected Values for (X1): E(Y|X1)" = ev1,
       "Predicted Values: Y|X" = pr,
       "Predicted Values (for X1): Y|X1" = pr1,
       "First Differences E(Y|X1)-E(Y|X)" = fd,
       "Average Treatment Effect: Y-EV" = att.ev,
       "Average Treatment Effect: Y-PR" = att.pr
       )
}
#' Describe the 'poisson.survey' model to Zelig
#' @S3method describe poisson.survey
#' @param ... ignored parameters
#' @return a list to be processed by 'as.description'
describe.poisson.survey <- function(...) {
  # BUG FIX: the description read "for Continuous, Positive Dependent
  # Variables" (copied from the gamma.survey model); Poisson regression
  # models event-count outcomes.
  list(
       authors = "Nicholas Carnes",
       year = 2008,
       description = "Survey-Weighted Poisson Regression for Event Count Dependent Variables"
       )
}
diff --git a/R/print.BetaReg.R b/R/print.BetaReg.R
deleted file mode 100644
index bb176af..0000000
--- a/R/print.BetaReg.R
+++ /dev/null
@@ -1,18 +0,0 @@
-print.BetaReg <- function(x, digits = getOption("digits"), ...) {
-  cat("\nCall: ", deparse(x$call), "\n", fill = TRUE)
-  cat("Coefficients:\n")
-  if(is.matrix(x$coef)) {
-    print(x$coef[1:nrow(x$coef) - 1,], digits = digits, ...)
-    cat("\n")
-    phi <- c(x$coef[nrow(x$coef),])
-    names(phi) <- colnames(x$coef)
-    cat("Dispersion parameter (phi): \n")
-    print(phi, digits = digits, ...)
-  }
-  else {
-    print(x$coef[1:length(x$coef) - 1], digits = digits, ...)
-    cat("\n")
-    cat("Dispersion parameter (phi) = ", x$coef[length(x$coef)], "\n")
-  }
-  invisible(x)
-}
diff --git a/R/print.R b/R/print.R
new file mode 100644
index 0000000..18e2609
--- /dev/null
+++ b/R/print.R
@@ -0,0 +1,411 @@
#' Print a Zelig Object
#'
#' Displays the model name, the package supplying the fitter, and the call
#' that produced the object.
#' NOTE(review): a second, different `print.zelig` definition appears later
#' in this file and will overwrite this one when the file is sourced top to
#' bottom -- confirm which definition is intended to win.
#' @S3method print zelig
#' @param x a 'zelig' object
#' @param ... ignored parameters
#' @return the 'zelig' object (invisibly)
print.zelig <- function (x, ...) {
  name <- x$name
  package.name <- x$package.name
  call <- x$call

  cat("Model Name: ", name, "\n")
  cat("Package Name: ", package.name, "\n")
  cat("Call:\n")
  print(call)

  # Point users at summary() for the fitted-model details
  message("\nFor information about the fitted model, use the summary() function.")

  # Return invisibly
  invisible(x)
}
#' Print a Bundle of Data-sets
#'
#' @S3method print setx.mi
#' @usage \method{print}{setx.mi}(x, ...)
#' @param x a \code{setx} object to print
#' @param ... ignored parameters
#' @return the \code{setx} object (invisibly)
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.setx.mi <- function(x, ...) {
  # Store size for readability
  size <- length(x)

  # BUG FIX: the loop was `for (k in 1:size)`, which evaluates to c(1, 0)
  # for an empty bundle and then fails on x[[1]]; seq_len() iterates zero
  # times when size is 0.
  for (k in seq_len(size)) {
    # Print object
    print(x[[k]])

    # If this is not the last element, print a new-line
    if (k < size)
      cat("\n")
  }

  invisible(x)
}
#' Print values of `setx' objects
#'
#' Print a ``setx'' object in human-readable form: the originating call, the
#' model name and formula, the completed data frame, and the design matrix.
#' @usage \method{print}{setx}(x, ...)
#' @S3method print setx
#' @param x a `setx' object
#' @param ... ignored parameters
#' @return NULL, invisibly (called for its printing side effect)
#' @export
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.setx <- function(x, ...) {
  # Header: the call that produced this object
  cat("Call:\n")
  print(x$call)

  # Model identification
  cat("Model name = ", x$name, "\n")
  cat("Formula    = ")
  print(x$formula)

  # The filled-in observation(s)
  cat("\nComplete data.frame:\n")
  print(x$updated)

  # The corresponding design matrix
  cat("\nModel Matrix (Design Matrix):\n")
  print(x$matrix)

  invisible()
}
#' Print a Summary of a 'setx' Object
#'
#' Displays the model name, label, formula, originating call, and design
#' matrix stored on a summarized 'setx' object.
#' @S3method print summary.setx
#' @param x a 'summary.setx' object
#' @param ... ignored parameters
#' @return x (invisibly)
print.summary.setx <- function (x, ...) {
  cat("\nModel name =", x$model.name, "\n")
  cat("Label      =", x$label, "\n")
  cat("Formula    = ")
  print(x$formula)

  cat("\nCall:\n")
  print(x$call)

  cat("\nModel Matrix (Design Matrix):\n")
  print(x$model.matrix)

  invisible(x)
}
#' Print values of `sim' objects
#'
#' Displays a `sim' object using the default list printing.
#' @usage \method{print}{sim}(x, ...)
#' @S3method print sim
#' @param x a `sim' object
#' @param ... ignored parameters
#' @return the object, reclassed as a plain list (invisibly)
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.sim <- function(x, ...) {
  # Demote to a plain list so print.default handles the display rather than
  # re-dispatching to this method.
  plain <- x
  class(plain) <- 'list'
  print(plain)
}
#' Print a Summary MCMCZelig Object
#'
#' This method prints a summary object for \code{MCMCZelig} objects,
#' describing the MCMC run (iteration window, thinning, chains, per-chain
#' sample size) followed by the rounded posterior summary table.
#' @param x an "MCMCZelig" object
#' @param digits a numeric specifying the precision of the summary object
#' @param ... ignored parameters
#' @return NULL (the value of the final cat); called for its printing side
#'   effect
#' @S3method print summary.MCMCZelig
print.summary.MCMCZelig <- function(x, digits=max(3, getOption("digits") - 
3), ...) {
  cat("\nCall: ") 
  print(x$call) 
  # MCMC run description
  cat("\n", "Iterations = ", x$start, ":", x$end, "\n", sep = "")
  cat("Thinning interval =", x$thin, "\n")
  cat("Number of chains =", x$nchain, "\n")
  cat("Sample size per chain =", (x$end -
  x$start)/x$thin + 1, "\n")
  cat("\n", "Mean, standard deviation, and quantiles for marginal posterior distributions.", "\n")
  print(round(x$summary, digits=digits))
  cat("\n")
}
#' Print a Summary of a Robust GLM
#'
#' Prints the underlying GLM summary, then notes which robust
#' variance-estimation method was used.
#' @param x a 'summary.glm.robust' object
#' @param digits,symbolic.cor,signif.stars accepted for interface
#'   compatibility with print.summary.glm; not forwarded directly (the
#'   reclassed print() call below uses its own defaults)
#' @param ... ignored parameters
#' @return x, reclassed as 'summary.glm' (invisibly)
print.summary.glm.robust <-
    function (x, digits = max(3, getOption("digits") - 3),
	      symbolic.cor = x$symbolic.cor,
	      signif.stars = getOption("show.signif.stars"), ...)
{
  # Reclass so print() dispatches to stats' print.summary.glm
  class(x) <- "summary.glm"
  print(x)
  cat("\nRobust standard errors computed using", x$robust)
  cat("\n")
  invisible(x)
}
#' Print a Summary of a Set of Pooled Simulated Interests
#'
#' Prints the summary information from a set of pooled simulated interests.
#' For each quantity-of-interest title, the per-label summary rows are
#' stacked into one matrix (so the stored entries must be usable with
#' ``rbind'') and printed under that title.
#' @usage \method{print}{summary.pooled.sim}(x, ...)
#' @S3method print summary.pooled.sim
#' @param x a ``summary.pooled.sim'' object, containing summarized
#' information about simulated quantities of interest
#' @param ... Optional parameters that will be passed onward to
#' ``print.matrix'' (the matrix printing function)
#' @return a ``summary.pooled.sim'' object storing the quantities of interest
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.summary.pooled.sim <- function (x, ...) {
  # "the labels" / "the titles" of the pooled summaries
  the.labels <- x$labels
  the.titles <- x$titles

  # One printed matrix per quantity-of-interest title
  for (title in the.titles) {

    # Collect this title's summary row for every label, then stack them
    rows <- lapply(the.labels, function (label) x$stats[[label]][[title]])
    m <- do.call(rbind, rows)

    rownames(m) <- paste("[", the.labels, "]", sep="")

    cat(title, "\n")
    print(m)
    cat("\n\n")
  }
}
#' Print Summary of a Rare-event Logistic Model
#'
#' Prints the underlying GLM summary, then reports any rare-events specific
#' adjustments that were applied: prior correction, weighting,
#' bias correction, and robust standard errors.
#' @usage
#' \method{print}{summary.relogit}(x, digits = max(3, getOption("digits") - 3), ...)
#' @S3method print summary.relogit
#' @param x an ``relogit.summary'' object produced by the ``summary'' method.
#' @param digits an integer specifying the number of digits of precision to
#' specify
#' @param ... parameters passed forward to the ``print.glm'' function
#' @return x (invisibly)
print.summary.relogit <- function(
                                  x,
                                  digits = max(3, getOption("digits") - 3),
                                  ...
                                  ) {
  # Straight-forwardly print the model using glm's method
  # NOTE(review): print.glm is called directly here; it is not exported by
  # stats in current R, so this presumably relies on the package namespace
  # resolving it -- confirm.
  print.glm(x, digits = digits, ...)

  #  Additional slots

  # Report prior correction, if it was performed
  if (x$prior.correct) 
    cat("\nPrior correction performed with tau =", x$tau, "\n")

  # Weighting? Sure, if it exists, we'll print it.
  if (x$weighting) 
    cat("\nWeighting performed with tau =", x$tau, "\n")

  # If there is bias-correction
  if (x$bias.correct)
    cat("Rare events bias correction performed\n")

  # If robust errors are computed...
  if (!is.null(x$robust))
    cat("\nRobust standard errors computed using", x$robust, "\n")

  # This is not a mutator assignment!
  class(x) <- "summary.glm"

  # Return object to be printed invisibly
  invisible(x)  
}
#' Print Summary of a Rare-event Logistic Model (two-estimate form)
#'
#' Prints the originating call followed by the lower and upper bounding
#' estimates produced when a tau interval was supplied.
#' @usage
#' \method{print}{summary.relogit2}(x, digits = max(3, getOption("digits") - 3), ...)
#' @S3method print summary.relogit2
#' @param x the object to print
#' @param digits an integer specifying the number of digits of precision
#'   (accepted for interface compatibility; not used directly)
#' @param ... ignored parameters
#' @return the printed upper estimate (invisibly, via the final print call)
print.summary.relogit2 <- function(x,
                                   digits = max(3, getOption("digits") - 3),
                                  ...
                                  ) {
  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
  print(x$lower.estimate)
  print(x$upper.estimate)
}
#' Print Values of a Summarized ``sim'' Object
#'
#' Print values of simulated quantities of interest (stored in a ``summary.sim''
#' object.
#' @usage \method{print}{summary.sim}(x, ...)
#' @S3method print summary.sim
#' @param x a 'summary.sim' object
#' @param ... ignored parameters
#' @return the value of the `summary.sim' object (invisibly)
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.summary.sim <- function(x, ...) {
  # Rename 'x' 'summary'
  summary <- x

  # Unpack the components used below
  obj <- summary$zeligcall
  model <- summary$model
  x <- summary$x
  x1 <- summary$x1
  stats <- summary$stats
  num <- summary$num

  # Error if there are no statistics to display
  if (is.null(stats))
    stop("stats object cannot be NULL")

  # new-line
  cat("\n")

  # Print model name
  cat("Model: ", model, "\n")

  # Print number of simulations
  cat("Number of simulations: ", num, "\n")

  # new-line
  cat("\n")

  # Display information about the X setx object
  # This should probably be reconsidered in the future
  if (!is.null(x$matrix)) {
    cat("Values of X\n")
    print(as.matrix(x$matrix))

    # new-line
    cat("\n")
  }
  else if (is.list(x$s.x)) {
    # add special hooks here?
  }

  # Display information about the X1 setx object
  # This should probably be reconsidered in the future
  if (!is.null(x1$matrix)) {
    cat("Values of X1\n")
    print(as.matrix(x1$matrix))

    # new-line
    cat("\n")
  }

  # Decrementing the size of the list will give us an easy way to print
  size <- length(stats)

  # Loop across all qi's
  for (key in names(stats)) {
    # Create variable for code clarity
    val <- stats[[key]]

    # Skip entries that are not quantities of interest
    if (!is.qi(val))
      next

    # Display Title
    cat(key, "\n")

    # Round value to 3 decimal places if numeric
    if (is.numeric(val))
      print(round(val*(1000))/1000)

    # Simply print if anything else
    else
      print(val)

    # Print a new-line between qi's.  Note the deliberate assignment inside
    # the condition: `size` counts down, so the separator is skipped after
    # the last entry (when size reaches 0, which is falsy).
    if (size <- size - 1) {
      cat("\n")
    }
  }

  # Return invisibly
  invisible(x)
}
#' Print Multiply Imputed Simulations Summary
#'
#' Prints summary information about Multiply Imputed Fits: each well-formed
#' quantity-of-interest entry is pooled via \code{qi.summarize} and printed.
#' @usage \method{print}{summarySim.MI}(x, digits=3, ...)
#' @S3method print summarySim.MI
#' @param x a 'summarySim.MI' object
#' @param digits an integer specifying the number of digits of precision to
#'   print (accepted for interface compatibility)
#' @param ... ignored parameters
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.summarySim.MI <- function(x, digits=3, ...) {
  for (qi.name in names(x)) {
    entry <- x[[qi.name]]

    # Skip anything that is not a well-formed list of qi matrices
    if (!is.valid.qi.list(entry))
      next

    # Pool across imputations and display
    print(qi.summarize(qi.name, entry))
    cat("\n")
  }

  invisible(x)
}
+
#' Row-bind Matrices and Lists
#'
#' Binds two sets of named values into a single matrix whose columns are the
#' union of both inputs' names; positions missing from either input are NA.
#' @param x a named vector or a matrix
#' @param y a named vector or a matrix (optional)
#' @return a matrix
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
.bind <- function (x, y) {

  # Coerce a bare (named) vector into a single-row matrix
  if (!is.matrix(x))
    x <- matrix(x, nrow=1, ncol=length(x), dimnames=list(NULL, names(x)))

  # With a single argument there is nothing to bind
  if (missing(y))
    return(x)

  # BUG FIX: the original read `dimnames-list(...)` (a subtraction, passed
  # positionally as `byrow`) instead of `dimnames=list(...)`, which raised
  # an error whenever `y` was not already a matrix.
  if (!is.matrix(y))
    y <- matrix(y, nrow=1, ncol=length(y), dimnames=list(NULL, names(y)))

  # The union of column names determines the output width
  names <- unique(c(colnames(x), colnames(y)))

  ncol <- length(names)

  # Expand both inputs to the full set of columns, filling gaps with NA
  X <- matrix(NA, nrow=nrow(x), ncol=ncol, dimnames=list(NULL, names))
  Y <- matrix(NA, nrow=nrow(y), ncol=ncol, dimnames=list(NULL, names))

  X[, colnames(x)] <- x
  Y[, colnames(y)] <- y

  rbind(X, Y)
}
+
#' Check If Object Is a List of Valid Quantities of Interest
#'
#' Valid means: a non-empty list whose matrix entries have at least one row
#' and one column, and whose list entries are non-empty.
#' @param x an object to be tested
#' @return TRUE or FALSE
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
is.valid.qi.list <- function (x) {

  # must be a non-empty list
  if (!(is.list(x) && length(x)))
    return(FALSE)

  # every entry must be a non-degenerate matrix or a non-empty list

  for (val in x) {

    # BUG FIX: the original tested `ncol(val) && ncol(val)` (the same value
    # twice), so a matrix with zero rows but at least one column was
    # incorrectly accepted; the intent was clearly rows AND columns.
    if (is.matrix(val) && !(nrow(val) && ncol(val)))
      return(FALSE)

    else if (is.list(val) && !length(val))
      return(FALSE)
  }

  TRUE
}
#' Print values of ``zelig'' objects
#'
#' Print the zelig object as a plain list.
#' NOTE(review): this redefines `print.zelig`, shadowing the summary-style
#' definition earlier in this file -- confirm which one is intended to win.
#' @usage \method{print}{zelig}(x, ...)
#' @S3method print zelig
#' @param x a `zelig' object
#' @param ... ignored parameters
#' @return the `zelig' object (invisibly)
#' @export 
#' @author Matt Owen \email{mowen@@iq.harvard.edu}
print.zelig <- function(x, ...) {
  # Demote to a plain list so print.default handles the display rather than
  # re-dispatching to this method.
  plain <- x
  class(plain) <- "list"
  print(plain)
}
diff --git a/R/print.arimaSummary.R b/R/print.arimaSummary.R
deleted file mode 100644
index e805f72..0000000
--- a/R/print.arimaSummary.R
+++ /dev/null
@@ -1,11 +0,0 @@
-print.arimaSummary <- function(x, digits = min(getOption("digits"), 3), ...) {
-  cat("\nModel:", x$zelig.call$model, "\n", sep = " ")
-  cat("Number of simulations: ", x$number.sim, "\n\n")
-  dimnames(x$test.array)[[3]] <- dimnames(x$test.array)[[3]]
-  cat("Available Quantities of Interest: \n")
-  for (i in 1:length(dimnames(x$test.array)[[3]])) {
-    cat("\n", dimnames(x$test.array)[[3]][i], "\n")
-    print(x$test.array[,,i], digits = digits, ...)
-  }
-  return(invisible())
-}
diff --git a/R/print.coxhazard.R b/R/print.coxhazard.R
deleted file mode 100644
index 6005943..0000000
--- a/R/print.coxhazard.R
+++ /dev/null
@@ -1,8 +0,0 @@
-print.coxhazard <- function(x,...){
-  haz <- matrix(x, ncol=1)
-  colnames(haz) <- "hazard"
-  rownames(haz) <- rownames(x)
-  print(haz)
-}
-
-
diff --git a/R/print.eiRxC.R b/R/print.eiRxC.R
deleted file mode 100644
index 471b94d..0000000
--- a/R/print.eiRxC.R
+++ /dev/null
@@ -1,6 +0,0 @@
-print.eiRxC <- function(x, digits = max(getOption("digits"), 4), ...) {
-  cat("\nCall: \n")
-  print.formula(x$call)
-  cat("\nCoefficients: \n")
-  print(x$coefficients, digits = digits, ...)
-}
diff --git a/R/print.names.relogit.R b/R/print.names.relogit.R
deleted file mode 100644
index 5eb8d50..0000000
--- a/R/print.names.relogit.R
+++ /dev/null
@@ -1,10 +0,0 @@
-print.names.relogit<-function(x, ...){
-  if (length(x$tau) == 2) {
-    print(x$default, ...)
-    cat(paste("Additional objects available in lower.estimate and upper.estimate: \n"))
-    print(x$estimate)
-  }
-  else
-    print(x$default, ...)
-}
-
diff --git a/R/print.names.zelig.R b/R/print.names.zelig.R
deleted file mode 100644
index d04555f..0000000
--- a/R/print.names.zelig.R
+++ /dev/null
@@ -1,3 +0,0 @@
-print.names.zelig <- function(x, ...){
-  print(x$default, ...)
-}
diff --git a/R/print.relogit.R b/R/print.relogit.R
deleted file mode 100644
index bef9cc7..0000000
--- a/R/print.relogit.R
+++ /dev/null
@@ -1,11 +0,0 @@
-print.relogit <- function(x, digits = max(3, getOption("digits") - 3),
-                          ...) {
-  print.glm(x)
-  if (x$prior.correct) 
-    cat("Prior correction performed with tau =", x$tau, "\n")
-  if (x$weighting) 
-    cat("Weighting performed with tau =", x$tau, "\n")
-  if (x$bias.correct)
-    cat("Rare events bias correction performed\n") 
-  invisible(x)
-}
diff --git a/R/print.relogit2.R b/R/print.relogit2.R
deleted file mode 100644
index e551d3c..0000000
--- a/R/print.relogit2.R
+++ /dev/null
@@ -1,7 +0,0 @@
-print.relogit2 <- function(x, digits = max(3, getOption("digits") - 3),
-                          ...) {
-  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
-  print.relogit(x$lower.estimate)
-  print.relogit(x$upper.estimate)
-}
-             
diff --git a/R/print.summary.MCMCZelig.R b/R/print.summary.MCMCZelig.R
deleted file mode 100644
index ebd035c..0000000
--- a/R/print.summary.MCMCZelig.R
+++ /dev/null
@@ -1,13 +0,0 @@
-print.summary.MCMCZelig <- function(x, digits=max(3, getOption("digits") - 
-3), ...) {
-  cat("\nCall: ") 
-  print(x$call) 
-  cat("\n", "Iterations = ", x$start, ":", x$end, "\n", sep = "")
-  cat("Thinning interval =", x$thin, "\n")
-  cat("Number of chains =", x$nchain, "\n")
-  cat("Sample size per chain =", (x$end -
-  x$start)/x$thin + 1, "\n")
-  cat("\n", "Mean, standard deviation, and quantiles for marginal posterior distributions.", "\n")
-  print(round(x$summary, digits=digits))
-  cat("\n")
-}
diff --git a/R/print.summary.MI.R b/R/print.summary.MI.R
deleted file mode 100644
index 7c202d7..0000000
--- a/R/print.summary.MI.R
+++ /dev/null
@@ -1,28 +0,0 @@
-print.summary.MI <- function(x, subset = NULL, ...){
-  m <- length(x$all)
-  if (m == 0)
-    m <- 1
-  if (any(subset > max(m)))
-    stop("the subset selected lies outside the range of available \n        observations in the MI regression output.")
-  cat("\n  Model:", x$zelig)
-  cat("\n  Number of multiply imputed data sets:", m, "\n")
-  if (is.null(subset)) {
-    cat("\nCombined results:\n\n")
-    cat("Call:\n")
-    print(x$call)
-    cat("\nCoefficients:\n")
-    print(x$coefficients)
-    cat("\nFor combined results from datasets i to j, use summary(x, subset = i:j).\nFor separate results, use print(summary(x), subset = i:j).\n\n")
-  }
-  else {
-    if (is.function(subset))
-      M <- 1:m
-    if (is.numeric(subset))
-      M <- subset
-    for(i in M){
-      cat(paste("\nResult with dataset", i, "\n"))
-      print(x$all[[i]], ...)
-    }
-  }
-}
-
diff --git a/R/print.summary.glm.robust.R b/R/print.summary.glm.robust.R
deleted file mode 100644
index e9d0fe7..0000000
--- a/R/print.summary.glm.robust.R
+++ /dev/null
@@ -1,12 +0,0 @@
-print.summary.glm.robust <-
-    function (x, digits = max(3, getOption("digits") - 3),
-	      symbolic.cor = x$symbolic.cor,
-	      signif.stars = getOption("show.signif.stars"), ...)
-{
-  class(x) <- "summary.glm"
-  print(x)
-  cat("\nRobust standard errors computed using", x$robust)
-  cat("\n")
-  invisible(x)
-}
-
diff --git a/R/print.summary.lm.robust.R b/R/print.summary.lm.robust.R
deleted file mode 100644
index 847a2ed..0000000
--- a/R/print.summary.lm.robust.R
+++ /dev/null
@@ -1,72 +0,0 @@
-print.summary.lm.robust <-
-    function (x, digits = max(3, getOption("digits") - 3),
-              symbolic.cor = x$symbolic.cor,
-	      signif.stars= getOption("show.signif.stars"),	...)
-{
-    cat("\nCall:\n")#S: ' ' instead of '\n'
-    cat(paste(deparse(x$call), sep="\n", collapse = "\n"), "\n\n", sep="")
-    resid <- x$residuals
-    df <- x$df
-    rdf <- df[2]
-    cat(if(!is.null(x$w) && diff(range(x$w))) "Weighted ",
-        "Residuals:\n", sep="")
-    if (rdf > 5) {
-	nam <- c("Min", "1Q", "Median", "3Q", "Max")
-	rq <- if (length(dim(resid)) == 2)
-	    structure(apply(t(resid), 1, quantile),
-		      dimnames = list(nam, dimnames(resid)[[2]]))
-	else  structure(quantile(resid), names = nam)
-	print(rq, digits = digits, ...)
-    }
-    else if (rdf > 0) {
-	print(resid, digits = digits, ...)
-    } else { # rdf == 0 : perfect fit!
-	cat("ALL", df[1], "residuals are 0: no residual degrees of freedom!\n")
-    }
-    if (length(x$aliased) == 0) {
-        cat("\nNo Coefficients\n")
-    } else {
-        if (nsingular <- df[3] - df[1])
-            cat("\nCoefficients: (", nsingular,
-                " not defined because of singularities)\n", sep = "")
-        else cat("\nCoefficients:\n")
-        coefs <- x$coefficients
-        if(!is.null(aliased <- x$aliased) && any(aliased)) {
-            cn <- names(aliased)
-            coefs <- matrix(NA, length(aliased), 4, dimnames=list(cn, colnames(coefs)))
-            coefs[!aliased, ] <- x$coefficients
-        }
-
-        printCoefmat(coefs, digits=digits, signif.stars=signif.stars, na.print="NA", ...)
-    }
-    ##
-    cat("\nResidual standard error:",
-	format(signif(x$sigma, digits)), "on", rdf, "degrees of freedom\n")
-    if (!is.null(x$fstatistic)) {
-	cat("Multiple R-Squared:", formatC(x$r.squared, digits=digits))
-	cat(",\tAdjusted R-squared:",formatC(x$adj.r.squared,digits=digits),
-	    "\nF-statistic:", formatC(x$fstatistic[1], digits=digits),
-	    "on", x$fstatistic[2], "and",
-	    x$fstatistic[3], "DF,  p-value:",
-	    format.pval(pf(x$fstatistic[1], x$fstatistic[2],
-                           x$fstatistic[3], lower.tail = FALSE), digits=digits),
-	    "\n")
-    }
-    correl <- x$correlation
-    if (!is.null(correl)) {
-	p <- NCOL(correl)
-	if (p > 1) {
-	    cat("\nCorrelation of Coefficients:\n")
-	    if(is.logical(symbolic.cor) && symbolic.cor) {# NULL < 1.7.0 objects
-		print(symnum(correl, abbr.colnames = NULL))
-	    } else {
-                correl <- format(round(correl, 2), nsmall = 2, digits = digits)
-                correl[!lower.tri(correl)] <- ""
-                print(correl[-1, -p, drop=FALSE], quote = FALSE)
-            }
-	}
-    }
-    cat("\nRobust standard errors computed using", x$robust)
-    cat("\n")#- not in S
-    invisible(x)
-}
diff --git a/R/print.summary.relogit.R b/R/print.summary.relogit.R
deleted file mode 100644
index 824ad06..0000000
--- a/R/print.summary.relogit.R
+++ /dev/null
@@ -1,14 +0,0 @@
-print.summary.relogit <- function(x, digits = max(3, getOption("digits") - 3),
-                                  ...){
-  class(x) <- "summary.glm"
-  print(x, digits = digits, ...)
-  if (x$prior.correct) 
-    cat("\nPrior correction performed with tau =", x$tau, "\n")
-  if (x$weighting) 
-    cat("\nWeighting performed with tau =", x$tau, "\n")
-  if (x$bias.correct)
-    cat("Rare events bias correction performed\n")
-  if (!is.null(x$robust))
-    cat("Robust standard errors computed using", x$robust, "\n")
-  invisible(x)  
-}
diff --git a/R/print.summary.relogit2.R b/R/print.summary.relogit2.R
deleted file mode 100644
index 8772513..0000000
--- a/R/print.summary.relogit2.R
+++ /dev/null
@@ -1,6 +0,0 @@
-print.summary.relogit2 <- function(x, digits = max(3, getOption("digits") - 3),
-                                  ...){
-  cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
-  print(x$lower.estimate)
-  print(x$upper.estimate)
-}
diff --git a/R/print.summary.strata.R b/R/print.summary.strata.R
deleted file mode 100644
index fbe5d84..0000000
--- a/R/print.summary.strata.R
+++ /dev/null
@@ -1,16 +0,0 @@
-print.summary.strata <- function(x, subset = NULL, ...){
-  if (is.null(subset))
-    m <- length(x$M)
-  else if (any(subset > max(m)))
-    stop("the subset selected lies outside the range of available \n        sets of regression output.")
-  else
-    m <- subset
-  cat("\n  Model:", x$call$model)
-  cat("\n  Number of subsets evaluated:", m, "\n")
-  for (i in 1:m) {
-    cat(paste("\nResults for", x$by[1], "=", x$lev[i], "\n"))
-    print(x[[i]])
-    cat("\n")
-  }
-}
-
diff --git a/R/print.summary.zelig.R b/R/print.summary.zelig.R
deleted file mode 100644
index d01d15e..0000000
--- a/R/print.summary.zelig.R
+++ /dev/null
@@ -1,68 +0,0 @@
-print.summary.zelig <- function(x, digits=getOption("digits"),
-                              print.x=FALSE, ...){
-  cat("\n  Model:", x$model, "\n")
-  if (!is.null(x$num))
-      cat("  Number of simulations:", x$num, "\n")
-  if (!is.null(x$x)) {
-    if (print.x || nrow(x$x) == 1 || is.null(dim(x$x))) {
-      if (any(class(x$x) == "cond"))
-        cat("\nObserved Data \n")
-      else
-        cat("\nValues of X \n")
-      print(x$x, digits=digits, ...)
-      if(!is.null(x$x1)){
-        cat("\nValues of X1 \n")
-        print(x$x1, digits=digits, ...)
-      }
-    }
-    else {
-      if (any(class(x$x) == "cond"))
-        cat("\nMean Values of Observed Data (n = ", nrow(x$x), ") \n", sep = "")
-      else
-        cat("\nMean Values of X (n = ", nrow(x$x), ") \n", sep = "")
-      print(apply(x$x, 2, mean), digits=digits, ...)
-      if (!is.null(x$x1)) {
-        cat("\nMean Values of X1 (n = ", nrow(x$x1), ") \n", sep = "")
-        print(apply(x$x1, 2, mean), digits=digits, ...) 
-      }
-    }
-  }
-  for (i in 1:length(x$qi.name)){
-    indx <- pmatch(names(x$qi.name[i]), names(x$qi.stats))
-    tmp <- x$qi.stats[[indx]]
-#    if (names(x$qi.name)[indx] == "pr" && colnames(tmp)[1] != "mean")
-#      lab <- paste(x$qi.name[[i]], "(percentage of simulations)", sep = " ")
-#    else
-      lab <- x$qi.name[[i]]
-    cat("\n", lab, "\n", sep = "")
-    if (length(dim(tmp)) == 3) {
-        for (j in 1:dim(tmp)[3]){
-          cat("\n  Observation", dimnames(tmp)[[3]][j], "\n")
-          if (is.null(rownames(tmp[,,j])))
-            rownames(tmp[,,j]) <- 1:nrow(tmp[,,j])
-          if (!is.null(names(tmp[,,j])))
-            names(tmp[,,j]) <- NULL
-          print(tmp[,,j], digits=digits, ...)
-        }
-      }
-    else {
-      if (is.matrix(tmp) & is.null(rownames(tmp)))
-        rownames(tmp) <- 1:nrow(tmp)
-#      if (!is.null(names(tmp)))
-#        names(tmp) <- NULL
-      print(tmp, digits=digits, ...)
-    }
-  }
-}
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/print.summary.zelig.strata.R b/R/print.summary.zelig.strata.R
deleted file mode 100644
index a7c9c21..0000000
--- a/R/print.summary.zelig.strata.R
+++ /dev/null
@@ -1,14 +0,0 @@
-print.summary.zelig.strata <- function(x, subset = NULL, ...){
-  if (is.null(subset))
-    m <- length(x)
-  else if (any(subset > max(m)))
-    stop("the subset selected lies outside the range of available \n        sets of regression output.")
-  else
-    m <- subset
-  for (i in 1:m) {
-    cat(paste("\nResults for", names(x)[i], "\n"))
-    print(x[[i]])
-    cat("\n")
-  }
-}
-
diff --git a/R/print.zaovlist.R b/R/print.zaovlist.R
deleted file mode 100644
index fe4ea42..0000000
--- a/R/print.zaovlist.R
+++ /dev/null
@@ -1,12 +0,0 @@
-print.zaovlist <- function (x, ...) 
-{
-    cl <- attr(x, "call")
-    if (!is.null(cl)) {
-      attr(x,"call") <- NULL
-     
-    }
-       
-    
-    stats:::print.aovlist(x,...)
-}
-   
diff --git a/R/print.zelig.R b/R/print.zelig.R
deleted file mode 100644
index 1fd6a5d..0000000
--- a/R/print.zelig.R
+++ /dev/null
@@ -1,12 +0,0 @@
-print.zelig <- function (x, digits = max(3, getOption("digits") - 3), ...) 
-{
-    cat("\nModel:", x$zelig.call$model, "\n", sep = " ")
-    cat("Number of simulations:", x$call$num, "\n\n", sep = " ")
-    idx <- unlist(x$qi.name)
-    cat("Available Quantities of Interest: \n")
-    for (i in 1:length(x$qi.name)){
-      cat(paste("  qi$", names(x$qi)[i], " = ", idx[[i]], "\n", sep = ""))
-    }
-    cat("\nPlease use summary() to obtain more information. \n")
-    invisible(x)
-}
diff --git a/R/probit.R b/R/probit.R
new file mode 100644
index 0000000..953ee05
--- /dev/null
+++ b/R/probit.R
@@ -0,0 +1,73 @@
+#' Interface between probit model and Zelig
+#' This function is exclusively for use by the `zelig' function
+#' @param formula a formula
+#' @param weights a numeric vector
+#' @param ... ignored parameters
+#' @param data a data.frame
+#' @return a list to be coerced into a zelig.call object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+zelig2probit <- function(formula, weights=NULL, ..., data)
+  z(
+    glm,
+    # .hook = "robust.glm.hook",
+    formula = formula,
+    weights = weights,
+    family  = binomial(link="probit"),
+    model   = F,
+    data    = data
+    )
+#' Param Method for the 'probit' Zelig Model
+#' @note This method is used by the 'probit' Zelig model
+#' @usage \method{param}{probit}(obj, num=1000, ...)
+#' @S3method param probit
+#' @param obj a 'zelig' object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... ignored
+#' @return a list to be cast as a 'parameters' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.probit <- function(obj, num=1000, ...) {
+  list(
+       simulations = mvrnorm(n=num, mu=coef(.fitted), Sigma=vcov(.fitted)),
+       alpha = NULL,
+       fam = binomial(link="probit")
+       )
+}
+#' Compute quantities of interest for 'probit' Zelig models
+#' @usage \method{qi}{probit}(obj, x, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi probit
+#' @param obj a 'zelig' object
+#' @param x a 'setx' object or NULL
+#' @param x1 an optional 'setx' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#'   though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of
+#'   interest with their simulations
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.probit <- qi.logit
+#' Describe the `probit' model to Zelig
+#' @usage \method{describe}{probit}(...)
+#' @S3method describe probit
+#' @param ... ignored parameters
+#' @return a list to be processed by `as.description'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+describe.probit <- function(...){
+  parameters <-list(mu = list(
+                      equations = c(1,1),
+                      tags.allowed = FALSE,
+                      dep.vars = TRUE,
+                      exp.vars = TRUE
+                      )
+                    )
+  
+  # return
+  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+       year     = 2007,
+       category = "dichotomous",
+       parameters = parameters,
+       text = "Probit Regression for Dichotomous Dependent Variables"
+       )
+}
diff --git a/R/probit.bayes.R b/R/probit.bayes.R
new file mode 100644
index 0000000..db10cbc
--- /dev/null
+++ b/R/probit.bayes.R
@@ -0,0 +1,48 @@
+#' @export
+zelig2probit.bayes <- function (
+                               formula, 
+                               burnin = 1000, mcmc = 10000, 
+                               verbose=0, 
+                               ..., 
+                               data
+                               ) {
+
+  loadDependencies("MCMCpack", "coda")
+
+  if (missing(verbose))
+    verbose <- round((mcmc + burnin)/10)
+
+  list(
+       .function = "MCMCprobit",
+       .hook = "MCMChook",
+
+       formula = formula,
+       data   = data,
+       burnin = burnin,
+       mcmc   = mcmc,
+       verbose= verbose,
+
+       # Most parameters can be simply passed forward
+       ...
+       )
+}
+
+#' @S3method param probit.bayes
+param.probit.bayes <- function(obj, num=1000, ...) {
+  list(
+       coef = coef(obj),
+       fam  = binomial(link="probit")
+       )
+}
+
+#' @S3method qi probit.bayes
+qi.probit.bayes <- qi.logit.bayes
+
+#' @S3method describe probit.bayes
+describe.probit.bayes <- function(...) {
+  list(
+       description  = "Bayesian Probit Regression for Dichotomous Dependent Variables",
+       authors = c("Ben Goodrich", "Ying Lu"),
+       year = 2013
+       )
+}
diff --git a/R/probit.gee.R b/R/probit.gee.R
new file mode 100644
index 0000000..f12d259
--- /dev/null
+++ b/R/probit.gee.R
@@ -0,0 +1,71 @@
+#' Interface between the Zelig Model probit.gee and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param id a character-string specifying the column of the data-set to use
+#'   for clustering
+#' @param robust a logical specifying whether to robustly or naively compute
+#'   the covariance matrix. This parameter is ignore in the \code{zelig2}
+#'   method, and instead used in the \code{robust.hook} function, which
+#'   executes after the call to the \code{gee} function
+#' @param ... ignored parameters
+#' @param R a square-matrix specifying the correlation
+#' @param corstr a character-string specifying the correlation structure
+#' @param data a data.frame 
+#' @return a list specifying the call to the external model
+#' @export
+zelig2probit.gee <- function (formula, id, robust, ..., R = NULL, corstr = "independence", data) {
+
+  loadDependencies("gee")
+
+  if (corstr == "fixed" && is.null(R))
+    stop("R must be defined")
+
+  # if id is a valid column-name in data, then we just need to extract the
+  # column and re-order the data.frame and cluster information
+  if (is.character(id) && length(id) == 1 && id %in% colnames(data)) {
+    id <- data[, id]
+    data <- data[order(id), ]
+    id <- sort(id)
+  }
+
+  z(
+    .function = gee,
+    .hook = robust.gee.hook,
+
+    formula = formula,
+    id = id,
+    corstr = corstr,
+    family  = binomial(link="probit"),
+    data = data,
+    R = R,
+    ...
+    )
+}
+
+#' @S3method param probit.gee
+param.probit.gee <- function(obj, num=1000, ...) {
+
+  # Extract means to compute maximum likelihood
+  mu <- coef(obj)
+
+  # Extract covariance matrix to compute maximum likelihood
+  Sigma <- vcov(obj)
+
+  #
+  list(
+       coef = mvrnorm(num, mu, Sigma),
+       fam = binomial(link="probit")
+       )
+}
+
+#' @S3method qi probit.gee
+qi.probit.gee <- qi.logit.gee
+
+#' @S3method describe probit.gee
+describe.probit.gee <- function(...) {
+  list(
+       authors = "Patrick Lam",
+       text = "General Estimating Equation for Probit Regression",
+       year = 2011
+       )
+}
diff --git a/R/probit.survey.R b/R/probit.survey.R
new file mode 100644
index 0000000..1e3e982
--- /dev/null
+++ b/R/probit.survey.R
@@ -0,0 +1,101 @@
+#' @export
+zelig2probit.survey <- function(
+                               formula,
+                                weights=NULL, 
+                                ids=NULL,
+                                probs=NULL,
+                                strata = NULL,  
+                                fpc=NULL,
+                                nest = FALSE,
+                                check.strata = !nest,
+                                repweights = NULL,
+                                type,
+                                combined.weights=FALSE,
+                                rho = NULL,
+                                bootstrap.average=NULL, 
+                                scale=NULL,
+                                rscales=NULL,
+                                fpctype="fraction",
+                                return.replicates=FALSE,
+                                na.action="na.omit",
+                                start=NULL,
+                                etastart=NULL, 
+                                mustart=NULL,
+                                offset=NULL, 	      		
+                                model1=TRUE,
+                                method="glm.fit",
+                                x=FALSE,
+                                y=TRUE,
+                                contrasts=NULL,
+                                design=NULL,
+                                data
+                                ) {
+  loadDependencies("survey")
+
+  if (is.null(ids))
+    ids <- ~ 1
+
+  # the following lines designate the design
+  # NOTE: nothing truly special goes on here;
+  #       the below just makes sure the design is created correctly
+  #       for whether or not the replication weights are set
+  design <- if (is.null(repweights))
+    svydesign(
+              data=data,
+              ids=ids,
+              probs=probs,
+              strata=strata,
+              fpc=fpc,
+              nest=nest,
+              check.strata=check.strata,
+              weights=weights
+              )
+
+  else {
+    .survey.prob.weights <- weights
+    svrepdesign(
+                data=data,
+                repweights=repweights, 	
+                type=type,
+                weights=weights,
+                combined.weights=combined.weights, 
+                rho=rho,
+                bootstrap.average=bootstrap.average,
+                scale=scale,
+                rscales=rscales,
+                fpctype=fpctype,
+                fpc=fpc
+                )
+  }
+
+  
+  z(.function = svyglm,
+    formula = formula,
+    design  = design,
+    family  = quasibinomial(link="probit")
+    )
+}
+
+#' @S3method param probit.survey
+param.probit.survey <- function(obj, num=1000, ...) {
+  list(
+       simulations = mvrnorm(num, coef(obj), vcov(obj)),
+       alpha = NULL,
+
+       # note: assignment of link and link-inverse are
+       #       implicit when the family is assigned
+       fam   = binomial(link="probit")
+       )
+}
+
+#' @S3method qi probit.survey
+qi.probit.survey <- qi.logit.survey
+
+#' @S3method describe probit.survey
+describe.probit.survey <- function(...) {
+  list(
+       authors = "Nicholas Carnes",
+       year = 2008,
+       description = "Survey-Weighted Probit Regression for Dichotomous Dependent Variables"
+       )
+}
diff --git a/R/put.start.R b/R/put.start.R
deleted file mode 100644
index fd04392..0000000
--- a/R/put.start.R
+++ /dev/null
@@ -1,17 +0,0 @@
-put.start <- function(start.val, value, terms, eqn) {
-  if (!any(class(terms) == "multiple"))
-    stop("'put.start()' works with 'parse.formula()'.  Use that first!")
-  idx <- names(start.val)
-  const <- attr(terms, "constraints")
-  if (!is.logical(const)) {
-    for (var in colnames(const)) {
-      eqns <- paste(names(na.omit(const[,var])), collapse = ":")
-      idx[idx == var] <- paste(idx[idx == var], eqns, collapse = ":")
-    }
-  }
-  par.id <- NULL
-  for (vars in eqn) 
-    par.id <- c(par.id, grep(vars, idx))
-  start.val[par.id] <- value
-  start.val
-}
diff --git a/R/qi.BetaReg.R b/R/qi.BetaReg.R
deleted file mode 100644
index 3f1c7c2..0000000
--- a/R/qi.BetaReg.R
+++ /dev/null
@@ -1,31 +0,0 @@
-qi.BetaReg <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  k <- ncol(x)
-  coef <- simpar[,1:k]
-  phi <- simpar[,(k+1):ncol(simpar)]
-  eta <- coef %*% t(x)
-  ev <- exp(eta) / (1 + exp(eta))
-  a <- ev * phi
-  b <- phi - ev * phi
-  pr <- matrix(NA, ncol = ncol(ev), nrow = nrow(ev))
-  for (i in 1:ncol(pr))
-    pr[,i] <- sapply(a[,i], rbeta, n = 1, shape2 = b[,i])
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if(!is.null(x1)){
-    eta1 <- coef %*% t(x1)
-    ev1 <- exp(eta1) / (1 + exp(eta1))
-    qi$fd <- ev1 - ev
-    qi.name$fd <-
-      "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    #tmp.ev <- qi$tt.ev <- yvar - qi$ev
-    tmp.ev <- yvar - qi$ev
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    #qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
diff --git a/R/qi.MCMCZelig.R b/R/qi.MCMCZelig.R
deleted file mode 100644
index 8593694..0000000
--- a/R/qi.MCMCZelig.R
+++ /dev/null
@@ -1,293 +0,0 @@
-qi.MCMCZelig <- function(object, simpar=NULL, x, x1 = NULL, y = NULL, ...) {
-  model <- getzelig(object)
-  qi <- list()
-  check <- FALSE
-  if (model %in% c("logit.bayes", "probit.bayes", "oprobit.bayes", "mlogit.bayes")) 
-    check <- TRUE
-  if (model %in% c("logit.bayes","probit.bayes", "normal.bayes",
-                   "poisson.bayes","tobit.bayes")) {
-    if (model == "logit.bayes") {
-      coef <- object$coefficients
-      eta <- coef %*% t(x)
-      pr <- ev <- matrix(NA, nrow = nrow(eta), ncol = ncol(eta))
-      dimnames(pr) <- dimnames(ev) <- dimnames(eta)
-      ev <- 1/(1+exp(-eta))
-      for (i in 1:ncol(ev)) 
-        pr[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i])) 
-      qi$ev <- ev
-      qi$pr <- pr
-      qi.name <- list(ev = "Expected Values: E(Y|X)", pr = "Predicted Values: Y|X")
-    }
-    else if (model == "probit.bayes") {
-      coef <- object$coefficients
-      eta <- coef %*% t(x)
-      pr <- ev <- matrix(NA, nrow = nrow(eta), ncol = ncol(eta))
-      dimnames(pr) <- dimnames(ev) <- dimnames(eta)
-      ev <- pnorm(eta)
-      for (i in 1:ncol(ev)) 
-        pr[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i]))
-      qi$ev <- ev
-      qi$pr <- pr
-      qi.name <- list(ev = "Expected Values: E(Y|X)", pr = "Predicted Values: Y|X")
-    }
-    else if (model =="normal.bayes") {
-      nvar <- ncol(object$coefficients) 
-      coef <- object$coefficients[,1:(nvar-1)]
-      qi$ev <- ev <- coef %*% t(x)
-      qi$pr <- rnorm(nrow(qi$ev), qi$ev,
-  sqrt(object$coefficients[,nvar]))
-      qi.name <- list(ev = "Expected Values: E(Y|X)", pr = "Predicted Values:Y|X")
-      
-    }
-    else if (model =="tobit.bayes") {
-      coef <- object$coefficients[,1:(ncol(object$coefficients)-1)]
-      sig2 <- object$coefficients[,ncol(object$coefficients)]
-      sig <- sqrt(sig2)
-      eta <- coef %*% t(x)
-      ev <- cev <- matrix(NA, nrow = nrow(eta), ncol = ncol(eta))
-      dimnames(cev) <- dimnames(ev) <- dimnames(eta)
-      L2 <- (object$above-eta)/sig
-      L1 <- (object$below-eta)/sig
-      ##cev <- eta + sig*(dnorm(L1)-dnorm(L2))/(pnorm(L2)-pnorm(L1))
-      temp1 <- pnorm(L1)*object$below
-      if (object$below==-Inf) temp1<-0
-
-      temp2 <- (1-pnorm(L2))*object$above
-      if (object$above==Inf) temp2<-0
-
-      qi$ev <-ev <- temp1+eta*(pnorm(L2)-pnorm(L1))+sig*(dnorm(L1)-dnorm(L2))+temp2
-      qi.name <- list(ev = "Expected Values: E(Y|X)")
-    }
-    else if (model == "poisson.bayes") {
-      coef <- object$coefficients
-      eta <- coef %*% t(x)
-      pr <- ev <- matrix(NA, nrow = nrow(eta), ncol = ncol(eta))
-      dimnames(pr) <- dimnames(ev) <- dimnames(eta)
-      ev <- exp(eta)
-      for (i in 1:ncol(ev)) 
-        pr[,i] <- rpois(length(ev[,i]), ev[,i])
-      qi$ev <- ev
-      qi$pr <- pr
-      qi.name <- list(ev = "Expected Values: E(Y|X)",
-                      pr="Predicted Values: Y|X")
-    }
-    if (!is.null(x1)) {
-      eta1 <- coef %*% t(x1)
-      if (model == "logit.bayes") {
-        ev1 <- 1/(1+exp(-eta1))
-        qi$fd <- ev1 - ev
-        qi$rr <- ev1 / ev
-        qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-        qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-      }
-      else if (model == "probit.bayes") {
-        ev1 <- pnorm(eta1)
-        qi$rr <-ev1/ev
-        qi$fd <-ev1-ev
-        qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-        qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-      }
-      else if (model == "normal.bayes") {
-        ev1 <- eta1
-        qi$fd <- ev1 - ev
-        qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-      }
-      else if (model == "tobit.bayes") {
-        L2 <- (object$above-eta1)/sig
-        L1 <- (object$below-eta1)/sig
-        ##cev <- eta + sig*(dnorm(L1)-dnorm(L2))/(pnorm(L2)-pnorm(L1))
-        temp1 <- pnorm(L1)*object$below
-        if (object$below==-Inf) temp1<-0
-
-        temp2 <- (1-pnorm(L2))*object$above
-        if (object$above==Inf) temp2<-0
-        
-        ev1 <- temp1+eta*(pnorm(L2)-pnorm(L1))+sig*(dnorm(L1)-dnorm(L2))+temp2
-        qi$fd <-ev1-ev
-        qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-      }        
-      else if (model == "poisson.bayes") {
-        ev1 <- exp(eta1)
-        qi$fd <- exp(eta1) - ev
-        qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-      }
-    }
-    if (!is.null(y)) {
-      yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow
-    = TRUE)
-      tmp.ev <- yvar - qi$ev
-      if (check) 
-        tmp.pr <- yvar - as.integer(qi$pr)
-      else
-        tmp.pr <- yvar - qi$pr
-      qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-      qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-      if (model %in% c("logit", "probit", "poisson")) {
-        qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-        qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-      }
-    }
-    out <- list(qi=qi, qi.name=qi.name)
-  }
-  else if ((model =="oprobit.bayes") || (model == "mlogit.bayes")) {
-    if (model == "oprobit.bayes") {
-      library(stats)
-      p <- dim(model.matrix(object, data=eval(object$data)))[2]
-      coef <- object$coefficients[,1:p]
-      eta <- coef %*% t(x)
-      level <- ncol(object$coefficients)-p+2
-      gamma<-matrix(NA, nrow(object$coefficients), level+1) 
-      gamma[,1] <- rep(-Inf, nrow(gamma))
-      gamma[,2] <- rep(0, nrow(gamma))
-      gamma[,ncol(gamma)]<-rep(Inf, nrow(gamma))
-      if (ncol(gamma)>3)
-        gamma[,3:(ncol(gamma)-1)] <-
-          object$coefficients[,(p+1):ncol(object$coefficients)]
-      ev <- array(NA, c(nrow(eta), level, ncol(eta)))
-      pr <- matrix(NA, nrow(eta), ncol(eta))
-      ##      dimnames(pr)[1] <- dimnames(ev)[1] <- dimnames(eta)[1]
-      ##      dimnames(pr)[2] <- dimnames(ev)[3] <- dimnames(eta)[2]
-      for (j in 1:level)
-        ev[,j,] <- pnorm(gamma[,j+1]-eta) - pnorm(gamma[,j]-eta)
-      colnames(ev) <- levels(model.response(model.frame(object)))
-      for (j in 1:nrow(pr)) {
-        mu <- eta[j,]
-        ##       pr[j,]<-as.character(cut(mu, gamma[j,],
-        ##       labels=as.factor(1:level)))
-        pr[j,]<-as.character(cut(mu, gamma[j,], labels=colnames(ev)))   
-      }
-      colnames(ev) <- levels(model.response(model.frame(object)))
-      qi$ev <- ev
-      qi$pr <- pr
-      qi.name <- list(ev = "Expected Values: P(Y=j|X)",
-                      pr="Predicted Values: Y|X")      
-    }
-    else if (model == "mlogit.bayes") {
-      library(stats)
-      resp <- model.response(model.frame(object))
-      level <- length(table(resp))
-      p <- dim(model.matrix(eval(object),data=eval(object$data)))[2]
-      coef <- object$coefficients
-      eta <- array(NA, c(nrow(coef),level, nrow(x)))
-      eta[,1,]<-matrix(0, dim(eta)[1],dim(eta)[3])
-      for (j in 2:level) {
-        ind <- (1:p)*(level-1)-(level-j)
-        eta[,j,]<- coef[,ind]%*%t(x)
-      }
-      eta<-exp(eta)
-      ev <- array(NA, c(nrow(coef), level, nrow(x)))
-      pr <- matrix(NA, nrow(coef), nrow(x))
-      colnames(ev) <- rep(NA, level)
-      for (k in 1:nrow(x)) {
-        for (j in 1:level)
-          ev[,j,k] <- eta[,j,k]/rowSums(eta[,,k])
-      }
-      for (j in 1:level) {
-        colnames(ev)[j] <- paste("P(Y=", j, ")", sep="")
-      }
-      for (k in 1:nrow(x)) {             
-        probs <- as.matrix(ev[,,k])
-        temp <- apply(probs, 1, FUN=rmultinom, n=1, size=1)
-        temp <- as.matrix(t(temp)%*%(1:nrow(temp)))
-        pr <- apply(temp,2,as.character)
-      }
-      qi$ev <- ev
-      qi$pr <- pr
-      qi.name <- list(ev = "Expected Values: P(Y=j|X)",
-                      pr = "Predicted Values: Y|X")      
-    }
-    if (!is.null(x1)) {
-      if (model == "oprobit.bayes") {
-        eta1 <- coef %*% t(x1)
-        ev1 <- array(NA, c(nrow(eta), level, ncol(eta)))
-        for (j in 1:level)
-          ev1[,j,] <- pnorm(gamma[,j+1]-eta1) - pnorm(gamma[,j]-eta1)
-        qi$rr <-ev1/ev
-        qi$fd <-ev1-ev
-        qi.name$fd <- "First Differences in Expected Values: P(Y=j|X1)-P(Y=j|X)"
-        qi.name$rr <- "Risk Ratios: P(Y=j|X1)/P(Y=j|X)"
-      }
-      else if (model == "mlogit.bayes") {
-        eta1 <- array(NA, c(nrow(coef),level, nrow(x1)))
-        eta1[,1,]<-matrix(0, dim(eta1)[1],dim(eta1)[3])
-        for (j in 2:level) {
-          ind <- (1:p)*(level-1)-(level-j)
-          eta1[,j,]<- coef[,ind]%*%t(x1)
-        }
-        eta1<-exp(eta1)
-        ev1 <- array(NA, c(nrow(eta1), level, nrow(x1)))
-        for (k in 1:nrow(x)) {
-          for (j in 1:level)
-            ev1[,j,k] <- eta1[,j,k]/rowSums(eta1[,,k])
-        }
-        qi$rr <-ev1/ev
-        qi$fd <-ev1-ev
-        qi.name$fd <- "First Differences in Expected Values: P(Y=j|X1)-P(Y=j|X)"
-        qi.name$rr <- "Risk Ratios: P(Y=j|X1)/P(Y=j|X)"
-      }
-    }
-    if (!is.null(y)) {
-      yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-      levels.names<-levels(as.factor(y))
-      yvar1<- pr1 <-array(NA, c(nrow(yvar), level, ncol(yvar)))
-      for (j in 1:nrow(yvar)) {
-        yvar1[j,,]<-t(class.ind(yvar[j,], levels.names))
-        if (check)
-          pr1[j,,]<-t(class.ind(as.integer(qi$pr[j,]), levels.names))
-        else
-          pr1[j,,]<-t(class.ind(qi$pr[j,],levels.names))
-      }
-      tmp.ev <- yvar1 - qi$ev
-      tmp.pr <- yvar1 - pr1
-      qi$att.ev <- matrix(apply(tmp.ev, 2, rowMeans), nrow = nrow(simpar))
-      qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-      if (model %in% c("oprobit.bayes", "mlogit.bayes", "normal.bayes")) {
-        qi$att.pr <- matrix(apply(tmp.pr, 2, rowMeans), nrow = nrow(simpar))
-        qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-      }
-    }
-    out <- list(qi=qi, qi.name=qi.name) 
-  }
-  else if (model == "ei.hier" || model == "ei.dynamic") {
-    if (!any(class(x)=="cond")) stop("set 'cond=TRUE' in setx.\n")
-    else {
-      coef <- object$coefficients
-      n <- nrow(x)
-      if (is.null(object$N))
-        N<-rep(1,n)
-      else N <- eval(object$N)
-      ev <- array(NA, c(nrow = nrow(coef), 2,2, n))
-      pr <- array(NA, c(nrow = nrow(coef), 2,2, n))
-      nlen<-length(coef[,1])
-      for (j in 1:2) {
-        ev[,j,1,] <- t(apply(coef[,((1:n)+(j-1)*n)], 1,"*",  x[,j])*N)
-        ev[,j,2,] <- t(apply((1-coef[,((1:n)+(j-1)*n)]), 1,"*", x[,j])*N)
-        for (i in 1:n) {
-          size<-round(x[i,j]*N[i])
-          pr[,j,1,i] <-rbinom(prob=coef[,(i+(j-1)*n)],  n=nlen, size=size)
-          pr[,j,2,i] <- x[i,j]*N[i]-pr[,j,1,i]
-        }
-      }
-      ##        dimnames(ev)[[1]] <- dimnames(pr)[[4]] <- 1:nrow(coef)
-      dimnames(ev)[[4]] <- dimnames(pr)[[4]] <- rownames(x)
-      dimnames(ev)[[2]] <- dimnames(pr)[[2]] <- colnames(x)
-      dimnames(ev)[[3]] <- dimnames(pr)[[3]] <- colnames(model.response(object$model))
-      class(ev) <- class(pr) <- c("ei", "array")    
-      qi$ev <- ev
-      qi$pr <- pr
-      qi.name <- list(ev = "Expected In sample predictions at aggregate level",
-                      pr = "In sample predictions at aggregate level")
-    }
-    out <- list(qi=qi, qi.name=qi.name)
-  }
-  else if ( model %in% c("factor.bayes", "factor.ord", "factor.mix", "irt1d", "irtkd")) {
-    stop("sim procedure not applicable since no explanatory variables are involved.\n")
-    out <- list(qi=qi)
-  }
-  out
-}  
-  
-  
-
-
-
diff --git a/R/qi.R b/R/qi.R
index 4e531c8..4400685 100644
--- a/R/qi.R
+++ b/R/qi.R
@@ -1,2 +1,38 @@
-qi<-function(object, ...)
+#' The \code{qi} function is used by developers to simulated quantities of
+#' interest. This method, as a result, is the most significant method of any
+#' Zelig statistical model.
+#'
+#' @title Generic Method for Computing Quantities of Interest
+#' @param obj a \code{zelig} object
+#' @param x a \code{setx} object or NULL
+#' @param x1 an optional \code{setx} object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#'          though this feature is currentlysupported by only a
+#'          handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of
+#'         quantities of interest with their simulations
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @note Run \code{example(qi)} to see a trivial version of 
+#' @examples
+#' qi.some.model <- function(obj, x=NULL, x1=NULL, y=NULL, param=NULL) {
+#'   list(
+#'        "Expected Values: E(Y|X)" = NA,
+#'        "Predicted Values: Y|X"   = NA
+#'        )
+#' }
+qi <- function(obj, x=NULL, x1=NULL, y=NULL, num, param=NULL) {
+  if (!inherits(obj, "zelig"))
+    stop('"obj" must be of a "zelig" object')
+
+  if (!(is.null(x) || inherits(x, "setx")))
+    stop('"x" must be a "setx" object"')
+
+  if (!(is.null(x1) || inherits(x1, "setx")))
+    stop('"x1" must be a "setx" object')
+
+  # then use the method
   UseMethod("qi")
+}
diff --git a/R/qi.aov.R b/R/qi.aov.R
deleted file mode 100644
index 8f9f54b..0000000
--- a/R/qi.aov.R
+++ /dev/null
@@ -1,10 +0,0 @@
-qi.aov <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  ### Calls coef.aov that deletes all entries with  NA.
-  ### qi.lm calls coef.default that do not delete entries with 
-  ### NA yielding error messages
-  
-  cc <- coef(object)
-  object$coef <- cc
-  qi.lm(object, simpar, x, x1, y)
-}
- 
diff --git a/R/qi.coxph.R b/R/qi.coxph.R
deleted file mode 100644
index fc4ac76..0000000
--- a/R/qi.coxph.R
+++ /dev/null
@@ -1,241 +0,0 @@
-qi.coxph <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  #model <- getzelig(object)  	
-  num <- nrow(simpar)
-  if(!any(class(object) == "MI"))
-    k <- length(getcoef(object))
-  else
-    k <- length(getcoef(object[[1]]))
-  #xnames <- colnames(x)[1:k]
-  
-## Strata
-  clean.strata <-function(aStr){        #fix strata string
-     aStr <- gsub('[[:space:]]', '', aStr)
-     ge <- unlist(strsplit(aStr, split=">="))
-     le <- unlist(strsplit(aStr, split="<="))
-     g <- unlist(strsplit(aStr, split=">"))
-     l <- unlist(strsplit(aStr, split="<"))
-     e <- unlist(strsplit(aStr, split="="))
-     split <- equal <- NULL
-     if(length(ge)>1)
-       split <- ">="
-     if(length(le)>1)
-       split <- "<="
-     if(length(g)>1 & length(ge)==1)
-       split <- ">"
-     if(length(l)>1 & length(le)==1)
-       split <- "<"
-     if(is.null(split)){
-       split <- "="
-       equal <- 1
-     }
-     res <- unlist(strsplit(aStr, split="=|>|<|>=|<="))
-     if(!is.null(equal)){
-       string <- paste(res[[1]],split,res[[2]], sep="")
-     }
-     else{
-       string <- paste(res[[1]], " ", split, " ", res[[2]], "=",
-                       res[[3]], sep="")
-     }
-     return(string)
-  }
-
-  if(length(x)>k){
-	strata <- as.matrix(x[k+1])
-        strata <- clean.strata(strata)
-	x <- x[1:k]
-  }
-  else
-	strata <- NULL
-  if(!is.null(x1) & length(x1)>k){
-	strata1 <- as.matrix(x1[k+1])
-        strata1 <- clean.strata(strata1)
-	x1 <- x1[1:k]
-  }
-  else
-	strata1 <- NULL
-  if(!is.null(x1) & !identical(strata, strata1))
-	stop("Strata must be identical for x and x1")    
-
-  #if(!is.null(x)){
-   # x <- matrix(as.numeric(x), nrow=1)
-   # colnames(x) <- xnames
-  #}
-  #if(!is.null(x1)){
-   # x1 <- matrix(as.numeric(x1), nrow=1)
-   # colnames(x1) <- xnames
-  #}
-  
-  if (!is.null(x))
-      x.mat <- as.matrix(x)
-  if (!is.null(x1))
-      x1.mat <- as.matrix(x1)
-
-  
-  coef <- as.matrix(simpar[,1:k])
-  eta <- coef%*%t(x.mat)
-  risk <- exp(eta)
-  qi <- qi.name <- list()
-
-
-## Rate Ratio
-  if(!is.null(x1)){
-	eta1 <- coef%*%t(x1.mat)
-	risk1 <- exp(eta1)
-	qi$hr <- risk1/risk
-	qi.name$hr <- "Hazard Ratios: h(t|X1)/h(t|X)"
-  }
-
-## Survival Function
-## Not MI data
-  if(!any(class(object) == "MI")){ 
-    surv.fit <- survfit(object, newdata=x)
-  
-    if(!is.null(strata)){			
-      index <- which(match(summary(surv.fit)$strata, strata) == 1)
-      surv <- surv.fit$surv[index]
-      time <- surv.fit$time[index]
-      surv.se <- summary(surv.fit)$std.err[index]
-      log.surv <- log(surv)
-      log.surv.se <- surv.fit$std.err[index]
-    }
-    else{
-      surv <- surv.fit$surv
-      time <- surv.fit$time
-      surv.se <- summary(surv.fit)$std.err
-      log.surv <- log(surv)
-      log.surv.se <- surv.fit$std.err
-    }
-  }
-
-## MI data
-
-  else{
-    m <- length(object)
-
-    surv.fit <- survfit(object[[1]], newdata=x)
-    
-    if(!is.null(strata)){			
-      index <- which(match(summary(surv.fit)$strata, strata) == 1)
-      surv <- surv.fit$surv[index]
-      time <- surv.fit$time[index]
-    }
-    else{
-      surv <- surv.fit$surv
-      time <- surv.fit$time
-    }
-    means <- cbind(time, "1"=surv)
-
-    #Merge data by survival time
-    for(i in 2:m){
-      surv.fit <- survfit(object[[i]], newdata=x)
-
-      if(!is.null(strata)){			
-        index <- which(match(summary(surv.fit)$strata, strata) == 1)
-        surv <- surv.fit$surv[index]
-        time <- surv.fit$time[index]
-      }
-      else{
-        surv <- surv.fit$surv
-        time <- surv.fit$time
-      }
-      
-      new.means <- cbind(time,surv)
-      colnames(new.means) <- c("time",paste(i))
-      means <- merge(means, new.means, by="time", all = T)
-    }
-    means <- means[,-1]
-    surv <- rowMeans(means, na.rm = T) #survival means
-    na <- apply(means, 2, is.na) 
-    na <- 1 - apply(na, 2, as.numeric)
-    n <- apply(na, 1, sum) #number of non-NA per survival time
-    if(any(n==1)){
-      warning("Some imputed survival times appear in only one
-dataset.  Suggest increasing number of imputed datasets and/or specify
-duration variable as ordinal")
-      n[which(n==1)] <- n[which(n==1)]+1
-    }
-
-    surv.fit <- survfit(object[[1]], newdata=x)
-
-    if(!is.null(strata)){			
-      index <- which(match(summary(surv.fit)$strata, strata) == 1)
-      surv.se <- summary(surv.fit)$std.err[index]
-      time <- surv.fit$time[index]
-    }
-    else{
-      surv.se <- summary(surv.fit)$std.err
-      time <- surv.fit$time
-    }
-    se <- cbind(time, "1"=surv.se)
-    for(i in 2:m){
-      surv.fit <- survfit(object[[i]], newdata=x)
-
-      if(!is.null(strata)){			
-        index <- which(match(summary(surv.fit)$strata, strata) == 1)
-        surv.se <- summary(surv.fit)$std.err[index]
-        time <- surv.fit$time[index]
-      }
-      else{
-        surv.se <- summary(surv.fit)$std.err
-        time <- surv.fit$time
-      }
-      new.se <- cbind(time,surv.se)
-      colnames(new.se) <- c("time",paste(i))
-      se <- merge(se, new.se, by="time", all = T)
-    }
-    
-    time <- se[,1]
-    t <- length(t)
-    se <- se[,-1]
-    var <- se^2
-    surv.se <- c()
-    mean.var <- rowMeans(var, na.rm = T)
-
-    #Rubin's rule
-    
-    B <- (means-surv)^2
-    B[is.na(B)] <- 0
-    B <- apply(B,1,sum)/(n-1)
-    surv.se <- sqrt(mean.var + B*(1+1/n))
-
-    log.surv <- log(surv)
-    log.surv.se <- sqrt(1/(surv^2) * surv.se^2) #delta method
-    
-  }
-
-  surv.sims <- matrix(NA, nrow=num, ncol=length(surv))
-      for (i in 1:length(surv)){
-	surv.sims[,i] <- exp(rnorm(num, mean=log.surv[i], sd=log.surv.se[i]))
-      }
-  colnames(surv.sims) <- time 
-  
-  qi$survival <- surv.sims
-  qi.name$survival <- "Estimated Survival Function Over Time: S(t|X)"
-
-## Cumulative Hazard
-  qi$cumhaz <- cumhaz <- -log(surv.sims)
-  qi.name$cumhaz <- "Estimated Cumulative Hazard Over Time: H(t|X)"
-
-## Hazard
-  cumhaz.means <- colMeans(cumhaz)
-  hazard <- matrix(NA, ncol=ncol(cumhaz), nrow=1)
-  colnames(hazard) <- colnames(cumhaz)
-  hazard[,1] <- cumhaz.means[1]
-  for (i in 2:length(time)){
-	hazard[,i] <- cumhaz.means[i] - cumhaz.means[i-1]
-  }
-  qi$hazard <- hazard
-  qi.name$hazard <- "Estimated Hazard Rate Over Time: h(t|X)"
-  class(qi$hazard) <- "coxhazard"
-  
-
-  list(qi=qi, qi.name=qi.name)
-}  
-
-
-
-
-
-
-
-
diff --git a/R/qi.eiRxC.R b/R/qi.eiRxC.R
deleted file mode 100644
index 77793ab..0000000
--- a/R/qi.eiRxC.R
+++ /dev/null
@@ -1,15 +0,0 @@
-qi.eiRxC <- function(object, simpar, x=NULL, x1=NULL, y=NULL, 
-                     user.fn=NULL) {
-        if (!is.null(x1))
-          warning ("no first difference are available for EI models")
-        if (!is.null(x))
-          object$covar <- x
-        ev <- calc.fractions(object, simpar)
-
-        ev <- aperm(ev, perm = c(3,1,2))
-        class(ev) <- c("ei", "array")
-        qi <- list(ev=ev)
-        qi.name <- list(ev = "Expected Values: E(Y|X)")
-        list(qi=qi, qi.name=qi.name)
-
-}
diff --git a/R/qi.gam.R b/R/qi.gam.R
deleted file mode 100644
index 383951c..0000000
--- a/R/qi.gam.R
+++ /dev/null
@@ -1,82 +0,0 @@
-qi.gam <- function(object, simpar, x, x1 = NULL, y = NULL, num = NULL) {
-num=nrow(simpar)
-x <- as.data.frame(x)
-if(!is.null(x1)){x1 <- as.data.frame(x1)}
-  check <- FALSE
-  model <- getzelig(object)
-  k <- length(object$coef)
-  coef <- simpar[,1:k]
-  if (k < ncol(simpar)) 
-    alpha <- simpar[,(k+1):ncol(simpar)]
-  eta <- coef %*% t(x)				## Here is the nonconformability problem
-  theta <- matrix(object$family$linkinv(eta), nrow = nrow(coef))
-  pr <- ev <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta)) ## 1000 by 1 matrix
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-  if (model %in% c("logit.gam", "probit.gam")) { 
-    check <- TRUE
-	evfit <- predict.gam(object, x, se.fit=TRUE, type="link")$fit
-	evse <-  predict.gam(object, x, se.fit=TRUE, type="link")$se.fit
-	ev <- rnorm(num, mean=object$family$linkinv(evfit), sd=evse)
-	prfit <- predict.gam(object, x, se.fit=TRUE, type="response")$fit 
-	pr <- rbinom(num, 1, prfit)
-    if (!is.null(y)) {
-      if (NCOL(y) > 1)
-        y <- y[,1]
-    }
-  }
-  else if (model == "normal.gam") {
-	evfit <- predict.gam(object, x, se.fit=TRUE, type="link")$fit
-	evse <-  predict.gam(object, x, se.fit=TRUE, type="link")$se.fit
-	ev <- rnorm(num, mean=object$family$linkinv(evfit), sd=evse)
-	prfit <- predict.gam(object, x, se.fit=TRUE, type="response")$fit 
-	prse <- predict.gam(object, x, se.fit=TRUE, type="response")$se.fit 
-	pr <- rnorm(num, mean=prfit, sd=prse)
-  }
-  else if (model == "poisson.gam") {
-	evfit <- predict.gam(object, x, se.fit=TRUE, type="link")$fit
-	evse <-  predict.gam(object, x, se.fit=TRUE, type="link")$se.fit
-	ev <- rnorm(num, mean=object$family$linkinv(evfit), sd=evse)
-	prfit <- predict.gam(object, x, se.fit=TRUE, type="response")$fit 
-	pr <- rpois(num, prfit)
-  }
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if (!is.null(x1)){
-	evfit1 <- predict.gam(object, x1, se.fit=TRUE, type="link")$fit
-	evse1 <-  predict.gam(object, x1, se.fit=TRUE, type="link")$se.fit
-	ev1 <- rnorm(num, mean=object$family$linkinv(evfit), sd=evse)
-    qi$fd <- ev1-ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-    if (model %in% c("logit.gam", "probit.gam")) {
-      qi$rr <- ev1/ev
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#   if (check)
-#     tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#   else
-#      tmp.pr <- qi$tt.pr <- yvar - qi$pr
-#    qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-    tmp.ev <- yvar - qi$ev
-    if (check)
-      tmp.pr <- yvar - as.integer(qi$pr)
-    else
-      tmp.pr <- yvar - qi$pr
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
diff --git a/R/qi.gee.R b/R/qi.gee.R
deleted file mode 100644
index 8681355..0000000
--- a/R/qi.gee.R
+++ /dev/null
@@ -1,27 +0,0 @@
-qi.gee <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  model <- getzelig(object)
-  coef <- simpar
-  eta <- coef %*% t(x)
-  ev <- theta <- matrix(object$family$linkinv(eta), nrow = nrow(coef)) 
-  qi <- list(ev = ev)
-  qi.name <- list(ev = "Expected Values: E(Y|X)")
-  if (!is.null(x1)){
-    ev1 <- theta1 <- matrix(object$family$linkinv(coef %*% t(as.matrix(x1))),
-                     nrow = nrow(coef))
-    qi$fd <- ev1-ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-    if (model %in% c("logit.gee", "probit.gee")) {
-      qi$rr <- ev1/ev
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    tmp.ev <- yvar - qi$ev
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
diff --git a/R/qi.glm.R b/R/qi.glm.R
deleted file mode 100644
index 7b61fe2..0000000
--- a/R/qi.glm.R
+++ /dev/null
@@ -1,82 +0,0 @@
-qi.glm <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  check <- FALSE
-  model <- getzelig(object)
-  k <- length(getcoef(object))
-  coef <- simpar[,1:k]
-  if (k < ncol(simpar)) 
-    alpha <- simpar[,(k+1):ncol(simpar)]
-  eta <- coef %*% t(x)
-  theta <- matrix(object$family$linkinv(eta), nrow = nrow(coef))
-  pr <- ev <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-  if (model %in% c("logit", "probit", "relogit")) {
-    check <- TRUE
-    ev <- theta
-    for (i in 1:ncol(theta)) 
-      pr[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i]))
-    if (!is.null(y)) {
-      if (NCOL(y) > 1)
-        y <- y[,1]
-    }
-  }
-  else if (model == "normal") {
-    ev <- theta
-    for (i in 1:nrow(ev)) 
-      pr[i,] <- rnorm(length(ev[i,]), mean = ev[i,], sd = alpha[i])
-  }
-  else if (model == "gamma") {
-    ev <- theta 
-    for (i in 1:nrow(ev))  
-      pr[i,] <- rgamma(length(ev[i,]), shape = alpha[i], scale = theta[i,]/alpha[i])
-  }
-  else if (model == "poisson") {
-    ev <- theta
-    for (i in 1:ncol(ev))
-      pr[,i] <- rpois(length(ev[,i]), lambda = ev[,i])
-  }
-  else if (model == "negbin") {  
-    ev <- theta
-    for (i in 1:nrow(ev)) 
-      pr[i,] <- rnegbin(length(ev[i,]), mu = ev[i,], theta = alpha[i])
-  }
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if (!is.null(x1)){
-    ev1 <- theta1 <- matrix(object$family$linkinv(coef %*% t(as.matrix(x1))),
-                     nrow = nrow(coef))
-    qi$fd <- ev1-ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-    if (model %in% c("logit", "probit", "relogit")) {
-      qi$rr <- ev1/ev
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    tmp.ev <- yvar - qi$ev
-    if (check)
-      tmp.pr <- yvar - as.integer(qi$pr)
-    else
-      tmp.pr <- yvar - qi$pr
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    if (check)
-#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#    else
-#      tmp.pr <- qi$tt.pr <- yvar - qi$pr
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#    qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
-
diff --git a/R/qi.lm.R b/R/qi.lm.R
deleted file mode 100644
index 33e0288..0000000
--- a/R/qi.lm.R
+++ /dev/null
@@ -1,31 +0,0 @@
-qi.lm <- function(object, simpar, x, x1 = NULL, y = NULL) {
-
-  k <- length(getcoef(object))
-  coef <- simpar[,1:k,drop = FALSE]
-  alpha <- simpar[,ncol(simpar)]
-  if (k < ncol(x))
-    x <- as.data.frame(x[,names(coef(object)),drop = FALSE])
-  ev <- coef %*% t(x)
-  qi <- list(ev=ev)
-  qi.name <- list(ev="Expected Values: E(Y|X)")
-  if(!is.null(x1)){
-    if (k < ncol(x1))
-      x1 <- as.data.frame(x1[,names(coef(object)),drop=FALSE])
-    ev1 <- coef %*% t(x1)
-    qi$fd <- ev1-ev
-    qi.name$fd <-
-      "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-    tmp.ev <- yvar - qi$ev
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
diff --git a/R/qi.mixed.R b/R/qi.mixed.R
deleted file mode 100644
index b017272..0000000
--- a/R/qi.mixed.R
+++ /dev/null
@@ -1,171 +0,0 @@
-## dbailey at wustl.edu
-## modified by Gregor Gorjanc 2008-01-07
-## modified by Ferdi  10/30/07
-## modified by delia 09/22/08
-################################
-
-qi.mer <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  
-  x <- as.data.frame(x)
-  if (!is.null(x1))
-    x1 <- as.data.frame(x1)
-  
-  
-  ## original dataset
-  D <- eval(object at call$data, envir = parent.frame())
-  fml <- eval(object at call$formula)
-  parsefml <- .getRandAndFixedTerms(fml)
-  
-  betas <- simpar[["betas"]]
-  gammas <- simpar[["gammas"]]
-  alpha <- simpar[["scale"]]
-  
-  
-  fTermsNames <- colnames(model.matrix(parsefml$fixed, data = D))
-                                        #cat(fTermsNames, "\n")
-  
-  fTerms <- x[,fTermsNames]
-  rTerms <- list()
-  for (i in 1:length(parsefml$random)){
-    ## for now, intercept is always present
-    tt <- terms(parsefml$random[[i]])
-    attr(tt,"intercept") <- 1   
-    rTermsNames <- colnames(model.matrix(tt,data=D))
-    rTerms[[i]] <- x[, rTermsNames]
-  }
-  names(rTerms) <- names(parsefml$random)
-  if (!is.null(x1)){
-    fTermsNames <- colnames(model.matrix(parsefml$fixed, data = D))
-    fTerms.x1 <- x1[,fTermsNames]
-    rTerms.x1 <- list()
-    for (i in 1:length(parsefml$random)){
-      tt <- terms(parsefml$random[[i]])
-      attr(tt,"intercept") <- 1   
-      rTermsNames <- colnames(model.matrix(tt,data=D))
-      rTerms.x1[[i]] <- x1[, rTermsNames]
-      ##rTermsNames <- colnames(model.matrix(parsefml$random[[i]],data=D))
-      ##rTerms.x1[[i]] <- x1[, rTermsNames]
-    }
-    names(rTerms.x1) <- names(parsefml$random)
-  }
-        
-  ## Expected Values and Predicted Values    
-  if (class(object) == "mer"){
-
-
-    family <- try(object at nlmodel$family$family ,silent=TRUE)
-    if (inherits(family,"try-error")) {
-        family=NULL
-    }
-
-    link <- try(object at nlmodel$family$link ,silent=TRUE)
-    if (inherits(link,"try-error")) {
-      link=NULL
-    } 
-    
-    eta <- betas %*% t(as.matrix(fTerms))
-    mu <- eta
-    ## For predicted values, add in random effects draws
-    for (i in 1:length(rTerms)){
-      mu <- mu + gammas[[names(rTerms[i])]] %*% t(as.matrix(rTerms[[i]]))
-    }
-    if (is.null(family)){
-      ev <- eta
-      n <- length(mu[,1])
-      pr <- matrix(NA, nrow=nrow(mu), ncol=ncol(mu))
-      for (i in 1:ncol(mu)){
-        pr[,i] <- rnorm(n, mean=mu[,i], sd=alpha)
-      }
-    } else {
-      theta <- matrix(object at nlmodel$family$linkinv(eta), nrow=nrow(betas))
-      mut <- matrix(object at nlmodel$family$linkinv(mu), nrow=nrow(betas))
-      ev <- matrix(NA, nrow=nrow(theta), ncol=ncol(theta))
-      pr <- matrix(NA, nrow=nrow(mut), ncol=ncol(mut))
-      dimnames(ev) <- dimnames(pr) <- dimnames(theta)
-      n <- length(mut[, 1])
-      if (family == "binomial"){
-        ev <- theta
-        for (i in 1:ncol(mut)){
-          pr[,i] <- as.character(rbinom(n, 1, mut[,i]))
-        }
-        if (!is.null(y)) {
-          if (NCOL(y) > 1) {
-            y <- y[,1]
-          }
-        }
-      }
-      else if (family == "Gamma"){
-        ev <- theta * 1/alpha
-        n <- length(mut[i,])
-        for (i in 1:nrow(mut)){
-          pr[i,] <- rgamma(n, shape = mut[i,], scale= 1/alpha)
-        }
-      }
-      else if (family == "gaussian"){
-        ev <- theta
-        if (link == "log"){
-          for (i in 1:ncol(mut)){
-            pr[,i] <- rlnorm(n, meanlog=mut[,i], sdlog=alpha)
-          }
-        } else {
-          stop(sprintf("no method for %s family and %s link", family, link))
-        }
-      }
-      else if (family == "poisson"){
-        ev <- theta
-        for (i in 1:ncol(mut)){
-          pr[,i] <- rpois(n, lambda = mut[,i])
-        }
-      } 
-      else {
-        stop(sprintf("no method for %s family", family))
-      }
-    }
-  }
-  qi <- list(ev=ev, pr=pr)
-  qi.name <- list(ev="Expected Values: E(Y|X)", pr="Predicted Values: Y|X")
-  if (!is.null(x1)){
-    if (class(object) == "mer"){
-      if (is.null(family)){
-        ev1 <- betas %*% t(as.matrix(fTerms.x1))
-      }
-      else {
-        theta1 <- 
-          matrix(object at nlmodel$family$linkinv(betas %*% t(as.matrix(fTerms.x1))),
-                 nrow = nrow(betas))
-        if (family == "Gamma") {
-          ev1 <- theta1 * 1/alpha
-        }
-        else {
-          ev1 <- theta1
-        }
-        if (family == "binomial") {
-          qi$rr <- ev1/ev
-          qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-        }
-      }
-      qi$fd <- ev1-ev
-      qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)" 
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    tmp.ev <- yvar - qi$ev
-    if (!is.null(family)){
-      if (family == "binomial"){ 
-        tmp.pr <- yvar - as.integer(qi$pr)
-      } else {
-        tmp.pr <- yvar - qi$pr
-      }
-    }
-    else {
-      tmp.pr <- yvar - qi$pr
-    }
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
diff --git a/R/qi.multinom.R b/R/qi.multinom.R
deleted file mode 100644
index 797e03d..0000000
--- a/R/qi.multinom.R
+++ /dev/null
@@ -1,35 +0,0 @@
-qi.polr <- function(object, simpar, x = x, x1 = NULL) {
-  m <- ncol(x)
-  lev <- object$lev
-  k <- length(lev)
-  ev.multinom<-function(num, sims, x, lev, k, m){
-    eta <- NULL
-    for (i in 1:(k-1))
-      eta <- cbind(eta, sims[,(m*(i-1)+1):(m*i)]%*%t(x))
-    eta <- exp(cbind(rep(0, num), eta))
-    ev <- eta/apply(eta, 1, sum)
-    colnames(ev) <- lev
-    ev
-  }
-  ev <- ev.multinom(num, simpar, x, lev, k, m)
-  Ipr <- sim.cut <- matrix(NA, nrow = nrow(ev), ncol = ncol(ev))
-  colnames(Ipr) <- colnames(sim.cut) <- lev
-  sim.cut[,1] <- ev[,1]
-  for (j in 2:k) 
-    sim.cut[,j] <- sim.cut[,(j-1)] + ev[,j]
-  tmp <- runif(nrow(ev), 0, 1)
-  for (l in 1:k) 
-    Ipr[,l] <- tmp > sim.cut[,l]
-  pr <- NULL
-  for (m in 1:nrow(Ipr))
-    pr[m] <- 1 + sum(Ipr[m,])
-  res$qi <- list(ev=ev, pr=pr)
-  res$qi.name <- list(ev = "Predicted Probabilities: P(Y=j|X)",
-                      pr = "Predicted Values: Y|X")
-  if(!is.null(x1)){
-    ev1 <- ev.multinom(num, simpar, x1, lev, k, m)
-    res$qi$fd <- ev1-ev
-    res$qi.name$fd <- "First Differences: P(Y=j|X1)-P(Y=j|X)"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
diff --git a/R/qi.multiple.R b/R/qi.multiple.R
deleted file mode 100644
index 0c2a3c2..0000000
--- a/R/qi.multiple.R
+++ /dev/null
@@ -1,71 +0,0 @@
-qi.multiple <- function(object, simpar, x, x1 = NULL, y = NULL) {
- 
-  check <- FALSE
-  model <- getzelig(object)
-  coef<-list()
-  tt<-terms(object)
-  nreq<-length(tt)
-  nms<-names(tt)
-#print(colnames(simpar))
-  start<-1
-  for(i in 1:nreq){
-    eqni<-nms[[i]]
-    coef[[i]]<-simpar[,start:(start+length(attr(tt,"term.labels")[[eqni]])-1)]
-    start<-start+length(attr(x,eqni))
-  }
-
-  fillmatrix<-function(simpar,x,nreq){
-    r<-list()
-    eta<-list()
-    if(nrow(x)==1)
-      q<-array(NA,c(nrow(simpar),nreq))
-    else
-      q<- array(NA,c(nrow(simpar),nreq,nrow(x)))
-
-    for(i in 1:nreq){
-      eqn<-nms[[i]]
-      #print("from qi.multiple")
-      #print(eqn)
-      r[[i]]= x[,attr(tt,"term.labels")[[eqn]],drop=FALSE]
-      #print(r[[i]])
-  
-      #  #print("yes")
-        eta[[i]] <- coef[[i]] %*% t(r[[i]])
-      #print("etai is calculated")
-      if(nrow(r[[i]])==1){
-        q[,i] <- eta[[i]]
-      }
-      else
-        {
-        #  print("coefi\n")
-        #  print(coef[[i]])
-       #   eta[[i]] <- coef[[i]] %*% (r[[i]])
-       #   print("etai\n")
-       #   print(eta[[i]])
-          q[,i,] <- eta[[i]]
-        }
-    }
-    return (q)
-
-  }
-  pr<-ev<-fillmatrix(simpar,x,nreq)
-
-  qi <- list(ev = ev,pr=pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",pr = "Predicted Values: Y|X")
-
-  
-  if (!is.null(x1)){
-      theta1<-fillmatrix(simpar,x1,nreq)
-      ev1 <- theta1
-      qi$fd <- ev1-ev
-      qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
diff --git a/R/qi.netglm.R b/R/qi.netglm.R
deleted file mode 100644
index 12e4c80..0000000
--- a/R/qi.netglm.R
+++ /dev/null
@@ -1,81 +0,0 @@
-qi.netglm <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  check <- FALSE
-  model <- getzelig(object)
-  k <- length(object$coef)
-  coef <- simpar[,1:k]
-  if (k < ncol(simpar)) 
-    alpha <- simpar[,(k+1):ncol(simpar)]
-  eta <- coef %*% t(x)
-  theta <- matrix(object$family$linkinv(eta), nrow = nrow(coef))
-  pr <- ev <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-  if (model %in% c("logit.net", "probit.net", "cloglog.net", "netbinom")) {
-    check <- TRUE
-    ev <- theta
-    for (i in 1:ncol(theta)) 
-      pr[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i]))
-    if (!is.null(y)) {
-      if (NCOL(y) > 1)
-        y <- y[,1]
-    }
-  }
-  else if (model == "normal.net") {
-    ev <- theta
-    for (i in 1:nrow(ev)) 
-      pr[i,] <- rnorm(length(ev[i,]), mean = ev[i,], sd = alpha[i])
-  }
-  else if (model == "gamma.net") {
-    ev <- theta * 1/alpha
-    for (i in 1:nrow(ev))  
-      pr[i,] <- rgamma(length(ev[i,]), shape = theta[i,], scale = 1/alpha[i])
-  }
-  else if (model == "poisson.net") {
-    ev <- theta
-    for (i in 1:ncol(ev))
-      pr[,i] <- rpois(length(ev[,i]), lambda = ev[,i])
-  }
-  ## Still need to add inverse gaussian
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if (!is.null(x1)){
-    theta1 <- matrix(object$family$linkinv(coef %*% t(as.matrix(x1))),
-                     nrow = nrow(coef))
-    if (model == "gamma.net")
-      ev1 <- theta1 * 1/alpha
-    else
-      ev1 <- theta1
-    qi$fd <- ev1-ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-    if (model %in% c("netbinom")) {
-      qi$rr <- ev1/ev
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    if (check)
-#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#    else
-#      tmp.pr <- qi$tt.pr <- yvar - qi$pr
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#    qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-    tmp.ev <- yvar - qi$ev
-    if (check)
-      tmp.pr <- yvar - as.integer(qi$pr)
-    else
-      tmp.pr <- yvar - qi$pr
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
diff --git a/R/qi.netlm.R b/R/qi.netlm.R
deleted file mode 100644
index 412181c..0000000
--- a/R/qi.netlm.R
+++ /dev/null
@@ -1,26 +0,0 @@
-qi.netlm <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  k <- length(getcoef(object))
-  coef <- simpar[,1:k]
-  #alpha <- simpar[,(k+1):ncol(simpar)]
-  ev <- coef %*% t(x)
-  qi <- list(ev=ev)
-  qi.name <- list(ev="Expected Values: E(Y|X)")
-  if(!is.null(x1)){
-    ev1 <- coef %*% t(x1)
-    qi$fd <- ev1-ev
-    qi.name$fd <-
-      "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    #tmp.ev <- qi$tt.ev <- yvar - qi$ev
-    #qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-    tmp.ev <- yvar - qi$ev
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
diff --git a/R/qi.netlogit.R b/R/qi.netlogit.R
deleted file mode 100644
index 2b636a1..0000000
--- a/R/qi.netlogit.R
+++ /dev/null
@@ -1,62 +0,0 @@
-qi.logit.net <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  check <- FALSE
-  model <- getzelig(object)
-  k <- length(getcoef(object))
-  coef <- simpar[,1:k]
-  if (k < ncol(simpar)) 
-    alpha <- simpar[,(k+1):ncol(simpar)]
-  eta <- coef %*% t(x)
-  theta <- matrix(object$family$linkinv(eta), nrow = nrow(coef))
-  pr <- ev <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-  if (model == "logit.net") {
-    check <- TRUE
-    ev <- theta
-    for (i in 1:ncol(theta)) 
-      pr[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i]))
-    if (!is.null(y)) {
-      if (NCOL(y) > 1)
-        y <- y[,1]
-    }
-  }
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if (!is.null(x1)){
-    theta1 <- matrix(object$family$linkinv(coef %*% t(as.matrix(x1))),
-                     nrow = nrow(coef))
-    if (model == "logit.net")
-      ev1 <- theta1
-    qi$fd <- ev1-ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-    if (model %in% c("logit", "probit", "relogit")) {
-      qi$rr <- ev1/ev
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#    if (check)
-#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#    else
-#      tmp.pr <- qi$tt.pr <- yvar - qi$pr
-    tmp.ev <- yvar - qi$ev
-    if (check)
-      tmp.pr <- yvar - as.integer(qi$pr)
-    else
-      tmp.pr <- yvar - qi$pr
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
diff --git a/R/qi.polr.R b/R/qi.polr.R
deleted file mode 100644
index 20f52e0..0000000
--- a/R/qi.polr.R
+++ /dev/null
@@ -1,107 +0,0 @@
-qi.polr <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  num <- nrow(simpar)
-  m <- length(getcoef(object))
-  sim.coef <- simpar[,1:m,drop=F]
-  sim.zeta <- sim.theta <- simpar[,(m+1):ncol(simpar), drop=F]
-
-  # zeta's coefficients should hold the transformed results
-  # This was a change that was added for some unknown version of 'polr'
-  # (Changed by Matt Owen at the request of Kosuke Imai 11-15-2011)
-
-  #sim.zeta[,-1] <- exp(sim.theta[,-1])
-
-  #sim.zeta <- t(apply(sim.zeta, 1, cumsum))
-
-
-  k <- length(object$zeta) + 1
-  lev <- object$lev
-  eta <- t(x[,-1] %*% t(sim.coef)) 
-  Ipr <- cuts <- tmp0 <- array(0, dim = c(num, k, nrow(x)),
-                        dimnames = list(1:num, lev, rownames(x)))
-
-  for (i in 1:num) 
-    cuts[i,,] <- t(object$inv.link(eta[i,], sim.zeta[i,]))
-
-  tmp0[,(2:k),] <- cuts[,(1:(k-1)),]
-  ev <- cuts - tmp0
-  if (dim(ev)[3] == 1) ev <- ev[,,1]
-  pr <- matrix(NA, nrow = num, ncol = nrow(x))
-
-  tmp <- matrix(
-                runif(length(cuts[,1,]), 0, 1),
-                nrow = num,
-                ncol = nrow(x)
-                )
-
-  for (i in 1:k)
-    Ipr[,i,] <- as.integer(tmp > cuts[,i,])
-
-  for (n in 1:nrow(x))
-    pr[,n] <- 1 + rowSums(Ipr[,,n,drop=F])
-  pr <- matrix(factor(pr, labels = lev[1:length(lev) %in% sort(unique(pr))],
-                      ordered = TRUE),
-               nrow = num, ncol = nrow(x))
-  colnames(pr) <- rownames(x)
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: P(Y=j|X)",
-                  pr = "Predicted Values: Y|X")
-  if(!is.null(x1)){
-    eta1 <- t(x1[,-1] %*% t(sim.coef))
-    Ipr <- cuts <- tmp0 <- array(0, dim = c(num, k, nrow(x)),
-                                 dimnames = list(1:num, lev, rownames(x)))
-    for (i in 1:num) 
-      cuts[i,,] <- t(object$inv.link(eta1[i,], sim.zeta[i,]))
-    tmp0[,(2:k),] <- cuts[,(1:(k-1)),]
-    ev1 <- cuts - tmp0
-    if (dim(ev1)[3] == 1) ev1 <- ev1[,,1]
-    qi$fd <- ev1 - ev
-    qi$rr <- ev1 / ev
-    qi.name$fd <- "First Differences: P(Y=j|X1)-P(Y=j|X)"
-    qi.name$rr <- "Risk Ratio: P(Y=j|X1)-P(Y=j|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(NA, nrow = length(y), ncol = length(lev))
-    tmp.ev <- tmp.pr <- array(NA, dim = dim(qi$ev))
-    pr.idx <- array(NA, dim = c(nrow(pr), length(lev), nrow(x)))
-    qi$att.ev <- qi$att.pr <- matrix(NA, dim(qi$ev)[1], dim(qi$ev)[2])
-    for (i in 1:length(lev)) {
-      yvar[,i] <- as.integer(y == lev[i])
-      pr.idx[,i,] <- as.integer(pr[,i] == lev[i])
-    }
-    colnames(yvar) <- lev 
-    for (j in 1:ncol(yvar)) {
-      tmp.ev[,j,] <- yvar[,j] - qi$ev[,j,]
-      tmp.pr[,j,] <- yvar[,j] - pr.idx[,j,]
-      qi$att.ev[,j] <- apply(tmp.ev[,j,], 1, mean)
-      qi$att.pr[,j] <- apply(tmp.pr[,j,], 1, mean)
-    }
-    colnames(qi$att.ev) <- colnames(qi$att.pr) <- lev
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-rejection.sample <- function (num, size, cuts) {
-
-  # Define how to produce a single sample
-  single.sample <- function(size, cuts) {
-    repeat {
-      samps <- runif(length(cuts))
-    }
-  }
-
-  #
-
-
-}
-
-
-
-
-
-
-
-
diff --git a/R/qi.relogit.R b/R/qi.relogit.R
deleted file mode 100644
index b373b34..0000000
--- a/R/qi.relogit.R
+++ /dev/null
@@ -1,79 +0,0 @@
-qi.relogit <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  if ("relogit2" %in% class(object)) {
-    num <- nrow(simpar$par0)
-    tmp0 <- object$lower.estimate
-    tmp1 <- object$upper.estimate
-    
-    low <- qi.relogit(tmp0, simpar$par0, x, x1)
-    up <- qi.relogit(tmp1, simpar$par1, x, x1)
-    PP <- PR <- array(NA, dim = c(num, 2, nrow(x)),
-                      dimnames = list(NULL, c("Lower Bound", "Upper Bound"),
-                        rownames(x)))
-    PP[,1,] <- P00 <- low$qi$ev
-    PP[,2,] <- P10 <- up$qi$ev
-    qi <- list(ev = PP)
-    qi.name <- list(ev = "Expected Values: E(Y|X)")
-    if (!is.null(x1)) {
-      FD <- RR <- array(NA, dim = c(num, 2, nrow(x)),
-                        dimnames = list(NULL, d2 = c("Lower Bound", "Upper Bound"), 
-                          rownames(x)))
-      sim01 <- qi.glm(tmp0, simpar$par0, x = x1, x1 = NULL)
-      sim11 <- qi.glm(tmp1, simpar$par1, x = x1, x1 = NULL)
-      tau0 <- object$lower.estimate$tau
-      tau1 <- object$upper.estimate$tau
-      P01 <- as.matrix(sim01$qi$ev)
-      P11 <- as.matrix(sim11$qi$ev)
-      OR <- (P10/(1-P10)) / (P00/(1-P00))
-      RR[,1,] <- pmin(as.matrix(P01/P00), as.matrix(P11/P10))
-      RR[,2,] <- pmax(as.matrix(P01/P00), as.matrix(P11/P10))
-      RD0 <- as.matrix(P01-P00)
-      RD1 <- as.matrix(P11-P10)
-      RD <- as.matrix((sqrt(OR)-1) / (sqrt(OR)+1))
-      ## checking monotonicity
-      y.bar <- mean(object$y)
-      beta0.e <- coef(tmp0)
-      beta1.e <- coef(tmp1)
-      ## evaluating RD at tau0 and tau1
-      RD0.p <- 1/(1+exp(-t(beta0.e) %*% t(x1))) - 1/(1+exp(-t(beta0.e) %*% t(x)))
-      RD1.p <- 1/(1+exp(-t(beta1.e) %*% t(x1))) - 1/(1+exp(-t(beta1.e) %*% t(x)))
-      ## evaluating RD at tau0+e and tau1+e
-      e <- 0.001
-      beta0.e["(Intercept)"] <- beta0.e["(Intercept)"]+log(1-tau0)-log(tau0) -
-        log(1-tau0-0.001)+log(tau0+0.001)
-      beta1.e["(Intercept)"] <- beta1.e["(Intercept)"]+log(1-tau1)-log(tau1) -
-        log(1-tau1-e)+log(tau1+e)
-      RD0.e <- 1/(1+exp(-t(beta0.e) %*% t(x1))) - 1/(1+exp(-t(beta0.e) %*% t(x)))
-      RD1.e <- 1/(1+exp(-t(beta1.e) %*% t(x1))) - 1/(1+exp(-t(beta1.e) %*% t(x)))
-      ## checking the sign and computing the bounds
-      check <- sum((RD1.e-RD1.p) * (RD0.e-RD0.p))
-      if (check > 0) {
-        FD[,1,] <- pmin(RD0, RD1)
-        FD[,2,] <- pmax(RD0, RD1)
-      }
-      else {
-        FD[,1,] <- pmin(RD0, RD1, RD)
-        FD[,2,] <- pmax(RD0, RD1, RD)
-      }
-      qi$fd <- FD
-      qi$rr <- RR
-      qi.name$fd <- "First Differences: P(Y=1|X1) - P(Y=1|X)"
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1) / P(Y=1|X)"
-    }
-    if (!is.null(y)) {
-      yvar <- matrix(rep(y, num), nrow = num, byrow = TRUE)
-#      tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#      qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#      qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-      tmp.ev <- yvar - qi$ev
-      tmp.pr <- yvar - as.integer(qi$pr)
-      qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = num)
-      qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = num)
-      qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-      qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-    }
-    return(list(qi = qi, qi.name = qi.name))
-  }
-  else
-    return(qi.glm(object = object, simpar = simpar, x = x, x1 = x1, y = y))
-}
diff --git a/R/qi.rq.R b/R/qi.rq.R
deleted file mode 100644
index d5159b5..0000000
--- a/R/qi.rq.R
+++ /dev/null
@@ -1,51 +0,0 @@
-qi.rq <- function(object, simpar, x, x1=NULL, y=NULL) {
-    tau <- object$tau 
-    eps <- .Machine$double.eps^(2/3)
-    data <- eval(object$call$data, environment(object$terms)) 
-    n <- nrow(data) #grab to determine optimal bandwidth for sparsity estimation 
-
-    k <- length(getcoef(object))
-    coef <- simpar[,1:k]
-    if(k < ncol(x))
-        x <- as.data.frame(x[,names(coef(object)),drop=FALSE])
-
-    ev <- coef %*% t(x)
-    qi <- list(ev=ev)
-    qi$pr <- qi$ev
-    qi.name <- list(ev=paste("Expected Quantile Values: Q(tau=",tau,"|X)"))
-
-    #Estimate sparsity to get variance estimate for fundamental uncertainty
-    #Approximates using a difference estimate
-    h <- bandwidth.rq(tau, n) #estimate optimal bandwidth for sparsity
-    if(tau+h > 1) stop("tau+h > 1. Sparsity estimate failed. Please specify a tau closer to 0.5")
-    if(tau-h < 0) stop("tau-h < 1. Sparsity estimate failed. Please specify a tau cloer  to 0.5")
-    beta_high <- rq(object$formula, data=data, tau=tau+h)$coef
-    beta_low <- rq(object$formula, data=data, tau=tau-h)$coef
-    F_diff <- x %*% (beta_high-beta_low)
-    if(any(F_diff <= 0))
-        warning(paste(sum(F_diff <= 0), "density estimates were non-positive. Predicted values will likely be non-sensical."))
-    f <- pmax(0, (2*h)/(F_diff-eps)) #Includes machine error correction as per summary.rq for nid case
-
-    #Use asymptotic approximation of Q(tau|X,beta) distribution
-    for(i in 1:nrow(ev))
-        #Asymptotic distribution as per Koenker 2005 _Quantile Regression_ p. 72
-        qi$pr[i,] <- rnorm(length(ev[i,]), mean=ev[i,], sqrt((tau*(1-tau)))/(f*sqrt(n)))
-    qi.name$pr <- paste("Predicted Quantile Values: Q(tau=",tau,"|X)")
-    if(!is.null(x1)){
-        if(k < ncol(x1))
-            x1 <- as.data.frame(x1[,names(coef(object)),drop=FALSE])
-        ev1 <- coef %*% t(x1)
-        qi$fd <- ev1-ev
-        qi.name$fd <- paste("First Differences in Expected Quantile Values: Q(tau=",tau,"|X1)-Q(tau=",tau,"|X)")
-    }
-
-    if(!is.null(y)){
-        stop("Conditional inference is not supported in rq.")
-        #yvar <- matrix(rep(y, nrow(simpar)), nrow=nrow(simpar), byrow=TRUE)
-        #qi$att.ev <- matrix(apply(yvar-qi$ev, 1, mean), nrow=(simpar))
-        #qi$att.pr <- matrix(apply(yvar-qi$pr, 1, mean), nrow=(simpar))
-        #qi.name$att.ev <- paste("Average Treatment Effect for the Treated: Y - Expected Q(tau=",tau,"|X)")
-        #qi.name$att.pr <- paste("Average Treatment Effect for the Treated: Y - Predicted Q(tau=",tau,"|X)")
-    }
-    list(qi=qi, qi.name=qi.name)
-}
diff --git a/R/qi.summarized.R b/R/qi.summarized.R
new file mode 100644
index 0000000..702ae75
--- /dev/null
+++ b/R/qi.summarized.R
@@ -0,0 +1,103 @@
+#' Constructor for QI Summarized Class
+#' This class takes an arbitrary number of the _same_ type of 
+#' quantities of interest, labels them, then
+#' merges them into one simple printable block. In particular,
+#' this class determines which print function to use based on the
+#' type and size of data to be passed to the print function.
+#' @param title a character-string specifying the title of the QI
+#' @param x a list of summarized quantities of interest
+#' @param ... additional quantities of interest (the parameter that
+#'            titles these will be used as the name of the data.frame
+#' @return the list of QI's (invisibly)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.summarize <- function (title, x, ...) {
+  qis <- append(x, list(...))
+
+  attr(qis, 'title') <- title
+
+  class(qis) <- 'qi.summarized'
+
+  for (key in names(qis)) {
+    val <- x[[key]]
+
+    if (is.matrix(val))
+      next
+
+    qis[[key]] <- matrix(val, nrow=1, ncol=length(val))
+  }
+
+  nrows <- Map(nrow, qis)
+
+  if (all(nrows == 1))
+    attr(qis, 'print') <- 'matrix'
+
+  else
+    attr(qis, 'print') <- 'list'
+
+  invisible(qis)
+}
+
+#' Print Method for Summarized Quantities of Interest
+#' @usage \method{print}{qi.summarized}(x, \dots)
+#' @S3method print qi.summarized
+#' @param x a 'summarized.qi' object
+#' @param ... parameters to be passed to the specific print functions
+#' @return x (invisibly)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @seealso \link{special_print_MATRIX} and
+#'   \link{special_print_LIST}
+print.qi.summarized <- function (x, ...) {
+
+  if (attr(x, 'print') == 'matrix')
+    .print.qi.summarized.MATRIX(x, ...)
+
+  else if (attr(x, 'print') == 'list')
+    .print.qi.summarized.LIST(x, ...)
+
+  else
+    print(x, ...)
+}
+
+#' Method for Printing Summarized QI's in a Matrix Form
+#' @name special_print_MATRIX
+#' @aliases special_print_MATRIX .print.qi.summarized.MATRIX
+#' @note This function is used internally by Zelig
+#' @param x a 'summarized.qi' object
+#' @param ... additional parameters
+#' @return x (invisibly)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.print.qi.summarized.MATRIX <- function (x, ...) {
+  m <- matrix(NA, 0, 0)
+
+  for (key in names(x)) {
+    m <- .bind(m, x[[key]])
+  }
+
+  rownames(m) <- names(x)
+
+  cat(attr(x, 'title'), "\n")
+  print(m, ...)
+
+  invisible(x)
+}
+
+#' Method for Printing Summarized QI's in a List Form
+#' @name special_print_LIST
+#' @aliases special_print_LIST .print.qi.summarized.LIST
+#' @note This function is used internally by Zelig
+#' @param x a 'summarized.qi' object
+#' @param ... additional parameters to be used by the 'print.matrix' method
+#' @return x (invisibly)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.print.qi.summarized.LIST <- function (x, ...) {
+
+  cat(attr(x, 'title'), "\n\n")
+
+  for (key in names(x)) {
+    cat('imputed data =  ', key, "\n")
+    print(x[[key]], ...)
+    cat("\n")
+  }
+  
+  invisible(x)
+}
diff --git a/R/qi.survreg.R b/R/qi.survreg.R
deleted file mode 100644
index e91e6fe..0000000
--- a/R/qi.survreg.R
+++ /dev/null
@@ -1,112 +0,0 @@
-qi.survreg <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  model <- getzelig(object)
-  k <- length(getcoef(object))
-  sim.coef <- as.matrix(simpar[,1:k])
-  if (model %in% c("weibull", "Weibull")) {
-    if (ncol(simpar) == (k + 1)) 
-      sim.scale <- simpar[,(k+1):ncol(simpar)]
-    else
-      sim.scale <- rep(object$scale, nrow(simpar))
-  }
-  else if (model %in% c("lognorm", "tobit"))
-    sim.scale <- simpar[,(k+1):ncol(simpar)]
-  if (!is.null(y)) {
-    status <- y[,2]
-    y <- y[,1]
-  }
-  if (summary(object)$robust) {
-    x <- x[,-ncol(x), drop = FALSE]
-    if (!is.null(x1))
-      x1 <- x1[,-ncol(x1), drop = FALSE]
-  }
-  if (model %in% c("weibull", "Weibull", "lognorm", "exp"))
-    link <- survreg.distributions[[object$dist]]$itrans
-  else if (model == "tobit")
-    link <- function(x) x
-  ev.surv <- function(model, sim.coef, sim.scale, x, link) {
-    eta <- sim.coef %*% t(x)
-    theta <- as.matrix(apply(eta, 2, link))
-    if (model == "lognorm") {
-      ev <- exp(log(theta) + 0.5*(exp(sim.scale))^2)
-      dimnames(ev) <- dimnames(theta)
-    }
-    else if (model %in% c("weibull", "Weibull")) {
-      ev <- theta * gamma(1 + exp(sim.scale))
-      dimnames(ev) <- dimnames(theta)
-    }
-    else if (model %in% c("exp", "tobit")) {
-      ev <- theta
-    }
-    list(ev = as.matrix(ev), theta = as.matrix(theta))
-  }
-  pr.surv <- function(model, theta, sim.scale, ev) { 
-    if (model == "exp") 
-      pr <- rexp(length(ev), rate = 1/ev)
-    else if (model %in% c("weibull", "Weibull")) 
-      pr <- rweibull(length(ev), shape=1/exp(sim.scale),
-                         scale=theta)
-    else if (model == "lognorm") 
-      pr <- rlnorm(length(ev), meanlog = log(theta),
-                       sdlog = exp(sim.scale))
-    else if (model == "tobit") {
-      pr <- rnorm(length(ev), mean = ev, sd = exp(sim.scale))
-    }
-    pr
-  }
-  ev <- ev.surv(model, sim.coef, sim.scale, x, link)
-  pr <- matrix(NA, ncol=ncol(ev$ev), nrow=nrow(ev$ev))
-  dimnames(pr) <- dimnames(ev$ev) 
-  for (i in 1:nrow(ev$ev))
-    pr[i,] <- pr.surv(model, ev$theta[i,], sim.scale[i], ev$ev[i,])
-  qi <- list(ev = ev$ev, pr = pr)
-  qi.name <- list(ev="Expected Values: E(Y|X)",
-                  pr="Predicted Values: Y|X")
-  if (!is.null(x1)) {
-    ev1 <- ev.surv(model, sim.coef, sim.scale, x1, link)
-    qi$fd <- ev1$ev - qi$ev
-    qi.name$fd <- "First Differences: E(Y|X1)-E(Y|X)"
-  }
-  if (!is.null(y)) {
-    if (any(status == 0)) { 
-      tmp <- list(ev = ev$ev[, which(status == 0)],
-                  theta = ev$theta[, which(status == 0)])
-      y.obs <- matrix(y[status == 1], nrow = nrow(qi$ev),
-                      ncol = length(y[status == 1]), byrow = TRUE)
-      y.imp <- matrix(NA, nrow = nrow(qi$ev), ncol = length(y[status == 0]))
-      tmp.scale <- c(matrix(sim.scale, nrow = length(sim.scale),
-                            ncol = length(y[status == 0])))
-      y.imp <- matrix(pr.surv(model, tmp$theta, tmp.scale, tmp$ev),
-                      nrow = nrow(qi$ev), ncol = length(y[status == 0]))
-      y.c <- y[status == 0]
-      idx <- t(apply(y.imp, 1, '>=', y.c))
-      count <- 1
-      while ((sum(idx) < length(idx)) & count < 1001) {
-        count <- count + 1
-        tmp.idx <- which(!idx, TRUE)
-        y.imp[tmp.idx] <- pr.surv(model, tmp$theta[tmp.idx],
-                                  sim.scale[tmp.idx[,1]], tmp$ev[tmp.idx])
-        idx[tmp.idx] <- y.imp[tmp.idx] >= y.c[tmp.idx[,2]]
-      }
-      if (count == 1001) {
-        warning("    Maximum number of imputed values (1000) reached for censored Y.  \n    Using censoring point as observed value, since Pr(Y > Yc | sims) <= 0.001.")
-        y.imp[which(idx == 0, TRUE)] <- y.c[which(idx == 0, TRUE)[,2]]
-      }
-      yvar <- matrix(NA, ncol = length(y), nrow = nrow(qi$ev))
-      yvar[, which(status == 1)] <- y.obs
-      yvar[, which(status == 0)] <- y.imp
-    }
-    else
-      yvar <- matrix(y, ncol = length(y), nrow = nrow(qi$ev), byrow = TRUE)
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    tmp.pr <- qi$tt.pr <- yvar - qi$pr
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#    qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-    tmp.ev <- yvar - qi$ev
-    tmp.pr <- yvar - qi$pr
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}  
diff --git a/R/qi.svyglm.R b/R/qi.svyglm.R
deleted file mode 100644
index 2adb542..0000000
--- a/R/qi.svyglm.R
+++ /dev/null
@@ -1,82 +0,0 @@
-qi.svyglm <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  check <- FALSE
-  model <- getzelig(object)
-  k <- length(getcoef(object))
-  coef <- simpar[,1:k]
-  if (k < ncol(simpar)) 
-    alpha <- simpar[,(k+1):ncol(simpar)]
-  eta <- coef %*% t(x)
-  theta <- matrix(object$family$linkinv(eta), nrow = nrow(coef))
-  pr <- ev <- matrix(NA, nrow = nrow(theta), ncol = ncol(theta))
-  dimnames(pr) <- dimnames(ev) <- dimnames(theta)
-  if (model %in% c("logit.survey", "probit.survey")) {  
-    check <- TRUE
-    ev <- theta
-    for (i in 1:ncol(theta)) 
-      pr[,i] <- as.character(rbinom(length(ev[,i]), 1, ev[,i]))
-    if (!is.null(y)) {
-      if (NCOL(y) > 1)
-        y <- y[,1]
-    }
-  }
-  else if (model == "normal.survey" ) {  
-    ev <- theta
-    for (i in 1:nrow(ev)) 
-      pr[i,] <- rnorm(length(ev[i,]), mean = ev[i,], sd = alpha[i])
-  }  
-  else if (model == "gamma.survey") {  
-    ev <- theta 
-    for (i in 1:nrow(ev))  
-      pr[i,] <- rgamma(length(ev[i,]), shape = alpha[i], scale = theta[i,]/alpha[i])
-  }
-  else if (model %in% c("poisson.survey")) {	
-    ev <- theta
-    for (i in 1:ncol(ev))
-      pr[,i] <- rpois(length(ev[,i]), lambda = ev[,i])
-  }
-  else if (model == "negbin") {  
-    ev <- theta
-    for (i in 1:nrow(ev)) 
-      pr[i,] <- rnegbin(length(ev[i,]), mu = ev[i,], theta = alpha[i])
-  }
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if (!is.null(x1)){
-    ev1 <- theta1 <- matrix(object$family$linkinv(coef %*% t(as.matrix(x1))),
-                     nrow = nrow(coef))
-    qi$fd <- ev1-ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-    if (model %in% c("logit.survey", "probit.survey")) {
-      qi$rr <- ev1/ev
-      qi.name$rr <- "Risk Ratios: P(Y=1|X1)/P(Y=1|X)"
-    }
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    tmp.ev <- yvar - qi$ev
-    if (check)
-      tmp.pr <- yvar - as.integer(qi$pr)
-    else
-      tmp.pr <- yvar - qi$pr
-#    tmp.ev <- qi$tt.ev <- yvar - qi$ev
-#    if (check)
-#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
-#    else
-#      tmp.pr <- qi$tt.pr <- yvar - qi$pr
-#    qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-#    qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(simpar))
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
-
diff --git a/R/qi.vglm.R b/R/qi.vglm.R
deleted file mode 100644
index bd9c895..0000000
--- a/R/qi.vglm.R
+++ /dev/null
@@ -1,202 +0,0 @@
-qi.vglm <- function (object, simpar, x, x1=NULL, y = NULL) {
-  model <- getzelig(object)
-  cm <- object at constraints
-  if (model=="mlogit")
-    ndim <- (ncol(object at y)-1)
-  else if (model=="blogit" || model=="bprobit")
-    ndim <- 3
-  v <- rep(list(NULL), ndim)
-  for(i in 1:length(cm)) {
-    if(ncol(cm[[i]])==1){
-      for(j in 1:ndim)
-        if(sum(cm[[i]][j,])==1)
-          v[[j]] <- c(v[[j]], names(cm)[i])
-    }
-    else {
-      for (j in 1:ndim)
-        if (sum(cm[[i]][j,])==1)
-          v[[j]] <- c(v[[j]], paste(names(cm)[i], ":", j, sep=""))
-    }
-  }
-  all.coef <- NULL
-  for(i in 1:ndim)
-    all.coef <- c(all.coef, list(simpar[,v[[i]]]))
-  if (model=="mlogit"){
-    if(is.null(colnames(object at y)))
-      cnames <- ynames <- seq(1,ndim+1,1)
-    else
-      cnames <- ynames <- colnames(object at y)
-    for(i in 1:(ndim+1))
-      cnames[i] <- paste("Pr(Y=", ynames[i],")",sep="")
-  }
-  else if(model=="blogit" || model=="bprobit") 
-    cnames <- c("Pr(Y1=0, Y2=0)", "Pr(Y1=0, Y2=1)",
-                "Pr(Y1=1, Y2=0)", "Pr(Y1=1, Y2=1)")
-  else
-    stop(paste(model, "is not supported"))
-  pp.vglm <- function(object, cm, all.coef, x, ndim, cnames){
-    xm <- rep(list(NULL), ndim)
-    for (i in 1:length(cm))
-      for (j in 1:length(xm))
-        if (sum(cm[[i]][j,]) == 1)
-          xm[[j]] <- c(xm[[j]], x[,names(cm)[i]])
-    sim.eta <- NULL
-    for (i in 1:ndim)
-      sim.eta <- cbind(sim.eta, all.coef[[i]] %*% as.matrix(xm[[i]]))
-    ev <- object at family@linkinv(sim.eta)
-    colnames(ev) <- cnames
-    ev
-  }
-  pr.vglm <- function(object, ev, ynames) { # To assign predicted values.
-    model <- getzelig(object)
-    if (model == "mlogit") {
-      k <- ncol(ev)
-      Ipr <- sim.cut <- matrix(NA, nrow = nrow(ev), ncol = ncol(ev))
-      colnames(Ipr) <- colnames(sim.cut) <- colnames(ev)
-      sim.cut[,1] <- ev[,1]
-      for (j in 2:k) 
-        sim.cut[,j] <- sim.cut[,(j-1)] + ev[,j]
-      tmp <- runif(nrow(ev), 0, 1)
-      for (l in 1:k) 
-        Ipr[,l] <- tmp > sim.cut[,l]
-      pr <- NULL
-      for (m in 1:nrow(Ipr))
-        pr[m] <- 1 + sum(Ipr[m,])
-      pr <- factor(pr, levels = sort(unique(pr)), labels = ynames)
-      if (model == "mlogit")
-        pr <- factor(pr, ordered = FALSE)
-      pr <- matrix(pr, nrow = dim(ev)[1])
-    }
-    else if (model == "blogit" || model == "bprobit") {
-      mpr <- cbind((ev[,3] + ev[,4]), (ev[,2] + ev[,4]))
-      index <- matrix(NA, ncol = 2, nrow = nrow(mpr))
-      for (i in 1:2)
-        index[,i] <- rbinom(length(mpr[,i]), 1, mpr[,i])
-      pr <- matrix(NA, nrow(simpar), 4)
-      pr[,1] <- as.integer(index[,1] == 0 & index[,2] == 0)
-      pr[,2] <- as.integer(index[,1] == 0 & index[,2] == 1)
-      pr[,3] <- as.integer(index[,1] == 1 & index[,2] == 0)
-      pr[,4] <- as.integer(index[,1] == 1 & index[,2] == 1)
-      colnames(pr) <- c("(Y1=0, Y2=0)", "(Y1=0, Y2=1)", "(Y1=1, Y2=0)",
-                        "(Y1=1, Y2=1)")
-    }
-    pr
-  }
-  if (nrow(x) == 1) {
-    ev <- pp.vglm(object, cm, all.coef, x, ndim, cnames)
-    pr <- pr.vglm(object, ev, ynames)
-  }
-  else {
-    ev <- array(dim = c(nrow(simpar), ndim+1, nrow(x)))
-    if (model == "mlogit")
-      pr <- matrix(nrow=nrow(simpar), ncol=nrow(x))
-    else if (model == "blogit" || model == "bprobit")
-      pr <- array(dim = c(nrow(simpar), 4, nrow(x)))
-    for (i in 1:nrow(x)){
-      tmp <- matrix(x[i,], nrow=1)
-      colnames(tmp) <- colnames(x)
-      rownames(tmp) <- rownames(x)[i]
-      tmp.ev <- pp.vglm(object, cm, all.coef, tmp, ndim, cnames)
-      tmp.pr <- pr.vglm(object, tmp.ev, ynames)
-      ev[,,i] <- tmp.ev
-      if (model == "blogit" || model == "bprobit")
-        pr[,,i] <- tmp.pr
-      else if (model == "mlogit")
-        pr[,i] <- tmp.pr
-    }
-    dimnames(ev) <- list(rownames(tmp.ev), colnames(tmp.ev), NULL)
-    if (model == "blogit" || model == "bprobit")
-      dimnames(pr) <- list(rownames(tmp.pr), colnames(tmp.pr), NULL)
-    else if (model == "mlogit")
-      dimnames(pr) <- list(c(1:nrow(simpar)), NULL)
-  }
-  if (model=="mlogit") {
-    qi <- list(ev=ev, pr=pr)
-    qi.name <- list(ev="Predicted Probabilities: Pr(Y=k|X)",
-                    pr="Predicted Values: Y=k|X")
-  }
-  else if (model=="blogit" || model=="bprobit") {
-    qi <- list(ev=ev, pr=pr)
-    qi.name <- list(ev="Predicted Probabilities: Pr(Y1=k,Y2=l|X)",
-                    pr="Predicted Values: (Y1,Y2)|X")
-  }
-  if (!is.null(x1)) {
-    if (nrow(x1) == 1)
-      ev1 <- pp.vglm(object, cm, all.coef, x1, ndim, cnames)
-    else {
-      ev1 <- array(dim = c(nrow(simpar), ndim+1, nrow(x1)))
-      for (i in 1:nrow(x1)) {
-        tmp <- matrix(x1[i,], nrow=1)
-        colnames(tmp) <- colnames(x1)
-        rownames(tmp) <- rownames(x1)[i]
-        tmp <- pp.vglm(object, cm, all.coef, tmp, ndim, cnames)
-        ev1[,,i] <- tmp
-      }
-      dimnames(ev1) <- list(rownames(tmp), colnames(tmp), NULL)
-    }
-    qi$fd <- ev1 - ev
-    if (model=="mlogit") {
-      qi$rr <- ev1 / ev
-      qi.name$fd <- "First Differences: Pr(Y=k|X1) - Pr(Y=k|X)"
-      qi.name$rr <- "Risk Ratios: Pr(Y=k|X1) / Pr(Y=k|X)"
-    }
-    else if (model=="blogit" || model=="bprobit") {
-      qi$rr <- ev1/ev
-      qi.name$fd <- "First Differences: Pr(Y1=k,Y2=l|X1) - Pr(Y1=k,Y2=l|X)"
-      qi.name$rr <- "Risk Ratios: Pr(Y1=k,Y2=l|X1) / Pr(Y1=k,Y2=l|X)"
-    }
-  }
-  if (!is.null(y)) {
-    tmp.ev <- tmp.pr <- array(NA, dim = dim(qi$ev))
-    qi$att.ev <- qi$att.pr <- matrix(NA, dim(qi$ev)[1], dim(qi$ev)[2])
-    if (model=="mlogit" || model=="oprobit") {
-      yvar <- matrix(NA, nrow = length(y), ncol = length(ynames))
-      pr.idx <- array(NA, dim = c(nrow(pr), length(ynames), nrow(x)))
-      for (i in 1:length(ynames)) {
-        yvar[,i] <- as.integer(y == ynames[i])
-        pr.idx[,i,] <- as.integer(pr[,i] == ynames[i])
-      }
-      colnames(qi$att.ev) <- colnames(qi$att.pr) <- ynames
-    }
-    else if (model=="blogit" || model=="bprobit") {
-      yvar <- matrix(NA, nrow = nrow(y), ncol = 4)
-      yvar[,1] <- as.integer(y[,1] == 0 & y[,2] == 0)
-      yvar[,2] <- as.integer(y[,1] == 0 & y[,2] == 1)
-      yvar[,3] <- as.integer(y[,1] == 1 & y[,2] == 0)
-      yvar[,4] <- as.integer(y[,1] == 1 & y[,2] == 1)
-      pr.idx <- array(NA, dim = c(nrow(pr), 4, nrow(x)))
-      for (i in 1:4)
-        pr.idx[,i,] <- as.integer(pr[,i,])
-      colnames(qi$att.ev) <- colnames(qi$att.pr) <-
-        c("(Y1=0, Y2=0)", "(Y1=0, Y2=1)",
-          "(Y1=1, Y2=0)", "(Y1=1, Y2=1)")
-    }
-    for (j in 1:ncol(yvar)) {
-      for (i in 1:nrow(simpar)) {
-        tmp.ev[i,j,] <- yvar[,j] - qi$ev[i,j,]
-        tmp.pr[i,j,] <- yvar[,j] - pr.idx[i,j,]
-      }
-      qi$att.ev[,j] <- apply(tmp.ev[,j,], 1, mean)
-      qi$att.pr[,j] <- apply(tmp.pr[,j,], 1, mean)
-    }
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/qi.zmlm.R b/R/qi.zmlm.R
deleted file mode 100644
index cad3fdc..0000000
--- a/R/qi.zmlm.R
+++ /dev/null
@@ -1,71 +0,0 @@
-qi.zmlm <- function(object, simpar, x, x1 = NULL, y = NULL) {
-  
-  k <- length(getcoef(object))
-  coef <- simpar[,1:k]
-  alpha <- simpar[,(k+1):ncol(simpar)]
-  if(length(dim(coef(object))))
-    rw <- rownames(coef(object))
-  else
-    rw <- names(coef(object))
-  
-  
-  if (k < ncol(x))
-    x <- as.data.frame(x[,rw,drop = FALSE])
-  dx <- dim(x)[[2]]
-  ##return a list to obtain expected values for each element
-  ##that stands for each depVar in mlm or maov
-  
-  coeflist <- split.simpar(coef, dx)     
-  ev <- matrix(,nrow(simpar), length(coeflist))
-  colnames(ev) <- names(coeflist)
-  for(n in 1:ncol(ev))
-    ev[,n] <- coeflist[[n]] %*% t(x)
-  qi <- list(ev=ev)
-  qi.name <- list(ev="Expected Values: E(Y|X)")
-  if(!is.null(x1)){
-    if (k < ncol(x1))
-      x1 <- as.data.frame(x1[,rw,drop=FALSE])
-    dx1 <- dim(x1)[[2]]
-    coeflist <- split.simpar(coef, dx1)     
-    ev1 <- matrix(,nrow(simpar), length(coeflist))
-    colnames(ev1) <- names(coeflist)
-    for(n in 1:ncol(ev1))
-      ev1[,n] <- coeflist[[n]] %*% t(x1)
-    
-    qi$fd <- ev1-ev
-    qi.name$fd <-
-      "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(simpar)), nrow = nrow(simpar), byrow = TRUE)
-    #tmp.ev <- qi$tt.ev <- apply(as.matrix(qi$ev), 2, function(m)yvar - m)
-    tmp.ev <- apply(as.matrix(qi$ev), 2, function(m)yvar - m)
-    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(simpar))
-    #qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
-    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-
-
-split.simpar <- function(coef, dx){
-  ix <- grep("Intercept", colnames(coef))
-  nmall <- colnames(coef)[ix]
-  vecnm <- NULL
-  for(nm in nmall)
-    vecnm <- c(vecnm, sub("(.*):(.*)", "\\1",nm))
-  
-  dc <- dim(coef)[[2]]
-  nt <- dc/dx
-  lst <- list()
-  ptf <- 0
-  for(n in 1:nt){
-    pti <- ptf + 1 
-    ptf <- ifelse(n<= 1, dx, dx +ptf) 
-    pc <- coef[,pti:ptf]
-    lst <- c(lst, list(pc))
-  }
-  names(lst) <- vecnm
-  return(lst)
-}
diff --git a/R/relogit.R b/R/relogit.R
index 6052c42..78c8b7e 100644
--- a/R/relogit.R
+++ b/R/relogit.R
@@ -1,5 +1,22 @@
-relogit <- function(formula, data = sys.parent(), tau = NULL,
-                    bias.correct = TRUE, case.control = "prior", ...){
+#' Fit a rare-event logistic model in Zelig
+#' 
+#' Fits a rare-event (``relogit'') model.
+#' @param formula a formula object
+#' @param data ...
+#' @param tau ...
+#' @param bias.correct ...
+#' @param case.control ...
+#' @param ... ???
+#' @return a ``relogit'' ``glm'' object
+#' @export
+relogit <- function(
+                    formula,
+                    data = sys.parent(),
+                    tau = NULL,
+                    bias.correct = TRUE,
+                    case.control = "prior",
+                    ...
+                    ){
   mf <- match.call()
   mf$tau <- mf$bias.correct <- mf$case.control <- NULL
   if (!is.null(tau)) {
@@ -9,7 +26,8 @@ relogit <- function(formula, data = sys.parent(), tau = NULL,
     ck1 <- grep("p", case.control)
     ck2 <- grep("w", case.control)
     if (length(ck1) == 0 & length(ck2) == 0)
-      stop("choose either case.control = \"prior\" or case.control = \"weighting\"")
+      stop("choose either case.control = \"prior\" ",
+           "or case.control = \"weighting\"")
     if (length(ck2) == 0)
       weighting <- FALSE
     else 
@@ -26,7 +44,8 @@ relogit <- function(formula, data = sys.parent(), tau = NULL,
     res$lower.estimate <- eval(as.call(mf))
     mf$tau <- max(tau)
     res$upper.estimate <- eval(as.call(mf))
-    class(res) <- c("relogit2", "relogit")
+    res$formula <- formula
+    class(res) <- c("Relogit2", "Relogit")
     return(res)
   }
   else {
@@ -90,9 +109,271 @@ relogit <- function(formula, data = sys.parent(), tau = NULL,
 
     res$linear.predictors <- t(res$coefficients) %*% t(X) 
     res$fitted.values <- 1/(1+exp(-res$linear.predictors))
-    res$zelig <- "relogit"
-    class(res) <- c("relogit", "glm")
+    res$zelig <- "Relogit"
+    class(res) <- c("Relogit", "glm")
     return(res)
   }
 }
 
+#' Zelig2 bridge function
+#'
+#' ...
+#' @note  T
+#' @param formula a formula object
+#' @param ... ignored parameters
+#' @param tau ...
+#' @param bias.correct ...
+#' @param case.control ...
+#' @param data a data.frame that will be used to fit the model
+#' @return a list used internally by zelig
+#' @export
+zelig2relogit <- function(
+                          formula,
+                          ...,
+                          tau = NULL,
+                          bias.correct = NULL,
+                          case.control = NULL,
+                          data
+                          ) {
+
+  # Catch NULL case.control
+  if (is.null(case.control))
+    case.control <- "prior"
+
+  # Catch NULL bias.correct
+  if (is.null(bias.correct))
+    bias.correct = TRUE
+
+  # Construct formula. Relogit models have the structure:
+  #   cbind(y, 1-y) ~ x1 + x2 + x3 + ... + xN
+  # Where y is the response.
+  form <- update(formula, cbind(., 1 - .) ~ .)
+
+  # Set the environment to be this function's
+  environment(form) <- environment()
+
+  # Return the obvious answer
+  z(
+    .function = relogit,
+    formula = form,
+    bias.correct = bias.correct,
+    case.control = case.control,
+    tau = tau,
+    data = data
+    )
+}
+#' Estimate Parameters for the ``relogit'' Zelig Model
+#'
+#' Returns estimates on parameters, as well as, specifying link and
+#' inverse-link functions.
+#' @note This method merely calls ``param.logit''.
+#' @usage \method{param}{relogit}(obj, num, ...)
+#' @S3method param relogit
+#' @param obj a zelig object containing the fitted model
+#' @param num an integer specifying the number of simulations to compute
+#' @param ... unspecified parameters
+#' @return a list specifying important parameters for the ``relogit'' model
+param.relogit <- param.logit
+
+
+#' Estimate Parameters for the ``relogit'' Zelig Model
+#'
+#' Returns estimates on parameters, as well as, specifying link and inverse-link
+#' functions.
+#' @usage \method{param}{relogit2}(obj, num, x, ...)
+#' @S3method param relogit2
+#' @param obj a zelig object containing the fitted model
+#' @param num an integer specifying the number of simulations to compute
+#' @param x ideally we should be able to remove this parameter
+#' @param ... unspecified parameters
+#' @return a list specifying important parameters for the ``relogit'' model
+param.relogit2 <- function (obj, num, x, ...) {
+  object <- obj
+  stop("Currently zelig does not support relogit models containing 2 ",
+       "tau parameters")
+
+  pping <- function(tmp0, tmp1, num, bootstrap, x) {
+
+    par0 <- param.relogit(tmp0, num=num, x=x, bootstrap=bootstrap)
+    par1 <- param.relogit(tmp1, num=num, x=x, bootstrap=bootstrap)
+
+    P00 <- qi.relogit(tmp0, par0, x=x)
+
+    P00 <- as.matrix(qi.relogit(tmp0, param = par0, x=x)$qi$ev)
+    message("P01")
+    P10 <- as.matrix(qi.relogit(tmp1, param = par1, x=x)$qi$ev)
+
+    test <- P00[,1] < P10[,1]
+    par0 <- as.matrix(par0[test,])
+    par1 <- as.matrix(par1[test,])
+    list(par0 = par0, par1 = par1)
+  }
+  tmp0 <- tmp1 <- object
+
+  tmp0$result <- object$result$lower.estimate
+  tmp1$result <- object$result$upper.estimate
+
+  tmp <- pping(tmp0, tmp1, num = num, bootstrap=bootstrap, x=x)
+
+  par0 <- tmp$par0
+  par1 <- tmp$par1
+
+
+  while (nrow(par0) < num) {
+    tmp <- pping(tmp0, tmp1, num=num, bootstrap=bootstrap, x=x)
+    par0 <- rbind(par0, tmp$par0)
+    par1 <- rbind(par1, tmp$par1)
+  }
+  if (nrow(par0) > num) {
+    par0 <- par0[1:num,]
+    par1 <- par1[1:num,]
+  }
+  par0 <- as.matrix(par0)
+  par1 <- as.matrix(par1)
+  rownames(par0) <- 1:nrow(par0)
+  rownames(par1) <- 1:nrow(par1)
+  return(list(par0 = par0, par1 = par1))    
+}
+#' simulate quantities of interest for the zelig ``relogit'' model
+#'
+#' ...
+#' @usage
+#' \method{qi}{relogit}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi relogit
+#' @param obj a zelig object, containing the fitted ``relogit'' model
+#' @param x a ``setx'' object
+#' @param x1 a ``setx'' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#' though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a ``parameter'' object containing information about the link,
+#' inverse-link, and simulated parameters
+#' @return a param
+qi.relogit <- qi.logit
+
+
+#' simulate quantities of interest for the zelig ``relogit'' model
+#'
+#' ...
+#' @usage
+#' \method{qi}{relogit2}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi relogit2
+#' @param obj a zelig object, containing the fitted ``relogit'' model
+#' @param x a ``setx'' object
+#' @param x1 a ``setx'' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#' though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a ``parameter'' object containing information about the link,
+#' inverse-link, and simulated parameters
+#' @return a param
+qi.relogit2 <- function (obj, x = NULL, x1 = NULL, y = NULL, num=1000, param = NULL) {
+  simpar <- param
+  # Aliased, because
+  object <- obj
+
+  # This model needs work, so it will be discontinued for now
+  stop("Relogit 2 is not currently supported")
+
+  num <- nrow(simpar$par0)
+  tmp0 <- object$result$lower.estimate
+  tmp1 <- object$result$upper.estimate
+  
+  low <- qi.relogit(tmp0, simpar$par0, x, x1)
+  up <- qi.relogit(tmp1, simpar$par1, x, x1)
+
+  PP <- PR <- array(NA, dim = c(num, 2, nrow(x)),
+                    dimnames = list(NULL, c("Lower Bound", "Upper Bound"),
+                      rownames(x)))
+  PP[,1,] <- P00 <- low$qi$ev
+  PP[,2,] <- P10 <- up$qi$ev
+  qi <- list(ev = PP)
+  qi.name <- list(ev = "Expected Values: E(Y|X)")
+  if (!is.null(x1)) {
+    FD <- RR <- array(NA, dim = c(num, 2, nrow(x)),
+                      dimnames = list(NULL,
+                                      d2 = c("Lower Bound", "Upper Bound"), 
+                                      rownames(x)
+                      ))
+
+    sim01 <- qi.relogit(tmp0, simpar$par0, x = x1, x1 = NULL)
+    sim11 <- qi.relogit(tmp1, simpar$par1, x = x1, x1 = NULL)
+    tau0 <- object$result$lower.estimate$tau
+    tau1 <- object$result$upper.estimate$tau
+    P01 <- as.matrix(sim01$qi$ev)
+    P11 <- as.matrix(sim11$qi$ev)
+    OR <- (P10/(1-P10)) / (P00/(1-P00))
+    RR[,1,] <- pmin(as.matrix(P01/P00), as.matrix(P11/P10))
+    RR[,2,] <- pmax(as.matrix(P01/P00), as.matrix(P11/P10))
+    RD0 <- as.matrix(P01-P00)
+    RD1 <- as.matrix(P11-P10)
+    RD <- as.matrix((sqrt(OR)-1) / (sqrt(OR)+1))
+    ## checking monotonicity
+    y.bar <- mean(object$y)
+    beta0.e <- coef(tmp0)
+    beta1.e <- coef(tmp1)
+    ## evaluating RD at tau0 and tau1
+    RD0.p <- 1/(1+exp(-t(beta0.e) %*% t(x1))) - 1/(1+exp(-t(beta0.e) %*% t(x)))
+    RD1.p <- 1/(1+exp(-t(beta1.e) %*% t(x1))) - 1/(1+exp(-t(beta1.e) %*% t(x)))
+    ## evaluating RD at tau0+e and tau1+e
+    e <- 0.001
+    beta0.e["(Intercept)"] <- beta0.e["(Intercept)"]+log(1-tau0)-log(tau0) -
+      log(1-tau0-0.001)+log(tau0+0.001)
+    beta1.e["(Intercept)"] <- beta1.e["(Intercept)"]+log(1-tau1)-log(tau1) -
+      log(1-tau1-e)+log(tau1+e)
+    RD0.e <- 1/(1+exp(-t(beta0.e) %*% t(x1))) - 1/(1+exp(-t(beta0.e) %*% t(x)))
+    RD1.e <- 1/(1+exp(-t(beta1.e) %*% t(x1))) - 1/(1+exp(-t(beta1.e) %*% t(x)))
+    ## checking the sign and computing the bounds
+    check <- sum((RD1.e-RD1.p) * (RD0.e-RD0.p))
+    if (check > 0) {
+      FD[,1,] <- pmin(RD0, RD1)
+      FD[,2,] <- pmax(RD0, RD1)
+    }
+    else {
+      FD[,1,] <- pmin(RD0, RD1, RD)
+      FD[,2,] <- pmax(RD0, RD1, RD)
+    }
+    qi$fd <- FD
+    qi$rr <- RR
+    qi.name$fd <- "First Differences: P(Y=1|X1) - P(Y=1|X)"
+    qi.name$rr <- "Risk Ratios: P(Y=1|X1) / P(Y=1|X)"
+  }
+  if (!is.null(y)) {
+    yvar <- matrix(rep(y, num), nrow = num, byrow = TRUE)
+#      tmp.ev <- qi$tt.ev <- yvar - qi$ev
+#      tmp.pr <- qi$tt.pr <- yvar - as.integer(qi$pr)
+#      qi.name$tt.ev <- "Unit Treatment Effect for the Treated: Y - EV"
+#      qi.name$tt.pr <- "Unit Treatment Effect for the Treated: Y - PR"
+    tmp.ev <- yvar - qi$ev
+    tmp.pr <- yvar - as.integer(qi$pr)
+    qi$att.ev <- matrix(apply(tmp.ev, 1, mean), nrow = num)
+    qi$att.pr <- matrix(apply(tmp.pr, 1, mean), nrow = num)
+    qi.name$att.ev <- "Average Treatment Effect for the Treated: Y - EV"
+    qi.name$att.pr <- "Average Treatment Effect for the Treated: Y - PR"
+  }
+  return(list(qi = qi, qi.name = qi.name))
+}
+
+#' Describe a `logit' model to Zelig
+#' @usage \method{describe}{relogit}(...)
+#' @S3method describe relogit
+#' @param ... ignored parameters
+#' @return a list to be processed by `as.description'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+describe.relogit <- function(...) {
+  # return list
+  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+       year     = 2007,
+       category = "dichotomous",
+       text = "Rare Events Logistic Regression for Dichotomous Dependent Variables"
+       )
+}
+
+# Return Names of Relogit Model
+#
+names.Relogit <- function(x){
+  res <- list(default=names(unclass(x)),
+            estimate = names(x$lower.estimate), tau = x$tau)
+  class(res) <- "names.relogit"
+  res
+}
diff --git a/R/repl.R b/R/repl.R
index 4d083a4..d2da35c 100644
--- a/R/repl.R
+++ b/R/repl.R
@@ -1,2 +1,81 @@
-repl <- function(object, data, ...)
+#' Generic Method for Replicating Data
+#' @param object a 'zelig' object
+#' @param ... parameters
+#' @return a replicated object
+#' @export
+#' @author Kosuke Imai and Olivia Lau \email{mowen@@iq.harvard.edu}
+repl <- function(object, ...)
   UseMethod("repl")
+#' Default Method for Replicating Statistics
+#'
+#' Replicate a simulation
+#' @usage \method{repl}{default}(object, data=NULL, ...)
+#' @S3method repl default
+#' @param object an object to replicate
+#' @param data a data.frame
+#' @param ... ignored parameters
+#' @return a replicated object
+#' @author Kosuke Imai and Olivia Lau \email{mowen@@iq.harvard.edu}
+repl.default <- function(object, data=NULL, ...) {
+  if (!is.null(data))
+    object$call$data <- data
+
+  eval(object$call$data, sys.parent())
+}
+#' Method for Replicating Simulated Quantities of Interest
+#'
+#' Replicate simulated quantities of interest
+#' @usage \method{repl}{sim}(object, x=NULL, x1=NULL, y=NULL,
+#'                     num=1000,
+#'                     prev = NULL, bootstrap = FALSE,
+#'                     boot.fn=NULL,
+#'                     cond.data = NULL, ...)
+#' @S3method repl sim
+#' @param object a 'zelig' object
+#' @param x a 'setx' object
+#' @param x1 a secondary 'setx' object used to perform particular computations
+#'   of quantities of interest
+#' @param y a parameter reserved for the computation of particular quantities of
+#'   interest (average treatment effects). Few models currently support this
+#'   parameter
+#' @param num an integer specifying the number of simulations to compute
+#' @param prev ignored
+#' @param bootstrap ignored
+#' @param boot.fn ignored
+#' @param cond.data ignored
+#' @param ... special parameters which are reserved for future versions of Zelig
+#' @return a 'sim' object storing the replicated quantities of interest
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+repl.sim <- function(object, x=NULL, x1=NULL, y=NULL,
+                     num=1000,
+                     prev = NULL, bootstrap = FALSE,
+                     boot.fn=NULL,
+                     cond.data = NULL, ...) {
+  # would rather use a factory function
+  new.call <- object$call
+
+
+  # this should always give the same value...
+  rep.zelig <- eval(object$zcall, sys.parent())
+
+  # 
+  new.call$z <- rep.zelig
+
+  # x
+  new.call$x <- if (is.null(x))
+    object$x
+  else
+    x
+
+  # x1
+  new.call$x1 <- if (is.null(x1))
+    object$x1
+  else
+    x1
+
+  # how is this EVER true?
+  if (!is.null(object$seed))
+    set.seed(object$seed)
+
+  eval(new.call, sys.parent())
+}
diff --git a/R/repl.default.R b/R/repl.default.R
deleted file mode 100644
index 74dd4e1..0000000
--- a/R/repl.default.R
+++ /dev/null
@@ -1,5 +0,0 @@
-repl.default <- function(object, data=NULL, ...) {
-  if (!is.null(data))
-    object$call$data <- data
-  eval(object$call, sys.parent())
-}
diff --git a/R/repl.zelig.R b/R/repl.zelig.R
deleted file mode 100644
index d8a5536..0000000
--- a/R/repl.zelig.R
+++ /dev/null
@@ -1,21 +0,0 @@
-repl.zelig <- function(object, data=NULL, prev = NULL, x=NULL,
-  x1=NULL, bootfn=NULL, ...) {
-  rep.zelig <- eval(object$zelig.call, sys.parent())
-  object$call$object <- rep.zelig
-  if (is.null(x))
-    object$call$x <- object$x
-  else
-    object$call$x <- x
-  if (is.null(x1))
-    object$call$x1 <- object$x1
-  else
-    object$call$x1
-  if (!is.null(prev))
-    object$call$prev <- prev
-  if (!is.null(object$seed)) set.seed(object$seed)
-  eval(object$call, sys.parent())
-}
-
-
-
-
diff --git a/R/robust.glm.hook.R b/R/robust.glm.hook.R
new file mode 100644
index 0000000..93f2ef5
--- /dev/null
+++ b/R/robust.glm.hook.R
@@ -0,0 +1,32 @@
+#' Hook for ``glm'' Models in Zelig
+#'
+#' Adds support for robust error-estimates in the Zelig ``glm'' models.
+#' @param obj a zelig object
+#' @param zcall the original call to the zelig model
+#' @param call the call that will be evaluated for the 
+#' @param robust a logical specifying whether or not to use robust error
+#' estimates
+#' @param ... ignored parameters
+#' @return the fitted model object
+#' @export
+robust.glm.hook <- function (obj, zcall, call, robust = FALSE, ...) {
+
+  # If "robust" is a list, 
+  if (is.list(robust)) {
+
+    # if none of the entries of robust belong to the vector below
+    if (!any(robust$method %in% c("vcovHAC", "kernHAC", "weave")))
+      stop("robust contains elements that are not supported.")
+
+    # Acquire the value of the robust parameter
+    obj$robust <- robust
+  }
+  else if (!is.logical(robust))
+    stop("Invalid input for robust: choose either TRUE or a list of options.")
+
+  # Set as a robust generalized linear model model (in addition to other types)
+  class(obj) <- c("glm.robust", class(obj))
+
+  # Return...
+  obj
+}
diff --git a/R/robust.hook.R b/R/robust.hook.R
new file mode 100644
index 0000000..6862564
--- /dev/null
+++ b/R/robust.hook.R
@@ -0,0 +1,20 @@
+#' @export
+robust.gee.hook <- function(obj, Zall, Call, robust, ...) {
+  
+  # Assume robust, if nothing is specified
+  if (missing(robust) || is.null(robust))
+    robust <- TRUE
+
+  # Invalid robust parameters should stop program
+  if (!is.logical(robust))
+    stop("robust must be a logical (TRUE or FALSE)")
+
+  if (robust)
+    class(obj) <- c("gee.robust", class(obj))
+
+  else
+    class(obj) <- c("gee.naive", class(obj))
+
+  #
+  obj
+}
diff --git a/R/rocplot.R b/R/rocplot.R
deleted file mode 100644
index 0952a61..0000000
--- a/R/rocplot.R
+++ /dev/null
@@ -1,43 +0,0 @@
-rocplot <- function(y1, y2, fitted1, fitted2,
-                    cutoff = seq(from=0, to=1, length=100), lty1="solid",
-                    lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
-                    col1=par("col"), col2=par("col"), main="ROC Curve",
-                    xlab = "Proportion of 1's Correctly Predicted",
-                    ylab="Proportion of 0's Correctly Predicted", plot = TRUE, ...) {
-  roc1 <- roc2 <- matrix(NA, nrow = length(cutoff), ncol = 2)
-  colnames(roc1) <- colnames(roc2) <- c("ones", "zeros")
-  for (i in 1:length(cutoff)) {
-    roc1[i,1] <- mean(fitted1[y1==1] >= cutoff[i]) 
-    roc2[i,1] <- mean(fitted2[y2==1] >= cutoff[i])
-    roc1[i,2] <- mean(fitted1[y1==0] < cutoff[i])
-    roc2[i,2] <- mean(fitted2[y2==0] < cutoff[i])
-  }
-  if (plot) {
-    plot(0:1, 0:1, type = "n", xaxs = "i", yaxs = "i",
-         main=main, xlab=xlab, ylab=ylab, ...)
-    lines(roc1, lty = lty1, lwd = lwd1, col=col1)
-    lines(roc2, lty = lty2, lwd = lwd2, col=col2)
-    abline(1, -1, lty = "dotted")
-  }
-  else {
-    area1 <- area2 <- array()
-    for (i in 2:length(cutoff)) {
-      area1[i-1] <- (roc1[i,2] - roc1[(i-1),2]) * roc1[i,1] 
-      area2[i-1] <- (roc2[i,2] - roc2[(i-1),2]) * roc2[i,1] 
-    }
-    return(list(roc1 = roc1, 
-                roc2 = roc2,
-                area1 = sum(na.omit(area1)),
-                area2 = sum(na.omit(area2))))
-  }
-}
-
-
-
-
-
-
-
-
-
-
diff --git a/R/set.start.R b/R/set.start.R
deleted file mode 100644
index ff4c721..0000000
--- a/R/set.start.R
+++ /dev/null
@@ -1,15 +0,0 @@
-set.start <- function(start.val = NULL, terms) {
-  if (any(class(terms) == "multiple")) 
-    labs <- make.parameters(terms = terms, shape = "vector", ancillary = TRUE)
-  else
-    labs <- attr(terms, "term.labels")
-  if (is.null(start.val))
-    start.val <- rep(0, length(labs))
-  else {
-    if (length(start.val) != length(labs))
-      stop(paste("length of 'start.val' does not equal number of model parameters = ",
-                 length(labs), ".", sep = ""))
-  }
-  names(start.val) <- labs
-  start.val
-}
diff --git a/R/setx.Arima.R b/R/setx.Arima.R
deleted file mode 100644
index d4363cf..0000000
--- a/R/setx.Arima.R
+++ /dev/null
@@ -1,54 +0,0 @@
-setx.Arima <- function(object, cond=FALSE, data=NULL, counter=NULL,
-                      pred.ahead=0, ...){
-  if (!is.null(counter)) 
-    warning("counter ignored in ARIMA models.")
-  t.effect <-cond 
-  mc <- match.call()
-  env <- attr(object$terms, ".Environment")
-  if (is.null(data)){
-    data <- eval(getcall(object)$data, envir=env)
-  }
-  if(!is.null(data)){
-   data <- as.data.frame(data)
- }
-  trew <- na.omit(pmatch(names(mc), colnames(data), duplicates.ok=TRUE))
-  wert <- na.omit(pmatch(colnames(data), names(mc), duplicates.ok=TRUE))
-  wert <- sort(wert)
-  if (length(trew>0)){
-    if (pred.ahead!=0){
-      warning("'pred.ahead' available only for prediction models\nwith no external regressors\npred.ahead being set to zero\n")
-      pred.ahead <- 0 
-    }
-    test.time <- unlist(lapply(1:length(wert),
-                               function(i, mc) eval(mc[[wert[i]]]$time), mc=mc))
-    max.time <- max(test.time)
-    min.time <- min(test.time)
-    if (max.time > length(object$residuals)){
-      data.new <- matrix(as.matrix(data[nrow(data),]), ncol=ncol(data),
-                         nrow=(max.time - length(object$residuals)), byrow=TRUE)
-      colnames(data.new) <- colnames(data)
-      dta <- as.matrix(rbind(data, data.new))
-      rownames(dta) <- 1:nrow(dta)
-    } 
-    else { dta  <- data[1:(max.time),] }
-    for (i in 1:length(trew)){
-      dta[eval(mc[[wert[i]]]$time, envir=env), trew[i]] <-
-        eval(mc[[wert[i]]]$value, envir=env)
-    }
-    dta <- as.matrix(dta[,na.omit(pmatch(names(object$coef), colnames(data)))])
-  }
-  if (length(trew)==0) {
-    data.new <- matrix(as.matrix(data[nrow(data),]), ncol=ncol(data),
-                       nrow=pred.ahead, byrow=TRUE)
-    colnames(data.new) <- colnames(data)
-    dta <- as.matrix(rbind(data, data.new))
-    dta <- as.matrix(dta[, na.omit(pmatch(names(object$coef), colnames(data)))])
-    rownames(dta) <- 1:nrow(dta)
-    min.time <- length(object$residuals)
-    max.time <- length(object$residuals)
-  }
-  out <- list(dta=dta, min.time=min.time, max.time=max.time,
-              pred.ahead=pred.ahead, t.effect = cond)
-  class(out) <- "setxArima"
-  out
-}
diff --git a/R/setx.MI.R b/R/setx.MI.R
deleted file mode 100644
index 81c1a81..0000000
--- a/R/setx.MI.R
+++ /dev/null
@@ -1,43 +0,0 @@
-setx.MI <- function(object, fn = list(numeric = mean, ordered =
-                              median, other = mode), data = NULL,
-                    cond = FALSE, counter = NULL, ...) {
-  M <- length(object)
-  dta <- NULL
-  obj <- object[[1]]
-  mf <- match.call()
-  if (!cond) {# unconditional predition
-    tt.attr <- attributes(terms(obj))
-    for (i in 1:M) {
-      if(is.null(data)) {
-        #tmp <- as.data.frame(eval(getcall(obj)$data,
-        #                          sys.parent())[[i]])
-        tmp <- as.data.frame(eval(getcall(obj)$data,
-                                  tt.attr$.Environment)[[i]])
-      } else {
-        tmp <- data[[i]]
-      }
-      dta <- rbind(dta, tmp)
-    }
-    X <- NextMethod("setx", object = object[[1]], fn = fn, data = dta, cond = FALSE,
-                    counter = NULL, ...)
-    class(X) <- c("setx.MI", "setx", "data.frame")
-  } else { # conditional prediction
-    X <- list()
-    if (is.null(data)) {
-      ## data <- eval(getcall(obj)$data, sys.parent())
-      tt.attr <- attributes(terms(obj))
-      data <- eval(getcall(obj)$data, tt.attr$.Environment)
-    }
-    for (i in 1:M){
-      X[[i]] <- NextMethod("setx", object = object[[i]], fn = NULL,
-                           data = data[[i]], cond = TRUE,
-                           counter = counter, ...)
-      #X[[i]] <- setx(object[[i]], fn = NULL, data = data[[i]], cond = TRUE,
-      #                        counter = counter, ...)
-      class(X[[i]]) <- c("cond", "data.frame")
-    }
-    class(X) <- c("setx.MI", "setx.cond", "cond")
-  }
-  return(X)
-}
-
diff --git a/R/setx.R b/R/setx.R
index 006716c..7ef9926 100644
--- a/R/setx.R
+++ b/R/setx.R
@@ -1,3 +1,312 @@
-setx<-function(object, ...)
+#' Setting Explanatory Variable Values
+#'
+#' The \code{setx} command uses the variables identified in
+#' the \code{formula} generated by \code{zelig} and sets the values of
+#' the explanatory variables to the selected values.  Use \code{setx}
+#' after \code{zelig} and before \code{sim} to simulate quantities of
+#' interest.
+#' @param obj the saved output from zelig
+#' @param fn a list of functions to apply to the data frame
+#' @param data a new data frame used to set the values of
+#'   explanatory variables. If data = NULL (the default), the
+#'   data frame called in zelig is used
+#' @param cond   a logical value indicating whether unconditional
+#'   (default) or conditional (choose \code{cond = TRUE}) prediction
+#'   should be performed.  If you choose \code{cond = TRUE}, \code{setx}
+#'   will coerce \code{fn = NULL} and ignore the additional arguments in 
+#'   \code{\dots}.  If \code{cond = TRUE} and \code{data = NULL},
+#'   \code{setx} will prompt you for a data frame.
+#' @param ... user-defined values of specific variables for overwriting the
+#'   default values set by the function \code{fn}.  For example, adding
+#'   \code{var1 = mean(data\$var1)} or \code{x1 = 12} explicitly sets the value
+#'   of \code{x1} to 12.  In addition, you may specify one explanatory variable
+#'   as a range of values, creating one observation for every unique value in
+#'   the range of values
+#' @return For unconditional prediction, \code{x.out} is a model matrix based
+#'   on the specified values for the explanatory variables.  For multiple
+#'   analyses (i.e., when choosing the \code{by} option in \code{\link{zelig}},
+#'   \code{setx} returns the selected values calculated over the entire
+#'   data frame.  If you wish to calculate values over just one subset of
+#'   the data frame, the 5th subset for example, you may use:  
+#'   \code{x.out <- setx(z.out[[5]])}
+#' @export
+#' @examples
+#'
+#' # Unconditional prediction:
+#' data(turnout)
+#' z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
+#' x.out <- setx(z.out)
+#' s.out <- sim(z.out, x = x.out)
+#'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Olivia Lau and Kosuke Imai 
+#' @seealso The full Zelig manual may be accessed online at
+#'   \url{http://gking.harvard.edu/zelig}
+#' @keywords file
+setx <- function(obj, fn=NULL, data=NULL, cond=FALSE, ...)
   UseMethod("setx")
+#' Set explanatory variables
+#'
+#' Set explanatory variables
+#' @usage \method{setx}{default}(obj, fn=NULL, data=NULL, cond=FALSE, ...)
+#' @S3method setx default
+#' @param obj a 'zelig' object
+#' @param fn a list of key-value pairs specifying which function apply to
+#'           columns of the keys data-types
+#' @param data a data.frame
+#' @param cond ignored
+#' @param ... parameters specifying what to explicitly set each column as. This
+#'            is used to produce counterfactuals
+#' @return a 'setx' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, and Olivia Lau 
+setx.default <- function(obj, fn=NULL, data=NULL, cond=FALSE, ...) {
 
+  # Warnings and errors
+  if (!missing(cond))
+    warning('"cond" is not currently supported by this version of Zelig')
+
+  # Get formula used for the call to the model
+  form <- formula(obj)
+
+  # Parsed formula. This is an intermediate for used for processin design
+  # matrices, etc.
+  parsed.formula <- parseFormula(form, data)
+
+  # If data.frame is not explicitly set, use the one from the Zelig call
+  if (is.null(data))
+    data <- obj$data
+
+  # Create a variable to hold the values of the dot parameters
+  dots <- list()
+
+  # Get the dots as a set of expressions
+  symbolic.dots <- match.call(expand.dots = FALSE)[["..."]]
+
+  # Assign values to the dot parameters
+  for (key in names(symbolic.dots)) {
+    result <- with(data, eval(symbolic.dots[[key]]))
+    dots[[key]] <- result
+  }
+
+  # Extract information about terms
+  # Note: the functions 'getPredictorTerms' and 'getOutcomeTerms' are in need
+  # of a rewrite. At the moment, they are pretty kludgey (written by Matt O.).
+  vars.obj <- getPredictorTerms(form)
+  not.vars <- getResponseTerms(form)
+
+  # Default the environment to the parent
+  env.obj <- parent.frame()
+
+  # explanatory variables
+  explan.obj <- Filter(function (x) x %in% vars.obj, names(dots))
+
+  # defaults for fn
+  if (missing(fn) || !is.list(fn))
+    # set fn to appropriate values, if NULL
+    fn <- list(numeric = mean,
+               ordered = Median,
+               other   = Mode
+               )
+
+  # res
+  res <- list()
+
+  # compute values
+  # if fn[[mode(data(, key))]] exists,
+  # then use that function to compute result
+  for (key in all.vars(form[[3]])) {
+    # skip values that are explicitly set
+    if (key %in% names(dots) || key %in% not.vars)
+      next
+
+    m <- class(data[,key])[[1]]
+
+    # Match the class-type with the correct function to call
+    if (m %in% names(fn))
+      res[[key]] <- fn[[m]](data[ ,key])
+
+    # If it is a numeric, then we just evaluate it like a numeric
+    else if (is.numeric(data[,key]))
+      res[[key]] <- fn$numeric(data[ ,key])
+
+    # If it's ordered, then we take the median, because that's the best we got
+    else if (is.ordered(data[,key]))
+      res[[key]] <- fn$ordered(data[ ,key])
+
+    # Otherwise we take the mode, because that always kinda makes sense.
+    else
+      res[[key]] <- fn$other(data[ ,key])
+  }
+
+  # Add explicitly set values
+  for (key in names(symbolic.dots)) {
+    if (! key %in% colnames(data)) {
+      warning("`", key,
+              "` is not an column in the data-set, and will be ignored")
+      next
+    }
+
+    res[[key]] <- if (is.factor(data[,key])) {
+      factor(dots[[key]], levels=levels(data[,key]))
+    }
+    else
+      dots[[key]]
+  }
+
+  # Convert "res" into a list of lists. This makes atomic entries into lists.
+  for (k in 1:length(res)) {
+    if (!is.factor(res[[k]]))
+      res[[k]] <- as.list(res[[k]])
+  }
+
+  # Combine all the sublists
+  res <- do.call("mix", res)
+
+  # A list containing paired design matrices and their corresponding data.frame's
+  frames.and.designs <- list()
+
+  # Iterate through all the results
+  for (k in 1:length(res)) {
+    #
+    label <- paste(names(res[[k]]), "=", res[[k]], sep="", collapse=", ")
+
+    # Get specified explanatory variables
+    specified <- res[[k]]
+
+    # Construct data-frame
+    d <- constructDataFrame(data, specified)
+
+    # Construct model/design matrix
+    # NOTE: THIS NEEDS TO BE MORE ROBUST
+    m <- constructDesignMatrix(d, parsed.formula)
+
+    # Model matrix, as a data.frame
+    dat <- tryCatch(as.data.frame(m), error = function (e) NA)
+
+    # Specify information
+    frames.and.designs[[label]] <- list(
+      label = label,
+      data.frame = d,
+      model.matrix = m,
+      as.data.frame = dat
+      )
+  }
+
+  # Phonetically... setx's
+  setexes <- list()
+
+  for (key in names(frames.and.designs)) {
+    mod <- frames.and.designs[[key]]$model.matrix
+    d <- frames.and.designs[[key]]$data.frame
+    dat <- frames.and.designs[[key]]$as.data.frame
+    specified <- res[[k]]
+
+    setexes[[key]] <- list(
+      name   = obj$name,
+      call   = match.call(),
+      formula= form,
+      matrix = mod,
+      updated = d,
+      data   = dat,
+      values = specified,
+      fn     = fn,
+      cond   = cond,
+      new.data = data,
+      special.parameters = dots,
+      symbolic.parameters = symbolic.dots,
+      label = obj$label,
+      explan = vars.obj,
+      pred   = not.vars,
+      package.name = obj$package.name
+    )
+    attr(setexes[[key]], "pooled") <- F
+    class(setexes[[key]]) <- c(obj$name, "setx")
+  }
+
+  if (length(setexes) == 1) {
+    attr(setexes, "pooled") <- FALSE
+    setexes <- setexes[[1]]
+    class(setexes) <- c(obj$name, "setx")
+  }
+  else {
+    attr(setexes, "pooled") <- TRUE
+    class(setexes) <- c(obj$name, "pooled.setx", "setx")
+  }
+
+  # Return
+  setexes
+}
+
+
+#' Construct Data Frame
+#' Construct and return a tiny (single-row) data-frame from a larger data-frame,
+#' a list of specified values, and a formula
+#' @param data a ``data.frame'' that will be used to create a small design matrix
+#' @param specified a list with key-value pairs that will be used to explicitly
+#' set several values
+#' @return a ``data.frame'' containing a single row
+constructDataFrame <- function (data, specified) {
+  # Make a tiny data-frame with all the necessary columns
+  d <- data[1,]
+
+  # Give the computed values to those entries
+  for (key in names(specified)) {
+    val <- specified[[key]]
+
+    if (is.factor(val) || !(is.numeric(val) || is.ordered(val)))
+      val <- factor(val, levels=levels(data[,key]))
+
+    d[, key] <- val
+  }
+
+ 
+  # Return tiny data-frame
+  d
+}
+
+#' Construct Design Matrix from
+#' Construct and return a design matrix based on a tiny data-frame (single-row).
+#' @param data a ``data.frame'' (preferably single-rowed) that will be used to
+#' create a small design matrix
+#' @param formula a formula, whose predictor variables will be used to create a
+#' design matrix
+#' @return a design (model) matrix
+constructDesignMatrix <- function (data, formula) {
+  tryCatch(
+           # Attempt to generate the design matrix of the formula
+           model.matrix(formula, data), 
+
+           # If there is a warning... probably do nothing
+           # warning = function (w) w,
+
+           # If there is an error, warn the user and specify the design
+           # matrix as NA
+           error = function (e) {
+             NA
+           }
+           )
+}
+#' Set Explanatory Variables for Multiply Imputed Data-sets
+#' This function simply calls setx.default once for every fitted model
+#' within the 'zelig.MI' object
+#' @usage \method{setx}{MI}(obj, ..., data=NULL)
+#' @S3method setx MI
+#' @param obj a 'zelig' object
+#' @param ... user-defined values of specific variables for overwriting the
+#'   default values set by the function \code{fn}
+#' @param data a new data-frame
+#' @return a 'setx.mi' object used for computing Quantities of Interest by the
+#'   'sim' method
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @seealso \link{setx}
+setx.MI <- function(obj, ..., data = NULL) {
+
+  results.list <- list()
+
+  for (key in names(obj)) {
+    object <- obj[[key]]
+    results.list[[key]] <- setx(object, ..., data = data)
+  }
+
+  class(results.list) <- c("setx.mi", "setx")
+  results.list
+}
diff --git a/R/setx.coxph.R b/R/setx.coxph.R
deleted file mode 100644
index 9a446c3..0000000
--- a/R/setx.coxph.R
+++ /dev/null
@@ -1,182 +0,0 @@
-setx.coxph <- function(object, fn = list(numeric = mean, ordered =
-                                   median, other = mode), data = NULL,
-                         cond = FALSE, counter = NULL, ...){
-  mc <- match.call()
-  if (class(object)[1]=="MI")
-    object <- object[[1]]
-  mode <- function(x){
-    tb <- tapply(x, x, length)
-    if(is.factor(x))
-      value <- factor(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])),
-                      levels=levels(x))
-    else if (is.logical(x))
-      value <- as.logical(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else if (is.character(x))
-      value <- as.character(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else
-      stop(paste(vars[i], "is not a supported variable type."))
-    if (length(value)>1) {
-      warning("There is more than one mode. The first level is selected.")
-      value <- sort(value)[1]
-    }
-    return(value)
-  }
-  median.default <- median
-  median <- function(x) {
-    if(is.numeric(x))
-      value <- median.default(x)
-    else if (is.ordered(x)) {
-      value <- factor(levels(x)[quantile(as.integer(x), type = 1, prob = 0.5)],
-                      levels=levels(x)) 
-    } else
-      stop("median cannot be calculated for this data type")
-    return(value)
-  }
-  max.default <- max
-  max <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- max.default(x, na.rm=na.rm)
-    else if (is.ordered(x)) 
-      value <- factor(levels(x)[length(levels(x))], levels=levels(x))
-    else
-      stop("max cannot be calculated for this data type")
-    return(value)
-  }
-  min.default <- min
-  min <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- min.default(x, na.rm = na.rm)
-    else if (is.ordered(x))
-      value <- factor(levels(x)[1], levels=levels(x))
-    else
-      stop("min cannot be calculated for this data type")
-    return(value)
-  }
-  
-  
-  # Testing From Here
-  
-  
-  tt <- terms(object)
-  tt.attr <- attributes(tt)
-  env <- tt.attr$.Environment
-  if (is.null(env))
-    env <- parent.frame()
-  ## original data
-  if (is.null(data)) {
-    if (nrow(as.data.frame(getdata(object))) > 0)
-      dta <- getdata(object)
-    else
-      dta <- eval(getcall(object)$data, envir = env)
-  }
-  else
-    dta <- as.data.frame(data)
-
-################################ create new terms without strata or cluster
-  if(!is.null(tt.attr$specials$strata) | !is.null(tt.attr$specials$cluster)){
-    no.st.cl <- colnames(tt.attr$factors)[-c(tt.attr$specials$strata - 1,
-                                       tt.attr$specials$cluster - 1)]
-    rhs <- paste(no.st.cl, collapse="+")
-    lhs <- rownames(tt.attr$factors)[1]
-    nf <- as.formula(paste(paste(lhs), paste("~"), rhs)) #new formula for terms
-#####extract strata
-    mf1 <- model.frame(tt, data = dta, na.action = na.pass)
-
-    if(!is.null(tt.attr$specials$strata)){
-      stratas <- mf1[complete.cases(mf1),tt.attr$specials$strata]
-      st <- na.omit(pmatch(names(mc), as.character("strata")))
-      if (length(st>0))
-	strata <- mc[["strata"]]
-      else
-	strata <- mode(stratas)
-    }
-    else
-      strata <- NULL
-    
-    tt <- terms(nf)
-    tt.attr <- attributes(tt)
-  }
-  else
-    strata <- NULL
-  #################################################
-
-  ## extract variables we need
-  mf <- model.frame(tt, data = dta, na.action = na.pass)
-  if(any(class(tt)=="multiple"))
-    vars<-unlist(c(attr(tt,"depVars"),attr(tt,"indVars")),use.names=FALSE)
-  else
-  vars <- all.vars(tt)
-  if (!is.null(tt.attr$response) && tt.attr$response)
-    resvars <- all.vars(tt.attr$variables[[1+tt.attr$response]])
-  else
-    resvars <- NULL
-  data <- dta[complete.cases(mf), names(dta)%in%vars, drop=FALSE]
-  if (cond) {
-      stop("conditional prediction not supported for coxph models")
-  }
-  else if (!is.null(fn)) {
-    if (is.null(fn$numeric) || !is.function(fn$numeric)) {
-      warning("fn$numeric coerced to mean().")
-      fn$numeric <- mean
-    }
-    if (is.null(fn$ordered) || !is.function(fn$ordered) || 
-        identical(mean, fn$ordered)) {
-      warning("fn$ordered coreced to median().")
-      fn$ordered <- median
-    }
-    else if (identical(min.default, fn$ordered)) 
-      fn$ordered <- min
-    else if (identical(max.default, fn$ordered)) 
-      fn$ordered <- max
-    else if (identical(median.default, fn$ordered)) 
-      fn$ordered <- median			
-    if (is.null(fn$other) || !is.function(fn$other)) { 
-      warning("the only available fn for other is mode.")
-      fn$other <- mode
-    }
-    for (i in 1:ncol(data)) {
-      if (!(colnames(data)[i] %in% resvars)) {
-        if (is.numeric(data[,i]))
-          value <- lapply(list(data[,i]), fn$numeric)[[1]]
-        else if (is.ordered(data[,i])) 
-          value <- lapply(list(data[,i]), fn$ordered)[[1]]
-        else 
-          value <- lapply(list(data[,i]), fn$other)[[1]]
-        data[,i] <- value
-      }
-    }
-    maxl <- 1
-  } else {
-    maxl <- nrow(data)
-  }
-  opt <- vars[na.omit(pmatch(names(mc), vars))]
-  if (length(opt) > 0)
-    for (i in 1:length(opt)) {
-      value <- eval(mc[[opt[i]]], envir = env)
-      lv <- length(value)
-      if (lv>1)
-        if (maxl==1 || maxl==lv) {
-          maxl <- lv
-          data <- data[1:lv,,drop = FALSE]
-        }
-        else
-          stop("vector inputs should have the same length.")
-      if (is.factor(data[,opt[i]]))
-        data[,opt[i]] <- list(factor(value, levels=levels(data[,opt[i]])))
-      else if (is.numeric(data[,opt[i]]))
-        data[,opt[i]] <- list(as.numeric(value))
-      else if (is.logical(data[,opt[i]]))
-        data[,opt[i]] <- list(as.logical(value))
-      else
-        data[,opt[i]] <- list(value)
-    }
-  data <- data[1:maxl,,drop = FALSE]
-  
-  X <- as.data.frame(model.matrix(tt, data = data))[-1] #delete cluster
-  if(!is.null(strata))
-      X <- as.data.frame(cbind(X, strata))
-  class(X) <- c("data.frame", "coxph")
-  
-  return(X)
-}
-
diff --git a/R/setx.default.R b/R/setx.default.R
deleted file mode 100644
index 7b2dc74..0000000
--- a/R/setx.default.R
+++ /dev/null
@@ -1,253 +0,0 @@
-setx.default <- function(object,
-                         fn = list(numeric = mean, ordered = median, other = mode),
-                         data = NULL,
-                         cond = FALSE, counter = NULL,
-                         ...) {
-
-
-  mc <- match.call()
-  if (class(object)[1]=="MI")
-    object <- object[[1]]
-
-  mode <- function(x){
-    tb <- tapply(x, x, length)
-    if(is.factor(x))
-      value <- factor(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])),
-                      levels=levels(x))
-    else if (is.logical(x))
-      value <- as.logical(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else if (is.character(x))
-      value <- as.character(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else
-      stop(paste(vars[i], "is not a supported variable type."))
-    if (length(value)>1) {
-      warning("There is more than one mode. The first level is selected.")
-      value <- sort(value)[1]
-    }
-    return(value)
-  }
-  
-  median.default <- median
-  median <- function(x) {
-    if(is.numeric(x))
-      value <- median.default(x)
-    else if (is.ordered(x)) {
-      value <- factor(levels(x)[quantile(as.integer(x), type = 1, prob = 0.5)],
-                      levels=levels(x)) 
-    } else
-      stop("median cannot be calculated for this data type")
-    return(value)
-  }
-  
- 
-  max.default <- max
-  max <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- max.default(x, na.rm=na.rm)
-    else if (is.ordered(x)) 
-      value <- factor(levels(x)[length(levels(x))], levels=levels(x))
-    else
-      stop("max cannot be calculated for this data type")
-    return(value)
-  }
-  
-  min.default <- min
-  min <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- min.default(x, na.rm = na.rm)
-    else if (is.ordered(x))
-      value <- factor(levels(x)[1], levels=levels(x))
-    else
-      stop("min cannot be calculated for this data type")
-    return(value)
-  }
- 
-  
-  # Testing From Here
-  if(length(fn))
-    fn <- updatefn(fn, operVec=c("mode", "median","min", "max"),
-                   ev=environment(), global=parent.frame())
-  
-  tt <- terms(object)
-  tt.attr <- attributes(tt)
-  env <- tt.attr$.Environment
-  if (is.null(env))
-    env <- parent.frame()
-  ## original data
-  if (is.null(data)) {
-    if (nrow(as.data.frame(getdata(object))) > 0)
-      dta <- getdata(object)
-    else
-      dta <- eval(getcall(object)$data, envir = env)
-  }
-  else
-    dta <- as.data.frame(data)
-  ## extract variables we need
-  mf <- model.frame(tt, data = dta, na.action = na.pass)
-  if(any(class(tt)=="multiple"))
-    vars<-unlist(c(attr(tt,"depVars"),attr(tt,"indVars")),use.names=FALSE)
-  else
-  vars <- all.vars(tt)
-  if (!is.null(tt.attr$response) && tt.attr$response)
-    resvars <- all.vars(tt.attr$variables[[1+tt.attr$response]])
-  else
-    resvars <- NULL
-  opt <- vars[na.omit(pmatch(names(mc), vars))]
-  data <- dta[complete.cases(mf), names(dta)%in%vars, drop=FALSE]
-  if (!is.null(counter)) {
-    if (!any(counter == vars))
-      stop("the variable specified for counter is not used in the model")
-    treat <- data[, names(data)==counter]
-    if(is.numeric(treat)) {
-      data[treat==1, names(data)==counter] <- 0
-      data[treat==0, names(data)==counter] <- 1
-    } else if(is.factor(treat)) {
-      lev <- levels(treat)
-      if(length(lev)==2) {
-        treat <- as.numeric(treat) - 1 
-        data[treat==1, names(data)==counter] <- lev[1]
-        data[treat==0, names(data)==counter] <- lev[2]
-      } else {
-        stop("counter only takes a binary variable")
-      }
-    } else if(is.logical(treat)) {
-      treat <- as.numeric(treat)
-      data[treat==1, names(data)==counter] <- FALSE
-      data[treat==0, names(data)==counter] <- TRUE
-    } else {
-      stop("not supported variable type for counter")
-    }
-    if(!cond)
-      stop("if counter is specified, cond must be TRUE")
-  }
-  if (cond) {
-    if (is.null(data)) 
-      stop("if cond = TRUE, you must specify the data frame.")
-    if (is.null(mc$fn))
-      fn <- NULL
-    if (!is.null(fn)) {
-      warning("when cond = TRUE, fn is coerced to NULL")
-      fn <- NULL
-    }
-    maxl <- nrow(data)
-  } else if (!is.null(fn)) {
-    if (is.null(fn$numeric) || !is.function(fn$numeric)) {
-      warning("fn$numeric coerced to mean().")
-      fn$numeric <- mean
-    }
-    if (is.null(fn$ordered) || !is.function(fn$ordered) || 
-        identical(mean, fn$ordered)) {
-      warning("fn$ordered coreced to median().")
-      fn$ordered <- median
-    } else if (identical(min.default, fn$ordered)) {
-      fn$ordered <- min
-    } else if (identical(max.default, fn$ordered)) {
-      fn$ordered <- max
-    } else if (identical(median.default, fn$ordered)) {
-      fn$ordered <- median
-    }
-    if (is.null(fn$other) || !is.function(fn$other)) { 
-      warning("the only available fn for other is mode.")
-      fn$other <- mode
-    }
-    for (i in 1:ncol(data)) {
-      if (!(colnames(data)[i] %in% opt)) {
-        if (!(colnames(data)[i] %in% resvars)) {
-          if (is.numeric(data[,i]))
-            value <- lapply(list(data[,i]), fn$numeric)[[1]]
-          else if (is.ordered(data[,i])) 
-            value <- lapply(list(data[,i]), fn$ordered)[[1]]
-          else 
-            value <- lapply(list(data[,i]), fn$other)[[1]]
-          data[,i] <- value
-        }
-      }
-    }
-    maxl <- 1
-  } else {
-    maxl <- nrow(data)
-  }
-  if (length(opt) > 0)
-    for (i in 1:length(opt)) {
-      arg_frame <- 1
-      value <- NULL
-      while((is.null(value) || class(value)=="try-error")
-             && ((arg_frame == 1) || !identical(parent.frame(n=arg_frame-1),.GlobalEnv))){
-        value <- try(eval(mc[[opt[i]]], envir = parent.frame(n=arg_frame)), TRUE)
-        arg_frame <- arg_frame+1
-      }
-      if(class(value)=="try-error") stop(value) 
-      lv <- length(value)
-      if (lv>1)
-        if (maxl==1 || maxl==lv) {
-          maxl <- lv
-          data <- data[1:lv,,drop = FALSE]
-        }
-        else
-          stop("vector inputs should have the same length.")
-      if (is.factor(data[,opt[i]]))
-        data[,opt[i]] <- list(factor(value, levels=levels(data[,opt[i]])))
-      else if (is.numeric(data[,opt[i]]))
-        data[,opt[i]] <- list(as.numeric(value))
-      else if (is.logical(data[,opt[i]]))
-        data[,opt[i]] <- list(as.logical(value))
-      else
-        data[,opt[i]] <- list(value)
-    }
-  data <- data[1:maxl,,drop = FALSE]
-  
-  if (cond) {
-    X <- model.frame(tt, data = dta)
-    if (!is.null(counter)) {
-      X <- list(treat=X[treat==1,,drop=FALSE],
-                control=X[treat==0,,drop=FALSE])
-      class(X$treat) <- class(X$control) <- c("data.frame", "cond")
-      class(X) <- "setx.counter"
-    }
-    else
-      class(X) <- c("data.frame", "cond")
-  }
-  else {
-    X <- as.data.frame(model.matrix(tt, data = data))
-  }
-  return(X)
-}
-### DESCRIPTION: Takes the operations in vector operVec and updates list fn
-###              so that list elements "numeric", "ordered", and "other" in fn
-###              are as defined in setx rather
-###              than those taken from .GlobalEnv or namespace:base
-###
-### INPUTS: fn a list with default operations for numeric, ordered, other
-###         operVec a vector of chars with operations, e.g max, min, median, mode
-###         ev, parent environment; global, granparent environment
-###
-updatefn <- function(fn, operVec=c("mode", "median","min", "max"), ev=parent.frame(), global=.GlobalEnv)
-   {
-     mode   <- get("mode", envir = ev)
-     median <- get("median", envir = ev)
-     max   <- get("max", envir = ev)
-     min   <- get("min", envir = ev)
-      
-     modeG   <- get("mode", envir = global)
-     medianG <- get("median.default", envir = global)
-     minG <- get("min", envir = global)
-     maxG <-  get("max", envir = global)
-    if(!identical(sort(c("max", "median", "min", "mode")), sort(operVec)))
-      stop("updatefn missing some operations from setx")
-     
-    for(oper in operVec){     
-      operGlob <- switch(EXPR=oper,"mode"=, "mode.default"=modeG,"median"=, "median.default"=medianG,
-                         "min"=,"min.default"= minG,"max"=, "max.default"=maxG)
-      operSetx <- switch(EXPR=oper,"mode"=, "mode.default"=mode,"median"=, "median.default"=median,
-                         "min"=,"min.default"= min,"max"=, "max.default"=max)
-      if(identical(fn$other, operGlob))
-        fn$other <-  operSetx
-   
-      if(identical(fn$numeric, operGlob))
-        fn$numeric <-  operSetx
-      
-      if(identical(fn$ordered, operGlob))
-        fn$ordered <- operSetx
-    }
-     fn
-   }
diff --git a/R/setx.eiRxC.R b/R/setx.eiRxC.R
deleted file mode 100644
index 028d341..0000000
--- a/R/setx.eiRxC.R
+++ /dev/null
@@ -1,17 +0,0 @@
-setx.eiRxC <- function(object, fn = list(numeric = mean, ordered =
-                                   median, other = mode), data = NULL,
-                         cond = FALSE, counter = NULL, ...)
-{
-  if (!is.null(object$covar)){
-    object1 <- object
-    #object1$call$data<-as.data.frame(object$covar);
-    origCall <- getcall(object)
-    covFormula<-eval(origCall[["covar"]])
-    object1$terms <- terms.formula(covFormula)
-    res<- setx.default(object=object1, fn=fn, data=data, cond=cond, counter=counter,...)
-    return (res[,2:ncol(res)])
-  } else
-  {
- return(setx.default(object,fn=NULL))
- }
-}
diff --git a/R/setx.gam.R b/R/setx.gam.R
deleted file mode 100644
index 340cf92..0000000
--- a/R/setx.gam.R
+++ /dev/null
@@ -1,236 +0,0 @@
-setx.gam <- function(object, fn = list(numeric = mean, ordered =
-                                   median, other = mode), data = NULL,
-                         cond = FALSE, counter = NULL, ...){
-  mc <- match.call()
-  if (class(object)[1]=="MI")
-    object <- object[[1]]
-  mode <- function(x){
-    tb <- tapply(x, x, length)
-    if(is.factor(x))
-      value <- factor(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])),
-                      levels=levels(x))
-    else if (is.logical(x))
-      value <- as.logical(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else if (is.character(x))
-      value <- as.character(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else
-      stop(paste(vars[i], "is not a supported variable type."))
-    if (length(value)>1) {
-      warning("There is more than one mode. The first level is selected.")
-      value <- sort(value)[1]
-    }
-    return(value)
-  }
-  median.default <- median
-  median <- function(x) {
-    if(is.numeric(x))
-      value <- median.default(x)
-    else if (is.ordered(x)) {
-      value <- factor(levels(x)[quantile(as.integer(x), type = 1, prob = 0.5)],
-                      levels=levels(x)) 
-    } else
-      stop("median cannot be calculated for this data type")
-    return(value)
-  }
-  max.default <- max
-  max <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- max.default(x, na.rm=na.rm)
-    else if (is.ordered(x)) 
-      value <- factor(levels(x)[length(levels(x))], levels=levels(x))
-    else
-      stop("max cannot be calculated for this data type")
-    return(value)
-  }
-  min.default <- min
-  min <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- min.default(x, na.rm = na.rm)
-    else if (is.ordered(x))
-      value <- factor(levels(x)[1], levels=levels(x))
-    else
-      stop("min cannot be calculated for this data type")
-    return(value)
-  }
-  
-  
-  # Testing From Here
-  
-  
-  tt <- terms(object)
-  tt.attr <- attributes(tt)
-  env <- tt.attr$.Environment
-  if (is.null(env))
-    env <- parent.frame()
-  ## original data
-  if (is.null(data))
-    if (nrow(as.data.frame(object$zelig.data)) > 0)
-      dta <- object$zelig.data
-    else
-      dta <- eval(object$call$data, envir = env)
-  else
-    dta <- as.data.frame(data)
-  ## extract variables we need
-  mf <- model.frame(tt, data = dta, na.action = na.pass)
-  if(any(class(tt)=="multiple"))
-    vars<-unlist(c(attr(tt,"depVars"),attr(tt,"indVars")),use.names=FALSE)
-  else
-  vars <- all.vars(tt)
-  if (!is.null(tt.attr$response) && tt.attr$response)
-    resvars <- all.vars(tt.attr$variables[[1+tt.attr$response]])
-  else
-    resvars <- NULL
-  data <- dta[complete.cases(mf), names(dta)%in%vars, drop=FALSE]
-  if (!is.null(counter)) {
-    if (!any(counter == vars))
-      stop("the variable specified for counter is not used in the model")
-    treat <- data[, names(data)==counter]
-    if(is.numeric(treat)) {
-      data[treat==1, names(data)==counter] <- 0
-      data[treat==0, names(data)==counter] <- 1
-    }
-    else if(is.factor(treat)) {
-      lev <- levels(treat)
-      if(length(lev)==2) {
-        treat <- as.numeric(treat) - 1 
-        data[treat==1, names(data)==counter] <- lev[1]
-        data[treat==0, names(data)==counter] <- lev[2]
-      }
-      else
-        stop("counter only takes a binary variable")
-    }
-    else if(is.logical(treat)) {
-      treat <- as.numeric(treat)
-      data[treat==1, names(data)==counter] <- FALSE
-      data[treat==0, names(data)==counter] <- TRUE
-    }
-    else
-      stop("not supported variable type for counter")
-    if(!cond)
-      stop("if counter is specified, cond must be TRUE")
-  }
-  if (cond) {
-    if (is.null(data)) 
-      stop("if cond = TRUE, you must specify the data frame.")
-    if (is.null(mc$fn))
-      fn <- NULL
-    if (!is.null(fn)) {
-      warning("when cond = TRUE, fn is coerced to NULL")
-      fn <- NULL
-    }
-    maxl <- nrow(data)
-  }
-  else if (!is.null(fn)) {
-    if (is.null(fn$numeric) || !is.function(fn$numeric)) {
-      warning("fn$numeric coerced to mean().")
-      fn$numeric <- mean
-    }
-    if (is.null(fn$ordered) || !is.function(fn$ordered) || 
-        identical(mean, fn$ordered)) {
-      warning("fn$ordered coreced to median().")
-      fn$ordered <- median
-    }
-    else if (identical(min.default, fn$ordered)) 
-      fn$ordered <- min
-    else if (identical(max.default, fn$ordered)) 
-      fn$ordered <- max
-    else if (identical(median.default, fn$ordered)) 
-      fn$ordered <- median			
-    if (is.null(fn$other) || !is.function(fn$other)) { 
-      warning("the only available fn for other is mode.")
-      fn$other <- mode
-    }
-    for (i in 1:ncol(data)) {
-      if (!(colnames(data)[i] %in% resvars)) {
-        if (is.numeric(data[,i]))
-          value <- lapply(list(data[,i]), fn$numeric)[[1]]
-        else if (is.ordered(data[,i])) 
-          value <- lapply(list(data[,i]), fn$ordered)[[1]]
-        else 
-          value <- lapply(list(data[,i]), fn$other)[[1]]
-        data[,i] <- value
-      }
-    }
-    maxl <- 1
-  } else {
-    maxl <- nrow(data)
-  }
-  opt <- vars[na.omit(pmatch(names(mc), vars))]
-  if (length(opt) > 0)
-    for (i in 1:length(opt)) {
-      value <- eval(mc[[opt[i]]], envir = env)
-      lv <- length(value)
-      if (lv>1)
-        if (maxl==1 || maxl==lv) {
-          maxl <- lv
-          data <- data[1:lv,,drop = FALSE]
-        }
-        else
-          stop("vector inputs should have the same length.")
-      if (is.factor(data[,opt[i]]))
-        data[,opt[i]] <- list(factor(value, levels=levels(data[,opt[i]])))
-      else if (is.numeric(data[,opt[i]]))
-        data[,opt[i]] <- list(as.numeric(value))
-      else if (is.logical(data[,opt[i]]))
-        data[,opt[i]] <- list(as.logical(value))
-      else
-        data[,opt[i]] <- list(value)
-    }
-  data <- data[1:maxl,,drop = FALSE]
-  
-  if (cond) {
-    X <- model.frame(tt, data = dta)
-    if (!is.null(counter)) {
-      X <- list(treat=X[treat==1,,drop=FALSE],
-                control=X[treat==0,,drop=FALSE])
-      class(X$treat) <- class(X$control) <- c("data.frame", "cond")
-      class(X) <- "setx.counter"
-    }
-    else
-      class(X) <- c("data.frame", "cond")
-  }
-  else {
-    X <- as.data.frame(model.matrix(tt, data = data))
-  }
-  
-  ## Now X has the values I want
-  ## Introduce the repetitions
-  
-  pterms <- attr(object$pterms, "term.labels")
-  sterms <- list()
-	for(i in 1: length(object$smooth)){
-		sterms[[i]] <- object$smooth[[i]]$term  }
-
-  
-  df.vec <- vector()
-  for(i in 1: length(object$smooth)){
-	df.vec[[i]] <- object$smooth[[i]]$df}
-
-  if(attr(object$terms, "intercept") > 0){
-	newX <- data.frame(X[[1]]) 
-	names(newX) <- names(X)[[1]] }
-  if(attr(object$terms, "intercept") == 0){
-	stop("So far, the model must have an intercept")}  
-
-
-  for(i in 2:ncol(X)){
-  if( names(X)[[i]] %in% pterms){
-	temp <- as.data.frame(X[[i]])
-	names(temp) <- names(X)[[i]]
-	newX <- cbind(newX, temp) }    
- 
-    if( names(X)[[i]] %in% sterms){
-	for(j in 1: length(object$smooth)){
-		if(names(X)[[i]] == object$smooth[[j]]$term)
-			repnum <- object$smooth[[j]]$df}
-	temp <- as.data.frame(t(rep(X[[i]], repnum)))
-	names(temp) <- rep(names(X)[[i]] , repnum)
-	newX <- cbind(newX, temp)
-	}
-	
-	
-  }
-  
-  
-  return(newX)
-}
diff --git a/R/setx.netglm.R b/R/setx.netglm.R
deleted file mode 100644
index 245fa4e..0000000
--- a/R/setx.netglm.R
+++ /dev/null
@@ -1,196 +0,0 @@
-setx.netglm <- function(object, fn = list(numeric = mean, ordered =
-                                   median, other = mode), data = NULL,
-                         cond = FALSE, counter = NULL, ...){
-  mc <- match.call()
-  if (class(object)[1]=="MI")
-    object <- object[[1]]
-  mode <- function(x){
-    tb <- tapply(x, x, length)
-    if(is.factor(x))
-      value <- factor(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])),
-                      levels=levels(x))
-    else if (is.logical(x))
-      value <- as.logical(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else if (is.character(x))
-      value <- as.character(unlist(labels(tb[seq(along=tb)[tb==max(tb)]])))
-    else
-      stop(paste(vars[i], "is not a supported variable type."))
-    if (length(value)>1) {
-      warning("There is more than one mode. The first level is selected.")
-      value <- sort(value)[1]
-    }
-    return(value)
-  }
-  median.default <- median
-  median <- function(x) {
-    if(is.numeric(x))
-      value <- median.default(x)
-    else if (is.ordered(x))
-      value <- factor(levels(x)[median.default(as.integer(x))],
-                      levels=levels(x)) 
-    else
-      stop("median cannot be calculated for this data type")
-    return(value)
-  }
-  max.default <- max
-  max <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- max.default(x, na.rm=na.rm)
-    else if (is.ordered(x)) 
-      value <- factor(levels(x)[length(levels(x))], levels=levels(x))
-    else
-      stop("max cannot be calculated for this data type")
-    return(value)
-  }
-  min.default <- min
-  min <- function(x, na.rm=FALSE) {
-    if(is.numeric(x))
-      value <- min.default(x, na.rm = na.rm)
-    else if (is.ordered(x))
-      value <- factor(levels(x)[1], levels=levels(x))
-    else
-      stop("min cannot be calculated for this data type")
-    return(value)
-  }
-  
-  
-  # Testing From Here
-  
-  
-  tt <- terms(object)
-  tt.attr <- attributes(tt)
-  env <- tt.attr$.Environment
-  if (is.null(env))
-    env <- parent.frame()
-  ## original data
-  if (is.null(data))
-    if (is.data.frame(object$data))
-      dta <- object$data
-    else
-      dta <- eval(object$call$data, envir = env)
-  else
-    dta <- as.data.frame(data)
-  ## extract variables we need
-  mf <- model.frame(tt, data = dta, na.action = na.pass)
-  if(any(class(tt)=="multiple"))
-    vars<-unlist(c(attr(tt,"depVars"),attr(tt,"indVars")),use.names=FALSE)
-  else
-  vars <- all.vars(tt)
-  if (!is.null(tt.attr$response) && tt.attr$response)
-    resvars <- all.vars(tt.attr$variables[[1+tt.attr$response]])
-  else
-    resvars <- NULL
-  data <- dta[complete.cases(mf), names(dta)%in%vars, drop=FALSE]
-  if (!is.null(counter)) {
-    if (!any(counter == vars))
-      stop("the variable specified for counter is not used in the model")
-    treat <- data[, names(data)==counter]
-    if(is.numeric(treat)) {
-      data[treat==1, names(data)==counter] <- 0
-      data[treat==0, names(data)==counter] <- 1
-    }
-    else if(is.factor(treat)) {
-      lev <- levels(treat)
-      if(length(lev)==2) {
-        treat <- as.numeric(treat) - 1 
-        data[treat==1, names(data)==counter] <- lev[1]
-        data[treat==0, names(data)==counter] <- lev[2]
-      }
-      else
-        stop("counter only takes a binary variable")
-    }
-    else if(is.logical(treat)) {
-      treat <- as.numeric(treat)
-      data[treat==1, names(data)==counter] <- FALSE
-      data[treat==0, names(data)==counter] <- TRUE
-    }
-    else
-      stop("not supported variable type for counter")
-    if(!cond)
-      stop("if counter is specified, cond must be TRUE")
-  }
-  if (cond) {
-    if (is.null(data)) 
-      stop("if cond = TRUE, you must specify the data frame.")
-    if (is.null(mc$fn))
-      fn <- NULL
-    if (!is.null(fn)) {
-      warning("when cond = TRUE, fn is coerced to NULL")
-      fn <- NULL
-    }
-    maxl <- nrow(data)
-  }
-  else if (!is.null(fn)) {
-    if (is.null(fn$numeric) || !is.function(fn$numeric)) {
-      warning("fn$numeric coerced to mean().")
-      fn$numeric <- mean
-    }
-    if (is.null(fn$ordered) || !is.function(fn$ordered) || 
-        identical(mean, fn$ordered)) {
-      warning("fn$ordered coreced to median().")
-      fn$ordered <- median
-    }
-    else if (identical(min.default, fn$ordered)) 
-      fn$ordered <- min
-    else if (identical(max.default, fn$ordered)) 
-      fn$ordered <- max
-    else if (identical(median.default, fn$ordered)) 
-      fn$ordered <- median								# "this is what sna.lm ends up with"
-    if (is.null(fn$other) || !is.function(fn$other)) { 
-      warning("the only available fn for other is mode.")
-      fn$other <- mode
-    }
-    for (i in 1:ncol(data)) {
-      if (!(colnames(data)[i] %in% resvars)) {
-        if (is.numeric(data[,i]))
-          value <- lapply(list(data[,i]), fn$numeric)[[1]]     # "This is the Problem"
-        else if (is.ordered(data[,i])) 
-          value <- lapply(list(data[,i]), fn$ordered)[[1]]
-        else 
-          value <- lapply(list(data[,i]), fn$other)[[1]]
-        data[,i] <- value
-      }
-    }
-    maxl <- 1
-  } else {
-    maxl <- nrow(data)
-  }
-  opt <- vars[na.omit(pmatch(names(mc), vars))]
-  if (length(opt) > 0)
-    for (i in 1:length(opt)) {
-      value <- eval(mc[[opt[i]]], envir = env)
-      lv <- length(value)
-      if (lv>1)
-        if (maxl==1 || maxl==lv) {
-          maxl <- lv
-          data <- data[1:lv,,drop = FALSE]
-        }
-        else
-          stop("vector inputs should have the same length.")
-      if (is.factor(data[,opt[i]]))
-        data[,opt[i]] <- list(factor(value, levels=levels(data[,opt[i]])))
-      else if (is.numeric(data[,opt[i]]))
-        data[,opt[i]] <- list(as.numeric(value))
-      else if (is.logical(data[,opt[i]]))
-        data[,opt[i]] <- list(as.logical(value))
-      else
-        data[,opt[i]] <- list(value)
-    }
-  data <- data[1:maxl,,drop = FALSE]
-  
-  if (cond) {
-    X <- model.frame(tt, data = dta)
-    if (!is.null(counter)) {
-      X <- list(treat=X[treat==1,,drop=FALSE],
-                control=X[treat==0,,drop=FALSE])
-      class(X$treat) <- class(X$control) <- c("data.frame", "cond")
-      class(X) <- "setx.counter"
-    }
-    else
-      class(X) <- c("data.frame", "cond")
-  }
-  else {
-    X <- as.data.frame(model.matrix(tt, data = data))
-  }
-  return(X)
-}
diff --git a/R/setx.noX.R b/R/setx.noX.R
deleted file mode 100644
index 3c89145..0000000
--- a/R/setx.noX.R
+++ /dev/null
@@ -1,24 +0,0 @@
-setx.noX <- function(object, data = NULL, fn = NULL, cond = TRUE, ...) {
-  if (!is.null(fn) || !cond)
-    stop(paste("\n", getzelig(object), "is only appropriate in a CONDITIONAL prediction research design!"))
-  if (any(class(object) == "latent"))  
-    x <- eval(getcall(object)$formula[[2]])
-  else if (any(class(object) == "EI")) {
-    if (is.null(data))
-      data <- eval(getcall(object)$data, sys.parent())
-    x <- model.frame(object, data = data, ...)
-    x1 <- x[[1]]
-    for (i in 2:length(x))
-      x1 <- cbind(x[[1]], x[[i]])
-    x1 <- data.frame(x1)
-    if (is.null(getcall(object)$covar)) 
-      names(x1) <- c(colnames(x[[1]]), colnames(x[[2]]))
-    else
-      names(x1) <- c(colnames(x[[1]]), colnames(x[[2]]),
-                    deparse(getcall(object)$covar[[3]]))
-    rownames(x1) <- rownames(x)
-    x <- x1
-  }
-  class(x) <- c("cond", "data.frame")
-  x
-}
diff --git a/R/setx.relogit2.R b/R/setx.relogit2.R
deleted file mode 100644
index b554175..0000000
--- a/R/setx.relogit2.R
+++ /dev/null
@@ -1,7 +0,0 @@
-setx.relogit2 <- function(object, fn = list(numeric = mean,
-                                    ordered = median, other = mode), data =
-                          NULL, cond = FALSE,
-                          counter = NULL, ...) {
-  return(setx.default(object$lower.estimate))
-}
-                          
diff --git a/R/setx.rq.R b/R/setx.rq.R
deleted file mode 100644
index 55a9a51..0000000
--- a/R/setx.rq.R
+++ /dev/null
@@ -1,8 +0,0 @@
-setx.rq <- function(object, fn=list(numeric=mean, ordered=median, other=mode), data=NULL, cond=FALSE, counter=NULL, ...) {
-  mc <- match.call(expand.dots=T)
-  env <- parent.frame()
-  if(cond==TRUE)
-    stop("Conditional prediction is not supported for quantile regression. Please set cond=FALSE.")
-  mc[[1]] <- setx.default
-  return(eval(mc, envir=env))
-}
diff --git a/R/setx.rq.process.R b/R/setx.rq.process.R
deleted file mode 100644
index 46f1198..0000000
--- a/R/setx.rq.process.R
+++ /dev/null
@@ -1,3 +0,0 @@
-setx.rq.process <- function(object, ...){
-    stop("Zelig does not support inference on rq.process objects. Please rerun zelig setting tau to a value or values on the interval [0,1]")
-}
diff --git a/R/setx.rqs.R b/R/setx.rqs.R
deleted file mode 100644
index 8e6daba..0000000
--- a/R/setx.rqs.R
+++ /dev/null
@@ -1,12 +0,0 @@
-setx.rqs <- function(object, ...){
-    x <- vector("list", length(object$tau))
-    object <- stratify.rqs(object)
-    mc <- match.call(expand.dots=T)
-    mc[[1]] <- setx.rq
-    for(i in 1:length(object)){
-        x[[i]] <- eval(mc,envir = parent.frame()) 
-    }
-    names(x) <- names(object) 
-    class(x) <- c("setx.rqs", "data.frame")
-    return(x)
-}
diff --git a/R/setx.strata.R b/R/setx.strata.R
deleted file mode 100644
index 80ebc8d..0000000
--- a/R/setx.strata.R
+++ /dev/null
@@ -1,47 +0,0 @@
-setx.strata <- function(object, fn = list(numeric = mean, ordered =
-                                    median, other = mode), data =
-                          NULL, cond = FALSE, counter = NULL, ...) {
-  obj <- object[[1]]
-  x <- list()
-  if (any(class(obj)=="MI")) { # with multiple imputation
-    if (is.null(data))
-      dta <- eval(getcall(obj[[1]])$data, sys.parent())
-    else
-      dta <- data
-    by <- getcall(obj[[1]])$by
-    M <- length(dta)
-    d <- dta[[1]]
-    idx <- pmatch(by, names(d))[1]
-    lev <- sort(unique(eval(d[[idx]], sys.parent())))
-    for (i in 1:length(lev)) {
-      d <- list()
-      for (j in 1:M) {
-        dM <- dta[[j]]
-        d[[j]] <- as.data.frame(dM[dM[[idx]] == lev[i],])
-      }
-      x[[i]] <- setx(object[[i]], fn = fn, data = d, cond = cond,
-                     counter = counter, ...)
-    }
-  }
-  else { # without multiple imputation
-    if (is.null(data))
-      dta <- eval(getcall(obj)$data, sys.parent())
-    else
-      dta <- data
-    by <- getcall(obj)$by
-    idx <- pmatch(by, names(dta))[1]
-    lev <- sort(unique(eval(dta[[idx]], sys.parent())))
-    for (i in 1:length(lev)) {
-      d <- as.data.frame(dta[dta[[idx]] == lev[i],])
-      x[[i]] <- setx(object[[i]], fn = fn, data = d, cond = cond,
-                   counter = counter, ...)
-    }
-  }
-  names(x) <- names(object)
-  class(x) <- c("setx.strata", "data.frame")
-  return(x) 
-}
-
-
-
-
diff --git a/R/setx.zaovlist.R b/R/setx.zaovlist.R
deleted file mode 100644
index c5bb72e..0000000
--- a/R/setx.zaovlist.R
+++ /dev/null
@@ -1,9 +0,0 @@
-setx.zaovlist <- function(object,fn = list(numeric = mean, ordered =
-                                   median, other = mode),
-                          data = NULL,
-                          cond = FALSE, counter = NULL, ...) {
-        
-        x <- setx.default(object, fn=fn,data=data,cond=cond,counter=counter,...)
-        class(x) <- c("zaovlist", class(x))
-        x
-}
diff --git a/R/sim.MI.R b/R/sim.MI.R
new file mode 100644
index 0000000..d7b0dd6
--- /dev/null
+++ b/R/sim.MI.R
@@ -0,0 +1,40 @@
+#' Simulate Multiply Imputed Data
+#' @usage \method{sim}{MI}(obj, x=NULL, x1=NULL, y=NULL, num=1000, ...)
+#' @S3method sim MI
+#' @param obj a 'zelig.MI' object containing several fits for two or more 
+#'   subsetted data-frames
+#' @param x a 'setx.mi' object containing explanatory variables for each
+#'   fitted model
+#' @param x1 a 'setx.mi' object containing explanatory variables for each
+#'   fitted model
+#' @param y this feature is currently unimplemented
+#' @param num an integer specifying the number of simulations to compute
+#' @param ... ignored parameters
+#' @return a 'sim.MI' with simulated quantities of interest for each fitted
+#'   contained by 'obj'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @seealso \link{sim}
+sim.MI <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, ...) {
+
+  sim.results <- list()
+
+  for (key in names(obj)) {
+    object <- obj[[key]]
+    new.x <- x[[key]]
+    new.x1 <- x1[[key]]
+    new.y <- y[[key]]
+
+    sim.results[[key]] <- sim(object, x=new.x, x1=new.x1, y=new.y, num=num)
+  }
+
+  model <- get('model', attr(obj, 'state'))
+
+  class(sim.results) <- c(
+                          'MI.sim',
+                          paste(model, "mi-sim", sep="-"),
+                          paste(model, "mi.sim", sep=".")
+                          )
+
+  sim.results
+}
+
diff --git a/R/sim.R b/R/sim.R
index 03b125e..6a5f282 100644
--- a/R/sim.R
+++ b/R/sim.R
@@ -1,6 +1,92 @@
-sim <- function(object, x = NULL, ...) {
-  if (is.null(x))
-    UseMethod("sim")
-  else
-    UseMethod("sim", x)
+#' Generic Method for Computing and Organizing Simulated Quantities of Interest
+#' Simulate quantities of interest from the estimated model
+#' output from \code{zelig()} given specified values of explanatory
+#' variables established in \code{setx()}.  For classical \emph{maximum
+#' likelihood} models, \code{sim()} uses asymptotic normal
+#' approximation to the log-likelihood.  For \emph{Bayesian models},
+#' Zelig simulates quantities of interest from the posterior density,
+#' whenever possible.  For \emph{robust Bayesian models}, simulations
+#' are drawn from the identified class of Bayesian posteriors.
+#' Alternatively, you may generate quantities of interest using
+#' bootstrapped parameters.
+#' @param obj the output object from zelig
+#' @param x values of explanatory variables used for simulation,
+#'   generated by setx
+#' @param x1 optional values of explanatory variables (generated by a
+#'   second call of setx)
+#'           particular computations of quantities of interest
+#' @param y a parameter reserved for the computation of particular
+#'          quantities of interest (average treatment effects). Few
+#'          models currently support this parameter
+#' @param num an integer specifying the number of simulations to compute
+#' @param bootstrap currently unsupported
+#' @param bootfn currently unsupported
+#' @param cond.data currently unsupported
+#' @param ... arguments reserved future versions of Zelig
+#' @return The output stored in \code{s.out} varies by model.  Use the
+#'  \code{names} command to view the output stored in \code{s.out}.
+#'  Common elements include: 
+#'  \item{x}{the \code{\link{setx}} values for the explanatory variables,
+#'    used to calculate the quantities of interest (expected values,
+#'    predicted values, etc.). }
+#'  \item{x1}{the optional \code{\link{setx}} object used to simulate
+#'    first differences, and other model-specific quantities of
+#'    interest, such as risk-ratios.}
+#'  \item{call}{the options selected for \code{\link{sim}}, used to
+#'    replicate quantities of interest. } 
+#'  \item{zelig.call}{the original command and options for
+#'    \code{\link{zelig}}, used to replicate analyses. }
+#'  \item{num}{the number of simulations requested. }
+#'  \item{par}{the parameters (coefficients, and additional
+#'    model-specific parameters).  You may wish to use the same set of
+#'    simulated parameters to calculate quantities of interest rather
+#'    than simulating another set.}
+#'  \item{qi\$ev}{simulations of the expected values given the
+#'    model and \code{x}. }
+#'  \item{qi\$pr}{simulations of the predicted values given by the
+#'    fitted values. }
+#'  \item{qi\$fd}{simulations of the first differences (or risk
+#'    difference for binary models) for the given \code{x} and \code{x1}.
+#'    The difference is calculated by subtracting the expected values
+#'    given \code{x} from the expected values given \code{x1}.  (If do not
+#'    specify \code{x1}, you will not get first differences or risk
+#'    ratios.) }
+#'  \item{qi\$rr}{simulations of the risk ratios for binary and
+#'    multinomial models.  See specific models for details.}
+#'  \item{qi\$ate.ev}{simulations of the average expected
+#'    treatment effect for the treatment group, using conditional
+#'    prediction. Let \eqn{t_i} be a binary explanatory variable defining
+#'    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
+#'    average expected treatment effect for the treatment group is
+#'    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+#'      E[Y_i(t_i=0)] \mid t_i=1 \,],} 
+#'    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
+#'    observation \eqn{i} in the treatment group.  Variation in the
+#'    simulations are due to uncertainty in simulating \eqn{E[Y_i(t_i=0)]},
+#'    the counterfactual expected value of \eqn{Y_i} for observations in the
+#'    treatment group, under the assumption that everything stays the
+#'    same except that the treatment indicator is switched to \eqn{t_i=0}. }
+#'  \item{qi\$ate.pr}{simulations of the average predicted
+#'    treatment effect for the treatment group, using conditional
+#'    prediction. Let \eqn{t_i} be a binary explanatory variable defining
+#'    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
+#'    average predicted treatment effect for the treatment group is
+#'    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+#'      \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} 
+#'    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
+#'    observation \eqn{i} in the treatment group.  Variation in the
+#'    simulations are due to uncertainty in simulating
+#'    \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual predicted value of
+#'    \eqn{Y_i} for observations in the treatment group, under the
+#'    assumption that everything stays the same except that the
+#'    treatment indicator is switched to \eqn{t_i=0}.}
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Olivia Lau and Kosuke Imai 
+sim <- function(
+                obj,
+                x=NULL, x1=NULL, y=NULL, num=1000,
+                bootstrap=F, bootfn=NULL, cond.data=NULL,
+                ...
+                ) {
+  UseMethod("sim")
 }
diff --git a/R/sim.cond.R b/R/sim.cond.R
deleted file mode 100644
index 06b8eb5..0000000
--- a/R/sim.cond.R
+++ /dev/null
@@ -1,63 +0,0 @@
-sim.cond <- function(object, x, x1=NULL, num=c(1000, 100),
-                     qoi = c("ev", "pr"), prev = NULL,
-                     bootstrap = FALSE, bootfn=NULL, ...) {
-  if (!is.null(x1)) {
-    warning("First Differences are not calculated in conditional prediction models.")
-    x1 <- NULL
-  }
-  xvar <- model.matrix(object, data = x)
-  yvar <- model.response(x)
-  class(xvar) <- c("matrix", "cond")
-  if (any(class(object) == "MCMCZelig"))
-    num <- nrow(object$coefficients)
-  if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else
-      num <- num[2]
-  }
-  if (is.null(prev)) {
-    if (!bootstrap & any(class(object) != "relogit")) 
-      simpar <- param(object, num=num, bootstrap=bootstrap)
-    else if (any(class(object) == "relogit")) 
-      simpar <- param.relogit(object, num=num, x=xvar,
-                              bootstrap=bootstrap, bootfn=bootfn, ...) 
-    else {
-      tt <- terms(object)
-      dta <- eval(object$data, sys.parent())
-      dta <- dta[complete.cases(model.frame(tt, dta)),]
-      if (is.null(bootfn)) 
-        bootfn <- bootfn.default
-      res <- boot(dta, bootfn, R = num, object = object, ...)
-      colnames(res$t) <- names(res$t0)
-      simpar <- res$t
-    }
-  } else {
-    if (bootstrap)
-      stop("Error: Choosing 'bootstrap = TRUE' generates new parameters.  \nIf you wish to use previously generated parameters, \nplease specify only 'prev'.")
-    else
-      simpar <- prev
-  }
-  fn <- paste("zelig4", getzelig(object), sep = "")
-  if(exists(fn)){
-    if(!bootstrap)
-      simpar <- do.call(fn, list(object=object, simpar=simpar, x=xvar, x1=x1, bootstrap=bootstrap, bootfn=bootfn))
-    else
-	simpar <- do.call(fn, list(object=object, simpar=simpar, x=xvar, x1=x1, bootstrap=bootstrap, bootfn=bootfn, dta=dta))
-  } 
-  simqi <- qi(object, simpar = simpar, x = xvar, x1 = x1, y = yvar)
-  class(xvar) <- c("matrix", "cond")
-  ca <- match.call()
-  ca$num <- num
-  res <- list(x=xvar, x1=x1, call = ca, zelig.call = getcall(object),
-              par = simpar, qi=simqi$qi, qi.name=simqi$qi.name)
-  class(res) <- "zelig"
-  res
-}
-
-
-
-
-
-
-
diff --git a/R/sim.counter.R b/R/sim.counter.R
deleted file mode 100644
index 8ce35c7..0000000
--- a/R/sim.counter.R
+++ /dev/null
@@ -1,21 +0,0 @@
-sim.counter <- function(object, x, x1=NULL, bootstrap = FALSE, num = c(100, 1000), ...) {
-  if (any(class(object) == "MCMCZelig"))
-    num <- nrow(object$coefficients)
-  else if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else
-      num <- num[2]
-  }
-  res <- list()
-  N <- 0
-  for (i in 1:length(x))
-    N <- N + nrow(x[[i]])
-  for (i in 1:length(x)) {
-    numX <- round((nrow(x[[i]]) / N) * num)
-    res[[i]] <- sim.default(object, x = x[[i]], x1 = x1[[i]], num =
-                            numX, ...)
-  }
-  res
-}
-
diff --git a/R/sim.coxph.R b/R/sim.coxph.R
deleted file mode 100644
index 637d9d9..0000000
--- a/R/sim.coxph.R
+++ /dev/null
@@ -1,55 +0,0 @@
-sim.coxph <- function(object, x=NULL, x1=NULL, num=c(1000, 100),
-                        prev = NULL, bootstrap = FALSE, bootfn=NULL,
-                        cond.data = NULL, ...) {
- 
-  if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else
-      num <- num[2]
-  }
-  if (is.null(prev)) {
-    if (!bootstrap)
-      simpar <- param(object, num=num, bootstrap=bootstrap)
-    else {
-      tt <- terms(object)
-      dta <- eval(getcall(object)$data, sys.parent())
-      dta <- dta[complete.cases(model.frame(tt, dta)),]
-      if (is.null(bootfn))
-        bootfn <- bootfn.default
-      res <- boot(dta, bootfn, R = num, object = object, ...)
-      colnames(res$t) <- names(res$t0)
-      simpar <- res$t
-    }
-  }
-  else {
-    if (bootstrap)
-      stop("Error: Choosing 'bootstrap = TRUE' generates new parameters.  \nIf you wish to use previously generated parameters, \nplease specify only 'prev'.")
-    else
-      simpar <- prev
-  }
- 
-  simqi <- qi(object, simpar = simpar, x = x, x1 = x1, y = NULL)
-  c <- match.call()
-  c[[1]] <- as.name("sim")
-  c$num <- num
-  res <- list(x=x, x1=x1, call = c, zelig.call = getcall(object),
-              par = simpar, qi=simqi$qi, qi.name=simqi$qi.name)
-  class(res) <- "zelig"
-  res
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/sim.default.R b/R/sim.default.R
index 5ea4c6f..1b7c5ad 100644
--- a/R/sim.default.R
+++ b/R/sim.default.R
@@ -1,68 +1,252 @@
-sim.default <- function(object, x=NULL, x1=NULL, num=c(1000, 100),
-                        prev = NULL, bootstrap = FALSE, bootfn=NULL,
-                        cond.data = NULL, ...) {
-  if (!is.null(x))
-    x <- as.matrix(x)
-  if (!is.null(x1))
-    x1 <- as.matrix(x1)
-  if (any(class(object) == "MCMCZelig"))
-    num <- nrow(object$coefficients)
-  else if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else
-      num <- num[2]
-  }
-  if (is.null(prev)) {
-    if (any(class(object) == "relogit")) 
-      simpar <- param.relogit(object, num=num, x=x, bootstrap=bootstrap) 
-    else if (!bootstrap)
-      simpar <- param(object, num=num, bootstrap=bootstrap)
-    else {
-      tt <- terms(object)
-      dta <- eval(getcall(object)$data, sys.parent())
-      dta <- dta[complete.cases(model.frame(tt, dta)),]
-      if (is.null(bootfn))
-        bootfn <- bootfn.default
-      res <- boot(dta, bootfn, R = num, object = object, ...)
-      colnames(res$t) <- names(res$t0)
-      simpar <- res$t
+#' Method for Simulating Quantities of Interest wrom 'zelig' Objects
+#'
+#' Simulate quantities of interest
+#' @usage \method{sim}{default}(obj,
+#'                     x=NULL, x1=NULL, y=NULL,
+#'                     num=1000, bootstrap = FALSE,
+#'                     bootfn=NULL,
+#'                     cond.data = NULL,
+#'                     ...)
+#' @S3method sim default
+#' @param obj a 'zelig' object
+#' @param x a 'setx' object
+#' @param x1 a secondary 'setx' object used to perform particular computations
+#' of quantities of interest
+#' @param y a parameter reserved for the computation of particular quantities of
+#' interest (average treatment effects). Few models currently support this
+#' parameter
+#' @param num an integer specifying the number of simulations to compute
+#' @param bootstrap ignored
+#' @param bootfn ignored
+#' @param cond.data ignored
+#' @param ... parameters to be passed to the boot function, if one is supplied
+#' @return a 'sim' object storing the replicated quantities of interest
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+sim.default <- function(
+                        obj,
+                        x = NULL,
+                        x1 = NULL,
+                        y = NULL,
+                        num = 1000,
+                        bootstrap = FALSE,
+                        bootfn = NULL,
+                        cond.data = NULL,
+                        ...
+                        ) {
+  # Create environment of local variables
+  model.env <- new.env()
+
+  # Add local variables
+  assign(".object", obj$result, model.env)
+  assign(".fitted", obj$result, model.env)
+  assign(".model", "model-name", model.env)
+
+  # Get S3 methods
+  paramfunction <- getS3method("param", obj$name, FALSE)
+  qifunction <- getS3method("qi", obj$name, FALSE)
+  bootfunction <- getS3method("bootstrap", obj$name, TRUE)
+
+  parent.env(model.env) <- environment(paramfunction)
+
+  environment(paramfunction) <- model.env
+  environment(qifunction) <- model.env
+
+  # Begin function
+
+  if (length(attr(x, "pooled")) > 0 && attr(x, "pooled")) {
+
+    xes <- list()
+    titles <- NULL
+
+    for (key in names(x)) {
+      xes[[key]] <- sim(obj, x[[key]], x1[[key]], y, num, bootstrap, bootfn, cond.data, ...)
+      attr(xes[[key]], "pooled") <- FALSE
+      titles <- append(titles, xes[[key]]$titles)
     }
-  } else {
-    if (bootstrap)
-      stop("Error: Choosing 'bootstrap = TRUE' generates new parameters.  \nIf you wish to use previously generated parameters, \nplease specify only 'prev'.")
-    else
-      simpar <- prev
-  }
-  fn <- paste("zelig4", getzelig(object), sep = "")
-  if(exists(fn)){
-    if(!bootstrap)
-      simpar <- do.call(fn, list(object=object, simpar=simpar, x=x, x1=x1, bootstrap=bootstrap, bootfn=bootfn))
-    else
-      simpar <- do.call(fn, list(object=object, simpar=simpar, x=x, x1=x1, bootstrap=bootstrap, bootfn=bootfn, dta=dta))
-  }
-  simqi <- qi(object, simpar = simpar, x = x, x1 = x1, y = NULL)
-  c <- match.call()
-  c[[1]] <- as.name("sim")
-  c$num <- num
-  res <- list(x=x, x1=x1, call = c, zelig.call = getcall(object),
-              par = simpar, qi=simqi$qi, qi.name=simqi$qi.name)
-  class(res) <- "zelig"
-  res
-}
 
+    attr(xes, "pooled") <- TRUE
+    attr(xes, "pooled.setx") <- x
+    attr(xes, "titles") <- unique(titles)
+
+    class(xes) <- c("pooled.sim")
 
+    return(xes)
+  }
 
+  # Stop on unimplemented features
+  if (!is.null(cond.data))
+    warning("conditions are not yet supported")
 
+  # Simulate Parameters
+  # param <- param(obj, num=num)
+  param <- paramfunction(obj, num=num)
 
+  # Cast list into a "parameters" object
+  param <- as.parameters(param, num)
 
+  # Define the pre-sim hook name
+  post.hook <- obj$zc$.post
 
+  # apply the hook if it exists
+  if (!is.null(post.hook)) {
+    zelig2 <- get(paste("zelig2", obj$name, sep=""))
+    envir <- environment(zelig2)
 
+    # Produce a warning if the post-hook defined cannot be found
+    if (!exists(post.hook, mode="function", envir=envir))
+      warning("the hook '", post.hook, "' cannot be found")
+    
+    # Otherwise, business as usual. Extract the hook and apply it to the zelig
+    # object. Note that a post-hook always has the arguments:
+    #   obj, x, x1, bootstrap, bootfn, param
+    else {
+      # Retrieve the hook, since it exists
+      hook <- get(post.hook, envir=envir)
 
+      # Assign the param object. In the case of bootstrapping, the param object
+      # might not have any meaning.
+      param <- if (bootstrap)
+        param
 
+      # Otherwise apply the hook and return it as the parameters
+      else
+        hook(obj, x, x1, bootstrap, bootfn, param=param)
+    }
+  }
 
+  # Get default boot-strapping function if boot is enabled and no boot-function
+  # is specified
+  if (bootstrap && missing(bootfn))
+    bootfn <- bootfn.default
+
+  # Boot-strapping!!
+  if (!missing(bootfn) && !is.null(bootfn)) {
+
+    # Get the appropriate 
+    d <- obj$data
+    d <- d[complete.cases(d), ]
+
+    # Add several private variables to bootfn:
+    #   .fitted : a fitted model object
+    #   .data : the data-set used to fit the original model
+    #   .call : the call used to fit the original model
+    #   .env : the environment in which the .call variable should/can be
+    #          evaluated
+    boot.env <- obj$method.env
+    bootfn <- attach.env(bootfn, obj$method.env)
+
+    # Bootstrapfn
+    bootstrapfn <- getS3method("bootstrap", obj$name, TRUE)
+    environment(bootstrapfn) <- model.env
+
+    # If is.null then we just get the default bootstrap fn, which is merely to
+    # simulate the systematic paramaters
+    if (is.null(bootstrapfn))
+      bootstrapfn <- Zelig:::bootstrap.default
+
+    # Attach the appropriate environment to the function
+    bootstrapfn <- attach.env(bootstrapfn, model.env)
+
+    # Get a sample, so we know how to re-size the result.
+    # Note: This "example" object will be used at the end of this if-clause to
+    # build an object similar in structure to that of "bootstrapfn(obj)"
+    example <- bootstrapfn(obj)
+    example <- as.bootvector(example)
+
+    # Bootstrap using a function with parameters: data, i, object
+    # Where data is a data.frame, i is an vector of integers used to sample the
+    # data.frame, and object is a fitted model object.
+    res <- boot(d, bootfn, num,
+                object = obj$result,
+                bootstrapfn = bootstrapfn,
+                num = num
+                )
+
+    # Copy the param object that was made earlier via ``param'' method
+    res.param <- param
+
+    # Reverse-construct a bootlist object from this
+    bl <- as.bootlist(res$t, example$lengths, example$names)
+
+    # Replace slots corresponding to "alpha" and "beta" on the "param" object
+    param$coefficients <- bl$beta
+    param$alpha <- bl$alpha
+  }
 
+  # Compute quantities of interest
+  res.qi <- qifunction(obj, x=x, x1=x1, y=y, param=param, num=num)
+  
+  # Cast as a "qi" object if it is not one
+  res.qi <- as.qi(res.qi)
+
+  # Assign class
+  class(res.qi) <- c(obj$name, class(res.qi))
+
+  # This is kludge (for now)
+  # This can be removed as of 4-27-2011
+  if (inherits(obj, "MI"))
+    class(res.qi) <- c("MI", class(res.qi))
+
+  # build object
+  s <- list(
+            model     = obj$name,
+            x        = x,
+            x1       = x1,
+            stats    = summarize(res.qi),
+            qi       = res.qi,
+            titles   = names(res.qi),
+            bootfn   = bootfn,
+            cond.data= cond.data,
+            zelig    = obj,
+            call     = match.call(),
+            zcall    = obj$call,
+            result   = obj$result,
+            num      = num,
+            special.parameters = list(...),
+            package.name = obj$package.name
+            )
+
+  # cast class
+  sim.class <- if (inherits(obj, "MI"))
+    sim.class <- "MI.sim"
+
+  attr(s, "titles") <- unique(names(res.qi))
+
+  class(s) <- c(sim.class,
+                paste("sim", obj$name, sep="."),
+                obj$name,
+                "sim"
+                )
+
+  # return
+  s
+}
 
+create.pooled.sim <- function(
+                        obj,
+                        x = NULL,
+                        x1 = NULL,
+                        y = NULL,
+                        num = 1000,
+                        bootstrap = FALSE,
+                        bootfn = NULL,
+                        cond.data = NULL,
+                        ...
+                        ) {
+  xes <- list()
+  titles <- NULL
+
+  for (key in names(x)) {
+    xes[[key]] <- sim(obj, x[[key]], x1[[key]], y, num, bootstrap, bootfn, cond.data, ...)
+    attr(xes[[key]], "pooled") <- FALSE
+    titles <- append(titles, xes[[key]]$titles)
+  }
 
+  attr(xes, "pooled") <- TRUE
+  attr(xes, "pooled.setx") <- x
+  attr(xes, "titles") <- unique(titles)
 
+  class(xes) <- c("pooled.sim")
 
+  return(xes)
+}
diff --git a/R/sim.eiRxC.R b/R/sim.eiRxC.R
deleted file mode 100644
index 37fd95f..0000000
--- a/R/sim.eiRxC.R
+++ /dev/null
@@ -1,59 +0,0 @@
-###
-##  sim function for eiRxC model.
-##  NOTES:
-##  - this model accepts only bootstrap = FALSE argument.
-##  - the default bootfn will be "paramsei.estim"
-##
-
-sim.eiRxC <- function(object, x=NULL, x1=NULL, num= 100,
-                        prev = NULL, bootstrap = TRUE, bootfn=NULL,
-                        cond.data = NULL, ...) {
-        if (!is.null(x))
-          x <- as.matrix(x)
-
-        if (!is.null(x1))
-          warning ("no first difference are available for EI models")
-
-        if (!is.null (prev))
-          stop("Error: prev option is not supported for this model")
-
-        if (!bootstrap)
-          stop ("Error: please use bootstrapping to simulate parameters for this model")
-
-        ## get the data to pass to boot function
-        tt <- terms(object)
-        dta <- eval(getcall(object)$data, sys.parent())
-        dta <- dta[complete.cases(model.frame(tt, dta)),]
-        if (is.null(bootfn))
-          bootfn <- bootfn.default
-        res <- boot(dta, bootfn, R = num, object = object, ...)
-        #res <- boot(data= dta, statistic = bootfn, R = num, nR = object$dims[[1]], nC = object$dims[[2]], ...)
-        colnames(res$t) <- names(res$t0)
-        simpar <- res$t
-        
-    
-        simqi <- qi(object, simpar = simpar, x = x, x1 = x1, y = NULL)
-        c <- match.call()
-        c[[1]] <- as.name("sim")
-        c$num <- num
-        res <- list(x=x, x1=x1, call = c, zelig.call = getcall(object),
-                    par = simpar, qi=simqi$qi, qi.name=simqi$qi.name)
-        class(res) <- "zelig"
-        res
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/sim.netglm.R b/R/sim.netglm.R
deleted file mode 100644
index b957866..0000000
--- a/R/sim.netglm.R
+++ /dev/null
@@ -1,46 +0,0 @@
-sim.logit.net <- function(object, x=NULL, x1=NULL, num=c(1000, 100),
-                        prev = NULL, bootstrap = FALSE, bootfn=NULL,
-                        cond.data = NULL, ...) {
-  if (!is.null(x))
-    x <- as.matrix(x)
-  if (!is.null(x1))
-    x1 <- as.matrix(x1)
-  if (any(class(object) == "MCMCZelig"))
-    num <- nrow(object$coefficients)
-  else if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else
-      num <- num[2]
-  }
-  if (is.null(prev)) {
-    if (any(class(object) == "relogit")) 
-      simpar <- param.netglm(object, num=num, x=x, bootstrap=bootstrap) 
-    else if (!bootstrap)
-      simpar <- param.netglm(object, num=num, bootstrap=bootstrap)
-    else {
-      tt <- terms(object)
-      dta <- eval(object$data, sys.parent())
-      dta <- dta[complete.cases(model.frame(tt, dta)),]
-      if (is.null(bootfn))
-        bootfn <- bootfn.default
-      res <- boot(dta, bootfn, R = num, object = object, ...)
-      colnames(res$t) <- names(res$t0)
-      simpar <- res$t
-    }
-  }
-  else {
-    if (bootstrap)
-      stop("Error: Choosing 'bootstrap = TRUE' generates new parameters.  \nIf you wish to use previously generated parameters, \nplease specify only 'prev'.")
-    else
-      simpar <- prev
-  }
-  simqi <- qi.netglm(object, simpar = simpar, x = x, x1 = x1, y = NULL)
-  c <- match.call()
-  c[[1]] <- as.name("sim")
-  c$num <- num
-  res <- list(x=x, x1=x1, call = c, zelig.call = object$call,
-              par = simpar, qi=simqi$qi, qi.name=simqi$qi.name)
-  class(res) <- "zelig"
-  res
-}
\ No newline at end of file
diff --git a/R/sim.setx.MI.R b/R/sim.setx.MI.R
deleted file mode 100644
index de93c67..0000000
--- a/R/sim.setx.MI.R
+++ /dev/null
@@ -1,77 +0,0 @@
-sim.setx.MI <- function(object, x, x1 = NULL, num = c(1000, 100), prev = NULL, 
-                          bootstrap = FALSE, bootfn = NULL, ...) {
-  if (any(class(object) == "MCMCZelig"))
-    num <- nrow(object[[1]]$coefficients) * length(object)
-  else if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else 
-      num <- num[2]
-  }
-  ca <- match.call()
-  if (!any(class(x) == "cond")) {
-    simpar <- MIsimulation(object, num, prev, bootstrap, bootfn=bootfn, x=x, x1=x1, ...)
-    if(any(class(object[[1]]) == "coxph"))
-      simqi <- qi.coxph(object, simpar = simpar, x = x, x1 = x1)
-    else
-      simqi <- qi(object[[1]], simpar = simpar, x = as.matrix(x), 
-                x1 = if (!is.null(x1)) as.matrix(x1))
-    ca$num <- num
-    res <- list(x = x, x1 = x1, call = ca, zelig.call = getcall(object[[1]]), 
-                par = simpar, qi = simqi$qi, qi.name = simqi$qi.name)
-  }
-  else {
-    simpar <- MIsimulation(object, num, prev, bootstrap, bootfn=bootfn, x=x, x1=NULL, ...)
-    tmp.qi <- list()
-    for (i in 1:length(x)) {
-      if (!is.null(x1)) {
-        warning("First Differences are not calculated in conditional prediction models.")
-        x1 <- NULL
-      }
-      if (object[[i]]$call$model %in% c("bprobit", "blogit")) {
-        yvar <- x[[i]][,1:2]
-        x[[i]] <- x[[i]][,3:ncol(x[[i]])]
-	  x[[i]] <- cbind(1,x[[i]])
-      }
-      else {
-        yvar <- x[[i]][,1]
-        x[[i]] <- x[[i]][,2:ncol(x[[i]])]
-	  x[[i]] <- cbind(1,x[[i]])
-      }
-      tmp.qi[[i]] <- qi(object[[1]], simpar = simpar, x = x[[i]], x1 =
-                        x1[[i]], y = yvar)
-    }
-    simqi <- tmp.qi[[1]]
-    for (i in 2:length(tmp.qi)) {
-      for (j in 1:length(simqi)) {
-        if (length(dim(simqi$qi[[j]])) == 2)
-          simqi$qi[[j]] <- cbind(simqi$qi[[j]], tmp.qi[[i]]$qi[[j]])
-        else {
-          tmp <- array(NA, dim = c(dim(simqi$qi[[j]])[1],
-                             dim(simqi$qi[[j]])[2],
-                             (dim(simqi$qi[[j]])[3] +
-                              dim(tmp.qi[[i]]$qi[[j]])[3])))
-          tmp[,, 1:dim(simqi$qi[[j]])[3]] <- simqi$qi[[j]]
-          tmp[,, (dim(simqi$qi[[j]])[3]+1):dim(tmp)[3]] <-
-            tmp.qi[[i]]$qi[[j]]
-          simqi$qi[[j]] <- tmp
-        }
-      }
-      print(object[[1]]$call)
-      ca$num <- num
-      res <- list(x = x, x1 = x1, call = ca,
-                  zelig.call = getcall(object[[1]]), 
-                  par = simpar, qi = simqi$qi, qi.name =
-                  simqi$qi.name)
-    }
-  }
-  class(res) <- "zelig"
-  res
-}
-
-
-
-
-
-
-
diff --git a/R/sim.setx.rqs.R b/R/sim.setx.rqs.R
deleted file mode 100644
index fe5110f..0000000
--- a/R/sim.setx.rqs.R
+++ /dev/null
@@ -1,20 +0,0 @@
-sim.setx.rqs <- function(object, x, x1=NULL, num= c(1000, 100), bootstrap=FALSE, ...){
-    if (length(num)==2) {
-        if(!bootstrap)
-            num <- num[1]
-        else
-            num <- num[2]
-    }
-    res <- list()
-    object <- stratify.rqs(object)
-    N <- length(object)
-    lev <- names(object)
-    for(i in 1:length(lev)) {
-        numN <- round(num/N)   
-        res[[i]] <- sim(object[[i]], x=x[[i]], x1=x1[[i]], num=numN,
-                        bootstrap=bootstrap, ...)
-    }
-    class(res) <- "zelig.rqs.strata"
-    names(res) <- lev 
-    res
-}
diff --git a/R/sim.setx.strata.R b/R/sim.setx.strata.R
deleted file mode 100644
index ded8966..0000000
--- a/R/sim.setx.strata.R
+++ /dev/null
@@ -1,40 +0,0 @@
-sim.setx.strata <- function(object, x, x1 = NULL, num = c(1000, 100),
-                            bootstrap = FALSE, ...){
-  if (any(class(object) == "MCMCZelig"))
-    num <- nrow(getcoef(object))
-  if (length(num) == 2) {
-    if (!bootstrap)
-      num <- num[1]
-    else
-      num <- num[2]
-  }
-  if(any(class(object[[1]])=="MI")) {
-    dta <- eval(getcall((object[[1]])[[1]])$data, sys.parent())[[1]]
-    by <- getcall((object[[1]])[[1]])$by
-  }
-  else {	
-    dta <- eval(getcall(object[[1]])$data, sys.parent())
-    by <- getcall(object[[1]])$by
-  }
-  N <- length(object)
-  res <- list()
-  idx <- match(by, names(dta))
-  lev <- names(object)
-  for (i in 1:length(lev)) {
-    dat <- dta[dta[[idx]] == lev[i],]
-    numN <- round(num/N)
-    res[[i]] <- sim(object[[i]], x = x[[i]], x1 = x1[[i]], num = numN,
-                    bootstrap = bootstrap, ...)
-    res[[i]]$nx <- nrow(dat)/nrow(dta)
-  }
-  class(res) <- "zelig.strata"
-  names(res) <- names(object)
-  res
-}
-
-
-
-
-
-
-
diff --git a/R/sim.setxArima.R b/R/sim.setxArima.R
deleted file mode 100644
index 4fa1162..0000000
--- a/R/sim.setxArima.R
+++ /dev/null
@@ -1,217 +0,0 @@
-sim.setxArima <- function(object, x, x1 = NULL, num = 1000, prev = NULL,
-                     bootstrap = FALSE, bootfn = NULL, cond.data = NULL,
-                     max.iter=10, ...) {
-  require(mvtnorm)
-#  library.dynam("stats")
-  if (bootstrap | !is.null(bootfn) | !is.null(cond.data)){
-    warning("boostrap, bootfn, and cond.data are ignored in ARIMA models")
-  }
-  t.effect <- x$t.effect
-  ##extracting out the series
-  if (is.data.frame(object$zelig.data))
-    dat <- object$zelig.data
-  else{
-    envir<- attr(object$terms, ".Environment")
-    dat <- eval(object$call$data, envir)
-	}
-  series <- eval(eval(object$call$formula[[2]])$name, envir=dat)
-  pred.ahead <- x$pred.ahead
-  if (!is.null(prev)){
-    draw.parm <- prev
-  }
-  if (is.null(prev)){
-    draw.parm <- rmvnorm(num, mean=object$coef, sigma=object$var.coef)
-###inserting a function that will be used to ensure that the 
-###ma portion is invertible.  This code is taken from 
-##the implementation of ARIMA in the stats package
-    maInvert <- function(ma) {
-        q <- length(ma)
-        q0 <- max(which(c(1, ma) != 0)) - 1
-        if (!q0) 
-            return(ma)
-        roots <- polyroot(c(1, ma[1:q0]))
-        ind <- Mod(roots) < 1
-        if (all(!ind)) 
-            return(ma)
-        if (q0 == 1) 
-            return(c(1/ma[1], rep(0, q - q0)))
-        roots[ind] <- 1/roots[ind]
-        x <- 1
-        for (r in roots) x <- c(x, 0) - c(0, x)/r
-        c(Re(x[-1]), rep(0, q - q0))
-    }
-##
-temp<- which(object$arma[1:4]>0)
-temp<- sort(temp)
-for(j in 1:nrow(draw.parm)){
-if(length(temp)==1){
-	if(temp==2 | temp==4){
-		draw.parm[j, 1:object$arma[temp]]<- maInvert(draw.parm[i, 1:object$arma[temp]])
-		}
-	if(temp==1){
-		draw.parm[j, 1:object$arma[temp]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-			as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][1:object$arma[temp]]
-		}
-	if(temp==3){
-		draw.parm[j, 1:object$arma[temp]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-			as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(1:object$arma[3])*object$arma[5]]
-		}
-	}
-if(length(temp)==2){
-	if(temp[1]==2 | temp[1]==4){
-		draw.parm[i, 1:object$arma[temp[1]]]<- maInvert(draw.parm[j, 1:object$arma[temp[1]]])
-		}
-	if(temp[1]==1){
-		draw.parm[j, 1:object$arma[temp[1]]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][1:object$arma[temp[1]]]
-		}
-	if(temp[1]==3){
-		draw.parm[j, 1:object$arma[temp[1]]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-			as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(1:object$arma[3])*object$arma[5]]
-		}
-	if(temp[2]==2 | temp[2]==4){
-		draw.parm[j, (object$arma[temp[1]] + 1):(sum(object$arma[temp[1]:temp[2]]))]<- maInvert(draw.parm[j, (object$arma[temp[1]] + 1):(sum(object$arma[temp[1]:temp[2]]))])
-		}
-	if(temp[2]==1){
-		draw.parm[j, (sum(object$arma[temp[1]]) + 1):(sum(object$arma[temp[1]:temp[2]]))]<- .Call("ARIMA_transPars",draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])],
-													 as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(sum(object$arma[1]) + 1):(sum(object$arma[1:2]))]
-}
-	if(temp[2]==3){
-		draw.parm[j, (sum(object$arma[temp[1]]) + 1):object$arma[temp[2]]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-			as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(1:object$arma[3])*object$arma[5]]
-
-  }
-}
-if(length(temp)>2){
-if(temp[1]==2 | temp[1]==4){
-		draw.parm[i, 1:object$arma[temp[1]]]<- maInvert(draw.parm[j, 1:object$arma[temp[1]]])
-		}
-	if(temp[1]==1){
-		draw.parm[j, 1:object$arma[temp[1]]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-				as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][1:object$arma[temp[1]]]
-		}
-	if(temp[1]==3){
-		draw.parm[j, 1:object$arma[temp[3]]]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-			as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(1:object$arma[3])*object$arma[5]]
-		}
-for(i in 2:length(temp)){
-if(temp[i]==2 | temp[i]==4){
-		draw.parm[j, (sum(object$arma[temp[1]:temp[(i-1)]]) + 1):(sum(object$arma[temp[1]:temp[(i)]]))]<- maInvert(draw.parm[j, 
-						(sum(object$arma[temp[1]:temp[(i-1)]]) + 1):(sum(object$arma[temp[1]:temp[(i)]]))])
-		}
-	if(temp[i]==1){
-		draw.parm[j, (sum(object$arma[temp[1]:temp[(i-1)]]) + 1):(sum(object$arma[temp[1]:temp[(i)]]))]<- .Call("ARIMA_transPars",draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])],
-													 as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(sum(object$arma[temp[1]:temp[(i-1)]]) + 1):(sum(object$arma[temp[1]:temp[(i)]]))]
-}
-	if(temp[i]==3){
-		draw.parm[j, (sum(object$arma[temp[1]:temp[(i-1)]]) + 1):(sum(object$arma[temp[1]:temp[(i)]]))]<- .Call("ARIMA_transPars", draw.parm[j, 1:sum(object$arma[temp[1:length(temp)]])], 
-			as.integer(object$arma[1:5]), TRUE, PACKAGE = "stats")[[1]][(1:object$arma[3])*object$arma[5]]
-		}
-
-  }
-}
-}
-}
-  if (x$min.time==1 | x$min.time==2)
-    stop("Counterfactuals can only be specified from the third observation and later \n")
-  if (ncol(x$dta)==0){
-    ev <- matrix(NA, nrow=nrow(draw.parm), ncol=(pred.ahead))
-    se <- matrix(NA, nrow=nrow(draw.parm), ncol=(pred.ahead))
-    for (i in 1:nrow(draw.parm)){
-      temp <- arima(series, xreg=NULL,
-                    order=c(object$arma[1], object$arma[6], object$arma[2]), 
-                    seasonal=list(order=c(object$arma[3], object$arma[7], object$arma[4]),
-                      period=object$arma[5]), fixed=draw.parm[i,], transform.pars=TRUE)
-      temp2 <-predict(temp, newxreg=NULL, n.ahead=(pred.ahead))
-      ev[i,] <- temp2$pred[1:(pred.ahead)]
-      se[i,] <- temp2$se[1:(pred.ahead)]	
-    }
-    ev <- as.matrix(ev)
-    se <- as.matrix(se)
-    if (!is.null(x1)){
-      warning("First differences are only calculated when external regressors are used \n")
-    }
-  }
-  if (ncol(x$dta) > 0){
-    x.obs <- as.matrix(x$dta[1:(x$min.time-1),])
-    x.cf <- as.matrix(x$dta[x$min.time: (x$max.time), ])
-    ev <- matrix(NA, nrow=nrow(draw.parm), ncol=(length(x$min.time:x$max.time)))
-    se <- matrix(NA, nrow=nrow(draw.parm), ncol=(length(x$min.time:x$max.time)))
-    if (x$min.time == nrow(x$dta)){
-      x.cf<- t(x.cf)
-    }
-    for (i in 1:nrow(draw.parm)){
-      temp <- arima(series[1:(x$min.time-1)], xreg=x.obs,
-                    order=c(object$arma[1], object$arma[6], object$arma[2]), 
-                    seasonal=list(order=c(object$arma[3], object$arma[7], object$arma[4]),
-                      period=object$arma[5]), fixed=draw.parm[i,])
-      temp2 <-predict(temp, newxreg=x.cf, n.ahead=nrow(x.cf))
-      ev[i,] <- temp2$pred
-      se[i,] <- temp2$se
-    }
-    ev <- as.matrix(ev)
-  }
-  if (!is.null(x1)){
-    if (ncol(x1$dta) > 0){
-      x1.obs <- as.matrix(x1$dta[1:(x1$min.time-1), ])
-      x1.cf <- as.matrix(x1$dta[x1$min.time:(x1$max.time),])
-      ev.1 <- matrix(NA, nrow=nrow(draw.parm), ncol=length(x$min.time:x$max.time))
-      se.1 <- matrix(NA, nrow=nrow(draw.parm), ncol=length(x$min.time:x$max.time))
-      pred.ahead <- x$pred.ahead
-      pred.arima.x1 <- list()
-      false.arima.x1 <- list() 
-      if (x1$min.time == nrow(x1$dta)){
-	x1.cf <- t(x1.cf)
-      }
-      for (i in 1:nrow(draw.parm)){
-	temp3<- arima(series[1:(x1$min.time-1)], xreg=x1.obs,
-                                     order=c(object$arma[1], object$arma[6], object$arma[2]),
-                                    seasonal=list(order=c(object$arma[3], object$arma[7],
-                                                    object$arma[4]), period=object$arma[5]),
-                                     fixed=draw.parm[i,])
-	temp4 <- predict(temp3, newxreg=x1.cf, n.ahead=nrow(x.cf))
-	ev.1[i,] <- temp4$pred	
-	se.1[i,] <- temp4$se
-      }
-      ev.1 <- as.matrix(ev.1)
-    }
-  }
-  if (!is.null(x1) & t.effect){
-    warning("First differences and treatment effects are not both calculated.Calculating first differences only.\n")
-  }
-  if (is.null(x1) & (t.effect)){
-    t.eff <- matrix(NA, nrow=nrow(ev), ncol=ncol(ev))
-    for (i in 1:nrow(ev)){
-      t.eff[i,] <- series[(x$min.time):x$max.time] - ev[i,]
-    }
-  }
-  if (!is.null(x1) | !(t.effect)){
-    t.eff <- NULL
-  }
-  if (!is.null(x1)){
-    se <- as.matrix(se)
-    se.1 <- as.matrix(se.1)
-    fd <- ev.1 -  ev
-    qi <- list(ev=ev, se=se, fd=fd, t.eff=t.eff)
-    qi.name <- list(ev="Expected Values, E(Y|X)", se="Prediction Standard Error",
-                    fd="First Difference, E(Y|X1) - E(Y|X)", t.eff="Treatment Effect")
-    res <- list(min.time=x$min.time, qi=qi, qi.name=qi.name,
-                zelig.call = object$call, 
-                t.series = eval(eval(object$call$formula[[2]],
-                  envir=dat)$name, envir=dat))
-    class(res) <- c("zelig.arima", "zelig")
-    return(res)
-  } 
-  if (is.null(x1)){
-    se <- as.matrix(se)
-    qi <- list(ev=ev, se=se, t.eff=t.eff)
-    qi.name <- list(ev="Expected Values, E(Y|X)", se="Prediction Standard Error",
-                    t.eff="Treatment Effect") 
-    res <- list(min.time=x$min.time, qi=qi, qi.name=qi.name,
-                t.series=eval(eval(object$call$formula[[2]],
-                  envir=dat)$name, envir=dat),
-                zelig.call = object$call)
-    class(res) <- c("zelig.arima", "zelig")
-    return(res) 
-  } 
-} 
-
diff --git a/R/sim.zaovlist.R b/R/sim.zaovlist.R
deleted file mode 100644
index aa0a249..0000000
--- a/R/sim.zaovlist.R
+++ /dev/null
@@ -1,119 +0,0 @@
-### DESCRIPTION: Simulation of quantities of interest for model=aovlist
-###              object is of class = c(zaovlist,aovlist,listof)
-###              It contains list elements for multiple strata, 
-###              which individual class is c(aov,lm). 
-###              A formula with an Error term describes the strata
-###              for the "Accross" and "Within" aov-models.  
-###              We extract those aov-models from object with relevant
-###              information and apply sim.default to each of
-###              them separately.
-###
-### OUTPUT a list after applying sim.default to each aov-model 
-###        of multiple-strata object separately. 
-###        The result is of class zeliglist because contains a list
-###        of zelig simulation models.
-###
-### Elena Villalon (evillalon at iq.harvard.edu)
-
-sim.zaovlist <- function(object, x=NULL, x1=NULL, num=c(1000, 100),
-                         prev = NULL, bootstrap = FALSE,
-                         bootfn=NULL, cond.data=NULL,...) {
-                     
-        tt <- attributes(object)$terms
-        tt.attr <- attributes(tt)
-        vars <- tt.attr$term.labels
-        z.out <- object
-        ## only those lists elements (or strata) that are relevant to sim()
-        ind <- unlist(find.ind(object))
-        if(length(ind))
-          object <- object[ind]
-   
-        nm <- names(object)
-   
-        sout <- list()
-        zcall <- getcall(z.out) 
- 
-        for(n in 1:length(object)){
-                xn <- x
-                x1n <- x1
-                obj <- object[[n]]
-                objcoef <- coefficients(obj)
-                if(length(x))
-                  xn <- pick.coef(objcoef,x)
-                if(length(x1))
-                  x1n <- pick.coef(objcoef,x1)
-                
-                
-                if(bootstrap){
-                        stop("Option bootstrap=TRUE with Error term is not yet supported") 
-                }
-   
-                ss <- sim.default(obj,x=xn, x1=x1n, num=num,
-                                  prev = prev, bootstrap = bootstrap,
-                                  bootfn=bootfn,cond.data=cond.data, ...)
-                
-                ix <- grep("^zelig\\.call$", names(ss)$default)
-                
-                if(length(ix))
-                  ss[[ix]] <- zcall
-                else
-                  ss <- c(ss,list(zelig.call=zcall))
-                sout <- c(sout, list(ss))
-        }
-        names(sout) <- nm
-        class(sout) <- c("zeliglist", class(sout))
-        sout
-}
-
-###DESCRIPTION: object is a list of elements, each is class c("aov", "lm")
-###             Find those elemnts of the list with relevant information
-###
-###INPUT: object, a list of class c("aovlist", "listof")
-###OUTPUT: index to relevant list elements
-###
-### AUTHOR: Elena Villalon (evillalon at iq.harvard.edu)
-###
-find.ind <- function(object){
-        nmobj <- names(object)
- 
-        ixa <- sapply(1:length(object), function(n){
-                obj <- object[[n]]
-                nm  <- nmobj[n]
-                ixd <- c(grep("Intercept", nm),grep("^[Cc]all", nm))
-                if(length(ixd))
-                  return(NULL)
-                resterms <- ifelse(!length(grep("terms", attributes(obj)$names)), NA, n)
-                
-                rescoef <- ifelse(!length(coefficients(obj)), NA, n)
-                res <- NULL
-                if(!is.na(resterms) && !is.na(rescoef))
-                  res <- n
-                return(res)
-        })
-        return(ixa)
-}
-   
-
-###DESCRIPTION: objectcoef is vector with the names and values of
-###             explanatory variables. Find the values of x applicable to
-###             objcoef, i.e. that are also values of objcoef
-###             There is no need to use this function in sim.zaovlist
-###
-###INPUT: objectcoef, the coefficients for obj of class c("aov", "lm")
-###       x the result of setx, with values for explanatory variables
-###
-###OUTPUT: x but with only explanatory variables in objcoef 
-###
-### AUTHOR: Elena Villalon (evillalon at iq.harvard.edu)
-###              
-pick.coef <- function(objcoef,x){
-      
-        objnmcoef <- names(objcoef)
-        objnmcoef <- sapply(objnmcoef,function(m) paste("^", m,"$", sep=""))
-        if(length(x)) xnmcoef <- names(x)
-        ix <- sapply(objnmcoef, grep, xnmcoef)
-        ix <- unlist(ix)
-        if (length(ix)) 
-          x <- x[ix]
-        return(x)
-}
diff --git a/R/simulation.matrix.R b/R/simulation.matrix.R
new file mode 100644
index 0000000..5555454
--- /dev/null
+++ b/R/simulation.matrix.R
@@ -0,0 +1,116 @@
+#' Get Simulations as a Matrix
+#'
+#' Returns a MxN matrix where N is the number of simulations and M is the number
+#' of predicted values. Additionally, a ``labels'' attribute is attached that
+#' produces a human-readable identifier for each column.
+#' @param obj an object, typically a ``sim'' or ``pooled.sim'' object.
+#' @param which a character-vector specifying the \emph{titles} of quantities of
+#' interest to extract
+#' @param ... additional parameters
+#' @return a simulation matrix
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+simulation.matrix <- function (obj, which = NULL, ...) {
+  UseMethod("simulation.matrix")
+}
+
+#' @S3method simulation.matrix sim
+simulation.matrix.sim <- function (obj, which, ...) {
+
+  which <- find.match(which, attr(obj, "titles"))
+
+  if (is.na(which)) {
+    warning(
+      'The "which" parameter does not exist. Valid titles are:\n    ',
+      paste('"', names(obj$qi), '"', sep="", collapse=", ")
+      )
+
+    # Return a matrix containing the single entry NA
+    return(matrix(NA))
+  }
+
+  # Store the little matrix (probably a column-vector)
+  lil.matrix <- as.matrix(obj$qi[[which]])
+
+  # Specify what quantities of interest this matrix represents
+  attr(lil.matrix, "qi") <- which
+
+  # Return the little, modified matrix
+  lil.matrix
+}
+
+#' @S3method simulation.matrix pooled.sim
+simulation.matrix.pooled.sim <- function (obj, which, ...) {
+
+  # Get the best match for the value "which"
+  which <- find.match(which, attr(obj, "titles"))
+
+  # This will become the matrix that is returned
+  big.matrix <- NULL
+
+  # Iterate through all the results
+  for (label in names(obj)) {
+    # Get the matrix for the single quantity of interest
+    small.matrix <- simulation.matrix(obj[[label]], which = which, exact.match = FALSE)
+
+    # Column-bind this result with the total matrix.
+    # This might want to be wrapped by a tryCatch in case weird things happen
+    big.matrix <- cbind(big.matrix, small.matrix)
+  }
+
+  # Column-wise specification
+  attr(big.matrix, "labels") <- names(obj)
+  attr(big.matrix, "which") <- 1:ncol(big.matrix)
+  names(attr(big.matrix, "which")) <- names(obj)
+
+  # Specify what quantities of interest this matrix represents
+  attr(big.matrix, "qi") <- which
+
+  # Return the big matrix
+  big.matrix
+}
+
+#' Find a Partial or Exact Match from a Vector of Strings
+#' Searches a vector of character-string, and returns the best match.
+#' @param needle a character-string to search for in the 
+#' @param haystack a vector of character-strings
+#' @param fail the value to return in case no match is found. Defaults to NA
+#' @return the best-matched string or NA
+#' @details ``find.match'' attempts to use several common matching functions in
+#' an order that sequentially prefers less strict matching, until a suitable
+#' match is found. If none is found, then return the value of the ``fail''
+#' parameter (defaults to NA). The functions used for matching are: ``match'',
+#' ``charmatch'', and finally ``grep''.
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+find.match <- function (needle, haystack, fail = NA) {
+
+  # Having multiple approximate hits is bad form, since the string "x" can match
+  # "xe", "xen", "xs", etc. If it allows this possibility, we'll be constructing
+  # matrices out of potentially disparate quantities of interest. That is, it
+  # obviously would not be good to match the string "Average" with 
+  # "Averge Treatment Effect" and "Average Value".
+  # That is, we want our matrices to be constructed consistently
+  if (length(needle) != 1)
+    return(NA)
+
+  # Search the strings all at once for code clarity. We can write this smoother,
+  # but then it sacrifices readability for nested if clauses.
+  exact.match <- match(needle, haystack, nomatch = 0)
+  partial.match <- charmatch(needle, haystack, nomatch = 0)
+  grep.match <- grep(needle, haystack)[1]
+
+  # If we found an exact match, then we go with it.
+  if (exact.match != 0)
+    return(haystack[exact.match])
+
+  # If there is a unique partial match, then that will work too.
+  else if (partial.match != 0)
+    return(haystack[partial.match])
+
+  # If there are non-unique partial matches, then we take the first incidence
+  else if (!is.na(grep.match))
+    return(haystack[grep.match])
+
+  # If nothing else is good, then return whatever value a failure should be. NA by default
+  return(fail)
+}
diff --git a/R/simulations.plot.R b/R/simulations.plot.R
new file mode 100644
index 0000000..888c380
--- /dev/null
+++ b/R/simulations.plot.R
@@ -0,0 +1,186 @@
+#' Plot Quantities of Interest in a Zelig-fashion
+#'
+#' Various graph generation for different common types of simulated results from
+#' Zelig
+#' @usage simulations.plot(y, y1=NULL, xlab="", ylab="", main="", col=NULL, line.col=NULL, axisnames=TRUE)
+#' @param y A matrix or vector of simulated results generated by Zelig, to be
+#' graphed.
+#' @param y1 For comparison of two sets of simulated results at different
+#' choices of covariates, this should be an object of the same type and
+#' dimension as y.  If no comparison is to be made, this should be NULL. 
+#' @param xlab Label for the x-axis.
+#' @param ylab Label for the y-axis.
+#' @param main Main plot title.
+#' @param col A vector of colors.  Colors will be used in turn as the graph is
+#' built for main plot objects. For nominal/categorical data, this colors 
+#' renders as the bar color, while for numeric data it renders as the background
+#' color.
+#' @param line.col  A vector of colors.  Colors will be used in turn as the graph is
+#' built for line color shading of plot objects.
+#' @param axisnames a character-vector, specifying the names of the axes
+#' @return nothing
+#' @author James Honaker
+simulations.plot <-function(
+                      y, y1=NULL,
+                      xlab="", ylab="",
+                      main="",
+                      col=NULL,
+                      line.col=NULL,
+                      axisnames=TRUE
+                      ) {
+  ## Univariate Plots ##
+  if(is.null(y1)){
+
+    if (is.null(col))
+      col <- rgb(100,149,237,maxColorValue=255)
+
+    if (is.null(line.col))
+      line.col <- "black"
+
+    # Character
+    if (is.character(y)) {
+
+      # Try to cast y as integers, note that this is not always possible for the
+      # general case of characters
+      newy <- tryCatch(
+        as.numeric(y),
+        warning = function (w) NULL,
+        error = function (e) NULL
+        )
+
+      # If:
+      #   newy is not NULL (can be cast as a numeric) AND
+      #   newy is actually a collection of integers (not just numeric)
+      # Then:
+      #   we can tabulate (so sick)
+      if (!is.null(newy) && all(as.integer(y) == y)) {
+
+        # Create a sequence of names
+        nameseq <- paste("Y=", min(newy):max(newy), sep="")
+
+        # Set the heights of the barplots.
+        # Note that tablar requires that all out values are greater than zero.
+        # So, we subtract the min value (ensuring everything is at least zero)
+        # then add 1
+        bar.heights <- tabulate(newy - min(newy) + 1) / length(y)
+
+        # Barplot with (potentially) some zero columns
+        output <- barplot(
+          bar.heights,
+          xlab=xlab, ylab=ylab, main=main, col=col[1],
+          axisnames=axisnames, names.arg=nameseq
+          )
+      }
+
+      # Otherwise, we stick with old-style tables
+      else {
+        y <- if (is.null(levels(y)))
+          factor(y)
+        else
+          factor(y, levels = levels(y))
+
+        bar.heights <- table(y)/length(y)
+        bar.names <- paste("Y=", names(bar.heights), sep="")
+
+        output <- barplot(
+          bar.heights,
+          xlab=xlab, ylab=ylab, main=main, col=col[1],
+          axisnames=axisnames, names.arg=bar.names
+          )
+      }
+    }
+
+     ## Numeric
+     else if(is.numeric(y)){ 
+       den.y <- density(y)
+       output <- plot(den.y, xlab=xlab, ylab=ylab, main=main, col=line.col[1])
+       if(!identical(col[1],"n")){
+         polygon(den.y$x, den.y$y, col=col[1])
+       } 
+     }
+
+## Comparison Plots ##
+
+  }
+  else{
+
+## Character - Plot and shade a matrix  
+    if(is.character(y) & is.character(y1) & length(y)==length(y1) ){
+     
+       newy<-trunc(as.numeric(y))
+       newy1<-trunc(as.numeric(y1))
+
+       yseq<-min(c(newy,newy1)):max(c(newy,newy1))
+       nameseq<- paste("Y=",yseq,sep="")
+       n.y<-length(yseq)
+
+       colors<-rev(heat.colors(n.y^2))
+       lab.colors<-c("black","white")
+       comp<-matrix(NA,nrow=n.y,ncol=n.y)
+
+       for(i in 1:n.y){
+         for(j in 1:n.y){
+           flag<- newy==yseq[i] & newy1==yseq[j]
+           comp[i,j]<-mean(flag)
+         }
+       }
+
+       old.pty<-par()$pty
+       old.mai<-par()$mai
+
+       par(pty="s")
+       par(mai=c(0.3,0.3,0.3,0.1))
+
+       image(z=comp, axes=FALSE, col=colors, zlim=c(min(comp),max(comp)),main=main )  
+ 
+       locations.x<-seq(from=0,to=1,length=nrow(comp))
+       locations.y<-locations.x
+
+       for(m in 1:n.y){
+         for(n in 1:n.y){
+           text(x=locations.x[m],y=locations.y[n],labels=paste(round(100*comp[m,n])/100),col=lab.colors[(comp[m,n]> ((max(comp)-min(comp))/2) )+1])
+         }
+       }
+
+       axis(side=1,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=1)
+       axis(side=2,labels=nameseq, at=seq(0,1,length=n.y), cex.axis=1, las=3)
+       box()
+       par(pty=old.pty,mai=old.mai)
+
+
+## Numeric - Plot two densities on top of each other
+    }else if(is.numeric(y) & is.numeric(y1)){
+      if(is.null(col)){
+        col<-c("blue","red")
+      }else if(length(col)<2){
+        col<-c(col,col)
+      }
+
+      if(is.null(col)){
+        semi.col.x <-rgb(142,229,238,150,maxColorValue=255)
+        semi.col.x1<-rgb(255,114,86,150,maxColorValue=255)
+        col<-c(semi.col.x,semi.col.x1)
+      }else if(length(col)<2){
+        col<-c(col,col)
+      }
+
+      den.y<-density(y)
+      den.y1<-density(y1,bw=den.y$bw)
+
+      all.xlim<-c(min(c(den.y$x,den.y1$x)),max(c(den.y$x,den.y1$x)))
+      all.ylim<-c(min(c(den.y$y,den.y1$y)),max(c(den.y$y,den.y1$y)))
+
+      output<-plot(den.y,xlab=xlab,ylab=ylab,main=main,col=col[1],xlim=all.xlim,ylim=all.ylim)
+      par(new=TRUE)
+      output<-plot(den.y1,xlab=xlab,ylab=ylab,main="",col=col[2],xlim=all.xlim,ylim=all.ylim)
+  
+      if(!identical(col[1],"n")){
+        polygon(den.y$x,den.y$y,col=col[1])
+      }
+      if(!identical(col[1],"n")){
+        polygon(den.y1$x,den.y1$y,col=col[2])
+      }
+    }
+  }
+}
+
diff --git a/R/summarize.R b/R/summarize.R
index 62fd6b5..5f7f4a2 100644
--- a/R/summarize.R
+++ b/R/summarize.R
@@ -1,2 +1,145 @@
-summarize <- function(x, ...)
-  UseMethod("summarize", x)
+#' Generic methonf for summarizing simualted quantities of interest
+#' 
+#' @S3method summarize default
+#'
+#' @param obj a \code{qi} object, storing simulations of quantities of interest
+#' @return a \code{summarized.qi} object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summarize <- function(obj)
+  UseMethod("summarize")
+
+#' Summarize Simualted Quantities of Interest
+#'
+#' @usage \method{summarize}{default}(obj)
+#' @S3method summarize default
+#' @param obj a \code{qi} object, storing simulations of quantities of interest
+#' @return a 'summarized.qi' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summarize.default <- function(obj) {
+  res <- list()
+  titles <- names(attr(obj, ".index"))
+
+  for (key in titles) {
+    val <- obj[[key]]
+
+    if (!is.qi(val))
+      next
+
+    if (!is.matrix(val))
+      val <- matrix(val, ncol=1, nrow=length(val))
+
+    
+    # make a matrix that is data-friendly
+    m <- if (is.numeric(val)) {
+      matrix(NA, nrow=ncol(val), ncol=5)
+    }
+    else if (is.character(val) || is.factor(val)) {
+      levels <- levels(val)
+
+      if (is.null(levels)) {
+        #warning("Indeterminate number of levels for qi: ", key)
+        levels <- unique(c(val))
+      }
+
+      levels <- sort(levels)
+
+      matrix(NA, nrow=ncol(val), ncol=length(levels), dimnames=list(NULL, levels))
+    }
+
+    #
+    for (k in 1:ncol(val)) {
+      if (is.numeric(val[,k])) {
+        row <-c(
+                mean(val[,k], na.rm = TRUE),
+                sd(val[,k], na.rm = TRUE),
+                quantile(val[,k], c(.5, .025, .975), na.rm=TRUE)
+                ) 
+        m[k,] <- row
+
+
+        #
+        colnames(m) <- c("mean", "sd", "50%", "2.5%", "97.5%")
+      }
+    
+      else if (is.character(val[,k]) || is.factor(val[,k])) {
+
+        # A table specifying the _percentage_ of simulations matching
+        # each particular level of the factor qi's
+        result.table <- table.levels(val[,k], levels = levels)
+        result.table <- result.table/length(val[,k])
+
+        # A character-vector specifying the factors found in the qi
+        factor.names <- sort(names(result.table))
+
+        # This should prevent size errors for qi's with
+        # a NULL levels attribute
+        # in particular, it resolves issues 
+        m[k, ] <- 0
+        m[k, factor.names] <- result.table[factor.names]
+
+        m[k,] <- result.table
+        colnames(m) <- names(result.table)
+      }
+
+      else
+        m[k,] <- NA
+
+      col.names <- colnames(val)
+      rownames(m) <- if (is.null(col.names))
+        ""
+      else
+        col.names
+    }
+
+    # add to list
+    res[[key]] <- m
+  }
+
+  # cast as class - for some reason - then return
+  class(res) <- "summarized.qi"
+  res
+}
+
+
+#' Test If Value is Interpretable as a QI
+#' @param qi a potential quantity of interest
+#' @return a logical specifying whether this value should or should-not
+#'         be output
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+is.qi <- function(qi) {
+  if (is.null(qi))
+    return(FALSE)
+
+  else if (!length(qi))
+    return(FALSE)
+
+  else if (all(is.na(qi)))
+    return(FALSE)
+
+  TRUE
+}
+
+
+#' Create a table, but ensure that the correct
+#' columns exist. In particular, this allows for
+#' entires with zero as a value, which is not
+#' the default for standard tables
+#' @param x a vector
+#' @param levels a vector of levels
+#' @param ... parameters for table
+#' @return a table
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+table.levels <- function (x, levels, ...) {
+  # if levels are not explicitly set, then
+  # search inside of x
+  if (missing(levels)) {
+    levels <- attr(x, 'levels')
+    table(factor(x, levels=levels), ...)
+  }
+
+  # otherwise just do the normal thing
+  else {
+    table(factor(x, levels=levels), ...)
+  }
+}
diff --git a/R/summarize.array.R b/R/summarize.array.R
deleted file mode 100644
index 1149f4a..0000000
--- a/R/summarize.array.R
+++ /dev/null
@@ -1,24 +0,0 @@
-summarize.array <- function(x, rows = NULL, cip, stats, subset = NULL) {
-  if (is.function(subset)) { # subset = all; all is class "function"
-    res <- apply(x, c(2,3), summarize.default, stats = stats, cip = cip)
-    dimnames(res)[[3]] <- rows
-  }
-  if (is.null(subset)){# subset = NULL; summarizes all obs at once
-    tmp <- NULL
-    for (j in 1:dim(x)[3])
-      tmp <- rbind(tmp, x[,,j])
-    res <- apply(tmp, 2, summarize.default,
-                 stats = stats, cip = cip)
-  }
-  if (is.numeric(subset)) { # subset=integer, summarizes identified obs
-    if (length(subset) > 1) {
-      res <- apply(x[, , subset], c(2,3), summarize.default,
-                   stats = stats, cip = cip)
-      dimnames(res)[[3]] <- rows
-    }
-    else 
-      res <- apply(x[, , subset], 2, summarize.default,
-                   stats = stats, cip = cip)
-  }
-  res
-}
diff --git a/R/summarize.coxhazard.R b/R/summarize.coxhazard.R
deleted file mode 100644
index c5e9a36..0000000
--- a/R/summarize.coxhazard.R
+++ /dev/null
@@ -1,3 +0,0 @@
-summarize.coxhazard <- function(x, rows, cip, stats, subset = NULL) {
-  x
-}
diff --git a/R/summarize.default.R b/R/summarize.default.R
deleted file mode 100644
index 8ec79fd..0000000
--- a/R/summarize.default.R
+++ /dev/null
@@ -1,13 +0,0 @@
-summarize.default <- function(x, rows = NULL, cip, stats, subset = NULL) {
-  res <- NULL
-  if (is.numeric(x)) {
-    for (i in 1:length(stats))
-      res <- c(res, do.call(stats[i], list(x)))
-    res <- c(res, quantile(x, cip, na.rm=TRUE))
-    names(res) <- c(stats, paste(cip*100, "%", sep = ""))
-  }
-  else if (is.character(x)) {
-    res <- c(table(x) / length(x))
-  }
-  res
-}
diff --git a/R/summarize.ei.R b/R/summarize.ei.R
deleted file mode 100644
index 2436c67..0000000
--- a/R/summarize.ei.R
+++ /dev/null
@@ -1,24 +0,0 @@
-summarize.ei <- function(x, rows = NULL, cip, stats, subset = NULL) {
-  if (is.function(subset)) { # subset = all; all is class "function"
-    res <- apply(x, c(2,3,4), summarize.default, stats = stats, cip = cip)
-    dimnames(res)[[4]] <- rows
-  }
-  if (is.null(subset)){# subset = NULL; summarizes all obs at once
-    tmp <- NULL
-    tmp <- apply(x, c(2,3), rbind, tmp)
-    res <- apply(tmp, c(2,3), summarize.default,
-                 stats = stats, cip = cip)
-  }
-  if (is.numeric(subset)) { # subset=integer, summarizes identified obs
-    if (length(subset) > 1) {
-      res <- apply(x[, , , subset], c(2,3,4), summarize.default,
-                   stats = stats, cip = cip)
-      dimnames(res)[[4]] <- rows
-    }
-    else 
-      res <- apply(x[, , subset], 2, summarize.default,
-                   stats = stats, cip = cip)
-  }
-  dimnames(res)[2:3] <- dimnames(x)[2:3] 
-  res
-}
diff --git a/R/summarize.matrix.R b/R/summarize.matrix.R
deleted file mode 100644
index 5bdcb14..0000000
--- a/R/summarize.matrix.R
+++ /dev/null
@@ -1,25 +0,0 @@
-summarize.matrix <- function(x, rows, cip, stats, subset = NULL) {
-  if (is.function(subset)) {
-    res <- apply(x, 2, summarize.default, stats = stats, cip = cip)
-    colnames(res) <- rows
-  }
-  if (is.null(subset)) {
-    if (length(rows) == 1) {
-      res <- apply(x, 2, summarize.default, stats = stats, cip = cip)
-      if (length(res) == 1)
-        names(res) <- as.character(unique(x))
-    }
-    else {
-      tmp <- NULL
-      for (i in 1:dim(x)[2])
-        tmp <- c(tmp, x[,i])
-      res <- summarize.default(tmp, stats = stats, cip = cip)
-    }
-  }
-  if (is.numeric(subset)) {
-    res <- apply(as.matrix(x[,subset]), 2, summarize.default,
-                 cip = cip, stats = stats)
-    colnames(res) <- rows
-  }
-  res
-}
diff --git a/R/summary.Arima.R b/R/summary.Arima.R
deleted file mode 100644
index b6d0566..0000000
--- a/R/summary.Arima.R
+++ /dev/null
@@ -1,4 +0,0 @@
-summary.Arima <- function(object, ...){
-  print(object, ...)
-}
-
diff --git a/R/summary.BetaReg.R b/R/summary.BetaReg.R
deleted file mode 100644
index c218b69..0000000
--- a/R/summary.BetaReg.R
+++ /dev/null
@@ -1,10 +0,0 @@
-summary.BetaReg <- function(object, digits = getOption("digits"), ...) {
-  summ <- matrix(NA, nrow = length(object$coef), ncol = 3)
-  colnames(summ) <- c("Estimate", "SD", "t-stat")
-  rownames(summ) <- names(object$coef)
-  summ[,1] <- object$coef
-  summ[,2] <- sqrt(diag(object$variance))
-  summ[,3] <- summ[,1] / summ[,2]
-  object$coefficients <- summ
-  object
-}
diff --git a/R/summary.MCMCZelig.R b/R/summary.MCMCZelig.R
deleted file mode 100644
index c297013..0000000
--- a/R/summary.MCMCZelig.R
+++ /dev/null
@@ -1,19 +0,0 @@
-
-summary.MCMCZelig <- function(object, quantiles = c(0.025, 0.5, 0.975), ...) {
-  require(coda)
-  out <- list()
-  out$summary <- cbind(summary(object$coefficients)$statistics[,1:2],
-                          summary(object$coefficients,
-  quantiles=quantiles)$quantiles)
-                       
-  colnames(out$summary) <- c("Mean", "SD", paste(quantiles*100, "%",sep=""))
-  stuff <- attributes(object$coefficients)
-  out$call <- object$call
-  out$start <- stuff$mcpar[1]
-  out$end <- stuff$mcpar[2]
-  out$thin <- stuff$mcpar[3]
-  out$nchain <- 1
-  class(out) <- "summary.MCMCZelig"
-  out
-}
-
diff --git a/R/summary.MI.R b/R/summary.MI.R
deleted file mode 100644
index 6705679..0000000
--- a/R/summary.MI.R
+++ /dev/null
@@ -1,49 +0,0 @@
-summary.MI <- function(object, subset = NULL, ...){
-  res <- list()
-  if (is.null(subset))
-    M <- 1:length(object)
-  else
-    M <- c(subset)
-  for (i in M)  res[[i]] <- summary(object[[i]])
-  ans <- list()
-  ans$zelig <- getzelig(object[[1]])
-  ans$call <- getcall(object[[1]])
-  if (length(M) > 1) {
-    ans$all <- res
-    coef1 <- se1 <- NULL
-    for (i in M){
-      tmp <-  getcoef(res[[i]])
-      coef1 <- cbind(coef1, tmp[,1])
-      se1 <- cbind(se1, tmp[,2])
-    }
-    rows <- nrow(coef1)
-    Q <- apply(coef1, 1, mean)
-    U <- apply(se1^2, 1, mean)
-    B <- apply((coef1-Q)^2, 1, sum)/(length(M)-1)
-    var <- U+(1+1/length(M))*B
-    nu <- (length(M)-1)*(1+U/((1+1/length(M))*B))^2
-    coef.table <- matrix(NA, nrow = rows, ncol = 4)
-    dimnames(coef.table) <- list(rownames(coef1),
-                                 c("Value", "Std. Error", "t-stat", "p-value"))
-    coef.table[,1] <- Q
-    coef.table[,2] <- sqrt(var)
-    coef.table[,3] <- Q/sqrt(var)
-    coef.table[,4] <- pt(abs(Q/sqrt(var)), df=nu, lower.tail=F)*2
-    ans$coefficients <- coef.table
-    ans$cov.scaled <- ans$cov.unscaled <- NULL
-    for (i in 1:length(ans)) {
-      if (is.numeric(ans[[i]]) && !names(ans)[i] %in% c("coefficients")){
-        tmp <- NULL
-        for (j in M)
-          tmp <- cbind(tmp, res[[j]][[pmatch(names(ans)[i], names(res[[j]]))]])
-        ans[[i]] <- apply(tmp, 1, mean)
-      }
-    }
-    class(ans) <- "summary.MI"
-  } else if (length(M) == 1) {
-    ans <- summary(object[[M]])
-  } else {
-    stop("invalid input for `subset'")
-  }
-  return(ans)
-}
diff --git a/R/summary.R b/R/summary.R
new file mode 100644
index 0000000..2a7576e
--- /dev/null
+++ b/R/summary.R
@@ -0,0 +1,404 @@
+# Summary of MCMCZelig Object
+#
+# This method produces a summary object for \code{MCMCZelig} objects,
+# reporting the posterior mean, standard deviation and the requested
+# quantiles of each coefficient's MCMC sample
+# @param object an "MCMCZelig" object
+# @param quantiles a numeric vector specifying the quantiles to use in the
+# summary object.
+# @param ... ignored parameters
+# @return a \code{summary.MCMCZelig} object
+#' @S3method summary MCMCZelig
+summary.MCMCZelig <- function(object, quantiles = c(0.025, 0.5, 0.975), ...) {
+  out <- list()
+  # mean and SD come from the "statistics" table of the coda summary of
+  # the coefficient chains; the quantiles come from a second summary() call
+  out$summary <- cbind(
+                       summary(coef(object))$statistics[,1:2],
+                       summary(coef(object), quantiles=quantiles)$quantiles
+                       )
+                       
+  colnames(out$summary) <- c("Mean", "SD", paste(quantiles*100, "%",sep=""))
+  # the mcpar attribute of an mcmc object stores (start, end, thin)
+  stuff <- attributes(coef(object))
+  out$call <- object$call
+  out$start <- stuff$mcpar[1]
+  out$end <- stuff$mcpar[2]
+  out$thin <- stuff$mcpar[3]
+  out$nchain <- 1
+  class(out) <- "summary.MCMCZelig"
+  out
+}
+#' Method for summarizing simulations of multiply imputed quantities of interest
+#'
+#' @S3method summary MI.sim
+#' @usage \method{summary}{MI.sim}(object, ...)
+#' @param object a `MI.sim' object
+#' @param ... ignored parameters
+#' @return a `summarySim.MI' object: a list keyed by quantity-of-interest
+#' name, each element holding that quantity's statistics per imputation
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summary.MI.sim <- function(object, ...) {
+
+  summarized.list <- list()
+
+  # invert the nesting: `object' is keyed by imputation, each entry
+  # carrying a `stats' list keyed by quantity of interest; regroup so
+  # the result is keyed by quantity of interest first
+  for (key in names(object)) {
+
+    stats <- object[[key]]$stats
+
+    for (qi.name in names(stats))
+      summarized.list[[qi.name]][[key]] <- stats[[qi.name]]
+
+  }
+
+  class(summarized.list) <- "summarySim.MI"
+
+  summarized.list
+}
+#' Summary of Generalized Linear Model with Robust Error Estimates
+#'
+#' Returns summary of a glm model with robust error estimates. This only
+#' slightly differs from how the standard GLM's behave.
+#' @usage \method{summary}{glm.robust}(object, ...)
+#' @S3method summary glm.robust
+#' @param object a ``glm.robust'' fitted model
+#' @param ... parameters to pass to the standard ``summary.glm'' method
+#' @return an object of type ``summary.glm.robust'' and ``summary.glm''
+summary.glm.robust <- function(object, ...) {
+  # compute the ordinary GLM summary first, then overwrite its
+  # covariance and coefficient table with robust estimates
+  class(object) <- c("glm", "lm")
+  res <- summary.glm(object, ...)
+  if (is.null(object$robust)) {
+    # no robust spec stored on the fit: default to vcovHAC
+    # (presumably sandwich::vcovHAC -- confirm the package is attached)
+    res$cov.unscaled <- covmat.unscaled <- vcovHAC(object)
+    res$robust <- "vcovHAC"
+  } else {
+    # the fit records which robust variance estimator to call and its
+    # arguments; invoke it with the fitted model as `x'
+    fn <- object$robust$method
+    res$robust <- object$robust$method
+    object$robust$method <- NULL
+    arg <- object$robust
+    arg$x <- object
+    res$cov.unscaled <- covmat.unscaled <- eval(do.call(fn, args=arg))
+  }
+  res$cov.scaled <- covmat <- covmat.unscaled*res$dispersion
+  if (!is.null(res$correlation)) {
+    # rebuild the correlation matrix from the robust covariance
+    dd <- sqrt(diag(res$cov.unscaled))
+    res$correlation <- res$cov.unscaled/outer(dd, dd)
+    dimnames(res$correlation) <- dimnames(res$cov.unscaled)
+  }
+
+  # recompute standard errors, test statistics and p-values from the
+  # robust covariance matrix
+  res$coefficients[,2] <- s.err <- sqrt(diag(covmat))
+  res$coefficients[,3] <- tvalue <- coefficients(object)/s.err
+  if (length(dimnames(res$coefficients)[[2]])>3) {
+    # choose normal vs. t reference distribution by the column label
+    if (dimnames(res$coefficients)[[2]][3]=="z value")
+      res$coefficients[,4] <- 2 * pnorm(-abs(tvalue))
+    else
+      res$coefficients[,4] <- 2 * pt(-abs(tvalue), object$df.residual)
+  }
+  class(res) <- c("summary.glm.robust","summary.glm")
+  return(res)
+}
+#' Return a Summary of a Set of Pooled Simulated Interests
+#'
+#' Returns the summary information from a set of pooled simulated interests.
+#' The object returned contains the slots ``labels'', a character-vector
+#' specifying the labels (explanatory variable titles) of the qi's, ``titles'',
+#' a character vector specifying the names of the quantities of interest, and
+#' ``stats'', a list containing the quantities of interest.
+#' @usage \method{summary}{pooled.sim}(object, ...)
+#' @S3method summary pooled.sim
+#' @param object a ``pooled.sim'' object, containing information about
+#' simulated quantities of interest
+#' @param ... Ignored parameters
+#' @return a ``summary.pooled.sim'' object storing the replicated quantities of
+#' interest
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summary.pooled.sim <- function (object, ...) {
+  # NOTE(review): only `stats' and `titles' are used below; the other
+  # accumulators appear to be leftovers -- confirm before removing
+  model <- list()
+  stats <- list()
+  titles <- list()
+  original <- list()
+  call <- list()
+  x <- list()
+  x1 <- list()
+
+  # collect each simulation's statistics and titles, keyed by its label
+  for (key in names(object)) {
+    o <- object[[key]]
+
+    stats[[key]] <- o$stats
+    titles[[key]] <- o$titles
+  }
+
+  s <- list(
+            labels = names(object),
+            titles = names(object[[1]]$stats),
+            stats = stats
+            )
+
+  class(s) <- "summary.pooled.sim"
+
+  s
+}
+#' Summary for ``Relogit'' Fitted Model
+#'
+#' Summarize important components of the ``relogit'' model
+#' @usage \method{summary}{Relogit}(object, ...)
+#' @S3method summary Relogit
+#' @param object a ``Relogit'' object
+#' @param ... other parameters
+#' @return a ``summary.relogit'' object
+summary.Relogit <- function(object, ...) {
+  dta <- model.matrix(terms(object), data=model.frame(object))
+  # delegate to the summary method of the underlying fit class
+  class(object) <- class(object)[2]
+  res <- summary(object, ...)
+  if (object$bias.correct) {
+    # rescale the covariance by (n/(n+k))^2 for the rare-events bias
+    # correction (presumably King & Zeng's correction -- confirm)
+    n <- nrow(dta)
+    k <- ncol(dta)
+    res$cov.unscaled <- res$cov.unscaled * (n/(n+k))^2
+    res$cov.scaled <- res$cov.unscaled * res$dispersion
+    # recompute SEs, t-statistics and p-values from the adjusted covariance
+    res$coefficients[,2] <- sqrt(diag(res$cov.scaled))
+    res$coefficients[,3] <- res$coefficients[,1] / res$coefficients[,2]
+    res$coefficients[,4 ] <- 2*pt(-abs(res$coefficients[,3]), res$df.residual)
+  }
+  # carry the relogit-specific settings over to the summary object
+  res$call <- object$call
+  res$tau <- object$tau
+  res$bias.correct <- object$bias.correct
+  res$prior.correct <- object$prior.correct
+  res$weighting <- object$weighting
+  class(res) <- "summary.relogit"
+  return(res)
+}
+#' Summary for ``Relogit2'' Fitted Model
+#'
+#' Summarize important components of the ``relogit'' model
+#' @usage \method{summary}{Relogit2}(object, ...)
+#' @S3method summary Relogit2
+#' @param object a ``Relogit2'' object
+#' @param ... other parameters
+#' @return a ``summary.relogit2'' object
+summary.Relogit2 <- function(object, ...) {
+  # a Relogit2 fit carries two Relogit fits (lower and upper estimates);
+  # summarize each one separately
+  res <- list()
+  res$lower.estimate <- summary.Relogit(object$lower.estimate)
+  res$upper.estimate <- summary.Relogit(object$upper.estimate)
+  res$call <- object$call
+  class(res) <- "summary.relogit2"
+  return(res)
+}
+
+
+
+
+
+
+
+
+
+
+
+
+#' Method for summarizing simulations of quantities of interest
+#'
+#' Return a ``summary.sim'' object (typically for display)
+#' @S3method summary sim
+#' @usage \method{summary}{sim}(object, ...)
+#' @param object a 'sim' object
+#' @param ... ignored parameters
+#' @return a 'summary.sim' object
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summary.sim <- function(object, ...) {
+  # copy the display-relevant pieces of the simulation object into a
+  # plain list; the model-specific name is prepended to the class so
+  # individual models can supply their own print methods
+  res <- list(
+              model    = object$model,
+              stats    = object$stats,
+              titles   = object$titles,
+              original = object$result,
+              call     = object$call,
+              zeligcall= object$zcall,
+              x        = object$x,
+              x1       = object$x1,
+              num      = object$num
+              )
+  class(res) <- c(object$name, "summary.sim")
+  res
+}
+#' Zelig Object Summaries
+#'
+#' Compute summary data for zelig objects
+#' @S3method summary zelig
+#' @usage \method{summary}{zelig}(object, ...)
+#' @param object a zelig object
+#' @param ... parameters forwarded to the generic summary object
+#' @return the summary of the fitted model
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summary.zelig <- function (object, ...) {
+  # For now, simply get the summary of the result object
+  obj <- eval(object$result)
+
+  if (isS4(obj)) {
+    # for S4 fits, locate a class for which a 'summary' method exists
+    sigs <- findMethodSignatures('summary')
+    classes <- class(obj)
+
+    # Remove classes that do not have 'summary' methods
+    # NOTE(review): this subsets `classes' by a logical vector computed
+    # from `sigs'; the two lengths will usually differ -- confirm this
+    # selects the intended classes
+    intersection <- classes[ ! sigs %in% classes ]
+    intersection <- na.omit(intersection)
+    intersection <- as.character(intersection)
+
+    # Summary only has one parameter, so we only consider the first one
+    # This may be slightly dangerous, but it should not fail
+    sig <- intersection[1]
+    
+    # if an attempt to get the summary fails, replace with a call to the S3
+    SUMMARY <- tryCatch(getMethod('summary', sig), error = function(e) summary)
+
+    # return
+    SUMMARY(obj)
+  }
+
+  else
+    # S3 objects have no problem figuring out which method to use
+    summary(obj)
+}
+#' Summary of ``setx'' Object
+#'
+#' Compute summary data for ``setx'' objects
+#' @S3method summary setx
+#' @usage \method{summary}{setx}(object, ...)
+#' @param object a ``setx'' object
+#' @param ... ignored parameters
+#' @return a ``summary.setx'' object describing the explanatory settings
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+summary.setx <- function (object, ...) {
+  # strip model.matrix bookkeeping attributes so the matrix displays cleanly
+  mm <- object$matrix
+  attr(mm, "assign") <- NULL
+  attr(mm, "contrasts") <- NULL
+
+
+  structure(
+    list(
+      call = object$call,
+      label = object$label,
+      model.name = object$name,
+      formula = object$formula,
+      model.matrix = mm
+    ),
+    class = "summary.setx"
+    )
+}
+#' Summary of Multiply Imputed Statistical Models Using Rubin's Rule
+#'
+#' Pools the summaries of models fitted to each of several multiply
+#' imputed data sets, combining coefficient estimates and standard
+#' errors by Rubin's rule
+#' @S3method summary MI
+#' @usage \method{summary}{MI}(object, subset = NULL, ...)
+#' @param object a set of fitted statistical models
+#' @param subset an optional vector of indices specifying which imputed
+#' data sets to pool; defaults to all of them
+#' @param ... parameters to forward
+summary.MI <- function (object, subset = NULL, ...) {
+
+  if (length(object) == 0)
+    stop('Invalid input for "subset"')
+
+  else if (length(object) == 1)
+    return(summary(object[[1]]))
+
+  #
+  getcoef <- function(obj) {
+    # S4
+    if (!isS4(obj))
+      coef(obj)
+    else if ("coef3" %in% slotNames(obj))
+      obj@coef3
+    else
+      obj@coef
+  }
+
+
+  #
+  res <- list()
+
+  # Get indices
+  subset <- if (is.null(subset))
+    1:length(object)
+  else
+    c(subset)
+
+  # Compute the summary of all objects
+  for (k in subset) {
+    res[[k]] <- summary(object[[k]])
+  }
+
+
+  # Answer
+  ans <- list(
+              zelig = object[[1]]$name,
+              call = object[[1]]$result$call,
+              all = res
+              )
+
+  #
+  coef1 <- se1 <- NULL
+
+  #
+  for (k in subset) {
+    tmp <-  getcoef(res[[k]])
+    coef1 <- cbind(coef1, tmp[, 1])
+    se1 <- cbind(se1, tmp[, 2])
+  }
+
+  rows <- nrow(coef1)
+  Q <- apply(coef1, 1, mean)
+  U <- apply(se1^2, 1, mean)
+  B <- apply((coef1-Q)^2, 1, sum)/(length(subset)-1)
+  var <- U+(1+1/length(subset))*B
+  nu <- (length(subset)-1)*(1+U/((1+1/length(subset))*B))^2
+
+  coef.table <- matrix(NA, nrow = rows, ncol = 4)
+  dimnames(coef.table) <- list(rownames(coef1),
+                               c("Value", "Std. Error", "t-stat", "p-value"))
+  coef.table[,1] <- Q
+  coef.table[,2] <- sqrt(var)
+  coef.table[,3] <- Q/sqrt(var)
+  coef.table[,4] <- pt(abs(Q/sqrt(var)), df=nu, lower.tail=F)*2
+  ans$coefficients <- coef.table
+  ans$cov.scaled <- ans$cov.unscaled <- NULL
+
+  for (i in 1:length(ans)) {
+    if (is.numeric(ans[[i]]) && !names(ans)[i] %in% c("coefficients")) {
+      tmp <- NULL
+      for (j in subset) {
+        r <- res[[j]]
+        tmp <- cbind(tmp, r[[pmatch(names(ans)[i], names(res[[j]]))]])
+      }
+      ans[[i]] <- apply(tmp, 1, mean)
+    }
+  }
+
+  class(ans) <- "summaryMI"
+  ans
+}
+
+# Print a ``summaryMI'' Object
+#
+# Displays either the pooled (Rubin's rule) results or, when `subset'
+# is given, the stored per-imputation summaries.
+# @param x a ``summaryMI'' object produced by summary.MI
+# @param subset NULL for pooled output; a numeric vector (or function)
+# selecting individual imputations to print
+# @param ... parameters forwarded to print for each per-dataset summary
+print.summaryMI <- function(x, subset = NULL, ...){
+  m <- length(x$all)
+  if (m == 0)
+    m <- 1
+  # guard against requesting an imputation outside 1..m
+  if (any(subset > max(m)))
+    stop("the subset selected lies outside the range of available \n        observations in the MI regression output.")
+  cat("\n  Model:", x$zelig)
+  cat("\n  Number of multiply imputed data sets:", m, "\n")
+  if (is.null(subset)) {
+    # pooled output: combined call and Rubin's-rule coefficient table
+    cat("\nCombined results:\n\n")
+    cat("Call:\n")
+    print(x$call)
+    cat("\nCoefficients:\n")
+    print(x$coefficients)
+    cat("\nFor combined results from datasets i to j, use summary(x, subset = i:j).\nFor separate results, use print(summary(x), subset = i:j).\n\n")
+  }
+  else {
+    # print the stored summary for each requested imputation
+    if (is.function(subset))
+      M <- 1:m
+    if (is.numeric(subset))
+      M <- subset
+    for(i in M){
+      cat(paste("\nResult with dataset", i, "\n"))
+      print(x$all[[i]], ...)
+    }
+  }
+}
+
diff --git a/R/summary.coxph.naive.R b/R/summary.coxph.naive.R
deleted file mode 100644
index 311085e..0000000
--- a/R/summary.coxph.naive.R
+++ /dev/null
@@ -1,12 +0,0 @@
-summary.coxph.naive <- function(object, ...){
-  class(object) <- c("coxph")
-  res <- summary(object,...)
-  naivese <- res$coef[,3]
-  expcoef <- res$coef[,2]
-  res$coef[,2] <- naivese
-  res$coef[,3] <- expcoef
-  colnames(res$coef)[2] <- "se(coef)"
-  colnames(res$coef)[3] <- "exp(coef)"
-  class(res) <- c("summary.coxph.naive", "summary.coxph")
-  return(res)
-}
diff --git a/R/summary.coxph.robust.R b/R/summary.coxph.robust.R
deleted file mode 100644
index c120113..0000000
--- a/R/summary.coxph.robust.R
+++ /dev/null
@@ -1,15 +0,0 @@
-summary.coxph.robust <- function(object, ...){
-  class(object) <- c("coxph")
-  res <- summary(object,...)
-  robustse <- res$coef[,4]
-  expcoef <- res$coef[,2]
-  res$coef[,2] <- robustse
-  res$coef[,4] <- expcoef
-  colnames(res$coef)[2] <- "robust se"
-  colnames(res$coef)[4] <- "exp(coef)"
-  class(res) <- c("summary.coxph.robust", "summary.coxph")
-  return(res)
-}
-
-
-
diff --git a/R/summary.eiRxC.R b/R/summary.eiRxC.R
deleted file mode 100644
index 7b8273a..0000000
--- a/R/summary.eiRxC.R
+++ /dev/null
@@ -1,13 +0,0 @@
-summary.eiRxC <- function(object, ...) {
-  out <- list(call = object$call)
-  coef <- matrix(NA, nrow = length(object$coefficients), ncol = 3)
-  rownames(coef) <- names(object$coefficients)
-  coef[,1] <- object$coefficients
-  coef[,2] <- sqrt(diag(object$vcov))
-  coef[,3] <- coef[,1] / coef[,2]
-  #coef[,4] <- dchisq(coef[,3])  # Fix
-  colnames(coef) <- c("Estimate", "Std. Error", "t value")
-  out$coefficients <- coef
-  class(out) <- "eiRxC"
-  out
-}
diff --git a/R/summary.gee.naive.R b/R/summary.gee.naive.R
deleted file mode 100644
index 7300683..0000000
--- a/R/summary.gee.naive.R
+++ /dev/null
@@ -1,12 +0,0 @@
-summary.gee.naive <- function(object, ...){
-  class(object) <- c("gee", "glm")	
-  ##res <- summary.gee(object,...)
-  res <- summary(object,...)
-  res$coefficients <- res$coefficients[,-5]	
-  res$coefficients[,2] <- s.err <- sqrt(diag(object$naive.variance))	
-  res$coefficients[,3] <- zvalue <- coef(object)/s.err	
-  res$coefficients[,4] <- 2 * pnorm(-abs(zvalue))	
-  colnames(res$coefficients) <- c("Estimate", "Naive SE", "z value", "Pr(>|z|)")
-  class(res) <- c("summary.gee.naive", "summary.gee")
-  return(res)
-}
diff --git a/R/summary.gee.robust.R b/R/summary.gee.robust.R
deleted file mode 100644
index e27c8df..0000000
--- a/R/summary.gee.robust.R
+++ /dev/null
@@ -1,12 +0,0 @@
-summary.gee.robust <- function(object, ...){
-  class(object) <- c("gee", "glm")
-  ##res <- summary.gee(object,...)
-  res <- summary(object,...)	
-  res$coefficients <- res$coefficients[,-5]
-  res$coefficients[,2] <- s.err <- sqrt(diag(object$robust.variance))
-  res$coefficients[,3] <- zvalue <- coef(object)/s.err
-  res$coefficients[,4] <- 2 * pnorm(-abs(zvalue))
-  colnames(res$coefficients) <- c("Estimate", "Robust SE", "z value", "Pr(>|z|)")
-  class(res) <- c("summary.gee.robust", "summary.gee")
-  return(res)
-}
diff --git a/R/summary.glm.robust.R b/R/summary.glm.robust.R
deleted file mode 100644
index ea731fe..0000000
--- a/R/summary.glm.robust.R
+++ /dev/null
@@ -1,32 +0,0 @@
-summary.glm.robust <- function(object, ...) {
-  class(object) <- c("glm", "lm")
-  res <- summary.glm(object, ...)
-  if (is.null(object$robust)) {
-    res$cov.unscaled <- covmat.unscaled <- vcovHAC(object)
-    res$robust <- "vcovHAC"
-  } else {
-    fn <- object$robust$method
-    res$robust <- object$robust$method
-    object$robust$method <- NULL
-    arg <- object$robust
-    arg$x <- object
-    res$cov.unscaled <- covmat.unscaled <- eval(do.call(fn, args=arg))
-  }
-  res$cov.scaled <- covmat <- covmat.unscaled*res$dispersion
-  if (!is.null(res$correlation)) {
-    dd <- sqrt(diag(res$cov.unscaled))
-    res$correlation <- res$cov.unscaled/outer(dd, dd)
-    dimnames(res$correlation) <- dimnames(res$cov.unscaled)
-  }
-
-  res$coefficients[,2] <- s.err <- sqrt(diag(covmat))
-  res$coefficients[,3] <- tvalue <- coefficients(object)/s.err
-  if (length(dimnames(res$coefficients)[[2]])>3) {
-    if (dimnames(res$coefficients)[[2]][3]=="z value")
-      res$coefficients[,4] <- 2 * pnorm(-abs(tvalue))
-    else
-      res$coefficients[,4] <- 2 * pt(-abs(tvalue), object$df.residual)
-  }
-  class(res) <- c("summary.glm.robust","summary.glm")
-  return(res)
-}
diff --git a/R/summary.lm.robust.R b/R/summary.lm.robust.R
deleted file mode 100644
index b55b09f..0000000
--- a/R/summary.lm.robust.R
+++ /dev/null
@@ -1,26 +0,0 @@
-summary.lm.robust <- function(object, ...) {
-  class(object) <- "lm"
-  res <- summary.lm(object, ...)
-  if (is.null(object$robust)) {
-    res$cov.unscaled <- R <- vcovHC(object)/(res$sigma^2)
-    res$robust <- "vcovHC"
-  }
-  else {
-    fn <- object$robust$method
-    res$robust <- object$robust$method
-    object$robust$method <- NULL
-    arg <- object$robust
-    arg$x <- object
-    res$cov.unscaled <- R <- eval(do.call(fn, arg))/(res$sigma^2)
-  }
-  res$coefficients[,2] <- se <- sqrt(diag(R))*res$sigma
-  if (!is.null(res$correlation)) {
-    res$correlation <- (R * res$sigma^2)/outer(se, se)
-    dimnames(res$correlation) <- dimnames(res$cov.unscaled)
-  }
-  res$coefficients[,3] <- tval <- coefficients(object)/se
-  res$coefficients[,4] <- 2*pt(abs(tval), res$df[2], lower.tail =
-                               FALSE)
-  class(res) <- c("summary.lm.robust", "summary.lm")
-  return(res)
-}
diff --git a/R/summary.netglm.R b/R/summary.netglm.R
deleted file mode 100644
index 2a1ec54..0000000
--- a/R/summary.netglm.R
+++ /dev/null
@@ -1,74 +0,0 @@
-summary.netglm <- function (object, dispersion = NULL, correlation = FALSE, symbolic.cor = FALSE, 
-    ...) 
-{
-    est.disp <- FALSE
-    df.r <- object$df.residual
-    if (is.null(dispersion)) 
-        dispersion <- if (object$family$family %in% c("poisson", 
-            "binomial")) 
-            1
-        else if (df.r > 0) {
-            est.disp <- TRUE
-            if (any(object$weights == 0)) 
-                warning("observations with zero weight not used for calculating dispersion")
-            sum((object$weights * object$residuals^2)[object$weights > 
-                0])/df.r
-        }
-        else {
-            est.disp <- TRUE
-            NaN
-        }
-    aliased <- is.na(coef(object))
-    p <- object$rank
-    if (p > 0) {
-        p1 <- 1:p
-        Qr <- object$qr
-        coef.p <- object$coefficients[Qr$pivot[p1]]
-        names(coef.p) <- object$names
-        covmat.unscaled <- chol2inv(Qr$qr[p1, p1, drop = FALSE])
-        dimnames(covmat.unscaled) <- list(names(coef.p), names(coef.p))
-        covmat <- dispersion * covmat.unscaled
-        var.cf <- diag(covmat)
-        s.err <- sqrt(var.cf)
-        tvalue <- coef.p/s.err
-        dn <- c("Estimate", "Std. Error")
-        if (!est.disp) {
-            pvalue <- 2 * pnorm(-abs(tvalue))
-            coef.table <- cbind(coef.p, s.err, tvalue, pvalue)
-            dimnames(coef.table) <- list(names(coef.p), c(dn, 
-                "z value", "Pr(>|z|)"))
-        }
-        else if (df.r > 0) {
-            pvalue <- 2 * pt(-abs(tvalue), df.r)
-            coef.table <- cbind(coef.p, s.err, tvalue, pvalue)
-            dimnames(coef.table) <- list(names(coef.p), c(dn, 
-                "t value", "Pr(>|t|)"))
-        }
-        else {
-            coef.table <- cbind(coef.p, NaN, NaN, NaN)
-            dimnames(coef.table) <- list(names(coef.p), c(dn, 
-                "t value", "Pr(>|t|)"))
-        }
-        df.f <- NCOL(Qr$qr)
-    }
-    else {
-        coef.table <- matrix(, 0, 4)
-        dimnames(coef.table) <- list(NULL, c("Estimate", "Std. Error", 
-            "t value", "Pr(>|t|)"))
-        covmat.unscaled <- covmat <- matrix(, 0, 0)
-        df.f <- length(aliased)
-    }
-    ans <- c(object[c("call", "terms", "family", "deviance", 
-        "aic", "contrasts", "df.residual", "null.deviance", "df.null", 
-        "iter", "na.action")], list(deviance.resid = residuals(object, 
-        type = "deviance"), coefficients = coef.table, aliased = aliased, 
-        dispersion = dispersion, df = c(object$rank, df.r, df.f), 
-        cov.unscaled = covmat.unscaled, cov.scaled = covmat))
-    if (correlation && p > 0) {
-        dd <- sqrt(diag(covmat.unscaled))
-        ans$correlation <- covmat.unscaled/outer(dd, dd)
-        ans$symbolic.cor <- symbolic.cor
-    }
-    class(ans) <- "summary.glm"
-    return(ans)
-}
diff --git a/R/summary.relogit.R b/R/summary.relogit.R
deleted file mode 100644
index bce1f7b..0000000
--- a/R/summary.relogit.R
+++ /dev/null
@@ -1,34 +0,0 @@
-summary.relogit <- function(object, ...) {
-
-  dta <- model.matrix(terms(object), data=model.frame(object))
-  class(object) <- class(object)[2]
-  res <- summary(object, ...)
-  if (object$bias.correct) {
-    n <- nrow(dta)
-    k <- ncol(dta)
-    res$cov.unscaled <- res$cov.unscaled * (n/(n+k))^2
-    res$cov.scaled <- res$cov.unscaled * res$dispersion
-    res$coef[,2] <- sqrt(diag(res$cov.scaled))
-    res$coef[,3] <- res$coef[,1] / res$coef[,2]
-    res$coef[,4 ] <- 2*pt(-abs(res$coef[,3]), res$df.residual)
-  }
-  res$call <- object$call
-  res$tau <- object$tau
-  res$bias.correct <- object$bias.correct
-  res$prior.correct <- object$prior.correct
-  res$weighting <- object$weighting
-  class(res) <- "summary.relogit"
-  return(res)
-}
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/summary.relogit2.R b/R/summary.relogit2.R
deleted file mode 100644
index c6ee09b..0000000
--- a/R/summary.relogit2.R
+++ /dev/null
@@ -1,21 +0,0 @@
-summary.relogit2 <- function(object, ...) {
-
-  res <- list()
-  res$lower.estimate <- summary.relogit(object$lower.estimate)
-  res$upper.estimate <- summary.relogit(object$upper.estimate)
-  res$call <- object$call
-  class(res) <- "summary.relogit2"
-  return(res)
-}
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/summary.setx.R b/R/summary.setx.R
deleted file mode 100644
index 336a96a..0000000
--- a/R/summary.setx.R
+++ /dev/null
@@ -1,8 +0,0 @@
-summary.setx <- function(object, ...) {
-  res <- matrix(NA, nrow(object), ncol(object))
-  for (i in 1:ncol(object))
-    res[,i] <- object[,i]
-  colnames(res) <- colnames(object)
-  res <- as.data.frame(res)
-  summary(res, ...)
-}
diff --git a/R/summary.setx.cond.R b/R/summary.setx.cond.R
deleted file mode 100644
index e585b1c..0000000
--- a/R/summary.setx.cond.R
+++ /dev/null
@@ -1,23 +0,0 @@
-summary.setx.cond <- function(object, ...) {
-  if(any(class(object)=="setx.MI")){
-	res <- list()
-      for (j in 1:length(object)){
-	  res[[j]] <- matrix(NA, nrow(object[[j]]), ncol(object[[j]]))
-	  for (i in 1:ncol(object[[j]])){
-          res[[j]][,i] <- object[[j]][,i]
-	  }
-  	    colnames(res[[j]]) <- colnames(object[[j]])
-  	    res[[j]] <- as.data.frame(res[[j]])
-  	    res[[j]] <- summary(res[[j]],...)
-	}
-  }	
-  else{
-    res <- matrix(NA, nrow(object), ncol(object))
-    for (i in 1:ncol(object))
-      res[,i] <- object[,i]
-    colnames(res) <- colnames(object)
-    res <- as.data.frame(res)
-    res <- summary(res, ...)
-  }
-  return(res)
-}
\ No newline at end of file
diff --git a/R/summary.strata.R b/R/summary.strata.R
deleted file mode 100644
index ff89c12..0000000
--- a/R/summary.strata.R
+++ /dev/null
@@ -1,27 +0,0 @@
-summary.strata <- function(object, subset = NULL, ...) {
-  res <- list()
-  if(is.null(subset))
-    M <- 1:length(object)
-  else
-    M <- c(subset)
-  for (i in 1:length(M))
-    res[[i]] <- summary(object[[i]])
-  if (any(class(object[[1]]) == "MI")) {
-    by <- object[[1]][[1]]$call$by
-    call <- object[[1]][[1]]$call
-  }
-  else {
-    by <- object[[1]]$call$by
-    call <- object[[1]]$call
-  }
-  names(res) <- paste("summary.", by, names(object)[M], sep = "")
-  res$call <- call
-  res$by <- by
-  res$lev <- names(object)[M]
-  res$M <- M
-  class(res) <- "summary.strata"
-  res
-}
-
-
-
diff --git a/R/summary.vglm.R b/R/summary.vglm.R
deleted file mode 100644
index 35e6deb..0000000
--- a/R/summary.vglm.R
+++ /dev/null
@@ -1,4 +0,0 @@
-summary.vglm<-function(object, ...) {
-  return (summaryvlm(object,...))
-}
-
diff --git a/R/summary.zaov.R b/R/summary.zaov.R
deleted file mode 100644
index 86c697a..0000000
--- a/R/summary.zaov.R
+++ /dev/null
@@ -1,26 +0,0 @@
-summary.zaovlist <- function (object, ...) {
-        object$call <- NULL
-        stats:::summary.aovlist(object,...)
-}
-
-
-summary.zmaov <- function(object, ...){
-        summary.aov(object)
-}
-
-
-summary.zmlm <- function (object, ...) {
-        frm <- object$call$formula
-        frm <- eval(frm)
-        ## solving a bug in the stats::summary.mlm, it does not work if
-        ## you pass the formula as a variable
-        object$call$formula <- frm
-        ## coef.aov gives the wrong formatting and I want
-        ## to use coef.default instead for class aov
-        if("aov" %in% class(object)){
-                ix <- grep("aov", class(object))
-                class(object) <- class(object)[-ix]
-        }
-        
-        summary.mlm(object,...)
-}
diff --git a/R/summary.zelig.R b/R/summary.zelig.R
deleted file mode 100644
index 8f24649..0000000
--- a/R/summary.zelig.R
+++ /dev/null
@@ -1,71 +0,0 @@
-summary.zelig<-function(object, subset = NULL, CI=95, 
-                        stats=c("mean", "sd"), ...){
-  cip <- c((100-CI)/200, 1-(100-CI)/200)
-  qi.stats <- list()
-  X <- object$x
-  X1 <- object$x1
-  if (any(class(X)=="setx.MI") & any(class(X)=="cond")) {
-    X <- NULL
-    for (i in 1:length(object$x)) 
-      X <- rbind(X, object$x[[i]])
-  }
-  if (any(class(X1)=="setx.MI")) {
-    X1 <- NULL
-    for (i in 1:length(object$x1)) 
-      X1 <- rbind(X1, object$x1[[i]])
-  }
-  if (is.null(dim(X))) {
-    if (!is.null(X)) { 
-      X <- matrix(X, ncol = 1)
-      colnames(X) <- "(Intercept)"
-      if (!is.null(X1)) {
-        X1 <- matrix(X1, ncol = 1)
-        colnames(X1) <- "(Intercept)"
-      }
-    }
-  }
-  if (is.numeric(subset)) {
-    X <- X[subset,]
-    if (!is.null(X1))
-      X1 <- X1[subset,]
-  }
-  rows <- rownames(X)
-  #object$qi$tt.pr <- object$qi$tt.ev <- 
-  #  object$qi.name$tt.pr <- object$qi.name$tt.ev <- NULL
-  for (i in 1:length(object$qi)) {
-    qi.stats[[i]] <- summarize(object$qi[[i]], rows = rows,
-                               stats = stats, cip = cip, 
-                               subset = subset)
-    if (is.matrix(qi.stats[[i]]))
-      qi.stats[[i]] <- t(qi.stats[[i]])
-    if (is.table(qi.stats[[i]]))
-      qi.stats[[i]] <- t(as.matrix(qi.stats[[i]]))
-    if (all(c(is.null(subset), !is.null(X), nrow(X) > 1)))
-      object$qi.name[i] <- paste("Pooled", object$qi.name[[i]])
-  }
-  names(qi.stats) <- names(object$qi)
-  res <- list(model=object$zelig$model, num=object$call$num, x=X,
-              x1=X1, qi.stats=qi.stats, qi.name=object$qi.name) 
-  class(res) <- "summary.zelig"
-  return(res)
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/summary.zelig.arima.R b/R/summary.zelig.arima.R
deleted file mode 100644
index cc62b9c..0000000
--- a/R/summary.zelig.arima.R
+++ /dev/null
@@ -1,57 +0,0 @@
-summary.zelig.arima <- function(object, CI=95, stats=c("mean", "sd"),  ...){
-  if (is.null(object$qi$fd)){
-    if (is.null(object$qi$t.eff)){
-      test.array <- array(NA, c(ncol(object$qi[[1]]), length(stats)+2, 2))
-      for (i in 1:(length(object$qi)-1)){
-        for (j in 1:length(stats)){
-          test.array[,j, i]<- t(as.matrix(apply(object$qi[[i]], 2, stats[j])))
-        }
-        test.array[, c(length(stats) +1 , length(stats) + 2), i] <-
-          t(as.matrix(apply(object$qi[[i]], 2, "quantile",
-                            c((100-CI)/200, 1-(100-CI) / 200))))
-      }
-      colnames(test.array) <- c(stats, paste(as.character((100-CI)/200), "%", sep=""),
-                               paste(as.character(1-(100-CI)/200), "%", sep=""))
-      dimnames(test.array)[[3]] <- object$qi.name[1:2] 
-      rownames(test.array) <- (object$min.time+1):(object$min.time + nrow(test.array))  
-      number.sim <- nrow(object$qi$ev)
-    }
-    
-    if (!is.null(object$qi$t.eff)){
-      test.array <- array(NA, c(ncol(object$qi[[1]]), length(stats)+2, 3))
-      for (i in 1:length(object$qi)){
-        for (j in 1:length(stats)){
-          test.array[,j, i]<- t(as.matrix(apply(object$qi[[i]], 2, stats[j])))
-        }
-        test.array[, c(length(stats) +1 , length(stats) + 2), i] <-
-          t(as.matrix(apply(object$qi[[i]], 2, "quantile",
-                            c((100-CI)/200, 1-(100-CI)/200))))
-      }
-      colnames(test.array) <- c(stats, paste(as.character((100-CI)/200), "%", sep=""),
-                               paste(as.character(1-(100-CI)/200), "%", sep=""))
-      dimnames(test.array)[[3]] <- object$qi.name 
-      rownames(test.array) <- (object$min.time+1):(object$min.time + nrow(test.array))  
-      number.sim <- nrow(object$qi$ev)
-    }
-  }
-  if (!is.null(object$qi$fd)){
-    test.array <- array(NA, c(ncol(object$qi[[1]]), length(stats)+2, 3))
-    for (i in 1:(length(object$qi)-1)){
-      for (j in 1:length(stats)){
-        test.array[,j, i] <- t(as.matrix(apply(object$qi[[i]], 2, stats[j])))
-      }
-      test.array[, c(length(stats) +1 , length(stats) + 2), i] <-
-        t(as.matrix(apply(object$qi[[i]], 2, "quantile",
-                          c((100-CI)/200, 1-(100-CI)/200))))
-    }
-    colnames(test.array) <- c(stats, paste(as.character((100-CI)/200), "%", sep=""),
-                              paste(as.character(1-(100-CI)/200), "%", sep=""))
-    dimnames(test.array)[[3]] <- object$qi.name[1:3]
-    rownames(test.array) <- (object$min.time+1):(object$min.time + nrow(test.array)) 
-    number.sim <- nrow(object$qi$ev)   
-  }
-  out <- list(number.sim = number.sim, test.array = test.array,
-              zelig.call = object$zelig.call)
-  class(out) <- "arimaSummary"
-  out
-}
diff --git a/R/summary.zelig.rqs.strata.R b/R/summary.zelig.rqs.strata.R
deleted file mode 100644
index 6a85386..0000000
--- a/R/summary.zelig.rqs.strata.R
+++ /dev/null
@@ -1,8 +0,0 @@
-summary.zelig.rqs.strata <- function(object, subset=NULL, CI=95,
-                                     stats=c("mean", "sd", "min", "max"),...)
-{
-    class(object) <- "zelig.strata"
-    if(is.null(subset))
-        subset <- rq
-    summary(object, subset, CI, stats)
-}
diff --git a/R/summary.zelig.strata.R b/R/summary.zelig.strata.R
deleted file mode 100644
index 756cd59..0000000
--- a/R/summary.zelig.strata.R
+++ /dev/null
@@ -1,84 +0,0 @@
-summary.zelig.strata <-function(object, subset = NULL, CI=95, 
-                        stats=c("mean", "sd", "min", "max"), ...){
-  m <- length(object)
-  if(is.null(subset)) { # summary for all strata together
-    qi <- object[[1]]$qi
-    if(length(dim(qi[[1]]))==3) {
-      w <- NULL
-      for (i in 1:length(object))
-        w <- c(w, dim(object[[i]]$qi[[1]])[3])
-    }
-    for (i in 1:length(qi)){
-      qi1i <- object[[1]]$qi[[i]]
-      if(any(class(object[[1]]$x)=="cond")) {# conditional prediction
-        if(length(dim(qi1i))==3){
-          tmp <- array(NA, dim=c(dim(qi1i)[1:2], sum(w)))
-          tmp[,1:dim(qi1i)[2],1:dim(qi1i)[3]] <- qi1i
-        } else {
-          tmp <- as.matrix(qi1i)
-        }
-      } else { # unconditional prediction
-        if(length(dim(qi1i))==3){
-          tmp <- array(NA, dim=c(dim(qi1i)[1:2], sum(w)))
-          tmp[,,1:w[1]] <- qi1i[,,1:w[1]] 
-        } else {
-          tmp <- as.matrix(qi1i)
-        }
-      }
-      for (j in 2:m) {
-        qiji <- object[[j]]$qi[[i]]
-        if(any(class(object[[j]]$x)=="cond")) {# conditional prediction
-          if(length(dim(qi1i))==3) 
-            tmp[,,(sum(w[1:(m-1)])+1):sum(w[1:m])] <- qiji
-          else
-            tmp <- cbind(tmp, qiji)
-        } else{ # unconditional prediction
-          if(length(dim(qi1i))==3)
-            tmp[(sum(w[1:(j-1)])+1):sum(w[1:j]),,] <- qiji[1:w[j],,]
-          else
-            tmp <- cbind(tmp, as.matrix(qiji))
-        }
-      }
-      qi[[i]] <- tmp
-    }
-    c <- match.call()
-    c$num <- object[[1]]$call$num
-    res <- list(qi=qi, qi.name=object[[1]]$qi.name,
-                x=object[[1]]$x, x1=NULL, call=c,
-                zelig.call=object[[1]]$zelig.call)
-    return(summary.zelig(res, ...))  
-  } else { # summary for each strata
-    res <- list()
-    if(is.function(subset)){
-      m <- length(object)
-      subset <- 1:m
-    }
-    else
-      m <- length(subset)
-    for (i in 1:m) {
-      res[[i]] <- summary.zelig(object[[subset[i]]], subset=NULL, ...)
-      names(res)[i] <- names(object)[i]
-    }
-    class(res) <- "summary.zelig.strata"
-    return(res)
-  }
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/summary.zeliglist.R b/R/summary.zeliglist.R
deleted file mode 100644
index fde97d8..0000000
--- a/R/summary.zeliglist.R
+++ /dev/null
@@ -1,33 +0,0 @@
-summary.zeliglist<-function(object, subset = NULL, CI=95, 
-                        stats=c("mean", "sd"), ...){
-        nm <- names(object)
-        lst <- list()
-        for(obj in object){
-                class(obj) <- c("zelig", class(obj))
-                res <- summary.zelig(obj, subset = subset, CI=CI,stats=stats, ...)
-                
-                lst <- c(lst, list(res))
-        }
-        names(lst) <- nm
-        return(lst)
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/R/t.setx.R b/R/t.setx.R
new file mode 100644
index 0000000..525c178
--- /dev/null
+++ b/R/t.setx.R
@@ -0,0 +1,14 @@
+#' Matrix Transpose of a ``setx'' Object
+#'
+#' Returns a ``setx'' object as column vector. If multiple values for each
+#' explanatory term has been set, then return a NxM matrix where `N'
+#' is the number of explanatory terms and `M' is the number of values set for
+#' each term.
+#'
+#' @S3method t setx
+#' @usage \method{t}{setx}(x)
+#' @param x a `setx' object
+#' @return a transposed matrix
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+t.setx <- function(x)
+  t(x$matrix)
diff --git a/R/terms.R b/R/terms.R
new file mode 100644
index 0000000..cc8165c
--- /dev/null
+++ b/R/terms.R
@@ -0,0 +1,235 @@
+###
+## terms
+## -accept single and multiple equations:
+## -in case of single equations, the equation is named "mu". is this right?
+## -if mu=y~x:z then the attr(tt,"variable") gives list(y,x:z). Should it be list(y,x,z) ??
+## -
+
+
+#' Extract Terms from a \code{multiple} Object
+#'
+#' Extracts terms from Zelig-3.5-style formulae. This function is scheduled for
+#' removal.
+#' @usage \method{terms}{multiple}(x, data=NULL,...)
+#' @param x a Zelig v3.5 formula
+#' @param data a \code{data.frame}
+#' @param ... ignored parameters
+#' @author Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
+#' @S3method terms multiple
+terms.multiple<-function(x, data=NULL,...){
+        object <- x
+        termsexist<-attr(object,"terms")
+        if(!(is.null(termsexist)))
+          return (termsexist)
+        
+        nreq<-nrConstr<-nrEquationsNew<-0
+        constr<-XconstrEqn<-variables<-termlabels<-depVars<-objectNew<-intercAttr<-depFactors<-list()
+        depFactorVar<-depLevels<-namesConstr<-c()
+        if(!(any(class(object)=="list"))){
+                object<-list(object)
+                names(object)<-"mu"
+        }
+        namesOfEquations<- names(object)
+        nrEquations <-length(object)
+        "%w/o%" <- function(x,y) x[!x %in% y]
+        
+        for (i in 1:nrEquations){
+                TT<-terms.formula(object[[i]], specials=c("id","tag"))               
+                attrTTvars<-attr(TT,"variables")
+                attrTTlabels<-attr(TT,"term.labels")
+                
+                eqni<-object[[i]]                    
+                namei<-namesOfEquations[[i]]            
+                tagattr<-attr(TT,"specials")$tag         
+                hastag<-!(is.null(tagattr))
+                if (hastag){
+                        ## has tag so make a new list of variables and term.labels
+                        newVars<-list()           
+                        newLabels<-c()
+                        indxV<-indxL<-1
+                        constrTmp<-c()
+                        for(j in 1:length(tagattr)){
+                                taglabels<-c()
+                                if(length(eqni)==3)
+                                  lind<-tagattr[[j]]-1
+                                else
+                                  lind<-tagattr[[j]]
+                                vind<-tagattr[[j]]+1
+                                ## add all vars/terms prior to tag into new list of
+                                ## newVars and newLabels
+                                for(v in indxV:(vind))
+                                  newVars<-c(newVars,attrTTvars[[v]])
+                                newVars[[length(newVars)]]<-NULL
+                                indxV<-vind+1
+                                
+                                for(l in c(indxL:lind))
+                                  newLabels<-c(newLabels,attrTTlabels[[l]])
+                                newLabels<-newLabels[-(length(newLabels))]
+                                indxL<-lind+1
+                                
+                                ## deparse and fix the tag
+                                tagAsList <-.fixTag(.deparseTag(attrTTvars[[vind]]))
+                                for (tindx in 1:length(tagAsList)){
+                                        t<-tagAsList[[tindx]]
+                                        if(((t$var %in% namesOfEquations)==FALSE) && t$var != "none" && t$var != "1"){
+                                                newVars<-c(newVars,parse(text=t$var)[[1]])
+                                                newLabels<-c(newLabels,t$var)
+                                        }
+                                        if(((t$id %in% namesOfEquations)==FALSE) && t$id !="none" && t$id !="1"){
+                                                ##print(t$id)
+                                                newVars<-c(newVars,parse(text=t$id)[[1]])
+                                                newLabels<-c(newLabels,t$id)
+                                        }
+                                        ## constraints ?
+                                        if(t$var !="none" && t$label !="none" && t$id =="none"){
+                                                nrConstr<-nrConstr+1
+                                                namesConstr<-c(namesConstr,t$label)
+                                                constr[[nrConstr]]<-c(i,t$label,t$var)
+                                                constrTmp<-c(constrTmp,t$var)   ##???? what is constrTMP?
+                                        }
+                                }
+                        }
+                        ## if there is any var/term remaining after tags
+                        ## add them to newVars and newLabels
+                        if(length(attrTTvars)>vind){
+                                for(v in (vind+1):length(attrTTvars))
+                                  newVars<-c(newVars,attrTTvars[[v]])
+                        }
+                        
+                        if(length(attrTTlabels)>lind){
+                                for(l in (lind+1):length(attrTTlabels))
+                                  newLabels<-c(newLabels,attrTTlabels[[l]])
+                        }
+                        
+                        XconstrEqn[[i]]<-constrTmp
+
+                        ## make newVars and newLabels unique
+                        newVars<-unique(newVars)  
+                        newLabels <- unique(newLabels)
+                } else{
+                        ## there is no tag => newVars and newLabels remain unchanged
+                        newVars<-attrTTvars
+                        newLabels<-attrTTlabels
+                }
+                nrEquationsNew<-nrEquationsNew+1
+                objectNew[[namei]]<-eqni
+                if (length(eqni)==3){
+
+                        nreq=nreq+1    ## number of required equations
+                        lhs<-eqni[[2]]
+                        if (length(lhs)>1 && lhs[[1]]=="id"){
+                                depVars[[namei]]<-lhs[[3]]
+                                depFactorVar<-c(depFactors,deparse(lhs[[2]]))
+                                depLevels<-c(depLevels,lhs[[3]])
+                        }else
+                        depVars[[namei]]<-deparse(eqni[[2]])
+                        
+                }
+                attr(TT,"variables")<-as.call(newVars)
+                attr(TT,"term.labels")<-newLabels
+                variables[[namei]]<-attr(TT,"variables")
+                termlabels[[namei]]<-attr(TT,"term.labels")
+                intercAttr[[namei]]<-attr(TT,"intercept")
+        }  ## end of for each equation
+        
+        namesOfEquations<-names(objectNew)
+        myattr<-list()
+        result<-objectNew
+        constraints<-subs<-FALSE
+
+        ## construct constraints
+        namesConstr<-unique(namesConstr)
+        if(length(constr)>0){
+                constraints<-matrix(NA,nrow=nrEquationsNew,ncol=length(namesConstr),dimnames=list(namesOfEquations,namesConstr))
+                for(i in 1:length(constr)){
+                        constri<-constr[[i]]
+                        eqind<-constri[[1]]
+                        eq<-namesOfEquations[as.numeric(eqind)]
+                        lab<-constri[[2]]
+                        constraints[eq,lab]<-constri[[3]]
+                }
+        }
+        
+        indVars<-unique(unlist(termlabels))
+        if(length(depFactorVar) !=0)
+          depFactors<-list("depFactorVar"=unique(unlist(depFactorVar)),"depLevels"=depLevels)
+        else
+          depFactors<-FALSE
+        
+        whiche<-which(lapply(termlabels,length)!=0)
+        myattr$systEqns<-names(whiche)
+        myattr$ancilEqns<-"%w/o%"(namesOfEquations,myattr$systEqns)
+        
+        myattr$variables<-variables
+        myattr$term.labels<-termlabels
+        myattr$indVars<-indVars
+        
+        myattr$depVars<-depVars
+        myattr$depFactors<-depFactors
+        myattr$constraints<-constraints
+        myattr$subs<-subs
+        myattr$response<-1
+        myattr$intercept<-intercAttr
+        attributes(result)<-myattr
+        names(result)<-namesOfEquations
+        class(result)<-c("terms","multiple","list")
+        return(result)
+}
+
+###
+## Fix the deparsed tag
+## 
+
+
+.fixTag <- function(l){
+        
+        if(l$var == "1" && l$label!="none"){
+                ## tag(1,z1 | state) == tag (z1|state)
+                l$var <- l$label
+                l$label <- "none"
+                
+        }
+        if(l$label =="none"){
+                ## tag(1+z1|state)
+                vars<-.trim(unlist(strsplit(l$var,"+", fixed=TRUE)))
+        }else{
+                ## tag(z1,w1+w2|state)
+                vars<-.trim(unlist(strsplit(l$label,"+", fixed=TRUE)))
+        }
+        if(length(vars) == 1){
+                ## nothing to expand
+                return (list(l))
+        }else{
+                alltgs<-list()
+                for(i in 1:length(vars)){
+                        if(l$label == "none")
+                          alltgs[[i]] <- list(label="none",var=vars[[i]],id=l$id)
+                        else
+                          alltgs[[i]] <- list(label="none",var=paste(l$var,":",vars[[i]],sep=""),id=l$id)
+                        
+                }
+        }
+        return (alltgs)
+        
+}
+#' Model Terms for 'vglm' Models
+#' @usage \method{terms}{vglm}(x, ...)
+#' @S3method terms vglm
+#' @param x a fitted model object from the VGAM library
+#' @param ... ignored parameters
+#' @return the models terms of this fitted model object
+#' @author Ferdinand Alimadhi, Kosuke Imai and Olivia Lau
+terms.vglm <- function(x, ...)
+  x at terms$terms
+#' Model Terms for a Zelig Object
+#' 
+#' This method simply extracts the model terms for the fitted model passed to 
+#' the \code{zelig} function.
+#' @S3method terms zelig
+#' @usage \method{terms}{zelig}(x, ...)
+#' @param x a \code{zelig} object
+#' @param ... forwarded parameters
+#' @return terms of the original fitted model
+terms.zelig <- function (x, ...) {
+  terms(x$result, ...)
+}
diff --git a/R/terms.multiple.R b/R/terms.multiple.R
deleted file mode 100644
index 81de637..0000000
--- a/R/terms.multiple.R
+++ /dev/null
@@ -1,204 +0,0 @@
-###
-## terms
-## -accept single and multiple equations:
-## -in case of single equations, the equation is named "mu". is this right?
-## -if mu=y~x:z then the attr(tt,"variable") gives list(y,x:z). Should it be list(y,x,z) ??
-## -
-
-
-terms.multiple<-function(x, data=NULL,...){
-        object <- x
-        termsexist<-attr(object,"terms")
-        if(!(is.null(termsexist)))
-          return (termsexist)
-        
-        nreq<-nrConstr<-nrEquationsNew<-0
-        constr<-XconstrEqn<-variables<-termlabels<-depVars<-objectNew<-intercAttr<-depFactors<-list()
-        depFactorVar<-depLevels<-namesConstr<-c()
-        if(!(any(class(object)=="list"))){
-                object<-list(object)
-                names(object)<-"mu"
-        }
-        namesOfEquations<- names(object)
-        nrEquations <-length(object)
-        "%w/o%" <- function(x,y) x[!x %in% y]
-        
-        for (i in 1:nrEquations){
-                TT<-terms.formula(object[[i]], specials=c("id","tag"))               
-                attrTTvars<-attr(TT,"variables")
-                attrTTlabels<-attr(TT,"term.labels")
-                
-                eqni<-object[[i]]                    
-                namei<-namesOfEquations[[i]]            
-                tagattr<-attr(TT,"specials")$tag         
-                hastag<-!(is.null(tagattr))
-                if (hastag){
-                        ## has tag so make a new list of variables and term.labels
-                        newVars<-list()           
-                        newLabels<-c()
-                        indxV<-indxL<-1
-                        constrTmp<-c()
-                        for(j in 1:length(tagattr)){
-                                taglabels<-c()
-                                if(length(eqni)==3)
-                                  lind<-tagattr[[j]]-1
-                                else
-                                  lind<-tagattr[[j]]
-                                vind<-tagattr[[j]]+1
-                                ## add all vars/terms prior to tag into new list of
-                                ## newVars and newLabels
-                                for(v in indxV:(vind))
-                                  newVars<-c(newVars,attrTTvars[[v]])
-                                newVars[[length(newVars)]]<-NULL
-                                indxV<-vind+1
-                                
-                                for(l in c(indxL:lind))
-                                  newLabels<-c(newLabels,attrTTlabels[[l]])
-                                newLabels<-newLabels[-(length(newLabels))]
-                                indxL<-lind+1
-                                
-                                ## deparse and fix the tag
-                                tagAsList <-.fixTag(.deparseTag(attrTTvars[[vind]]))
-                                for (tindx in 1:length(tagAsList)){
-                                        t<-tagAsList[[tindx]]
-                                        if(((t$var %in% namesOfEquations)==FALSE) && t$var != "none" && t$var != "1"){
-                                                newVars<-c(newVars,parse(text=t$var)[[1]])
-                                                newLabels<-c(newLabels,t$var)
-                                        }
-                                        if(((t$id %in% namesOfEquations)==FALSE) && t$id !="none" && t$id !="1"){
-                                                ##print(t$id)
-                                                newVars<-c(newVars,parse(text=t$id)[[1]])
-                                                newLabels<-c(newLabels,t$id)
-                                        }
-                                        ## constraints ?
-                                        if(t$var !="none" && t$label !="none" && t$id =="none"){
-                                                nrConstr<-nrConstr+1
-                                                namesConstr<-c(namesConstr,t$label)
-                                                constr[[nrConstr]]<-c(i,t$label,t$var)
-                                                constrTmp<-c(constrTmp,t$var)   ##???? what is constrTMP?
-                                        }
-                                }
-                        }
-                        ## if there is any var/term remaining after tags
-                        ## add them to newVars and newLabels
-                        if(length(attrTTvars)>vind){
-                                for(v in (vind+1):length(attrTTvars))
-                                  newVars<-c(newVars,attrTTvars[[v]])
-                        }
-                        
-                        if(length(attrTTlabels)>lind){
-                                for(l in (lind+1):length(attrTTlabels))
-                                  newLabels<-c(newLabels,attrTTlabels[[l]])
-                        }
-                        
-                        XconstrEqn[[i]]<-constrTmp
-
-                        ## make newVars and newLabels unique
-                        newVars<-unique(newVars)  
-                        newLabels <- unique(newLabels)
-                } else{
-                        ## there is no tag => newVars and newLabels remain unchanged
-                        newVars<-attrTTvars
-                        newLabels<-attrTTlabels
-                }
-                nrEquationsNew<-nrEquationsNew+1
-                objectNew[[namei]]<-eqni
-                if (length(eqni)==3){
-
-                        nreq=nreq+1    ## number of required equations
-                        lhs<-eqni[[2]]
-                        if (length(lhs)>1 && lhs[[1]]=="id"){
-                                depVars[[namei]]<-lhs[[3]]
-                                depFactorVar<-c(depFactors,deparse(lhs[[2]]))
-                                depLevels<-c(depLevels,lhs[[3]])
-                        }else
-                        depVars[[namei]]<-deparse(eqni[[2]])
-                        
-                }
-                attr(TT,"variables")<-as.call(newVars)
-                attr(TT,"term.labels")<-newLabels
-                variables[[namei]]<-attr(TT,"variables")
-                termlabels[[namei]]<-attr(TT,"term.labels")
-                intercAttr[[namei]]<-attr(TT,"intercept")
-        }  ## end of for each equation
-        
-        namesOfEquations<-names(objectNew)
-        myattr<-list()
-        result<-objectNew
-        constraints<-subs<-FALSE
-
-        ## construct constraints
-        namesConstr<-unique(namesConstr)
-        if(length(constr)>0){
-                constraints<-matrix(NA,nrow=nrEquationsNew,ncol=length(namesConstr),dimnames=list(namesOfEquations,namesConstr))
-                for(i in 1:length(constr)){
-                        constri<-constr[[i]]
-                        eqind<-constri[[1]]
-                        eq<-namesOfEquations[as.numeric(eqind)]
-                        lab<-constri[[2]]
-                        constraints[eq,lab]<-constri[[3]]
-                }
-        }
-        
-        indVars<-unique(unlist(termlabels))
-        if(length(depFactorVar) !=0)
-          depFactors<-list("depFactorVar"=unique(unlist(depFactorVar)),"depLevels"=depLevels)
-        else
-          depFactors<-FALSE
-        
-        whiche<-which(lapply(termlabels,length)!=0)
-        myattr$systEqns<-names(whiche)
-        myattr$ancilEqns<-"%w/o%"(namesOfEquations,myattr$systEqns)
-        
-        myattr$variables<-variables
-        myattr$term.labels<-termlabels
-        myattr$indVars<-indVars
-        
-        myattr$depVars<-depVars
-        myattr$depFactors<-depFactors
-        myattr$constraints<-constraints
-        myattr$subs<-subs
-        myattr$response<-1
-        myattr$intercept<-intercAttr
-        attributes(result)<-myattr
-        names(result)<-namesOfEquations
-        class(result)<-c("terms","multiple","list")
-        return(result)
-}
-
-###
-## Fix the deparsed tag
-## 
-
-
-.fixTag <- function(l){
-        
-        if(l$var == "1" && l$label!="none"){
-                ## tag(1,z1 | state) == tag (z1|state)
-                l$var <- l$label
-                l$label <- "none"
-                
-        }
-        if(l$label =="none"){
-                ## tag(1+z1|state)
-                vars<-.trim(unlist(strsplit(l$var,"+", fixed=TRUE)))
-        }else{
-                ## tag(z1,w1+w2|state)
-                vars<-.trim(unlist(strsplit(l$label,"+", fixed=TRUE)))
-        }
-        if(length(vars) == 1){
-                ## nothing to expand
-                return (list(l))
-        }else{
-                alltgs<-list()
-                for(i in 1:length(vars)){
-                        if(l$label == "none")
-                          alltgs[[i]] <- list(label="none",var=vars[[i]],id=l$id)
-                        else
-                          alltgs[[i]] <- list(label="none",var=paste(l$var,":",vars[[i]],sep=""),id=l$id)
-                        
-                }
-        }
-        return (alltgs)
-        
-}
diff --git a/R/terms.vglm.R b/R/terms.vglm.R
deleted file mode 100644
index be245cb..0000000
--- a/R/terms.vglm.R
+++ /dev/null
@@ -1,2 +0,0 @@
-terms.vglm <- function(x, ...)
-  x at terms$terms
diff --git a/R/terms.zaov.R b/R/terms.zaov.R
deleted file mode 100644
index e177e4e..0000000
--- a/R/terms.zaov.R
+++ /dev/null
@@ -1,13 +0,0 @@
-terms.zaovlist <- function(object,...){
-        Terms<- attr(object,"terms")
-        indError <- attr(Terms, "specials")$Error
-        if (length(indError)){
-                errorterm <- attr(Terms, "variables")[[1 + indError]]
-                formula <- update.formula(Terms, paste(". ~ .-", deparse(errorterm, 
-                           width.cutoff = 500, backtick = TRUE), "+", deparse(errorterm[[2]], 
-                           width.cutoff = 500, backtick = TRUE)))
-                terms.formula(formula)
-        } else {
-                terms.formula(object)
-        }
-}
diff --git a/R/termsFromFormula.R b/R/termsFromFormula.R
new file mode 100644
index 0000000..c3397c4
--- /dev/null
+++ b/R/termsFromFormula.R
@@ -0,0 +1,15 @@
+#' Extract Terms from Zelig-style Formulae
+#'
+#' This method is a sugary function to extract terms from any type of 
+#' Zelig-style formula.
+#' @param obj a Zelig-style formula
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+termsFromFormula <- function (obj) {
+  # Do not put all of this function on one line, because it will make error
+  # messages long and confusing
+  obj <- parseFormula(obj)
+
+  # Actually extract the terms, then return
+  terms(obj)
+}
diff --git a/R/ternaryplot.R b/R/ternaryplot.R
deleted file mode 100644
index 7ffc497..0000000
--- a/R/ternaryplot.R
+++ /dev/null
@@ -1,89 +0,0 @@
-ternaryplot <- function (x, scale = 1, dimnames = NULL, dimnames.position = c("corner", 
-    "edge", "none"), dimnames.color = "black", id = NULL, id.color = "black", 
-    coordinates = FALSE, grid = TRUE, grid.color = "gray", labels = c("inside", 
-        "outside", "none"), labels.color = "darkgray", border = "black", 
-    bg = "white", pch = 19, cex = 1, prop.size = FALSE, col = "red", 
-    main = "ternary plot", ...)  {
-
-    ## From vcd (Version  0.1-3.3).  Function by David Meyer
-    labels <- match.arg(labels)
-    if (grid == TRUE) 
-        grid <- "dotted"
-    if (coordinates) 
-        id <- paste("(", round(x[, 1] * scale, 1), ",", round(x[, 
-            2] * scale, 1), ",", round(x[, 3] * scale, 1), ")", 
-            sep = "")
-    dimnames.position <- match.arg(dimnames.position)
-    if (is.null(dimnames) && dimnames.position != "none") 
-        dimnames <- colnames(x)
-    if (is.logical(prop.size) && prop.size) 
-        prop.size <- 3
-    if (ncol(x) != 3) 
-        stop("Need a matrix with 3 columns")
-    if (any(x) < 0) 
-        stop("X must be non-negative")
-    s <- rowSums(x)
-    if (any(s <= 0)) 
-        stop("each row of X must have a positive sum")
-    x <- x/s
-    top <- sqrt(3)/2
-    par(plt = c(0.06, 0.94, 0.15, 0.87))
-    plot.new()
-    xlim <- c(-0.03, 1.03)
-    ylim <- c(0, top)
-    par(usr = c(xlim, ylim), oma = c(0, 0, 1, 0))
-    plot.window(xlim = xlim, ylim = ylim, asp = 1)
-    eps <- 0.01
-    polygon(c(0, 0.5, 1), c(0, top, 0), col = bg, xpd = NA, border = border, 
-        ...)
-    title(main, outer = TRUE, line = -1)
-    if (dimnames.position == "corner") {
-        axis(1, at = c(-0.03, 1.03), labels = dimnames[1:2], 
-            tick = FALSE, font = 2)
-        axis(3, at = 0.5, labels = dimnames[3], tick = FALSE, 
-            font = 2)
-    }
-    if (dimnames.position == "edge") {
-        shift <- eps * if (labels == "outside") 
-            8
-        else 0
-        text(0.25 - 2 * eps - shift, 0.5 * top + shift, dimnames[2], 
-            srt = 60, col = dimnames.color)
-        text(0.75 + 3 * eps + shift, 0.5 * top + shift, dimnames[1], 
-            srt = -60, col = dimnames.color)
-        text(0.5, 0, dimnames[3], pos = 1, offset = 0.5 + 30 * 
-            shift, xpd = NA, col = dimnames.color)
-    }
-    if (is.character(grid)) 
-        for (i in 1:4 * 0.2) {
-            lines(c(1 - i, (1 - i)/2), c(0, 1 - i) * top, lty = grid, 
-                col = grid.color)
-            lines(c(1 - i, 1 - i + i/2), c(0, i) * top, lty = grid, 
-                col = grid.color)
-            lines(c(i/2, 1 - i + i/2), c(i, i) * top, lty = grid, 
-                col = grid.color)
-            if (labels == "inside") {
-                text((1 - i) * 3/4 - eps, (1 - i)/2 * top, i * 
-                  scale, col = labels.color, srt = 120)
-                text(1 - i + i/4 + eps, i/2 * top - eps, (1 - 
-                  i) * scale, col = labels.color, srt = -120)
-                text(0.5, i * top + eps, i * scale, col = labels.color)
-            }
-            if (labels == "outside") {
-                text((1 - i)/2 - 6 * eps, (1 - i) * top, (1 - 
-                  i) * scale, col = labels.color)
-                text(1 - (1 - i)/2 + 3 * eps, (1 - i) * top + 
-                  5 * eps, i * scale, srt = -120, col = labels.color)
-                text(i + eps, 0, (1 - i) * scale, pos = 1, offset = 1.5, 
-                  srt = 120, xpd = NA, col = labels.color)
-            }
-        }
-    xp <- x[, 2] + x[, 3]/2
-    yp <- x[, 3] * top
-    points(xp, yp, pch = pch, col = col, cex = if (prop.size) 
-        prop.size * (s/max(s))
-    else cex, ...)
-    if (!is.null(id)) 
-        text(xp, yp, as.character(id), pos = 1, offset = 0.5 * 
-            cex, col = id.color)
-}
diff --git a/R/ternarypoints.R b/R/ternarypoints.R
deleted file mode 100644
index 804100c..0000000
--- a/R/ternarypoints.R
+++ /dev/null
@@ -1,10 +0,0 @@
-ternarypoints <- function(object, pch = 19, col = "blue", ...){
-    s <- rowSums(object)
-    if (any(s <= 0))
-        stop("each row of the input `object' must have a positive sum")
-    object <- object/s
-    top <- sqrt(3)/2
-    xp <- object[, 2] + object[, 3]/2
-    yp <- object[, 3] * top
-    points(xp, yp, pch = pch, col = col, ...)
-}
diff --git a/R/tobit.R b/R/tobit.R
new file mode 100644
index 0000000..948bec3
--- /dev/null
+++ b/R/tobit.R
@@ -0,0 +1,143 @@
+#' Interface between the Zelig Model tobit and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param ... additional parameters
+#' @param below a numeric or infinite specifying a lower boundary for censored
+#' responses
+#' @param above a numeric or infinite specifying an upper boundary for censored
+#' responses
+#' @param robust a boolean specifying whether to produce robust error estimates
+#' @param cluster ...
+#' @param data a data.frame 
+#' @return a list specifying '.function'
+#' @export
+zelig2tobit <- function (
+                         formula, ..., 
+                         below = 0, above = Inf, 
+                         robust = FALSE,
+                         cluster = NULL,
+                         data
+                         ) {
+
+  # Load survival
+  loadDependencies("survival")
+
+  if (!(is.null(cluster) || robust))
+    stop("If cluster is specified, then `robust` must be TRUE")
+
+  # Add cluster term
+  if (robust || !is.null(cluster))
+    formula <- cluster.formula(formula, cluster)
+
+  # Convert the response into a Surv object, as the survreg fitting method demands
+  formula <- make.surv(formula, below, above)
+  formula <- cluster.formula(formula, cluster)
+
+  z(
+    .function = "survreg",
+
+    formula = formula,
+    dist = "gaussian",
+    data = data,
+    robust = robust,
+    ...
+    )
+}
+
+
+#
+make.surv <- function (formula, below, above) {
+
+  lhs <- formula[[2]]
+
+  if (grepl("Surv", as.character(lhs)))
+    return(formula)
+
+  if (!(is.numeric(below) && is.numeric(above))) {
+    warning("`below` and `above` must be numeric; ",
+            "returning the original formula")
+    return(formula)
+  }
+
+  if (above == Inf) {
+    # Empty?
+    # This seems like a mistake inherited from old code
+  }
+
+  else if (below == -Inf && above == Inf)
+    stop("This model does not support censoring. Try the \"normal\" model")
+
+  else if (below == -Inf && above != Inf)
+    stop("This model does not support right-censored data")
+
+  else if (is.finite(below) && is.finite(above))
+    stop("This model does not support interval-censored data")
+
+  # That is, this model only supports left-censored data
+  # Surv( <outcome> , <below> < <outcomes> )
+  lhs <- call("Surv", lhs, call("<", below, lhs), type="left")
+
+  # Place back within formula
+  formula[[2]] <- lhs
+
+  # Return
+  formula
+}
+#' Param Method for the \code{tobit} Zelig Model
+#' @note This method is used by the \code{tobit} Zelig model
+#' @usage \method{param}{tobit}(obj, num, ...)
+#' @S3method param tobit
+#' @param obj a 'zelig' object
+#' @param num an integer specifying the number of simulations to sample
+#' @param ... ignored parameters
+#' @return a list to be cast as a 'parameters' object
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+param.tobit <- function(obj, num=1000, ...) {
+  cov <- vcov(.fitted)
+  mu <- c(coef(.fitted), log(.fitted$scale))
+
+  # Return
+  list(
+       coef = mvrnorm(num, mu=mu, Sigma=cov),
+       linkinv = NULL
+       )
+}
+#' Compute quantities of interest for 'tobit' Zelig models
+#' @usage \method{qi}{tobit}(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL)
+#' @S3method qi tobit
+#' @param obj a 'zelig' object
+#' @param x a 'setx' object or NULL
+#' @param x1 an optional 'setx' object
+#' @param y this parameter is reserved for simulating average treatment effects,
+#' though this feature is currently supported by only a handful of models
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of
+#' interest with their simulations
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+qi.tobit <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+
+  # This needs to be fixed.
+  ev1 <- ev2 <- pr1 <- pr2 <- fd <- NA
+
+  # return
+  list("Expected Values: E(Y|X)"  = ev1,
+       "Expected Values: E(Y|X1)" = ev2,
+       "Predicted Values: Y|X"    = pr1,
+       "Predicted Values: Y|X1"   = pr2,
+       "First Differences: E(Y|X1) - E(Y|X)" = fd
+       )
+}
+#' Describe a ``tobit'' model to Zelig
+#' @usage \method{describe}{tobit}(...)
+#' @S3method describe tobit
+#' @param ... ignored parameters
+#' @return a list to be processed by `as.description'
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+describe.tobit <- function(...) {
+  list(authors  = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+       year     = 2011,
+       category = "continuous",
+       text = "Linear regression for Left-Censored Dependent Variable"
+       )
+}
diff --git a/R/twosls.R b/R/twosls.R
new file mode 100644
index 0000000..bbf40d3
--- /dev/null
+++ b/R/twosls.R
@@ -0,0 +1,279 @@
+#' Interface between the Zelig Model twosls and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param ... additional parameters
+#' @param data a data.frame 
+#' @return a list specifying '.function'
+#' @export
+zelig2twosls <- function (formula, ..., data) {
+
+  loadDependencies("systemfit")
+
+  # Helper function to perform set-difference
+  "%w/o%" <- function(x, y)
+    x[!x %in% y]
+
+  formula<-parse.formula(formula, "twosls")
+  tt<-terms(formula)
+
+  ins<-names(tt) %w/o% names(attr(tt,"depVars"))
+  if(length(ins)!=0)
+    if(length(ins)==1)
+      inst <- formula[[ins]]
+    else 
+      inst <- formula[ins]
+
+  else
+    stop("twosls model requires instrument!!\n")
+
+  class(formula) <- c("multiple", "list")
+
+  # Return
+  list(
+       .function = "callsystemfit",
+       formula = formula[names(attr(tt,"depVars"))],
+       method  = "2SLS",
+       inst    = inst,
+       data = data,
+       ...
+       )
+}
+
+#' @S3method param twosls
+param.twosls <- function(obj, num=1000, ...) {
+
+  # Produce a vector of all terms
+  big.coef <- coef(obj)
+
+  # Produce a pretty sparse matrix containing 3 vcov matrices.
+  #
+  # Note that this matrix will give a value of zero to any invalid row-column
+  # combination.
+  # In particular, any terms that do not belong to the same equation will have
+  # a zero value.
+  big.vcov <- vcov(obj)
+
+  # This is a complete list of the terms. This is largely ignored, aside from
+  # the fact that we need a list of the formulae. In general, terms.multiple
+  # produced a pretty unwieldy list of items.
+  all.terms <- terms(obj)
+
+  # This list stores the results
+  simulations.list <- list()
+
+  # Iterate through the set of terms, and simulate each list separately.
+  for (key in names(all.terms)) {
+
+    # Extract the terms for an individual model.
+    eq.terms <- terms(all.terms[[key]])
+
+    # Extract the labels for the terms
+    eq.term.labels <- attr(eq.terms, "term.labels")
+
+    # Add the labeled for the intercept column, if it should exist
+    if (attr(eq.terms, "intercept"))
+      eq.term.labels <- c("(Intercept)", eq.term.labels)
+
+    # Format the title, this should look like:
+    #   <list-item-name>_<term-label>
+    #
+    # So for the list: list(mu1 = y ~ x + sin(x))
+    # We get:
+    #   "mu1_(Intercept)" "mu1_x" "mu1_sin(x)"
+    entries <- paste(key, eq.term.labels, sep = "_")
+
+    # Extract the mean-value of this term (from the lumped-together vector)
+    eq.coef <- big.coef[entries]
+
+    # Extract the vcov matrix of this term (from the lumped-together matrix)
+    eq.vcov <- big.vcov[entries, entries]
+
+    # Simulate the parameters
+    eq.simulations <- mvrnorm(num, eq.coef, eq.vcov)
+
+    # Name the columns
+    colnames(eq.simulations) <- eq.term.labels
+
+    # Add to the list
+    simulations.list[[key]] <- eq.simulations
+
+  }
+
+
+  # Return.
+  list(
+       coef = simulations.list,
+       linkinv = NULL
+       )
+}
+
+#' @S3method qi twosls
+qi.twosls <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+
+  # Compute the expected value of multistage LS methods
+  compute.ev <- function (obj, x, param) {
+    #
+    if (is.null(x) || is.na(x)) {
+      return(NA)
+    }
+
+    # If 'x' has too many rows, there will currently be errors. This is an issue
+    # in Zelig-core
+    if (nrow(x$matrix) > 1) {
+      warning("This package does not currently support pooled results.")
+      x <- x[1, ]
+    }
+
+    # Begin regular function
+    terms <- terms(obj)
+
+    # :q
+    coef.list <- coef(param)
+
+    # Hold Results
+    eta <- list()
+
+    #
+    for (key in names(coef.list)) {
+      #
+      coef <- coef.list[[key]]
+      # print(colnames(coef))
+      small.x <- as.matrix(x$matrix[, colnames(coef)])
+      #
+      eta[[key]] <- coef %*% (small.x)
+    }
+
+
+    # Convert list into a matrix
+    eta <- Reduce(function (x, y) cbind(x, y), eta)
+    colnames(eta) <- names(terms)
+
+    eta
+  }
+
+  ev1 <- compute.ev(obj, x, param)
+  ev2 <- compute.ev(obj, x1, param)
+  fd <- ev2 - ev1
+
+  # Name each column after the associated equation
+
+  # Return the results
+  list(
+       "Expected Value: E(Y|X)" = ev1,
+       "Expected Value (for X1): E(Y|X1)" = ev2,
+       "First Differences: E(Y|X1)-E(Y|X)" = ev2 - ev1
+       )
+}
+
+#' @S3method describe twosls
+describe.twosls <- function (...) {
+  category <- "continuous"
+  description  <- "Two Stage Least Squares"
+  authors <- c("Ferdinand Alimadhi", "Ying Lu", "Elena Villalon")
+  year <- 2007
+
+  package <-list(
+                 name = "systemfit",
+		 version = "0.8"
+		 )
+
+  parameters <- list()
+  parameters$mu <-list(
+                       equations=c(2,Inf),
+                       tagsAllowed=TRUE,
+                       depVar=TRUE,
+                       expVar=TRUE
+                       )
+  parameters$inst<-list(
+                        equations=c(1,1),
+                        tagsAllowed=FALSE,
+                        depVar=FALSE,
+                        expVar=TRUE
+                        )
+ 
+  list(category = category, authors = authors, year = year, description = description, package = package, parameters = parameters)
+}
+
+#' @S3method plot sim.twosls
+plot.sim.twosls <- function (x, ...) {
+
+  # Define local function to plot a set of quantities of interest
+  plotSet <- function (title) {
+    for (col in colnames(qis[[title]])) {
+      q <- qis[[title]][, col]
+      plot(density(q), main = paste(col, title, sep=": "))
+    }
+  }
+
+  # Code begins here
+
+  cols <- c( rep(), rep(), rep() )
+
+  qis <- as.list.qi(x$qi)
+  qis <- Filter(function (y) any(!is.na(y)), qis)
+  qis <- Filter(is.matrix, qis)
+
+
+  max.cols <- max(unlist(Map(ncol, qis)))
+  layout.matrix <- matrix(0, length(qis), max.cols)
+  rownames(layout.matrix) <- names(qis)
+
+  count <- 1
+
+  for (title in names(qis)) {
+    for (k in 1:ncol(qis[[title]])) {
+      layout.matrix[title, k] <- count
+      count <- count + 1
+    }
+  }
+
+  layout(layout.matrix)
+
+  for (key in names(qis)) {
+    plotSet(key)
+  }
+}
+
+callsystemfit<-function(formula,data,method,inst=NULL,...){
+  # Call systemfit..
+  out <- systemfit(
+                   data = data,
+                   formula = formula,
+                   method = method,
+                   inst = inst,
+                   ...
+                   )
+
+  # Assign class to formula, so that it is correctly parsed
+  class(formula) <- c("multiple", "list")
+  
+  # Set the terms explicitly
+  attr(out,"terms") <- terms(formula)
+
+  # Set the class explicitly
+  class(out) <- c("multiple", class(out))
+
+  # Fin. Return the modified object
+  return(out)
+}
+
+as.list.qi <- function (x, names = "") {
+  class(x) <- "list"
+  indices <- attr(x, ".index")
+  attr(x, ".index") <- NULL
+  rename.keys(x, indices, names(indices))
+}
+
+rename.keys <- function (x, keys, to, warn = TRUE) {
+  all.names <- names(x)
+  indices <- match(keys, all.names)
+
+  if (any(is.na(indices)))
+    stop("Keys contains values that are not in `x`")
+
+  all.names[indices] <- to
+  names(x) <- all.names
+
+  x
+}
+
diff --git a/R/user.prompt.R b/R/user.prompt.R
index ca31067..0332698 100644
--- a/R/user.prompt.R
+++ b/R/user.prompt.R
@@ -1,2 +1,14 @@
-user.prompt <- function() 
-  silent <- readline("\nPress <return> to continue: ")
+#' Prompts user to hit enter
+#' @title Prompt User
+#' @param msg a character-string, specifying a message to be displayed
+#' @return This function is used for its side effects
+#' @export
+#' @note This function is primarily used by Zelig demo scripts
+user.prompt <- function (msg = NULL) {
+  if (is.null(msg))
+    msg <- "Press <return> to continue: "
+
+  msg <- paste("\n", msg, sep="")
+
+  invisible(readline(msg))
+}
diff --git a/R/vcov.BetaReg.R b/R/vcov.BetaReg.R
deleted file mode 100644
index a606926..0000000
--- a/R/vcov.BetaReg.R
+++ /dev/null
@@ -1,2 +0,0 @@
-vcov.BetaReg <- function(object, ...)
-  object$variance
diff --git a/R/vcov.R b/R/vcov.R
new file mode 100644
index 0000000..f64e685
--- /dev/null
+++ b/R/vcov.R
@@ -0,0 +1,17 @@
+#' @S3method vcov gee.naive
+vcov.gee.naive <- function(object, ...)
+  object$naive.variance
+
+#' @S3method vcov gee.robust
+vcov.gee.robust <- function(object, ...)
+  object$robust.variance
+
+#' @S3method vcov glm.robust
+vcov.glm.robust <- function(object, ...) {
+  so <- summary.glm.robust(object, corr=FALSE, ...)
+  so$dispersion * so$cov.unscaled
+}
+
+#' @S3method vcov Relogit
+vcov.Relogit <- function(object, ...) 
+  summary.Relogit(object, ...)$cov.scaled
diff --git a/R/vcov.eiRxC.R b/R/vcov.eiRxC.R
deleted file mode 100644
index 4f101de..0000000
--- a/R/vcov.eiRxC.R
+++ /dev/null
@@ -1,2 +0,0 @@
-vcov.eiRxC <- function(object, ...)
-  object$vcov
diff --git a/R/vcov.gee.naive.R b/R/vcov.gee.naive.R
deleted file mode 100644
index ba42664..0000000
--- a/R/vcov.gee.naive.R
+++ /dev/null
@@ -1,6 +0,0 @@
-vcov.gee.naive <- function(object, ...){
-  class(object) <- c("gee", "glm")
-  return(object$naive.variance)
-}  
-
-  
\ No newline at end of file
diff --git a/R/vcov.gee.robust.R b/R/vcov.gee.robust.R
deleted file mode 100644
index 2665feb..0000000
--- a/R/vcov.gee.robust.R
+++ /dev/null
@@ -1,4 +0,0 @@
-vcov.gee.robust <- function(object, ...){
-  class(object) <- c("gee", "glm")
-  return(object$robust.variance)
-}  
diff --git a/R/vcov.glm.robust.R b/R/vcov.glm.robust.R
deleted file mode 100644
index 53ccfbc..0000000
--- a/R/vcov.glm.robust.R
+++ /dev/null
@@ -1,4 +0,0 @@
-vcov.glm.robust <- function(object, ...) {
-  so <- summary.glm.robust(object, corr=FALSE, ...)
-  so$dispersion * so$cov.unscaled
-}
diff --git a/R/vcov.lm.robust.R b/R/vcov.lm.robust.R
deleted file mode 100644
index b127be1..0000000
--- a/R/vcov.lm.robust.R
+++ /dev/null
@@ -1,4 +0,0 @@
-vcov.lm.robust <- function(object, ...) {
-  so <- summary.lm.robust(object, corr=FALSE, ...)
-  so$cov.unscaled * so$sigma^2
-}
diff --git a/R/vcov.netglm.R b/R/vcov.netglm.R
deleted file mode 100644
index 05d2bd1..0000000
--- a/R/vcov.netglm.R
+++ /dev/null
@@ -1,5 +0,0 @@
-vcov.netglm <- function(object, ...)
-{
-    so <- summary.glm(object, correlation=FALSE, ...)
-    so$dispersion * so$cov.unscaled
-}
diff --git a/R/vcov.netlm.R b/R/vcov.netlm.R
deleted file mode 100644
index 92cb758..0000000
--- a/R/vcov.netlm.R
+++ /dev/null
@@ -1,4 +0,0 @@
-vcov.netlm <- function(object, ...) {
-  so <- summary.lm(object, correlation = FALSE, ...)
-  so$sigma^2 * so$cov.unscaled
-}
diff --git a/R/vcov.netlogit.R b/R/vcov.netlogit.R
deleted file mode 100644
index fd5cc93..0000000
--- a/R/vcov.netlogit.R
+++ /dev/null
@@ -1,5 +0,0 @@
-vcov.logit.net <- function(object, ...)
-{
-    so <- summary.glm(object, correlation=FALSE, ...)
-    so$dispersion * so$cov.unscaled
-}
diff --git a/R/vcov.relogit.R b/R/vcov.relogit.R
deleted file mode 100644
index db6bce1..0000000
--- a/R/vcov.relogit.R
+++ /dev/null
@@ -1,2 +0,0 @@
-vcov.relogit <- function(object, ...) 
-  summary.relogit(object, ...)$cov.scaled
diff --git a/R/vcov.survreg.R b/R/vcov.survreg.R
deleted file mode 100644
index be335e6..0000000
--- a/R/vcov.survreg.R
+++ /dev/null
@@ -1 +0,0 @@
-vcov.survreg <- function(object, ...) object$var
diff --git a/R/vcov.zmlm.R b/R/vcov.zmlm.R
deleted file mode 100644
index b9d924b..0000000
--- a/R/vcov.zmlm.R
+++ /dev/null
@@ -1,4 +0,0 @@
-vcov.zmlm <- function(object,...){
-   so <- summary.zmlm(object, corr = FALSE)[[1]]
-    kronecker(estVar(object), so$cov.unscaled, make.dimnames = TRUE)
- }
diff --git a/R/vdc.R b/R/vdc.R
deleted file mode 100644
index c27e0be..0000000
--- a/R/vdc.R
+++ /dev/null
@@ -1,259 +0,0 @@
-zeligListModels<-function(inZeligOnly=TRUE) {
-     if (inZeligOnly) {
-    		tmp = ls(envir=asNamespace("Zelig"),pattern="^zelig2")
-     } else { 
-    		tmp = c( ls(envir=asNamespace("Zelig"),pattern="^zelig2"),
-         		apropos("zelig2",mode="function"))
-     }
-     sub("zelig2","", tmp)
-}
-
-
-
-
-
-zeligInstalledModels<-function(inZeligOnly=TRUE,schemaVersion="1.1") {
-  chkpkgs<-function(name)  {
-    zd=zeligDescribeModelXML(name,schemaVersion=schemaVersion)
-    if (is.null(zd)) {
-      return (FALSE)
-    }
-    zdpd= zeligModelDependency(name)[,1]
-    if (is.null(zdpd)) {
-      return(TRUE)
-    }
-    ow=options(warn=-1)
-    ret=sapply(zdpd,function(x) require(x,character.only=TRUE)==TRUE)
-    options(ow)
-    return (ret)
-  }
-  models<-zeligListModels(inZeligOnly=inZeligOnly)
-     # Not being the trusting sort, lets check to see if we can run
-     # a dummy formula. If not -- almost always means that something
-     # required() is missing
-     tmpModels<-sapply(models,chkpkgs)
-  models[which(tmpModels)]
-}
-
-zeligGetSpecial<-function(modelName) {
-	 modelDesc = zeligDescribeModel(modelName)
-	 return(modelDesc$parameters[[1]]$specialFunction)
-}
-
-zeligModelDependency<-function(modelName,repos="") {
-        zd= zeligDescribeModel(modelName)
-
-        if (is.null(zd)) { return (NULL) }
-
-        zdpd=zd[which(names(zd)=="package")]
-        
-        cbind(sapply(zdpd,function(x)x$name),
-                sapply(zdpd,function(x){if (is.null(x$CRAN))
-                {rv<-repos} else{rv<-x$CRAN};rv;}))
-      }
-
-
-zeligDescribeModel<-function(name,force=FALSE,schemaVersion="1.1") {
-    res=try(eval(call(paste("describe.",name,sep=""))),silent=TRUE)
-    if (inherits(res,"try-error")) {
-        if (force) {
-                res=describe.default()
-        } else {
-                res=NULL
-        }
-    }
-#    res$name<-name           # only here we have access to the model name, so add it to the list.
-    if(!is.null(res)){
-   res<-check.full(res,name)
-  }
-    return(res)
-}
-
-zeligDescribeModelXML<-function(modelName,force=FALSE,schemaVersion="1.1") {
-	zd = zeligDescribeModel(modelName,force,schemaVersion)
-	if (is.null(zd)) {
-		return(NULL)
-	} else {
-		return(zmodel2string(zd))
-	}
-
-}
-
-
-zmodel2string<-function(x) {
-     xmlList(x)
-}
-printZeligSchemaInstance<-function(filename=NULL, serverName=NULL,vdcAbsDirPrefix=NULL){
-	# open connection 
-	schemaURL<-'http://gking.harvard.edu/zelig';
-	if (is.null(serverName)) {
-		serverName<-system('hostname -f', intern=TRUE)
-	}
-	if (is.null(vdcAbsDirPrefix)){
-		locationURL<-paste('http://', serverName, '/VDC/Schema/analysis/ZeligInterfaceDefinition.xsd',sep="");
-	} else {
-		locationURL<-paste('file://', vdcAbsDirPrefix, '/VDC/Schema/analysis/ZeligInterfaceDefinition.xsd',sep="");
-	}
-	schemaLocation<-paste(schemaURL, ' ', locationURL, sep='');
-	con<-"";
-	if (!is.null(filename)){
-		con<-file(filename,"w");
-	}
-	cat(file=con, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<zelig xmlns=\"",schemaURL,"\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"",schemaLocation,"\">\n", sep="");
-	mssg<- sapply(zeligInstalledModels(),function(x){cat(file=con,zmodel2string(zeligDescribeModel(x)),sep="")},simplify=FALSE);
-	cat(file=con,"\n</zelig>\n",sep="");
-}
-
-
-xmlList<-function(z){
-  if(is.null(z))
-    return ("")
-  mins<-c()
-  maxs<-c()
-  for(i in 1:length(z$parameters)){
-    mins<-c(mins,z$parameters[[i]]$equations[[1]])
-    maxs<-c(maxs,z$parameters[[i]]$equations[[2]])
-  }
-  min<-sum(mins)
-  if(any(!is.finite(maxs)))
-    max<-Inf
-  else
-    max<-sum(maxs)
-  if(max !=1)
-    return("")
-  res<-paste("<model name=",'"',z$name,'"'," label=",'"',categories()[[z$category]],'"', sep="")
-  if(!(is.na(z$parameters[[1]]$specialFunction)))
-    res<-paste(res," specialFunction=",'"',z$parameters[[1]]$specialFunction,'"',sep="")
-  res<-paste(res,">\n",sep="")
-  res<-paste(res,"<description>",z$description, "</description>\n",sep="")
-  url <- paste("http://gking.harvard.edu/zelig/doc/",z$name,".pdf",sep="")
-  #if(z$name=="irtkd")
-  #  res<-paste(res,"<helpLink url=",'"',"http://gking.harvard.edu/zelig/docs/_TT_irtkd_TT__tex2htm.html",'"',sep="")
-  #else
-  #res<-paste(res,"<helpLink url=",'"',modelURL(z$name,z$description),'"',sep="")
-res<-paste(res,"<helpLink url=",'"',url,'"',sep="")
-
-  res<-paste(res,"/>\n", sep="")
-  if(any(!(is.null(z$package)))){
-    res<-paste(res,"<packageDependency",sep="")
-    if(!(is.na(z$package$name)))
-      res<-paste(res," name= ",'"',z$package$name,'"',sep="")
-    if(!(is.na(z$package$version)))
-      res<-paste(res," version= ",'"',z$package$version,'"',sep="")
-    if(!is.null(z$package$CRAN) && !(is.na(z$package$CRAN)))
-      res<-paste(res," CRAN= ",'"',z$package$CRAN,'"',sep="")  
-    res<-paste(res,"/>\n",sep="")
-  }
-  res<-paste(res,"<formula minEquations=",'"',min,'"',sep="")
-  if(is.finite(max))
-    res<-paste(res," maxEquations=",'"',max,'"',sep="")
-  if(max==1)
-    res<-paste(res," simulEq=",'"',0,'"',sep="")
-  res<-paste(res,">\n",sep="")
-  
-  res<-paste(res,"<equation name=",'"',names(z$parameters)[[1]],'"',">\n",sep="")
-  if(!(z$name %in% c("exp","lognorm","weibull"))){    ##we are going to delete this !!!
-    if(z$parameters[[1]]$depVar){
-      res<-paste(res,"<outcome",sep="")
-      if(!is.na(z$parameters[[1]]$specialFunction))                 
-        {
-          if(is.finite(z$parameters[[1]]$varInSpecialFunction[[2]] ))
-            res<-paste(res," maxVar=",'"',z$parameters[[1]]$varInSpecialFunction[[2]],'"',sep="")
-          res<-paste(res," minVar=",'"',z$parameters[[1]]$varInSpecialFunction[[1]],'"',sep="")
-        }
-      else
-        {
-          if(z$parameters[[1]]$depVar !=TRUE)
-            res<-paste(res," minVar=",'"',0,'"'," maxVar=",'"',0,'"',sep="")
-        }
-      res<-paste(res,">\n")
-      for(i in 1:length(modeling.types()$depVar[[z$category]]))
-        res<-paste(res,"<modelingType>",modeling.types()$depVar[[z$category]][[i]],"</modelingType>\n",sep="")
-      res<-paste(res,"</outcome>\n",sep="")
-    }
-  } else
-  res<-paste(res,durOutput())
-                                        #explanatory
-  if(z$parameters[[1]]$expVar){
-    res<-paste(res,"<explanatory ")
-    if(z$parameters[[1]]$expVar == TRUE)
-      res<-paste(res," minVar=",'"',1,'"',sep="")
-    else
-      res<-paste(res," minVar=",'"',0,'"'," maxVar=",'"',0,'"',sep="")
-    res<-paste(res,">\n")
-    
- 
-    for(i in 1:length(modeling.types()$expVar[[z$category]]))
-      res<-paste(res,"<modelingType>",  modeling.types()$expVar[[z$category]][[i]],"</modelingType>\n",sep="")
-    res<-paste(res,"</explanatory>\n",sep="")
-  }
-  res<-paste(res,"</equation>\n",sep="")
-   res<-paste(res,"</formula>\n",sep="")
-   if(z$parameters[[1]]$expVar)
-     sext<-2
-  else
-    sext<-0
-  res<-paste(res,"<setx maxSetx=",'"',sext,'"',"/>\n",sep="")
-  res<-paste(res,"</model>\n")
-  return(res)
-}
-
-
-
-check.full<-function(z,name){
- # we suppose that describe.model pass the check
-  z$name<-name
-  if(is.null(z$package))
-  z$package<-list(name=NA,version=NA, CRAN=NA)
-  
-  for (i in length(z$parameters)){
-  if(is.null(z$parameters[[i]]$specialFunction)) z$parameters[[i]]$specialFunction<-NA
-  if(is.null(z$parameters[[i]]$varInSpecialFunction)) z$parameters[[i]]$varInSpecialFunction<-NA
-}
- return(z)
-  
-}
-
-
-modelURL<-function(modelName,modelDesc){
-  baseUrl<-"http://gking.harvard.edu/zelig/docs/"
-  spec<-"_TT_"
-  res<-paste(baseUrl,spec,modelName,spec,"_",substr(modelDesc,0, 13-nchar(modelName)) ,".html",sep="")
-  res<-gsub(".","_",res,fixed=TRUE)
-  res<-gsub(" ","_",res,fixed=TRUE)
-  res
-}
-
-modeling.types <-function(){
-  res<-list(
-            expVar=list(continuous=c("continuous","discrete","nominal","ordinal","binary"),
-              dichotomous=c("continuous","discrete","nominal","ordinal","binary"),
-              ordinal=c("continuous","discrete","nominal","ordinal","binary"),
-              bounded=c("continuous","discrete","nominal","ordinal","binary"),
-              multinomial=c("continuous","discrete","nominal","ordinal","binary"),
-              count=c("continuous","discrete","nominal","ordinal","binary"),
-              mixed=c("continuous","discrete","nominal","ordinal","binary"),
-              ei=c("continuous","discrete","nominal","ordinal","binary")
-              ),
-            depVar=list(
-              continuous="continuous",
-              dichotomous="binary",
-              ordinal="ordinal",
-              bounded="continuous",
-              multinomial=c("nominal","ordinal"),
-              count="discrete",
-              mixed=c("continuous","discrete","nominal","ordinal","binary"),
-              ei=c("continuous")
-              )
-            )
-  res
-}
-
-
-## this is a temporary function; Is going to be removed after we change "describe" for this duration models;
-
-durOutput <-function(){
-res<-"<outcome minVar=\"1\" maxVar=\"1\" label=\"Duration\">\n<modelingType>continuous</modelingType>\n</outcome>\n<outcome maxVar=\"1\" minVar=\"0\" label=\"Censored\">\n<modelingType>binary</modelingType>\n</outcome>\n"
-return (res)
-  
-}
diff --git a/R/vignettesMenu.R b/R/vignettesMenu.R
deleted file mode 100644
index 988ef6d..0000000
--- a/R/vignettesMenu.R
+++ /dev/null
@@ -1,28 +0,0 @@
-## adds a vignette menu for zelig packages (hacked from Seth's code )
-
-addVigs2WinMenu <- function(pkgName) {
-   if ((.Platform$OS.type == "windows") && (.Platform$GUI == "Rgui")
-       && interactive()) {
-       vigFile <- system.file("Meta", "vignette.rds", package=pkgName)
-       if (!file.exists(vigFile)) {
-           warning(sprintf("%s contains no vignette, nothing is added to the menu bar", pkgName))
-       } else {
-           vigMtrx <- readRDS(vigFile)
-           vigs <- file.path(.find.package(pkgName), "doc", vigMtrx[,"PDF"])
-           names(vigs) <- vigMtrx[,"Title"]
-
-           if (!"Vignettes" %in% winMenuNames())
-             winMenuAdd("Vignettes")
-           pkgMenu <- paste("Vignettes", pkgName, sep="/")
-           winMenuAdd(pkgMenu)
-           for (i in vigs) {
-               item <- sub(".pdf", "", basename(i))
-               winMenuAddItem(pkgMenu, item, paste("shell.exec(\"", as.character(i), "\")", sep = ""))
-           }
-       } ## else
-       ans <- TRUE
-   } else {
-       ans <- FALSE
-   }
-   ans
-}
diff --git a/R/z.R b/R/z.R
new file mode 100644
index 0000000..0944f2c
--- /dev/null
+++ b/R/z.R
@@ -0,0 +1,53 @@
+#' Return value for a zelig2-function
+#'
+#' This is an API-function that bridges a model-fitting function with a zelig
+#' interface.
+#' @note This is used internally by Zelig-dependent packages to instruct Zelig
+#' how to evaluate the function call to a particular statistical model.
+#' @param .function a function
+#' @param ... a set of parameters to be evaluated symbolically
+#' @return a ``z'' object which specifies how to evaluate the fitted model
+#' @export
+z <- function (.function, ..., .hook = NULL) {
+  # Construct the function call
+  .call <- as.call(as.list(match.call())[-1])
+  .function.name <- as.character(.call[[1]])
+  .parent <- parent.frame()
+  .dots <- list(...)
+
+  # Ensure that hook works appropriately
+  if(!missing(.hook)) {
+    if (!is.function(.hook)) {
+      warning(".hook parameter must be a function. ignoring.")
+      .hook <- NULL
+    }
+  }
+
+  s <- append(list(as.name(.function.name)), list(...))
+  literal.call <- as.call(s)
+
+  # Construct the object
+  s <- list(
+            "function" = .function,
+            "hook" = .hook,
+
+            "call" = .call,
+            "env" = .parent,
+
+            "function.name" = .function.name,
+            "dots" = .dots,
+
+            "literal.call" = literal.call
+            )
+
+  # Set attributes
+  attr(s, 'baseenv') <- baseenv()
+  attr(s, 'call') <- match.call()
+  attr(s, 'function') <- substitute(.function)
+
+  # Set the class
+  class(s) <- 'z'
+
+  # Return
+  s
+}
diff --git a/R/zelig.R b/R/zelig.R
index 19ad096..96a5966 100644
--- a/R/zelig.R
+++ b/R/zelig.R
@@ -1,3 +1,323 @@
-zelig <- function(formula, model, data, by = NULL, save.data =
-                          FALSE, cite = TRUE, ...)
-  UseMethod("zelig")
+#' Estimating a Statistical Model
+#'
+#' The zelig command estimates a variety of statistical
+#' models.  Use \code{zelig} output with \code{setx} and \code{sim} to compute
+#' quantities of interest, such as predicted probabilities, expected values, and
+#' first differences, along with the associated measures of uncertainty
+#' (standard errors and confidence intervals).
+#'
+#' @param formula a symbolic representation of the model to be
+#'   estimated, in the form \code{y \~\, x1 + x2}, where \code{y} is the
+#'   dependent variable and \code{x1} and \code{x2} are the explanatory
+#'   variables, and \code{y}, \code{x1}, and \code{x2} are contained in the
+#'   same dataset.  (You may include more than two explanatory variables,
+#'   of course.)  The \code{+} symbol means ``inclusion'' not
+#'   ``addition.''  You may also include interaction terms and main
+#'   effects in the form \code{x1*x2} without computing them in prior
+#'   steps; \code{I(x1*x2)} to include only the interaction term and
+#'   exclude the main effects; and quadratic terms in the form
+#'   \code{I(x1^2)}
+#' @param model the name of a statistical model.
+#'   Type \code{help.zelig("models")} to see a list of currently supported
+#'   models
+#' @param data the name of a data frame containing the variables
+#'   referenced in the formula, or a list of multiply imputed data frames
+#'   each having the same variable names and row numbers (created by
+#'   \code{mi}) 
+#' @param ... additional arguments passed to \code{zelig},
+#'   depending on the model to be estimated
+#' @param by a factor variable contained in \code{data}.  Zelig will subset
+#'   the data frame based on the levels in the \code{by} variable, and
+#'   estimate a model for each subset.  This a particularly powerful option
+#'   which will allow you to save a considerable amount of effort.  For
+#'   example, to run the same model on all fifty states, you could type:
+#'   \code{z.out <- zelig(y ~ x1 + x2, data = mydata, model = "ls", by = "state")}
+#'   You may also use \code{by} to run models using MatchIt subclass
+#' @param cite If is set to "TRUE" (default), the model citation will be
+#' @return Depending on the class of model selected, \code{zelig} will return
+#'   an object with elements including \code{coefficients}, \code{residuals},
+#'   and \code{formula} which may be summarized using
+#'   \code{summary(z.out)} or individually extracted using, for example,
+#'   \code{z.out\$coefficients}.  See the specific models listed above
+#'   for additional output values, or simply type \code{names(z.out)}.  
+#'
+#' @name zelig
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}, Kosuke Imai, Olivia Lau, and
+#' Gary King 
+#' Maintainer: Matt Owen \email{mowen@@iq.harvard.edu}
+#' @keywords package
+zelig <- function (formula, model, data, ..., by=NULL, cite=T) {
+
+  # Yea this forever
+  model.warnings(model)
+
+  # Split data.frame
+  if (!missing(by)) {
+    if (length(by) > 1) {
+      warning("by cannot have length greater than 1")
+      by <- NULL
+    }
+
+    if (!is.data.frame(data))
+      warning("")
+
+
+    else if (any(by %in% all.vars(formula))) {
+      warning("by cannot list contain a variable from the model's formula")
+      by <- NULL
+    }
+
+    else
+      data <- divide(data, by)
+  }
+
+  # Almost equivalent to:
+  #   data <- multi.dataset(data)
+  # 
+  # but we want to keep the name of the original data object as our title (sometimes).
+  divided.data <- eval(call("multi.dataset", substitute(data)))
+
+  # 
+  Call <- match.call()
+
+  # expand dot arguments
+  dots <- list()
+
+  # get non-dot arguments in a general fashion
+  notdots <- as.list(match.call(expand.dots=F)[-1])
+  notdots[["..."]] <- NULL
+
+  # only get the non-dot arguments
+  # that do not exist in the dot arguments
+  names.notdots <- Filter(function(x) !x%in%names(dots), names(notdots))
+  notdots <- notdots[names.notdots]
+
+  # build parameter list (including optional parameters)
+  params <- c(dots, notdots)
+
+  # set up list
+  res <- NULL
+  old.style.oop <- TRUE
+
+  # Call make.mi symbolically so that the function can implicitly label
+  # data.frame's from context. For example, the expression:
+  #   mi(turnout[1:1000, ], )
+  # will contain a data.frame labeled:
+  #   turnout[1:1000, ]
+  # m <- eval(call("multi.dataset", substitute(data), by=by))
+
+  # Ensure certain values remain consistent between any object on this list
+  # by giving them all a pointer to the same environment object which contains
+  # a few pieces of information
+  state <- new.env()
+
+  # Begin constructing zelig object
+  object <- list()
+
+  # Create zelig2* function
+  zelig2 <- paste("zelig2", as.character(model), sep="")
+  zelig2 <- get(zelig2, mode="function")
+
+  # Get package name. This is useful for writing methods that apply to all
+  # models within a particular software package
+  package.name <- getPackageName(environment(zelig2), FALSE)
+
+  # repeat
+  for (key in names(divided.data)) {
+    d.f <- divided.data[[key]]
+    label <- key
+
+
+    # catch end-of-list error
+    if (is.null(d.f))
+      next
+
+    zclist <- zelig2(formula, ..., data=d.f)
+
+    new.call <- zclist$call
+    env <- zclist$env
+
+    if (!inherits(zclist, "z")) {
+      if (!is.list(zclist))
+        warning("invalid object returned from `zelig2` method")
+
+      else {
+        wl <- zclist
+
+        # reserved words taken from the zelig2 method
+        .func <- as.name(wl$.function)
+        .hook <- wl$.hook
+
+        # remove the reserved words
+        wl$.function <- NULL
+        wl$.hook <- NULL
+        wl$.post <- NULL
+        wl$.model.matrix <- NULL
+
+        new.call <- as.call(append(list(.func), wl))
+        mock.call <- match.call()
+        env <- NULL
+      }
+    }
+    else if (inherits(zclist, "z")) {
+      new.call <- zclist$literal.call
+      mock.call <- zclist$call
+      env <- NULL
+    }
+    else {
+      warning("zelig2 function is returning an invalid type of object")
+    }
+
+    # Default value for result object
+    new.res <- NULL
+
+    tryCatch(
+      {
+        new.res <- eval(new.call)
+      },
+      error = function (e) {
+        warning("There was an error fitting this statistical model.")
+        print(e)
+      }
+      )
+
+    # Apply first hook if it exists
+    if (!is.null(zclist$.hook)) {
+      zclist$.hook <- get(zclist$.hook, mode='function')
+      new.res <- zclist$.hook(new.res, new.call, match.call(), ..., data = d.f)
+    }
+    else if (!is.null(zclist$hook) && is.function(zclist$hook)) {
+      new.res <- zclist$hook(new.res, new.call, match.call(), ..., data = d.f)
+    }
+    # Determine whether this is an S4 object
+    old.style.oop <- ! isS4(new.res)
+
+    if (exists("mock.call")) {
+      if (isS4(new.res))
+        new.res@call <- mock.call
+      else
+        new.res$call <- mock.call
+    }
+
+    # This is the only "obj" assignment that matters
+    obj <- makeZeligObject(new.res,
+                           model,
+                           new.call, match.call(),
+                           d.f, label,
+                           env,
+                           package.name = package.name
+                           )
+
+    # Specify the appropriate class
+
+    # Attach shared environment as an attribute
+    attr(obj, 'state') <- state
+
+    # Add to list of results
+    object[[label]] <- obj
+  }
+
+  if (missing(by) && is.data.frame(data)) {
+    object <- object[[1]]
+  }
+  else {
+    attr(object, 'state') <- state
+    class(object) <- c(model, paste(model, 'mi', sep='-'), "MI")
+  }
+
+  # Update the shared environment
+  assign('old-formula', formula, state)
+  assign('args', list(...), state)
+  assign('parent', parent.frame(), state)
+  assign('call', match.call(), state)
+  assign('by', by, state)
+  # assign('methods', methods.env, state)
+  assign('methods', NULL, state)
+  assign('model', model, state)
+
+
+  # The below line should probably remain commented out
+  # assign('mi', m, state)
+
+  # Display citation information
+  if (cite) {
+    described <- describe(object)
+    descr <- description(
+                         authors = described$authors,
+                         year  = described$description,
+                         text  = described$text,
+                         url   = described$url,
+                         model = model
+                         )
+    cat("\n\n", cite(descr), "\n")
+  }
+
+  object
+}
+
+
+
+
+
+#' Make an Individual Zelig Object
+#'
+#' Returns a ``zelig'' object with the proper specifications
+#' @param object a fitted statistical model
+#' @param model a character-string specifying the name of the model
+#' @param call The call that produced the fitted model
+#' @param zelig_call The call made to the original zelig function
+#' @param data the data.frame used to fit the model
+#' @param label a character-string or symbol used as a human-readable label for
+#' the data-set
+#' @param env an environment variable that contains all variables to evaluate
+#' the call ``zelig_call''
+#' @param package.name a character-string specifying the name of the package
+#' that is the source of the model used to fit this object
+#' @return A ``zelig'' object
+makeZeligObject <- function (object,
+                             model,
+                             call,
+                             zelig_call,
+                             data,
+                             label,
+                             env,
+                             package.name = NULL
+                             ) {
+  # This is a set of variables that will be visible to the following methods:
+  # param, bootstrap, qi
+  implied.variables <- new.env()
+
+  # The fitted model
+  assign(".fitted", object, implied.variables)
+
+  # The name of the model
+  assign(".model", model, implied.variables)
+
+  # The call to the model-fitting function
+  assign(".call", call, implied.variables)
+
+  # The environment used to evaluate the model-fitting function
+  assign(".env", env, implied.variables)
+
+  # Create list-object
+  self <- list(
+               result = object,
+               formula = formula(object),
+               zelig.call = zelig_call,
+               name  = model,
+               label = label,
+               env  = env,
+               call = call,
+               data = data,
+               S4   = isS4(object),
+               method.env = implied.variables,
+               package.name = package.name
+               )
+
+  # Specify as a ``zelig'' object
+  class(self) <- c("zelig", model)
+
+  # Return 
+  self
+}
diff --git a/R/zelig.citation.R b/R/zelig.citation.R
deleted file mode 100644
index e89893e..0000000
--- a/R/zelig.citation.R
+++ /dev/null
@@ -1,68 +0,0 @@
-getModelCitation <- function (modelName){
-  citeobj <- .getModelCitation(modelName)
-  authors <- citeobj$mauthors
-  year <- citeobj$myear
-  title <- citeobj$mtitle
-  const <- citeobj$zconst
-  url <- citeobj$zurl
-  res <- paste("How to cite this model in Zelig:\n", authorsToText(authors),". ", year ,". ", descToText(title,modelName), " ", const, " ", url, "\n" , sep="")
-  res
-  
-}
-
-authorsToText <- function(auths){
-  howmany <- length(auths)
-  
-  ## if empty it's Kosuke, Gary, Olivia
-  if (howmany == 0) {
-    return ("Kosuke Imai, Gary King, and Oliva Lau")
-  }
-
-  ## if aurhor return it
-  if (howmany == 1){
-    return(auths[[1]])
-  } 
-
-  ## if 2 just an "and" in between
-  if (howmany == 2) {
-    return (paste(auths[[1]]," and ", auths[[2]], sep = ""))
-  }
-
-  ## separate by comma and an "and" before the last author
-  res <- paste(auths[1:(howmany - 1)], collapse = ", ", sep="")
-  res <- paste(res, ", and ", auths[[howmany]], sep = "")
-  return(res)  
-}
-
-.getModelCitation <- function(modelName){
-  
-  zconst <- "in Kosuke Imai, Gary King, and Olivia Lau, \"Zelig: Everyone's Statistical Software,\""
-  zurl <- "http://gking.harvard.edu/zelig"
-  descObject <- Zelig:::zeligDescribeModel(modelName)
-  if (!is.null(descObject)){
-    mauthors <- descObject$authors
-    mtitle <- descObject$description
-    myear <- descObject$year
-    
-  }
-  list(mauthors=mauthors, mtitle=mtitle, myear=myear, zconst=zconst, zurl=zurl)
-  
-}
-
-descToText <- function(desc, modelName){
-  paste('"',modelName,": ",desc,'"',sep="")
-}
-
-
-
-.getModelCitationTex <- function (modelName){
-  citeobj <- .getModelCitation(modelName)
-  authors <- citeobj$mauthors
-  year <- citeobj$myear
-  title <- citeobj$mtitle
-  const <- citeobj$zconst
-  url <- citeobj$zurl
-  res <- paste(authorsToText(authors),". ", year ,". ", descToText(title,modelName), " ", const, "\\url{", url, '}' , sep="")
-  res
-  
-}
diff --git a/R/zelig.default.R b/R/zelig.default.R
deleted file mode 100644
index 4541c8b..0000000
--- a/R/zelig.default.R
+++ /dev/null
@@ -1,135 +0,0 @@
-zelig.default <- function(formula, model, data, by = NULL, save.data =
-                          FALSE, cite = TRUE, ...) {
-
-  fn1 <- paste("zelig2", model, sep = "")
-  fn2 <- paste("zelig3", model, sep = "")
-
-  if (!exists(fn1))
-    stop(model,
-         " not supported. Type help.zelig(\"models\") to list supported models.")
-
-  ## load model dependeny pakcages
-  loadModelDeps(model)
-  
-  mf <- zelig.call <- match.call(expand.dots = TRUE)
-  zelig.call[[1]] <- as.name("zelig")
-  if (missing(by))
-    by <- NULL
-  N <- M <- 1
-  object <- list()
-  if ("mi" %in% class(data))
-    M <- length(data)
-  if (M > 1)
-    dat <- data[[1]]
-  else
-    dat <- data
-  if (!is.null(by)) {
-          if (any(as.character(by) %in% c(formula[[2]], formula[[3]])))
-            stop("the variable selected for subsetting cannot be called in the formula.")
-          idx <- dat[,by]
-          mf$by <- NULL
-          lev <- sort(unique(idx))
-          N <- length(lev)
-  }
-  
-  ## call zelig2* function
-  mf <- do.call(fn1, list(formula, model, dat, N, ...))
-    
-  for (i in 1:N) {
-          if (N > 1) {
-                  dat <- list()
-                  if (M > 1) {
-                          for (j in 1:M)
-                            dat[[j]] <- data[[j]][idx == lev[i],]
-                  } else {
-                          dat <- data[idx == lev[i],]
-                  }
-          } else {
-                  dat <- data
-          }
-          obj <- list()
-          for (j in 1:M) {
-            if (M > 1)
-              d <- dat[[j]]
-            else
-              d <- dat
-            if (is.data.frame(d)) {
-              ## List-wise deletion on d performed on the
-              ## rows which have NAs only for the variables which appear in
-              ## formula.
-              d <- d[complete.cases(d[,all.vars(as.expression(formula))]),]
-              mf$data <- d
-            }
-            ## evaluate the call returned by zelig2* function
-            ## (which is basically a call to the foreign model
-            res <- eval(as.call(mf))
-            
-            ## if zelig3* exists (i.e. we want to manipulate the output coming
-            ## from the foreign model) then call it
-            if (exists(fn2)) 
-              res <- do.call(fn2, list(res = res, fcall = mf,
-                                       zcall = as.list(zelig.call)))
-            
-            ## check the class of the object (S3/S4)
-            ##check <- length(slotNames(res)) > 0
-            check <- isS4(res)
-            if (check) {                             #S4
-              if ("call" %in% slotNames(res))
-                res@call <- as.call(zelig.call)
-              else
-                stop("no slot \"call\" in the result")
-              
-              ## if is S4 and "model" slot found, save data in that slot
-              ## otherwise, we cannot save the data
-              if (save.data){
-                if ("model" %in% slotNames(res))
-                  res@model <- as.data.frame(d)
-                else
-                  stop("\"save.data\" option is not supported for this model")
-              }
-            } else {                                 #S3
-              res$call <- as.call(zelig.call)
-              if (save.data) res$zelig.data <- as.data.frame(d)
-            }
-            ##        res$zelig <- model
-            if (M > 1) 
-              obj[[j]] <- res
-            else
-              obj <- res
-          }
-          if (M > 1) 
-            class(obj) <- "MI"
-          if (N > 1) 
-            object[[i]] <- obj
-          else
-            object <- obj
-        }
-  if (N > 1) {
-    class(object) <- "strata"
-    names(object) <- lev
-  }
-  if (cite){
-    cat(getModelCitation(model))
-  }
-  return(object)
-}
-
-###
-## loadModelDeps
-##
-## For a given model, tries to load the package it depends on
-## model : model name
-##
-
-loadModelDeps <- function(model){
-        zdpd <- Zelig:::zeligModelDependency(model)
-        
-        ## e.g. describe does not exist 
-        if(!is.null(zdpd)){
-                pkg <- zdpd[1,1]   ## what if more then 1 dependency?!
-                if (!is.na(pkg)) {
-                        if (!suppressWarnings(require(pkg,quietly=TRUE,character.only=TRUE)))
-                          stop("This model depends on package \"",pkg,"\". Please install this package and try again")
-                }
-        }
-}
diff --git a/R/zelig.skeleton.R b/R/zelig.skeleton.R
new file mode 100644
index 0000000..1fd2962
--- /dev/null
+++ b/R/zelig.skeleton.R
@@ -0,0 +1,134 @@
+#' 'zelig.skeleton' generates the necessary files used to create a Zelig
+#' package. Based on (and using) R's 'package.skeleton' it removes some of the
+#' monotony of building statistical packages. In particular, 'zelig.skeleton'
+#' produces templates for the \code{zelig2}, \code{describe}, \code{param}, and
+#' \code{qi} methods. For more information about creating these files on an
+#' individual basis, please refer to the tech manuals, which are available 
+#' by typing: \code{?zelig2}, \code{?param}, or \code{?qi}.
+#' @title Creates a Skeleton for a New Zelig package
+#' @param pkg a character-string specifying the name of the Zelig package
+#' @param models a vector of strings specifying models to be included in the
+#'   package
+#' @param author a vector of strings specifying contributors to the package
+#' @param path a character-string specifying the path to the package
+#' @param force a logical specifying whether to overwrite files and create
+#'   necessary directories
+#' @param email a string specifying the email address of the package's
+#'   maintainer
+#' @param depends a vector of strings specifying package dependencies
+#' @param ... ignored parameters
+#' @param .gitignore a logical specifying whether to include a copy of a 
+#'   simple \code{.gitignore} in the appropriate folders (\code{inst/doc} and
+#'   the package root
+#' @param .Rbuildignore a logical specifying whether to include a copy of a 
+#'   simple \code{.Rbuildignore} in the appropriate folders (\code{inst/doc} 
+#'   and the package root)
+#' @return nothing
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+zelig.skeleton <- function (
+    pkg, models=c(), author="UNKNOWN AUTHOR",
+    path = ".",
+    force = FALSE,
+    email = "maintainer at software-project.org",
+    depends = c(),
+    ...,
+    .gitignore = TRUE,
+    .Rbuildignore = TRUE
+    ) {
+
+
+  # WARNING BLOCK
+  # so that developers are aware of potential pitfalls that will prevent
+  # installation of their packages
+  if (!is.character(pkg)) {
+    warning("invalid 'pkg' parameter; should be a character string")
+    pkg <- as.character(pkg)
+  }
+
+  if (length(pkg) > 1) {
+    warning("invalid 'pkg' parameter; length cannot be greater than one")
+    pkg <- pkg[1]
+  }
+
+  if (!is.character(models)) {
+    warning("invalid 'models' parameter; should be a character vector")
+    models <- as.character(models)
+  }
+
+  if (!length(models))
+    warning("invalid 'models' parameter; should contain at least one model-name")
+
+  if (missing(author))
+    warning("missing 'author' parameter; please change the value in the",
+      "'DESCRIPTION' file's 'Author' field")
+
+  if (missing(email))
+    warning("Missing 'email' parameter; please change the value in the ",
+      "'DESCRIPTION' file's 'Maintainer' field")
+
+  if (missing(depends))
+    warning("Missing 'depends' parameter")
+
+  # new environment
+  e <- new.env()
+
+  for (m in models) {
+    # Place proper functions in
+    # correct environment (out of global)
+    # this technically doesn't work
+    # (bug in package.skeleton)
+    describe <- function (...) list()
+    zelig2 <- function (formula, ..., data) list(.function = "")
+    param <- function (obj, num, ...) list(coef=NULL)
+    qi <- function (obj, x, x1, y, param, num) list()
+
+    assign(paste("describe", m, sep="."), describe, e)
+    assign(paste("zelig2", m, sep=""), describe, e)
+    assign(paste("param", m, sep="."), describe, e)
+    assign(paste("qi", m, sep="."), describe, e)
+  }
+
+  # Invoke package.skeleton
+  package.skeleton(
+                   name = pkg,
+                   environment = e,
+                   path = path,
+                   force = force,
+                   namespace = TRUE
+                   )
+
+  # Copy files over - as of 3/11 these files are blank
+  for (m in models) {
+    .copy.templates(m, pkg, path)
+  }
+
+  .make.description(pkg, author, email, depends, url, path)
+  .make.package.R(pkg, author, email, depends, url, path)
+
+  # copy .gitignore and .Rbuildignore
+  if (.gitignore) {
+    src <- system.file('hidden', 'gitignore', package='Zelig')
+    dest <- file.path(path, pkg, '.gitignore')
+    file.copy(src, dest)
+
+    dest <- file.path(path, pkg, 'man', '.gitignore')
+    file.copy(src, dest)
+  }
+
+  if (.Rbuildignore) {
+    src <- system.file('hidden', 'Rbuildignore', package='Zelig')
+    
+    dest <- file.path(path, pkg, '.Rbuildignore')
+    file.copy(src, dest)
+
+    dest <- file.path(path, pkg, 'inst', 'doc', '.Rbuildignore')
+    dir.create(file.path(path, pkg, 'inst', 'doc'), recursive=TRUE)
+    file.copy(src, dest)
+  }
+
+
+  # Why zero? Eh, maybe a return code thing. This function is really just used
+  # for side-effects
+  invisible(0)
+}
diff --git a/R/zelig2.R b/R/zelig2.R
new file mode 100644
index 0000000..596231b
--- /dev/null
+++ b/R/zelig2.R
@@ -0,0 +1,49 @@
+#' The \code{zelig2} function acts as a simple interface between a user's call
+#' to the \code{zelig} function and the zelig functions subsequent call to the
+#' pre-existing external model. The external model varies based on which model
+#' is being called.
+#'
+#' @title Interface Between Zelig Models and External Functions
+#' @note Writing \code{zelig2} functions is required of Zelig developers. In
+#'   particular, \code{zelig2} functions act as an interface between external
+#'   models (models not included in the Zelig package) and the \code{zelig}
+#'   function which must use that model.
+#'
+#'   \code{zelig2} is not an actual function. Rather, 
+#'
+#' @name zelig2
+#' @return
+#'   The main purpose of the \code{zelig2} function is to return a list of
+#'   key-value pairs, specifying how Zelig should interface with the external
+#'   method. This list has the following format:
+#'
+#'   \item{\code{.function}}{specifies the name of the external method to be
+#'     called by \code{zelig} function. Subsequent parameters, are called and
+#'     evaluated as a function call to the function of the named string.}
+#'   \item{\code{.hook}}{specifies the name of a hook function as a string. The
+#'     hook function is only evaluated on zelig object once the external method
+#'     fits the statistical model}
+#'   \item{...}{any parameters aside from \code{.function} and \code{.hook} is 
+#'     as part of the function call to the external model}
+#'
+#' @examples
+#'  zelig2some.model <- function (formula, weights, verbose, ..., data) {
+#'    list(
+#'         .function = 'some.other.method',
+#'         .hook = NULL,
+#'         formula = formula,
+#'         weights = 2 * weights,
+#'         data = data
+#'         )
+#'  }
+#'
+#' ## This \code{zelig2} function equates the following function call:
+#' ##  zelig(formula, weights = weights, verbose = TRUE, data = data, model="some.model")
+#' ##
+#' ## with:
+#' ##  some.other.method(formula = formula, weights = 2 * weights, data=data)
+#'
+#' ## Note that the 'verbose' parameter is ignored, since the 
+#' ## 'zelig2some.model' does not include the 'verbose' parameter in its return
+#' ## value.
+NULL
diff --git a/R/zelig2MCMC.R b/R/zelig2MCMC.R
deleted file mode 100644
index 6ea0212..0000000
--- a/R/zelig2MCMC.R
+++ /dev/null
@@ -1,392 +0,0 @@
-zelig2ei.hier <- function(formula, model, data, M, ...) {
-  packageConflicts("VGAM")
-
-  mf <- match.call(expand.dots = TRUE)
-  
-  if (is.null(mf$verbose) || !mf$verbose){
-          mf$verbose <- 0
-  } else {
-          if (is.null(mf$mcmc)) {
-                  mcmc <- 50000
-          } else {
-                  mcmc <- mf$mcmc
-          }
-          if (is.null(mf$burnin)){
-                  burnin <- 5000
-          } else {
-                  burnin <- mf$burnin
-          }
-          mf$verbose <- round((mcmc+burnin)/10)
-  }
-  
-  mf$model <- mf$M <- NULL
-  temp <- mcmcei(formula=formula, data=data)
-  
-  if ((any(temp<0)) || ((any(temp<1) && !any(temp==0) ) && any(temp>1)))
-    stop("data need to be either counts or proportions.\n") 
-
-  if (is.null(mf$N)) {
-          if (all(temp>=0)){  #N is not needed
-                  mf$r0 <- temp$r0
-                  mf$r1 <- temp$r1
-                  mf$c0 <- temp$c0
-                  mf$c1 <- temp$c1
-          } else {
-                  stop("Needs total counts for inputs as porportion.\n")
-          }
-  } else if (((length(mf$N)!= nrow(data)) && (length(mf$N)!=1)) || (any(mf$N<1)))
-    stop("N needs to have same length as the observations and be postive numbers\n.")
-  else if ((all(temp<1)) && (all(mf$N>1))){
-          mf$r0 <- round(temp$r0*mf$N)
-          mf$r1 <- mf$N-mf$r0
-          mf$c0 <- round(temp$c0*mf$N)
-          mf$c1 <- mf$N-mf$c0
-          
-  }
-  
-  mf[[1]] <- MCMCpack::MCMChierEI
-  as.call(mf)
-}
-
-zelig2ei.dynamic <- function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        mf <- match.call(expand.dots = TRUE)
-  
-        if (is.null(mf$verbose) || !mf$verbose) {
-                mf$verbose <- 0
-        } else {
-                if (is.null(mf$mcmc))
-                  mcmc <- 50000
-                else
-                  mcmc <- mf$mcmc
-                if (is.null(mf$burnin))
-                  burnin <- 5000
-                else
-                  burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-  
-        mf$model <- mf$M <- NULL
-        temp <- mcmcei(formula=formula, data=data)
-
-        if ((any(temp<0)) || ((any(temp<1) && !any(temp==0) ) && any(temp>1)))
-          stop("data need to be either counts or proportions.\n") 
-        if (is.null(mf$N)) {
-                if (all(temp>=0)){  #N is not needed
-                        mf$r0 <- temp$r0
-                        mf$r1 <- temp$r1
-                        mf$c0 <- temp$c0
-                        mf$c1 <- temp$c1
-                }
-                else stop("Needs total counts for inputs as porportion.\n")
-        }
-        else if (((length(mf$N)!= nrow(data)) && (length(mf$N)!=1)) || (any(mf$N<1)))
-          stop("N needs to have same length as the observations and be postive numbers\n.")
-        else if ((all(temp<1)) && (all(mf$N>1))){
-                mf$r0 <- round(temp$r0*mf$N)
-                mf$r1 <- mf$N-mf$r0
-                mf$c0 <- round(temp$c0*mf$N)
-                mf$c1 <- mf$N-mf$c0
-         }
-        mf[[1]] <- MCMCpack::MCMCdynamicEI
-        as.call(mf)
-}
-
-zelig2logit.bayes <-  function(formula, model, data, M, ...) {
-   packageConflicts("VGAM")
-
-  mf <- match.call(expand.dots = TRUE)
-
-  if (is.null(mf$verbose) || !mf$verbose)
-    mf$verbose <- 0
-  else {
-          if (is.null(mf$mcmc))
-            mcmc <- 10000
-          else
-            mcmc <- mf$mcmc
-          if (is.null(mf$burnin))
-            burnin <- 1000
-          else
-            burnin <- mf$burnin
-          mf$verbose <- round((mcmc+burnin)/10)
-  }
-   
-   mf$model <- mf$M <- NULL
-   
-   mf[[1]] <- MCMCpack::MCMClogit
-   as.call(mf)
-}
-
-zelig2probit.bayes <-  function(formula, model, data, M, ...) {
-   packageConflicts("VGAM")
-
-  mf <- match.call(expand.dots = TRUE)
-   
-   if (is.null(mf$verbose) || !mf$verbose)
-     mf$verbose <- 0
-   else {
-           if (is.null(mf$mcmc))
-             mcmc <- 10000
-           else mcmc <- mf$mcmc
-           if (is.null(mf$burnin))
-             burnin <- 1000
-           else burnin <- mf$burnin
-           mf$verbose <- round((mcmc+burnin)/10)
-   }
-
-   mf$model <- mf$M <- NULL
-   
-   mf[[1]] <- MCMCpack::MCMCprobit
-   as.call(mf)
-}
-
-zelig2normal.bayes <-  function(formula, model, data, M, ...) {
-   packageConflicts("VGAM")
-
-   mf <- match.call(expand.dots = TRUE)
-   mf$model <- mf$M <- NULL
-   if (is.null(mf$verbose) || !mf$verbose)
-     mf$verbose <- 0
-   else {
-           if (is.null(mf$mcmc))
-             mcmc <- 10000
-           else
-             mcmc <- mf$mcmc
-           if (is.null(mf$burnin))
-             burnin <- 1000
-           else
-             burnin <- mf$burnin
-           mf$verbose <- round((mcmc+burnin)/10)
-      }
-
-   mf[[1]] <- MCMCpack::MCMCregress
-   as.call(mf)
-}
-
-zelig2poisson.bayes <-  function(formula, model, data, M, ...) {
-   packageConflicts("VGAM")
-
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- NULL
-   if (is.null(mf$verbose) || !mf$verbose)
-     mf$verbose <- 0
-   else {
-           if (is.null(mf$mcmc))
-             mcmc <- 10000
-           else
-             mcmc <- mf$mcmc
-           if (is.null(mf$burnin))
-             burnin <- 1000
-           else
-             burnin <- mf$burnin
-           mf$verbose <- round((mcmc+burnin)/10)
-   }
-   mf[[1]] <- MCMCpack::MCMCpoisson
-   as.call(mf)
-}
-
-zelig2tobit.bayes <-  function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-
-        mf <- match.call(expand.dots = TRUE)
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))
-                  mcmc <- 10000
-                else mcmc <- mf$mcmc
-                if (is.null(mf$burnin))
-                  burnin <- 1000
-                else burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        
-        mf$model <- mf$M <- NULL
-        mf[[1]] <- MCMCpack::MCMCtobit
-        as.call(mf)
-}
-
-zelig2mlogit.bayes <-  function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        require(stats)
-        mf <- match.call(expand.dots = TRUE)
-        mf$model <- mf$M <- NULL
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))
-                  mcmc <- 10000
-                else mcmc <- mf$mcmc
-                if (is.null(mf$burnin))
-                  burnin <- 1000
-                else
-                  burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        
-        mf[[1]] <- MCMCpack::MCMCmnl
-        as.call(mf)
-}
-
-zelig2oprobit.bayes <-  function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        require(stats)
-        mf <- match.call(expand.dots = TRUE)
-        mf$model <- mf$M <- NULL
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))
-                  mcmc <- 10000
-                else mcmc <- mf$mcmc
-                if (is.null(mf$burnin))
-                  burnin <- 1000
-                else
-                  burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        
-        mf[[1]] <- MCMCpack::MCMCoprobit
-        as.call(mf)
-}
-
-
-zelig2factor.bayes <- function(formula, model, data, M, ...) {
-   packageConflicts("VGAM")
-
-  mf <- match.call(expand.dots = TRUE)
-   if (is.null(mf$verbose) || !mf$verbose)
-     mf$verbose <- 0
-   else {
-           if (is.null(mf$mcmc))
-             mcmc <- 20000
-           else
-             mcmc <- mf$mcmc
-           if (is.null(mf$burnin))
-             burnin <- 1000
-           else burnin <- mf$burnin
-           mf$verbose <- round((mcmc+burnin)/10)
-   }
-   
-   if (is.null(mf$factors))
-     mf$factors<-2
-   else if (mf$factors<2) stop("number of factors needs to be at
-    least 2")
-   mf$model <- mf$M <- NULL
-   mf$x <- as.matrix(model.response(model.frame(formula, data=data, na.action=NULL)))
-   mf[[1]] <- MCMCpack::MCMCfactanal
-   as.call(mf)
-}
-
-zelig2factor.ord <- function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        mf <- match.call(expand.dots = TRUE)
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))
-                  mcmc <- 20000
-                else
-                  mcmc <- mf$mcmc
-                if (is.null(mf$burnin))
-                  burnin <- 1000
-                else
-                  burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        if (is.null(mf$factors)) mf$factors<-2
-        else if (mf$factors<1) stop("number of factors needs to be at
-    least 1")
-        mf$model <- mf$M <- NULL
-        mf$x <- as.matrix(model.response(model.frame(formula, data=data, na.action=NULL)))
-        
-        mf[[1]] <- MCMCpack::MCMCordfactanal
-        as.call(mf)
-}
-
-zelig2factor.mix <- function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        mf <- match.call(expand.dots = TRUE)
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))
-                  mcmc <- 10000
-                else mcmc <- mf$mcmc
-                if (is.null(mf$burnin))
-                  burnin <- 1000
-                else
-                  burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        
-        if (is.null(mf$factors))
-          mf$factors<-2
-        else if (mf$factors<1)
-          stop("number of factors needs to be at
-    least 1")
-        mf$model <- mf$M <- NULL
-        
-        var <- model.response(model.frame(formula, data=data,
-                                          na.action=NULL))
-        varnames <- colnames(var)
-        mf$x <- as.formula(paste("~", paste(varnames, collapse="+")))
-        mf[[1]] <- MCMCpack::MCMCmixfactanal
-        
-        as.call(mf)
-}
-
-
-zelig2irt1d <- function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        mf <- match.call(expand.dots = TRUE)
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))  mcmc <- 20000
-                else mcmc <- mf$mcmc
-                if (is.null(mf$burnin)) burnin <- 1000
-                else burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        
-        mf$model <- mf$M <- NULL
-        mf$datamatrix <- as.matrix(model.response(model.frame(formula, data=data,
-                                                              na.action=NULL)))
-        mf$datamatrix <- t(mf$datamatrix)
-        mf[[1]] <- MCMCpack::MCMCirt1d
-        
-        as.call(mf)
-}
-
-
-zelig2irtkd <- function(formula, model, data, M, ...) {
-        packageConflicts("VGAM")
-        
-        mf <- match.call(expand.dots = TRUE)
-        if (is.null(mf$verbose) || !mf$verbose)
-          mf$verbose <- 0
-        else {
-                if (is.null(mf$mcmc))  mcmc <- 10000
-                else mcmc <- mf$mcmc
-                if (is.null(mf$burnin)) burnin <- 1000
-                else burnin <- mf$burnin
-                mf$verbose <- round((mcmc+burnin)/10)
-        }
-        if (is.null(mf$dimensions)) mf$dimensions <- 1
-        
-        mf$model <- mf$M <- NULL
-        mf$datamatrix <- as.matrix(model.response(model.frame(formula, data=data,
-                                                              na.action=NULL)))
-        mf$datamatrix <- t(mf$datamatrix)
-        mf[[1]] <- MCMCpack::MCMCirtKd
-        
-        as.call(mf)
-}
-
diff --git a/R/zelig2aov.R b/R/zelig2aov.R
deleted file mode 100644
index 6d83822..0000000
--- a/R/zelig2aov.R
+++ /dev/null
@@ -1,11 +0,0 @@
-zelig2aov <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- FALSE
-  mf$M <- mf$robust <- NULL
-  mf[[1]] <- aov
-#  ix <- grep("Error", as.character(formula))
-#  if(length(ix)) class(formula) <- c(class(formula),"aoverrorF")
-  mf$formula <- formula
-  as.call(mf)
-}
-
diff --git a/R/zelig2arima.R b/R/zelig2arima.R
deleted file mode 100644
index fb4abb3..0000000
--- a/R/zelig2arima.R
+++ /dev/null
@@ -1,70 +0,0 @@
-zelig2arima <- function(formula, model, data, M, ...){
-  mf <- match.call()
-  ##arima.wrap allows arima to run without using a formula
-  mf[[1]] <- arima.wrap
-  ##assembling the order of the ARIMA model
-  dep.var <- eval(mf[[2]][[2]])$name
-  d <- eval(mf[[2]][[2]])$d
-  d.s <- eval(mf[[2]][[2]])$ds
-  per <- eval(mf[[2]][[2]])$per
-  ##the don't use vector will be used below to assemble the actual formula employed
-  dont.use <- vector()
-  ##this for loop is used to find the values of p, p.s., q, and q.s.
-  n.par <- length(unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+")))
-  for(i in 1:n.par){
-    if(class(eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-                  envir=data))=="list"){
-      if(eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-              envir=data)$ep==TRUE){
-        dont.use[1] <- i
-        q <- eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-                  envir=data)$q
-        q.s <- eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-                    envir=data)$qs
-      }
-      if(eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-              envir=data)$y==TRUE){
-        dont.use[2] <- i
-        p <- eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-                  envir=data)$p
-        p.s <- eval(parse(text=unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[i]),
-                    envir=data)$ps
-      }
-    }
-  }
-  
-  if (length(dont.use) < n.par){
-    ##this vector then denotes what we are going to use on the right hand side
-    use.vec <- c(1:length(unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))))
-    use.vec <- use.vec[-dont.use]
-    ##now, we are creating the right hand side portion of the formula
-    rhs <- vector()
-    for (i in 1:length(use.vec)){
-      rhs[2*i-1] <- paste(unlist(strsplit(deparse(mf[[2]][[3]], width.cutoff=500), "\\+"))[use.vec[i]])
-      if (i+1 <= length(use.vec))
-        rhs[2*i] <- paste("+")
-      if (i + 1 > length(use.vec))
-       	break  
-    }
-    mf$formula <-formula <- as.formula(paste(paste(dep.var,"~", collapse=""),
-                                             paste(rhs, collapse=""), collapse=""))
-    D <- model.frame(mf$formula, data)
-    cols.D <- colnames(D)
-    mf$x <- D[,1]
-    X <- as.matrix(cbind(D[,2:ncol(D)]))
-    colnames(X) <- cols.D[2:length(cols.D)]
-    mf$xreg <- X
-  }
-  if(length(dont.use)==n.par){
-    mf$x <- as.matrix(eval(dep.var, envir=data))
-    mf$xreg <- NULL
-    mf$formula <- eval(dep.var, envir=data)~1 
-  }	
-  mf$order<- c(p,d,q) 
-  if(!is.null(d.s) & !is.null(p.s) & !is.null(q.s) & !is.null(per)){	
-    mf$seasonal$order <- c(p.s, d.s, q.s)
-    mf$seasonal$period <- per
-  }
-  mf$model <- mf$M <- mf$data <- NULL
-  as.call(mf) 
-}
diff --git a/R/zelig2blogit.R b/R/zelig2blogit.R
deleted file mode 100644
index e28fa72..0000000
--- a/R/zelig2blogit.R
+++ /dev/null
@@ -1,16 +0,0 @@
-zelig2blogit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf[[1]] <- VGAM::vglm
-  mf$family <- as.name("blogit")
-  mf$... <- NULL
-  formula<-parse.formula(formula,model)
-  tmp <- cmvglm(formula, model, 3)
-  mf$formula <- tmp$formula 
-  mf$constraints <- tmp$constraints
-
-  mf$model <- mf$constant <- mf$M <- NULL
-  mf$robust <- NULL
-  as.call(mf)
-}
-
-  blogit <- function() binom2.or(zero=NULL)
diff --git a/R/zelig2bprobit.R b/R/zelig2bprobit.R
deleted file mode 100644
index 042662a..0000000
--- a/R/zelig2bprobit.R
+++ /dev/null
@@ -1,16 +0,0 @@
-zelig2bprobit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf[[1]] <- VGAM::vglm
-  mf$family <- as.name("bprobit")
-   formula<-parse.formula(formula,model)
-  tmp <- cmvglm(formula, model,3)
-  mf$formula <- tmp$formula
-  mf$constraints <- tmp$constraints
-
-  mf$model <- mf$constant <- mf$M <- NULL
-
-  as.call(mf)
-}
-
-  bprobit <- function() binom2.rho(zero=NULL)
-
diff --git a/R/zelig2coxph.R b/R/zelig2coxph.R
deleted file mode 100644
index 0bb5b42..0000000
--- a/R/zelig2coxph.R
+++ /dev/null
@@ -1,24 +0,0 @@
-zelig2coxph <- function(formula, model, data, M, ...) {
-  require(survival)
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- survival::coxph
-  if (is.null(mf$robust))
-    mf$robust <- FALSE
-  if (!is.null(mf$cluster) & !mf$robust)
-    stop("\nIf cluster is specified, robust must be TRUE.")
-  if (!is.null(mf$cluster)) {
-    mf$formula <- update(mf$formula, paste(". ~ . + ",
-                                           paste("cluster(",mf$cluster,")")))
-    mf$cluster <- NULL
-  }
-    #mf$formula <- as.formula(paste(paste(deparse(formula[[2]])),
-     #                              paste(deparse(formula[[1]])),
-      #                             paste(deparse(formula[[3]], width.cutoff=500)),
-       #                            paste("+", " cluster(",
-        #                                 mf$cluster, ")")))
-    #mf$cluster <- NULL
-  #}
-  as.call(mf)
-}
-
diff --git a/R/zelig2ei.RxC.R b/R/zelig2ei.RxC.R
deleted file mode 100644
index a9e84a9..0000000
--- a/R/zelig2ei.RxC.R
+++ /dev/null
@@ -1,7 +0,0 @@
-zelig2ei.RxC <- function(formula, model, data, M, covar= NULL, ...) {
-
-  mf <- match.call(expand.dots = TRUE)
-  mf[[1]] <- as.name("callparamseiestim")
-  mf$model<-mf$M<-NULL
-  as.call(mf)
-}
diff --git a/R/zelig2exp.R b/R/zelig2exp.R
deleted file mode 100644
index a3a2ad6..0000000
--- a/R/zelig2exp.R
+++ /dev/null
@@ -1,16 +0,0 @@
-zelig2exp <- function(formula, model, data, M, ...) {
-        mf <- match.call(expand.dots = TRUE)
-        mf$model <- mf$M <- NULL
-        mf[[1]] <- survival::survreg
-        mf$dist <- "exponential"
-        if (is.null(mf$robust))
-          mf$robust <- FALSE
-        if (!is.null(mf$cluster) & !mf$robust) 
-          stop("\nIf cluster is specified, robust must be TRUE.")
-        if (!is.null(mf$cluster)) {
-                mf$formula <- update(mf$formula, paste(". ~ . + ", paste("cluster(",mf$cluster,")")))
-                mf$cluster <- NULL
-        } else if (mf$robust)
-          mf$formula <- update(formula, paste(". ~ . + ", paste("cluster(1:nrow(",deparse(formula[[2]]),"))")))
-        as.call(mf)
-}
diff --git a/R/zelig2gam.logit.R b/R/zelig2gam.logit.R
deleted file mode 100644
index 491e4eb..0000000
--- a/R/zelig2gam.logit.R
+++ /dev/null
@@ -1,9 +0,0 @@
-zelig2logit.gam <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("gam")	
-	mf$M <- mf$model  <- NULL	
-	class(formula) <- "gamF" 
-	mf$formula <- formula
-	mf$family <- binomial()
-	as.call(mf)
-}
diff --git a/R/zelig2gam.normal.R b/R/zelig2gam.normal.R
deleted file mode 100644
index ab1b5b6..0000000
--- a/R/zelig2gam.normal.R
+++ /dev/null
@@ -1,8 +0,0 @@
-zelig2normal.gam <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("gam")	
-	mf$M <- mf$model  <- NULL
-	class(formula) <- "gamF"
-	mf$formula <- formula
-	as.call(mf)
-}
diff --git a/R/zelig2gam.poisson.R b/R/zelig2gam.poisson.R
deleted file mode 100644
index 78439fa..0000000
--- a/R/zelig2gam.poisson.R
+++ /dev/null
@@ -1,9 +0,0 @@
-zelig2poisson.gam <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("gam")	
-	mf$M <- mf$model  <- NULL
-	class(formula) <- "gamF"
-	mf$formula <- formula
-	mf$family <- poisson()
-	as.call(mf)
-}
diff --git a/R/zelig2gam.probit.R b/R/zelig2gam.probit.R
deleted file mode 100644
index 559cc63..0000000
--- a/R/zelig2gam.probit.R
+++ /dev/null
@@ -1,9 +0,0 @@
-zelig2probit.gam <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("gam")	
-	mf$M <- mf$model  <- NULL
-	class(formula) <- "gamF"
-	mf$formula <- formula
-	mf$family <- binomial(link="probit")
-	as.call(mf)
-}
diff --git a/R/zelig2gamma.R b/R/zelig2gamma.R
deleted file mode 100644
index 3d6080a..0000000
--- a/R/zelig2gamma.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2gamma <- function(formula, model, data, M, ...) {
-  mf <-  match.call(expand.dots = TRUE)
-  mf$M <- mf$robust <- NULL
-  mf$model <- FALSE
-  mf[[1]] <- glm
-  mf$family <- Gamma
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2gamma.gee.R b/R/zelig2gamma.gee.R
deleted file mode 100644
index 1e202f9..0000000
--- a/R/zelig2gamma.gee.R
+++ /dev/null
@@ -1,15 +0,0 @@
-zelig2gamma.gee <- function(formula, model, data, M, ...) {
-  require(gee)
-  mf <- match.call(expand.dots = TRUE)
-  if(is.null(mf$corstr))
-    mf$corstr <- as.character("independence")
-  if(mf$corstr=="fixed" & is.null (mf$R))
-    stop("R must be defined.")
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- gee::gee
-  if (is.character(mf$id))
-    mf$id <- as.name(mf$id)
-  mf$family <- Gamma
-  mf$robust <- NULL
-  as.call(mf)
-}
\ No newline at end of file
diff --git a/R/zelig2gamma.mixed.R b/R/zelig2gamma.mixed.R
deleted file mode 100644
index 765f8c0..0000000
--- a/R/zelig2gamma.mixed.R
+++ /dev/null
@@ -1,17 +0,0 @@
-zelig2gamma.mixed <- function(formula, model, data, M, ...){
-        mf <- match.call(expand.dots=TRUE)
-        mf[[1]]<- as.name("lmer")
-        
-        mf$formula <- tolmerFormat(reduceMI(formula))
-        
-        mf$model <- mf$M <- NULL
-        
-        if (is.null(mf$family)){
-                mf$family <- Gamma(link="identity")
-        }
-        return(as.call(mf))
-}
-
-model.frame.lmer <- function(obj){
-        obj at frame
-}
\ No newline at end of file
diff --git a/R/zelig2gamma.survey.R b/R/zelig2gamma.survey.R
deleted file mode 100644
index 764908d..0000000
--- a/R/zelig2gamma.survey.R
+++ /dev/null
@@ -1,50 +0,0 @@
-zelig2gamma.survey <- function(formula, model, data, M, 
-                                weights=NULL, 
-                                ids=NULL, probs=NULL, strata = NULL,  
-                                fpc=NULL, nest = FALSE, check.strata = !nest, 			
-                                repweights = NULL, 				
-                                type, combined.weights=FALSE, rho = NULL, bootstrap.average=NULL, 
-                                scale=NULL, rscales=NULL, fpctype="fraction",
-                                return.replicates=FALSE,    			
-                                na.action="na.omit", start=NULL, etastart=NULL, 
-                                mustart=NULL, offset=NULL, 	      		
-                                model1=TRUE, method="glm.fit", x=FALSE, y=TRUE, contrasts=NULL,
-                                design=NULL, link=NULL){
-        
-        mf <- match.call(expand.dots = TRUE)					
-        mf$M <- mf$model <- NULL
-	  if(is.null(link)){mf$family <- Gamma} else {mf$family <- Gamma(link=mf$link)}
-	  mf$link <- NULL
-        
-        if(is.null(ids)){ids<-~1}
-      
-        if(is.null(repweights)){
-						
-                mf$design <- svydesign(data=data, ids=ids, probs=probs,		
-                                       strata=strata, fpc=fpc, nest=nest, check.strata=check.strata,
-						   weights=weights)
-                
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$rho <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- mf$return.replicates <- NULL
-                mf$data <- NULL
-                
-        } else {		
-        assign(".survey.prob.weights", weights, envir = globalenv())				
-                mf$design <- svrepdesign(data=data, repweights=repweights, 	
-                                         type=type, weights=weights, combined.weights=combined.weights, 
-                                         rho=rho, bootstrap.average=bootstrap.average, scale=scale, 
-                                         rscales=rscales, fpctype=fpctype, fpc=fpc)
-                                        # ...drop extraneous options
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- NULL
-                mf$data <- NULL
-        }							
-        
-        mf[[1]] <- as.name("svyglm")
-        as.call(mf)    
-				
-}
diff --git a/R/zelig2lm.mixed.R b/R/zelig2lm.mixed.R
deleted file mode 100644
index 294cd2d..0000000
--- a/R/zelig2lm.mixed.R
+++ /dev/null
@@ -1,17 +0,0 @@
-zelig2ls.mixed <- function(formula, model, data, M, ...){
-        mf <- match.call(expand.dots=TRUE)
-
-        mf[[1]]<- as.name("lmer")
-
-        mf$formula <- tolmerFormat(reduceMI(formula))
-
-        mf$model <- mf$M <- NULL
-        
-        mf$family <- NULL
-
-        return (as.call(mf))
-}
-
-model.frame.lmer<-function(obj){
-obj at frame
-}
\ No newline at end of file
diff --git a/R/zelig2logit.R b/R/zelig2logit.R
deleted file mode 100644
index 713f5a6..0000000
--- a/R/zelig2logit.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2logit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$M <- mf$robust <- NULL
-  mf$model <- FALSE
-  mf[[1]] <- stats::glm
-  mf$family <- binomial(link="logit")
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2logit.gee.R b/R/zelig2logit.gee.R
deleted file mode 100644
index cca362d..0000000
--- a/R/zelig2logit.gee.R
+++ /dev/null
@@ -1,15 +0,0 @@
-zelig2logit.gee <- function(formula, model, data, M, ...) {
-  require(gee)
-  mf <- match.call(expand.dots = TRUE)
-  if(is.null(mf$corstr))
-    mf$corstr <- as.character("independence")
-  if(mf$corstr=="fixed" & is.null (mf$R))
-    stop("R must be defined.")
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- gee::gee
-  if (is.character(mf$id))
-    mf$id <- as.name(mf$id)
-  mf$family <- binomial(link="logit")
-  mf$robust <- NULL
-  as.call(mf)
-}
\ No newline at end of file
diff --git a/R/zelig2logit.mixed.R b/R/zelig2logit.mixed.R
deleted file mode 100644
index 180f5ba..0000000
--- a/R/zelig2logit.mixed.R
+++ /dev/null
@@ -1,17 +0,0 @@
-zelig2logit.mixed <- function(formula, model, data, M, ...){
-  mf <- match.call(expand.dots=TRUE)
-  mf[[1]]<- as.name("lmer")
-
-  mf$formula <- tolmerFormat(reduceMI(formula))
-
-  mf$model <- mf$M <- NULL
-
-  if (is.null(mf$family)){
-    mf$family <- binomial(link="logit")
-  }
-  return(as.call(mf))
-}
-
-model.frame.lmer <- function(obj){
-  obj at frame
-}
diff --git a/R/zelig2logit.survey.R b/R/zelig2logit.survey.R
deleted file mode 100644
index 37672a9..0000000
--- a/R/zelig2logit.survey.R
+++ /dev/null
@@ -1,50 +0,0 @@
-zelig2logit.survey <- function(formula, model, data, M, 
-                                weights=NULL, 
-                                ids=NULL, probs=NULL, strata = NULL,  
-                                fpc=NULL, nest = FALSE, check.strata = !nest, 			
-                                repweights = NULL, 				
-                                type, combined.weights=FALSE, rho = NULL, bootstrap.average=NULL, 
-                                scale=NULL, rscales=NULL, fpctype="fraction",
-                                return.replicates=FALSE,    			
-                                na.action="na.omit", start=NULL, etastart=NULL, 
-                                mustart=NULL, offset=NULL, 	      		
-                                model1=TRUE, method="glm.fit", x=FALSE, y=TRUE, contrasts=NULL,
-                                design=NULL){
-        
-        mf <- match.call(expand.dots = TRUE)					
-        mf$M <- mf$model <- NULL
-	  mf$family <- binomial(link="logit")
-	  mf$link <- NULL
-        
-        if(is.null(ids)){ids<-~1}
-      
-        if(is.null(repweights)){
-						
-                mf$design <- svydesign(data=data, ids=ids, probs=probs,		
-                                       strata=strata, fpc=fpc, nest=nest, check.strata=check.strata,
-						   weights=weights)
-                
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$rho <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- mf$return.replicates <- NULL
-                mf$data <- NULL
-                
-        } else {		
-        assign(".survey.prob.weights", weights, envir = globalenv())				
-                mf$design <- svrepdesign(data=data, repweights=repweights, 	
-                                         type=type, weights=weights, combined.weights=combined.weights, 
-                                         rho=rho, bootstrap.average=bootstrap.average, scale=scale, 
-                                         rscales=rscales, fpctype=fpctype, fpc=fpc)
-                                        # ...drop extraneous options
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- NULL
-                mf$data <- NULL
-        }							
-        
-        mf[[1]] <- as.name("svyglm")
-        as.call(mf)    
-				
-}
diff --git a/R/zelig2lognorm.R b/R/zelig2lognorm.R
deleted file mode 100644
index bf00fa3..0000000
--- a/R/zelig2lognorm.R
+++ /dev/null
@@ -1,19 +0,0 @@
-zelig2lognorm <- function(formula, model, data, M, ...) {
-        mf <- match.call(expand.dots = TRUE)
-        mf$model <- mf$M <- NULL
-        
-        mf[[1]] <- survival::survreg
-        mf$dist <- "lognormal"
-        if (is.null(mf$robust))
-          mf$robust <- FALSE
-        if (!is.null(mf$cluster) & !mf$robust)
-          stop("\nIf cluster is specified, robust must be TRUE.")
-
-        if (!is.null(mf$cluster)) {
-                mf$formula <- update(mf$formula, paste(". ~ . + ", paste("cluster(",mf$cluster,")")))
-                mf$cluster <- NULL
-        } else if (mf$robust)
-          mf$formula <- update(formula, paste(". ~ . + ", paste("cluster(1:nrow(",deparse(formula[[2]]),"))")))
-
-        as.call(mf)
-}
diff --git a/R/zelig2ls.R b/R/zelig2ls.R
deleted file mode 100644
index 18c0c61..0000000
--- a/R/zelig2ls.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2ls <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- FALSE
-  mf$M <- mf$robust <- NULL
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  
-  mf[[1]] <- lm
-  as.call(mf)
-}
diff --git a/R/zelig2mlogit.R b/R/zelig2mlogit.R
deleted file mode 100644
index cfc15f0..0000000
--- a/R/zelig2mlogit.R
+++ /dev/null
@@ -1,14 +0,0 @@
-zelig2mlogit <- function(formula, model, data, M, ...) {
-        mf <- match.call(expand.dots = TRUE)
-        mf[[1]] <- VGAM::vglm 
-        mf$family <- VGAM::multinomial
-        formula<-parse.formula(formula,model,data)
-        tt<-terms(formula)
-        fact<-attr(tt,"depFactors")$depFactorVar
-        ndim<-length(attr(tt,"depFactors")$depLevels)
-        tmp <- cmvglm(formula, mf$model, ndim,data,fact)
-        mf$formula <- tmp$formula  
-        mf$constraints <- tmp$constraints
-        mf$model <- mf$M <- NULL
-        as.call(mf)
-}
diff --git a/R/zelig2negbin.R b/R/zelig2negbin.R
deleted file mode 100644
index 9c04282..0000000
--- a/R/zelig2negbin.R
+++ /dev/null
@@ -1,8 +0,0 @@
-zelig2negbin <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- mf$robust <- NULL
-  mf[[1]] <- MASS::glm.nb
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2netcloglog.R b/R/zelig2netcloglog.R
deleted file mode 100644
index d4d615f..0000000
--- a/R/zelig2netcloglog.R
+++ /dev/null
@@ -1,8 +0,0 @@
-zelig2cloglog.net <- function(formula, model, data, M, LF="cloglog", ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("netbinom")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	mf$LF <- "cloglog"
-	as.call(mf)
-}
diff --git a/R/zelig2netgamma.R b/R/zelig2netgamma.R
deleted file mode 100644
index 437d96d..0000000
--- a/R/zelig2netgamma.R
+++ /dev/null
@@ -1,8 +0,0 @@
-zelig2gamma.net <- function(formula, model, data, M, ...) {
-        
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("gamma.net")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	as.call(mf)
-}
diff --git a/R/zelig2netlogit.R b/R/zelig2netlogit.R
deleted file mode 100644
index e312ad9..0000000
--- a/R/zelig2netlogit.R
+++ /dev/null
@@ -1,8 +0,0 @@
-zelig2logit.net <- function(formula, model, data, M, LF="logit", ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("netbinom")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	mf$LF <- "logit"
-	as.call(mf)
-}
diff --git a/R/zelig2netls.R b/R/zelig2netls.R
deleted file mode 100644
index 51376de..0000000
--- a/R/zelig2netls.R
+++ /dev/null
@@ -1,7 +0,0 @@
-zelig2ls.net <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("callnetlm")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	as.call(mf)
-}
diff --git a/R/zelig2netnormal.R b/R/zelig2netnormal.R
deleted file mode 100644
index 404d935..0000000
--- a/R/zelig2netnormal.R
+++ /dev/null
@@ -1,7 +0,0 @@
-zelig2normal.net <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("normal.net")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	as.call(mf)
-}
diff --git a/R/zelig2netpoisson.R b/R/zelig2netpoisson.R
deleted file mode 100644
index e17d3f4..0000000
--- a/R/zelig2netpoisson.R
+++ /dev/null
@@ -1,7 +0,0 @@
-zelig2poisson.net <- function(formula, model, data, M, ...) {
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("poisson.net")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	as.call(mf)
-}
diff --git a/R/zelig2netprobit.R b/R/zelig2netprobit.R
deleted file mode 100644
index 0fc7c2d..0000000
--- a/R/zelig2netprobit.R
+++ /dev/null
@@ -1,12 +0,0 @@
-zelig2probit.net <- function(formula, model, data, M, ...) {
-  check <- library()
-  if(any(check$results[,"Package"] == "sna")) 
-    require(sna)
-  else
-        stop("Please install sna using \n	install.packages(\"sna\")")
-	mf <- match.call(expand.dots = TRUE)
-	mf[[1]] <- as.name("netbinom")	
-	mf$M <- mf$model  <- NULL
-	mf$formula <- formula
-	mf$LF <- "probit"
-	as.call(mf)	}
\ No newline at end of file
diff --git a/R/zelig2normal.R b/R/zelig2normal.R
deleted file mode 100644
index b80af1a..0000000
--- a/R/zelig2normal.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2normal <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$M <- mf$robust <- NULL
-  mf$model <- FALSE
-  mf[[1]] <- glm
-  mf$family <- gaussian
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2normal.gee.R b/R/zelig2normal.gee.R
deleted file mode 100644
index f48f308..0000000
--- a/R/zelig2normal.gee.R
+++ /dev/null
@@ -1,15 +0,0 @@
-zelig2normal.gee <- function(formula, model, data, M, ...) {
-  require(gee)
-  mf <- match.call(expand.dots = TRUE)
-  if(is.null(mf$corstr))
-    mf$corstr <- as.character("independence")
-  if(mf$corstr=="fixed" & is.null (mf$R))
-    stop("R must be defined.")
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- gee::gee
-  if (is.character(mf$id))
-    mf$id <- as.name(mf$id)
-  mf$family <- gaussian
-  mf$robust <- NULL
-  as.call(mf)
-}
\ No newline at end of file
diff --git a/R/zelig2normal.survey.R b/R/zelig2normal.survey.R
deleted file mode 100644
index d504ea3..0000000
--- a/R/zelig2normal.survey.R
+++ /dev/null
@@ -1,48 +0,0 @@
-zelig2normal.survey <- function(formula, model, data, M, 
-                                weights=NULL, 
-                                ids=NULL, probs=NULL, strata = NULL,  
-                                fpc=NULL, nest = FALSE, check.strata = !nest, 			
-                                repweights = NULL, 				
-                                type, combined.weights=FALSE, rho = NULL, bootstrap.average=NULL, 
-                                scale=NULL, rscales=NULL, fpctype="fraction",
-                                return.replicates=FALSE,    			
-                                na.action="na.omit", start=NULL, etastart=NULL, 
-                                mustart=NULL, offset=NULL, 	      		
-                                model1=TRUE, method="glm.fit", x=FALSE, y=TRUE, contrasts=NULL,
-                                design=NULL){
-        
-        mf <- match.call(expand.dots = TRUE)					
-        mf$M <- mf$model <- NULL    							
-        
-        if(is.null(ids)){ids<-~1}
-
-        if(is.null(repweights)){
-						
-                mf$design <- svydesign(data=data, ids=ids, probs=probs,		
-                                       strata=strata, fpc=fpc, nest=nest, check.strata=check.strata,
-						   weights=weights)  ### added conventional weights input here
-                
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$rho <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- mf$return.replicates <- NULL
-                mf$data <- NULL
-                
-        } else {		
-        assign(".survey.prob.weights", weights, envir = globalenv())	 ### moved global assignment down here
-                mf$design <- svrepdesign(data=data, repweights=repweights, 	
-                                         type=type, weights=weights, combined.weights=combined.weights, 
-                                         rho=rho, bootstrap.average=bootstrap.average, scale=scale, 
-                                         rscales=rscales, fpctype=fpctype, fpc=fpc)
-                                        # ...drop extraneous options
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- NULL
-                mf$data <- NULL
-        }							
-        
-        mf[[1]] <- as.name("svyglm")
-        as.call(mf)    
-				
-}
diff --git a/R/zelig2ologit.R b/R/zelig2ologit.R
deleted file mode 100644
index 206d048..0000000
--- a/R/zelig2ologit.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2ologit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- MASS::polr
-  mf$Hess <- TRUE
-  mf$method <- "logistic"
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2oprobit.R b/R/zelig2oprobit.R
deleted file mode 100644
index e93f01a..0000000
--- a/R/zelig2oprobit.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2oprobit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- MASS::polr
-  mf$Hess <- TRUE
-  mf$method <- "probit"
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2poisson.R b/R/zelig2poisson.R
deleted file mode 100644
index 378f5d2..0000000
--- a/R/zelig2poisson.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2poisson <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$M <- mf$robust <- NULL
-  mf$model <- FALSE
-  mf[[1]] <- glm
-  mf$family <- poisson
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2poisson.gee.R b/R/zelig2poisson.gee.R
deleted file mode 100644
index 590838b..0000000
--- a/R/zelig2poisson.gee.R
+++ /dev/null
@@ -1,15 +0,0 @@
-zelig2poisson.gee <- function(formula, model, data, M, ...) {
-  require(gee)
-  mf <- match.call(expand.dots = TRUE)
-  if(is.null(mf$corstr))
-    mf$corstr <- as.character("independence")
-  if(mf$corstr=="fixed" & is.null (mf$R))
-    stop("R must be defined.")
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- gee::gee
-  if (is.character(mf$id))
-    mf$id <- as.name(mf$id)
-  mf$family <- poisson
-  mf$robust <- NULL
-  as.call(mf)
-}
\ No newline at end of file
diff --git a/R/zelig2poisson.mixed.R b/R/zelig2poisson.mixed.R
deleted file mode 100644
index 774fec8..0000000
--- a/R/zelig2poisson.mixed.R
+++ /dev/null
@@ -1,17 +0,0 @@
-zelig2poisson.mixed <- function(formula, model, data, M, ...){
-        mf <- match.call(expand.dots=TRUE)
-        mf[[1]]<- as.name("lmer")
-        
-        mf$formula <- tolmerFormat(reduceMI(formula))
-        
-        mf$model <- mf$M <- NULL
-        
-        if (is.null(mf$family)){
-                mf$family <- poisson
-        }
-        return(as.call(mf))
-}
-
-model.frame.lmer <- function(obj){
-        obj at frame
-}
diff --git a/R/zelig2poisson.survey.R b/R/zelig2poisson.survey.R
deleted file mode 100644
index c9d9d2d..0000000
--- a/R/zelig2poisson.survey.R
+++ /dev/null
@@ -1,50 +0,0 @@
-zelig2poisson.survey <- function(formula, model, data, M, 
-                                weights=NULL, 
-                                ids=NULL, probs=NULL, strata = NULL,  
-                                fpc=NULL, nest = FALSE, check.strata = !nest, 			
-                                repweights = NULL, 				
-                                type, combined.weights=FALSE, rho = NULL, bootstrap.average=NULL, 
-                                scale=NULL, rscales=NULL, fpctype="fraction",
-                                return.replicates=FALSE,    			
-                                na.action="na.omit", start=NULL, etastart=NULL, 
-                                mustart=NULL, offset=NULL, 	      		
-                                model1=TRUE, method="glm.fit", x=FALSE, y=TRUE, contrasts=NULL,
-                                design=NULL, link="log"){
-        
-        mf <- match.call(expand.dots = TRUE)					
-        mf$M <- mf$model <- NULL
-	  mf$family <- poisson(link=link)
-	  mf$link <- NULL
-        
-        if(is.null(ids)){ids<-~1}
-      
-        if(is.null(repweights)){
-						
-                mf$design <- svydesign(data=data, ids=ids, probs=probs,		
-                                       strata=strata, fpc=fpc, nest=nest, check.strata=check.strata,
-						   weights=weights)
-                
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$rho <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- mf$return.replicates <- NULL
-                mf$data <- NULL
-                
-        } else {		
-        assign(".survey.prob.weights", weights, envir = globalenv())				
-                mf$design <- svrepdesign(data=data, repweights=repweights, 	
-                                         type=type, weights=weights, combined.weights=combined.weights, 
-                                         rho=rho, bootstrap.average=bootstrap.average, scale=scale, 
-                                         rscales=rscales, fpctype=fpctype, fpc=fpc)
-                                        # ...drop extraneous options
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- NULL
-                mf$data <- NULL
-        }							
-        
-        mf[[1]] <- as.name("svyglm")
-        as.call(mf)    
-				
-}
diff --git a/R/zelig2probit.R b/R/zelig2probit.R
deleted file mode 100644
index 3c781e8..0000000
--- a/R/zelig2probit.R
+++ /dev/null
@@ -1,10 +0,0 @@
-zelig2probit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$robust <- mf$M <- mf$probit <- NULL
-  mf$model <- FALSE
-  mf[[1]] <- stats::glm
-  mf$family <- binomial(link="probit")
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-  as.call(mf)
-}
diff --git a/R/zelig2probit.gee.R b/R/zelig2probit.gee.R
deleted file mode 100644
index e8fb809..0000000
--- a/R/zelig2probit.gee.R
+++ /dev/null
@@ -1,15 +0,0 @@
-zelig2probit.gee <- function(formula, model, data, M, ...) {
-  require(gee)
-  mf <- match.call(expand.dots = TRUE)
-  if(is.null(mf$corstr))
-    mf$corstr <- as.character("independence")
-  if(mf$corstr=="fixed" & is.null (mf$R))
-    stop("R must be defined.")
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- gee::gee
-  if (is.character(mf$id))
-    mf$id <- as.name(mf$id)
-  mf$family <- binomial(link="probit")
-  mf$robust <- NULL
-  as.call(mf)
-}
\ No newline at end of file
diff --git a/R/zelig2probit.mixed.R b/R/zelig2probit.mixed.R
deleted file mode 100644
index e8eb356..0000000
--- a/R/zelig2probit.mixed.R
+++ /dev/null
@@ -1,19 +0,0 @@
-zelig2probit.mixed <- function(formula, model, data, M, ...){
-        mf <- match.call(expand.dots=TRUE)
-
-        mf[[1]]<- as.name("lmer")
-
-        mf$formula <- tolmerFormat(reduceMI(formula))
-
-        mf$model <- mf$M <- NULL
-
-        if (is.null(mf$family)){
-          mf$family <- binomial(link="probit")
-        }
-
-        return (as.call(mf))
-}
-
-model.frame.lmer<-function(obj){
-obj at frame
-}
diff --git a/R/zelig2probit.survey.R b/R/zelig2probit.survey.R
deleted file mode 100644
index fc82c23..0000000
--- a/R/zelig2probit.survey.R
+++ /dev/null
@@ -1,50 +0,0 @@
-zelig2probit.survey <- function(formula, model, data, M, 
-                                weights=NULL, 
-                                ids=NULL, probs=NULL, strata = NULL,  
-                                fpc=NULL, nest = FALSE, check.strata = !nest, 			
-                                repweights = NULL, 				
-                                type, combined.weights=FALSE, rho = NULL, bootstrap.average=NULL, 
-                                scale=NULL, rscales=NULL, fpctype="fraction",
-                                return.replicates=FALSE,    			
-                                na.action="na.omit", start=NULL, etastart=NULL, 
-                                mustart=NULL, offset=NULL, 	      		
-                                model1=TRUE, method="glm.fit", x=FALSE, y=TRUE, contrasts=NULL,
-                                design=NULL){
-        
-        mf <- match.call(expand.dots = TRUE)					
-        mf$M <- mf$model <- NULL
-	  mf$family <- binomial(link="probit")
-	  mf$link <- NULL
-        
-        if(is.null(ids)){ids<-~1}
-      
-        if(is.null(repweights)){
-						
-                mf$design <- svydesign(data=data, ids=ids, probs=probs,		
-                                       strata=strata, fpc=fpc, nest=nest, check.strata=check.strata,
-						   weights=weights)
-                
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$rho <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- mf$return.replicates <- NULL
-                mf$data <- NULL
-                
-        } else {		
-        assign(".survey.prob.weights", weights, envir = globalenv())				
-                mf$design <- svrepdesign(data=data, repweights=repweights, 	
-                                         type=type, weights=weights, combined.weights=combined.weights, 
-                                         rho=rho, bootstrap.average=bootstrap.average, scale=scale, 
-                                         rscales=rscales, fpctype=fpctype, fpc=fpc)
-                                        # ...drop extraneous options
-                mf$weights <- mf$ids <- mf$probs <- mf$strata <- mf$fpc <- NULL
-                mf$nest <- mf$check.strata <- mf$repweights <- mf$type <- NULL
-                mf$combined.weights <- mf$bootstrap.average <- NULL
-                mf$scale <- mf$rscales <- mf$fpctype <- NULL
-                mf$data <- NULL
-        }							
-        
-        mf[[1]] <- as.name("svyglm")
-        as.call(mf)    
-				
-}
diff --git a/R/zelig2quantile.R b/R/zelig2quantile.R
deleted file mode 100644
index 3d3eb9a..0000000
--- a/R/zelig2quantile.R
+++ /dev/null
@@ -1,20 +0,0 @@
-zelig2quantile <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots=TRUE)
-  tau <- mf$tau
-  if(is.null(tau))
-    mf$tau<- tau <- 0.5
-  
-#  if(length(tau) > 1)
-#    stop("Zelig does not yet support multiple quantile estimates. Please provide only one value for tau on the interval [0,1].")
-#  if(any(tau < 0) || any(tau > 1))
-#    stop("Zelig does  not support solutions over the full set of quantiles. Please specify tau on the interval [0,1].")
-
-  mf$model <- FALSE
-  mf$se <- NULL #This gets handled by zelig3rq
-  mf$M <- NULL
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-
-  mf[[1]] <- rq
-  as.call(mf)
-}
diff --git a/R/zelig2relogit.R b/R/zelig2relogit.R
deleted file mode 100644
index 4b82fe6..0000000
--- a/R/zelig2relogit.R
+++ /dev/null
@@ -1,12 +0,0 @@
-zelig2relogit <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- mf$robust <- NULL
-  if (is.null(mf$case.control))
-    mf$case.control <- "prior"
-  ## transforms y ~ rhs into cbind(y, 1-y) ~ rhs
-  mf$formula<- as.formula(call("~", call("cbind",formula[[2]], call("-",1, formula[[2]])), formula[[3]]))
-  if (is.null(mf$bias.correct))
-    mf$bias.correct <- TRUE
-  mf[[1]] <- as.name("relogit")
-  as.call(mf)
-}
diff --git a/R/zelig2rq.R b/R/zelig2rq.R
deleted file mode 100644
index 4e41fb1..0000000
--- a/R/zelig2rq.R
+++ /dev/null
@@ -1,20 +0,0 @@
-zelig2rq <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots=TRUE)
-  tau <- mf$tau
-  if(is.null(tau))
-    mf$tau<- tau <- 0.5
-  
-#  if(length(tau) > 1)
-#    stop("Zelig does not yet support multiple quantile estimates. Please provide only one value for tau on the interval [0,1].")
-#  if(any(tau < 0) || any(tau > 1))
-#    stop("Zelig does  not support solutions over the full set of quantiles. Please specify tau on the interval [0,1].")
-
-  mf$model <- FALSE
-  mf$se <- NULL #This gets handled by zelig3rq
-  mf$M <- NULL
-  if (is.character(mf$weights))
-    mf$weights <- as.name(mf$weights)
-
-  mf[[1]] <- rq
-  as.call(mf)
-}
diff --git a/R/zelig2sur.R b/R/zelig2sur.R
deleted file mode 100644
index 811a2a2..0000000
--- a/R/zelig2sur.R
+++ /dev/null
@@ -1,11 +0,0 @@
-zelig2sur <- function(formula, model, data, M,...) {
-        mf <- match.call(expand.dots = TRUE)
-        mf[[1]] <- as.name("callsystemfit")
-        formula<-parse.formula(formula,model)
-        tt<-terms(formula)
-        mf$method<-"SUR"
-        mf$model<- mf$M<-NULL
-        #mf$formula<-formula
-        mf$formula<-formula[names(attr(tt,"depVars"))]
-        as.call(mf)
-}
diff --git a/R/zelig2threesls.R b/R/zelig2threesls.R
deleted file mode 100644
index c9e3290..0000000
--- a/R/zelig2threesls.R
+++ /dev/null
@@ -1,20 +0,0 @@
-zelig2threesls <- function(formula, model, data, M,...) {
-        "%w/o%" <- function(x,y) x[!x %in% y]
-        mf <- match.call(expand.dots = TRUE)
-        mf[[1]] <- as.name("callsystemfit")
-        formula<-parse.formula(formula,model)
-        tt<-terms(formula)
-        ins<-names(tt) %w/o% names(attr(tt,"depVars"))
-        if(length(ins)!=0)
-          if(length(ins)==1)
-            inst<-formula[[ins]]
-          else inst<-formula[ins]
-        else
-          stop("threesls model requires instrument!!\n")
-        mf$method<-"3SLS"
-        mf$inst<-inst
-        mf$model<- mf$M<-NULL
-        mf$formula<-formula[names(attr(tt,"depVars"))]
-        #class(mf$formula)<-c("multiple","list")
-        as.call(mf)
-}
diff --git a/R/zelig2tobit.R b/R/zelig2tobit.R
deleted file mode 100644
index 7a54e1b..0000000
--- a/R/zelig2tobit.R
+++ /dev/null
@@ -1,52 +0,0 @@
-zelig2tobit <- function(formula, model, data, M, ...) {
-        mf <- match.call(expand.dots = TRUE)
-        mf$model <- mf$M <- NULL
-        
-        mf[[1]] <- survival::survreg
-        mf$dist <- "gaussian"
-        if (is.null(mf$above)) 
-          above <- Inf
-        else {
-                above <- mf$above
-                mf$above <- NULL
-        }
-        if (is.null(mf$below))
-          below <- 0
-        else {
-                below <- mf$below
-                mf$below <- NULL
-        }
-        
-        ## Fixing the call for robust SEs
-        if (is.null(mf$robust))
-          mf$robust <- FALSE
-        if (!is.null(mf$cluster) & !mf$robust) 
-          stop("\nIf cluster is specified, robust must be TRUE.")
-
-  
-        if (!is.null(mf$cluster)) {
-                mf$formula <- update(mf$formula, paste(". ~ . + ", paste("cluster(",mf$cluster,")")))
-                mf$cluster <- NULL
-        } else if (mf$robust)
-          mf$formula <- update(formula, paste(". ~ . + ", paste("cluster(1:nrow(",deparse(formula[[2]]),"))")))
-    
-        ## Fixing the call for left censoring
-        if (length(grep("Surv", as.character(formula[[2]]))) == 0) { 
-                if (above == Inf & is.numeric(below) & below != -Inf) {
-                        tt <- "left"
-                }
-                else if (below == -Inf & above == Inf) {
-                        stop("No censoring, use zelig(..., model = \"normal\") instead!")
-                }
-                else if (below == -Inf & is.numeric(above) & above != Inf) {
-                        stop("Right censored data not supported in a tobit model.")
-                }
-                else if (is.numeric(above) & is.numeric(below) & above != Inf & below != -Inf) {
-                        stop("Interval censored data not suppored in a tobit model.")
-                }
-                mf$formula[[2]] <- call("Surv", formula[[2]],
-                                        call("<", below, formula[[2]]),
-                                        type = "left")
-        }
-        as.call(mf)
-}
diff --git a/R/zelig2twosls.R b/R/zelig2twosls.R
deleted file mode 100644
index 5595ec5..0000000
--- a/R/zelig2twosls.R
+++ /dev/null
@@ -1,23 +0,0 @@
-zelig2twosls <- function(formula, model, data, M,...) {
-        "%w/o%" <- function(x,y) x[!x %in% y]
-        mf <- match.call(expand.dots = TRUE)
-        
-        mf[[1]] <- as.name("callsystemfit")
-        
-        formula<-parse.formula(formula,model)
-        tt<-terms(formula)
-        
-        ins<-names(tt) %w/o% names(attr(tt,"depVars"))
-        if(length(ins)!=0)
-          if(length(ins)==1)
-            inst<-formula[[ins]]
-          else inst<-formula[ins]
-        else
-          stop("twosls model requires instrument!!\n")
-        mf$method<-"2SLS"
-        mf$inst<-inst
-        mf$model<- mf$M<-NULL
-        mf$formula<-formula[names(attr(tt,"depVars"))]
-
-        as.call(mf)
-}
diff --git a/R/zelig2weibull.R b/R/zelig2weibull.R
deleted file mode 100644
index f56eac9..0000000
--- a/R/zelig2weibull.R
+++ /dev/null
@@ -1,18 +0,0 @@
-zelig2weibull <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- NULL
-
-  mf[[1]] <- survival::survreg
-  mf$dist <- "weibull"
-  if (is.null(mf$robust))
-    mf$robust <- FALSE
-  if (!is.null(mf$cluster) & !mf$robust)
-    stop("\nIf cluster is specified, robust must be TRUE.")
-  
-  if (!is.null(mf$cluster)) {
-          mf$formula <- update(mf$formula, paste(". ~ . + ", paste("cluster(",mf$cluster,")")))
-          mf$cluster <- NULL
-  } else if (mf$robust)
-    mf$formula <- update(formula, paste(". ~ . + ", paste("cluster(1:nrow(",deparse(formula[[2]]),"))")))
-  as.call(mf)
-}
diff --git a/R/zelig3MCMC.R b/R/zelig3MCMC.R
deleted file mode 100644
index 5d48c24..0000000
--- a/R/zelig3MCMC.R
+++ /dev/null
@@ -1,136 +0,0 @@
-zelig3ei.hier <- zelig3ei.dynamic <- function(res, fcall=NULL, zcall=NULL) {
-
-  out <- list()
-  out$coefficients <- res
-  out$formula <- zcall$formula
-  out$data <- zcall$data
-
-  if (!is.null(zcall$N))
-    out$N <- zcall$N
-  
-  out$model <- model.frame(formula=eval(out$formula),
-  data=eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  attr(out$terms,"intercept") <- 0
-  if (is.null(zcall$seed)) out$seed <- NA
-    else out$seed <- zcall$seed
-  class(out) <- "MCMCZelig"
-
- out
-}
-
-
-zelig3logit.bayes <- zelig3oprobit.bayes <- zelig3poisson.bayes <-
-  zelig3mlogit.bayes <- zelig3normal.bayes <- function(res, fcall=NULL, zcall=NULL) {
-
-  out <- list()
-  out$coefficients <- res
-  out$formula <- zcall$formula
-  out$data <- zcall$data
-
-  out$model <- model.frame(formula=eval(out$formula),
-  data=eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  if (is.null(zcall$seed)) out$seed <- NA
-    else out$seed <- zcall$seed
-
-  class(out) <- "MCMCZelig"
-
- out
-}
-
-zelig3probit.bayes <- function(res, fcall=NULL, zcall=NULL) {
-
-  out <- list()
-  if (is.null(zcall$bayes.resid)) 
-    zcall$bayes.resid <- FALSE
-
-  if (zcall$bayes.resid==FALSE)
-    out$coefficients <- res
-  else 
-    {
-      p<-dim(model.matrix(eval(zcall$formula), eval(zcall$data)))[2]
-      out$coefficients <- res[,1:p]
-      out$bayes.residuals <- res[, -(1:p)]
-    }  
-  
-  out$formula <- zcall$formula
-  out$data <- zcall$data
-
-  out$model <- model.frame(formula=eval(out$formula),data=eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  if (is.null(zcall$seed)) out$seed <- NA
-    else out$seed <- zcall$seed
-
-  class(out) <- "MCMCZelig"
-
- out
-
-  }
-
-  zelig3tobit.bayes <- function(res, fcall=NULL, zcall=NULL) {
-
-  out <- list()
-  out$coefficients <- res
-  out$formula <- zcall$formula
-  if (!is.null(zcall$below)) out$below <- zcall$below
-  else out$below <- 0
-
-  if (!is.null(zcall$above)) out$above <- zcall$above
-  else out$above <- Inf 
- 
-  out$data <- zcall$data
-
-  out$model <- model.frame(formula=eval(out$formula),
-  data=eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  if (is.null(zcall$seed)) out$seed <- NA
-    else out$seed <- zcall$seed
-
-  class(out) <- "MCMCZelig"
-
- out
-}
-
-  zelig3factor.bayes <- zelig3factor.ord <- zelig3factor.mix <- function(res, fcall=NULL, zcall=NULL) {
-
-  out <- list()
-  out$coefficients <- res
-  out$formula <- zcall$formula
-  out$data <- zcall$data
-  
-  out$model <- model.frame(formula=eval(out$formula),
-  data=eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  attr(out$terms,"intercept") <- 0
-  if (is.null(zcall$seed)) out$seed <- NA
-    else out$seed <- zcall$seed
-
-  class(out) <- "MCMCZelig"
-
- out
-}
-
-
-  zelig3irt1d <- zelig3irtkd <- function(res, fcall=NULL, zcall=NULL) {
-
-  out <- list()
-  out$coefficients <- res
-  out$formula <- zcall$formula
-  out$data <- zcall$data
-  
-  out$model <- model.frame(formula=eval(out$formula),
-  data=eval(out$data))
-  out$terms <- attr(out$model, "terms")
-  attr(out$terms,"intercept") <- 0
-  if (is.null(zcall$seed)) out$seed <- NA
-    else out$seed <- zcall$seed
-
-  class(out) <- "MCMCZelig"
-
- out
-}
-
-
-
-
diff --git a/R/zelig3aov.R b/R/zelig3aov.R
deleted file mode 100644
index d3b7da6..0000000
--- a/R/zelig3aov.R
+++ /dev/null
@@ -1,9 +0,0 @@
-zelig3aov <- function(res, fcall = NULL, zcall = NULL) {
-  
- if(length(attr(attributes(res)$terms, "specials")$Error))
-   class(res) <- c("zaovlist", class(res))    
- if("maov" %in% class(res))
-    class(res) <- c("zmaov", "zmlm", class(res))  
-  return(res)
-}
-
diff --git a/R/zelig3coxph.R b/R/zelig3coxph.R
deleted file mode 100644
index 907128a..0000000
--- a/R/zelig3coxph.R
+++ /dev/null
@@ -1,15 +0,0 @@
-zelig3coxph <- function(res, fcall=NULL, zcall=NULL){	
-  rob <- eval(zcall$robust)	
-  if (!is.null(rob)){		
-    if (!is.logical(rob))	
-      stop("invalid input for robust.  Choose either TRUE or FALSE.")
-    else if(!rob)   		
-      class(res) <- c("coxph.naive", class(res))	
-    else if (rob)		
-      class(res) <- c("coxph.robust", class(res))	
-	}
-  else {				
-    class(res) <- c("coxph.naive", class(res))	
-	}
-  return(res)
-}
diff --git a/R/zelig3gee.R b/R/zelig3gee.R
deleted file mode 100755
index c865bcd..0000000
--- a/R/zelig3gee.R
+++ /dev/null
@@ -1,16 +0,0 @@
-zelig3logit.gee <- zelig3probit.gee <- zelig3normal.gee <- 
-zelig3poisson.gee <- zelig3gamma.gee <- function(res, fcall=NULL, zcall=NULL){	
-  rob <- eval(zcall$robust)	
-  if (!is.null(rob)){		
-    if (!is.logical(rob))	
-      stop("invalid input for robust.  Choose either TRUE or FALSE.")
-    else if(!rob)   		
-      class(res) <- c("gee.naive", class(res))	
-    else if (rob)		
-      class(res) <- c("gee.robust", class(res))	
-	}
-  else {				
-    class(res) <- c("gee.robust", class(res))	
-	}
-  return(res)
-}
\ No newline at end of file
diff --git a/R/zelig3glm.R b/R/zelig3glm.R
deleted file mode 100644
index a29b4a2..0000000
--- a/R/zelig3glm.R
+++ /dev/null
@@ -1,22 +0,0 @@
-zelig3logit <- zelig3probit <- zelig3normal <- zelig3gamma <-
-  zelig3poisson <- zelig3negbin <- zelig3relogit <-
-  function(res, fcall = NULL, zcall = NULL) {
-    rob <- eval(zcall$robust)
-    if (!is.null(rob)) {
-      require(sandwich)
-      if (is.list(rob)) {
-        if (!any(rob$method %in% c("vcovHAC", "kernHAC", "weave")))
-          stop("such a robust option is not supported")
-        else {
-          class(res) <- c("glm.robust", class(res))    
-          res$robust <- rob
-        }
-      }
-      else if (!is.logical(rob)) 
-        stop("invalid input for robust.  Choose either TRUE or a list of options.")
-      else if (rob) 
-        class(res) <- c("glm.robust", class(res))    
-    }
-    return(res)
-  }
-
diff --git a/R/zelig3ls.R b/R/zelig3ls.R
deleted file mode 100644
index 6210a75..0000000
--- a/R/zelig3ls.R
+++ /dev/null
@@ -1,22 +0,0 @@
-zelig3ls <- function(res, fcall = NULL, zcall = NULL) {
-  rob <- eval(zcall$robust)
-  if (!is.null(rob)) {
-    require(sandwich)
-    if (is.list(rob)) {
-      if (!any(rob$method %in% c("vcovHC", "vcovHAC",
-                                          "kernHAC", "weave")))
-        stop("such a robust option is not supported")
-      res$robust <- rob
-      class(res) <- c("lm.robust", class(res))    
-    }
-    else if (!is.logical(rob)) 
-      stop("invalid input for robust.  Choose either TRUE or a list of options.")
-    else if (rob) 
-      class(res) <- c("lm.robust", class(res))    
-  }
-  rc <- class(res)
-  if("mlm" %in% class(res))
-    class(res) <- c("zmlm", rc)
-  return(res)
-}
-
diff --git a/R/zelig3mixed.R b/R/zelig3mixed.R
deleted file mode 100644
index 0510905..0000000
--- a/R/zelig3mixed.R
+++ /dev/null
@@ -1,8 +0,0 @@
-## modified by delia/ferdi 09/22/08
-############################
-
-zelig3ls.mixed <- zelig3gamma.mixed <- zelig3poisson.mixed <- zelig3probit.mixed <- 
-	zelig3logit.mixed <- function(res, fcall = NULL, zcall=NULL){
-		res at nlmodel <- fcall at call
-		return (res)
-}
\ No newline at end of file
diff --git a/R/zelig3ologit.R b/R/zelig3ologit.R
deleted file mode 100644
index 29c6608..0000000
--- a/R/zelig3ologit.R
+++ /dev/null
@@ -1,12 +0,0 @@
-zelig3ologit <- function(res, fcall = NULL, zcall = NULL) { 
-  inv.link <- function(eta, zeta) {
-    tmp1 <- matrix(1, nrow = length(eta), ncol = length(zeta) + 1)
-    ilogit <- function(z, e) {
-      exp(z - e) / (1 + exp(z - e))
-    }
-    tmp1[, 1:length(zeta)] <- sapply(zeta, ilogit, e = eta)
-    tmp1
-  }
-  res$inv.link <- as.function(inv.link)
-  res
-}
diff --git a/R/zelig3oprobit.R b/R/zelig3oprobit.R
deleted file mode 100644
index 157823b..0000000
--- a/R/zelig3oprobit.R
+++ /dev/null
@@ -1,12 +0,0 @@
-zelig3oprobit <- function(res, fcall = NULL, zcall = NULL) {
-  inv.link <- function(eta, zeta) {
-    tmp1 <- matrix(1, nrow = length(eta), ncol = length(zeta) + 1)
-    iprobit <- function(z, e)
-      pnorm(z - e)
-    tmp1[, 1:length(zeta)] <- sapply(zeta, iprobit, e = eta)
-    tmp1
-  }
-  res$inv.link <- as.function(inv.link)
-  res
-}
-
diff --git a/R/zelig3quantile.R b/R/zelig3quantile.R
deleted file mode 100644
index fcd62ca..0000000
--- a/R/zelig3quantile.R
+++ /dev/null
@@ -1,26 +0,0 @@
-zelig3quantile <- function(res, fcall = NULL, zcall = NULL) {
-    se <- eval(zcall$se)
-    if(!is.null(se) && se=="rank"){
-        warning("Rank test inversion is incompatible with estimation of the covariance matrix. Switching se method to \"nid\".")
-        se <- "nid"
-    }
-    else if(is.null(se))
-        se <- "nid" 
-    res$se <- se
-    res
-}
-
-stratify.rqs <- function(object){
-    x <- vector("list", length(object$tau))
-    for(i in 1:length(object$tau)){
-        xi <- object
-        xi$coefficients <- xi$coefficients[,i]
-        xi$residuals <- xi$residuals[,i]
-        xi$tau <- xi$tau[i]
-        class(xi) <- "rq"
-        x[[i]] <- xi 
-    }
-    names(x) <- object$tau
-    x
-}
-
diff --git a/R/zelig3relogit.R b/R/zelig3relogit.R
deleted file mode 100644
index 2528fa5..0000000
--- a/R/zelig3relogit.R
+++ /dev/null
@@ -1,51 +0,0 @@
-zelig3relogit <- function(res, fcall = NULL, zcall = NULL) {
-
-  if ("relogit2" %in% class(res)) {
-    obj <- list()
-    obj$lower.estimate <- zelig3relogit(res$lower.estimate, fcall =
-                                        fcall, zcall = zcall)
-    obj$upper.estimate <- zelig3relogit(res$upper.estimate, fcall =
-                                        fcall, zcall = zcall)
-    obj$upper.estimate$call <- obj$lower.estimate$call <- as.call(zcall)
-    class(obj) <- class(res)
-    return(obj)
-  }
-  
-  zcall$robust <- eval(zcall$robust)
-
-  if (is.null(zcall$robust)) {
-    if (res$weighting) {
-      warning("robust is set to TRUE because weighting is used")
-      rob <- TRUE
-    }
-    else
-      rob <- FALSE
-  }
-  else if (is.logical(zcall$robust)) {
-    if (!zcall$robust & res$weighting) {
-      rob <- TRUE
-      warning("robust is set to TRUE because weighting is used")
-    }
-    else
-      rob <- zcall$robust
-  }
-  else
-    rob <- zcall$robust
-  if (is.list(rob)) {
-    require(sandwich)
-    if (!any(rob$method %in% c("vcovHAC", "kernHAC", "weave")))
-      stop("such a robust option is not supported")
-    else {
-      class(res) <- c("relogit", "glm.robust")    
-      res$robust <- rob
-    }
-  }
-  else if (!is.logical(rob)) 
-    stop("invalid input for robust.  Choose either TRUE or a list of options.")
-  else if (rob) {
-    require(sandwich)
-    class(res) <- c("relogit", "glm.robust")
-  }
-  return(res)
-}
-
diff --git a/R/zelig3rq.R b/R/zelig3rq.R
deleted file mode 100644
index 1288116..0000000
--- a/R/zelig3rq.R
+++ /dev/null
@@ -1,26 +0,0 @@
-zelig3rq <- function(res, fcall = NULL, zcall = NULL) {
-    se <- eval(zcall$se)
-    if(!is.null(se) && se=="rank"){
-        warning("Rank test inversion is incompatible with estimation of the covariance matrix. Switching se method to \"nid\".")
-        se <- "nid"
-    }
-    else if(is.null(se))
-        se <- "nid" 
-    res$se <- se
-    res
-}
-
-stratify.rqs <- function(object){
-    x <- vector("list", length(object$tau))
-    for(i in 1:length(object$tau)){
-        xi <- object
-        xi$coefficients <- xi$coefficients[,i]
-        xi$residuals <- xi$residuals[,i]
-        xi$tau <- xi$tau[i]
-        class(xi) <- "rq"
-        x[[i]] <- xi 
-    }
-    names(x) <- object$tau
-    x
-}
-
diff --git a/R/zelig4gee.R b/R/zelig4gee.R
deleted file mode 100644
index 3cd0901..0000000
--- a/R/zelig4gee.R
+++ /dev/null
@@ -1,46 +0,0 @@
-zelig4gamma.gee <- function(object, simpar, x, x1=NULL, bootstrap = FALSE, bootfn = NULL, dta = NULL){
-	num <- nrow(simpar)
-	coef <- simpar
-	eta <- coef %*% t(x)
-	if(!is.null(x1))
-		eta1 <- coef %*% t(x1)
-	else
-		eta1 <- NULL
-
-	good.params <- function(par, x, x1=NULL){
-		eta <- par %*% t(x)
-		if(!is.null(x1)){
-			eta1 <- par %*% t(x1)
-			pos <- which(eta>0 & eta1>0)
-		}
-		else{
-			pos <- which(apply(eta > 0,1,all))
-		}
-		params <- matrix(par[pos,], nrow=length(pos), ncol=ncol(par))
-		return(params)
-	}
-
-	if(length(which(apply(eta<=0,1,any)))>0 | (!is.null(eta1) & any(eta1<=0))){
-		warning(paste("Negative expected values in simulations.  Rejection sampling method used."))
-		sum.neg <- length(which(apply(eta<=0,1,any)))
-		coef <- good.params(par=coef, x=x, x1=x1)
-		counter <- 1
-		while(sum.neg > 0){
-			if(!bootstrap)
-				new.coef <- matrix(mvrnorm(sum.neg, mu = coef(object), Sigma = vcov(object)), nrow=sum.neg)
-			else
-				new.coef <- matrix(boot(dta, bootfn, R = sum.neg, object = object)$t, nrow=sum.neg)
-				
-			new.coef <- good.params(par=new.coef, x=x, x1=x1)
-			coef <- rbind(coef, new.coef)	
-			sum.neg <- num - nrow(coef)
-			counter <- counter + 1
-			if(counter==200)
-				warning(paste("Suitable parameters not found after 200 iterations of rejection sampling.  Iterations will continue, but choosing another x is suggested for non-conditional prediction models."))
-			if(counter==2000)
-				stop("Rejection sampling stopped after 2000 iterations.  Please choose another x value.")
-		}
-	}
-	res <- coef
-	return(res)
-}
\ No newline at end of file
diff --git a/R/zelig4glm.R b/R/zelig4glm.R
deleted file mode 100644
index 5913ced..0000000
--- a/R/zelig4glm.R
+++ /dev/null
@@ -1,75 +0,0 @@
-zelig4gamma <- function(object, simpar, x, x1=NULL, bootstrap = FALSE, bootfn = NULL, dta = NULL){
-	k <- length(getcoef(object))
-	num <- nrow(simpar)
-	coef <- simpar[,1:k]
-    	alpha <- simpar[,k+1]
-	eta <- coef %*% t(x)
-	if(!is.null(x1))
-		eta1 <- coef %*% t(x1)
-	else
-		eta1 <- NULL
-
-	good.params <- function(par, x, x1=NULL){
-		eta <- par[,1:k] %*% t(x)
-		if(!is.null(x1)){
-			eta1 <- par[,1:k] %*% t(x1)
-			pos <- which(eta>0 & eta1>0)
-		}
-		else{
-			pos <- which(apply(eta > 0,1,all))
-		}
-		params <- matrix(par[pos,], nrow=length(pos), ncol=ncol(par))
-		return(params)
-	}
-
-
-	if(length(which(apply(eta<=0,1,any)))>0 | (!is.null(eta1) & any(eta1<=0))){
-		warning(paste("Negative expected values in simulations.  Rejection sampling method used."))
-		if(!is.null(eta1))
-			sum.neg <- length(unique(c(which(apply(eta<=0,1,any)), which(apply(eta1<=0,1,any)))))
-		else
-			sum.neg <- length(which(apply(eta<=0,1,any)))
-		if(!bootstrap)
-			coef <- good.params(par=coef, x=x, x1=x1)
-		else
-			simpar <- good.params(par=simpar, x=x, x1=x1)
-		counter <- 1
-		while(sum.neg > 0){
-			if(!bootstrap){
-				new.coef <- matrix(mvrnorm(sum.neg, mu = coef(object), Sigma = vcov(object)), nrow=sum.neg)
-				new.coef <- good.params(par=new.coef, x=x, x1=x1)
-				coef <- rbind(coef, new.coef)	
-				sum.neg <- num - nrow(coef)
-			}
-			else{
-				new.simpar <- matrix(boot(dta, bootfn, R = sum.neg, object = object)$t, nrow=sum.neg)
-				new.simpar <- good.params(par=new.simpar, x=x, x1=x1)
-				simpar <- rbind(simpar, new.simpar)
-				sum.neg <- num - nrow(simpar)
-			}
-			counter <- counter + 1
-			if(counter==200)
-				warning(paste("Suitable parameters not found after 200 iterations of rejection sampling.  Iterations will continue, but choosing another x is suggested for non-conditional prediction models."))
-			if(counter==2000)
-				stop("Rejection sampling stopped after 2000 iterations.  Please choose another x value.")
-		}
-	}
-	if(!bootstrap & any(alpha<=0)){
-		pos <- which(alpha > 0)
-		alpha <- alpha[pos]
-		sum.neg <- num-length(pos)
-		while(sum.neg > 0){
-			new.alpha <- rnorm(sum.neg, mean = gamma.shape(object)$alpha, sd = gamma.shape(object)$SE)
-			pos <- which(new.alpha > 0)
-			alpha <- c(alpha, new.alpha[pos])
-			sum.neg <- num - length(pos)
-		}
-	}
-	if(!bootstrap){
-		res <- cbind(coef, alpha)
-	}
-	else{
-		res <- simpar
-	}
-	return(res)
-}
\ No newline at end of file
diff --git a/R/zvcClient.R b/R/zvcClient.R
deleted file mode 100644
index 0edc012..0000000
--- a/R/zvcClient.R
+++ /dev/null
@@ -1,1114 +0,0 @@
-### 
-### DESCRIPTION: Returns matrix of details corresponding to packages
-###              Zelig depends on directly or indirectly
-###              Every row corresponds to  a dependent package with the name,
-###              version installed in server,the node (parent pkg),and URL 
-###              Dependent packages are not repeated (no duplicates) and the highest
-###              version is reported 
-###              
-### USE    matinst <- zelig.all.packages()
-###        matinst <- zelig.all.packages(zmat)
-###        zmat, matrix of zelig dependencies--OR-- directory to get the matrix 
-###
-### OUTPUT: zmat is reduced to unique package rows (no repetitions)
-###         that Zelig depends on and that are installed in server.
-###         Rows for each package that Zelig depends, which are unique;
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/18/2007
-###
-### 
-zelig.all.packages <- function(zmat=matrixDependencies(uniqPkg=FALSE))
-{
-
-  if(length(zmat) <= 0 )
-    stop("Provide matrix of dependencies")
-  
-  zmat <- getRidVersion(zmat)
-  dm <- dim(zmat)
-  if(length(dm) <= 0)
-    return(zmat); 
-  pkgorg <- zmat[dm[1], ] ###last row
-  lv0 <- trim.blanks(zmat[dm[1],"depth"])
-  if(lv0 != "0"){
-    ix0 <- grep("^0$", zmat[,"depth"])
-    if(length(ix0)) pkgorg <- zmat[ix0, ]
-  }
-  cl <- ncol(zmat)
-  nmorg <- trim.blanks(pkgorg["Package"])
-  names(nmorg) <- NULL
-  ncl <- ncol(zmat)
-  pkginst <- zmat[, "Package"]
-
-  pkginst <- unique.default(pkginst) ###preserve order
-  ln <- length(pkginst)
-  ix <- match(nmorg,pkginst) ###put the root package in last entry or element
-  if(is.na(ix)) 
-    pkginst <- c(pkginst,nmorg)
-  
-  if(!is.na(ix) && !identical(nmorg, pkginst[ln])) {
-    pkginst <- pkginst[-ix]
-    nm <- names(pkginst)
-    pkginst <- c(pkginst,nmorg)
-    names(pkginst) <- c(nm, nmorg)
-    
-  }
-
-  zelstm  <- split.data.frame(zmat, zmat[,"Package"]) ###may not preserve order
-### to preserve the original order of package rows in zmat
-  ind <- sapply(pkginst, match,names(zelstm) )
-  zelstm <- zelstm[ind] 
-### I do not know if several versions of the same package
-### are stored in the data matrix;
-### keep lowest depth of dependency closer to root pkg
-  
-  zelst <- lapply(zelstm, function(mat){
-    vers <- na.omit(mat[,"Version"])
-    ix1 <- grep("1", mat[,"depth"])
-    
-    if(length(ix1) > 1){ ###returns fst level dependency
-      ixz <- c(grep("1z", mat[,"depth"]),grep("1Z", mat[,"depth"]))
-      if(length(ixz))
-        return(mat[ixz, ]) ###first from describe, "1z"
-      else
-        return(mat[ix1, ]) ###snd level "1"
-    }
-
-    return(mat[nrow(mat),])})  ##returns lowest level
-
-  ind <- sapply(pkginst, match,names(zelst) ) ###preserver order of rows packages
-  zelst <- zelst[ind]
-  
-  nmzelig <- names(zelst)
-  
-  nmzelig <- sapply(nmzelig, FUN="trim.blanks")
-  zmatuq <- matrix(unlist(zelst), ncol=cl, byrow=T)
-  
-  colnames(zmatuq) <- colnames(zmat)
-  
-  rwnm <- zmatuq[, "Package"]
-  rownames(zmatuq) <- zmatuq[, "Package"]
-  zelig <- trim.blanks(pkgorg["Package"])
-  names(zelig) <- NULL
-  
-  return(zmatuq)
-}
-
-### 
-### DESCRIPTION: Returns/Creates matrix of details corresponding to packages
-###              Zelig depends on directly or indirectly
-###              and are installed in the local machine.
-###              Every row corresponds to  a dependent package with the name,
-###              and version,installed in local machine, which migth not 
-###              be the same as the server where Zelig is running. 
-###
-### USES:     installed.packages(priority=prior,noCache=!cache)
-###           prior="NA"(large number), prior="high" (small number); 
-###           prior=NULL which picks up all in "NA" and "high".
-###           matinst <- zelig.installed.packages(mat <- zelig.all.packages(zmat))
-###              
-### INPUT  
-###        zmat = matrixDependencies(), is matrix of dependencies,
-###        as obtained from create.zelig.all.packages("Zelig")
-###        and then applying  zelig.all.packages
-###        that eliminates duplicates packages (fst column)
-###        disp boolean to show the putput in a GUI
-###        prior is the priority of installed.packages
-###        cache also input for parameter noCahe of installed.packages
-###        libpath library directory to search for; example, libpath= "~/.R/library-x86_64"
-###
-### OUTPUT: Matrix similar to available.packages, but only those packages
-###         that Zelig depends on and that are installed in the local machine.
-###         Rows for each package that Zelig depends on;
-###         columns are the name, and the locally installed version
-###         Also returns the R.version installed locally,as the last row of the matrix 
-###             
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/18/2007
-###
-zelig.installed.packages <- function(zmat=matrixDependencies(), disp=F, prior=NULL,cache=F, libpath=NULL)
-{
-
-  if(length(zmat) <= 0){
-    stop("Provide dependency matrix as from...zelig.all.packages")
-    
-  }
-  localinst <- installed.packages(lib.loc=libpath,priority=prior,noCache=!cache)
-  localinst <- getRidVersion(localinst)
-###  colnames(localinst)
-###  "Package"  "LibPath"  "Version"  "Priority" "Bundle"   "Contains"
-###  "Depends"  "Imports"  "Suggests" "Built"
-###  rownames(localinst)    names of the packages installed
-  no1 <- sessionInfo()$R.version$major
-  no2 <- sessionInfo()$R.version$minor
-  vv <- paste(no1,".",no2, sep="")
-  Rvers <- paste("R.version ", no1,".",no2, sep="")
-  
-  zrw   <- rownames(zmat) ### names of packages derived from Zelig
-  zrw   <- unlist(sapply(zrw, FUN="trim.blanks"))
-  zrwuq <- unique.default(zrw)
-  
-###eliminate duplicate rows if any
-  if(length(zrwuq) < length(zrw)){
-    zmat <-  zelig.all.packages(zmat)
-    zrw   <- rownames(zmat)
-    zrw   <- unlist(sapply(zrw, FUN="trim.blanks"))
-    zrwuq <- unique.default(zrw)
-  }
-  
-  lrw   <- rownames(localinst) ### names packages installed in local machine
-  lrw   <- unlist(sapply(lrw, FUN="trim.blanks"))
-  lrwuq <- unique.default(lrw)
-  
-  ind <- sapply(zrwuq, match, lrwuq)
-  
-  pkginst <- NULL
-  
-  if(length(ind) > 0){
-    ind <- na.omit(ind)
-    pkginst <- lrwuq[ind]
-    names(pkginst) <- lrwuq[ind]
-    
-  }
-  if(length(pkginst) <= 0){
-    message("No ",zrw[length(zrw)]," descendent packages installed in locally")
-    print(Rvers)
-    return(list())
-  }
-###print(pkginst)
-  
-  pkgin <- unique.default(intersect(zrwuq, lrwuq))
-  names(pkgin) <- pkgin
-  if(length(pkginst) <= 0 && length(pkgin) <= 0){
-    message("No ", zmat[1,1]," descendent packages installed in locally")
-    return(Rvers)
-  }
-  
-### no need just for testing.  Commented out because of warnings
-### messages that I do not want to fix. 
-###  if(any(sort(na.omit(pkgin)) != sort(na.omit(pkginst)))){
-
-###    stop("Bad calculation of package installed")
-###  }
-  
-  ind <- sapply(pkginst, match, lrw)
-  
-  matinst  <- NULL
-  versinst <- NULL
-  if(length(ind) > 0){
-    
-    matinst  <- localinst[ind,]
-    pkgsinst <- matinst[, "Package"]
-    versinst <- matinst[, "Version"]
-    
-    rownames(matinst) <- pkgsinst
-    
-  }
-  lst <- list()
-  ind <- 1:nrow(matinst)
-  lst <- sapply(ind, function(n){
-    vec <- paste(matinst[n, "Package"],"  ", matinst[n, "Version"],  sep="")
-    return(vec)
-  })
-  lst <- sapply(lst, FUN="trim.blanks")
-  lst <- unlist(lst)
-  lst <- unique.default(lst)
-
-  if(disp)
-    res   <- menu(lst, graphics=disp)
-  ix <- sapply(c("Package", "Version"), match, colnames(localinst))
-  ix <- na.omit(unlist(ix))
-  ret <- matinst[, ix]
-  ret <- rbind(ret, R=c("R", vv))
-  return(ret)}
-
-### DESCRIPTION stand alone function that gets the R version
-###             in the local environment and, if zmat is provided,
-###             in the environment were Zelig was built. We can
-###             also obtained the matrix zmat from 'file'.
-###
-### OUTPUT: the R version in the local and Zelig environments
-
-getRVersion <- function(zmat=matrixDependencies())
-{
-
-  if(length(zmat) <= 1 && zmat=="")
-    stop("Provide matrix dependencies")
-
-  no1 <- sessionInfo()$R.version$major
-  no2 <- sessionInfo()$R.version$minor
-  vv  <- paste(no1,".",no2, sep="")
-  Rvers <- paste("R ", no1,".",no2, sep="")
-  lst <- c(local=Rvers)
-  
-
-  rw  <- rownames(zmat)
-  rw  <- sapply(rw, FUN="trim.blanks")
-  
-  ln  <- nrow(zmat)
-  pkgorg <- zmat[ln,"Package"]
-  lv0 <- trim.blanks(zmat[ln,"depth"])
-  if(lv0 != "0"){
-    ln <- grep("^0$", zmat[,"depth"])
-    if(length(ln)) pkgorg <- zmat[ln,"Package"]
-  }
-  
-  if(length(dim(zmat)) > 0)
-    Rserver <- zmat[ln, "Node"]
-  else
-    Rserver <- zmat[["Node"]]
-  nm  <- names(lst)
-  lst <- c(lst, Rserver)
-  names(lst) <- c(nm, pkgorg)
-  
-  lst <- sapply(lst,function(m) strsplit(m, " ")[[1]][2])
-  return(lst)
-  
-}
-### DESCRIPTION stand alone function that gets version of Zelig
-###             in the local environment and, if zmat is provided,
-###             in the environment were Zelig was built. We can
-###             also obtained the matrix zmat from 'file'.
-###
-### OUTPUT: version of Zelig in both the local and Zelig environments
-
-getZeligVersion <-  function(zmat=matrixDependencies(),lib.loc=NULL){
-  
-  if(length(zmat) <= 1 && zmat=="")
-    stop("Provide matrix dependencies")
-
-  pk <- "Zelig"
-  
-  if(class(zmat) != "try-error"){
-    if(length(dim(zmat)) > 0)
-      { ### make sure itr is level "0"
-        ln <- nrow(zmat)
-        pk <- zmat[ln,"Package"]
-        lv0 <- trim.blanks(zmat[ln,"depth"])
-        if(lv0 != "0"){
-          ln <- grep("^0$", zmat[,"depth"])
-          if(length(ln)) pk <- zmat[ln,"Package"]
-        }
-      }else
-    pk <- zmat[["Package"]]
-  }
-  
-  desc <- try(packageDescription(pk,lib.loc=lib.loc), silent=TRUE)
-  Zvers <- NULL
-  Zvers <- desc$Version
-  if(length(Zvers) <= 0)
-    message(pk," is not installed locally")
-
-  lst <- c(local=Zvers)
-  if(class(zmat) =="try-error")
-    return(lst)
-  rw  <- rownames(zmat)
-  rw  <- sapply(rw, FUN="trim.blanks")
-  ix  <- c( grep("Zelig", rw),grep("zelig", rw)) 
-  if(length(ix) <= 0) ix <- nrow(zmat)
-  if(length(dim(zmat)) >0)
-    Zserver <- zmat[ix, "Version"]
-  else
-    Zserver <- zmat[["Version"]]
-
-  lst <- c(lst, Zideal=Zserver)
-  names(lst) <- c("local", pk)
-  return(lst)
-  
-}
-###DESCRIPTION: Takes the matrix of Zelig dependencies, zmat
-###             and the name of the library to search for Zelig.
-###             Finds the versions locally and in the server
-###             Returns a vector with versions and report differences
-###
-###
-compareZeligVersion <- function(zmat=matrixDependencies(),lib.loc=NULL){
-  vec <- getZeligVersion(zmat,lib.loc)
-  if(length(vec) <= 0)
-    return(vec)
-### only one row
-  
-  localvers <- vec[["local"]]
-  ix <- grep("local", names(vec))
-  
-  zvers <- vec[[-ix]]
-  
-  res <- compareVersion(localvers, zvers)
-  if(res != 0){
-    message("Local installed version for ",names(vec)[2] , " differ from Zideal")
-    return(list())
-  }
-  return(vec)
-}
-
-
-
-### 
-### DESCRIPTION: Returns/Creates matrix of details corresponding to packages
-###              Zelig depends on directly or indirectly
-###              and are NOT installed in the local environment.
-###              Every row corresponds to  a dependent package with the name,
-###              the version, the parent (node), relation, URL to download it,
-###              and depth in graph or level of dependency. 
-###
-### USES:        installed.packages()
-###              
-### USE   zmat is the matrix return with create.zelig.all.packages("Zelig")
-###       zmat <- zelig.all.packages(matrixDependencies()),
-###       Calls zelig.installed.packages, and will
-###       find derived packages installed locally,
-###       i.e. required or derived from zelig and live in local machine
-###       Compares then and returns new packages 
-###       level, which depth of dependencies pkg are derived from; default 1
-###       cache, use the installed.packages matrix from environmnet or  calculate
-###              from fresh as in noCache of installed.packages
-###
-### INPUT lib.loc libraries directories to search for.
-###        zmat is matrix of dependencies stored in system files
-###         matin, matrix with packages installed locally that belongs to zmat
-###         matin <- zelig.installed.packages(zmat,cache=noCache,libpath=lib.loc)
-###         level, the depth from root package
-###        (level of dependency of Zelig package)
-###       
-### OUTPUT: Matrix similar to available.packages,newpkgmat, but only those packages
-###         that Zelig depends on and that are NOT installed in the local machine.
-###         Rows for each package that Zelig depends; columns are
-###         the name, version, parent (node) and type of dependency, URL,
-###         and level of graph
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/24/2007
-### 
-zelig.new.packages <- function(lib.loc=NULL,zmat=matrixDependencies(),matin=NULL,level=1 )
-{
-####Input arguments################
-### present a graphic of results
-  disp  <- FALSE
-### whether to use cache information, correspond to noCache=!cache
-  cache <- FALSE
-#################################################
-
-  pkgnm <- zmat[dim(zmat)[1],1]
-  zrw <- zmat[, "Package"]
-  zrwuq <- unique.default(zrw)
-  lrw   <- rownames(matin)
-### names packages installed in local machine derived from zelig
-  lrw   <- unlist(suppressWarnings(sapply(lrw, FUN="trim.blanks")))
-  lrwuq <- unique.default(lrw)
-  dec <- 0
-  if(length(grep("^R",lrwuq[length(lrwuq)])) > 0) dec <- 1
-###  print(zrwuq)
-
-  ind <- sapply(lrwuq, match, zrwuq)
-  ind <- na.omit(unlist(ind))
-
-  if(length(ind) < (length(lrwuq)-dec))
-    stop("Bad calculation of zelig.installed.packages or bad input for locally installed")    
-  
-  if(length(ind) <= 0)
-    stop("Provides the installed pkgs with zelig.installed.packages")
-  
-  if(length(zrwuq[ind]) >= length(zrwuq)){
-    message("All ",pkgnm ," pkgs are installed.CHECK their versions")
-    lst <- NULL
-    return(lst)
-  }
-
-  pkgsnoinst <- zrwuq[-ind]
-###  print(zrwuq[-ind])
-  pkgnoin <- unique.default(setdiff(zrwuq, lrwuq))
-  names(pkgnoin) <- pkgnoin
-
-  if(length(pkgsnoinst) <= 0 && length(pkgnoin) <= 0){
-    
-    message("All ",  pkgnm, " pkgs are installed. CHECK their versions")
-    lst <- NULL
-    return(lst)
-  }
-  
-###  if(length(na.omit(pkgnoin)) && length(na.omit(pkgsnoinst)))
-###    if(any(sort(na.omit(pkgnoin)) != sort(na.omit(pkgsnoinst)))){
-  
-###    stop("Bad calculation of package installed")
-###  }
-
-  newpkgmat <- NULL
-  pkgmod <- sapply(pkgsnoinst,function(m) paste("^",m,"$",sep=""))
-  ind <- unlist(sapply(pkgmod,grep,zrw))
-  
-  if(length(ind) > 0){
-    ind <- unlist(ind)
-    ix <- suppressWarnings(grep.exact(zrw[ind], pkgsnoinst))$index ### you do not need this with extended=T
-    if(length(ix) > 0) ind <- ind[-ix]
-  }
-  if(length(ind) > 0){
-    newpkgmat <- zmat[ind,]
-    rwn <- rownames(newpkgmat)
-    rwn <- unlist(suppressWarnings(sapply(rwn, FUN="trim.blanks")))
-    rownames(newpkgmat) <- rwn
-  }
-
-
-  ints  <- intersect(pkgsnoinst, zrw)
-  rwz <- suppressWarnings(sapply(rownames(zmat), FUN="trim.blanks"))
-  rwz <- unlist(rwz)
-  rownames(zmat) <- rwz
-  matnoin <- zmat[rownames(zmat) %in% ints,]
-  if(length(matnoin)== ncol(zmat))
-    matnoin <- matrix(matnoin, nrow=1)
-
-  if(length(ind) <= 0){
-    message("No new packages ")
-    ##return(Rvers)  ## this var does not exists, r cmd check complains ## ferdi
-    return (NULL)
-  }
-###for display
-  nrw <- nrow(newpkgmat)
-  if(length(nrw) <= 0){
-    nrw <- length(newpkgmat)
-    lst <- as.list(paste(newpkgmat["Package"],"  ", newpkgmat["Version"], sep=""))
-  }else{
-    ind <- as.list(1:nrw)
-    lst <- sapply(ind, function(n){
-      vec <- paste(newpkgmat[n, "Package"],"  ", newpkgmat[n, "Version"], sep="")
-      
-      return(vec)
-    })
-  }
-
-  lst <- suppressWarnings(sapply(lst, FUN="trim.blanks"))
-  lst <- unlist(lst)
-  lst <- unique.default(lst)
-### if(disp) ###controls the Selection of packages as well
-  if(disp)
-    res <- menu(lst, graphics=disp)
-###select.list(ls)
-
-  if(length(level) > 0 || !is.na(level))
-    newpkgmat <- suppressWarnings(selectLevels(newpkgmat, level))
-  
-  return(newpkgmat)
-}
-### 
-### DESCRIPTION: Returns/Creates matrix of details corresponding to packages
-###              Zelig depends on directly or indirectly
-###              and are  installed in the local machine.
-###              Every row corresponds to  a dependent package with the name,
-###              the version, the parent, the relation to the parent.
-###              The version is the pkg version in the local machine, which
-###              could not be the same as the server where Zelig is running. 
-###
-### USES:        installed.packages()
-###              
-### USE  matrixDependencies() with zideal 
-###      matin is matrix of two columns (pkg, vers) with locally installed
-###      packages that are in zideal (derived from Zelig) as obtained from 
-###      zelig.installed.packages. 
-###      level, which depth of dependencies pkg are derived from; default 1
-###
-### INPUT   lib.loc libraries directories to search for.
-###         zmat, matrix with packages dependencies
-###         matin, matrix with packages installed locally that belongs to zmat
-###         matin <- zelig.installed.packages(zmat,cache=noCache,libpath=lib.loc)
-###         level, the depth from root package
-###
-###       
-### OUTPUT: Matrix similar to available.packages,but only those packages
-###         that Zelig depends and that are NOT installed in the local machine.
-###         Rows for each package that Zelig depends; columns are
-###         the name, version, parent and type of relation (dependency)
-###         from parent description.
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 02/06/2007
-###                   
-
-zelig.old.packages <- function(lib.loc=NULL,zmat= matrixDependencies(),matin=NULL, level=1 )
-{
-####Input arguments################
-### zmat is the matrix of dependencies stored in system files  
-### present a graphic of results
-  disp  <- FALSE
-### whether to use cache information, correspond to noCache=!cache
-  cache <- FALSE
-#################################################
-  
-### colnames(zmat) "Package" "Version" "Node" "Relation" "URL" "depth"
-  
-  rwm <- rownames(matin)
-  if(length(rwm) > 0) {
-    rwm <- sapply(rwm, FUN="trim.blanks")
-    ir <- grep("^R$",rwm)
-    if(length(ir)){
-      matin <- matin[-ir, ]
-      rwm <- rwm[-ir]
-    }
-    rownames(matin) <- rwm
-  }
-  zrwm <- rownames(zmat)
-  zrwm <- sapply(zrwm, FUN="trim.blanks")
-  rownames(zmat) <- zrwm
-  ind <- unlist(sapply(rownames(matin), match, rownames(zmat)))
-  ind <- na.omit(ind)
-  zmatinst <- zmat[ind, ]
- 
-  
-  ix <- sapply(rownames(matin),match,rownames(zmatinst))
-  ix <- na.omit(unlist(ix))
-  zmatinst <- zmatinst[ix, ]
-  diffmat <- NULL
-  for(n in 1:nrow(matin))
-    {
-      lpkg  <- trim.blanks(matin[n, 1])
-      lvers <- trim.blanks(matin[n, 2])
-      zpkg  <- trim.blanks(zmatinst[n,"Package"])
-      zvers <- trim.blanks(zmatinst[n,"Version"])
-      zdepth <- trim.blanks(zmatinst[n,"depth"])
-      if(!identical(lpkg, zpkg))
-        stop("Packages name should be identicals")
-      if(!identical(zvers,lvers)){
-        diffmat <- rbind(diffmat,c(matin[n,],Zideal =zvers,depth=zdepth))
-        rownames(diffmat) <- diffmat[,"Package"]
-      }
-    }
-  if(length(diffmat) <= 0){
-    message("All packages version are up-to-date")
-    lst <- NULL
-    return(lst)
-  }
-  
-  if(!is.na(level))
-    diffmat <- selectLevels(diffmat, level)
-  if(length(diffmat) > 0)
-    diffmat <- check.advance.versions(diffmat)
-  
-  return(diffmat)
-
-}
-### DESCRIPTION: Given the matrix output of zelig.new.packages, or zelig.old.packages
-###              select the level of dependencies specified by the integer parameter depth
-###              Returns mat with only the rows in depth
-###
-selectLevels <- function(mat, depth){
-  
-  if(is.na(depth))
-    return(mat)
-  
-  d1z <- "1Z"
-
-  if(!length(grep("1[z-Z]", depth)))
-    depth <- as.numeric(depth)
-  if(length(dim(mat))<=0 || dim(mat)[1] <= 1){
-    nmvec <- names(mat)
-    nmcol <- colnames(mat)
-    ix  <- unique.default(c(grep("depth", nmvec),grep("depth", nmcol)))
-    mat.depth <- mat[[ix]]
-    if(length(grep("^1[z-Z]$",mat.depth))<=0){
-      bool1 <-  as.numeric(mat.depth) <= depth 
-    }else{
-      bool1 <- mat.depth <= "1z" || mat.depth <= "1Z"
-    }
-    bool2 <- mat.depth <= as.character(depth)
-    
-    bool <- c(bool1, bool2)
-    
-    if(length(bool) && is.na(all(bool)))
-      bool <- length(grep(depth,mat.depth))
-    else if(length(bool2) && is.na(bool2))
-      bool <- length(grep(depth,mat.depth)) || bool1
-    else if(length(bool1) && is.na(bool1))
-      bool <- length(grep(depth,mat.depth)) || bool2
-    else
-      bool <- length(grep(depth,mat.depth)) || bool2 || bool1
-    
-    if(bool) return(mat)
-    
-    message("No packages found at level selected of dependencies")
-###    print(mat)
-    return(list())
-    
-  }
-  
-  mat.depth <- mat[,"depth"]
-  ord <- order(mat.depth)
-  mat <- mat[ord, ]
-  mat.depth <- mat[,"depth"]  
-  
-  ix   <- grep("^1[z-Z]*$|^0$", mat.depth)
-  ln   <- length(ix)
-  if(depth=="1z" || depth=="1Z")
-     return(mat[sort(ix),])
-  if(as.numeric(depth) <= 1)
-    return(mat[sort(ix),])
-  
-  vv <- paste("^",2:depth,"$", sep="")
-  
-  ind <- unlist(sapply(vv, grep, mat.depth))
-  if(length(ind) <= 0)
-    return(mat[sort(ix), ])
-  
-  ix <- unique.default(c(ind, sort(ix)))
-  
-  return(mat[ix, ])
-  
-}
-### DESCRIPTION: finds if the locally installed versions of
-###              Zelig derived packages are higher than those
-###              of the required by the Zelig installation or zideal.
-###              If that is the case it rermoves the row corresponding
-###              to the package from the matrix mat
-###
-### INPUT: matrix of 4 columns from zelig.old.packages
-###        with name of pkg, version local, zelig version and depth of dependency
-###
-### OUTPUT same matrix mat but with possible rows removed.
-###        Those rows correspond to packages that have local versions larger than Zelig versions
-### helper function tro zelig.old.packages
-
-check.advance.versions <- function(mat){
-  ind.to.rm <- NULL
-  if(length(mat) <= 0)
-    return(mat)
-### only one row
-  if(length(dim(mat)) <= 0 )
-    {
-      localvers <- mat[["Version"]]
-      ix <- grep("Version", names(mat))
-      
-      zvers <- mat[[3]]
-      res <- compareVersion(localvers, zvers)
-      if(res >= 1){
-        message("Local installed version for ", mat["Package"], " higher than Zideal")
-        return(list())
-      }
-      return(mat)
-    }
-  
-  
-### more than one package
-  for(n in 1:nrow(mat)){
-    localvers <- mat[n,"Version"]
-    zvers <- mat[n,"Zideal"]
-    res <- compareVersion(localvers, zvers)
-    if(res >= 1){
-      message("Local installed version for ", mat[n,"Package"], " higher than Zideal")
-      ind.to.rm <- c(ind.to.rm, n)
-      next;
-    }
-  }
-  if(length(ind.to.rm) > 0)
-    mat <- mat[-ind.to.rm, ]
-  return(mat)
-  
-}
-
-### DESCRIPTION: Compares version numbers of Zelig derived packages
-###              that are installed in the local machine with those
-###              from the environment where Zelig was built, dependency level first.
-###              Also, obtaine the list of packages that are part of Zelig and
-###              not installed in the local machine, level 1. 
-###              Update the outdated packages and installed new pkgs on the fly.
-###
-### USES: zmat  matrix with dependencies as, zmat <- zelig.all.packages()
-###       --OR-- zmat <- matrixDependencies()
-###       zelig.installed.packages, zelig.old.packages and
-###       zelig.new.packages, pkg.update
-###
-### INPUT  destdir to save the download packages
-###        installWithVers as in install.packages, save pkg as YourCast_version#
-###        noCache as in install.packages, get it from fresh
-###        dependencies as in install.packages
-###        repos repository to download from 
-###        lib.loc directory path to search for packages
-###
-###
-### Elena Villalon
-### evillalon at iq.harvard.edu
-###
-
-zeligDepUpdate <- function(destdir=NULL,installWithVers=FALSE,lib.loc=NULL, repos="http://cran.r-project.org")
-{
-  
-####Input arguments################
-### zmat is the matrix of dependencies stored in system files  
-  zmat  <- matrixDependencies()
-### level of dependency
-  level <- 1
-### whether to install all dependent packages
-  dependencies <- TRUE
-### whether to use cache information, correspond to noCache=!cache
-  noCache <- TRUE
-####whether to install existent packages with older versions
-  oldpkg <- TRUE
-### whether to install newer packages in the dependency list of Zelig
-  newpkg <- TRUE
-#################################################
-  vec <- compareZeligVersion(zmat,lib.loc)
-  if(length(vec) <= 0)
-    stop("Update ",zmat[nrow(zmat),"Package"])
-
-  zrw <- zmat[, "Package"]
-  zrwuq <- unique.default(zrw)
-  if(length(zrwuq) < length(zrw))
-    zmat <- zelig.all.packages(zmat) ### eliminates duplicates pkgs
-  Rvers <- getRVersion(zmat)
-  Rvers <- unlist(Rvers)
-  val <- compareVersion(Rvers[[1]], Rvers[[2]])
-  if(val != 0) stop("Local R version different from Zelig required version")
-  dm <- dim(zmat)
-  if(length(level) <= 0 || is.na(level))
-    level <- check.max.depth(zmat)
-  
-  if(length(dm) <= 0 || dm[1]==1){
-    message(zmat[1,1]," has no dependents")
-    return(zmat)
-  }else
-  pkgnm <- zmat[dm[1], 1]
-
-  if(!length(destdir))
-    destdir <- getwd()
-  
-  zrw   <- rownames(zmat) ### names of packages derived from Zelig
-  zrw   <- unlist(sapply(zrw, FUN="trim.blanks"))
-  zrwuq <- unique.default(zrw)
-  zold <- NULL
-  znew <- NULL
-  zinst <- zelig.installed.packages(zmat, disp=F, prior=NULL,cache=!noCache, libpath=lib.loc)
-  
-  if(oldpkg)
-    zold  <- zelig.old.packages(lib.loc=lib.loc,zmat,zinst,1)
-  
-  if(newpkg)
-    znew <-  zelig.new.packages(lib.loc=lib.loc,zmat,zinst,1)
-  if(!length(zold) && !length(znew)) {
-    message("All Zelig derived packages up-to-date")
-    return(list())
-  }
-
-  if(length(zold)>0)    
-    if(length(dim(zold)) <= 0)
-      ret <- pkg.update(zold,zmat,depend=dependencies,versinst=installWithVers,destdir=destdir,repos=repos)
-    else
-      ret <- apply(zold,1, FUN="pkg.update",zmat,dependencies,installWithVers,destdir,repos)
-
-  if(length(znew) >0)
-    if(length(dim(znew)) <= 0)
-      ret <- pkg.update(znew,zmat,depend=dependencies,versinst=installWithVers,destdir=destdir,repos=repos)
-    else 
-      ret <- apply(znew,1, FUN="pkg.update",zmat,dependencies,installWithVers,destdir,repos) 
-  
-}
-### DESCRIPTION: helper function to install packages derived from Zelig that 
-###              are either installed in the local machine and outdated or
-###              they are not all 
-###              
-### INPUT: vec contains the name of package and the desired version to install
-###        mat is the matrix of Zelig dependencies that is used to check the URL
-###        dependencies same as in install.packages
-###        repos repository to download from 
-###
-### USES: install.packages
-###
-pkg.update <- function(vec,mat,depend=F, versinst=T,destdir=NULL, repos="http://cran.r-project.org")
-{
-  pkg <- vec["Package"]
-  names(pkg)  <- NULL
-  pkg  <- trim.blanks(pkg)
-  vers <- vec["Zideal"]
-  names(vers) <- NULL
-  ix  <- match(pkg, rownames(mat))
-  url <- NULL
-  if(!is.na(ix)){
-    url <- try(mat[ix,"URL"], silent=T)
-    
-    if(class(url)=="try-error") url <- repos
-    if(url=="CRAN") url <- repos
-  }
-  
-  message("Trying url....", url)
-  message("For package...", pkg)
-  message("Destination directory for source files...", destdir)
-  
-  install.packages(pkg,repos=url,installWithVers=versinst,dependencies=depend,destdir=destdir)
-  return(list())
-}
-
-trim.blanks <- function(x) {
-### at the beginning of string"^" gets anny number (+) of white spaces
-  f <- x
-  if(length(x))
-    f <- na.omit(x)
-  
-  if(length(f) <= 0)
-    return(x)
-  if(length(f)>1)
-    print(f)
-  if(f=="" )
-    return(x)
-  x <- sub("^[[:space:]]*(.*)", "\\1",x) ###get \n\t
-  x <- sub('^ +', '', x) ###get white spaces
-  
-### at the ending of string"$" gets anny number (+) of white spaces
-  
-  x <- sub("(.*)[[:space:]]*$", "\\1", x)
-  x <- sub(' +$', '', x)
-  return(x)
-}
-### DESCRIPTION: Utility function to check the results of applying grep
-###              grep may not get the exact full name but uses a loose
-###              regex to get all names that contains the input words
-###              For example grep("abc", c("abc", "pab", "dabcm", "clr, "abc""))
-###              will return 1, 3, 5. This function eliminates 3, counting characters
-###              NOTE: no need to apply this function if you use grep with extended=TRUE
-###              grep("^describe$", c("describe", "le describe", "desc", "describeport"), extended=T)
-###              gets only [1] 1
-###
-### NOTE: match will get the exact full string and will dismiss anything
-###       that is not an exact match: match("abc", "pabcm")=NA; however,
-###       it only finds the first occurance,
-###       i.e. match("abc",c("pabcqr", "abc", "lmn","vabc","abc"))= 2
-###       Same as grep("^abc$", c("pabcqr", "abc", "lmn","vabc","abc"), extended=T)
-###  
-### USES:        grep
-###              
-### INPUT:  matinst <- grep.exact(); outcome a vector of character
-###         we want to check for correctness.
-###         input is another vector of the strings
-###         that need to be found in the outcome.  
-###        
-### OUTPUT: It checks that outcome and input contain the same values 
-###         and eliminate those that are not exact match between outcome and input.
-###         Returns outcome with all not exact matches eliminated; and
-###         index ix of the strins that were eliminated from the
-###         original outcome.
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/24/2007
-### 
-grep.exact <- function(outcome, input){
-  ind <- 1:length(outcome) 
-  names(ind) <- outcome
-  
-  ix <- sapply(ind, function(n){
-    ret <- NULL
-    if(length(outcome[n]) <= 0)
-      return(ret)
-    nm  <- trim.blanks(outcome[n])
-    pkg <- trim.blanks(input[n])
-    
-    if(nchar(nm) != nchar(pkg))
-      ret <- n
-    return(ret)})
-  ix <- unlist(ix)
-  if(length(ix) > 0)
-    outcome <- outcome[-ix]
-  lst <- list(list(outcome=outcome), list(index=ix))
-  return(lst)
-}
-### DESCRIPTION Helper function. If the package have attached the version number
-###             it removes them.Example, "YourCast_2.9-8" becomes "YourCast"
-###
-getRidVersion <- function(zmat, oneonly=FALSE){
-  nm <- NULL
-  
-  if(oneonly && length(grep("_", zmat)) <= 0)
-    return(zmat)
-  else if(oneonly){
-    nm <- sub("(.*)_([-0-9.]*)","\\1", zmat)
-    nm <- trim.blanks(nm)
-    return(nm)
-  }
-
-  if(length(dim(zmat)) <= 0 || dim(zmat)[1] <=1){
-    pkginst <- zmat["Package"]
-    
-  }else{
-    pkginst <- zmat[,"Package"]
-  }
-  
-  pkginst <- sapply(pkginst, function(nm){
-    if(length(grep("_", nm)) <= 0)
-      return(nm)
-    nm <- sub("(.*)_([-0-9.]*)","\\1", nm)
-    nm <- trim.blanks(nm)
-  })
-
-  pkginst <- unlist(pkginst)
-  if(length(dim(zmat)) <= 0|| dim(zmat)[1] <=1){
-    zmat["Package"] <- pkginst
-    
-  }else{
-
-    zmat[,"Package"] <- pkginst
-    rownames(zmat) <-  zmat[,"Package"]
-    
-    
-  }
-  return(zmat)
-}
-
-###   
-check.max.depth <- function(zmat,level="depth"){
-  clnm <- colnames(zmat)
-  ix <- grep(level, clnm)
-  if(length(ix) <= 0){
-    message("Level of dependency not included")
-    return(NULL)
-  }
-  vec <- zmat[,ix]
-  vec[vec == "1z"] <- "1"
-  vec[vec == "1Z"] <- "1"
-  return(max(as.numeric(vec)))
-  
-}
-###DESCRIPTION takes a file that stores the matrix with Zelig
-###            dependencies as obtained create.zelig.all.packages
-###            Returns the object or matrix stored in the file
-###            If uniquePkg=T, then it eliminates duplicates pkgs in the matrix
-###            of dependencies and only the lowest dependency level is included.
-###
-matrixDependencies <- function(file=system.file("zideal", "zideal.RData", package="Zelig"), uniqPkg=TRUE){
-  zmat <- try(eval(as.symbol(load(file))))
-  if(class(zmat) == "try-error"){
-    message("Bad input file ", file)
-    return(NULL)
-  }
-  if(uniqPkg)
-    zmat <- zelig.all.packages(zmat)
-  return(zmat)
-}
-###
-### DESCRIPTION: Compares the packages in Zelig matrix of dependencies,
-###              from the environment where Zelig was built, 
-###              with those that are installed in the local machine 
-###              at dependency level first, level=1.
-###              Finds those packages with older versions and thoes that are not installed 
-###              
-###
-### INPUT: lib.loc library to find locally packages that are installed
-###
-### USES:zelig.installed.packages, zelig.old.packages and
-###      zelig.new.packages, getRVersion, compareZeligVersion
-###
-### OUTPUT list with tow elements, znew for Zelig packages that are not installed
-###        and zold for Zelig packages that are installed wqith older versions. 
-###      
-### Elena Villalon
-### evillalon at iq.harvard.edu
-###
-zeligDepStatus <- function(lib.loc=NULL)
-{
-####Hiden input arguments################
-### zmat is the matrix of dependencies stored in system files  
-  zmat  <- matrixDependencies()
-### matin matrix of locally installed packages
-  matin <- NULL
-### level of dependency
-  level <- 1
-### present a graphic of results
-  disp  <- FALSE
-### whether to use cache information, correspond to noCache=!cache
-  cache <- FALSE
-#################################################
-  vec <- compareZeligVersion(zmat,lib.loc)
-  if(length(vec) <= 0)
-    stop("Update ",zmat[nrow(zmat),"Package"])
-  lst <- getRVersion(zmat)
-  lst <- unlist(lst)
-  val <- compareVersion(lst[[1]], lst[[2]])
-  if(val != 0)
-    stop("User R version different from Zelig required R version")
-  
-  if(length(level) <= 0 || is.na(level))
-    level <- check.max.depth(zmat)
-
-  zrw   <- rownames(zmat) ### names of packages derived from Zelig
-  zrw   <- unlist(suppressWarnings(sapply(zrw, FUN="trim.blanks")))
-  zrwuq <- unique.default(zrw)
-
-  if(length(zrwuq) < length(zrw)){
-    zmat <-  zelig.all.packages(zmat)
-    zrw   <- rownames(zmat)
-    zrw   <- unlist(suppressWarnings(sapply(zrw, FUN="trim.blanks")))
-    zrwuq <- unique.default(zrw)
-  }
- 
-  dm <- dim(zmat)
-  
-  if(length(dm) <= 0||dm[1]==1){
-    message(zmat[1,1]," has no dependents")
-    return(zmat)
-  }else
-  pkgnm <- zmat[dm[1], 1]
-  
-###  colnames(localinst)
-###  "Package"  "LibPath"  "Version"  "Priority" "Bundle"   "Contains"
-###  "Depends"  "Imports"  "Suggests" "Built"
-###  rownames(localinst)    names of the packages installed
-
-  if(length(matin) <= 0)
-    matin <- zelig.installed.packages(zmat,disp=F,cache=cache,libpath=lib.loc)
-  if(all(is.na(matin))|| length(matin) <= 0)
-    {
-      message("No packages derived from ",pkgnm, " are installed locally")
-      if(length(level) > 0 || !is.na(level))
-        newpkgmat <- suppressWarnings(selectLevels(zmat, level))
-      else
-        newpkgmat <- zmat
-      
-      tmp <- cbind(newpkgmat[,"Package"], rep(NA,nrow(newpkgmat)), newpkgmat[,"Version"])
-      colnames(tmp) <- c("Package", "Version", "Zideal")
-      ret <- c(znew=tmp)
-      return(ret)
-    }
-  
-###matin packages that zelig depends on and are installed locally:
-### matin has two columns pkg name and local version
-
-  zold <- zelig.old.packages(lib.loc, zmat, matin,level)
-  dmold <- dim(zold)
-  if(length(dmold) && dmold[1]>=1)
-    zold <- zold[, c("Package", "Version", "Zideal")]
-  else if(length(zold))
-    zold <- zold[c("Package", "Version", "Zideal")]
-  else
-    zold <- NULL
- 
-  znew <- zelig.new.packages(lib.loc, zmat,matin,level)
-  dmnew <- dim(znew)
-  tmp <- znew
-  if(length(dmnew) && dmnew[1] >= 1){
-    tmp <- cbind(znew[,"Package"], rep(NA,nrow(znew)), znew[,"Version"])
-    colnames(tmp) <- c("Package", "Version", "Zideal")
-  }else if(length(znew)){
-    tmp <- cbind(Package=znew["Package"], Version=rep(NA,1), Zideal=znew["Version"])
-   
-  }else
-  tmp <- NULL
-    
-  
-  mat <- rbind(zold,tmp)
-  if(length(dim(mat))){
-    pkgnm <- mat[,"Package"]
-    rownames(mat) <- pkgnm
-  }else{
-    pkgnm <-  mat["Package"]
-   
-  }
-  return(mat)
-}
-
-
-
-
diff --git a/R/zzz.R b/R/zzz.R
new file mode 100644
index 0000000..243b3d1
--- /dev/null
+++ b/R/zzz.R
@@ -0,0 +1,1381 @@
+# THIS FILE CONTAINS PACKAGE HOOKS FOR ZELIG
+# ------------------------------------------
+
+# @...: nothing
+# spill-over: output information about Zelig
+.onAttach <- function(...) {
+
+  package.name <- "Zelig"
+  mylib <- dirname(system.file(package = package.name))
+  ver <- packageDescription(package.name, lib.loc = mylib)$Version
+  build.date <- packageDescription(package.name, lib.loc = mylib)$Date
+
+
+  # build info
+  packageStartupMessage("ZELIG (Versions ", ver, ", built: ", build.date, ")")
+
+  # cat, for readability of the message text
+
+  # Zelig info - do not exceed 80char/line
+  packageStartupMessage("
++----------------------------------------------------------------+
+|  Please refer to http://gking.harvard.edu/zelig for full       |
+|  documentation or help.zelig() for help with commands and      |
+|  models support by Zelig.                                      |
+|                                                                |
+|  Zelig project citations:                                      |
+|    Kosuke Imai, Gary King, and Olivia Lau.  (2009).            |
+|    ``Zelig: Everyone's Statistical Software,''                 |
+|    http://gking.harvard.edu/zelig                              |
+|   and                                                          |
+|    Kosuke Imai, Gary King, and Olivia Lau. (2008).             |
+|    ``Toward A Common Framework for Statistical Analysis        |
+|    and Development,'' Journal of Computational and             |
+|    Graphical Statistics, Vol. 17, No. 4 (December)             |
+|    pp. 892-913.                                                |
+|                                                                |
+|   To cite individual Zelig models, please use the citation     |
+|   format printed with each model run and in the documentation. |
++----------------------------------------------------------------+
+
+")
+}
+
+# @param object a zelig object
+# @param envir an environment
+.GetGenericsS4 <- function(object, envir=parent.frame()) {
+  if (inherits(object$result, "list")) {
+    .ListS4Generics(classes=class(object$result[[1]]), env=envir)
+  }
+  else 
+    .ListS4Generics(classes=class(object$result), env=envir)
+}
+
+
+# @classes: classes
+# @where: compatibility with showMethods
+# @env: the environment to search for generics
+# return: a character-vector of function names
+# ********************************************
+# this function searches .AllMTable within the namespace
+# of the functions environment
+.ListS4Generics <- function(classes=NULL, where=NULL,
+                          env=topenv(parent.frame())) {
+  # get list of all generic functions
+  functions <- if (missing(where))
+    getGenerics()
+  else
+    getGenerics(where)
+
+  #
+  matches <- c()
+  functions <- as.character(functions)
+
+  #
+  for (f in functions) {
+    fdef <- getGeneric(f)
+    env <- environment(fdef)
+
+    table <- tryCatch(get(".AllMTable", envir=env), error=function(e) NULL)
+
+    if (is.null(table))
+      next
+
+    if (any(classes %in% ls(table)))
+      matches <- append(matches, f)
+  }
+
+  # return
+  flist <- c("zelig", "param", "as.parameters", "sim", "setx", "register", 'summary')
+  matches[ ! matches %in% flist ]
+}
+
+#' Describe a Zelig Model
+#'
+#' @param model.name 
+#' @param ... ignored parameters
+#' @return a 'description' object containing citation information
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.ZeligDescribeModel <- function(model.name, ...) {
+  # lie to zelig
+  dummy.zelig <- "dummy"
+  class(dummy.zelig) <- model.name
+
+  # return as a description
+  as.description(describe(dummy.zelig))
+}
+
+#' Get a Character-Vector of All Models with a 'zelig2' Function
+#'
+#' @note In order for a Zelig model to either execute correctly or be listed as
+#'   a legal Zelig model, the function name must be prefixed with 'zelig2'.
+#' @param zelig.only a boolean specifying whether we want to search only the 
+#'   Zelig namespace
+#' @return a character-vector of the Zelig models loaded on the user's machine
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+ZeligListModels <- function(zelig.only=FALSE) {
+  results <- if (zelig.only)
+    ls(pattern="^zelig2", envir=asNamespace("Zelig"))
+  else
+    apropos("^zelig2", mode="function")
+
+  # substitute and return
+  sub("^zelig2", "", results)
+}
+
+#' Get a Text-Block of Citation Information about a Zelig Model
+#' 
+#' @note This function is strictly used internally by Zelig
+#' @param model.name the name of a Zelig model
+#' @return a block of text giving a human readable (and APA compliant) block
+#'   citation text
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.GetModelCitationTex <- function(model.name)
+  cite(ZeligDescribeModel(model.name))
+
+#' Produce a 'description' Object from the Name of a Model
+#' @note The 'description' object is a list-style object containing citation
+#'   information
+#' @param model.name a character-string specifying a Zelig model
+#' @return a 'description' object specified by the 'model.name' parameter. This
+#'   object is created by executing the specified Zelig models' 'describe'
+#'   function
+#' @export
+ZeligDescribeModel <- function(model.name) {
+  dummy <-
+    "I love baseball.  You know, it doesn't have to mean anything.
+It's just very beautiful to watch."
+  
+  class(dummy) <- model.name
+
+  # describe
+  res <- describe(dummy)
+
+  # add model name
+  res$model <- model.name
+
+  # return
+  as.description(res)
+}
+
+#' Get a TeX-style Citation
+#' @param model a character-string specifying the name of the Zelig model of which 
+#'   to describe in TeX-style
+#' @return a string to be rendered as part of a LaTeX-style document
+#' @export
+TexCite <- function(model) {
+  # description object
+  descr <- ZeligDescribeModel(model)
+
+  # url
+  url <- "http://gking.harvard.edu/zelig"
+
+  # define title
+  title <- if (is.null(descr$text))
+    descr$model
+  else
+    paste(descr$model, ": ", descr$text, sep="")
+
+  # quote title string
+  title <- paste('"', title, '"', sep="")
+
+  # construct string
+  str <- paste(
+               "{\bf To cite this model in Zelig:}",
+               paste(descr$authors, descr$year, sep="."),
+               paste(title, "in Kosuke Imai, Gary King and Olivia Lau,"),
+               "\"Zelig: Everyone's Statistical Software,\"",
+               url,
+               sep = "\n"
+               )
+  str
+}
+
+#' Get a List of Categories for Describing Zelig Models
+#' @note This feature is being deprecated, as original functionality with the
+#'   Dataverse Project \url{thedata.org} is being reevaluated.
+#' @return a list of character-string specifying legal category types (as the
+#'   keys of the list) and their human-counterparts (as the values)
+#' @export
+.ZeligModelCategories <- function() {
+  list(continuous  = "Models for Continuous Dependent Variables",
+       dichotomous = "Models for Dichotomous Dependent Variables",
+       ordinal     = "Models for Ordinal Dependent Variables",
+       bounded     = "Models for Continous Bounded Dependent Variables",
+       multinomial = "Multinomial Choice Models",
+       count       = "Event Count Models",
+       mixed       = "Models for Mixed Dependent Variables",
+       ei          = "Ecological Inference Models"
+       )
+}
+
+#' List the Titles of the Zelig Statistical Models
+#' @return a list of manual titles for the Zelig software 
+#' @export
+ZeligListTitles <- function() {
+
+  #
+  models <- ZeligListModels()
+
+  #
+  lis <- list()
+
+  #
+  for (m in models)
+    lis[[m]] <- ZeligDescribeModel(m)$text
+
+  # turn into a vector with each entry having:
+  #  model_name: model_description
+  # e.g.
+  #  probit: Probit Regression for Dichotomous Dependent Variables
+  paste(names(lis), lis, sep=": ")
+}
+
+#' Whether an Arbitrary R-package has a Zelig2 Function within Its Namespace
+#' @note This function is used primarily internally to determine whether a
+#'   a package is contributing a function to the Zelig software suite
+#' @param pkg a character-string representing a package name
+#' @return whether the package contains any zelig2-functions
+#' @export
+has.zelig2 <- function(pkg) {
+  env <- asNamespace(pkg)
+  hits <- grep("^zelig2*", ls(envir=env))
+  length(hits) > 0
+}
+
+#' Whether a Statistical Package Depends on the Zelig Software Suite
+#' @note This function is used primarily internally to determine whether a
+#'   a package is contributing a function to the Zelig software suite
+#' @param package a character-string representing a package name
+#' @return whether the package lists Zelig as a dependency in its DESCRIPTION
+#' @export
+depends.on.zelig <- function(package="") {
+  zcomp <- packageDescription(package, fields="Depends")
+
+  if (is.na(zcomp))
+    return(FALSE)
+
+  zcomp <- unlist(strsplit(zcomp, " *, *"))
+
+  # "Zelig" %in% zcomp
+
+  # pattern to match things leading with Zelig, some spaces, and a parenthesis ending
+  # ex:
+  #     Zelig
+  #     Zelig (>= 3)
+  #     Zelig      (blah blah)
+  pattern <- "^Zelig *(?:\\(.*?\\))$"
+  length(grep(pattern, zcomp)) != 0
+}
+
+#' Get a List of Packages Installed on the Current Machine that Depend on Zelig
+#' @note This function is used primarily internally to determine whether a
+#'   a package is contributing a function to the Zelig software suite
+#' @return a character-vector of all zelig-dependent packages on the current
+#'   machine
+list.zelig.dependent.packages <- function() 
+  Filter(depends.on.zelig, .packages(all.available=TRUE))
+
+#' List Zelig Models Installed on the Current Machine
+#' @note This list is not necessarily complete
+#' @param with.namespace a boolean specifying whether 
+#' @return list of all zelig models
+list.zelig.models <- function(with.namespace=TRUE) {
+  # list the zelig-dependent packages
+  pkgs <- list.zelig.dependent.packages()
+
+  # include the core package
+  pkgs <- c("Zelig", pkgs)
+
+  # initialize functions variable
+  functions <- NULL
+
+  # create a list of every zelig2 function
+  for (pkg in pkgs) {
+    # get all zelig2 functions, then get their model name
+    models <- ls(pattern="^zelig2", envir=asNamespace(pkg))
+    models <- sub("^zelig2", "", models)
+
+    # add to results list
+    functions[models] <- pkg
+  }
+
+  # return
+  if (with.namespace)
+    # with model-name as the key, and namespace as the value
+    functions
+  
+  else
+    # with just a list of models
+    names(functions)
+}
+
+#' Append a Prefix to a Character String
+#' @note This function is exclusively used internally by Zelig
+#' @param name a character-string specifying the name of a variable
+#' @param envir an environment variable to search
+#' @param prefix a character-string to prefix the string with
+#'   this is applied until the name is unique
+#' @param sep a character-string that separates prefix and name
+.prefix <- function(name, envir, prefix="zelig", sep=".") {
+
+  # check to make sure this is an environment variable
+  if (!is.environment(envir)) {
+    warning()
+    envir <- globalenv()
+  }
+
+  # ensure some name is returned
+  if (!is.character(c(name, prefix, sep))) {
+    warning()
+    name
+  }
+
+  else if (length(name) > 1 || length(prefix) > 1 || length(sep) > 1) {
+    warning()
+    name
+  }
+
+  else if (!nchar(name)) {
+    warning()
+    sep <- "."
+  }
+
+  else {
+    while(exists(name, envir=envir))
+      name <- paste(prefix, name, sep=sep)
+
+    # return if nothing wonky happened
+    name
+  }
+}
+
+
+.GetGenerics <- function(...) UseMethod(".GetGenerics")
+
+# needs work
+.GetGenerics.MI <- function(...) new.env()
+
+# @zelig.object: a zelig object
+# @envir:        namespace to search with 'ls'
+# return:        a list of generic functions names to
+#                to define for zelig
+.GetGenerics.default <- function(zelig.object, envir=parent.frame()) {
+  if (is.null(zelig.object$S4))
+    stop(as.character(zelig.object$family[[1]]))
+  else if (zelig.object$S4) 
+    suppressWarnings(.GetGenericsS4(zelig.object, envir))
+  else
+    suppressWarnings(.GetGenericsS3(zelig.object, envir))
+}
+
+.GetGenericsS3 <- function(zelig.object, envir=parent.frame()) {
+  #
+  hash <- list()
+  cls <- class(zelig.object$result)
+  method.list <- as.character(unlist(mapply(methods, class=cls)))
+
+  regex <- paste("(", paste(cls, collapse="|"), ")", sep="|")
+
+
+  method.list <- gsub(regex, "", method.list)
+
+  meth.list <- c()
+  for (cl in c(class(zelig.object$result), "default")) {
+    method.list <- as.character(methods(class=cl))
+    method.list <- gsub(paste("\\.", cl, "$", sep=""), "", method.list)
+    meth.list <- unique(c(meth.list, method.list))
+  }
+
+  # final list
+  flist <- c("zelig", "param", "as.parameters", "sim", "setx", "register", 'qi', 'summary')
+  meth.list <- sort(unique(c(meth.list,
+                             names(get(".knownS3Generics")))))
+
+  meth.list[ ! meth.list %in% flist ]
+}
+
+# Numerical Derivative
+#
+# This method computes the numerical derivative at a point
+# @param f function (differentiable)
+# @param stencil number of points in stencil. This is currently ignored.
+# @param h size of mesh
+# @return anonymous function with the approximation
+# @note single variable numerical derivative
+.nderiv <- function(f, stencil=5, h=sqrt(.Machine$double.eps)) {
+  # return approximated derivative function
+  function (x) {
+    # construct the 5-point mesh, middle point omitted
+    # since it gets deleted anyway
+    x.stencil <- rep(x, 4) + c(2, 1, -1, -2)*h
+
+    # compute approximation
+    sum(sapply(x.stencil, f) %*% c(-1, 8, -8, 1))/12/h
+  }
+}
+
+
+
+# @F: function to invert
+# @f: derivative of function, or NULL to use numerical approximation
+# @x: initial guess
+# @tol: error-tolerance
+# @h: mesh size
+# @max.iter: number of iterations to perform before giving up
+# return: df(x_0)/dx
+# **note: newton-rhapson for single variables
+# **suggestion: replace with C code, otherwise won't be truly fast-enough
+.nr <- function(F, f=NULL, x = 1, a = 0,
+                tol      = sqrt(.Machine$double.eps),
+                h        = sqrt(.Machine$double.eps),
+                max.iter = 50) {
+  # save function to prevent recursions
+  saved.function <- F
+
+  # rewrite function to solve for a
+  if (!missing(a))
+    F <- function(x) saved.function(x) - a
+  
+  # if NULL assign numerical derivative
+  if (is.null(f))
+    f <- .nderiv(F)
+
+  #
+  count <- 1
+
+  #
+  while (abs(F(x)) > tol && count <= max.iter) {
+    # increment counter
+    count <- count + 1
+
+    # if derivative is zero, or near it
+    # (otherwise we have issues with solutions where x=0)
+    if (abs(f(x)) < 10^-8) {
+      x <- x + runif(1, min=-1, max=1)
+      next
+    }
+
+    # iterate
+    x <- x - F(x)/f(x)
+  }
+
+  if (count > max.iter)
+    warning("approximation failed to converge given specified tolerance")
+
+  # return result
+  x
+}
+
+
+# @F:
+# @f:
+# @x: initial guess
+# @tol: 
+# return: a functional form of the newton-rhapson approximation
+.NumInverse <- function(F, f=NULL, x = 1,
+                        tol      = (.Machine$double.eps)^.5,
+                        h        = sqrt(.Machine$double.eps),
+                        max.iter = 50) {
+  function (a) {
+    res <- c()
+
+    # kludgey, but just a hold-over for now
+    for (val in a) {
+      val <- .nr(F=F, f=f, x=x, a=val, tol=tol, h=h, max.iter=max.iter)
+      res <- c(res, val)
+    }
+
+    res
+  }
+}
+# This file contains overloaded operators 
+# However, developers - in general - should avoid the use of these features,
+# and instead use iterators when dealing with multiple fitted models or
+# quantities of interest.
+# The methods primarily come up when defining 'summarize' and 'plot' functions
+
+
+#' Extract a Value from a Fitted Model Object (Wrapped by Zelig)
+#' @S3method "[[" zelig
+#' @param z an object of type 'zelig'
+#' @param slot a character-string specifying the slot to extract from the fitted
+#'   model object
+#' @param ... subsequent slots to extract from the fitted model object
+#' @return contents of the specified slots
+#' @author Matt Owen \emph{mowen@@iq.harvard.edu}
+"[[.zelig" <- GetSlot.zelig
+
+#' Extraction Operator for Quantities of Interest
+#'
+#' Used internally by Zelig. A \code{qi} object stores each quantity of
+#' interest under a short acronym (e.g. "Expected Value" under \code{ev}),
+#' and carries an \code{.index} attribute mapping each human-readable title
+#' to its acronym. This operator resolves a title through that index, so
+#' that \code{qi$ev == qi[["Expected Value"]]} always holds.
+#' @note When possible, \code{qi} objects should be handled with iterators
+#'   rather than list-style extraction operators.
+#' @S3method "[[" qi
+#' @param self the \code{qi} object
+#' @param key a character-string specifying the title of the quantity of
+#'   interest to extract.
+#' @return if the quantity of interest exists, that entry. Otherwise,
+#'   \code{NULL}
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+"[[.qi" <- function(self, key) {
+  # Resolve the human-readable title to the acronym the value is stored under
+  acronym <- attr(self, ".index")[[key]]
+
+  # Unknown titles behave like ordinary list extraction: return NULL
+  if (is.null(acronym))
+    return(NULL)
+
+  # Dispatch "$" on the acronym rather than "[[", which would recurse back
+  # into this method; this makes qi[["Expected Value"]] equivalent to qi$ev
+  do.call("$", list(self, acronym))
+}
+#' Receiver Operator Characteristic Plots
+#'
+#' The 'rocplot' command generates a receiver operator characteristic plot to
+#' compare the in-sample (default) or out-of-sample fit for two logit or probit
+#' regressions.
+#'
+#' @usage
+#' rocplot(y1, y2, fitted1, fitted2,
+#' cutoff = seq(from=0, to=1, length=100), lty1="solid",
+#' lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
+#' col1=par("col"), col2=par("col"),
+#' main="ROC Curve",
+#' xlab = "Proportion of 1's Correctly Predicted",
+#' ylab="Proportion of 0's Correctly Predicted",
+#' plot = TRUE, 
+#' ...
+#' )
+#'
+#' @param y1 response variable for the first model
+#' @param y2 response variable for the second model
+#' @param fitted1 fitted values for the first model. These values may represent
+#'   either the in-sample or out-of-sample fitted values
+#' @param fitted2 fitted values for the second model
+#' @param cutoff A vector of cut-off values between 0 and 1, at which to
+#'   evaluate the proportion of 0s and 1s correctly predicted by the first and
+#'   second model.  By default, this is 100 increments between 0 and 1
+#'   inclusive
+#' @param lty1 the line type of the first model (defaults to 'line')
+#' @param lty2 the line type of the second model (defaults to 'dashed')
+#' @param lwd1 the line width of the first model (defaults to 1)
+#' @param lwd2 the line width of the second model (defaults to 1)
+#' @param col1 the color of the first model (defaults to 'black')
+#' @param col2 the color of the second model (defaults to 'black')
+#' @param main a title for the plot (defaults to "ROC Curve")
+#' @param xlab a label for the X-axis
+#' @param ylab a label for the Y-axis
+#' @param plot whether to generate a plot to the selected device
+#' @param \dots additional parameters to be passed to the plot
+#' @return if plot is TRUE, rocplot simply generates a plot and returns
+#'   nothing. Otherwise, a list with the following is produced:
+#'   \item{roc1}{a matrix containing a vector of x-coordinates and
+#'     y-coordinates corresponding to the number of ones and zeros correctly
+#'     predicted for the first model.}
+#'   \item{roc2}{a matrix containing a vector of x-coordinates and
+#'     y-coordinates corresponding to the number of ones and zeros correctly
+#'     predicted for the second model.}
+#'   \item{area1}{the area under the first ROC curve, calculated using
+#'     Riemann sums.}
+#'   \item{area2}{the area under the second ROC curve, calculated using
+#'     Riemann sums.}
+#' @export
+#' @author Kosuke Imai and Olivia Lau
+rocplot <- function(y1, y2, fitted1, fitted2,
+                    cutoff = seq(from=0, to=1, length=100), lty1="solid",
+                    lty2="dashed", lwd1=par("lwd"), lwd2=par("lwd"),
+                    col1=par("col"), col2=par("col"),
+                    main="ROC Curve",
+                    xlab = "Proportion of 1's Correctly Predicted",
+                    ylab="Proportion of 0's Correctly Predicted",
+                    plot = TRUE, 
+                    ...) {
+  roc1 <- roc2 <- matrix(NA, nrow = length(cutoff), ncol = 2)
+  colnames(roc1) <- colnames(roc2) <- c("ones", "zeros")
+  # At each cutoff record, per model: the proportion of observed 1's with
+  # fitted values at or above the cutoff, and the proportion of observed 0's
+  # with fitted values below it
+  for (i in 1:length(cutoff)) {
+    roc1[i,1] <- mean(fitted1[y1==1] >= cutoff[i]) 
+    roc2[i,1] <- mean(fitted2[y2==1] >= cutoff[i])
+    roc1[i,2] <- mean(fitted1[y1==0] < cutoff[i])
+    roc2[i,2] <- mean(fitted2[y2==0] < cutoff[i])
+  }
+  if (plot) {
+    plot(0:1, 0:1, type = "n", xaxs = "i", yaxs = "i",
+         main=main, xlab=xlab, ylab=ylab, ...)
+    lines(roc1, lty = lty1, lwd = lwd1, col=col1)
+    lines(roc2, lty = lty2, lwd = lwd2, col=col2)
+    # dotted 45-degree reference line
+    abline(1, -1, lty = "dotted")
+  }
+  else {
+    # Riemann-sum approximation of the area under each curve
+    area1 <- area2 <- array()
+    for (i in 2:length(cutoff)) {
+      area1[i-1] <- (roc1[i,2] - roc1[(i-1),2]) * roc1[i,1] 
+      area2[i-1] <- (roc2[i,2] - roc2[(i-1),2]) * roc2[i,1] 
+    }
+    return(list(roc1 = roc1, 
+                roc2 = roc2,
+                area1 = sum(na.omit(area1)),
+                area2 = sum(na.omit(area2))))
+  }
+}
+#' Create Function Call
+#'
+#' Translates the user's original call to \code{zelig}, plus the list returned
+#' by the model's \code{zelig2} method, into the call that fits the external
+#' statistical model. Large or unprintable argument values (functions, long
+#' vectors and lists, arbitrary S3/S4 objects) are stashed in a fresh
+#' environment under generated pseudonyms so the manufactured call stays
+#' readable.
+#' @param Call a \code{call} object, typically specifying the original function
+#'   call to \code{zelig}
+#' @param zelig2 the return-value of the \code{zelig2} method
+#' @param remove a list of character vectors specifying which parameters to
+#'   ignore from the original call to \code{zelig}
+#' @return a function call used to fit the statistical model
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+zelig.call <- function(Call, zelig2, remove = NULL) {
+  # environment that will hold the "messy" values referenced by pseudonym
+  # from the manufactured call
+  envir <- new.env()
+
+  # reserved words taken from the zelig2 method
+  # NOTE(review): 'hook' is extracted but not used within this function -
+  # confirm whether it was meant to be returned to the caller
+  func <- as.name(zelig2$.function)
+  hook <- zelig2$.hook
+
+  # remove the reserved words
+  zelig2$.function <- NULL
+  zelig2$.hook <- NULL
+  zelig2$.post <- NULL
+  zelig2$.model.matrix <- NULL
+
+  # make a list of the parameters to be passed to the external model
+  args <- names(formals(as.character(func)))
+
+  # remove certain parameters
+   for (key in remove) {
+     if (key %in% names(Call))
+       Call[[key]] <- NULL
+   }
+
+  # remove invalid params (anything the fitting function does not accept)
+  for (key in names(Call[-1])) {
+    if (! key %in% args)
+      Call[[key]] <- NULL
+  }
+
+
+
+  # A static list of objects that do not printout well or should be stored
+  # within a separate environment
+  # NOTE(review): 'messy.objects' is never referenced below (and lists
+  # "function" twice); candidates are instead detected via is.object /
+  # is.function / is.atomic / is.list. Confirm before removing.
+  messy.objects <- c("data.frame", "function", 'matrix', "family", "function")
+  neat.objects <- c("formula", "family")
+  skip <- c()
+
+  # Store values within 'messy.objects' within another environment, and give a 
+  # pseudonym
+  for (key in names(zelig2)) {
+    obj <- zelig2[[key]]
+    Class <- class(obj)
+    first.class <- Class[1]
+
+    if (is.object(obj)) {
+      # formulas and families print compactly; keep them inline in the call
+      if (all(Class %in% neat.objects)) {
+        Call[[key]] <- obj
+      }
+      else {
+        # any other classed object gets a pseudonym like ".Matrix"
+        Name <- store.object(obj, envir, ucfirst(first.class))
+        Call[[key]] <- as.name(Name)
+        skip <- c(skip, key)
+      }
+    }
+
+    else if (is.function(obj)) {
+      Name <- store.object(obj, envir, "Function")
+      Call[[key]] <- as.name(Name)
+      skip <- c(skip, key)
+    }
+    else if (is.atomic(obj) && length(obj) > 5) {
+      # long atomic vectors get a pseudonym like "NUMERIC12"
+      Name <- store.object(obj, envir, paste(toupper(Class[1]), length(obj),
+                                             sep=""))
+      Call[[key]] <- as.name(Name)
+      skip <- c(skip, key)
+    }
+    else if (is.list(obj) && length(obj) > 5) {
+      Name <- store.object(obj, envir, paste("List", length(obj), sep=""))
+      Call[[key]] <- as.name(Name)
+      skip <- c(skip, key)
+    }
+    else {
+      # this is a hack to prevent removal of elements if the value is NULL
+      # (assigning NULL into a call deletes the entry, so append instead)
+      null.list <- list(NULL)
+      names(null.list) <- key
+
+      # the two statement are *slightly* different
+      if (is.null(obj)) {
+        Call <- as.call(append(as.list(Call), null.list))
+      }
+      else {
+        Call[[key]] <- obj
+      }
+    }
+
+  }
+
+  # Guarantee all zelig2 names are included (including model, etc)
+  for (key in names(zelig2)) {
+    if (key %in% skip)
+      next;
+
+    if (!is.null(zelig2[[key]]))
+      Call[[key]] <- zelig2[[key]]
+    else {
+      # Clear the entry. Don't worry. It's going to get re-added later in this
+      # Else-block.
+      Call[[key]] <- NULL
+
+      # Create the NULL paramater
+      dummylist <- list(NULL)
+      names(dummylist) <- key
+
+      # Cast as a list, so we can use append
+      Call <- as.list(Call)
+
+      # Append the entry
+      Call <- as.call(append(Call, dummylist))
+    }
+  }
+
+
+  # Change function value
+  Call[[1]] <- func
+
+  list(call=Call, envir=envir)
+}
+
+#' Store Object in Environment with a Fake Name
+#'
+#' This function takes the value of an object and stores it within a specified 
+#' environment. This is similar to simply using the \code{assign} function, but
+#' will not overwrite existing values in the specified environment. It
+#' accomplishes this by appending a prefix to the name of the variable until
+#' the name becomes unique.
+#' @note This method does not correct invalid names. That is, there is no test
+#'   to determine whether the submitted name is valid.
+#' @param obj any object
+#' @param envir an environment object, which will contain the object with the
+#'   assigned name
+#' @param name a character-string specifying the name that the object will be
+#'   stored as in the specified environment
+#' @param prefix a character string specifying the prefixes to append to names
+#'   that already have matches in the destination environment
+#' @return a character-string specifying the name of the object in the
+#'   destination environment
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+store.object <- function (obj, envir, name=NULL, prefix=".") {
+
+  # Robustness fix: with the default name=NULL, 'name %in% variables' below
+  # evaluates to logical(0) and while() fails with the cryptic error
+  # "argument is of length zero". Fail fast with a clear message instead.
+  if (is.null(name))
+    stop("'name' must be a non-NULL character-string")
+
+  variables <- ls(envir=envir)
+  
+  # ensure name is unique by prepending 'prefix' until there is no clash
+  while (name %in% variables)
+    name <- paste(prefix, name, sep="")
+
+  assign(name, obj, envir)
+
+  name
+}
+
+#' Uppercase First Letter of a String
+#' 
+#' Capitalizes the first character of each element of a character vector and
+#' lower-cases the remaining characters.
+#' @param str a vector of character-strings
+#' @return a vector of character strings
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+ucfirst <- function (str) {
+  first.char <- toupper(substring(str, 1, 1))
+  remaining <- tolower(substring(str, 2))
+  paste(first.char, remaining, sep = "")
+}
+#' Search for, Copy, and Customize Template for a Newly Created Zelig Package
+#'
+#' This is used internally by \code{zelig.skeleton}. It copies the four model
+#' source templates (zelig2, param, qi, describe) shipped with the Zelig
+#' package into the new package's R directory, substituting the model name
+#' into each file.
+#' @param model a character-string specifying the name of the model
+#' @param pkg a character-string specifying the name of the package
+#' @param path a character-string specifying the base path to the package's
+#'   parent directory
+#' @return This function is used for its side-effects.
+.copy.templates <- function (model, pkg, path) {
+  # path to R folder
+  r.path <- file.path(path, pkg, 'R')
+
+  # source files
+  zelig2 <- system.file('templates', 'zelig2.R', package="Zelig")
+  param <- system.file('templates', 'param.R', package="Zelig")
+  qi <- system.file('templates', 'qi.R', package="Zelig")
+  describe <- system.file('templates', 'describe.R', package="Zelig")
+
+  # create R directory
+  dir.create(r.path, showWarnings=FALSE)
+
+  # destination files
+  zelig2.dest <- file.path(r.path, paste('zelig2', model, '.R', sep=""))
+  param.dest <- file.path(r.path, paste('param', model, 'R', sep="."))
+  qi.dest <- file.path(r.path, paste('qi', model, 'R', sep="."))
+  describe.dest <- file.path(r.path, paste('describe', model, 'R', sep="."))
+
+  # create blank files
+  # Consistency fix: describe.dest was previously omitted from this call
+  # (writeLines below created it implicitly); create all four explicitly
+  file.create(zelig2.dest, param.dest, qi.dest, describe.dest)
+
+  # substitute the model name into each template
+  zelig2.lines <- .substitute.expressions(zelig2, model=model)
+  param.lines <- .substitute.expressions(param, model=model)
+  qi.lines <- .substitute.expressions(qi, model=model)
+  describe.lines <- .substitute.expressions(describe, model=model)
+
+  # write to file
+  writeLines(zelig2.lines, con = zelig2.dest)
+  writeLines(param.lines, con = param.dest)
+  writeLines(qi.lines, con = qi.dest)
+  writeLines(describe.lines, con = describe.dest)
+
+  TRUE
+}
+
+
+#' Make a DESCRIPTION File for a Specific Package
+#'
+#' Writes a minimal DESCRIPTION file for a newly generated Zelig model
+#' package.
+#' @param pkg a character-string specifying the name of the package
+#' @param author a vector of strings specifying the names of contributors
+#' @param email a character-string specifying the email of the maintainer
+#' @param depends a vector of strings specifying package dependencies
+#' @param url - ignored -
+#' @param path a character-string specifying the location of the package
+#' @return nothing
+.make.description <- function (pkg, author, email, depends, url, path='.') {
+  model <- pkg
+  description.file <- file.path(path, model, 'DESCRIPTION')
+
+  # make author list human-readable
+  author <- .get.list.as.text(author)
+
+  # NOTE(review): 'author' was collapsed to a single string above, so
+  # author[1L] is the entire author list rather than the first author -
+  # confirm that this is the intended Maintainer value
+  maintainer <- paste(author[1L], ' <', email, '>', sep="")
+
+  # every generated package depends on Zelig itself; de-duplicate and join
+  depends <- c("Zelig", depends)
+  depends <- unique(depends)
+  depends <- paste(depends, collapse=", ")
+
+  # field values for the DESCRIPTION file (numeric .1 is written as "0.1")
+  fields <- c(
+      Package = model,
+      Version = .1,
+      Date = as.character(Sys.Date()),
+      Title = "A Zelig Model",
+      Author = author,
+      Maintainer = maintainer,
+      Depends = depends,
+      Description = "A Zelig Model",
+      License = "GPL (>=2)",
+      URL = "http://gking.harvard.edu/zelig",
+      Packaged = gsub('\\s+', ' ', date())
+      )
+
+  # correctly write to file:
+  #   Package: 'model'
+  #   Version: .1
+  # etc.
+  writeLines(
+      paste(names(fields), ': ', fields, sep=""),
+      con = description.file
+      )
+}
+
+
+#' Substitute Keyword Markers in a Template File
+#'
+#' Reads a template file and replaces every \code{\\key\\}-style marker with
+#' the value supplied for that key via \code{...}.
+#' @note This function fails if passed non-alphanumeric variable names. In
+#'   particular, the parameters cannot contain periods, etc.
+#' @param .file the name of the file to replace
+#' @param ... named character values to substitute into the template
+#' @return a character-string
+.substitute.expressions <- function(.file, ...) {
+  template <- readLines(con = .file, warn = FALSE)
+
+  pairs <- list(...)
+
+  for (nm in names(pairs)) {
+    # the marker in the template is the key wrapped in double-backslashes
+    marker <- paste('\\\\\\\\', nm, '\\\\\\\\', sep="")
+    template <- gsub(marker, pairs[[nm]], template)
+  }
+
+  template
+}
+
+#' Make \code{pkg}-package.R File for Roxygen Compliancy
+#' @param pkg the package name
+#' @param author a vector of characters specifying the authors of the Zelig
+#'   models
+#' @param email the email address of the package's maintainer
+#' @param depends a vector specifying package dependencies
+#' @param url a character-string specifying the package's website
+#' @param path location of the package
+#' @return NULL
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.make.package.R <- function (pkg, author, email, depends, url, path='.') {
+  # template shipped with Zelig, and its destination inside the new package
+  file <- system.file('templates', 'PACKAGE.R', package='Zelig')
+  dest <- file.path(path, pkg, 'R', paste(pkg, 'package.R', sep='-'))
+
+  # collapse authors and dependencies into human-readable strings
+  author <- .get.list.as.text(author)
+  depends <- paste(c('Zelig', depends), collapse=', ', sep=', ')
+
+  # fill the \author\, \package\ and \depends\ markers in the template
+  lines <- .substitute.expressions(author=author, package=pkg, .file=file,
+    depends=depends
+    )
+
+  writeLines(lines, con = dest)
+}
+
+
+#' Convert Character-Strings into Human-Readable Lists
+#'
+#' Converts its parameters into a grammatically correct, comma-delineated
+#' series ("a", "a and b", "a, b and c", ...).
+#' @param ... character-vectors and list of characters
+#' @param final.comma whether to add the final (Oxford) comma to the series.
+#'   Grammatical correctness is debatable
+#' @return a comma delineated string
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+.get.list.as.text <- function (..., final.comma=FALSE) {
+
+  items <- c(...)
+  n <- length(items)
+
+  # guard clauses for the short cases
+  if (n == 0)
+    return("")
+
+  if (n == 1)
+    return(items[[1L]])
+
+  if (n == 2)
+    return(paste(items, collapse = " and "))
+
+  # three or more: join all but the last with commas, then attach the last
+  lead <- paste(head(items, -1), collapse = ", ")
+  last <- tail(items, 1)
+  joiner <- if (final.comma) ", and " else " and "
+
+  paste(lead, last, sep = joiner)
+}
+#' Compute the Statistical Mode of a Vector
+#' @param x a vector of numeric, factor, or ordered values
+#' @return the statistical mode of the vector. If two modes exist, one is
+#'   randomly selected (by design)
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+Mode <- function (x) {
+  # build a table of values of x
+  tab <- table(as.factor(x))
+
+  # find the mode, then if there's more than one, select one randomly.
+  # NOTE: this draws from the RNG, so the result for multi-modal input
+  # depends on the random seed
+  v <- sample(names(which(tab == max(tab))), size=1)
+
+  # if it came in as a factor, we need to re-cast it
+  # as a factor, with the same exact levels
+  if (is.factor(x))
+    return(factor(v, levels=levels(x)))
+
+  # re-cast as any other data-type: the tabulated names are character
+  # strings, so convert back to the input's class (e.g. numeric)
+  as(v, class(x))
+}
+
+
+#' Compute the Statistical Median of a Vector
+#'
+#' For numeric vectors this is the ordinary median; for factors (typically
+#' ordered factors) it is the level at the ceiling of the median of the
+#' integer codes.
+#' @param x a vector of numeric or ordered values
+#' @param na.rm ignored
+#' @return the median of the vector
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+Median <- function (x, na.rm=NULL) {
+  # Bug fix: the previous version evaluated median(v) inside ifelse(), where
+  # 'v' was not yet defined, so numeric input raised "object 'v' not found".
+  # A plain if/else also avoids ifelse()'s vectorized evaluation semantics.
+  if (is.numeric(x))
+    v <- median(x)
+  else
+    v <- levels(x)[ceiling(median(as.numeric(x)))]
+
+  # re-attach the level information for ordered input
+  if (is.ordered(x))
+    v <- factor(v, levels(x))
+
+  v
+}
+
+#' Compute the Maximum Value of a Vector
+#'
+#' For numeric vectors this is the ordinary maximum; for ordered factors it
+#' is the largest level actually observed in the data.
+#' @param x a numeric or ordered vector
+#' @param na.rm ignored
+#' @return the maximum value of the vector
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+Max <- function (x, na.rm=NULL) {
+  if (is.numeric(x))
+    return(max(x))
+
+  else if (is.ordered(x))
+    # Bug fix: the previous version took max(levels(x)) - the
+    # lexicographically largest *label* - which ignored both the observed
+    # data and the declared level ordering. Use the largest observed level
+    # code instead.
+    return(factor(levels(x)[max(as.integer(x), na.rm = TRUE)],
+                  levels = levels(x)
+                  )
+           )
+
+  else
+    stop("Error: max cannot be computed for non-numeric and non-ordered values")
+}
+
+#' Compute the Minimum Value of a Vector
+#'
+#' For numeric vectors this is the ordinary minimum; for ordered factors it
+#' is the smallest level actually observed in the data.
+#' @param x a vector of numeric or ordered values
+#' @param na.rm ignored
+#' @return the minimum value of the vector
+#' @export
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+Min <- function (x, na.rm=NULL) {
+  if (is.numeric(x))
+    return(min(x))
+
+  else if (is.ordered(x))
+    # Bug fix: the previous version took min(levels(x)) - the
+    # lexicographically smallest *label* - which ignored both the observed
+    # data and the declared level ordering. Use the smallest observed level
+    # code instead.
+    return(factor(levels(x)[min(as.integer(x), na.rm = TRUE)],
+                  levels = levels(x)
+                  )
+           )
+
+  else
+    stop("Error: min cannot be computed for non-numeric and non-ordered values")
+}
+#' Load Required Packages, Reporting Any That Fail
+#'
+#' Attempts to \code{require} each named package. If all succeed, returns
+#' \code{TRUE} invisibly; otherwise the failing packages are printed along
+#' with a copy-pasteable installation command, and an error is raised.
+#' @param ... package names, either as bare symbols (default) or as
+#'   character-strings when \code{character.only = TRUE}
+#' @param character.only whether the \code{...} arguments are already
+#'   character-strings
+#' @return invisibly \code{TRUE} when every package loads
+#' @export
+loadDependencies <- function (..., character.only = FALSE) {
+  # Get arguments that aren't "character.only"
+
+  if (character.only) {
+    packs <- match.call(expand.dots = TRUE)[-1]
+    packs$character.only <- NULL
+    packs <- as.character(packs)
+  }
+  else
+    packs <- as.character(list(...))
+
+  # one success/failure flag per package
+  results <- list()
+
+  # attempt to attach each package
+  for (pkg in packs)
+    results[pkg] <- require(pkg, character.only = TRUE)
+
+  if (all(unlist(results)))
+    invisible(TRUE)
+  else {
+    failed.packs <- Filter(function (x) { return(x == FALSE) }, results)
+    list.of.packages <- paste('"', names(failed.packs), '"', sep = '', collapse = ', ')
+
+    message('The following packages did not load: ')
+    cat('  ')
+    message(list.of.packages)
+    message()
+
+    # Bug fix: the package name must be quoted in the suggested command;
+    # install.packages(foo) would look for an R object named 'foo'
+    install.string <- paste('  install.packages("', names(failed.packs), '")', sep = '', collapse = '\n')
+
+    message('To run this model, install these packages with the following command:')
+    message(install.string)
+    message()
+
+    stop('')
+  }
+}
+
+#' Produce All Combinations of a Set of Lists
+#' @note This function is used internally by the 'mi' constructors in order to
+#' produce the complete set of combinations of data-frames and factors by
+#' which to subset the data-frames.
+#' @param ... a set of lists to mix together
+#' @return all the combinations of the lists with repetition
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+mix <- function(...) {
+  # expand dot arguments
+  dots <- list(...)
+
+  # error-catching
+  if (length(dots) < 1)
+    return(NULL)
+
+  # prepare lists for first iteration
+  res <- dots[[1]]
+  dots <- dots[-1]
+
+  # this entire algorithm could be optimized,
+  # however, it will always be exponential time
+  while(length(dots) > 0) {
+    # get list to store new combinations in
+    new.list <- list()
+
+    # divide list
+    first <- dots[[1]]
+
+    # add new combinations: pair every element of the next list with every
+    # combination accumulated so far
+    for (f in first) {
+      for (r in res) {
+        row <- append(as.list(r), f)
+        new.list[['']] <- row
+      }
+    }
+
+    # Update list
+    res <- new.list
+
+    # Shift first entry off
+    dots <- dots[-1]
+  }
+
+  # Name each combination's entries after the original dot-arguments.
+  # NOTE(review): this assumes every argument in ... is named - confirm
+  for (k in 1:length(res))
+    names(res[[k]]) <- names(list(...))
+
+  res
+}
+#' Produce All Combinations of a Set of Lists
+#' @note This function is used internally by the 'mi' constructors in order to
+#'   produce the complete set of combinations of data-frames and factors by
+#'   which to subset the data-frames.
+#' @param ... a set of lists to mix together
+#' @return all the combinations of the lists with repetition
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+combine <- function(...) {
+  # expand dot arguments
+  dots <- list(...)
+
+  # error-catching
+  if (length(dots) < 1)
+    return(NULL)
+
+  # prepare lists for first iteration
+  res <- dots[[1]]
+  dots <- dots[-1]
+
+  # this entire algorithm could be optimized,
+  # however, it will always be exponential time
+  while(length(dots) > 0) {
+    # get list to store new combinations in
+    new.list <- list()
+
+    # divide list
+    first <- dots[[1]]
+
+    # add new combinations: pair every element of the next list with every
+    # combination accumulated so far
+    for (f in first)
+      for (r in res)
+        new.list[['']] <- c(r, f)
+
+    # update list
+    res <- new.list
+
+    # shift first entry off
+    dots <- dots[-1]
+  }
+
+  # m, as in matrix
+  m <- NULL
+
+  # format results as a matrix, one combination per row
+  for (r in res)
+    m <- rbind(m, r)
+
+  # name rows by position, columns after the original dot-arguments
+  rownames(m) <- 1:length(res)
+  colnames(m) <- names(list(...))
+
+  # return
+  m
+}
+
+#' Split a List into Two Lists
+#'
+#' This functions takes any list, and splits into two lists - one containing
+#' the values of arguments with specifically specified values and those without
+#' specified values.
+#' @note This function is a good candidate for deprecation
+#' @param args a list
+#' @return a list containing two entries: the key-value paired entires (titled
+#'   wordful) and the unkeyed entried (titled wordless)
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+#' @export
+#' @examples
+#' #list(wordful = list(x=1, y=2), wordless=list(2, "red"))
+splitUp <- function(args) {
+  wordless <- list()
+  wordful <- list()
+
+  # positional index into args, advanced alongside the names
+  k <- 1
+
+  # Bug fix: this early return previously spelled its key "wordfull",
+  # inconsistent with the "wordful" key used everywhere else
+  if (is.null(names(args)))
+    return(list(wordless=unlist(args), wordful=NULL))
+
+  for (key in names(args)) {
+    # an empty name marks a positional (wordless) argument
+    if (nchar(key) == 0)
+      wordless <- c(wordless, args[[k]])
+    else
+      wordful[[key]] <- args[[k]]
+
+    k <- k+1
+  }
+
+  list(wordless=wordless, wordful=wordful)
+}
+
+
+
+
+# Fetch the processed Rd object for a help topic.
+# @topic: character-string representing help-topic
+# @package: package containing help-topic
+# return: character-string of processed Rd file
+.get.help.file <- function(topic, package) {
+  # get package help-file if no topic is set
+  if (missing(topic))
+    topic <- package
+  
+  # error-checking: ensure topic and package are single character-strings.
+  # Bug fix: these conditions previously used '&&', so they could never be
+  # TRUE (a non-character value has length 1; a length-2 value is still
+  # character) and the validation was dead code; '||' expresses the intent.
+  if (!is.character(topic) || length(topic) > 1L)
+    stop("'topic' must be a single character-string")
+
+  if (!is.character(package) || length(package) > 1L)
+    stop("'package' must be a single character-string")
+
+  # root directory of the installed package
+  directory <- system.file(package=package)
+
+  # locate the Rd database entry for the topic (unexported utils helper)
+  path <- utils:::index.search(
+                               topic=topic,
+                               paths=directory
+                               )
+
+  # search package-help-database, get Rd file as string
+  utils:::.getHelpFile(file=path)
+}
+
+
+
+# List the help files shipped with an installed package.
+# @package: character-string specifying the name of a package to
+#           scan for help files
+# @as.table: boolean specifying whether the return value will be
+#            a table or names of Rd files
+# return: either a named vector (table), or an unnamed vector
+.list.help.files <- function(package, as.table=TRUE) {
+  # location of the package's help index
+  index.path <- file.path(
+                          system.file(package=package),
+                          "help",
+                          "AnIndex"
+                          )
+
+  # guard clause: bail out when the package has no help index
+  if (!file.exists(index.path)) {
+    warning("nothing was found")
+    return(NULL)
+  }
+
+  # read the tab-separated index of search-values and corresponding Rd files
+  idx <- scan(index.path,
+              what = list(names="", values=""),
+              sep = "\t",
+              quote = "",
+              na.strings = "",
+              quiet = TRUE
+              )
+
+  if (as.table)
+    # return as an index: Rd file names keyed by topic
+    structure(idx$values, names=idx$names)
+  else
+    # return only the names of the Rd files
+    idx$names
+}
+
+#' Compute the Intersection of Two Sets
+#' @note This function is used internally by Zelig
+#' @param a a vector
+#' @param b a vector
+#' @param unique a boolean determining whether a intersect b will contain only
+#'   unique elements
+#' @return the intersection of a and b
+.intersection <- function(a, b, unique=TRUE) {
+  # keep the elements of 'a' that also occur somewhere in 'b'
+  # (match(...) > 0 is exactly the definition of %in%)
+  common <- a[match(a, b, nomatch = 0L) > 0L]
+
+  # base:: qualification avoids any ambiguity with the 'unique' parameter
+  if (unique)
+    common <- base::unique(common)
+
+  # normalize a NULL result (e.g. when 'a' is NULL) to an empty vector
+  if (is.null(common))
+    c()
+  else
+    common
+}
+
+#' Hook to Update the Zelig Call with the Appropriate Call Object
+#' @note This function is used internally by Zelig, and currently deprecated.
+#' @param zobj a 'zelig' object
+#' @param call1 the original call to Zelig (currently unused)
+#' @param call2 the manufactured call to the model fitting function
+#' @return the 'zelig' object with a modified 'call' slot
+replace.call <- function(zobj, call1, call2) {
+  # Replace the stored call only when one exists and the replacement is a
+  # genuine call object.
+  # Bug fix: this previously tested is.call(zobj$result$call2) - a slot that
+  # never exists - so the replacement never took place.
+  if (!is.null(zobj$result$call) && is.call(call2))
+    zobj$result$call <- call2
+
+  zobj
+}
+
+#' Whether an Installed R-Package Depends on Zelig
+#' @note This function was used internally to determine whether an R-package
+#'   is Zelig compliant, but is now likely deprecated. This test is useless if
+#'   not paired with additional checks (see \code{is.zelig.compliant}).
+#' @param package a character-string naming a package
+#' @return whether this package depends on Zelig
+is.zelig.package <- function(package="") {
+  # relies on the unexported tools:::pkgDepends helper
+  "Zelig" %in% tools:::pkgDepends(package)$Depends
+}
+
+#' Whether a R-Package Contains a 'Yes' in its DESCRIPTION File's 'Zelig' Field
+#' @note This function was used internally to determine whether an R-package
+#'   is Zelig compliant, but is now likely deprecated.
+#' @param package a character-string specifying an installed R-package
+#' @return whether the package's DESCRIPTION file specifies Zelig-compliancy
+#' @seealso is.zelig.package
+#' @author Matt Owen \email{mowen@@iq.harvard.edu}
+is.zelig.compliant <- function(package="") {
+  # read the custom 'Zelig-Compliant' field from the package's DESCRIPTION
+  # (packageDescription returns NA when the field is absent)
+  zcomp <- packageDescription(package, fields="Zelig-Compliant")
+  zcomp <- tolower(zcomp)
+
+  # Bug fix: stop() previously carried an empty message, making this failure
+  # impossible to diagnose
+  if (! zcomp %in% c('yes', 'no'))
+    stop("package '", package,
+         "' does not declare a valid 'Zelig-Compliant' field ('yes'/'no') ",
+         "in its DESCRIPTION file")
+
+  zcomp == "yes"
+}
diff --git a/README b/README
index bde1ea3..2972baa 100644
--- a/README
+++ b/README
@@ -1,6 +1,33 @@
-3.5-2 (Nov. 8, 2011): Bug-fix release for R 2.13. Updated qi method to
-  correspond with a change to VGAM 0.8-4. This bugfix should not affect Mac
-  users.
+4.0-6 (August 25th, 2011): Experimental branch to improve the formula parser.
+
+4.0-5 (August 25th, 2011): Stable release for R 2.13.1. Removed dependency on 
+  the 'iterators' package. This is part of a general move towards shrinking the
+  size of Zelig's dependency list. To facilitate this change, the 'mi' object
+  has been made more robust, and  the 'zframe' helper object has been removed.
+
+  For specifics, please refer to the CHANGES file
+
+4.0-2 (May 16, 2011): Stable release for R 2.12.1. Major version update, and
+  addition of numerous API features. Core package now contains a mere 7
+  models. Dependencies having correspondingly been reduced to:
+    MASS
+    iterators
+    survival
+    methods
+
+  For the missing models, please see the software packages:
+    bivariate.zelig: bivariate generalized linear regressions
+    mixed.zelig: multilevel (mixed) generalized linear regressions
+    multinomial.zelig: multinomial logit and probit regressions
+    ordinal.zelig: ordinal logit and probit regressions
+    survey.zelig: survey-weighted generalized linear models
+
+  These models can be found on the Harvard IQSS website at:
+    http://gking.harvard.edu/zelig/
+  
+  Or via installations with:
+    source('http://people.iq.harvard.edu/~mowen/install.R')
+
 
 2.8-3 (May 29, 2007):  Stable release for R 2.4.0-2.5.0.  Fixed bugs in 
   help.zelig(), and summary for multinomial logit, bivariate probit, 
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
new file mode 100644
index 0000000..0e7d2d7
--- /dev/null
+++ b/RELEASE_NOTES
@@ -0,0 +1,88 @@
+Zelig v4.0-4 Release Notes (May 16, 2011)
+
+
+Introduction
+================================================================================
+This document is a brief overview of the current state of the Zelig project as
+of the 4.0-3 release. This release hopes to maintain the canonical Zelig syntax
+and interface for end-users, while supplying developers with tools to aid in
+the development of effective statistical modeling techniques. Emphasis has been
+placed on readability and modularity.
+
+As a result of this gargantuan change, a plethora of features, API
+functionality, and documentation has been added to the Zelig R-package. Several
+previously existing models, however, have been removed temporarily or moved
+from the Zelig core package to more-specific Zelig extensions.
+
+
+Project Information
+================================================================================
+The Zelig software suite is an easy-to-use R-package geared towards making
+complex statistical techniques available to end users, particularly those
+researching the quantitative social sciences. In particular, it offers unifying
+syntax and programming-style between seemingly disparate and unrelated 
+statistical models.
+
+To facilitate this purpose, Zelig (as of May 16th, 2011) includes an array of
+programming tools, geared towards allowing the rapid development, debugging, 
+and inclusion of new statistical models. That is, Zelig now facilitates and
+encourages collaboration between novel and pre-existing statistical packages.
+
+
+Author Information
+================================================================================
+Zelig is a collaborative effort by Harvard's Institute for Quantitative Social
+Sciences (Harvard IQSS). Publications, software releases, and additional
+information can be found at:
+  http://gking.harvard.edu/
+  http://iq.harvard.edu/
+
+
+Licensing
+================================================================================
+Zelig is licensed under the GNU General Public License version 2, and as such
+can be freely used and edited given proper attribution to Harvard's IQSS
+Department.
+
+
+What's New in this Release?
+================================================================================
+This release offers a large variety of coding style changes, as well as core
+functionality. Please carefully read the following:
+
+Major Changes (from version 3.5)
+--------------------------------------------------------------------------------
+- Models are now added to Zelig as separate extensions. The method
+  'zelig.skeleton' has been added to the R-package to facilitate this change.
+- The main Zelig package now contains a mere 8 models. 22 additional models are
+  available via extensions, geared towards adding specific functionality to the
+  Zelig software suite.
+- zelig.skeleton: a method used to create blank zelig packages. This follows
+  the style and format of the R-core method 'package.skeleton'
+- Simplified zelig2-function API. See "?zelig2" within an R-session for help
+- Enhanced API for 'param' and 'qi' functions. See developer documentation for
+  more information
+
+Minor Changes (from version 3.5)
+--------------------------------------------------------------------------------
+- Slight changes to the plotting of simulated quantities of interest. Most
+  changes are stylistic
+- Quantities of interest using two different sets of explanatory variables now
+  output information concerning the simulations of the second 'setx'
+  object's Predicted Values and Expected Values. This was previously not the
+  case
+- ZeligListModels: a method used to list available models, installed on the
+  current operating system.
+- More robust support for various ways of describing 'terms' of a statistical
+  model. This is essentially light error-detection
+
+Missing Features
+--------------------------------------------------------------------------------
+- The 'setx' method currently does not support setting multiple
+  counterfactuals in a single call to 'setx'. This feature is under current
+  development, and should soon be admitted into the main branch.
+- "ternaryplot" plotting style is omitted from the core package, and will be 
+  instead moved to the 'bivariate' and 'multinomial' Zelig modules.
+- "Average Treatment Effect" quantities of interest are not being included in 
+  zelig models temporarily. Simulation of these qi's will return pending a 
+  minor update to the 'setx' function.
diff --git a/data/friendship.RData b/data/friendship.RData
index ae95746..5def678 100644
Binary files a/data/friendship.RData and b/data/friendship.RData differ
diff --git a/data/sna.ex.RData b/data/sna.ex.RData
index 11d1ab6..6ee4208 100644
Binary files a/data/sna.ex.RData and b/data/sna.ex.RData differ
diff --git a/demo/00Index b/demo/00Index
index a90b9d0..6be303b 100644
--- a/demo/00Index
+++ b/demo/00Index
@@ -1,82 +1,30 @@
-aov                Analysis of Variance Model
-arima              Arima models
-blogit             Bivariate Logit regression and simulation 
-bivariate.probit   New model example: Bivariate probit MLE
-bprobit            Bivariate Probit regression and simulation 
-chopit             Compound Hierarchical Ordered Probit regression 
-conditional        Conditional prediction
 exp                Exponential regression and simulation 
 gamma              Gamma regression and simulation
-logit.gam          Generalized Additive Model for Dichotomous Dependent Variables
-normal.gam         Generalized Additive Model for Continuous Dependent Variables
-poisson.gam        Generalized Additive Model for Count Dependent Variables
-probit.gam         Generalized Additive Model for Dichotomous Dependent Variable
 logit              Logit regression and simulation 
 lognorm            Lognormal regression and simulation 
 ls                 Least Squares regression and simulation 
-match              Regression and simulation on a matched data set 
-mi                 Regression and simulation on multiply imputed data sets
-mlogit             Multinomial Logit regression and simulation 
-negbin             Negative Binomial regression and simulation
-ls.net             Network analysis least squares
-logit.net          Network analysis logit 
-cloglog.net        Social Network Complementary Log Log Regression for Dichotomous Dependent Variables
-gamma.net          Social Network Gamma Regression for Continuous, Positive Dependent Variables
-normal.net         Social Network Normal Regression for Continuous Dependent Variables
-poisson.net        Social Network Poisson Regression for Event Count Dependent Variables
-probit.net         Social Network Probit Regression for Dichotomous Dependent Variables
+mi                 Multiply imputed regressions and simulations
+negbinom           Negative Binomial regression and simulation
 normal             Normal (Gaussian) regression and simulation
-normal.regression  New model example for normal regression
-ologit             Ordinal Logit regression and simulation
-oprobit            Ordinal Probit regression and simulation
 poisson            Poisson regression and simulation
 probit             Probit regression and simulation
-quantile	   Quantile regresion model
 relogit            Rare events logit regression and simulation
-robust	           Robust estimation and simulation
-strata             Regression and simulation in a stratified model
-tobit              Regression for classical tobit model
-repl               Replication of model fitting and simulation procedures
-roc                ROC plot
-vertci             Vertical confidence interval plot
-weibull            Weibull regression and simulation
-ei.hier            Hierarchical Ecological Inference model and simulation
-ei.dynamic         Dynamic Ecological Inference model and simulation
-normal.bayes       MCMC regression model and simulation
+twosls             Two Stage Least Squares
+factor.bayes       MCMC factor analysis
 logit.bayes        MCMC logistic regression model and simulation
+normal.bayes       MCMC regression model and simulation
 probit.bayes       MCMC probit regression model and simulation
-tobit.bayes        MCMC tobit regression model and simulation
 poisson.bayes      MCMC poisson regression model and simulation
 mlogit.bayes       MCMC multinomial regression model and simulation
 oprobit.bayes      MCMC ordered probit regression model and simulation
-factor.bayes       MCMC factor analysis
-factor.ord         MCMC factor analysis for ordinal data
-factor.mix         MCMC factor analysis for mixed data
-irt1d              MCMC one-dimensional item response theory model
-irtkd              MCMC K-dimensional item response theory model
-ei.RxC 		   RxC ecological inference via penalized least-squares
-sur                Seemingly Unrelated Regression
-twosls             Two Stage Least Squares
-threesls           Three Stage Least Squares
 logit.gee          GEE logistic regression
 gamma.gee          GEE gamma regression
 normal.gee         GEE normal regression
 poisson.gee        GEE poisson regression
 probit.gee         GEE probit regression
-gamma.negvalues	   Rejection sampling with gamma regression
-ls.mixed           Mixed effects linear regression
-logit.mixed        Mixed effects logistic regression
-gamma.mixed        Mixed effects gamma regression
-poisson.mixed      Mixed effects poisson regression
-probit.mixed       Mixed effects probit regression
-coxph              Cox Proportional Hazard Regression for Duration Dependent Variables
-normal.survey	       Survey-Weighted Normal Regression for Continuous Dependent Variables
-logit.survey	       Survey-Weighted Logistic Regression for Dichotomous Dependent Variables
-probit.survey	       Survey-Weighted Probit Regression for Dichotomous Dependent Variables
-poisson.survey       Survey-Weighted Poisson Regression for Event-count Dependent Variables
+normal.survey	   Survey-Weighted Normal Regression for Continuous Dependent Variables
+logit.survey	   Survey-Weighted Logistic Regression for Dichotomous Dependent Variables
+probit.survey	   Survey-Weighted Probit Regression for Dichotomous Dependent Variables
+poisson.survey     Survey-Weighted Poisson Regression for Event-count Dependent Variables
 gamma.survey       Survey-Weighted Poisson Regression for Positive Continuous Dependent Variables
-
-
-
-
-
+Zelig.HelloWorld   Step-by-step demo on creating Zelig packages
diff --git a/demo/Zelig.HelloWorld.R b/demo/Zelig.HelloWorld.R
new file mode 100644
index 0000000..2c07199
--- /dev/null
+++ b/demo/Zelig.HelloWorld.R
@@ -0,0 +1,173 @@
+## Load data
+data(turnout)
+
+# The following demo is a step-by-step instruction guide on building a Zelig
+# model. For the most part, the steps have been simplified, and the model
+# itself is simply written to show broad ideas, rather than the specifics
+# of developing a fully functioning statistical model
+
+user.prompt("Press <return> to Read about External Methods")
+
+# Step 1: Creating and Using External Methods (optional)
+# ======================================================
+# Create a model to be used by the Zelig function. This method
+# should be designed with the singular purpose of fitting a statistical model.
+# That is, it should analyze a data-set given several parameters
+#
+# For the most part, this step is optional, as quite often R contains builtin
+# functions for doing these kinds of analyses. Regardless, this step is kept
+# here for completeness.
+#
+# The foreign model, in its simplest form, has only one of two requirements,
+# either:
+#   1. The model contains a slot labeled "formula", or
+#   2. There is a "formula" method defined for objects of this class
+
+user.prompt("Press <return> to Continue to Step 1")
+
+
+
+HelloWorldMethod <- function(formula, verbose=TRUE, data) {
+  if (verbose) {
+    print.form <- paste(as.character(formula), collapse=" ")
+    print.data <- as.character(substitute(data))
+  cat("Hello, Zelig!\n")
+  }
+
+  x <- list(formula = formula)
+  class(x) <- "HelloWorld"
+  x
+}
+
+user.prompt("Press <return> to Read about Describing Zelig Models")
+
+
+
+# Step 2: Describing Zelig Models (optional)
+# ==========================================
+# Describing the model is an optional, though important step if the developer
+# would like to be correctly cited in scholarly documents. In its most basic
+# form, it is simply a list specifying "authors", "text" as the title-text, 
+# and publication year.
+
+user.prompt("Press <return> to Continute to Step 2")
+
+describe.hello <- function (...) {
+  list(authors = "You", text='A "Hello, World!" Model')
+}
+
+user.prompt("Press <return> to Read about zelig2 Functions")
+
+# Step 3: Interfacing between the External Model and Zelig (crucial)
+# ==================================================================
+# The 'zelig2' function of a model is named in the style of the model's name
+# appended to "zelig2". This informs Zelig that a model by the appropriate
+# name exists. In this demo, "hello" is the model's name, and, as such,
+# the zelig2 function is named "zelig2hello".
+#
+# In the upcoming example, please note that the parameters of the external
+# method "HelloWorldMethod" are all included within the list that is being
+# returned from the "zelig2hello" function.
+#
+# In general, all "zelig2" functions follow this format. For more detailed
+# information concerning "zelig2" functions, type:
+#    ?zelig2
+#
+# within an R session.
+
+user.prompt("Press <return> to See an Example of a \"zelig2\" Method")
+
+
+
+zelig2hello <- function (formula, ..., data) {
+  list(                                            
+       .function = "HelloWorldMethod",
+       formula = formula,
+       data = data
+       )
+}
+
+user.prompt('Press <return> to Read about the "param" Functions')
+
+# Step 4: Simulating Parameters
+# =============================
+# The "param" function of a Zelig model is written by concatenating "param."
+# with the model's name. In the ongoing example, the "hello" model will have
+# a param function named "param.hello". 
+#
+# The return value of a "param" function is a list optionally containing the 
+# values: simulations, alpha, link, linkinv, and family. For more detailed
+# information concerning writing "param" functions, type:
+#   ?param
+#
+# within an R session.
+
+user.prompt('Press <return> to See an Example "param" Function')
+
+param.hello <- function(obj, num=1000, ...) {
+  list(
+       simulations = rbinom(n=num, 1, .5),
+       alpha = .5,
+       linkinv = NULL
+       )
+}
+
+user.prompt('Press <return> to Read about "qi" Methods')
+
+
+# Step 5: Simulating Quantities of Interest
+# =========================================
+# The "qi" method of a Zelig model is written by concatenating "qi." with the
+# model's name. In the ongoing example, the "hello" model will have a qi method
+# named "qi.hello".
+#
+# The return-value of a qi method is a list pairing titles of quantities of
+# interest and their simulations. For example, a model that computes
+# "Expected Values" will have a return value:
+#    list("Expected Values" = ev)
+#
+# where 'ev' is a variable containing the simulated expected value. For more 
+# detailed information concerning writing 'qi' methods, type:
+#   ?qi
+#
+# within an R session.
+
+user.prompt('Press <return> to See and Example "qi" Method')
+
+qi.hello <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+
+  possibilities <- c('Hello', 'World')
+  success.prob <- alpha(param)
+  
+  
+  sims <- rbinom(n=num, 1, success.prob) + 1
+  pv <- possibilities[sims]
+
+  list(
+       "Predicted Value: E(Y|X)" = pv
+       )
+}
+
+user.prompt('Press <return> to Read More about Zelig')
+
+# More Information about Zelig
+# ============================
+# That's it! Now that the zelig2, qi, and param methods are defined, Zelig can
+# run the "hello" model. For more detailed information concerning the Zelig
+# package, visit:
+#   http://gking.harvard.edu/zelig
+#
+# or type:
+#   ?Zelig
+#
+# within an R-session
+
+user.prompt('Press <return> to see the results of the "hello" model')
+
+## Run Zelig Functions
+z <- zelig(~ 1, model="hello", data=turnout)
+x <- setx(z)
+s <- sim(z)
+
+## Display Fictional Summary
+summary(s)
diff --git a/demo/aov.R b/demo/aov.R
deleted file mode 100644
index f2017c5..0000000
--- a/demo/aov.R
+++ /dev/null
@@ -1,47 +0,0 @@
- ## From Venables and Ripley (2002) p.165.
-### Attach data and set contrasts
-data(npk, package="MASS")
- op <- options(contrasts=c("contr.helmert", "contr.poly"))
-user.prompt()
-z.out1 <- zelig(formula=yield ~ block + N*P*K, model="aov", data=npk)
-user.prompt()
-summary(z.out1)
-user.prompt()
-###Set explanatory variables
- x <- setx(z.out1)
-###Simulate model at explanatory variables 
-user.prompt()
-s.out1 <- sim(z.out1, x = x)
-user.prompt()
-plot(s.out1)
-user.prompt()
-###Example with Error term
-z.out2 <- zelig(yield ~  N*P*K + Error(block), model="aov",data=npk)
-user.prompt()
-summary(z.out2)
-user.prompt()
- x <- setx(z.out2)
-user.prompt()
-s.out2 <- sim(z.out2, x=x)
-user.prompt()
-plot(s.out2)
-###Reset previous contrasts
-options(op)
-### Use data set oats from MASS
- z.out3 <- zelig(Y ~ N*V + Error(B/V), model="aov", data=oats)
-user.prompt()
-summary(z.out3)
-user.prompt()
- x.out <- setx(z.out3, N="0.0cwt", V="Golden.rain")
-x.out1 <- setx(z.out3, N="0.0cwt", V="Victory")
-user.prompt()
-s.out3 <- sim(z.out3, x = x.out,x1=x.out1)
-summary(s.out3)
-user.prompt()
- plot(s.out3)
-
-
-
-
-
-        
diff --git a/demo/arima.R b/demo/arima.R
deleted file mode 100644
index a31b5dc..0000000
--- a/demo/arima.R
+++ /dev/null
@@ -1,75 +0,0 @@
-#### Example 1: No External Regressors ####
-data(approval)
-
-# Estimate the ARIMA model, and summarize the results
-
-z.out1<- zelig(Diff(approve, 1)~lag.eps(2) + lag.y(2), data=approval, model="arima")
-summary(z.out1)
-user.prompt()
-
-# Set the number of time periods ahead for the prediction to run
-
-
-x.out1<- setx(z.out1, pred.ahead=10)
-
-
-
-# Simulate the predicted quantities of interest
-user.prompt()
-
-s.out1<- sim(z.out1, x=x.out1)
-
-# summarize and plot the results
-user.prompt()
-
-summary(z.out1)
-plot(s.out1, lty.set=2)
-
-#### Example 2: External Regressors, 1 Counterfactual, 1 Time Period ####
-
-# Estimates an ARIMA model where we include exogenous regressors 
-# in addition to lagged errors and lagged values of the dependent variable.  
-# This example shows the output if only one counterfactual value is specified for
-# a time period.  
-
-z.out2<- zelig(Diff(approve, 1)~ iraq.war + sept.oct.2001 + avg.price + lag.eps(1) + lag.y(2),
-               data=approval, model="arima")
-
-# Set the both the value and time period of counterfactual of interest.
-user.prompt()
-x.out2<- setx(z.out2, sept.oct.2001=list(time=45, value=0), cond=TRUE)
-
-# Simulate the quantities of interest
-user.prompt()
-
-s.out2<-sim(z.out2, x=x.out2) 
-
-# Summarizing and plotting the quantities of interest
-
-user.prompt()
-
-summary(s.out2)
-plot(s.out2)
-
-
-
-#### Example 3: External Regressors, Counterfactuals Over Many Time Periods ####
-
-# This example continues to use the same model specification as above, but will show
-# the output when several counterfactual values are specified.  
-
-user.prompt()
-
-x.out3<- setx(z.out2, sept.oct.2001=list(time=45:50, value=0))
-x1.out3<- setx(z.out2, sept.oct.2001=list(time=45:50, value=1))
-
-# Simulating the quantities of interest
-
-user.prompt()
-
-s.out3<- sim(z.out2, x=x.out3, x1=x1.out3)
-# Summarizing and plotting the quantities of interest.  Here we are 
-# only displaying the uncertainty resulting from parameter estimation  
-
-summary(s.out3)
-plot(s.out3, pred.se=FALSE)
diff --git a/demo/bivariate.probit.R b/demo/bivariate.probit.R
deleted file mode 100644
index b89f9d9..0000000
--- a/demo/bivariate.probit.R
+++ /dev/null
@@ -1,112 +0,0 @@
-describe.bivariate.probit <- function() {
-  category <- "bivaraite.dichotomous"
-  package <- list(name = "mvtnorm", 
-                  version = "0.7")
-  mu <- list(equations = c(2,2),               # Systematic component 
-             tagsAllowed = TRUE,          
-             depVar = TRUE, 
-             expVar = TRUE)
-  rho <- list(equations = c(1,1),              # Optional systematic component
-             tagsAllowed = FALSE,         #   Estimated as an ancillary
-             depVar = FALSE,              #   parameter by default
-             expVar = TRUE)
-  pars <- list(mu = mu, rho = rho)
-  list(category = category, parameters = pars)
-}
-
-zelig2bivariate.probit <- function(formula, model, data, M, ...) {
-  Zelig:::packageConflicts("Matrix")
-  require(mvtnorm)
-  mf <- match.call(expand.dots = TRUE)
-  mf$model <- mf$M <- NULL
-  mf[[1]] <- as.name("bivariate.probit")
-  as.call(mf)
-}
-
-bivariate.probit <- function(formula, data, start.val = NULL, ...) {
-
-  # fml <- parse.formula(formula, req=c("mu1","mu2"), opt="rho") # [1]
-  fml <- parse.formula(formula, model = "bivariate.probit")      # [1]
-  D <- model.frame(fml, data = data)
-  X <- model.matrix(fml, data = D, eqn = c("mu1", "mu2")) # [2a]
-  # X <- model.matrix(fml, data = D, shape = "stacked",   # [2b]
-  #                 eqn = c("mu1", "mu2"))
-  # X <- model.matrix(fml, data = D, shape = "array",     # [2c]
-  #                   eqn = c("mu1", "mu2"))
-  Xrho <- model.matrix(fml, data = D, eqn = "rho")
-
-  Y <- model.response(D)
-  terms <- attr(D,"terms")
-  start.val <- set.start(start.val, terms)
-  start.val <- put.start(start.val, 1, terms, eqn = "rho")
-
-  log.lik <- function(par, X, Y, terms) {
-    Beta <- parse.par(par, terms, eqn = c("mu1", "mu2"))   # [3a]
-    # Beta <- parse.par(par, terms, shape = "vector",      # [3b] & [3c]
-    #                   eqn = c("mu1", "mu2"))             
-    gamm <- parse.par(par, terms, eqn = "rho")
-    rho <- (exp(Xrho %*% gamm) - 1) / (1 + exp(Xrho %*% gamm))
-
-    mu <- X %*% Beta                                       # [4a]
-    # mu <- X %*% Beta; mu <- matrix(mu, ncol = 2)         # [4b]
-    # mu <- apply(X, 3, '%*%', Beta)                       # [4c]
-    llik <- 0
-    for (i in 1:length(rho)){
-      Sigma <- matrix(c(1, rho[i], rho[i], 1), 2, 2)
-      if (Y[i,1]==1)
-        if (Y[i,2]==1)
-          llik <- llik + log(pmvnorm(lower = c(0, 0), upper = c(Inf, Inf), 
-                                     mean = mu[i,], corr = Sigma))
-        else
-          llik <- llik + log(pmvnorm(lower = c(0, -Inf), upper = c(Inf, 0), 
-                                     mean = mu[i,], corr = Sigma))
-      else
-        if (Y[i,2]==1)
-          llik <- llik + log(pmvnorm(lower = c(-Inf, 0), upper = c(0, Inf),
-                                     mean = mu[i,], corr = Sigma))
-        else
-          llik <- llik + log(pmvnorm(lower = c(-Inf, -Inf), upper = c(0, 0), 
-                                     mean = mu[i,], corr = Sigma))
-        }
-    return(llik)
-  }
-
-  res <- optim(start.val, log.lik, method = "BFGS",
-               hessian = TRUE, control = list(fnscale = -1),
-               X = X, Y = Y, terms = terms, ...)
-
-  fit <- model.end(res, D) 
-  class(fit) <- "bivariate.probit"
-  fit
-}
-
-data(bivariate)
-
-user.prompt()
-
-## Shorthand notation for list(mu1 = y1 ~ x1, mu2 = y2 ~ x1)
-z.out1 <- zelig(cbind(y1, y2) ~ x1, model = "bivariate.probit",
-                data = bivariate)
-z.out1$coef
-user.prompt()
-
-## Different explanatory variables for each response
-z.out2 <- zelig(list(mu1 = y1 ~ x1, mu2 = y2 ~ x2),
-                model = "bivariate.probit", data = bivariate)
-z.out2$coef
-user.prompt()
-## Using tag() to constrain x1 to "gamma" in both formulae;
-##  while mu1:x3 and mu2:x3 are estimated separately
-z.out3 <- zelig(list(mu1 = y1 ~ tag(x1, "theta") + x3,
-                     mu2 = y2 ~ tag(x1, "theta") + x3),
-                model = "bivariate.probit", data = bivariate)
-z.out3$coef
-user.prompt()
-
-## Using tag() to constrain mu1:x1 = mu2:x2 = "gamma",
-##  and specifying an explanatory variable for rho.
-z.out4 <- zelig(list(mu1 = y1 ~ tag(x1, "theta") + x2,
-                     mu2 = y2 ~ tag(x2, "theta") + x3,
-                     rho = ~ x4),
-                model = "bivariate.probit", data = bivariate)
-z.out4$coef
diff --git a/demo/blogit.R b/demo/blogit.R
deleted file mode 100644
index c541edd..0000000
--- a/demo/blogit.R
+++ /dev/null
@@ -1,89 +0,0 @@
-# Load the sample data:
-data(sanction)
-
-#####  Example 1: Basics #####
-
-# Note that by default, zelig() estimates two parameter estimates
-# for explanatory variable as well as the correlation parameter; this
-# formulation is parametrically independent (estimating separate effects
-# for each explanatory variable), but stochastically dependent because
-# the models share an odds ratio.  See Example 2 for a more constrained
-# form of stochastic dependence.a list of at most 3 elements (corresponding to the 3
-# equations).  Each element consists of a character vector of the
-# variables omitted from each equation.  
-z.out1 <- zelig(cbind(import, export) ~ coop + cost + target,
-                model = "blogit", data = sanction)
-user.prompt()
-print(summary(z.out1))
-user.prompt()
-# Generate baseline values for the explanatory variables (with cost set
-# to 1, net gain to sender) and alternative values (with cost set to 4,
-# major loss to sender):
-x.low <- setx(z.out1, cost = 1)
-x.high <- setx(z.out1, cost = 4)
-# Simulate fitted values and first differences:  
-user.prompt()
-s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
-user.prompt()
-print(summary(s.out1))
-
-# Plot the s.out
-user.prompt()
-plot(s.out1)
-user.prompt()
-
-##### Example 2: Joint Estimation of a Model with        #####
-#####            Different Sets of Explanatory Variables #####
-
-# Estimate the statistical model, with import a function of coop
-# in the first equation and export a function of cost and target
-# in the second equation, by using the zeros argument:
-z.out2 <- zelig(list(mu1=import~coop,mu2=export~cost+target), 
-                model = "blogit", data = sanction)
-user.prompt()
-print(summary(z.out2))
-user.prompt()
-# Set the explanatory variables to their default values:
-x.out2 <- setx(z.out2)
-
-# Simulate draws from the posterior distribution:
-user.prompt()
-s.out2 <- sim(z.out2, x = x.out2)
-user.prompt()
-print(summary(s.out2))
-
-# Plotting marginal densities:
-user.prompt()
-plot(s.out2)
-
-##### Example 3: Joint Estimation of a Parametrically #####
-##### and Stochastically Dependent Model              #####
-
-# A bivariate model is parametrically dependent if Y1 and Y2 share
-# some or all explanatory variables, {\it and} the effects of the shared
-# explanatory variables are jointly estimated.  For example,
-user.prompt()
-z.out3 <- zelig(cbind(import, export) ~ coop + cost + target, 
-                constrain = list("1" = c("coop", "cost", "target"),
-                                 "2" = c("coop", "cost", "target")),
-                model = "blogit", data = sanction)
-user.prompt()
-print(summary(z.out3))
-
-# Note that this model only returns one parameter estimate for each of
-# coop, cost, and target.  Contrast this to Example 1 which returns two
-# parameter estimates for each of the explanatory variables.
-
-# Set values for the explanatory variables:
-user.prompt()
-x.out3 <- setx(z.out3, cost = 1:4)
-
-# Draw simulated expected values:  
-user.prompt()
-s.out3 <- sim(z.out3, x = x.out3)
-user.prompt()
-print(summary(s.out3))
-
-
-
-
diff --git a/demo/bprobit.R b/demo/bprobit.R
deleted file mode 100644
index 97d63b5..0000000
--- a/demo/bprobit.R
+++ /dev/null
@@ -1,90 +0,0 @@
-# Load the sample data:
-data(sanction)
-
-#####  Example 1: Basics #####
-
-# Note that by default, zelig() estimates two parameter estimates
-# for explanatory variable as well as the correlation parameter; this
-# formulation is parametrically independent (estimating separate effects
-# for each explanatory variable), but stochastically dependent because
-# the models share an odds ratio.  See Example 2 for a more constrained
-# form of stochastic dependence.
-user.prompt()
-z.out1 <- zelig(cbind(import, export) ~ coop + cost + target, 
-                  model = "bprobit", data = sanction)
-user.prompt()
-print(summary(z.out1))
-
-# Generate baseline values for the explanatory variables (with cost set
-# to 1, net gain to sender) and alternative values (with cost set to 4,
-# major loss to sender):
-user.prompt()
-x.low <- setx(z.out1, cost = 1)
-x.high <- setx(z.out1, cost = 4)
-
-# Simulate fitted values and first differences:  
-user.prompt()
-s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
-user.prompt()
-print(summary(s.out1))
-
-# Plot the s.out
-user.prompt()
-plot(s.out1)
-
-
-##### Example 2: Joint Estimation of a Model with        #####
-#####            Different Sets of Explanatory Variables #####
-
-# Estimate the statistical model, with import a function of coop
-# in the first equation and export a function of cost and target
-# in the second equation, by using the zeros argument:
-user.prompt()
-z.out2 <- zelig(list(mu1 = import ~ coop, mu2 = export ~ cost + target), 
-                  model = "bprobit", data = sanction)
-user.prompt()
-print(summary(z.out2))
-
-# Set the explanatory variables to their default values:
-user.prompt()
-x.out2 <- setx(z.out2)
-
-# Simulate draws from the posterior distribution:
-user.prompt()
-s.out2 <- sim(z.out2, x = x.out2)
-user.prompt()
-print(summary(s.out2))
-
-# Plotting marginal densities:
-user.prompt()
-plot(s.out2)
-
-##### Example 3: Joint Estimation of a Parametrically #####
-##### and Stochastically Dependent Model              #####
-
-# A bivariate model is parametrically dependent if Y1 and Y2 share
-# some or all explanatory variables, {\it and} the effects of the shared
-# explanatory variables are jointly estimated.  For example,
-user.prompt()
-z.out3 <- zelig(list(mu1 = import ~ tag(coop,"coop") + tag(cost, "cost") + tag (target, "target"),
-                     mu2 = export ~ tag(coop,"coop") + tag(cost, "cost") + tag (target, "target")), 
-                model = "bprobit", data = sanction)
-user.prompt()
-print(summary(z.out3))
-
-# Note that this model only returns one parameter estimate for each of
-# coop, cost, and target.  Contrast this to Example 1 which returns two
-# parameter estimates for each of the explanatory variables.
-
-# Set values for the explanatory variables:
-user.prompt()
-x.out3 <- setx(z.out3, cost = 1:4)
-
-# Draw simulated expected values:  
-user.prompt()
-s.out3 <- sim(z.out3, x = x.out3)
-user.prompt()
-print(summary(s.out3))
-
-
-
diff --git a/demo/chopit.R b/demo/chopit.R
deleted file mode 100644
index 8395fff..0000000
--- a/demo/chopit.R
+++ /dev/null
@@ -1,36 +0,0 @@
-data(free1, free2) 
-
-## Setting up the formula as a list for the self-response,
-##  vignettes, and the cut points (drawn from both the self-response
-##  and vignette data sets).  
-formulas <- list(self = y ~ sex + age + factor(country),
-                 vign = cbind(v1, v2, v3, v4, v5) ~ 1,
-                 tau  = ~ sex + age + factor(country))
-
-## Setting up the data as a list, one data set corresponding to the
-##  self-response, and one to the vignette responses.  Note that the
-##  tau variables must be in both data sets.  
-data <- list(self = free1, vign = free2)
-z.out <- zelig(formulas, data = data, model = "chopit")
-user.prompt()
-
-## Using defaults
-x.out1 <- setx(z.out)
-s.out1 <- sim(z.out, x = x.out1)
-summary(s.out1)
-user.prompt()
-
-## Calculating first differences
-x.out2 <- setx(z.out, age = 25)
-s.out2 <- sim(z.out, x = x.out1, x1 = x.out2)
-summary(s.out2)
-user.prompt()
-
-## Conditional predication in this case calculates E(mu|X,Y).
-##  This procedure involves numeric integration, which takes
-##  approximately 1 second per observation on 64-bit R.  
-x.out3 <- setx(z.out, cond = TRUE)
-s.out3 <- sim(z.out, x = x.out3)
-user.prompt()
-summary(s.out3) 
-
diff --git a/demo/cloglog.net.R b/demo/cloglog.net.R
deleted file mode 100644
index 6ef29a7..0000000
--- a/demo/cloglog.net.R
+++ /dev/null
@@ -1,24 +0,0 @@
-## Example CLogLog Model
-
-## Load sample data
-## Estimate the model
-## Summarize the results
-data(friendship)
-z.out <- zelig(friends ~ advice + prestige + perpower, model="cloglog.net", data=friendship)
-summary(z.out)
-user.prompt()
-
-## Estimating the risk difference (and risk ratio) between low personal power 
-## (25th percentile) and high personal power (75th percentile) while all the 
-## other variables are held at their default values. 
-x.high <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.75))
-x.low <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.25))
-user.prompt()
-
-## Simulate quantities of interest
-## Summarize the results of the simulation
-## Plot those results
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-summary(s.out)
-plot(s.out)
-
diff --git a/demo/conditional.R b/demo/conditional.R
deleted file mode 100644
index 84312a5..0000000
--- a/demo/conditional.R
+++ /dev/null
@@ -1,13 +0,0 @@
-data(turnout)
-z.out <- zelig(vote ~ age + educate + income, by = "race",
-               data = turnout, model = "probit")
-user.prompt()
-x.white <- setx(z.out$others, fn = NULL, data = turnout[turnout$race == "white",], cond = TRUE)
-user.prompt()
-s.others <- sim(z.out$others, x = x.white)
-summary(s.others)
-user.prompt()
-x.others <- setx(z.out$white, fn = NULL, data = turnout[turnout$race == "others",], cond = TRUE)
-s.others <- sim(z.out$white, x = x.others)
-summary(s.others)
-
diff --git a/demo/coxph.R b/demo/coxph.R
deleted file mode 100644
index 7ba3db9..0000000
--- a/demo/coxph.R
+++ /dev/null
@@ -1,90 +0,0 @@
-#### Example 1: Basic Example ####
-
-data(coalition)
-
-# Running the basic coxph model with robust clustered standard errors.
-user.prompt()
-z.out1 <- zelig(Surv(duration, ciep12) ~ invest + numst2 + crisis, 
-               robust = TRUE, cluster = "polar", model = "coxph", 
-               data = coalition)
-user.prompt()
-summary(z.out1)
-
-#  Setting the explanatory variables at their default values
-#  (mode for factor variables and mean for non-factor variables),
-#  with numst2 set to the vector 0 = no crisis, 1 = crisis. 
-user.prompt()
-x.low1<- setx(z.out1, numst2 = 0)
-x.high1 <- setx(z.out1, numst2 = 1)
-
-#  Simulating draws using the default method.
-user.prompt()
-s.out1 <- sim(z.out1, x = x.low1, x1 = x.high1)
-user.prompt()
-
-#  Viewing the simulated quantities of interest.
-summary(s.out1)
-user.prompt()
-plot(s.out1)
-
-#### Example 2: Example with Stratified Cox Model ####
-
-# Running the stratified coxph model with strata defined by polar variable.
-user.prompt()
-z.out2 <- zelig(Surv(duration, ciep12) ~ invest + numst2 + crisis + strata(polar), 
-                model = "coxph", data = coalition)
-user.prompt()
-summary(z.out2)
-
-#  Setting the explanatory variables at their default values
-#  with numst2 set to the vector 0 = no crisis, 1 = crisis and 
-#  strata set to polar=3. 
-user.prompt()
-x.low2<- setx(z.out2, numst2 = 0, strata = "polar=3")
-x.high2 <- setx(z.out2, numst2 = 1, strata = "polar=3")
-
-#  Simulating draws using the default method.
-user.prompt()
-s.out2 <- sim(z.out2, x = x.low2, x1 = x.high2)
-user.prompt()
-
-#  Viewing the simulated quantities of interest.
-summary(s.out2)
-user.prompt()
-plot(s.out2)
-
-#### Example 3: Example with Time-Varying Covariates
-
-#  Create sample toy dataset (from survival package):
-user.prompt()
-toy <- as.data.frame(list(start=c(1, 2, 5, 2, 1, 7, 3, 4, 8, 8),
-            stop=c(2, 3, 6, 7, 8, 9, 9, 9,14,17),
-            event=c(1, 1, 1, 1, 1, 1, 1, 0, 0, 0),
-            x=c(1, 0, 0, 1, 0, 1, 1, 1, 0, 0),
-	    x1=c(5, 5, 7, 4, 5, 6, 3, 2, 7, 4) ))
-
-#  Estimating parameter values for the coxph regression:
-user.prompt()
-z.out3 <- zelig(Surv(start, stop, event) ~ x + x1, model = "coxph", data = toy)
-user.prompt()
-summary(z.out3)
-
-#  Setting values for the explanatory variables:
-user.prompt()
-x.low3 <- setx(z.out3, x = 0)
-x.high3 <- setx(z.out3, x = 1)
-
-#  Simulating quantities of interest:
-user.prompt()
-s.out3 <- sim(z.out3, x = x.low3, x1 = x.high3)
-user.prompt()
-
-#  Viewing the simulated quantites of interest
-summary(s.out3)
-user.prompt()
-plot(s.out3)
-
-
-
-
-
diff --git a/demo/ei.RxC.R b/demo/ei.RxC.R
deleted file mode 100644
index da8df6f..0000000
--- a/demo/ei.RxC.R
+++ /dev/null
@@ -1,70 +0,0 @@
-## Attaching the example dataset:
-data(Weimar)
-
-## Estimating the model using eiRxC:
-z.out1 <- zelig(cbind(Nazi, Government, Communists, FarRight, Other) ~  
-		shareunemployed + shareblue + sharewhite + shareself + 
-		sharedomestic, model = "ei.RxC", data = Weimar)
-
-## summarizing the output
-summary(z.out1)
-user.prompt()
-                      
-## In-sample simulations from the posterior distribution:
-s.out1 <- sim(z.out1, num =10)
-
-## Summarizing in-sample simulations at aggregate level
-## weighted by the count in each unit:
-summary(s.out1)
-user.prompt()
-
-################# using covariate
-
-
-## Attaching the example dataset:
-data(Weimar)
-
-## Estimating the model using eiRxC:
-z.out2 <- zelig(cbind(Nazi, Government, Communists, FarRight, Other) ~  
-		shareunemployed + shareblue + sharewhite + shareself + 
-		sharedomestic, 
-                covar = ~ shareprotestants, 
-                model = "ei.RxC", data = Weimar)
-
-
-## summarizing the output
-summary(z.out2)
-user.prompt()
-
-## Setting values for in-sample simulations given 
-##  marginal values of X0, X1, T0 and T1:
-x.out2 <- setx(z.out2)
-user.prompt()
-                      
-## In-sample simulations from the posterior distribution:
-s.out2 <- sim(z.out2, num = 10)
-
-## Summarizing in-sample simulations at aggregate level
-## weighted by the count in each unit:
-summary(s.out2)
-
- 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/demo/ei.dynamic.R b/demo/ei.dynamic.R
deleted file mode 100644
index 23334eb..0000000
--- a/demo/ei.dynamic.R
+++ /dev/null
@@ -1,55 +0,0 @@
-## Attaching the example dataset:
-data(eidat)
-
-## Estimating the model using MCMCdynamicEI:
-z.out <- zelig(cbind(t0, t1) ~ x0 + x1, model = "ei.dynamic", data = eidat,
-               mcmc = 40000, thin = 10, burnin = 10000, verbose = TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)  
-user.prompt()
-heidel.diag(z.out$coefficients)  
-user.prompt()
-raftery.diag(z.out$coefficients)  
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for in-sample simulations given 
-##  marginal values of X0, X1, T0 and T1:
-x.out <- setx(z.out, fn = NULL, cond=TRUE)
-user.prompt()
-                      
-## In-sample simulations from the posterior distribution:
-s.out <- sim(z.out, x=x.out)
-
-## Summarizing in-sample simulations at aggregate level
-## weighted by the count in each unit:
-summary(s.out)
-user.prompt()
-## Summarizing in-sample simulations at unit level 
-## for the first 5 units:
-summary(s.out, subset = 1:5)
- 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/demo/ei.hier.R b/demo/ei.hier.R
deleted file mode 100644
index e6d65d5..0000000
--- a/demo/ei.hier.R
+++ /dev/null
@@ -1,55 +0,0 @@
-## Attaching the example dataset:
-data(eidat)
-
-## Estimating the model using MCMChierEI:
-z.out <- zelig(cbind(t0, t1) ~ x0 + x1, model="ei.hier", data = eidat,
-               mcmc = 40000, thin = 10, burnin = 10000, verbose = TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
-user.prompt()
-heidel.diag(z.out$coefficients)
-user.prompt()
-raftery.diag(z.out$coefficients)
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for in-sample simulations given 
-##  marginal values of X0, X1, T0 and T1:
-x.out <- setx(z.out, fn = NULL, cond = TRUE)
-user.prompt()             
-         
-## In-sample simulations from the posterior distribution:
-s.out <- sim(z.out, x = x.out)
-
-## Summarizing in-sample simulations at aggregate level
-## weighted by the count in each unit:
-summary(s.out)
-user.prompt()
-
-## Summarizing in-sample simulations at unit level 
-## for the first 5 units:
-summary(s.out, subset = 1:5)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/demo/exp.R b/demo/exp.R
index 82dd4ec..63535b5 100644
--- a/demo/exp.R
+++ b/demo/exp.R
@@ -1,33 +1,29 @@
+# exp
+# exp
+# exp
+
+# Fit the statistical model
+
 data(coalition)
 
-##  Creating a censored object for the dependent variable using
-##  Surv(duration, ciep12), where duration is the dependent variable
-##  (number of days alive during the observation period), and ciep12
-##  is the censoring indicator (coded 0 if alive and 1 if dead at the
-##  end of the observation period), and regressing this censored
-##  object using the selected explanatory variables:  
-user.prompt()
-z.out <- zelig(Surv(duration, ciep12) ~ invest + polar + numst2 + crisis,
-               model = "exp", data = coalition)
-user.prompt()
-summary(z.out)
+z.out <- zelig(Surv(duration, ciep12) ~ invest + polar + numst2 + crisis, model = "exp", data = coalition)
 
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-##  with numst2 set to the vector 0 = no crisis, 1 = crisis. 
 user.prompt()
+
+# Set explanatory variables
+
 x.low<- setx(z.out, numst2 = 0)
 x.high <- setx(z.out, numst2 = 1)
 
-##  Simulating draws using the default bootstrap method.
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
 user.prompt()
 
-##  Viewing the simulated quantities of interest, for every
-##  observation:
+# Simulate quantities of interest
+
+s.out <- sim(z.out, x = x.low, x1 = x.high, num = 10)
 summary(s.out)
+
 user.prompt()
-plot(s.out)
 
+# Plot simualted results
 
+plot(s.out)
diff --git a/demo/factor.bayes.R b/demo/factor.bayes.R
index de85df7..84d841d 100644
--- a/demo/factor.bayes.R
+++ b/demo/factor.bayes.R
@@ -16,11 +16,11 @@ z.out <- zelig(cbind(Agr,Exam,Educ,Cath,InfMort)~NULL,
 user.prompt()
 
 ## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
+geweke.diag(z.out$result$coefficients)
 user.prompt()
-heidel.diag(z.out$coefficients)
+heidel.diag(z.out$result$coefficients)
 user.prompt()
-raftery.diag(z.out$coefficients)
+raftery.diag(z.out$result$coefficients)
 user.prompt()
 
 ## summarizing the output
diff --git a/demo/factor.mix.R b/demo/factor.mix.R
deleted file mode 100644
index 45e4c70..0000000
--- a/demo/factor.mix.R
+++ /dev/null
@@ -1,27 +0,0 @@
-## Attaching the example dataset:
-data(PErisk)
-
-## Estimating the model using factor.mix:
-z.out <- zelig(cbind(courts,barb2,prsexp2,prscorr2,gdpw2)~NULL, 
-		data=PErisk, model="factor.mix",factors=1, 
-             	burnin=5000,mcmc=100000, thin=50, verbose=TRUE,
-                L0=0.25,tune=1.2)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
-user.prompt()
-heidel.diag(z.out$coefficients)
-user.prompt()
-
-
-## summarizing the output
-summary(z.out)
-
-
-
-
-
-
-
-
diff --git a/demo/factor.ord.R b/demo/factor.ord.R
deleted file mode 100644
index 21a9ea9..0000000
--- a/demo/factor.ord.R
+++ /dev/null
@@ -1,28 +0,0 @@
-## Attaching the example dataset:
-data(newpainters)
-
-## Estimating the model using factor.ord:
-z.out <- zelig(cbind(Composition,Drawing,Colour,Expression)~NULL,   
-                    data=newpainters, model="factor.ord",  
-                    factors=1,
-                    burin=5000,mcmc=30000, thin=5, verbose=TRUE,
-                    L0=0.5,tune=1.2)
-
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
-user.prompt()
-
-heidel.diag(z.out$coefficients)
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-
-
-
-
-
-
-
diff --git a/demo/gamma.R b/demo/gamma.R
index 5412083..83c7a68 100644
--- a/demo/gamma.R
+++ b/demo/gamma.R
@@ -1,27 +1,30 @@
 data(coalition)
-user.prompt()
+
+# Fit the statistical model
+
 z.out <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
-user.prompt()
-summary(z.out)
 
 ##  Setting the explanatory variables at their default values
 ##  (mode for factor variables and mean for non-factor variables),
 ##  with numst2 set to the vector 0 = no crisis, 1 = crisis. 
-user.prompt()
+
 x.low <- setx(z.out, numst2 = 0)
 x.high <- setx(z.out, numst2 = 1)
 
+
 ##  Simulating draws using the default bootstrap method.
-user.prompt()
+
 s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
 
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
-plot(s.out)
 
+# Summary of fitted model
+
+summary(z.out)
+
+# Summary of simulated quantities of interest
 
+summary(s.out)
 
+# Plot of simulated quantities of interest
 
+plot(s.out)
diff --git a/demo/gamma.mixed.R b/demo/gamma.mixed.R
deleted file mode 100644
index 502abf7..0000000
--- a/demo/gamma.mixed.R
+++ /dev/null
@@ -1,26 +0,0 @@
-data(coalition2)
-user.prompt()
-z.out <- zelig(duration ~ invest + fract + polar + numst2 + crisis +
-                          tag(1 | country),
-               data=coalition2, model="gamma.mixed", method="PQL",
-               family=Gamma(link="log"))
-user.prompt()
-summary(z.out)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-##  comparing the ruling coalition in the minority to the ruling
-##  coalition in the majority.
-user.prompt()
-x.high <- setx(z.out, numst2 = 1)
-x.low <- setx(z.out, numst2 = 0)
-
-##  Simulating draws using the default bootstrap method.
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1=x.high)
-user.prompt()
-
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
diff --git a/demo/gamma.negvalues.R b/demo/gamma.negvalues.R
deleted file mode 100644
index 40b238a..0000000
--- a/demo/gamma.negvalues.R
+++ /dev/null
@@ -1,35 +0,0 @@
-data(coalition)
-user.prompt()
-z.out <- zelig(duration ~ fract + numst2 + crisis, model = "gamma", data = coalition)
-user.prompt()
-summary(z.out)
-
-##  Setting the explanatory variables.
-user.prompt()
-x.low <- setx(z.out, fract=300, numst2 = 0, crisis=200)
-x.high <- setx(z.out, fract=300, numst2 = 1, crisis=200)
-
-##  Simulating draws using the default method.
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-
-## Simulating draws using bootstrap method.
-user.prompt()
-s.out1 <- sim(z.out, x=x.low, x1=x.high, bootstrap=T, num=10)
-user.prompt()
-
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
-plot(s.out)
-
-user.prompt()
-summary(s.out1)
-user.prompt()
-plot(s.out1)
-
-
-
-
diff --git a/demo/gamma.net.R b/demo/gamma.net.R
deleted file mode 100644
index aed44c5..0000000
--- a/demo/gamma.net.R
+++ /dev/null
@@ -1,23 +0,0 @@
-## Example Gamma Model
-
-## Load sample data
-## Estimate the model
-## Summarize the results
-data(friendship)
-z.out <- zelig(per ~ perpower, LF="inverse", model="gamma.net", data=friendship)
-summary(z.out)
-
-## Estimating the risk difference (and risk ratio) between low personal power 
-## (25th percentile) and high personal power (75th percentile) while all the 
-## other variables are held at their default values. 
-user.prompt()
-x.low <- setx(z.out, numst2 = 0)
-x.high <- setx(z.out, numst2 = 1)
-
-## Simulate quantities of interest
-## Summarize the results of the simulation
-## Plot those results
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-summary(s.out)
-plot(s.out)
-
diff --git a/demo/irt1d.R b/demo/irt1d.R
deleted file mode 100644
index 572c5e9..0000000
--- a/demo/irt1d.R
+++ /dev/null
@@ -1,32 +0,0 @@
-## Attaching the example dataset:
-data(SupremeCourt)
-names(SupremeCourt) <- c("Rehnquist","Stevens","OConnor","Scalia",
-                         "Kennedy","Souter","Thomas","Ginsburg","Breyer")
-
-user.prompt()
-
-## Estimating the model using MCMCirt1d:
-z.out <- zelig(cbind(Rehnquist,Stevens,OConnor,Scalia,
-               Kennedy,Souter,Thomas,Ginsburg,Breyer)~NULL,
-               data=SupremeCourt, model="irt1d",
-               B0.alpha=0.2, B0.beta=0.2, burnin=500, mcmc=10000,
-               thin=20, verbose=FALSE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
-user.prompt()
-
-heidel.diag(z.out$coefficients)
-user.prompt()
-
-
-## summarizing the output
-summary(z.out)
-
-
-
-
-
-
-
diff --git a/demo/irtkd.R b/demo/irtkd.R
deleted file mode 100644
index d3f6493..0000000
--- a/demo/irtkd.R
+++ /dev/null
@@ -1,34 +0,0 @@
-## Attaching the example dataset:
-data(SupremeCourt)
-names(SupremeCourt) <- c("Rehnquist","Stevens","OConnor","Scalia",
-                         "Kennedy","Souter","Thomas","Ginsburg","Breyer") 
-user.prompt()
-
-## Estimating the model using MCMCirtKd:
-z.out <- zelig(cbind(Rehnquist,Stevens,OConnor,Scalia,
-               Kennedy,Souter,Thomas,Ginsburg,Breyer)~NULL,
-               dimensions=1, data=SupremeCourt, model="irtkd",
-               B0=0.25, burnin=5000, mcmc=50000, thin=10, verbose=TRUE)
-               
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
-user.prompt()
-
-heidel.diag(z.out$coefficients)
-user.prompt()
-
-raftery.diag(z.out$coefficients)
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-
-
-
-
-
-
-
-
diff --git a/demo/logit.R b/demo/logit.R
index 449e929..c9952ef 100644
--- a/demo/logit.R
+++ b/demo/logit.R
@@ -1,70 +1,54 @@
-##  Attaching the sample turnout dataset:
+# Attach the data frame
 data(turnout)
 
-#####  Example 1:  Simple Example 
-
 ##  Generating empirical estimates:
-user.prompt()
+
 z.out1 <- zelig(vote ~ age + race, model = "logit", data = turnout)
-user.prompt()
-##  Viewing the regression output:
-summary(z.out1)
 
 ##  Using setx to generate baseline and alternative velus for the
 ##  explanatory variables.  
-user.prompt()
+
 x.out1 <- setx(z.out1, age = 36, race = "white")
+x.out1
 
 ##  Simulating quantities of interest (predicted probabilites, risk
 ##  ratios, and risk differences):
-user.prompt()
-s.out1 <- sim(z.out1, x = x.out1)
-user.prompt()
-## Summarizing the simulated quantities of interest:
-summary(s.out1)
-
-## Diagnostic plot of the s.out:
-user.prompt()
-plot(s.out1)
-
-##  Example 2: First Differences
-
-user.prompt()
-z.out2 <-  zelig(vote ~ race + educate, model = "logit", data = turnout)
-x.high <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.75))
-x.low <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.25))
-
-user.prompt()
-s.out2 <- sim(z.out2, x = x.high, x1 = x.low)
-user.prompt()
-summary(s.out2)
-user.prompt()
-plot(s.out2)
-
-
-
-
-
-
-
-
-
 
+s.out1 <- sim(z.out1, x = x.out1)
 
+# Summary of fitted statistical model
 
+summary(z.out1)
 
+# Summary of simulations of quantities of interest
 
+summary(s.out1)
 
+# Plot simulations of quantities of interest
 
+plot(s.out1)
 
+##  Example 2: First Differences
 
+# Fit the statistical model
 
+z.out2 <-  zelig(vote ~ race + educate, model = "logit", data = turnout)
 
+# Set alternate values
 
+x.high <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.75))
+x.low <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.25))
 
+s.out2 <- sim(z.out2, x = x.high, x1 = x.low)
 
+# Summary of the fitted model
 
+summary(z.out2)
 
+# Summary of the simulated quantities of interest
 
+summary(s.out2)
 
+# Plot of the simulated quantities of interest
 
+plot(s.out2)
diff --git a/demo/logit.bayes.R b/demo/logit.bayes.R
index f1a3a2a..73b65c6 100644
--- a/demo/logit.bayes.R
+++ b/demo/logit.bayes.R
@@ -7,11 +7,11 @@ z.out <- zelig(vote ~ race + educate, model = "logit.bayes",
 user.prompt()
 
 ## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
+geweke.diag(z.out$result$coefficients)
 user.prompt()
-heidel.diag(z.out$coefficients)
+heidel.diag(z.out$result$coefficients)
 user.prompt()
-raftery.diag(z.out$coefficients)
+raftery.diag(z.out$result$coefficients)
 user.prompt()
 
 ## summarizing the output
diff --git a/demo/logit.gam.R b/demo/logit.gam.R
deleted file mode 100644
index 0a9ccb3..0000000
--- a/demo/logit.gam.R
+++ /dev/null
@@ -1,62 +0,0 @@
-#####  Example 1: Basic Example with First Differences  #####
-
-# Create some sample data:  
-set.seed(0) 
-n<-400
-sig<-2
-x0 <- runif(n, 0, 1)
-x1 <- runif(n, 0, 1)
-x2 <- runif(n, 0, 1)
-x3 <- runif(n, 0, 1)
-f0 <- function(x) 2 * sin(pi * x)
-f1 <- function(x) exp(2 * x)
-f2 <- function(x) 0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
-f3 <- function(x) 0*x
-f <- f0(x0) + f1(x1) + f2(x2)
-g <- (f-5)/3
-g <- binomial()$linkinv(g)
-y <- rbinom(g,1,g)
-my.data <- as.data.frame(cbind(y, x0, x1, x2, x3))
-
-# Estimate model, present a summary and a plot of the results:
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3), model="logit.gam", data=my.data)
-user.prompt()
-summary(z.out)
-user.prompt()
-plot(z.out, pages=1)
-
-## Plot model components with truth overlaid in red
-op <- par(mfrow=c(2,2))
-for (k in 1:4) {
-  plot(z.out,residuals=TRUE,select=k)
-  xx <- sort(eval(parse(text=paste("x",k-1,sep=""))))
-  ff <- eval(parse(text=paste("f",k-1,"(xx)",sep="")))
-  lines(xx,(ff-mean(ff))/3,col=2)   }
-par(op)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-user.prompt()
-x.high <- setx(z.out, x3= quantile(my.data$x3, 0.8))
-x.low  <- setx(z.out, x3 = quantile(my.data$x3, 0.2))
-
-# Generate first differences for the effect of high versus low x3 on y:
-user.prompt()
-s.out <- sim(z.out, x=x.high, x1=x.low)
-user.prompt()
-summary(s.out)
-
-# Generate a second set of fitted values and a plot:
-user.prompt()
-plot(s.out)
-
-## We can also run ANOVA on the model output
-user.prompt()
-anova(z.out)
-
-## We can pull out and compare AICs too...
-user.prompt()
-z1.out <- zelig(y~s(x0)+s(x1)+s(x2), model="logit.gam", data=my.data)
-z2.out <- zelig(y~s(x1)+s(x2), model="logit.gam", data=my.data)
-AIC(z.out,z1.out,z2.out)
diff --git a/demo/logit.gee.R b/demo/logit.gee.R
old mode 100755
new mode 100644
diff --git a/demo/logit.mixed.R b/demo/logit.mixed.R
deleted file mode 100644
index a7ad563..0000000
--- a/demo/logit.mixed.R
+++ /dev/null
@@ -1,23 +0,0 @@
-data(voteincome)
-user.prompt()
-z.out <- zelig(vote ~ education + age + female + tag(1 | state),
-                   data=voteincome, model="logit.mixed")
-user.prompt()
-summary(z.out)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-##  with education set to 80th and 20th percentiles.
-user.prompt()
-x.low <- setx(z.out, education=quantile(voteincome$education, 0.8))
-x.high <- setx(z.out, education=quantile(voteincome$education, 0.2))
-
-##  Simulating draws using the default bootstrap method.
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
diff --git a/demo/logit.net.R b/demo/logit.net.R
deleted file mode 100644
index 0787057..0000000
--- a/demo/logit.net.R
+++ /dev/null
@@ -1,29 +0,0 @@
-## Example Logit Model
-
-## Load sample data
-## Estimate the model
-## Summarize the results
-data(friendship)
-z.out <- zelig(friends ~ advice + prestige + perpower, model="logit.net", data=friendship)
-summary(z.out)
-user.prompt()
-
-## Estimating the risk difference (and risk ratio) between low personal power 
-## (25th percentile) and high personal power (75th percentile) while all the 
-## other variables are held at their default values. 
-x.high <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.75))
-x.low <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.25))
-user.prompt()
-
-## Simulate quantities of interest
-## Summarize the results of the simulation
-## Plot those results
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-summary(s.out)
-plot(s.out)
-
-
-
-
-
-
diff --git a/demo/lognorm.R b/demo/lognorm.R
index 824c9ae..24247fd 100644
--- a/demo/lognorm.R
+++ b/demo/lognorm.R
@@ -1,3 +1,4 @@
+library(ZeligCommon)
 # Load the sample data:  
 data(coalition)
 
diff --git a/demo/ls.R b/demo/ls.R
index 8c5abbc..bfdc1bb 100644
--- a/demo/ls.R
+++ b/demo/ls.R
@@ -1,29 +1,38 @@
 #####  Example 1: Basic Example with First Differences  #####
 
 # Attach sample data and variable names:  
+
 data(macro)
 
 # Estimate model and present a summary:
-user.prompt()
+
 z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
-user.prompt()
-summary(z.out1)
 
 # Set explanatory variables to their default (mean/mode) values, with
 # high (80th percentile) and low (20th percentile) values:
-user.prompt()
+
 x.high<- setx(z.out1, trade = quantile(macro$trade, 0.8))
 x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
 
+x.high
+x.low
+
+
 # Generate first differences for the effect of high versus low trade on
 # GDP:
-user.prompt()
+
 s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-user.prompt()
+
+# Summary of fitted statistical model
+
+summary(z.out1)
+
+# Summary of simualted quantities of interest
+
 summary(s.out1)
 
-# Generate a second set of fitted values and a plot:
-user.prompt()
+# Plot of simulated quantities of interest
+
 plot(s.out1)
 
 #####  Example 2:  Using Dummy Variables #####
@@ -32,20 +41,27 @@ plot(s.out1)
 # Note that you do not need to create dummy variables, as the program 
 # will automatically parse the unique values in the selected variables 
 # into dummy variables.
-user.prompt()
+
 z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(country), 
                 model = "ls", data = macro)
-user.prompt()
+
 # Set values for the explanatory variables, using the default mean/mode
 # values, with country set to the United States and Japan, respectively:
-user.prompt()
 x.US <- setx(z.out2, country = "United States")
 x.Japan <- setx(z.out2, country = "Japan")
 
+
+
 # Simulate quantities of interest:
-user.prompt()
 s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
 
+# Summary of fitted statistical model
+
+summary(z.out2)
+
+# Summary of simulated quantities of interest
+
+summary(s.out2)
+
 # Plot differences:  
-user.prompt()
 plot(s.out2)
diff --git a/demo/ls.mixed.R b/demo/ls.mixed.R
deleted file mode 100644
index 1cad42b..0000000
--- a/demo/ls.mixed.R
+++ /dev/null
@@ -1,23 +0,0 @@
-data(voteincome)
-user.prompt()
-z.out <- zelig(income ~ education + age + female + tag(1 | state),
-                   data=voteincome, model="ls.mixed")
-user.prompt()
-summary(z.out)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-##  with education set to 80th and 20th percentiles.
-user.prompt()
-x.low <- setx(z.out, education=quantile(voteincome$education, 0.8))
-x.high <- setx(z.out, education=quantile(voteincome$education, 0.2))
-
-##  Simulating draws using the default bootstrap method.
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
diff --git a/demo/ls.net.R b/demo/ls.net.R
deleted file mode 100644
index c98dac4..0000000
--- a/demo/ls.net.R
+++ /dev/null
@@ -1,31 +0,0 @@
-## Attaching the sample dataset:
-data(sna.ex)
-
-##### Example 1: Simple Example with First Differences
-
-## Generating empirical estimates:
-user.prompt()
-z.out <- zelig(Var1 ~ Var2 + Var3 + Var4, model = "ls.net", data=sna.ex)
-user.prompt()
-## Viewing the regression output:
-summary(z.out)
-
-## Using setx to set explanatory variables to their default (mean/mode) values,
-## with high (80th percentile) and low (20th percentile) for the second
-## explanatory variable:
-user.prompt()
-x.high <- setx(z.out, Var3 = quantile(sna.ex$Var3, 0.8))
-x.low <- setx(z.out, Var3 = quantile(sna.ex$Var3, 0.2))
-
-## Simulating quantities of interest
-user.prompt()
-s.out <- sim(z.out, x=x.high, x1=x.low)
-user.prompt()
-
-## Summarizing the simulated quantities of interest:
-summary(s.out)
-
-## Diagnostic plot of the s.out:
-user.prompt()
-plot(s.out)
-
diff --git a/demo/match.R b/demo/match.R
deleted file mode 100644
index fefd4c7..0000000
--- a/demo/match.R
+++ /dev/null
@@ -1,115 +0,0 @@
-###
-### Example 1: Calculating the conditional average treatment effect
-###            for the matched treatment group using nearest neighbor
-###            propensity score matching
-###
-
-## load the Lalonde data
-library(MatchIt)
-data(lalonde)
-user.prompt()
-
-## propensity score matching
-m.out1 <- matchit(treat ~ age + educ + black + hispan + nodegree + married + re74 + re75, 
-                  method = "nearest", data = lalonde)
-user.prompt()
-
-## fit the linear model to the entire sample controlling for propensity score and other covariates
-z.out1 <- zelig(re78 ~ treat + age + educ + black + hispan + nodegree + married + re74 + re75 +
-                       distance, data = match.data(m.out1), model = "ls")
-user.prompt()
-
-## set the covariates to the covariates using only matched treated units:
-x.out0 <- setx(z.out1, data = match.data(m.out1, "treat"), fn = NULL, treat=0)
-x.out1 <- setx(z.out1, data = match.data(m.out1, "treat"), fn = NULL)
-user.prompt()
-
-## simulate conditional average treatment effect for the treated
-s.out1 <- sim(z.out1, x = x.out0, x1 = x.out1)
-user.prompt()
-
-## obtain a summary
-summary(s.out1)
-user.prompt()
-
-
-###
-### Example 2: Calculating the conditional average treatment effect
-###            for the matched control group using nearest neighbor
-###            propensity score matching
-###
-
-
-## set the covariates to the covariates using only matched control units:
-x.out2 <- setx(z.out1, data = match.data(m.out1, "control"), fn = NULL)
-x.out3 <- setx(z.out1, data = match.data(m.out1, "control"), fn = NULL, treat = 1)
-user.prompt()
-
-## simulate conditional average treatment effect for the treated
-s.out2 <- sim(z.out1, x = x.out2, x1 = x.out3)
-user.prompt()
-
-## obtain a summary
-summary(s.out2)
-user.prompt()
-
-
-###
-### Example 3: Calculating the conditional average treatment effect
-###            for the entire matched sample using nearest neighbor
-###            propensity score matching
-###
-
-## set the covariates to the covariates using all matched units:
-x.out4 <- setx(z.out1, fn = NULL, treat = 0)
-x.out5 <- setx(z.out1, fn = NULL, treat = 1)
-user.prompt()
-
-## simulate conditional average treatment effect for the treated
-s.out3 <- sim(z.out1, x = x.out4, x1 = x.out5)
-user.prompt()
-
-## obtain a summary
-summary(s.out3)
-user.prompt()
-
-
-###
-### Example 4: Calculating the conditional average treatment effect
-###            for the entire sample using subclassification
-###
-
-## subclassification with 4 subclasses
-m.out2 <- matchit(treat ~ age + educ + black + hispan + nodegree + married + re74 + re75,  
-                  data = lalonde, method = "subclass", subclass = 4)
-user.prompt()
-
-## controlling only for the estimated prpensity score and lagged Y within each subclass
-## one can potentially control for more
-z.out2 <- zelig(re78 ~ treat + re74 + re75 + distance, data = match.data(m.out2), 
-                model = "ls", by = "subclass")
-user.prompt()
-
-## conducting simulations
-x.out6 <- setx(z.out2, fn = NULL, treat = 0)
-x.out7 <- setx(z.out2, fn = NULL, treat = 1)
-user.prompt()
-
-## for the demonstration purpose, we set the number of simulations to be 100
-s.out4 <- sim(z.out2, x = x.out6, x1 = x.out7, num = 100)
-user.prompt()
-
-## overall results
-summary(s.out4) 
-user.prompt()
-
-## summary for each subclass
-summary(s.out4, subset = 1) 
-user.prompt()
-
-summary(s.out4, subset = 2) 
-user.prompt()
-
-summary(s.out4, subset = 3) 
-
-
diff --git a/demo/mi.R b/demo/mi.R
index 89b0495..467e00d 100644
--- a/demo/mi.R
+++ b/demo/mi.R
@@ -1,32 +1,9 @@
-data(immi1, immi2, immi3, immi4, immi5)
-user.prompt()
+library(Zelig)
 
-z.out <- zelig(as.factor(ipip) ~ wage1992 + prtyid + ideol, model = "ologit",
-               data = mi(immi1, immi2, immi3, immi4, immi5), by = "gender")
-user.prompt()
-summary(z.out)
-user.prompt()
+data(turnout)
 
-x.out <- setx(z.out) 
-user.prompt()
-s.out <- sim(z.out, x = x.out)
-user.prompt()
-summary(s.out)
-user.prompt()
-plot(s.out)
-user.prompt()
-z.out <- zelig(as.factor(ipip) ~ wage1992 + prtyid + ideol, model = "mlogit",
-               data = mi(immi1, immi2, immi3, immi4, immi5))
-user.prompt()
-summary(z.out)
-user.prompt()
-               
-x.out <- setx(z.out)
-user.prompt()
-s.out <- sim(z.out, x = x.out)
-user.prompt()
-summary(s.out)
-user.prompt()
-plot(s.out)
+z <- zelig(vote ~ age, model = "logit", data = mi(turnout[1:10, ], turnout[100:110, ]))
 
+x <- setx(z, age = 90)
 
+s <- sim(z, x=x, num=20)
diff --git a/demo/mlogit.R b/demo/mlogit.R
deleted file mode 100644
index 2328e88..0000000
--- a/demo/mlogit.R
+++ /dev/null
@@ -1,49 +0,0 @@
-data(mexico)
-user.prompt()
-z.out1 <- zelig(as.factor(vote88) ~ pristr + othcok + othsocok, model = "mlogit", 
-               data = mexico)
-user.prompt()
-print(summary(z.out1))
-
-user.prompt()
-x.weak <- setx(z.out1, pristr = 1)
-x.strong <- setx(z.out1, pristr = 3)
-
-user.prompt()
-s.out1 <- sim(z.out1, x = x.strong, x1 = x.weak)
-user.prompt()
-print(summary(s.out1))
-
-user.prompt()
-ev.weak <- s.out1$qi$ev + s.out1$qi$fd
-
-user.prompt()
-ternaryplot(s.out1$qi$ev, pch = ".", col = "blue",
-            main = "1988 Mexican Presidential Election")
-user.prompt()
-ternarypoints(ev.weak, pch = ".", col = "red")
-
-# Specifying different sets of explanatory variables for each factor level
-user.prompt()
-z.out2 <- zelig(list(id(vote88,"1")~pristr + othcok, id(vote88,"2")~othsocok), model = "mlogit", 
-               data = mexico)
-user.prompt()
-print(summary(z.out2))
-
-user.prompt()
-x.weak <- setx(z.out2, pristr = 1)
-x.strong <- setx(z.out2, pristr = 3)
-
-user.prompt()
-s.out2 <- sim(z.out2, x = x.strong, x1 = x.weak)
-user.prompt()
-print(summary(s.out2))
-
-user.prompt()
-ev.weak <- s.out2$qi$ev + s.out2$qi$fd
-
-user.prompt()
-ternaryplot(s.out2$qi$ev, pch = ".", col = "blue",
-            main = "1988 Mexican Presidential Election")
-user.prompt()
-ternarypoints(ev.weak, pch = ".", col = "red")
diff --git a/demo/mlogit.bayes.R b/demo/mlogit.bayes.R
index aeb785f..f097fc7 100644
--- a/demo/mlogit.bayes.R
+++ b/demo/mlogit.bayes.R
@@ -7,10 +7,10 @@ z.out <- zelig(vote88 ~ pristr + othcok + othsocok, model = "mlogit.bayes",
 user.prompt()
 
 ## Checking for convergence before summarizing the estimates:
-heidel.diag(z.out$coefficients)
+heidel.diag(z.out$result$coefficients)
 user.prompt()
 
-raftery.diag(z.out$coefficients)
+raftery.diag(z.out$result$coefficients)
 user.prompt()
 
 ## Summarizing the output
diff --git a/demo/negbin.R b/demo/negbin.R
deleted file mode 100644
index 87119a9..0000000
--- a/demo/negbin.R
+++ /dev/null
@@ -1,13 +0,0 @@
-data(sanction)
-user.prompt()
-z.out <- zelig(num ~ target + coop, model = "negbin", data = sanction)
-user.prompt()
-summary(z.out)
-user.prompt()
-x.out <- setx(z.out)
-user.prompt()
-s.out <- sim(z.out, x = x.out)
-user.prompt()
-summary(s.out)
-user.prompt()
-plot(s.out)
diff --git a/demo/negbinom.R b/demo/negbinom.R
new file mode 100644
index 0000000..e13b99a
--- /dev/null
+++ b/demo/negbinom.R
@@ -0,0 +1,26 @@
+# Attach the data-frame
+data(sanction)
+
+# Fit the statistical model
+
+z <- zelig(num ~ target + coop, model = "negbinom", data = sanction)
+
+# Set explanatory variables (in this case, nothing is explicitly set)
+
+x <- setx(z)
+
+# Simulate Quantities of Interest
+
+s <- sim(z, x)
+
+# Summarize the statistical model
+
+summary(z)
+
+# Summarize the simulated quantities of interest
+
+summary (s)
+
+# Plot the results
+
+plot(s)
diff --git a/demo/normal.R b/demo/normal.R
index b10e6d2..1762683 100644
--- a/demo/normal.R
+++ b/demo/normal.R
@@ -1,55 +1,34 @@
+library(Zelig)
+
 #####  Example 1: Basic Example with First Differences  #####
 
 # Attach sample data and variable names:  
 data(macro)
 
 # Estimate model and present a summary:
-user.prompt()
+
 z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "normal", data = macro)
-user.prompt()
-summary(z.out1)
+
 
 # Set explanatory variables to their default (mean/mode) values, with
 # high (80th percentile) and low (20th percentile) values:
-user.prompt()
+
 x.high <- setx(z.out1, trade = quantile(macro$trade, 0.8))
 x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
 
 # Generate first differences for the effect of high versus low trade on
 # GDP:
-user.prompt()
-s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-user.prompt()
-summary(s.out1)
 
-# Generate a second set of fitted values and a plot:
-user.prompt()
-plot(s.out1)
-
-#####  Example 2:  Using Dummy Variables #####
+s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
 
-# Estimate a model with a dummy variable for each year and country.  
-# Note that you do not need to create dummy variables, as the program 
-# will automatically parse the unique values in the selected variables 
-# into dummy variables.  
-#user.prompt()
-#z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(year) 
-#                    + as.factor(country), model = "normal", data = macro)
+# Summarize the fitted model
 
-# Set values for the explanatory variables, using the default mean/mode
-# values, with country set to the United States and Japan, respectively:
-#user.prompt()
-#x.US <- setx(z.out2, country = "United States")
-#x.Japan <- setx(z.out2, country = "Japan")
+summary(z.out1)
 
-# Simulate quantities of interest:
-#user.prompt()
-#s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
-#user.prompt()
-#summary(s.out2) 
+# Summarize the simulated quantities of interest
 
-# Plot differences:  
-#user.prompt()
-#plot(s.out2)
+summary(s.out1)
 
+# Plot the simulated quantities of interest
 
+plot(s.out1)
diff --git a/demo/normal.bayes.R b/demo/normal.bayes.R
index 7745f15..c5ffd88 100644
--- a/demo/normal.bayes.R
+++ b/demo/normal.bayes.R
@@ -7,13 +7,13 @@ z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.bayes",
 user.prompt()
 
 ## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)  
+geweke.diag(z.out$result$coefficients)  
 user.prompt()
 
-heidel.diag(z.out$coefficients)  
+heidel.diag(z.out$result$coefficients)  
 user.prompt()
 
-raftery.diag(z.out$coefficients)  
+raftery.diag(z.out$result$coefficients)  
 user.prompt()
 
 ## summarizing the output
diff --git a/demo/normal.gam.R b/demo/normal.gam.R
deleted file mode 100644
index f86eb5e..0000000
--- a/demo/normal.gam.R
+++ /dev/null
@@ -1,76 +0,0 @@
-
-#####  Example 1: Basic Example with First Differences  #####
-
-# Create some sample data:  
-set.seed(0) 
-n<-400
-sig<-2
-x0 <- runif(n, 0, 1)
-x1 <- runif(n, 0, 1)
-x2 <- runif(n, 0, 1)
-x3 <- runif(n, 0, 1)
-f0 <- function(x) 2 * sin(pi * x)
-f1 <- function(x) exp(2 * x)
-f2 <- function(x) 0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
-f3 <- function(x) 0*x
-f <- f0(x0) + f1(x1) + f2(x2)
-e <- rnorm(n, 0, sig)
-y <- f + e
-my.data <- as.data.frame(cbind(y, x0, x1, x2, x3))
-
-# Estimate model, present a summary and a plot of the results:
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3), model="normal.gam", data=my.data)
-user.prompt()
-summary(z.out)
-user.prompt()
-plot(z.out,pages=1,residuals=TRUE)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-user.prompt()
-x.high <- setx(z.out, x3= quantile(my.data$x3, 0.8))
-x.low  <- setx(z.out, x3 = quantile(my.data$x3, 0.2))
-
-# Generate first differences for the effect of high versus low x3 on y:
-user.prompt()
-s.out <- sim(z.out, x=x.high, x1=x.low)
-user.prompt()
-summary(s.out)
-
-# Generate a second set of fitted values and a plot:
-user.prompt()
-plot(s.out)
-
-#####  Example 2: An extra ridge penalty (useful with convergence problems) #####
-
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3), H=diag(0.5,37), model="normal.gam", data=my.data)
-user.prompt()
-# Set values for the explanatory variables, using the default mean/mode values
-user.prompt()
-x.out <- setx(z.out)
-
-# Simulate quantities of interest:
-user.prompt()
-s.out <- sim(z.out, x=x.high)
-
-# Plot differences:  
-user.prompt()
-plot(s.out)
-
-#####  Example 3: Set the smoothing parameter for the first term, estimate the rest #####
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3),sp=c(0.01,-1,-1,-1), model="normal.gam", data=my.data)
-plot(z.out,pages=1)
-
-#####  Example 4: Set lower bounds on smoothing parameters #####
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3),min.sp=c(0.001,0.01,0,10), model="normal.gam", data=my.data) 
-print(z.out)
-
-#####  Example 5: A GAM with 3df regression spline term & 2 penalized terms #####
-user.prompt()
-z.out <-zelig(y~s(x0,k=4,fx=TRUE,bs="tp")+s(x1,k=12)+s(x2,k=15), model="normal.gam", data=my.data)
-plot(z.out,pages=1)
-
diff --git a/demo/normal.net.R b/demo/normal.net.R
deleted file mode 100644
index e8c114f..0000000
--- a/demo/normal.net.R
+++ /dev/null
@@ -1,24 +0,0 @@
-## Example Normal Model
-
-## Load sample data
-## Estimate the model
-## Summarize the results
-data(friendship)
-z.out <- zelig(perpower ~ friends + advice + prestige, LF="identity", model="normal.net", data=friendship)
-summary(z.out)
-
-
-## Estimating the risk difference (and risk ratio) between low personal power 
-## (25th percentile) and high personal power (75th percentile) while all the 
-## other variables are held at their default values. 
-x.high <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.75))
-x.low <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.25))
-user.prompt()
-
-## Simulate quantities of interest
-## Summarize the results of the simulation
-## Plot those results
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-summary(s.out)
-plot(s.out)
-
diff --git a/demo/normal.regression.R b/demo/normal.regression.R
deleted file mode 100644
index 39c4ac8..0000000
--- a/demo/normal.regression.R
+++ /dev/null
@@ -1,99 +0,0 @@
-describe.normal.regression <- function() {
-  category <- "continuous"
-  mu <- list(equations = 1,              # Systematic component
-             tagsAllowed = FALSE, 
-             depVar = TRUE, 
-             expVar = TRUE)
-  sigma2 <- list(equations = 1,          # Scalar ancillary parameter
-                 tagsAllowed = FALSE, 
-                 depVar = FALSE, 
-                 expVar = FALSE)
-  pars <- list(mu = mu, sigma2 = sigma2)
-  model <- list(category = category, parameters = pars)
-}
-
-zelig2normal.regression <- function(formula, model, data, M, ...) {
-  mf <- match.call(expand.dots = TRUE)                     # [1]
-  mf$model <- mf$M <- NULL                                 # [2]
-  mf[[1]] <- as.name("normal.regression")                  # [3]
-  as.call(mf)                                              # [4] 
-}
-
-normal.regression <- function(formula, data, start.val = NULL, ...) {
-  # fml <- parse.formula(formula, req = "mu", ancil = "sigma2")  # [1a]
-  fml <- parse.formula(formula, model = "normal.regression") # [1b]
-  D <- model.frame(fml, data = data)
-  X <- model.matrix(fml, data = D)
-  Y <- model.response(D)
-  terms <- attr(D, "terms")
-                                    
-  start.val <- set.start(start.val, terms)                     # [2]
-
-  ll.normal <- function(par, X, Y, n, terms) {                 # [3]
-    beta <- parse.par(par, terms, eqn = "mu")                  # [3a]
-    gamma <- parse.par(par, terms, eqn = "sigma2")             # [3b]
-    sigma2 <- exp(gamma)
-    -0.5 * (n * log(sigma2) + sum((Y - X %*% beta)^2 / sigma2)) 
-  }
-
-  res <- optim(start.val, ll.normal, method = "BFGS",          # [4]
-               hessian = TRUE, control = list(fnscale = -1),
-               X = X, Y = Y, n = nrow(X), terms = terms, ...)      
-
-  fit <- model.end(res, D)                                     # [5]
-  class(fit) <- "normal"                                    
-  fit                                                        
-}
-
-param.normal <- function(object, num = NULL, bootstrap = FALSE, 
-                   terms = NULL) {
-  if (!bootstrap) {
-    par <- mvrnorm(num, mu = coef(object), Sigma = vcov(object))
-    Beta <- parse.par(par, terms = terms, eqn = "mu")
-    sigma2 <- exp(parse.par(par, terms = terms, eqn = "sigma2"))
-    res <- cbind(Beta, sigma2)
-  }
-  else {
-    par <- coef(object)
-    Beta <- parse.par(par, terms = terms,  eqn = "mu")
-    sigma2 <- exp(parse.par(par, terms = terms, eqn = "sigma2"))
-    res <- c(coef, sigma2)
-  }
-  res
-}
-
-qi.normal <- function(object, par, x, x1 = NULL, y = NULL) {
-  Beta <- parse.par(par, eqn = "mu")
-  sigma2 <- parse.par(par, eqn = "sigma2")
-  ev <- Beta %*% t(x)    
-  pr <- matrix(NA, ncol = ncol(ev), nrow = nrow(ev))
-  for (i in 1:ncol(ev))        # Using R's built-in poisson generator.
-    pr[,i] <- rnorm(length(ev[,i]), mean = ev[,i], sigma = sd(sigma2[i]))
-  qi <- list(ev = ev, pr = pr)
-  qi.name <- list(ev = "Expected Values: E(Y|X)",
-                  pr = "Predicted Values: Y|X")
-  if (!is.null(x1)){
-    ev1 <- par %*% t(x1)
-    qi$fd <- ev1 - ev
-    qi.name$fd <- "First Differences in Expected Values: E(Y|X1)-E(Y|X)"
-  }
-  if (!is.null(y)) {
-    yvar <- matrix(rep(y, nrow(par)), nrow = nrow(par), byrow = TRUE)
-    tmp.ev <- yvar - qi$ev
-    tmp.pr <- yvar - qi$pr
-    qi$ate.ev <- matrix(apply(tmp.ev, 1, mean), nrow = nrow(par))
-    qi$ate.pr <- matrix(apply(tmp.pr, 1, mean), nrow = nrow(par))
-    qi.name$ate.ev <- "Average Treatment Effect: Y - EV"
-    qi.name$ate.pr <- "Average Treatment Effect: Y - PR"
-  }
-  list(qi=qi, qi.name=qi.name)
-}
-
-data(macro)
-
-user.prompt()
-
-z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.regression", 
-data = macro)
-x.out <- setx(z.out)
-s.out <- setx(z.out, x = x.out) 
diff --git a/demo/ologit.R b/demo/ologit.R
deleted file mode 100644
index 96318a0..0000000
--- a/demo/ologit.R
+++ /dev/null
@@ -1,67 +0,0 @@
-#####  Example 1: First Differences
-
-# Load the sample data: 
-data(sanction)
-
-# Estimate the empirical model and returning the coefficients:
-user.prompt()
-z.out1 <- zelig(as.factor(cost) ~ mil + coop, model = "ologit", 
-                   data = sanction)
-user.prompt()
-summary(z.out1)
-
-# Set the explanatory variables to their means, with 'mil' set
-# to 0 (no military action in addition to sanctions) in the baseline
-# case and set to 1 (military action in addition to sanctions) in the
-# alternative case:
-user.prompt()
-x.low <- setx(z.out1, coop = 1)
-x.high <- setx(z.out1, coop = 4)
-
-# Generate simulated fitted values and first differences, and view 
-# the results:
-user.prompt()
-s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
-user.prompt()
-summary(s.out1)
-user.prompt()
-plot(s.out1)
-
-##### Example 2: Creating An Ordered Dependent Variable #####
-
-# Create an ordered dependent variable: 
-user.prompt()
-sanction$ncost <- factor(sanction$ncost, ordered = TRUE,
-                         levels = c("net gain", "little effect", 
-                           "modest loss", "major loss"))
-
-# Estimate the model:
-user.prompt()
-z.out2 <- zelig(ncost ~ mil + coop, model = "ologit", data = sanction)
-user.prompt()
-summary(z.out2)
-
-# Set the explanatory variables to their observed values:  
-user.prompt()
-x.out2 <- setx(z.out2, fn = NULL)
-
-# Simulate fitted values given Xval and view the results:
-user.prompt()
-s.out2 <- sim(z.out2, x = x.out2)
-user.prompt()
-summary(s.out2)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/demo/oprobit.R b/demo/oprobit.R
deleted file mode 100644
index b5bfc82..0000000
--- a/demo/oprobit.R
+++ /dev/null
@@ -1,62 +0,0 @@
-#####  Example 1: First Differences
-
-# Load the sample data: 
-data(sanction)
-
-# Estimate the empirical model and returning the coefficients:
-user.prompt()
-z.out1 <- zelig(as.factor(cost) ~ mil + coop, model = "oprobit", 
-                    data = sanction)
-user.prompt()
-summary(z.out1)
-
-# Set the explanatory variables to their means, with 'mil' set
-# to 0 (no military action in addition to sanctions) in the baseline
-# case and set to 1 (military action in addition to sanctions) in the
-# alternative case:
-user.prompt()
-x.low <- setx(z.out1, mil = 0)
-x.high <- setx(z.out1, mil = 1)
-
-# Generate simulated fitted values and first differences, and view 
-# the results:
-user.prompt()
-s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
-user.prompt()
-summary(s.out1)
-
-##### Example 2: Creating An Ordered Dependent Variable #####
-
-# Create an ordered dependent variable: 
-user.prompt()
-sanction$ncost <- factor(sanction$ncost, ordered = TRUE,
-                         levels = c("net gain", "little effect", 
-                         "modest loss", "major loss"))
-
-# Z.Out the model:
-user.prompt()
-z.out2 <- zelig(ncost ~ mil + coop, model = "oprobit", data = sanction)
-user.prompt()
-summary(z.out2)
-
-# Set the explanatory variables to their observed values:  
-user.prompt()
-x.out2 <- setx(z.out2, fn = NULL)
-
-# Simulate fitted values given Xval and view the results:
-user.prompt()
-s.out2 <- sim(z.out2, x = x.out2)
-user.prompt()
-summary(s.out2)
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/demo/oprobit.bayes.R b/demo/oprobit.bayes.R
index b1a5a03..39d4319 100644
--- a/demo/oprobit.bayes.R
+++ b/demo/oprobit.bayes.R
@@ -17,10 +17,10 @@ user.prompt()
 #geweke.diag(z.out$coefficients)
 #user.prompt()
 
-heidel.diag(z.out$coefficients)
+heidel.diag(z.out$result$coefficients)
 user.prompt()
 
-raftery.diag(z.out$coefficients)
+raftery.diag(z.out$result$coefficients)
 user.prompt()
 
 ## summarizing the output
diff --git a/demo/poisson.R b/demo/poisson.R
index 0c6e185..53d0766 100644
--- a/demo/poisson.R
+++ b/demo/poisson.R
@@ -1,16 +1,26 @@
+# Attach the data frame
 data(sanction)
-user.prompt()
+
+# Fit the statistical model
 
 z.out <- zelig(num ~ target + coop, model = "poisson", data = sanction)
-user.prompt()
-summary(z.out)
-user.prompt()
+
+# Set explanatory variables (in this case non are explicitly set)
 
 x.out <- setx(z.out)
-user.prompt()
+
+# Simulate the quantities of interest
 
 s.out <- sim(z.out, x = x.out)
-user.prompt()
+
+# Summary of the statistical model
+
+summary(z.out)
+
+# Summary of the simulated quantities of interest
+
 summary(s.out)
-user.prompt()
+
+# Plot the simulated quantities of interest
+
 plot(s.out)
diff --git a/demo/poisson.bayes.R b/demo/poisson.bayes.R
index 81b3db6..54e7ce9 100644
--- a/demo/poisson.bayes.R
+++ b/demo/poisson.bayes.R
@@ -7,13 +7,13 @@ z.out <- zelig(num ~ target + coop, model = "poisson.bayes",
 user.prompt()
 
 ## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
+geweke.diag(z.out$result$coefficients)
 user.prompt()
 
-heidel.diag(z.out$coefficients)
+heidel.diag(z.out$result$coefficients)
 user.prompt()
 
-raftery.diag(z.out$coefficients)
+raftery.diag(z.out$result$coefficients)
 user.prompt()
 
 ## summarizing the output
diff --git a/demo/poisson.gam.R b/demo/poisson.gam.R
deleted file mode 100644
index 8aa100a..0000000
--- a/demo/poisson.gam.R
+++ /dev/null
@@ -1,62 +0,0 @@
-
-#####  Example 1: Basic Example with First Differences  #####
-
-# Create some Poisson data:  
-set.seed(0) 
-n<-400
-sig<-2
-x0 <- runif(n, 0, 1)
-x1 <- runif(n, 0, 1)
-x2 <- runif(n, 0, 1)
-x3 <- runif(n, 0, 1)
-f0 <- function(x) 2 * sin(pi * x)
-f1 <- function(x) exp(2 * x)
-f2 <- function(x) 0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
-f3 <- function(x) 0*x
-f <- f0(x0) + f1(x1) + f2(x2)
-g<-exp(f/4)
-y<-rpois(rep(1,n),g)
-my.data <- as.data.frame(cbind(y, x0, x1, x2, x3))
-
-# Estimate model, present a summary and a plot of the results:
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3), model="poisson.gam", data=my.data)
-user.prompt()
-summary(z.out)
-user.prompt()
-plot(z.out,pages=1)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-user.prompt()
-x.high <- setx(z.out,  x3= quantile(my.data$x3, 0.8))
-x.low <- setx(z.out, x3 = quantile(my.data$x3, 0.2))
-
-# Generate first differences for the effect of high versus low x3 on y:
-user.prompt()
-s.out <- sim(z.out, x=x.high, x1=x.low)
-user.prompt()
-summary(s.out)
-
-# Generate a second set of fitted values and a plot:
-user.prompt()
-plot(s.out)
-
-#####  Example 2: Repeat fit using performance iteration #####
-
-user.prompt()
-gm <- gam.method(gam="perf.magic")
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3), method=gm, model="poisson.gam", data=my.data )
-plot(z.out,pages=1)
-
-# Set values for the explanatory variables, using the default mean/mode values
-user.prompt()
-x.out <- setx(z.out)
-
-# Simulate quantities of interest:
-user.prompt()
-s.out <- sim(z.out, x=x.high)
-
-# Plot differences:  
-user.prompt()
-plot(s.out)
diff --git a/demo/poisson.mixed.R b/demo/poisson.mixed.R
deleted file mode 100644
index 5bc2b00..0000000
--- a/demo/poisson.mixed.R
+++ /dev/null
@@ -1,21 +0,0 @@
-data(homerun)
-user.prompt()
-z.out <- zelig(homeruns ~ player + tag(player - 1 | month),
-                   data=homerun, model="poisson.mixed")
-user.prompt()
-summary(z.out)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-user.prompt()
-x.out <- setx(z.out)
-
-##  Simulating draws using the default bootstrap method.
-user.prompt()
-s.out <- sim(z.out, x = x.out)
-user.prompt()
-
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
diff --git a/demo/poisson.net.R b/demo/poisson.net.R
deleted file mode 100644
index 1ec822b..0000000
--- a/demo/poisson.net.R
+++ /dev/null
@@ -1,24 +0,0 @@
-## Example Poisson Model
-
-## Load sample data
-## Estimate the model
-## Summarize the results
-data(friendship)
-z.out <- zelig(count ~ advice + prestige + perpower, model="poisson.net", data=friendship)
-summary(z.out)
-user.prompt()
-
-## Estimating the risk difference (and risk ratio) between low personal power 
-## (25th percentile) and high personal power (75th percentile) while all the 
-## other variables are held at their default values. 
-x.high <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.75))
-x.low <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.25))
-user.prompt()
-
-## Simulate quantities of interest
-## Summarize the results of the simulation
-## Plot those results
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-summary(s.out)
-plot(s.out)
-
diff --git a/demo/probit.R b/demo/probit.R
index 2e27ab1..640c8f2 100644
--- a/demo/probit.R
+++ b/demo/probit.R
@@ -4,66 +4,48 @@ data(turnout)
 #####  Example 1:  Simple Example 
 
 ##  Generating empirical estimates:
-user.prompt()
+
 z.out1 <- zelig(vote ~ race + educate, model = "probit", data = turnout)
 ##  Viewing the regression output:
-user.prompt()
-summary(z.out1)
+
 
 ##  Using setx to generate baseline and alternative velus for the
 ##  explanatory variables.  
-user.prompt()
+
 x.out1 <- setx(z.out1)
+x.out1
+
 
 ##  Simulating quantities of interest (predicted probabilites, risk
 ##  ratios, and risk differences):
-user.prompt()
+
 s.out1 <- sim(z.out1, x = x.out1)
-user.prompt()
-## Summarizing the simulated quantities of interest:
+
+# Summary of fitted the statistical model
+
+summary(z.out1)
+
+# Summary of the simulated quantities of interest
+
 summary(s.out1)
 
 ## Diagnostic plot of the s.out:
-user.prompt()
+
 plot(s.out1)
 
 ##  Example 2: First Differences
 
-user.prompt()
 x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
 x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
 
-user.prompt()
-s.out2 <- sim(z.out1, x = x.low, x1 = x.high)
-user.prompt()
-summary(s.out2)
-user.prompt()
-plot(s.out2)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Simulate quantities of interest (include first-differences, etc.)
 
+s.out2 <- sim(z.out1, x = x.low, x1 = x.high)
 
+# Summary of quantities of interest (for difference in x.low and x.high
 
+summary(s.out2)
 
+# Plot of quantities of interest
 
+plot(s.out2)
diff --git a/demo/probit.bayes.R b/demo/probit.bayes.R
index 40b87ad..3029814 100644
--- a/demo/probit.bayes.R
+++ b/demo/probit.bayes.R
@@ -7,13 +7,13 @@ z.out <- zelig(vote ~ race + educate, model = "probit.bayes",
 user.prompt()
 
 ## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
+geweke.diag(z.out$result$coefficients)
 user.prompt()
 
-heidel.diag(z.out$coefficients)
+heidel.diag(z.out$result$coefficients)
 user.prompt()
 
-raftery.diag(z.out$coefficients)
+raftery.diag(z.out$result$coefficients)
 user.prompt()
 
 ## summarizing the output
diff --git a/demo/probit.gam.R b/demo/probit.gam.R
deleted file mode 100644
index 3a65425..0000000
--- a/demo/probit.gam.R
+++ /dev/null
@@ -1,62 +0,0 @@
-#####  Example 1: Basic Example with First Differences  #####
-
-# Create some sample data:  
-set.seed(0) 
-n<-400
-sig<-2
-x0 <- runif(n, 0, 1)
-x1 <- runif(n, 0, 1)
-x2 <- runif(n, 0, 1)
-x3 <- runif(n, 0, 1)
-f0 <- function(x) 2 * sin(pi * x)
-f1 <- function(x) exp(2 * x)
-f2 <- function(x) 0.2*x^11*(10*(1-x))^6+10*(10*x)^3*(1-x)^10
-f3 <- function(x) 0*x
-f <- f0(x0) + f1(x1) + f2(x2)
-g <- (f-5)/3
-g <- binomial()$linkinv(g)
-y <- rbinom(g,1,g)
-my.data <- as.data.frame(cbind(y, x0, x1, x2, x3))
-
-# Estimate model, present a summary and a plot of the results:
-user.prompt()
-z.out <- zelig(y~s(x0)+s(x1)+s(x2)+s(x3), model="probit.gam", data=my.data)
-user.prompt()
-summary(z.out)
-user.prompt()
-plot(z.out, pages=1)
-
-## Plot model components with truth overlaid in red
-op <- par(mfrow=c(2,2))
-for (k in 1:4) {
-  plot(z.out,residuals=TRUE,select=k)
-  xx <- sort(eval(parse(text=paste("x",k-1,sep=""))))
-  ff <- eval(parse(text=paste("f",k-1,"(xx)",sep="")))
-  lines(xx,(ff-mean(ff))/3,col=2)   }
-par(op)
-
-# Set explanatory variables to their default (mean/mode) values, with
-# high (80th percentile) and low (20th percentile) values:
-user.prompt()
-x.high <- setx(z.out, x3= quantile(my.data$x3, 0.8))
-x.low  <- setx(z.out, x3 = quantile(my.data$x3, 0.2))
-
-# Generate first differences for the effect of high versus low x3 on y:
-user.prompt()
-s.out <- sim(z.out, x=x.high, x1=x.low)
-user.prompt()
-summary(s.out)
-
-# Generate a second set of fitted values and a plot:
-user.prompt()
-plot(s.out)
-
-## We can also run ANOVA on the model output
-user.prompt()
-anova(z.out)
-
-## We can pull out and compare AICs too...
-user.prompt()
-z1.out <- zelig(y~s(x0)+s(x1)+s(x2), model="probit.gam", data=my.data)
-z2.out <- zelig(y~s(x1)+s(x2), model="probit.gam", data=my.data)
-AIC(z.out,z1.out,z2.out)
diff --git a/demo/probit.mixed.R b/demo/probit.mixed.R
deleted file mode 100644
index 8c627f8..0000000
--- a/demo/probit.mixed.R
+++ /dev/null
@@ -1,23 +0,0 @@
-data(voteincome)
-user.prompt()
-z.out <- zelig(vote ~ education + age + female + tag(1 | state),
-                   data=voteincome, model="probit.mixed")
-user.prompt()
-summary(z.out)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables),
-##  with education set to 80th and 20th percentiles.
-user.prompt()
-x.low <- setx(z.out, education=quantile(voteincome$education, 0.8))
-x.high <- setx(z.out, education=quantile(voteincome$education, 0.2))
-
-##  Simulating draws using the default bootstrap method.
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-
-##  Viewing the simulated quantities of interest, for every
-##  observation:
-summary(s.out)
-user.prompt()
diff --git a/demo/probit.net.R b/demo/probit.net.R
deleted file mode 100644
index b32abe9..0000000
--- a/demo/probit.net.R
+++ /dev/null
@@ -1,23 +0,0 @@
-## Example Probit Model
-
-## Load sample data
-## Estimate the model
-## Summarize the results
-data(friendship)
-z.out <- zelig(friends ~ advice + prestige + perpower, model="probit.net", data=friendship)
-summary(z.out)
-user.prompt()
-
-## Estimating the risk difference (and risk ratio) between low personal power 
-## (25th percentile) and high personal power (75th percentile) while all the 
-## other variables are held at their default values. 
-x.high <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.75))
-x.low <- setx(z.out, perpower = quantile(friendship$perpower, prob=0.25))
-user.prompt()
-
-## Simulate quantities of interest
-## Summarize the results of the simulation
-## Plot those results
-s.out <- sim(z.out, x = x.high, x1 = x.low)
-summary(s.out)
-plot(s.out)
diff --git a/demo/quantile.R b/demo/quantile.R
deleted file mode 100644
index c48321f..0000000
--- a/demo/quantile.R
+++ /dev/null
@@ -1,83 +0,0 @@
-##### Example 1: Basic Examle with First Differences #####
-
-# Sample data about the efficiency of a plant
-data(stackloss)
-
-#Estimate the model, observe the output
-user.prompt()
-z.out1 <- zelig(stack.loss ~  Air.Flow + Water.Temp + Acid.Conc., model = "quantile", 
-                data = stackloss, tau=0.5)
-summary(z.out1)
-
-#Set explanatory variables
-user.prompt()
-x.high <- setx(z.out1, Water.Temp = quantile(stackloss$Water.Temp, 0.8))
-x.low <- setx(z.out1, Water.Temp = quantile(stackloss$Water.Temp, 0.2))
-
-#Simulate
- user.prompt()
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
- summary(s.out1)
- plot(s.out1)
-
-##### Example 2: Example Using Dummy Variables #####
-
-#Macroeconomic data to demonstrate use of dummy variables.
-#Note that to measure country-level effects, we only need to include
-#the country factor variable, from which R will create a matrix of
-#dummy variables. Convenient!
- user.prompt()
- data(macro)
- z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(country), 
-                  model = "quantile", tau=0.5, data = macro)
-
-#Set values of the country dummy variable to explore first differences
-#between the US and Japan holding other variables at their means
- user.prompt()
- x.US <- setx(z.out2, country = "United States")
- x.Japan <- setx(z.out2, country = "Japan")
-
-#Simulate quantities of interest:
- user.prompt()
- s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
-
-#Plot results
- user.prompt()
- plot(s.out2)
-
-##### Example 3: Example of Fitting Multiple Quantiles #####
-
-#We estimate a model of food expenditure as a function of household income.
-#This dataset is interesting because there is clear heteroskedasticity. Thus,
-#estimating multiple quantiles lets us get a fuller picture of the conditional
-#distribution of the data than a mean estimate from OLS would.
- user.prompt()
- data(engel)
- z.out3 <- zelig(foodexp ~ income, model = "quantile", tau=seq(0.1,0.9,by=0.1), data = engel)
-
-#The summary function provides information about each fit that was specified
-#in the call.
- user.prompt()
- summary(z.out3)
-
-#We can also plot the coefficients of each fit and compare them to the OLS fit.
-#This functionality is built into the quantile fitting routine and does not
-#require the user to run the fit object through Zelig's simulation utilities.
- user.prompt()
- plot(summary(z.out3))
-
-#Using setx, we can specify the levels of covariates as before.
- user.prompt()
- x.bottom <- setx(z.out3, income=quantile(engel$income, 0.25))
- x.top <- setx(z.out3, income=quantile(engel$income, 0.75))
-
-#We run simulations without our counterfactual values. The simulation reruns
-#every fit that was specified in zelig().
- user.prompt()
- s.out3 <- sim(z.out3, x = x.bottom, x1 = x.top)
-
-#Summarize the results of all of the fits at once, or plot the results one as a time.
- user.prompt()
- summary(s.out3)
- user.prompt()
- plot(s.out3[[1]]) # You can plot any one of the sim outputs from 1 to 9
diff --git a/demo/relogit.R b/demo/relogit.R
index 894b226..841ffb5 100644
--- a/demo/relogit.R
+++ b/demo/relogit.R
@@ -35,25 +35,3 @@ user.prompt()
 
 s.out2 <- sim(z.out2, x = x.out2)
 user.prompt()
-
-
-## bounds
-user.prompt()
-z.out3 <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
-                data = mid, model = "relogit", tau = c(0.002, 0.005))
-user.prompt()
-
-summary(z.out3)
-user.prompt()
-
-x.out3 <- setx(z.out3)
-user.prompt()
-
-s.out3 <- sim(z.out3, x = x.out3)
-user.prompt()
-
-summary(s.out3)
-user.prompt()
-
-plot(s.out3)
-
diff --git a/demo/repl.R b/demo/repl.R
deleted file mode 100644
index 650f114..0000000
--- a/demo/repl.R
+++ /dev/null
@@ -1,39 +0,0 @@
-## Setting up output to be replicated
-data(turnout)
-z.out <- zelig(vote ~ race + age, model = "logit", data = turnout)
-x.out <- setx(z.out)
-set.seed(12345)
-s.out <- sim(z.out, x = x.out)
-s.out$seed <- 12345
-user.prompt()
-
-## Saving replication files
-save(turnout, z.out, s.out, file = "demo_replication.RData")
-user.prompt()
-
-## Replicating simulations assuming that the seed was saved
-load("demo_replication.RData")
-s.rep <- repl(s.out)
-identical(s.out$qi, s.rep$qi)
-user.prompt()
-
-## Replicating simulations, with previously generated parameters
-s.rep2 <- repl(s.out, prev = s.out$par)
-identical(s.rep2$qi$ev, s.out$qi$ev)
-user.prompt()
-
-## Replicating analyses on original data, assumes that the 
-##  data frame is in the workspace with the original name
-z.rep <- repl(z.out)
-identical(coef(z.rep), coef(z.out))
-user.prompt()
-
-## Replicating analyses on new data
-z.alt <- repl(z.out, data = turnout[1:100,])
-
-## Saving replication files
-save(turnout, z.out, s.out, file = "demo_replication.RData")
-user.prompt()
-
-##  Cleaning up the directory
-unlink("demo_replication.RData")
diff --git a/demo/robust.R b/demo/robust.R
deleted file mode 100644
index a816aed..0000000
--- a/demo/robust.R
+++ /dev/null
@@ -1,66 +0,0 @@
-#####
-##### robust estimation of covariance matrix
-#####
- 
-#####  Example 1: linear least squares regression with
-#####             heteroskedasticity consistent standard errors (default) 
-# Attach sample data and variable names:  
-data(macro)
-
-# Fit the model with robust standard error
-user.prompt()
-z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro, robust = TRUE)
-user.prompt()
-print(summary(z.out1))
-
-# usual procedure applies
-user.prompt()
-x <- setx(z.out1)
-user.prompt()
-s.out1 <- sim(z.out1, x = x)
-user.prompt()
-print(summary(s.out1))
-user.prompt()
-plot(s.out1)
-
-#####  Example 2: linear least squares regression with
-#####             heteroskedasticity and autocorrelation consistent standard errors 
-
-# Attach sample data and variable names:
-data(hoff)
-# Fit the model with robust standard error
-user.prompt()
-z.out2 <- zelig(L2SocSec ~ Just503D + Just503R + Just503D:RGovDumy +
-                Just503R:I(1-RGovDumy), model = "ls", data = hoff,
-                robust = list(method="vcovHAC", order.by=hoff$year, adjust=TRUE))
-user.prompt()
-print(summary(z.out2))
-
-#####  Example 3: weibull regression with
-#####             heteroskedasticity consistent standard errors
-#####             and using invest as a cluster
-
-# Attach sample data and variable names:
-data(coalition)
-# Fit the model with robust standard error
-user.prompt()
-z.out3 <- zelig(Surv(duration, ciep12) ~ polar + numst2 +
-                crisis, model = "weibull", data = coalition,
-                cluster = "invest", robust = TRUE)
-user.prompt()
-print(summary(z.out3))
-
-
-#####
-##### Example 4: logit regression with heteroskedasticity and
-#####            autocorrelation consistent standard errors
-
-# Attach sample data and variable names
-data(turnout)
-# Fit the model with robust standrad error
-user.prompt()
-z.out4 <- zelig(vote ~ race + educate, model = "logit",
-                data = turnout, robust=TRUE)
-user.prompt()
-print(summary(z.out4))
- 
diff --git a/demo/roc.R b/demo/roc.R
deleted file mode 100644
index 068750d..0000000
--- a/demo/roc.R
+++ /dev/null
@@ -1,8 +0,0 @@
-data(turnout)
-z.out1 <- zelig(vote ~ race + educate + age, model = "logit", 
-                data = turnout)
-user.prompt()
-z.out2 <- zelig(vote ~ race + educate, model = "logit", 
-                data = turnout)
-user.prompt()
-rocplot(z.out1$y, z.out2$y, fitted(z.out1), fitted(z.out2))
diff --git a/demo/strata.R b/demo/strata.R
deleted file mode 100644
index 2b95562..0000000
--- a/demo/strata.R
+++ /dev/null
@@ -1,27 +0,0 @@
-
-data(turnout)
-z.out1 <- zelig(vote ~ educate + age + income, model = "logit", data = turnout, by = "race")
-user.prompt()
-##  Viewing the regression output:
-summary(z.out1)
-
-##  Using setx to generate baseline and alternative values for the
-##  explanatory variables.  
-user.prompt()
-x.out1 <- setx(z.out1, age = 65)
-
-##  Simulating quantities of interest (predicted probabilites, risk
-##  ratios, and risk differences):
-user.prompt()
-s.out1 <- sim(z.out1, x = x.out1)
-user.prompt()
-## Summarizing the simulated quantities of interest:
-summary(s.out1)
-user.prompt()
-
-## Conditional prediction:
-x.out2 <- setx(z.out1, fn = NULL, cond = TRUE)
-s.out2 <- sim(z.out1, x = x.out2)
-user.prompt()
-summary(s.out2)
-
diff --git a/demo/sur.R b/demo/sur.R
deleted file mode 100644
index e0ca802..0000000
--- a/demo/sur.R
+++ /dev/null
@@ -1,19 +0,0 @@
-data(grunfeld)
-
-formula<-list(mu1=Ige~Fge+Cge, mu2=Iw~Fw+Cw)
-user.prompt() 
-z.out<-zelig(formula=formula,model="sur",data=grunfeld)
-user.prompt()
-summary(z.out)
-user.prompt() 
- 
- x.out <- setx(z.out)
-user.prompt() 
- 
- s.out <- sim(z.out,x=x.out)
-user.prompt() 
-  
-summary(s.out)
-user.prompt()
-
-plot(s.out)
diff --git a/demo/threesls.R b/demo/threesls.R
deleted file mode 100644
index ade9a9a..0000000
--- a/demo/threesls.R
+++ /dev/null
@@ -1,53 +0,0 @@
-# Example 1
-
-
-# Attach sample data and variable names (Kmenta's simple supply/demand model). 
-# 	q 	food consumption per capita.
-#	p 	ratio of food prices to general consumer prices.
-# 	d 	disposable income in constant dollars.
-#     f 	ratio of preceding year's prices received by farmers to general consumer prices.
-#	a 	time in years.
-
-
-data(kmenta)
-
-# Suppose that we want to estimate the following list of equations
-
-#	q ~ p + d
-#	q ~ p + f + a
-
-
-# with the following instrumental variable
-#	inst <- ~ d + f + a
-
-
-# Write the formula according to Zelig syntax
-
-formula<-list(	mu1= q ~ p + d,
-		mu2=q ~ p + f + a,
-		inst =~ d + f + a)
-
-# estimate model and present a summary
-user.prompt()
-z.out<-zelig(formula=formula, model ="threesls",data=kmenta)
-user.prompt()
-summary(z.out)
-
-# Set explanatory variables to their default (mean/mode) values
-user.prompt()
-x.out<-setx(z.out)
-
-# Simulate the quantities of interest and present a summary
-
-user.prompt()
-s.out<-sim(z.out,x=x.out)
-user.prompt()
-summary(s.out)
-
-# plot the quantities of interest for each equation
-
-user.prompt()
-plot(s.out)
-
-
-
diff --git a/demo/tobit.R b/demo/tobit.R
deleted file mode 100644
index 5a8ec98..0000000
--- a/demo/tobit.R
+++ /dev/null
@@ -1,47 +0,0 @@
-## Attaching the example dataset:
-data(tobin)
-
-## Estimating the model using tobit.bayes:
-z.out <- zelig(durable ~ age + quant, data = tobin, model = "tobit")
-user.prompt()
-
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting explanatory variables to their default(mean/mode)
-## values, with high (80th percentile) and low (20th percentile) 
-## liquidity ratio(\texttt{quant}):
-x.high <- setx(z.out, quant = quantile(tobin$quant, prob = 0.8))
-x.low <- setx(z.out, quant = quantile(tobin$quant, prob = 0.2)) 
-user.prompt()
-
-## Estimating the first difference for the effect of
-## high versus low liquidity ratio:
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
-
-
-
-
-
-
-
diff --git a/demo/tobit.bayes.R b/demo/tobit.bayes.R
deleted file mode 100644
index 30726a3..0000000
--- a/demo/tobit.bayes.R
+++ /dev/null
@@ -1,59 +0,0 @@
-## Attaching the example dataset:
-data(tobin)
-
-## Estimating the model using tobit.bayes:
-z.out <- zelig(durable ~ age + quant, model = "tobit.bayes",
-                  data = tobin, verbose=TRUE)
-user.prompt()
-
-## Checking for convergence before summarizing the estimates:
-geweke.diag(z.out$coefficients)
-user.prompt()
-
-heidel.diag(z.out$coefficients)
-user.prompt()
-
-raftery.diag(z.out$coefficients)
-user.prompt()
-
-## summarizing the output
-summary(z.out)
-user.prompt()
-
-## Setting values for the explanatory variables to 
-## their sample averages:
-x.out <- setx(z.out)
-user.prompt()
-
-## Simulating quantities of interest from the posterior 
-## distribution given x.out:
-s.out1 <- sim(z.out, x = x.out)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out1)
-user.prompt()
-
-## Simulating First Differences:
-## Setting explanatory variables to their default(mean/mode)
-## values, with high (80th percentile) and low (20th percentile) 
-## liquidity ratio(\texttt{quant}):
-x.high <- setx(z.out, quant = quantile(tobin$quant, prob = 0.8))
-x.low <- setx(z.out, quant = quantile(tobin$quant, prob = 0.2)) 
-user.prompt()
-
-## Estimating the first difference for the effect of
-## high versus low liquidity ratio:
-s.out2 <- sim(z.out, x = x.high, x1 = x.low)
-user.prompt()
-
-## Summarizing the simulation results:
-summary(s.out2)
-
-
-
-
-
-
-
-
diff --git a/demo/twosls.R b/demo/twosls.R
index 940070c..771521e 100644
--- a/demo/twosls.R
+++ b/demo/twosls.R
@@ -1,56 +1,21 @@
-#	Example 1
-
-# 	Attach sample data and variable names.
-#	year	year
-#	C	consumption
-#	P	corporate profits
-#	P1	previous year corporate profits 
-#	Wtot	total wage	
-#	Wp	private wage bill
-#	Wg	goverment wage bill
-#	I	investment
-#	K1	previous year capital stock
-#	X	GNP
-#	G	govermment spending
-#	T	Taxes
-#	X1	previous year GNP
-# 	Tm	Year-1931
-
 data(klein)
 
-# Suppose that we want to estimate the following list of equations
-
-#	C~Wtot + P + P1
-#	I~P + P1 + K1
-#	Wp~ X + X1 + Tm
+formula <- list(
+                mu1 = C ~ Wtot + P1,
+                mu2 = I ~ P + P1 + K1,
+                mu3 = Wp ~ X + X1 + Tm,
+                inst= ~ P1 + K1 + X1 + Tm + Wg + G
+                )
 
-# with the following instrumental variable
-#	~ P1 + K1 + X1 + Tm + Wg + G
+z.out<-zelig(formula=formula, model="twosls",data=klein, cite=F)
 
-# Write the formula conform Zelig syntax and the instrumental variable (required for "2sls"):
-formula <- list(mu1=C~Wtot + P + P1,
-               mu2=I~P + P1 + K1,
-               mu3=Wp~ X + X1 + Tm,
-               inst= ~ P1 + K1 + X1 + Tm + Wg + G)
+x.out <-setx(z.out)
 
-# Estimate model and present a summary:
-user.prompt()
-z.out<-zelig(formula=formula, model="twosls",data=klein)
-user.prompt()
-summary(z.out)
-
-# Set explanatory variables to their default (mean/mode) values
-
-user.prompt()
-x.out <-setx(z.out,x=x.out)
-
-# Simulate quantities of interests and present a summary
-
-user.prompt()
 s.out <-sim(z.out,x=x.out)
-user.prompt()
+
 summary(s.out)
 
+
 # Plot
 
 user.prompt()
diff --git a/demo/vertci.R b/demo/vertci.R
deleted file mode 100644
index cb5033c..0000000
--- a/demo/vertci.R
+++ /dev/null
@@ -1,42 +0,0 @@
-
-##  Attaching the sample turnout dataset:
-data(turnout)
-
-##  Estimate the model:
-user.prompt()
-z.out <- zelig(vote ~ race + educate + age + I(age^2) + income,
-               model = "logit", data = turnout)
-user.prompt()
-summary(z.out)
-
-##  Creating setx structures with education set to high school and
-##  post-college levels, for the whole range of the age variable.  
-user.prompt()
-x.low <- setx(z.out, educate = 12, age = 18:95)
-x.high <- setx(z.out, educate = 16, age = 18:95)
-
-##  Using sim to generate the simulated predicted probabilites:
-user.prompt()
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-user.prompt()
-plot.ci(s.out, xlab = "Age in Years",
-        ylab = "Predicted Probability of Voting",
-        main = "Effect of Education and Age on Voting Behavior")
-user.prompt()
-legend(45, 0.55, legend = c("College Education (16 years)",
-       "High School Education (12 years)"), col = c("blue","red"), 
-       lty = c("solid"))
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/demo/weibull.R b/demo/weibull.R
deleted file mode 100644
index f9728b6..0000000
--- a/demo/weibull.R
+++ /dev/null
@@ -1,39 +0,0 @@
-data(coalition)
-
-##  Creating a censored object for the dependent variable using
-##  Surv(duration, ciep12), where duration is the dependent variable
-##  (number of days alive during the observation period), and ciep12
-##  is the censoring indicator (coded 0 if alive and 1 if dead at the
-##  end of the observation period), and regressing this censored
-##  object using the selected explanatory variables:  
-user.prompt()
-z.out <- zelig(Surv(duration, ciep12) ~ invest + polar + numst2 + crisis,
-               model = "weibull", data = coalition, robust = TRUE)
-user.prompt()
-##  Viewing the regression output.  Note that the Weibull model
-##  differs from the exponential model in that the Weibull has an
-##  optional scale parameter.  (The exponential is a special case of
-##  the Weibull with scale set to 1.)
-summary(z.out)
-
-##  Setting the explanatory variables at their default values
-##  (mode for factor variables and mean for non-factor variables).
-user.prompt()
-x.out <- setx(z.out)
-
-##  Simulating draws from the posterior distribution.
-user.prompt()
-s.out <- sim(z.out, x = x.out)
-user.prompt()
-##  Viewing the simulated quantities of interest (in this case, the
-##  expected value is the ceteris paribus predicted duration): 
-summary(s.out)
-
-##  Plotting the differences in the expected values for the Weibull
-##  predictions:
-user.prompt()
-plot(s.out)
-
-
-
-
diff --git a/inst/doc/.latex2html-init b/inst/doc/.latex2html-init
deleted file mode 100644
index 92e6b95..0000000
--- a/inst/doc/.latex2html-init
+++ /dev/null
@@ -1,244 +0,0 @@
-#LaTeX2HTML Version 96.1 : dot.latex2html-init
-#
-### Command Line Argument Defaults #######################################
-
-$MAX_SPLIT_DEPTH = 8;	# Stop making separate files at this depth
-
-$MAX_LINK_DEPTH = 4;    # Stop showing child nodes at this depth   
-
-$NOLATEX = 0;           # 1 = do not pass unknown environments to Latex
-
-$EXTERNAL_IMAGES = 0;   # 1 = leave the images outside the document 
-
-$ASCII_MODE = 0;        # 1 = do not use any icons or internal images
-
-# 1 =  use links to external postscript images rather than inlined bitmap
-# images.
-$PS_IMAGES = 0;
-
-$TITLE = $default_title;      # The default is "No Title" 
-
-# $STYLESHEET_CASCADE = "<link rel='stylesheet' href='/gking.css' MEDIA='screen' TYPE='text/css'>";
-
-# add new UI Javascript to HEAD section. -rmesard 30-nov-2005
-# $LATEX2HTML_META = "<script language='Javascript' src='/gking.js'></script>";
-
-$DESTDIR = '';         # Put the result in this directory 
-
-# When this is set, the generated HTML files will be placed in the 
-# current directory. If set to 0 the default behaviour is to create (or reuse)
-# another file directory.
-$NO_SUBDIR = 0;
-
-
-# Supply your own string if you don't like the default <Name> <Date>
-$ADDRESS = "Gary King \n$address_data[1]<script language='Javascript'>displayFooter()</script>";
-
-$NO_NAVIGATION = 0;	# 1 = do not put a navigation panel at the top of each page
-
-# Put navigation links at the top of each  page.  If  the page  exceeds
-# $WORDS_IN_PAGE  number of words then put one at the bottom of the page.
-$AUTO_NAVIGATION = 0;
-
-# Put a link to the index page in  the  navigation  panel
-$INDEX_IN_NAVIGATION = 1;
-
-# Put a link to the table of contents  in  the  navigation  panel
-$CONTENTS_IN_NAVIGATION = 1;
-
-# Put a link to the next logical page  in  the  navigation  panel
-$NEXT_PAGE_IN_NAVIGATION = 1;
-
-# Put a link to the previous logical page  in  the  navigation  panel
-$PREVIOUS_PAGE_IN_NAVIGATION = 1;
-
-$INFO = 1;              # 0 = do not make a "About this document..." section 
-
-# Reuse images generated during previous runs
-$REUSE = 2;
-
-# When this is 1, the section numbers are shown. The section numbers should 
-# then match those that would have bee produced by LaTeX.
-# The correct section numbers are obtained from the $FILE.aux file generated 
-# by LaTeX.
-# Hiding the seciton numbers encourages use of particular sections 
-# as standalone documents. In this case the cross reference to a section 
-# is shown using the default symbol rather than the section number.
-$SHOW_SECTION_NUMBERS = 0;
-
-### Other global variables ###############################################
-$CHILDLINE = "<BR> <HR>\n";
-
-# This is the line width measured in pixels and it is used to right justify
-# equations and equation arrays; 
-$LINE_WIDTH = 500;		
-
-# Used in conjunction with AUTO_NAVIGATION
-$WORDS_IN_PAGE = 300;	
-
-# Affects ONLY the way accents are processed 
-$default_language = 'english';	
-#$default_language = 'japanese';	
-
-# The value of this variable determines how many words to use in each 
-# title that is added to the navigation panel (see below)
-# 
-$WORDS_IN_NAVIGATION_PANEL_TITLES = 4;
-
-# This number will determine the size of the equations, special characters,
-# and anything which will be converted into an inlined image
-# *except* "image generating environments" such as "figure", "table" 
-# or "minipage".
-# Effective values are those greater than 0.
-# Sensible values are between 0.1 - 4.
-$MATH_SCALE_FACTOR = 1.6;
-
-# This number will determine the size of 
-# image generating environments such as "figure", "table" or "minipage".
-# Effective values are those greater than 0.
-# Sensible values are between 0.1 - 4.
-$FIGURE_SCALE_FACTOR = 1.6;
-
-
-#  If this is set then intermediate files are left for later inspection.
-#  This includes $$_images.tex and $$_images.log created during image
-#  conversion.
-#  Caution: Intermediate files can be *enormous*.
-$DEBUG = 0;
-
-#  If both of the following two variables are set then the "Up" button
-#  of the navigation panel in the first node/page of a converted document
-#  will point to $EXTERNAL_UP_LINK. $EXTERNAL_UP_TITLE should be set
-#  to some text which describes this external link.
-$EXTERNAL_UP_LINK = "";
-$EXTERNAL_UP_TITLE = "";
-
-# If this is set then the resulting HTML will look marginally better if viewed 
-# with Netscape.
-$NETSCAPE_HTML = 0;
-
-# Valid paper sizes are "letter", "legal", "a4","a3","a2" and "a0"
-# Paper sizes has no effect other than in the time it takes to create inlined
-# images and in whether large images can be created at all ie
-#  - larger paper sizes *MAY* help with large image problems 
-#  - smaller paper sizes are quicker to handle
-$PAPERSIZE = "a4";
-
-# Replace "english" with another language in order to tell LaTeX2HTML that you 
-# want some generated section titles (eg "Table of Contents" or "References")
-# to appear in a different language. Currently only "english" and "french"
-# is supported but it is very easy to add your own. See the example in the
-# file "latex2html.config" 
-$TITLES_LANGUAGE = "english";
-
-### Navigation Panel ##########################################################
-#
-# The navigation panel is constructed out of buttons and section titles.
-# These can be configured in any combination with arbitrary text and 
-# HTML tags interspersed between them. 
-# The buttons available are:
-# $PREVIOUS - points to the previous section
-# $UP  - points up to the "parent" section
-# $NEXT - points to the next section
-# $NEXT_GROUP - points to the next "group" section
-# $PREVIOUS_GROUP - points to the previous "group" section
-# $CONTENTS - points to the contents page if there is one
-# $INDEX - points to the index page if there is one
-#
-# If the corresponding section exists the button will contain an
-# active link to that section. If the corresponding section does
-# not exist the button will be inactive.
-#
-# Also for each of the $PREVIOUS $UP $NEXT $NEXT_GROUP and $PREVIOUS_GROUP
-# buttons there are equivalent $PREVIOUS_TITLE, $UP_TITLE, etc variables
-# which contain the titles of their corresponding sections. 
-# Each title is empty if there is no corresponding section.
-#
-# The subroutine below constructs the navigation panels in each page.
-# Feel free to mix and match buttons, titles, your own text, your logos,
-# and arbitrary HTML (the "." is the Perl concatenation operator).
-sub top_navigation_panel {
-
-    # call script func from gking.js to open new UI 
-    # this should be the 1st thing after BODY tag -rmesard 30-nov-2005
-    "<script language='Javascript'>displayHeader()</script>\n" .
-
-    # Now add a few buttons with a space between them
-    # "$NEXT $UP $PREVIOUS $CONTENTS $INDEX $CUSTOM_BUTTONS" .
-    
-    # "<BR>\n" .		# Line break
-      "<table border='0' cellpadding='0' cellspacing='0' class='latexnav'>" .
-      "<tr><td class='latexnavhome'>" .
-      "<a href='/'>" .
-#      "<img src='/images/gking_name_sm.gif' height='30' width='119' border='0' alt='Gary King Homepage' />" .
-      "</a></td>" .
-	
-   # try for the contents link
-   # ($CONTENTS_TITLE ? "<td>c: $CONTENTS_TITLE </td>\n" : undef) .
-     
-   # ... and the ``previous'' title
-    ($PREVIOUS_TITLE ? "<td> Previous: $PREVIOUS_TITLE </td>\n" : undef) .
-
-    # Similarly with the ``up'' title ...
-    ($UP_TITLE ? "<td> Up: $UP_TITLE </td>\n" : undef) . 
-
-    # If ``next'' section exists, add its title to the navigation panel
-    ($NEXT_TITLE ? "<td> Next: $NEXT_TITLE </td>\n" : undef) . 
-    
- 
-   
-    #  Line Break, horizontal rule (3-d dividing line) and new paragraph  
-    "</tr></table>\n" .
-    "<BR> <P>\n"		
-}
-
-sub bot_navigation_panel {
-
-    #  Start with a horizontal rule (3-d dividing line)
-    "<HR>"			
-    
-    # Now add a few buttons with a space between them
-    # "$NEXT $UP $PREVIOUS $CONTENTS $INDEX $CUSTOM_BUTTONS" .
-    
-    # "<BR>\n" .		# Line break
-	
-    # If ``next'' section exists, add its title to the navigation panel
-    # ($NEXT_TITLE ? "<B> Next:</B> $NEXT_TITLE\n" : undef) . 
-    
-    # Similarly with the ``up'' title ...
-    # ($UP_TITLE ? "<B>Up:</B> $UP_TITLE\n" : undef) . 
- 
-    # ... and the ``previous'' title
-    # ($PREVIOUS_TITLE ? "<B> Previous:</B> $PREVIOUS_TITLE\n" : undef)
-
-}
-
-sub meta_information {
-    local($_) = @_;
-    # Cannot have nested HTML tags...
-    do { s/<[^>]*>//g;
-        "<SCRIPT LANGUAGE=\"Javascript\" SRC=\"/gking.js\"></SCRIPT>\n" .
-        "<META NAME=\"description\" CONTENT=\"$_\">\n" .
-        "<META NAME=\"keywords\" CONTENT=\"$FILE\">\n" .
-        "<META NAME=\"resource-type\" CONTENT=\"document\">\n" .
-        "<META NAME=\"distribution\" CONTENT=\"global\">\n"
-    } if $_;
-} 
-
- our(%custom_filenames);
- sub custom_title_hook {
- my($sec_name) = shift;
-   $sec_name=~tr/[a-z][A-Z][0-9] /_/sc;
-   my(@words)= split(/\s+/,$sec_name);
-   my($fn)= join("_", at words[0..2]);
-   $fn=substr($fn,0,22);
-   $fn=~s/(.*?)_*$/$1/;
-   $custom_filenames{"$fn"}++;
-   if ($custom_filenames{"$fn"}>1) {
-        $fn=$fn . $custom_filenames{"$fn"};
-   }
-   return($fn);
- }
- $CUSTOM_TITLES=1;
-
-1;	# This must be the last line
diff --git a/inst/doc/bl.pdf b/inst/doc/bl.pdf
deleted file mode 100644
index e882b55..0000000
Binary files a/inst/doc/bl.pdf and /dev/null differ
diff --git a/inst/doc/blogit.Rnw b/inst/doc/blogit.Rnw
deleted file mode 100644
index 841ad81..0000000
--- a/inst/doc/blogit.Rnw
+++ /dev/null
@@ -1,349 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=blogit}
-\include{zinput}
-
-%\VignetteIndexEntry{Bivariate Logistic Regression for Two Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, VGAM}
-%\VignetteKeyWords{model,logistic regression, dichotomous}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-
-\section{{\tt blogit}: Bivariate Logistic Regression for Two
-Dichotomous Dependent Variables}\label{blogit}
-
-Use the bivariate logistic regression model if you have two binary
-dependent variables $(Y_1, Y_2)$, and wish to model them jointly as a
-function of some explanatory variables.  Each pair of dependent
-variables $(Y_{i1}, Y_{i2})$ has four potential outcomes, $(Y_{i1}=1,
-Y_{i2}=1)$, $(Y_{i1}=1, Y_{i2}=0)$, $(Y_{i1}=0, Y_{i2}=1)$, and
-$(Y_{i1}=0, Y_{i2}=0)$.  The joint probability for each of these four
-outcomes is modeled with three systematic components: the marginal
-Pr$(Y_{i1} = 1)$ and Pr$(Y_{i2} = 1)$, and the odds ratio $\psi$,
-which describes the dependence of one marginal on the other.  Each of
-these systematic components may be modeled as functions of (possibly
-different) sets of explanatory variables.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(list(mu1 = Y1 ~ X1 + X2 , 
-                      mu2 = Y2 ~ X1 + X3), 
-                 model = "blogit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Input Values}
-
-In every bivariate logit specification, there are three equations which
-correspond to each dependent variable ($Y_1$, $Y_2$), and $\psi$, the
-odds ratio. You should provide a list of formulas for each equation or, 
-you may use {\tt cbind()} if the right hand side is the same for both equations
-<<InputValues.list>>=
-formulae <- list(cbind(Y1,Y2) ~ X1 + X2)
-@ 
-which means that all the explanatory variables in equations 1 and 2
-(corresponding to $Y_1$ and $Y_2$) are included, but only an intercept
-is estimated (all explanatory variables are omitted) for equation 3
-($\psi$).  
-
-You may use the function {\tt tag()} to constrain variables across
-equations:
-<<InputValues.list.mu>>=
-formulae <- list(mu1 = y1 ~ x1 + tag(x3, "x3"), 
-                 mu2 = y2 ~ x2 + tag(x3, "x3"))
-@ 
-where {\tt tag()} is a special function that constrains variables to
-have the same effect across equations.  Thus, the coefficient for {\tt
-x3} in equation {\tt mu1} is constrained to be equal to the
-coefficient for {\tt x3} in equation {\tt mu2}.  
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-
-\item {Basic Example} \label{basic.bl}
-
-Load the data and estimate the model:  
-<<BasicExample.data>>=
- data(sanction)
-## sanction
-@ 
-<<BasicExample.zelig>>=
- z.out1 <- zelig(cbind(import, export) ~ coop + cost + target, 
-                  model = "blogit", data = sanction)
-@ 
-By default, {\tt zelig()} estimates two effect parameters
-for each explanatory variable in addition to the odds ratio parameter;
-this formulation is parametrically independent (estimating
-unconstrained effects for each explanatory variable), but
-stochastically dependent because the models share an odds ratio.
-\newline \newline Generate baseline values for the explanatory
-variables (with cost set to 1, net gain to sender) and alternative
-values (with cost set to 4, major loss to sender):
-<<BasicExample.setx.low>>=
- x.low <- setx(z.out1, cost = 1)
-@ 
-<<BasicExample.setx.high>>=
-x.high <- setx(z.out1, cost = 4)
-@ 
-Simulate fitted values and first differences:  
-<<BasicExample.sim>>=
- s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
- summary(s.out1)
-@
-\begin{center}
-<<label=BasicExamplePlot,fig=true>>= 
- plot(s.out1)
-@ 
-\end{center}
-
-\item {Joint Estimation of a Model with Different Sets of Explanatory Variables}\label{sto.dep.logit}
-
-Using sample data \texttt{sanction}, estimate the statistical model, 
-with {\tt import} a function of {\tt coop} in the first equation and {\tt export} a 
-function of {\tt cost} and {\tt target} in the second equation:
-<<JointExample.zelig>>=
- z.out2 <- zelig(list(import ~ coop, export ~ cost + target), 
-                  model = "blogit", data = sanction)
- summary(z.out2)
-@ 
-Set the explanatory variables to their means:
-<<JointExample.setx>>=
- x.out2 <- setx(z.out2)
-@ 
-Simulate draws from the posterior distribution:
-<<JointExample.sim>>=
- s.out2 <- sim(z.out2, x = x.out2)
- summary(s.out2)
-@ 
-\begin{center}
-<<label=JointExamplePlot,fig=true>>= 
- plot(s.out2)
-@ 
-\end{center}
-
-\item Joint Estimation of a Parametrically and Stochastically
-Dependent Model 
-\label{pdep.l}
-  
-Using the sample data \texttt{sanction}
-The bivariate model is parametrically dependent if $Y_1$ and $Y_2$ share
-some or all explanatory variables, {\it and} the effects of the shared
-explanatory variables are jointly estimated.  For example,
-<<JointEstimation.zelig>>=
- z.out3 <- zelig(list(import ~ tag(coop,"coop") + tag(cost,"cost") + 
-                           tag(target,"target"), 
-                       export ~ tag(coop,"coop") + tag(cost,"cost") + 
-                           tag(target,"target")), 
-                       model = "blogit", data = sanction)
- summary(z.out3)
-@ 
-Note that this model only returns one parameter estimate for each of
-{\tt coop}, {\tt cost}, and {\tt target}.  Contrast this to
-Example~\ref{basic.bl} which returns two parameter estimates for each
-of the explanatory variables.  \newline \newline Set values for the
-explanatory variables:
-<<JointEstimation.setx>>=
-x.out3 <- setx(z.out3, cost = 1:4)
-@ 
-Draw simulated expected values:  
-<<JointEstimation.sim>>=
- s.out3 <- sim(z.out3, x = x.out3)
- summary(s.out3)
-@ 
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-
-For each observation, define two binary dependent variables, $Y_1$ and
-$Y_2$, each of which take the value of either 0 or 1 (in the
-following, we suppress the observation index).  We model the joint
-outcome $(Y_1$, $Y_2)$ using a marginal probability for each dependent
-variable, and the odds ratio, which parameterizes the relationship
-between the two dependent variables. Define $Y_{rs}$ such that it is
-equal to 1 when $Y_1=r$ and $Y_2=s$ and is 0 otherwise, where $r$ and
-$s$ take a value of either 0 or 1. Then, the model is defined as follows,
-
-\begin{itemize}
- 
-\item The \emph{stochastic component} is
-\begin{eqnarray*}
-  Y_{11} &\sim& \textrm{Bernoulli}(y_{11} \mid \pi_{11}) \\
-  Y_{10} &\sim& \textrm{Bernoulli}(y_{10} \mid \pi_{10}) \\
-  Y_{01} &\sim& \textrm{Bernoulli}(y_{01} \mid \pi_{01})
-\end{eqnarray*}
-where $\pi_{rs}=\Pr(Y_1=r, Y_2=s)$ is the joint probability, and
-$\pi_{00}=1-\pi_{11}-\pi_{10}-\pi_{01}$.
-
-
-\item The \emph{systematic components} model the marginal probabilities,
-  $\pi_j=\Pr(Y_j=1)$, as well as the odds ratio.  The odds ratio
-  is defined as $\psi = \pi_{00} \pi_{01}/\pi_{10}\pi_{11}$ and
-  describes the relationship between the two outcomes.  Thus, for each
-  observation we have
-\begin{eqnarray*}
-\pi_j & = & \frac{1}{1 + \exp(-x_j \beta_j)} \quad \textrm{ for} \quad
-j=1,2, \\
-\psi &= & \exp(x_3 \beta_3).
-\end{eqnarray*}
-
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the bivariate logit model
-  are the predicted joint probabilities. Simulations of $\beta_1$,
-  $\beta_2$, and $\beta_3$ (drawn from their sampling distributions)
-  are substituted into the systematic components $(\pi_1, \pi_2,
-  \psi)$ to find simulations of the predicted joint probabilities:
-\begin{eqnarray*}
-\pi_{11} & = & \left\{ \begin{array}{ll}
-                 \frac{1}{2}(\psi - 1)^{-1} - {a - \sqrt{a^2 + b}} &
-                 \textrm{for} \; \psi \ne 1 \\
-                 \pi_1 \pi_2 & \textrm{for} \; \psi = 1 
-                 \end{array} \right., \\
-\pi_{10} &=& \pi_1 - \pi_{11}, \\
-\pi_{01} &=& \pi_2 - \pi_{11}, \\
-\pi_{00} &=& 1 - \pi_{10} - \pi_{01} - \pi_{11},
-\end{eqnarray*}
-where $a = 1 + (\pi_1 + \pi_2)(\psi - 1)$, $b = -4 \psi(\psi - 1)
-\pi_1 \pi_2$, and the joint probabilities for each observation must sum
-to one.  For $n$ simulations, the expected values form an $n \times 4$
-matrix for each observation in {\tt x}.  
-
-\item The predicted values ({\tt qi\$pr}) are draws from the
-  multinomial distribution given the expected joint probabilities. 
-
-\item The first differences ({\tt qi\$fd}) for each
-  of the predicted joint probabilities are given by $$\textrm{FD}_{rs}
-  = \Pr(Y_1=r, Y_2=s \mid x_1)-\Pr(Y_1=r, Y_2=s \mid x).$$  
-  
-\item The risk ratio ({\tt qi\$rr}) for each of the predicted joint
-  probabilities are given by
-\begin{equation*}
-\textrm{RR}_{rs} = \frac{\Pr(Y_1=r, Y_2=s \mid x_1)}{\Pr(Y_1=r, Y_2=s \mid x)}
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      E[Y_{ij}(t_i=0)] \right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_{ij}(t_i=0)]$,
-    the counterfactual expected value of $Y_{ij}$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      \widehat{Y_{ij}(t_i=0)} \right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_{ij}(t_i=0)}$, the counterfactual predicted value of
-    $Y_{ij}$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "blogit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-obtain a default summary of information through {\tt summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the named vector of coefficients.   
-   \item {\tt fitted.values}: an $n \times 4$ matrix of the in-sample
-     fitted values.
-   \item {\tt predictors}: an $n \times 3$ matrix of the linear
-     predictors $x_j \beta_j$.
-   \item {\tt residuals}: an $n \times 3$ matrix of the residuals.  
-   \item {\tt df.residual}: the residual degrees of freedom.  
-   \item {\tt df.total}: the total degrees of freedom.
-   \item {\tt rss}: the residual sum of squares.  
-   \item {\tt y}: an $n \times 2$ matrix of the dependent variables.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract:
-  \begin{itemize}
-  \item {\tt coef3}: a table of the coefficients with their associated
-    standard errors and $t$-statistics.
-  \item {\tt cov.unscaled}: the variance-covariance matrix. 
-  \item {\tt pearson.resid}: an $n \times 3$ matrix of the Pearson residuals.  
-  \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as arrays indexed by simulation
-  $\times$ quantity $\times$ {\tt x}-observation (for more than one
-  {\tt x}-observation; otherwise the quantities are matrices).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected joint probabilities (or expected
-     values) for the specified values of {\tt x}.  
-   \item {\tt qi\$pr}: the simulated predicted outcomes drawn from a
-     distribution defined by the expected joint probabilities.
-   \item {\tt qi\$fd}: the simulated first difference in the
-     expected joint probabilities for the values specified in {\tt x} and
-     {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio in the predicted
-     probabilities for given {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection*{How to Cite}
-\input{cites/blogit}
-\input{citeZelig}
-\subsection*{See also}
-The bivariate logit function is part of the VGAM package by Thomas Yee \citep{YeeHas03}. In addition, advanced users may wish to refer to \texttt{help(vglm)} 
-in the VGAM library.  Additional documentation is available at
-\url{http://www.stat.auckland.ac.nz/\~\,yee}{http://www.stat.auckland.ac.nz/~yee}.Sample data are from \citep{Martin92}
-
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
diff --git a/inst/doc/blogit.pdf b/inst/doc/blogit.pdf
deleted file mode 100644
index b2fc376..0000000
Binary files a/inst/doc/blogit.pdf and /dev/null differ
diff --git a/inst/doc/bprobit.Rnw b/inst/doc/bprobit.Rnw
deleted file mode 100644
index 703a5fc..0000000
--- a/inst/doc/bprobit.Rnw
+++ /dev/null
@@ -1,385 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=bprobit}
-\include{zinput}
-%\VignetteIndexEntry{Bivariate Probit Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, VGAM}
-%\VignetteKeyWords{model, probit, logistic regression, dichotomous}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-pkg <- search()
-if(!length(grep("package:Zelig",pkg)))
-library(Zelig)
-@ 
-
-\section{{\tt bprobit}: Bivariate Probit Regression for Two
-Dichotomous Dependent Variables}\label{bprobit}
-
-Use the bivariate probit regression model if you have two binary
-dependent variables $(Y_1, Y_2)$, and wish to model them jointly as a
-function of some explanatory variables.  Each pair of dependent
-variables $(Y_{i1}, Y_{i2})$ has four potential outcomes, $(Y_{i1}=1,
-Y_{i2}=1)$, $(Y_{i1}=1, Y_{i2}=0)$, $(Y_{i1}=0, Y_{i2}=1)$, and
-$(Y_{i1}=0, Y_{i2}=0)$.  The joint probability for each of these four
-outcomes is modeled with three systematic components: the marginal
-Pr$(Y_{i1} = 1)$ and Pr$(Y_{i2} = 1)$, and the correlation parameter
-$\rho$ for the two marginal distributions.  Each of these systematic
-components may be modeled as functions of (possibly different) sets of
-explanatory variables.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(list(mu1 = Y1 ~ X1 + X2, 
-                      mu2 = Y2 ~ X1 + X3,
-                      rho = ~ 1),
-                 model = "bprobit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Input Values}
-
-In every bivariate probit specification, there are three equations
-which correspond to each dependent variable ($Y_1$, $Y_2$), and the
-correlation parameter $\rho$.  Since the correlation parameter does
-not correspond to one of the dependent variables, the model estimates
-$\rho$ as a constant by default.  Hence, only two formulas (for
-$\mu_1$ and $\mu_2$) are required.  If the explanatory variables for
-$\mu_1$ and $\mu_2$ are the same and effects are estimated separately
-for each parameter, you may use the following short hand:  
-<<InputValues.list>>=
-fml <- list(cbind(Y1,Y2) ~ X1 + X2)
-@ 
-which has the same meaning as:  
-<<InputValues.list.rho>>=
-fml <- list(mu1 = Y1 ~ X1 + X2,  
-            mu2 = Y2 ~ X1 + X2, 
-            rho = ~ 1)
-@ 
-You may use the function {\tt tag()} to constrain variables across
-equations.  The {\tt tag()} function takes a variable and a label for
-the effect parameter.  Below, the constrained effect of {\tt
-x3} in both equations is called the {\tt age} parameter:  
-<<InputValues.list.mu>>=
-fml <- list(mu1 = y1 ~ x1 + tag(x3, "age"), 
-            mu2 = y2 ~ x2 + tag(x3, "age"))
-@ 
-You may also constrain different variables across different equations
-to have the same effect.  
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-
-\item {Basic Example} \label{basic.bp}
-
-Load the data and estimate the model:  
-<<BasicExample.data>>=
- data(sanction)
-@ 
-<<BasicExample.zelig>>=
- z.out1 <- zelig(cbind(import, export) ~ coop + cost + target, 
-                  model = "bprobit", data = sanction)
-@ 
-By default, {\tt zelig()} estimates two effect parameters
-for each explanatory variable in addition to the correlation coefficient;
-this formulation is parametrically independent (estimating
-unconstrained effects for each explanatory variable), but
-stochastically dependent because the models share a correlation parameter.
-\newline \newline Generate baseline values for the explanatory
-variables (with cost set to 1, net gain to sender) and alternative
-values (with cost set to 4, major loss to sender):
-<<BasicExample.setx>>=
- x.low <- setx(z.out1, cost = 1)
- x.high <- setx(z.out1, cost = 4)
-@ 
-Simulate fitted values and first differences:  
-<<BasicExample.sim>>=
- s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
- summary(s.out1)
-@ 
-\begin{center}
-<<label=BasicExamplePlot,fig=true>>= 
- plot(s.out1)
-@ 
-\end{center}
-
-
-\item {Joint Estimation of a Model with Different Sets of Explanatory Variables}\label{sto.dep.probit}
-
-Using the sample data \texttt{sanction}, estimate the statistical model, 
-with {\tt import} a function of {\tt coop} in the first equation and 
-{\tt export} a function of {\tt cost} and {\tt target} in the second equation:
-<<JointEstimation.list>>=
- fml2 <- list(mu1 = import ~ coop, 
-               mu2 = export ~ cost + target)
-@ 
-<<JointEstimation.zelig>>=
- z.out2 <- zelig(fml2, model = "bprobit", data = sanction)
- summary(z.out2)
-@ 
-Set the explanatory variables to their means:
-<<JointEstimation.setx>>=
- x.out2 <- setx(z.out2)
-@ 
-Simulate draws from the posterior distribution:
-<<JointEstimation.sim>>=
- s.out2 <- sim(z.out2, x = x.out2)
- summary(s.out2)
-@
-\begin{center}
-<<label=JointEstimationPlot,fig=true>>= 
- plot(s.out2)
-@ 
-\end{center}
-
-
-\item Joint Estimation of a Parametrically and Stochastically
-Dependent Model 
-\label{pdep.p}
-  
-Using the sample data \texttt{sanction}.     
-The bivariate model is parametrically dependent if $Y_1$ and $Y_2$ share
-some or all explanatory variables, {\it and} the effects of the shared
-explanatory variables are jointly estimated.  For example,
-<<JointEstimationParam.list>>= 
- fml3 <- list(mu1 = import ~ tag(coop,"coop") + tag(cost,"cost") + 
-                          tag(target,"target"), 
-               mu2 = export ~ tag(coop,"coop") + tag(cost,"cost") + 
-                          tag(target,"target"))
-@ 
-<<JointEstimationParam.zelig>>= 
- z.out3 <- zelig(fml3, model = "bprobit", data = sanction)
- summary(z.out3)
-@ 
-
-Note that this model only returns one parameter estimate for each of
-{\tt coop}, {\tt cost}, and {\tt target}.  Contrast this to
-Example~\ref{basic.bp} which returns two parameter estimates for each
-of the explanatory variables.  \newline \newline Set values for the
-explanatory variables:
-<<JointEstimationParam.setx>>= 
- x.out3 <- setx(z.out3, cost = 1:4)
-@ 
-Draw simulated expected values:  
-<<JointEstimationParam.sim>>= 
- s.out3 <- sim(z.out3, x = x.out3)
- summary(s.out3)
-@ 
-
-\end{enumerate}
-
-\subsubsection{Model}
-
-For each observation, define two binary dependent variables, $Y_1$ and
-$Y_2$, each of which take the value of either 0 or 1 (in the
-following, we suppress the observation index $i$).  We model the joint
-outcome $(Y_1$, $Y_2)$ using two marginal probabilities for each
-dependent variable, and the correlation parameter, which describes how
-the two dependent variables are related. 
-%Define $Y_{rs}$ such that it
-%is equal to 1 when $Y_1=r$ and $Y_2=s$ and is 0 otherwise where $r$
-%and $s$ take a value of either 0 or 1. Then, the model is defined as
-%follows,
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by two latent (unobserved)
-  continuous variables which follow the bivariate Normal distribution:
-\begin{eqnarray*}
-  \left ( \begin{array}{c} 
-      Y_1^* \\
-      Y_2^* 
-    \end{array}
-  \right ) &\sim &  
-  N_2 \left \{ \left ( 
-      \begin{array}{c}
-        \mu_1 \\ \mu_2
-      \end{array} \right ), \left( \begin{array}{cc}
-                 1 & \rho \\
-                 \rho & 1 
-                 \end{array} \right) \right\},
-\end{eqnarray*}
-where $\mu_j$ is a mean for $Y_j^*$ and $\rho$ is a scalar correlation
-parameter. The following observation mechanism links the observed
-dependent variables, $Y_j$, with these latent variables
-\begin{eqnarray*}
-Y_j & = & \left \{ \begin{array}{cc}
-                   1 & {\rm if} \; Y_j^* \ge 0, \\
-                   0 & {\rm otherwise.}
-                   \end{array} 
-                   \right.
-\end{eqnarray*}
-
-%Alternatively, the stochastic component for the observed dependent
-%variables can be written as
-%\begin{eqnarray*}
-%  Y_{11} &\sim& \textrm{Bernoulli}(y_{11} \mid \pi_{11}) \\
-%  Y_{10} &\sim& \textrm{Bernoulli}(y_{10} \mid \pi_{10}) \\
-% Y_{01} &\sim& \textrm{Bernoulli}(y_{01} \mid \pi_{01})
-%\end{eqnarray*}
-%where $\pi_{rs}=\Pr(Y_1=r, Y_2=s)$ is the joint probability, and
-%$\pi_{00}=1-\pi_{11}-\pi_{10}-\pi_{01}$. Each of these joint
-%probabilities is modeled using the bivariate normal cumulative
-%distribution function.
-
-\item The \emph{systematic components} for each observation are 
-  \begin{eqnarray*}
-    \mu_j & = & x_{j} \beta_j \quad {\rm for} \quad j=1,2, \\
-    \rho & = & \frac{\exp(x_3 \beta_3) - 1}{\exp(x_3 \beta_3) + 1}.
-\end{eqnarray*}
-
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-For $n$ simulations, expected values form an $n \times 4$
-matrix.  
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the binomial probit model
-  are the predicted joint probabilities. Simulations of $\beta_1$,
-  $\beta_2$, and $\beta_3$ (drawn form their sampling distributions)
-  are substituted into the systematic components, to find simulations
-  of the predicted joint probabilities $\pi_{rs}=\Pr(Y_1=r, Y_2=s)$:
-\begin{eqnarray*}
-\pi_{11} &= \Pr(Y_1^* \geq 0 , Y_2^* \geq 0) &= \int_0^{\infty}
-\int_0^{\infty} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^* \\
-\pi_{10} &= \Pr(Y_1^* \geq 0 , Y_2^* < 0)  &= \int_0^{\infty}
-\int_{-\infty}^{0} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^*\\
-\pi_{01} &= \Pr(Y_1^* < 0 , Y_2^* \geq 0)  &= \int_{-\infty}^{0}
-\int_0^{\infty} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^*\\
-\pi_{00} &= \Pr(Y_1^* < 0 , Y_2^* < 0)  &= \int_{-\infty}^{0}
-\int_{-\infty}^{0} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^*\\
-\end{eqnarray*}
-where $r$ and $s$ may take a value of either 0 or 1, $\phi_2$ is the
-bivariate Normal density.
-  
-\item The predicted values ({\tt qi\$pr}) are draws from the
-  multinomial distribution given the expected joint probabilities.  
-
-\item The first difference ({\tt qi\$fd}) in each of the predicted joint
-  probabilities are given by
-  $$\textrm{FD}_{rs} = \Pr(Y_1=r, Y_2=s \mid x_1)-\Pr(Y_1=r, Y_2=s
-  \mid x).$$
-  
-\item The risk ratio ({\tt qi\$rr}) for each of the predicted joint
-  probabilities are given by
-\begin{equation*}
-\textrm{RR}_{rs} = \frac{\Pr(Y_1=r, Y_2=s \mid x_1)}{\Pr(Y_1=r, Y_2=s \mid x)}.
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      E[Y_{ij}(t_i=0)] \right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_{ij}(t_i=0)]$,
-    the counterfactual expected value of $Y_{ij}$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      \widehat{Y_{ij}(t_i=0)}\right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_{ij}(t_i=0)}$, the counterfactual predicted value of
-    $Y_{ij}$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\, x,
-  model = "bprobit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-obtain a default summary of information through
-\texttt{summary(z.out)}.  Other elements available through the {\tt
-  \$} operator are listed below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the named vector of coefficients.   
-   \item {\tt fitted.values}: an $n \times 4$ matrix of the in-sample
-     fitted values.
-   \item {\tt predictors}: an $n \times 3$ matrix of the linear
-     predictors $x_j \beta_j$.
-   \item {\tt residuals}: an $n \times 3$ matrix of the residuals.  
-   \item {\tt df.residual}: the residual degrees of freedom.  
-   \item {\tt df.total}: the total degrees of freedom.
-   \item {\tt rss}: the residual sum of squares.  
-   \item {\tt y}: an $n \times 2$ matrix of the dependent variables.  
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-  \item {\tt coef3}: a table of the coefficients with their associated
-    standard errors and $t$-statistics.
-  \item {\tt cov.unscaled}: the variance-covariance matrix. 
-  \item {\tt pearson.resid}: an $n \times 3$ matrix of the Pearson residuals.  
-\end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as arrays indexed by simulation
-  $\times$ quantity $\times$ {\tt x}-observation (for more than one
-  {\tt x}-observation; otherwise the quantities are matrices).  Available quantities
-  are:  
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values (joint predicted
-     probabilities) for the specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted outcomes drawn from a
-     distribution defined by the joint predicted probabilities.
-   \item {\tt qi\$fd}: the simulated first difference in the predicted
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio in the predicted
-     probabilities for given {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection*{How to Cite}
-\input{cites/bprobit}
-\input{citeZelig}
-\subsection*{See also}
-The bivariate probit function is part of the VGAM package by Thomas Yee \citep{YeeHas03}. In addition, advanced users may wish to refer to \texttt{help(vglm)} 
-in the VGAM library.  Additional documentation is available at
-\url{http://www.stat.auckland.ac.nz/~yee}.  Sample data are from \cite{Martin92}.
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
diff --git a/inst/doc/bprobit.pdf b/inst/doc/bprobit.pdf
deleted file mode 100644
index e9d0fc7..0000000
Binary files a/inst/doc/bprobit.pdf and /dev/null differ
diff --git a/inst/doc/gamma.Rnw b/inst/doc/gamma.Rnw
deleted file mode 100644
index 0d59b08..0000000
--- a/inst/doc/gamma.Rnw
+++ /dev/null
@@ -1,252 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=gamma}
-\include{zinput}
-%\VignetteIndexEntry{Gamma Regression for Continuous, Positive Dependent Variables}
-%\VignetteDepends{Zelig, MCMCpack}
-%\VignetteKeyWords{model,regression,gamma distribution}
-%\VignettePackage{Zelig, stats}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-\section{{\tt gamma}: Gamma Regression for Continuous, Positive Dependent Variables}\label{gamma}
-
-Use the gamma regression model if you have a positive-valued dependent
-variable such as the number of years a parliamentary cabinet endures,
-or the seconds you can stay airborne while jumping.  The gamma
-distribution assumes that all waiting times are complete by the end
-of the study (censoring is not allowed).
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "gamma", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out, x1 = NULL)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for gamma regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Example}
-
-Attach the sample data: 
-<<Example.data>>=
- data(coalition)
-@ 
-Estimate the model: 
-<<Example.zelig>>=
- z.out <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
-@ 
-View the regression output:  
-<<Example.summary>>=
- summary(z.out)
-@ 
-Set the baseline values (with the ruling coalition in the minority)
-and the alternative values (with the ruling coalition in the majority)
-for X:
-<<Example.setx>>=
- x.low <- setx(z.out, numst2 = 0)
- x.high <- setx(z.out, numst2 = 1)
-@ 
-Simulate expected values ({\tt qi\$ev}) and first differences ({\tt qi\$fd}):
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.low, x1 = x.high)
-@ 
-<<Example.summary>>=
-summary(s.out)
-@ 
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>=
- plot(s.out)
-@ 
-\end{center}
-
-\subsubsection{Model}
-
-\begin{itemize}
-\item The Gamma distribution with scale parameter $\alpha$ has a
-\emph{stochastic component}:
-\begin{eqnarray*}
-Y &\sim& \textrm{Gamma}(y_i \mid \lambda_i, \alpha) \\
-f(y)  &=& \frac{1}{\alpha^{\lambda_i} \, \Gamma(\lambda_i)} \, y_i^{\lambda_i
-  - 1} \exp\left\{ -\frac{y_i}{\alpha} \right\}
-\end{eqnarray*}
-for $\alpha, \lambda_i, y_i > 0$.  \\
-
-\item The \emph{systematic component} is given by
-\begin{equation*}
-  \lambda_i = \frac{1}{x_i \beta}
-\end{equation*}
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) are simulations of the mean
-  of the stochastic component given draws of $\alpha$ and
-  $\beta$ from their posteriors:  $$E(Y) = \alpha \lambda_i.$$  
-\item The predicted values ({\tt qi\$pr}) are draws from the gamma
-  distribution for each given set of parameters $(\alpha, \lambda_i)$.
-\item If {\tt x1} is specified, {\tt sim()} also returns the
-  differences in the expected values ({\tt qi\$fd}), $$E(Y \mid x_1) -
-  E(Y \mid x)$$.
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.  
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "gamma", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$.
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from a
-     distribution defined by $(\alpha, \lambda_i)$.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     values for the specified values in {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite} 
-
-\input{cites/gamma}
-\input{citeZelig}
-
-
-\subsection* {See also}
-The gamma model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
-
-
-
-
diff --git a/inst/doc/gamma.mixed.Rnw b/inst/doc/gamma.mixed.Rnw
deleted file mode 100644
index 273b5c6..0000000
--- a/inst/doc/gamma.mixed.Rnw
+++ /dev/null
@@ -1,181 +0,0 @@
-\SweaveOpts{eval=false, results=hide, prefix.string=gammamixed}
-\include{zinput}
-%\VignetteIndexEntry{Gamma mixed effects linear regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,linear, linear regression, gamma}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt gamma.mixed}: Mixed effects gamma regression}
-\label{gamma.mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. Gamma regression models a continuous, positive dependent variable.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="gamma.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, delta | g),
-               delta= ~ tag(w1 + w2 | g)), data=mydata, model="gamma.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, delta | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt method:} a character string. The criterion is always the log-likelihood but this criterion does not have a closed form expression and must be approximated. The default approximation is {\tt "PQL"} or penalized quasi-likelihood. Alternatives are {\tt "Laplace"} or {\tt "AGQ"} indicating the Laplacian and adaptive Gaussian quadrature approximations respectively.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(coalition2)
-@
-
-Estimate model using optional arguments to specify approximation method for the log-likelihood, and the log link function for the Gamma family:
-<<Examples.zelig>>=
-z.out1 <- zelig(duration ~ invest + fract + polar + numst2 + crisis + tag(1 | country), data=coalition2, model="gamma.mixed", method="PQL",family=Gamma(link=log))
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-
-Set the baseline values (with the ruling coalition in the minority) and the alternative values (with the ruling coalition in the majority) for X:\\
-<<Examples.setx>>=
-x.high <- setx(z.out1, numst2 = 1)
-x.low <- setx(z.out1, numst2 = 0)
-@
-
-Simulate expected values ({\tt qi\$ev}) and first differences({\tt qi\$fd}): \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-\subsubsection{Mixed effects gamma regression Model}
-
-Let $Y_{ij}$ be the continuous, positive dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Gamma model with scale parameter $\alpha$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Gamma}(y_{ij} | \lambda_{ij}, \alpha)
-\end{equation*}
-where
-\begin{equation*}
-\mathrm{Gamma}(y_{ij} | \lambda_{ij}, \alpha) = \frac{1}{\alpha^{\lambda_{ij}} \, \Gamma(\lambda_{ij})} \, y_{ij}^{\lambda_{ij} - 1} \exp\left( -\frac{y_{ij}}{\alpha} \right)
-\end{equation*}
-for $\alpha, \; \lambda_{ij}, \; y_{ij} \; > 0$.
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covarance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\lambda_{ij} \equiv \frac{1}{X_{ij} \beta + Z_{ij} b_i}
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the gamma distribution for each given set of parameters $(\alpha, \lambda_{ij})$, for
-\begin{equation*}
-\lambda_{ij} = \frac{1}{X_{ij} \beta + Z_{ij} b_i}
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are simulations of the mean of the stochastic component given draws of $\alpha$, $\beta$ from their posteriors:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \alpha \lambda_{ij} = \frac{\alpha}{X_{ij} \beta}.
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in expected values, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = E(Y_{ij} | X_{ij}) - E(Y_{ij} | X_{ij}^\prime)
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite}
-
-\input{cites/gamma.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects gamma regression is part of {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/inst/doc/gamma.mixed.pdf b/inst/doc/gamma.mixed.pdf
deleted file mode 100644
index 984f399..0000000
Binary files a/inst/doc/gamma.mixed.pdf and /dev/null differ
diff --git a/inst/doc/gamma.pdf b/inst/doc/gamma.pdf
index 3c0ff8a..fb369d7 100644
Binary files a/inst/doc/gamma.pdf and b/inst/doc/gamma.pdf differ
diff --git a/inst/doc/gamma.survey.Rnw b/inst/doc/gamma.survey.Rnw
deleted file mode 100644
index 8a46ef7..0000000
--- a/inst/doc/gamma.survey.Rnw
+++ /dev/null
@@ -1,501 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=gammasurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,gamma ,continuous, regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt gamma.survey}: Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables}
-\label{gamma.survey}
-
-The survey-weighted Gamma regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional Gamma regression models (see 
-\Sref{gamma}), survey-weighted Gamma regression specifies a 
-continuous, positive dependent variable as a function of a set of explanatory 
-variables.  The survey-weighted Gamma model reports estimates of 
-model parameters identical to conventional Gamma estimates, but uses 
-information from the survey design to correct variance estimates.
-
-The {\tt gamma.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "gamma.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted Gamma models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights: 
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a positive and continuous measure of
-public schools' performance ({\tt api00}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), an indicator for whether each school
-holds classes year round ({\tt year.rnd}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses school performance on the {\tt meals} and {\tt year.rnd}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey",  
-weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey", strata=~stype, fpc=~fpc, data = apistrat)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "gamma.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  
-Suppose that the survey house that published these school 
-data withheld strata/cluster identifiers and instead 
-published replication weights.  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-gamma.survey} models, create a set of jack-knife 
-(JK1) replicate weights: 
-<<Replicate.rw>>= 
-jk1reps <- jk1weights(psu=apistrat$dnum)
-@ 
-Again, estimate a model that regresses school performance on 
-the {\tt meals} and {\tt year.rnd} variables, using
-the JK1 replicate weights in {\tt jk1reps} to compute standard errors:
-<<Replicate.zelig>>= 
-z.out3 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey", data = apistrat, 
-repweights=jk1reps$weights, type="JK1") 
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set the explanatory variable {\tt meals} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out3, meals= quantile(apistrat$meals, 0.8))
-@ 
-Generate first
-differences for the effect of high versus low 
-concentrations of poverty on school performance:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-
-\begin{itemize}
-\item The Gamma distribution with scale parameter $\alpha$ has a
-\emph{stochastic component}:
-\begin{eqnarray*}
-Y_i &\sim& \textrm{Gamma}(y_i \mid \lambda_i, \alpha) \\
-f(y_i)  &=& \frac{1}{\alpha^{\lambda_i} \, \Gamma(\lambda_i)} \, y_i^{\lambda_i
-  - 1} \exp \left( -\frac{y_i}{\alpha} \right)
-\end{eqnarray*}
-for $\alpha, \lambda_i, y_i > 0$.  \\
-
-\item The \emph{systematic component} is given by
-\begin{equation*}
-  \lambda_i = \frac{1}{x_i \beta}
-\end{equation*}
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) are simulations of the mean
-  of the stochastic component given draws of $\alpha$ and
-  $\beta$ from their posteriors:  $$E(Y) = \alpha \lambda_i.$$  
-\item The predicted values ({\tt qi\$pr}) are draws from the gamma
-  distribution for each given set of parameters $(\alpha, \lambda_i)$.
-\item If {\tt x1} is specified, {\tt sim()} also returns the
-  differences in the expected values ({\tt qi\$fd}), $$E(Y \mid x_1) -
-  E(Y \mid x)$$.
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.  
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "gamma.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$.
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from a
-     distribution defined by $(\alpha, \lambda_i)$.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     values for the specified values in {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt gamma.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt gamma.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-\input{cites/gamma.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The Gamma model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
-  
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-  \end{document}
diff --git a/inst/doc/gamma.survey.pdf b/inst/doc/gamma.survey.pdf
deleted file mode 100644
index 2d37107..0000000
Binary files a/inst/doc/gamma.survey.pdf and /dev/null differ
diff --git a/inst/doc/logit.Rnw b/inst/doc/logit.Rnw
deleted file mode 100644
index 0ca3cc8..0000000
--- a/inst/doc/logit.Rnw
+++ /dev/null
@@ -1,299 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=logit}
-\include{zinput}
-%\VignetteIndexEntry{Logistic Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, stats}
-%\VignetteKeyWords{model,logistic,dichotomous, regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-\section{{\tt logit}: Logistic Regression for Dichotomous Dependent
-Variables}\label{logit}
-
-Logistic regression specifies a dichotomous dependent variable as a
-function of a set of explanatory variables.  For a Bayesian
-implementation, see \Sref{logit.bayes}.  
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "logit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out, x1 = NULL)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for logistic regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame)  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Examples}
-\begin{enumerate}
-\item {Basic Example}
- 
-Attaching the sample turnout dataset:
-<<Example.data>>=
- data(turnout)
-@ 
-Estimating parameter values for the logistic regression:
-<<Example.zelig>>=
- z.out1 <- zelig(vote ~ age + race,  model = "logit", data = turnout) 
-@ 
-Setting values for the explanatory variables:
-<<Example.setx>>=
- x.out1 <- setx(z.out1, age = 36, race = "white")
-@ 
-Simulating quantities of interest from the posterior distribution.
-<<Example.sim>>=
- s.out1 <- sim(z.out1, x = x.out1)
-@
-<<Example.summary>>= 
- summary(s.out1)
-@ 
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>= 
- plot(s.out1)
-@ 
-\end{center}
-
-\item {Simulating First Differences}
-
-Estimating the risk difference (and risk ratio) between low education
-(25th percentile) and high education (75th percentile) while all the
-other variables held at their default values.
-<<FirstDifferences.setx>>=
- z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
- x.high <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.75))
- x.low <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.25))
-@ 
-
-<<FirstDifferences.sim>>=
- s.out2 <- sim(z.out2, x = x.high, x1 = x.low)
-@ 
-<<FirstDifferences.summary>>=
- summary(s.out2)
-@
-\begin{center}
-<<label=FirstDifferencesPlot,fig=true>>= 
- plot(s.out2)
-@ 
-\end{center} 
-
-
-\item {Presenting Results: An ROC Plot}  \label{ROC}
-  
-  One can use an ROC plot to evaluate the fit of alternative model
-  specifications.  (Use {\tt demo(roc)} to view this example, or see
-  King and Zeng (2002)\nocite{KinZen02}.)  
-<<ROC.zelig>>=
- z.out1 <- zelig(vote ~ race + educate + age, model = "logit", 
-                  data = turnout)
- z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
-@
-\begin{center}
-<<label=ROCPlot,fig=true, echo=true>>= 
-
-rocplot(z.out1$y, z.out2$y, fitted(z.out1), fitted(z.out2))
-@ 
-\end{center}
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the binary dependent variable for observation $i$ which
-takes the value of either 0 or 1.
-\begin{itemize}
-
-\item The \emph{stochastic component} is given by  
-\begin{eqnarray*}
-Y_i &\sim& \textrm{Bernoulli}(y_i \mid \pi_i) \\
-    &=& \pi_i^{y_i} (1-\pi_i)^{1-y_i}
-\end{eqnarray*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is given by: 
-\begin{equation*}
-\pi_i \; = \; \frac{1}{1 + \exp(-x_i \beta)}.
-\end{equation*}
-where $x_i$ is the vector of $k$ explanatory variables for observation $i$
-and $\beta$ is the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the logit model are
-  simulations of the predicted probability of a success: $$E(Y) =
-  \pi_i= \frac{1}{1 + \exp(-x_i \beta)},$$ given draws of $\beta$ from
-  its sampling distribution.
-
-\item The predicted values ({\tt qi\$pr}) are draws from the Binomial
-  distribution with mean equal to the simulated expected value $\pi_i$.  
-
-\item The first difference ({\tt qi\$fd}) for the logit model is defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) \ / \ \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)}\right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\, x,
-  model = "logit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values for the
-     systematic component, $\pi_i$.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected probabilities for the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values for the
-     specified values of {\tt x}.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the expected
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/logit}
-\input{citeZelig}
-
-
-\subsection*{See also}
-
-The logit model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
-
-
-
-
diff --git a/inst/doc/logit.mixed.Rnw b/inst/doc/logit.mixed.Rnw
deleted file mode 100644
index e72feee..0000000
--- a/inst/doc/logit.mixed.Rnw
+++ /dev/null
@@ -1,185 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=logitmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects logistic regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed, logistic regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt logit.mixed}: Mixed effects logistic Regression}
-\label{mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. The logit model is appropriate when the dependent variable is dichotomous.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="logit.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="logit.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the repsonse on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(voteincome)
-@
-Estimate model:
-<<Examples.zelig>>=
-z.out1 <- zelig(vote ~ education + age + female + tag(1 | state), data=voteincome, model="logit.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values, with high (80th percentile) and low (20th percentile) values for education:\\
-
-<<Examples.setx>>=
-x.high <- setx(z.out1, education=quantile(voteincome$education, 0.8))
-x.low <- setx(z.out1, education=quantile(voteincome$education, 0.2))
-@
-Generate first differences for the effect of high versus low education on voting: \\
-
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-
-\subsubsection{Mixed effects Logistic Regression Model}
-
-Let $Y_{ij}$ be the binary dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$ which takes the value of either 0 or 1, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Bernoulli distribution with mean vector $\pi_{ij}$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Bernoulli}(y_{ij} | \pi_{ij}) = \pi_{ij}^{y_{ij}} (1 - \pi_{ij})^{1 - y_{ij}}
-\end{equation*}
-where
-\begin{equation*}
-\pi_{ij} = \mathrm{Pr}(Y_{ij} = 1)
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covariance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\pi_{ij} \equiv \frac{1}{1 + \exp(-(X_{ij} \beta + Z_{ij} b_i))}
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the Binomial distribution with mean equal to the simulated expected value, $\pi_{ij}$ for
-\begin{equation*}
-\pi_{ij} = \frac{1}{1 + \exp(-(X_{ij} \beta + Z_{ij} b_i))}
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are simulations of the predicted probability of a success given draws of $\beta$ from its posterior:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \pi_{ij} = \frac{1}{1 + \exp(- X_{ij} \beta)}.
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in predicted probabilities, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = Pr(Y_{ij} = 1 | X_{ij}) - Pr(Y_{ij} = 1 | X_{ij}^\prime)
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-RR(Y_{ij} | X_{ij}, X_{ij}^{\prime}) = \frac{Pr(Y_{ij} = 1 | X_{ij})}{Pr(Y_{ij} = 1 | X_{ij}^{\prime})}
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite}
-
-\input{cites/logit.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects logistic regression is part of {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/inst/doc/logit.mixed.pdf b/inst/doc/logit.mixed.pdf
deleted file mode 100644
index 156efef..0000000
Binary files a/inst/doc/logit.mixed.pdf and /dev/null differ
diff --git a/inst/doc/logit.pdf b/inst/doc/logit.pdf
index 1b76c1f..2fba868 100644
Binary files a/inst/doc/logit.pdf and b/inst/doc/logit.pdf differ
diff --git a/inst/doc/logit.survey.Rnw b/inst/doc/logit.survey.Rnw
deleted file mode 100644
index 3441f90..0000000
--- a/inst/doc/logit.survey.Rnw
+++ /dev/null
@@ -1,520 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=logitsurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Logistic Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,logistic,dichotomous, regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt logit.survey}: Survey-Weighted Logistic Regression for Dichotomous Dependent Variables}
-\label{logit.survey}
-
-The survey-weighted logistic regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional logistic regression models (see 
-\Sref{logit}), survey-weighted logistic regression specifies a 
-dichotomous dependent variable as function of a set of explanatory 
-variables.  The survey-weighted logit model reports estimates of 
-model parameters identical to conventional logit estimates, but uses 
-information from the survey design to correct variance estimates.
-
-The {\tt logit.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "logit.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted logistic models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights:
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a dichotomous indicator 
-for whether each public school attends classes year round ({\tt yr.rnd}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), a measure of the percentage of students at 
-each school who are new to the school ({\tt mobility}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses the year-round schooling indicator on the {\tt meals} and {\tt mobility}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(yr.rnd ~ meals + mobility, model = "logit.survey", weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(yr.rnd ~ meals + mobility, model = "logit.survey", strata=~stype, fpc=~fpc, data = apistrat)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "logit.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-about the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-logit.survey} models, create a set of balanced repeated replicate
-(BRR) weights and an (artificial) dependent variable to simulate an indicator 
-for whether each hospital was sued:
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-@ 
-Estimate a model that regresses the indicator for hospitals that were
-sued on the number of patients who arrive alive in
-each hospital and the number of cardiac arrests that each hospital treats, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors.
-<<Replicate.zelig>>= 
-z.out3 <- zelig(formula=sued ~ arrests + alive , model = "logit.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set {\tt alive} at its mean and set {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8)) 
-@ 
-Generate first
-differences for the effect of high versus low cardiac arrests
-on the probability that a hospital will be sued:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the binary dependent variable for observation $i$ which
-takes the value of either 0 or 1.
-\begin{itemize}
-
-\item The \emph{stochastic component} is given by  
-\begin{eqnarray*}
-Y_i &\sim& \textrm{Bernoulli}(y_i \mid \pi_i) \\
-    &=& \pi_i^{y_i} (1-\pi_i)^{1-y_i}
-\end{eqnarray*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is given by: 
-\begin{equation*}
-\pi_i \; = \; \frac{1}{1 + \exp(-x_i \beta)}.
-\end{equation*}
-where $x_i$ is the vector of $k$ explanatory variables for observation $i$
-and $\beta$ is the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the survey-weighted logit model are
-  simulations of the predicted probability of a success: $$E(Y) =
-  \pi_i= \frac{1}{1 + \exp(-x_i \beta)},$$ given draws of $\beta$ from
-  its sampling distribution.
-
-\item The predicted values ({\tt qi\$pr}) are draws from the Binomial
-  distribution with mean equal to the simulated expected value $\pi_i$.  
-
-\item The first difference ({\tt qi\$fd}) for the survey-weighted logit model is defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) \ / \ \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)}\right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\ x,
-  model = "logit.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values for the
-     systematic component, $\pi_i$.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected probabilities for the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values for the
-     specified values of {\tt x}.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the expected
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt logit.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt logit.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-
-\input{cites/logit.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The logit model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
- 
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-
-\end{document}
diff --git a/inst/doc/logit.survey.pdf b/inst/doc/logit.survey.pdf
deleted file mode 100644
index 6b2bd51..0000000
Binary files a/inst/doc/logit.survey.pdf and /dev/null differ
diff --git a/inst/doc/ls.Rnw b/inst/doc/ls.Rnw
deleted file mode 100644
index f3c00ab..0000000
--- a/inst/doc/ls.Rnw
+++ /dev/null
@@ -1,288 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=ls}
-\include{zinput}
-%\VignetteIndexEntry{Least Squares Regression for Continuous Dependent Variables}
-%\VignetteDepends{Zelig, stats}
-%\VignetteKeyWords{model,least squares,continuous, regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-\section{{\tt ls}: Least Squares Regression for Continuous
-Dependent Variables}
-\label{ls}
-
-Use least squares regression analysis to estimate the best linear
-predictor for the specified dependent variables.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "ls", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Additional Inputs}  
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for least squares regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors based on
-sandwich estimators (see \cite{Zeileis04}, \cite{Huber81}, and
-\cite{White80}).  The default type of robust standard error is
-heteroskedastic consistent (HC), \emph{not} heteroskedastic and
-autocorrelation consistent (HAC).  
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  choose from 
-\begin{itemize}
-\item {\tt "vcovHC"}: (the default if {\tt robust = TRUE}), HC standard errors.
-\item {\tt "vcovHAC"}: HAC standard errors without weights.  
-\item {\tt "kernHAC"}: HAC standard errors using the weights given in
-\cite{Andrews91}.   
-\item {\tt "weave"}: HAC standard errors using the weights given in
-\cite{LumHea99}.
-\end{itemize} 
-\item {\tt order.by}: only applies to the HAC methods above.  Defaults to
-{\tt NULL} (the observations are chronologically ordered as in the
-original data).  Optionally, you may specify a time index (either as
-{\tt order.by = z}, where {\tt z} exists outside the data frame; or
-as {\tt order.by = \~{}z}, where {\tt z} is a variable in the data
-frame).  The observations are chronologically ordered by the size of
-{\tt z}.
-\item {\tt \dots}:  additional options passed to the functions
-specified in {\tt method}.  See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Examples}\begin{enumerate}
-\item Basic Example with First Differences
-
-Attach sample data:
-<<Examples.data>>=
- data(macro)
-@ 
-Estimate model:
-<<Examples.zelig>>=
- z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
-@ 
-Summarize regression coefficients:
-<<Examples.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, with
-high (80th percentile) and low (20th percentile) values for the trade variable:
-<<Examples.setx>>=
- x.high <- setx(z.out1, trade = quantile(macro$trade, 0.8))
- x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
-@ 
-Generate first differences for the effect of high versus low trade on
-GDP:
-<<Examples.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Examples.summary.sim>>= 
-summary(s.out1)
-@ 
-\begin{center}
-<<label=ExamplesPlot,fig=true,echo=true,width=5.5,height=4>>=  
- plot(s.out1)
-@ 
-\end{center}
-
-\item Using Dummy Variables
-
-Estimate a model with fixed effects for each country (see
-\Sref{factors} for help with dummy variables).  Note that you do not
-need to create dummy variables, as the program will automatically
-parse the unique values in the selected variable into discrete levels.  
-<<Dummy.zelig>>=
- z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(country), 
-                  model = "ls", data = macro)
-@   
-Set values for the explanatory variables, using the default mean/mode
-values, with country set to the United States and Japan, respectively:
-<<Dummy.setx>>=
- x.US <- setx(z.out2, country = "United States")
- x.Japan <- setx(z.out2, country = "Japan")
-@ 
-Simulate quantities of interest:
-<<Dummy.sim>>=
- s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
-@ 
-\begin{center}
-<<label=DummyPlot,fig=true,echo=true, width=5.5, height=4>>=   
- plot(s.out2)
-@ 
-\end{center}
-
-\item Multiple responses (least squares regression will be fitted
-  separately to each dependent variable)
-
-Two responses for data set macro: 
-<<Multiple.zelig>>=
- z.out3 <- zelig(cbind(unem, gdp) ~ capmob + trade,model = "ls", data = macro)
-@
-<<Multiple.zelig.summary>>=
-summary(z.out3)
-@    
-Set values for the explanatory variables, using the default mean/mode
-values, with country set to the United States and Japan, respectively:
-<<Multiple.setx>>=
- x.US <- setx(z.out3, country = "United States")
- x.Japan <- setx(z.out3, country = "Japan")
-@ 
-Simulate quantities of interest:
-<<Multiple.sim>>=
- s.out3 <- sim(z.out3, x = x.US, x1 = x.Japan)
-@
-Summary
-<<Example4.sim.summary>>=
-summary(s.out3)
-@  
-\begin{center}
-<<label=Example4Plot,fig=true,echo=true,  width=7.5, height=6>>= 
- plot(s.out3)
-@ 
-\end{center}
-
-\end{enumerate}
-
-\subsubsection{Model}
-\begin{itemize}
-\item The \emph{stochastic component} is described by a density
-  with mean $\mu_i$ and the common variance $\sigma^2$
-  \begin{equation*}
-    Y_i \; \sim \; f(y_i \mid \mu_i, \sigma^2).
-  \end{equation*}
-\item The \emph{systematic component} models the conditional mean as
-  \begin{equation*}
-     \mu_i =  x_i \beta
-  \end{equation*} 
-  where $x_i$ is the vector of covariates, and $\beta$ is the vector
-  of coefficients.
-  
-  The least squares estimator is the best linear predictor of a
-  dependent variable given $x_i$, and minimizes the sum of squared
-  residuals, $\sum_{i=1}^n (Y_i-x_i \beta)^2$.  
-\end{itemize}
-
-\subsubsection{Quantities of Interest} 
-\begin{itemize}
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component,  
-\begin{equation*}
-E(Y) = x_i \beta,\end{equation*}
-given a draw of $\beta$ from its sampling distribution.  
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "ls", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-  \item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: fitted values.
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-  
-\item From {\tt summary(z.out)}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-     \begin{equation*}
-       \hat{\beta} \; = \; \left(\sum_{i=1}^n x_i' x_i\right)^{-1} \sum x_i y_i
-     \end{equation*}
-   \item {\tt sigma}: the square root of the estimate variance of the
-     random error $e$:
-     \begin{equation*}
-       \hat{\sigma} \; = \; \frac{\sum (Y_i-x_i\hat{\beta})^2}{n-k}
-     \end{equation*}
-   \item {\tt r.squared}: the fraction of the variance explained by
-     the model. 
-     \begin{equation*}
-       R^2 \; = \; 1 - \frac{\sum (Y_i-x_i\hat{\beta})^2}{\sum (y_i -
-         \bar{y})^2} 
-     \end{equation*}
-   \item {\tt adj.r.squared}: the above $R^2$ statistic, penalizing
-     for an increased number of explanatory variables.  
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-   
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$fd}:  the simulated first differences (or
-     differences in expected values) for the specified values of {\tt
-       x} and {\tt x1}. 
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-\input{cites/ls}
-\input{citeZelig}
-
-\subsection* {See also}
-The least squares regression is part of the stats package by William N.
-Venables and Brian D. Ripley \citep{VenRip02}. In addition, advanced users may wish to refer to \texttt{help(lm)} and \texttt{help(lm.fit)}. Robust standard errors are implemented via the sandwich package by Achim Zeileis \citep{Zeileis04}. Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% TeX-master: t
-%%% End: 
diff --git a/inst/doc/ls.mixed.Rnw b/inst/doc/ls.mixed.Rnw
deleted file mode 100644
index 0710951..0000000
--- a/inst/doc/ls.mixed.Rnw
+++ /dev/null
@@ -1,187 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=lmmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects linear regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,linear, linear regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt ls.mixed}: Mixed effects Linear Regression}
-\label{mixed}
-
-Use multi-level linear regression if you have covariates that are grouped according to one or more classification factors and a continuous dependent variable.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="lm.multi")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="lm.multi")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt multi}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the repsonse on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt family:} A GLM family, see {\tt glm} and {\tt family} in the {\tt stats} package. If {\tt family} is missing then a linear mixed model is fit; otherwise a generalized linear mixed model is fit. In the later case only {\tt gaussian} family with {\tt "log"} link is supported at the moment.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(voteincome)
-@
-
-Estimate model:
-
-<<Examples.zelig>>=
-z.out1 <- zelig(income ~ education + age + female + tag(1 | state), data=voteincome, model="ls.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values, with high (80th percentile) and low (20th percentile) values for education:\\
-<<Examples.setx>>=
-x.high <- setx(z.out1, education=quantile(voteincome$education, 0.8))
-x.low <- setx(z.out1, education=quantile(voteincome$education, 0.2))
-@
-
-Generate first differences for the effect of high versus low education on income: \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-\begin{center}
-<<label=ExamplesPlot, fig=true, echo=true>>=
-plot(s.out1)
-@
-\end{center}
-
-\end{enumerate}
-
-\subsubsection{Mixed effects linear regression model}
-
-Let $Y_{ij}$ be the continuous dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a univariate normal model with a vector of means $\mu_{ij}$ and scalar variance $\sigma^2$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Normal}(y_{ij} | \mu_{ij}, \sigma^2)
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance-covariance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim \mathrm{Normal}(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\mu_{ij} \equiv X_{ij} \beta + Z_{ij} b_i
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-
-\item The predicted values ({\tt qi\$pr}) are draws from the normal distribution defined by mean $\mu_{ij}$ and variance $\sigma^2$,
-\begin{equation*}
-\mu_{ij} = X_{ij} \beta + Z_{ij} b_{i}
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are averaged over the stochastic components and are given by
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = X_{ij} \beta.
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in expected values, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = E(Y_{ij} | X_{ij}) - E(Y_{ij} | X_{ij}^\prime)
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item If {\tt "log"} link is used, expected values are computed as above and then exponentiated, while predicted values are draws from the log-normal distribution whose logarithm has mean and variance equal to $\mu_{ij}$ and $\sigma^2$, respectively.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt @} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-
-
-\subsection* {How to Cite}
-
-\input{cites/ls.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects linear regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/inst/doc/ls.mixed.pdf b/inst/doc/ls.mixed.pdf
deleted file mode 100644
index 03a9591..0000000
Binary files a/inst/doc/ls.mixed.pdf and /dev/null differ
diff --git a/inst/doc/ls.pdf b/inst/doc/ls.pdf
index 452c01a..306dae0 100644
Binary files a/inst/doc/ls.pdf and b/inst/doc/ls.pdf differ
diff --git a/inst/doc/manual-bayes.pdf b/inst/doc/manual-bayes.pdf
new file mode 100644
index 0000000..d686b22
Binary files /dev/null and b/inst/doc/manual-bayes.pdf differ
diff --git a/inst/doc/manual-gee.pdf b/inst/doc/manual-gee.pdf
new file mode 100644
index 0000000..5901f8d
Binary files /dev/null and b/inst/doc/manual-gee.pdf differ
diff --git a/inst/doc/manual.pdf b/inst/doc/manual.pdf
new file mode 100644
index 0000000..f509b03
Binary files /dev/null and b/inst/doc/manual.pdf differ
diff --git a/inst/doc/negbin.Rnw b/inst/doc/negbin.Rnw
deleted file mode 100644
index c22d8c3..0000000
--- a/inst/doc/negbin.Rnw
+++ /dev/null
@@ -1,254 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=negbin}
-\include{zinput}
-%\VignetteIndexEntry{Negative Binomial Regression for Event Count Dependent Variables}
-%\VignetteDepends{Zelig, MASS}
-%\VignetteKeyWords{model, binomial,negative, regression, count}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-
-\section{{\tt negbin}: Negative Binomial Regression for Event
-Count Dependent Variables}\label{negbin}
-
-Use the negative binomial regression if you have a count of events for
-each observation of your dependent variable.  The negative binomial
-model is frequently used to estimate over-dispersed event count
-models.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "negbin", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for negative binomial regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Example}
-
-Load sample data:  
-<<Example.data>>=
- data(sanction)
-@ 
-Estimate the model:  
-<<Example.zelig>>=
- z.out <- zelig(num ~ target + coop, model = "negbin", data = sanction)
-@ 
-<<Example.summary>>= 
-summary(z.out)
-@ 
-Set values for the explanatory variables to their default mean values:  
-<<Example.setx>>=
- x.out <- setx(z.out)
-@ 
-Simulate fitted values:  
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.out)
-@
-<<Example.summary.sim>>= 
-summary(s.out)
-@ 
-\begin{center}
-<<label=Example1Plot,fig=true>>= 
- plot(s.out)
-@ 
-\end{center}
-\subsubsection{Model}
-Let $Y_i$ be the number of independent events that occur during a
-fixed time period. This variable can take any non-negative integer value.
-
-\begin{itemize}
-\item The negative binomial distribution is derived by letting the
-  mean of the Poisson distribution vary according to a fixed
-  parameter $\zeta$ given by the Gamma distribution. The
-  \emph{stochastic component} is given by
-   \begin{eqnarray*}
-     Y_i \mid \zeta_i & \sim & \textrm{Poisson}(\zeta_i \mu_i),\\
-     \zeta_i & \sim & \frac{1}{\theta}\textrm{Gamma}(\theta).
-   \end{eqnarray*}
-   The marginal distribution of $Y_i$ is then the negative binomial
-   with mean $\mu_i$ and variance $\mu_i + \mu_i^2/\theta$:
-   \begin{eqnarray*}
-   Y_i & \sim & \textrm{NegBin}(\mu_i, \theta), \\
-       & = & \frac{\Gamma (\theta + y_i)}{y_i! \, \Gamma(\theta)} 
-             \frac{\mu_i^{y_i} \, \theta^{\theta}}{(\mu_i + \theta)^{\theta + y_i}},
-   \end{eqnarray*}
-   where $\theta$ is the systematic parameter of the Gamma
-   distribution modeling $\zeta_i$.  
-
- \item The \emph{systematic component} is given by
-   \begin{equation*}
-     \mu_i = \exp(x_i \beta)
-   \end{equation*}
-   where $x_i$ is the vector of $k$ explanatory variables and $\beta$ is
-   the vector of coefficients.
- \end{itemize}
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) are simulations of the mean
-  of the stochastic component.  Thus, $$E(Y) = \mu_i = \exp(x_i
-  \beta),$$ given simulations of $\beta$.  
-  
-\item The predicted value ({\tt qi\$pr}) drawn from the distribution
-  defined by the set of parameters $(\mu_i, \theta)$.
-
-\item The first difference ({\tt qi\$fd}) is
-\begin{equation*}
-\textrm{FD} \; = \; E(Y | x_1) - E(Y \mid x)
-\end{equation*}
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "negbin", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt theta}: the maximum likelihood estimate for the
-     stochastic parameter $\theta$.  
-   \item {\tt SE.theta}: the standard error for {\tt theta}.  
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the fitted values for the systematic
-     component $\mu$.  
-   \item {\tt linear.predictors}: a vector of $x_{i} \beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values given the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distribution defined by $(\mu_i, \theta)$.  
-   \item {\tt qi\$fd}: the simulated first differences in the
-     simulated expected values given the specified values of {\tt x}
-     and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/negbin}
-\input{citeZelig}
-
-\subsection* {See also}
-The negative binomial model is part of the MASS package by William N. Venables and Brian D. Ripley \citep{VenRip02}. Advanced users may wish to refer to \texttt{help(glm.nb)} as well as \cite{McCNel89}. Robust standard errors are implemented via the sandwich package by Achim Zeileis \citep{Zeileis04}. Sample data are from \cite{Martin92}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
diff --git a/inst/doc/negbin.pdf b/inst/doc/negbin.pdf
deleted file mode 100644
index 158b160..0000000
Binary files a/inst/doc/negbin.pdf and /dev/null differ
diff --git a/inst/doc/negbinom.pdf b/inst/doc/negbinom.pdf
new file mode 100644
index 0000000..7ccabd9
Binary files /dev/null and b/inst/doc/negbinom.pdf differ
diff --git a/inst/doc/normal.pdf b/inst/doc/normal.pdf
new file mode 100644
index 0000000..d56377f
Binary files /dev/null and b/inst/doc/normal.pdf differ
diff --git a/inst/doc/normal.survey.Rnw b/inst/doc/normal.survey.Rnw
deleted file mode 100644
index 0503248..0000000
--- a/inst/doc/normal.survey.Rnw
+++ /dev/null
@@ -1,511 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=normal}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Normal Regression  for Continuous Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model, normal, regression, continuous, least squares, survey-weight}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt normal.survey}: Survey-Weighted Normal Regression for Continuous Dependent Variables}
-\label{normal.survey}
-
-The survey-weighted Normal regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the least squares and Normal regression models (see 
-\Sref{ls} and \Sref{normal}), survey-weighted Normal regression 
-specifies a continuous dependent variable as a linear function of a 
-set of explanatory variables.  The survey-weighted normal model 
-reports estimates of model parameters identical to least squares or 
-Normal regression estimates, but uses information from the survey 
-design to correct variance estimates.
-
-The {\tt normal.survey} model accommodates three common types of
-complex survey data.  Each method listed here requires selecting
-specific options which are detailed in the ``Additional Inputs''
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "normal.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted Normal models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights: 
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.  
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a continuous measure of
-public schools' performance ({\tt api00}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), an indicator for whether each school
-holds classes year round ({\tt yr.rnd}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses school performance on the {\tt meals} and {\tt yr.rnd}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(api00 ~ meals + yr.rnd, model = "normal.survey",  weights=~pw,
-data = apistrat) 
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on academic performance: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(api00 ~ meals + yr.rnd, 
-model = "normal.survey", strata=~stype, fpc=~fpc, data = apistrat) 
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "normal.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-that includes counts of the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-normal.survey} models, create a set of balanced repeated replicate
-(BRR) weights: 
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-@ 
-Estimate a model that regresses counts of patients who arrive alive in
-each hospital on the number of cardiac arrests that each hospital treats, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors.
-<<Replicate.zelig>>= 
-z.out3 <- zelig(alive ~ arrests , model = "normal.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-@ 
-Generate first
-differences for the effect of minimal versus maximal cardiac arrests
-on numbers of patients who arrive alive: 
-<<Replicate.sim>>= 
-s.out3<- sim(z.out3, x=x.low, x1=x.high) 
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the continuous dependent variable for observation $i$.
-\begin{itemize}
-\item The \emph{stochastic component} is described by a univariate normal
-  model with a vector of means $\mu_i$ and scalar variance $\sigma^2$:
-  \begin{equation*}
-    Y_i \; \sim \; \textrm{Normal}(\mu_i, \sigma^2).
-  \end{equation*}
-
-\item The \emph{systematic component} is
-  \begin{equation*}
-    \mu_i \;= \; x_i \beta,
-  \end{equation*}
-  where $x_i$ is the vector of $k$ explanatory variables and $\beta$ is
-  the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is the value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component, $$E(Y) = \mu_i = x_i \beta,$$
-  given a draw of $\beta$ from its posterior.
-
-\item The predicted value ({\tt qi\$pr}) is drawn from the distribution
-  defined by the set of parameters $(\mu_i, \sigma)$.
-
-\item The first difference ({\tt qi\$fd}) is:
-\begin{equation*}
-\textrm{FD}\; = \;E(Y \mid x_1) -  E(Y \mid x)
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*}
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*}
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which
-you may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "normal.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)}, see
-the {\tt coefficients} by using {\tt z.out\$coefficients}, and a
-default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: fitted values.  For the survey-weighted normal model,
-     these are identical to the {\tt linear predictors}.
-   \item {\tt linear.predictors}: fitted values.  For the survey-weighted normal
-     model, these are identical to {\tt fitted.values}.
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distribution defined by $(\mu_i, \sigma)$.
-   \item {\tt qi\$fd}: the simulated first difference in the simulated
-     expected values for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt normal.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt normal.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-\input{cites/normal.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The normal model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
- 
- 
- 
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/inst/doc/normal.survey.pdf b/inst/doc/normal.survey.pdf
deleted file mode 100644
index 7eaa1c0..0000000
Binary files a/inst/doc/normal.survey.pdf and /dev/null differ
diff --git a/inst/doc/parse.formula.pdf b/inst/doc/parse.formula.pdf
new file mode 100644
index 0000000..b7bcccf
Binary files /dev/null and b/inst/doc/parse.formula.pdf differ
diff --git a/inst/doc/poisson.Rnw b/inst/doc/poisson.Rnw
deleted file mode 100644
index 2194225..0000000
--- a/inst/doc/poisson.Rnw
+++ /dev/null
@@ -1,241 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=poisson}
-\include{zinput}
-%\VignetteIndexEntry{Poisson Regression for Event Count Dependent Variables}
-%\VignetteDepends{Zelig, stats}
-%\VignetteKeyWords{model, poisson,regression, count}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt poisson}: Poisson Regression for Event Count
-Dependent Variables}\label{poisson}
-
-Use the Poisson regression model if the observations of your dependent
-variable represents the number of independent events that occur during
-a fixed period of time (see the negative binomial model, \Sref{negbin},
-for over-dispersed event counts).  For a Bayesian implementation of
-this model, see \Sref{poisson.bayes}.  
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "poisson", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for poisson regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Example}
-
-Load sample data:  
-<<Example.data>>=
- data(sanction)
-@ 
-Estimate Poisson model:  
-<<Example.zelig>>=
- z.out <- zelig(num ~ target + coop, model = "poisson", data = sanction)
-@ 
-<<Example.summary>>= 
-summary(z.out)
-@ 
-Set values for the explanatory variables to their default mean values:  
-<<Example.setx>>=
- x.out <- setx(z.out)
-@ 
-Simulate fitted values:  
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.out)
-@ 
-<<Example.summary.sim>>= 
-summary(s.out)
-@
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>= 
- plot(s.out)
-@ 
-\end{center}
-
-\subsubsection{Model}
-Let $Y_i$ be the number of independent events that occur during a
-fixed time period. This variable can take any non-negative integer.
-
-\begin{itemize}
-\item The Poisson distribution has \emph{stochastic component}
-  \begin{equation*}
-    Y_i \; \sim \; \textrm{Poisson}(\lambda_i),
-  \end{equation*}
-  where $\lambda_i$ is the mean and variance parameter.
-  
-\item The \emph{systematic component} is 
-  \begin{equation*}
-    \lambda_i \; = \; \exp(x_i \beta),
-  \end{equation*}
-  where $x_i$ is the vector of explanatory variables, and $\beta$ is
-  the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-  
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component, $$E(Y) = \lambda_i =  \exp(x_i
-  \beta),$$ given draws of $\beta$ from its sampling distribution.  
-  
-\item The predicted value ({\tt qi\$pr}) is a random draw from the
-  poisson distribution defined by mean $\lambda_i$.
-
-\item The first difference in the expected values ({\tt qi\$fd}) is given by:
-\begin{equation*}
-\textrm{FD} \; = \; E(Y | x_1) - E(Y \mid x)
-\end{equation*}
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "poisson", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the fitted values for the systematic
-     component $\lambda$.  
-   \item {\tt linear.predictors}: a vector of $x_{i}\beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values given the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distributions defined by $\lambda_i$.
-   \item {\tt qi\$fd}: the simulated first differences in the expected
-     values given the specified values of {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/poisson}
-\input{citeZelig}
-
-\subsection* {See also}
-The poisson model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{Martin92}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
diff --git a/inst/doc/poisson.mixed.Rnw b/inst/doc/poisson.mixed.Rnw
deleted file mode 100644
index 425312d..0000000
--- a/inst/doc/poisson.mixed.Rnw
+++ /dev/null
@@ -1,178 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=poissonmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects poisson regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,poisson regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt poisson.mixed}: Mixed effects poisson Regression}
-\label{mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. Poisson regression applies to dependent variables that represent the number of independent events that occur during a fixed period of time.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="poisson.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="poisson.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the repsonse on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(homerun)
-@
-Estimate model:
-<<Examples.zelig>>=
-z.out1 <- zelig(homeruns ~ player + tag(player - 1 | month), data=homerun, model="poisson.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values:\\
-<<Examples.setx>>=
-x.out <- setx(z.out1)
-@
-Simulate draws using the default bootstrap method and view simulated quantities of interest: \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.out)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-
-\subsubsection{Mixed effects Poisson Regression Model}
-
-Let $Y_{ij}$ be the number of independent events that occur during a fixed time period, realized for observation $j$ in group $i$ as $y_{ij}$, which takes any non-negative integer as its value, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Poisson distribution with mean and variance parameter $\lambda_{ij}$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Poisson}(y_{ij} | \lambda_{ij}) = \frac{\exp(-\lambda_{ij}) \lambda_{ij}^{y_{ij}}}{y_{ij}!}
-\end{equation*}
-where
-\begin{equation*}
-y_{ij} = 0, 1, \ldots
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covariance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\lambda_{ij} \equiv \exp(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the poisson distribution defined by mean $ \lambda_{ij} $, for
-\begin{equation*}
-\lambda_{ij} = \exp(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) is the mean of simulations of the stochastic component given draws of $\beta$ from its posterior:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \lambda_{ij} = \exp(X_{ij} \beta).
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in expected values, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = E(Y_{ij} | X_{ij}) - E(Y_{ij} | X_{ij}^\prime)
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-
-\subsection* {How to Cite}
-
-\input{cites/poisson.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects poisson regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/inst/doc/poisson.mixed.pdf b/inst/doc/poisson.mixed.pdf
deleted file mode 100644
index 3ba4c6a..0000000
Binary files a/inst/doc/poisson.mixed.pdf and /dev/null differ
diff --git a/inst/doc/poisson.pdf b/inst/doc/poisson.pdf
index a1250c2..a65ba64 100644
Binary files a/inst/doc/poisson.pdf and b/inst/doc/poisson.pdf differ
diff --git a/inst/doc/poisson.survey.Rnw b/inst/doc/poisson.survey.Rnw
deleted file mode 100644
index 68d7457..0000000
--- a/inst/doc/poisson.survey.Rnw
+++ /dev/null
@@ -1,513 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=poissonsurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Poisson Regression for Event Count Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,poisson,event,regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt poisson.survey}: Survey-Weighted Poisson Regression for Event Count Dependent Variables}
-\label{poisson.survey}
-
-The survey-weighted poisson regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional poisson regression model (see 
-\Sref{poisson}), survey-weighted poisson regression specifies a 
-dependent variable representing the number of independent events 
-that occur during a fixed period of time as function of a set of 
-explanatory variables.  The survey-weighted poisson model reports 
-estimates of model parameters identical to conventional poisson 
-estimates, but uses information from the survey design to correct 
-variance estimates.
-
-The {\tt poisson.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "poisson.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted poisson models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights: 
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a variable reporting the number of
-times a new student enrolled during the previous school year ({\tt enroll}), 
-a measure of each school's academic performance ({\tt api99}), 
-an indicator for whether each school holds classes year round ({\tt yr.rnd}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses {\tt enroll} on {\tt api99} and {\tt yr.rnd}:
-<<Existing.zelig>>= 
-z.out1 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", 
-weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-the measure of academic performance:
-<<Existing.setx>>= 
-x.low <- setx(z.out1, api99= quantile(apistrat$api99, 0.2))
-x.high <- setx(z.out1, api99= quantile(apistrat$api99, 0.8))
-@ 
-Generate first differences for the
-effect of high versus low academic performance on the expected
-number of newly enrolled students: 
-<<Existing.sim>>=
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", data = apistrat, 
-  strata=~stype, fpc=~fpc)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "poisson.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-about the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-normal.survey} models, create a set of balanced repeated replicate
-(BRR) weights: 
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-@ 
-Estimate a model that regresses the count of patients who arrived alive at
-the hospital last year on the number of patients treated for cardiac arrests, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors:
-<<Replicate.zelig>>= 
-z.out3 <- zelig(alive ~ arrests , model = "poisson.survey",
-repweights=BRRrep, type="BRR", data=scd)
-summary(z.out3) 
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set the explanatory variable {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8)) 
-@ 
-Generate first
-differences for the effect of high versus low cardiac arrests
-on the count of patients who arrive alive:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the number of independent events that occur during a
-fixed time period. This variable can take any non-negative integer.
-
-\begin{itemize}
-\item The Poisson distribution has \emph{stochastic component}
-  \begin{equation*}
-    Y_i \; \sim \; \textrm{Poisson}(\lambda_i),
-  \end{equation*}
-  where $\lambda_i$ is the mean and variance parameter.
-  
-\item The \emph{systematic component} is 
-  \begin{equation*}
-    \lambda_i \; = \; \exp(x_i \beta),
-  \end{equation*}
-  where $x_i$ is the vector of explanatory variables, and $\beta$ is
-  the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is the value of the dependent variable for
-case $i$, $\mu_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-  
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component, $$E(Y) = \lambda_i =  \exp(x_i
-  \beta),$$ given draws of $\beta$ from its sampling distribution.  
-  
-\item The predicted value ({\tt qi\$pr}) is a random draw from the
-  Poisson distribution defined by mean $\lambda_i$.
-
-\item The first difference in the expected values ({\tt qi\$fd}) is given by:
-\begin{equation*}
-\textrm{FD} \; = \; E(Y \mid x_1) - E(Y \mid x)
-\end{equation*}
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "poisson.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the fitted values for the systematic
-     component $\lambda$.  
-   \item {\tt linear.predictors}: a vector of $x_{i}\beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values given the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distributions defined by $\lambda_i$.
-   \item {\tt qi\$fd}: the simulated first differences in the expected
-     values given the specified values of {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt poisson.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt poisson.survey} models in {\tt Zelig}.
-
-
-\subsection* {How to Cite}
-
-\input{cites/poisson.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The Poisson model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
-  
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-
-\end{document}
diff --git a/inst/doc/poisson.survey.pdf b/inst/doc/poisson.survey.pdf
deleted file mode 100644
index 843da20..0000000
Binary files a/inst/doc/poisson.survey.pdf and /dev/null differ
diff --git a/inst/doc/probit.Rnw b/inst/doc/probit.Rnw
deleted file mode 100644
index 37b9c2e..0000000
--- a/inst/doc/probit.Rnw
+++ /dev/null
@@ -1,241 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=probit}
-\include{zinput}
-%\VignetteIndexEntry{Probit Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{model, probit,regression,dichotomous, binary}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-pkg <- search()
-if(!length(grep("package:Zelig",pkg)))
-library(Zelig)
-@
-
-\section{{\tt probit}: Probit Regression for Dichotomous Dependent Variables}\label{probit}
-
-Use probit regression to model binary dependent variables
-specified as a function of a set of explanatory variables.  For a
-Bayesian implementation of this model, see \Sref{probit.bayes}.  
-
-\subsubsection{Syntax}
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "probit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out, x1 = NULL)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for probit regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Examples}
-Attach the sample turnout dataset:
-<<Examples.data>>=
- data(turnout)
-@ 
-Estimate parameter values for the probit regression:
-<<Examples.zelig>>=
- z.out <- zelig(vote ~ race + educate,  model = "probit", data = turnout) 
-@ 
-<<Examples.summary>>=
- summary(z.out)
-@ 
-Set values for the explanatory variables to their default values.
-<<Examples.setx>>=
- x.out <- setx(z.out)
-@ 
-Simulate quantities of interest from the posterior distribution.
-<<Examples.sim>>=
-s.out <- sim(z.out, x = x.out)
-@ 
-<<Examples.summary.sim>>=
-summary(s.out)
-@ 
-
-\subsubsection{Model}
-Let $Y_i$ be the observed binary dependent variable for observation
-$i$ which takes the value of either 0 or 1.
-\begin{itemize}
-\item The \emph{stochastic component} is given by  
-\begin{equation*}
-Y_i \; \sim \; \textrm{Bernoulli}(\pi_i), 
-\end{equation*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is 
-\begin{equation*}
-  \pi_i \; = \; \Phi (x_i \beta)
-\end{equation*}
-where $\Phi(\mu)$ is the cumulative distribution function of the
-Normal distribution with mean 0 and unit variance.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-
-\item The expected value ({\tt qi\$ev}) is a simulation of predicted
-  probability of success $$E(Y) = \pi_i = \Phi(x_i
-  \beta),$$ given a draw of $\beta$ from its sampling distribution.  
-
-\item The predicted value ({\tt qi\$pr}) is a draw from a Bernoulli
-  distribution with mean $\pi_i$.  
-  
-\item The first difference ({\tt qi\$fd}) in expected values is
-  defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) / \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "probit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the in-sample fitted values.
-   \item {\tt linear.predictors}: a vector of $x_{i}\beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values, or predicted
-     probabilities, for the specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distributions defined by the predicted probabilities.  
-   \item {\tt qi\$fd}: the simulated first differences in the predicted
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the predicted
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite} 
-
-\input{cites/probit}
-\input{citeZelig}
-
-\subsection* {See also}
-The probit model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
diff --git a/inst/doc/probit.mixed.Rnw b/inst/doc/probit.mixed.Rnw
deleted file mode 100644
index 6885ba0..0000000
--- a/inst/doc/probit.mixed.Rnw
+++ /dev/null
@@ -1,185 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=probitmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects probit regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,probit regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt probit.mixed}: Mixed effects probit Regression}
-\label{mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. The probit model is appropriate when the dependent variable is dichotomous.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="probit.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="probit.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the repsonse on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(voteincome)
-@
-Estimate model:
-<<Examples.zelig>>=
-z.out1 <- zelig(vote ~ education + age + female + tag(1 | state), data=voteincome, model="probit.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values, with high (80th percentile) and low (20th percentile) values for education:\\
-<<Examples.setx>>=
-x.high <- setx(z.out1, education=quantile(voteincome$education, 0.8))
-x.low <- setx(z.out1, education=quantile(voteincome$education, 0.2))
-@
-Generate first differences for the effect of high versus low education on voting: \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-\subsubsection{Mixed effects probit Regression Model}
-
-Let $Y_{ij}$ be the binary dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$ which takes the value of either 0 or 1, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Bernoulli distribution with mean vector $\pi_{ij}$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Bernoulli}(y_{ij} | \pi_{ij}) = \pi_{ij}^{y_{ij}} (1 - \pi_{ij})^{1 - y_{ij}}
-\end{equation*}
-where
-\begin{equation*}
-\pi_{ij} = \mathrm{Pr}(Y_{ij} = 1)
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance-covariance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim \mathrm{Normal}(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\pi_{ij} \equiv \Phi(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-where $\Phi(\mu)$ is the cumulative distribution function of the Normal distribution with mean 0 and unit variance, and \\
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the Bernoulli distribution with mean equal to the simulated expected value, $\pi_{ij}$ for
-\begin{equation*}
-\pi_{ij} = \Phi(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance-covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are simulations of the predicted probability of a success given draws of $\beta$ from its posterior:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \pi_{ij} = \Phi(X_{ij} \beta).
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in predicted probabilities, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = Pr(Y_{ij} = 1 | X_{ij}) - Pr(Y_{ij} = 1 | X_{ij}^\prime)
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-RR(Y_{ij} | X_{ij}, X_{ij}^{\prime}) = \frac{Pr(Y_{ij} = 1 | X_{ij})}{Pr(Y_{ij} = 1 | X_{ij}^{\prime})}
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-
-\subsection* {How to Cite}
-
-\input{cites/probit.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects probit regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/inst/doc/probit.mixed.pdf b/inst/doc/probit.mixed.pdf
deleted file mode 100644
index 58dba3e..0000000
Binary files a/inst/doc/probit.mixed.pdf and /dev/null differ
diff --git a/inst/doc/probit.pdf b/inst/doc/probit.pdf
index 9129131..a2bc6bd 100644
Binary files a/inst/doc/probit.pdf and b/inst/doc/probit.pdf differ
diff --git a/inst/doc/probit.survey.Rnw b/inst/doc/probit.survey.Rnw
deleted file mode 100644
index 7ec5ee0..0000000
--- a/inst/doc/probit.survey.Rnw
+++ /dev/null
@@ -1,521 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=probitsurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Probit Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,probit ,dichotomous, regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt probit.survey}: Survey-Weighted Probit Regression for Dichotomous Dependent Variables}
-\label{probit.survey}
-
-The survey-weighted probit regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional probit regression models (see 
-\Sref{probit}), survey-weighted probit regression specifies a 
-dichotomous dependent variable as a function of a set of explanatory 
-variables.  The survey-weighted probit model reports estimates of 
-model parameters identical to conventional probit estimates, but uses 
-information from the survey design to correct variance estimates.
-
-The {\tt probit.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "probit.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted probit models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights:
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a dichotomous indicator 
-for whether each public school attends classes year round ({\tt yr.rnd}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), a measure of the percentage of students at 
-each school who are new to the school ({\tt mobility}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses the year-round schooling indicator on the {\tt meals} and {\tt mobility}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(yr.rnd ~ meals + mobility, model = "probit.survey", weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(yr.rnd ~ meals + mobility, model = "probit.survey", strata=~stype, fpc=~fpc, data = apistrat)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "probit.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-about the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-probit.survey} models, create a set of balanced repeated replicate
-(BRR) weights and an (artificial) dependent variable to simulate an indicator 
-for whether each hospital was sued:
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-@ 
-Estimate a model that regresses the indicator for hospitals that were
-sued on the number of patients who arrive alive in
-each hospital and the number of cardiac arrests that each hospital treats, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors.
-<<Replicate.zelig>>= 
-z.out3 <- zelig(formula=sued ~ arrests + alive , model = "probit.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set {\tt alive} at its mean and set {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8)) 
-@ 
-Generate first
-differences for the effect of high versus low cardiac arrests
-on the probability that a hospital will be sued:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the observed binary dependent variable for observation
-$i$ which takes the value of either 0 or 1.
-\begin{itemize}
-\item The \emph{stochastic component} is given by  
-\begin{equation*}
-Y_i \; \sim \; \textrm{Bernoulli}(\pi_i), 
-\end{equation*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is 
-\begin{equation*}
-  \pi_i \; = \; \Phi (x_i \beta)
-\end{equation*}
-where $\Phi(\mu)$ is the cumulative distribution function of the
-Normal distribution with mean 0 and unit variance.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is the value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-
-\item The expected value ({\tt qi\$ev}) is a simulation of predicted
-  probability of success $$E(Y) = \pi_i = \Phi(x_i
-  \beta),$$ given a draw of $\beta$ from its sampling distribution.  
-
-\item The predicted value ({\tt qi\$pr}) is a draw from a Bernoulli
-  distribution with mean $\pi_i$.  
-  
-\item The first difference ({\tt qi\$fd}) in expected values is
-  defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) / \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\ x,
-  model = "probit.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values for the
-     systemic component, $\pi_i$.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected probabilities for the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values for the
-     specified values of {\tt x}.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the expected
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt probit.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt probit.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-
-\input{cites/probit.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The probit model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
-  
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-
-\end{document}
diff --git a/inst/doc/probit.survey.pdf b/inst/doc/probit.survey.pdf
deleted file mode 100644
index f54e5b0..0000000
Binary files a/inst/doc/probit.survey.pdf and /dev/null differ
diff --git a/inst/doc/twosls.pdf b/inst/doc/twosls.pdf
new file mode 100644
index 0000000..69ae436
Binary files /dev/null and b/inst/doc/twosls.pdf differ
diff --git a/inst/doc/weibull.Rnw b/inst/doc/weibull.Rnw
deleted file mode 100644
index d25bded..0000000
--- a/inst/doc/weibull.Rnw
+++ /dev/null
@@ -1,292 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=weibull}
-\include{zinput}
-%\VignetteIndexEntry{Weibull Regression for Duration Dependent Variables}
-%\VignetteDepends{Zelig, survival}
-%\VignetteKeyWords{model, weibull,regression,bounded, duration}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt weibull}: Weibull Regression for Duration
-Dependent Variables}\label{weibull}
-
-Choose the Weibull regression model if the values in your dependent
-variable are duration observations.  The Weibull model relaxes the
-exponential model's (see \Sref{exp}) assumption of constant hazard,
-and allows the hazard rate to increase or decrease monotonically with
-respect to elapsed time.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Surv(Y, C) ~ X1 + X2, model = "weibull", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-Weibull models require that the dependent variable be in the form {\tt
-  Surv(Y, C)}, where {\tt Y} and {\tt C} are vectors of length $n$.
-For each observation $i$ in 1, \dots, $n$, the value $y_i$ is the
-duration (lifetime, for example), and the associated $c_i$ is a binary
-variable such that $c_i = 1$ if the duration is not censored ({\it
-  e.g.}, the subject dies during the study) or $c_i = 0$ if the
-duration is censored ({\it e.g.}, the subject is still alive at the
-end of the study).  If $c_i$ is omitted, all Y are assumed to be
-completed; that is, time defaults to 1 for all observations.
-
-\subsubsection{Input Values} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for weibull regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE}, {\tt
-zelig()} computes robust standard errors based on sandwich estimators
-(see \cite{Huber81} and \cite{White80}) based on the options in {\tt
-cluster}.
-\item {\tt cluster}:  if {\tt robust = TRUE}, you may select a
-variable to define groups of correlated observations.  Let {\tt x3} be
-a variable that consists of either discrete numeric values, character
-strings, or factors that define strata.  Then
-\begin{verbatim}
-> z.out <- zelig(y ~ x1 + x2, robust = TRUE, cluster = "x3", 
-                 model = "exp", data = mydata)
-\end{verbatim}
-means that the observations can be correlated within the strata defined by
-the variable {\tt x3}, and that robust standard errors should be
-calculated according to those clusters.  If {\tt robust = TRUE} but
-{\tt cluster} is not specified, {\tt zelig()} assumes that each
-observation falls into its own cluster.  
-\end{itemize}  
-
-\subsubsection{Example}
-
-Attach the sample data: 
-<<Example.data>>=
- data(coalition)
-@ 
-Estimate the model: 
-<<Example.zelig>>=
- z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "weibull",
-                 data = coalition)
-@ 
-View the regression output:  
-<<Example.summary>>=
- summary(z.out)
-@ 
-Set the baseline values (with the ruling coalition in the minority)
-and the alternative values (with the ruling coalition in the majority)
-for X:
-<<Example.setx>>=
- x.low <- setx(z.out, numst2 = 0)
- x.high <- setx(z.out, numst2 = 1)
-@ 
-Simulate expected values ({\tt qi\$ev}) and first differences ({\tt qi\$fd}):
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.low, x1 = x.high)
-@
-<<Example.summary.sim>>= 
- summary(s.out)
-@  
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>= 
-plot(s.out)
-@ 
-\end{center}
-
-\subsubsection{Model}
-Let $Y_i^*$ be the survival time for observation $i$. This variable
-might be censored for some observations at a fixed time $y_c$ such
-that the fully observed dependent variable, $Y_i$, is defined as
-\begin{equation*}
-  Y_i = \left\{ \begin{array}{ll}
-      Y_i^* & \textrm{if }Y_i^* \leq y_c \\
-      y_c & \textrm{if }Y_i^* > y_c 
-    \end{array} \right.
-\end{equation*}
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by the distribution
-  of the partially observed variable $Y^*$.  We assume $Y_i^*$ follows
-  the Weibull distribution whose density function is given by
-  \begin{equation*}
-    f(y_i^*\mid \lambda_i, \alpha) = \frac{\alpha}{\lambda_i^\alpha}
-    y_i^{* \alpha-1} \exp \left\{ -\left( \frac{y_i^*}{\lambda_i}
-\right)^{\alpha} \right\}
-  \end{equation*}
-  for $y_i^* \ge 0$, the scale parameter $\lambda_i > 0$, and the shape
-  parameter $\alpha > 0$. The mean of this distribution is $\lambda_i
-  \Gamma(1 + 1 / \alpha)$. When $\alpha = 1$, the distribution reduces to
-  the exponential distribution (see Section~\ref{exp}).  (Note that
-the output from {\tt zelig()} parameterizes {\tt scale}$ = 1 / \alpha$.)
-
-In addition, survival models like the Weibull have three additional
-properties.  The hazard function $h(t)$ measures the probability of
-not surviving past time $t$ given survival up to $t$. In general,
-the hazard function is equal to $f(t)/S(t)$ where the survival
-function $S(t) = 1 - \int_{0}^t f(s) ds$ represents the fraction still
-surviving at time $t$.  The cumulative hazard function $H(t)$
-describes the probability of dying before time $t$.  In general,
-$H(t)= \int_{0}^{t} h(s) ds = -\log S(t)$.  In the case of the Weibull
-model,
-\begin{eqnarray*}
-h(t) &=& \frac{\alpha}{\lambda_i^{\alpha}} t^{\alpha - 1}  \\
-S(t) &=&  \exp \left\{ -\left( \frac{t}{\lambda_i} \right)^{\alpha} \right\} \\
-H(t) &=& \left( \frac{t}{\lambda_i} \right)^{\alpha}
-\end{eqnarray*}
-For the Weibull model, the hazard function $h(t)$ can increase or
-decrease monotonically over time.  
-
-\item The \emph{systematic component} $\lambda_i$ is modeled as
-  \begin{equation*}
-    \lambda_i = \exp(x_i \beta),
-  \end{equation*}
-  where $x_i$ is the vector of explanatory variables, and $\beta$ is
-  the vector of coefficients.
-  
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the Weibull model are
-  simulations of the expected duration:
-\begin{equation*}
-E(Y) = \lambda_i \, \Gamma (1 + \alpha^{-1}),
-\end{equation*}
-given draws of $\beta$ and $\alpha$ from their sampling
-distributions. 
-
-\item The predicted value ({\tt qi\$pr}) is drawn from a distribution
-  defined by $(\lambda_i, \alpha)$.  
-
-\item The first difference ({\tt qi\$fd}) in expected value is
-\begin{equation*}
-\textrm{FD} = E(Y \mid x_1) - E(Y \mid x). 
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups. When
-    $Y_i(t_i=1)$ is censored rather than observed, we replace it with
-    a simulation from the model given available knowledge of the
-    censoring process.  Variation in the simulations are due to
-    uncertainty in simulating $E[Y_i(t_i=0)]$, the counterfactual
-    expected value of $Y_i$ for observations in the treatment group,
-    under the assumption that everything stays the same except that
-    the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  When
-    $Y_i(t_i=1)$ is censored rather than observed, we replace it with
-    a simulation from the model given available knowledge of the
-    censoring process.  Variation in the simulations are due to
-    uncertainty in simulating $\widehat{Y_i(t_i=0)}$, the
-    counterfactual predicted value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "weibull", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt icoef}: parameter estimates for the intercept and ``scale''
-     parameter $1 / \alpha$.  
-   \item {\tt var}: the variance-covariance matrix.  
-   \item {\tt loglik}: a vector containing the log-likelihood for the
-     model and intercept only (respectively).
-   \item {\tt linear.predictors}: a vector of the
-     $x_{i}\beta$.
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-\item Most of this may be conveniently summarized using {\tt
-   summary(z.out)}.  From {\tt summary(z.out)}, you may
- additionally extract: 
-   \begin{itemize}
-   \item {\tt table}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from a
-     distribution defined by $(\lambda_i, \alpha)$.  
-   \item {\tt qi\$fd}: the simulated first differences between the
-     simulated expected values for {\tt x} and {\tt x1}.  
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/weibull}
-\input{citeZelig}
-
-\subsection* {See also}
-The Weibull model is part of the survival library by Terry Therneau,
-ported to R by Thomas Lumley.  Advanced users may wish to refer to
-\texttt{help(survfit)} in the survival library, and \cite{VenRip02}.
-Sample data are from \cite{KinAltBur90}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
-
-
-
diff --git a/inst/doc/weibull.pdf b/inst/doc/weibull.pdf
deleted file mode 100644
index f8a7805..0000000
Binary files a/inst/doc/weibull.pdf and /dev/null differ
diff --git a/inst/po/en/LC_MESSAGES/R-Zelig.mo b/inst/po/en/LC_MESSAGES/R-Zelig.mo
new file mode 100644
index 0000000..e69de29
diff --git a/inst/templates/DESCRIPTION b/inst/templates/DESCRIPTION
new file mode 100644
index 0000000..a1dd7a6
--- /dev/null
+++ b/inst/templates/DESCRIPTION
@@ -0,0 +1,11 @@
+Package:
+Version: .1
+Date:
+Title: A Zelig Model
+Author:
+Maintainer:
+Depends:
+Description: A Zelig Model
+License: GPL (>=2)
+URL:
+Packaged: 
diff --git a/inst/templates/PACKAGE.R b/inst/templates/PACKAGE.R
new file mode 100644
index 0000000..6570107
--- /dev/null
+++ b/inst/templates/PACKAGE.R
@@ -0,0 +1,20 @@
+#' \\package\\
+#' 
+#' \tabular{ll}{
+#'   Package: \tab \\package\\\cr
+#'   Version: \tab 0.1\cr
+#'   Date: \tab 2011-04-25\cr
+#'   Depends: \\depends\\
+#'   License: \tab GPL version 2 or newer\cr
+#' }
+#'
+#' Edit this description
+#'
+#' @name \\package\\-package
+#' @aliases \\package\\-package \\package\\
+#' @docType package
+#' @importFrom Zelig describe param qi
+#' @author \\author\\
+#' @keywords package
+NULL
+
diff --git a/inst/templates/ZELIG.README b/inst/templates/ZELIG.README
new file mode 100644
index 0000000..e69de29
diff --git a/inst/templates/describe.R b/inst/templates/describe.R
new file mode 100644
index 0000000..80db33c
--- /dev/null
+++ b/inst/templates/describe.R
@@ -0,0 +1,10 @@
+#' Describe the \\model\\ Zelig Model
+#' @param ... ignored parameters
+#' @return a list specifying author, title, etc. information
+#' @export
+describe.\\model\\ <- function(...) {
+  list(
+       authors = "",
+       text = ""
+       )
+}
diff --git a/inst/templates/param.R b/inst/templates/param.R
new file mode 100644
index 0000000..691dc4c
--- /dev/null
+++ b/inst/templates/param.R
@@ -0,0 +1,13 @@
+#' Extract Samples from a Distribution in Order to Pass Them to the \code{qi} Function
+#' (this is primarily a helper function for the \\model\\ model)
+#' @param obj a zelig object
+#' @param num an integer specifying the number of simulations to compute
+#' @param ... additional parameters
+#' @return a list specifying link, link-inverse, random samples, and ancillary parameters
+#' @export
+param.\\model\\ <- function(obj, num=1000, ...) {
+  list(
+       coef = NULL,
+       linkinv = NULL
+       )
+}
diff --git a/inst/templates/qi.R b/inst/templates/qi.R
new file mode 100644
index 0000000..e23b68d
--- /dev/null
+++ b/inst/templates/qi.R
@@ -0,0 +1,16 @@
+#' Compute Quantities of Interest for the Zelig Model \\model\\
+#' @param obj a zelig object
+#' @param x a setx object
+#' @param x1 an optional setx object
+#' @param y ...
+#' @param num an integer specifying the number of simulations to compute
+#' @param param a parameters object
+#' @return a list of key-value pairs specifying pairing titles of quantities of interest
+#'         with their simulations
+#' @export
+qi.\\model\\ <- function(obj, x=NULL, x1=NULL, y=NULL, num=1000, param=NULL) {
+
+  list(
+       "Expected Value: E(Y|X)" = NA
+       )
+}
diff --git a/inst/templates/zelig2.R b/inst/templates/zelig2.R
new file mode 100644
index 0000000..f341dc0
--- /dev/null
+++ b/inst/templates/zelig2.R
@@ -0,0 +1,14 @@
+#' Interface between the Zelig Model \\model\\ and 
+#' the Pre-existing Model-fitting Method
+#' @param formula a formula
+#' @param ... additonal parameters
+#' @param data a data.frame 
+#' @return a list specifying '.function'
+#' @export
+zelig2\\model\\ <- function (formula, ..., data) {
+  list(
+       .function = "",
+       formula = formula,
+       data = data
+       )
+}
diff --git a/inst/zideal/zvcServer.R b/inst/zideal/zvcServer.R
deleted file mode 100644
index ab2fd8c..0000000
--- a/inst/zideal/zvcServer.R
+++ /dev/null
@@ -1,1292 +0,0 @@
-
-### DESCRIPTION: Reads all functions describe.#.R that are part of
-###              Zelig package and gets all direct dependencies of Zelig
-###              packages names and url's to installed them
-###              Taken from Ferdi Alhimadi's code
-###
-zeligDescribeModels <- function(PKG="Zelig", baseOrRecPkgs=NULL)
-  {
-    descList <- ls(envir=asNamespace(PKG),pattern="^describe")
-    model <- NULL
-    if(length(baseOrRecPkgs) <= 0){
-      baseOrRecPkgs <- rownames(installed.packages(priority = "base"))
-      baseOrRecPkgs <- getRidVersion( baseOrRecPkgs)
-    }
-###  model <- pkgDepends(PKG)$Depends
-    url <- NULL
-    for (desc in descList){
-      ## call the function which return a list
-      tmp <- do.call(desc,list(),env=asNamespace("Zelig"))
-      ## get the package name from the list (maybe CRAN too)
-      nm <- names(model)
-      tmpmodel <- tmp$package$name
-      tmpmodel <- trim.blanks(tmpmodel)
-      tmpmodel <- getRidVersion(tmpmodel, TRUE)
-      model <- c(model, tmpmodel)
-      names(model) <- c(nm,tmpmodel)
-      nmu <- names(url)
-      tmpurl <- tmp$package$CRAN
-      
-      if(length(tmpurl) > 0 )
-        tmpurl <- trim.blanks(tmpurl)
-      else
-        tmpurl <- NA
-      url <- c(url,tmpurl)
-      names(url) <- c(nmu,tmpmodel)
-    
-    }
-    model <- unique.default(model)
-  
-    savenm <- F
-    allPkgs <- names(model)
-    if(length(allPkgs) <= 0){
-      savenm <- T
-      allPkgs <- model
-    }
-    allPkgs <- matrix(as.vector(allPkgs), ncol=1)
-    if(length(baseOrRecPkgs))
-      allPkgs  <- allPkgs[!(allPkgs %in% baseOrRecPkgs)]
-    model <- allPkgs
-    if(savenm)
-      names(model) <- allPkgs
- 
-    ind <- sapply(model, match, names(url))
-    if(length(ind)){
-      ind <- suppressWarnings(na.omit(unlist(ind)))
-      url <- url[ind]
-    }
-  
-    lst <- c(list(model=model), list(url=url))
-    return(lst)
-  }
-
-### DESCRIPTION: Gets all the depends packages recursevely starting
-###              with the input package repList as argument of the function call.  
-###              It obtains all dependends packages and their version numbers
-###              from the description docs of the packages.   
-###
-### OUTPUT: A list of two elements, the packages edges and their versions. 
-###         if res<- makeLinks("Zelig"), then names(res) = c(edges, vers)
-###         res$edges is a list whose elements are packages names,i.e. nodes,
-###         and whose values are a vector of packages names that the list node
-###         depends upon; res$vers is also a list that contains the nodes of 
-###         res$edges but the values of the list nodes are the versions of the
-###         dependencies.
-###
-### INPUT: repList the mane of a package or vector with packages names;
-###        it can also be an url, in that case gets all vailable packages.
-###        keep.builtin where to stop the recursion, if false it will
-###        stop for the packages that are installed in the machine
-###        at priority="base";  if keep.builtin is string or null 
-###        it stops according to the specify priority,
-###        keep.builtin = "high","NA", NULL,"recommended"; 
-###        where "high"= c("base", "recommended");  
-###        if keep.builtin =TRUE it continues recursively until returns nothing.
-###        baseOrRecPkgs if not null it has a vector of packages and then
-###        overwrites keep.builtin and stops the recursion for packages in the vector.  
-###        norepeat means that, for given node, packages that are listed in depends
-###        and also in either imports or suggests are shown only once for depends.   
-###       
-###
-### USE res <- makeLinks(pkg)
-###
-### USES grep.exact, readDescription
-###
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 12/19/2006
-###
-
-makeLinks <- function (repList, keep.builtin = FALSE,baseOrRecPkgs=NULL,norepeat=FALSE)
-{
-  iter <- 100 ### not to get into an infinite loop
-
-  fromurl <- FALSE
-  if (is.character(repList) && length(grep("http", repList)) >0)
-    pkgMatList <- lapply(repList, function(x) {
-      available.packages(contrib.url(x))
-      fromurl <- TRUE
-    })
-
-  if(length(baseOrRecPkgs) <= 0 ){
-    if(!is.logical(keep.builtin))
-      baseOrRecPkgs <- rownames(installed.packages(priority = keep.builtin))
-    else if (!keep.builtin) 
-      baseOrRecPkgs <- rownames(installed.packages(priority = "base"))
-    baseOrRecPkgs <- getRidVersion( baseOrRecPkgs)
-  }
-###
-  keep.builtin <- ifelse(length(baseOrRecPkgs) >0, FALSE, TRUE)
-  
-  if(is.character(repList) && fromurl){
-    
-    allPkgs <- unlist(sapply(pkgMatList, function(x) rownames(x)))
-  }else if (length(repList) > 0){
-    
-    allPkgs <- matrix(as.vector(repList), ncol=1)
-  }
-  if (!length(allPkgs)) 
-      stop("no packages in specified repositories")
-  allPkgs   <- unique(allPkgs)
-  allPkgs   <- unlist(sapply(allPkgs, FUN="trim.blanks"))
-  allPkgs   <- sort(allPkgs)
-  basePkgs  <- allPkgs
-  basePkgs  <- getRidVersion(basePkgs)
- 
-  if (length(allPkgs) && !keep.builtin) 
-    allPkgs  <- allPkgs[!(basePkgs %in% baseOrRecPkgs)]
-  if(length(allPkgs) <= 0){
-    message(basePkgs, " is part of base packages. Set keep.builtin = T") 
-    return(list())
-  }
-  pkgEdges  <- list()
-  pkgVers   <- list()
-  pkgURL    <- list()
-  pkgLevel  <- list()
-  cnt=0;
-
-  Rvers <- unlist(packageDescription(basePkgs[1])$Built)
-  if(class(Rvers) == "try-error")
-     Rvers <- unlist(packageDescription(allPkgs[1])$Built)
-  
-  Rvers <- trim.blanks(Rvers)
-
-  RV1   <-  strsplit(sub("^R[[:space:]]*([-0-9 .]*)","\\1", Rvers), ";")[[1]][1]
-  Rvers <- trim.blanks(RV1)
-  RV2 <- strsplit(sub("^R[[:space:]]*([-[:digit:].]*)","\\1", Rvers), ";")[[1]][1]
-  RV2 <- trim.blanks(RV2)
-  RV  <- strsplit(sub("^R[[:space:]]*(.*)","\\1", Rvers), ";")[[1]][1]
-  Rvers <- trim.blanks(RV)
- if (!identical(RV1, Rvers)||!identical(RV2,Rvers ))
-   warning("Bad regx in makeLinks...check")
-###   0-9==[:digit:]
-  
- 
-  while(length(allPkgs)>0){
-     cnt <- cnt +1
-     
-###     message("counting ....")
-###     print(cnt)
-     iterPkgs <- NULL
-     for(n in 1:length(allPkgs)){
-       cat(n, ": ")
-       cat(allPkgs[n],"\n")
-       pack <- allPkgs[n]
-###      print(pack)
-      
-       lst <- suppressWarnings(readDescription(pack, norepeat))
-###    lst <- readDescription(pack, norepeat)
-        
-       if(class(lst) == "try-error")
-         next; 
-    
-       if(length(lst$totdepend) <= 0)
-         next; 
-       
-       pkgNames <- lst$totdepend
-       nm <- names(pkgNames)
-       pkgNames <- getRidVersion(pkgNames)
-       names(pkgNames) <- nm
-       url <- unlist(lst$url)
-       Rvers <- unlist(lst$Rvers)    
-       pkgVersions <- lst$totversion
-       
-       deps  <- unlist(pkgNames)
-       
-       
-       depsV <- unlist(pkgVersions)
-       ix <- match("R",names(depsV))
-       if(!is.na(ix))
-         depsV <- depsV[-ix]
- ###     message("Packages names...")
- ###     print(deps)
- ###     message("Packages versions...")
- ###     print(depsV)
-     
-      if (length(deps) && !keep.builtin) {
-        deps  <- deps[!(deps %in% baseOrRecPkgs)]
-        depsV <- depsV[!(names(depsV) %in% baseOrRecPkgs)]
-        url <- url[!(names(url) %in%  baseOrRecPkgs)]
-      }
-      if(length(deps) <= 0)
-        next;
-      nm <- NULL
-      if(length(pkgEdges) > 0)
-        nm <- names(pkgEdges)      
-       pkgEdges <- c(pkgEdges, list(deps))
-       names(pkgEdges) <- c(nm, pack)
-       pkgVers <- c(pkgVers, list(depsV))
-       names(pkgVers) <- c(nm, pack)
-       pkgURL <- c(pkgURL, list(url))
-       names(pkgURL) <- c(nm, pack)
-       pkgLevel <- c(pkgLevel, list(cnt))
-       names(pkgLevel) <- c(nm, pack)
-       iterPkgs <- c(iterPkgs, deps)  
-      
-     }
-    
-     nmedge   <- names(pkgEdges)
-     nmedge   <- sapply(nmedge, FUN="trim.blanks")
-     countfor <- NULL
-     nmedge   <- unique.default(nmedge)
-     iterPkgs <- unlist(sapply(iterPkgs, FUN="trim.blanks"))
-     iterPkgs <- unique.default(iterPkgs)
-     diffPkgs <- NULL
-     if(length(nmedge) > 0){
-       commonPkgs <- intersect(nmedge, iterPkgs)
-       diffPkgs   <- setdiff(iterPkgs, nmedge)
-       diffPkgs <- unique.default(diffPkgs)
-       nmedgemo <- sapply(nmedge,function(nm) paste("^",nm,"$", sep=""))
-       ### no need to call grep.exact 
-       countfor <- unlist(sapply(nmedgemo,grep,iterPkgs,extended=T))
-    
-     }
-    
-     if(length(countfor) > 0){
-       names(iterPkgs)[countfor] <-iterPkgs[countfor] ### packages found by grepping 
-       ix <- grep.exact(iterPkgs[countfor], nmedge)$index ### eliminate not exact matches
-       if(length(ix) > 0)
-         countfor <- countfor[-ix]
-     }
-     if(length(countfor) > 0)
-       iterPkgs <- iterPkgs[-countfor]
-     
-     
-     if(length(diffPkgs) > 0 && length(iterPkgs) > 0
-        && any(sort(iterPkgs) != sort(diffPkgs))){
-       warning("Bad counting of packages...check code")
-       best1 <- setdiff(iterPkgs, diffPkgs)
-       best2 <- setdiff(diffPkgs, iterPkgs)
-     }
-     if(length(iterPkgs) > 0)
-       allPkgs <- iterPkgs
-     else
-       break;
-     
-###     message("next iteration...")
-     names(allPkgs) <- NULL
-     allPkgs <- unlist(sapply(allPkgs, FUN="trim.blanks"))
-     allPkgs <- sort(allPkgs)
-###     message("What is left....")
-  
-     
-     if(cnt > iter) {
-       warning("Iterations limit to get the dependencies reach at..",iter) 
-       break;
-     }
-   }
-       
-   
-###    nodeDataDefaults(depG, attr = "size") <- as.numeric(NA)
-###        if (dosize) {
-###            sizes <- getDownloadSizesBatched(makePkgUrl(pMat))
-###            nodeData(depG, n = rownames(pMat), attr = "size") <- sizes
-###        }
-###  print(pkgEdges)
-###  print(pkgVers)
-### 
-  
- 
-  if((length(basePkgs) <= 1 && !identical("Zelig", basePkgs))){
-    rootlst  <- read.root(basePkgs)
-    pkgEdges <- c(rootlst[1], pkgEdges)
-    nm <- names(pkgEdges)
-    pkgVers <- c(rootlst[2], pkgVers)
-    names(pkgVers) <- nm
-    pkgURL <- c(rootlst[4], pkgURL)
-    names(pkgURL) <- nm
-    pkgLevel <- c(rootlst[5], pkgLevel)
-    names(pkgLevel) <- nm
-  }
-   pkgVers <- c(Rvers,pkgVers)
-  lst <- c(edges=list(pkgEdges),vers=list(pkgVers),url=list(pkgURL), level=list(pkgLevel))
-  return(lst)
-}
-
-### DESCRIPTION: helper func that gets information about packages in vector basePkgs
-###              as stored in packageDescription and returns list
-###              with elements of list that will be used in Zelig.
-###
-
-read.root <- function(basePkgs){
-  alledge <- NULL 
-  allvers <- NULL
-  allnode <- NULL
-  allurl  <- NULL
-  nm <- names(basePkgs) 
-  basePkgs <- getRidVersion(basePkgs)
-  names(basePkgs) <- nm
-  for(pack in basePkgs){
-    
-    lst <- try(packageDescription(pack), silent=TRUE)
-    vers <- lst$Version
-    Rbuilt <- lst$Built
-    Rbuilt <- trim.blanks(Rbuilt)
- ###trying different ways for regex; they should all give same result
-    RV1 <-  strsplit(sub("R[[:space:]]*([0-9 .]*)","\\1", Rbuilt), ";")[[1]][1]
-    RV1 <- trim.blanks(RV1)
-    RV2 <-  strsplit(sub("R[[:space:]]*([[:digit:].]*)","\\1", Rbuilt), ";")[[1]][1]
-    RV2 <- trim.blanks(RV2)
-    RV <-  strsplit(sub("R[[:space:]]*(.*)","\\1", Rbuilt), ";")[[1]][1]
-###   print(RV)
-    Rvers <- trim.blanks(RV)
-    if(!identical(RV1, Rvers) || !identical(RV2, Rvers))
-      warning("Bad regex in read.root...check")
-    
-    Rvers <- paste("R", Rvers)
-    Rvers <- trim.blanks(Rvers)
-    url <- lst$URL
-    
-    if(length(url) <= 0 )url <-  "CRAN"
-    if( is.na(url)) url <-  "CRAN"
-   
-    alledge <- c(alledge, pack)
-    names(alledge) <- rep("depends", length(alledge))
-    allvers <- c(allvers, vers)
-    
-    allnode <- c(allnode, Rvers)
- 
-    allurl  <- c(allurl,url)
-    
-  }
- 
-   names(allvers) <- alledge
-   names(allurl)  <- alledge
-  
-  allnode <- unique.default(allnode)
-  mxR <- sort(allnode)[1]
-  redge <- c(list(alledge), list(allvers), list(allnode), list(allurl), list("0"))
-  names(redge) <- c(mxR, "vers", "node", "url", "level")
-  
-
-  return(redge)
-    
-    
-  }
-### DESCRIPTION: helper function to makeLinks; it reads the package
-###              description and returns dependencies (i.e. depends, imports, suggests)
-###              of derived packages, the url's to download dependent packages and R version
-### INPUT: name of a package
-###
-
-readDescription <- function(pack,norep){
-### Get rid of leading a trailing white spaces
-
-  nm <- names(pack)
-  pack   <- suppressWarnings(trim.blanks(pack))
-  packno <- pack
-  if(length(grep("_", pack)) > 0){ 
-    packno <- sub("(.*)_([-0-9.]*)","\\1", packno)
-    packno <- suppressWarnings(trim.blanks(packno))
-    names(packno) <- nm
-  }
- 
-  lst0 <- try(packageDescription(packno), silent=TRUE)
- 
-  if(class(lst0) == "try-error")
-    lst0 <- try(packageDescription(pack), silent=TRUE)
-###      print(lst$Package) 
-###      print(lst$Version)
-  if(class(lst0) == "try-error")
-    return;
-                
-  depends  <- lst0$Depends
-  Rvers <- NA
-  dependsV <- NULL
-  if(length(depends) > 0){
-  
-    dependslst <- sapply(strsplit(depends,",")[[1]], FUN="trim.blanks")
-   
-    lst <- find.names.vers(dependslst)
-    
-    depends  <- unlist(lst$pkgnames)
-    dependsV <- unlist(lst$pkgnumbers)
-    Rvers <- try(dependsV["R"], silent=T)
-    ix <- match("R", depends)
-    ixg <- grep("^R", depends, extended=T)
-    if(length(ixg)> 1)
-      warning("check readDescription")
-   
-    if(!is.na(ix))
-      depends <- depends[-ix]
-    ix <- try(match("R", names(dependsV)), silent=T)
-   
-    if(!is.na(ix))
-      dependsV <- dependsV[-ix]
-      
-  }
-  
-    
-  rdep <-  trim.blanks(lst0$Built)
- 
-  
-  if(!is.logical(Rvers) && length(rdep) > 0 && rdep != ""){
-###   RV <-  strsplit(sub("R[[:space:]]*([0-9 .]*)","\\1", rdep), ";")[[1]][1]
-      RV <-  strsplit(sub("R[[:space:]]*(.*)","\\1", rdep), ";")[[1]][1]
-###   print(RV)
-    Rvers <- trim.blanks(RV)
-
-  }
-     
-###      print(depends)
-  suggests <- lst0$Suggests
-  suggestsV <- NULL
-  if(!is.null(suggests)){
-  
-    suggestslst <- unlist(sapply(strsplit(suggests,",")[[1]], FUN="trim.blanks"))
-    lst <-  find.names.vers(suggestslst)
-    suggests <- lst$pkgnames
-    suggestsV <- lst$pkgnumbers
-    ind <- na.omit(unlist(sapply(depends,match,suggests)))
-    ind1 <- na.omit(unlist(sapply(depends,match,names(suggestsV))))
-    ### package are shown under any category if repeated when norep=F
-   
-    
-    if(length(unlist(ind)) > 0 && !norep)
-      suggests <- suggests[-ind]
-    if(length(unlist(ind1)) > 0 && norep)
-      suggestsV <- suggests[-ind1]
-
-  }
-  
-  imports  <- lst0$Imports
-  importsV <- NULL
-  if(length(imports)>0){
-    
-    importslst <- unlist(sapply(strsplit(imports,",")[[1]], FUN="trim.blanks"))
-    lst <-  find.names.vers(importslst)
-    imports <- lst$pkgnames
-    importsV <- lst$pkgnumbers
-
-    ind <- na.omit(unlist(sapply(depends,match,imports)))
-    ind1 <- na.omit(unlist(sapply(depends,match,names(importsV))))
-   
-### package are shown under any category if repeated and norep =F
-    
-    if(length(unlist(ind1)) > 0 && !norep)
-      importsV <- imports[-ind1]
-    if(length(unlist(ind)) > 0 && !norep)
-      imports <- imports[-ind]
-      
-    ind <- na.omit(unlist(sapply(suggests,match,imports)))
-    ind1 <- na.omit(unlist(sapply(suggests,match,names(importsV))))
- 
-    if(length(unlist(ind1)) > 0 && !norep)
-      importsV <- imports[-ind1]
-    if(length(unlist(ind)) > 0 && !norep)
-      imports <- imports[-ind]
-    
-  }
-     
-  if(length(depends) <= 0 && length(suggests) <= 0 && length(imports) <= 0)
-    return(list());
-     
-  allpkg <- c(depends, imports, suggests)
-  allvers <- c(dependsV, importsV, suggestsV)
-
-  dep <- rep("depends",length(depends))
-  if(length(depends) > 0)
-    names(depends) <- dep
-  sgg <- rep("suggests",length(suggests))
-  if(length(suggests) > 0)
-    names(suggests) <- sgg
-  imp <- rep("imports",length(imports))
-  if(length(imports) > 0)
-    names(imports) <- imp
-        
-  totdepend <- c(depends,imports, suggests)
-  if(length(totdepend) <= 0){
-    lst <- c(Rvers=list(Rvers))
-    return(lst)
-  }
- 
-  url <- sapply(allpkg, function(pkg)
-                {
-                  nm <- packageDescription(pkg)$URL
-                  if(length(nm) > 0)
-                    nm <- trim.blanks(nm)
-              
-                  if(length(nm) <= 0) 
-                    nm <- "CRAN"
-               
-                  return(nm)
-                })
-  
-###  print(totdepend)                    
-  lst <- c(totdepend =list(totdepend), totversion = list(allvers),
-           url=list(url), Rvers=list(Rvers))
-  return(lst)
-}
-### DESCRIPTION: helper function to makeLinks & readDescription; it reads the package
-###              description and returns the name, version # of pkgs in dependlst 
-###              
-### INPUT: dependlst a list with the string return from one of: dependlst = 
-###        packageDescription(pkg)$Depends,  packageDescription(pkg)$Imports
-###         packageDescription(pkg)$Suggests
-###        
-###
-### OUTPUT: names and version numbers for the packages in dependlst
-###
- find.names.vers <- function(dependslst){
-   
-   depends <- suppressWarnings(unlist(sapply(dependslst, FUN="trim.blanks")))
-   tag <- "([[:alnum:]_ .]*)[[:space:]]*([-\\(<>= .[:alnum:][:space:]\\)]*)"
-   
-   ### from packages.dependencies (library tools):
-   ### pat <- "^([^\\([:space:]]+)[[:space:]]*\\(([^\\)]+)\\).*"
-  
-   pkgnames <- sapply(depends, function(nm) {
-     res <- sub(tag,"\\1", nm)})
-   names(pkgnames) <- NULL
-   pkgnames <- suppressWarnings(sapply(pkgnames, FUN="trim.blanks"))
-   names(pkgnames) <- pkgnames
-   ind <- 1:length(dependslst)
-   names(ind) <- pkgnames
-   pkgnumbers <- sapply(ind, function(n){
-     dep <- dependslst[n]
-     pk  <- pkgnames[n]
-     names(pk) <- NULL
-     if(suppressWarnings(identical(pk,"R"))){ 
-       names(dep) <- NULL
-       res <- sub(tag,"\\2", dep)
-       res <- suppressWarnings(trim.blanks(res))
-       pat <- "[-\\(<>= [:space:]]*([0-9.]*)[\\)]*"
-       res <-  sub(pat,"\\1",res)  
-       return(res)
-     }
-     desc <- NULL
-     names(pk) <- NULL
-     if(!suppressWarnings(identical(pk, "R"))){
-       
-       desc <- try(packageDescription(pk), silent=TRUE)
-     }
-     nm <- NULL
-     nm0 <- nm
-     if(class(desc)!="try-error" && length(desc) > 0) 
-       nm0 <- try(desc$Version, silent=TRUE)
-    
-     if(class(nm0)!="try-error" && length(nm0) > 0)
-       nm <- trim.blanks(nm0)
-        
-     if(length(nm) <= 0 || nm=="" ) nm <- NA
-     return(nm)})
-   
-   lst <- c(list(pkgnames=pkgnames), list(pkgnumbers=pkgnumbers))}
-                      
-    
-
-trim.blanks <- function(x) {
-### at the beginning of string"^" gets anny number (+) of white spaces
-  f <- x
-  if(length(x))
-   f <- na.omit(x)
-    
-   if(length(f) <= 0)
-     return(x)
-   if(length(f)>1)
-    print(f)
-    if(f=="" )
-      return(x)
-    x <- sub("^[[:space:]]*(.*)", "\\1",x) ###get \n\t
-    x <- sub('^ +', '', x) ###get white spaces
-     
-### at the ending of string"$" gets anny number (+) of white spaces
- 
-    x <- sub("(.*)[[:space:]]*$", "\\1", x)
-    x <- sub(' +$', '', x)
-     return(x)
-    }
-
-### 
-### DESCRIPTION: Creates list with packages that are in input list edge
-###              List elements are vectors with the package name,
-###              its version and the parent.  For every package in
-###              edge, finds out the version stored in vers and the parent, 
-###              which is the name of the list element the package belongs to
-###              
-### INPUT: The list output of function makeLinks
-###        lstoutput <- makeLinks("Zelig")
-###
-### OUTPUT: A list of packages, whose values is a vector of three values 
-###         First, the package name with any of the tags depends, suggests
-###         and imports as name of the vector component.  Second, the version
-###         of the package and third the parent it was derived from.
-###
-### USE: lst the output of makeLinks, then
-###      res <- reverseLinks(lstoutput <- makeLinks(pkg))
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/11/2007
-###
-
-reverseLinks <- function(lstoutput){
-  edge <- lstoutput$edges
-  if(length(edge) <= 0)
-    stop(paste("No leaves for ", lstoutput))
-  
-  vers <- lstoutput$vers
-  urls <- lstoutput$url
-  level <- sapply(lstoutput$level, as.numeric)
-  mxlevel <- max(unlist(level))
-  
-  cnt    <- length(edge)
-  nmpkg  <- names(edge)
-  outlst <- list()
-  inc <- 0
-  if(length(vers) > length(edge)) inc <- 1
-  
-  while(cnt > 0){
-    
-    pkgs <- edge[[cnt]]
-    nm   <- nmpkg[cnt]
-    pkgV <- vers[[cnt+inc]]
-    pkgU <- urls[[cnt]]
-    ind  <- as.list(1:length(pkgs))
-    depth <- level[[cnt]] 
- ###   print(depth)
-    
-    lst <- lapply(ind, function(n){
-      pkg <- pkgs[n]
-      ver <- pkgV[n]
-      url <- pkgU[n]
-   ###   print(url)
-      names(ver) <- NULL
-      names(url) <- NULL
-      ret <- c(pkg, vers=ver, parent=nm, URL=url, depth=depth)
-    })
-                  
-    nmlst <- names(outlst)
-    outlst <- c(outlst, lst)
-    names(outlst) <- c(nmlst, nm)
-    cnt = cnt -1;
-    
-  }
-  vec <- sapply(outlst, function(m) m[1])
-  names(outlst) <- vec
-  return(outlst)
-    
-}
-
-### 
-### DESCRIPTION: Creates matrix with packages that are input list lst
-###              List elements are vectors with the package name,
-###              its version and the parent.  Unlist the input lst
-###              and form a matrix, which every row the package name, version, parent  
-###              and type of dependency (depends, imports, suggests).  
-###              
-### INPUT: list lst that is the output of function reverseLinks
-###        Each element is a package and the value is the version and parent
-###        
-### OUTPUT: Dependency matrix similar to available.packages. 
-###         Rows for every package; columns the name, version, parent
-###         and type of dependency from parent description.
-###
-### USE: mat <- listomat( res <- reverseLinks(res <- makeLinks(package)))
-###
-###
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/11/2007
-###
-listomat <- function(lst){
-  
-  rw  <-  length(lst)
-###  print(lst)
-  if(length(lst) <=0)
-    stop(paste("No leaves for ", lst)) 
-  
-  if(length(lst) <= 1){
-    mat <- as.matrix(lst[[1]])
-    mat <- t(mat)
-  }else {  
-    cl  <- length(lst[[1]])
-    mat <- matrix(unlist(lst), ncol=cl, byrow=T)
-  }
- 
-  colnames(mat) <- c("Package", "Version", "Node", "URL", "Depth")
-  matdepth <- mat[,5]
-  maturl <- mat[, 4]
-  mat <- mat[,1:3 ]
-
-  nm  <- names(lst[[1]])
-  nm  <- c(nm, "Relation")
-  nm[1] <- "Package"
-  vec <- sapply(lst, function(m) names(m)[1])
-
-  if(length(dim(mat)) > 0)
-    mat <- cbind(mat, Relation=vec, URL=maturl, depth=matdepth)
-  else{
-    names(vec) <- NULL
-    mat <- c(mat,  Relation=vec, URL=maturl, depth=matdepth)
-  }
-   mat <- clean.sweep(mat)
- 
-  return(mat)
-}
-
-###DESCRIPTION: Utility function to remove trailing white spaces
-
- clean.sweep <- function(mat){
-    if(length(dim(mat))>0){
-      nm <- colnames(mat)
-      mat <- apply(mat,2, function(cl) {sapply(cl, FUN="trim.blanks")})
-      rownames(mat) <- mat[,"Package"]
-      colnames(mat) <- nm
-      return(mat)
-    }
-      nm <- names(mat) 
-      mat <- unlist(sapply(mat,FUN="trim.blanks"))
-      names(mat) <- nm
-    return(mat)
-  }
-  
-### DESCRIPTION: Utility function to check the results of applying grep
-###              grep may not get the exact full name but uses a loose
-###              regex to get all names that contains the input words
-###              For example grep("abc", c("abc", "pab", "dabcm", "clr, "abc""))
-###              will return 1, 3, 5. This function eliminates 3, counting characters
-###              NOTE: no need to apply this function if you use grep with extended=TRUE
-###              grep("^describe$", c("describe", "le describe", "desc", "describeport"), extended=T)
-###              gets only [1] 1
-###
-### NOTE: match will get the exact full string and will dismiss anything
-###       that is not an exact match: match("abc", "pabcm")=NA; however,
-###       it only finds the first occurance,
-###       i.e. match("abc",c("pabcqr", "abc", "lmn","vabc","abc"))= 2
-###       Same as grep("^abc$", c("pabcqr", "abc", "lmn","vabc","abc"), extended=T)
-###  
-### USES:        grep
-###              
-### INPUT:  matinst <- grep.exact(); outcome a vector of character
-###         we want to check for correctness.
-###         input is another vector of the strings
-###         that need to be found in the outcome.  
-###        
-### OUTPUT: It checks that outcome and input contain the same values 
-###         and eliminate those that are not exact match between outcome and input.
-###         Returns outcome with all not exact matches eliminated; and
-###         index ix of the strins that were eliminated from the
-###         original outcome.
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/24/2007
-### 
-  grep.exact <- function(outcome, input){
-   ind <- 1:length(outcome) 
-   names(ind) <- outcome
-     
-   ix <- sapply(ind, function(n){
-     ret <- NULL
-     if(length(outcome[n]) <= 0)
-       return(ret)
-     nm  <- trim.blanks(outcome[n])
-     pkg <- trim.blanks(input[n])
-    
-    if(nchar(nm) != nchar(pkg))
-      ret <- n
-    return(ret)})
-   ix <- unlist(ix)
-   if(length(ix) > 0)
-    outcome <- outcome[-ix]
-   lst <- list(list(outcome=outcome), list(index=ix))
-   return(lst)
- }
-### DESCRIPTION: Utility function to create a matrix with packages
-###              that are in one of the describe.#.R functions of Zelig
-###              Columns are package name, version, R or Zelig dependency,
-###              relation and URL 
-###
-### USES:        packageDescription 
-###              
-### INPUT:  vector with packages names
-###         cran is string with the URL of the pkg obtained from describe.# 
-###        
-### OUTPUT: Matrix of dependencies and versions. 
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/31/2007
-###
-pkgZeligDescribe <- function(pkg,cran){
-  
-    pkg <- unlist(suppressWarnings(sapply(pkg, FUN="trim.blanks")))
-    zelig <- pkg[length(pkg)]
-  #####R Version under which packages in pkg were built ##### 
-    pkgV <- sapply(pkg, function(p){
-      desc <- packageDescription(p)$Version})
-    
-    pkgV <- unlist(pkgV)
-    pkgUrl <- sapply(pkg, function(p){
-    
-      url <- packageDescription(p)$URL
-      url <- trim.blanks(url)
-      if(length(url) <= 0)
-        url <- "CRAN"
-      else if(is.na(url) || identical(url,"") )
-        url <- "CRAN"
-      return(url)
-    })
-    pkgUrl.old <- unlist(pkgUrl)
-    ln <- length(pkgUrl.old)
-    
-### as per Ferdi's last advised (02/15/2007) use information 
-### in the describe.#.R funcs for the URL of packages
-    zeligurl <- pkgUrl.old[ln]
-    pkgUrl <- c(cran, Zelig=zeligurl)
-   
-    pkgUrl[is.na(pkgUrl)] <- "CRAN"
-    
-    matadd <- matrix(NA, nrow=length(pkg), ncol=5)
-   
-    for(n in 1:length(pkg)){
-     
-      vec <- pkg[n]###1st col: package name
-      vec <- c(vec,pkgV[n])###2nd col: package version
-      if(n == nrow(matadd)){
-        rdep <- packageDescription(pkg[n])$Built 
-        rdep <- strsplit(sub("(R[-0-9 .]*)","\\1", rdep), ";")[[1]][1]
-        vec <- c(vec, rdep)
-      }else
-      vec <- c(vec,zelig)### 3rd col: Zelig
-      vec <- c(vec,"depends")### 4th col: relation
-      vec <- c(vec,pkgUrl[n]) ### 5th col: URL
-      if(length(vec) > 0)
-        matadd[n,] <- vec
-    
-    }
-    
-    rownames(matadd) <- pkg
-### end of R version######
-   
-    colnames(matadd) <- c("Package", "Version", "Node", "Relation", "URL")
-   
- 
-    return(matadd)
-}
-### DESCRIPTION first level dependencies (depends, imports, suggests)
-###             of Zelig as obtained from packageDescription, no recursion
-###
-### USES: readDescription
-###
-### INPUT: name of package, and boolean keep.builtin to discard or
-###        not to discard base packages that comes with R version
-###        baseOrRecPkgs vector of packages not included in output
-###        norepeat only includes packages once for given node
-###        (see makeLinks)
-###
-### OUTPUT: matrix with information of first level dependencies
-###
-ZeligfstLevel <- function(pkg0, keep.builtin,baseOrRecPkgs, norepeat){
-
-  fstlevel <- readDescription(pkg0,norepeat)
-  pkgfst   <- unlist(fstlevel$totdepend)
-  fstnames <- names(pkgfst)
-  urlfst   <- unlist(fstlevel$url)
-  names(urlfst) <- fstnames
-  fstvers <- fstlevel$totversion
-  names(fstvers) <- pkgfst
-  matdev <- NULL
-  if(length(pkgfst) <= 0) return(matdev)
-  
-  if(length(baseOrRecPkgs) <= 0){
-    if(!is.logical(keep.builtin))
-      baseOrRecPkgs <- rownames(installed.packages(priority = keep.builtin))
-    else if(!keep.builtin) 
-      baseOrRecPkgs <- rownames(installed.packages(priority = "base"))
-    baseOrRecPkgs <- getRidVersion(baseOrRecPkgs)
-  }
-  keep.builtin <- ifelse(length(baseOrRecPkgs) >0, FALSE, TRUE)
-    
-  pkgfst  <- pkgfst[!(pkgfst %in% baseOrRecPkgs)]
-  if(length(pkgfst) <= 0) return(matdev)
-  fstnames <- names(pkgfst)
-  fstvers <- fstvers[!(names(fstvers) %in% baseOrRecPkgs)]
-  urlfst <- urlfst[!(names(urlfst) %in%  baseOrRecPkgs)]
-  
-  matdev <- matrix(NA, nrow=length(pkgfst), ncol=5)
-  colnames(matdev) <- c("Package", "Version", "Node", "Relation", "URL")
-  for(m in (1:length(pkgfst)))
-    matdev[m, ] <-  c(pkgfst[m],fstvers[m], pkg0, fstnames[m],urlfst[m] )
-    
-  rownames(matdev) <- pkgfst
-  
-  return(matdev)              
-}
-### DESCRIPTION Helper function. If the package have attached the version number
-###             it removes them.Example, "YourCast_2.9-8" becomes "YourCast"
-###
-getRidVersion <- function(zmat, oneonly=FALSE){
- nm <- NULL
- 
- if(oneonly && length(grep("_", zmat)) <= 0)
-   return(zmat)
- else if(oneonly){
-   nm <- sub("(.*)_([-0-9.]*)","\\1", zmat)
-   nm <- trim.blanks(nm)
-   return(nm)
- }
-
-   if(length(dim(zmat)) <= 0 || dim(zmat)[1] <=1){
-   pkginst <- zmat["Package"]
-  
- }else{
-   pkginst <- zmat[,"Package"]
- }
- 
- pkginst <- sapply(pkginst, function(nm){
-   if(length(grep("_", nm)) <= 0)
-     return(nm)
-   nm <- sub("(.*)_([-0-9.]*)","\\1", nm)
-   nm <- trim.blanks(nm)
- })
-
- pkginst <- unlist(pkginst)
- if(length(dim(zmat)) <= 0|| dim(zmat)[1] <=1){
-   zmat["Package"] <- pkginst
-  
- }else{
-
-   zmat[,"Package"] <- pkginst
-   rownames(zmat) <-  zmat[,"Package"]
-  
-  
- }
- return(zmat)
-}
-
-
-
-###   
-### 
-### DESCRIPTION: Returns/Creates matrix of details corresponding to packages
-###              Zelig depends on directly or indirectly and are installed
-###              in the server where Zelig runs.
-###              Level of dependency is depends, imports, suggests.
-###              Every row describes  a dependent package with the name,
-###              the version, the parent package and the relation to the parent.
-###
-### USES:        makeLinks,reverseLinks,listomat,zeligDescribeModels,ZeligfstLevel
-###              
-### USE:         mat <- create.zelig.all.packages(pkg)
-###              Even when this function is specifically written to return
-###              the matrix of Zelig dependencies, it may be used for
-###              any other packages.  
-###
-### INPUT:       the package name pkg, boolean zdescribe (default true); 
-###              if it is Zelig and zdescribe=T, then calls zeligDescribeModels
-###              that returns a list of packages;
-###              keep.builtin where to stop the recursion.  If false it stops
-###              for packages that are installed in the machine at priority="base",
-###              if true it will continue recursively to all dependencies levels.
-###              If keepbuiltin is a character string then it will set priority
-###              of installed.packages to the string value of keep.builtin.  
-###              norepeat boolean indicating if from a given node the descendants
-###              are repeated if they appear simultaneously in, depends, suggests, imports.
-###              showurl, boolean to display the urls as in packageDescription or not.    
-###
-### OUTPUT: Dependency matrix similar to available.packages. 
-###         Rows for each package that Zelig depends;
-###         columns the name, version, parent
-###         and type of relation (dependency) from parent description. 
-###         
-### AUTHOR Elena Villalon
-###        evillalon at iq.harvard.edu
-###
-###        Last modified: 01/12/2007
-###
-create.zelig.all.packages <- function(pkg="Zelig", zdescribe=TRUE, keep.builtin = FALSE,
-                                      norepeat=FALSE, showurl=FALSE)
-{
- ### pkg <- trim.blanks(pkg)
-  pkg0 <- pkg
-  baseOrRecPkgs <- NULL
-  matzelig <- NULL
-  
-  
-  if(length(baseOrRecPkgs) <= 0 ){
-    if(!is.logical(keep.builtin))
-      baseOrRecPkgs <- rownames(installed.packages(priority = keep.builtin))
-    else if (!keep.builtin) 
-      baseOrRecPkgs <- rownames(installed.packages(priority = "base"))
-    baseOrRecPkgs <- getRidVersion( baseOrRecPkgs)
-  }
-  keep.builtin <- ifelse(length(baseOrRecPkgs) >0, FALSE, TRUE)
-  
-###specific to Zelig: include all models in functions describe.R
-  cran <- NULL
-  if(identical(pkg, "Zelig") && zdescribe){
-    zpack <- zeligDescribeModels(PKG=pkg, baseOrRecPkgs)
-    pkg <- c(zpack$model, pkg)
-    cran <- c(zpack$url, cran)
-  }
-###specific to Zelig:include first level dependencies of Zelig
-  
-  if(identical(pkg0, "Zelig")){
-    pkgz <- NULL
-    matzelig <- ZeligfstLevel(pkg0,keep.builtin, baseOrRecPkgs,norepeat)
- 
-    if(length(matzelig) > 0)
-      pkgz <- rownames(matzelig)
-    matadd <- pkgZeligDescribe(pkg,cran)
-    lnz <- dim(matzelig)
-    lnz <- ifelse(length(lnz) > 0,lnz[1],1)
-    if(!length(matzelig)) lnz <- 0
-  
-    lnf <- dim(matadd)
-    lnf <- ifelse(length(lnf) > 0,lnf[1],1)
-    if(!length(matadd)) lnf <- 0
-  
-    matzelig <- rbind(matzelig, matadd)
-    clnm <- colnames(matzelig)
-    vlevz <- NULL
-    vlevf <- NULL
-    if(lnz) vlevz <- rep("1", lnz)
-    if(lnf) vlevf <- rep("1z",lnf) 
-    depth <- c(vlevz, vlevf)
-   
-    matzelig <- cbind(matzelig, depth)
- 
-    ind <- match("Zelig", matzelig[,1])
-    if(!is.na(ind)) matzelig[ind, "depth"] <- "0"
-    pkg <- pkg[-length(pkg)]
-    pkg <- c(pkg, pkgz)
-  }
-
- 
-### valid for Zelig or any other package
-  
-  res <- makeLinks(pkg,keep.builtin, baseOrRecPkgs,norepeat)
-
-
-  if(length(res$edges) <= 0)
-    return(matzelig)
-  edge <- res$edges
-  vers <- res$vers
-  
-   ### edge is a list with the direct dependencies of Zelig 
-   ### and any of the packages derived from Zelig dependencies
-   ### edge corresponds to nodes and the edges in graph theory
-   ### The list names are the nodes or packages names.
-   ### The list elements are the direct dependencies of the nodes  
-   ### vers is the same list as edge but instead of
-   ### of the package names it has their versions
-   ### or NA if not provided in the description of each package
-   ### The first list element of vers is the R version, the rest
-   ### of the elements map one to one with those in edge.  
-   ### URL are the websites to get the packages as in package Description
-   ### depth the level of graphs (or dependencies) with root starting at 0
-
-   lst <- reverseLinks(res)
-
-###   print(lst)
-   ### it is a list with every element a package and
-   ### the value is the name, version, parent, and relation to parent
-   if(length(unlist(lst)) <= 0)
-     return(matzelig)
-   
-   mat <- listomat(lst)
-   add <- F
-
-   if(length(dim(mat)) <= 0){ ### only one row
-     nmadd <- mat[1]
-     add <- T
-   }
-   ### converts the list return with reverseLinks into a matrix
-   ### colnames(mmm) =  
-   ### "Package"  "Version"  "Node"   "Relation", "URL", "depth"
-  pkg <- sapply(pkg,FUN="trim.blanks")
-
-  
-  if(identical(pkg0, "Zelig")){
-    if(length(dim(mat)) > 0)
-      mat[,"depth"] <- as.numeric(mat[,"depth"]) + 1
-    else
-      mat["depth"] <- as.numeric(mat["depth"]) + 1
-  }
- 
-  mat <- rbind(mat, matzelig)
-  
-  if(add)
-    rownames(mat)[1] <- nmadd
-  ix <- match("URL", colnames(mat))
-  
-  if(!showurl && !identical(pkg0,"Zelig")) 
-    mat <- mat[,-ix]
-  
-  if(!showurl &&identical(pkg0,"Zelig") ){
-    ixz <- grep("^1z$", mat[, "depth"], extended=T)
-
-    if (length(ixz) >0){
-      nr <- nrow(mat[-ixz, ])
-      mat[-ixz, "URL"] <-  rep("CRAN", nr)
-   } else
-      mat <- mat[, -ix]
-               
-  }
-  if(!identical(pkg0,"Zelig")){
-    vec <- addzerodepth(pkg0,colnames(mat))
-    rnm <- rownames(mat)
-    mat <- rbind(mat, vec)
-    nm <- mat[,"Package"]
-    rownames(mat) <- nm
-  }
- 
-    
-   return(mat)
-}
-
-###DESCRIPTION : If no Zelig add a bottom row with the root pkg information
- addzerodepth <- function(pkg,cols){
-    pkg <- trim.blanks(pkg)
-    pkg0 <- pkg
-    if(length(grep("_", pkg)) > 0){
-      pkg <- sub("(.*)_([-0-9.]*)","\\1", pkg0)
-      pkg <- trim.blanks(pkg)
-    }
-    lst <- try(packageDescription(pkg), silent=T)
-    if(class(lst) =="try-error")
-      lst <-  try(packageDescription(pkg0), silent=T)
-    if(class(lst) =="try-error")
-      return(list())
-    rdep <- lst$Built 
-    rdep <- strsplit(sub("(R[-0-9 .]*)","\\1", rdep), ";")[[1]][1]
-    vec <- c(pkg,lst$Version, rdep, "depends")
-    if(length(cols) > 5)
-      vec <- c(vec, "CRAN","0")
-    else
-      vec <- c(vec, "0")
-    names(vec) <- cols
-    return(vec)
-}
-### DESCRIPTION Apply the function create.zelig.all.packages
-###             to all packages in the system as obtained from 
-###             mat <-installed.packages().
-###             Gets pkgs names as mat[,1]
-###
-### USES installed.packages,create.zelig.all.packages
-###
-### OUTPUT a list with matrices, each matrix has the dependencies
-###        of the packages in the system. Eliminates erros. 
-###
-### 02/14/2007
-###
-testall <- function(prior=NULL){
-  localinst <- installed.packages(prior)
-  str <-  " "
-  res <- lapply(localinst[,1], function(nm) {
-  res0 <- NULL
-  res <- try(create.zelig.all.packages(nm))
-  if(class(res)!="try-error")
-    return(res)
-  else{
-    message("Error pkg ", nm)
-    str <- paste(str, nm)
-    
-    return(res0)
-  }})
-  
-  return(invisible(res))
-}
-### DESCRIPTION:Takes the list output of testall and finds the dimensions
-###             of all the matrix elements in the list,e.g. rows and columns
-
-dimtestall <- function(lst=NULL){
-  if (length(lst) <= 0)
-    lst <- testall()
-  ret <- lapply(lst, dim)
-}
-ind.null <- function(d=NULL){
-  if(length(d) <= 0)
-    d <- dimtestall(outp <- testall())
-  ix <- sapply(d,is.null)
-  return(ix)
-}
-
-### DESCRIPTION: Same as testall but the list element output for pkgs
-###              with errors messages is the name of the package
-###              instead of NULL for that element.
-###              If erroronly=T, then it calls
-###              testerror(lst <- testinst(erroronly=T))
-###
-testinst <- function(prior=NULL, erroronly=F){
-  localinst <- installed.packages(prior)
-  res <- lapply(localinst[,1], function(nm) {
-  res0 <- nm
-  print(nm)
-  res <- try(create.zelig.all.packages(nm))
-  if(class(res)!="try-error")
-    return(res)
-  else{
-    message("Error pkg ", nm)
-    return(res0)
-  }})
-}
-### DESCRIPTION: Uses the list output of testinst and eliminates
-###              packages that do not produce errors' messages
-###              It returns the list of packages with errors.
-###              The errors may be due to packages in
-###              baseOrRecPkgs <- rownames(installed.packages(priority = "base"))
-###
-testerror <- function(lst=NULL){
-  if(length(lst) <= 0)
-    lst <- testinst()
-  error <- sapply(lst, function(mat) {
-    ret <- NULL
-    if(length(mat) == 1)
-      return(mat)
-    else
-      return(ret)})
-  error <- unlist(error)
-  return(error)
-  
-}
-### Eliminates null values from the list=lst
-### lst <- testall()
-getNull <- function(lst=NULL){
-  if(!length(lst))
-    lst <- testall()
-  
-  ixnull <- unlist(sapply(lst,is.null))
-  if(length(ixnull)){
-    print(names(lst)[ixnull])
-    lst <- lst[!ixnull]
-  }
-  return(lst)
-  
-}
-checkspaces <- function(allpkg){  
-  res <- lapply(allpkg, function(mat) {
-    rw <- rownames(mat)
-    spc <- sapply(rw, function(nm){ grep("[[:space:]]", nm, extended=T)})
-    spc <- unlist(spc)
-  })
-return(res)
-}
-### err <-  testerror(outp <- testinst())
-###
-###       "base"        "boot"       "class"     "cluster"    "datasets" 
-###    "foreign"    "graphics"   "grDevices"        "grid" "httpRequest" 
-### "KernSmooth"     "lattice"        "MASS"     "methods"        "mgcv" 
-###       "nlme"        "nnet"       "rpart"     "spatial"     "splines" 
-###      "stats"      "stats4"    "survival"       "tcltk"       "tools" 
-###      "utils"
-###
-### Total : 26 packages, with 25 of them in  baseOrRecPkgs.  
-### Note: All errors pkgs are in baseOrRecPkgs <- rownames(installed.packages(priority = "base"))
-### except for "httpRequest", with fields
-### packageDescription("httpRequest")$Depends, Imports, Suggests = NULL
-### > dim(zmatrecm)= 21  6
-### > dim(zmat)= 18  6
-### > dim(zmatbase)= 18  6
-### > dim(zmatnull)= 50  6
-### > dim(zmathigh)= 9 6
-### > dim(zmatNA) = 38  6
-
-
diff --git a/man/GetObject.Rd b/man/GetObject.Rd
new file mode 100644
index 0000000..79b8337
--- /dev/null
+++ b/man/GetObject.Rd
@@ -0,0 +1,19 @@
+\name{GetObject}
+\alias{GetObject}
+\title{Extract the fitted model object from the Zelig object}
+\usage{
+  GetObject(obj)
+}
+\arguments{
+  \item{obj}{an object of type `zelig'}
+}
+\value{
+  the fitted model object
+}
+\description{
+  Extract the fitted model object from the Zelig object
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/GetSlot.Rd b/man/GetSlot.Rd
new file mode 100644
index 0000000..f08ade6
--- /dev/null
+++ b/man/GetSlot.Rd
@@ -0,0 +1,26 @@
+\name{GetSlot}
+\alias{GetSlot}
+\title{Generic method for extracting variables from both
+S3 and S4 fitted model object}
+\usage{
+  GetSlot(obj, key, ...)
+}
+\arguments{
+  \item{obj}{an object of type `zelig'}
+
+  \item{key}{a character-string specifying the name of the
+  variable to extract}
+
+  \item{...}{typically ignored parameters}
+}
+\value{
+  the value of that extracted object or NULL
+}
+\description{
+  Generic method for extracting variables from both S3 and
+  S4 fitted model object
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/GetSlot.zelig.Rd b/man/GetSlot.zelig.Rd
new file mode 100644
index 0000000..e3bfa71
--- /dev/null
+++ b/man/GetSlot.zelig.Rd
@@ -0,0 +1,30 @@
+\name{GetSlot.zelig}
+\alias{GetSlot.zelig}
+\title{Return a Value from a \code{zelig} Fitted Model}
+\usage{
+  \method{GetSlot}{zelig}(obj, key, ...)
+}
+\arguments{
+  \item{obj}{a \code{zelig} object}
+
+  \item{key}{a character-string specifying the which value
+  to extract from the fitted model object}
+
+  \item{...}{subsequent values to extract from the fitted
+  model object}
+}
+\value{
+  values of the specified keys
+}
+\description{
+  Returns a value from the result of a model fitting
+  function
+}
+\note{
+  This function is primarily used by Zelig developers
+  within \code{qi} functions
+}
+\author{
+  Matt Owen \emph{mowen at iq.harvard.edu}
+}
+
diff --git a/man/MCMChook.Rd b/man/MCMChook.Rd
new file mode 100644
index 0000000..e1fffb6
--- /dev/null
+++ b/man/MCMChook.Rd
@@ -0,0 +1,35 @@
+\name{MCMChook}
+\alias{MCMChook}
+\title{Hook to Clean-up MCMC Objects}
+\usage{
+  MCMChook(obj, model.call, zelig.call, seed = NULL, ..., data = NULL)
+}
+\arguments{
+  \item{obj}{the fitted model object (in this case a
+  \code{mcmc} object.}
+
+  \item{model.call}{the call made to the external model}
+
+  \item{zelig.call}{the actual call to zelig itself}
+
+  \item{seed}{a seed for the MCMC algorithm}
+
+  \item{...}{ignored parameters}
+
+  \item{data}{the data.frame being used to fit the statistical model}
+}
+\value{
+  an object useable by Zelig
+}
+\description{
+  This method gives valid methods to the resulting MCMC
+  object so that it can be used with Zelig.
+}
+\note{
+  This function is used internally by the ZeligBayesian
+  package.
+}
+\author{
+  Olivia Lau, Kosuke Imai, Gary King and Matt Owen
+}
+
diff --git a/man/Max.Rd b/man/Max.Rd
new file mode 100644
index 0000000..11aa02e
--- /dev/null
+++ b/man/Max.Rd
@@ -0,0 +1,21 @@
+\name{Max}
+\alias{Max}
+\title{Compute the Maximum Value of a Vector}
+\usage{
+  Max(x, na.rm = NULL)
+}
+\arguments{
+  \item{x}{a numeric or ordered vector}
+
+  \item{na.rm}{ignored}
+}
+\value{
+  the maximum value of the vector
+}
+\description{
+  Compute the Maximum Value of a Vector
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/McmcHookFactor.Rd b/man/McmcHookFactor.Rd
new file mode 100644
index 0000000..dd7e5e6
--- /dev/null
+++ b/man/McmcHookFactor.Rd
@@ -0,0 +1,34 @@
+\name{McmcHookFactor}
+\alias{McmcHookFactor}
+\title{Hook to Clean-up MCMC Factor Object}
+\usage{
+  McmcHookFactor(obj, model.call, zelig.call, seed = NULL,
+    ...)
+}
+\arguments{
+  \item{obj}{the fitted model object (in this case a
+  \code{mcmc} object.}
+
+  \item{model.call}{the call made to the external model}
+
+  \item{zelig.call}{the actual call to zelig itself}
+
+  \item{seed}{a seed for the MCMC algorithm}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  an object useable by Zelig
+}
+\description{
+  This method gives valid methods to the resulting MCMC
+  object so that it can be used with Zelig.
+}
+\note{
+  This function is used internally by the ZeligBayesian
+  package.
+}
+\author{
+  Olivia Lau, Kosuke Imai, Gary King and Matt Owen
+}
+
diff --git a/man/Median.Rd b/man/Median.Rd
new file mode 100644
index 0000000..fa9a892
--- /dev/null
+++ b/man/Median.Rd
@@ -0,0 +1,21 @@
+\name{Median}
+\alias{Median}
+\title{Compute the Statistical Median of a Vector}
+\usage{
+  Median(x, na.rm = NULL)
+}
+\arguments{
+  \item{x}{a vector of numeric or ordered values}
+
+  \item{na.rm}{ignored}
+}
+\value{
+  the median of the vector
+}
+\description{
+  Compute the Statistical Median of a Vector
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/Min.Rd b/man/Min.Rd
new file mode 100644
index 0000000..cb76236
--- /dev/null
+++ b/man/Min.Rd
@@ -0,0 +1,21 @@
+\name{Min}
+\alias{Min}
+\title{Compute the Minumum Value of a Vector}
+\usage{
+  Min(x, na.rm = NULL)
+}
+\arguments{
+  \item{x}{a vector of numeric or ordered values}
+
+  \item{na.rm}{ignored}
+}
+\value{
+  the minimum value of the vector
+}
+\description{
+  Compute the Minumum Value of a Vector
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/Mode.Rd b/man/Mode.Rd
new file mode 100644
index 0000000..ec705b9
--- /dev/null
+++ b/man/Mode.Rd
@@ -0,0 +1,20 @@
+\name{Mode}
+\alias{Mode}
+\title{Compute the Statistical Mode of a Vector}
+\usage{
+  Mode(x)
+}
+\arguments{
+  \item{x}{a vector of numeric, factor, or ordered values}
+}
+\value{
+  the statistical mode of the vector. If two modes exist,
+  one is randomly selected (by design)
+}
+\description{
+  Compute the Statistical Mode of a Vector
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/TexCite.Rd b/man/TexCite.Rd
new file mode 100644
index 0000000..039fb7b
--- /dev/null
+++ b/man/TexCite.Rd
@@ -0,0 +1,17 @@
+\name{TexCite}
+\alias{TexCite}
+\title{Get a TeX-style Citation}
+\usage{
+  TexCite(model)
+}
+\arguments{
+  \item{model}{a character-string specifying the name of
+  the Zelig model of which to describe in TeX-style}
+}
+\value{
+  a string to be rendered as part of a LaTeX-style document
+}
+\description{
+  Get a TeX-style Citation
+}
+
diff --git a/man/Zelig-package.Rd b/man/Zelig-package.Rd
index 34fffaa..796708f 100644
--- a/man/Zelig-package.Rd
+++ b/man/Zelig-package.Rd
@@ -1,160 +1,38 @@
+\docType{package}
 \name{Zelig-package}
-\alias{Zelig-package}
 \alias{Zelig}
-\docType{package}
-\title{
-Zelig: Everyone's Statistical Software
-}
+\alias{Zelig-package}
+\title{Zelig Everyone's Statistical Software}
 \description{
-Zelig is an easy-to-use program that can estimate, and
-help interpret the results of, an enormous range of statistical
-models. It literally is ``everyone's statistical software''
-because Zelig's simple unified framework incorporates everyone
-else's (R) code. We also hope it will become ``everyone's
-statistical software'' for applications and teaching, and so
-have designed Zelig so that anyone can easily use it or add
-their programs to it.  Zelig also comes with infrastructure
-that facilitates the use of any existing method, such as by
-allowing multiply imputed data for any model, and mimicking the
-program Clarify (for Stata) that takes the raw output of
-existing statistical procedures and translates them into
-quantities of direct interest.
+  Zelig is an easy-to-use program that can estimate, and
+  help interpret the results of, an enormous range of
+  statistical models. It literally is ``everyone's
+  statistical software'' because Zelig's simple unified
+  framework incorporates everyone else's (R) code. We also
+  hope it will become ``everyone's statistical software''
+  for applications and teaching, and so have designed Zelig
+  so that anyone can easily use it or add their programs to
+  it.  Zelig also comes with infrastructure that
+  facilitates the use of any existing method, such as by
+  allowing multiply imputed data for any model, and
+  mimicking the program Clarify (for Stata) that takes the
+  raw output of existing statistical procedures and
+  translates them into quantities of direct interest.
 }
 \details{
-\tabular{ll}{
-Package: \tab Zelig\cr
-Version: \tab 3.5.4\cr
-Date: \tab 2007-06-12\cr
-Depends: \tab R (>= 2.4.0), MASS, boot\cr
-Suggests: \tab VGAM (>= 0.7-1), MCMCpack (>= 0.7-4), mvtnorm, survival,
-sandwich (>= 2.0-0), zoo (>= 1.2-1), coda, nnet, sna, gee,
-systemfit, mgcv\cr
-License: \tab GPL version 2 or newer\cr
-URL: \tab http://gking.harvard.edu/zelig\cr
-}
-
-Index:
-\preformatted{
-approval                U.S. Presidential Approval Data
-coalition               Coalition Dissolution in Parliamentary
-                        Democracies
-current.packages        Find all packages in a dependency chain
-dims                    Return Dimensions of Vectors, Arrays, and Data
-                        Frames
-eidat                   Simulation Data for Ecological Inference
-friendship              Simulated Example of Schoolchildren Friendship
-                        Network
-gsource                 Read Data As a Space-Delimited Table
-help.zelig              HTML Help for Zelig Commands and Models
-hoff                    Social Security Expenditure Data
-immigration             Individual Preferences Over Immigration Policy
-macro                   Macroeconomic Data
-match.data              Output matched data sets
-MatchIt.url             Table of links for Zelig
-mexico                  Voting Data from the 1988 Mexican Presidental
-                        Election
-mi                      Bundle multiply imputed data sets as a list
-mid                     Militarized Interstate Disputes
-model.end               Cleaning up after optimization
-model.frame.multiple    Extracting the "environment" of a model formula
-model.matrix.multiple   Design matrix for multivariate models
-network                 Format matricies into a data frame for social
-                        network analysis
-newpainters             The Discretized Painter's Data of de Piles
-parse.formula           Parsing user-input formulas into multiple
-                        syntax
-parse.par               Select and reshape parameter vectors
-PErisk                  Political Economic Risk Data from 62 Countries
-                        in 1987
-plot.ci                 Plotting Vertical confidence Intervals
-plot.zelig              Graphing Quantities of Interest
-put.start               Set specific starting values for certain
-                        parameters
-repl                    Replicating Analyses
-rocplot                 Receiver Operator Characteristic Plots
-sanction                Multilateral Economic Sanctions
-set.start               Set starting values for all parameters
-setx                    Setting Explanatory Variable Values
-sim                     Simulating Quantities of Interest
-sna.ex                  Simulated Example of Social Network Data
-summary.zelig           Summary of Simulated Quantities of Interest
-SupremeCourt            U.S. Supreme Court Vote Matrix
-swiss                   Swiss Fertility and Socioeconomic Indicators
-                        (1888) Data
-ternaryplot             Ternary diagram
-ternarypoints           Adding Points to Ternary Diagrams
-tobin                   Tobin's Tobit Data
-turnout                 Turnout Data Set from the National Election
-                        Survey
-user.prompt             Pause in demo files
-Weimar                  1932 Weimar election data
-zelig                   Estimating a Statistical Model
-zeligDepStatus          Zelig Dependencies Packages Client Status
-zeligDepUpdate          Download Zelig Dependencies Packages
-zeligDescribeModelXML   Zelig interface functions
-Zelig-package           Everyone's Statistical Software
-Zelig.url               Table of links for Zelig
-zideal                  Zelig Matrix of Dependencies
+  \tabular{ll}{ Package: \tab Zelig\cr Version: \tab
+  4.0-11\cr Date: \tab 2012-10-28\cr Depends: \tab R (>=
+  2.14), boot, MASS, methods, sandwich, survival\cr
+  Suggests: \tab mvtnorm, Formula \cr License: \tab GPL
+  version 2 or newer\cr URL: \tab
+  http://gking.harvard.edu/zelig\cr }
 }
-
-Further information is available in the following vignettes:
-
-\tabular{ll}{
-\code{arima} \tab ARIMA Models for Time Series Data (source)\cr
-\code{blogit} \tab Bivariate Logistic Regression for Two Dichotomous Dependent Variables (source)\cr
-\code{bprobit} \tab Bivariate Probit Regression for Dichotomous Dependent Variables (source)\cr
-\code{ei.RxC} \tab Hierarchical Multinomial-Dirichlet Ecological Inference Model (source)\cr
-\code{ei.dynamic} \tab Quinn's Dynamic Ecological Inference (source)\cr
-\code{ei.hier} \tab Hierarchical Ecological Inference Model (source)\cr
-\code{exp} \tab Exponential Regression for Duration Dependent Variables (source)\cr
-\code{factor.bayes} \tab Bayesian Factor Analysis (source)\cr
-\code{factor.mix} \tab Mixed Data Factor Analysis (source)\cr
-\code{factor.ord} \tab Ordinal Data Factor Analysis (source)\cr
-\code{gam.logit} \tab gam.logit: Generalized Additive Model for Dichotomous Dependent Variables (source)\cr
-\code{gam.normal} \tab Generalized Additive Model for Continuous Dependent Variables (source)\cr
-\code{gam.poisson} \tab Generalized Additive Model for Count Dependent Variables (source)\cr
-\code{gam.probit} \tab Generalized Additive Model for Dichotomous Dependent Variables (source)\cr
-\code{gamma} \tab Gamma Regression for Continuous, Positive Dependent Variables (source)\cr
-\code{irt1d} \tab  One Dimensional Item Response Mode (source)\cr
-\code{irtkd} \tab K-Dimensional Item Response Model (source)\cr
-\code{logit} \tab Logistic Regression for Dichotomous Dependent Variables (source)\cr
-\code{logit.bayes} \tab Bayesian Logistic Regression for Dichotomous Dependent Variables (source)\cr
-\code{logit.gee} \tab Generalized Estimating Equation for Logistic Regression (source)\cr
-\code{lognorm} \tab Log-Normal Regression for Duration Dependent Variables (source)\cr
-\code{ls} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{mlogit} \tab Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values (source)\cr
-\code{mlogit.bayes} \tab Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values (source)\cr
-\code{mloglm} \tab Multinomial Log-Linear Regression for Contingency Table Models (source)\cr
-\code{negbin} \tab Negative Binomial Regression for Event Count Dependent Variables (source)\cr
-\code{netcloglog} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{netgamma} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{netlogit} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{netls} \tab Network Least Squares Regression for Continuous Proximity Matrix Dependent Variables (source)\cr
-\code{netnormal} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{netpoisson} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{netprobit} \tab Least Squares Regression for Continuous Dependent Variables (source)\cr
-\code{normal} \tab Normal Regression  for Continuous Dependent Variables (source)\cr
-\code{normal.bayes} \tab Bayesian Normal Linear Regression (source)\cr
-\code{ologit} \tab Ordinal Logistic Regression  for Ordered Categorical Dependent Variables (source)\cr
-\code{oprobit} \tab Ordinal Probit Regression for Ordered Categorical Dependent Variables (source)\cr
-\code{oprobit.bayes} \tab Bayesian Ordered Probit Regression (source)\cr
-\code{poisson} \tab Poisson Regression for Event Count Dependent Variables (source)\cr
-\code{poisson.bayes} \tab Bayesian Poisson Regression (source)\cr
-\code{probit} \tab Probit Regression for Dichotomous Dependent Variables (source)\cr
-\code{probit.bayes} \tab Bayesian Probit Regression for Dichotomous Dependent Variable (source)\cr
-\code{relogit} \tab Rare Events Logistic Regression for Dichotomous Dependent Variables (source)\cr
-\code{sur} \tab Seemingly Unrelated Regression (source)\cr
-\code{threesls} \tab  Three Stage Least Squares (source)\cr
-\code{tobit} \tab Linear regression for Left-Censored Dependet Variable (source)\cr
-\code{tobit.bayes} \tab Bayesian Linear Regression for a Censored Dependent Variable (source)\cr
-\code{twosls} \tab  Two Stage Least Squares (source)\cr
-\code{weibull} \tab Weibull Regression for Duration Dependent Variables (source)\cr
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai,
+  Olivia Lau, and Gary King
 }
+\seealso{
+  zelig setx sim
 }
-\author{
-Kosuke Imai <kimai at Princeton.Edu>, Gary King
-<king at harvard.edu>, Olivia Lau <olau at fas.harvard.edu>
+\keyword{package}
 
-Maintainer: Kosuke Imai <kimai at Princeton.Edu>
-}
-\keyword{ package }
diff --git a/man/ZeligDescribeModel.Rd b/man/ZeligDescribeModel.Rd
new file mode 100644
index 0000000..76d1377
--- /dev/null
+++ b/man/ZeligDescribeModel.Rd
@@ -0,0 +1,23 @@
+\name{ZeligDescribeModel}
+\alias{ZeligDescribeModel}
+\title{Produce a 'description' Object from the Name of a Model}
+\usage{
+  ZeligDescribeModel(model.name)
+}
+\arguments{
+  \item{model.name}{a character-string specifying a Zelig
+  model}
+}
+\value{
+  a 'description' object specified by the 'model.name'
+  parameter. This object is created by executing the
+  specified Zelig models' 'describe' function
+}
+\description{
+  Produce a 'description' Object from the Name of a Model
+}
+\note{
+  The 'description' object is a list-style object
+  containing citation information
+}
+
diff --git a/man/ZeligListModels.Rd b/man/ZeligListModels.Rd
new file mode 100644
index 0000000..e499eba
--- /dev/null
+++ b/man/ZeligListModels.Rd
@@ -0,0 +1,27 @@
+\name{ZeligListModels}
+\alias{ZeligListModels}
+\title{Get a Character-Vector of All Models with a 'zelig2' Function}
+\usage{
+  ZeligListModels(zelig.only = FALSE)
+}
+\arguments{
+  \item{zelig.only}{a boolean specifying whether we want to
+  search only the Zelig namespace}
+}
+\value{
+  a character-vector of the Zelig models loaded on the
+  user's machine
+}
+\description{
+  Get a Character-Vector of All Models with a 'zelig2'
+  Function
+}
+\note{
+  In order for a Zelig model to either execute correctly or
+  be listed as a legal Zelig model, the function name must
+  be prefixed with 'zelig2'.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/ZeligListTitles.Rd b/man/ZeligListTitles.Rd
new file mode 100644
index 0000000..5248a7e
--- /dev/null
+++ b/man/ZeligListTitles.Rd
@@ -0,0 +1,13 @@
+\name{ZeligListTitles}
+\alias{ZeligListTitles}
+\title{List the Titles of the Zelig Statistical Models}
+\usage{
+  ZeligListTitles()
+}
+\value{
+  a list of manual titles for the Zelig software
+}
+\description{
+  List the Titles of the Zelig Statistical Models
+}
+
diff --git a/man/alpha.Rd b/man/alpha.Rd
new file mode 100644
index 0000000..c2a1e8b
--- /dev/null
+++ b/man/alpha.Rd
@@ -0,0 +1,21 @@
+\name{alpha}
+\alias{alpha}
+\title{Extract ancillary parameters from
+`parameters' objects}
+\usage{
+  alpha(param)
+}
+\arguments{
+  \item{param}{a `parameters' object}
+}
+\value{
+  the ancillary parameters \emph{specified} for the
+  statistical model
+}
+\description{
+  Extract ancillary parameters from `parameters' objects
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.bootlist.Rd b/man/as.bootlist.Rd
new file mode 100644
index 0000000..15e2fa6
--- /dev/null
+++ b/man/as.bootlist.Rd
@@ -0,0 +1,25 @@
+\name{as.bootlist}
+\alias{as.bootlist}
+\title{Convert of Vector of Bootstrapped Parameters to a List-style Boot Object}
+\usage{
+  as.bootlist(bootstraps, lengths, names)
+}
+\arguments{
+  \item{bootstraps}{...}
+
+  \item{lengths}{...}
+
+  \item{names}{a character-vector specifying the names of
+  the boot terms}
+}
+\value{
+  ...
+}
+\description{
+  This inverts the ``as.bootvector'' function, and returns
+  a list containing the slots ``alpha'' and ``beta''.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.bootvector.Rd b/man/as.bootvector.Rd
new file mode 100644
index 0000000..8da16bb
--- /dev/null
+++ b/man/as.bootvector.Rd
@@ -0,0 +1,30 @@
+\name{as.bootvector}
+\alias{as.bootvector}
+\title{Convert Boot Object to a Vector}
+\usage{
+  as.bootvector(obj)
+}
+\arguments{
+  \item{obj}{a list with two slots: ``alpha'' and ``beta''.
+  Respectively, these represent bootstrap samples for
+  ancillary parameters and systematic component of the
+  bootstrapped GLM.}
+}
+\value{
+  a list containing the resulting vector, as well as an
+  object used to reverse-build the list (``obj'') from the
+  resulting call to ``bootstrap''.
+}
+\description{
+  Receives a list with 2 slots as its input, and returns a
+  vector of the two smashed together along with the offsets
+  used to reverse-construct the object.
+}
+\note{
+  This method is used internally by Zelig to allow an
+  intuitive, ``param''-like API for bootstrapping.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.data.frame.setx.Rd b/man/as.data.frame.setx.Rd
new file mode 100644
index 0000000..6efb73c
--- /dev/null
+++ b/man/as.data.frame.setx.Rd
@@ -0,0 +1,31 @@
+\name{as.data.frame.setx}
+\alias{as.data.frame.setx}
+\title{Coerce a \code{setx} Object into a \code{data.frame}}
+\usage{
+  \method{as.data.frame}{setx}(x, row.names=NULL,
+    optional=FALSE, ...)
+}
+\arguments{
+  \item{x}{a \code{setx} object}
+
+  \item{row.names}{ignored parameter}
+
+  \item{optional}{ignored parameter}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the \code{setx} object interpreted as a
+  \code{data.frame}. The column-names of the resulting
+  \code{data.frame} are specified by the names of the
+  \code{setx} object. The row-names are typically
+  unlabeled.
+}
+\description{
+  Coerce a \code{setx} Object into a \code{data.frame}
+}
+\note{
+  In subsequent versions of Zelig, this version is expected
+  to undergo minor modifications.
+}
+
diff --git a/man/as.description.Rd b/man/as.description.Rd
new file mode 100644
index 0000000..93f2029
--- /dev/null
+++ b/man/as.description.Rd
@@ -0,0 +1,28 @@
+\name{as.description}
+\alias{as.description}
+\title{Generic Method for Casting 'description' Objects}
+\usage{
+  as.description(descr, ...)
+}
+\arguments{
+  \item{descr}{an object to cast an object of type
+  'description'}
+
+  \item{...}{parameters which are reserved for future Zelig
+  revisions}
+}
+\value{
+  an object of type 'description'
+}
+\description{
+  Convert the result of a call to the 'describe' method
+  into an object parseable by Zelig. Currently conversions
+  only exist for lists and description objects.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  as.description.description as.description.list
+}
+
diff --git a/man/as.description.description.Rd b/man/as.description.description.Rd
new file mode 100644
index 0000000..a8ca580
--- /dev/null
+++ b/man/as.description.description.Rd
@@ -0,0 +1,21 @@
+\name{as.description.description}
+\alias{as.description.description}
+\title{description -> description}
+\usage{
+  \method{as.description}{description}(descr, ...)
+}
+\arguments{
+  \item{descr}{an object of type 'description'}
+
+  \item{...}{ignored}
+}
+\value{
+  the same object
+}
+\description{
+  Identity operation on a description object.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.description.list.Rd b/man/as.description.list.Rd
new file mode 100644
index 0000000..d9be365
--- /dev/null
+++ b/man/as.description.list.Rd
@@ -0,0 +1,21 @@
+\name{as.description.list}
+\alias{as.description.list}
+\title{list -> description}
+\usage{
+  \method{as.description}{list}(descr, ...)
+}
+\arguments{
+  \item{descr}{a list}
+
+  \item{...}{ignored}
+}
+\value{
+  an object of type 'description'
+}
+\description{
+  Convert list into a description object.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.matrix.pooled.setx.Rd b/man/as.matrix.pooled.setx.Rd
new file mode 100644
index 0000000..cbe5d3b
--- /dev/null
+++ b/man/as.matrix.pooled.setx.Rd
@@ -0,0 +1,33 @@
+\name{as.matrix.pooled.setx}
+\alias{as.matrix.pooled.setx}
+\title{Convert a ``pooled.setx'' Object to a Matrix}
+\usage{
+  \method{as.matrix}{pooled.setx}(x, ...)
+}
+\arguments{
+  \item{x}{a setx object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a matrix containing columns and rows corresponding to the
+  explanatory variables specified in the call to the 'setx'
+  function
+}
+\description{
+  The setx object is, in its most basic form, a list of
+  column names and values specified for each of these
+  column names. This function simply converts the key-value
+  pairs of column-name and specified value into a matrix.
+}
+\note{
+  This method allows basic matrix arithmetic operations on
+  data objects, which mirror values stored within setx
+  objects. In many scenarios, simulations require
+  matrix-multiplication, etc. to be performed on a
+  data-set. This function facilitates that need.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.matrix.setx.Rd b/man/as.matrix.setx.Rd
new file mode 100644
index 0000000..20433fb
--- /dev/null
+++ b/man/as.matrix.setx.Rd
@@ -0,0 +1,33 @@
+\name{as.matrix.setx}
+\alias{as.matrix.setx}
+\title{Convert a 'setx' Object to a Matrix}
+\usage{
+  \method{as.matrix}{setx}(x, ...)
+}
+\arguments{
+  \item{x}{a setx object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a matrix containing columns and rows corresponding to the
+  explanatory variables specified in the call to the 'setx'
+  function
+}
+\description{
+  The setx object is, in its most basic form, a list of
+  column names and values specified for each of these
+  column names. This function simply converts the key-value
+  pairs of column-name and specified value into a matrix.
+}
+\note{
+  This method allows basic matrix arithmetic operations on
+  data objects, which mirror values stored within setx
+  objects. In many scenarios, simulations require
+  matrix-multiplication, etc. to be performed on a
+  data-set. This function facilitates that need.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.parameters.Rd b/man/as.parameters.Rd
new file mode 100644
index 0000000..6e34666
--- /dev/null
+++ b/man/as.parameters.Rd
@@ -0,0 +1,43 @@
+\name{as.parameters}
+\alias{as.parameters}
+\title{Generic Method for Converting Objects into 'parameters'}
+\usage{
+  as.parameters(params, ...)
+}
+\arguments{
+  \item{params}{the object to be casted}
+
+  \item{...}{parameters reserved for future revisions}
+}
+\value{
+  an object of type `parameters'
+}
+\description{
+  Converts list-style objects into Parameter lists
+  primarily used by the 'qi' methods. These list-style
+  objects may contain keys specifying: 'link' (the link
+  function of a statistical model), 'linkinv' (the
+  inverse-link function), 'family' (a object of 'family'
+  class used to specify the model's classification),
+  'alpha' (a vector of ancillary parameters, and
+  'simulations' (a vector of simulated draws from the
+  model's underlying distribution.
+}
+\note{
+  Only three scenarios may exist - converting 'parameters'
+  to 'parameters', 'list' to 'parameters', and vectors to
+  'parameters'. The third in particular is needed only for
+  backwards compatibility, and support will likely be
+  deprecated.
+
+  Furthermore, this function should be exclusively used
+  implicitly and by Zelig.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  as.parameters.list as.parameters.parameters,
+  as.parameters.default
+}
+
diff --git a/man/as.parameters.default.Rd b/man/as.parameters.default.Rd
new file mode 100644
index 0000000..7ac30f8
--- /dev/null
+++ b/man/as.parameters.default.Rd
@@ -0,0 +1,27 @@
+\name{as.parameters.default}
+\alias{as.parameters.default}
+\title{??? -> parameters}
+\usage{
+  as.parameters.default(params, num = NULL, ...)
+}
+\arguments{
+  \item{params}{any non-supported data-type}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{...}{ignored}
+}
+\value{
+  the object passed in
+}
+\description{
+  ??? -> parameters
+}
+\note{
+  This function should be deprecated.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.parameters.list.Rd b/man/as.parameters.list.Rd
new file mode 100644
index 0000000..7890dda
--- /dev/null
+++ b/man/as.parameters.list.Rd
@@ -0,0 +1,28 @@
+\name{as.parameters.list}
+\alias{as.parameters.list}
+\title{list -> parameters}
+\usage{
+  as.parameters.list(params, num = NULL, ...)
+}
+\arguments{
+  \item{params}{a list object}
+
+  \item{num}{an integer specifying the number of
+  simulations to be taken}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  an object of type `parameters'
+}
+\description{
+  The list may contain: 'link', 'linkinv', 'family',
+  'alpha', and 'simulations' keys.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  as.parameters
+}
+
diff --git a/man/as.parameters.parameters.Rd b/man/as.parameters.parameters.Rd
new file mode 100644
index 0000000..03dcc65
--- /dev/null
+++ b/man/as.parameters.parameters.Rd
@@ -0,0 +1,25 @@
+\name{as.parameters.parameters}
+\alias{as.parameters.parameters}
+\title{parameters -> parameters
+This is merely an identity function when casting 'parameters' objects into
+'parameters'.}
+\usage{
+  as.parameters.parameters(params, ...)
+}
+\arguments{
+  \item{params}{a parameters object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the same parameter object
+}
+\description{
+  parameters -> parameters This is merely an identity
+  function when casting 'parameters' objects into
+  'parameters'.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.qi.Rd b/man/as.qi.Rd
new file mode 100644
index 0000000..a3d1297
--- /dev/null
+++ b/man/as.qi.Rd
@@ -0,0 +1,40 @@
+\name{as.qi}
+\alias{as.qi}
+\title{Generic Method for Converting Various Objects into 'qi' Objects
+'qi' objects are list-style objects used by the 'summarize' function to
+compute simple summaries about the simulated data. For readability and
+simplicity purposes, the 'qi' function typically returns a list of
+named simulations. This list is converted internally by Zelig into a 'qi'
+object so that several methods can be easily applied to the Quantities of
+Interest: plot, summarize, and print}
+\usage{
+  as.qi(s)
+}
+\arguments{
+  \item{s}{the object to be casted}
+}
+\value{
+  an object of type `qi'
+}
+\description{
+  Generic Method for Converting Various Objects into 'qi'
+  Objects 'qi' objects are list-style objects used by the
+  'summarize' function to compute simple summaries about
+  the simulated data. For readability and simplicity
+  purposes, the 'qi' function typically returns a list of
+  named simulations. This list is converted internally by
+  Zelig into a 'qi' object so that several methods can be
+  easily applied to the Quantities of Interest: plot,
+  summarize, and print
+}
+\note{
+  These functions are primarily used internally by Zelig and
+  should not be used in the Global namespace.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  as.qi.default as.qi.qi as.qi.list
+}
+
diff --git a/man/as.qi.default.Rd b/man/as.qi.default.Rd
new file mode 100644
index 0000000..2af74ac
--- /dev/null
+++ b/man/as.qi.default.Rd
@@ -0,0 +1,19 @@
+\name{as.qi.default}
+\alias{as.qi.default}
+\title{??? -> qi}
+\usage{
+  as.qi.default(s)
+}
+\arguments{
+  \item{s}{any unsupported object}
+}
+\value{
+  an object of type `qi'
+}
+\description{
+  ??? -> qi
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.qi.list.Rd b/man/as.qi.list.Rd
new file mode 100644
index 0000000..a45d7ad
--- /dev/null
+++ b/man/as.qi.list.Rd
@@ -0,0 +1,31 @@
+\name{as.qi.list}
+\alias{as.qi.list}
+\title{list -> qi
+This function has a lot of room to go wrong. It tries to detect whether the
+zelig model is old-style or new-style (as of 4/4/2011). Eventually this
+feature should be phased out.}
+\usage{
+  as.qi.list(s)
+}
+\arguments{
+  \item{s}{a list}
+}
+\value{
+  an object of type `qi'
+}
+\description{
+  list -> qi This function has a lot of room to go wrong.
+  It tries to detect whether the zelig model is old-style or
+  new-style (as of 4/4/2011). Eventually this feature
+  should be phased out.
+}
+\note{
+  This method has peculiar behavior when the list contains
+  only two elements. The crucial fix is to simply remove
+  the portion of code which intentionally implements this
+  peculiar behavior.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.qi.qi.Rd b/man/as.qi.qi.Rd
new file mode 100644
index 0000000..a04b300
--- /dev/null
+++ b/man/as.qi.qi.Rd
@@ -0,0 +1,19 @@
+\name{as.qi.qi}
+\alias{as.qi.qi}
+\title{qi -> qi}
+\usage{
+  as.qi.qi(s)
+}
+\arguments{
+  \item{s}{an object of type `qi'}
+}
+\value{
+  s an object of type `qi'
+}
+\description{
+  qi -> qi
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.summarized.Rd b/man/as.summarized.Rd
new file mode 100644
index 0000000..e495a26
--- /dev/null
+++ b/man/as.summarized.Rd
@@ -0,0 +1,28 @@
+\name{as.summarized}
+\alias{as.summarized}
+\title{Generic Method for Casting Objects as 'summarized' Objects}
+\usage{
+  as.summarized(x, ...)
+}
+\arguments{
+  \item{x}{an object}
+
+  \item{...}{unspecified parameters}
+}
+\value{
+  a 'summarized.qi' object
+}
+\description{
+  This function is particularly for use by the 'summarize'
+  method, which summarizes the simulations taken from the
+  'qi' method. The generic function 'summary' when applied
+  to a Zelig Simulation implicitly uses this function.
+}
+\note{
+  This is made available on the Global namespace as a
+  matter of potential future compliancy.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.summarized.list.Rd b/man/as.summarized.list.Rd
new file mode 100644
index 0000000..a495064
--- /dev/null
+++ b/man/as.summarized.list.Rd
@@ -0,0 +1,23 @@
+\name{as.summarized.list}
+\alias{as.summarized.list}
+\title{list -> summarized.qi
+Convert a list into a ``summarized.qi'' object}
+\usage{
+  \method{as.summarized}{list}(x, ...)
+}
+\arguments{
+  \item{x}{a list}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a ``summarized.qi'' object
+}
+\description{
+  list -> summarized.qi Convert a list into a
+  ``summarized.qi'' object
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/as.summarized.summarized.qi.Rd b/man/as.summarized.summarized.qi.Rd
new file mode 100644
index 0000000..6cdb02a
--- /dev/null
+++ b/man/as.summarized.summarized.qi.Rd
@@ -0,0 +1,21 @@
+\name{as.summarized.summarized.qi}
+\alias{as.summarized.summarized.qi}
+\title{summarized.qi -> summarized.qi}
+\usage{
+  \method{as.summarized}{summarized.qi}(x, ...)
+}
+\arguments{
+  \item{x}{an object of type 'summarized.qi'}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the same 'summarized.qi' object
+}
+\description{
+  Identity operation on ``summarized.qi'' objects
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/attach.env.Rd b/man/attach.env.Rd
new file mode 100644
index 0000000..47e1e2c
--- /dev/null
+++ b/man/attach.env.Rd
@@ -0,0 +1,40 @@
+\name{attach.env}
+\alias{attach.env}
+\title{Attach Variables to a Function}
+\usage{
+  attach.env(f, env = NULL, ...)
+}
+\arguments{
+  \item{f}{a function which will be modified}
+
+  \item{env}{an environment variable which will be attached
+  to the function being returned}
+
+  \item{...}{arbitrary key-value paired parameters which
+  will be assigned to the environment of the function being
+  returned}
+}
+\value{
+  the original function ``f'' with a different environment
+  attached to it.
+}
+\description{
+  Returns a function, specified by the user, with the
+  variables of a specified environment attached. This, in
+  essence, allows programmers to write functions that have
+  forms of private memory. This makes the function behave
+  similarly to an object.
+}
+\note{
+  This function is used by Zelig to ensure that particular
+  method calls - param, qi, bootstrap - will contain the
+  private variables: ``.fitted'', ``.model'', ``.call'' and
+  ``.env'' which respectively contain the fitted model
+  object, the name of the zelig model being invoked, the
+  original call to the model-fitting function and the
+  environment in which to call the function call.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/bootfn.default.Rd b/man/bootfn.default.Rd
new file mode 100644
index 0000000..79033a3
--- /dev/null
+++ b/man/bootfn.default.Rd
@@ -0,0 +1,34 @@
+\name{bootfn.default}
+\alias{bootfn.default}
+\title{Default Boot-strapping procedure}
+\usage{
+  bootfn.default(data, i, object, bootstrapfn = NULL, num,
+    ...)
+}
+\arguments{
+  \item{data}{a data.frame}
+
+  \item{i}{an integer or character-string specifying the
+  index of the row to be used in the bootstrapping
+  procedure.}
+
+  \item{object}{the fitted model object}
+
+  \item{bootstrapfn}{a function used to bootstrap the
+  object}
+
+  \item{num}{an integer specifying the number of samples to
+  simulate}
+
+  \item{...}{unspecified parameters}
+}
+\value{
+  a list of parameters
+}
+\description{
+  The default procedure for extracting bootstrap
+  information. Note that this method re-fits the data and
+  resamples the data frequently. This is a good candidate
+  for fixing-up.
+}
+
diff --git a/man/bootstrap.Rd b/man/bootstrap.Rd
new file mode 100644
index 0000000..da6c506
--- /dev/null
+++ b/man/bootstrap.Rd
@@ -0,0 +1,33 @@
+\name{bootstrap}
+\alias{bootstrap}
+\title{Generic Method for ``bootstrap''}
+\usage{
+  bootstrap(obj, ...)
+}
+\arguments{
+  \item{obj}{a fitted model object that will be used to
+  produce boot-strapped parameters. This object usually
+  inherits the class ``glm'' or ``lm'' object}
+
+  \item{...}{unspecified parameters}
+}
+\value{
+  a list with the ``alpha'' and ``beta'' slots set. Note
+  that ``alpha'' corresponds to ancillary parameters and
+  ``beta'' corresponds to systematic components of the
+  model
+}
+\description{
+  This method is intended to be overridden by statistical
+  models that would like to support statistical
+  bootstrapping.
+}
+\note{
+  This method has private memory storage and can reference
+  the objects: ``.fitted'', ``.data'', ``.call'', ``.env'',
+  despite having no declaration in the argument list.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/bootstrap.default.Rd b/man/bootstrap.default.Rd
new file mode 100644
index 0000000..40543f3
--- /dev/null
+++ b/man/bootstrap.default.Rd
@@ -0,0 +1,26 @@
+\name{bootstrap.default}
+\alias{bootstrap.default}
+\title{Produce Boot-strapped Parameters for a Statistical Model}
+\usage{
+  \method{bootstrap}{default}(obj, ...)
+}
+\arguments{
+  \item{obj}{a fitted model object. This is typically of
+  type ``glm'' or ``lm''}
+
+  \item{...}{unspecified parameters}
+}
+\value{
+  a list with the ``alpha'' and ``beta'' slots set
+}
+\description{
+  This method is a fallback for bootstrapping models that
+  do not have a defined ``bootstrap'' method. For most
+  models, this default is sufficient, so long as the model
+  follows the usual convention that ``coef(obj)'' returns
+  the systematic parameters of a fitted model.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/bootstrap.gamma.Rd b/man/bootstrap.gamma.Rd
new file mode 100644
index 0000000..ad1cb88
--- /dev/null
+++ b/man/bootstrap.gamma.Rd
@@ -0,0 +1,26 @@
+\name{bootstrap.gamma}
+\alias{bootstrap.gamma}
+\title{Bootstrap Parameters for Zelig ``gamma'' GLM}
+\usage{
+  \method{bootstrap}{gamma}(obj, ...)
+}
+\arguments{
+  \item{obj}{a ``zelig'' object that will be used to
+  produce boot-strapped parameters}
+
+  \item{...}{extra parameters to be passed to the ``boot''
+  method. These are typically ignored, but is included for
+  further expansion.}
+}
+\value{
+  a list containing information concerning link,
+  link-inverses, etc.
+}
+\description{
+  Returns bootstrapped parameter estimates for a ``gamma''
+  GLM.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/bootstrap.negbinom.Rd b/man/bootstrap.negbinom.Rd
new file mode 100644
index 0000000..5fdf331
--- /dev/null
+++ b/man/bootstrap.negbinom.Rd
@@ -0,0 +1,26 @@
+\name{bootstrap.negbinom}
+\alias{bootstrap.negbinom}
+\title{Bootstrap Parameters for Zelig ``negbinom'' GLM}
+\usage{
+  \method{bootstrap}{negbinom}(obj, ...)
+}
+\arguments{
+  \item{obj}{a ``zelig'' object that will be used to
+  produce boot-strapped parameters}
+
+  \item{...}{extra parameters to be passed to the ``boot''
+  method. These are typically ignored, but is included for
+  further expansion.}
+}
+\value{
+  a list containing information concerning link,
+  link-inverses, etc.
+}
+\description{
+  Returns bootstrapped parameter estimates for a
+  negative-binomial GLM.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/bootstrap.normal.Rd b/man/bootstrap.normal.Rd
new file mode 100644
index 0000000..e6d2f82
--- /dev/null
+++ b/man/bootstrap.normal.Rd
@@ -0,0 +1,29 @@
+\name{bootstrap.normal}
+\alias{bootstrap.normal}
+\title{Bootstrap Parameters for Zelig ``normal'' GLM}
+\usage{
+  \method{bootstrap}{normal}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a ``zelig'' object that will be used to
+  produce boot-strapped parameters}
+
+  \item{num}{an integer specifying the number of
+  simulations to produce}
+
+  \item{...}{extra parameters to be passed to the ``boot''
+  method. These are typically ignored, but is included for
+  further expansion.}
+}
+\value{
+  a list containing information concerning link,
+  link-inverses, etc.
+}
+\description{
+  Returns bootstrapped parameter estimates for a Gaussian
+  GLM.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/callToString.Rd b/man/callToString.Rd
new file mode 100644
index 0000000..fb78a61
--- /dev/null
+++ b/man/callToString.Rd
@@ -0,0 +1,22 @@
+\name{callToString}
+\alias{callToString}
+\title{Convert \code{call} Object to a String}
+\usage{
+  callToString(x, ...)
+}
+\arguments{
+  \item{x}{a \code{call} object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a character-string representing the \code{call} object
+}
+\description{
+  This method concerts \code{call} objects into a simple,
+  intuitive human-readable form.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/cite.Rd b/man/cite.Rd
new file mode 100644
index 0000000..1798afc
--- /dev/null
+++ b/man/cite.Rd
@@ -0,0 +1,19 @@
+\name{cite}
+\alias{cite}
+\title{Citation information for a 'description' object}
+\usage{
+  cite(descr)
+}
+\arguments{
+  \item{descr}{an object of type 'description'}
+}
+\value{
+  a character-string giving citation info
+}
+\description{
+  Citation information for a 'description' object
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/cluster.formula.Rd b/man/cluster.formula.Rd
new file mode 100644
index 0000000..c37f0b5
--- /dev/null
+++ b/man/cluster.formula.Rd
@@ -0,0 +1,19 @@
+\name{cluster.formula}
+\alias{cluster.formula}
+\title{Generate Formulae that Consider Clustering}
+\usage{
+  cluster.formula(formula, cluster)
+}
+\arguments{
+  \item{formula}{a formula object}
+
+  \item{cluster}{a vector}
+}
+\value{
+  a formula object describing clustering
+}
+\description{
+  This method is used internally by the "Zelig" Package to
+  interpret clustering.
+}
+
diff --git a/man/cmvglm.Rd b/man/cmvglm.Rd
new file mode 100644
index 0000000..3651c81
--- /dev/null
+++ b/man/cmvglm.Rd
@@ -0,0 +1,25 @@
+\name{cmvglm}
+\alias{cmvglm}
+\title{cmvglm}
+\usage{
+  cmvglm(formula, model, ndim, data = NULL, fact = NULL)
+}
+\arguments{
+  \item{formula}{a formula}
+
+  \item{model}{the names of the Zelig model}
+
+  \item{ndim}{the number of dimensions in the statistical
+  model}
+
+  \item{data}{a data-frame}
+
+  \item{fact}{???}
+}
+\description{
+  cmvglm
+}
+\author{
+  Kosuke Imai and Olivia Lau
+}
+
diff --git a/man/coef.parameters.Rd b/man/coef.parameters.Rd
new file mode 100644
index 0000000..54a0252
--- /dev/null
+++ b/man/coef.parameters.Rd
@@ -0,0 +1,28 @@
+\name{coef.parameters}
+\alias{coef.parameters}
+\title{Return Simulations of Parameter Coefficients}
+\usage{
+  \method{coef}{parameters}(object, ...)
+}
+\arguments{
+  \item{object}{a 'parameters' object}
+
+  \item{\dots}{ignored}
+}
+\value{
+  simulations, specified by the Zelig model, of the
+  ancillary parameters
+}
+\description{
+  Returns simulated parameters of coefficients for use in
+  statistical simulation. The values are set by the
+  model-fitting function and the developer of the qi.<model
+  name> method.
+}
+\note{
+  This function may not differ at all from coef.default
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/combine.Rd b/man/combine.Rd
new file mode 100644
index 0000000..04e9fa9
--- /dev/null
+++ b/man/combine.Rd
@@ -0,0 +1,24 @@
+\name{combine}
+\alias{combine}
+\title{Produce All Combinations of a Set of Lists}
+\usage{
+  combine(...)
+}
+\arguments{
+  \item{...}{a set of lists to mix together}
+}
+\value{
+  all the combinations of the lists with repetition
+}
+\description{
+  Produce All Combinations of a Set of Lists
+}
+\note{
+  This function is used internally by the 'mi' constructors
+  in order to produce the complete set of combinations of
+  data-frames and factors by to subset the data-frames.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/constructDataFrame.Rd b/man/constructDataFrame.Rd
new file mode 100644
index 0000000..ea9ec97
--- /dev/null
+++ b/man/constructDataFrame.Rd
@@ -0,0 +1,24 @@
+\name{constructDataFrame}
+\alias{constructDataFrame}
+\title{Construct Data Frame
+Construct and return a tiny (single-row) data-frame from a larger data-frame,
+a list of specified values, and a formula}
+\usage{
+  constructDataFrame(data, specified)
+}
+\arguments{
+  \item{data}{a ``data.frame'' that will be used to create
+  a small design matrix}
+
+  \item{specified}{a list with key-value pairs that will be
+  used to explicitly set several values}
+}
+\value{
+  a ``data.frame'' containing a single row
+}
+\description{
+  Construct Data Frame Construct and return a tiny
+  (single-row) data-frame from a larger data-frame, a list
+  of specified values, and a formula
+}
+
diff --git a/man/constructDesignMatrix.Rd b/man/constructDesignMatrix.Rd
new file mode 100644
index 0000000..42b9e6c
--- /dev/null
+++ b/man/constructDesignMatrix.Rd
@@ -0,0 +1,22 @@
+\name{constructDesignMatrix}
+\alias{constructDesignMatrix}
+\title{Construct Design Matrix from
+Construct and return a design matrix based on a tiny data-frame (single-row).}
+\usage{
+  constructDesignMatrix(data, formula)
+}
+\arguments{
+  \item{data}{a ``data.frame'' (preferably single-rowed)
+  that will be used to create a small design matrix}
+
+  \item{formula}{a formula, whose predictor variables will
+  be used to create a design matrix}
+}
+\value{
+  a design (model) matrix
+}
+\description{
+  Construct Design Matrix from Construct and return a
+  design matrix based on a tiny data-frame (single-row).
+}
+
diff --git a/man/current.packages.Rd b/man/current.packages.Rd
deleted file mode 100644
index db55e3a..0000000
--- a/man/current.packages.Rd
+++ /dev/null
@@ -1,37 +0,0 @@
-\name{current.packages}
-
-\alias{current.packages}
-
-\title{Find all packages in a dependency chain}
-
-\description{
-  Use \code{current.packages} to find all the packages suggested or 
-required by a given package, and the currently installed version number 
-for each.  
-}
-
-\usage{
-current.packages(package)
-}
-
-\arguments{
-\item{package}{a character string corresponding to the name of an 
-installed package}
-}
-
-\value{
-  A matrix containing the current version number of the packages 
-suggested or required by \code{package}.  
-}
-
-\seealso{\code{packageDescription}}
-
-\author{ Olivia Lau <\email{olau at fas.harvard.edu}>
-}
-
-\examples{
-\dontrun{
-current.packages("Zelig")
-}}
-
-\keyword{file}
diff --git a/man/depends.on.zelig.Rd b/man/depends.on.zelig.Rd
new file mode 100644
index 0000000..84763ee
--- /dev/null
+++ b/man/depends.on.zelig.Rd
@@ -0,0 +1,24 @@
+\name{depends.on.zelig}
+\alias{depends.on.zelig}
+\title{Whether a Statistical Package Depends on the Zelig Software Suite}
+\usage{
+  depends.on.zelig(package = "")
+}
+\arguments{
+  \item{package}{a character-string representing a package
+  name}
+}
+\value{
+  whether the package lists Zelig as a dependency in its
+  DESCRIPTION
+}
+\description{
+  Whether a Statistical Package Depends on the Zelig
+  Software Suite
+}
+\note{
+  This function is used primarily internally to determine
+  whether a package is contributing a function to the
+  Zelig software suite
+}
+
diff --git a/man/describe.Rd b/man/describe.Rd
new file mode 100644
index 0000000..9a67f49
--- /dev/null
+++ b/man/describe.Rd
@@ -0,0 +1,19 @@
+\name{describe}
+\alias{describe}
+\title{Method to describe a model to Zelig}
+\usage{
+  describe(...)
+}
+\arguments{
+  \item{...}{parameters which are typically ignored}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Method to describe a model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.default.Rd b/man/describe.default.Rd
new file mode 100644
index 0000000..33c095f
--- /dev/null
+++ b/man/describe.default.Rd
@@ -0,0 +1,25 @@
+\name{describe.default}
+\alias{describe.default}
+\title{Default describe function for an arbitrary model
+This method exists solely as a backup when an author does not contribute a
+'describe' function for their model}
+\usage{
+  \method{describe}{default}(...)
+}
+\arguments{
+  \item{...}{dummy parameters purely to cast the correct
+  object. That is, the parameters of the function should
+  not BE referenced specifically}
+}
+\value{
+  a list to be processed by \code{as.description}
+}
+\description{
+  Default describe function for an arbitrary model This
+  method exists solely as a backup when an author does not
+  contribute a 'describe' function for their model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.exp.Rd b/man/describe.exp.Rd
new file mode 100644
index 0000000..b07a311
--- /dev/null
+++ b/man/describe.exp.Rd
@@ -0,0 +1,19 @@
+\name{describe.exp}
+\alias{describe.exp}
+\title{Describe an ``exp'' model to Zelig}
+\usage{
+  \method{describe}{exp}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Describe an ``exp'' model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.gamma.Rd b/man/describe.gamma.Rd
new file mode 100644
index 0000000..d218923
--- /dev/null
+++ b/man/describe.gamma.Rd
@@ -0,0 +1,19 @@
+\name{describe.gamma}
+\alias{describe.gamma}
+\title{Describe the \code{gamma} model to Zelig}
+\usage{
+  \method{describe}{gamma}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list of important information
+}
+\description{
+  Describe the \code{gamma} model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.logit.Rd b/man/describe.logit.Rd
new file mode 100644
index 0000000..be99219
--- /dev/null
+++ b/man/describe.logit.Rd
@@ -0,0 +1,19 @@
+\name{describe.logit}
+\alias{describe.logit}
+\title{Describe a `logit' model to Zelig}
+\usage{
+  \method{describe}{logit}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Describe a `logit' model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.ls.Rd b/man/describe.ls.Rd
new file mode 100644
index 0000000..e49e808
--- /dev/null
+++ b/man/describe.ls.Rd
@@ -0,0 +1,22 @@
+\name{describe.ls}
+\alias{describe.ls}
+\title{Describe a \code{ls} model to Zelig}
+\usage{
+  \method{describe}{ls}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by \code{as.description}
+}
+\description{
+  Describe a \code{ls} model to Zelig
+}
+\note{
+  \code{ls} stands for "least squares fit"
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.negbinom.Rd b/man/describe.negbinom.Rd
new file mode 100644
index 0000000..6d72eb9
--- /dev/null
+++ b/man/describe.negbinom.Rd
@@ -0,0 +1,22 @@
+\name{describe.negbinom}
+\alias{describe.negbinom}
+\title{Describe the \code{negbinom} model to Zelig}
+\usage{
+  \method{describe}{negbinom}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by \code{as.description}
+}
+\description{
+  Describe the \code{negbinom} model to Zelig
+}
+\note{
+  \code{negbinom} stands for "negative binomial"
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.normal.Rd b/man/describe.normal.Rd
new file mode 100644
index 0000000..bc20e51
--- /dev/null
+++ b/man/describe.normal.Rd
@@ -0,0 +1,19 @@
+\name{describe.normal}
+\alias{describe.normal}
+\title{Describe the \code{normal} model to Zelig}
+\usage{
+  \method{describe}{normal}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Describe the \code{normal} model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.poisson.Rd b/man/describe.poisson.Rd
new file mode 100644
index 0000000..b48168e
--- /dev/null
+++ b/man/describe.poisson.Rd
@@ -0,0 +1,19 @@
+\name{describe.poisson}
+\alias{describe.poisson}
+\title{Describe the `poisson' model to Zelig}
+\usage{
+  \method{describe}{poisson}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Describe the `poisson' model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.probit.Rd b/man/describe.probit.Rd
new file mode 100644
index 0000000..954b823
--- /dev/null
+++ b/man/describe.probit.Rd
@@ -0,0 +1,19 @@
+\name{describe.probit}
+\alias{describe.probit}
+\title{Describe the `probit' model to Zelig}
+\usage{
+  \method{describe}{probit}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Describe the `probit' model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.tobit.Rd b/man/describe.tobit.Rd
new file mode 100644
index 0000000..b2e1220
--- /dev/null
+++ b/man/describe.tobit.Rd
@@ -0,0 +1,19 @@
+\name{describe.tobit}
+\alias{describe.tobit}
+\title{Describe a ``tobit'' model to Zelig}
+\usage{
+  \method{describe}{tobit}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be processed by `as.description'
+}
+\description{
+  Describe a ``tobit'' model to Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/describe.zelig.Rd b/man/describe.zelig.Rd
new file mode 100644
index 0000000..a644e6e
--- /dev/null
+++ b/man/describe.zelig.Rd
@@ -0,0 +1,28 @@
+\name{describe.zelig}
+\alias{describe.zelig}
+\title{Get Description Object Used to Cite this Zelig Model}
+\usage{
+  \method{describe}{zelig}(object, ...)
+}
+\arguments{
+  \item{object}{a 'zelig' object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a 'description' object used internally to produce
+  citation text
+}
+\description{
+  Get Description Object Used to Cite this Zelig Model
+}
+\note{
+  This function should be reevaluated in design, since
+  'description' objects are exclusively used internally. In
+  particular, this method would be more useful to users as
+  a 'cite' method.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/description.Rd b/man/description.Rd
new file mode 100644
index 0000000..4186693
--- /dev/null
+++ b/man/description.Rd
@@ -0,0 +1,37 @@
+\name{description}
+\alias{description}
+\title{Constructor for the 'description' class}
+\usage{
+  description(authors = c("Kosuke Imai", "Gary King", "Olivia Lau"),
+    year = NULL, model = "", text = "", url = "",
+    category = NULL)
+}
+\arguments{
+  \item{authors}{a character-vector of author names}
+
+  \item{year}{a numeric specifying the year}
+
+  \item{model}{a character-string specifying model name}
+
+  \item{text}{a character-string specifying the title of
+  the model. This typically includes more exact information
+  than 'model'. E.g., for the 'logit' the title 'Logistic
+  Regression for Dichotomous Variables' would be a suitable
+  text parameter.}
+
+  \item{url}{a character-string specifying the model's
+  software page}
+
+  \item{category}{deprecated until data-verse bindings are
+  reevaluated}
+}
+\value{
+  an object of type 'description'
+}
+\description{
+  Constructor for the 'description' class
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/dims.Rd b/man/dims.Rd
deleted file mode 100644
index 9c20e00..0000000
--- a/man/dims.Rd
+++ /dev/null
@@ -1,38 +0,0 @@
-\name{dims}
-
-\alias{dims}
-
-\title{Return Dimensions of Vectors, Arrays, and Data Frames}
-
-\description{
-  Retrieve the dimensions of a vector, array, or data frame.  
-}
-
-\usage{
-dims(x)
-}
-
-\arguments{
-  \item{x}{An R object.  For example, a vector, matrix, array, or data 
-frame.}
-}
-
-\value{
-  The function \code{dims} performs exactly the same as \code{dim}, and 
-additionally returns the \code{length} of vectors (treating them as 
-one-dimensional arrays).}
-
-\seealso{\code{dim}, \code{length}}
-
-\author{ Olivia Lau <\email{olau at fas.harvard.edu}>
-}
-
-\examples{
-a <- 1:12
-dims(a)
-
-a <- matrix(1, nrow = 4, ncol = 9)
-dims(a)
-}
-
-\keyword{file}
diff --git a/man/find.match.Rd b/man/find.match.Rd
new file mode 100644
index 0000000..2ade535
--- /dev/null
+++ b/man/find.match.Rd
@@ -0,0 +1,35 @@
+\name{find.match}
+\alias{find.match}
+\title{Find a Partial or Exact Match from a Vector of Strings
+Searches a vector of character-string, and returns the best match.}
+\usage{
+  find.match(needle, haystack, fail = NA)
+}
+\arguments{
+  \item{needle}{a character-string to search for in the haystack}
+
+  \item{haystack}{a vector of character-strings}
+
+  \item{fail}{the value to return in case no match is
+  found. Defaults to NA}
+}
+\value{
+  the best-matched string or NA
+}
+\description{
+  Find a Partial or Exact Match from a Vector of Strings
+  Searches a vector of character-string, and returns the
+  best match.
+}
+\details{
+  ``find.match'' attempts to use several common matching
+  functions in an order that sequentially prefers less
+  strict matching, until a suitable match is found. If none
+  is found, then return the value of the ``fail'' parameter
+  (defaults to NA). The functions used for matching are:
+  ``match'', ``charmatch'', and finally ``grep''.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/get.package.Rd b/man/get.package.Rd
new file mode 100644
index 0000000..606b2a4
--- /dev/null
+++ b/man/get.package.Rd
@@ -0,0 +1,26 @@
+\name{get.package}
+\alias{get.package}
+\title{Find the Zelig package that a particular model belong to}
+\usage{
+  get.package(model, quiet = TRUE, ...)
+}
+\arguments{
+  \item{model}{a character-string specifying a Zelig model}
+
+  \item{quiet}{a logical indicating whether to display
+  messages and warnings}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  NA or a character-string specifying the name of the
+  package which contains a specific model
+}
+\description{
+  This method is used to help transition Zelig v3.5 users
+  to Zelig v4
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/getPredictorTerms.Rd b/man/getPredictorTerms.Rd
new file mode 100644
index 0000000..5dffa10
--- /dev/null
+++ b/man/getPredictorTerms.Rd
@@ -0,0 +1,26 @@
+\name{getPredictorTerms}
+\alias{getPredictorTerms}
+\title{Get Predictor Terms from Zelig-style Formulae}
+\usage{
+  getPredictorTerms(x, ...)
+}
+\arguments{
+  \item{x}{a Zelig-style formula ('formula' or 'list')}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a character-vector or NA
+}
+\description{
+  This function extracts the predictor terms from a
+  Zelig-style object.
+}
+\note{
+  This function is used exclusively in the development of
+  Zelig-core.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/getResponseTerms.Formula-not-formula.Rd b/man/getResponseTerms.Formula-not-formula.Rd
new file mode 100644
index 0000000..50247a3
--- /dev/null
+++ b/man/getResponseTerms.Formula-not-formula.Rd
@@ -0,0 +1,31 @@
+\name{getResponseTerms.Formula}
+\alias{getResponse.Formula}
+\alias{getResponseTerms.Formula}
+\title{Get Response Terms from a ``Formula'' Object}
+\usage{
+  \method{getResponseTerms}{Formula}(x, ...,
+    single.only=FALSE, duplicates=TRUE)
+}
+\arguments{
+  \item{x}{a formula}
+
+  \item{...}{ignored parameters}
+
+  \item{single.only}{a logical specifying whether 'cbind'
+  or 'list' keywords are allowed}
+
+  \item{duplicates}{a logical specifying whether the
+  returned character-vector will only return duplicates.}
+}
+\value{
+  a character-vector specifying the response terms of the
+  formula
+}
+\description{
+  This method gets the response terms from a ``Formula''
+  Object
+}
+\author{
+  Matt Owen
+}
+
diff --git a/man/getResponseTerms.Rd b/man/getResponseTerms.Rd
new file mode 100644
index 0000000..6ece6cc
--- /dev/null
+++ b/man/getResponseTerms.Rd
@@ -0,0 +1,20 @@
+\name{getResponseTerms}
+\alias{getResponseTerms}
+\title{Get Response Terms from a Zelig-style Formula}
+\usage{
+  getResponseTerms(x, ...)
+}
+\arguments{
+  \item{x}{a formula or list of formulae}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a character-vector specifying the response terms in
+  this formula
+}
+\description{
+  This method acquires the response variables from
+  Zelig-style input.
+}
+
diff --git a/man/getResponseTerms.formula.Rd b/man/getResponseTerms.formula.Rd
new file mode 100644
index 0000000..2e664ab
--- /dev/null
+++ b/man/getResponseTerms.formula.Rd
@@ -0,0 +1,30 @@
+\name{getResponseTerms.formula}
+\alias{getResponseTerms.formula}
+\title{Get Response Terms from a Standard Formula}
+\usage{
+  \method{getResponseTerms}{formula}(x, ...,
+    single.only=FALSE, duplicates=TRUE)
+}
+\arguments{
+  \item{x}{a formula}
+
+  \item{...}{ignored parameters}
+
+  \item{single.only}{a logical specifying whether 'cbind'
+  or 'list' keywords are allowed}
+
+  \item{duplicates}{a logical specifying whether the
+  returned character-vector will only return duplicates.}
+}
+\value{
+  a character-vector specifying the response terms of the
+  formula
+}
+\description{
+  This method gets the response terms from a standard
+  formula
+}
+\author{
+  Matt Owen
+}
+
diff --git a/man/getResponseTerms.list.Rd b/man/getResponseTerms.list.Rd
new file mode 100644
index 0000000..b29d5cc
--- /dev/null
+++ b/man/getResponseTerms.list.Rd
@@ -0,0 +1,23 @@
+\name{getResponseTerms.list}
+\alias{getResponseTerms.list}
+\title{Get Response Terms from a List-style Formula}
+\usage{
+  \method{getResponseTerms}{list}(x, ...)
+}
+\arguments{
+  \item{x}{a list of formulae}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a character-vector specifying the response terms of the
+  formula
+}
+\description{
+  This method gets the response terms from a standard
+  formula
+}
+\author{
+  Matt Owen
+}
+
diff --git a/man/gsource.Rd b/man/gsource.Rd
deleted file mode 100644
index 1d92197..0000000
--- a/man/gsource.Rd
+++ /dev/null
@@ -1,51 +0,0 @@
-\name{gsource}
-
-\alias{gsource}
-
-\title{Read Data As a Space-Delimited Table}
-
-\description{
-  The \code{gsource} function allows you to read a space delimited table
-  as a data frame.  Unlike \code{scan}, you may use \code{gsource} in a
-  \code{source}ed file, and unlike \code{read.table}, you may use
-  \code{gsource} to include a small (or large) data set in a file that
-  also contains other commands.
-}
-
-\usage{
-gsource(var.names = NULL, variables)
-}
-
-\arguments{
-  \item{var.names}{An optional vector of character strings representing
-    the column names.  By default, \code{var.names = NULL}. }
-  \item{variables}{A single character string representing the data.}
-}
-
-\value{
-  The output from \code{gsource} is a data frame, which you may save to
-  an object in your workspace.
-}
-
-\seealso{\code{read.table}, \code{scan}}
-
-\author{ Olivia Lau <\email{olau at fas.harvard.edu}>
-}
-
-\examples{
-\dontrun{
-data <- gsource(variables =  "
-                 1 2 3 4 5    
-                 6 7 8 9 10   
-                 3 4 5 1 3    
-                 6 7 8 1 9    ")
-
-data <- gsource(var.names = "Vote Age Party", variables = "
-                             0    23 Democrat             
-                             0    27 Democrat             
-			     1    45 Republican           
-                             1    65 Democrat             ")
-}
-}
-
-\keyword{file}
diff --git a/man/has.zelig2.Rd b/man/has.zelig2.Rd
new file mode 100644
index 0000000..f92531c
--- /dev/null
+++ b/man/has.zelig2.Rd
@@ -0,0 +1,23 @@
+\name{has.zelig2}
+\alias{has.zelig2}
+\title{Whether an Arbitrary R-package has a Zelig2 Function within Its Namespace}
+\usage{
+  has.zelig2(pkg)
+}
+\arguments{
+  \item{pkg}{a character-string representing a package
+  name}
+}
+\value{
+  whether the package contains any zelig2-functions
+}
+\description{
+  Whether an Arbitrary R-package has a Zelig2 Function
+  within Its Namespace
+}
+\note{
+  This function is used primarily internally to determine
+  whether a package is contributing a function to the
+  Zelig software suite
+}
+
diff --git a/man/help.zelig.Rd b/man/help.zelig.Rd
index 4af2258..a32eaec 100644
--- a/man/help.zelig.Rd
+++ b/man/help.zelig.Rd
@@ -1,41 +1,19 @@
 \name{help.zelig}
-
 \alias{help.zelig}
-
-\title{HTML Help for Zelig Commands and Models}
-
-\description{
-  The \code{help.zelig} command launches html help for Zelig commands
-  and supported models.  The full manual is available online at
-  \url{http://gking.harvard.edu/zelig}.
-  }
-
+\title{Help system for Zelig models}
 \usage{
-help.zelig(...)
+  help.zelig(...)
 }
-
 \arguments{
-  \item{...}{a Zelig command or model. 
-    \code{help.zelig(command)} will take you to an index of Zelig
-    commands and \code{help.zelig(model)} will take you to a list of
-    models. }
-  }
-
-\seealso{The complete document is available online at
-  \url{http://gking.harvard.edu/zelig}.  
+  \item{...}{the help files to look-up}
+}
+\value{
+  results of calling the specific help function
+}
+\description{
+  Help system for Zelig models
 }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau<\email{olau at fas.harvard.edu}>
+  Matt Owen \emph{mowen at iq.harvard.edu}
 }
 
-\keyword{documentation}
-
-
-
-
-
-
-
-
diff --git a/man/ignore.Rd b/man/ignore.Rd
new file mode 100644
index 0000000..643a478
--- /dev/null
+++ b/man/ignore.Rd
@@ -0,0 +1,26 @@
+\name{ignore}
+\alias{ignore}
+\title{Constructor for the 'ignore' class
+This class is included for future use, and is currently
+not used in any Zelig model. It is designed for use with
+zelig2* functions}
+\usage{
+  ignore(default = NULL, type = "no pass")
+}
+\arguments{
+  \item{default}{default value}
+
+  \item{type}{ignored parameter}
+}
+\value{
+  an 'ignore' object
+}
+\description{
+  Constructor for the 'ignore' class This class is included
+  for future use, and is currently not used in any Zelig
+  model. It is designed for use with zelig2* functions
+}
+\author{
+  Matt Owen \emph{mowen at iq.harvard.edu}
+}
+
diff --git a/man/is.formula.Rd b/man/is.formula.Rd
new file mode 100644
index 0000000..d4d8c7e
--- /dev/null
+++ b/man/is.formula.Rd
@@ -0,0 +1,24 @@
+\name{is.formula}
+\alias{is.formula}
+\title{Whether an Object is a Formula}
+\usage{
+  is.formula(x)
+}
+\arguments{
+  \item{x}{an object}
+}
+\value{
+  a logical specifying whether an object is a formula
+}
+\description{
+  This is a boolean-check to see whether an object is a
+  formula.
+}
+\note{
+  This will not be shared in the Zelig/ZeligFormulae
+  namespace.
+}
+\author{
+  Matt Owen
+}
+
diff --git a/man/is.qi.Rd b/man/is.qi.Rd
new file mode 100644
index 0000000..d0e3d59
--- /dev/null
+++ b/man/is.qi.Rd
@@ -0,0 +1,20 @@
+\name{is.qi}
+\alias{is.qi}
+\title{Test If Value is Interpretable as a QI}
+\usage{
+  is.qi(qi)
+}
+\arguments{
+  \item{qi}{a potential quantity of interest}
+}
+\value{
+  a logical specifying whether this value should or
+  should-not be output
+}
+\description{
+  Test If Value is Interpretable as a QI
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/is.valid.qi.list.Rd b/man/is.valid.qi.list.Rd
new file mode 100644
index 0000000..c07fa6b
--- /dev/null
+++ b/man/is.valid.qi.list.Rd
@@ -0,0 +1,19 @@
+\name{is.valid.qi.list}
+\alias{is.valid.qi.list}
+\title{Check If Object Is a List of Valid Quantities of Interest}
+\usage{
+  is.valid.qi.list(x)
+}
+\arguments{
+  \item{x}{an object to be tested}
+}
+\value{
+  TRUE or FALSE
+}
+\description{
+  Check If Object Is a List of Valid Quantities of Interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/is.zelig.compliant.Rd b/man/is.zelig.compliant.Rd
new file mode 100644
index 0000000..2f43505
--- /dev/null
+++ b/man/is.zelig.compliant.Rd
@@ -0,0 +1,30 @@
+\name{is.zelig.compliant}
+\alias{is.zelig.compliant}
+\title{Whether an R-Package Contains a 'Yes' in its DESCRIPTION File's 'Zelig' Field}
+\usage{
+  is.zelig.compliant(package = "")
+}
+\arguments{
+  \item{package}{a character-string specifying an installed
+  R-package}
+}
+\value{
+  whether the package's DESCRIPTION file specifies
+  Zelig-compliancy
+}
+\description{
+  Whether an R-Package Contains a 'Yes' in its DESCRIPTION
+  File's 'Zelig' Field
+}
+\note{
+  This package was used internally to determine whether an
+  R-package is Zelig compliant, but is now likely
+  deprecated.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  is.zelig.package
+}
+
diff --git a/man/is.zelig.package.Rd b/man/is.zelig.package.Rd
new file mode 100644
index 0000000..9db9839
--- /dev/null
+++ b/man/is.zelig.package.Rd
@@ -0,0 +1,21 @@
+\name{is.zelig.package}
+\alias{is.zelig.package}
+\title{Whether an Installed R-Package Depends on Zelig}
+\usage{
+  is.zelig.package(package = "")
+}
+\arguments{
+  \item{package}{a character-string naming a package}
+}
+\value{
+  whether this package depends on Zelig
+}
+\description{
+  Whether an Installed R-Package Depends on Zelig
+}
+\note{
+  This package was used internally to determine whether an
+  R-package is Zelig compliant, but is now likely
+  deprecated. This test is useless if not paired with
+}
+
diff --git a/man/link.Rd b/man/link.Rd
new file mode 100644
index 0000000..14689f7
--- /dev/null
+++ b/man/link.Rd
@@ -0,0 +1,21 @@
+\name{link}
+\alias{link}
+\title{Method for extracting the link function from 'parameters' objects}
+\usage{
+  link(param)
+}
+\arguments{
+  \item{param}{a 'parameters' object}
+}
+\value{
+  the link function specified by the `param' function for
+  the given Zelig model
+}
+\description{
+  Method for extracting the link function from 'parameters'
+  objects
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/linkinv.Rd b/man/linkinv.Rd
new file mode 100644
index 0000000..22c3c05
--- /dev/null
+++ b/man/linkinv.Rd
@@ -0,0 +1,23 @@
+\name{linkinv}
+\alias{linkinv}
+\title{Method for extracting the inverse link function from 'parameters' objects}
+\usage{
+  linkinv(param)
+}
+\arguments{
+  \item{param}{a 'parameters' object}
+}
+\value{
+  the inverse link function specified by the 'param'
+  function for the given Zelig model
+}
+\description{
+  Returns the inverse link function of a ``parameters''
+  object. If the model's developer did not specify one (but
+  did specify a link function) this function returns a
+  numerical approximation of the link function.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/list.depth.Rd b/man/list.depth.Rd
new file mode 100644
index 0000000..eec5413
--- /dev/null
+++ b/man/list.depth.Rd
@@ -0,0 +1,21 @@
+\name{list.depth}
+\alias{list.depth}
+\title{Count the Depth of a List Object}
+\usage{
+  list.depth(obj)
+}
+\arguments{
+  \item{obj}{a vector or list object}
+}
+\description{
+  This function recursively computes the depth of a list
+  object. That is, it determines how many layers or levels
+  exist within the object.
+}
+\note{
+  This function is used internally by Zelig.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/list.zelig.dependent.packages.Rd b/man/list.zelig.dependent.packages.Rd
new file mode 100644
index 0000000..112c91f
--- /dev/null
+++ b/man/list.zelig.dependent.packages.Rd
@@ -0,0 +1,20 @@
+\name{list.zelig.dependent.packages}
+\alias{list.zelig.dependent.packages}
+\title{Get a List of Packages Installed on the Current Machine that Depend on Zelig}
+\usage{
+  list.zelig.dependent.packages()
+}
+\value{
+  a character-vector of all zelig-dependent packages on the
+  current machine
+}
+\description{
+  Get a List of Packages Installed on the Current Machine
+  that Depend on Zelig
+}
+\note{
+  This function is used primarily internally to determine
+  whether a package is contributing a function to the
+  Zelig software suite
+}
+
diff --git a/man/list.zelig.models.Rd b/man/list.zelig.models.Rd
new file mode 100644
index 0000000..1af2286
--- /dev/null
+++ b/man/list.zelig.models.Rd
@@ -0,0 +1,19 @@
+\name{list.zelig.models}
+\alias{list.zelig.models}
+\title{List Zelig Models Installed on the Current Machine}
+\usage{
+  list.zelig.models(with.namespace = TRUE)
+}
+\arguments{
+  \item{with.namespace}{a boolean specifying whether}
+}
+\value{
+  list of all zelig models
+}
+\description{
+  List Zelig Models Installed on the Current Machine
+}
+\note{
+  This list is not necessarily complete
+}
+
diff --git a/man/loadDependencies.Rd b/man/loadDependencies.Rd
new file mode 100644
index 0000000..2b0d3cd
--- /dev/null
+++ b/man/loadDependencies.Rd
@@ -0,0 +1,36 @@
+\name{loadDependencies}
+
+\alias{loadDependencies}
+\alias{load.dependencies}
+
+\title{Load External Dependencies Safely and Dynamically}
+
+\usage{
+  loadDependencies(..., character.only = FALSE)
+}
+
+\arguments{
+  \item{\ldots}{
+    A collection of packages to load. If ``character.only''=FALSE, these can be
+    entered symbolically (e.g. loadDependencies(MASS)). Otherwise, these
+    arguments are character-strings.
+  }
+
+  \item{character.only}{
+    A boolean specifying whether the arguments are strictly character-strings.
+  }
+}
+
+\value{
+  TRUE (invisibly) if successful. Otherwise the script is stopped.
+}
+
+\description{
+  ``loadDependencies'' is a helper function for loading external dependencies
+  at runtime.
+}
+
+\note{
+  This is used by Zelig developers to dynamically load ``dependent'' packages at
+  runtime.
+}
diff --git a/man/make.parameters.Rd b/man/make.parameters.Rd
new file mode 100644
index 0000000..c7662fd
--- /dev/null
+++ b/man/make.parameters.Rd
@@ -0,0 +1,26 @@
+\name{make.parameters}
+\alias{make.parameters}
+\title{??? For use with cmvglm}
+\usage{
+  make.parameters(terms, shape = "vector",
+    ancillary = TRUE, eqns = NULL)
+}
+\arguments{
+  \item{terms}{???}
+
+  \item{shape}{???}
+
+  \item{ancillary}{???}
+
+  \item{eqns}{???}
+}
+\value{
+  ???
+}
+\description{
+  ??? For use with cmvglm
+}
+\author{
+  Kosuke Imai and Olivia Lau
+}
+
diff --git a/man/makeModelMatrix.Rd b/man/makeModelMatrix.Rd
new file mode 100644
index 0000000..8bf2e27
--- /dev/null
+++ b/man/makeModelMatrix.Rd
@@ -0,0 +1,22 @@
+\name{makeModelMatrix}
+\alias{makeModelMatrix}
+\title{Make a Model Matrix from a Zelig-Style Formula}
+\usage{
+  makeModelMatrix(formula, data)
+}
+\arguments{
+  \item{formula}{a Zelig-style formula}
+
+  \item{data}{a \code{data.frame}}
+}
+\value{
+  a design (or model) matrix
+}
+\description{
+  This is a helper function that creates a
+  \code{model.matrix} like object of Zelig-style formulae.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/makeZeligObject.Rd b/man/makeZeligObject.Rd
new file mode 100644
index 0000000..f6bb678
--- /dev/null
+++ b/man/makeZeligObject.Rd
@@ -0,0 +1,37 @@
+\name{makeZeligObject}
+\alias{makeZeligObject}
+\title{Make an Individual Zelig Object}
+\usage{
+  makeZeligObject(object, model, call, zelig_call, data,
+    label, env, package.name = NULL)
+}
+\arguments{
+  \item{object}{a fitted statistical model}
+
+  \item{model}{a character-string specifying the name of
+  the model}
+
+  \item{call}{The call that produced the fitted model}
+
+  \item{zelig_call}{The call made to the original zelig
+  function}
+
+  \item{data}{the data.frame used to fit the model}
+
+  \item{label}{a character-string or symbol used as a
+  human-readable label for the data-set}
+
+  \item{env}{an environment variable that contains all
+  variables to evaluate the call ``zelig_call''}
+
+  \item{package.name}{a character-string specifying the
+  name of the package that is the source of the model used
+  to fit this object}
+}
+\value{
+  A ``zelig'' object
+}
+\description{
+  Returns a ``zelig'' object with the proper specifications
+}
+
diff --git a/man/mi.Rd b/man/mi.Rd
index 5a27314..856bf83 100644
--- a/man/mi.Rd
+++ b/man/mi.Rd
@@ -1,40 +1,27 @@
 \name{mi}
-
 \alias{mi}
-
-\title{Bundle multiply imputed data sets as a list}
-
-\description{The code \code{mi} bundles multiply imputed data sets as a
-  list for further analysis.}
-
+\title{Bundle Data-sets for Multiple Imputation}
 \usage{
   mi(...)
 }
-
-\arguments{ 
-  \item{...}{multiply imputed data sets, separated by commas. The
-    arguments can be tagged by \code{name=data} where \code{name} is the
-    element named used for the data set \code{data}.}
+\arguments{
+  \item{...}{a set of \code{data.frame}'s}
 }
-
-\value{The list containing each multiply imputed data set as an
-  element. The class name is \code{mi}. The list can be inputted into
-  \code{zelig} for statistical analysis with multiply imputed data
-  sets. See \code{zelig} for details.
+\value{
+  an \code{almost.mi} object, which contains the important
+  internals of a valid, useful \code{mi} object
 }
-
-\examples{
-  data(immi1, immi2, immi3, immi4, immi5)
-  mi(immi1, immi2, immi3, immi4, immi5)
+\description{
+  This object prepares data-sets for processing with
+  multiple imputation.
 }
-
-\seealso{ The full Zelig manual is available at
-  \url{http://gking.harvard.edu/zelig}.
+\note{
+  This function is largely identical to simply creating a
+  list object, with the exception that any unnamed
+  data-sets are automatically labeled via the
+  \code{substitute} function
 }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  Matt Owen \email{mowen at iq.harvard.edu}
 }
 
-\keyword{methods}
diff --git a/man/mix.Rd b/man/mix.Rd
new file mode 100644
index 0000000..a96e5e7
--- /dev/null
+++ b/man/mix.Rd
@@ -0,0 +1,24 @@
+\name{mix}
+\alias{mix}
+\title{Produce All Combinations of a Set of Lists}
+\usage{
+  mix(...)
+}
+\arguments{
+  \item{...}{a set of lists to mix together}
+}
+\value{
+  all the combinations of the lists with repetition
+}
+\description{
+  Produce All Combinations of a Set of Lists
+}
+\note{
+  This function is used internally by the 'mi' constructors
+  in order to produce the complete set of combinations of
+  data-frames and factors by to subset the data-frames.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/model.end.Rd b/man/model.end.Rd
deleted file mode 100644
index a30b3fb..0000000
--- a/man/model.end.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-\name{model.end}
-\alias{model.end}
-\title{Cleaning up after optimization}
-
-\description{ The \code{model.end} function creates a list of regression output from \code{\link{optim}} 
-output.  The list includes coefficients (from the \code{\link{optim}} \code{par} output), a 
-variance-covariance matrix (from the \code{\link{optim}} Hessian output), and any terms, contrasts, or 
-xlevels (from the model frame).  Use \code{model.end} after calling \code{\link{optim}}, but before 
-assigning a 
-class to the regression output.}
-
-\usage{
-model.end(res, mf)
-}
-
-\arguments{
-\item{res}{the output from \code{\link{optim}} or another fitting-algorithm}
-\item{mf}{the model frame output by \code{\link{model.frame}}}
-}
-
-\value{
-A list of regression output, including: 
-\item{coefficients}{the optimized parameters}
-\item{variance}{the variance-covariance matrix (the negative
-  inverse of the Hessian matrix returned from the optimization
-  procedure)}  
-\item{terms}{the terms object.  See \code{\link{terms.object}}
-  for more information}
-\item{\dots}{additional elements passed from \code{res}}
-}
-
-\seealso{The full Zelig manual at \url{http://gking.harvard.edu/zelig} for examples.}
-
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi
-<\email{falimadhi at iq.harvard.edu}>
-}
-
-\keyword{utilities}
diff --git a/man/model.frame.multiple.Rd b/man/model.frame.multiple.Rd
index c6b106d..ca05376 100644
--- a/man/model.frame.multiple.Rd
+++ b/man/model.frame.multiple.Rd
@@ -1,63 +1,28 @@
 \name{model.frame.multiple}
 \alias{model.frame.multiple}
-\title{Extracting the ``environment'' of a model formula}
-
-\description{ Use \code{model.frame.multiple} after \code{\link{parse.par}} to create a
-data frame of the unique variables identified in the formula (or list
-of formulas).}
-  
+\title{Create Model Frame from \code{multiple} Object}
 \usage{
-\method{model.frame}{multiple}(formula, data, eqn = NULL, ...)
+  \method{model.frame}{multiple}(formula,data,eqn=NULL,...)
 }
-
 \arguments{
-\item{formula}{a list of formulas of class \code{"multiple"}, returned from \code{\link{parse.par}}}  
-\item{data}{a data frame containing all the variables used in \code{formula}}  
-\item{eqn}{an optional character string or vector of character strings specifying 
-the equations (specified in \code{describe.mymodel}) for which you would like to 
-pull out the relevant variables.}
-\item{\dots}{additional arguments passed to 
-\code{\link{model.frame.default}}} }  
-
-\value{
-The output is a data frame (with a terms attribute) containing all the
-unique explanatory and response variables identified in the list of
-formulas.  By default, missing (\code{NA}) values are listwise deleted.
-
-If \code{as.factor} appears on the left-hand side, the response
-variables will be returned as an indicator (0/1) matrix with columns
-corresponding to the unique levels in the factor variable.  
-	
-If any formula contains more than one \code{tag} statement, \code{model.frame.multiple}
-will return the original variable in the data frame and use the \code{tag} information in the terms 
-attribute only.
-}
+  \item{formula}{an object of both type \code{formula} and
+  \code{multiple}}
 
-\examples{
-\dontrun{
-data(sanction)
-formulae <- list(import ~ coop + cost + target,
-                 export ~ coop + cost + target)
-fml <- parse.formula(formulae, model = "bivariate.logit")
-D <- model.frame(fml, data = sanction)
-}}
+  \item{data}{a \code{data.frame}}
 
-\seealso{\code{\link{model.matrix.default}}, \code{\link{parse.formula}} and the full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}}
+  \item{eqn}{the number of equations in the formula}
 
+  \item{...}{ignored parameters}
+}
+\value{
+  a \code{model.frame} object
+}
+\description{
+  This method creates a \code{model.frame} from a
+  \code{multiple} object. This method will be deprecated as
+  the development of Zelig 4 progresses.
+}
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi
-<\email{falimadhi at iq.harvard.edu}>
+  Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
 }
 
-\keyword{utilities}
-
-
-
-
-
-
-
-
-
diff --git a/man/model.matrix.multiple.Rd b/man/model.matrix.multiple.Rd
index 932da32..a8ff19e 100644
--- a/man/model.matrix.multiple.Rd
+++ b/man/model.matrix.multiple.Rd
@@ -1,91 +1,28 @@
 \name{model.matrix.multiple}
 \alias{model.matrix.multiple}
-\title{Design matrix for multivariate models}
-
-\description{Use \code{model.matrix.multiple} after \code{\link{parse.formula}} to
-create a design matrix for multiple-equation models.  }
-  
+\title{Create Design Matrix of a \code{multiple} Object}
 \usage{
-\method{model.matrix}{multiple}(object, data, shape = "compact", eqn = NULL, ...)
+  \method{model.matrix}{multiple}(object,data,shape="compact",eqn=NULL,...)
 }
-
 \arguments{
-\item{object}{the list of formulas output from \code{\link{parse.formula}}}
-\item{data}{a data frame created with \code{\link{model.frame.multiple}}} 
-\item{shape}{a character string specifying the shape of the outputed matrix.  Available options are 
-\itemize{
-\item{"compact"}{(default) the output matrix will be an \eqn{n \times v}{n x v},
-where \eqn{v} is the number of unique variables in all of the equations
-(including the intercept term)}
-\item{"array"}{the output is an \eqn{n \times K \times J}{n x K x J} array where \eqn{J} is the
-total number of equations and \eqn{K} is the total number of parameters
-across all the equations.  If a variable is not in a certain equation,
-it is observed as a vector of 0s.}
-\item{"stacked"}{the output will be a \eqn{2n \times K}{2n x K} matrix where \eqn{K} is the total number of 
-parameters across all the equations.}
-}}
-\item{eqn}{a character string or a vector of character strings identifying the equations from which to 
-construct the design matrix. The defaults to \code{NULL}, which only uses the systematic
-  parameters (for which \code{DepVar = TRUE} in the appropriate \code{describe.model} function)}  
-\item{\dots}{additional arguments passed to \code{model.matrix.default}} 
-}
-
-\value{A design matrix or array, depending on the options chosen in \code{shape}, with appropriate terms 
-attributes.} 
-
-\examples{
-
-# Let's say that the name of the model is "bivariate.probit", and
-# the corresponding describe function is describe.bivariate.probit(),
-# which identifies mu1 and mu2 as systematic components, and an
-# ancillary parameter rho, which may be parameterized, but is estimated
-# as a scalar by default.  Let par be the parameter vector (including
-# parameters for rho), formulae a user-specified formula, and mydata
-# the user specified data frame.
+  \item{object}{an object of type \code{multiple}. This
+  represents a Zelig 3.5 formula}
 
-# Acceptable combinations of parse.par() and model.matrix() are as follows:
-## Setting up
-\dontrun{ 
-data(sanction)
-formulae <- cbind(import, export) ~ coop + cost + target
-fml <- parse.formula(formulae, model = "bivariate.probit")
-D <- model.frame(fml, data = sanction)
-terms <- attr(D, "terms")
+  \item{data}{a \code{data.frame}}
 
-## Intuitive option
-Beta <- parse.par(par, terms, shape = "vector", eqn = c("mu1", "mu2"))
-X <- model.matrix(fml, data = D, shape = "stacked", eqn = c("mu1", "mu2")  
-eta <- X %*% Beta
+  \item{shape}{a character-string specifying the shape of
+  the matrix}
 
-## Memory-efficient (compact) option (default)
-Beta <- parse.par(par, terms, eqn = c("mu1", "mu2"))
-X <- model.matrix(fml, data = D, eqn = c("mu1", "mu2"))   
-eta <- X %*% Beta
+  \item{eqn}{an integer specifying the number of equations}
 
-## Computationally-efficient (array) option
-Beta <- parse.par(par, terms, shape = "vector", eqn = c("mu1", "mu2"))
-X <- model.matrix(fml, data = D, shape = "array", eqn = c("mu1", "mu2"))
-eta <- apply(X, 3, '%*%', Beta)
-}}
-
-\seealso{\code{\link{parse.par}}, \code{\link{parse.formula}} and the full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}}
-
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi
-<\email{falimadhi at iq.harvard.edu}>
+  \item{...}{ignored parameters}
+}
+\description{
+  This method is used to generate a \code{model.matrix}
+  adhering to the specifications in the help document
+  "model.matrix".
+}
+\note{
+  This method is scheduled to be deprecated.
 }
-
-\keyword{utilities}
-
-
-
-
-
-
-
-
-
-
 
diff --git a/man/model.matrix.parseFormula.Rd b/man/model.matrix.parseFormula.Rd
new file mode 100644
index 0000000..495ebf1
--- /dev/null
+++ b/man/model.matrix.parseFormula.Rd
@@ -0,0 +1,32 @@
+\name{model.matrix.parseFormula}
+\alias{model.matrix.parseFormula}
+\title{Construct Design Matrix from a Parsed, Zelig-style Formula}
+\usage{
+  \method{model.matrix}{parseFormula}(object, data = NULL,
+    ...)
+}
+\arguments{
+  \item{object}{a "parseFormula" object}
+
+  \item{data}{a "data.frame"}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a "model.matrix" specifying information relevant to a
+  statistical model
+}
+\description{
+  This method constructs a design matrix from a Zelig-style
+  formula. This matrix is commonly used in statistical
+  simulation, and will likely serve as the relevant
+  form of a \code{setx} object.
+}
+\note{
+  This method is primarily used by the \code{setx}
+  function.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/multilevel.Rd b/man/multilevel.Rd
new file mode 100644
index 0000000..447c621
--- /dev/null
+++ b/man/multilevel.Rd
@@ -0,0 +1,29 @@
+\name{multilevel}
+\alias{multilevel}
+\title{Multilevel}
+\usage{
+  multilevel(tt, data, mode, eqn, ...)
+}
+\arguments{
+  \item{tt}{a terms object}
+
+  \item{data}{a \code{data.frame}}
+
+  \item{mode}{???}
+
+  \item{eqn}{an integer specifying the number of equations
+  in a model}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a list with the "terms" attribute specified
+}
+\description{
+  This function currently has no documentation, but is
+  essential in Zelig 3.5's implementation of formulae.
+}
+\author{
+  Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
+}
+
diff --git a/man/name.object.Rd b/man/name.object.Rd
new file mode 100644
index 0000000..29290d0
--- /dev/null
+++ b/man/name.object.Rd
@@ -0,0 +1,30 @@
+\name{name.object}
+\alias{name.object}
+\title{Name Elements of an Object}
+\usage{
+  name.object(obj, names)
+}
+\arguments{
+  \item{obj}{a vector or matrix}
+
+  \item{names}{a character-vector specifying names}
+}
+\value{
+  the original object, with a "colnames" or "names" equal
+  to the parameter "names". If "names" is larger than
+  "obj", the "names" parameter is truncated appropriately.
+  If it is smaller, then the latter part of "obj" is
+  replaced with a numbered generic column name
+}
+\description{
+  Returns an object
+}
+\note{
+  This method is used internally by Zelig to name the
+  columns and elements of matrices and vectors for
+  simulations and bootstrapped parameters.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/names.qi.Rd b/man/names.qi.Rd
new file mode 100644
index 0000000..0aa5459
--- /dev/null
+++ b/man/names.qi.Rd
@@ -0,0 +1,29 @@
+\name{names.qi}
+\alias{names.qi}
+\title{The Names of a 'qi' Object}
+\usage{
+  \method{names}{qi}(x)
+}
+\arguments{
+  \item{x}{a 'qi' object}
+}
+\value{
+  a character-vector containing the names of the Quantities
+  of Interest
+}
+\description{
+  Function to get the names of a 'qi' object. This function
+  does not entirely parallel the functionality of
+  traditional 'names' methods; this is because the \code{$}
+  operator has been overloaded to support a unique style of
+  value extraction. For technical details, please see the
+  source code.
+}
+\note{
+  No method exists to set the names of a 'qi' object, once
+  it is constructed. This will be a feature added later.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/network.Rd b/man/network.Rd
deleted file mode 100644
index bbd86f5..0000000
--- a/man/network.Rd
+++ /dev/null
@@ -1,39 +0,0 @@
-\name{network}
-\alias{network}
-
-\title{Format matricies into a data frame for social network analysis}
-\description{ This function accepts individual matricies as its inputs,
-combining the input matricies into a single data frame which can then be
-used in the \code{data} argument for social network analysis (models
-\code{"netlm"} and \code{"netlogit"}) in Zelig.}
-
-\usage{
-network(...)
-}
-
-\arguments{
-  \item{...}{matricies representing variables, with rows and columns corresponding to
-    individuals.  These can be given as named arguments and should be
-    given in the order the in which the user wishes them to appear in
-    the output data frame.}
-}
-
-\value{ The \code{network} function creates a data frame which
-  contains matricies instead of vectors as its variables.  Inputs to the
-  function should all be square matricies and can be given as named
-  arguments.}
-
-
-\examples{\dontrun{
-## Let Var1, Var2, Var3, Var4, and Var5 be matrices
-friendship <- network(Var1, Var2, Var3, Var4, Var5)
-}
-}
-
-\seealso{ The full Zelig manual is available at
-  \url{http://gking.harvard.edu/zelig}.
-}
-
-\author{Skyler J. Cranmer}
-
-\keyword{methods}
diff --git a/man/param.Rd b/man/param.Rd
new file mode 100644
index 0000000..4a10e07
--- /dev/null
+++ b/man/param.Rd
@@ -0,0 +1,67 @@
+\name{param}
+\alias{param}
+\title{Generic Method for Simulating Ancillary/Auxiliary Parameters of Zelig
+  Models}
+\usage{
+  param(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a \code{zelig} object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{optional parameters which will likely be
+  ignored}
+}
+\value{
+  The main purpose of the \code{param} function is to
+  return a list of key-value pairs, specifying information
+  that should be shared between the \code{qi} function and
+  the fitted statistical model (produced by the
+  \code{zelig2} function). This list can contain the
+  following entries:
+
+  \item{\code{simulations}}{specifies a set of simulated
+  parameters used to describe the statistical model's
+  underlying distribution} \item{\code{alpha}}{specifies
+  the fixed (non-simulated) ancillary parameters used by
+  the statistical model's underlying distribution}
+  \item{\code{family}}{specifies a family object used to
+  implicitly define the \code{link} and \code{linkinv}
+  functions. That is, this specifies the "link" and
+  "inverse link" functions of generalized linear models}
+  \item{\code{link}}{specifies the \code{link} function to
+  be used. This parameter is largely unimportant compared
+  to the "inverse link" function}
+  \item{\code{linkinv}}{specifies the \code{linkinv}
+  function to be used.}
+}
+\description{
+  The \code{param} method is used by developers to specify
+  simulated and fixed ancillary parameters of the Zelig
+  statistical model. That is, this method is used between
+  the \code{zelig2} function and the \link{qi} as a helper
+  function that specifies all the necessary details needed
+  to simulate quantities of interest, given the fitted
+  statistical model produced by the \code{zelig2} function.
+}
+\note{
+  The 'param' function is a method meant to be overloaded
+  by Zelig Developers
+}
+\examples{
+param.some.model <- function (obj, num, ...) {
+  list(
+       simulations = NULL,
+       alpha = NULL,
+       link = NULL,
+       linkinv = NULL,
+       fam = NULL
+       )
+}
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.default.Rd b/man/param.default.Rd
new file mode 100644
index 0000000..7df1f96
--- /dev/null
+++ b/man/param.default.Rd
@@ -0,0 +1,21 @@
+\name{param.default}
+\alias{param.default}
+\title{Default Method for ``param''}
+\usage{
+  \method{param}{default}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{ignored parameter}
+
+  \item{num}{ignored parameter}
+
+  \item{...}{ignored parameters}
+}
+\description{
+  If no \code{param} function is set for a Zelig model,
+  then this function will return NULL.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.exp.Rd b/man/param.exp.Rd
new file mode 100644
index 0000000..6f6e87c
--- /dev/null
+++ b/man/param.exp.Rd
@@ -0,0 +1,27 @@
+\name{param.exp}
+\alias{param.exp}
+\title{Param Method for the \code{exp} Zelig Model}
+\usage{
+  \method{param}{exp}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the \code{exp} Zelig Model
+}
+\note{
+  This method is used by the \code{param} Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.gamma.Rd b/man/param.gamma.Rd
new file mode 100644
index 0000000..7ba8e0e
--- /dev/null
+++ b/man/param.gamma.Rd
@@ -0,0 +1,25 @@
+\name{param.gamma}
+\alias{param.gamma}
+\title{param method for the `gamma' Zelig model}
+\usage{
+  \method{param}{gamma}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a `zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be cast as a `parameters' object
+}
+\description{
+  Return parameter estimates for the ``gamma'' GLM in
+  Zelig.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.logit.Rd b/man/param.logit.Rd
new file mode 100644
index 0000000..63957c9
--- /dev/null
+++ b/man/param.logit.Rd
@@ -0,0 +1,27 @@
+\name{param.logit}
+\alias{param.logit}
+\title{Param Method for the \code{logit} Zelig Model}
+\usage{
+  \method{param}{logit}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the \code{logit} Zelig Model
+}
+\note{
+  This method is used by the \code{logit} Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.ls.Rd b/man/param.ls.Rd
new file mode 100644
index 0000000..28e2a10
--- /dev/null
+++ b/man/param.ls.Rd
@@ -0,0 +1,27 @@
+\name{param.ls}
+\alias{param.ls}
+\title{Param Method for the 'ls' Zelig Model}
+\usage{
+  \method{param}{ls}(obj, num, \dots)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the 'ls' Zelig Model
+}
+\note{
+  This method currently returns via a deprecated style
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.negbinom.Rd b/man/param.negbinom.Rd
new file mode 100644
index 0000000..86c9f5a
--- /dev/null
+++ b/man/param.negbinom.Rd
@@ -0,0 +1,27 @@
+\name{param.negbinom}
+\alias{param.negbinom}
+\title{Param Method for the 'negbinom' Zelig Model}
+\usage{
+  \method{param}{negbinom}(obj, num=1000, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the 'negbinom' Zelig Model
+}
+\note{
+  This method is used by the 'negbinom' Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.normal.Rd b/man/param.normal.Rd
new file mode 100644
index 0000000..d918aec
--- /dev/null
+++ b/man/param.normal.Rd
@@ -0,0 +1,27 @@
+\name{param.normal}
+\alias{param.normal}
+\title{Param Method for the 'normal' Zelig Model}
+\usage{
+  \method{param}{normal}(obj, num=1000, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the 'normal' Zelig Model
+}
+\note{
+  This method is used by the 'normal' Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.poisson.Rd b/man/param.poisson.Rd
new file mode 100644
index 0000000..0fb637f
--- /dev/null
+++ b/man/param.poisson.Rd
@@ -0,0 +1,27 @@
+\name{param.poisson}
+\alias{param.poisson}
+\title{Param Method for the 'poisson' Zelig Model}
+\usage{
+  \method{param}{poisson}(obj, num=1000, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the 'poisson' Zelig Model
+}
+\note{
+  This method is used by the 'poisson' Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.probit.Rd b/man/param.probit.Rd
new file mode 100644
index 0000000..2237681
--- /dev/null
+++ b/man/param.probit.Rd
@@ -0,0 +1,27 @@
+\name{param.probit}
+\alias{param.probit}
+\title{Param Method for the 'probit' Zelig Model}
+\usage{
+  \method{param}{probit}(obj, num=1000, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the 'probit' Zelig Model
+}
+\note{
+  This method is used by the 'probit' Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/param.relogit.Rd b/man/param.relogit.Rd
new file mode 100644
index 0000000..c43dcea
--- /dev/null
+++ b/man/param.relogit.Rd
@@ -0,0 +1,26 @@
+\name{param.relogit}
+\alias{param.relogit}
+\title{Estimate Parameters for the ``relogit'' Zelig Model}
+\usage{
+  \method{param}{relogit}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a zelig object containing the fitted model}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{...}{unspecified parameters}
+}
+\value{
+  a list specifying important parameters for the
+  ``relogit'' model
+}
+\description{
+  Returns estimates on parameters, as well as, specifying
+  link and inverse-link functions.
+}
+\note{
+  This method merely calls ``param.logit''.
+}
+
diff --git a/man/param.relogit2.Rd b/man/param.relogit2.Rd
new file mode 100644
index 0000000..c0e44e0
--- /dev/null
+++ b/man/param.relogit2.Rd
@@ -0,0 +1,26 @@
+\name{param.relogit2}
+\alias{param.relogit2}
+\title{Estimate Parameters for the ``relogit'' Zelig Model}
+\usage{
+  \method{param}{relogit2}(obj, num, x, ...)
+}
+\arguments{
+  \item{obj}{a zelig object containing the fitted model}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{x}{ideally we should be able to remove this
+  parameter}
+
+  \item{...}{unspecified parameters}
+}
+\value{
+  a list specifying important parameters for the
+  ``relogit'' model
+}
+\description{
+  Returns estimates on parameters, as well as, specifying
+  link and inverse-link functions.
+}
+
diff --git a/man/param.tobit.Rd b/man/param.tobit.Rd
new file mode 100644
index 0000000..8c13c29
--- /dev/null
+++ b/man/param.tobit.Rd
@@ -0,0 +1,27 @@
+\name{param.tobit}
+\alias{param.tobit}
+\title{Param Method for the \code{tobit} Zelig Model}
+\usage{
+  \method{param}{tobit}(obj, num, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{num}{an integer specifying the number of
+  simulations to sample}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a list to be cast as a 'parameters' object
+}
+\description{
+  Param Method for the \code{tobit} Zelig Model
+}
+\note{
+  This method is used by the \code{tobit} Zelig model
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/parameters.Rd b/man/parameters.Rd
new file mode 100644
index 0000000..0211e84
--- /dev/null
+++ b/man/parameters.Rd
@@ -0,0 +1,34 @@
+\name{parameters}
+\alias{parameters}
+\title{Constructor for `parameters' class}
+\usage{
+  parameters(simulations, alpha, fam = NULL, link = NULL,
+    linkinv = NULL)
+}
+\arguments{
+  \item{simulations}{a vector or matrix containing
+  simulated values}
+
+  \item{alpha}{ancillary parameters for the Zelig
+  statistical model}
+
+  \item{fam}{a family object which implicitly specifies the
+  link and link-inverse functions for the statistical model}
+
+  \item{link}{the link function of the specified
+  statistical model.  The `linkinv' parameter is implicitly
+  defined by the `link' parameter, when `linkinv' is
+  omitted}
+
+  \item{linkinv}{the inverse link function}
+}
+\value{
+  a `parameters' object
+}
+\description{
+  Constructor for `parameters' class
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/parse.formula.Rd b/man/parse.formula.Rd
index e6b7354..d60c7d9 100644
--- a/man/parse.formula.Rd
+++ b/man/parse.formula.Rd
@@ -1,88 +1,24 @@
 \name{parse.formula}
 \alias{parse.formula}
-
-\title{Parsing user-input formulas into multiple syntax}
-
-\description{Parse the input formula (or list of formulas) into the 
-standard format described below.  Since labels for this format will vary 
-by model, \code{parse.formula} will evaluate a function \code{describe.model},
-where \code{model} is given as an input to \code{parse.formula}.
-
-If the \code{describe.model} function has more than one parameter for
-which \code{ExpVar = TRUE} and \code{DepVar = TRUE}, then the
-user-specified equations must have labels to match those parameters,
-else \code{parse.formula} should return an error. In addition, if the
-formula entries are not unambiguous, then \code{parse.formula} returns an error.
-}
-  
+\title{Parse Formulas for Zelig Models}
 \usage{
-parse.formula(formula, model, data = NULL)
+  parse.formula(formula, model, data = NULL)
 }
-
 \arguments{
-\item{formula}{either a single formula or a list of \code{formula} objects}  
-\item{model}{a character string specifying the name of the model}
-\item{data}{an optional data frame for models that require a factor response variable}
-}
+  \item{formula}{a formula}
 
-\value{The output is a list of formula objects with class 
-\code{c("multiple", "list")}.  Let's say that the name of the model is 
-\code{"bivariate.probit"}, and the corresponding describe function is 
-\code{describe.bivariate.probit}, which identifies \code{mu1} and 
-\code{mu2} as systematic components, and an ancillary parameter \code{rho}, which
-may be parameterized, but is estimated as a scalar by default.  
-}
-
-\details{Acceptable user inputs are as follows:
-
-\tabular{lll}{
-                 \tab User Input   \tab Output from \code{parse.formula}\cr
-\tab \tab \cr
-Same covariates, \tab cbind(y1, y2) ~ x1 + x2 * x3  \tab list(mu1 = y1 ~ x1 + x2 * x3,\cr
-separate effects \tab                               \tab      mu2 = y2 ~ x1 + x2 * x3,\cr
-                 \tab                               \tab      rho = ~ 1)\cr
-\tab \tab \cr
-With \code{rho} as a \tab list(cbind(y1, y2) ~ x1 + x2, \tab list(mu1 = y1 ~ x1 + x2,\cr
-systematic equation  \tab rho = ~ x4 + x5)              \tab      mu2 = y2 ~ x1 + x2,\cr
-                     \tab                               \tab      rho = ~ x4 + x5)\cr
-\tab \tab \cr
-With constraints \tab list(mu1 = y1 ~ x1 + tag(x2, "x2"), \tab list(mu1 = y1 ~ x1 + tag(x2, "x2"),\cr
-(same variable)  \tab      mu2 = y2 ~ x3 + tag(x2, "x2")) \tab      mu2 = y2 ~ x3 + tag(x2, "x2"),\cr
-                 \tab                                     \tab      rho = ~ 1)\cr
-\tab \tab \cr
-With constraints \tab  list(mu1 = y1 ~ x1 + tag(x2, "z1"), \tab list(mu1 = y1 ~ x1 + tag(x2, "z1"),\cr
-(different variables) \tab     mu2 = y2 ~ x3 + tag(x4, "z1")) \tab     mu2 = y2 ~ x3 + tag(x4, "z1"),\cr
-                      \tab                                  \tab         rho = ~ 1)\cr
-}}
+  \item{model}{a Zelig model}
 
-\examples{
-\dontrun{
-data(sanction)
-formulae <- list(cbind(import, export) ~ coop + cost + target)
-fml <- parse.formula(formulae, model = "bivariate.probit")
-D <- model.frame(fml, data = sanction)
-}}
-
-\seealso{
-\code{\link{parse.par}}, \code{\link{model.frame.multiple}}, 
-\code{\link{model.matrix.multiple}}, and the full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}.
+  \item{data}{a data-frame}
+}
+\description{
+  Parse Formulas for Zelig Models
+}
+\note{
+  This is used typically in multinomial and multivariate
+  Zelig models
 }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi 
-<\email{falimadhi at iq.harvard.edu}>
+  Kosuke Imai and Olivia Lau
 }
 
-\keyword{utilities}
-
-
-
-
-
-
-
-
-
-
diff --git a/man/parse.par.Rd b/man/parse.par.Rd
deleted file mode 100644
index 5d97a52..0000000
--- a/man/parse.par.Rd
+++ /dev/null
@@ -1,82 +0,0 @@
-\name{parse.par}
-\alias{parse.par}
-
-\title{Select and reshape parameter vectors}
-
-\description{ The \code{parse.par} function reshapes parameter vectors for
-comfortability with the output matrix from \code{\link{model.matrix.multiple}}. 
-Use \code{parse.par} to identify sets of parameters; for example, within
-optimization functions that require vector input, or within \code{qi}
-functions that take matrix input of all parameters as a lump.  
-}
-
-\usage{
-parse.par(par, terms, shape = "matrix", eqn = NULL)
-}
-
-\arguments{
-\item{par}{the vector (or matrix) of parameters}
-\item{terms}{the terms from either \code{\link{model.frame.multiple}} or 
-\code{\link{model.matrix.multiple}}}
-\item{shape}{a character string (either \code{"matrix"} or \code{"vector"})
-that identifies the type of output structure}
-\item{eqn}{a character string (or strings) that identify the
-parameters that you would like to subset from the larger \code{par}
-structure}
-}
-
-\value{
-A matrix or vector of the sub-setted (and reshaped) parameters for the specified
-parameters given in \code{"eqn"}.   By default, \code{eqn = NULL}, such that all systematic
-components are selected.  (Systematic components have \code{ExpVar = TRUE} in the appropriate 
-\code{describe.model} function.)  
-
-If an ancillary parameter (for which \code{ExpVar = FALSE} in
-\code{describe.model}) is specified in \code{eqn}, it is
-always returned as a vector (ignoring \code{shape}).  (Ancillary
-parameters are all parameters that have intercept only formulas.)  
-}
-\examples{
-# Let's say that the name of the model is "bivariate.probit", and
-# the corresponding describe function is describe.bivariate.probit(), 
-# which identifies mu1 and mu2 as systematic components, and an 
-# ancillary parameter rho, which may be parameterized, but is estimated 
-# as a scalar by default.  Let par be the parameter vector (including 
-# parameters for rho), formulae a user-specified formula, and mydata
-# the user specified data frame.  
-
-# Acceptable combinations of parse.par() and model.matrix() are as follows:
-## Setting up
-\dontrun{
-data(sanction)
-formulae <- cbind(import, export) ~ coop + cost + target
-fml <- parse.formula(formulae, model = "bivariate.probit")
-D <- model.frame(fml, data = sanction)
-terms <- attr(D, "terms")
-
-## Intuitive option
-Beta <- parse.par(par, terms, shape = "vector", eqn = c("mu1", "mu2"))
-X <- model.matrix(fml, data = D, shape = "stacked", eqn = c("mu1", "mu2")
-eta <- X %*% Beta
-
-## Memory-efficient (compact) option (default)
-Beta <- parse.par(par, terms, eqn = c("mu1", "mu2"))    
-X <- model.matrix(fml, data = D, eqn = c("mu1", "mu2"))
-eta <- X %*% Beta
-
-## Computationally-efficient (array) option
-Beta <- parse.par(par, terms, shape = "vector", eqn = c("mu1", "mu2"))
-X <- model.matrix(fml, data = D, shape = "array", eqn = c("mu1", "mu2"))
-eta <- apply(X, 3, '%*%', Beta)
-}}
-
-\seealso{\code{\link{model.matrix.multiple}}, \code{\link{parse.formula}} and the full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}}
- 
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi
-<\email{falimadhi at iq.harvard.edu}>
-}
-
-\keyword{utilities}
diff --git a/man/parseFormula.Rd b/man/parseFormula.Rd
new file mode 100644
index 0000000..8fd899c
--- /dev/null
+++ b/man/parseFormula.Rd
@@ -0,0 +1,25 @@
+\name{parseFormula}
+\alias{parseFormula}
+\title{Parse Zelig-style Formulae}
+\usage{
+  parseFormula(obj, data = NULL)
+}
+\arguments{
+  \item{obj}{a list or formula}
+
+  \item{data}{the data set associated with the formula
+  object}
+}
+\value{
+  an object of type "parseFormula". This object has slots
+  specifying:
+}
+\description{
+  Zelig uses three distinct types of formulae. This method
+  is a re-design of the Zelig function
+  \code{parse.formula}.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/parseFormula.formula.Rd b/man/parseFormula.formula.Rd
new file mode 100644
index 0000000..f4b4e89
--- /dev/null
+++ b/man/parseFormula.formula.Rd
@@ -0,0 +1,24 @@
+\name{parseFormula.Formula}
+\alias{parseFormula.Formula}
+\title{Parse ``Formula''-style Zelig Formulae}
+\usage{
+  \method{parseFormula}{Formula}(obj, data=NULL)
+}
+\arguments{
+  \item{obj}{a list of formulae}
+
+  \item{data}{a data frame}
+}
+\value{
+  an object of type ``parseFormula''
+}
+\description{
+  This method parses a ``Formula''-style Zelig formula.
+  This is to support the ``Formula'' object. It seems like
+  it has the right idea when it comes to expressing
+  multiple responses.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/parseFormula.list.Rd b/man/parseFormula.list.Rd
new file mode 100644
index 0000000..17c2ca3
--- /dev/null
+++ b/man/parseFormula.list.Rd
@@ -0,0 +1,21 @@
+\name{parseFormula.list}
+\alias{parseFormula.list}
+\title{Parse List-Style Zelig Formulae}
+\usage{
+  \method{parseFormula}{list}(obj, data=NULL)
+}
+\arguments{
+  \item{obj}{a list of formulae}
+
+  \item{data}{a data frame}
+}
+\value{
+  an object of type "parseFormula"
+}
+\description{
+  This method parses a list-style Zelig formula.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/plot.MI.sim.Rd b/man/plot.MI.sim.Rd
new file mode 100644
index 0000000..460cdde
--- /dev/null
+++ b/man/plot.MI.sim.Rd
@@ -0,0 +1,20 @@
+\name{plot.MI.sim}
+\alias{plot.MI.sim}
+\title{Plot graphs of simulated multiply-imputed data}
+\usage{
+  \method{plot}{MI.sim}(...)
+}
+\arguments{
+  \item{...}{ignored parameters}
+}
+\value{
+  NULL (invisibly)
+}
+\description{
+  This function is currently unimplemented, and reserved
+  for future use.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/plot.ci.Rd b/man/plot.ci.Rd
index eb5e7f5..2ca904f 100644
--- a/man/plot.ci.Rd
+++ b/man/plot.ci.Rd
@@ -1,80 +1,57 @@
 \name{plot.ci}
-
 \alias{plot.ci}
-
-\title{Plotting Vertical confidence Intervals}
-
-\description{ The \code{plot.ci} command generates vertical
-  confidence intervals for linear or generalized linear univariate
-  response models.  }
-
+\title{Method for plotting pooled simulations by confidence intervals}
 \usage{
-\method{plot}{ci}(x, CI = 95, qi = "ev", main = "", ylab = NULL, xlab = NULL,
-        xlim = NULL, ylim = NULL, col = c("red", "blue"), ...) 
+  \method{plot}{ci}(x, qi="ev", var=NULL, ..., main = NULL, sub = NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1, legpos=NULL)
 }
-
 \arguments{
-  \item{x}{stored output from \code{sim}.  The \code{x$x} and optional
-  \code{x$x1} values used to generate the \code{sim} output object must
-  have more than one observation.}  
-\item{CI}{the selected confidence interval.  Defaults to 95
-  percent.}
-\item{qi}{the selected quantity of interest.  Defaults to
-  expected values.}
-\item{main}{a title for the plot.}
-\item{ylab}{label for the y-axis.}
-\item{xlab}{label for the x-axis.}
-\item{xlim}{limits on the x-axis.}
-\item{ylim}{limits on the y-axis.}
-\item{col}{a vector of at most two colors for plotting the
-  expected value given by \code{x} and the alternative set of expected
-  values given by \code{x1} in \code{sim}.  If the quantity of
-  interest selected is not the expected value, or \code{x1 = NULL},
-  only the first color will be used.}
-\item{\dots}{Additional parameters passed to \code{plot}.}
-}  
+  \item{x}{A `sim' object}
 
-\value{
-For all univariate response models, \code{plot.ci()} returns vertical
-confidence intervals over a specified range of one explanatory
-variable.  You may save this plot using the commands described in the
-Zelig manual (\url{http://gking.harvard.edu/zelig}).  
-}
+  \item{qi}{a character-string specifying the quantity of
+  interest to plot}
 
-\examples{
-data(turnout)
-z.out <- zelig(vote ~ race + educate + age + I(age^2) + income,
-               model = "logit", data = turnout)
-age.range <- 18:95
-x.low <- setx(z.out, educate = 12, age = age.range)
-x.high <- setx(z.out, educate = 16, age = age.range)
-s.out <- sim(z.out, x = x.low, x1 = x.high)
-plot.ci(s.out, xlab = "Age in Years",
-        ylab = "Predicted Probability of Voting",
-        main = "Effect of Education and Age on Voting Behavior")
-legend(45, 0.52, legend = c("College Education (16 years)",
-       "High School Education (12 years)"), col = c("blue","red"), 
-       lty = c("solid"))
+  \item{var}{The variable to be used on the x-axis. Default
+  is the variable across all the chosen values with
+  smallest nonzero variance}
 
-## adding lines connecting point estimates
-lines(age.range, apply(s.out$qi$ev, 2, mean))
-lines(age.range, apply(s.out$qi$fd+s.out$qi$ev, 2, mean))
-}
+  \item{...}{Parameters to be passed to the `truehist'
+  function which is implicitly called for numeric
+  simulations}
 
-\seealso{
-  The full Zelig manual is available at
-  \url{http://gking.harvard.edu/zelig}, and users may also wish to see
-  \code{plot}, \code{lines}.
-  }
+  \item{main}{A character-string, specifying the main title of the plot}
 
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
-}
+  \item{sub}{A character-string, specifying the sub-title of the plot}
 
-\keyword{hplot}
+  \item{xlab}{A character-string, specifying the label for the x-axis}
 
+  \item{ylab}{A character-string, specifying the label for the y-axis}
 
+  \item{xlim}{A vector of length 2, specifying the left-most and right-most values for the plot}
 
+  \item{ylim}{A vector of length 2, specifying the bottom-most and top-most values for the plot}
 
+  \item{legcol}{``legend color'', a valid color used for
+  plotting the line colors in the legend}
+
+  \item{col}{a valid vector of colors of at least length 3
+  to use to color the confidence intervals}
+
+  \item{leg}{``legend position'', an integer from 1 to 4,
+  specifying the position of the legend. 1 to 4 correspond
+  to ``SE'', ``SW'', ``NW'', and ``NE'' respectively}
+
+  \item{legpos}{``legend type'', exact coordinates and
+  sizes for legend. Overrides argument ``leg.type''}
+}
+\value{
+  the current graphical parameters. This is subject to
+  change in future implementations of Zelig
+}
+\description{
+  Plot confidence intervals of pooled simulated values.
+}
+\author{
+  James Honaker, adapted by Matt Owen
+  \email{mowen at iq.harvard.edu}
+}
 
diff --git a/man/plot.pooled.sim.Rd b/man/plot.pooled.sim.Rd
new file mode 100644
index 0000000..9ff2852
--- /dev/null
+++ b/man/plot.pooled.sim.Rd
@@ -0,0 +1,57 @@
+\name{plot.pooled.sim}
+\alias{plot.pooled.sim}
+\title{Method for plotting pooled simulations by confidence intervals}
+\usage{
+  \method{plot}{pooled.sim}(x, qi="ev", var=NULL, ..., main = NULL, sub = NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, legcol="gray20", col=NULL, leg=1, legpos=NULL)
+}
+\arguments{
+  \item{x}{A `sim' object}
+
+  \item{qi}{a character-string specifying the quantity of
+  interest to plot}
+
+  \item{var}{The variable to be used on the x-axis. Default
+  is the variable across all the chosen values with
+  smallest nonzero variance}
+
+  \item{...}{Parameters to be passed to the `truehist'
+  function which is implicitly called for numeric
+  simulations}
+
+  \item{main}{A character-string, specifying the main title of the plot}
+
+  \item{sub}{A character-string, specifying the sub-title of the plot}
+
+  \item{xlab}{A character-string, specifying the label for the x-axis}
+
+  \item{ylab}{A character-string, specifying the label for the y-axis}
+
+  \item{xlim}{A vector of length 2, specifying the left-most and right-most values for the plot}
+
+  \item{ylim}{A vector of length 2, specifying the bottom-most and top-most values for the plot}
+
+  \item{legcol}{``legend color'', a valid color used for
+  plotting the line colors in the legend}
+
+  \item{col}{a valid vector of colors of at least length 3
+  to use to color the confidence intervals}
+
+  \item{leg}{``legend position'', an integer from 1 to 4,
+  specifying the position of the legend. 1 to 4 correspond
+  to ``SE'', ``SW'', ``NW'', and ``NE'' respectively}
+
+  \item{legpos}{``legend type'', exact coordinates and
+  sizes for legend. Overrides argument ``leg.type''}
+}
+\value{
+  the current graphical parameters. This is subject to
+  change in future implementations of Zelig
+}
+\description{
+  Plot pooled simulated quantities of interest.
+}
+\author{
+  James Honaker, adapted by Matt Owen
+  \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/plot.sim.Rd b/man/plot.sim.Rd
new file mode 100644
index 0000000..1acbb82
--- /dev/null
+++ b/man/plot.sim.Rd
@@ -0,0 +1,23 @@
+\name{plot.sim}
+\alias{plot.sim}
+\title{Method for plotting simulations}
+\usage{
+  \method{plot}{sim}(x, ...)
+}
+\arguments{
+  \item{x}{a `sim' object}
+
+  \item{...}{parameters to be passed to the `truehist'
+  function which is implicitly called for numeric
+  simulations}
+}
+\value{
+  nothing
+}
+\description{
+  Plot simulated quantities of interest.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/plot.simulations.Rd b/man/plot.simulations.Rd
new file mode 100644
index 0000000..1398b41
--- /dev/null
+++ b/man/plot.simulations.Rd
@@ -0,0 +1,28 @@
+\name{plot.simulations}
+\alias{plot.simulations}
+\title{Plot Any Simulation from the Zelig Core Package}
+\usage{
+  plot.simulations(x, ...)
+}
+\arguments{
+  \item{x}{an object}
+
+  \item{...}{parameters passed to the ``plot'' and
+  ``barplot'' functions}
+}
+\value{
+  the original graphical parameters
+}
+\description{
+  Plots any simulation from the core package. In general,
+  this function can \emph{neatly} plot simulations
+  containing five of the popular ``quantities of interest''
+  - ``Expected Values: E(Y|X)'', ``Predicted Values: Y|X'',
+  ``Expected Values (for X1): E(Y|X1)'', ``Predicted Values
+  (for X1): Y|X1'' and ``First Differences: E(Y|X1) -
+  E(Y|X)''.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/plot.surv.Rd b/man/plot.surv.Rd
deleted file mode 100644
index f3d846c..0000000
--- a/man/plot.surv.Rd
+++ /dev/null
@@ -1,66 +0,0 @@
-\name{plot.surv}
-
-\alias{plot.surv}
-
-\title{Plotting Confidence Intervals for Survival Curves}
-
-\description{ The \code{plot.surv} command generates confidence intervals for Kaplan-Meier survival curves}
-
-\usage{
-     \method{plot}{surv}(x, duration, censor, type = "line", plotcensor=TRUE,
-               plottimes = FALSE, int = c(0.025,0.975), ...) 
-}
-
-\arguments{
-\item{x}{output from \code{sim} stored as a list.  Each element of the list is the \code{sim} output for a particular survival curve.}  
-\item{duration}{the duration variable (e.g. lifetime, survival, etc.).}
-\item{censor}{the censored data}
-\item{type}{the type of confidence interval.  Defaults to \code{"line"}, which draws vertical confidence intervals at observed event times.  \code{"poly"} draws confidence regions using polygons.}
-\item{plotcensor}{default is \code{TRUE}. Plots censoring times as a \code{rug} object.}
-\item{plottimes}{default is \code{FALSE}. Plots step function with indicators at observed event times.}
-\item{int}{vector of quantile limits for the confidence interval.  Default is 95\% interval.}
-\item{\dots}{Additional parameters passed to \code{plot}.}
-}  
-
-\value{
-For survival models, \code{plot.surv()} returns vertical
-confidence intervals or polygon survival regions for Kaplan-Meier survival curves.  You may save this plot using the commands described in the
-Zelig manual (\url{http://gking.harvard.edu/zelig}).  
-}
-
-\examples{\dontrun{
-data(coalition)
-z.out1 <- zelig(Surv(duration,ciep12)~invest+numst2+crisis,
-robust=TRUE,cluster="polar",model="coxph",data=coalition)
-low <- setx(z.out1,numst2=0)
-high <- setx(z.out1,numst2=1
-# Simulate Survival Curves for Each Group
-s.out1 <- sim(z.out1,x=low) 
-s.out2 <- sim(z.out1,x=high)
-
-# Organize simulated output as a list
-out <- list(s.out1,s.out2)
-
-plot.surv(x = out, duration = coalition$duration, censor=coalition$ciep12,
-          type="line", plottimes=FALSE, plotcensor=FALSE,
-          main="Survival", xlab="Time", ylab="Survival")
-}
-}
-
-
-\seealso{
-  The full Zelig manual is available at
-  \url{http://gking.harvard.edu/zelig}, and users may also wish to see
-  \code{plot}, \code{lines}.
-  }
-
-\author{
-  John A. Graves <\email{graveja0 at gmail.com}>
-}
-
-\keyword{hplot}
-
-
-
-
-
diff --git a/man/plot.zelig.Rd b/man/plot.zelig.Rd
deleted file mode 100644
index 9abd49f..0000000
--- a/man/plot.zelig.Rd
+++ /dev/null
@@ -1,49 +0,0 @@
-\name{plot.zelig}
-
-\alias{plot.zelig}
-\alias{plot}
-
-\title{Graphing Quantities of Interest}
-
-\description{ The \code{zelig} method for the generic \code{plot}
-  command generates default plots for \code{\link{sim}} output with
-  one-observation values in \code{x} and \code{x1}.  }
-
-\usage{
-\method{plot}{zelig}(x, xlab = "", user.par = FALSE, ...)
-}
-
-\arguments{
-\item{x}{stored output from \code{\link{sim}}.  If the \code{x$x} or
-  \code{x$x1} values stored in the object contain more than one
-  observation, \code{plot.zelig} will return an error.  For linear or
-  generalized linear models with more than one observation in \code{x$x}
-  and optionally \code{x$x1}, you may use \code{\link{plot.ci}}.  }
-\item{xlab}{a character string for the x-axis label for all graphs.}
-\item{user.par}{a logical value indicating whether to use the default
-    Zelig plotting parameters (\code{user.par = FALSE}) or
-    user-defined parameters (\code{user.par = TRUE}), set using the
-    \code{par} function prior to plotting. }
-\item{\dots}{Additional parameters passed to \code{plot.default}.
-  Because \code{plot.zelig} primarily produces diagnostic plots, many
-  of these parameters are hard-coded for convenience and
-  presentation. }  
-}
-
-\value{
-Depending on the class of model selected, \code{plot.zelig} will
-return an on-screen window with graphs of the various quantities of
-interest.  You may save these plots using the commands described in
-the Zelig manual (available at \url{http://gking.harvard.edu/zelig}).  
-}
-
-\seealso{ The full Zelig manual at
-  \url{http://gking.harvard.edu/zelig} and \code{plot}, \code{lines},
-  and \code{par}.  }
-
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
-}
-
-\keyword{hplot}
diff --git a/man/print.qi.Rd b/man/print.qi.Rd
new file mode 100644
index 0000000..95a0b77
--- /dev/null
+++ b/man/print.qi.Rd
@@ -0,0 +1,22 @@
+\name{print.qi}
+\alias{print.qi}
+\title{Print a Quantity of Interest in Human-Readable Form}
+\usage{
+  \method{print}{qi}(x, ...)
+}
+\arguments{
+  \item{x}{a qi object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the object that was printed (invisibly)
+}
+\description{
+  Print simulated quantities of interest in a
+  human-readable form
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.qi.summarized.Rd b/man/print.qi.summarized.Rd
new file mode 100644
index 0000000..ced2448
--- /dev/null
+++ b/man/print.qi.summarized.Rd
@@ -0,0 +1,25 @@
+\name{print.qi.summarized}
+\alias{print.qi.summarized}
+\title{Print Method for Summarized Quantities of Interest}
+\usage{
+  \method{print}{qi.summarized}(x, \dots)
+}
+\arguments{
+  \item{x}{a 'summarized.qi' object}
+
+  \item{...}{parameters to be passed to the specific print
+  functions}
+}
+\value{
+  x (invisibly)
+}
+\description{
+  Print Method for Summarized Quantities of Interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  \link{special_print_MATRIX} and \link{special_print_LIST}
+}
+
diff --git a/man/print.setx.Rd b/man/print.setx.Rd
new file mode 100644
index 0000000..6a4c8f1
--- /dev/null
+++ b/man/print.setx.Rd
@@ -0,0 +1,21 @@
+\name{print.setx}
+\alias{print.setx}
+\title{Print values of `setx' objects}
+\usage{
+  \method{print}{setx}(x, ...)
+}
+\arguments{
+  \item{x}{a `setx' object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the value of x (invisibly)
+}
+\description{
+  Print a ``setx'' object in human-readable form.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.setx.mi.Rd b/man/print.setx.mi.Rd
new file mode 100644
index 0000000..f6fe655
--- /dev/null
+++ b/man/print.setx.mi.Rd
@@ -0,0 +1,21 @@
+\name{print.setx.mi}
+\alias{print.setx.mi}
+\title{Print a Bundle of Data-sets}
+\usage{
+  \method{print}{setx.mi}(x, ...)
+}
+\arguments{
+  \item{x}{a \code{setx} object to print}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the \code{setx} object (invisibly)
+}
+\description{
+  Print a Bundle of Data-sets
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.sim.Rd b/man/print.sim.Rd
new file mode 100644
index 0000000..20106dd
--- /dev/null
+++ b/man/print.sim.Rd
@@ -0,0 +1,22 @@
+\name{print.sim}
+\alias{print.sim}
+\title{Print values of `sim' objects}
+\usage{
+  \method{print}{sim}(x, ...)
+}
+\arguments{
+  \item{x}{a `sim' object (ignored)}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  NULL (invisibly)
+}
+\description{
+  This function is currently unimplemented, and included
+  for future development
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.summary.MCMCZelig.Rd b/man/print.summary.MCMCZelig.Rd
new file mode 100644
index 0000000..6e72e49
--- /dev/null
+++ b/man/print.summary.MCMCZelig.Rd
@@ -0,0 +1,27 @@
+\name{print.summary.MCMCZelig}
+
+\alias{print.summary.MCMCZelig}
+
+\title{Print a Summary MCMCZelig Object}
+
+\usage{
+  \method{print}{summary.MCMCZelig}(x, digits=max(3, getOption("digits") - 3), ...)
+}
+
+\arguments{
+  \item{x}{an "MCMCZelig" object}
+
+  \item{digits}{a numeric specifying the precision of the
+  summary object}
+
+  \item{...}{ignored parameters}
+}
+
+\value{
+  a \code{summary.MCMCZelig} object
+}
+
+\description{
+  This method prints a summary object for \code{MCMCZelig}
+  objects
+}
diff --git a/man/print.summary.pooled.sim.Rd b/man/print.summary.pooled.sim.Rd
new file mode 100644
index 0000000..b3b97c1
--- /dev/null
+++ b/man/print.summary.pooled.sim.Rd
@@ -0,0 +1,28 @@
+\name{print.summary.pooled.sim}
+\alias{print.summary.pooled.sim}
+\title{Print a Summary of a Set of Pooled Simulated Interests}
+\usage{
+  \method{print}{summary.pooled.sim}(x, ...)
+}
+\arguments{
+  \item{x}{a ``summary.pooled.sim'' object, containing
+  summarized information about simulated quantities of
+  interest}
+
+  \item{...}{Optional parameters that will be passed onward
+  to ``print.matrix'' (the matrix printing function)}
+}
+\value{
+  a ``summary.pooled.sim'' object storing the quantities of
+  interest
+}
+\description{
+  Prints the summary information from a set of pooled
+  simulated interests. This method assumes that quantities
+  of interest are kept in a data type which can be used
+  with ``rbind''.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.summary.relogit.Rd b/man/print.summary.relogit.Rd
new file mode 100644
index 0000000..d753d1b
--- /dev/null
+++ b/man/print.summary.relogit.Rd
@@ -0,0 +1,24 @@
+\name{print.summary.relogit}
+\alias{print.summary.relogit}
+\title{Print Summary of a Rare-event Logistic Model}
+\usage{
+  \method{print}{summary.relogit}(x, digits = max(3,
+    getOption("digits") - 3), ...)
+}
+\arguments{
+  \item{x}{an ``relogit.summary'' object produced by the
+  ``summary'' method.}
+
+  \item{digits}{an integer specifying the number of digits
+  of precision to specify}
+
+  \item{...}{parameters passed forward to the ``print.glm''
+  function}
+}
+\value{
+  x (invisibly)
+}
+\description{
+  Prints a summary of a fitted rare-event logistic model.
+}
+
diff --git a/man/print.summary.relogit2.Rd b/man/print.summary.relogit2.Rd
new file mode 100644
index 0000000..eeef863
--- /dev/null
+++ b/man/print.summary.relogit2.Rd
@@ -0,0 +1,22 @@
+\name{print.summary.relogit2}
+\alias{print.summary.relogit2}
+\title{Print Summary of a Rare-event Logistic Model}
+\usage{
+  \method{print}{summary.relogit2}(x, digits = max(3,
+    getOption("digits") - 3), ...)
+}
+\arguments{
+  \item{x}{the object to print}
+
+  \item{digits}{an integer specifying the number of digits
+  of precision}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  x (invisibly)
+}
+\description{
+  ...
+}
+
diff --git a/man/print.summary.sim.Rd b/man/print.summary.sim.Rd
new file mode 100644
index 0000000..40a6434
--- /dev/null
+++ b/man/print.summary.sim.Rd
@@ -0,0 +1,22 @@
+\name{print.summary.sim}
+\alias{print.summary.sim}
+\title{Print Values of a Summarized ``sim'' Object}
+\usage{
+  \method{print}{summary.sim}(x, ...)
+}
+\arguments{
+  \item{x}{a 'summary.sim' object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the value of the `summary.sim' object (invisibly)
+}
+\description{
+  Print values of simulated quantities of interest (stored
+  in a ``summary.sim'' object).
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.summarySim.MI.Rd b/man/print.summarySim.MI.Rd
new file mode 100644
index 0000000..3760575
--- /dev/null
+++ b/man/print.summarySim.MI.Rd
@@ -0,0 +1,21 @@
+\name{print.summarySim.MI}
+\alias{print.summarySim.MI}
+\title{Print Multiply Imputed Simulations Summary}
+\usage{
+  \method{print}{summarySim.MI}(x, digits=3, ...)
+}
+\arguments{
+  \item{x}{a 'summarySim.MI' object}
+
+  \item{digits}{an integer specifying the number of digits
+  of precision to print}
+
+  \item{...}{ignored parameters}
+}
+\description{
+  Prints summary information about Multiply Imputed Fits
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/print.zelig.Rd b/man/print.zelig.Rd
new file mode 100644
index 0000000..8560a44
--- /dev/null
+++ b/man/print.zelig.Rd
@@ -0,0 +1,21 @@
+\name{print.zelig}
+\alias{print.zelig}
+\title{Print values of ``zelig'' objects}
+\usage{
+  \method{print}{zelig}(x, ...)
+}
+\arguments{
+  \item{x}{a `zelig' object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the `zelig' object (invisibly)
+}
+\description{
+  Print the zelig object as a list
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/put.start.Rd b/man/put.start.Rd
deleted file mode 100644
index 2a41bd2..0000000
--- a/man/put.start.Rd
+++ /dev/null
@@ -1,34 +0,0 @@
-\name{put.start}
-\alias{put.start}
-\title{Set specific starting values for certain parameters}
-
-\description{ After calling \code{\link{set.start}} to create default starting values, use \code{put.start} 
-to change starting values for specific parameters or parameter sets. }
-
-\usage{
-put.start(start.val, value, terms, eqn)
-}
-
-\arguments{
-\item{start.val}{the vector of starting values created by \code{\link{set.start}}} 
-\item{value}{the scalar or vector of replacement starting values}  
-\item{terms}{the terms output from \code{\link{model.frame.multiple}}}
-\item{eqn}{character vector of the parameters for which you would like to replace
-the default values with \code{value}}
-}
-
-\value{A vector of starting values (of the same length as \code{start.val})}
-
-\seealso{\code{\link{set.start}}, and the full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}.
-}
-
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi
-<\email{falimadhi at iq.harvard.edu}>
-}
-
-
-\keyword{utilities}
-
diff --git a/man/qi.Rd b/man/qi.Rd
new file mode 100644
index 0000000..5b9625e
--- /dev/null
+++ b/man/qi.Rd
@@ -0,0 +1,61 @@
+\name{qi}
+
+\alias{qi}
+\alias{qi.exp.Rd}
+\alias{qi.logit.Rd}
+\alias{qi.negbinom.Rd}
+\alias{qi.normal.survey.Rd}
+\alias{qi.poisson.survey.Rd}
+\alias{qi.relogit.Rd}
+\alias{qi.gamma.Rd}
+\alias{qi.ls.Rd}
+\alias{qi.normal.Rd}
+\alias{qi.poisson.Rd}
+\alias{qi.probit.Rd}
+\alias{qi.relogit2.Rd}
+\alias{qi.tobit.Rd}
+
+\title{Generic Method for Computing Quantities of Interest}
+\usage{
+  qi(obj, x = NULL, x1 = NULL, y = NULL, num, param = NULL)
+}
+\arguments{
+  \item{obj}{a \code{zelig} object}
+
+  \item{x}{a \code{setx} object or NULL}
+
+  \item{x1}{an optional \code{setx} object}
+
+  \item{y}{this parameter is reserved for simulating
+  average treatment effects, though this feature is
+  currently supported by only a handful of models}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{param}{a parameters object}
+}
+\value{
+  a list of key-value pairs specifying pairing titles of
+  quantities of interest with their simulations
+}
+\description{
+  The \code{qi} function is used by developers to simulated
+  quantities of interest. This method, as a result, is the
+  most significant method of any Zelig statistical model.
+}
+\note{
+  Run \code{example(qi)} to see a trivial version of this method.
+}
+\examples{
+qi.some.model <- function(obj, x=NULL, x1=NULL, y=NULL, param=NULL) {
+  list(
+       "Expected Values: E(Y|X)" = NA,
+       "Predicted Values: Y|X"   = NA
+       )
+}
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/qi.exp.Rd b/man/qi.exp.Rd
new file mode 100644
index 0000000..6df220a
--- /dev/null
+++ b/man/qi.exp.Rd
@@ -0,0 +1,34 @@
+\name{qi.exp}
+\alias{qi.exp}
+\title{Compute quantities of interest for 'exp' Zelig models}
+\usage{
+  \method{qi}{exp}(obj, x=NULL, x1=NULL, y=NULL, num=1000,
+    param=NULL)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{x}{a 'setx' object or NULL}
+
+  \item{x1}{an optional 'setx' object}
+
+  \item{y}{this parameter is reserved for simulating
+  average treatment effects, though this feature is
+  currently supported by only a handful of models}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{param}{a parameters object}
+}
+\value{
+  a list of key-value pairs specifying pairing titles of
+  quantities of interest with their simulations
+}
+\description{
+  Compute quantities of interest for 'exp' Zelig models
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/qi.summarize.Rd b/man/qi.summarize.Rd
new file mode 100644
index 0000000..fc047cd
--- /dev/null
+++ b/man/qi.summarize.Rd
@@ -0,0 +1,36 @@
+\name{qi.summarize}
+\alias{qi.summarize}
+\title{Constructor for QI Summarized Class
+This class takes an arbitrary number of the _same_ type of
+quantities of interest labels them, then
+merges them into one simple printable block. In particular,
+this class determines which print function to use based on the
+type and size of data to be passed to the print function.}
+\usage{
+  qi.summarize(title, x, ...)
+}
+\arguments{
+  \item{title}{a character-string specifying the title of
+  the QI}
+
+  \item{x}{a list of summarized quantities of interest}
+
+  \item{...}{additional quantities of interest (the
+  parameter that titles these will be used as the name of
+  the data.frame)}
+}
+\value{
+  the list of QI's (invisibly)
+}
+\description{
+  Constructor for QI Summarized Class This class takes an
+  arbitrary number of the _same_ type of quantities of
+  interest labels them, then merges them into one simple
+  printable block. In particular, this class determines
+  which print function to use based on the type and
+  size of data to be passed to the print function.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/reduceMI.Rd b/man/reduceMI.Rd
new file mode 100644
index 0000000..e042068
--- /dev/null
+++ b/man/reduceMI.Rd
@@ -0,0 +1,24 @@
+\name{reduceMI}
+\alias{reduceMI}
+\title{Reduce MI Formulas
+Take a formula in any of the reduced form or in a structural form and return
+the most reduced form of that formula}
+\usage{
+  reduceMI(f)
+}
+\arguments{
+  \item{f}{a formula}
+}
+\description{
+  Reduce MI Formulas Take a formula in any of the reduced
+  form or in a structural form and return the most reduced
+  form of that formula
+}
+\note{
+  This formula is used primarily by 'zelig2' functions of
+  multivariate Zelig models
+}
+\author{
+  Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
+}
+
diff --git a/man/relogit.Rd b/man/relogit.Rd
new file mode 100644
index 0000000..8d4751f
--- /dev/null
+++ b/man/relogit.Rd
@@ -0,0 +1,27 @@
+\name{relogit}
+\alias{relogit}
+\title{Fit a rare-event logistic model in Zelig}
+\usage{
+  relogit(formula, data = sys.parent(), tau = NULL,
+    bias.correct = TRUE, case.control = "prior", ...)
+}
+\arguments{
+  \item{formula}{a formula object}
+
+  \item{data}{...}
+
+  \item{tau}{...}
+
+  \item{bias.correct}{...}
+
+  \item{case.control}{...}
+
+  \item{...}{???}
+}
+\value{
+  a ``relogit'' ``glm'' object
+}
+\description{
+  Fits a rare-event (``relogit'') model.
+}
+
diff --git a/man/repl.Rd b/man/repl.Rd
index 709908b..2b26275 100644
--- a/man/repl.Rd
+++ b/man/repl.Rd
@@ -1,89 +1,21 @@
 \name{repl}
-
 \alias{repl}
-\alias{repl.zelig}
-\alias{repl.default}
-
-\title{Replicating Analyses}
-
-\description{ The generic function \code{repl} command takes 
-\code{\link{zelig}} or
-  \code{\link{sim}} output objects and replicates (literally, re-runs)
-  the entire analysis.  The results should be an output 
-object
-  identical to the original input object in the case of
-  \code{\link{zelig}} output.  In the case of \code{\link{sim}}
-  output, the replicated analyses may differ slightly due to
-  stochastic randomness in the simulation procedure.  }
-
+\title{Generic Method for Replicating Data}
 \usage{
-repl(object, data, \dots)
-\method{repl}{default}(object, data = NULL, \dots)
-\method{repl}{zelig}(object, data = NULL, prev = NULL, x = NULL, x1 = NULL,
-     bootfn = NULL, \dots) 
+  repl(object, ...)
 }
-
 \arguments{
-\item{object}{Stored output from either \code{\link{zelig}} or
-    \code{\link{sim}}.}
-\item{data}{You may manually input the data frame name rather
-  than allowing \code{repl} to draw the data frame name from the object
-  to be replicated.}
-\item{prev}{When replicating \code{\link{sim}} output, you may
-  optionally use the previously simulated parameters to calculate the
-  quantities of interest rather than simulating a new set of
-  parameters.  For all models, this should produce identical
-  quantities of interest.  In addition, for if the parameters were
-  bootstrapped in the original analysis, this will save a considerable
-  amount of time. }
-\item{x}{When replicating \code{\link{sim}} output, you may
-  optionally use an alternative \code{\link{setx}} value for the \code{x}
-  input. } 
-\item{x1}{When replicating \code{\link{sim}} output, you may
-  optionally use an alternative \code{\link{setx}} object for the \code{x1}
-  input to replicating the \code{\link{sim}} object. }
-\item{bootfn}{When replicating \code{\link{sim}} output with
-    bootstrapped parameters, you should manually specify the
-    \code{bootfn} if a non-default option was used.  }
-\item{\dots}{Additional arguments passed to either \code{\link{zelig}} or 
-\code{\link{sim}}.  }
-}
+  \item{object}{a 'zelig' object}
 
+  \item{...}{parameters}
+}
 \value{
-For \code{\link{zelig}} output, \code{repl} will create output that is in
-every way identical to the original input.  You may check to see
-whether they are identical by using the \code{identical} command.  
-
-For \code{\link{sim}} output, \code{repl} output will be will be
-identical to the original object if you choose not to simulate new
-parameters, and instead choose to calculate quantities of interest
-using the previously simulated parameters (using the \code{prev}
-option.  If you choose to simulate new parameters, the summary
-statistics for each quantity of interest should be identical, up to a
-random approximation error.  As the number of simulations increases,
-this error decreases.
+  a replicated object
 }
-
-\seealso{ \code{\link{zelig}}, \code{\link{setx}}, and
-  \code{\link{sim}}.  In addition, the full Zelig manual may be
-  accessed online at \url{http://gking.harvard.edu/zelig}.  }
-
-\examples{
-data(turnout)
-z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout[1:1000,])
-x.out <- setx(z.out)
-s.out <- sim(z.out, x = x.out)
-z.rep <- repl(z.out)
-identical(z.out$coef, z.rep$coef)
-z.alt <- repl(z.out, data = turnout[1001:2000,])
-s.rep <- repl(s.out, prev = s.out$par)
-identical(s.out$ev, s.rep$ev)
+\description{
+  Generic Method for Replicating Data
 }
-
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  Kosuke Imai and Olivia Lau \email{mowen at iq.harvard.edu}
 }
 
-\keyword{file}
diff --git a/man/repl.default.Rd b/man/repl.default.Rd
new file mode 100644
index 0000000..02cbdbf
--- /dev/null
+++ b/man/repl.default.Rd
@@ -0,0 +1,23 @@
+\name{repl.default}
+\alias{repl.default}
+\title{Default Method for Replicating Statistics}
+\usage{
+  \method{repl}{default}(object, data=NULL, ...)
+}
+\arguments{
+  \item{object}{an object to replicate}
+
+  \item{data}{a data.frame}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a replicated object
+}
+\description{
+  Replicate a simulation
+}
+\author{
+  Kosuke Imai and Olivia Lau \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/repl.sim.Rd b/man/repl.sim.Rd
new file mode 100644
index 0000000..8cac275
--- /dev/null
+++ b/man/repl.sim.Rd
@@ -0,0 +1,45 @@
+\name{repl.sim}
+\alias{repl.sim}
+\title{Method for Replicating Simulated Quantities of Interest}
+\usage{
+  \method{repl}{sim}(object, x=NULL, x1=NULL, y=NULL,
+    num=1000, prev = NULL, bootstrap = FALSE, boot.fn=NULL,
+    cond.data = NULL, ...)
+}
+\arguments{
+  \item{object}{a 'zelig' object}
+
+  \item{x}{a 'setx' object}
+
+  \item{x1}{a secondary 'setx' object used to perform
+  particular computations of quantities of interest}
+
+  \item{y}{a parameter reserved for the computation of
+  particular quantities of interest (average treatment
+  effects). Few models currently support this parameter}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{prev}{ignored}
+
+  \item{bootstrap}{ignored}
+
+  \item{boot.fn}{ignored}
+
+  \item{cond.data}{ignored}
+
+  \item{...}{special parameters which are reserved for
+  future versions of Zelig}
+}
+\value{
+  a 'sim' object storing the replicated quantities of
+  interest
+}
+\description{
+  Replicate simulated quantities of interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/replace.call.Rd b/man/replace.call.Rd
new file mode 100644
index 0000000..e511dd6
--- /dev/null
+++ b/man/replace.call.Rd
@@ -0,0 +1,26 @@
+\name{replace.call}
+\alias{replace.call}
+\title{Hook to Update the Zelig Call with the Appropriate Call Object}
+\usage{
+  replace.call(zobj, call1, call2)
+}
+\arguments{
+  \item{zobj}{a 'zelig' object}
+
+  \item{call1}{the original call to Zelig}
+
+  \item{call2}{the manufactured call to the model fitting
+  function}
+}
+\value{
+  the 'zelig' object with a modified 'call' slot
+}
+\description{
+  Hook to Update the Zelig Call with the Appropriate Call
+  Object
+}
+\note{
+  This function is used internally by Zelig, and currently
+  deprecated.
+}
+
diff --git a/man/robust.gee.hook.Rd b/man/robust.gee.hook.Rd
new file mode 100644
index 0000000..e62d57b
--- /dev/null
+++ b/man/robust.gee.hook.Rd
@@ -0,0 +1,32 @@
+\name{robust.gee.hook}
+\alias{robust.gee.hook}
+\title{Classify Fitted Object as Naive or Robust}
+\usage{
+  robust.gee.hook(obj, Zall, Call, robust, ...)
+}
+\arguments{
+  \item{obj}{a \code{zelig} object}
+
+  \item{Zall}{the call made to the \code{zelig} function}
+
+  \item{Call}{the call made to the external model}
+
+  \item{robust}{a logical specifying whether to use the
+  naive or robust covariance matrix}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a \code{zelig} object with the additional class
+  \code{gee.robust} or \code{gee.naive}
+}
+\description{
+  This hook is run after the call to the external model. It
+  sets the class of the object (in addition to its other
+  designations) as 'gee.naive' or 'gee.robust' depending on
+  the value of the \code{robust} parameter.
+}
+\author{
+  Skyler
+}
+
diff --git a/man/robust.glm.hook.Rd b/man/robust.glm.hook.Rd
new file mode 100644
index 0000000..ccdeb84
--- /dev/null
+++ b/man/robust.glm.hook.Rd
@@ -0,0 +1,26 @@
+\name{robust.glm.hook}
+\alias{robust.glm.hook}
+\title{Hook for ``glm'' Models in Zelig}
+\usage{
+  robust.glm.hook(obj, zcall, call, robust = FALSE, ...)
+}
+\arguments{
+  \item{obj}{a zelig object}
+
+  \item{zcall}{the original call to the zelig model}
+
+  \item{call}{the call that will be evaluated for the}
+
+  \item{robust}{a logical specifying whether or not to use
+  robust error estimates}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the fitted model object
+}
+\description{
+  Adds support for robust error-estimates in the Zelig
+  ``glm'' models.
+}
+
diff --git a/man/rocplot.Rd b/man/rocplot.Rd
index ec943bd..6d388e6 100644
--- a/man/rocplot.Rd
+++ b/man/rocplot.Rd
@@ -1,83 +1,80 @@
 \name{rocplot}
-
 \alias{rocplot}
-\alias{roc}
-\alias{ROC}
-\alias{ROCplot}
-
 \title{Receiver Operator Characteristic Plots}
-
-\description{ The \code{rocplot} command generates a receiver operator
-characteristic plot to compare the in-sample (default) or out-of-sample
-fit for two logit or probit regressions.  }
-
 \usage{
-rocplot(y1, y2, fitted1, fitted2, cutoff = seq(from=0, to=1, length=100), 
-        lty1 = "solid", lty2 = "dashed", lwd1 = par("lwd"), lwd2 = par("lwd"),
-        col1 = par("col"), col2 = par("col"), main, xlab, ylab,
-        plot = TRUE, ...)
+  rocplot(y1, y2, fitted1, fitted2, cutoff = seq(from=0,
+    to=1, length=100), lty1="solid", lty2="dashed",
+    lwd1=par("lwd"), lwd2=par("lwd"), col1=par("col"),
+    col2=par("col"), main="ROC Curve",
+    xlab = "Proportion of 1's Correctly Predicted",
+    ylab="Proportion of 0's Correctly Predicted",
+    plot = TRUE, ... )
 }
-
 \arguments{
-\item{y1}{Response variable for the first model.}
-\item{y2}{Response variable for the second model.}
-\item{fitted1}{Fitted values for the first model.  These values
-  may represent either the in-sample or out-of-sample fitted values.}
-\item{fitted2}{Fitted values for the second model.} 
-\item{cutoff}{A vector of cut-off values between 0 and 1, at
-  which to evaluate the proportion of 0s and 1s correctly predicted by
-  the first and second model.  By default, this is 100 increments
-  between 0 and 1, inclusive.}
-\item{lty1, lty2}{The line type for the first model (\code{lty1}) and
-  the second model (\code{lty2}), defaulting to solid and dashed,
-  respectively.}
-\item{lwd1, lwd2}{The width of the line for the first model
-  (\code{lwd1}) and the second model (\code{lwd2}), defaulting to 1 for both.}
-\item{col1, col2}{The colors of the line for the first
-  model (\code{col1}) and the second model (\code{col2}), defaulting to
-  black for both.}
-\item{main}{a title for the plot.  Defaults to \samp{ROC Curve}.}
-\item{xlab}{a label for the x-axis.  Defaults to \samp{Proportion of 1's 
-    Correctly Predicted}.}
-\item{ylab}{a label for the y-axis.  Defaults to \samp{Proportion of 0's 
-    Correctly Predicted}.}
-\item{plot}{defaults to \code{TRUE}, which generates a plot to the
-  selected device.  If \code{FALSE}, returns a list of
-items (see below).} 
-\item{\dots}{Additional parameters passed to plot, including
-  \code{xlab}, \code{ylab}, and \code{main}.  }
-}
+  \item{y1}{response variable for the first model}
 
-\value{ If \code{plot = TRUE}, \code{rocplot} generates an ROC plot for
-two logit or probit models.  If \code{plot = FALSE}, \code{rocplot}
-returns a list with the following elements:
-  \item{roc1}{a matrix containing a vector of x-coordinates and
-    y-coordinates corresponding to the number of ones and zeros correctly
-    predicted for the first model.}
-  \item{roc2}{a matrix containing a vector of x-coordinates and
-    y-coordinates corresponding to the number of ones and zeros correctly
-    predicted for the second model.}
-  \item{area1}{the area under the first ROC curve, calculated using
-    Reimann sums.}
-  \item{area2}{the area under the second ROC curve, calculated using
-    Reimann sums.}
-}
+  \item{y2}{response variable for the second model}
 
-\examples{
-data(turnout)
-z.out1 <- zelig(vote ~ race + educate + age, model = "logit", 
-  data = turnout)
-z.out2 <- zelig(vote ~ race + educate, model = "logit", 
-  data = turnout)
-rocplot(z.out1$y, z.out2$y, fitted(z.out1), fitted(z.out2))
-}
+  \item{fitted1}{fitted values for the first model. These
+  values may represent either the in-sample or
+  out-of-sample fitted values}
+
+  \item{fitted2}{fitted values for the second model}
+
+  \item{cutoff}{A vector of cut-off values between 0 and 1,
+  at which to evaluate the proportion of 0s and 1s
+  correctly predicted by the first and second model.  By
+  default, this is 100 increments between 0 and 1
+  inclusive}
+
+  \item{lty1}{the line type of the first model (defaults to
+  'solid')}
 
-\seealso{ The full Zelig manual (available at
-  \url{http://gking.harvard.edu/zelig}), \code{plot}, \code{lines}.  }
+  \item{lty2}{the line type of the second model (defaults
+  to 'dashed')}
 
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  \item{lwd1}{the line width of the first model (defaults
+  to 1)}
+
+  \item{lwd2}{the line width of the second model (defaults
+  to 1)}
+
+  \item{col1}{the color of the first model (defaults to
+  'black')}
+
+  \item{col2}{the color of the second model (defaults to
+  'black')}
+
+  \item{main}{a title for the plot (defaults to "ROC
+  Curve")}
+
+  \item{xlab}{a label for the X-axis}
+
+  \item{ylab}{a label for the Y-axis}
+
+  \item{plot}{whether to generate a plot to the selected
+  device}
+
+  \item{\dots}{additional parameters to be passed to the
+  plot}
+}
+\value{
+  if plot is TRUE, rocplot simply generates a plot.
+  Otherwise, a list with the following is produced:
+  \item{roc1}{a matrix containing a vector of x-coordinates
+  and y-coordinates corresponding to the number of ones and
+  zeros correctly predicted for the first model.}
+  \item{roc2}{a matrix containing a vector of x-coordinates
+  and y-coordinates corresponding to the number of ones and
+  zeros correctly predicted for the second model.}
+  \item{area1}{the area under the first ROC curve,
+  calculated using Riemann sums.} \item{area2}{the area
+  under the second ROC curve, calculated using Riemann
+  sums.}
+}
+\description{
+  The 'rocplot' command generates a receiver operator
+  characteristic plot to compare the in-sample (default) or
+  out-of-sample fit for two logit or probit regressions.
 }
 
-\keyword{file}
diff --git a/man/set.start.Rd b/man/set.start.Rd
deleted file mode 100644
index 64e8ae6..0000000
--- a/man/set.start.Rd
+++ /dev/null
@@ -1,40 +0,0 @@
-\name{set.start}
-\alias{set.start}
-\title{Set starting values for all parameters}
-
-\description{After using \code{\link{parse.par}} and \code{\link{model.matrix.multiple}}, use 
-\code{set.start} to set starting values for all parameters.  By default, starting values are set to 0.  If 
-you wish to select alternative starting values for certain parameters, use \code{\link{put.start}} after 
-\code{set.start}.}
-
-\usage{
-set.start(start.val = NULL, terms)
-}
-
-\arguments{ 
-\item{start.val}{user-specified starting values.  If \code{NULL} (default), the default 
-starting values for all parameters are set to 0.} 
-\item{terms}{the terms output from \code{\link{model.frame.multiple}}}
-}
-
-\value{
-A named vector of starting values for all parameters specified in \code{terms}, defaulting to 0.  
-}
-
-\examples{
-\dontrun{
-fml <- parse.formula(formula, model = "bivariate.probit")
-D <- model.frame(fml, data = data)
-terms <- attr(D, "terms")
-start.val <- set.start(start.val = NULL, terms)
-}}
-
-\seealso{\code{\link{put.start}}, \code{\link{parse.par}}, \code{\link{model.frame.multiple}}, and the 
-full Zelig manual at \url{http://gking.harvard.edu/zelig}.}
-
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>; Ferdinand Alimadhi
-<\email{falimadhi at iq.harvard.edu}>
-}
-\keyword{utilities}
diff --git a/man/setx.MI.Rd b/man/setx.MI.Rd
new file mode 100644
index 0000000..e0c0b41
--- /dev/null
+++ b/man/setx.MI.Rd
@@ -0,0 +1,33 @@
+\name{setx.MI}
+\alias{setx.MI}
+\title{Set Explanatory Variables for Multiply Imputed Data-sets
+This function simply calls setx.default once for every fitted model
+within the 'zelig.MI' object}
+\usage{
+  \method{setx}{MI}(obj, ..., data=NULL)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{...}{user-defined values of specific variables for
+  overwriting the default values set by the function
+  \code{fn}}
+
+  \item{data}{a new data-frame}
+}
+\value{
+  a 'setx.mi' object used for computing Quantities of
+  Interest by the 'sim' method
+}
+\description{
+  Set Explanatory Variables for Multiply Imputed Data-sets
+  This function simply calls setx.default once for every
+  fitted model within the 'zelig.MI' object
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  \link{setx}
+}
+
diff --git a/man/setx.Rd b/man/setx.Rd
index c480b98..ac33610 100644
--- a/man/setx.Rd
+++ b/man/setx.Rd
@@ -1,122 +1,66 @@
 \name{setx}
-
 \alias{setx}
-\alias{setx.default}
-
 \title{Setting Explanatory Variable Values}
-
-\description{ The \code{setx} command uses the variables identified in
-  the \code{formula} generated by \code{zelig} and sets the values of
-  the explanatory variables to the selected values.  Use \code{setx}
-  after \code{zelig} and before \code{sim} to simulate quantities of
-  interest.  }
-
 \usage{
-setx(object, ...)
-
-\method{setx}{default}(object, 
-             fn = list(numeric = mean, ordered = median, other = mode), 
-             data = NULL, 
-             cond = FALSE, counter = NULL,
-             \dots
-             )
+  setx(obj, fn = NULL, data = NULL, cond = FALSE, ...)
 }
-
 \arguments{
-  \item{object}{the saved output from \code{\link{zelig}}. }
-  \item{fn}{a list of functions to apply to three types of variables:
-      \itemize{
-      \item{numeric}{\code{numeric} variables are set to their mean by
-      default, but you may select any mathematical function to apply to
-      numeric variables.}
-      \item{ordered}{\code{ordered} factors are set to their meidan by
-      default, and most mathematical operations will work on them.  If
-      you select \code{ordered = mean}, however, \code{setx} will
-      default to median with a warning.}
-      \item{other}{variables may consist of unordered factors, character
-	strings, or logical variables.  The \code{other} variables may
-	only be set to their mode.  If you wish to set one of the other
-	variables to a specific value, you may do so using \code{\dots}
-	below. }
-      }
-    In the special case \code{fn = NULL}, \code{setx} will return all
-    of the observations without applying any function to the data.  }
+  \item{obj}{the saved output from zelig}
+
+  \item{fn}{a list of functions to apply to the data frame}
+
   \item{data}{a new data frame used to set the values of
-    explanatory variables. If \code{data = NULL} (the default), the
-    data frame called in \code{zelig} is used. }
-  \item{cond}{a logical value indicating whether unconditional
-    (default) or conditional (choose \code{cond = TRUE}) prediction
-    should be performed.  If you choose \code{cond = TRUE}, \code{setx}
-    will coerce \code{fn = NULL} and ignore the additional arguments in 
-    \code{\dots}.  If \code{cond = TRUE} and \code{data = NULL},
-    \code{setx} will prompt you for a data frame.  }  
-  \item{counter}{a deprecated parameter}
-  \item{\dots}{user-defined values of specific variables
-    overwriting the default values set by the function \code{fn}.  For
-    example, adding \code{var1 = mean(data\$var1)} or \code{x1 = 12}
-    explicitly sets the value of \code{x1} to 12.  In addition, you may
-    specify one explanatory variable as a range of values, creating one
-    observation for every unique value in the range of values. }
+  explanatory variables. If data = NULL (the default), the
+  data frame called in zelig is used}
+
+  \item{cond}{a logical value indicating whether
+  unconditional (default) or conditional (choose \code{cond
+  = TRUE}) prediction should be performed.  If you choose
+  \code{cond = TRUE}, \code{setx} will coerce \code{fn =
+  NULL} and ignore the additional arguments in
+  \code{\dots}.  If \code{cond = TRUE} and \code{data =
+  NULL}, \code{setx} will prompt you for a data frame.}
+
+  \item{...}{user-defined values of specific variables for
+  overwriting the default values set by the function
+  \code{fn}.  For example, adding \code{var1 =
+  mean(data\$var1)} or \code{x1 = 12} explicitly sets the
+  value of \code{x1} to 12.  In addition, you may specify
+  one explanatory variable as a range of values, creating
+  one observation for every unique value in the range of
+  values}
 }
-
 \value{
-  For unconditional prediction, \code{x.out} is a model matrix based
-  on the specified values for the explanatory variables.  For multiple
-  analyses (i.e., when choosing the \code{by} option in \code{\link{zelig}},
-  \code{setx} returns the selected values calculated over the entire
-  data frame.  If you wish to calculate values over just one subset of
-  the data frame, the 5th subset for example, you may use:  
-\code{x.out <- setx(z.out[[5]])}
-
-For conditional prediction, \code{x.out} includes the model matrix
-  and the dependent variables.  For multiple analyses (when choosing
-  the \code{by} option in \code{zelig}), \code{setx} returns the
-  observed explanatory variables in each subset.
+  For unconditional prediction, \code{x.out} is a model
+  matrix based on the specified values for the explanatory
+  variables.  For multiple analyses (i.e., when choosing
+  the \code{by} option in \code{\link{zelig}}), \code{setx}
+  returns the selected values calculated over the entire
+  data frame.  If you wish to calculate values over just
+  one subset of the data frame, the 5th subset for example,
+  you may use: \code{x.out <- setx(z.out[[5]])}
+}
+\description{
+  The \code{setx} command uses the variables identified in
+  the \code{formula} generated by \code{zelig} and sets the
+  values of the explanatory variables to the selected
+  values.  Use \code{setx} after \code{zelig} and before
+  \code{sim} to simulate quantities of interest.
 }
-
 \examples{
 # Unconditional prediction:
 data(turnout)
 z.out <- zelig(vote ~ race + educate, model = "logit", data = turnout)
 x.out <- setx(z.out)
 s.out <- sim(z.out, x = x.out)
-
-# Unconditional prediction with all observations:
-x.out <- setx(z.out, fn = NULL)
-s.out <- sim(z.out, x = x.out)
-
-# Unconditional prediction with out of sample data:
-z.out <- zelig(vote ~ race + educate, model = "logit",
-               data = turnout[1:1000,])
-x.out <- setx(z.out, data = turnout[1001:2000,])
-s.out <- sim(z.out, x = x.out)
-
-# Using a user-defined function in fn:
-\dontrun{
-quants <- function(x)
-  quantile(x, 0.25)
-x.out <- setx(z.out, fn = list(numeric = quants))
-}
-
-# Conditional prediction:  
-\dontrun{library(MatchIt)
-data(lalonde)
-match.out <- matchit(treat ~ age + educ + black + hispan + married + 
-                     nodegree + re74 + re75, data = lalonde)
-z.out <- zelig(re78 ~ distance, data = match.data(match.out, "control"), 
-               model = "ls")
-x.out <- setx(z.out, fn = NULL, data = match.data(match.out, "treat"),
-	      cond = TRUE)
-s.out <- sim(z.out, x = x.out)
 }
-}
-
-\seealso{ The full Zelig manual may be accessed online at
-  \url{http://gking.harvard.edu/zelig}.  }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  Matt Owen \email{mowen at iq.harvard.edu}, Olivia Lau and
+  Kosuke Imai
+}
+\seealso{
+  The full Zelig manual may be accessed online at
+  \url{http://gking.harvard.edu/zelig}
 }
-
 \keyword{file}
+
diff --git a/man/setx.default.Rd b/man/setx.default.Rd
new file mode 100644
index 0000000..beda3a6
--- /dev/null
+++ b/man/setx.default.Rd
@@ -0,0 +1,31 @@
+\name{setx.default}
+\alias{setx.default}
+\title{Set explanatory variables}
+\usage{
+  \method{setx}{default}(obj, fn=NULL, data=NULL,
+    cond=FALSE, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{fn}{a list of key-value pairs specifying which
+  function apply to columns of the keys data-types}
+
+  \item{data}{a data.frame}
+
+  \item{cond}{ignored}
+
+  \item{...}{parameters specifying what to explicitly set
+  each column as. This is used to produce counterfactuals}
+}
+\value{
+  a 'setx' object
+}
+\description{
+  Set explanatory variables
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai, and
+  Olivia Lau
+}
+
diff --git a/man/sim.MI.Rd b/man/sim.MI.Rd
new file mode 100644
index 0000000..c55f64b
--- /dev/null
+++ b/man/sim.MI.Rd
@@ -0,0 +1,38 @@
+\name{sim.MI}
+\alias{sim.MI}
+\title{Simulate Multiply Imputed Data}
+\usage{
+  \method{sim}{MI}(obj, x=NULL, x1=NULL, y=NULL, num=1000,
+    ...)
+}
+\arguments{
+  \item{obj}{a 'zelig.MI' object containing several fits
+  for two or more subsetted data-frames}
+
+  \item{x}{a 'setx.mi' object containing explanatory
+  variables for each fitted model}
+
+  \item{x1}{a 'setx.mi' object containing explanatory
+  variables for each fitted model}
+
+  \item{y}{this feature is currently unimplemented}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a 'sim.MI' with simulated quantities of interest for each
+  fitted model contained by 'obj'
+}
+\description{
+  Simulate Multiply Imputed Data
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+\seealso{
+  \link{sim}
+}
+
diff --git a/man/sim.Rd b/man/sim.Rd
index ebe03c1..d10da7d 100644
--- a/man/sim.Rd
+++ b/man/sim.Rd
@@ -1,147 +1,123 @@
 \name{sim}
-
 \alias{sim}
-\alias{sim.default}
+\title{Generic Method for Computing and Organizing Simulated Quantities of Interest
+Simulate quantities of interest from the estimated model
+output from \code{zelig()} given specified values of explanatory
+variables established in \code{setx()}.  For classical \emph{maximum
+likelihood} models, \code{sim()} uses asymptotic normal
+approximation to the log-likelihood.  For \emph{Bayesian models},
+Zelig simulates quantities of interest from the posterior density,
+whenever possible.  For \emph{robust Bayesian models}, simulations
+are drawn from the identified class of Bayesian posteriors.
+Alternatively, you may generate quantities of interest using
+bootstrapped parameters.}
+\usage{
+  sim(obj, x = NULL, x1 = NULL, y = NULL, num = 1000,
+    bootstrap = FALSE, bootfn = NULL, cond.data = NULL, ...)
+}
+\arguments{
+  \item{obj}{the output object from zelig}
 
-\title{Simulating Quantities of Interest}
+  \item{x}{values of explanatory variables used for
+  simulation, generated by setx}
 
-\description{ Simulate quantities of interest from the estimated model
-  output from \code{zelig()} given specified values of explanatory
-  variables established in \code{setx()}.  For classical \emph{maximum
-    likelihood} models, \code{sim()} uses asymptotic normal
-  approximation to the log-likelihood.  For \emph{Bayesian models},
-  Zelig simulates quantities of interest from the posterior density,
-  whenever possible.  For \emph{robust Bayesian models}, simulations
-  are drawn from the identified class of Bayesian posteriors.
-  Alternatively, you may generate quantities of interest using
-  bootstrapped parameters.  }
+  \item{x1}{optional values of explanatory variables
+  (generated by a second call of setx) used to perform
+  particular computations of quantities of interest}
 
-\usage{
-sim(object, x = NULL, ...)
+  \item{y}{a parameter reserved for the computation of
+  particular quantities of interest (average treatment
+  effects). Few models currently support this parameter}
 
-\method{sim}{default}(object, x=NULL, x1=NULL,
-            num=c(1000, 100),
-            prev = NULL, bootstrap = FALSE, bootfn=NULL,
-            cond.data = NULL, \dots)
-}
+  \item{num}{an integer specifying the number of
+  simulations to compute}
 
-\arguments{
-  \item{object}{the output object from \code{\link{zelig}}. }
-  \item{x}{values of explanatory variables used for simulation,
-      generated by \code{\link{setx}}.  }
-  \item{x1}{optional values of explanatory variables (generated by a
-      second call of \code{\link{setx}}), used to simulate first
-      differences and risk ratios.  (Not available for conditional
-      prediction.) }
-  \item{num}{the number of simulations, i.e., posterior draws.  If the
-      \code{num} argument is omitted, \code{sim} draws 1,000
-      simulations by if \code{bootstrap = FALSE} (the default), or 100
-      simulations if \code{bootstrap = TRUE}.  You may increase this
-      value to improve accuracy.  (Not available for conditional
-      prediction.) }
-  \item{prev}{a previous setx object to use to simulate}
-  \item{bootstrap}{a logical value indicating if parameters
-    should be generated by re-fitting the model for bootstrapped
-    data, rather than from the likelihood or posterior.  (Not
-    available for conditional prediction.) }
-  \item{bootfn}{a function which governs how the data is
-    sampled, re-fits the model, and returns the bootstrapped model
-    parameters.  If \code{bootstrap = TRUE} and \code{bootfn = NULL},
-    \code{\link{sim}} will sample observations from the original data
-    (with
-    replacement) until it creates a sampled dataset with the same
-    number of observations as the original data.  Alternative
-    bootstrap methods include sampling the residuals rather than the
-    observations, weighted sampling, and parametric bootstrapping.
-    (Not available for conditional prediction.) }  
-  \item{cond.data}{specify conditional data}
-  \item{\dots}{additional optional arguments passed to
-    \code{boot}. }
-}
+  \item{bootstrap}{currently unsupported}
+
+  \item{bootfn}{currently unsupported}
+
+  \item{cond.data}{currently unsupported}
 
-\value{ The output stored in \code{s.out} varies by model.  Use the
-  \code{names} command to view the output stored in \code{s.out}.
-  Common elements include: 
-  \item{x}{the \code{\link{setx}} values for the explanatory variables,
-    used to calculate the quantities of interest (expected values,
-    predicted values, etc.). }
-  \item{x1}{the optional \code{\link{setx}} object used to simulate
-    first differences, and other model-specific quantities of
-    interest, such as risk-ratios.}
-  \item{call}{the options selected for \code{\link{sim}}, used to
-    replicate quantities of interest. } 
-\item{zelig.call}{the original command and options for
-    \code{\link{zelig}}, used to replicate analyses. }
-  \item{num}{the number of simulations requested. }
-  \item{par}{the parameters (coefficients, and additional
-    model-specific parameters).  You may wish to use the same set of
-    simulated parameters to calculate quantities of interest rather
-    than simulating another set.}
-  \item{qi\$ev}{simulations of the expected values given the
-    model and \code{x}. }
-  \item{qi\$pr}{simulations of the predicted values given by the
-    fitted values. }
-  \item{qi\$fd}{simulations of the first differences (or risk
-    difference for binary models) for the given \code{x} and \code{x1}.
-    The difference is calculated by subtracting the expected values
-    given \code{x} from the expected values given \code{x1}.  (If do not
-    specify \code{x1}, you will not get first differences or risk
-    ratios.) }
-  \item{qi\$rr}{simulations of the risk ratios for binary and
-    multinomial models.  See specific models for details.}
+  \item{...}{arguments reserved for future versions of Zelig}
+}
+\value{
+  The output stored in \code{s.out} varies by model.  Use
+  the \code{names} command to view the output stored in
+  \code{s.out}.  Common elements include: \item{x}{the
+  \code{\link{setx}} values for the explanatory variables,
+  used to calculate the quantities of interest (expected
+  values, predicted values, etc.). } \item{x1}{the optional
+  \code{\link{setx}} object used to simulate first
+  differences, and other model-specific quantities of
+  interest, such as risk-ratios.} \item{call}{the options
+  selected for \code{\link{sim}}, used to replicate
+  quantities of interest. } \item{zelig.call}{the original
+  command and options for \code{\link{zelig}}, used to
+  replicate analyses. } \item{num}{the number of
+  simulations requested. } \item{par}{the parameters
+  (coefficients, and additional model-specific parameters).
+  You may wish to use the same set of simulated parameters
+  to calculate quantities of interest rather than
+  simulating another set.} \item{qi\$ev}{simulations of the
+  expected values given the model and \code{x}. }
+  \item{qi\$pr}{simulations of the predicted values given
+  by the fitted values. } \item{qi\$fd}{simulations of the
+  first differences (or risk difference for binary models)
+  for the given \code{x} and \code{x1}.  The difference is
+  calculated by subtracting the expected values given
+  \code{x} from the expected values given \code{x1}.  (If
+  do not specify \code{x1}, you will not get first
+  differences or risk ratios.) } \item{qi\$rr}{simulations
+  of the risk ratios for binary and multinomial models.
+  See specific models for details.}
   \item{qi\$ate.ev}{simulations of the average expected
-    treatment effect for the treatment group, using conditional
-    prediction. Let \eqn{t_i} be a binary explanatory variable defining
-    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
-    average expected treatment effect for the treatment group is
-    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \mid t_i=1 \,],} 
-    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
-    observation \eqn{i} in the treatment group.  Variation in the
-    simulations are due to uncertainty in simulating \eqn{E[Y_i(t_i=0)]},
-    the counterfactual expected value of \eqn{Y_i} for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to \eqn{t_i=0}. }
+  treatment effect for the treatment group, using
+  conditional prediction. Let \eqn{t_i} be a binary
+  explanatory variable defining the treatment (\eqn{t_i=1})
+  and control (\eqn{t_i=0}) groups.  Then the average
+  expected treatment effect for the treatment group is
+  \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+  E[Y_i(t_i=0)] \mid t_i=1 \,],} where \eqn{Y_i(t_i=1)} is
+  the value of the dependent variable for observation
+  \eqn{i} in the treatment group.  Variation in the
+  simulations are due to uncertainty in simulating
+  \eqn{E[Y_i(t_i=0)]}, the counterfactual expected value of
+  \eqn{Y_i} for observations in the treatment group, under
+  the assumption that everything stays the same except that
+  the treatment indicator is switched to \eqn{t_i=0}. }
   \item{qi\$ate.pr}{simulations of the average predicted
-    treatment effect for the treatment group, using conditional
-    prediction. Let \eqn{t_i} be a binary explanatory variable defining
-    the treatment (\eqn{t_i=1}) and control (\eqn{t_i=0}) groups.  Then the
-    average predicted treatment effect for the treatment group is
-    \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} 
-    where \eqn{Y_i(t_i=1)} is the value of the dependent variable for
-    observation \eqn{i} in the treatment group.  Variation in the
-    simulations are due to uncertainty in simulating
-    \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual predicted value of
-    \eqn{Y_i} for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to \eqn{t_i=0}. 
+  treatment effect for the treatment group, using
+  conditional prediction. Let \eqn{t_i} be a binary
+  explanatory variable defining the treatment (\eqn{t_i=1})
+  and control (\eqn{t_i=0}) groups.  Then the average
+  predicted treatment effect for the treatment group is
+  \deqn{ \frac{1}{n}\sum_{i=1}^n [ \, Y_i(t_i=1) -
+  \widehat{Y_i(t_i=0)} \mid t_i=1 \,],} where
+  \eqn{Y_i(t_i=1)} is the value of the dependent variable
+  for observation \eqn{i} in the treatment group.
+  Variation in the simulations are due to uncertainty in
+  simulating \eqn{\widehat{Y_i(t_i=0)}}, the counterfactual
+  predicted value of \eqn{Y_i} for observations in the
+  treatment group, under the assumption that everything
+  stays the same except that the treatment indicator is
+  switched to \eqn{t_i=0}.}
 }
-
-In the case of censored $Y$ in the exponential, Weibull, and lognormal
-models, \code{sim} first imputes the uncensored values for $Y$ before
-calculating the ATE.  
-
-You may use the \code{\$} operator to extract any of the
-above from \code{s.out}.  For example, \code{s.out\$qi\$ev} extracts the
-simulated expected values.
+\description{
+  Generic Method for Computing and Organizing Simulated
+  Quantities of Interest Simulate quantities of interest
+  from the estimated model output from \code{zelig()} given
+  specified values of explanatory variables established in
+  \code{setx()}.  For classical \emph{maximum likelihood}
+  models, \code{sim()} uses asymptotic normal approximation
+  to the log-likelihood.  For \emph{Bayesian models}, Zelig
+  simulates quantities of interest from the posterior
+  density, whenever possible.  For \emph{robust Bayesian
+  models}, simulations are drawn from the identified class
+  of Bayesian posteriors. Alternatively, you may generate
+  quantities of interest using bootstrapped parameters.
 }
-
-\seealso{The full Zelig at \url{http://gking.harvard.edu/zelig}, and \code{boot}.  }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  Matt Owen \email{mowen at iq.harvard.edu}, Olivia Lau and
+  Kosuke Imai
 }
 
-\keyword{file}
-
-
-
-
-
-
-
-
-
-
diff --git a/man/sim.default.Rd b/man/sim.default.Rd
new file mode 100644
index 0000000..92a7ee8
--- /dev/null
+++ b/man/sim.default.Rd
@@ -0,0 +1,43 @@
+\name{sim.default}
+\alias{sim.default}
+\title{Method for Simulating Quantities of Interest from 'zelig' Objects}
+\usage{
+  \method{sim}{default}(obj, x=NULL, x1=NULL, y=NULL,
+    num=1000, bootstrap = FALSE, bootfn=NULL, cond.data =
+    NULL, ...)
+}
+\arguments{
+  \item{obj}{a 'zelig' object}
+
+  \item{x}{a 'setx' object}
+
+  \item{x1}{a secondary 'setx' object used to perform
+  particular computations of quantities of interest}
+
+  \item{y}{a parameter reserved for the computation of
+  particular quantities of interest (average treatment
+  effects). Few models currently support this parameter}
+
+  \item{num}{an integer specifying the number of
+  simulations to compute}
+
+  \item{bootstrap}{ignored}
+
+  \item{bootfn}{ignored}
+
+  \item{cond.data}{ignored}
+
+  \item{...}{parameters to be passed to the boot function,
+  if one is supplied}
+}
+\value{
+  a 'sim' object storing the replicated quantities of
+  interest
+}
+\description{
+  Simulate quantities of interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/simulation.matrix.Rd b/man/simulation.matrix.Rd
new file mode 100644
index 0000000..8117fbb
--- /dev/null
+++ b/man/simulation.matrix.Rd
@@ -0,0 +1,28 @@
+\name{simulation.matrix}
+\alias{simulation.matrix}
+\title{Get Simulations as a Matrix}
+\usage{
+  simulation.matrix(obj, which = NULL, ...)
+}
+\arguments{
+  \item{obj}{an object, typically a ``sim'' or
+  ``pooled.sim'' object.}
+
+  \item{which}{a character-vector specifying the
+  \emph{titles} of quantities of interest to extract}
+
+  \item{...}{additional parameters}
+}
+\value{
+  a simulation matrix
+}
+\description{
+  Returns a MxN matrix where N is the number of simulations
+  and M is the number of predicted values. Additionally, a
+  ``labels'' attribute is attached that produces a
+  human-readable identifier for each column.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/simulations.parameters.Rd b/man/simulations.parameters.Rd
new file mode 100644
index 0000000..5a15d39
--- /dev/null
+++ b/man/simulations.parameters.Rd
@@ -0,0 +1,28 @@
+\name{simulations.parameters}
+\alias{simulations.parameters}
+\title{Return Simulations of Parameter Coefficients}
+\usage{
+  \method{simulations}{parameters}(object, ...)
+}
+\arguments{
+  \item{object}{a 'parameters' object}
+
+  \item{\dots}{ignored}
+}
+\value{
+  simulations, specified by the Zelig model, of the
+  ancillary parameters
+}
+\description{
+  Returns simulated parameters of coefficients for use in
+  statistical simulation. The values are set by the
+  model-fitting function and the developer of the qi.<model
+  name> method.
+}
+\note{
+  This function does not differ at all from coef.default
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/simulations.plot.Rd b/man/simulations.plot.Rd
new file mode 100644
index 0000000..6d7ec3e
--- /dev/null
+++ b/man/simulations.plot.Rd
@@ -0,0 +1,46 @@
+\name{simulations.plot}
+\alias{simulations.plot}
+\title{Plot Quantities of Interest in a Zelig-fashion}
+\usage{
+  simulations.plot(y, y1=NULL, xlab="", ylab="", main="",
+    col=NULL, line.col=NULL, axisnames=TRUE)
+}
+\arguments{
+  \item{y}{A matrix or vector of simulated results
+  generated by Zelig, to be graphed.}
+
+  \item{y1}{For comparison of two sets of simulated results
+  at different choices of covariates, this should be an
+  object of the same type and dimension as y.  If no
+  comparison is to be made, this should be NULL.}
+
+  \item{xlab}{Label for the x-axis.}
+
+  \item{ylab}{Label for the y-axis.}
+
+  \item{main}{Main plot title.}
+
+  \item{col}{A vector of colors.  Colors will be used in
+  turn as the graph is built for main plot objects. For
+  nominal/categorical data, this color renders as the bar
+  color, while for numeric data it renders as the
+  background color.}
+
+  \item{line.col}{A vector of colors.  Colors will be used
+  in turn as the graph is built for line color shading of
+  plot objects.}
+
+  \item{axisnames}{a character-vector, specifying the names
+  of the axes}
+}
+\value{
+  nothing
+}
+\description{
+  Various graph generation for different common types of
+  simulated results from Zelig
+}
+\author{
+  James Honaker
+}
+
diff --git a/man/special_print_LIST.Rd b/man/special_print_LIST.Rd
new file mode 100644
index 0000000..f32e7fb
--- /dev/null
+++ b/man/special_print_LIST.Rd
@@ -0,0 +1,26 @@
+\name{special_print_LIST}
+\alias{.print.qi.summarized.LIST}
+\alias{special_print_LIST}
+\title{Method for Printing Summarized QI's in a List Form}
+\usage{
+  .print.qi.summarized.LIST(x, ...)
+}
+\arguments{
+  \item{x}{a 'summarized.qi' object}
+
+  \item{...}{additional parameters to be used by the
+  'print.matrix' method}
+}
+\value{
+  x (invisibly)
+}
+\description{
+  Method for Printing Summarized QI's in a List Form
+}
+\note{
+  This function is used internally by Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/special_print_MATRIX.Rd b/man/special_print_MATRIX.Rd
new file mode 100644
index 0000000..2b52a97
--- /dev/null
+++ b/man/special_print_MATRIX.Rd
@@ -0,0 +1,25 @@
+\name{special_print_MATRIX}
+\alias{.print.qi.summarized.MATRIX}
+\alias{special_print_MATRIX}
+\title{Method for Printing Summarized QI's in a Matrix Form}
+\usage{
+  .print.qi.summarized.MATRIX(x, ...)
+}
+\arguments{
+  \item{x}{a 'summarized.qi' object}
+
+  \item{...}{additional parameters}
+}
+\value{
+  x (invisibly)
+}
+\description{
+  Method for Printing Summarized QI's in a Matrix Form
+}
+\note{
+  This function is used internally by Zelig
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/splitUp.Rd b/man/splitUp.Rd
new file mode 100644
index 0000000..a770f20
--- /dev/null
+++ b/man/splitUp.Rd
@@ -0,0 +1,33 @@
+\name{splitUp}
+\alias{splitUp}
+\title{Split a List into Two Lists
+This function takes any list and splits it into two lists - one containing
+the values of arguments with specifically specified values and those without
+specified values.}
+\usage{
+  splitUp(args)
+}
+\arguments{
+  \item{args}{a list}
+}
+\value{
+  a list containing two entries: the key-value paired
+  entries (titled wordful) and the unkeyed entries (titled
+  wordless)
+}
+\description{
+  Split a List into Two Lists This function takes any
+  list and splits it into two lists - one containing the
+  values of arguments with specifically specified values
+  and those without specified values.
+}
+\note{
+  This function is a good candidate for deprecation
+}
+\examples{
+#list(wordful = list(x=1, y=2), wordless=list(2, "red"))
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/store.object.Rd b/man/store.object.Rd
new file mode 100644
index 0000000..2803fb5
--- /dev/null
+++ b/man/store.object.Rd
@@ -0,0 +1,41 @@
+\name{store.object}
+\alias{store.object}
+\title{Store Object in Environment with a Fake Name}
+\usage{
+  store.object(obj, envir, name = NULL, prefix = ".")
+}
+\arguments{
+  \item{obj}{any object}
+
+  \item{envir}{an environment object, which will contain
+  the object with the assigned name}
+
+  \item{name}{a character-string specifying the name that
+  the object will be stored as in the specified
+  environment}
+
+  \item{prefix}{a character string specifying the prefixes
+  to append to names that already have matches in the
+  destination environment}
+}
+\value{
+  a character-string specifying the name of the object in
+  the destination environment
+}
+\description{
+  This function takes the value of an object and stores it
+  within a specified environment. This is similar to simply
+  using the \code{assign} function, but will not overwrite
+  existing values in the specified environment. It
+  accomplishes this by appending a prefix to the name of
+  the variable until the name becomes unique.
+}
+\note{
+  This method does not correct invalid names. That is,
+  there is no test to determine whether the submitted name
+  is valid.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/structuralToReduced.Rd b/man/structuralToReduced.Rd
new file mode 100644
index 0000000..29e866d
--- /dev/null
+++ b/man/structuralToReduced.Rd
@@ -0,0 +1,20 @@
+\name{structuralToReduced}
+\alias{structuralToReduced}
+\title{Transform the Multilevel's Structural Formulas Into Reduced Form}
+\usage{
+  structuralToReduced(f)
+}
+\arguments{
+  \item{f}{a list of formulas}
+}
+\value{
+  a formula in reduced form
+}
+\description{
+  Transform the Multilevel's Structural Formulas Into
+  Reduced Form
+}
+\author{
+  Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
+}
+
diff --git a/man/summarize.Rd b/man/summarize.Rd
new file mode 100644
index 0000000..231bb8c
--- /dev/null
+++ b/man/summarize.Rd
@@ -0,0 +1,21 @@
+\name{summarize}
+\alias{summarize}
+\title{Generic method for summarizing simulated quantities of interest}
+\usage{
+  summarize(obj)
+}
+\arguments{
+  \item{obj}{a \code{qi} object, storing simulations of
+  quantities of interest}
+}
+\value{
+  a \code{summarized.qi} object
+}
+\description{
+  Generic method for summarizing simulated quantities of
+  interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/summarize.default.Rd b/man/summarize.default.Rd
new file mode 100644
index 0000000..4b9d039
--- /dev/null
+++ b/man/summarize.default.Rd
@@ -0,0 +1,20 @@
+\name{summarize.default}
+\alias{summarize.default}
+\title{Summarize Simulated Quantities of Interest}
+\usage{
+  \method{summarize}{default}(obj)
+}
+\arguments{
+  \item{obj}{a \code{qi} object, storing simulations of
+  quantities of interest}
+}
+\value{
+  a 'summarized.qi' object
+}
+\description{
+  Summarize Simulated Quantities of Interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/summary.MI.Rd b/man/summary.MI.Rd
new file mode 100644
index 0000000..f25cb79
--- /dev/null
+++ b/man/summary.MI.Rd
@@ -0,0 +1,23 @@
+\name{summary.MI}
+\alias{summary.MI}
+\title{Summary of Multiply Imputed Statistical Models}
+\usage{
+  \method{summary}{MI}(object, subset = NULL, ...)
+}
+\arguments{
+  \item{object}{a set of fitted statistical models}
+
+  \item{subset}{an integer vector, specifying the indices of the data.frames to be used in the subset}
+
+  \item{...}{parameters to forward}
+}
+\value{
+  a list of summaries
+}
+\description{
+  ...
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/summary.MI.sim.Rd b/man/summary.MI.sim.Rd
new file mode 100644
index 0000000..61a8ae1
--- /dev/null
+++ b/man/summary.MI.sim.Rd
@@ -0,0 +1,22 @@
+\name{summary.MI.sim}
+\alias{summary.MI.sim}
+\title{Method for summarizing simulations of multiply imputed quantities of interest}
+\usage{
+  \method{summary}{MI.sim}(object, ...)
+}
+\arguments{
+  \item{object}{a `MI.sim' object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a `summarized.MI.sim' object
+}
+\description{
+  Method for summarizing simulations of multiply imputed
+  quantities of interest
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/summary.Relogit2.Rd b/man/summary.Relogit2.Rd
new file mode 100644
index 0000000..ae2e22b
--- /dev/null
+++ b/man/summary.Relogit2.Rd
@@ -0,0 +1,18 @@
+\name{summary.Relogit2}
+\alias{summary.Relogit2}
+\title{Summary for ``Relogit2'' Fitted Model}
+\usage{
+  \method{summary}{Relogit2}(object, ...)
+}
+\arguments{
+  \item{object}{a ``Relogit2'' object}
+
+  \item{...}{other parameters}
+}
+\value{
+  a ``summary.relogit2'' object
+}
+\description{
+  Summarize important components of the ``relogit'' model
+}
+
diff --git a/man/summary.glm.robust.Rd b/man/summary.glm.robust.Rd
new file mode 100644
index 0000000..c9f4c26
--- /dev/null
+++ b/man/summary.glm.robust.Rd
@@ -0,0 +1,22 @@
+\name{summary.glm.robust}
+\alias{summary.glm.robust}
+\title{Summary of Generalized Linear Model with Robust Error Estimates}
+\usage{
+  \method{summary}{glm.robust}(object, ...)
+}
+\arguments{
+  \item{object}{a ``glm.robust'' fitted model}
+
+  \item{...}{parameters to pass to the standard
+  ``summary.glm'' method}
+}
+\value{
+  a object of type ``summary.glm.robust'' and
+  ``summary.glm''
+}
+\description{
+  Returns summary of a glm model with robust error
+  estimates. This only slightly differs from how the
+  standard GLM's behave.
+}
+
diff --git a/man/summary.pooled.sim.Rd b/man/summary.pooled.sim.Rd
new file mode 100644
index 0000000..d03d810
--- /dev/null
+++ b/man/summary.pooled.sim.Rd
@@ -0,0 +1,28 @@
+\name{summary.pooled.sim}
+\alias{summary.pooled.sim}
+\title{Return a Summary of a Set of Pooled Simulated Interests}
+\usage{
+  \method{summary}{pooled.sim}(object, ...)
+}
+\arguments{
+  \item{object}{a ``pooled.sim'' object, containing
+  information about simulated quantities of interest}
+
+  \item{...}{Ignored parameters}
+}
+\value{
+  a ``summary.pooled.sim'' object storing the replicated
+  quantities of interest
+}
+\description{
+  Returns the summary information from a set of pooled
+  simulated interests. The object returned contains the
+  slots ``labels'', a character-vector specifying the
+  labels (explanatory variable titles) of the qi's,
+  ``titles'', a character vector specifying the names of
+  the quantities of interest, and
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/summary.relogit.Rd b/man/summary.relogit.Rd
new file mode 100644
index 0000000..5f5a412
--- /dev/null
+++ b/man/summary.relogit.Rd
@@ -0,0 +1,18 @@
+\name{summary.Relogit}
+\alias{summary.Relogit}
+\title{Summary for ``Relogit'' Fitted Model}
+\usage{
+  \method{summary}{Relogit}(object, ...)
+}
+\arguments{
+  \item{object}{a ``Relogit'' object}
+
+  \item{...}{other parameters}
+}
+\value{
+  a ``summary.relogit'' object
+}
+\description{
+  Summarize important components of the ``relogit'' model
+}
+
diff --git a/man/summary.sim.Rd b/man/summary.sim.Rd
new file mode 100644
index 0000000..804a9c6
--- /dev/null
+++ b/man/summary.sim.Rd
@@ -0,0 +1,21 @@
+\name{summary.sim}
+\alias{summary.sim}
+\title{Method for summarizing simulations of quantities of interest}
+\usage{
+  \method{summary}{sim}(object, ...)
+}
+\arguments{
+  \item{object}{a 'MI.sim' object}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  a 'summarized.MI.sim' object
+}
+\description{
+  Return a ``summary.sim'' object (typically for display)
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/summary.zelig.Rd b/man/summary.zelig.Rd
index 8dd4e48..e71209c 100644
--- a/man/summary.zelig.Rd
+++ b/man/summary.zelig.Rd
@@ -1,58 +1,22 @@
 \name{summary.zelig}
-
 \alias{summary.zelig}
-\alias{summary}
-
-\title{Summary of Simulated Quantities of Interest}
-
-\description{Summarizes the object of class \code{\link{zelig}} (output
-  from \code{\link{sim}}) which contains simulated quantities of
-  interst.} 
-
+\title{Zelig Object Summaries}
 \usage{
-\method{summary}{zelig}(object, subset = NULL, CI = 95, stats = c("mean", "sd"), \dots)
+  \method{summary}{zelig}(object, ...)
 }
-
 \arguments{
-  \item{object}{output object from \code{\link{sim}} (of class
-    \code{"zelig"}).}
-  \item{subset}{takes one of three values:
-    \itemize{
-    \item{NULL}{(default) for more than one observation, summarizes all the
-      observations at once for each quantity of interest.}
-    \item{a numeric vector}{indicates which observations to summarize,
-      and summarizes each one independently.}
-    \item{all}{summarizes all the observations independently.}
-    }
-  }
-  \item{stats}{summary statistics to be calculated.}
-  \item{CI}{a confidence interval to be calculated.}
-  \item{\ldots}{further arguments passed to or from other methods.}
-}
+  \item{object}{a zelig object}
 
+  \item{...}{parameters forwarded to the generic summary
+  object}
+}
 \value{
-  \item{sim}{number of simulations, i.e., posterior draws.}
-  \item{x}{values of explanatory variables used for simulation.}
-  \item{x1}{values of explanatory variables used for simulation of first
-    differences etc.}
-  \item{qi.stats}{summary of quantities of interst.  Use
-    \code{\link{names}} to view the model-specific items available in
-    \code{qi.stats}.}
+  the summary of the fitted model
+}
+\description{
+  Compute summary data for zelig objects
 }
-
-\seealso{
-  \code{\link{zelig}}, \code{\link{setx}}, \code{\link{sim}},
-  and \code{\link{names}}, and the full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}.  }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  Matt Owen \email{mowen at iq.harvard.edu}
 }
 
-\keyword{file}
-
-
-
-
-
diff --git a/man/t.setx.Rd b/man/t.setx.Rd
new file mode 100644
index 0000000..95e87be
--- /dev/null
+++ b/man/t.setx.Rd
@@ -0,0 +1,23 @@
+\name{t.setx}
+\alias{t.setx}
+\title{Matrix Transpose of a ``setx'' Object}
+\usage{
+  \method{t}{setx}(x)
+}
+\arguments{
+  \item{x}{a `setx' object}
+}
+\value{
+  a transposed matrix
+}
+\description{
+  Returns a ``setx'' object as column vector. If multiple
+  values for each explanatory term has been set, then
+  return a NxM matrix where `N' is the number of
+  explanatory terms and `M' is the number of values set for
+  each term.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/table.levels.Rd b/man/table.levels.Rd
new file mode 100644
index 0000000..58d37f7
--- /dev/null
+++ b/man/table.levels.Rd
@@ -0,0 +1,28 @@
+\name{table.levels}
+\alias{table.levels}
+\title{Create a table, but ensure that the correct
+columns exist. In particular, this allows for
+entries with zero as a value, which is not
+the default for standard tables}
+\usage{
+  table.levels(x, levels, ...)
+}
+\arguments{
+  \item{x}{a vector}
+
+  \item{levels}{a vector of levels}
+
+  \item{...}{parameters for table}
+}
+\value{
+  a table
+}
+\description{
+  Create a table, but ensure that the correct columns
+  exist. In particular, this allows for entries with zero
+  as a value, which is not the default for standard tables
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/terms.multiple.Rd b/man/terms.multiple.Rd
new file mode 100644
index 0000000..15246a3
--- /dev/null
+++ b/man/terms.multiple.Rd
@@ -0,0 +1,21 @@
+\name{terms.multiple}
+\alias{terms.multiple}
+\title{Extract Terms from a \code{multiple} Object}
+\usage{
+  \method{terms}{multiple}(x, data=NULL,...)
+}
+\arguments{
+  \item{x}{a Zelig v3.5 formula}
+
+  \item{data}{a \code{data.frame}}
+
+  \item{...}{ignored parameters}
+}
+\description{
+  Extracts terms from Zelig-3.5-style formulae. This
+  function is scheduled for removal.
+}
+\author{
+  Kosuke Imai, Olivia Lau, Gary King and Ferdinand Alimadhi
+}
+
diff --git a/man/terms.vglm.Rd b/man/terms.vglm.Rd
new file mode 100644
index 0000000..02ba4fa
--- /dev/null
+++ b/man/terms.vglm.Rd
@@ -0,0 +1,21 @@
+\name{terms.vglm}
+\alias{terms.vglm}
+\title{Model Terms for 'vglm' Models}
+\usage{
+  \method{terms}{vglm}(x, ...)
+}
+\arguments{
+  \item{x}{a fitted model object from the VGAM library}
+
+  \item{...}{ignored parameters}
+}
+\value{
+  the models terms of this fitted model object
+}
+\description{
+  Model Terms for 'vglm' Models
+}
+\author{
+  Ferdinand Alimadhi, Kosuke Imai and Olivia Lau
+}
+
diff --git a/man/terms.zelig.Rd b/man/terms.zelig.Rd
new file mode 100644
index 0000000..d6d6b3f
--- /dev/null
+++ b/man/terms.zelig.Rd
@@ -0,0 +1,19 @@
+\name{terms.zelig}
+\alias{terms.zelig}
+\title{Model Terms for a Zelig Object}
+\usage{
+  \method{terms}{zelig}(x, ...)
+}
+\arguments{
+  \item{x}{a \code{zelig} object}
+
+  \item{...}{forwarded parameters}
+}
+\value{
+  terms of the original fitted model
+}
+\description{
+  This method simply extracts the model terms for the
+  fitted model passed to the \code{zelig} function.
+}
+
diff --git a/man/termsFromFormula.Rd b/man/termsFromFormula.Rd
new file mode 100644
index 0000000..dfc48b0
--- /dev/null
+++ b/man/termsFromFormula.Rd
@@ -0,0 +1,17 @@
+\name{termsFromFormula}
+\alias{termsFromFormula}
+\title{Extract Terms from Zelig-style Formulae}
+\usage{
+  termsFromFormula(obj)
+}
+\arguments{
+  \item{obj}{a Zelig-style formula}
+}
+\description{
+  This method is a sugary function to extract terms from
+  any type of Zelig-style formula.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/ternaryplot.Rd b/man/ternaryplot.Rd
deleted file mode 100644
index e466fce..0000000
--- a/man/ternaryplot.Rd
+++ /dev/null
@@ -1,74 +0,0 @@
-\name{ternaryplot}
-\alias{ternaryplot}
-%- Also NEED an `\alias' for EACH other topic documented here.
-\title{Ternary diagram}
-\description{
-Visualizes compositional, 3-dimensional data in an equilateral triangle  
-(from the vcd library, Version 0.1-3.3, Date 2004-04-21), using plot graphics.  
-Differs from implementation in vcd (0.9-7), which uses grid graphics.}
-\usage{
-ternaryplot(x, scale = 1, dimnames = NULL, dimnames.position = c("corner","edge","none"),
-            dimnames.color = "black", id = NULL, id.color = "black", coordinates = FALSE,
-	    grid = TRUE, grid.color = "gray", labels = c("inside", "outside", "none"),
-	    labels.color = "darkgray", border = "black", bg = "white", pch = 19, cex = 1,
-	    prop.size = FALSE, col = "red", main = "ternary plot", ...)
-}
-\arguments{
-  \item{x}{a matrix with three columns.}
-  \item{scale}{row sums scale to be used.}
-  \item{dimnames}{dimension labels (defaults to the column names of
-    \code{x}).}
-  \item{dimnames.position, dimnames.color}{position and color of dimension labels.}
-  \item{id}{optional labels to be plotted below the plot
-    symbols. \code{coordinates} and \code{id} are mutual exclusive.}
-  \item{id.color}{color of these labels.}
-  \item{coordinates}{if \code{TRUE}, the coordinates of the points are
-    plotted below them. \code{coordinates} and \code{id} are mutual exclusive.}
-  \item{grid}{if \code{TRUE}, a grid is plotted. May optionally
-    be a string indicating the line type (default: \code{"dotted"}).}
-  \item{grid.color}{grid color.}
-  \item{labels, labels.color}{position and color of the grid labels.}
-  \item{border}{color of the triangle border.}
-  \item{bg}{triangle background.}
-  \item{pch}{plotting character. Defaults to filled dots.}
-  \item{cex}{a numerical value giving the amount by which plotting text
-    and symbols should be scaled relative to the default. Ignored for
-    the symbol size if \code{prop.size} is not \code{FALSE}.}
-  \item{prop.size}{if \code{TRUE}, the symbol size is plotted
-    proportional to the row sum of the three variables, i.e. represents
-    the weight of the observation.}
-  \item{col}{plotting color.}
-  \item{main}{main title.}
-  \item{\dots}{additional graphics parameters (see \code{par})}
-}
-\details{
-A points' coordinates are found by computing the gravity center
-of mass points using the data entries as weights. Thus, the coordinates
-of a point P(a,b,c), \eqn{a + b + c = 1}, are: P(b + c/2, c * sqrt(3)/2).
-}
-
-\examples{
-data(mexico)
-if (require(VGAM)) { 
-z.out <- zelig(as.factor(vote88) ~ pristr + othcok + othsocok, 
-                model = "mlogit", data = mexico)
-x.out <- setx(z.out)
-s.out <- sim(z.out, x = x.out)
-
-ternaryplot(s.out$qi$ev, pch = ".", col = "blue",
-            main = "1988 Mexican Presidential Election")
-}
-}
-
-\seealso{\code{\link{ternarypoints}}}
-
-\references{
-M. Friendly (2000),
-\emph{Visualizing Categorical Data}. SAS Institute, Cary, NC.
-}
-\author{
-  David Meyer\cr
-  \email{david.meyer at ci.tuwien.ac.at}
-}
-
-\keyword{hplot}
diff --git a/man/ternarypoints.Rd b/man/ternarypoints.Rd
deleted file mode 100644
index 2e5cd60..0000000
--- a/man/ternarypoints.Rd
+++ /dev/null
@@ -1,48 +0,0 @@
-\name{ternarypoints}
-
-\alias{ternarypoints}
-
-\title{Adding Points to Ternary Diagrams}
-
-\description{
-Use \code{ternarypoints} to add points to a ternary diagram generated
-using the \code{ternaryplot} function in the vcd library.  Use
-ternary diagrams to plot expected values for multinomial choice models
-with three categories in the dependent variable.  
-}
-
-\usage{
-ternarypoints(object, pch = 19, col = "blue", ...)
-}
-
-\arguments{
-\item{object}{The input object must be a matrix with three
-  columns. }
-\item{pch}{The selected type of point.  By default, \code{pch =
-    19}, solid disks. } 
-\item{col}{The color of the points.  By default, \code{col =
-    "blue"}. }  
-\item{\dots}{Additional parameters passed to \code{points}. }  
-}
-
-\value{
-The \code{ternarypoints} command adds points to a previously existing
-ternary diagram.  Use \code{ternaryplot} in the \code{vcd} library to
-generate the main ternary diagram.  
-}
-
-\seealso{ The full Zelig manual at
-  \url{http://gking.harvard.edu/zelig}, \code{points}, and
-  \code{\link{ternaryplot}}.  }
-
-\author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
-}
-
-\keyword{aplot}
-
-
-
-
-
diff --git a/man/toBuildFormula.Rd b/man/toBuildFormula.Rd
new file mode 100644
index 0000000..3b2d1c0
--- /dev/null
+++ b/man/toBuildFormula.Rd
@@ -0,0 +1,21 @@
+\name{toBuildFormula}
+\alias{toBuildFormula}
+\title{Build Formula ???}
+\usage{
+  toBuildFormula(Xnames, sepp = "+")
+}
+\arguments{
+  \item{Xnames}{a character-vector}
+
+  \item{sepp}{a separator (???)}
+}
+\value{
+  a character-string
+}
+\description{
+  This function builds a formula
+}
+\author{
+  ???
+}
+
diff --git a/man/tolmerFormat.Rd b/man/tolmerFormat.Rd
new file mode 100644
index 0000000..89d2302
--- /dev/null
+++ b/man/tolmerFormat.Rd
@@ -0,0 +1,25 @@
+\name{tolmerFormat}
+\alias{tolmerFormat}
+\title{Convert a Formula into 'lmer' Representation from Reduced Form
+Take a formula in its reduced form and return it as a 'lmer' representation
+(from the lme4 package). This is basically removing the starting 'tag' from
+each term.}
+\usage{
+  tolmerFormat(f)
+}
+\arguments{
+  \item{f}{a formula in reduced form}
+}
+\value{
+  the 'lmer' representation of 'f'
+}
+\description{
+  Convert a Formula into 'lmer' Representation from Reduced
+  Form Take a formula in its reduced form and return it as a
+  'lmer' representation (from the lme4 package). This is
+  basically removing the starting 'tag' from each term.
+}
+\author{
+  Ferdinand Alimadhi, Kosuke Imai, and Olivia Lau
+}
+
diff --git a/man/ucfirst.Rd b/man/ucfirst.Rd
new file mode 100644
index 0000000..da86fdf
--- /dev/null
+++ b/man/ucfirst.Rd
@@ -0,0 +1,20 @@
+\name{ucfirst}
+\alias{ucfirst}
+\title{Uppercase First Letter of a String}
+\usage{
+  ucfirst(str)
+}
+\arguments{
+  \item{str}{a vector of character-strings}
+}
+\value{
+  a vector of character strings
+}
+\description{
+  This method sets the first character of a string to its
+  uppercase, sets all other characters to lowercase.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/user.prompt.Rd b/man/user.prompt.Rd
index 277b4fb..83c6c61 100644
--- a/man/user.prompt.Rd
+++ b/man/user.prompt.Rd
@@ -1,27 +1,20 @@
 \name{user.prompt}
-
 \alias{user.prompt}
-
-\title{Pause in demo files}
-
-\description{
-  Use \code{user.prompt} while writing demo files to force users to hit
-  return before continuing.  
-}
-
+\title{Prompt User}
 \usage{
-user.prompt()
+  user.prompt(msg = NULL)
 }
-
-\seealso{\code{readline}}
-
-\author{Olivia Lau <\email{olau at fas.harvard.edu}>
+\arguments{
+  \item{msg}{a character-string, specifying a message to be
+  displayed}
 }
-
-\examples{
-\dontrun{
-user.prompt()
+\value{
+  This function is used for its side effects
+}
+\description{
+  Prompts user to hit enter
 }
+\note{
+  This function is primarily used by Zelig demo scripts
 }
 
-\keyword{file}
diff --git a/man/z.Rd b/man/z.Rd
new file mode 100644
index 0000000..6f0fa8e
--- /dev/null
+++ b/man/z.Rd
@@ -0,0 +1,29 @@
+\name{z}
+\alias{z}
+\title{Return value for a zelig2-function}
+\usage{
+  z(.function, ..., .hook = NULL)
+}
+\arguments{
+  \item{.function}{a function}
+
+  \item{...}{a set of parameters to be evaluated
+  symbolically}
+
+  \item{.hook}{a function to be applied after the external, model-fitting
+  function is called}
+}
+\value{
+  a ``z'' object which specifies how to evaluate the fitted
+  model
+}
+\description{
+  This is an API-function that bridges a model-fitting
+  function with a zelig interface.
+}
+\note{
+  This is used internally by Zelig-dependent packages to
+  instruct Zelig how to evaluate the function call to a
+  particular statistical model.
+}
+
diff --git a/man/zelig.Rd b/man/zelig.Rd
index 818a275..3a28af1 100644
--- a/man/zelig.Rd
+++ b/man/zelig.Rd
@@ -1,96 +1,71 @@
 \name{zelig}
-
 \alias{zelig}
-
 \title{Estimating a Statistical Model}
-
-\description{ The \code{zelig} command estimates a variety of statistical
-models.  Use \code{zelig} output with \code{setx} and \code{sim} to compute
-quantities of interest, such as predicted probabilities, expected values, and
-first differences, along with the associated measures of uncertainty
-(standard errors and confidence intervals). }
-
 \usage{
-zelig(formula, model, data, by, save.data, cite, \dots) 
+  zelig(formula, model, data, ..., by = NULL, cite = T)
 }
-
 \arguments{
-\item{formula}{a symbolic representation of the model to be
-  estimated, in the form \code{y \~\, x1 + x2}, where \code{y} is the
-  dependent variable and \code{x1} and \code{x2} are the explanatory
-  variables, and \code{y}, \code{x1}, and \code{x2} are contained in the
-  same dataset.  (You may include more than two explanatory variables,
-  of course.)  The \code{+} symbol means ``inclusion'' not
-  ``addition.''  You may also include interaction terms and main
-  effects in the form \code{x1*x2} without computing them in prior
-  steps; \code{I(x1*x2)} to include only the interaction term and
+  \item{formula}{a symbolic representation of the model to
+  be estimated, in the form \code{y \~\, x1 + x2}, where
+  \code{y} is the dependent variable and \code{x1} and
+  \code{x2} are the explanatory variables, and \code{y},
+  \code{x1}, and \code{x2} are contained in the same
+  dataset.  (You may include more than two explanatory
+  variables, of course.)  The \code{+} symbol means
+  ``inclusion'' not ``addition.'' You may also include
+  interaction terms and main effects in the form
+  \code{x1*x2} without computing them in prior steps;
+  \code{I(x1*x2)} to include only the interaction term and
   exclude the main effects; and quadratic terms in the form
-  \code{I(x1^2)}.  }
-\item{model}{the name of a statistical model, enclosed in \code{""}.
-  Type \code{help.zelig("models")} to see a list of currently supported
-  models.  }
-\item{data}{the name of a data frame containing the variables
-  referenced in the formula, or a list of multiply imputed data frames
-  each having the same variable names and row numbers (created by
-  \code{mi}). }
-
-\item{save.data}{If is set to "TRUE", the input dataframe will be saved
-  as an attribute ("zelig.data") of the zelig output object. }
-
-\item{cite}{If is set to "TRUE" (default), the model citation will be
-  will be printed out when this function is called. }
-
-\item{by}{a factor variable contained in \code{data}.  Zelig will subset
-the data frame based on the levels in the \code{by} variable, and
-estimate a model for each subset.  This a particularly powerful option
-which will allow you to save a considerable amount of effort.  For
-example, to run the same model on all fifty states, you could type:
-\code{z.out <- zelig(y ~ x1 + x2, data = mydata, model = "ls", by = "state")}
-  You may also use \code{by} to run models using MatchIt subclass.  }
-\item{\dots}{additional arguments passed to \code{zelig},
-  depending on the model to be estimated. }
+  \code{I(x1^2)}}
+
+  \item{model}{the name of a statistical model.  Type
+  \code{help.zelig("models")} to see a list of currently
+  supported models}
+
+  \item{data}{the name of a data frame containing the
+  variables referenced in the formula, or a list of
+  multiply imputed data frames each having the same
+  variable names and row numbers (created by \code{mi})}
+
+  \item{...}{additional arguments passed to \code{zelig},
+  depending on the model to be estimated}
+
+  \item{by}{a factor variable contained in \code{data}.
+  Zelig will subset the data frame based on the levels in
+  the \code{by} variable, and estimate a model for each
+  subset.  This a particularly powerful option which will
+  allow you to save a considerable amount of effort.  For
+  example, to run the same model on all fifty states, you
+  could type: \code{z.out <- zelig(y ~ x1 + x2, data =
+  mydata, model = "ls", by = "state")} You may also use
+  \code{by} to run models using MatchIt subclass}
+
+  \item{cite}{If set to "TRUE" (default), the model
+  citation will be printed out when this function is called}
 }
-
 \value{
-Depending on the class of model selected, \code{zelig} will return
-an object with elements including \code{coefficients}, \code{residuals},
-and \code{formula} which may be summarized using
-\code{summary(z.out)} or individually extracted using, for example,
-\code{z.out\$coefficients}.  See the specific models listed above
-for additional output values, or simply type \code{names(z.out)}.  
+  Depending on the class of model selected, \code{zelig}
+  will return an object with elements including
+  \code{coefficients}, \code{residuals}, and \code{formula}
+  which may be summarized using \code{summary(z.out)} or
+  individually extracted using, for example,
+  \code{z.out\$coefficients}.  See the specific models
+  listed above for additional output values, or simply type
+  \code{names(z.out)}.
 }
-
-\seealso{ The full Zelig manual is available at
-  \url{http://gking.harvard.edu/zelig}.
+\description{
+  The zelig command estimates a variety of statistical
+  models.  Use \code{zelig} output with \code{setx} and
+  \code{sim} to compute quantities of interest, such as
+  predicted probabilities, expected values, and first
+  differences, along with the associated measures of
+  uncertainty (standard errors and confidence intervals).
 }
-
 \author{
-  Kosuke Imai <\email{kimai at princeton.edu}>; Gary King
-  <\email{king at harvard.edu}>; Olivia Lau <\email{olau at fas.harvard.edu}>
+  Matt Owen \email{mowen at iq.harvard.edu}, Kosuke Imai,
+  Olivia Lau, and Gary King Maintainer: Matt Owen
+  \email{mowen at iq.harvard.edu}
 }
-
-\keyword{file}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+\keyword{package}
 
diff --git a/man/zelig.call.Rd b/man/zelig.call.Rd
new file mode 100644
index 0000000..f98f2ba
--- /dev/null
+++ b/man/zelig.call.Rd
@@ -0,0 +1,27 @@
+\name{zelig.call}
+\alias{zelig.call}
+\title{Create Function Call}
+\usage{
+  zelig.call(Call, zelig2, remove = NULL)
+}
+\arguments{
+  \item{Call}{a \code{call} object, typically specifying
+  the original function call to \code{zelig}}
+
+  \item{zelig2}{the return-value of the \code{zelig2}
+  method}
+
+  \item{remove}{a list of character vectors specifying
+  which parameters to ignore from the original call to
+  \code{zelig}}
+}
+\value{
+  a function call used to fit the statistical model
+}
+\description{
+  Create Function Call
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/zelig.skeleton.Rd b/man/zelig.skeleton.Rd
new file mode 100644
index 0000000..79da88a
--- /dev/null
+++ b/man/zelig.skeleton.Rd
@@ -0,0 +1,62 @@
+\name{zelig.skeleton}
+\alias{zelig.skeleton}
+\title{Creates a Skeleton for a New Zelig package}
+\usage{
+  zelig.skeleton(pkg, models = c(),
+    author = "UNKNOWN AUTHOR", path = ".", force = FALSE,
+    email = "maintainer at software-project.org",
+    depends = c(), ..., .gitignore = TRUE,
+    .Rbuildignore = TRUE)
+}
+\arguments{
+  \item{pkg}{a character-string specifying the name of the
+  Zelig package}
+
+  \item{models}{a vector of strings specifying models to be
+  included in the package}
+
+  \item{author}{a vector of strings specifying contributors
+  to the package}
+
+  \item{path}{a character-string specifying the path to the
+  package}
+
+  \item{force}{a logical specifying whether to overwrite
+  files and create necessary directories}
+
+  \item{email}{a string specifying the email address of the
+  package's maintainer}
+
+  \item{depends}{a vector of strings specifying package
+  dependencies}
+
+  \item{...}{ignored parameters}
+
+  \item{.gitignore}{a logical specifying whether to include
+  a copy of a simple \code{.gitignore} in the appropriate
+  folders (\code{inst/doc} and the package root)}
+
+  \item{.Rbuildignore}{a logical specifying whether to
+  include a copy of a simple \code{.Rbuildignore} in the
+  appropriate folders (\code{inst/doc} and the package
+  root)}
+}
+\value{
+  nothing
+}
+\description{
+  'zelig.skeleton' generates the necessary files used to
+  create a Zelig package. Based on (and using) R's
+  'package.skeleton' it removes some of the monotony of
+  building statistical packages. In particular,
+  'zelig.skeleton' produces templates for the
+  \code{zelig2}, \code{describe}, \code{param}, and
+  \code{qi} methods. For more information about creating
+  these files on an individual basis, please refer to the
+  tech manuals, which are available by typing:
+  \code{?zelig2}, \code{?param}, or \code{?qi}.
+}
+\author{
+  Matt Owen \email{mowen at iq.harvard.edu}
+}
+
diff --git a/man/zelig2-bayes.Rd b/man/zelig2-bayes.Rd
new file mode 100644
index 0000000..28e3092
--- /dev/null
+++ b/man/zelig2-bayes.Rd
@@ -0,0 +1,48 @@
+\name{zelig2-bayes}
+
+\alias{zelig2factor.bayes}
+\alias{zelig2logit.bayes}
+\alias{zelig2mlogit.bayes}
+\alias{zelig2normal.bayes}
+\alias{zelig2oprobit.bayes}
+\alias{zelig2poisson.bayes}
+\alias{zelig2probit.bayes}
+
+\title{Zelig Bridge Functions to Bayesian Models}
+
+\usage{
+  zelig2factor.bayes(formula, factors = 2, burnin = 1000, mcmc = 20000, verbose = 0, ..., data)
+  zelig2logit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
+  zelig2mlogit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
+  zelig2normal.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
+  zelig2oprobit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
+  zelig2poisson.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
+  zelig2probit.bayes(formula, burnin = 1000, mcmc = 10000, verbose = 0, ..., data)
+}
+
+\arguments{
+  \item{formula}{a formula}
+
+  \item{...}{additional parameters}
+
+  \item{data}{a data.frame}
+
+  \item{factors}{a list of factors}
+
+  \item{burnin}{a parameter corresponding to the 'burnin'
+  parameter for the MCMCprobit function}
+
+  \item{mcmc}{a parameter corresponding to the 'mcmc'
+  parameter for the MCMCprobit function}
+
+  \item{verbose}{a parameter corresponding to the 'verbose'
+  parameter for the MCMCprobit function}
+}
+
+\value{
+  a list specifying '.function'
+}
+
+\description{
+  Interface between Zelig and the bayesian models.
+}
diff --git a/man/zelig2-core.Rd b/man/zelig2-core.Rd
new file mode 100644
index 0000000..4f28639
--- /dev/null
+++ b/man/zelig2-core.Rd
@@ -0,0 +1,69 @@
+\name{zelig2-core}
+
+\alias{zelig2exp}
+\alias{zelig2gamma}
+\alias{zelig2logit}
+\alias{zelig2lognorm}
+\alias{zelig2ls}
+\alias{zelig2negbinom}
+\alias{zelig2normal}
+\alias{zelig2poisson}
+\alias{zelig2probit}
+\alias{zelig2relogit}
+\alias{zelig2tobit}
+\alias{zelig2twosls}
+
+\title{
+  Zelig to Basic GLM Fitting Functions
+}
+
+\usage{
+  zelig2exp(formula, ..., robust = FALSE, cluster = NULL, data)
+  zelig2gamma(formula, ..., data)
+  zelig2logit(formula, weights = NULL, robust = F, ..., data)
+  zelig2lognorm(formula, ..., robust = FALSE, cluster = NULL, data)
+  zelig2ls(formula, ..., data, weights = NULL)
+  zelig2negbinom(formula, weights = NULL, ..., data)
+  zelig2normal(formula, weights = NULL, ..., data)
+  zelig2poisson(formula, weights = NULL, ..., data)
+  zelig2probit(formula, weights = NULL, ..., data)
+  zelig2relogit(formula, ..., tau = NULL, bias.correct = NULL, case.control = NULL, data)
+  zelig2tobit(formula, ..., below = 0, above = Inf, robust = FALSE, cluster = NULL, data)
+  zelig2twosls(formula, ..., data)
+}
+
+\arguments{
+  \item{formula}{a formula}
+
+  \item{...}{additional parameters}
+
+  \item{weights}{a numeric vector}
+
+  \item{robust}{a boolean specifying whether to use robust
+  error estimates}
+
+  \item{cluster}{a vector describing the clustering of the
+  data}
+
+  \item{data}{a data.frame}
+
+  \item{tau}{...}
+
+  \item{bias.correct}{...}
+
+  \item{case.control}{...}
+
+  \item{below}{a numeric or infinite specifying a lower
+  boundary for censored responses}
+
+  \item{above}{a numeric or infinite specifying an upper
+  boundary for censored responses}
+}
+
+\value{
+  a list used by Zelig to call the model-fitting function
+}
+
+\description{
+  Interface between Zelig and Basic GLM Fitting Functions
+}
diff --git a/man/zelig2-gee.Rd b/man/zelig2-gee.Rd
new file mode 100644
index 0000000..0106b60
--- /dev/null
+++ b/man/zelig2-gee.Rd
@@ -0,0 +1,48 @@
+\name{zelig2-gee}
+
+\alias{zelig2gamma.gee}
+\alias{zelig2logit.gee}
+\alias{zelig2normal.gee}
+\alias{zelig2poisson.gee}
+\alias{zelig2probit.gee}
+
+\title{Bridge between Zelig and the GEE Model Fitting Functions}
+
+\usage{
+  zelig2gamma.gee(formula, id, robust, ..., R, corstr = "independence", data)
+  zelig2logit.gee(formula, id, robust, ..., R, corstr = "independence", data)
+  zelig2normal.gee(formula, id, robust, ..., R, corstr = "independence", data)
+  zelig2poisson.gee(formula, id, robust, ..., R, corstr = "independence", data)
+  zelig2probit.gee(formula, id, robust, ..., R, corstr = "independence", data)
+}
+
+\arguments{
+  \item{formula}{a formula}
+
+  \item{id}{a character-string specifying the column of the
+  data-set to use for clustering}
+
+  \item{robust}{a logical specifying whether to robustly or
+  naively compute the covariance matrix. This parameter is
+  ignored in the \code{zelig2} method, and instead used in
+  the \code{robust.hook} function, which executes after the
+  call to the \code{gee} function}
+
+  \item{...}{ignored parameters}
+
+  \item{R}{a square-matrix specifying the correlation}
+
+  \item{corstr}{a character-string specifying the
+  correlation structure}
+
+  \item{data}{a data.frame}
+}
+
+\value{
+  a list specifying the call to the external model
+}
+
+\description{
+  Bridge between Zelig and the GEE Model Fitting Functions
+}
+
diff --git a/man/zelig2-survey.Rd b/man/zelig2-survey.Rd
new file mode 100644
index 0000000..7abe49a
--- /dev/null
+++ b/man/zelig2-survey.Rd
@@ -0,0 +1,148 @@
+\name{zelig2-survey}
+
+\alias{zelig2gamma.survey}
+\alias{zelig2logit.survey}
+\alias{zelig2normal.survey}
+\alias{zelig2poisson.survey}
+\alias{zelig2probit.survey}
+
+\title{Interface between \code{Zelig} and \code{svyglm}}
+
+\usage{
+  zelig2gamma.survey(formula, weights = NULL, ids = NULL,
+    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
+    check.strata = !nest, repweights = NULL, type,
+    combined.weights = FALSE, rho = NULL,
+    bootstrap.average = NULL, scale = NULL, rscales = NULL,
+    fpctype = "fraction", return.replicates = FALSE,
+    na.action = "na.omit", start = NULL, etastart = NULL,
+    mustart = NULL, offset = NULL, model1 = TRUE,
+    method = "glm.fit", x = FALSE, y = TRUE,
+    contrasts = NULL, design = NULL, link = "inverse",
+    data, ...)
+
+  zelig2logit.survey(formula, weights = NULL, ids = NULL,
+    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
+    check.strata = !nest, repweights = NULL, type,
+    combined.weights = FALSE, rho = NULL,
+    bootstrap.average = NULL, scale = NULL, rscales = NULL,
+    fpctype = "fraction", return.replicates = FALSE,
+    na.action = "na.omit", start = NULL, etastart = NULL,
+    mustart = NULL, offset = NULL, model1 = TRUE,
+    method = "glm.fit", x = FALSE, y = TRUE,
+    contrasts = NULL, design = NULL, data)
+
+  zelig2normal.survey(formula, weights = NULL, ids = NULL,
+    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
+    check.strata = !nest, repweights = NULL, type,
+    combined.weights = FALSE, rho = NULL,
+    bootstrap.average = NULL, scale = NULL, rscales = NULL,
+    fpctype = "fraction", return.replicates = FALSE,
+    na.action = "na.omit", start = NULL, etastart = NULL,
+    mustart = NULL, offset = NULL, model1 = TRUE,
+    method = "glm.fit", x = FALSE, y = TRUE,
+    contrasts = NULL, design = NULL, data)
+
+  zelig2poisson.survey(formula, weights = NULL, ids = NULL,
+    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
+    check.strata = !nest, repweights = NULL, type,
+    combined.weights = FALSE, rho = NULL,
+    bootstrap.average = NULL, scale = NULL, rscales = NULL,
+    fpctype = "fraction", return.replicates = FALSE,
+    na.action = "na.omit", start = NULL, etastart = NULL,
+    mustart = NULL, offset = NULL, model1 = TRUE,
+    method = "glm.fit", x = FALSE, y = TRUE,
+    contrasts = NULL, design = NULL, data)
+
+  zelig2probit.survey(formula, weights = NULL, ids = NULL,
+    probs = NULL, strata = NULL, fpc = NULL, nest = FALSE,
+    check.strata = !nest, repweights = NULL, type,
+    combined.weights = FALSE, rho = NULL,
+    bootstrap.average = NULL, scale = NULL, rscales = NULL,
+    fpctype = "fraction", return.replicates = FALSE,
+    na.action = "na.omit", start = NULL, etastart = NULL,
+    mustart = NULL, offset = NULL, model1 = TRUE,
+    method = "glm.fit", x = FALSE, y = TRUE,
+    contrasts = NULL, design = NULL, data)
+}
+
+\arguments{
+  \item{formula}{a \code{formula}}
+
+  \item{weights}{...}
+
+  \item{ids}{...}
+
+  \item{probs}{...}
+
+  \item{strata}{...}
+
+  \item{fpc}{...}
+
+  \item{nest}{...}
+
+  \item{check.strata}{...}
+
+  \item{repweights}{...}
+
+  \item{type}{...}
+
+  \item{combined.weights}{...}
+
+  \item{rho}{...}
+
+  \item{bootstrap.average}{...}
+
+  \item{scale}{...}
+
+  \item{rscales}{...}
+
+  \item{fpctype}{...}
+
+  \item{return.replicates}{...}
+
+  \item{na.action}{...}
+
+  \item{start}{...}
+
+  \item{etastart}{...}
+
+  \item{mustart}{...}
+
+  \item{offset}{...}
+
+  \item{model1}{...}
+
+  \item{method}{...}
+
+  \item{x}{...}
+
+  \item{y}{...}
+
+  \item{contrasts}{...}
+
+  \item{design}{...}
+
+  \item{link}{an object specifying a link function between the predictor and response variables}
+
+  \item{data}{a \code{data.frame}}
+
+  \item{\dots}{
+    Additional parameters passed to the ``gamma.survey'' model fitting function
+  }
+}
+\value{
+  a \code{list} used to construct parameters for the
+  \code{svyglm} function
+}
+\description{
+  Interface between \code{zelig} and \code{svyglm} for the
+  \code{logit.survey}
+}
+\note{
+  This manual file is largely incomplete, and needs a
+  significant amount of filling out. This, in itself, might
+  be motivation to divide this package into more models
+  with more specific function.
+}
+
diff --git a/man/zelig2.Rd b/man/zelig2.Rd
new file mode 100644
index 0000000..7929bca
--- /dev/null
+++ b/man/zelig2.Rd
@@ -0,0 +1,58 @@
+\name{zelig2}
+\alias{zelig2}
+\title{Interface Between Zelig Models and External Functions}
+\value{
+  The main purpose of the \code{zelig2} function is to
+  return a list of key-value pairs, specifying how Zelig
+  should interface with the external method. This list has
+  the following format:
+
+  \item{\code{.function}}{specifies the name of the
+  external method to be called by \code{zelig} function.
+  Subsequent parameters, are called and evaluated as a
+  function call to the function of the named string.}
+  \item{\code{.hook}}{specifies the name of a hook function
+  as a string. The hook function is only evaluated on zelig
+  object once the external method fits the statistical
+  model} \item{...}{any parameters aside from
+  \code{.function} and \code{.hook} are passed as part of the
+  function call to the external model}
+}
+\description{
+  The \code{zelig2} function acts as a simple interface
+  between a user's call to the \code{zelig} function and
+  the zelig functions subsequent call to the pre-existing
+  external model. The external model varies based on which
+  model is being called.
+}
+\note{
+  Writing \code{zelig2} functions is required of Zelig
+  developers. In particular, \code{zelig2} functions act as
+  an interface between external models (models not included
+  in the Zelig package) and the \code{zelig} function which
+  must use that model.
+
+  \code{zelig2} is not an actual function. Rather, it is a
+  naming convention followed by the family of functions that
+  bridge individual models to \code{zelig}.
+}
+\examples{
+zelig2some.model <- function (formula, weights, verbose, ..., data) {
+   list(
+        .function = 'some.other.method',
+        .hook = NULL,
+        formula = formula,
+        weights = 2 * weights,
+        data = data
+        )
+ }
+
+## This \\code{zelig2} function equates the following function call:
+##  zelig(formula, weights = weights, verbose = TRUE, data = data, model="some.model")
+##
+## with:
+##  some.other.method(formula = formula, weights = 2 * weights, data=data)
+
+## Note that the 'verbose' parameter is ignored, since the
+## 'zelig2some.model' does not include the 'verbose' parameter in its return
+## value.
+}
+
diff --git a/man/zeligDepStatus.Rd b/man/zeligDepStatus.Rd
deleted file mode 100644
index f1198c4..0000000
--- a/man/zeligDepStatus.Rd
+++ /dev/null
@@ -1,57 +0,0 @@
-\name{zeligDepStatus}
-\alias{zeligDepStatus}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{Zelig Dependencies Packages Client Status}
-\description{
-  Compares Zelig-matrix of dependencies to the locally installed
-  packages. Finds those packages that \pkg{Zelig} depends on and are not
-  installed in local environment. Also finds those packages that are
-  locally installed but with lower versions than those required in the
-  dependencies matrix.
-  The Zelig-matrix includes any of dependency fields in the
-  \sQuote{DESCRIPTION} files, i.e. depends, imports and suggests,
-  for any packages directly derived from \pkg{Zelig} and for any of the models that \pkg{Zelig} supports.   
-}
-\usage{
-zeligDepStatus(lib.loc = NULL)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{lib.loc}{a character vector of directory names 
-   of \code{R} libraries, or NULL.
-  The default value NULL corresponds to all libraries currently known. If the default is used, the loaded packages are searched before the libraries. }
-}
-
-\value{
-  Returns a matrix of packages that are either installed locally 
-  with lower versions, or packages not installed but listed in the
-  Zelig-matrix of dependencies.  The matrix rows correspond to the packages and 
-  the columns contain the following fields
-   \item{Package}{names of packages.}
-  \item{Version}{versions locally installed}
-  \item{Zideal}{versions required in Zelig-matrix of dependencies.}}
-
-\references{King, Gary. Zelig: Everyones Statistical Software. 
-  \url{http://gking.harvard.edu/zelig}.}
-
-\author{Ferdinand Alimadhi and Elena Villalon}
-\seealso{
-\code{\link{installed.packages}}
-\code{\link{packageDescription}}
-\code{\link{zeligDepUpdate}}
-}
-\note{
-If the R version in the local environment is different from the R
-version that \pkg{Zelig}  
-depends on, then, it is reported with a message and no futher action is taken. 
-If the installed packages have versions higher than the corresponding 
-values in the Zelig-matrix, it is reported with a message.}
-
-\examples{
-## find packages in all libraries currently installed
-\dontrun{zstatus <- zeligDepStatus()}
-## find packages only in lib.loc
-\dontrun{zstatus <- zeligDepStatus(lib.loc="~/.R/mylibrary")}
-}
-\keyword{documentation}
-
diff --git a/man/zeligDepUpdate.Rd b/man/zeligDepUpdate.Rd
deleted file mode 100644
index 5b888d9..0000000
--- a/man/zeligDepUpdate.Rd
+++ /dev/null
@@ -1,72 +0,0 @@
-\name{zeligDepUpdate}
-\alias{zeligDepUpdate}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{
-  Download Zelig Dependencies Packages
-}
-\description{
-  Compares the packages in Zelig-matrix of dependencies  to the
-  locally installed packages. Finds local packages that have lower
-  versions than in the corresponding row of the Zelig-matrix.
-  Also, finds packages that \pkg{Zelig} required but are not
-  installed locally.
-  Downloads packages that are locally installed with lower versions and
-  those from the Zelig-matrix that are not installed.
-  The download repository is taken from either the default
-  \code{\var{repos}} argument or from the \sQuote{URL} column of
-  the Zelig-matrix of dependencies.        
-}
-
-\usage{
-zeligDepUpdate(destdir = NULL, installWithVers = FALSE, lib.loc = NULL,
-repos = "http://cran.r-project.org")
-}
-%- maybe also 'usage' for other objects documented here.
-
-\arguments{
-  \item{destdir}{directory to store the compress source-codes of
-    packages that are downloaded from web repositories.}
-  \item{installWithVers}{if TRUE, will invoke the install of the
-    package such that it can be referenced by package version.}
-  \item{lib.loc}{character vector describing the location of R
-    library trees to search through (and update packages therein). }
-  \item{repos}{character vector, the base URL(s) of the repositories to
-    use, i.e. the URL of the CRAN master such as
-    "http://cran.r-project.org", which is the default, or its Statlib
-    mirror, "http://lib.stat.cmu.edu/R/CRAN". Can be NULL to install
-    from local zip files.}
-}
-
-\value{
-  No return value. 
-}
-
-\references{
-  King, Gary. Zelig: Everyones Statistical Software. 
-  \url{http://gking.harvard.edu/zelig}.
-}
-\author{
-  Ferdinand Alimadhi and Elena Villalon
-}
-\seealso{
-  \code{\link{zeligDepStatus}}
-  \code{\link{install.packages}}
-  
-}
-\note{
-  Installs first level dependencies packages of \pkg{Zelig} using R function
-  \code{install.packages} with the variable \code{\var{dependencies}} set
-  equal to TRUE.  If the installed packages have versions higher than the
- corresponding entry in Zelig-matrix, they are reported with a
- message. If the R version in the local environment is different from
- the R version that \pkg{Zelig} depends on, then, it is reported with a message and no futher action is taken.
-}
-
-\examples{
-##checks all libraries curently know for packages
-\dontrun{zeligDepUpdate()}
-##finds packages only in lib.loc
-\dontrun{zeligDepUpdate(lib.loc="~/.R/mylibrary")}
-}
-\keyword{documentation}
-
diff --git a/man/zeligVDC.Rd b/man/zeligVDC.Rd
deleted file mode 100644
index 252517b..0000000
--- a/man/zeligVDC.Rd
+++ /dev/null
@@ -1,83 +0,0 @@
-\name{zeligDescribeModelXML}
-\alias{zeligDescribeModelXML}
-\alias{zeligInstalledModels}
-\alias{zeligListModels}
-\alias{zeligModelDependency}
-\alias{zeligGetSpecial}
-\title{ Zelig interface functions}
-\description{
-	Zelig interface functions. Used by VDC DSB to  communicate with Zelig.
-}
-\usage{
-	zeligDescribeModelXML(modelName,force=FALSE,schemaVersion="1.1")
-	zeligInstalledModels(inZeligOnly=TRUE,schemaVersion="1.1")
-	zeligListModels(inZeligOnly=TRUE) 
-	zeligModelDependency(modelName,repos) 
-	zeligGetSpecial(modelName)
-}
-
-\arguments{
-  \item{modelName}{Name of model as returned by zeligInstalledModels or zeligListModels.}
-  \item{inZeligOnly}{Flag, include only models in official Zelig distribution}
-  \item{repos}{URL of default repository to use}
-  \item{schemaVersion}{version of Zelig schema}
-  \item{force}{generate a description even if no custom description supplied}
-}
-
-\value{
-Use zeligInstalledModels and zeligListModels to determine what models are available in zelig
-for a particular schema level. Use zmodel2string(zeligDescribeModel()) to generate an XML
-instance describing a model. Use zeligModelDependencies to generate a list of package
-dependencies for models. Use zeligGetSpecial to get the name special function, if any,
-to apply to the outcome variables. All functions return NULL if results are
-not available for that model.
-}
-
-\examples{\dontrun{
-	# show all available models
-	zeligListModels(inZeligOnly=FALSE)
-	# show installed models
-	zeligInstalledModels()
-	# show dependency for normal.bayes
-	zeligModelDependency("normal.bayes","http://cran.r-project.org/")
-	# description of logit
-	cat(zeligDescribeModelXML("ologit"))
-	# special function for factor analysis
- 	zeligGetSpecial("factor.mix")
-}
-\dontshow{
-\dontrun{
-
-# test model lists
-zd= zeligInstalledModels(schemaVersion="1.1")
-if (length(zd)<8 || sum(zd=="ls")!=1   || length(zeligListModels())<25 ) {
-	stop("Failed zeligListModels/zeligInstalledModels self test")
-}
-
-if (zeligModelDependency("poisson.bayes","")[1]!="MCMCpack") {
-	stop("Failed zeligModelDependency self test")
-}
-
-if (zeligGetSpecial("factor.mix")!="cbind") {
-	stop("Failed zeligGetSpecial self test")
-}
-
-if (grep("explanatory",  zeligDescribeModelXML("ologit"))!=1) {
-	stop("Failed zmodel2string/zeligDescribeModel self test")
-}
-}
-}
-
-}
-
-\author{
-Micah Altman
-\email{thedata-users\@lists.sourceforge.net}
-\url{http://thedata.org}
-}
-
-
-\seealso{ \link[Zelig]{zelig}}
-
-\keyword{IO}
-\keyword{print}
diff --git a/messages/templates/en/describe.canned b/messages/templates/en/describe.canned
new file mode 100644
index 0000000..d0aed58
--- /dev/null
+++ b/messages/templates/en/describe.canned
@@ -0,0 +1,21 @@
+"describe canned"
+describe.<<model name>> <- function () {
+package <- list(name="stats",
+version=".9"
+)
+
+# edit the below line to add a description
+# to this zelig model
+description <- "a zelig model"
+
+# edit the below with information about
+# the model that this zelig module is based on
+# the citation year may differ than the auto-generated one
+list(category = "",
+authors = "<<author>>",
+year = <<year>>,
+description = description,
+package = package,
+parameters=list(list())
+)
+}
diff --git a/messages/templates/en/describe.credit b/messages/templates/en/describe.credit
new file mode 100644
index 0000000..e9c4fea
--- /dev/null
+++ b/messages/templates/en/describe.credit
@@ -0,0 +1,7 @@
+"describe credit"
+# @author: <<author>>
+# @date:   <<date>>
+# .<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: describe.<<model>> generates citation information
+#       for the zelig model <<model>>
diff --git a/messages/templates/en/describe.how.to b/messages/templates/en/describe.how.to
new file mode 100644
index 0000000..67af97c
--- /dev/null
+++ b/messages/templates/en/describe.how.to
@@ -0,0 +1,11 @@
+"describe how-to"
+# HOW-TO WRITE A DESCRIBE FUNCTION
+# ================================
+# 1. Fill in the "description" variable with a *short*
+#    description of the model. e.g. "multinomial probit model"
+# 2. Fill in the return-value for "category"
+# 3. Fill in the return-value for "authors" with either:
+#    i.  <<author name>>, or
+#    ii. c(<<author 1>>, <<author 2>>, <<author 3>>, ...)
+# 4. Fill in the return-value for "year"
+# 5. <optional> Fill in the package variable
diff --git a/messages/templates/en/print.summary.sim.canned b/messages/templates/en/print.summary.sim.canned
new file mode 100644
index 0000000..a0d710e
--- /dev/null
+++ b/messages/templates/en/print.summary.sim.canned
@@ -0,0 +1,37 @@
+"print.summary.sim canned"
+print.summary.sim.<<model name>> <- function (obj, digits=F, print.x=F, ...) {
+  # prints typically have qi, and qi.names defined as part of the summary object
+  if (is.null(obj$qi.stat) || is.null(obj$qi.name)) {
+    stop("Error: ")
+  }
+
+  # warn if name lists do not match
+  if (any(sort(names(obj$qi.stat)) != sort(names(obj$qi.name)))) {
+    warning("warning: quantities of interest do not match its name list")  
+  }
+
+  print(obj$original)
+  
+  for (key in names(obj$qi.stat)) {
+    # value
+    val <- obj$qi.stat[[key]]
+
+    # pass-by conditions
+    if (is.na(val) || (is.list(val) && !length(val)) || is.null(val))
+      next
+
+    # print the title of the qi
+    s <- gsub("\\s+$", "", obj$qi.name[[key]])
+    message(s)
+    message(rep("=", min(nchar(s), 30)))
+    
+    # print the qi (should be a simple data-type, such as matrix or float)
+    print(val)
+    
+    # line-feed
+    message()
+  }
+  
+  # return invisibly
+  invisible(obj)
+}
diff --git a/messages/templates/en/print.summary.sim.credit b/messages/templates/en/print.summary.sim.credit
new file mode 100644
index 0000000..69d71bd
--- /dev/null
+++ b/messages/templates/en/print.summary.sim.credit
@@ -0,0 +1,7 @@
+"print.summary.sim credit"
+# @author: <<author>>
+# @date:   <<date>>
+# .<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: print.summary.sim.<<model>>.R outputs summary
+#       information from the zelig model <<model>>
diff --git a/messages/templates/en/print.summary.sim.how.to b/messages/templates/en/print.summary.sim.how.to
new file mode 100644
index 0000000..39832e0
--- /dev/null
+++ b/messages/templates/en/print.summary.sim.how.to
@@ -0,0 +1,9 @@
+"print.summary.sim how-to"
+# HOW TO WRITE A PRINT.SUMMARY.SIM FUNCTION
+# =========================================
+# 0. print.summary functions typically display the result
+#    from a summary object (a list) in an organized fashion
+#    with various text-formatting.
+# 1. for most purposes the default print function (below) should
+#    work, however, various formatting, etc. can be added typically
+#    without any impact on the operation of the program
diff --git a/messages/templates/en/qi.canned b/messages/templates/en/qi.canned
new file mode 100644
index 0000000..d650343
--- /dev/null
+++ b/messages/templates/en/qi.canned
@@ -0,0 +1,36 @@
+"qi canned"
+# @obj:    zelig object
+# @simpar: parameters passed to the qi
+# return:  qi list (qi.stat) and qi.names list (qi.name)
+
+# NOTE THIS FILE MUST ALWAYS BE EDITED!!!!
+# IT IS THE MOST IMPORTANT COMPONENT TO
+# ANY ZELIG MODULE
+qi.<<model name>> <- function(obj, simpar=NULL, x, x1=NULL, y=NULL) {
+  # initialize values that necessarily must be
+  # returned.
+  qi.stat <- list()
+  qi.name <- list()
+  
+
+  # add entries to qi.stat and qi.name
+  # in the end, names(qi.stat) should == names(qi.name)
+  # so that printing can be handled by the auto-generated
+  # function
+
+  # ...
+
+
+  # qi computation must be written by the developer,
+  # as it is impossible to tell automatically what is
+  # the statistic of interest (or how to compute it)
+  
+  # ...
+
+
+  # compute the quantities of interest
+  # of this model
+  list(qi.stat=qi.stat,
+       qi.name=qi.name
+       )
+}
diff --git a/messages/templates/en/qi.credit b/messages/templates/en/qi.credit
new file mode 100644
index 0000000..56de075
--- /dev/null
+++ b/messages/templates/en/qi.credit
@@ -0,0 +1,6 @@
+"qi credit"
+# @author: <<author>>
+# @date:   <<date>>
+# qi.<<model name>>, auto-generated by zkeleton, written by Matt Owen
+# ===========================
+# info: produces quantities of interest for zelig model <<model>>
diff --git a/messages/templates/en/qi.how.to b/messages/templates/en/qi.how.to
new file mode 100644
index 0000000..14af23c
--- /dev/null
+++ b/messages/templates/en/qi.how.to
@@ -0,0 +1,29 @@
+"qi how-to"
+# HOW-TO WRITE A QI FILE 
+# ======================
+# qi functions are the heart of any zelig module.
+# The qi function is passed information from the setx
+# function (via x, x1), parameters (simpar), and the
+# original zelig model (obj or object)
+# The developer (you) then writes the software that he/she
+# believes produces a significant quantity of interest.
+# The result should always be returned in the fashion
+# list(qi.stat=qi.stat,
+#      qi.name=qi.name
+#     )
+# where qi.stat is a list of qi.stat and qi.name have the form
+# qi.stat <- list(qi.1 = <<qi.1>>,
+#                 qi.2 = <<qi.2>>,
+#                 qi.3 = <<qi.3>>,
+#                 ...
+#                 )
+#
+# qi.name <- list(qi.1 = <<qi.1 name>>,
+#                 qi.2 = <<qi.2 name>>,
+#                 qi.3 = <<qi.3 name>>,
+#                 ...
+#                 )
+#
+# qi.1, qi.2, etc. should be named in an easy to comprehend manner
+# the indices of qi.stat and qi.name (qi.1, qi.2, etc.) should match,
+# otherwise a warning will be displayed during the print stage
diff --git a/messages/templates/en/setx.canned b/messages/templates/en/setx.canned
new file mode 100644
index 0000000..39baf60
--- /dev/null
+++ b/messages/templates/en/setx.canned
@@ -0,0 +1,18 @@
+"setx canned"
+setx.<<model name>> <- function(obj, data=NULL, ...) {
+# send to default
+res <- setx.default(obj, ...)
+
+# cast as appropriate data-type, then return
+class(res) <- "setx.<<model name>>"
+
+# attach data frame here, if the model
+# requires sophisticated number-crunching
+# after setx is called
+# if not, remove the below line
+if (!is.null(data)) {
+res$data <- data
+}
+
+res
+}
diff --git a/messages/templates/en/setx.credit b/messages/templates/en/setx.credit
new file mode 100644
index 0000000..e640503
--- /dev/null
+++ b/messages/templates/en/setx.credit
@@ -0,0 +1,7 @@
+"setx credit"
+# @author: <<author>>
+# @date:   <<date>>
+# setx.<<model name>>
+# auto-generated by zkeleton, written by Matt Owen
+# info: produces data based on the explanatory variables
+#       in the model (set by user, not developer)
diff --git a/messages/templates/en/setx.how.to b/messages/templates/en/setx.how.to
new file mode 100644
index 0000000..8129c39
--- /dev/null
+++ b/messages/templates/en/setx.how.to
@@ -0,0 +1,22 @@
+"setx how-to"
+# HOW-TO WRITE A SETX FUNCTION
+# ============================
+# 0. For most purposes setx.default will compute
+#    correctly values of interest, which are needed
+#    to compute the quantities of interest.  However,
+#    some models will not provide data in the correct
+#    fashion, etc. (e.g. computing a covariance matrix
+#    of the explanatory variables may not make sense
+#    or be relevant for certain models)
+# 1. parameters are passed in as a zelig model and potentially
+#    a new data-set.  The new data-set is used in place of the
+#    original one that was passed into zelig.  This
+# 2. the result of the setx function should be of class
+#    "setx.<<model name>>"
+#    this is important, because it ensures that the correct
+#    qi function and sim function are called
+# 3. <optional> the data frame used to compute this setx may
+#    be attached with the line
+#      res$data <- data
+#    if the sim function needs to make further computations
+#    if this is not the case, please omit that line
diff --git a/messages/templates/en/sim.canned b/messages/templates/en/sim.canned
new file mode 100644
index 0000000..e16427b
--- /dev/null
+++ b/messages/templates/en/sim.canned
@@ -0,0 +1,32 @@
+"sim canned"
+sim.<<model name>> <- function(obj,
+x=NULL,
+x1=NULL,
+num=c(1000, 100),
+prev = NULL,
+cond.data = NULL, ...
+) {
+# error-catching
+if (is.null(x))
+stop("Error: x cannot be NULL")
+
+# simulate qi's for x
+# invoke qi.<model name>
+res <- qi.<<model name>>(obj, x=x, x1=x1)
+
+# change call name
+obj$call[[1]] <- as.name("sim")
+
+
+# append
+res$call <- match.call(expand.dots=T)
+res$zelig.call <- obj$call
+res$par <- NA
+res$obj <- obj
+
+# change class so correct summary/print function
+# can be called
+class(res) <- "sim.<<model name>>"
+
+res
+}
diff --git a/messages/templates/en/sim.credit b/messages/templates/en/sim.credit
new file mode 100644
index 0000000..b5b5f01
--- /dev/null
+++ b/messages/templates/en/sim.credit
@@ -0,0 +1,8 @@
+"sim credit"
+# @author: <<author>>
+# @date:   <<date>>
+# sim.<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: simulates quantities of interest, then arranges
+#       the data in an easily interpretable manner.
+#       invokes qi.<<model>>.R
diff --git a/messages/templates/en/sim.how.to b/messages/templates/en/sim.how.to
new file mode 100644
index 0000000..26f9a4c
--- /dev/null
+++ b/messages/templates/en/sim.how.to
@@ -0,0 +1,16 @@
+"sim how-to"
+# HOW-TO WRITE A SIM FUNCTION
+# ===========================
+# 0. The sim function invokes the qi function.
+#    Then, returns the quantities of interests
+#    alongside a host of other relevant data, that
+#    is presented along with summary and print
+# 1. importantly, sim should always have the line:
+#      res <- qi(obj, x=x, x1=x1)
+#    this ensures that the qi's are computed with the
+#    exact parameters that enter the sim function itself
+# 2. the call to sim and the call should be returned along
+#    with the quantities of interest (qi.stat) and their
+#    titles (qi.name)
+# 3. the returned object should have class type:
+#     "sim.<<model name>>"
diff --git a/messages/templates/en/sim.setx.canned b/messages/templates/en/sim.setx.canned
new file mode 100644
index 0000000..bcaaca0
--- /dev/null
+++ b/messages/templates/en/sim.setx.canned
@@ -0,0 +1,8 @@
+"sim.setx canned"
+sim.setx.<<model name>> <- function (obj, x, ...) {
+# this function exists so that if
+# sim(obj) and sim(obj, x) should have radically
+# different behavior, we will be able to place them
+# in separate files easily
+sim.<<model name>>(obj, x, ...)
+}
diff --git a/messages/templates/en/sim.setx.credit b/messages/templates/en/sim.setx.credit
new file mode 100644
index 0000000..6dfa9bd
--- /dev/null
+++ b/messages/templates/en/sim.setx.credit
@@ -0,0 +1,8 @@
+"sim.setx credit"
+# @author: <<author>>
+# @date:   <<date>>
+# sim.setx.<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: simulates qi's when additional explanatory
+#       information is provided.  usually simply
+#       invokes the method specified in sim.<<model>>.R
diff --git a/messages/templates/en/sim.setx.how.to b/messages/templates/en/sim.setx.how.to
new file mode 100644
index 0000000..7cb2233
--- /dev/null
+++ b/messages/templates/en/sim.setx.how.to
@@ -0,0 +1,28 @@
+"sim.setx how-to"
+# HOW-TO WRITE A SIM.SETX FILE
+# ============================
+# 0. sim.setx functions offer alternative ways
+#    to simulate quantities of interest.  That is,
+#    sim functions are called in this fashion
+#      sim(zelig.out)
+#
+#    while sim.setx functs are called as:
+#      sim(zelig.out, x)
+#    or
+#      sim(zelig.out, x, x1)
+#
+#    this allows the developer to separate
+#    the different types of simulation algorithms
+#    that may exist for his or her model
+#
+#    if the model simulates quantities of interest
+#    identically to that of the standard sim function
+#    it should then only contain the line:
+#      sim.<<model name>>(obj, x, ...)
+#
+# 1. invoke qi with
+#      qi(obj, x=x, x1=x1)
+#
+#    do relevant computation on the return quantities
+#    of interest, and attach relevant data that needs
+#    to be passed to print and summary functions
diff --git a/messages/templates/en/summary.sim.canned b/messages/templates/en/summary.sim.canned
new file mode 100644
index 0000000..c1f74ae
--- /dev/null
+++ b/messages/templates/en/summary.sim.canned
@@ -0,0 +1,18 @@
+"summary.sim canned"
+summary.sim.<<model name>> <- function(obj, ...) {
+# set important summary objects
+# zelig models always have qi.stat, and qi.name
+# elements
+res <- list(model="<<model name>>",
+qi.stat   = obj$qi.stat,
+qi.name    = obj$qi.name,
+original   = obj$obj,
+call       = obj$call,
+zelig.call = obj$zelig.call
+)
+
+# cast as class
+class(res) <- "summary.sim.<<model name>>"
+
+res
+}
diff --git a/messages/templates/en/summary.sim.credit b/messages/templates/en/summary.sim.credit
new file mode 100644
index 0000000..2a61755
--- /dev/null
+++ b/messages/templates/en/summary.sim.credit
@@ -0,0 +1,7 @@
+"summary.sim credit"
+# @author: <<author>>
+# @date:   <<date>>
+# summary.sim.<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: returns a list of data summarizing the sim object
+#       should always include qi.stat, qi.name entry
diff --git a/messages/templates/en/summary.sim.how.to b/messages/templates/en/summary.sim.how.to
new file mode 100644
index 0000000..9f5b9ad
--- /dev/null
+++ b/messages/templates/en/summary.sim.how.to
@@ -0,0 +1,24 @@
+"summary.sim how-to"
+# HOW TO WRITE A SUMMARY.SIM FUNCTION
+# ===================================
+# 0. summary.sim functions exclusively return
+#    a list of important data, *summarizing*
+#    important features of the result of the sim
+#    function
+# 1. like summaries of most objects, the result should
+#    contain a reference to the call that created it,
+#    information on the class-type, etc.:
+#      list(call = obj$call,
+#           zelig.call = obj$zelig.call,
+#           ...)
+# 2. importantly, summary.sim must return a qi.stat
+#    and a qi.name data object.  the indices of these
+#    objects must have the same values for consistency
+#    that is, names(qi.stat) == names(qi.name)
+#    the return should resemble:
+#      list(
+#           qi.stat    = obj$qi.stat,
+#           qi.name    = obj$qi.name,
+#           call       = obj$call,
+#           zelig.call = obj$zelig.call
+#           ...)
diff --git a/messages/templates/en/zelig2.canned b/messages/templates/en/zelig2.canned
new file mode 100644
index 0000000..bce3b0b
--- /dev/null
+++ b/messages/templates/en/zelig2.canned
@@ -0,0 +1,17 @@
+"zelig2 canned"
+zelig2<<model name>> <- function (formula, model, data, M, ...) {
+# this file acts as an interface to the original model
+# the return ("mf") is a function call that zelig will later invoke
+# in order to process the specified data set
+#
+# any parameters unnecessary to the model
+# should be set to NULL (e.g. mf$M, mf$robust, etc...)
+
+mf <- match.call(expand.dots=T)
+mf$M <- mf$robust <- NULL
+mf[[1]] <- <<model function>>
+mf$model <- NULL
+mf$data <- data
+mf$formula <- formula
+as.call(mf)
+}
diff --git a/messages/templates/en/zelig2.credit b/messages/templates/en/zelig2.credit
new file mode 100644
index 0000000..3363a00
--- /dev/null
+++ b/messages/templates/en/zelig2.credit
@@ -0,0 +1,7 @@
+"zelig2 credit"
+# @author: <<author>>
+# @date:   <<date>>
+# zelig2<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: re-interprets parameters passed into zelig as
+#       legal parameters to pass into <<model function>>
diff --git a/messages/templates/en/zelig2.how.to b/messages/templates/en/zelig2.how.to
new file mode 100644
index 0000000..c716544
--- /dev/null
+++ b/messages/templates/en/zelig2.how.to
@@ -0,0 +1,25 @@
+"zelig2 how-to"
+# 0. the zelig2 function acts as an interface between
+#    the existing model and the zelig module that is
+#    being created.
+# 1. construct a call object containing all the parameters
+#    passed into "zelig2<<model name>>"
+# 2. remove all parameters that will not make sense within
+#    the original model (in the demo code, e.g. robust, M, etc.)
+# 3. re-assign the call object's first entry to the name of the
+#    model that must be called.
+#    This step is crucial, as it is how your model invokes the
+#    pre-existing model's name
+# 4. attach the data frame (mf$data <- data)
+# 5. return the call (as a call)
+#
+# NOTE: the returned value is going to be evaluated, and -
+#       as a result - call the pre-existing model.  Any
+#       parameters passed to the original zelig function
+#       will be forwarded to the model unless set to NULL
+#
+#
+# NOTE: THIS FUNCTION IS INVOKED BY THE ZELIG FUNCTION
+#       (NOT THE USER)
+#
+# call order: zelig -> zelig2<<model name>>"
diff --git a/messages/templates/en/zelig3.canned b/messages/templates/en/zelig3.canned
new file mode 100644
index 0000000..0ef1cf8
--- /dev/null
+++ b/messages/templates/en/zelig3.canned
@@ -0,0 +1,11 @@
+"zelig3 canned"
+zelig3<<model name>> <- function (res, ...) {
+class(res) <- c("<<model name>>", class(res))
+
+# give it a terms object
+if (is.null(res$terms))
+res$terms <- terms(res$call$formula)
+
+# return
+res
+}
diff --git a/messages/templates/en/zelig3.credit b/messages/templates/en/zelig3.credit
new file mode 100644
index 0000000..92d9bc6
--- /dev/null
+++ b/messages/templates/en/zelig3.credit
@@ -0,0 +1,8 @@
+"zelig3 credit"
+# @author: <<author>>
+# @date:   <<date>>
+# zelig3<<model>>.R
+# auto-generated by zkeleton, written by Matt Owen
+# info: not always necessary, but is useful to cast data
+#       the result of a zelig2* call into whatever data-type
+#       we want our setx function to work with
diff --git a/messages/templates/en/zelig3.how.to b/messages/templates/en/zelig3.how.to
new file mode 100644
index 0000000..44a87a7
--- /dev/null
+++ b/messages/templates/en/zelig3.how.to
@@ -0,0 +1,16 @@
+"zelig3 how-to"
+# HOW-TO WRITE A ZELIG3 FUNCTION
+# ==============================
+# 0. zelig3 functions act as in-betweens between
+#    the zelig2 function and the zelig and sim function.
+#    That is, after the model runs its initial computations,
+#    it is often important to cast the result as an object of
+#    the class which it is named after.  This is crucial to
+#    ensure that setx invokes the correct method
+# 1. set the class of the res passed into with the line:
+#      class(res) <- c("<<model name>>", class(res))
+# 2. return the obj (with class newly extended)
+#
+# NOTE: if the class "<<model name>>" is not added, the function
+#       setx.default will be used, which may have unexpected
+#       results
diff --git a/po/R-en.po b/po/R-en.po
new file mode 100644
index 0000000..95d07b9
--- /dev/null
+++ b/po/R-en.po
@@ -0,0 +1,524 @@
+msgid "describe canned"
+msgstr
+"describe.<<model name>> <- function () {\n"
+"package <- list(name=\"stats\",\n"
+"version=\".9\"\n"
+")\n"
+"\n"
+"# edit the below line to add a description\n"
+"# to this zelig model\n"
+"description <- \"a zelig model\"\n"
+"\n"
+"# edit the below with information about\n"
+"# the model that this zelig module is based on\n"
+"# the citation year may differ than the auto-generated one\n"
+"list(category = \"\",\n"
+"authors = \"<<author>>\",\n"
+"year = <<year>>,\n"
+"description = description,\n"
+"package = package,\n"
+"parameters=list(list())\n"
+")\n"
+"}"
+
+
+msgid "describe how-to"
+msgstr
+"# HOW-TO WRITE A DESCRIBE FUNCTION\n"
+"# ================================\n"
+"# 1. Fill in the \"description\" variable with a *short*\n"
+"#    description of the model. e.g. \"multinomial probit model\"\n"
+"# 2. Fill in the return-value for \"category\"\n"
+"# 3. Fill in the return-value for \"authors\" with either:\n"
+"#    i.  <<author name>>, or\n"
+"#    ii. c(<<author 1>>, <<author 2>>, <<author 3>>, ...)\n"
+"# 4. Fill in the return-value for \"year\"\n"
+"# 5. <optional> Fill in the package variable"
+
+
+msgid "describe credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# .<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: describe.<<model>> generates citation information\n"
+"#       for the zelig model <<model>>"
+
+
+msgid "zelig2 canned"
+msgstr
+"zelig2<<model name>> <- function (formula, model, data, M, ...) {\n"
+"# this file acts as an interface to the original model\n"
+"# the return (\"mf\") is a function call that zelig will later invoke\n"
+"# in order to process the specified data set\n"
+"#\n"
+"# any parameters unnecessary to the model\n"
+"# should be set to NULL (e.g. mf$M, mf$robust, etc...)\n"
+"\n"
+"mf <- match.call(expand.dots=T)\n"
+"mf$M <- mf$robust <- NULL\n"
+"mf[[1]] <- <<model function>>\n"
+"mf$model <- NULL\n"
+"mf$data <- data\n"
+"mf$formula <- formula\n"
+"as.call(mf)\n"
+"}"
+
+
+msgid "zelig2 how-to"
+msgstr
+"# 0. the zelig2 function acts as an interface between\n"
+"#    the existing model and the zelig module that is\n"
+"#    being created.\n"
+"# 1. construct a call object containing all the parameters\n"
+"#    passed into \"zelig2<<model name>>\"\n"
+"# 2. remove all parameters that will not make sense within\n"
+"#    the original model (in the demo code, e.g. robust, M, etc.)\n"
+"# 3. re-assign the call object's first entry to the name of the\n"
+"#    model that must be called.\n"
+"#    This step is crucial, as it is how your model invokes the\n"
+"#    pre-existing model's name\n"
+"# 4. attach the data frame (mf$data <- data)\n"
+"# 5. return the call (as a call)\n"
+"#\n"
+"# NOTE: the returned value is going to be evaluated, and -\n"
+"#       as a result - call the pre-existing model.  Any\n"
+"#       parameters passed to the original zelig function\n"
+"#       will be forwarded to the model unless set to NULL\n"
+"#\n"
+"#\n"
+"# NOTE: THIS FUNCTION IS INVOKED BY THE ZELIG FUNCTION\n"
+"#       (NOT THE USER)\n"
+"#\n"
+"# call order: zelig -> zelig2<<model name>>\""
+
+
+msgid "zelig2 credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# zelig2<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: re-interprets parameters passed into zelig as\n"
+"#       legal parameters to pass into <<model function>>"
+
+
+msgid "zelig3 canned"
+msgstr
+"zelig3<<model name>> <- function (res, ...) {\n"
+"class(res) <- c(\"<<model name>>\", class(res))\n"
+"\n"
+"# give it a terms object\n"
+"if (is.null(res$terms))\n"
+"res$terms <- terms(res$call$formula)\n"
+"\n"
+"# return\n"
+"res\n"
+"}"
+
+
+msgid "zelig3 how-to"
+msgstr
+"# HOW-TO WRITE A ZELIG3 FUNCTION\n"
+"# ==============================\n"
+"# 0. zelig3 functions act as in-betweens between\n"
+"#    the zelig2 function and the zelig and sim function.\n"
+"#    That is, after the model runs its initial computations,\n"
+"#    it is often important to cast the result as an object of\n"
+"#    the class which it is named after.  This is crucial to\n"
+"#    ensure that setx invokes the correct method\n"
+"# 1. set the class of the res passed into with the line:\n"
+"#      class(res) <- c(\"<<model name>>\", class(res))\n"
+"# 2. return the obj (with class newly extended)\n"
+"#\n"
+"# NOTE: if the class \"<<model name>>\" is not added, the function\n"
+"#       setx.default will be used, which may have unexpected\n"
+"#       results"
+
+
+msgid "zelig3 credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# zelig3<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: not always necessary, but is useful to cast data\n"
+"#       the result of a zelig2* call into whatever data-type\n"
+"#       we want our setx function to work with"
+
+
+msgid "setx canned"
+msgstr
+"setx.<<model name>> <- function(obj, data=NULL, ...) {\n"
+"# send to default\n"
+"res <- setx.default(obj, ...)\n"
+"\n"
+"# cast as appropriate data-type, then return\n"
+"class(res) <- \"setx.<<model name>>\"\n"
+"\n"
+"# attach data frame here, if the model\n"
+"# requires sophisticated number-crunching\n"
+"# after setx is called\n"
+"# if not, remove the below line\n"
+"if (!is.null(data)) {\n"
+"res$data <- data\n"
+"}\n"
+"\n"
+"res\n"
+"}"
+
+
+msgid "setx how-to"
+msgstr
+"# HOW-TO WRITE A SETX FUNCTION\n"
+"# ============================\n"
+"# 0. For most purposes setx.default will compute\n"
+"#    correctly values of interest, which are needed\n"
+"#    to compute the quantities of interest.  However,\n"
+"#    some models will not provide data in the correct\n"
+"#    fashion, etc. (e.g. computing a covariance matrix\n"
+"#    of the explanatory variables may not make sense\n"
+"#    or be relevant for certain models)\n"
+"# 1. parameters are passed in as a zelig model and potentially\n"
+"#    a new data-set.  The new data-set is used in place of the\n"
+"#    original one that was passed into zelig.  This\n"
+"# 2. the result of the setx function should be of class\n"
+"#    \"setx.<<model name>>\"\n"
+"#    this is important, because it ensures that the correct\n"
+"#    qi function and sim function are called\n"
+"# 3. <optional> the data frame used to compute this setx may\n"
+"#    be attached with the line\n"
+"#      res$data <- data\n"
+"#    if the sim function needs to make further computations\n"
+"#    if this is not the case, please omit that line"
+
+
+msgid "setx credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# setx.<<model name>>\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: produces data based on the explanatory variables\n"
+"#       in the model (set by user, not developer)"
+
+
+msgid "sim canned"
+msgstr
+"sim.<<model name>> <- function(obj,\n"
+"x=NULL,\n"
+"x1=NULL,\n"
+"num=c(1000, 100),\n"
+"prev = NULL,\n"
+"cond.data = NULL, ...\n"
+") {\n"
+"# error-catching\n"
+"if (is.null(x))\n"
+"stop(\"Error: x cannot be NULL\")\n"
+"\n"
+"# simulate qi's for x\n"
+"# invoke qi.<model name>\n"
+"res <- qi.<<model name>>(obj, x=x, x1=x1)\n"
+"\n"
+"# change call name\n"
+"obj$call[[1]] <- as.name(\"sim\")\n"
+"\n"
+"\n"
+"# append\n"
+"res$call <- match.call(expand.dots=T)\n"
+"res$zelig.call <- obj$call\n"
+"res$par <- NA\n"
+"res$obj <- obj\n"
+"\n"
+"# change class so correct summary/print function\n"
+"# can be called\n"
+"class(res) <- \"sim.<<model name>>\"\n"
+"\n"
+"res\n"
+"}"
+
+
+msgid "sim how-to"
+msgstr
+"# HOW-TO WRITE A SIM FUNCTION\n"
+"# ===========================\n"
+"# 0. The sim function invokes the qi function.\n"
+"#    Then, returns the quantities of interests\n"
+"#    alongside a host of other relevant data, that\n"
+"#    is presented along with summary and print\n"
+"# 1. importantly, sim should always have the line:\n"
+"#      res <- qi(obj, x=x, x1=x1)\n"
+"#    this ensures that the qi's are computed with the\n"
+"#    exact parameters that enter the sim function itself\n"
+"# 2. the call to sim and the call should be returned along\n"
+"#    with the quantities of interest (qi.stat) and their\n"
+"#    titles (qi.name)\n"
+"# 3. the returned object should have class type:\n"
+"#     \"sim.<<model name>>\""
+
+
+msgid "sim credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# sim.<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: simulates quantities of interest, then arranges\n"
+"#       the data in an easily interpretable manner.\n"
+"#       invokes qi.<<model>>.R"
+
+
+msgid "sim.setx canned"
+msgstr
+"sim.setx.<<model name>> <- function (obj, x, ...) {\n"
+"# this function exists so that if\n"
+"# sim(obj) and sim(obj, x) should have radically\n"
+"# different behavior, we will be able to place them\n"
+"# in separate files easily\n"
+"sim.<<model name>>(obj, x, ...)\n"
+"}"
+
+
+msgid "sim.setx how-to"
+msgstr
+"# HOW-TO WRITE A SIM.SETX FILE\n"
+"# ============================\n"
+"# 0. sim.setx functions offer alternative ways\n"
+"#    to simulate quantities of interest.  That is,\n"
+"#    sim functions are called in this fashion\n"
+"#      sim(zelig.out)\n"
+"#\n"
+"#    while sim.setx functs are called as:\n"
+"#      sim(zelig.out, x)\n"
+"#    or\n"
+"#      sim(zelig.out, x, x1)\n"
+"#\n"
+"#    this allows the developer to separate\n"
+"#    the different types of simulation algorithms\n"
+"#    that may exist for his or her model\n"
+"#\n"
+"#    if the model simulates quantities of interest\n"
+"#    identically to that of the standard sim function\n"
+"#    it should then only contain the line:\n"
+"#      sim.<<model name>>(obj, x, ...)\n"
+"#\n"
+"# 1. invoke qi with\n"
+"#      qi(obj, x=x, x1=x1)\n"
+"#\n"
+"#    do relevant computation on the return quantities\n"
+"#    of interest, and attach relevant data that needs\n"
+"#    to be passed to print and summary functions"
+
+
+msgid "sim.setx credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# sim.setx.<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: simulates qi's when additional explanatory\n"
+"#       information is provided.  usually simply\n"
+"#       invokes the method specified in sim.<<model>>.R"
+
+
+msgid "summary.sim canned"
+msgstr
+"summary.sim.<<model name>> <- function(obj, ...) {\n"
+"# set important summary objects\n"
+"# zelig models always have qi.stat, and qi.name\n"
+"# elements\n"
+"res <- list(model=\"<<model name>>\",\n"
+"qi.stat   = obj$qi.stat,\n"
+"qi.name    = obj$qi.name,\n"
+"original   = obj$obj,\n"
+"call       = obj$call,\n"
+"zelig.call = obj$zelig.call\n"
+")\n"
+"\n"
+"# cast as class\n"
+"class(res) <- \"summary.sim.<<model name>>\"\n"
+"\n"
+"res\n"
+"}"
+
+
+msgid "summary.sim how-to"
+msgstr
+"# HOW TO WRITE A SUMMARY.SIM FUNCTION\n"
+"# ===================================\n"
+"# 0. summary.sim functions exclusively return\n"
+"#    a list of important data, *summarizing*\n"
+"#    important features of the result of the sim\n"
+"#    function\n"
+"# 1. like summaries of most objects, the result should\n"
+"#    contain a reference to the call that created it,\n"
+"#    information on the class-type, etc.:\n"
+"#      list(call = obj$call,\n"
+"#           zelig.call = obj$zelig.call,\n"
+"#           ...)\n"
+"# 2. importantly, summary.sim must return a qi.stat\n"
+"#    and a qi.name data object.  the indices of these\n"
+"#    objects must have the same values for consistency\n"
+"#    that is, names(qi.stat) == names(qi.name)\n"
+"#    the return should resemble:\n"
+"#      list(\n"
+"#           qi.stat    = obj$qi.stat,\n"
+"#           qi.name    = obj$qi.name,\n"
+"#           call       = obj$call,\n"
+"#           zelig.call = obj$zelig.call\n"
+"#           ...)"
+
+
+msgid "summary.sim credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# summary.sim.<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: returns a list of data summarizing the sim object\n"
+"#       should always include qi.stat, qi.name entry"
+
+
+msgid "print.summary.sim canned"
+msgstr
+"print.summary.sim.<<model name>> <- function (obj, digits=F, print.x=F, ...) {\n"
+"# prints typically have qi, and qi.names defined as part of the summary object\n"
+"if (is.null(obj$qi.stat) || is.null(obj$qi.name)) {\n"
+"stop(\"Error: \")\n"
+"}\n"
+"\n"
+"# warn if name lists do not match\n"
+"if (any(sort(names(obj$qi.stat)) != sort(names(obj$qi.name)))) {\n"
+"warning(\"warning: quantities of interest do not match its name list\")\n"
+"}\n"
+"\n"
+"print(obj$original)\n"
+"\n"
+"for (key in names(obj$qi.stat)) {\n"
+"# value\n"
+"val <- obj$qi.stat[[key]]\n"
+"\n"
+"# pass-by conditions\n"
+"if (is.na(val) || (is.list(val) && !length(val)) || is.null(val))\n"
+"next\n"
+"\n"
+"# print the title of the qi\n"
+"s <- gsub(\"\\\\s+$\", \"\", obj$qi.name[[key]])\n"
+"message(s)\n"
+"message(rep(\"=\", min(nchar(s), 30)))\n"
+"\n"
+"# print the qi (should be a simple data-type, such as matrix or float)\n"
+"print(val)\n"
+"\n"
+"# line-feed\n"
+"message()\n"
+"}\n"
+"\n"
+"# return invisibly\n"
+"invisible(obj)\n"
+"}"
+
+
+msgid "print.summary.sim how-to"
+msgstr
+"# HOW TO WRITE A PRINT.SUMMARY.SIM FUNCTION\n"
+"# =========================================\n"
+"# 0. print.summary functions typically display the result\n"
+"#    from a summary object (a list) in an organized fashion\n"
+"#    with various text-formatting.\n"
+"# 1. for most purposes the default print function (below) should\n"
+"#    work, however, various formatting, etc. can be added typically\n"
+"#    without any impact on the operation of the program"
+
+
+msgid "print.summary.sim credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# .<<model>>.R\n"
+"# auto-generated by zkeleton, written by Matt Owen\n"
+"# info: print.summary.sim.<<model>>.R outputs summary\n"
+"#       information from the zelig model <<model>>"
+
+
+msgid "qi canned"
+msgstr
+"# @obj:    zelig object\n"
+"# @simpar: parameters passed to the qi\n"
+"# return:  qi list (qi.stat) and qi.names list (qi.name)\n"
+"\n"
+"# NOTE THIS FILE MUST ALWAYS BE EDITED!!!!\n"
+"# IT IS THE MOST IMPORTANT COMPONENT TO\n"
+"# ANY ZELIG MODULE\n"
+"qi.<<model name>> <- function(obj, simpar=NULL, x, x1=NULL, y=NULL) {\n"
+"# initialize values that necessarily must be\n"
+"# returned.\n"
+"qi.stat <- list()\n"
+"qi.name <- list()\n"
+"\n"
+"\n"
+"# add entries to qi.stat and qi.name\n"
+"# in the end, names(qi.stat) should == names(qi.name)\n"
+"# so that printing can be handled by the auto-generated\n"
+"# function\n"
+"\n"
+"# ...\n"
+"\n"
+"\n"
+"# qi computation must be written by the developer,\n"
+"# as it is impossible to tell automatically what is\n"
+"# the statistic of interest (or how to compute it)\n"
+"\n"
+"# ...\n"
+"\n"
+"\n"
+"# compute the quantities of interest\n"
+"# of this model\n"
+"list(qi.stat=qi.stat,\n"
+"qi.name=qi.name\n"
+")\n"
+"}"
+
+
+msgid "qi how-to"
+msgstr
+"# HOW-TO WRITE A QI FILE\n"
+"# ======================\n"
+"# qi functions are the heart of any zelig module.\n"
+"# The qi function is passed information from the setx\n"
+"# function (via x, x1), parameters (simpar), and the\n"
+"# original zelig model (obj or object)\n"
+"# The developer (you) then writes the software that he/she\n"
+"# believes produces a significant quantity of interest.\n"
+"# The result should always be returned in the fashion\n"
+"# list(qi.stat=qi.stat,\n"
+"#      qi.name=qi.name\n"
+"#     )\n"
+"# where qi.stat is a list of qi.stat and qi.name have the form\n"
+"# qi.stat <- list(qi.1 = <<qi.1>>,\n"
+"#                 qi.2 = <<qi.2>>,\n"
+"#                 qi.3 = <<qi.3>>,\n"
+"#                 ...\n"
+"#                 )\n"
+"#\n"
+"# qi.name <- list(qi.1 = <<qi.1 name>>,\n"
+"#                 qi.2 = <<qi.2 name>>,\n"
+"#                 qi.3 = <<qi.3 name>>,\n"
+"#                 ...\n"
+"#                 )\n"
+"#\n"
+"# qi.1, qi.2, etc. should be named in an easy to comprehend manner\n"
+"# the indices of qi.stat and qi.name (qi.1, qi.2, etc.) should match,\n"
+"# otherwise a warning will be displayed during the print stage"
+
+
+msgid "qi credit"
+msgstr
+"# @author: <<author>>\n"
+"# @date:   <<date>>\n"
+"# qi.<<model name>>, auto-generated by zkeleton, written by Matt Owen\n"
+"# ===========================\n"
+"# info: produces quantities of interest for zelig model <<model>>"
+
+
diff --git a/tests/MatchIt.R b/tests/MatchIt.R
new file mode 100644
index 0000000..f0de776
--- /dev/null
+++ b/tests/MatchIt.R
@@ -0,0 +1,19 @@
+library(MatchIt)
+library(Zelig)
+
+data(lalonde)
+
+m <- matchit(
+             treat ~ age + educ + black + hispan + nodegree + married + re74 + re75,
+             data = lalonde,
+             method = "subclass",
+             subclass = 4
+             )
+
+z <- zelig(re78 ~ re74 + re75 + distance, 
+           data = match.data(m, "control"), 
+           model = "ls",
+           by = "subclass"
+           )
+
+# Fin.
diff --git a/tests/amelia.R b/tests/amelia.R
new file mode 100644
index 0000000..416049c
--- /dev/null
+++ b/tests/amelia.R
@@ -0,0 +1,45 @@
+library(Zelig)
+library(Amelia)
+
+# Create data set
+
+beta <- c(.3, -10)
+
+.x1 <- runif(1000, -5, 5)
+.x2 <- runif(1000, -2, 2)
+.x3 <- sample(1:4, 1000, TRUE)
+.y <- t(beta %*% rbind(.x1 + rnorm(1000, 0, 1.2), .x2 + rnorm(1000, 0, .1))) + 3 + rnorm(1000, 0, .3)
+
+data.set <- data.frame(y = .y, x1 = .x1, x2 = .x2, x3 = .x3)
+
+# Add missing data
+
+missing.data.percent <- .3
+missing.data.column <- "x1"
+missing.data.rows <- sample(1:nrow(data.set), round(missing.data.percent * nrow(data.set)))
+
+data.set[missing.data.rows, missing.data.column] <- NA
+
+# Impute
+
+imputed.data <- amelia(data.set)
+
+# Remove unused data sets
+
+rm(.y, .x1, .x2)
+
+# Print amelia obj
+
+imputed.data
+
+# Fit statistical model
+
+z <- zelig(y ~ x1 + x2, model = "ls", data = imputed.data)
+x <- setx(z)
+s <- sim(z, x)
+
+#
+
+summary(s)
+
+# Fin.
diff --git a/tests/by.R b/tests/by.R
new file mode 100644
index 0000000..3803d54
--- /dev/null
+++ b/tests/by.R
@@ -0,0 +1,9 @@
+library(Amelia)
+library(Zelig)
+
+data(turnout)
+
+z <- zelig(vote ~ educate + income, model = "logit", by = "race", data = turnout)
+x <- setx(z, educate = 4)
+s <- sim(z, x)
+summary(s)
diff --git a/tests/lognorm.R b/tests/lognorm.R
new file mode 100644
index 0000000..35926f5
--- /dev/null
+++ b/tests/lognorm.R
@@ -0,0 +1,26 @@
+library(Zelig)
+# Load the sample data:  
+data(coalition)
+
+# Estimate the model:
+user.prompt()
+z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "lognorm",
+               data = coalition)
+user.prompt()
+# View the regression output:  
+summary(z.out)
+
+# Set the baseline values (with the ruling coalition in the minority)
+# and the alternative values (with the ruling coalition in the majority)
+# for X:
+user.prompt()
+x.low <- setx(z.out, numst2 = 0)
+x.high <- setx(z.out, numst2 = 1)
+
+# Simulate expected values qi$ev and first differences qi$fd:
+user.prompt()
+s.out <- sim(z.out, x = x.low, x1 = x.high)
+user.prompt()
+summary(s.out)
+user.prompt()
+plot(s.out)
diff --git a/tests/mi.R b/tests/mi.R
new file mode 100644
index 0000000..e71577e
--- /dev/null
+++ b/tests/mi.R
@@ -0,0 +1,9 @@
+library(Zelig)
+
+data(turnout)
+
+z <- zelig(vote ~ age, model = "logit", data = mi(turnout[1:10, ], turnout[100:110, ]))
+
+x <- setx(z, age = 90)
+
+s.out1 <- sim(z, x=x, num=20)
diff --git a/tests/mix.R b/tests/mix.R
new file mode 100644
index 0000000..d672f0c
--- /dev/null
+++ b/tests/mix.R
@@ -0,0 +1,28 @@
+library(Zelig)
+
+# mix(list('a'))
+# mix(list('a', 'b', 'c'), list(1, 2, 3, 4))
+#
+#
+#
+
+data(turnout)
+
+z1 <- zelig(vote ~ race, model = "logit", data = turnout)
+x1 <- setx(z1, race = "others")
+summary(x1)
+
+z2 <- zelig(vote ~ race, model = "logit", data = turnout)
+x2 <- setx(z2, race = c("white", "others"))
+summary(x2)
+
+z3 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
+x3 <- setx(z3, race = "others", educate = 10:15)
+class(x3)
+summary(x3)
+
+z4 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
+x4 <- setx(z4)
+summary(x4)
+
+# Fin.
diff --git a/tests/models-bayes.R b/tests/models-bayes.R
new file mode 100644
index 0000000..2169a54
--- /dev/null
+++ b/tests/models-bayes.R
@@ -0,0 +1,135 @@
+library(Zelig)
+
+data(turnout)
+data(mexico)
+data(macro)
+data(sanction)
+
+# mlogit.bayes
+# mlogit.bayes
+# mlogit.bayes
+
+z.out <- zelig(
+               vote88 ~ pristr + othcok + othsocok,
+               model = "mlogit.bayes", 
+               data = mexico
+               )
+x.out <- setx(z.out)
+
+s.out <- sim(z.out, x = x.out)
+
+summary(z.out)
+summary(s.out)
+
+# logit.bayes
+# logit.bayes
+# logit.bayes
+
+names(swiss) <- c("Fert","Agr","Exam","Educ","Cath","InfMort")
+
+z.out <- zelig(
+               vote ~ race + educate,
+               model = "logit.bayes",
+               verbose = FALSE,
+               data  = turnout
+               )
+summary(z.out)
+
+x.out <- setx(z.out)
+x1.out <- setx(z.out, educate=5)
+
+s.out <- sim(z.out, x.out, x1.out)
+
+summary(s.out)
+
+# normal.bayes
+# normal.bayes
+# normal.bayes
+
+z.out <- zelig(
+               unem ~ gdp + capmob + trade,
+               model = "normal.bayes", 
+               data = macro,
+               verbose=TRUE
+               )
+
+x.out <- setx(z.out)
+x1.out <- setx(z.out, gdp = 10)
+
+s.out <- sim(z.out, x.out, x1.out)
+
+summary(z.out)
+summary(s.out)
+
+sanction$ncost <- factor(sanction$ncost, ordered = TRUE,
+                         levels = c("net gain", "little effect", 
+                         "modest loss", "major loss"))
+
+
+z.out <- zelig(
+               ncost ~ mil + coop,
+               model = "oprobit.bayes",
+               data = sanction, verbose=FALSE
+               )
+
+x.out <- setx(z.out)
+x1.out <- setx(z.out, coop=3)
+
+s.out <- sim(z.out, x = x.out, num=10000)
+
+summary(z.out)
+summary(s.out)
+
+z.out <- zelig(
+               num ~ target + coop, 
+               model = "poisson.bayes",
+               data = sanction, 
+               verbose=TRUE
+               )
+
+x.out <- setx(z.out)
+x1.out <- setx(z.out, coop=3)
+
+s.out <- sim(z.out, x.out, x1.out)
+
+summary(z.out)
+summary(s.out)
+
+z.out <- zelig(
+               vote ~ race + educate,
+               model = "probit.bayes",
+               verbose = FALSE,
+               data  = turnout
+               )
+
+x.out <- setx(z.out)
+x1.out <- setx(z.out, educate=5)
+
+s.out <- sim(z.out, x.out, x1.out)
+
+summary(s.out)
+
+
+z.out <- zelig(cbind(Agr,Exam,Educ,Cath,InfMort)~NULL, 
+               model="factor.bayes",
+               data=swiss, factors=2,
+               lambda.constraints=list(Exam=list(1,"+"),
+                                 Exam=list(2,"-"), Educ=c(2,0),
+                                 InfMort=c(1,0)),
+               verbose=TRUE, a0=1, b0=0.15,
+               burnin=5000, mcmc=10000)
+
+## Checking for convergence before summarizing the estimates:
+geweke.diag(coef(z.out))
+heidel.diag(coef(z.out))
+raftery.diag(coef(z.out))
+
+## summarizing the output
+summary(z.out)
+
+# These methods should not work.
+#setx(z.out)
+#sim(z.out)
+
+
+
diff --git a/tests/models-core.R b/tests/models-core.R
new file mode 100644
index 0000000..eef2d6f
--- /dev/null
+++ b/tests/models-core.R
@@ -0,0 +1,144 @@
+library(Zelig)
+
+data(coalition)
+data(macro)
+data(mid)
+data(tobin)
+data(turnout)
+data(sanction)
+
+# exp
+# exp
+# exp
+
+z.out <- zelig(Surv(duration, ciep12) ~ invest + polar + numst2 + crisis, model = "exp", data = coalition[1:100,])
+
+x.low <- setx(z.out, numst2 = 0)
+x.high <- setx(z.out, numst2 = 1)
+
+s.out <- sim(z.out, x = x.low, x1 = x.high, num = 10)
+
+plot(s.out)
+
+# gamma
+# gamma
+# gamma
+
+z <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
+
+x.low <- setx(z, numst2 = 0)
+x.high <- setx(z, numst2 = 1)
+
+s <- sim(z, x = x.low, x1 = x.high, num = 10)
+
+plot(s)
+
+# logit
+# logit
+# logit
+
+z <- zelig(vote ~ age*educate + race, model = "logit", data = turnout)
+
+x.high <- setx(z, educate = quantile(turnout$educate, probs = 0.75))
+x.low <- setx(z, educate = quantile(turnout$educate, probs = 0.25))
+
+s <- sim(z, x = x.low, x1 = x.high, num = 10)
+
+plot(s)
+
+# ls
+# ls
+# ls
+
+z <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
+
+x.high <- setx(z, trade = quantile(trade, 0.8))
+x.low <- setx(z, trade = quantile(trade, 0.2))
+
+s <- sim(z, x = x.high, x1 = x.low, num = 10)
+
+plot(s)
+
+# negbinom
+# negbinom
+# negbinom
+
+z <- zelig(num ~ target + coop, model = "negbinom", data = sanction)
+
+x <- setx(z)
+
+s <- sim(z, x = x, num = 10)
+
+plot(s)
+
+# normal
+# normal
+# normal
+
+z <- zelig(unem ~ gdp + capmob + trade, model = "normal", data = macro)
+
+x.high <- setx(z, trade = quantile(trade, 0.8))
+x.low <- setx(z, trade = quantile(trade, 0.2))
+
+s <- sim(z, x = x.high, x1 = x.low)
+
+plot(s)
+
+# poisson
+# poisson
+# poisson
+
+z <- zelig(num ~ target + coop, model = "poisson", data = sanction)
+
+x <- setx(z)
+
+s <- sim(z, x = x, num = 10)
+
+plot(s)
+
+# probit
+# probit
+# probit
+
+z <- zelig(vote ~ race + educate, model = "probit", data = turnout)
+
+x.low <- setx(z, educate = quantile(turnout$educate, probs = 0.25))
+x.high <- setx(z, educate = quantile(turnout$educate, probs = 0.75))
+
+s <- sim(z, x = x.low, x1 = x.high, num = 10)
+
+plot(s)
+
+# relogit
+# relogit
+# relogit
+
+z.out1 <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
+                data = mid, model = "relogit",
+                tau = 1042/303772)
+
+z.out2 <- zelig(
+                conflict ~ major + contig + power + maxdem + mindem + years,
+                data = mid,
+                model = "relogit",
+                tau = 1042/303772,
+                case.control = "weighting",
+                robust = TRUE
+                )
+
+x.out1 <- setx(z.out1)
+x.out2 <- setx(z.out2)
+
+s.out1 <- sim(z.out1, x = x.out1, num=10)
+s.out2 <- sim(z.out2, x = x.out2, num=10)
+
+plot(s.out1)
+plot(s.out2)
+
+# tobit
+# tobit
+# tobit
+
+z <- zelig(durable ~ age + quant, data = tobin, model = "tobit")
+x <- setx(z)
+s <- sim(z, x = x, num = 10)
diff --git a/tests/models-gee.R b/tests/models-gee.R
new file mode 100644
index 0000000..7065f09
--- /dev/null
+++ b/tests/models-gee.R
@@ -0,0 +1,152 @@
+library(Zelig)
+
+data(coalition)
+data(turnout)
+data(macro)
+data(sanction)
+
+
+cluster <- c(rep(c(1:62),5), rep(c(63),4))
+coalition$cluster <- cluster
+
+z.out <- zelig(duration ~ fract + numst2, 
+               id = "cluster",
+               model = "gamma.gee",
+               data = coalition,
+               corstr="exchangeable"
+               )
+
+summary(z.out)
+
+#  Setting the explanatory variables at their default values
+#  (mode for factor variables and mean for non-factor variables),
+#  with numst2 set to the vector 0 = no crisis, 1 = crisis. 
+x.low <- setx(z.out, numst2 = 0)
+x.high <- setx(z.out, numst2 = 1)
+
+# Simulate quantities of interest
+s.out <- sim(z.out, x = x.low, x1 = x.high)
+
+summary(s.out)
+
+# Generate a plot of quantities of interest:
+plot(s.out)
+
+
+
+##  Attaching the sample turnout dataset:
+
+turnout$cluster <- rep(c(1:200),10)
+sorted.turnout <- turnout[order(turnout$cluster),]
+
+z.out1 <- zelig(
+                vote ~ race + educate, model = "logit.gee",
+                id = "cluster", 
+	        data = turnout,
+                corstr = "stat_M_dep",
+                Mv=3
+                )
+summary(z.out1)
+
+x.out1 <- setx(z.out1)
+s.out1 <- sim(z.out1, x = x.out1)
+
+summary(s.out1)
+plot(s.out1)
+
+
+x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
+x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
+s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
+
+summary(s.out2)
+plot(s.out2)
+
+#####  Example 3:  Example with Fixed Correlation Structure
+
+##  User-defined correlation structure
+corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
+diag(corr.mat) <- 1 
+
+##  Generating empirical estimates:
+z.out2 <- zelig(vote ~ race + educate, model = "logit.gee", id = "cluster", 
+	data = sorted.turnout, robust = T, corstr = "fixed", R=corr.mat)
+
+##  Viewing the regression output:
+summary(z.out2)
+
+# NORMAL.GEE
+# NORMAL.GEE
+# NORMAL.GEE
+
+z.out <- zelig(unem ~ gdp + capmob + trade, model = "normal.gee", id = "country", data = macro, robust=TRUE, corstr="AR-M", Mv=1)
+summary(z.out)
+
+# Set explanatory variables to their default (mean/mode) values, with
+# high (80th percentile) and low (20th percentile) values:
+x.high <- setx(z.out, trade = quantile(macro$trade, 0.8))
+x.low <- setx(z.out, trade = quantile(macro$trade, 0.2))
+
+# Generate first differences for the effect of high versus low trade on
+# GDP:
+s.out <- sim(z.out, x = x.high, x1 = x.low)
+summary(s.out)
+
+# Generate a plot of quantities of interest:
+plot(s.out)
+
+#
+#
+#
+
+sanction$cluster <- c(rep(c(1:15),5),rep(c(16),3))
+
+z.out <- zelig(num ~ target + coop, model = "poisson.gee", id = "cluster", data = sanction, robust=TRUE, corstr="exchangeable")
+summary(z.out)
+
+x.out <- setx(z.out)
+s.out <- sim(z.out, x = x.out)
+
+summary(s.out)
+plot(s.out)
+
+
+
+
+
+
+
+
+
+turnout$cluster <- rep(c(1:200),10)
+
+z.out1 <- zelig(vote ~ race + educate, model = "probit.gee", id = "cluster", 
+	data = turnout, robust = T, corstr = "stat_M_dep", Mv=3)
+summary(z.out1)
+
+x.out1 <- setx(z.out1)
+s.out1 <- sim(z.out1, x = x.out1)
+
+plot(s.out1)
+
+x.high <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.75))
+x.low <- setx(z.out1, educate = quantile(turnout$educate, prob = 0.25))
+
+s.out2 <- sim(z.out1, x = x.high, x1 = x.low)
+
+summary(s.out2)
+
+plot(s.out2)
+
+#####  Example 3:  Example with Fixed Correlation Structure
+
+##  User-defined correlation structure
+corr.mat <- matrix(rep(0.5,100), nrow=10, ncol=10)
+diag(corr.mat) <- 1 
+
+##  Generating empirical estimates:
+z.out2 <- zelig(vote ~ race + educate, model = "probit.gee", id = "cluster", 
+	data = turnout, robust = T, corstr = "fixed", R=corr.mat)
+
+##  Viewing the regression output:
+summary(z.out2)
diff --git a/tests/models-survey.R b/tests/models-survey.R
new file mode 100644
index 0000000..0beb280
--- /dev/null
+++ b/tests/models-survey.R
@@ -0,0 +1,326 @@
+library(Zelig)
+
+data(api, package = 'survey')
+data(scd, package = 'survey')
+
+# gamma.survey (1 of 3)
+# gamma.survey (1 of 3)
+# gamma.survey (1 of 3)
+
+# TEST 1
+z.out1 <- zelig(
+                api00 ~ meals + yr.rnd,
+                model   = 'gamma.survey',  
+                weights = ~ pw,
+                data    = apistrat
+                )
+summary(z.out1)
+
+x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
+x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
+
+x.low
+x.high
+
+s.out1 <- sim(z.out1, x=x.high, x1=x.low)
+
+plot(s.out1)
+
+# gamma.survey (2 of 3)
+# gamma.survey (2 of 3)
+# gamma.survey (2 of 3)
+
+z.out2 <- zelig(
+                api00 ~ meals + yr.rnd,
+                model = "gamma.survey",  
+                strata=~stype,
+                fpc=~fpc,
+                data = apistrat
+                )
+
+summary(z.out2)
+
+jk1reps <- jk1weights(psu=apistrat$dnum)
+
+# gamma.survey (3 of 3)
+# gamma.survey (3 of 3)
+# gamma.survey (3 of 3)
+
+z.out3 <- zelig(
+                api00 ~ meals + yr.rnd,
+                model = "gamma.survey", 
+		data = apistrat,
+                repweights=jk1reps$weights,
+		type="JK1"
+                )
+
+summary(z.out3)
+
+x.low <- setx(z.out3, meals= quantile(apistrat$meals, 0.2))
+x.high <- setx(z.out3, meals= quantile(apistrat$meals, 0.8))
+
+x.low
+x.high
+
+s.out3 <- sim(z.out3, x=x.high, x1=x.low)
+
+
+plot(s.out3)
+
+# logit.survey (1 of 3)
+# logit.survey (1 of 3)
+# logit.survey (1 of 3)
+
+data(api, package="survey")
+
+
+# TEST 1
+z.out1 <- zelig(
+                yr.rnd ~ meals + mobility,
+                model = "logit.survey",
+                weights=~pw,
+                data = apistrat
+                )
+summary(z.out1)
+
+
+x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
+x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
+
+# 
+x.low
+x.high
+
+s.out1 <- sim(z.out1, x=x.low, x1=x.high)
+
+plot(s.out1)
+
+# logit.survey (2 of 3)
+# logit.survey (2 of 3)
+# logit.survey (2 of 3)
+
+z.out2 <- zelig(
+                yr.rnd ~ meals + mobility,
+                model = "logit.survey",
+                strata=~stype,
+                fpc=~fpc,
+                data = apistrat
+                )
+summary(z.out2)
+
+# logit.survey (3 of 3)
+# logit.survey (3 of 3)
+# logit.survey (3 of 3)
+
+data(scd)
+
+scd$sued <- as.vector(c(0,0,0,1,1,1))
+
+BRRrep<-2 * cbind(
+                  c(1,0,1,0,1,0),
+                  c(1,0,0,1,0,1),
+                  c(0,1,1,0,0,1),
+                  c(0,1,0,1,1,0)
+                  )
+
+
+z.out3 <- zelig(
+                formula=sued ~ arrests + alive,
+                model = "logit.survey",
+                repweights=BRRrep,
+                type="BRR",
+                data=scd
+                )
+
+summary(z.out3)
+
+x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
+x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
+
+s.out3 <- sim(z.out3, x=x.high, x1=x.low)
+
+# normal.survey (1 of 3)
+# normal.survey (1 of 3)
+# normal.survey (1 of 3)
+
+z.out1 <- zelig(
+                api00 ~ meals + yr.rnd,
+                model = "normal.survey",  
+                weights=~pw,
+                data = apistrat
+                )
+
+summary(z.out1)
+
+x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
+x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
+
+x.low
+x.high
+
+s.out1 <- sim(z.out1, x=x.high, x1=x.low)
+
+plot(s.out1)
+
+z.out2 <- zelig(
+                api00 ~ meals + yr.rnd,
+                model = "normal.survey",  
+                strata=~stype,
+                fpc=~fpc,
+                data = apistrat
+                )
+
+summary(z.out2)
+
+# normal.survey (2 of 3)
+# normal.survey (2 of 3)
+# normal.survey (2 of 3)
+
+BRRrep<-2 * cbind(
+                  c(1,0,1,0,1,0),
+                  c(1,0,0,1,0,1),
+                  c(0,1,1,0,0,1),
+                  c(0,1,0,1,1,0)
+                  )
+
+z.out3 <- zelig(
+                formula=alive ~ arrests,
+                model = "normal.survey", 
+                repweights=BRRrep,
+                type="BRR",
+                data=scd,
+                na.action=NULL
+                )
+
+summary(z.out3)
+
+x.min <- setx(z.out3, arrests = min(scd$arrests))
+x.max <- setx(z.out3, arrests = max(scd$arrests))
+
+x.min
+x.max
+
+s.out3 <- sim(z.out3, x=x.max, x1=x.min)
+
+plot(s.out3)
+
+data(api, package="survey")
+
+# TEST 1
+z.out1 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", data = apistrat)
+summary(z.out1)
+
+x.low <- setx(z.out1, api99= quantile(apistrat$api99, 0.2))
+x.high <- setx(z.out1, api99= quantile(apistrat$api99, 0.8))
+
+x.low
+x.high
+
+s.out1 <- sim(z.out1, x=x.low, x1=x.high)
+
+plot(s.out1)
+
+
+# TEST 2
+z.out2 <- zelig(
+                enroll ~ api99 + yr.rnd,
+                model = "poisson.survey",
+                data = apistrat, 
+                strata=~stype,
+                fpc=~fpc
+                )
+
+summary(z.out2)
+
+data(scd, package="survey")
+
+BRRrep<-2*cbind(
+                c(1,0,1,0,1,0),
+                c(1,0,0,1,0,1),
+                c(0,1,1,0,0,1),
+                c(0,1,0,1,1,0)
+                )
+
+z.out3 <- zelig(
+                alive ~ arrests,
+                model = "poisson.survey", 
+                repweights=BRRrep,
+                type="BRR",
+                data=scd
+                )
+
+summary(z.out3)
+
+x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
+x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
+
+x.low
+x.high
+
+s.out3 <- sim(z.out3, x=x.high, x1=x.low)
+
+plot(s.out3)
+
+data(api, package="survey")
+
+z.out1 <- zelig(
+                yr.rnd ~ meals + mobility,
+                model = "probit.survey",
+                weights=~pw,
+                data = apistrat
+                )
+
+summary(z.out1)
+
+x.low <- setx(z.out1, meals= quantile(apistrat$meals, 0.2))
+x.high <- setx(z.out1, meals= quantile(apistrat$meals, 0.8))
+
+x.low
+x.high
+
+s.out1 <- sim(z.out1, x=x.low, x1=x.high)
+
+
+plot(s.out1)
+
+
+# TEST 2
+z.out2 <- zelig(
+                yr.rnd ~ meals + mobility,
+                model = "probit.survey",
+                strata=~stype,
+                fpc=~fpc,
+                data = apistrat
+                )
+
+summary(z.out2)
+
+
+data(scd)
+
+scd$sued <- as.vector(c(0,0,0,1,1,1))
+
+BRRrep<-2*cbind(
+                c(1,0,1,0,1,0),
+                c(1,0,0,1,0,1),
+                c(0,1,1,0,0,1),
+                c(0,1,0,1,1,0)
+                )
+
+z.out3 <- zelig(
+                formula=sued ~ arrests + alive,
+                model = "probit.survey", 
+                repweights=BRRrep,
+                type="BRR",
+                data=scd
+                )
+
+summary(z.out3)
+
+x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
+x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
+
+x.low
+x.high
+
+s.out3 <- sim(z.out3, x=x.high, x1=x.low)
diff --git a/tests/plot-ci.R b/tests/plot-ci.R
new file mode 100644
index 0000000..6b6013d
--- /dev/null
+++ b/tests/plot-ci.R
@@ -0,0 +1,35 @@
+library(Zelig)
+
+data(turnout)
+
+par(mfrow=c(2, 2))
+
+z <- zelig(vote ~ income + educate, model="relogit", data=turnout)
+x <- setx(z, educate=-5:8)
+s <- sim(z, x)
+
+plot(s, var = "educate")
+
+z <- zelig(vote ~ income + educate, model="logit", data=turnout)
+x <- setx(z, educate=0:5)
+s <- sim(z, x)
+
+plot.ci(s, var="educate")
+
+z <- zelig(vote ~ income + educate, model="logit", data=turnout)
+x <- setx(z, educate=-5:5)
+s <- sim(z, x)
+
+plot.ci(s, var="educate", ylim = c(-2, 1))
+
+z <- zelig(vote ~ income + educate, model="logit", data=turnout)
+x <- setx(z, educate=-5:5)
+s <- sim(z, x)
+
+plot.ci(s, var="educate")
+
+z <- zelig(vote ~ income + educate, model="logit", data=turnout)
+x <- setx(z, educate=12)
+s <- sim(z, x)
+
+plot.ci(s)
diff --git a/tests/pooled.R b/tests/pooled.R
new file mode 100644
index 0000000..25053ee
--- /dev/null
+++ b/tests/pooled.R
@@ -0,0 +1,11 @@
+library(Zelig)
+
+data(turnout)
+
+z <- zelig(vote ~ race + educate + age, model = "logit", data = turnout)
+x <- setx(z, educate = 6:7, age = 17)
+s <- sim(z, x, num = 200)
+
+summary(s)
+
+plot(s)
diff --git a/tests/relogit.R b/tests/relogit.R
new file mode 100644
index 0000000..a3abf62
--- /dev/null
+++ b/tests/relogit.R
@@ -0,0 +1,20 @@
+library(Zelig)
+
+data(mid)
+
+z <- zelig(conflict ~ major + contig + power + maxdem + mindem + years, model = "relogit", tau = 1042/303772, data = mid)
+x <- setx(z)
+s <- sim(z, x)
+
+summary(s)
+
+plot(s)
+
+## weighting + bias correction + robust s.e.
+z <- zelig(conflict ~ major + contig + power + maxdem + mindem + years,
+           data = mid, model = "relogit", tau = 1042/303772,
+           case.control = "weighting", robust = TRUE)
+x <- setx(z)
+s <- sim(z, x)
+
+summary(s)
diff --git a/tests/summary.MI.R b/tests/summary.MI.R
new file mode 100644
index 0000000..e49b63c
--- /dev/null
+++ b/tests/summary.MI.R
@@ -0,0 +1,13 @@
+library(Zelig)
+
+data(turnout)
+
+d1 <- turnout[1:500, ]
+d2 <- turnout[501:1000, ]
+d3 <- turnout[1001:2000, ]
+
+z <- zelig(vote ~ I(educate*income) + educate, model = "logit", data = mi(d1, d2, d3))
+
+summary(z, subset = c(1, 3))
+
+# Fin.
diff --git a/tests/twosls.R b/tests/twosls.R
new file mode 100644
index 0000000..0a65cee
--- /dev/null
+++ b/tests/twosls.R
@@ -0,0 +1,22 @@
+library(Zelig)
+
+data(klein)
+
+formula <- list(
+                mu1  = C ~ Wtot + P1,
+                mu2  = I ~ P + P1 + K1,
+                mu3  = Wp ~ X + X1 + Tm,
+                inst = ~ P1 + K1 + X1 + Tm + Wg + G
+                )
+
+z <- zelig(formula, model="twosls", data=klein, cite=F)
+
+x <-setx(z)
+x1 <-setx(z, Wtot = 60)
+
+s <-sim(z, x, x1)
+
+summary(s)
+
+# Plot
+plot(s)
diff --git a/texput.log b/texput.log
deleted file mode 100644
index d6c0e84..0000000
--- a/texput.log
+++ /dev/null
@@ -1,20 +0,0 @@
-This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009) (format=pdflatex 2009.11.7)  21 FEB 2012 14:06
-entering extended mode
- %&-line parsing enabled.
-**blogit.pdf
-
-! Emergency stop.
-<*> blogit.pdf
-              
-End of file on the terminal!
-
- 
-Here is how much of TeX's memory you used:
- 3 strings out of 493848
- 100 string characters out of 3149207
- 48936 words of memory out of 3000000
- 3381 multiletter control sequences out of 15000+200000
- 3640 words of font info for 14 fonts, out of 3000000 for 9000
- 714 hyphenation exceptions out of 8191
- 0i,0n,0p,1b,6s stack positions out of 5000i,500n,10000p,200000b,50000s
-!  ==> Fatal error occurred, no output PDF file produced!
diff --git a/vignettes/Rd.sty b/vignettes/Rd.sty
deleted file mode 100644
index 62b7be4..0000000
--- a/vignettes/Rd.sty
+++ /dev/null
@@ -1,397 +0,0 @@
-%%% Rd.sty ... Style for printing the R manual
-%%% Part of the R package, http://www.R-project.org
-%%% Copyright (C) 2003-2011 The R Foundation
-%%% Distributed under GPL 2 or later
-%%%
-%%% Modified 1998/01/05 by Friedrich Leisch
-%%% Modified 1998/07/07 by Martin Maechler
-%%% Modified 1999/11/20 by Brian Ripley
-%%% Modified 1999/12/26 by Kurt Hornik
-%%% and so on.
-
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesPackage{Rd}{}
-
-\RequirePackage{ifthen}
-\newboolean{Rd at has@ae}
-\newboolean{Rd at use@ae}
-\newboolean{Rd at use@hyper}
-\newboolean{Rd at has@times}
-\newboolean{Rd at use@times}
-\newboolean{Rd at use@cm-super}
-\newboolean{Rd at has@lm}
-\newboolean{Rd at use@lm}
-\newboolean{Rd at use@beramono}
-\newboolean{Rd at use@inconsolata}
-\DeclareOption{ae}{\setboolean{Rd at use@ae}{true}}
-\DeclareOption{hyper}{\setboolean{Rd at use@hyper}{true}}
-\DeclareOption{times}{\setboolean{Rd at use@times}{true}}
-\DeclareOption{lm}{\setboolean{Rd at use@lm}{true}}
-\DeclareOption{cm-super}{\setboolean{Rd at use@cm-super}{true}}
-\DeclareOption{beramono}{\setboolean{Rd at use@beramono}{true}}
-\DeclareOption{inconsolata}{\setboolean{Rd at use@inconsolata}{true}}
-\ProcessOptions
-\RequirePackage{longtable}
-\setcounter{LTchunksize}{250}
-\ifthenelse{\boolean{Rd at use@hyper}}
-{\IfFileExists{hyperref.sty}{}{\setboolean{Rd at use@hyper}{false}
-  \message{package hyperref not found}}}
-{}
-
-\RequirePackage{bm}              % standard boldsymbol
-\RequirePackage{alltt}           % {verbatim} allowing \..
-\RequirePackage{verbatim}        % small example code
-\RequirePackage{url}             % set urls
-
-%% See 'upquote.sty' for details.
-%% We use \pkg{verbatim} for our ExampleCode environment, which in its
-%% \verbatim at font has an explicit \let\do\do at noligs\verbatim at nolig@list
-%% rather than (the identical) \@noligs from the LaTeX2e kernel.
-%% Hence, we add to \verbatim at font ... suggestion by Bernd Raichle
-%% <raichle at Informatik.Uni-Stuttgart.DE>.
-\ifthenelse{\boolean{Rd at use@inconsolata}}{}{\RequirePackage{upquote}}
-\g at addto@macro\verbatim at font\@noligs
-
-\addtolength{\textheight}{12mm}
-\addtolength{\topmargin}{-9mm}   % still fits on US paper
-\addtolength{\textwidth}{24mm}   % still fits on US paper
-\setlength{\oddsidemargin}{10mm}
-\setlength{\evensidemargin}{\oddsidemargin}
-
-\newenvironment{display}[0]%
-  {\begin{list}{}{\setlength{\leftmargin}{30pt}}\item}%
-  {\end{list}}
-\newcommand{\HTML}{{\normalfont\textsc{html}}}
-\newcommand{\R}{{\normalfont\textsf{R}}{}}
-\newcommand{\Rdash}{-}
-
-\def\href#1#2{\special{html:<a href="#1">}{#2}\special{html:</a>}}
-
-\newcommand{\vneed}[1]{%
-  \penalty-1000\vskip#1 plus 10pt minus #1\penalty-1000\vspace{-#1}}
-
-\newcommand{\Rdcontents}[1]{% modified \tableofcontents -- not \chapter
-\section*{{#1}\@mkboth{\MakeUppercase#1}{\MakeUppercase#1}}
-  \@starttoc{toc}}
-
-\newcommand{\Header}[2]{%
-  \vneed{1ex}
-  \markboth{#1}{#1}
-  \noindent
-  \nopagebreak
-  \begin{center}
-  \ifthenelse{\boolean{Rd at use@hyper}}%
-    {\def\@currentHref{page.\thepage}
-    \hypertarget{Rfn.#1}{\index{#1@\texttt{#1}}}%
-    \myaddcontentsline{toc}{subsection}{#1}%
-    \pdfbookmark[1]{#1}{Rfn.#1}}
-    {\addcontentsline{toc}{subsection}{#1}
-      \index{#1@\texttt{#1}|textbf}}
-    \hrule
-    \parbox{0.95\textwidth}{%
-      \begin{ldescription}[1.5in]
-       \item[\texttt{#1}] \emph{#2}
-      \end{ldescription}}
-    \hrule
-  \end{center}
-  \nopagebreak}
-%
-%
-%
-% \alias{<alias>}{<header>}
-\ifthenelse{\boolean{Rd at use@hyper}}
-{\newcommand{\alias}[2]{\hypertarget{Rfn.#1}{\index{#1@\texttt{#1} \textit{(\texttt{#2})}}}}}
-{\newcommand{\alias}[2]{\index{#1@\texttt{#1} \textit{(\texttt{#2})}}}}
-\ifthenelse{\boolean{Rd at use@hyper}}
-{\newcommand{\methalias}[2]{\hypertarget{Rfn.#1}{\relax}}}
-{\newcommand{\methalias}[2]{}}
-% \keyword{<topic>}{<header>}
-\newcommand{\keyword}[2]{\index{$*$Topic{\large\ \textbf{#1}}!#2@\texttt{#2}}}
-%
-% used prior to 2.10.0 only
-\newcommand{\Itemize}[1]{\begin{itemize}{#1}\end{itemize}}
-\newcommand{\Enumerate}[1]{\begin{enumerate}{#1}\end{enumerate}}
-\newcommand{\describe}[1]{\begin{description}{#1}\end{description}}
-
-\newcommand{\Tabular}[2]{%
-  \par\begin{longtable}{#1}
-    #2
-  \end{longtable}}
-
-\newlength{\ldescriptionwidth}
-\newcommand{\ldescriptionlabel}[1]{%
-  \settowidth{\ldescriptionwidth}{{#1}}%
-  \ifdim\ldescriptionwidth>\labelwidth
-    {\parbox[b]{\labelwidth}%
-      {\makebox[0pt][l]{#1}\\[1pt]\makebox{}}}%
-  \else
-    \makebox[\labelwidth][l]{{#1}}%
-  \fi
-  \hfil\relax}
-\newenvironment{ldescription}[1][1in]%
-  {\begin{list}{}%
-    {\setlength{\labelwidth}{#1}%
-      \setlength{\leftmargin}{\labelwidth}%
-      \addtolength{\leftmargin}{\labelsep}%
-      \renewcommand{\makelabel}{\ldescriptionlabel}}}%
-  {\end{list}}
-
-\newenvironment{Rdsection}[1]{%
-  \ifx\@empty#1\else\subsubsection*{#1}\fi
-  \begin{list}{}{\setlength{\leftmargin}{0.25in}}\item}
-  {\end{list}}
-
-\newenvironment{Arguments}{%
-  \begin{Rdsection}{Arguments}}{\end{Rdsection}}
-\newenvironment{Author}{%
-  \begin{Rdsection}{Author(s)}}{\end{Rdsection}}
-\newenvironment{Description}{%
-  \begin{Rdsection}{Description}}{\end{Rdsection}}
-\newenvironment{Details}{%
-  \begin{Rdsection}{Details}}{\end{Rdsection}}
-\newenvironment{Examples}{%
-  \begin{Rdsection}{Examples}}{\end{Rdsection}}
-\newenvironment{Note}{%
-  \begin{Rdsection}{Note}}{\end{Rdsection}}
-\newenvironment{References}{%
-  \begin{Rdsection}{References}}{\end{Rdsection}}
-\newenvironment{SeeAlso}{%
-  \begin{Rdsection}{See Also}}{\end{Rdsection}}
-\newenvironment{Format}{%
-  \begin{Rdsection}{Format}}{\end{Rdsection}}
-\newenvironment{Source}{%
-  \begin{Rdsection}{Source}}{\end{Rdsection}}
-\newenvironment{Section}[1]{%
-  \begin{Rdsection}{#1}}{\end{Rdsection}}
-\newenvironment{Usage}{%
-  \begin{Rdsection}{Usage}}{\end{Rdsection}}
-\newenvironment{Value}{%
-  \begin{Rdsection}{Value}}{\end{Rdsection}}
-
-\newenvironment{SubSection}[1]{%
-  \begin{list}{}{\setlength{\leftmargin}{0.1in}}\item \textbf{#1: }}{\end{list}}
-\newenvironment{SubSubSection}[1]{%
-  \begin{list}{}{\setlength{\leftmargin}{0.1in}}\item \textit{#1: }}{\end{list}}
-
-\newenvironment{ExampleCode}{\small\verbatim}{\endverbatim}
-
-\ifx\textbackslash\undefined%-- e.g. for MM
-  \newcommand{\bsl}{\ifmmode\backslash\else$\backslash$\fi}
-\else
-  \newcommand{\bsl}{\ifmmode\backslash\else\textbackslash\fi}
-\fi
-%fails for index (but is not used there...)
-\newcommand{\SIs}{\relax\ifmmode\leftarrow\else$\leftarrow$\fi}
-\newcommand{\SIIs}{\relax\ifmmode<\leftarrow\else$<\leftarrow$\fi}
-\newcommand{\Sbecomes}{\relax\ifmmode\rightarrow\else$\rightarrow$\fi}
-%
-\newcommand{\deqn}[2]{\[#1\]}
-\newcommand{\eqn}[2]{$#1$}
-\newcommand{\bold}[1]{\ifmmode\bm{#1}\else\textbf{#1}\fi}
-%% as from R 2.12.0 set \file in monospaced font, not sans-serif
-\newcommand{\file}[1]{`\texttt{#1}'}
-
-\newcommand{\Figure}[2]{\includegraphics[#2]{#1}}
-
-\ifthenelse{\boolean{Rd at use@hyper}}
-{\newcommand{\link}[1]{\hyperlink{Rfn.#1}{#1}\index{#1@\texttt{#1}}}}
-{\newcommand{\link}[1]{#1\index{#1@\texttt{#1}}}}
-
-\ifthenelse{\boolean{Rd at use@hyper}}
-{\newcommand{\Rhref}[2]{\href{#1}{#2}}}
-{\newcommand{\Rhref}[2]{#2\footnote{\url{#1}}}}
-
-%% as from R 2.10.0 set \email in monospaced font (like \url)
-%\newcommand{\email}[1]{$\langle$\texttt{#1}$\rangle$}
-\newcommand{\email}[1]{\normalfont\texttt{\textless#1\textgreater}}
-
-%% \code without `-' ligatures
-{\catcode`\-=\active%
-  \global\def\code{\bgroup%
-    \catcode`\-=\active \let-\codedash%
-    \Rd@code}}
-\def\codedash{-\discretionary{}{}{}}
-\def\Rd@code#1{\normalfont\texttt{#1}\egroup}
-
-\def\AsIs{\bgroup\let\do\@makeother\Rd@AsIs@dospecials\@noligs\@vobeyspaces\Rd@AsIsX}
-\def\Rd@AsIs@dospecials{\do\$\do\&\do\#\do\^\do\_\do\%\do\~}
-\def\Rd@AsIsX#1{\normalfont #1\egroup}
-\let\command=\code
-\let\env=\code
-
-\newcommand\samp{`\bgroup\@noligs\@vobeyspaces\@sampx}
-\def\@sampx#1{{\normalfont\texttt{#1}}\egroup'}
-\let\option=\samp
-
-% This is a workaround for the old Rdconv to handle \Sexpr by echoing it
-% Rd2latex() should never let \Sexpr through to here.
-\newcommand\Sexpr[2][]{{\normalfont\texttt{\bsl Sexpr[#1]\{#2\}}}}
-
-\newcommand{\var}[1]{{\normalfont\textsl{#1}}}
-
-\newcommand{\dfn}[1]{\textsl{#1}}
-\let\Cite=\dfn
-
-\newcommand{\acronym}[1]{\textsc{\lowercase{#1}}}
-\newcommand{\kbd}[1]{{\normalfont\texttt{\textsl{#1}}}}
-
-\newcommand{\strong}[1]{{\normalfont\fontseries{b}\selectfont #1}}
-\let\pkg=\strong
-
-\newcommand{\sQuote}[1]{`#1'}
-\newcommand{\dQuote}[1]{``#1''}
-
-\IfFileExists{ae.sty}{\setboolean{Rd@has@ae}{true}}{}
-\ifthenelse{\boolean{Rd@use@ae}\and\boolean{Rd@has@ae}}{%
-  \usepackage[T1]{fontenc}
-  \usepackage{ae}
-  \input{t1aett.fd}
-  \DeclareFontShape{T1}{aett}{bx}{n}{<->ssub*aett/m/n}{}}
-  {\message{NOT loading ae}}
-\IfFileExists{times.sty}{\setboolean{Rd@has@times}{true}}{}
-\ifthenelse{\boolean{Rd@use@times}\and\boolean{Rd@has@times}}{%
-  \usepackage[T1]{fontenc}
-  \usepackage{times}}
-  {\message{NOT loading times}}
-\IfFileExists{lmodern.sty}{\setboolean{Rd@has@lm}{true}}{}
-\ifthenelse{\boolean{Rd@use@lm}\and\boolean{Rd@has@lm}}{%
-  \usepackage[T1]{fontenc}
-  \usepackage{lmodern}}
-  {\message{NOT loading lmodern}}
-\ifthenelse{\boolean{Rd@use@cm-super}}{%
-  \usepackage[T1]{fontenc}}{}
-\ifthenelse{\boolean{Rd@use@beramono}}{%
-  \usepackage[scaled=.8]{beramono}}{}
-\ifthenelse{\boolean{Rd@use@inconsolata}}{%
-  \usepackage{inconsolata}}{}
-
-\ifthenelse{\boolean{Rd@use@hyper}}{%
-  \RequirePackage{color}    
-  \def\myaddcontentsline#1#2#3{%
-    \addtocontents{#1}{\protect\contentsline{#2}{#3}{\thepage}{page.\thepage}}}
-  \RequirePackage{hyperref}
-  \DeclareTextCommand{\Rpercent}{PD1}{\045} % percent
-  %% <NOTE>
-  %% Formerly in R's hyperref.cfg, possibly to be shared with Sweave.sty
-  %% as well (but without setting pagebackref as this can give trouble
-  %% for .bib entries containing URLs with '#' characters).
-  \definecolor{Blue}{rgb}{0,0,0.8}
-  \definecolor{Red}{rgb}{0.7,0,0}
-  \hypersetup{%
-    hyperindex,%
-    colorlinks,%
-    pagebackref,%
-    linktocpage,%
-    plainpages=false,%
-    linkcolor=Blue,%
-    citecolor=Blue,%
-    urlcolor=Red,%
-    pdfstartview=Fit,%
-    pdfview={XYZ null null null}%
-  }
-  %% </NOTE>
-  \renewcommand\tableofcontents{%
-    \if@twocolumn
-      \@restonecoltrue\onecolumn
-    \else
-      \@restonecolfalse
-    \fi
-    \chapter*{\contentsname
-        \@mkboth{%
-           \MakeUppercase\contentsname}{\MakeUppercase\contentsname}}%
-    \pdfbookmark{Contents}{contents}
-    \@starttoc{toc}%
-    \if@restonecol\twocolumn\fi
-    }
-  \renewenvironment{theindex}
-  {\if@twocolumn
-    \@restonecolfalse
-    \else
-    \@restonecoltrue
-    \fi
-    \columnseprule \z@
-    \columnsep 35\p@
-    \twocolumn[\@makeschapterhead{\indexname}]%
-    \@mkboth{\MakeUppercase\indexname}%
-    {\MakeUppercase\indexname}%
-    \pdfbookmark{Index}{index}
-    \myaddcontentsline{toc}{chapter}{Index}
-    \thispagestyle{plain}\parindent\z@
-    \parskip\z@ \@plus .3\p@\relax
-    \raggedright
-    \let\item\@idxitem}
-  {\if@restonecol\onecolumn\else\clearpage\fi}
-  }{
-  \renewenvironment{theindex}
-  {\if@twocolumn
-    \@restonecolfalse
-    \else
-    \@restonecoltrue
-    \fi
-    \columnseprule \z@
-    \columnsep 35\p@
-    \twocolumn[\@makeschapterhead{\indexname}]%
-    \@mkboth{\MakeUppercase\indexname}%
-    {\MakeUppercase\indexname}%
-    \addcontentsline{toc}{chapter}{Index}
-    \thispagestyle{plain}\parindent\z@
-    \parskip\z@ \@plus .3\p@\relax
-    \raggedright
-    \let\item\@idxitem}
-  {\if@restonecol\onecolumn\else\clearpage\fi}
-  }
-
-% new definitions for R >= 2.0.0
-\ifthenelse{\boolean{Rd@use@hyper}}
-{\newcommand{\LinkA}[2]{\hyperlink{Rfn.#2}{#1}\index{#1@\texttt{#1}|textit}}}
-{\newcommand{\LinkA}[2]{#1\index{#1@\texttt{#1}|textit}}}
-%
-% \alias{<alias>}{<header>}
-\ifthenelse{\boolean{Rd@use@hyper}}
-{\newcommand{\aliasA}[3]{\hypertarget{Rfn.#3}{\index{#1@\texttt{#1} \textit{(\texttt{#2})}}}}}
-{\newcommand{\aliasA}[3]{\index{#1@\texttt{#1} \textit{(\texttt{#2})}}}}
-% \aliasB has no indexing.
-\ifthenelse{\boolean{Rd@use@hyper}}
-{\newcommand{\aliasB}[3]{\hypertarget{Rfn.#3}{\relax}}}
-{\newcommand{\aliasB}[3]{}}
-\ifthenelse{\boolean{Rd@use@hyper}}
-{\newcommand{\methaliasA}[3]{\hypertarget{Rfn.#3}{\relax}}}
-{\newcommand{\methaliasA}[3]{}}
-\newcommand{\HeaderA}[3]{%
-  \vneed{1ex}
-  \markboth{#1}{#1}
-  \noindent
-  \nopagebreak
-  \begin{center}
-  \ifthenelse{\boolean{Rd@use@hyper}}%
-    {\def\@currentHref{page.\thepage}
-    \hypertarget{Rfn.#3}{\index{#1@\texttt{#1}}}%
-    \myaddcontentsline{toc}{subsection}{#1}%
-    \pdfbookmark[1]{#1}{Rfn.#3}}
-    {\addcontentsline{toc}{subsection}{#1}
-      \index{#1@\texttt{#1}|textbf}}
-    \hrule
-    \parbox{0.95\textwidth}{%
-      \begin{ldescription}[1.5in]
-       \item[\texttt{#1}] \emph{#2}
-      \end{ldescription}}
-    \hrule
-  \end{center}
-  \nopagebreak}
-\DeclareTextCommandDefault{\Rpercent}{\%{}}
-%% for use with the output of encoded_text_to_latex
-\ProvideTextCommandDefault{\textdegree}{\ensuremath{{^\circ}}}
-\ProvideTextCommandDefault{\textonehalf}{\ensuremath{\frac12}}
-\ProvideTextCommandDefault{\textonequarter}{\ensuremath{\frac14}}
-\ProvideTextCommandDefault{\textthreequarters}{\ensuremath{\frac34}}
-\ProvideTextCommandDefault{\textcent}{\TextSymbolUnavailable\textcent}
-\ProvideTextCommandDefault{\textyen}{\TextSymbolUnavailable\textyen}
-\ProvideTextCommandDefault{\textcurrency}{\TextSymbolUnavailable\textcurrency}
-\ProvideTextCommandDefault{\textbrokenbar}{\TextSymbolUnavailable\textbrokenbar}
-\ProvideTextCommandDefault{\texteuro}{\TextSymbolUnavailable\texteuro}
-\providecommand{\mathonesuperior}{\ensuremath{^1}}
-\providecommand{\mathtwosuperior}{\ensuremath{^2}}
-\providecommand{\maththreesuperior}{\ensuremath{^3}}
-
-\InputIfFileExists{Rd.cfg}{%
-  \typeout{Reading personal defaults ...}}{}
diff --git a/vignettes/Sweave.sty b/vignettes/Sweave.sty
index 45db405..a76e1bd 100644
--- a/vignettes/Sweave.sty
+++ b/vignettes/Sweave.sty
@@ -30,10 +30,4 @@
   \ifx\pdfoutput\undefined%
   \csname newcount\endcsname\pdfoutput\fi%
   \ifcase\pdfoutput\special{#1}%
-  \else%
-   \begingroup%
-     \pdfcompresslevel=0%
-     \immediate\pdfobj stream{#1}%
-     \pdfcatalog{/SweaveConcordance \the\pdflastobj\space 0 R}%
-   \endgroup%
-  \fi}
+  \else\immediate\pdfobj{#1}\fi}
diff --git a/vignettes/Zelig.bib b/vignettes/Zelig.bib
new file mode 100644
index 0000000..550d1cf
--- /dev/null
+++ b/vignettes/Zelig.bib
@@ -0,0 +1,65 @@
+@manual{ImaLauKin-gamma11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  { gamma: Gamma Regression for Continuous,
+             Positive Dependent Variables
+           },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
+
+@manual{ImaLauKin-logit11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  { logit: Logistic Regression for Dichotomous Dependent Variables },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
+
+@manual{ImaLauKin-ls11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  {
+            ls: Least Squares Regression for Continuous
+            Dependent Variables
+           },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
+
+
+@manual{ImaLauKin-negbinom11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  {
+            negbinom: Negative Binomial Regression
+            for Event Count Dependent Variables
+           },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
+
+
+@manual{ImaLauKin-normal11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  { normal: Normal Regression for Continuous Dependent Variables },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
+
+
+@manual{ImaLauKin-poisson11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  {
+            poisson: Poisson Regression for Event Count
+            Dependent Variables
+           },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
+
+@manual{ImaLauKin-probit11,
+  author = { Kosuke Imai and Olivia Lau and Gary King},
+  title =  {
+            probit: Probit Regression for
+            Dichotomous Dependent Variables
+           },
+  year =   2011,
+  url =    { http://gking.harvard.edu/zelig }
+}
diff --git a/vignettes/Zelig.sty b/vignettes/Zelig.sty
new file mode 100644
index 0000000..044d562
--- /dev/null
+++ b/vignettes/Zelig.sty
@@ -0,0 +1,33 @@
+\usepackage{hyperref}
+\usepackage{Sweave}
+
+\DefineVerbatimEnvironment{Code}{Verbatim}{
+  samepage=TRUE,
+  fontsize=\small
+}
+
+\newcommand{\CiteZelig}[0]{
+  To cite Zelig as a whole, please reference these two sources:
+
+  \begin{verse}
+    Kosuke Imai, Gary King, and Olivia Lau. 2007. ``Zelig: Everyone's
+    Statistical Software,'' \url{http://GKing.harvard.edu/zelig}.
+  \end{verse}
+
+  \begin{verse}
+  Imai, Kosuke, Gary King, and Olivia Lau. (2008). ``Toward A Common Framework for Statistical Analysis and Development.'' Journal of Computational and Graphical Statistics, Vol. 17, No. 4 (December), pp. 892-913. 
+  \end{verse}
+}
+
+
+
+\newcommand{\Sref}[1]{Section~\ref{#1}}
+\newcommand{\hlink}[2]{\href{#2}{#1}}
+\newcommand{\rvers}{2.5}
+\newcommand{\rwvers}{R-2.5.1}
+\newcommand{\fullrvers}{2.5.1}
+\newcommand{\code}[1]{{\tt #1}}
+
+
+
+\usepackage[all]{xy}
diff --git a/vignettes/blogit.Rnw b/vignettes/blogit.Rnw
deleted file mode 100644
index 841ad81..0000000
--- a/vignettes/blogit.Rnw
+++ /dev/null
@@ -1,349 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=blogit}
-\include{zinput}
-
-%\VignetteIndexEntry{Bivariate Logistic Regression for Two Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, VGAM}
-%\VignetteKeyWords{model,logistic regression, dichotomous}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-
-\section{{\tt blogit}: Bivariate Logistic Regression for Two
-Dichotomous Dependent Variables}\label{blogit}
-
-Use the bivariate logistic regression model if you have two binary
-dependent variables $(Y_1, Y_2)$, and wish to model them jointly as a
-function of some explanatory variables.  Each pair of dependent
-variables $(Y_{i1}, Y_{i2})$ has four potential outcomes, $(Y_{i1}=1,
-Y_{i2}=1)$, $(Y_{i1}=1, Y_{i2}=0)$, $(Y_{i1}=0, Y_{i2}=1)$, and
-$(Y_{i1}=0, Y_{i2}=0)$.  The joint probability for each of these four
-outcomes is modeled with three systematic components: the marginal
-Pr$(Y_{i1} = 1)$ and Pr$(Y_{i2} = 1)$, and the odds ratio $\psi$,
-which describes the dependence of one marginal on the other.  Each of
-these systematic components may be modeled as functions of (possibly
-different) sets of explanatory variables.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(list(mu1 = Y1 ~ X1 + X2 , 
-                      mu2 = Y2 ~ X1 + X3), 
-                 model = "blogit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Input Values}
-
-In every bivariate logit specification, there are three equations which
-correspond to each dependent variable ($Y_1$, $Y_2$), and $\psi$, the
-odds ratio. You should provide a list of formulas for each equation or, 
-you may use {\tt cbind()} if the right hand side is the same for both equations
-<<InputValues.list>>=
-formulae <- list(cbind(Y1,Y2) ~ X1 + X2)
-@ 
-which means that all the explanatory variables in equations 1 and 2
-(corresponding to $Y_1$ and $Y_2$) are included, but only an intercept
-is estimated (all explanatory variables are omitted) for equation 3
-($\psi$).  
-
-You may use the function {\tt tag()} to constrain variables across
-equations:
-<<InputValues.list.mu>>=
-formulae <- list(mu1 = y1 ~ x1 + tag(x3, "x3"), 
-                 mu2 = y2 ~ x2 + tag(x3, "x3"))
-@ 
-where {\tt tag()} is a special function that constrains variables to
-have the same effect across equations.  Thus, the coefficient for {\tt
-x3} in equation {\tt mu1} is constrained to be equal to the
-coefficient for {\tt x3} in equation {\tt mu2}.  
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-
-\item {Basic Example} \label{basic.bl}
-
-Load the data and estimate the model:  
-<<BasicExample.data>>=
- data(sanction)
-## sanction
-@ 
-<<BasicExample.zelig>>=
- z.out1 <- zelig(cbind(import, export) ~ coop + cost + target, 
-                  model = "blogit", data = sanction)
-@ 
-By default, {\tt zelig()} estimates two effect parameters
-for each explanatory variable in addition to the odds ratio parameter;
-this formulation is parametrically independent (estimating
-unconstrained effects for each explanatory variable), but
-stochastically dependent because the models share an odds ratio.
-\newline \newline Generate baseline values for the explanatory
-variables (with cost set to 1, net gain to sender) and alternative
-values (with cost set to 4, major loss to sender):
-<<BasicExample.setx.low>>=
- x.low <- setx(z.out1, cost = 1)
-@ 
-<<BasicExample.setx.high>>=
-x.high <- setx(z.out1, cost = 4)
-@ 
-Simulate fitted values and first differences:  
-<<BasicExample.sim>>=
- s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
- summary(s.out1)
-@
-\begin{center}
-<<label=BasicExamplePlot,fig=true>>= 
- plot(s.out1)
-@ 
-\end{center}
-
-\item {Joint Estimation of a Model with Different Sets of Explanatory Variables}\label{sto.dep.logit}
-
-Using sample data \texttt{sanction}, estimate the statistical model, 
-with {\tt import} a function of {\tt coop} in the first equation and {\tt export} a 
-function of {\tt cost} and {\tt target} in the second equation:
-<<JointExample.zelig>>=
- z.out2 <- zelig(list(import ~ coop, export ~ cost + target), 
-                  model = "blogit", data = sanction)
- summary(z.out2)
-@ 
-Set the explanatory variables to their means:
-<<JointExample.setx>>=
- x.out2 <- setx(z.out2)
-@ 
-Simulate draws from the posterior distribution:
-<<JointExample.sim>>=
- s.out2 <- sim(z.out2, x = x.out2)
- summary(s.out2)
-@ 
-\begin{center}
-<<label=JointExamplePlot,fig=true>>= 
- plot(s.out2)
-@ 
-\end{center}
-
-\item Joint Estimation of a Parametrically and Stochastically
-Dependent Model 
-\label{pdep.l}
-  
-Using the sample data \texttt{sanction}.
-The bivariate model is parametrically dependent if $Y_1$ and $Y_2$ share
-some or all explanatory variables, {\it and} the effects of the shared
-explanatory variables are jointly estimated.  For example,
-<<JointEstimation.zelig>>=
- z.out3 <- zelig(list(import ~ tag(coop,"coop") + tag(cost,"cost") + 
-                           tag(target,"target"), 
-                       export ~ tag(coop,"coop") + tag(cost,"cost") + 
-                           tag(target,"target")), 
-                       model = "blogit", data = sanction)
- summary(z.out3)
-@ 
-Note that this model only returns one parameter estimate for each of
-{\tt coop}, {\tt cost}, and {\tt target}.  Contrast this to
-Example~\ref{basic.bl} which returns two parameter estimates for each
-of the explanatory variables.  \newline \newline Set values for the
-explanatory variables:
-<<JointEstimation.setx>>=
-x.out3 <- setx(z.out3, cost = 1:4)
-@ 
-Draw simulated expected values:  
-<<JointEstimation.sim>>=
- s.out3 <- sim(z.out3, x = x.out3)
- summary(s.out3)
-@ 
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-
-For each observation, define two binary dependent variables, $Y_1$ and
-$Y_2$, each of which take the value of either 0 or 1 (in the
-following, we suppress the observation index).  We model the joint
-outcome $(Y_1$, $Y_2)$ using a marginal probability for each dependent
-variable, and the odds ratio, which parameterizes the relationship
-between the two dependent variables. Define $Y_{rs}$ such that it is
-equal to 1 when $Y_1=r$ and $Y_2=s$ and is 0 otherwise, where $r$ and
-$s$ take a value of either 0 or 1. Then, the model is defined as follows,
-
-\begin{itemize}
- 
-\item The \emph{stochastic component} is
-\begin{eqnarray*}
-  Y_{11} &\sim& \textrm{Bernoulli}(y_{11} \mid \pi_{11}) \\
-  Y_{10} &\sim& \textrm{Bernoulli}(y_{10} \mid \pi_{10}) \\
-  Y_{01} &\sim& \textrm{Bernoulli}(y_{01} \mid \pi_{01})
-\end{eqnarray*}
-where $\pi_{rs}=\Pr(Y_1=r, Y_2=s)$ is the joint probability, and
-$\pi_{00}=1-\pi_{11}-\pi_{10}-\pi_{01}$.
-
-
-\item The \emph{systematic components} model the marginal probabilities,
-  $\pi_j=\Pr(Y_j=1)$, as well as the odds ratio.  The odds ratio
-  is defined as $\psi = \pi_{00} \pi_{01}/\pi_{10}\pi_{11}$ and
-  describes the relationship between the two outcomes.  Thus, for each
-  observation we have
-\begin{eqnarray*}
-\pi_j & = & \frac{1}{1 + \exp(-x_j \beta_j)} \quad \textrm{ for} \quad
-j=1,2, \\
-\psi &= & \exp(x_3 \beta_3).
-\end{eqnarray*}
-
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the bivariate logit model
-  are the predicted joint probabilities. Simulations of $\beta_1$,
-  $\beta_2$, and $\beta_3$ (drawn from their sampling distributions)
-  are substituted into the systematic components $(\pi_1, \pi_2,
-  \psi)$ to find simulations of the predicted joint probabilities:
-\begin{eqnarray*}
-\pi_{11} & = & \left\{ \begin{array}{ll}
-                 \frac{1}{2}(\psi - 1)^{-1}\left\{a - \sqrt{a^2 + b}\right\} &
-                 \textrm{for} \; \psi \ne 1 \\
-                 \pi_1 \pi_2 & \textrm{for} \; \psi = 1 
-                 \end{array} \right., \\
-\pi_{10} &=& \pi_1 - \pi_{11}, \\
-\pi_{01} &=& \pi_2 - \pi_{11}, \\
-\pi_{00} &=& 1 - \pi_{10} - \pi_{01} - \pi_{11},
-\end{eqnarray*}
-where $a = 1 + (\pi_1 + \pi_2)(\psi - 1)$, $b = -4 \psi(\psi - 1)
-\pi_1 \pi_2$, and the joint probabilities for each observation must sum
-to one.  For $n$ simulations, the expected values form an $n \times 4$
-matrix for each observation in {\tt x}.  
-
-\item The predicted values ({\tt qi\$pr}) are draws from the
-  multinomial distribution given the expected joint probabilities. 
-
-\item The first differences ({\tt qi\$fd}) for each
-  of the predicted joint probabilities are given by $$\textrm{FD}_{rs}
-  = \Pr(Y_1=r, Y_2=s \mid x_1)-\Pr(Y_1=r, Y_2=s \mid x).$$  
-  
-\item The risk ratio ({\tt qi\$rr}) for each of the predicted joint
-  probabilities are given by
-\begin{equation*}
-\textrm{RR}_{rs} = \frac{\Pr(Y_1=r, Y_2=s \mid x_1)}{\Pr(Y_1=r, Y_2=s \mid x)}
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      E[Y_{ij}(t_i=0)] \right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_{ij}(t_i=0)]$,
-    the counterfactual expected value of $Y_{ij}$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      \widehat{Y_{ij}(t_i=0)} \right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_{ij}(t_i=0)}$, the counterfactual predicted value of
-    $Y_{ij}$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "blogit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-obtain a default summary of information through {\tt summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the named vector of coefficients.   
-   \item {\tt fitted.values}: an $n \times 4$ matrix of the in-sample
-     fitted values.
-   \item {\tt predictors}: an $n \times 3$ matrix of the linear
-     predictors $x_j \beta_j$.
-   \item {\tt residuals}: an $n \times 3$ matrix of the residuals.  
-   \item {\tt df.residual}: the residual degrees of freedom.  
-   \item {\tt df.total}: the total degrees of freedom.
-   \item {\tt rss}: the residual sum of squares.  
-   \item {\tt y}: an $n \times 2$ matrix of the dependent variables.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract:
-  \begin{itemize}
-  \item {\tt coef3}: a table of the coefficients with their associated
-    standard errors and $t$-statistics.
-  \item {\tt cov.unscaled}: the variance-covariance matrix. 
-  \item {\tt pearson.resid}: an $n \times 3$ matrix of the Pearson residuals.  
-  \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as arrays indexed by simulation
-  $\times$ quantity $\times$ {\tt x}-observation (for more than one
-  {\tt x}-observation; otherwise the quantities are matrices).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected joint probabilities (or expected
-     values) for the specified values of {\tt x}.  
-   \item {\tt qi\$pr}: the simulated predicted outcomes drawn from a
-     distribution defined by the expected joint probabilities.
-   \item {\tt qi\$fd}: the simulated first difference in the
-     expected joint probabilities for the values specified in {\tt x} and
-     {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio in the predicted
-     probabilities for given {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection*{How to Cite}
-\input{cites/blogit}
-\input{citeZelig}
-\subsection*{See also}
-The bivariate logit function is part of the VGAM package by Thomas Yee \citep{YeeHas03}. In addition, advanced users may wish to refer to \texttt{help(vglm)} 
-in the VGAM library.  Additional documentation is available at
-\url{http://www.stat.auckland.ac.nz/~yee}. Sample data are from \citet{Martin92}.
-
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
diff --git a/vignettes/bprobit.Rnw b/vignettes/bprobit.Rnw
deleted file mode 100644
index 703a5fc..0000000
--- a/vignettes/bprobit.Rnw
+++ /dev/null
@@ -1,385 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=bprobit}
-\include{zinput}
-%\VignetteIndexEntry{Bivariate Probit Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, VGAM}
-%\VignetteKeyWords{model,probit, logistic regression, dichotomous}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-pkg <- search()
-if(!length(grep("package:Zelig",pkg)))
-library(Zelig)
-@ 
-
-\section{{\tt bprobit}: Bivariate Probit Regression for Two
-Dichotomous Dependent Variables}\label{bprobit}
-
-Use the bivariate probit regression model if you have two binary
-dependent variables $(Y_1, Y_2)$, and wish to model them jointly as a
-function of some explanatory variables.  Each pair of dependent
-variables $(Y_{i1}, Y_{i2})$ has four potential outcomes, $(Y_{i1}=1,
-Y_{i2}=1)$, $(Y_{i1}=1, Y_{i2}=0)$, $(Y_{i1}=0, Y_{i2}=1)$, and
-$(Y_{i1}=0, Y_{i2}=0)$.  The joint probability for each of these four
-outcomes is modeled with three systematic components: the marginal
-Pr$(Y_{i1} = 1)$ and Pr$(Y_{i2} = 1)$, and the correlation parameter
-$\rho$ for the two marginal distributions.  Each of these systematic
-components may be modeled as functions of (possibly different) sets of
-explanatory variables.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(list(mu1 = Y1 ~ X1 + X2, 
-                      mu2 = Y2 ~ X1 + X3,
-                      rho = ~ 1),
-                 model = "bprobit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Input Values}
-
-In every bivariate probit specification, there are three equations
-which correspond to each dependent variable ($Y_1$, $Y_2$), and the
-correlation parameter $\rho$.  Since the correlation parameter does
-not correspond to one of the dependent variables, the model estimates
-$\rho$ as a constant by default.  Hence, only two formulas (for
-$\mu_1$ and $\mu_2$) are required.  If the explanatory variables for
-$\mu_1$ and $\mu_2$ are the same and effects are estimated separately
-for each parameter, you may use the following short hand:  
-<<InputValues.list>>=
-fml <- list(cbind(Y1,Y2) ~ X1 + X2)
-@ 
-which has the same meaning as:  
-<<InputValues.list.rho>>=
-fml <- list(mu1 = Y1 ~ X1 + X2,  
-            mu2 = Y2 ~ X1 + X2, 
-            rho = ~ 1)
-@ 
-You may use the function {\tt tag()} to constrain variables across
-equations.  The {\tt tag()} function takes a variable and a label for
-the effect parameter.  Below, the constrained effect of {\tt
-x3} in both equations is called the {\tt age} parameter:  
-<<InputValues.list.mu>>=
-fml <- list(mu1 = y1 ~ x1 + tag(x3, "age"), 
-            mu2 = y2 ~ x2 + tag(x3, "age"))
-@ 
-You may also constrain different variables across different equations
-to have the same effect.  
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-
-\item {Basic Example} \label{basic.bp}
-
-Load the data and estimate the model:  
-<<BasicExample.data>>=
- data(sanction)
-@ 
-<<BasicExample.zelig>>=
- z.out1 <- zelig(cbind(import, export) ~ coop + cost + target, 
-                  model = "bprobit", data = sanction)
-@ 
-By default, {\tt zelig()} estimates two effect parameters
-for each explanatory variable in addition to the correlation coefficient;
-this formulation is parametrically independent (estimating
-unconstrained effects for each explanatory variable), but
-stochastically dependent because the models share a correlation parameter.
-\newline \newline Generate baseline values for the explanatory
-variables (with cost set to 1, net gain to sender) and alternative
-values (with cost set to 4, major loss to sender):
-<<BasicExample.setx>>=
- x.low <- setx(z.out1, cost = 1)
- x.high <- setx(z.out1, cost = 4)
-@ 
-Simulate fitted values and first differences:  
-<<BasicExample.sim>>=
- s.out1 <- sim(z.out1, x = x.low, x1 = x.high)
- summary(s.out1)
-@ 
-\begin{center}
-<<label=BasicExamplePlot,fig=true>>= 
- plot(s.out1)
-@ 
-\end{center}
-
-
-\item {Joint Estimation of a Model with Different Sets of Explanatory Variables}\label{sto.dep.probit}
-
-Using the sample data \texttt{sanction}, estimate the statistical model, 
-with {\tt import} a function of {\tt coop} in the first equation and 
-{\tt export} a function of {\tt cost} and {\tt target} in the second equation:
-<<JointEstimation.list>>=
- fml2 <- list(mu1 = import ~ coop, 
-               mu2 = export ~ cost + target)
-@ 
-<<JointEstimation.zelig>>=
- z.out2 <- zelig(fml2, model = "bprobit", data = sanction)
- summary(z.out2)
-@ 
-Set the explanatory variables to their means:
-<<JointEstimation.setx>>=
- x.out2 <- setx(z.out2)
-@ 
-Simulate draws from the posterior distribution:
-<<JointEstimation.sim>>=
- s.out2 <- sim(z.out2, x = x.out2)
- summary(s.out2)
-@
-\begin{center}
-<<label=JointEstimationPlot,fig=true>>= 
- plot(s.out2)
-@ 
-\end{center}
-
-
-\item Joint Estimation of a Parametrically and Stochastically
-Dependent Model 
-\label{pdep.p}
-  
-Using the sample data \texttt{sanction}.     
-The bivariate model is parametrically dependent if $Y_1$ and $Y_2$ share
-some or all explanatory variables, {\it and} the effects of the shared
-explanatory variables are jointly estimated.  For example,
-<<JointEstimationParam.list>>= 
- fml3 <- list(mu1 = import ~ tag(coop,"coop") + tag(cost,"cost") + 
-                          tag(target,"target"), 
-               mu2 = export ~ tag(coop,"coop") + tag(cost,"cost") + 
-                          tag(target,"target"))
-@ 
-<<JointEstimationParam.zelig>>= 
- z.out3 <- zelig(fml3, model = "bprobit", data = sanction)
- summary(z.out3)
-@ 
-
-Note that this model only returns one parameter estimate for each of
-{\tt coop}, {\tt cost}, and {\tt target}.  Contrast this to
-Example~\ref{basic.bp} which returns two parameter estimates for each
-of the explanatory variables.  \newline \newline Set values for the
-explanatory variables:
-<<JointEstimationParam.setx>>= 
- x.out3 <- setx(z.out3, cost = 1:4)
-@ 
-Draw simulated expected values:  
-<<JointEstimationParam.sim>>= 
- s.out3 <- sim(z.out3, x = x.out3)
- summary(s.out3)
-@ 
-
-\end{enumerate}
-
-\subsubsection{Model}
-
-For each observation, define two binary dependent variables, $Y_1$ and
-$Y_2$, each of which take the value of either 0 or 1 (in the
-following, we suppress the observation index $i$).  We model the joint
-outcome $(Y_1$, $Y_2)$ using two marginal probabilities for each
-dependent variable, and the correlation parameter, which describes how
-the two dependent variables are related. 
-%Define $Y_{rs}$ such that it
-%is equal to 1 when $Y_1=r$ and $Y_2=s$ and is 0 otherwise where $r$
-%and $s$ take a value of either 0 or 1. Then, the model is defined as
-%follows,
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by two latent (unobserved)
-  continuous variables which follow the bivariate Normal distribution:
-\begin{eqnarray*}
-  \left ( \begin{array}{c} 
-      Y_1^* \\
-      Y_2^* 
-    \end{array}
-  \right ) &\sim &  
-  N_2 \left \{ \left ( 
-      \begin{array}{c}
-        \mu_1 \\ \mu_2
-      \end{array} \right ), \left( \begin{array}{cc}
-                 1 & \rho \\
-                 \rho & 1 
-                 \end{array} \right) \right\},
-\end{eqnarray*}
-where $\mu_j$ is a mean for $Y_j^*$ and $\rho$ is a scalar correlation
-parameter. The following observation mechanism links the observed
-dependent variables, $Y_j$, with these latent variables
-\begin{eqnarray*}
-Y_j & = & \left \{ \begin{array}{cc}
-                   1 & {\rm if} \; Y_j^* \ge 0, \\
-                   0 & {\rm otherwise.}
-                   \end{array} 
-                   \right.
-\end{eqnarray*}
-
-%Alternatively, the stochastic component for the observed dependent
-%variables can be written as
-%\begin{eqnarray*}
-%  Y_{11} &\sim& \textrm{Bernoulli}(y_{11} \mid \pi_{11}) \\
-%  Y_{10} &\sim& \textrm{Bernoulli}(y_{10} \mid \pi_{10}) \\
-% Y_{01} &\sim& \textrm{Bernoulli}(y_{01} \mid \pi_{01})
-%\end{eqnarray*}
-%where $\pi_{rs}=\Pr(Y_1=r, Y_2=s)$ is the joint probability, and
-%$\pi_{00}=1-\pi_{11}-\pi_{10}-\pi_{01}$. Each of these joint
-%probabilities is modeled using the bivariate normal cumulative
-%distribution function.
-
-\item The \emph{systematic components} for each observation are 
-  \begin{eqnarray*}
-    \mu_j & = & x_{j} \beta_j \quad {\rm for} \quad j=1,2, \\
-    \rho & = & \frac{\exp(x_3 \beta_3) - 1}{\exp(x_3 \beta_3) + 1}.
-\end{eqnarray*}
-
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-For $n$ simulations, expected values form an $n \times 4$
-matrix.  
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the binomial probit model
-  are the predicted joint probabilities. Simulations of $\beta_1$,
-  $\beta_2$, and $\beta_3$ (drawn form their sampling distributions)
-  are substituted into the systematic components, to find simulations
-  of the predicted joint probabilities $\pi_{rs}=\Pr(Y_1=r, Y_2=s)$:
-\begin{eqnarray*}
-\pi_{11} &= \Pr(Y_1^* \geq 0 , Y_2^* \geq 0) &= \int_0^{\infty}
-\int_0^{\infty} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^* \\
-\pi_{10} &= \Pr(Y_1^* \geq 0 , Y_2^* < 0)  &= \int_0^{\infty}
-\int_{-\infty}^{0} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^*\\
-\pi_{01} &= \Pr(Y_1^* < 0 , Y_2^* \geq 0)  &= \int_{-\infty}^{0}
-\int_0^{\infty} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^*\\
-\pi_{00} &= \Pr(Y_1^* < 0 , Y_2^* < 0)  &= \int_{-\infty}^{0}
-\int_{-\infty}^{0} \phi_2 (\mu_1, \mu_2, \rho) \, dY_2^*\, dY_1^*\\
-\end{eqnarray*}
-where $r$ and $s$ may take a value of either 0 or 1, and $\phi_2$ is
-the bivariate Normal density.
-  
-\item The predicted values ({\tt qi\$pr}) are draws from the
-  multinomial distribution given the expected joint probabilities.  
-
-\item The first difference ({\tt qi\$fd}) in each of the predicted joint
-  probabilities are given by
-  $$\textrm{FD}_{rs} = \Pr(Y_1=r, Y_2=s \mid x_1)-\Pr(Y_1=r, Y_2=s
-  \mid x).$$
-  
-\item The risk ratio ({\tt qi\$rr}) for each of the predicted joint
-  probabilities are given by
-\begin{equation*}
-\textrm{RR}_{rs} = \frac{\Pr(Y_1=r, Y_2=s \mid x_1)}{\Pr(Y_1=r, Y_2=s \mid x)}.
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      E[Y_{ij}(t_i=0)] \right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_{ij}(t_i=0)]$,
-    the counterfactual expected value of $Y_{ij}$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_{ij}(t_i=1) -
-      \widehat{Y_{ij}(t_i=0)}\right\} \textrm{ for } j = 1,2,
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_{ij}(t_i=0)}$, the counterfactual predicted value of
-    $Y_{ij}$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\, x,
-  model = "bprobit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-obtain a default summary of information through
-\texttt{summary(z.out)}.  Other elements available through the {\tt
-  \$} operator are listed below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the named vector of coefficients.   
-   \item {\tt fitted.values}: an $n \times 4$ matrix of the in-sample
-     fitted values.
-   \item {\tt predictors}: an $n \times 3$ matrix of the linear
-     predictors $x_j \beta_j$.
-   \item {\tt residuals}: an $n \times 3$ matrix of the residuals.  
-   \item {\tt df.residual}: the residual degrees of freedom.  
-   \item {\tt df.total}: the total degrees of freedom.
-   \item {\tt rss}: the residual sum of squares.  
-   \item {\tt y}: an $n \times 2$ matrix of the dependent variables.  
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-  \item {\tt coef3}: a table of the coefficients with their associated
-    standard errors and $t$-statistics.
-  \item {\tt cov.unscaled}: the variance-covariance matrix. 
-  \item {\tt pearson.resid}: an $n \times 3$ matrix of the Pearson residuals.  
-\end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as arrays indexed by simulation
-  $\times$ quantity $\times$ {\tt x}-observation (for more than one
-  {\tt x}-observation; otherwise the quantities are matrices).  Available quantities
-  are:  
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values (joint predicted
-     probabilities) for the specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted outcomes drawn from a
-     distribution defined by the joint predicted probabilities.
-   \item {\tt qi\$fd}: the simulated first difference in the predicted
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio in the predicted
-     probabilities for given {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection*{How to Cite}
-\input{cites/bprobit}
-\input{citeZelig}
-\subsection*{See also}
-The bivariate probit function is part of the VGAM package by Thomas Yee \citep{YeeHas03}. In addition, advanced users may wish to refer to \texttt{help(vglm)} 
-in the VGAM library.  Additional documentation is available at
-\url{http://www.stat.auckland.ac.nz/~yee}. Sample data are from \cite{Martin92}.
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
diff --git a/vignettes/citeZelig.tex b/vignettes/citeZelig.tex
deleted file mode 100644
index b6c5b25..0000000
--- a/vignettes/citeZelig.tex
+++ /dev/null
@@ -1,8 +0,0 @@
-To cite Zelig as a whole, please reference these two sources:
-\begin{verse}
-  Kosuke Imai, Gary King, and Olivia Lau. 2007. ``Zelig: Everyone's
-  Statistical Software,'' \url{http://GKing.harvard.edu/zelig}.
-\end{verse}
-\begin{verse}
-Imai, Kosuke, Gary King, and Olivia Lau. (2008). ``Toward A Common Framework for Statistical Analysis and Development.'' Journal of Computational and Graphical Statistics, Vol. 17, No. 4 (December), pp. 892-913. 
-\end{verse}
diff --git a/vignettes/cites/aov.tex b/vignettes/cites/aov.tex
deleted file mode 100644
index 89bd3d1..0000000
--- a/vignettes/cites/aov.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ aov } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "aov: Fit an Analysis of Variance Model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/arima.tex b/vignettes/cites/arima.tex
deleted file mode 100644
index fd0aaf1..0000000
--- a/vignettes/cites/arima.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ arima } Zelig model:
- \begin{verse}
- Justin Grimmer. 2007. "arima: Arima models for Time Series Data" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/blogit.tex b/vignettes/cites/blogit.tex
deleted file mode 100644
index 1f2be47..0000000
--- a/vignettes/cites/blogit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ blogit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "blogit: Bivariate Logistic Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/bprobit.tex b/vignettes/cites/bprobit.tex
deleted file mode 100644
index 80acef3..0000000
--- a/vignettes/cites/bprobit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ bprobit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "bprobit: Bivariate Probit Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/chopit.tex b/vignettes/cites/chopit.tex
deleted file mode 100644
index 32113a5..0000000
--- a/vignettes/cites/chopit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ chopit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "chopit: Compound Hierarchical Ordinal Probit Regression for Survey Vignettes" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/cloglog.net.tex b/vignettes/cites/cloglog.net.tex
deleted file mode 100644
index f28993b..0000000
--- a/vignettes/cites/cloglog.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ cloglog.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "cloglog.net: Social Network Complementary Log Log Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/coxph.tex b/vignettes/cites/coxph.tex
deleted file mode 100644
index 7ef85d6..0000000
--- a/vignettes/cites/coxph.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ coxph } Zelig model:
- \begin{verse}
- Patrick Lam. 2007. "coxph: Cox Proportional Hazard Regression for Duration Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ei.RxC.tex b/vignettes/cites/ei.RxC.tex
deleted file mode 100644
index 81ae8f6..0000000
--- a/vignettes/cites/ei.RxC.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ei.RxC } Zelig model:
- \begin{verse}
- Jason Wittenberg, Ferdinand Alimadhi, Badri Narayan Bhaskar, and Olivia Lau. 2007. "ei.RxC: Hierarchical Multinomial-Dirichlet Ecological Inference Model for R x C Tables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ei.dynamic.tex b/vignettes/cites/ei.dynamic.tex
deleted file mode 100644
index 8c714ba..0000000
--- a/vignettes/cites/ei.dynamic.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ei.dynamic } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "ei.dynamic: Quinn's Dynamic Ecological Inference Model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ei.hier.tex b/vignettes/cites/ei.hier.tex
deleted file mode 100644
index 6636fca..0000000
--- a/vignettes/cites/ei.hier.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ei.hier } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "ei.hier: Hierarchical Ecological Inference Model for  2 x 2 Tables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/exp.tex b/vignettes/cites/exp.tex
deleted file mode 100644
index 5176c81..0000000
--- a/vignettes/cites/exp.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ exp } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "exp: Exponential Regression for Duration Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/factor.bayes.tex b/vignettes/cites/factor.bayes.tex
deleted file mode 100644
index 5a9c53a..0000000
--- a/vignettes/cites/factor.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ factor.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "factor.bayes: Bayesian Factor Analysis" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/factor.mix.tex b/vignettes/cites/factor.mix.tex
deleted file mode 100644
index d94e52b..0000000
--- a/vignettes/cites/factor.mix.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ factor.mix } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "factor.mix: Mixed Data Factor Analysis" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/factor.ord.tex b/vignettes/cites/factor.ord.tex
deleted file mode 100644
index a532823..0000000
--- a/vignettes/cites/factor.ord.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ factor.ord } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "factor.ord: Ordinal Data Factor Analysis" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/gamma.gee.tex b/vignettes/cites/gamma.gee.tex
deleted file mode 100644
index 9a55044..0000000
--- a/vignettes/cites/gamma.gee.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ gamma.gee } Zelig model:
- \begin{verse}
- Patrick Lam. 2007. "gamma.gee: General Estimating Equation for Gamma Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/gamma.mixed.tex b/vignettes/cites/gamma.mixed.tex
deleted file mode 100644
index 5165130..0000000
--- a/vignettes/cites/gamma.mixed.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ gamma.mixed } Zelig model:
- \begin{verse}
- Delia Bailey and Ferdinand Alimadhi. 2007. "gamma.mixed: Mixed effects gamma model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/gamma.net.tex b/vignettes/cites/gamma.net.tex
deleted file mode 100644
index bff4d89..0000000
--- a/vignettes/cites/gamma.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ gamma.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "gamma.net: Social Network Gamma Regression for Continuous, Positive Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/gamma.survey.tex b/vignettes/cites/gamma.survey.tex
deleted file mode 100644
index bc36d16..0000000
--- a/vignettes/cites/gamma.survey.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ gamma.survey } Zelig model:
- \begin{verse}
- Nicholas Carnes. 2008. "gamma.survey: Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/gamma.tex b/vignettes/cites/gamma.tex
deleted file mode 100644
index df4aee2..0000000
--- a/vignettes/cites/gamma.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ gamma } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "gamma: Gamma Regression for Continuous, Positive Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/irt1d.tex b/vignettes/cites/irt1d.tex
deleted file mode 100644
index 6c5fcf7..0000000
--- a/vignettes/cites/irt1d.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ irt1d } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "irt1d: One Dimensional Item Response Model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/irtkd.tex b/vignettes/cites/irtkd.tex
deleted file mode 100644
index b41c2dd..0000000
--- a/vignettes/cites/irtkd.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ irtkd } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "irtkd: K-Dimensional Item Response Model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.bayes.tex b/vignettes/cites/logit.bayes.tex
deleted file mode 100644
index 473fb1c..0000000
--- a/vignettes/cites/logit.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "logit.bayes: Bayesian Logistic Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.gam.tex b/vignettes/cites/logit.gam.tex
deleted file mode 100644
index 499b0a1..0000000
--- a/vignettes/cites/logit.gam.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit.gam } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "logit.gam: Generalized Additive Model for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.gee.tex b/vignettes/cites/logit.gee.tex
deleted file mode 100644
index 8a8f782..0000000
--- a/vignettes/cites/logit.gee.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit.gee } Zelig model:
- \begin{verse}
- Patrick Lam. 2007. "logit.gee: General Estimating Equation for Logistic Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.mixed.tex b/vignettes/cites/logit.mixed.tex
deleted file mode 100644
index 67c5863..0000000
--- a/vignettes/cites/logit.mixed.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit.mixed } Zelig model:
- \begin{verse}
- Delia Bailey and Ferdinand Alimadhi. 2007. "logit.mixed: Mixed effects logistic model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.net.tex b/vignettes/cites/logit.net.tex
deleted file mode 100644
index 20fad34..0000000
--- a/vignettes/cites/logit.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "logit.net: Social Network Logistic Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.survey.tex b/vignettes/cites/logit.survey.tex
deleted file mode 100644
index da8a5b6..0000000
--- a/vignettes/cites/logit.survey.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit.survey } Zelig model:
- \begin{verse}
- Nicholas Carnes. 2007. "logit.survey: Survey-Weighted Logistic Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/logit.tex b/vignettes/cites/logit.tex
deleted file mode 100644
index 26c3ca4..0000000
--- a/vignettes/cites/logit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ logit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2008. "logit: Logistic Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/lognorm.tex b/vignettes/cites/lognorm.tex
deleted file mode 100644
index 351293b..0000000
--- a/vignettes/cites/lognorm.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ lognorm } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "lognorm: Log-Normal Regression for Duration Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ls.mixed.tex b/vignettes/cites/ls.mixed.tex
deleted file mode 100644
index 46ee739..0000000
--- a/vignettes/cites/ls.mixed.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ls.mixed } Zelig model:
- \begin{verse}
- Delia Bailey and Ferdinand Alimadhi. 2007. "ls.mixed: Mixed effects linear model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ls.net.tex b/vignettes/cites/ls.net.tex
deleted file mode 100644
index 134e16f..0000000
--- a/vignettes/cites/ls.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ls.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "ls.net: Social Network Least Squares Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ls.tex b/vignettes/cites/ls.tex
deleted file mode 100644
index 78c5617..0000000
--- a/vignettes/cites/ls.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ls } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "ls: Least Squares Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/mlogit.bayes.tex b/vignettes/cites/mlogit.bayes.tex
deleted file mode 100644
index 5cdba0b..0000000
--- a/vignettes/cites/mlogit.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ mlogit.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "mlogit.bayes: Bayesian Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/mlogit.tex b/vignettes/cites/mlogit.tex
deleted file mode 100644
index 4c7b26c..0000000
--- a/vignettes/cites/mlogit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ mlogit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "mlogit: Multinomial Logistic Regression for Dependent Variables with Unordered Categorical Values" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/mprobit.tex b/vignettes/cites/mprobit.tex
deleted file mode 100644
index db34c34..0000000
--- a/vignettes/cites/mprobit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ mprobit } Zelig model:
- \begin{verse}
- Kosuke Imai. 2009. "mprobit: Multinomial Probit Model via Markov Chain Monte Carlo Method" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/negbin.tex b/vignettes/cites/negbin.tex
deleted file mode 100644
index 5715f18..0000000
--- a/vignettes/cites/negbin.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ negbin } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "negbin: Negative Binomial Regression for Event Count Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/normal.bayes.tex b/vignettes/cites/normal.bayes.tex
deleted file mode 100644
index 9f832a4..0000000
--- a/vignettes/cites/normal.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ normal.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "normal.bayes: Bayesian Normal Linear Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/normal.gam.tex b/vignettes/cites/normal.gam.tex
deleted file mode 100644
index 08cfcb5..0000000
--- a/vignettes/cites/normal.gam.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ normal.gam } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "normal.gam: Generalized Additive Model for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/normal.gee.tex b/vignettes/cites/normal.gee.tex
deleted file mode 100644
index 055c3b4..0000000
--- a/vignettes/cites/normal.gee.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ normal.gee } Zelig model:
- \begin{verse}
- Patrick Lam. 2007. "normal.gee: General Estimating Equation for Normal Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/normal.net.tex b/vignettes/cites/normal.net.tex
deleted file mode 100644
index 63f2255..0000000
--- a/vignettes/cites/normal.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ normal.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "normal.net: Social Network Normal Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/normal.survey.tex b/vignettes/cites/normal.survey.tex
deleted file mode 100644
index aa801c9..0000000
--- a/vignettes/cites/normal.survey.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ normal.survey } Zelig model:
- \begin{verse}
- Nicholas Carnes. 2008. "normal.survey: Survey-Weighted Normal Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/normal.tex b/vignettes/cites/normal.tex
deleted file mode 100644
index d9f5b5b..0000000
--- a/vignettes/cites/normal.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ normal } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "normal: Normal Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/ologit.tex b/vignettes/cites/ologit.tex
deleted file mode 100644
index 8ccc337..0000000
--- a/vignettes/cites/ologit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ ologit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "ologit: Ordinal Logistic Regression for Ordered Categorical Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/oprobit.bayes.tex b/vignettes/cites/oprobit.bayes.tex
deleted file mode 100644
index 92f9694..0000000
--- a/vignettes/cites/oprobit.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ oprobit.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "oprobit.bayes: Bayesian Ordered Probit Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/oprobit.tex b/vignettes/cites/oprobit.tex
deleted file mode 100644
index 904b29c..0000000
--- a/vignettes/cites/oprobit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ oprobit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Olivia Lau. 2007. "oprobit: Ordinal Probit Regression for Ordered Categorical Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.bayes.tex b/vignettes/cites/poisson.bayes.tex
deleted file mode 100644
index ff30ae1..0000000
--- a/vignettes/cites/poisson.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "poisson.bayes: Bayesian Poisson Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.gam.tex b/vignettes/cites/poisson.gam.tex
deleted file mode 100644
index 6580d35..0000000
--- a/vignettes/cites/poisson.gam.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson.gam } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "poisson.gam: Generalized Additive Model for Event Count Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.gee.tex b/vignettes/cites/poisson.gee.tex
deleted file mode 100644
index 92dcba6..0000000
--- a/vignettes/cites/poisson.gee.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson.gee } Zelig model:
- \begin{verse}
- Patrick Lam. 2007. "poisson.gee: General Estimating Equation for Poisson Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.mixed.tex b/vignettes/cites/poisson.mixed.tex
deleted file mode 100644
index 8fc4c45..0000000
--- a/vignettes/cites/poisson.mixed.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson.mixed } Zelig model:
- \begin{verse}
- Delia Bailey and Ferdinand Alimadhi. 2007. "poisson.mixed: Mixed effects poisson model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.net.tex b/vignettes/cites/poisson.net.tex
deleted file mode 100644
index cc49d4f..0000000
--- a/vignettes/cites/poisson.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "poisson.net: Social Network Poisson Regression for Event Count Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.survey.tex b/vignettes/cites/poisson.survey.tex
deleted file mode 100644
index 4738727..0000000
--- a/vignettes/cites/poisson.survey.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson.survey } Zelig model:
- \begin{verse}
- Nicholas Carnes. 2008. "poisson.survey: Survey-Weighted Poisson Regression for Event Count Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/poisson.tex b/vignettes/cites/poisson.tex
deleted file mode 100644
index a2e1963..0000000
--- a/vignettes/cites/poisson.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ poisson } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Oliva Lau. 2007. "poisson: Poisson Regression for Event Count Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.bayes.tex b/vignettes/cites/probit.bayes.tex
deleted file mode 100644
index e489b5e..0000000
--- a/vignettes/cites/probit.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "probit.bayes: Bayesian Probit Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.gam.tex b/vignettes/cites/probit.gam.tex
deleted file mode 100644
index 94c7da4..0000000
--- a/vignettes/cites/probit.gam.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit.gam } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "probit.gam: Generalized Additive Model for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.gee.tex b/vignettes/cites/probit.gee.tex
deleted file mode 100644
index eb13c90..0000000
--- a/vignettes/cites/probit.gee.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit.gee } Zelig model:
- \begin{verse}
- Patrick Lam. 2007. "probit.gee: General Estimating Equation for Probit Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.mixed.tex b/vignettes/cites/probit.mixed.tex
deleted file mode 100644
index acdc04e..0000000
--- a/vignettes/cites/probit.mixed.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit.mixed } Zelig model:
- \begin{verse}
- Delia Bailey and Ferdinand Alimadhi. 2007. "probit.mixed: Mixed effects probit model" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.net.tex b/vignettes/cites/probit.net.tex
deleted file mode 100644
index 6bf8d72..0000000
--- a/vignettes/cites/probit.net.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit.net } Zelig model:
- \begin{verse}
- Skyler J. Cranmer. 2007. "probit.net: Social Network Probit Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.survey.tex b/vignettes/cites/probit.survey.tex
deleted file mode 100644
index d1def36..0000000
--- a/vignettes/cites/probit.survey.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit.survey } Zelig model:
- \begin{verse}
- Nicholas Carnes. 2008. "probit.survey: Survey-Weighted Probit Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/probit.tex b/vignettes/cites/probit.tex
deleted file mode 100644
index 704caee..0000000
--- a/vignettes/cites/probit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ probit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Oliva Lau. 2007. "probit: Probit Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/quantile.tex b/vignettes/cites/quantile.tex
deleted file mode 100644
index 2b7b4f0..0000000
--- a/vignettes/cites/quantile.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ quantile } Zelig model:
- \begin{verse}
- Alexander D'Amour. 2008. "quantile: Quantile Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/relogit.tex b/vignettes/cites/relogit.tex
deleted file mode 100644
index 68db00a..0000000
--- a/vignettes/cites/relogit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ relogit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Oliva Lau. 2007. "relogit: Rare Events Logistic Regression for Dichotomous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/rq.tex b/vignettes/cites/rq.tex
deleted file mode 100644
index 29ae716..0000000
--- a/vignettes/cites/rq.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ rq } Zelig model:
- \begin{verse}
- Alexander D'Amour. 2008. "rq: Quantile Regression for Continuous Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/sur.tex b/vignettes/cites/sur.tex
deleted file mode 100644
index 55a6694..0000000
--- a/vignettes/cites/sur.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ sur } Zelig model:
- \begin{verse}
- Ferdinand Alimadhi, Ying Lu, and Elena Villalon. 2007. "sur: Seemingly Unrelated Regression" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/threesls.tex b/vignettes/cites/threesls.tex
deleted file mode 100644
index 4278b0d..0000000
--- a/vignettes/cites/threesls.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ threesls } Zelig model:
- \begin{verse}
- Ferdinand Alimadhi, Ying Lu, and Elena Villalon. 2007. "threesls: Three Stage Least Squares" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/tobit.bayes.tex b/vignettes/cites/tobit.bayes.tex
deleted file mode 100644
index acb5bd3..0000000
--- a/vignettes/cites/tobit.bayes.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ tobit.bayes } Zelig model:
- \begin{verse}
- Ben Goodrich and Ying Lu. 2007. "tobit.bayes: Bayesian Linear Regression for a Censored Dependent Variable" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/tobit.tex b/vignettes/cites/tobit.tex
deleted file mode 100644
index 71448ea..0000000
--- a/vignettes/cites/tobit.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ tobit } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Oliva Lau. 2007. "tobit: Linear regression for Left-Censored Dependent Variable" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/twosls.tex b/vignettes/cites/twosls.tex
deleted file mode 100644
index 923c40b..0000000
--- a/vignettes/cites/twosls.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ twosls } Zelig model:
- \begin{verse}
- Ferdinand Alimadhi, Ying Lu, and Elena Villalon. 2007. "twosls: Two Stage Least Squares" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/cites/weibull.tex b/vignettes/cites/weibull.tex
deleted file mode 100644
index 39ae932..0000000
--- a/vignettes/cites/weibull.tex
+++ /dev/null
@@ -1,4 +0,0 @@
-To cite the \emph{ weibull } Zelig model:
- \begin{verse}
- Kosuke Imai, Gary King, and Oliva Lau. 2007. "weibull: Weibull Regression for Duration Dependent Variables" in Kosuke Imai, Gary King, and Olivia Lau, "Zelig: Everyone's Statistical Software,"\url{http://gking.harvard.edu/zelig} 
-\end{verse}
\ No newline at end of file
diff --git a/vignettes/gamma.Rnw b/vignettes/gamma.Rnw
deleted file mode 100644
index 0d59b08..0000000
--- a/vignettes/gamma.Rnw
+++ /dev/null
@@ -1,252 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=gamma}
-\include{zinput}
-%\VignetteIndexEntry{Gamma Regression for Continuous, Positive Dependent Variables}
-%\VignetteDepends{Zelig, MCMCpack}
-%\VignetteKeyWords{model,regression,gamma distribution}
-%\VignettePackage{Zelig, stats}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-\section{{\tt gamma}: Gamma Regression for Continuous, Positive Dependent Variables}\label{gamma}
-
-Use the gamma regression model if you have a positive-valued dependent
-variable such as the number of years a parliamentary cabinet endures,
-or the seconds you can stay airborne while jumping.  The gamma
-distribution assumes that all waiting times are complete by the end
-of the study (censoring is not allowed).
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "gamma", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out, x1 = NULL)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for gamma regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Example}
-
-Attach the sample data: 
-<<Example.data>>=
- data(coalition)
-@ 
-Estimate the model: 
-<<Example.zelig>>=
- z.out <- zelig(duration ~ fract + numst2, model = "gamma", data = coalition)
-@ 
-View the regression output:  
-<<Example.summary>>=
- summary(z.out)
-@ 
-Set the baseline values (with the ruling coalition in the minority)
-and the alternative values (with the ruling coalition in the majority)
-for X:
-<<Example.setx>>=
- x.low <- setx(z.out, numst2 = 0)
- x.high <- setx(z.out, numst2 = 1)
-@ 
-Simulate expected values ({\tt qi\$ev}) and first differences ({\tt qi\$fd}):
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.low, x1 = x.high)
-@ 
-<<Example.summary>>=
-summary(s.out)
-@ 
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>=
- plot(s.out)
-@ 
-\end{center}
-
-\subsubsection{Model}
-
-\begin{itemize}
-\item The Gamma distribution with scale parameter $\alpha$ has a
-\emph{stochastic component}:
-\begin{eqnarray*}
-Y &\sim& \textrm{Gamma}(y_i \mid \lambda_i, \alpha) \\
-f(y)  &=& \frac{1}{\alpha^{\lambda_i} \, \Gamma \lambda_i} \, y_i^{\lambda_i
-  - 1} \exp -\left\{ \frac{y_i}{\alpha} \right\}
-\end{eqnarray*}
-for $\alpha, \lambda_i, y_i > 0$.  \\
-
-\item The \emph{systematic component} is given by
-\begin{equation*}
-  \lambda_i = \frac{1}{x_i \beta}
-\end{equation*}
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) are simulations of the mean
-  of the stochastic component given draws of $\alpha$ and
-  $\beta$ from their posteriors:  $$E(Y) = \alpha \lambda_i.$$  
-\item The predicted values ({\tt qi\$pr}) are draws from the gamma
-  distribution for each given set of parameters $(\alpha, \lambda_i)$.
-\item If {\tt x1} is specified, {\tt sim()} also returns the
-  differences in the expected values ({\tt qi\$fd}), $$E(Y \mid x_1) -
-  E(Y \mid x)$$.
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.  
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "gamma", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$.
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from a
-     distribution defined by $(\alpha, \lambda_i)$.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     values for the specified values in {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite} 
-
-\input{cites/gamma}
-\input{citeZelig}
-
-
-\subsection* {See also}
-The gamma model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
-
-
-
-
diff --git a/vignettes/gamma.mixed.Rnw b/vignettes/gamma.mixed.Rnw
deleted file mode 100644
index 273b5c6..0000000
--- a/vignettes/gamma.mixed.Rnw
+++ /dev/null
@@ -1,181 +0,0 @@
-\SweaveOpts{eval=false, results=hide, prefix.string=gammamixed}
-\include{zinput}
-%\VignetteIndexEntry{Gamma mixed effects linear regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,linear, linear regression, gamma}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt gamma.mixed}: Mixed effects gamma regression}
-\label{gamma.mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. Gamma regression models a continuous, positive dependent variable.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="gamma.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, delta | g),
-               delta= ~ tag(w1 + w2 | g)), data=mydata, model="gamma.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the repsonse on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, delta | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt method:} a character string. The criterion is always the log-likelihood but this criterion does not have a closed form expression and must be approximated. The default approximation is {\tt "PQL"} or penalized quasi-likelihood. Alternatives are {\tt "Laplace"} or {\tt "AGQ"} indicating the Laplacian and adaptive Gaussian quadrature approximations respectively.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may with to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(coalition2)
-@
-
-Estimate model using optional arguments to specify approximation method for the log-likelihood, and the log link function for the Gamma family:
-<<Examples.zelig>>=
-z.out1 <- zelig(duration ~ invest + fract + polar + numst2 + crisis + tag(1 | country), data=coalition2, model="gamma.mixed", method="PQL",family=Gamma(link=log))
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-
-Set the baseline values (with the ruling coalition in the minority) and the alternative values (with the ruling coalition in the majority) for X:\\
-<<Examples.setx>>=
-x.high <- setx(z.out1, numst2 = 1)
-x.low <- setx(z.out1, numst2 = 0)
-@
-
-Simulate expected values ({\tt qi\$ev}) and first differences({\tt qi\$fd}): \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-\subsubsection{Mixed effects gamma regression Model}
-
-Let $Y_{ij}$ be the continuous, positive dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Gamma model with scale parameter $\alpha$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Gamma}(y_{ij} | \lambda_{ij}, \alpha)
-\end{equation*}
-where
-\begin{equation*}
-Gamma(y_{ij} | \lambda_{ij}, \alpha) = \frac{1}{\alpha^{\lambda_{ij}} \Gamma \lambda_{ij}} y_{ij}^{\lambda_{ij} - 1} \exp (- \{ \frac{y_{ij}}{\alpha} \})
-\end{equation*}
-for $\alpha, \; \lambda_{ij}, \; y_{ij} \; > 0$.
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covarance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\lambda_{ij} \equiv \frac{1}{X_{ij} \beta + Z_{ij} b_i}
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the gamma distribution for each given set of parameters $(\alpha, \lambda_{ij})$, for
-\begin{equation*}
-\lambda_{ij} = \frac{1}{X_{ij} \beta + Z_{ij} b_i}
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are simulations of the mean of the stochastic component given draws of $\alpha$, $\beta$ from their posteriors:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \alpha \lambda_{ij} = \frac{\alpha}{X_{ij} \beta}.
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in expected values, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = E(Y_{ij} | X_{ij}) - E(Y_{ij} | X_{ij}^\prime)
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite}
-
-\input{cites/gamma.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects gamma regression is part of {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/vignettes/gamma.survey.Rnw b/vignettes/gamma.survey.Rnw
deleted file mode 100644
index 8a46ef7..0000000
--- a/vignettes/gamma.survey.Rnw
+++ /dev/null
@@ -1,501 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=gammasurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,gamma ,continuous, regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt gamma.survey}: Survey-Weighted Gamma Regression for Continuous, Positive Dependent Variables}
-\label{gamma.survey}
-
-The survey-weighted Gamma regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional Gamma regression models (see 
-\Sref{gamma}), survey-weighted Gamma regression specifies a 
-continuous, positive dependent variable as a function of a set of explanatory 
-variables.  The survey-weighted Gamma model reports estimates of 
-model parameters identical to conventional Gamma estimates, but uses 
-information from the survey design to correct variance estimates.
-
-The {\tt gamma.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "gamma.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted Gamma models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights:
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a positive and continuous measure of
-public schools' performance ({\tt api00}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), an indicator for whether each school
-holds classes year round ({\tt yr.rnd}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses school performance on the {\tt meals} and {\tt year.rnd}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey",  
-weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey", strata=~stype, fpc=~fpc, data = apistrat)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "gamma.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  
-Suppose that the survey house that published these school 
-data withheld strata/cluster identifiers and instead 
-published replication weights.  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-gamma.survey} models, create a set of jack-knife 
-(JK1) replicate weights: 
-<<Replicate.rw>>= 
-jk1reps <- jk1weights(psu=apistrat$dnum)
-@ 
-Again, estimate a model that regresses school performance on 
-the {\tt meals} and {\tt yr.rnd} variables, using
-the JK1 replicate weights in {\tt jk1reps} to compute standard errors:
-<<Replicate.zelig>>= 
-z.out3 <- zelig(api00 ~ meals + yr.rnd, model = "gamma.survey", data = apistrat, 
-repweights=jk1reps$weights, type="JK1") 
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set the explanatory variable {\tt meals} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, meals= quantile(apistrat$meals, 0.2))
-x.high <- setx(z.out3, meals= quantile(apistrat$meals, 0.8))
-@ 
-Generate first
-differences for the effect of high versus low 
-concentrations of poverty on school performance:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-
-\begin{itemize}
-\item The Gamma distribution with scale parameter $\alpha$ has a
-\emph{stochastic component}:
-\begin{eqnarray*}
-Y &\sim& \textrm{Gamma}(y_i \mid \lambda_i, \alpha) \\
-f(y)  &=& \frac{1}{\alpha^{\lambda_i} \, \Gamma \lambda_i} \, y_i^{\lambda_i
-  - 1} \exp -\left\{ \frac{y_i}{\alpha} \right\}
-\end{eqnarray*}
-for $\alpha, \lambda_i, y_i > 0$.  \\
-
-\item The \emph{systematic component} is given by
-\begin{equation*}
-  \lambda_i = \frac{1}{x_i \beta}
-\end{equation*}
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) are simulations of the mean
-  of the stochastic component given draws of $\alpha$ and
-  $\beta$ from their posteriors:  $$E(Y) = \alpha \lambda_i.$$  
-\item The predicted values ({\tt qi\$pr}) are draws from the gamma
-  distribution for each given set of parameters $(\alpha, \lambda_i)$.
-\item If {\tt x1} is specified, {\tt sim()} also returns the
-  differences in the expected values ({\tt qi\$fd}), $$E(Y \mid x_1) -
-  E(Y \mid x)$$.
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.  
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "gamma.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$.
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from a
-     distribution defined by $(\alpha, \lambda_i)$.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     values for the specified values in {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt gamma.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt gamma.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-\input{cites/gamma.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The Gamma model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
-  
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-  \end{document}
diff --git a/vignettes/gk.bib b/vignettes/gk.bib
index 49199f5..0359e26 100644
--- a/vignettes/gk.bib
+++ b/vignettes/gk.bib
@@ -5872,13 +5872,6 @@ OPTannote = {}
 	number= 391
 }
 
- at book{Huber81,
-	author={Peter J. Huber},
-	title={Robust Statistics},
-	publisher={Wiley},
-	year={1981}
-}
-
 @book{HucSpr95,
 	author={R. Robert Huckfeldt and John Sprague},
 	title={Citizens, Politics, and Social Communication},
@@ -13208,18 +13201,6 @@ OPTannote = {}
 	number= 1
 }
 
- at article{White80,
-	author={Halbert White},
-	title={A Heteroskedasticity-Consistent Covariance Matrix Estimator and a Direct
-		Test for Heteroskedasticity},
-	journal={Econometrica},
-	volume={48},
-	year={1980},
-	pages={817--838},
-	month={May},
-	number={4}
-}
-
 @book{White82,
 	author={Halbert L. White},
 	title={Asymptotic Theory For Econometricians},
@@ -14497,6 +14478,23 @@ OPTannote = {}
   YEAR =	 {2004},
 }
 
+ at BOOK{Huber81,
+  AUTHOR =	 {Peter J. Huber},
+  TITLE =	 {Robust Statistics},
+  PUBLISHER =	 {Wiley},
+  YEAR =	 {1981},
+}
+
+ at ARTICLE{White80,
+  AUTHOR =       {Halbert White},
+  TITLE =        {A Heteroskedasticity-Consistent Covariance Matrix Estimator and a Direct Test for Heteroskedasticity},
+  JOURNAL =      {Econometrica},
+  YEAR =         {1980},
+  volume =       {48},
+  number =       {4},
+  pages =        {817--838},
+}
+
 @BOOK{TheGra00,
   AUTHOR =	 {Terry M. Therneau and Patricia M. Grambsch},
   TITLE =	 {Modeling Survival Data: Extending the Cox Model},
diff --git a/vignettes/gkpubs.bib b/vignettes/gkpubs.bib
index 8f252c4..e9b910d 100644
--- a/vignettes/gkpubs.bib
+++ b/vignettes/gkpubs.bib
@@ -96,6 +96,27 @@
 % Articles
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
+ at Article{SonKin11,
+  author =	 {Samir Soneji and Gary King},
+  title =	 {Statistical Security for Social Security},
+  journal =	 {Demography},
+  year =	 2011,
+  note =	 {{http://gking.harvard.edu/files/abs/ssc-abs.shtml}}
+}
+
+ at Article{KinNieCobPopWel11,
+  author =	 {Gary King and Richard Nielsen and Carter Coberley
+                  and James Pope and Aaron Wells},
+  title =	 {Avoiding Randomization Failure in Program
+                  Evaluation},
+  journal =	 {Population Health Management},
+  year =	 2011,
+  volume =	 14,
+  number =	 1,
+  pages =	 {S11-S22},
+  note =	 {{http://gking.harvard.edu/gking/files/mhs.pdf}}
+}
+
 @Article{KinNieCob11,
   author =	 {Gary King and Richard Nielsen and Carter Coberley
                   and James Pope and Aaron Wells},
@@ -226,13 +247,12 @@
   note =	 {{http://gking.harvard.edu/files/abs/mort-abs.shtml}} 
 }
 
-
- at Article{IacKinPor09,
+ at Article{IacKinPor11,
   author =	 {Stefano M. Iacus and Gary King and Giuseppe Porro},
   title =	 {Causal Inference Without Balance Checking: Coarsened
                   Exact Matching},
-  journal =	 { },
-  year =	 2009,
+  journal =	 {Political Analysis},
+  year =	 {2011, in press},
   note =	 {{http://gking.harvard.edu/files/abs/cem-plus-abs.shtml}}
 }
 
@@ -1789,10 +1809,21 @@
                   {{http://gking.harvard.edu/files/abs/poliactiv-abs.shtml}}
 }
 
+
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 % Data
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
+ at Article{IacKinPor11b,
+  author =	 {Stefano M. Iacus and Gary King and Giuseppe Porro},
+  title =	 {Replication data for: Causal Inference Without
+                  Balance Checking: Coarsened Exact Matching},
+  journal =	 { },
+  year =	 2011,
+  note =	 {http://hdl.handle.net/1902.1/15601 Murray Research
+                  Archive [Distributor] V1 [Version]}
+}
+
 @article{HopKin09b,
   author =	 {Daniel Hopkins and Gary King},
   title =	 {Replication Data for: A Method of Automated
diff --git a/vignettes/html.sty b/vignettes/html.sty
deleted file mode 100644
index ec942b0..0000000
--- a/vignettes/html.sty
+++ /dev/null
@@ -1,200 +0,0 @@
-% see if we should actually do anything
-\message{Including htmltex ...}
-\expandafter\ifx\csname dohtml\endcsname\relax
-\message{dohtml undefined; skipping ...}
-\else
-\message{dohtml defined; it is \dohtml}
-%%%%%%%%%%%%%%%% BEGIN HTML TEX STUFF %%%%%%%%%%%%%%
-
-% get tex to output the output boxes
-\tracingoutput=1
-\showboxbreadth=10000000
-\showboxdepth=10000000
-\hbadness=10000
-\vbadness=10000
-%\setlength{\textheight}{200in} % one page so footnotes go at the bottom.
-\pretolerance=10000
-
-\makeatletter
-\def\@htmlwrite#1{\special{HTML:#1}}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%% Paragraphs and things %%%%%%%%%%%%%%
-\iftrue
-\message{paragraphs ...}
-\let\@@@par\@@par
-\def\@@par{\@htmlwrite{<p>}\@@@par}
-\@restorepar
-\let\@@@newline\newline
-\def\newline{\@htmlwrite{<br>}\@@@newline}
-\let\@@@cr\cr
-\def\cr{\@htmlwrite{<br>}\@@@cr}
-\let\@@@dblslsh\\
-\def\\{\@htmlwrite{<br>}\@@@dblslsh}
-%\def\discretionary#1#2#3{#3} % didn't work very well
-\def\-{}
-\fi
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%%%%%%%%%%%%%%%%%%%%%%%%% Pages %%%%%%%%%%%%%%%%%%%%%%%%%
-% For TeX
-%\footline={\@htmlwrite{<HR>}}    % Does nothing in LaTeX
-
-% For LaTeX
-\message{pages ...}
-\def\ps at html{
-  \def\@oddfoot{\@htmlwrite{<HR></A>}}
-  \let\@evenfoot\@oddfoot
-  \def\@oddhead{\@htmlwrite{<CENTER><A NAME=PAGE\thepage>\thepage</CENTER><HR>}}
-%  \def\@oddhead{\@htmlwrite{<A NAME=PAGE\thepage>}}
-  \let\@evenhead\@oddhead
-}
-
-\pagestyle{html}
-\onecolumn
-\def\twocolumn{}
-
-%%%%%%%%%%%%%%%%%%%%%%%%%% MATH MODE %%%%%%%%%%%%%%%%%%%
-\message{math ...}
-\iffalse
-\let\@dollar=$
-\catcode`\$=13
-\let\@inmath=0
-\def${\ifx\@inmath0\@htmlwrite{<I>}\let\@inmath=1\@dollar\else\@dollar\@htmlwrite{</I>}\let\@inmath=0\fi}
-\fi
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% figures %%%%%%%%%%%%%%%%%%%
-\def\fps at figure{h}
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%% Footnotes %%%%%%%%%%%%%%%%%
-\iffalse
-\def\footnote#1{}
-\def\footnotetext[#1]#2{}
-\fi
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%%%%%%%%%%%%%%%%%%%%%%%% All kinds of lists %%%%%%%%%%%%%%
-\message{lists ...}
-\iftrue
-\newcount\@indef
-\@indef=0
-\def\description{
-  \@htmlwrite{<DL>}
-  \advance\@indef by 1
-  \ifnum\@indef=1
-  \let\@item=\item
-  \def\item[##1]{\@htmlwrite{<DT>}##1\@htmlwrite{<DD>}}
-  \fi
-  }
-\def\enddescription{
-  \@htmlwrite{</DL>}
-  \advance\@indef by -1
-  \ifnum\@indef=0
-  \let\item\@item
-  \fi}
-\fi
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%%%%%%%%%%%%%%%%%%%%% centering %%%%%%%%%%%%%%%%%%%%%%
-\iftrue
-\let\@htmlcenterline\centerline
-\def\centerline#1{
-  \@htmlwrite{<CENTER>}
-  \@htmlcenterline{#1}
-  \@htmlwrite{</CENTER>}}
-\fi
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-%%%%%%%%%%%%%%%%%%%% labels %%%%%%%%%%%%%%%%%%%%%%%%%%
-\let\@htmllabel\label
-\let\@htmlref\ref
-\catcode`\#=11
-\def\hash{#}
-\catcode`\#=6
-%\def\label#1{\@htmlwrite{<A NAME=#1></A>}\@htmllabel{#1}}
-
-\def\label#1{\@bsphack\if at filesw {\let\thepage\relax
-   \def\protect{\noexpand\noexpand\noexpand}%
-   \edef\@tempa{\write\@auxout{\string
-      \newlabel{#1}{{\labelname}{\thepage}}}}%
-   \expandafter}\@tempa
-   \if at nobreak \ifvmode\nobreak\fi\fi\fi\@esphack}
-
-%\def\ref#1{\@htmlwrite{<A HREF=\hash#1>}\@htmlref{#1}\@htmlwrite{</A>}}
-\let\@refstepcounter\refstepcounter
-\def\refstepcounter#1{
-  \@refstepcounter{#1}
-  \@htmlwrite{<A NAME=#1\@currentlabel></A>}
-  \def\labelname{
-    \special{HTML:<A HREF=HTMLHASH#1\@currentlabel>}
-    \@currentlabel\special{HTML:</A>}
-  }
-}
-
-\def\labelname{}
-
-% unfortunately eqnarray doesn't use refstepcounter so we'll have
-% to take care of it.
-
-%here's a better fix but it doesn't work
-
-%\let\@@eqnnum\@eqnnum
-%\def\@eqnnum{
-%  \@htmlwrite{<A NAME=equation\@currentlabel></A>}
-%  \def\labelname{
-%    \special{HTML:<A HREF=HTMLHASHequation\@currentlabel>}
-%    \@currentlabel\special{HTML:</A>}
-%  }
-%  \@@eqnnum
-%}
-
-\let\@eqnarray\eqnarray
-\def\eqnarray{
-  \@htmlwrite{<A NAME=equation\@currentlabel></A>}
-  \def\labelname{
-    \special{HTML:<A HREF=HTMLHASHequation\@currentlabel>}
-    \@currentlabel\special{HTML:</A>}
-  }
-  \let\cr\@@@cr
-  \@htmlwrite{<br>}
-  \@eqnarray
-}
-
-%\let\@endeqnarray\endeqnarray
-%\def\endeqnarray{
-%  \@htmlwrite{<A NAME=equation\@currentlabel></A>}
-%  \def\labelname{
-%    \special{HTML:<A HREF=HTMLHASHequation\@currentlabel>}
-%    \@currentlabel\special{HTML:</A>}
-%  }
-%  \@endeqnarray
-%  \def\cr{\@htmlwrite{<br>}\@@@cr}
-%}
-
-
-%\let\@@@eqncr\@@eqncr
-%\def\@@eqncr{
-%  \@htmlwrite{<A NAME=equation\@currentlabel></A>}
-%  \def\labelname{
-%    \special{HTML:<A HREF=HTMLHASHequation\@currentlabel>}
-%    \@currentlabel\special{HTML:</A>}
-%  }
-%  \@@@eqncr
-%}
-
-\def\@eqnnum{{
-  \@htmlwrite{<A NAME=equation\@currentlabel></A>}
-  \def\labelname{
-    \special{HTML:<A HREF=HTMLHASHequation\@currentlabel>}
-    \@currentlabel\special{HTML:</A>}}
-  \reset at font\rm (\theequation)
-  \@htmlwrite{<br>}}}
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-\makeatother
-
-%%%%%%%%%%%%%%% END HTML TEX STUFF %%%%%%%%%%%%%%%%%%%%%%
-\fi
diff --git a/vignettes/logit.Rnw b/vignettes/logit.Rnw
deleted file mode 100644
index 0ca3cc8..0000000
--- a/vignettes/logit.Rnw
+++ /dev/null
@@ -1,299 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=logit}
-\include{zinput}
-%\VignetteIndexEntry{Logistic Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, stats}
-%\VignetteKeyWords{model,logistic,dichotomous, regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-\section{{\tt logit}: Logistic Regression for Dichotomous Dependent
-Variables}\label{logit}
-
-Logistic regression specifies a dichotomous dependent variable as a
-function of a set of explanatory variables.  For a Bayesian
-implementation, see \Sref{logit.bayes}.  
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "logit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out, x1 = NULL)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for logistic regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame)  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Examples}
-\begin{enumerate}
-\item {Basic Example}
- 
-Attaching the sample turnout dataset:
-<<Example.data>>=
- data(turnout)
-@ 
-Estimating parameter values for the logistic regression:
-<<Example.zelig>>=
- z.out1 <- zelig(vote ~ age + race,  model = "logit", data = turnout) 
-@ 
-Setting values for the explanatory variables:
-<<Example.setx>>=
- x.out1 <- setx(z.out1, age = 36, race = "white")
-@ 
-Simulating quantities of interest from the posterior distribution.
-<<Example.sim>>=
- s.out1 <- sim(z.out1, x = x.out1)
-@
-<<Example.summary>>= 
- summary(s.out1)
-@ 
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>= 
- plot(s.out1)
-@ 
-\end{center}
-
-\item {Simulating First Differences}
-
-Estimating the risk difference (and risk ratio) between low education
-(25th percentile) and high education (75th percentile) while all the
-other variables held at their default values.
-<<FirstDifferences.setx>>=
- z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
- x.high <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.75))
- x.low <- setx(z.out2, educate = quantile(turnout$educate, prob = 0.25))
-@ 
-
-<<FirstDifferences.sim>>=
- s.out2 <- sim(z.out2, x = x.high, x1 = x.low)
-@ 
-<<FirstDifferences.summary>>=
- summary(s.out2)
-@
-\begin{center}
-<<label=FirstDifferencesPlot,fig=true>>= 
- plot(s.out2)
-@ 
-\end{center} 
-
-
-\item {Presenting Results: An ROC Plot}  \label{ROC}
-  
-  One can use an ROC plot to evaluate the fit of alternative model
-  specifications.  (Use {\tt demo(roc)} to view this example, or see
-  King and Zeng (2002)\nocite{KinZen02}.)  
-<<ROC.zelig>>=
- z.out1 <- zelig(vote ~ race + educate + age, model = "logit", 
-                  data = turnout)
- z.out2 <- zelig(vote ~ race + educate, model = "logit", data = turnout)
-@
-\begin{center}
-<<label=ROCPlot,fig=true, echo=true>>= 
-
-rocplot(z.out1$y, z.out2$y, fitted(z.out1), fitted(z.out2))
-@ 
-\end{center}
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the binary dependent variable for observation $i$ which
-takes the value of either 0 or 1.
-\begin{itemize}
-
-\item The \emph{stochastic component} is given by  
-\begin{eqnarray*}
-Y_i &\sim& \textrm{Bernoulli}(y_i \mid \pi_i) \\
-    &=& \pi_i^{y_i} (1-\pi_i)^{1-y_i}
-\end{eqnarray*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is given by: 
-\begin{equation*}
-\pi_i \; = \; \frac{1}{1 + \exp(-x_i \beta)}.
-\end{equation*}
-where $x_i$ is the vector of $k$ explanatory variables for observation $i$
-and $\beta$ is the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the logit model are
-  simulations of the predicted probability of a success: $$E(Y) =
-  \pi_i= \frac{1}{1 + \exp(-x_i \beta)},$$ given draws of $\beta$ from
-  its sampling distribution.
-
-\item The predicted values ({\tt qi\$pr}) are draws from the Binomial
-  distribution with mean equal to the simulated expected value $\pi_i$.  
-
-\item The first difference ({\tt qi\$fd}) for the logit model is defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) \ / \ \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)}\right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\, x,
-  model = "logit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values for the
-     systematic component, $\pi_i$.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected probabilities for the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values for the
-     specified values of {\tt x}.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the expected
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/logit}
-\input{citeZelig}
-
-
-\subsection*{See also}
-
-The logit model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
-
-
-
-
diff --git a/vignettes/logit.mixed.Rnw b/vignettes/logit.mixed.Rnw
deleted file mode 100644
index e72feee..0000000
--- a/vignettes/logit.mixed.Rnw
+++ /dev/null
@@ -1,185 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=logitmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects logistic regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed, logistic regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt logit.mixed}: Mixed Effects Logistic Regression}
-\label{mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. The logit model is appropriate when the dependent variable is dichotomous.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="logit.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="logit.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(voteincome)
-@
-Estimate model:
-<<Examples.zelig>>=
-z.out1 <- zelig(vote ~ education + age + female + tag(1 | state), data=voteincome, model="logit.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values, with high (80th percentile) and low (20th percentile) values for education:\\
-
-<<Examples.setx>>=
-x.high <- setx(z.out1, education=quantile(voteincome$education, 0.8))
-x.low <- setx(z.out1, education=quantile(voteincome$education, 0.2))
-@
-Generate first differences for the effect of high versus low education on voting: \\
-
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-
-\subsubsection{Mixed effects Logistic Regression Model}
-
-Let $Y_{ij}$ be the binary dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$ which takes the value of either 0 or 1, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Bernoulli distribution with mean vector $\pi_{ij}$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Bernoulli}(y_{ij} | \pi_{ij}) = \pi_{ij}^{y_{ij}} (1 - \pi_{ij})^{1 - y_{ij}}
-\end{equation*}
-where
-\begin{equation*}
-\pi_{ij} = \mathrm{Pr}(Y_{ij} = 1)
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance-covariance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\pi_{ij} \equiv \frac{1}{1 + \exp(-(X_{ij} \beta + Z_{ij} b_i))}
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the Binomial distribution with mean equal to the simulated expected value, $\pi_{ij}$ for
-\begin{equation*}
-\pi_{ij} = \frac{1}{1 + \exp(-(X_{ij} \beta + Z_{ij} b_i))}
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance-covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are simulations of the predicted probability of a success given draws of $\beta$ from its posterior:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \pi_{ij} = \frac{1}{1 + \exp(- X_{ij} \beta)}.
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in predicted probabilities, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = Pr(Y_{ij} = 1 | X_{ij}) - Pr(Y_{ij} = 1 | X_{ij}^\prime)
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-RR(Y_{ij} | X_{ij}, X_{ij}^{\prime}) = \frac{Pr(Y_{ij} = 1 | X_{ij})}{Pr(Y_{ij} = 1 | X_{ij}^{\prime})}
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite}
-
-\input{cites/logit.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects logistic regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/vignettes/logit.survey.Rnw b/vignettes/logit.survey.Rnw
deleted file mode 100644
index 3441f90..0000000
--- a/vignettes/logit.survey.Rnw
+++ /dev/null
@@ -1,520 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=logitsurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Logistic Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,logistic,dichotomous, regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt logit.survey}: Survey-Weighted Logistic Regression for Dichotomous Dependent Variables}
-\label{logit.survey}
-
-The survey-weighted logistic regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional logistic regression models (see 
-\Sref{logit}), survey-weighted logistic regression specifies a 
-dichotomous dependent variable as function of a set of explanatory 
-variables.  The survey-weighted logit model reports estimates of 
-model parameters identical to conventional logit estimates, but uses 
-information from the survey design to correct variance estimates.
-
-The {\tt logit.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "logit.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted logistic models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights:
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt "BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a dichotomous indicator 
-for whether each public school attends classes year round ({\tt yr.rnd}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), a measure of the percentage of students at 
-each school who are new to the school ({\tt mobility}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses the year-round schooling indicator on the {\tt meals} and {\tt mobility}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(yr.rnd ~ meals + mobility, model = "logit.survey", weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(yr.rnd ~ meals + mobility, model = "logit.survey", strata=~stype, fpc=~fpc, data = apistrat)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "logit.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-about the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-logit.survey} models, create a set of balanced repeated replicate
-(BRR) weights and an (artificial) dependent variable to simulate an indicator 
-for whether each hospital was sued:
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-@ 
-Estimate a model that regresses the indicator for hospitals that were
-sued on the number of patients who arrive alive in
-each hospital and the number of cardiac arrests that each hospital treats, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors.
-<<Replicate.zelig>>= 
-z.out3 <- zelig(formula=sued ~ arrests + alive , model = "logit.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set {\tt alive} at its mean and set {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8)) 
-@ 
-Generate first
-differences for the effect of high versus low cardiac arrests
-on the probability that a hospital will be sued:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the binary dependent variable for observation $i$ which
-takes the value of either 0 or 1.
-\begin{itemize}
-
-\item The \emph{stochastic component} is given by  
-\begin{eqnarray*}
-Y_i &\sim& \textrm{Bernoulli}(y_i \mid \pi_i) \\
-    &=& \pi_i^{y_i} (1-\pi_i)^{1-y_i}
-\end{eqnarray*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is given by: 
-\begin{equation*}
-\pi_i \; = \; \frac{1}{1 + \exp(-x_i \beta)}.
-\end{equation*}
-where $x_i$ is the vector of $k$ explanatory variables for observation $i$
-and $\beta$ is the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the survey-weighted logit model are
-  simulations of the predicted probability of a success: $$E(Y) =
-  \pi_i= \frac{1}{1 + \exp(-x_i \beta)},$$ given draws of $\beta$ from
-  its sampling distribution.
-
-\item The predicted values ({\tt qi\$pr}) are draws from the Binomial
-  distribution with mean equal to the simulated expected value $\pi_i$.  
-
-\item The first difference ({\tt qi\$fd}) for the survey-weighted logit model is defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) \ / \ \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)}\right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\ x,
-  model = "logit.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values for the
-     systematic component, $\pi_i$.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected probabilities for the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values for the
-     specified values of {\tt x}.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the expected
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt logit.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt logit.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-
-\input{cites/logit.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The logit model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
- 
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-
-\end{document}
diff --git a/vignettes/ls.Rnw b/vignettes/ls.Rnw
deleted file mode 100644
index f3c00ab..0000000
--- a/vignettes/ls.Rnw
+++ /dev/null
@@ -1,288 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=ls}
-\include{zinput}
-%\VignetteIndexEntry{Least Squares Regression for Continuous Dependent Variables}
-%\VignetteDepends{Zelig, stats}
-%\VignetteKeyWords{model,least squares,continuous, regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@ 
-
-\section{{\tt ls}: Least Squares Regression for Continuous
-Dependent Variables}
-\label{ls}
-
-Use least squares regression analysis to estimate the best linear
-predictor for the specified dependent variables.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "ls", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Additional Inputs}  
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for least squares regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors based on
-sandwich estimators (see \cite{Zeileis04}, \cite{Huber81}, and
-\cite{White80}).  The default type of robust standard error is
-heteroskedastic consistent (HC), \emph{not} heteroskedastic and
-autocorrelation consistent (HAC).  
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  choose from 
-\begin{itemize}
-\item {\tt "vcovHC"}: (the default if {\tt robust = TRUE}), HC standard errors.
-\item {\tt "vcovHAC"}: HAC standard errors without weights.  
-\item {\tt "kernHAC"}: HAC standard errors using the weights given in
-\cite{Andrews91}.   
-\item {\tt "weave"}: HAC standard errors using the weights given in
-\cite{LumHea99}.
-\end{itemize} 
-\item {\tt order.by}: only applies to the HAC methods above.  Defaults to
-{\tt NULL} (the observations are chronologically ordered as in the
-original data).  Optionally, you may specify a time index (either as
-{\tt order.by = z}, where {\tt z} exists outside the data frame; or
-as {\tt order.by = \~{}z}, where {\tt z} is a variable in the data
-frame).  The observations are chronologically ordered by the size of
-{\tt z}.
-\item {\tt \dots}:  additional options passed to the functions
-specified in {\tt method}.  See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Examples}\begin{enumerate}
-\item Basic Example with First Differences
-
-Attach sample data:
-<<Examples.data>>=
- data(macro)
-@ 
-Estimate model:
-<<Examples.zelig>>=
- z.out1 <- zelig(unem ~ gdp + capmob + trade, model = "ls", data = macro)
-@ 
-Summarize regression coefficients:
-<<Examples.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, with
-high (80th percentile) and low (20th percentile) values for the trade variable:
-<<Examples.setx>>=
- x.high <- setx(z.out1, trade = quantile(macro$trade, 0.8))
- x.low <- setx(z.out1, trade = quantile(macro$trade, 0.2))
-@ 
-Generate first differences for the effect of high versus low trade on
-unemployment:
-<<Examples.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Examples.summary.sim>>= 
-summary(s.out1)
-@ 
-\begin{center}
-<<label=ExamplesPlot,fig=true,echo=true,width=5.5,height=4>>=  
- plot(s.out1)
-@ 
-\end{center}
-
-\item Using Dummy Variables
-
-Estimate a model with fixed effects for each country (see
-\Sref{factors} for help with dummy variables).  Note that you do not
-need to create dummy variables, as the program will automatically
-parse the unique values in the selected variable into discrete levels.  
-<<Dummy.zelig>>=
- z.out2 <- zelig(unem ~ gdp + trade + capmob + as.factor(country), 
-                  model = "ls", data = macro)
-@   
-Set values for the explanatory variables, using the default mean/mode
-values, with country set to the United States and Japan, respectively:
-<<Dummy.setx>>=
- x.US <- setx(z.out2, country = "United States")
- x.Japan <- setx(z.out2, country = "Japan")
-@ 
-Simulate quantities of interest:
-<<Dummy.sim>>=
- s.out2 <- sim(z.out2, x = x.US, x1 = x.Japan)
-@ 
-\begin{center}
-<<label=DummyPlot,fig=true,echo=true, width=5.5, height=4>>=   
- plot(s.out2)
-@ 
-\end{center}
-
-\item Multiple responses (least squares regression will be fitted
-  separately to each dependent variable)
-
-Two responses for data set macro: 
-<<Multiple.zelig>>=
- z.out3 <- zelig(cbind(unem, gdp) ~ capmob + trade,model = "ls", data = macro)
-@
-<<Multiple.zelig.summary>>=
-summary(z.out3)
-@    
-Set values for the explanatory variables, using the default mean/mode
-values, with country set to the United States and Japan, respectively:
-<<Multiple.setx>>=
- x.US <- setx(z.out3, country = "United States")
- x.Japan <- setx(z.out3, country = "Japan")
-@ 
-Simulate quantities of interest:
-<<Multiple.sim>>=
- s.out3 <- sim(z.out3, x = x.US, x1 = x.Japan)
-@
-Summary
-<<Example4.sim.summary>>=
-summary(s.out3)
-@  
-\begin{center}
-<<label=Example4Plot,fig=true,echo=true,  width=7.5, height=6>>= 
- plot(s.out3)
-@ 
-\end{center}
-
-\end{enumerate}
-
-\subsubsection{Model}
-\begin{itemize}
-\item The \emph{stochastic component} is described by a density
-  with mean $\mu_i$ and the common variance $\sigma^2$
-  \begin{equation*}
-    Y_i \; \sim \; f(y_i \mid \mu_i, \sigma^2).
-  \end{equation*}
-\item The \emph{systematic component} models the conditional mean as
-  \begin{equation*}
-     \mu_i =  x_i \beta
-  \end{equation*} 
-  where $x_i$ is the vector of covariates, and $\beta$ is the vector
-  of coefficients.
-  
-  The least squares estimator is the best linear predictor of a
-  dependent variable given $x_i$, and minimizes the sum of squared
-  residuals, $\sum_{i=1}^n (Y_i-x_i \beta)^2$.  
-\end{itemize}
-
-\subsubsection{Quantities of Interest} 
-\begin{itemize}
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component,  
-\begin{equation*}
-E(Y) = x_i \beta,\end{equation*}
-given a draw of $\beta$ from its sampling distribution.  
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "ls", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-  \item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: fitted values.
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-  
-\item From {\tt summary(z.out)}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-     \begin{equation*}
-       \hat{\beta} \; = \; \left(\sum_{i=1}^n x_i' x_i\right)^{-1} \sum x_i y_i
-     \end{equation*}
-   \item {\tt sigma}: the square root of the estimated variance of the
-     random error $e$:
-     \begin{equation*}
-       \hat{\sigma} \; = \; \frac{\sum (Y_i-x_i\hat{\beta})^2}{n-k}
-     \end{equation*}
-   \item {\tt r.squared}: the fraction of the variance explained by
-     the model. 
-     \begin{equation*}
-       R^2 \; = \; 1 - \frac{\sum (Y_i-x_i\hat{\beta})^2}{\sum (y_i -
-         \bar{y})^2} 
-     \end{equation*}
-   \item {\tt adj.r.squared}: the above $R^2$ statistic, penalizing
-     for an increased number of explanatory variables.  
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-   
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$fd}:  the simulated first differences (or
-     differences in expected values) for the specified values of {\tt
-       x} and {\tt x1}. 
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-\input{cites/ls}
-\input{citeZelig}
-
-\subsection* {See also}
-The least squares regression is part of the stats package by William N.
-Venables and Brian D. Ripley \citep{VenRip02}. In addition, advanced users may wish to refer to \texttt{help(lm)} and \texttt{help(lm.fit)}. Robust standard errors are implemented via the sandwich package by Achim Zeileis \citep{Zeileis04}. Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% TeX-master: t
-%%% End: 
diff --git a/vignettes/ls.mixed.Rnw b/vignettes/ls.mixed.Rnw
deleted file mode 100644
index 0710951..0000000
--- a/vignettes/ls.mixed.Rnw
+++ /dev/null
@@ -1,187 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=lmmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects linear regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,linear, linear regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt ls.mixed}: Mixed effects Linear Regression}
-\label{mixed}
-
-Use multi-level linear regression if you have covariates that are grouped according to one or more classification factors and a continuous dependent variable.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="lm.multi")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="lm.multi")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt multi}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the repsonse on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt family:} A GLM family, see {\tt glm} and {\tt family} in the {\tt stats} package. If {\tt family} is missing then a linear mixed model is fit; otherwise a generalized linear mixed model is fit. In the latter case only {\tt gaussian} family with {\tt "log"} link is supported at the moment.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(voteincome)
-@
-
-Estimate model:
-
-<<Examples.zelig>>=
-z.out1 <- zelig(income ~ education + age + female + tag(1 | state), data=voteincome, model="ls.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values, with high (80th percentile) and low (20th percentile) values for education:\\
-<<Examples.setx>>=
-x.high <- setx(z.out1, education=quantile(voteincome$education, 0.8))
-x.low <- setx(z.out1, education=quantile(voteincome$education, 0.2))
-@
-
-Generate first differences for the effect of high versus low education on income: \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-\begin{center}
-<<label=ExamplesPlot, fig=true, echo=true>>=
-plot(s.out1)
-@
-\end{center}
-
-\end{enumerate}
-
-\subsubsection{Mixed effects linear regression model}
-
-Let $Y_{ij}$ be the continuous dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a univariate normal model with a vector of means $\mu_{ij}$ and scalar variance $\sigma^2$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Normal}(y_{ij} | \mu_{ij}, \sigma^2)
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covariance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\mu_{ij} \equiv X_{ij} \beta + Z_{ij} b_i
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-
-\item The predicted values ({\tt qi\$pr}) are draws from the normal distribution defined by mean $\mu_{ij}$ and variance $\sigma^2$,
-\begin{equation*}
-\mu_{ij} = X_{ij} \beta + Z_{ij} b_{i}
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are averaged over the stochastic components and are given by
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = X_{ij} \beta.
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in expected values, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = E(Y_{ij} | X_{ij}) - E(Y_{ij} | X_{ij}^\prime)
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item If {\tt "log"} link is used, expected values are computed as above and then exponentiated, while predicted values are draws from the log-normal distribution whose logarithm has mean and variance equal to $\mu_{ij}$ and $\sigma^2$, respectively.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-
-
-\subsection* {How to Cite}
-
-\input{cites/ls.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects linear regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/vignettes/natbib.sty b/vignettes/natbib.sty
deleted file mode 100644
index c3c926c..0000000
--- a/vignettes/natbib.sty
+++ /dev/null
@@ -1,724 +0,0 @@
-%%
-%% This is file `natbib.sty',
-%% generated with the docstrip utility.
-%%
-%% The original source files were:
-%%
-%% natbib.dtx  (with options: `package,all')
-%% 
-%% Full documentation can be obtained by LaTeXing the source file:
-%%   natbib.dtx.
-%% 
-%% Only a few abbreviated comments remain here to describe the usage.
-%% 
-\NeedsTeXFormat{LaTeX2e}[1994/06/01]
-\ProvidesPackage{natbib}
-        [1998/07/14 6.8c (PWD)]
-%%-------------------------------------------------------------------
-%% NOTICE:
-%% This file may be used for non-profit purposes.
-%% It may not be distributed in exchange for money,
-%%   other than distribution costs.
-%%
-%% The author provides it `as is' and does not guarantee it in any way.
-%%
-%% Natbib coding copyright (C) 1994--1998 Patrick W. Daly
-%% Max-Planck-Institut f\"ur Aeronomie
-%% Max-Planck-Str. 2
-%% D-37191 Katlenburg-Lindau
-%% Germany
-%%
-%% E-mail: daly at linmpi.mpg.de
-%%-----------------------------------------------------------
- % This package reimplements the LaTeX \cite command to be used for various
- % citation styles, both author-year and numerical. It accepts BibTeX
- % output intended for many other packages, and therefore acts as a
- % general, all-purpose citation-style interface.
- %
- % With standard numerical .bst files, only numerical citations are
- % possible. With an author-year .bst file, both numerical and
- % author-year citations are possible.
- %
- % If author-year citations are selected, \bibitem must have one of the
- %   following forms:
- %   \bibitem[Jones et al.(1990)]{key}...
- %   \bibitem[Jones et al.(1990)Jones, Baker, and Williams]{key}...
- %   \bibitem[Jones et al., 1990]{key}...
- %   \bibitem[\protect\citeauthoryear{Jones, Baker, and Williams}{Jones
- %       et al.}{1990}]{key}...
- %   \bibitem[\protect\citeauthoryear{Jones et al.}{1990}]{key}...
- %   \bibitem[\protect\astroncite{Jones et al.}{1990}]{key}...
- %   \bibitem[\protect\citename{Jones et al., }1990]{key}...
- %   \harvarditem[Jones et al.]{Jones, Baker, and Williams}{1990}{key}...
- %
- % This is either to be made up manually, or to be generated by an
- % appropriate .bst file with BibTeX.
- %                            Author-year mode     ||   Numerical mode
- % Then, \citet{key}  ==>>  Jones et al. (1990)    ||   Jones et al. [21]
- %       \citep{key}  ==>> (Jones et al., 1990)    ||   [21]
- % Multiple citations as normal:
- % \citep{key1,key2}  ==>> (Jones et al., 1990; Smith, 1989) || [21,24]
- %                           or  (Jones et al., 1990, 1991)  || [21,24]
- %                           or  (Jones et al., 1990a,b)     || [21,24]
- % \cite{key} is the equivalent of \citet{key} in author-year mode
- %                         and  of \citep{key} in numerical mode
- % Full author lists may be forced with \citet* or \citep*, e.g.
- %       \citep*{key}      ==>> (Jones, Baker, and Williams, 1990)
- % Optional notes as:
- %   \citep[chap. 2]{key}    ==>> (Jones et al., 1990, chap. 2)
- %   \citep[e.g.,][]{key}    ==>> (e.g., Jones et al., 1990)
- %   \citep[see][pg. 34]{key}==>> (see Jones et al., 1990, pg. 34)
- %  (Note: in standard LaTeX, only one note is allowed, after the ref.
- %   Here, one note is like the standard, two make pre- and post-notes.)
- %   \citealt{key}          ==>> Jones et al. 1990
- %   \citealt*{key}         ==>> Jones, Baker, and Williams 1990
- %   \citealp{key}          ==>> Jones et al., 1990
- %   \citealp*{key}         ==>> Jones, Baker, and Williams, 1990
- % Additional citation possibilities (both author-year and numerical modes)
- %   \citeauthor{key}       ==>> Jones et al.
- %   \citeauthor*{key}      ==>> Jones, Baker, and Williams
- %   \citeyear{key}         ==>> 1990
- %   \citeyearpar{key}      ==>> (1990)
- %   \citetext{priv. comm.} ==>> (priv. comm.)
- % Note: full author lists depends on whether the bib style supports them;
- %       if not, the abbreviated list is printed even when full requested.
- %
- % Defining the citation style of a given bib style:
- % Use \bibpunct (in the preamble only) with 6 mandatory arguments:
- %    1. opening bracket for citation
- %    2. closing bracket
- %    3. citation separator (for multiple citations in one \cite)
- %    4. the letter n for numerical styles, s for superscripts
- %        else anything for author-year
- %    5. punctuation between authors and date
- %    6. punctuation between years (or numbers) when common authors missing
- % One optional argument is the character coming before post-notes. It
- %   appears in square braces before all other arguments. May be left off.
- % Example (and default) \bibpunct[,]{(}{)}{;}{a}{,}{,}
- %
- % To make this automatic for a given bib style, named newbib, say, make
- % a local configuration file, natbib.cfg, with the definition
- %   \newcommand{\bibstyle at newbib}{\bibpunct...}
- % Then the \bibliographystyle{newbib} will cause \bibstyle at newbib to
- % be called on THE NEXT LATEX RUN (via the aux file).
- %
- % Such preprogrammed definitions may be invoked in the text (preamble only)
- %  by calling \citestyle{newbib}. This is only useful if the style specified
- %  differs from that in \bibliographystyle.
- %
- % With \citeindextrue and \citeindexfalse, one can control whether the
- % \cite commands make an automatic entry of the citation in the .idx
- % indexing file. For this, \makeindex must also be given in the preamble.
- %
- % LaTeX2e Options: (for selecting punctuation)
- %   round  -  round parentheses are used (default)
- %   square -  square brackets are used   [option]
- %   curly  -  curly braces are used      {option}
- %   angle  -  angle brackets are used    <option>
- %   colon  -  multiple citations separated by colon (default)
- %   comma  -  separated by comma
- %   authoryear - selects author-year citations (default)
- %   numbers-  selects numerical citations
- %   super  -  numerical citations as superscripts
- %   sort   -  sorts multiple citations according to order in ref. list
- %   sort&compress   -  like sort, but also compresses numerical citations
- %   longnamesfirst  -  makes first citation full author list
- %   sectionbib - puts bibliography in a \section* instead of \chapter*
- % Punctuation so selected dominates over any predefined ones.
- % LaTeX2e options are called as, e.g.
- %        \usepackage[square,comma]{natbib}
- % LaTeX the source file natbib.dtx to obtain more details
- % or the file natnotes.tex for a brief reference sheet.
- %-----------------------------------------------------------
-\@ifclassloaded{aguplus}{\PackageError{natbib}
-  {The aguplus class already includes natbib coding,\MessageBreak
-   so you should not add it explicitly}
-  {Type <Return> for now, but then later remove\MessageBreak
-   the command \protect\usepackage{natbib} from the document}
-  \endinput}{}
-\@ifclassloaded{nlinproc}{\PackageError{natbib}
-  {The nlinproc class already includes natbib coding,\MessageBreak
-   so you should not add it explicitly}
-  {Type <Return> for now, but then later remove\MessageBreak
-   the command \protect\usepackage{natbib} from the document}
-  \endinput}{}
-\@ifclassloaded{egs}{\PackageError{natbib}
-  {The egs class already includes natbib coding,\MessageBreak
-   so you should not add it explicitly}
-  {Type <Return> for now, but then later remove\MessageBreak
-   the command \protect\usepackage{natbib} from the document}
-  \endinput}{}
- % Define citation punctuation for some author-year styles
- % One may add and delete at this point
- % Or put additions into local configuration file natbib.cfg
-\newcommand\bibstyle at chicago{\bibpunct{(}{)}{;}{a}{,}{,}}
-\newcommand\bibstyle at named{\bibpunct{[}{]}{;}{a}{,}{,}}
-\newcommand\bibstyle at agu{\bibpunct{[}{]}{;}{a}{,}{,~}}%Amer. Geophys. Union
-\newcommand\bibstyle at egs{\bibpunct{(}{)}{;}{a}{,}{,}}%Eur. Geophys. Soc.
-\newcommand\bibstyle at agsm{\bibpunct{(}{)}{,}{a}{}{,}\gdef\harvardand{\&}}
-\newcommand\bibstyle at kluwer{\bibpunct{(}{)}{,}{a}{}{,}\gdef\harvardand{\&}}
-\newcommand\bibstyle at dcu{\bibpunct{(}{)}{;}{a}{;}{,}\gdef\harvardand{and}}
-\newcommand\bibstyle at aa{\bibpunct{(}{)}{;}{a}{}{,}} %Astronomy & Astrophysics
-\newcommand\bibstyle at pass{\bibpunct{(}{)}{;}{a}{,}{,}}%Planet. & Space Sci
-\newcommand\bibstyle at anngeo{\bibpunct{(}{)}{;}{a}{,}{,}}%Annales Geophysicae
-\newcommand\bibstyle at nlinproc{\bibpunct{(}{)}{;}{a}{,}{,}}%Nonlin.Proc.Geophys.
- % Define citation punctuation for some numerical styles
-\newcommand\bibstyle at cospar{\bibpunct{/}{/}{,}{n}{}{}%
-     \gdef\NAT at biblabelnum##1{##1.}}
-\newcommand\bibstyle at esa{\bibpunct{(}{)}{,}{n}{}{}%
-     \gdef\NAT at biblabelnum##1{##1.\hspace{1em}}%
-     \gdef\NAT at citenum##1##2##3{\NAT@@open \if\relax##2\relax\else
-       ##2\ \fi Ref.~##1\if\relax##3\relax\else\NAT at cmt\
-       ##3\fi\NAT@@close\endgroup}}
-\newcommand\bibstyle at nature{\bibpunct{}{}{,}{s}{}{\textsuperscript{,}}%
-     \gdef\NAT at biblabelnum##1{##1.}}
- % The standard LaTeX styles
-\newcommand\bibstyle at plain{\bibpunct{[}{]}{,}{n}{}{,}}
-\let\bibstyle at alpha=\bibstyle at plain
-\let\bibstyle at abbrv=\bibstyle at plain
-\let\bibstyle at unsrt=\bibstyle at plain
- % The author-year modifications of the standard styles
-\newcommand\bibstyle at plainnat{\bibpunct{[}{]}{,}{a}{,}{,}}
-\let\bibstyle at abbrvnat=\bibstyle at plainnat
-\let\bibstyle at unsrtnat=\bibstyle at plainnat
-\newif\ifNAT at numbers \NAT at numbersfalse
-\newif\ifNAT at super \NAT at superfalse
-\DeclareOption{numbers}{\NAT at numberstrue
-   \ExecuteOptions{square,comma,nobibstyle}}
-\DeclareOption{super}{\NAT at supertrue\NAT at numberstrue
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{authoryear}{\NAT at numbersfalse
-   \ExecuteOptions{round,colon,bibstyle}}
-\DeclareOption{round}{%
-      \renewcommand\NAT at open{(} \renewcommand\NAT at close{)}
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{square}{%
-      \renewcommand\NAT at open{[} \renewcommand\NAT at close{]}
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{angle}{%
-      \renewcommand\NAT at open{$<$} \renewcommand\NAT at close{$>$}
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{curly}{%
-      \renewcommand\NAT at open{\{} \renewcommand\NAT at close{\}}
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{comma}{\renewcommand\NAT at sep{,}
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{colon}{\renewcommand\NAT at sep{;}
-   \ExecuteOptions{nobibstyle}}
-\DeclareOption{nobibstyle}{\let\bibstyle=\@gobble}
-\DeclareOption{bibstyle}{\let\bibstyle=\@citestyle}
-\newif\ifNAT at openbib \NAT at openbibfalse
-\DeclareOption{openbib}{\NAT at openbibtrue}
-\DeclareOption{sectionbib}{\def\NAT at sectionbib{on}}
-\def\NAT at sort{0}
-\DeclareOption{sort}{\def\NAT at sort{1}}
-\DeclareOption{sort&compress}{\def\NAT at sort{2}}
-\@ifpackageloaded{cite}{\PackageWarningNoLine{natbib}
-  {The `cite' package should not be used\MessageBreak
-   with natbib. Use option `sort' instead}\ExecuteOptions{sort}}{}
-\newif\ifNAT at longnames\NAT at longnamesfalse
-\DeclareOption{longnamesfirst}{\NAT at longnamestrue}
-\DeclareOption{nonamebreak}{\def\NAT at nmfmt#1{\mbox{#1}}}
-\def\NAT at nmfmt#1{{#1}}
-\renewcommand\bibstyle[1]{\@ifundefined{bibstyle@#1}{\relax}
-     {\csname bibstyle@#1\endcsname}}
-\AtBeginDocument{\global\let\bibstyle=\@gobble}
-\let\@citestyle\bibstyle
-\newcommand\citestyle[1]{\@citestyle{#1}\let\bibstyle\@gobble}
-\@onlypreamble{\citestyle}\@onlypreamble{\@citestyle}
-\newcommand\bibpunct[7][,]%
-  {\gdef\NAT at open{#2}\gdef\NAT at close{#3}\gdef
-   \NAT at sep{#4}\global\NAT at numbersfalse\ifx #5n\global\NAT at numberstrue
-   \else
-   \ifx #5s\global\NAT at numberstrue\global\NAT at supertrue
-   \fi\fi
-   \gdef\NAT at aysep{#6}\gdef\NAT at yrsep{#7}%
-   \gdef\NAT at cmt{#1}%
-   \global\let\bibstyle\@gobble
-  }
-\@onlypreamble{\bibpunct}
-\newcommand\NAT at open{(} \newcommand\NAT at close{)}
-\newcommand\NAT at sep{;}
-\ProcessOptions
-\newcommand\NAT at aysep{,} \newcommand\NAT at yrsep{,}
-\newcommand\NAT at cmt{,}
-\newcommand\NAT at cite%
-    [3]{\ifNAT at swa\NAT@@open\if\relax#2\relax\else#2\ \fi
-        #1\if\relax#3\relax\else\NAT at cmt\ #3\fi\NAT@@close\else#1\fi\endgroup}
-\newcommand\NAT at citenum%
-    [3]{\ifNAT at swa\NAT@@open\if\relax#2\relax\else#2\ \fi
-        #1\if\relax#3\relax\else\NAT at cmt\ #3\fi\NAT@@close\else#1\fi\endgroup}
-\newcommand\NAT at citesuper[3]{\ifNAT at swa
-\unskip\hspace{1\p@}\textsuperscript{#1}%
-   \if\relax#3\relax\else\ (#3)\fi\else #1\fi\endgroup}
-\providecommand
-  \textsuperscript[1]{\mbox{$^{\mbox{\scriptsize#1}}$}}
-\providecommand\@firstofone[1]{#1}
-\newcommand\NAT at citexnum{}
-\def\NAT at citexnum[#1][#2]#3{%
- \NAT at sort@cites{#3}%
- \let\@citea\@empty
-  \@cite{\def\NAT at num{-1}\let\NAT at last@yr\relax\let\NAT at nm\@empty
-    \@for\@citeb:=\NAT at cite@list\do
-    {\edef\@citeb{\expandafter\@firstofone\@citeb}%
-     \if at filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
-     \@ifundefined{b@\@citeb\@extra at b@citeb}{%
-       {\reset at font\bfseries?}
-        \NAT at citeundefined\PackageWarning{natbib}%
-       {Citation `\@citeb' on page \thepage \space undefined}}%
-     {\let\NAT at last@num\NAT at num\let\NAT at last@nm\NAT at nm
-      \NAT at parse{\@citeb}%
-      \ifNAT at longnames\@ifundefined{bv@\@citeb\@extra at b@citeb}{%
-        \let\NAT at name=\NAT at all@names
-        \global\@namedef{bv@\@citeb\@extra at b@citeb}{}}{}%
-      \fi
-      \ifNAT at full\let\NAT at nm\NAT at all@names\else
-        \let\NAT at nm\NAT at name\fi
-      \ifNAT at swa
-       \ifnum\NAT at ctype=2\relax\@citea
-        \hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-            \NAT at test{2}\hyper at natlinkend\else
-       \ifnum\NAT at sort>1
-         \begingroup\catcode`\_=8
-            \ifcat _\ifnum\z@<0\NAT at num _\else A\fi
-              \global\let\NAT at nm=\NAT at num \else \gdef\NAT at nm{-2}\fi
-            \ifcat _\ifnum\z@<0\NAT at last@num _\else A\fi
-              \global\@tempcnta=\NAT at last@num \global\advance\@tempcnta by\@ne
-              \else \global\@tempcnta\m at ne\fi
-         \endgroup
-         \ifnum\NAT at nm=\@tempcnta
-           \ifx\NAT at last@yr\relax
-             \edef\NAT at last@yr{\@citea \mbox{\NAT at num}}%
-           \else
-             \edef\NAT at last@yr{--\penalty\@m\mbox{\NAT at num}}%
-           \fi
-         \else
-           \NAT at last@yr \@citea \mbox{\NAT at num}%
-           \let\NAT at last@yr\relax
-         \fi
-       \else
-         \@citea \mbox{\hyper at natlinkstart{\@citeb\@extra at b@citeb}\NAT at num
-                    \hyper at natlinkend}%
-       \fi
-       \fi
-       \def\@citea{\NAT at sep\penalty\@m\NAT at space}%
-      \else
-        \ifcase\NAT at ctype\relax
-          \ifx\NAT at last@nm\NAT at nm \NAT at yrsep\penalty\@m\NAT at space\else
-          \@citea \NAT at test{1}\ \NAT@@open
-          \if\relax#1\relax\else#1\ \fi\fi \NAT at mbox{%
-          \hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-          \NAT at num\hyper at natlinkend}%
-          \def\@citea{\NAT@@close\NAT at sep\penalty\@m\ }%
-        \or\@citea
-          \hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-           \NAT at test{1}\hyper at natlinkend
-          \def\@citea{\NAT at sep\penalty\@m\ }%
-        \or\@citea
-          \hyper at natlinkstart{\@citeb\@extra at b@citeb}\NAT at test{2}%
-           \hyper at natlinkend
-          \def\@citea{\NAT at sep\penalty\@m\ }%
-        \fi
-      \fi
-      }}%
-      \ifnum\NAT at sort>1\NAT at last@yr\fi
-      \ifNAT at swa\else\ifnum\NAT at ctype=0\if\relax#2\relax\else
-      \NAT at cmt\ #2\fi \NAT@@close\fi\fi}{#1}{#2}}
-\newcommand\NAT at test[1]{\ifnum#1=1 \ifx\NAT at nm\NAT at noname
-  {\reset at font\bfseries(author?)}\PackageWarning{natbib}
-  {Author undefined for citation`\@citeb'
-   \MessageBreak
-   on page \thepage}\else \NAT at nm \fi
-  \else \if\relax\NAT at date\relax
-  {\reset at font\bfseries(year?)}\PackageWarning{natbib}
-  {Year undefined for citation`\@citeb'
-   \MessageBreak
-   on page \thepage}\else \NAT at date \fi \fi}
-\newcommand\NAT at citex{}
-\def\NAT at citex%
-  [#1][#2]#3{%
-  \NAT at sort@cites{#3}%
-  \let\@citea\@empty
-  \@cite{\let\NAT at nm\@empty\let\NAT at year\@empty
-    \@for\@citeb:=\NAT at cite@list\do
-    {\edef\@citeb{\expandafter\@firstofone\@citeb}%
-     \if at filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
-     \@ifundefined{b@\@citeb\@extra at b@citeb}{\@citea%
-       {\reset at font\bfseries ?}\NAT at citeundefined
-                 \PackageWarning{natbib}%
-       {Citation `\@citeb' on page \thepage \space undefined}}%
-     {\let\NAT at last@nm=\NAT at nm\let\NAT at last@yr=\NAT at year
-     \NAT at parse{\@citeb}%
-      \ifNAT at longnames\@ifundefined{bv@\@citeb\@extra at b@citeb}{%
-        \let\NAT at name=\NAT at all@names
-        \global\@namedef{bv@\@citeb\@extra at b@citeb}{}}{}%
-      \fi
-     \ifNAT at full\let\NAT at nm\NAT at all@names\else
-       \let\NAT at nm\NAT at name\fi
-     \ifNAT at swa\ifcase\NAT at ctype
-       \ifx\NAT at last@nm\NAT at nm\NAT at yrsep
-          \ifx\NAT at last@yr\NAT at year
-            \hyper at natlinkstart{\@citeb\@extra at b@citeb}\NAT at exlab
-            \hyper at natlinkend
-          \else\unskip\
-            \hyper at natlinkstart{\@citeb\@extra at b@citeb}\NAT at date
-            \hyper at natlinkend
-          \fi
-       \else\@citea\hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-         \NAT at nmfmt{\NAT at nm}%
-         \hyper at natlinkbreak{\NAT at aysep\ }{\@citeb\@extra at b@citeb}%
-         \NAT at date\hyper at natlinkend
-       \fi
-     \or\@citea\hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-         \NAT at nmfmt{\NAT at nm}\hyper at natlinkend
-     \or\@citea\hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-         \NAT at date\hyper at natlinkend
-     \fi \def\@citea{\NAT at sep\ }%
-     \else\ifcase\NAT at ctype
-       \ifx\NAT at last@nm\NAT at nm\NAT at yrsep
-          \ifx\NAT at last@yr\NAT at year
-            \hyper at natlinkstart{\@citeb\@extra at b@citeb}\NAT at exlab
-            \hyper at natlinkend
-          \else\unskip\
-            \hyper at natlinkstart{\@citeb\@extra at b@citeb}\NAT at date
-            \hyper at natlinkend
-          \fi
-       \else\@citea\hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-         \NAT at nmfmt{\NAT at nm}%
-         \hyper at natlinkbreak{\ \NAT@@open\if\relax#1\relax\else#1\ \fi}%
-            {\@citeb\@extra at b@citeb}%
-         \NAT at date\hyper at natlinkend\fi
-       \or\@citea\hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-         \NAT at nmfmt{\NAT at nm}\hyper at natlinkend
-       \or\@citea\hyper at natlinkstart{\@citeb\@extra at b@citeb}%
-         \NAT at date\hyper at natlinkend\fi \def\@citea{\NAT@@close\NAT at sep\ }%
-     \fi
-     }}\ifNAT at swa\else\if\relax#2\relax\else\NAT at cmt\ #2\fi
-     \NAT@@close\fi}{#1}{#2}}
-\newif\ifNAT at par \NAT at partrue
-\newcommand\NAT@@open{\ifNAT at par\NAT at open\fi}
-\newcommand\NAT@@close{\ifNAT at par\NAT at close\fi}
-\newcommand\shortcites[1]{%
-  \@bsphack\@for\@citeb:=#1\do
-  {\edef\@citeb{\expandafter\@firstofone\@citeb}%
-   \global\@namedef{bv@\@citeb\@extra at b@citeb}{}}\@esphack}
-\newcommand\NAT at biblabel[1]{\hfill}
-\newcommand\NAT at biblabelnum[1]{[#1]}
-\def\@tempa#1{[#1]}
-\ifx\@tempa\@biblabel\let\@biblabel\@empty\fi
-\newcommand\NAT at bibsetnum[1]{\settowidth\labelwidth{\@biblabel{#1}}%
-   \setlength{\leftmargin}{\labelwidth}\addtolength{\leftmargin}{\labelsep}%
-   \setlength{\itemsep}{\bibsep}\setlength{\parsep}{\z@}%
-   \ifNAT at openbib
-     \addtolength{\leftmargin}{\bibindent}%
-     \setlength{\itemindent}{-\bibindent}%
-     \setlength{\listparindent}{\itemindent}%
-     \setlength{\parsep}{0pt}%
-   \fi
-}
-\newlength{\bibhang}
-\setlength{\bibhang}{1em}
-\newlength{\bibsep}
-{\@listi \global\bibsep\itemsep \global\advance\bibsep by\parsep}
-
-\newcommand\NAT at bibsetup%
-   [1]{\setlength{\leftmargin}{\bibhang}\setlength{\itemindent}{-\leftmargin}%
-       \setlength{\itemsep}{\bibsep}\setlength{\parsep}{\z@}}
-\newcommand\NAT at set@cites{\ifNAT at numbers
-  \ifNAT at super \let\@cite\NAT at citesuper
-     \def\NAT at mbox##1{\unskip\nobreak\hspace{1\p@}\textsuperscript{##1}}%
-     \let\citeyearpar=\citeyear
-     \let\NAT at space\relax\else
-     \let\NAT at mbox=\mbox
-     \let\@cite\NAT at citenum \def\NAT at space{ }\fi
-  \let\@citex\NAT at citexnum
-  \ifx\@biblabel\@empty\let\@biblabel\NAT at biblabelnum\fi
-  \let\@bibsetup\NAT at bibsetnum
-  \def\natexlab##1{}%
- \else
-  \let\@cite\NAT at cite
-  \let\@citex\NAT at citex
-  \let\@biblabel\NAT at biblabel
-  \let\@bibsetup\NAT at bibsetup
-  \def\natexlab##1{##1}%
- \fi}
-\AtBeginDocument{\NAT at set@cites}
-\AtBeginDocument{\ifx\SK at def\@undefined\else
-\ifx\SK at cite\@empty\else
-  \SK at def\@citex[#1][#2]#3{\SK@\SK@@ref{#3}\SK@@citex[#1][#2]{#3}}\fi
-\ifx\SK at citeauthor\@undefined\def\HAR at checkdef{}\else
-  \let\citeauthor\SK at citeauthor
-  \let\citefullauthor\SK at citefullauthor
-  \let\citeyear\SK at citeyear\fi
-\fi}
-\AtBeginDocument{\@ifpackageloaded{hyperref}{%
-  \ifnum\NAT at sort=2\def\NAT at sort{1}\fi}{}}
-\newif\ifNAT at full\NAT at fullfalse
-\newif\ifNAT at swa
-\DeclareRobustCommand\citet
-   {\begingroup\NAT at swafalse\def\NAT at ctype{0}\NAT at partrue
-     \@ifstar{\NAT at fulltrue\NAT at citetp}{\NAT at fullfalse\NAT at citetp}}
-\newcommand\NAT at citetp{\@ifnextchar[{\NAT@@citetp}{\NAT@@citetp[]}}
-\newcommand\NAT@@citetp{}
-\def\NAT@@citetp[#1]{\@ifnextchar[{\@citex[#1]}{\@citex[][#1]}}
-\DeclareRobustCommand\citep
-   {\begingroup\NAT at swatrue\def\NAT at ctype{0}\NAT at partrue
-         \@ifstar{\NAT at fulltrue\NAT at citetp}{\NAT at fullfalse\NAT at citetp}}
-\DeclareRobustCommand\cite
-    {\begingroup\def\NAT at ctype{0}\NAT at partrue\NAT at swatrue
-      \@ifstar{\NAT at fulltrue\NAT at cites}{\NAT at fullfalse\NAT at cites}}
-\newcommand\NAT at cites{\@ifnextchar [{\NAT@@citetp}{%
-     \ifNAT at numbers\else
-     \NAT at swafalse
-     \fi
-    \NAT@@citetp[]}}
-\DeclareRobustCommand\citealt
-   {\begingroup\NAT at swafalse\def\NAT at ctype{0}\NAT at parfalse
-         \@ifstar{\NAT at fulltrue\NAT at citetp}{\NAT at fullfalse\NAT at citetp}}
-\DeclareRobustCommand\citealp
-   {\begingroup\NAT at swatrue\def\NAT at ctype{0}\NAT at parfalse
-         \@ifstar{\NAT at fulltrue\NAT at citetp}{\NAT at fullfalse\NAT at citetp}}
-\DeclareRobustCommand\citeauthor
-   {\begingroup\NAT at swafalse\def\NAT at ctype{1}\NAT at parfalse
-    \@ifstar{\NAT at fulltrue\NAT at citetp}{\NAT at fullfalse\NAT at citetp}}
-\DeclareRobustCommand\citeyear
-   {\begingroup\NAT at swafalse\def\NAT at ctype{2}\NAT at parfalse\NAT at citetp}
-\DeclareRobustCommand\citeyearpar
-   {\begingroup\NAT at swatrue\def\NAT at ctype{2}\NAT at partrue\NAT at citetp}
-\newcommand\citetext[1]{\NAT at open#1\NAT at close}
-\DeclareRobustCommand\citefullauthor
-   {\citeauthor*}
-\renewcommand\nocite[1]{\@bsphack
-  \@for\@citeb:=#1\do{%
-    \edef\@citeb{\expandafter\@firstofone\@citeb}%
-    \if at filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
-    \if*\@citeb\else
-    \@ifundefined{b@\@citeb\@extra at b@citeb}{%
-       \NAT at citeundefined \PackageWarning{natbib}%
-       {Citation `\@citeb' undefined}}{}\fi}%
-  \@esphack}
-\newcommand\NAT at parse[1]{{%
-     \let\protect=\@unexpandable at protect\let~\relax
-     \let\active at prefix=\@gobble
-     \xdef\NAT at temp{\csname b@#1\@extra at b@citeb\endcsname}}%
-     \expandafter\NAT at split\NAT at temp
-     \expandafter\NAT at parse@date\NAT at date??????@@%
-     \ifciteindex\NAT at index\fi
-}
-\newcommand\NAT at split[4]{%
-  \gdef\NAT at num{#1}\gdef\NAT at name{#3}\gdef\NAT at date{#2}%
-  \gdef\NAT at all@names{#4}%
-  \ifx\NAT at noname\NAT at all@names \gdef\NAT at all@names{#3}\fi}
-\newcommand\NAT at parse@date{}
-\def\NAT at parse@date#1#2#3#4#5#6@@{%
-  \ifnum\the\catcode`#1=11\def\NAT at year{}\def\NAT at exlab{#1}\else
-  \ifnum\the\catcode`#2=11\def\NAT at year{#1}\def\NAT at exlab{#2}\else
-  \ifnum\the\catcode`#3=11\def\NAT at year{#1#2}\def\NAT at exlab{#3}\else
-  \ifnum\the\catcode`#4=11\def\NAT at year{#1#2#3}\def\NAT at exlab{#4}\else
-    \def\NAT at year{#1#2#3#4}\def\NAT at exlab{{#5}}\fi\fi\fi\fi}
-\newcommand\NAT at index{}
-\let\NAT at makeindex=\makeindex
-\renewcommand\makeindex{\NAT at makeindex
-  \renewcommand\NAT at index{\@bsphack\begingroup
-     \def~{\string~}\@wrindex{\NAT at idxtxt}}}
-\newcommand\NAT at idxtxt{\NAT at name\ \NAT at open\NAT at date\NAT at close}
-\newif\ifciteindex \citeindexfalse
-\newcommand\citeindextype{default}
-\newcommand\NAT at index@alt{{\let\protect=\noexpand\let~\relax
-  \xdef\NAT at temp{\NAT at idxtxt}}\expandafter\NAT at exp\NAT at temp\@nil}
-\newcommand\NAT at exp{}
-\def\NAT at exp#1\@nil{\index[\citeindextype]{#1}}
-
-\AtBeginDocument{%
-\@ifpackageloaded{index}{\let\NAT at index=\NAT at index@alt}{}}
-\newcommand\NAT at ifcmd{\futurelet\NAT at temp\NAT at ifxcmd}
-\newcommand\NAT at ifxcmd{\ifx\NAT at temp\relax\else\expandafter\NAT at bare\fi}
-\def\NAT at bare#1(#2)#3()#4\@nil#5{%
-  \if\relax#2\relax
-  \expandafter\NAT at apalk#1, , \@nil{#5}\else
-  \stepcounter{NAT at ctr}%
-  \NAT at wrout{\arabic {NAT at ctr}}{#2}{#1}{#3}{#5}
-\fi
-}
-\newcommand\NAT at wrout[5]{%
-\if at filesw
-      {\let\protect\noexpand\let~\relax
-       \immediate
-       \write\@auxout{\string\bibcite{#5}{{#1}{#2}{{#3}}{{#4}}}}}\fi
-\ignorespaces}
-\def\NAT at noname{{}}
-\renewcommand\bibitem{%
-  \@ifnextchar[{\@lbibitem}{%
-    \global\NAT at stdbsttrue
-    \stepcounter{NAT at ctr}\@lbibitem[\arabic{NAT at ctr}]}}
-\def\@lbibitem[#1]#2{%
-  \if\relax\@extra at b@citeb\relax\else
-    \@ifundefined{br@#2\@extra at b@citeb}{}{%
-     \@namedef{br@#2}{\@nameuse{br@#2\@extra at b@citeb}}}\fi
-   \@ifundefined{b@#2\@extra at b@citeb}{\def\NAT at num{}}{\NAT at parse{#2}}%
-   \item[\hfil\hyper at natanchorstart{#2\@extra at b@citeb}\@biblabel{\NAT at num}%
-    \hyper at natanchorend]%
-    \NAT at ifcmd#1()()\@nil{#2}}
-\ifx\SK at lbibitem\@undefined\else
-   \let\SK at lbibitem\@lbibitem
-   \def\@lbibitem[#1]#2{%
-     \SK at lbibitem[#1]{#2}\SK@\SK@@label{#2}\ignorespaces}\fi
-\newif\ifNAT at stdbst \NAT at stdbstfalse
-
-\AtEndDocument
-  {\ifNAT at stdbst\if at filesw\immediate\write\@auxout{\string
-   \global\string\NAT at numberstrue}\fi\fi
-  }
-\providecommand\bibcite{}
-\renewcommand\bibcite[2]{\@ifundefined{b@#1\@extra at binfo}\relax
-     {\NAT at citemultiple
-      \PackageWarningNoLine{natbib}{Citation `#1' multiply defined}}%
-  \global\@namedef{b@#1\@extra at binfo}{#2}}
-\AtEndDocument{\NAT at swatrue\renewcommand\bibcite[2]%
-                         {\NAT at testdef{#1}{#2}}}
-\newcommand\NAT at testdef[2]{%
-  \def\NAT at temp{#2}\expandafter \ifx \csname b@#1\@extra at binfo\endcsname
-    \NAT at temp \else \ifNAT at swa \NAT at swafalse
-       \PackageWarningNoLine{natbib}{Citation(s) may have
-          changed.\MessageBreak
-          Rerun to get citations correct}\fi\fi}
-\newcommand\NAT at apalk{}
-\def\NAT at apalk#1, #2, #3\@nil#4{\if\relax#2\relax
-  \global\NAT at stdbsttrue
-  \NAT at wrout{#1}{}{}{}{#4}\else
-  \stepcounter{NAT at ctr}%
-  \NAT at wrout{\arabic {NAT at ctr}}{#2}{#1}{}{#4}\fi}
-\newcommand\citeauthoryear{}
-\def\citeauthoryear#1#2#3()()\@nil#4{\stepcounter{NAT at ctr}\if\relax#3\relax
-   \NAT at wrout{\arabic {NAT at ctr}}{#2}{#1}{}{#4}\else
-   \NAT at wrout{\arabic {NAT at ctr}}{#3}{#2}{#1}{#4}\fi}
-\newcommand\citestarts{\NAT at open}
-\newcommand\citeends{\NAT at close}
-\newcommand\betweenauthors{and}
-\newcommand\astroncite{}
-\def\astroncite#1#2()()\@nil#3{\stepcounter{NAT at ctr}\NAT at wrout{\arabic
-{NAT at ctr}}{#2}{#1}{}{#3}}
-\newcommand\citename{}
-\def\citename#1#2()()\@nil#3{\expandafter\NAT at apalk#1#2, \@nil{#3}}
-\newcommand\harvarditem[4][]%
-    {\if\relax#1\relax\bibitem[#2(#3)]{#4}\else
-        \bibitem[#1(#3)#2]{#4}\fi }
-\newcommand\harvardleft{\NAT at open}
-\newcommand\harvardright{\NAT at close}
-\newcommand\harvardyearleft{\NAT at open}
-\newcommand\harvardyearright{\NAT at close}
-\AtBeginDocument{\providecommand{\harvardand}{and}}
-\newcommand\harvardurl[1]{\textbf{URL:} \textit{#1}}
-\providecommand\bibsection{}
-\@ifundefined{chapter}%
-  {\renewcommand\bibsection{\section*{\refname
-    \@mkboth{\MakeUppercase{\refname}}{\MakeUppercase{\refname}}}}}
-  {\@ifundefined{NAT at sectionbib}%
-    {\renewcommand\bibsection{\chapter*{\bibname
-     \@mkboth{\MakeUppercase{\bibname}}{\MakeUppercase{\bibname}}}}}
-    {\renewcommand\bibsection{\section*{\bibname
-     \ifx\@mkboth\@gobbletwo\else\markright{\MakeUppercase{\bibname}}\fi}}}}
-\@ifclassloaded{amsart}%
-  {\renewcommand\bibsection{\section*{\refname}}{}}{}
-\@ifclassloaded{amsbook}%
-  {\renewcommand\bibsection{\section*{\bibname}}{}}{}
-\@ifundefined{bib at heading}{}{\let\bibsection\bib at heading}
-\newcounter{NAT at ctr}
-\renewenvironment{thebibliography}[1]{%
- \bibfont\bibsection\parindent \z@\list
-   {\@biblabel{\arabic{NAT at ctr}}}{\@bibsetup{#1}%
-    \setcounter{NAT at ctr}{0}}%
-    \ifNAT at openbib
-      \renewcommand\newblock{\par}
-    \else
-      \renewcommand\newblock{\hskip .11em \@plus.33em \@minus.07em}%
-    \fi
-    \sloppy\clubpenalty4000\widowpenalty4000
-    \sfcode`\.=1000\relax
-    \let\citeN\cite \let\shortcite\cite
-    \let\citeasnoun\cite
- }{\def\@noitemerr{%
-  \PackageWarning{natbib}
-     {Empty `thebibliography' environment}}%
-  \endlist\vskip-\lastskip}
-\let\bibfont=\relax
-\providecommand\reset at font{\relax}
-\providecommand\bibname{Bibliography}
-\providecommand\refname{References}
-\newcommand\NAT at citeundefined{\gdef \NAT at undefined {%
-    \PackageWarningNoLine{natbib}{There were undefined citations}}}
-\let \NAT at undefined \relax
-\newcommand\NAT at citemultiple{\gdef \NAT at multiple {%
-    \PackageWarningNoLine{natbib}{There were multiply defined citations}}}
-\let \NAT at multiple \relax
-\AtEndDocument{\NAT at undefined\NAT at multiple}
-\providecommand\@mkboth[2]{}
-\providecommand\MakeUppercase{\uppercase}
-\providecommand{\@extra at b@citeb}{}
-\gdef\@extra at binfo{}
-\providecommand\hyper at natanchorstart[1]{}
-\providecommand\hyper at natanchorend{}
-\providecommand\hyper at natlinkstart[1]{}
-\providecommand\hyper at natlinkend{}
-\providecommand\hyper at natlinkbreak[2]{#1}
-\@ifpackageloaded{babel}{\PackageWarningNoLine{natbib}{%
-If you use both babel and natbib\MessageBreak
-then load babel AFTER natbib}}{}
-\AtBeginDocument{\@ifpackageloaded{babel}{%
-\bbl at redefine\@citex[#1][#2]#3{%
-  \@safe at activestrue\org@@citex[#1][#2]{#3}\@safe at activesfalse}%
-}{}}
-\ifnum\NAT at sort>0
-\newcommand\NAT at sort@cites[1]{%
-\@tempcntb\m at ne
-\let\@celt\delimiter
-\def\NAT at num@list{}%
-\def\NAT at cite@list{}%
-\def\NAT at nonsort@list{}%
-\@for \@citeb:=#1\do{\NAT at make@cite at list}%
-\edef\NAT at cite@list{\NAT at cite@list\NAT at nonsort@list}%
-\edef\NAT at cite@list{\expandafter\NAT at xcom\NAT at cite@list @@}}
-\begingroup \catcode`\_=8
-\gdef\NAT at make@cite at list{%
-     \edef\@citeb{\expandafter\@firstofone\@citeb}%
-    \@ifundefined{b@\@citeb\@extra at b@citeb}{\def\NAT at num{A}}%
-    {\NAT at parse{\@citeb}}%
-      \ifcat _\ifnum\z@<0\NAT at num _\else A\fi
-       \@tempcnta\NAT at num \relax
-       \ifnum \@tempcnta>\@tempcntb
-          \edef\NAT at num@list{\NAT at num@list \@celt{\NAT at num}}%
-          \edef\NAT at cite@list{\NAT at cite@list\@citeb,}%
-          \@tempcntb\@tempcnta
-       \else
-          \let\NAT@@cite at list=\NAT at cite@list \def\NAT at cite@list{}%
-          \edef\NAT at num@list{\expandafter\NAT at num@celt \NAT at num@list \@gobble @}%
-          {\let\@celt=\NAT at celt\NAT at num@list}%
-       \fi
-    \else
-       \edef\NAT at nonsort@list{\NAT at nonsort@list\@citeb,}%
- \fi}
-\endgroup
-\def\NAT at celt#1{\ifnum #1<\@tempcnta
-  \xdef\NAT at cite@list{\NAT at cite@list\expandafter\NAT at nextc\NAT@@cite at list @@}%
-  \xdef\NAT@@cite at list{\expandafter\NAT at restc\NAT@@cite at list}%
- \else
-  \xdef\NAT at cite@list{\NAT at cite@list\@citeb,\NAT@@cite at list}\let\@celt\@gobble%
- \fi}
-\def\NAT at num@celt#1#2{\ifx \@celt #1%
-     \ifnum #2<\@tempcnta
-        \@celt{#2}%
-        \expandafter\expandafter\expandafter\NAT at num@celt
-     \else
-        \@celt{\number\@tempcnta}\@celt{#2}%
-  \fi\fi}
-\def\NAT at nextc#1,#2@@{#1,}
-\def\NAT at restc#1,#2{#2}
-\def\NAT at xcom#1,@@{#1}
-\else
- \newcommand\NAT at sort@cites[1]{\edef\NAT at cite@list{#1}}\fi
-\InputIfFileExists{natbib.cfg}
-       {\typeout{Local config file natbib.cfg used}}{}
-%% 
-%% <<<<< End of decommented file <<<<<<
-%%
-%% End of file `natbib.sty'.
diff --git a/vignettes/negbin.Rnw b/vignettes/negbin.Rnw
deleted file mode 100644
index c22d8c3..0000000
--- a/vignettes/negbin.Rnw
+++ /dev/null
@@ -1,254 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=negbin}
-\include{zinput}
-%\VignetteIndexEntry{Negative Binomial Regression for Event Count Dependent Variables}
-%\VignetteDepends{Zelig, MASS}
-%\VignetteKeyWords{model, binomial,negative, regression, count}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-
-\section{{\tt negbin}: Negative Binomial Regression for Event
-Count Dependent Variables}\label{negbin}
-
-Use the negative binomial regression if you have a count of events for
-each observation of your dependent variable.  The negative binomial
-model is frequently used to estimate over-dispersed event count
-models.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "negbin", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for negative binomial regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Example}
-
-Load sample data:  
-<<Example.data>>=
- data(sanction)
-@ 
-Estimate the model:  
-<<Example.zelig>>=
- z.out <- zelig(num ~ target + coop, model = "negbin", data = sanction)
-@ 
-<<Example.summary>>= 
-summary(z.out)
-@ 
-Set values for the explanatory variables to their default mean values:  
-<<Example.setx>>=
- x.out <- setx(z.out)
-@ 
-Simulate fitted values:  
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.out)
-@
-<<Example.summary.sim>>= 
-summary(s.out)
-@ 
-\begin{center}
-<<label=Example1Plot,fig=true>>= 
- plot(s.out)
-@ 
-\end{center}
-\subsubsection{Model}
-Let $Y_i$ be the number of independent events that occur during a
-fixed time period. This variable can take any non-negative integer value.
-
-\begin{itemize}
-\item The negative binomial distribution is derived by letting the
-  mean of the Poisson distribution vary according to a fixed
-  parameter $\zeta$ given by the Gamma distribution. The
-  \emph{stochastic component} is given by
-   \begin{eqnarray*}
-     Y_i \mid \zeta_i & \sim & \textrm{Poisson}(\zeta_i \mu_i),\\
-     \zeta_i & \sim & \frac{1}{\theta}\textrm{Gamma}(\theta).
-   \end{eqnarray*}
-   The marginal distribution of $Y_i$ is then the negative binomial
-   with mean $\mu_i$ and variance $\mu_i + \mu_i^2/\theta$:
-   \begin{eqnarray*}
-   Y_i & \sim & \textrm{NegBin}(\mu_i, \theta), \\
-       & = & \frac{\Gamma (\theta + y_i)}{y_i! \, \Gamma(\theta)} 
-             \frac{\mu_i^{y_i} \, \theta^{\theta}}{(\mu_i + \theta)^{\theta + y_i}},
-   \end{eqnarray*}
-   where $\theta$ is the systematic parameter of the Gamma
-   distribution modeling $\zeta_i$.  
-
- \item The \emph{systematic component} is given by
-   \begin{equation*}
-     \mu_i = \exp(x_i \beta)
-   \end{equation*}
-   where $x_i$ is the vector of $k$ explanatory variables and $\beta$ is
-   the vector of coefficients.
- \end{itemize}
-
-\subsubsection{Quantities of Interest}
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) are simulations of the mean
-  of the stochastic component.  Thus, $$E(Y) = \mu_i = \exp(x_i
-  \beta),$$ given simulations of $\beta$.  
-  
-\item The predicted value ({\tt qi\$pr}) drawn from the distribution
-  defined by the set of parameters $(\mu_i, \theta)$.
-
-\item The first difference ({\tt qi\$fd}) is
-\begin{equation*}
-\textrm{FD} \; = \; E(Y \mid x_1) - E(Y \mid x)
-\end{equation*}
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "negbin", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt theta}: the maximum likelihood estimate for the
-     stochastic parameter $\theta$.  
-   \item {\tt SE.theta}: the standard error for {\tt theta}.  
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the fitted values for the systematic
-     component $\mu$.  
-   \item {\tt linear.predictors}: a vector of $x_{i} \beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values given the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distribution defined by $(\mu_i, \theta)$.  
-   \item {\tt qi\$fd}: the simulated first differences in the
-     simulated expected values given the specified values of {\tt x}
-     and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/negbin}
-\input{citeZelig}
-
-\subsection* {See also}
-The negative binomial model is part of the MASS package by William N. Venables and Brian D. Ripley \citep{VenRip02}. Advanced users may wish to refer to \texttt{help(glm.nb)} as well as \cite{McCNel89}. Robust standard errors are implemented via the sandwich package by Achim Zeileis \citep{Zeileis04}. Sample data are from \cite{Martin92}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
diff --git a/vignettes/normal.survey.Rnw b/vignettes/normal.survey.Rnw
deleted file mode 100644
index 0503248..0000000
--- a/vignettes/normal.survey.Rnw
+++ /dev/null
@@ -1,511 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=normal}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Normal Regression  for Continuous Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model, normal, regression, continuous, least squares, survey-weight}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt normal.survey}: Survey-Weighted Normal Regression for Continuous Dependent Variables}
-\label{normal.survey}
-
-The survey-weighted Normal regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the least squares and Normal regression models (see 
-\Sref{ls} and \Sref{normal}), survey-weighted Normal regression 
-specifies a continuous dependent variable as a linear function of a 
-set of explanatory variables.  The survey-weighted normal model 
-reports estimates of model parameters identical to least squares or 
-Normal regression estimates, but uses information from the survey 
-design to correct variance estimates.
-
-The {\tt normal.survey} model accommodates three common types of
-complex survey data.  Each method listed here requires selecting
-specific options which are detailed in the ``Additional Inputs''
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "normal.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted Normal models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights: 
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.  
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that
-    were not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a continuous measure of
-public schools' performance ({\tt api00}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), an indicator for whether each school
-holds classes year round ({\tt yr.rnd}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses school performance on the {\tt meals} and {\tt yr.rnd}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(api00 ~ meals + yr.rnd, model = "normal.survey",  weights=~pw,
-data = apistrat) 
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on academic performance: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(api00 ~ meals + yr.rnd, 
-model = "normal.survey", strata=~stype, fpc=~fpc, data = apistrat) 
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "normal.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-that includes counts of the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-normal.survey} models, create a set of balanced repeated replicate
-(BRR) weights: 
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-@ 
-Estimate a model that regresses counts of patients who arrive alive in
-each hospital on the number of cardiac arrests that each hospital treats, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors.
-<<Replicate.zelig>>= 
-z.out3 <- zelig(alive ~ arrests , model = "poisson.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8))
-@ 
-Generate first
-differences for the effect of minimal versus maximal cardiac arrests
-on numbers of patients who arrive alive: 
-<<Replicate.sim>>= 
-s.out3<- sim(z.out3, x=x.low, x1=x.high) 
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the continuous dependent variable for observation $i$.
-\begin{itemize}
-\item The \emph{stochastic component} is described by a univariate normal
-  model with a vector of means $\mu_i$ and scalar variance $\sigma^2$:
-  \begin{equation*}
-    Y_i \; \sim \; \textrm{Normal}(\mu_i, \sigma^2).
-  \end{equation*}
-
-\item The \emph{systematic component} is
-  \begin{equation*}
-    \mu_i \;= \; x_i \beta,
-  \end{equation*}
-  where $x_i$ is the vector of $k$ explanatory variables and $\beta$ is
-  the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component, $$E(Y) = \mu_i = x_i \beta,$$
-  given a draw of $\beta$ from its posterior.
-
-\item The predicted value ({\tt qi\$pr}) is drawn from the distribution
-  defined by the set of parameters $(\mu_i, \sigma)$.
-
-\item The first difference ({\tt qi\$fd}) is:
-\begin{equation*}
-\textrm{FD}\; = \;E(Y \mid x_1) -  E(Y \mid x)
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*}
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*}
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which
-you may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "normal.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)}, see
-the {\tt coefficients} by using {\tt z.out\$coefficients}, and a
-default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: fitted values.  For the survey-weighted normal model,
-     these are identical to the {\tt linear predictors}.
-   \item {\tt linear.predictors}: fitted values.  For the survey-weighted normal
-     model, these are identical to {\tt fitted.values}.
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distribution defined by $(\mu_i, \sigma)$.
-   \item {\tt qi\$fd}: the simulated first difference in the simulated
-     expected values for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt normal.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt normal.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-\input{cites/normal.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The normal model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
- 
- 
- 
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/vignettes/otherworks.bib b/vignettes/otherworks.bib
deleted file mode 100644
index 6618d8e..0000000
--- a/vignettes/otherworks.bib
+++ /dev/null
@@ -1,8 +0,0 @@
- at book{FitLaiWar04,
-	author = {Garret Fitzmaurice, Nan Laird and James Ware},
-	title =  {Applied Longitudinal Analysis},
-	booktitle = {Applied Longitudinal Analysis},
-	year =      {2004},
-	publisher = {John Wiley and Sons}
-}
-
diff --git a/vignettes/poisson.Rnw b/vignettes/poisson.Rnw
deleted file mode 100644
index 2194225..0000000
--- a/vignettes/poisson.Rnw
+++ /dev/null
@@ -1,241 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=poisson}
-\include{zinput}
-%\VignetteIndexEntry{Poisson Regression for Event Count Dependent Variables}
-%\VignetteDepends{Zelig, stats}
-%\VignetteKeyWords{model, poisson,regression, count}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt poisson}: Poisson Regression for Event Count
-Dependent Variables}\label{poisson}
-
-Use the Poisson regression model if the observations of your dependent
-variable represent the number of independent events that occur during
-a fixed period of time (see the negative binomial model, \Sref{negbin},
-for over-dispersed event counts).  For a Bayesian implementation of
-this model, see \Sref{poisson.bayes}.  
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "poisson", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for poisson regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Example}
-
-Load sample data:  
-<<Example.data>>=
- data(sanction)
-@ 
-Estimate Poisson model:  
-<<Example.zelig>>=
- z.out <- zelig(num ~ target + coop, model = "poisson", data = sanction)
-@ 
-<<Example.summary>>= 
-summary(z.out)
-@ 
-Set values for the explanatory variables to their default mean values:  
-<<Example.setx>>=
- x.out <- setx(z.out)
-@ 
-Simulate fitted values:  
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.out)
-@ 
-<<Example.summary.sim>>= 
-summary(s.out)
-@
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>= 
- plot(s.out)
-@ 
-\end{center}
-
-\subsubsection{Model}
-Let $Y_i$ be the number of independent events that occur during a
-fixed time period. This variable can take any non-negative integer.
-
-\begin{itemize}
-\item The Poisson distribution has \emph{stochastic component}
-  \begin{equation*}
-    Y_i \; \sim \; \textrm{Poisson}(\lambda_i),
-  \end{equation*}
-  where $\lambda_i$ is the mean and variance parameter.
-  
-\item The \emph{systematic component} is 
-  \begin{equation*}
-    \lambda_i \; = \; \exp(x_i \beta),
-  \end{equation*}
-  where $x_i$ is the vector of explanatory variables, and $\beta$ is
-  the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-  
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component, $$E(Y) = \lambda_i =  \exp(x_i
-  \beta),$$ given draws of $\beta$ from its sampling distribution.  
-  
-\item The predicted value ({\tt qi\$pr}) is a random draw from the
-  Poisson distribution defined by mean $\lambda_i$.
-
-\item The first difference in the expected values ({\tt qi\$fd}) is given by:
-\begin{equation*}
-\textrm{FD} \; = \; E(Y | x_1) - E(Y \mid x)
-\end{equation*}
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "poisson", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the fitted values for the systematic
-     component $\lambda$.  
-   \item {\tt linear.predictors}: a vector of $x_{i}\beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values given the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distributions defined by $\lambda_i$.
-   \item {\tt qi\$fd}: the simulated first differences in the expected
-     values given the specified values of {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/poisson}
-\input{citeZelig}
-
-\subsection* {See also}
-The poisson model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{Martin92}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
diff --git a/vignettes/poisson.mixed.Rnw b/vignettes/poisson.mixed.Rnw
deleted file mode 100644
index 425312d..0000000
--- a/vignettes/poisson.mixed.Rnw
+++ /dev/null
@@ -1,178 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=poissonmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects poisson regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,poisson regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt poisson.mixed}: Mixed effects poisson Regression}
-\label{mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. Poisson regression applies to dependent variables that represent the number of independent events that occur during a fixed period of time.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="poisson.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="poisson.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(homerun)
-@
-Estimate model:
-<<Examples.zelig>>=
-z.out1 <- zelig(homeruns ~ player + tag(player - 1 | month), data=homerun, model="poisson.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values:\\
-<<Examples.setx>>=
-x.out <- setx(z.out1)
-@
-Simulate draws using the default bootstrap method and view simulated quantities of interest: \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.out)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-
-\subsubsection{Mixed effects Poisson Regression Model}
-
-Let $Y_{ij}$ be the number of independent events that occur during a fixed time period, realized for observation $j$ in group $i$ as $y_{ij}$, which takes any non-negative integer as its value, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Poisson distribution with mean and variance parameter $\lambda_{ij}$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Poisson}(y_{ij} | \lambda_{ij}) = \frac{\exp(-\lambda_{ij}) \lambda_{ij}^{y_{ij}}}{y_{ij}!}
-\end{equation*}
-where
-\begin{equation*}
-y_{ij} = 0, 1, \ldots
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covarance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim \mathrm{Normal}(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\lambda_{ij} \equiv \exp(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the Poisson distribution defined by mean $ \lambda_{ij} $, for
-\begin{equation*}
-\lambda_{ij} = \exp(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are the mean of simulations of the stochastic component given draws of $\beta$ from its posterior:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \lambda_{ij} = \exp(X_{ij} \beta).
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in expected values, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = E(Y_{ij} | X_{ij}) - E(Y_{ij} | X_{ij}^\prime)
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-
-\subsection* {How to Cite}
-
-\input{cites/logit.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects poisson regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/vignettes/poisson.survey.Rnw b/vignettes/poisson.survey.Rnw
deleted file mode 100644
index 68d7457..0000000
--- a/vignettes/poisson.survey.Rnw
+++ /dev/null
@@ -1,513 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=poissonsurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Poisson Regression for Event Count Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,poisson,event,regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt poisson.survey}: Survey-Weighted Poisson Regression for Event Count Dependent Variables}
-\label{poisson.survey}
-
-The survey-weighted poisson regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional poisson regression model (see 
-\Sref{poisson}), survey-weighted poisson regression specifies a 
-dependent variable representing the number of independent events 
-that occur during a fixed period of time as function of a set of 
-explanatory variables.  The survey-weighted poisson model reports 
-estimates of model parameters identical to conventional poisson 
-estimates, but uses information from the survey design to correct 
-variance estimates.
-
-The {\tt poisson.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "poisson.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted poisson models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights:
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that were
-    not were created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a variable reporting the number of
-times a new student enrolled during the previous school year ({\tt enroll}), 
-a measure of each school's academic performance ({\tt api99}), 
-an indicator for whether each school holds classes year round ({\tt year.rnd}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses {\tt enroll} on {\tt api99} and {\tt year.rnd}:
-<<Existing.zelig>>= 
-z.out1 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", 
-weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-the measure of academic performance:
-<<Existing.setx>>= 
-x.low <- setx(z.out1, api99= quantile(apistrat$api99, 0.2))
-x.high <- setx(z.out1, api99= quantile(apistrat$api99, 0.8))
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
-s.out1 <- sim(z.out1, x=x.low, x1=x.high)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(enroll ~ api99 + yr.rnd , model = "poisson.survey", data = apistrat, 
-  strata=~stype, fpc=~fpc)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "poisson.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-about the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-normal.survey} models, create a set of balanced repeated replicate
-(BRR) weights: 
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-@ 
-Estimate a model that regresses the count of patients who arrived alive at
-the hospital last year on the number of patients treated for cardiac arrests, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors:
-<<Replicate.zelig>>= 
-z.out3 <- zelig(alive ~ arrests , model = "poisson.survey",
-repweights=BRRrep, type="BRR", data=scd)
-summary(z.out3) 
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set the explanatory variable {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8)) 
-@ 
-Generate first
-differences for the effect of high versus low cardiac arrests
-on the count of patients who arrive alive:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the number of independent events that occur during a
-fixed time period. This variable can take any non-negative integer.
-
-\begin{itemize}
-\item The Poisson distribution has \emph{stochastic component}
-  \begin{equation*}
-    Y_i \; \sim \; \textrm{Poisson}(\lambda_i),
-  \end{equation*}
-  where $\lambda_i$ is the mean and variance parameter.
-  
-\item The \emph{systematic component} is 
-  \begin{equation*}
-    \lambda_i \; = \; \exp(x_i \beta),
-  \end{equation*}
-  where $x_i$ is the vector of explanatory variables, and $\beta$ is
-  the vector of coefficients.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is the value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-  
-\item The expected value ({\tt qi\$ev}) is the mean of simulations
-  from the stochastic component, $$E(Y) = \lambda_i =  \exp(x_i
-  \beta),$$ given draws of $\beta$ from its sampling distribution.  
-  
-\item The predicted value ({\tt qi\$pr}) is a random draw from the
-  poisson distribution defined by mean $\lambda_i$.
-
-\item The first difference in the expected values ({\tt qi\$fd}) is given by:
-\begin{equation*}
-\textrm{FD} \; = \; E(Y \mid x_1) - E(Y \mid x)
-\end{equation*}
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "poisson.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the fitted values for the systemic
-     component $\lambda$.  
-   \item {\tt linear.predictors}: a vector of $x_{i}\beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values given the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distributions defined by $\lambda_i$.
-   \item {\tt qi\$fd}: the simulated first differences in the expected
-     values given the specified values of {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt poisson.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt poisson.survey} models in {\tt Zelig}.
-
-
-\subsection* {How to Cite}
-
-\input{cites/poisson.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The Poisson model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
-  
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-
-\end{document}
diff --git a/vignettes/probit.Rnw b/vignettes/probit.Rnw
deleted file mode 100644
index 37b9c2e..0000000
--- a/vignettes/probit.Rnw
+++ /dev/null
@@ -1,241 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=probit}
-\include{zinput}
-%\VignetteIndexEntry{Probit Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{model, probit,regression,dichotomous, binary}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-pkg <- search()
-if(!length(grep("package:Zelig",pkg)))
-library(Zelig)
-@
-
-\section{{\tt probit}: Probit Regression for Dichotomous Dependent Variables}\label{probit}
-
-Use probit regression to model binary dependent variables
-specified as a function of a set of explanatory variables.  For a
-Bayesian implementation of this model, see \Sref{probit.bayes}.  
-
-\subsubsection{Syntax}
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "probit", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out, x1 = NULL)
-\end{verbatim}
-
-\subsubsection{Additional Inputs} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for probit regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE} is
-selected, {\tt zelig()} computes robust standard errors via the {\tt
-sandwich} package (see \cite{Zeileis04}).  The default type of robust
-standard error is heteroskedastic and autocorrelation consistent (HAC),
-and assumes that observations are ordered by time index.
-
-In addition, {\tt robust} may be a list with the following options:  
-\begin{itemize}
-\item {\tt method}:  Choose from 
-\begin{itemize}
-\item {\tt "vcovHAC"}: (default if {\tt robust = TRUE}) HAC standard
-errors. 
-\item {\tt "kernHAC"}: HAC standard errors using the
-weights given in \cite{Andrews91}. 
-\item {\tt "weave"}: HAC standard errors using the
-weights given in \cite{LumHea99}.  
-\end{itemize}  
-\item {\tt order.by}: defaults to {\tt NULL} (the observations are
-chronologically ordered as in the original data).  Optionally, you may
-specify a vector of weights (either as {\tt order.by = z}, where {\tt
-z} exists outside the data frame; or as {\tt order.by = \~{}z}, where
-{\tt z} is a variable in the data frame).  The observations are
-chronologically ordered by the size of {\tt z}.
-\item {\tt \dots}:  additional options passed to the functions 
-specified in {\tt method}.   See the {\tt sandwich} library and
-\cite{Zeileis04} for more options.   
-\end{itemize}
-\end{itemize}
-
-\subsubsection{Examples}
-Attach the sample turnout dataset:
-<<Examples.data>>=
- data(turnout)
-@ 
-Estimate parameter values for the probit regression:
-<<Examples.zelig>>=
- z.out <- zelig(vote ~ race + educate,  model = "probit", data = turnout) 
-@ 
-<<Examples.summary>>=
- summary(z.out)
-@ 
-Set values for the explanatory variables to their default values.
-<<Examples.setx>>=
- x.out <- setx(z.out)
-@ 
-Simulate quantities of interest from the posterior distribution.
-<<Examples.sim>>=
-s.out <- sim(z.out, x = x.out)
-@ 
-<<Examples.summary.sim>>=
-summary(s.out)
-@ 
-
-\subsubsection{Model}
-Let $Y_i$ be the observed binary dependent variable for observation
-$i$ which takes the value of either 0 or 1.
-\begin{itemize}
-\item The \emph{stochastic component} is given by  
-\begin{equation*}
-Y_i \; \sim \; \textrm{Bernoulli}(\pi_i), 
-\end{equation*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is 
-\begin{equation*}
-  \pi_i \; = \; \Phi (x_i \beta)
-\end{equation*}
-where $\Phi(\mu)$ is the cumulative distribution function of the
-Normal distribution with mean 0 and unit variance.
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-
-\item The expected value ({\tt qi\$ev}) is a simulation of predicted
-  probability of success $$E(Y) = \pi_i = \Phi(x_i
-  \beta),$$ given a draw of $\beta$ from its sampling distribution.  
-
-\item The predicted value ({\tt qi\$pr}) is a draw from a Bernoulli
-  distribution with mean $\pi_i$.  
-  
-\item The first difference ({\tt qi\$fd}) in expected values is
-  defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) / \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "probit", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: a vector of the in-sample fitted values.
-   \item {\tt linear.predictors}: a vector of $x_{i}\beta$.  
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values, or predicted
-     probabilities, for the specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from the
-     distributions defined by the predicted probabilities.  
-   \item {\tt qi\$fd}: the simulated first differences in the predicted
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the predicted
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-
-\subsection* {How to Cite} 
-
-\input{cites/probit}
-\input{citeZelig}
-
-\subsection* {See also}
-The probit model is part of the stats package by \citet{VenRip02}.
-Advanced users may wish to refer to \texttt{help(glm)} and
-\texttt{help(family)}, as well as \cite{McCNel89}. Robust standard
-errors are implemented via the sandwich package by \citet{Zeileis04}.
-Sample data are from \cite{KinTomWit00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
diff --git a/vignettes/probit.mixed.Rnw b/vignettes/probit.mixed.Rnw
deleted file mode 100644
index 6885ba0..0000000
--- a/vignettes/probit.mixed.Rnw
+++ /dev/null
@@ -1,185 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=probitmixed}
-\include{zinput}
-%\VignetteIndexEntry{Mixed effects probit regression}
-%\VignetteDepends{Zelig}
-%\VignetteKeyWords{mixed,probit regression}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt probit.mixed}: Mixed effects probit Regression}
-\label{mixed}
-
-Use generalized multi-level linear regression if you have covariates that are grouped according to one or more classification factors. The probit model is appropriate when the dependent variable is dichotomous.
-
-While generally called multi-level models in the social sciences, this class of models is often referred to as mixed-effects models in the statistics literature and as hierarchical models in a Bayesian setting. This general class of models consists of linear models that are expressed as a function of both \emph{fixed effects}, parameters corresponding to an entire population or certain repeatable levels of experimental factors, and \emph{random effects}, parameters corresponding to indiv [...]
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-z.out <- zelig(formula= y ~ x1 + x2 + tag(z1 + z2 | g),
-               data=mydata, model="probit.mixed")
-
-z.out <- zelig(formula= list(mu=y ~ xl + x2 + tag(z1, gamma | g),
-               gamma= ~ tag(w1 + w2 | g)), data=mydata, model="probit.mixed")
-\end{verbatim}
-
-\subsubsection{Inputs}
-
-\noindent {\tt zelig()} takes the following arguments for {\tt mixed}:
-\begin{itemize}
-\item {\tt formula:} a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1 + ... + zn | g)} with {\tt z1 + ... + zn} specifying the model for the random effects and {\tt g} the grouping structure. Random intercept terms are included with the notation {\tt ta [...]
-Alternatively, {\tt formula} may be a list where the first entry, {\tt mu}, is a two-sided linear formula object describing the systematic component of the model, with the response on the left of a {\tt $\tilde{}$} operator and the fixed effects terms, separated by {\tt +} operators, on the right. Any random effects terms are included with the notation {\tt tag(z1, gamma | g)} with {\tt z1} specifying the individual level model for the random effects, {\tt g} the grouping structure and { [...]
-\end{itemize}
-
-\subsubsection{Additional Inputs}
-
-In addition, {\tt zelig()} accepts the following additional arguments for model specification:
-
-\begin{itemize}
-\item {\tt data:} An optional data frame containing the variables named in {\tt formula}. By default, the variables are taken from the environment from which {\tt zelig()} is called.
-\item {\tt na.action:} A function that indicates what should happen when the data contain {\tt NAs}. The default action ({\tt na.fail}) causes {\tt zelig()} to print an error message and terminate if there are any incomplete observations.
-\end{itemize}
-Additionally, users may wish to refer to {\tt lmer} in the package {\tt lme4} for more information, including control parameters for the estimation algorithm and their defaults.
-
-\subsubsection{Examples}
-
-\begin{enumerate}
-\item Basic Example with First Differences \\
-\\
-Attach sample data: \\
-<<Examples.data>>=
-data(voteincome)
-@
-Estimate model:
-<<Examples.zelig>>=
-z.out1 <- zelig(vote ~ education + age + female + tag(1 | state), data=voteincome, model="probit.mixed")
-@
-
-\noindent Summarize regression coefficients and estimated variance of random effects:\\
-<<Examples.summary>>=
-summary(z.out1)
-@
-Set explanatory variables to their default values, with high (80th percentile) and low (20th percentile) values for education:\\
-<<Examples.setx>>=
-x.high <- setx(z.out1, education=quantile(voteincome$education, 0.8))
-x.low <- setx(z.out1, education=quantile(voteincome$education, 0.2))
-@
-Generate first differences for the effect of high versus low education on voting: \\
-<<Examples.sim>>=
-s.out1 <- sim(z.out1, x=x.high, x1=x.low)
-summary(s.out1)
-@
-
-\end{enumerate}
-
-\subsubsection{Mixed effects probit Regression Model}
-
-Let $Y_{ij}$ be the binary dependent variable, realized for observation $j$ in group $i$ as $y_{ij}$ which takes the value of either 0 or 1, for $i = 1, \ldots, M$, $j = 1, \ldots, n_i$.
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by a Bernoulli distribution with mean vector $\pi_{ij}$.
-\begin{equation*}
-Y_{ij} \sim \mathrm{Bernoulli}(y_{ij} | \pi_{ij}) = \pi_{ij}^{y_{ij}} (1 - \pi_{ij})^{1 - y_{ij}}
-\end{equation*}
-where
-\begin{equation*}
-\pi_{ij} = \mathrm{Pr}(Y_{ij} = 1)
-\end{equation*}
-\item The $q$-dimensional vector of \emph{random effects}, $b_i$, is restricted to be mean zero, and therefore is completely characterized by the variance covarance matrix $\Psi$, a $(q \times q)$ symmetric positive semi-definite matrix.
-\begin{equation*}
-b_i \sim Normal(0, \Psi)
-\end{equation*}
-\item The \emph{systematic component} is
-\begin{equation*}
-\pi_{ij} \equiv \Phi(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-where $\Phi(\mu)$ is the cumulative distribution function of the Normal distribution with mean 0 and unit variance, and \\
-where $X_{ij}$ is the $(n_i \times p \times M)$ array of known fixed effects explanatory variables, $\beta$ is the $p$-dimensional vector of fixed effects coefficients, $Z_{ij}$ is the $(n_i \times q \times M)$ array of known random effects explanatory variables and $b_i$ is the $q$-dimensional vector of random effects.
-\end{itemize}
-
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The predicted values ({\tt qi\$pr}) are draws from the Bernoulli distribution with mean equal to the simulated expected value, $\pi_{ij}$ for
-\begin{equation*}
-\pi_{ij} = \Phi(X_{ij} \beta + Z_{ij} b_i)
-\end{equation*}
-given $X_{ij}$ and $Z_{ij}$ and simulations of $\beta$ and $b_i$ from their posterior distributions. The estimated variance covariance matrices are taken as correct and are themselves not simulated.
-
-\item The expected values ({\tt qi\$ev}) are simulations of the predicted probability of a success given draws of $\beta$ from its posterior:
-\begin{equation*}
-E(Y_{ij} | X_{ij}) = \pi_{ij} = \Phi(X_{ij} \beta).
-\end{equation*}
-
-\item The first difference ({\tt qi\$fd}) is given by the difference in predicted probabilities, conditional on $X_{ij}$ and $X_{ij}^\prime$, representing different values of the explanatory variables.
-\begin{equation*}
-FD(Y_{ij} | X_{ij}, X_{ij}^\prime) = Pr(Y_{ij} = 1 | X_{ij}) - Pr(Y_{ij} = 1 | X_{ij}^\prime)
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-RR(Y_{ij} | X_{ij}, X_{ij}^{\prime}) = \frac{Pr(Y_{ij} = 1 | X_{ij})}{Pr(Y_{ij} = 1 | X_{ij}^{\prime})}
-\end{equation*}
-
-\item In conditional prediction models, the average predicted treatment effect ({\tt qi\$att.pr}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - \widehat{Y_{ij}(t_{ij} = 0)} \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $Y_{ij}(t_{ij} = 0)$, the counterfactual predicted value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\item In conditional prediction models, the average expected treatment effect ({\tt qi\$att.ev}) for the treatment group is given by
-\begin{equation*}
-\frac{1}{\sum_{i = 1}^M \sum_{j = 1}^{n_i} t_{ij}} \sum_{i = 1}^M \sum_{j:t_{ij} = 1}^{n_i} \{ Y_{ij} (t_{ij} = 1) - E[Y_{ij}(t_{ij} = 0)] \},
-\end{equation*}
-where $t_{ij}$ is a binary explanatory variable defining the treatment $(t_{ij} = 1)$ and control $(t_{ij} = 0)$ groups. Variation in the simulations is due to uncertainty in simulating $E[Y_{ij}(t_{ij} = 0)]$, the counterfactual expected value of $Y_{ij}$ for observations in the treatment group, under the assumption that everything stays the same except that the treatment indicator is switched to $t_{ij} = 0$.
-
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you may view. You may examine the available information in {\tt z.out} by using {\tt slotNames(z.out)}, see the fixed effect coefficients by using {\tt summary(z.out)@coefs}, and a default summary of information through {\tt summary(z.out)}. Other elements available through the {\tt \@} operator are listed below.
-\begin{itemize}
-\item From the {\tt zelig()} output stored in {\tt summary(z.out)}, you may extract:
-\begin{itemize}
-\item[--] {\tt fixef}: numeric vector containing the conditional estimates of the fixed effects.
-\item[--] {\tt ranef}: numeric vector containing the conditional modes of the random effects.
-\item[--] {\tt frame}: the model frame for the model.
-\end{itemize}
-\item From the {\tt sim()} output stored in {\tt s.out}, you may extract quantities of interest stored in a data frame:
-\begin{itemize}
-\item {\tt qi\$pr}: the simulated predicted values drawn from the distributions defined by the expected values.
-\item {\tt qi\$ev}: the simulated expected values for the specified values of x.
-\item {\tt qi\$fd}: the simulated first differences in the expected values for the values specified in x and x1.
-\item {\tt qi\$ate.pr}: the simulated average predicted treatment effect for the treated from conditional prediction models.
-\item {\tt qi\$ate.ev}: the simulated average expected treatment effect for the treated from conditional prediction models.
-\end{itemize}
-\end{itemize}
-
-
-
-\subsection* {How to Cite}
-
-\input{cites/probit.mixed}
-\input{citeZelig}
-
-\subsection* {See also}
-Mixed effects probit regression is part of the {\tt lme4} package by Douglas M. Bates \citep{Bates07}. For a detailed discussion of mixed-effects models, please see \cite{JosBat00}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after <- search()
- torm <- setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
-\end{document}
diff --git a/vignettes/probit.survey.Rnw b/vignettes/probit.survey.Rnw
deleted file mode 100644
index 7ec5ee0..0000000
--- a/vignettes/probit.survey.Rnw
+++ /dev/null
@@ -1,521 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=probitsurvey}
-\include{zinput}
-%\VignetteIndexEntry{Survey-Weighted Probit Regression for Dichotomous Dependent Variables}
-%\VignetteDepends{Zelig, stats, survey}
-%\VignetteKeyWords{model,probit ,dichotomous, regression, survey}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography* 
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>= 
-library(Zelig)
-library(survey) 
-@
-\section{{\tt probit.survey}: Survey-Weighted Probit Regression for Dichotomous Dependent Variables}
-\label{probit.survey}
-
-The survey-weighted probit regression model is appropriate for 
-survey data obtained using complex sampling techniques, such as 
-stratified random or cluster sampling (e.g., not simple random 
-sampling).  Like the conventional probit regression models (see 
-\Sref{probit}), survey-weighted probit regression specifies a 
-dichotomous dependent variable as function of a set of explanatory 
-variables.  The survey-weighted probit model reports estimates of 
-model parameters identical to conventional probit estimates, but uses 
-information from the survey design to correct variance estimates.
-
-The {\tt probit.survey} model accommodates three common types of 
-complex survey data.  Each method listed here requires selecting 
-specific options which are detailed in the ``Additional Inputs'' 
-section below.  \begin{enumerate}
-
-\item \textbf{Survey weights}:  Survey data are often published along
-with weights for each observation.  For example, if a survey
-intentionally over-samples a particular type of case, weights can be
-used to correct for the over-representation of that type of case in
-the dataset. Survey weights come in two forms:
-\begin{enumerate}
-
-\item \textit{Probability} weights report the probability that each
-case is drawn from the population.  For each stratum or cluster, 
-this is computed as the number of observations in the sample drawn 
-from that group divided by the number of observations in the 
-population in the group.
-
-\item \textit{Sampling} weights are the inverse of the probability
-weights.   
-
-\end{enumerate}
-
-\item \textbf{Strata/cluster identification}:  A complex survey 
-dataset may include variables that identify the strata or cluster 
-from which observations are drawn.  For stratified random sampling 
-designs, observations may be nested in different strata.  There are 
-two ways to employ these identifiers:
-
-\begin{enumerate}
-
-\item Use \textit{finite population corrections} to specify the
-total number of cases in the stratum or cluster from which each
-observation was drawn.
-
-\item For stratified random sampling designs, use the raw strata ids
-to compute sampling weights from the data.
-
-\end{enumerate}
-
-\item \textbf{Replication weights}: To preserve the anonymity of
-survey participants, some surveys exclude strata and cluster ids 
-from the public data and instead release only pre-computed replicate 
-weights.
-
-\end{enumerate}
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Y ~ X1 + X2, model = "probit.survey", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-
-
-\subsubsection{Additional Inputs}
-
-In addition to the standard {\tt zelig} inputs (see
-\Sref{Zcommands}), survey-weighted probit models accept the following
-optional inputs:
-\begin{enumerate}
-
-\item Datasets that include survey weights:
-
-\begin{itemize}  
-
-\item {\tt probs}: An optional formula or numerical vector specifying each 
-case's probability weight, the probability that the case was 
-selected.  Probability weights need not (and, in most cases, will 
-not) sum to one.  Cases with lower probability weights are weighted 
-more heavily in the computation of model coefficients.
-
-\item {\tt weights}: An optional numerical vector specifying each 
-case's sample weight, the inverse of the probability that the case 
-was selected.  Sampling weights need not (and, in most cases, will 
-not) sum to one.  Cases with higher sampling weights are weighted 
-more heavily in the computation of model coefficients.
-
-\end{itemize}
-
-\item Datasets that include strata/cluster identifiers:
-
-\begin{itemize} 
-
-\item {\tt ids}: An optional formula or numerical vector identifying the 
-cluster from which each observation was drawn (ordered from largest level to smallest level).  
-For survey designs  that do not involve cluster sampling, {\tt ids} defaults to {\tt NULL}.
-
-\item {\tt fpc}: An optional numerical vector identifying each 
-case's frequency weight, the total number of units in the population 
-from which each observation was sampled. 
-
-\item {\tt strata}: An optional formula or vector identifying the 
-stratum from which each observation was sampled.  Entries may be 
-numerical, logical, or strings.  For survey designs that do not 
-involve cluster sampling, {\tt strata} defaults to {\tt NULL}. 
-
-\item {\tt nest}: An optional logical value specifying whether 
-primary sampling units (PSUs) have non-unique ids across multiple 
-strata.  {\tt nest=TRUE} is appropriate when PSUs reuse the same 
-identifiers across strata.  Otherwise, {\tt nest} defaults to {\tt 
-FALSE}. 
-
-\item {\tt check.strata}: An optional input specifying whether to 
-check that clusters are nested in strata.  If {\tt check.strata} is 
-left at its default, {\tt !nest}, the check is not performed.  If 
-{\tt check.strata} is specified as {\tt TRUE}, the check is carried 
-out.  
-
-\end{itemize}
-
-\item Datasets that include replication weights:
-\begin{itemize}
-  \item {\tt repweights}: A formula or matrix specifying
-    replication weights, numerical vectors of weights used
-    in a process in which the sample is repeatedly re-weighted and parameters
-    are re-estimated in order to compute the variance of the model parameters.
-  \item {\tt type}: A string specifying the type of replication weights being used.
-    This input is required if replicate weights are specified.  The following types
-    of replication weights are recognized: {\tt"BRR"}, {\tt "Fay"},
-    {\tt "JK1"}, {\tt "JKn"}, {\tt "bootstrap"}, or {\tt "other"}.
-  \item {\tt weights}: An optional vector or formula specifying each case's sample weight,
-    the inverse of the probability that the case was selected.  If a survey includes both sampling 
-    weights and replicate weights separately for the same survey, both should be included in 
-    the model specification.  In these cases, sampling weights are used to correct potential biases 
-    in the computation of coefficients and replication weights are used to compute the variance 
-    of coefficient estimates.  
-  \item {\tt combined.weights}: An optional logical value that 
-    should be specified as {\tt TRUE} if the replicate weights include the sampling weights.  Otherwise, 
-    {\tt combined.weights} defaults to {\tt FALSE}.  
-  \item {\tt rho}:  An optional numerical value specifying a shrinkage factor
-    for replicate weights of type {\tt "Fay"}.
-  \item {\tt bootstrap.average}: An optional numerical input specifying
-    the number of iterations over which replicate weights of type {\tt "bootstrap"} were averaged. 
-    This input should be left as {\tt NULL} for {\tt "bootstrap"} weights that
-    were not created by averaging.
-\item {\tt scale}:  When replicate weights are included,
-    the variance is computed as the sum of squared deviations of the replicates from their mean.
-    {\tt scale} is an optional overall multiplier for the standard deviations.
-\item {\tt rscale}: Like {\tt scale}, {\tt rscale} specifies an 
-optional vector of replicate-specific multipliers for the squared 
-deviations used in variance computation. 
-
-\item {\tt fpc}: For models in which {\tt "JK1"}, {\tt "JKn"}, or 
-{\tt "other"} replicates are specified, {\tt fpc} is an optional 
-numerical vector (with one entry for each replicate) designating the 
-replicates' finite population corrections.   
-
-\item {\tt fpctype}: When a finite population correction is included 
-as an {\tt fpc} input, {\tt fpctype} is a required input specifying 
-whether the input to {\tt fpc} is a sampling fraction ({\tt 
-fpctype="fraction"}) or a direct correction ({\tt 
-fpctype="correction"}).  
-
-\item {\tt return.replicates}: An optional logical value    
-specifying whether the replicates should be returned as a component 
-of the output.  {\tt return.replicates} defaults to {\tt FALSE}.  
-
-\end{itemize}
-
-\end{enumerate}
-
-\subsubsection{Examples}
-
-\begin{enumerate} 
-
-\item A dataset that includes survey weights:
-
-Attach the sample data: 
-<<Existing.data>>= 
-data(api, package="survey") 
-@ 
-
-Suppose that a dataset included a dichotomous indicator 
-for whether each public school attends classes year round ({\tt yr.rnd}), a measure of 
-the percentage of students at each school who receive subsidized 
-meals ({\tt meals}), a measure of the percentage of students at 
-each school who are new to the school ({\tt mobility}), and sampling 
-weights computed by the survey house ({\tt pw}).  Estimate a model
-that regresses the year-round schooling indicator on the {\tt meals} and {\tt mobility}
-variables:
-<<Existing.zelig>>= 
-z.out1 <- zelig(yr.rnd ~ meals + mobility, model = "probit.survey", weights=~pw, data = apistrat)
-@ 
-Summarize regression coefficients:
-<<Existing.summary>>=
- summary(z.out1)
-@ 
-Set explanatory variables to their default (mean/mode) values, and
-set a high (80th percentile) and low (20th percentile) value for
-``meals'': 
-<<Existing.setx>>= 
-x.low <- setx(z.out1, meals=quantile(apistrat$meals, 0.2)) 
-x.high <- setx(z.out1, meals=quantile(apistrat$meals, 0.8)) 
-@ 
-Generate first differences for the
-effect of high versus low concentrations of children receiving
-subsidized meals on the probability of holding school year-round: 
-<<Existing.sim>>=
- s.out1 <- sim(z.out1, x = x.high, x1 = x.low)
-@ 
-<<Existing.summary.sim>>=
- summary(s.out1)
-@ 
-Generate a visual summary of the quantities of interest:
-\begin{center}
-<<label=ExistingPlot,fig=true,echo=true>>=
- plot(s.out1)
-@
-\end{center}
-
-\item  A dataset that includes strata/cluster identifiers:
-
-Suppose that the survey house that provided the dataset used in the
-previous example excluded sampling weights but made other details
-about the survey design available.  A user can still estimate a model
-without sampling weights that instead uses inputs that identify the
-stratum and/or cluster to which each observation belongs and the
-size of the finite population from which each observation was drawn.
-
-Continuing the example above, suppose the survey house drew at
-random a fixed number of elementary schools, a fixed number of
-middle schools, and a fixed number of high schools. If the variable
-{\tt stype} is a vector of characters ({\tt "E"} for elementary
-schools, {\tt "M"} for middle schools, and {\tt "H"} for high schools)
-that identifies the type of school each case
-represents and {\tt fpc} is a numerical vector that identifies for
-each case the total number of schools of the same type in the
-population, then the user could estimate the following model:
-
-<<Complex.zelig>>= 
-z.out2 <- zelig(yr.rnd ~ meals + mobility, model = "probit.survey", strata=~stype, fpc=~fpc, data = apistrat)
-@
-Summarize the regression output:
-<<Complex.output>>= 
-summary(z.out2) 
-@ 
-The coefficient estimates for this example are
-identical to the point estimates in the first example, when
-pre-existing sampling weights were used.  When sampling weights are
-omitted, they are estimated automatically for {\tt "probit.survey"}
-models based on the user-defined description of sampling designs. 
-
-Moreover, because the user provided information about the survey
-design, the standard error estimates are lower in this example than
-in the previous example, in which the user omitted variables pertaining
-to the details of the complex survey design.
-
-\item A dataset that includes replication weights:
-
-Consider a dataset that includes information for a sample of hospitals
-about the number of out-of-hospital cardiac arrests that each
-hospital treats and the number of patients who arrive alive
-at each hospital: 
-<<Replicate.data>>= 
-data(scd, package="survey") 
-@ 
-Survey houses sometimes supply
-replicate weights (in lieu of details about the survey design).  For the sake
-of illustrating how replicate weights can be used as inputs in {\tt
-probit.survey} models, create a set of balanced repeated replicate
-(BRR) weights and an (artificial) dependent variable to simulate an indicator 
-for whether each hospital was sued:
-<<Replicate.rw>>= 
-BRRrep<-2*cbind(c(1,0,1,0,1,0),c(1,0,0,1,0,1), c(0,1,1,0,0,1),c(0,1,0,1,1,0)) 
-scd$sued <- as.vector(c(0,0,0,1,1,1))
-@ 
-Estimate a model that regresses the indicator for hospitals that were
-sued on the number of patients who arrive alive in
-each hospital and the number of cardiac arrests that each hospital treats, using
-the BRR replicate weights in {\tt BRRrep} to compute standard errors.
-<<Replicate.zelig>>= 
-z.out3 <- zelig(formula=sued ~ arrests + alive , model = "probit.survey", 
-  repweights=BRRrep, type="BRR", data=scd)
-@
-Summarize the regression coefficients: 
-<<Replicate.summary>>=
- summary(z.out3)
-@ 
-Set {\tt alive} at its mean and set {\tt arrests} at its 20th and 80th quantiles:
-<<Replicate.setx>>= 
-x.low <- setx(z.out3, arrests = quantile(scd$arrests, .2))
-x.high <- setx(z.out3, arrests = quantile(scd$arrests,.8)) 
-@ 
-Generate first
-differences for the effect of high versus low cardiac arrests
-on the probability that a hospital will be sued:
-<<Replicate.sim>>= 
-s.out3 <- sim(z.out3, x=x.high, x1=x.low)
-@ 
-<<Replicate.summary.sim>>=
- summary(s.out3)
-@ 
-Generate a visual summary of quantities of interest:
-\begin{center}
-<<label=ReplicatePlot,fig=true,echo=true>>=
- plot(s.out3)
-@
-\end{center}
-
-
-\end{enumerate}
-
-\subsubsection{Model}
-Let $Y_i$ be the observed binary dependent variable for observation
-$i$ which takes the value of either 0 or 1.
-\begin{itemize}
-\item The \emph{stochastic component} is given by  
-\begin{equation*}
-Y_i \; \sim \; \textrm{Bernoulli}(\pi_i), 
-\end{equation*}
-where $\pi_i=\Pr(Y_i=1)$.
-
-\item The \emph{systematic component} is 
-\begin{equation*}
-  \pi_i \; = \; \Phi (x_i \beta)
-\end{equation*}
-where $\Phi(\mu)$ is the cumulative distribution function of the
-Normal distribution with mean 0 and unit variance.
-\end{itemize}
-
-\subsubsection{Variance}
-
-When replicate weights are not used, the variance of the
-coefficients is estimated as
-\[
-\hat{\boldsymbol{\Sigma}} \left[
- \sum_{i=1}^n
-\frac{(1-\pi_i)}{\pi_i^2}
-(\mathbf{X}_i(Y_i-\mu_i))^\prime(\mathbf{X}_i(Y_i-\mu_i)) + 2
-\sum_{i=1}^n \sum_{j=i+1}^n \frac{(\pi_{ij} - \pi_i\pi_j)}{\pi_i
-\pi_j \pi_{ij}}(\mathbf{X}_i(Y_i-\mu_i))^\prime
-(\mathbf{X}_j(Y_j-\mu_j)) \right] \hat{\boldsymbol{\Sigma}}
-\]
-where ${\pi_i}$ is the probability of case $i$ being sampled,
-$\mathbf{X}_i$ is a vector of the values of the explanatory
-variables for case $i$, $Y_i$ is value of the dependent variable for
-case $i$, $\hat{\mu}_i$ is the predicted value of the dependent
-variable for case $i$ based on the linear model estimates, and
-$\hat{\boldsymbol{\Sigma}}$ is the conventional variance-covariance
-matrix in a parametric glm. This statistic is derived from the
-method for estimating the variance of sums described in \cite{Bin83}
-and the Horvitz-Thompson estimator of the variance of a sum
-described in \cite{HorTho52}.
-
-When replicate weights are used, the model is re-estimated for each
-set of replicate weights, and the variance of each parameter is
-estimated by summing the squared deviations of the replicates from
-their mean.
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-
-\item The expected value ({\tt qi\$ev}) is a simulation of predicted
-  probability of success $$E(Y) = \pi_i = \Phi(x_i
-  \beta),$$ given a draw of $\beta$ from its sampling distribution.  
-
-\item The predicted value ({\tt qi\$pr}) is a draw from a Bernoulli
-  distribution with mean $\pi_i$.  
-  
-\item The first difference ({\tt qi\$fd}) in expected values is
-  defined as
-\begin{equation*}
-\textrm{FD} = \Pr(Y = 1 \mid x_1) - \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item The risk ratio ({\tt qi\$rr}) is defined as
-\begin{equation*}
-\textrm{RR} = \Pr(Y = 1 \mid x_1) / \Pr(Y = 1 \mid x).
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the treatment
-    ($t_i=1$) and control ($t_i=0$) groups.  Variation in the
-    simulations are due to uncertainty in simulating $E[Y_i(t_i=0)]$,
-    the counterfactual expected value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  Variation in
-    the simulations are due to uncertainty in simulating
-    $\widehat{Y_i(t_i=0)}$, the counterfactual predicted value of
-    $Y_i$ for observations in the treatment group, under the
-    assumption that everything stays the same except that the
-    treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\ x,
-  model = "probit.survey", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may
-  extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt residuals}: the working residuals in the final iteration
-     of the IWLS fit.
-   \item {\tt fitted.values}: the vector of fitted values for the
-     systemic component, $\pi_i$.
-   \item {\tt linear.predictors}: the vector of $x_{i}\beta$
-   \item {\tt aic}: Akaike's Information Criterion (minus twice the
-     maximized log-likelihood plus twice the number of coefficients).
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt data}: the name of the input data frame.  
-   \end{itemize}
-
-\item From {\tt summary(z.out)}, you may extract: 
-   \begin{itemize}
-   \item {\tt coefficients}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \item{\tt cov.scaled}: a $k \times k$ matrix of scaled covariances.
-   \item{\tt cov.unscaled}: a $k \times k$ matrix of unscaled
-     covariances.  
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected probabilities for the
-     specified values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values for the
-     specified values of {\tt x}.
-   \item {\tt qi\$fd}: the simulated first difference in the expected
-     probabilities for the values specified in {\tt x} and {\tt x1}.
-   \item {\tt qi\$rr}: the simulated risk ratio for the expected
-     probabilities simulated from {\tt x} and {\tt x1}.
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-When users estimate {\tt probit.survey} models with replicate weights in {\tt Zelig}, an 
-object called {\tt .survey.prob.weights} is created in the global environment.  
-{\tt Zelig} will over-write any existing object with that name, and users 
-are therefore advised to re-name any object called {\tt .survey.prob.weights} before using {\tt probit.survey} models in {\tt Zelig}.
-
-\subsection* {How to Cite}
-
-\input{cites/probit.survey}
- \input{citeZelig}
- 
- \subsection* {See also}
- 
- Survey-weighted linear models and the sample data used in the
- examples above are a part of the {\tt survey} package by Thomas
- Lumley. Users may wish to refer to the help files for the three
- functions that Zelig draws upon when estimating survey-weighted
- models, namely, {\tt help(svyglm)}, {\tt help(svydesign)}, and {\tt
- help(svrepdesign)}.  The probit model is part of the stats package
- by \citet{VenRip02}. Advanced users may wish to refer to
- \texttt{help(glm)} and \texttt{help(family)}, as well as
- \cite{McCNel89}.
-  
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-
-<<afterpkgs, echo=FALSE>>=
-  after<-search()
-  torm<-setdiff(after,before)
-  for (pkg in torm)
-  detach(pos=match(pkg,search()))
-@
-
-\end{document}
diff --git a/vignettes/upquote.sty b/vignettes/upquote.sty
deleted file mode 100644
index 67b4703..0000000
--- a/vignettes/upquote.sty
+++ /dev/null
@@ -1,76 +0,0 @@
-\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesPackage{upquote}[2003/08/11 v1.1 Covington's upright-quote
-                          modification to verbatim and verb]
-
-%% Copyright 2000 Michael A. Covington.
-%% Copyright 2003 Michael A. Covington, Frank Mittelbach.
-%%
-%% It may be distributed and/or modified under the
-%% conditions of the LaTeX Project Public License, either version 1.2
-%% of this license or (at your option) any later version.
-%% The latest version of this license is in
-%%    http://www.latex-project.org/lppl.txt
-%% and version 1.2 or later is part of all distributions of LaTeX
-%% version 1999/12/01 or later.
-%%
-
-% Problem: Computer Modern Typewriter is the best font for program listings,
-%           *but* it prints ` ' as bent opening and closing single quotes.
-%           Other fonts, and most programming languages, print ` as a grave
-%           accent and ' upright; ' is used both to open and to close quoted
-%           strings.
-
-%           See also M. Covington, "Computer Languages in Type," Journal of
-%           Scholarly Publishing 26.1:34-41 (1994).
-
-% Solution: This package switches the typewriter font to Computer Modern
-%           Typewriter (regardless of other fonts in use, so long as this
-%           package is called afterward) and modifies the behavior of
-%           verbatim, verbatim*, verb, and verb* to print ` ' the desired way.
-%           It does not affect \tt, \texttt, etc.
-
-% Author:   Michael A. Covington
-%           Artificial Intelligence Center
-%           The University of Georgia
-%           http://www.ai.uga.edu/~mc
-%
-%           and
-%
-%           Covington Innovations (Consulting and Typesetting)
-%           http://www.CovingtonInnovations.com
-
-% The modification is done by adding instructions to \@noligs,
-% which is called by verbatim and verb in order to turn the
-% characters ` < > , ' - into active characters that merely
-% print themselves rather than activating ligatures.
-%
-% What is added is code to redefine ` as grave and ' as upright single quote.
-%
-
-% Bug fix, 2000/12/11: previously, '0 (or ' and any digit) would print as a
-% nonexistent character.  (The reason: \char13 or \char18 was combining with
-% the digit to make \char130, etc.)  Fixed by adding curly brackets.
-
-
-
-%% Rewritten by FMi 2003/06/19
-%
-%
-% Use textcomp official chars so that change works in various
-% encodings
-%
-% Extend \@noligs (this also works with the latest listings package
-% that recognizes that the package was loaded)
-
-\RequirePackage{textcomp}
-
-\begingroup
-\catcode`'=\active               
-\catcode``=\active               
-\g at addto@macro\@noligs
-   {\let`\textasciigrave
-    \let'\textquotesingle}
-\endgroup
-
-
-\endinput
diff --git a/vignettes/weibull.Rnw b/vignettes/weibull.Rnw
deleted file mode 100644
index d25bded..0000000
--- a/vignettes/weibull.Rnw
+++ /dev/null
@@ -1,292 +0,0 @@
-\SweaveOpts{results=hide, prefix.string=weibull}
-\include{zinput}
-%\VignetteIndexEntry{Weibull Regression for Duration Dependent Variables}
-%\VignetteDepends{Zelig, survival}
-%\VignetteKeyWords{model, weibull,regression,bounded, duration}
-%\VignettePackage{Zelig}
-\begin{document}
-\nobibliography*
-<<beforepkgs, echo=FALSE>>=
- before=search()
-@
-
-<<loadLibrary, echo=F,results=hide>>=
-library(Zelig)
-@
-
-\section{{\tt weibull}: Weibull Regression for Duration
-Dependent Variables}\label{weibull}
-
-Choose the Weibull regression model if the values in your dependent
-variable are duration observations.  The Weibull model relaxes the
-exponential model's (see \Sref{exp}) assumption of constant hazard,
-and allows the hazard rate to increase or decrease monotonically with
-respect to elapsed time.
-
-\subsubsection{Syntax}
-
-\begin{verbatim}
-> z.out <- zelig(Surv(Y, C) ~ X1 + X2, model = "weibull", data = mydata)
-> x.out <- setx(z.out)
-> s.out <- sim(z.out, x = x.out)
-\end{verbatim}
-Weibull models require that the dependent variable be in the form {\tt
-  Surv(Y, C)}, where {\tt Y} and {\tt C} are vectors of length $n$.
-For each observation $i$ in 1, \dots, $n$, the value $y_i$ is the
-duration (lifetime, for example), and the associated $c_i$ is a binary
-variable such that $c_i = 1$ if the duration is not censored ({\it
-  e.g.}, the subject dies during the study) or $c_i = 0$ if the
-duration is censored ({\it e.g.}, the subject is still alive at the
-end of the study).  If $c_i$ is omitted, all Y are assumed to be
-completed; that is, time defaults to 1 for all observations.
-
-\subsubsection{Input Values} 
-
-In addition to the standard inputs, {\tt zelig()} takes the following
-additional options for weibull regression:  
-\begin{itemize}
-\item {\tt robust}: defaults to {\tt FALSE}.  If {\tt TRUE}, {\tt
-zelig()} computes robust standard errors based on sandwich estimators
-(see \cite{Huber81} and \cite{White80}) based on the options in {\tt
-cluster}.
-\item {\tt cluster}:  if {\tt robust = TRUE}, you may select a
-variable to define groups of correlated observations.  Let {\tt x3} be
-a variable that consists of either discrete numeric values, character
-strings, or factors that define strata.  Then
-\begin{verbatim}
-> z.out <- zelig(y ~ x1 + x2, robust = TRUE, cluster = "x3", 
-                 model = "exp", data = mydata)
-\end{verbatim}
-means that the observations can be correlated within the strata defined by
-the variable {\tt x3}, and that robust standard errors should be
-calculated according to those clusters.  If {\tt robust = TRUE} but
-{\tt cluster} is not specified, {\tt zelig()} assumes that each
-observation falls into its own cluster.  
-\end{itemize}  
-
-\subsubsection{Example}
-
-Attach the sample data: 
-<<Example.data>>=
- data(coalition)
-@ 
-Estimate the model: 
-<<Example.zelig>>=
- z.out <- zelig(Surv(duration, ciep12) ~ fract + numst2, model = "weibull",
-                 data = coalition)
-@ 
-View the regression output:  
-<<Example.summary>>=
- summary(z.out)
-@ 
-Set the baseline values (with the ruling coalition in the minority)
-and the alternative values (with the ruling coalition in the majority)
-for X:
-<<Example.setx>>=
- x.low <- setx(z.out, numst2 = 0)
- x.high <- setx(z.out, numst2 = 1)
-@ 
-Simulate expected values ({\tt qi\$ev}) and first differences ({\tt qi\$fd}):
-<<Example.sim>>=
- s.out <- sim(z.out, x = x.low, x1 = x.high)
-@
-<<Example.summary.sim>>= 
- summary(s.out)
-@  
-\begin{center}
-<<label=ExamplePlot,fig=true,echo=true>>= 
-plot(s.out)
-@ 
-\end{center}
-
-\subsubsection{Model}
-Let $Y_i^*$ be the survival time for observation $i$. This variable
-might be censored for some observations at a fixed time $y_c$ such
-that the fully observed dependent variable, $Y_i$, is defined as
-\begin{equation*}
-  Y_i = \left\{ \begin{array}{ll}
-      Y_i^* & \textrm{if }Y_i^* \leq y_c \\
-      y_c & \textrm{if }Y_i^* > y_c 
-    \end{array} \right.
-\end{equation*}
-
-\begin{itemize}
-\item The \emph{stochastic component} is described by the distribution
-  of the partially observed variable $Y^*$.  We assume $Y_i^*$ follows
-  the Weibull distribution whose density function is given by
-  \begin{equation*}
-    f(y_i^*\mid \lambda_i, \alpha) = \frac{\alpha}{\lambda_i^\alpha}
-    y_i^{* \alpha-1} \exp \left\{ -\left( \frac{y_i^*}{\lambda_i}
-\right)^{\alpha} \right\}
-  \end{equation*}
-  for $y_i^* \ge 0$, the scale parameter $\lambda_i > 0$, and the shape
-  parameter $\alpha > 0$. The mean of this distribution is $\lambda_i
-  \Gamma(1 + 1 / \alpha)$. When $\alpha = 1$, the distribution reduces to
-  the exponential distribution (see Section~\ref{exp}).  (Note that
-the output from {\tt zelig()} parameterizes {\tt scale}$ = 1 / \alpha$.)
-
-In addition, survival models like the Weibull have three additional
-properties.  The hazard function $h(t)$ measures the probability of
-not surviving past time $t$ given survival up to $t$. In general,
-the hazard function is equal to $f(t)/S(t)$ where the survival
-function $S(t) = 1 - \int_{0}^t f(s) ds$ represents the fraction still
-surviving at time $t$.  The cumulative hazard function $H(t)$
-describes the probability of dying before time $t$.  In general,
-$H(t)= \int_{0}^{t} h(s) ds = -\log S(t)$.  In the case of the Weibull
-model,
-\begin{eqnarray*}
-h(t) &=& \frac{\alpha}{\lambda_i^{\alpha}} t^{\alpha - 1}  \\
-S(t) &=&  \exp \left\{ -\left( \frac{t}{\lambda_i} \right)^{\alpha} \right\} \\
-H(t) &=& \left( \frac{t}{\lambda_i} \right)^{\alpha}
-\end{eqnarray*}
-For the Weibull model, the hazard function $h(t)$ can increase or
-decrease monotonically over time.  
-
-\item The \emph{systematic component} $\lambda_i$ is modeled as
-  \begin{equation*}
-    \lambda_i = \exp(x_i \beta),
-  \end{equation*}
-  where $x_i$ is the vector of explanatory variables, and $\beta$ is
-  the vector of coefficients.
-  
-\end{itemize}
-
-\subsubsection{Quantities of Interest}
-
-\begin{itemize}
-\item The expected values ({\tt qi\$ev}) for the Weibull model are
-  simulations of the expected duration:
-\begin{equation*}
-E(Y) = \lambda_i \, \Gamma (1 + \alpha^{-1}),
-\end{equation*}
-given draws of $\beta$ and $\alpha$ from their sampling
-distributions. 
-
-\item The predicted value ({\tt qi\$pr}) is drawn from a distribution
-  defined by $(\lambda_i, \alpha)$.  
-
-\item The first difference ({\tt qi\$fd}) in expected value is
-\begin{equation*}
-\textrm{FD} = E(Y \mid x_1) - E(Y \mid x). 
-\end{equation*}
-
-\item In conditional prediction models, the average expected treatment
-  effect ({\tt att.ev}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      E[Y_i(t_i=0)] \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups. When
-    $Y_i(t_i=1)$ is censored rather than observed, we replace it with
-    a simulation from the model given available knowledge of the
-    censoring process.  Variation in the simulations is due to
-    uncertainty in simulating $E[Y_i(t_i=0)]$, the counterfactual
-    expected value of $Y_i$ for observations in the treatment group,
-    under the assumption that everything stays the same except that
-    the treatment indicator is switched to $t_i=0$.
-
-\item In conditional prediction models, the average predicted treatment
-  effect ({\tt att.pr}) for the treatment group is 
-    \begin{equation*} \frac{1}{\sum_{i=1}^n t_i}\sum_{i:t_i=1}^n \left\{ Y_i(t_i=1) -
-      \widehat{Y_i(t_i=0)} \right\},
-    \end{equation*} 
-    where $t_i$ is a binary explanatory variable defining the
-    treatment ($t_i=1$) and control ($t_i=0$) groups.  When
-    $Y_i(t_i=1)$ is censored rather than observed, we replace it with
-    a simulation from the model given available knowledge of the
-    censoring process.  Variation in the simulations is due to
-    uncertainty in simulating $\widehat{Y_i(t_i=0)}$, the
-    counterfactual predicted value of $Y_i$ for observations in the
-    treatment group, under the assumption that everything stays the
-    same except that the treatment indicator is switched to $t_i=0$.
-\end{itemize}
-
-\subsubsection{Output Values}
-
-The output of each Zelig command contains useful information which you
-may view.  For example, if you run \texttt{z.out <- zelig(y \~\,
-  x, model = "weibull", data)}, then you may examine the available
-information in \texttt{z.out} by using \texttt{names(z.out)},
-see the {\tt coefficients} by using {\tt z.out\$coefficients}, and
-a default summary of information through \texttt{summary(z.out)}.
-Other elements available through the {\tt \$} operator are listed
-below.
-
-\begin{itemize}
-\item From the {\tt zelig()} output object {\tt z.out}, you may extract:
-   \begin{itemize}
-   \item {\tt coefficients}: parameter estimates for the explanatory
-     variables.
-   \item {\tt icoef}: parameter estimates for the intercept and ``scale''
-     parameter $1 / \alpha$.  
-   \item {\tt var}: the variance-covariance matrix.  
-   \item {\tt loglik}: a vector containing the log-likelihood for the
-     model and intercept only (respectively).
-   \item {\tt linear.predictors}: a vector of the
-     $x_{i}\beta$.
-   \item {\tt df.residual}: the residual degrees of freedom.
-   \item {\tt df.null}: the residual degrees of freedom for the null
-     model.
-   \item {\tt zelig.data}: the input data frame if {\tt save.data = TRUE}.  
-   \end{itemize}
-\item Most of this may be conveniently summarized using {\tt
-   summary(z.out)}.  From {\tt summary(z.out)}, you may
- additionally extract: 
-   \begin{itemize}
-   \item {\tt table}: the parameter estimates with their
-     associated standard errors, $p$-values, and $t$-statistics.
-   \end{itemize}
-
-\item From the {\tt sim()} output object {\tt s.out}, you may extract
-  quantities of interest arranged as matrices indexed by simulation
-  $\times$ {\tt x}-observation (for more than one {\tt x}-observation).
-  Available quantities are:
-
-   \begin{itemize}
-   \item {\tt qi\$ev}: the simulated expected values for the specified
-     values of {\tt x}.
-   \item {\tt qi\$pr}: the simulated predicted values drawn from a
-     distribution defined by $(\lambda_i, \alpha)$.  
-   \item {\tt qi\$fd}: the simulated first differences between the
-     simulated expected values for {\tt x} and {\tt x1}.  
-   \item {\tt qi\$att.ev}: the simulated average expected treatment
-     effect for the treated from conditional prediction models.  
-   \item {\tt qi\$att.pr}: the simulated average predicted treatment
-     effect for the treated from conditional prediction models.  
-   \end{itemize}
-\end{itemize}
-
-\subsection* {How to Cite} 
-
-\input{cites/weibull}
-\input{citeZelig}
-
-\subsection* {See also}
-The Weibull model is part of the survival library by Terry Therneau,
-ported to R by Thomas Lumley.  Advanced users may wish to refer to
-\texttt{help(survfit)} in the survival library, and \cite{VenRip02}.
-Sample data are from \cite{KinAltBur90}.
-
-\bibliographystyle{plain}
-\bibliography{gk,gkpubs}
-<<afterpkgs, echo=FALSE>>=
- after<-search()
- torm<-setdiff(after,before)
- for (pkg in torm)
- detach(pos=match(pkg,search()))
-@
- \end{document}
-
-
-%%% Local Variables: 
-%%% mode: latex
-%%% TeX-master: t
-%%% End: 
-
-
-
-
-
-
-
-
diff --git a/vignettes/zinput.tex b/vignettes/zinput.tex
deleted file mode 100644
index 64c4311..0000000
--- a/vignettes/zinput.tex
+++ /dev/null
@@ -1,38 +0,0 @@
-\documentclass[oneside,letterpaper,12pt]{book}
-\usepackage{Rd}
-%\usepackage{Sweave}
-%\usepackage{/usr/lib64/R/share/texmf/Sweave}
-%\usepackage{/usr/share/R/texmf/Sweave}
-\usepackage{bibentry}
-\usepackage{upquote}
-\usepackage{graphicx}
-\usepackage{natbib}
-\usepackage[reqno]{amsmath}
-\usepackage{amssymb}
-\usepackage{amsfonts}
-\usepackage{amsmath}
-\usepackage{verbatim}
-\usepackage{epsf}
-\usepackage{url}
-\usepackage{html}
-\usepackage{dcolumn}
-\usepackage{multirow}
-\usepackage{fullpage}
-\usepackage{lscape}
-\usepackage[all]{xy}
-
-\usepackage{csquotes}
-% \usepackage[pdftex, bookmarksopen=true,bookmarksnumbered=true,
-%   linkcolor=webred]{hyperref}
-\bibpunct{(}{)}{;}{a}{}{,}
-\newcolumntype{.}{D{.}{.}{-1}}
-\newcolumntype{d}[1]{D{.}{.}{#1}}
-\newcommand{\MatchIt}{{\sc MatchIt}}
-\newcommand{\hlink}{\htmladdnormallink}
-\newcommand{\Sref}[1]{Section~\ref{#1}}
-\newcommand{\fullrvers}{2.5.1}
-\newcommand{\rvers}{2.5}
-\newcommand{\rwvers}{R-2.5.1}
-%\renewcommand{\bibentry}{\citealt}
-
-\setcounter{tocdepth}{2}

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-zelig.git



More information about the debian-science-commits mailing list