[r-cran-vgam] 54/63: New upstream version 1.0-3

Andreas Tille tille at debian.org
Tue Jan 24 13:54:41 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-vgam.

commit ef164e46310a0b89797a5a3750afc4f8c095644a
Author: Andreas Tille <tille at debian.org>
Date:   Tue Jan 24 14:29:38 2017 +0100

    New upstream version 1.0-3
---
 BUGS                               |    8 +
 DESCRIPTION                        |    8 +-
 MD5                                | 1069 +++---
 NAMESPACE                          |  130 +-
 NEWS                               |   53 +
 R/Links.R                          |    4 +-
 R/aamethods.q                      |   67 +-
 R/attrassign.R                     |    4 +-
 R/bAIC.q                           |   14 +-
 R/build.terms.vlm.q                |   20 +-
 R/calibrate.q                      |   36 +-
 R/cao.R                            |   24 +-
 R/cao.fit.q                        |  150 +-
 R/coef.vlm.q                       |   16 +-
 R/confint.vlm.R                    |   88 +-
 R/cqo.R                            |   24 +-
 R/cqo.fit.q                        |   85 +-
 R/deviance.vlm.q                   |   27 +-
 R/effects.vglm.q                   |    2 +-
 R/family.actuary.R                 |  781 +++--
 R/family.aunivariate.R             | 3168 ++++++++++++++---
 R/family.basics.R                  |  208 +-
 R/family.binomial.R                |  443 ++-
 R/family.bivariate.R               |  444 ++-
 R/family.categorical.R             |  244 +-
 R/family.censored.R                |   88 +-
 R/family.circular.R                |   32 +-
 R/family.exp.R                     |   78 +-
 R/family.extremes.R                |  216 +-
 R/family.functions.R               |  144 +-
 R/family.genetic.R                 |   85 +-
 R/family.glmgam.R                  |  305 +-
 R/family.loglin.R                  |   65 +-
 R/family.math.R                    |  143 +-
 R/family.mixture.R                 |   89 +-
 R/family.nbd.R                     | 1868 ++++++++++
 R/family.nonlinear.R               |   27 +-
 R/family.normal.R                  |  326 +-
 R/family.oneinf.R                  | 2188 ++++++++++++
 R/family.others.R                  |  424 ++-
 R/family.positive.R                |  558 +--
 R/family.qreg.R                    |  522 +--
 R/family.rcim.R                    |   53 +-
 R/family.rcqo.R                    |    4 +-
 R/family.robust.R                  |   32 +-
 R/family.rrr.R                     |  331 +-
 R/family.sur.R                     |   19 +-
 R/family.survival.R                |   40 +-
 R/family.ts.R                      |  354 +-
 R/family.univariate.R              | 6636 ++++++++++++------------------------
 R/family.vglm.R                    |    6 +-
 R/family.zeroinf.R                 |  775 ++---
 R/fittedvlm.R                      |   21 +-
 R/formula.vlm.q                    |    2 +-
 R/generic.q                        |    2 +-
 R/getxvlmaug.R                     |  199 ++
 R/links.q                          |  137 +-
 R/logLik.vlm.q                     |    8 +-
 R/lrwaldtest.R                     |   88 +-
 R/model.matrix.vglm.q              |  198 +-
 R/mux.q                            |   32 +-
 R/nobs.R                           |    2 +-
 R/penvps.R                         |  139 -
 R/plot.vgam.R                      |  190 +-
 R/plot.vglm.R                      |    4 +-
 R/predict.vgam.q                   |   26 +-
 R/predict.vglm.q                   |  101 +-
 R/predict.vlm.q                    |   35 +-
 R/print.vglm.q                     |  291 +-
 R/print.vlm.q                      |   10 +-
 R/profilevglm.R                    |  303 ++
 R/psfun.R                          |  183 -
 R/psv2magic.R                      |   63 +-
 R/qrrvglm.control.q                |   26 +-
 R/qtplot.q                         |  148 +-
 R/residuals.vlm.q                  |   54 +-
 R/rrvglm.R                         |   32 +-
 R/rrvglm.control.q                 |   13 +-
 R/rrvglm.fit.q                     |   58 +-
 R/s.q                              |    6 +-
 R/s.vam.q                          |   16 +-
 R/simulate.vglm.R                  |    4 +-
 R/sm.os.R                          |  333 ++
 R/sm.ps.R                          |  206 ++
 R/smart.R                          |   50 +-
 R/summary.vgam.q                   |   84 +-
 R/summary.vglm.q                   |   45 +-
 R/summary.vlm.q                    |   30 +-
 R/vcov.pvgam.R                     |  526 +++
 R/vgam.R                           |  162 +-
 R/vgam.control.q                   |   49 +-
 R/vgam.fit.q                       |  375 +-
 R/vgam.match.q                     |   10 +-
 R/vglm.R                           |   42 +-
 R/vglm.control.q                   |  110 +-
 R/vglm.fit.q                       |   74 +-
 R/vlm.R                            |   26 +-
 R/vlm.wfit.q                       |  108 +-
 R/vsmooth.spline.q                 |   60 +-
 build/vignette.rds                 |  Bin 478 -> 480 bytes
 data/Huggins89.t1.rda              |  Bin 442 -> 443 bytes
 data/Huggins89table1.rda           |  Bin 444 -> 445 bytes
 data/alclevels.rda                 |  Bin 549 -> 550 bytes
 data/alcoff.rda                    |  Bin 546 -> 547 bytes
 data/auuc.rda                      |  Bin 244 -> 246 bytes
 data/backPain.rda                  |  Bin 480 -> 485 bytes
 data/beggs.rda                     |  Bin 196 -> 198 bytes
 data/car.all.rda                   |  Bin 6959 -> 6962 bytes
 data/cfibrosis.rda                 |  Bin 263 -> 264 bytes
 data/corbet.rda                    |  Bin 236 -> 245 bytes
 data/crashbc.rda                   |  Bin 374 -> 374 bytes
 data/crashf.rda                    |  Bin 340 -> 341 bytes
 data/crashi.rda                    |  Bin 489 -> 491 bytes
 data/crashmc.rda                   |  Bin 385 -> 385 bytes
 data/crashp.rda                    |  Bin 375 -> 376 bytes
 data/crashtr.rda                   |  Bin 360 -> 361 bytes
 data/deermice.rda                  |  Bin 393 -> 393 bytes
 data/ducklings.rda                 |  Bin 560 -> 561 bytes
 data/finney44.rda                  |  Bin 209 -> 210 bytes
 data/flourbeetle.rda               |  Bin 342 -> 344 bytes
 data/hspider.rda                   |  Bin 1343 -> 1344 bytes
 data/lakeO.rda                     |  Bin 333 -> 335 bytes
 data/leukemia.rda                  |  Bin 328 -> 329 bytes
 data/marital.nz.rda                |  Bin 10492 -> 10448 bytes
 data/melbmaxtemp.rda               |  Bin 4257 -> 4264 bytes
 data/pneumo.rda                    |  Bin 266 -> 268 bytes
 data/prinia.rda                    |  Bin 1228 -> 1229 bytes
 data/ruge.rda                      |  Bin 254 -> 258 bytes
 data/toxop.rda                     |  Bin 472 -> 474 bytes
 data/venice.rda                    |  Bin 983 -> 986 bytes
 data/venice90.rda                  |  Bin 8036 -> 8008 bytes
 data/wine.rda                      |  Bin 270 -> 269 bytes
 inst/CITATION                      |    4 +-
 inst/doc/categoricalVGAM.R         |   66 +-
 inst/doc/categoricalVGAM.Rnw       |   18 +-
 inst/doc/categoricalVGAM.pdf       |  Bin 646125 -> 648348 bytes
 inst/doc/crVGAM.pdf                |  Bin 421526 -> 421558 bytes
 man/A1A2A3.Rd                      |   12 +-
 man/AA.Aa.aa.Rd                    |   10 +-
 man/AB.Ab.aB.ab.Rd                 |    8 +-
 man/ABO.Rd                         |   10 +-
 man/AICvlm.Rd                      |    2 +-
 man/AR1.Rd                         |   72 +-
 man/AR1EIM.Rd                      |  166 +-
 man/AR1UC.Rd                       |    2 +-
 man/BICvlm.Rd                      |    2 +-
 man/Coef.qrrvglm-class.Rd          |   30 +-
 man/Coef.qrrvglm.Rd                |    6 +-
 man/Coef.rrvglm-class.Rd           |    4 +-
 man/Coef.rrvglm.Rd                 |   10 +-
 man/Coef.vlm.Rd                    |    4 +-
 man/CommonVGAMffArguments.Rd       |   31 +-
 man/Huggins89.t1.Rd                |    2 +-
 man/Inv.gaussian.Rd                |    2 +-
 man/Links.Rd                       |   22 +-
 man/MNSs.Rd                        |   18 +-
 man/Opt.Rd                         |    2 +-
 man/ParetoUC.Rd                    |    2 +-
 man/QvarUC.Rd                      |    2 +-
 man/Rcim.Rd                        |   14 +-
 man/SURff.Rd                       |    2 +-
 man/Select.Rd                      |   34 +-
 man/SurvS4-class.Rd                |    2 +-
 man/Tol.Rd                         |    2 +-
 man/UtilitiesVGAM.Rd               |    2 +-
 man/V1.Rd                          |    6 +-
 man/VGAM-package.Rd                |   42 +-
 man/acat.Rd                        |    4 +-
 man/alaplace3.Rd                   |   22 +-
 man/amlbinomial.Rd                 |    4 +-
 man/amlexponential.Rd              |    2 +-
 man/amlnormal.Rd                   |    2 +-
 man/amlpoisson.Rd                  |    2 +-
 man/auxposbernoulli.t.Rd           |   12 +-
 man/backPain.Rd                    |    2 +-
 man/beggs.Rd                       |    8 +-
 man/benfUC.Rd                      |    4 +-
 man/benini.Rd                      |    4 +-
 man/beniniUC.Rd                    |    2 +-
 man/betaII.Rd                      |    8 +-
 man/betaR.Rd                       |   14 +-
 man/betabinomUC.Rd                 |   71 +-
 man/betabinomial.Rd                |   26 +-
 man/betabinomialff.Rd              |   28 +-
 man/betaff.Rd                      |   23 +-
 man/betageomUC.Rd                  |    2 +-
 man/betageometric.Rd               |   20 +-
 man/betanormUC.Rd                  |    2 +-
 man/betaprime.Rd                   |   38 +-
 man/biamhcop.Rd                    |    2 +-
 man/biclaytoncop.Rd                |    2 +-
 man/biclaytoncopUC.Rd              |    2 +-
 man/bifgmcop.Rd                    |    4 +-
 man/bifgmcopUC.Rd                  |    2 +-
 man/bifgmexp.Rd                    |    2 +-
 man/bifrankcop.Rd                  |    4 +-
 man/bigumbelIexp.Rd                |    2 +-
 man/bilogistic.Rd                  |   12 +-
 man/binom2.or.Rd                   |    8 +-
 man/binom2.rho.Rd                  |    6 +-
 man/binom2.rhoUC.Rd                |    6 +-
 man/binomialff.Rd                  |   12 +-
 man/binormal.Rd                    |   15 +-
 man/binormalUC.Rd                  |    6 +-
 man/binormalcop.Rd                 |    4 +-
 man/binormcopUC.Rd                 |    2 +-
 man/bisa.Rd                        |   10 +-
 man/bisaUC.Rd                      |    2 +-
 man/bistudentt.Rd                  |    2 +-
 man/bistudenttUC.Rd                |    2 +-
 man/bmi.nz.Rd                      |    8 +-
 man/borel.tanner.Rd                |    4 +-
 man/brat.Rd                        |   10 +-
 man/bratUC.Rd                      |    6 +-
 man/bratt.Rd                       |   18 +-
 man/calibrate-methods.Rd           |    2 +-
 man/calibrate.Rd                   |    4 +-
 man/calibrate.qrrvglm.Rd           |    4 +-
 man/calibrate.qrrvglm.control.Rd   |   12 +-
 man/cao.Rd                         |   42 +-
 man/cao.control.Rd                 |   54 +-
 man/cardUC.Rd                      |    4 +-
 man/cardioid.Rd                    |    8 +-
 man/cauchit.Rd                     |    2 +-
 man/cauchy.Rd                      |   12 +-
 man/cdf.lmscreg.Rd                 |   16 +-
 man/cens.gumbel.Rd                 |    4 +-
 man/cens.normal.Rd                 |    4 +-
 man/cfibrosis.Rd                   |    2 +-
 man/cgo.Rd                         |   10 +-
 man/chest.nz.Rd                    |    4 +-
 man/chisq.Rd                       |    2 +-
 man/clo.Rd                         |    4 +-
 man/cloglog.Rd                     |   12 +-
 man/coefvgam.Rd                    |    2 +-
 man/coefvlm.Rd                     |    2 +-
 man/confintvglm.Rd                 |  118 +-
 man/constraints.Rd                 |    2 +-
 man/corbet.Rd                      |    2 +-
 man/cqo.Rd                         |   38 +-
 man/crashes.Rd                     |   12 +-
 man/cratio.Rd                      |    2 +-
 man/cumulative.Rd                  |   18 +-
 man/dagum.Rd                       |   12 +-
 man/deplot.lmscreg.Rd              |    4 +-
 man/depvar.Rd                      |    2 +-
 man/df.residual.Rd                 |    4 +-
 man/diffzeta.Rd                    |   85 +
 man/diffzetaUC.Rd                  |   98 +
 man/dirichlet.Rd                   |    4 +-
 man/dirmul.old.Rd                  |    8 +-
 man/dirmultinomial.Rd              |   31 +-
 man/double.cens.normal.Rd          |    2 +-
 man/double.expbinomial.Rd          |    8 +-
 man/eexpUC.Rd                      |    4 +-
 man/enormUC.Rd                     |    4 +-
 man/enzyme.Rd                      |    2 +-
 man/erlang.Rd                      |    8 +-
 man/eunifUC.Rd                     |    4 +-
 man/expexpff.Rd                    |    2 +-
 man/expexpff1.Rd                   |    2 +-
 man/expint3.Rd                     |   12 +-
 man/explink.Rd                     |    2 +-
 man/exponential.Rd                 |    4 +-
 man/felix.Rd                       |   11 +-
 man/felixUC.Rd                     |   18 +-
 man/fff.Rd                         |    8 +-
 man/fill.Rd                        |   16 +-
 man/fisherz.Rd                     |    6 +-
 man/fisk.Rd                        |   14 +-
 man/fittedvlm.Rd                   |   16 +-
 man/flourbeetle.Rd                 |    6 +-
 man/foldnormUC.Rd                  |    2 +-
 man/foldnormal.Rd                  |    4 +-
 man/foldsqrt.Rd                    |    2 +-
 man/frechet.Rd                     |    6 +-
 man/freund61.Rd                    |   20 +-
 man/gamma1.Rd                      |    6 +-
 man/gamma2.Rd                      |    4 +-
 man/gammaR.Rd                      |    6 +-
 man/gammahyperbola.Rd              |    2 +-
 man/garma.Rd                       |    4 +-
 man/gaussianff.Rd                  |    2 +-
 man/genbetaII.Rd                   |    6 +-
 man/gengamma.Rd                    |    6 +-
 man/genpoisUC.Rd                   |    2 +-
 man/genpoisson.Rd                  |    6 +-
 man/geometric.Rd                   |   10 +-
 man/gev.Rd                         |   20 +-
 man/gevUC.Rd                       |    2 +-
 man/gew.Rd                         |    2 +-
 man/golf.Rd                        |    8 +-
 man/gompertz.Rd                    |    4 +-
 man/gpd.Rd                         |    4 +-
 man/gpdUC.Rd                       |    2 +-
 man/grain.us.Rd                    |    2 +-
 man/grc.Rd                         |   26 +-
 man/gumbel.Rd                      |   44 +-
 man/gumbelII.Rd                    |   12 +-
 man/gumbelUC.Rd                    |    2 +-
 man/guplot.Rd                      |   10 +-
 man/has.intercept.Rd               |    2 +-
 man/hormone.Rd                     |    4 +-
 man/hspider.Rd                     |    4 +-
 man/huber.Rd                       |    8 +-
 man/huberUC.Rd                     |    4 +-
 man/hunua.Rd                       |   10 +-
 man/hyperg.Rd                      |   10 +-
 man/hypersecant.Rd                 |    8 +-
 man/hzeta.Rd                       |   44 +-
 man/hzetaUC.Rd                     |   49 +-
 man/iam.Rd                         |   28 +-
 man/identitylink.Rd                |    8 +-
 man/inv.binomial.Rd                |    4 +-
 man/inv.gaussianff.Rd              |    8 +-
 man/inv.lomax.Rd                   |    4 +-
 man/inv.paralogistic.Rd            |   12 +-
 man/is.buggy.Rd                    |   17 +-
 man/kendall.tau.Rd                 |    8 +-
 man/kumar.Rd                       |   16 +-
 man/kumarUC.Rd                     |    2 +-
 man/lakeO.Rd                       |    2 +-
 man/laplace.Rd                     |    2 +-
 man/laplaceUC.Rd                   |    4 +-
 man/latvar.Rd                      |    2 +-
 man/leipnik.Rd                     |   33 +-
 man/lerch.Rd                       |    6 +-
 man/levy.Rd                        |   10 +-
 man/lgammaff.Rd                    |   10 +-
 man/lindUC.Rd                      |    7 +-
 man/lindley.Rd                     |    6 +-
 man/linkfun.Rd                     |    2 +-
 man/linkfun.vglm.Rd                |    6 +-
 man/lino.Rd                        |    4 +-
 man/linoUC.Rd                      |    2 +-
 man/lirat.Rd                       |    4 +-
 man/lms.bcg.Rd                     |    4 +-
 man/lms.bcn.Rd                     |    6 +-
 man/lms.yjn.Rd                     |   26 +-
 man/logF.Rd                        |    2 +-
 man/logF.UC.Rd                     |    4 +-
 man/logUC.Rd                       |   62 +-
 man/logc.Rd                        |    2 +-
 man/loge.Rd                        |    2 +-
 man/logff.Rd                       |   30 +-
 man/logistic.Rd                    |    2 +-
 man/logit.Rd                       |   10 +-
 man/logitoffsetlink.Rd             |    2 +-
 man/loglaplace.Rd                  |   12 +-
 man/loglinb2.Rd                    |   12 +-
 man/loglog.Rd                      |    2 +-
 man/lognormal.Rd                   |    4 +-
 man/logoff.Rd                      |    4 +-
 man/lomax.Rd                       |    4 +-
 man/lqnorm.Rd                      |    4 +-
 man/lrtest.Rd                      |   12 +-
 man/lvplot.qrrvglm.Rd              |   74 +-
 man/lvplot.rrvglm.Rd               |   34 +-
 man/machinists.Rd                  |    4 +-
 man/makeham.Rd                     |   18 +-
 man/makehamUC.Rd                   |    2 +-
 man/margeff.Rd                     |    2 +-
 man/maxwell.Rd                     |    6 +-
 man/maxwellUC.Rd                   |    6 +-
 man/mccullagh89.Rd                 |    4 +-
 man/micmen.Rd                      |    2 +-
 man/mix2exp.Rd                     |    2 +-
 man/mix2normal.Rd                  |   12 +-
 man/mix2poisson.Rd                 |    2 +-
 man/model.matrixvlm.Rd             |   10 +-
 man/moffset.Rd                     |   10 +-
 man/multilogit.Rd                  |    2 +-
 man/multinomial.Rd                 |   20 +-
 man/nakagami.Rd                    |   18 +-
 man/nakagamiUC.Rd                  |    2 +-
 man/nbcanlink.Rd                   |    2 +-
 man/nbolf.Rd                       |    6 +-
 man/negbinomial.Rd                 |   68 +-
 man/negbinomial.size.Rd            |    2 +-
 man/normal.vcm.Rd                  |    2 +-
 man/notdocumentedyet.Rd            |   42 +-
 man/oalog.Rd                       |  138 +
 man/oalogUC.Rd                     |   77 +
 man/oapospoisUC.Rd                 |   77 +
 man/oapospoisson.Rd                |  131 +
 man/oazeta.Rd                      |  132 +
 man/oazetaUC.Rd                    |   80 +
 man/oilog.Rd                       |   93 +
 man/oilogUC.Rd                     |  126 +
 man/oiposbinomUC.Rd                |   16 +-
 man/oiposbinomial.Rd               |  207 ++
 man/oipospoisUC.Rd                 |   10 +-
 man/oipospoisson.Rd                |    8 +-
 man/{oipospoisson.Rd => oizeta.Rd} |   59 +-
 man/oizetaUC.Rd                    |  122 +
 man/{oipospoisson.Rd => oizipf.Rd} |   62 +-
 man/oizipfUC.Rd                    |  122 +
 man/ordpoisson.Rd                  |    6 +-
 man/otlog.Rd                       |   65 +
 man/otlogUC.Rd                     |  105 +
 man/otpospoisUC.Rd                 |   87 +
 man/otpospoisson.Rd                |   71 +
 man/otzeta.Rd                      |   73 +
 man/otzetaUC.Rd                    |   98 +
 man/paralogistic.Rd                |   10 +-
 man/paretoIV.Rd                    |   16 +-
 man/paretoIVUC.Rd                  |    2 +-
 man/paretoff.Rd                    |    8 +-
 man/perks.Rd                       |   17 +-
 man/perksUC.Rd                     |    4 +-
 man/persp.qrrvglm.Rd               |   16 +-
 man/pgamma.deriv.Rd                |   10 +-
 man/pgamma.deriv.unscaled.Rd       |    2 +-
 man/plotdeplot.lmscreg.Rd          |   12 +-
 man/plotqrrvglm.Rd                 |   14 +-
 man/plotqtplot.lmscreg.Rd          |   16 +-
 man/plotrcim0.Rd                   |   28 +-
 man/plotvgam.Rd                    |   20 +-
 man/plotvgam.control.Rd            |   18 +-
 man/plotvglm.Rd                    |   14 +-
 man/pneumo.Rd                      |    4 +-
 man/poisson.points.Rd              |    2 +-
 man/poissonff.Rd                   |   10 +-
 man/polf.Rd                        |    8 +-
 man/polonoUC.Rd                    |    8 +-
 man/posbernUC.Rd                   |   16 +-
 man/posbernoulli.b.Rd              |    2 +-
 man/posbernoulli.t.Rd              |    6 +-
 man/posbernoulli.tb.Rd             |    2 +-
 man/posbinomUC.Rd                  |    6 +-
 man/posbinomial.Rd                 |    2 +-
 man/posgeomUC.Rd                   |    6 +-
 man/posnegbinUC.Rd                 |   13 +-
 man/posnegbinomial.Rd              |   54 +-
 man/posnormUC.Rd                   |    2 +-
 man/posnormal.Rd                   |    8 +-
 man/pospoisUC.Rd                   |    4 +-
 man/pospoisson.Rd                  |    4 +-
 man/powerlink.Rd                   |    2 +-
 man/prats.Rd                       |    6 +-
 man/predictqrrvglm.Rd              |    4 +-
 man/predictvglm.Rd                 |   43 +-
 man/prentice74.Rd                  |    6 +-
 man/probit.Rd                      |    6 +-
 man/propodds.Rd                    |    2 +-
 man/ps.Rd                          |  267 --
 man/qrrvglm.control.Rd             |   54 +-
 man/qtplot.lmscreg.Rd              |   14 +-
 man/quasibinomialff.Rd             |   12 +-
 man/quasipoissonff.Rd              |    2 +-
 man/rayleigh.Rd                    |    2 +-
 man/rcqo.Rd                        |   54 +-
 man/rdiric.Rd                      |   14 +-
 man/rec.exp1.Rd                    |    2 +-
 man/rec.normal.Rd                  |    4 +-
 man/reciprocal.Rd                  |    2 +-
 man/rhobit.Rd                      |    2 +-
 man/riceff.Rd                      |    4 +-
 man/rigff.Rd                       |    4 +-
 man/rlplot.gevff.Rd                |    6 +-
 man/rrar.Rd                        |   14 +-
 man/rrvglm-class.Rd                |   14 +-
 man/rrvglm.Rd                      |    6 +-
 man/rrvglm.control.Rd              |   20 +-
 man/rrvglm.optim.control.Rd        |   10 +-
 man/s.Rd                           |   54 +-
 man/sc.studentt2.Rd                |    8 +-
 man/sc.t2UC.Rd                     |    2 +-
 man/seq2binomial.Rd                |   10 +-
 man/simplex.Rd                     |    6 +-
 man/simplexUC.Rd                   |    2 +-
 man/simulate.vlm.Rd                |   10 +-
 man/sinmad.Rd                      |    6 +-
 man/skellam.Rd                     |    6 +-
 man/skewnormUC.Rd                  |    8 +-
 man/skewnormal.Rd                  |    2 +-
 man/slash.Rd                       |   37 +-
 man/slashUC.Rd                     |    6 +-
 man/sm.os.Rd                       |  472 +++
 man/sm.ps.Rd                       |  269 ++
 man/smartpred.Rd                   |    2 +
 man/sratio.Rd                      |    4 +-
 man/studentt.Rd                    |    6 +-
 man/summarypvgam.Rd                |  108 +
 man/summaryvgam.Rd                 |   86 +
 man/summaryvglm.Rd                 |   24 +-
 man/tikuv.Rd                       |    2 +-
 man/tikuvUC.Rd                     |    2 +-
 man/tobit.Rd                       |   16 +-
 man/tobitUC.Rd                     |    8 +-
 man/topple.Rd                      |   73 +
 man/toppleUC.Rd                    |   96 +
 man/triangle.Rd                    |   10 +-
 man/triangleUC.Rd                  |    3 +-
 man/trplot.Rd                      |    2 +-
 man/trplot.qrrvglm.Rd              |   34 +-
 man/truncparetoUC.Rd               |    2 +-
 man/truncweibull.Rd                |   10 +-
 man/ucberk.Rd                      |    4 +-
 man/undocumented-methods.Rd        |   14 +-
 man/vcovvlm.Rd                     |    2 +-
 man/venice.Rd                      |   14 +-
 man/vgam-class.Rd                  |   18 +-
 man/vgam.Rd                        |  242 +-
 man/vgam.control.Rd                |   90 +-
 man/vglm-class.Rd                  |   40 +-
 man/vglm.Rd                        |   32 +-
 man/vglm.control.Rd                |   52 +-
 man/vglmff-class.Rd                |   24 +-
 man/vonmises.Rd                    |   10 +-
 man/vsmooth.spline.Rd              |   43 +-
 man/waitakere.Rd                   |    8 +-
 man/waldff.Rd                      |   25 +-
 man/weibull.mean.Rd                |    4 +-
 man/weibullR.Rd                    |   10 +-
 man/weightsvglm.Rd                 |    4 +-
 man/wine.Rd                        |    8 +-
 man/yeo.johnson.Rd                 |    8 +-
 man/yip88.Rd                       |   20 +-
 man/yulesimon.Rd                   |   24 +-
 man/yulesimonUC.Rd                 |   45 +-
 man/zabinomUC.Rd                   |    6 +-
 man/zabinomial.Rd                  |   10 +-
 man/zageomUC.Rd                    |    6 +-
 man/zageometric.Rd                 |   10 +-
 man/zanegbinUC.Rd                  |    8 +-
 man/zanegbinomial.Rd               |   12 +-
 man/zapoisUC.Rd                    |    4 +-
 man/zapoisson.Rd                   |    2 +-
 man/zero.Rd                        |    4 +-
 man/zeta.Rd                        |   57 +-
 man/zetaUC.Rd                      |   68 +-
 man/zetaff.Rd                      |   26 +-
 man/zibinomUC.Rd                   |    8 +-
 man/zibinomial.Rd                  |   16 +-
 man/zigeomUC.Rd                    |   10 +-
 man/zigeometric.Rd                 |    8 +-
 man/zinegbinUC.Rd                  |   10 +-
 man/zinegbinomial.Rd               |   30 +-
 man/zipebcom.Rd                    |   10 +-
 man/zipf.Rd                        |   30 +-
 man/zipfUC.Rd                      |   52 +-
 man/zipoisUC.Rd                    |    8 +-
 man/zipoisson.Rd                   |    8 +-
 man/zoabetaR.Rd                    |   10 +-
 man/zoabetaUC.Rd                   |   18 +-
 src/fgam.f                         |   84 +-
 src/tyeepolygamma3.c               |    4 +-
 src/vdigami.f                      |   10 +-
 src/veigen.f                       |   55 +-
 src/vlinpack2.f                    |    5 +-
 src/vlinpack3.f                    |    5 +-
 vignettes/categoricalVGAM.Rnw      |   18 +-
 vignettes/categoricalVGAMbib.bib   |   49 +-
 554 files changed, 23255 insertions(+), 12637 deletions(-)

diff --git a/BUGS b/BUGS
index 3d65463..b0cc29c 100755
--- a/BUGS
+++ b/BUGS
@@ -1,6 +1,14 @@
 Here is a list of known bugs.
 
 
+2016-06
+
+lrtest(zipoissonff.object, poissonff.object) fails.
+
+
+
+
+
 2016-05
 
 rcim() with alaplace2() may fail.
diff --git a/DESCRIPTION b/DESCRIPTION
index f7e0605..221356b 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: VGAM
-Version: 1.0-2
-Date: 2016-05-26
+Version: 1.0-3
+Date: 2017-01-11
 Title: Vector Generalized Linear and Additive Models
 Author: Thomas W. Yee <t.yee at auckland.ac.nz>
 Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -31,6 +31,6 @@ NeedsCompilation: yes
 BuildVignettes: yes
 LazyLoad: yes
 LazyData: yes
-Packaged: 2016-05-26 13:22:19 UTC; tyee001
+Packaged: 2017-01-11 02:07:28 UTC; tyee001
 Repository: CRAN
-Date/Publication: 2016-05-27 18:08:24
+Date/Publication: 2017-01-11 08:59:31
diff --git a/MD5 b/MD5
index 14f9067..ebffe4e 100644
--- a/MD5
+++ b/MD5
@@ -1,148 +1,153 @@
-68f03c3d43f72a0d24b94d4af893dd36 *BUGS
+babb8a1c553c18356c0db0223bece6cc *BUGS
 7ee5b2dc375f5ec613dffed100ca7b3d *ChangeLog
-dfb0d44f06250aa6eaa381521a1ed880 *DESCRIPTION
+01ec327a7d06295780bb418aae042706 *DESCRIPTION
 e640665d8993539374917f850992ddc7 *LICENCE.note
-e9304ea877f43e4e34c4ce1b3d27a48d *NAMESPACE
-80eb5bf0880da3735541f850ea0cb1bc *NEWS
-1cf8c58b1b8f8f512152ad0fef77f9b3 *R/Links.R
-21f10bcb0180979e9306f79521eaeaf3 *R/aamethods.q
-9694330a9429ca317d230d95b4f28439 *R/attrassign.R
-56f4d63ba58a8118751745ee1d114c2f *R/bAIC.q
-ddc20f03a63a70292d06634a2c33fdb2 *R/build.terms.vlm.q
-e18970ef384fe48b670fd3e39da3e773 *R/calibrate.q
-cfefa5d3109605043519fcb00f2e973c *R/cao.R
-206e6fdff874d039ba7b0783aba59e5e *R/cao.fit.q
-712ba0e03427dfc76a251d04b564cbcd *R/coef.vlm.q
-0caeff46be684ed015c8797312fb584a *R/confint.vlm.R
-f7b3a3390696387b3160d50fab2f1e26 *R/cqo.R
-37a34a9474c4015cfff888efe0e1df58 *R/cqo.fit.q
-d7411f35192232f4b396b2b9a256c048 *R/deviance.vlm.q
-1d9d007e875a5298f466a67da0a5355b *R/effects.vglm.q
-0cdac42854e20d508ba4eda8fcf905b5 *R/family.actuary.R
-bbcd730bf607107b7901bf6ed0bbeb0c *R/family.aunivariate.R
-8a903ec2a3167664db6d781a446e2e0f *R/family.basics.R
-d865febbab3728ad23ab1d29c5a1aefc *R/family.binomial.R
-8cc88aa0238f0c24873e43b96248069f *R/family.bivariate.R
-9ab9ab5bb3408b7b8c96d0b27044ffd3 *R/family.categorical.R
-a1854051e36e942db41075b38d01da14 *R/family.censored.R
-53c98dae9e0fca770deb579158c3649a *R/family.circular.R
-4c12b580d3d15be2f7393731223e6483 *R/family.exp.R
-4c642fd4f02fe56de2ada12775ff545e *R/family.extremes.R
-436edded73dcb29d8a6eaa208a3d616d *R/family.functions.R
-d3cd45b76a23ce50c5d55487dfb2bea6 *R/family.genetic.R
-e6214ab432a3c59169a1d38f5d7321d5 *R/family.glmgam.R
-4ee41145327bb60d68e61636d742bfc4 *R/family.loglin.R
-9c7b3a42d0c04f9a6244180303ae52a4 *R/family.math.R
-38dd9066ca231d11fe68838ab1e8c236 *R/family.mixture.R
-4a9235c693f664c02d3f0028746456c8 *R/family.nonlinear.R
-610a746d203f5d80dfe3f9235201113d *R/family.normal.R
-3c8ccd291b82a4dd473f05b71dc33de0 *R/family.others.R
-165a865e6f3d4a689abf0df987dbe4c5 *R/family.positive.R
-160ef5bb3ade2a010f7fb89e0626a277 *R/family.qreg.R
-09e1e6f7b83739b27d34ae1355564a80 *R/family.rcim.R
-56bc66c989b520e3dab7995fbfb52b49 *R/family.rcqo.R
-b64fb946c724db1151be8c10a918609b *R/family.robust.R
-1fd817f411b76ffa57ef99b3266dbce5 *R/family.rrr.R
-ff3ab815d988febc776287b3230eec59 *R/family.sur.R
-533ca6d202400a48588a5f707aa4770e *R/family.survival.R
-2294aa48ef55115a5a985a4cc5b372d1 *R/family.ts.R
-4de93f7061f23fa901713236597676fa *R/family.univariate.R
-8c4fe096ac15f024a37038ae7e222662 *R/family.vglm.R
-f151351322dcd21ffe0a0adc093caed4 *R/family.zeroinf.R
-2e0feaff930416a2be667b96674f0f99 *R/fittedvlm.R
-562a5bfc621b8a1c0e1e897ee96ef27a *R/formula.vlm.q
-a9c48afe02653db5b368ddbbc3a98bae *R/generic.q
-8ccbb2c06885c2421b6079be28b1e0a9 *R/links.q
-b275831b8ad5e1c70f5e45849a663e6f *R/logLik.vlm.q
-2e5531d6158191fd1f1fc5851a514386 *R/lrwaldtest.R
-f7ce594fdeb677de275fe3935426902f *R/model.matrix.vglm.q
-308858105b4bd4eb35d15a90e7a06645 *R/mux.q
-21c6f98334d6f745adb68bf1a9b0de4d *R/nobs.R
-709894eed9cee3ab89ea651b52a5086b *R/penvps.R
-87144da24f6ccee1e83973a812733961 *R/plot.vgam.R
-d19041afde6928b79454d697e7ef7b77 *R/plot.vglm.R
-0b826112c20b62aa84de56995df0dfd2 *R/predict.vgam.q
-2b8ee0e63edd4225f6c654c574527d8c *R/predict.vglm.q
-54ea08fbdc77b5bc32281bb92e48a49f *R/predict.vlm.q
-b744f88aae3e4d79313f688b3edcfbd6 *R/print.vglm.q
-c1398324a29defd22e16a4bfc12eeb0c *R/print.vlm.q
-d8eb45f7f8cde512bfb81cd6ba73c852 *R/psfun.R
-e84add7acb90669f496564778fa82f2b *R/psv2magic.R
-b04deb6f14c8561e40d4c7477794426d *R/qrrvglm.control.q
-0a368c9c8cb0065eda12801f306c82c5 *R/qtplot.q
-f995162b96b7d38b0c388d818281a8c5 *R/residuals.vlm.q
-e660aa5a0acfe92c37c6d1a898bebe9a *R/rrvglm.R
-a765ef813001a447a28afd390a95c463 *R/rrvglm.control.q
-550ee5bdebaa23c93551a743bea80cdd *R/rrvglm.fit.q
-eab2d3dcd2d47eccc0917d44b2328834 *R/s.q
-09e35797e609474bf5586830b72f61b6 *R/s.vam.q
-a3611170442f07a7c5f6c5b80a4c88a7 *R/simulate.vglm.R
-6002fdbd315202fe3c809d0d4bad738d *R/smart.R
-dfb248166c9030652294440d3434114d *R/summary.vgam.q
-0d3ef350650f66ca4332aa74d8682419 *R/summary.vglm.q
-7b868586600cd09af0b0d962beb4f9ef *R/summary.vlm.q
-eae97f190da8dc8305aa3bb942c8d693 *R/vgam.R
-57cf03d0771a8787e73a631577b00aa4 *R/vgam.control.q
-7028939a835af468c0c18e185d479f13 *R/vgam.fit.q
-a6816380f5d7d20cc618d48e947aa920 *R/vgam.match.q
-de045db8370a4e1c0bdb69415cd77404 *R/vglm.R
-605a747fdc64cefa58bc6ec1e2f07512 *R/vglm.control.q
-f30de8d3512608317be4c535515fd5a2 *R/vglm.fit.q
-619a7ce3a35b93df64ae6eda4b64f681 *R/vlm.R
-ce7085fc18245d111bb2c9cb62db4052 *R/vlm.wfit.q
-77ed62698e44dc3768c6f325d6029323 *R/vsmooth.spline.q
-a07ba9fab9b037499cd4d53fe5dfdcbe *build/vignette.rds
-a7c4647f1fb9a84fb6b3e4939a1bdab2 *data/Huggins89.t1.rda
-4b7a96145fe191aae6d4000119fca953 *data/Huggins89table1.rda
+e51b1861337e053ea930cb59c15c50c5 *NAMESPACE
+e042b20792bfd607c5d2caf0788ee355 *NEWS
+599f20468d8fbb1e31970a7f39bc675b *R/Links.R
+6e28a39a5c99b065f92f96d4d362fe34 *R/aamethods.q
+850fc2355e3cdad5e6da31d75e7e3e87 *R/attrassign.R
+be85f5ed7e4886fa80527e81b9f402f6 *R/bAIC.q
+d4a2289805cafc49f5685f734921adba *R/build.terms.vlm.q
+fea38191b321bfce95dc9e30fbc0b54e *R/calibrate.q
+fda9031b005721942543ded2556bbadd *R/cao.R
+05f98527a97c88a3ac5733c24b3ef7b1 *R/cao.fit.q
+8d90f7d73c88b7f70997d82e37270538 *R/coef.vlm.q
+dd4bac465a287f5db3d3cd37547ab1dc *R/confint.vlm.R
+90543ae2527ae6618890f6c278108127 *R/cqo.R
+2aa1e33b70242e41b5fbb0363c6d5202 *R/cqo.fit.q
+6bd2dee0075dc98322bb67e173bc3411 *R/deviance.vlm.q
+40b0bc3b6621172402a2e948c060947c *R/effects.vglm.q
+88baa0710621afa80d8e2fa0ed995a7f *R/family.actuary.R
+f7891f309abfcb5a3311a1b7a9018db9 *R/family.aunivariate.R
+a2eab95d9f4fca1e5dfb890c0a45543e *R/family.basics.R
+a4bb6597b92a6cdfdef2dc5c06ec4248 *R/family.binomial.R
+029e84e10266d7dcbbaf3064d99c7334 *R/family.bivariate.R
+4ab7ae04fe2b80e704ed6844ef30c607 *R/family.categorical.R
+7565c3f6d5f1a3b95a02505d391432de *R/family.censored.R
+0a8f27601542f6e00b2120fae23db0e0 *R/family.circular.R
+ff42cacaf581014695c79903671a3ec2 *R/family.exp.R
+f569123de1eed8f9fc5c8cdeb2a2aba6 *R/family.extremes.R
+049fea46015aa13b157a02083ac718eb *R/family.functions.R
+988ce78b40c2e99d41a3865fe1068adc *R/family.genetic.R
+8a52c05ea0a66c6516b6a86c7bbc93a8 *R/family.glmgam.R
+31a2db812c69b1316db4be8a232a3280 *R/family.loglin.R
+81d397bc95e31d678d355c59c62ee09f *R/family.math.R
+a1a4c3db19ea37ba7a4506e4c63b583d *R/family.mixture.R
+097f2cd584e40cb648ae17400754757e *R/family.nbd.R
+6cd5f50d9df631ca8f244b6029e53886 *R/family.nonlinear.R
+874804a8367e39997bc0f2b2126a3b0c *R/family.normal.R
+3f643294d6f5b1b40d754aaf2e62e4b2 *R/family.oneinf.R
+0f6b900978a1c8fbe99f058ad8cbe921 *R/family.others.R
+f7d59021cd9b1a03f2ebc53d4de8ae07 *R/family.positive.R
+20e8e087836a3386c38f1c149deae4d1 *R/family.qreg.R
+d1765ac8518188f267c9676c6fb38975 *R/family.rcim.R
+e8a591f4dbdca517b3c7abb4c7148498 *R/family.rcqo.R
+61612a0ab87e08723e2dcb61862da4e0 *R/family.robust.R
+da90caa5b5fcb768f91720f46af2668c *R/family.rrr.R
+7c77d6e90c6344ecd1d86a21d6b53eb9 *R/family.sur.R
+4d8c241af8a93744d2ef2bb560b71d28 *R/family.survival.R
+cf45027a79a8ed7e7c201813a3dc3495 *R/family.ts.R
+6898c02bdc3903e7ef8c2e30459e7754 *R/family.univariate.R
+7fb585dbf26bfb316794f3dcdca2f177 *R/family.vglm.R
+3f9f450781851b76d3ec530a9555873b *R/family.zeroinf.R
+ce324d0c57408b541796f45f8c99e66d *R/fittedvlm.R
+1bf4ab21ebcf3de74b513740a15d35a7 *R/formula.vlm.q
+2eee5295f01795773e5ca2eb648d730e *R/generic.q
+f257433624cebe43f7ab54926c58e7ae *R/getxvlmaug.R
+511f0b808d028371fa257bf7bf462863 *R/links.q
+29d773c0d46692117a0983942b7ccdf4 *R/logLik.vlm.q
+19e85d36989af790dff4afb1726e42e0 *R/lrwaldtest.R
+33acdccefa8361e01a44881c59cc3a50 *R/model.matrix.vglm.q
+fe70b0c5a381241c7df199c786922f0f *R/mux.q
+fe713edeefaf3f3660835103fe2e3e64 *R/nobs.R
+79469ed8e3d205fdd7903cceb11914f2 *R/plot.vgam.R
+0556ee6f3ea4287744485a4c23faca05 *R/plot.vglm.R
+541762a018c2c64b8d65230877116958 *R/predict.vgam.q
+51b20db1da7c63d773b1c11af9f36d83 *R/predict.vglm.q
+1e4ebba7ffa017465bd611dd5d1447cf *R/predict.vlm.q
+0358f183d2b098967e8a40d7449315f5 *R/print.vglm.q
+79210aa7183e752a6fa036e193251af6 *R/print.vlm.q
+637e2241be5a481067e3059764371c06 *R/profilevglm.R
+945527e91fd66440a6b5849bbc30a949 *R/psv2magic.R
+b69ca0140390f60473398540808ed27e *R/qrrvglm.control.q
+96747a8f20c4d4f47caeb53f8e92baab *R/qtplot.q
+067ea212f72255109d664c77db84471d *R/residuals.vlm.q
+c8e0e940c0eea195c4440bf3e161f545 *R/rrvglm.R
+af47d54734f99aee7201c93e7ef927d5 *R/rrvglm.control.q
+c136a82096da6eab7cb6acc93c3803a6 *R/rrvglm.fit.q
+53dcd9f73e32190243ba3763d1b5b005 *R/s.q
+6da8e34ae162ed21d59650805ff1623c *R/s.vam.q
+688ad55cc7d6eba17fdbb1667eccdbe8 *R/simulate.vglm.R
+b5b74e062d32f11899d1ba96224e375c *R/sm.os.R
+8a5cab372b14ee98fb7e3ecb877e4216 *R/sm.ps.R
+d57222b1fddf6de18688d0f9e2786d9d *R/smart.R
+3e7923049875fdd3e16e3e79f3f96c7a *R/summary.vgam.q
+d9ff794a0e35c1d4e2c112d32c68f98a *R/summary.vglm.q
+d84c8c1fdb0d89fbc940e5dacbebf1fa *R/summary.vlm.q
+a2f49fc7d012e1b78d575a2f895dab95 *R/vcov.pvgam.R
+d8e5d4b0b3912b69125b2b8b654d032a *R/vgam.R
+aa18161ae77bdab4ac9433bd6734ab2a *R/vgam.control.q
+0fb9e7d906e6707834a13b64fdf595d4 *R/vgam.fit.q
+5c97b388e0e51b4ca0464a1a3202de9b *R/vgam.match.q
+d88eaeb2f7faf56f1f326576bd56ae89 *R/vglm.R
+203deda34c94348dee0918bff172dc88 *R/vglm.control.q
+c8e1180a8166816637de8b1ee60fac39 *R/vglm.fit.q
+ae4961cc98cb4d03ffec7fef195d943f *R/vlm.R
+ba69df50faefd39592539f77b0f46d94 *R/vlm.wfit.q
+3a4defc1e8e86e0ea4e4113e1d94a9e9 *R/vsmooth.spline.q
+61ae22107870459218fcdfb9a8fa9a18 *build/vignette.rds
+2b6ba47dfe94bab2f8606566090d917e *data/Huggins89.t1.rda
+7d19e0f2e4277de23dab8df30776d58f *data/Huggins89table1.rda
 d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
-002750b117ad85abddddd7f3a9202329 *data/alclevels.rda
-f0273a4099e893b5c0a75026832353e6 *data/alcoff.rda
-88eff02445bfc854cb291e09358dfe77 *data/auuc.rda
-0d4f8bc5415cb9135d55dd15317d4522 *data/backPain.rda
+0fd3bf904bd3c339fdb9c84bfdf1f0df *data/alclevels.rda
+edfd9e64e11a21c11871db15f49195ea *data/alcoff.rda
+f9e47e7e97326e28b9eaa5e7a6ed1488 *data/auuc.rda
+cdedb4cad81a93b450b7f1f5ac820044 *data/backPain.rda
 4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
-767e369fcbe38c25a6d25e3ca4766e5a *data/beggs.rda
+a4f7007364837d79aab105db6d200d23 *data/beggs.rda
 e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-6e979612433ab8f64ecb0777552fbeaf *data/car.all.rda
-be2a9693f80356b34c6ab906f3ba279e *data/cfibrosis.rda
+85d059b18f7cdbd51c365dc35bb9abbf *data/car.all.rda
+c450de88a2b34f76aa00f4131f7791f6 *data/cfibrosis.rda
 b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
 4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
 3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-feca377e8acb42e16c9e7d5095e0ee8e *data/corbet.rda
-f619df7f64b33a8a86a8824fb7d8986b *data/crashbc.rda
-bf8193fdc83a46d6499d47ee1eeb0ee7 *data/crashf.rda
-314e993b8f2deff5002c9e3ae3ea7c77 *data/crashi.rda
-2fb69d2b29f97023857bbcb400ab53c7 *data/crashmc.rda
-8998af71ee12daff74ba3ccb852a5649 *data/crashp.rda
-bf110e2aca39cf5a7b24a618db4f8f3f *data/crashtr.rda
-9965ac837e615904002246ee7239064e *data/deermice.rda
-389b381253dfcaffca2855d6a07bfff1 *data/ducklings.rda
+95beb3f7a12400cb9dc756bcee40bb3d *data/corbet.rda
+9210ad4773e3b8b58d35bdbe1db951a1 *data/crashbc.rda
+47e3c1164dae88f0bd5ae41cead7901f *data/crashf.rda
+2b1502a8639e3e9b4c1a2f577f029795 *data/crashi.rda
+00c854eaa390117e33b67f638f74214d *data/crashmc.rda
+3a713ff96d8bbd582d92e9ef4ffe0b72 *data/crashp.rda
+f37e52b8e16d0e567222942cd5fcd8f2 *data/crashtr.rda
+b4de12a58cf8eb3704b476eb6c4efdd5 *data/deermice.rda
+7929f8d04533beb4ede3cb6e960312f2 *data/ducklings.rda
 08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-29a903c0cb10f4eaf0476b5e0b4b1b39 *data/finney44.rda
-311be1c751377403e48723c468169b25 *data/flourbeetle.rda
+15355ce79f182577c4bc89270fa942bc *data/finney44.rda
+2457e428a81886b008b5a4bf7fcf84db *data/flourbeetle.rda
 3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
 bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
 9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-a07b5ef2e95ab88ea73243f6622f2ca7 *data/hspider.rda
+5427f9cd7ec7185b3adf8cd09d083113 *data/hspider.rda
 dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-36be2f45f357277bb790005a90dba695 *data/lakeO.rda
-425bfc030a5addd98be30f3e2406d367 *data/leukemia.rda
+5d0e602f8243c04522ab35d91bda17a7 *data/lakeO.rda
+3e0dc81698306af759e18091fb9acf8e *data/leukemia.rda
 aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
 7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
-d7541dc77a4e696004b02e8c1a824068 *data/marital.nz.rda
-06d0845d4f0034e789d3b92c235812ec *data/melbmaxtemp.rda
+e72f33ebb9d18d007bc2852ee39feaaf *data/marital.nz.rda
+a8619b2201cd114fa0ae6b8c674edf00 *data/melbmaxtemp.rda
 56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
 fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
 3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-64ac94ed85c27aeeeb69ed4334a43a9f *data/pneumo.rda
+9be3fe433126f2be522a2f8a55afd022 *data/pneumo.rda
 0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
-0f719ec09b48e431b183a0a5dcbef0cd *data/prinia.rda
-ddd5cd78e8ba35f28add47f3d75bf690 *data/ruge.rda
-772105106bdec180786e942d0d43d68b *data/toxop.rda
+3ed7a9111362ecf1123c6819e490760c *data/prinia.rda
+eaac1831de27a17311ce8421f44289ad *data/ruge.rda
+15032bbaac96b52b5222018e40fd7e64 *data/toxop.rda
 1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-011b2583a58ab3fabcc204537ae4fa0e *data/venice.rda
-322ad7d5d65dc2208f410dfcd157fbbf *data/venice90.rda
+b2ba5eaf1521acfc1a5afaac7bdcec85 *data/venice.rda
+402924358fc289e13d7a89c048bd52f3 *data/venice90.rda
 e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-fb0c5a27e2a6b8e1fd3b5243a6e51e55 *data/wine.rda
+e7e1eb94836489d747e710521478c0fe *data/wine.rda
 81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
 9327dcfa4015cf47172717bac166f353 *demo/binom2.or.R
 b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
@@ -150,480 +155,506 @@ b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
 541e1a831b9abf6a2a5bfe193b03b1b4 *demo/lmsqreg.R
 ab8081763fe2144558be25f3a154327b *demo/vgam.R
 65570d10948785994d70d817f574bd96 *demo/zipoisson.R
-d2fcbc6a325172d058671fd977d0b5e5 *inst/CITATION
-4ff0e35d38b3c5bb38f1f7232b9af863 *inst/doc/categoricalVGAM.R
-bfa11dbdbff271fb20342560f2bacd53 *inst/doc/categoricalVGAM.Rnw
-95283b44f1ada2e41b1637309b80bdaf *inst/doc/categoricalVGAM.pdf
+4c1b5ce8ed92900b8bcb6746ad08b15f *inst/CITATION
+7e8d803a82d44c20e2f3a4de088132f0 *inst/doc/categoricalVGAM.R
+d0ac5e66d77fe4d6a7fa283e01b06882 *inst/doc/categoricalVGAM.Rnw
+de586eabfd4ecf4b16677a2903543bcb *inst/doc/categoricalVGAM.pdf
 2f57d2a0610fd514e05aae8ea94d8ebc *inst/doc/crVGAM.R
 8e489008d8b8b8f769e5e93e351c9c42 *inst/doc/crVGAM.Rnw
-e3818408b067f73bc46ecb851b9e5123 *inst/doc/crVGAM.pdf
-9b97006cdc82d3a0c0ace3d43c9758de *man/A1A2A3.Rd
-4bc543c785c8a213c46693e2e37f5f00 *man/AA.Aa.aa.Rd
-26a120083d1d9d77ac0a5193d0c186b9 *man/AB.Ab.aB.ab.Rd
-c6c2a703e0f76c8b0f9e0a7d36f13386 *man/ABO.Rd
-38647708600610216a454c61450810ff *man/AICvlm.Rd
-3891efed1727773933c6289b0147601a *man/AR1.Rd
-5867eaa6a561af2007aad087c87a7c91 *man/AR1EIM.Rd
-144c95d558d807bf195bb8b297bae332 *man/AR1UC.Rd
-0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
+0ab671ea4e33474f4ac11640fc5afd2f *inst/doc/crVGAM.pdf
+285846db4de993ae1778be8166a5a113 *man/A1A2A3.Rd
+bf4deb73bc66a698f518b65290a52e7e *man/AA.Aa.aa.Rd
+03439b6b6db1ebb78bf79bd11e8b560a *man/AB.Ab.aB.ab.Rd
+fc91674c4fdc7d2026d1dde6ff1cecb5 *man/ABO.Rd
+467a312763e8a9a3c938aee39dac64f7 *man/AICvlm.Rd
+6c0407b2dece39cc4d627564375a54b8 *man/AR1.Rd
+9f24a0d6fc1877fcc3814aced0559e12 *man/AR1EIM.Rd
+ea156efd9638ec3d235442147ca21843 *man/AR1UC.Rd
+447222e0a355dab67f394d1ed02011d0 *man/BICvlm.Rd
 32daae0afb71eae3cdeefc042f4241c6 *man/Coef.Rd
-7b7ad4188c687ac8361fa1176697ce88 *man/Coef.qrrvglm-class.Rd
-77ac83a0f65139c47a0427516552193d *man/Coef.qrrvglm.Rd
-a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
-4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
-9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-827e574dfaf7a6b73ca967f05f2469be *man/CommonVGAMffArguments.Rd
-098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
-ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
-b9505b66dea5b1311aa8d2700d3d6a34 *man/Links.Rd
-e53a7b5f977320e9a2b3cfba16e097ee *man/MNSs.Rd
+7064da1eee8698b388f5d40c76c00ed1 *man/Coef.qrrvglm-class.Rd
+e7f724d393e8e41632c9bfe0777f2ba3 *man/Coef.qrrvglm.Rd
+20dae27ef5c49a65318dc19d0059f0d3 *man/Coef.rrvglm-class.Rd
+2090ca21bbfccf2ed98d694514654ae1 *man/Coef.rrvglm.Rd
+4e4b7b0f0535956bd665e88e99681c1c *man/Coef.vlm.Rd
+c5b42970f5b3557fed1aaa9c4edf380e *man/CommonVGAMffArguments.Rd
+aecb9f3fb3bcde4e74f3722cc4f67efd *man/Huggins89.t1.Rd
+63b7e9ae7538ac73ca7c2913ce093cb9 *man/Inv.gaussian.Rd
+023a52f3674f1a99ed5b084f11951c7f *man/Links.Rd
+500b60125f8954997037620b5b8505a6 *man/MNSs.Rd
 5ddd860d2b28b025dbf94b80062e3fc6 *man/Max.Rd
-00dce9ac476270fc8ce02ea1e75de191 *man/Opt.Rd
-d315bc4396e206c1ec3c5219e4efc677 *man/ParetoUC.Rd
-f84dea8ac6b2c1e857d25faaceb706d2 *man/QvarUC.Rd
-bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
-d39629f7598851d50262b1075321525a *man/SURff.Rd
-aaf085cd16f13eefc77d2f920ae840f0 *man/Select.Rd
-20a760cb2a7468d974d2de5c88d870e3 *man/SurvS4-class.Rd
+4393fcf0fc30408567c6a8477a822503 *man/Opt.Rd
+4f8d77d5f9c49e4ae5033a169bfe274e *man/ParetoUC.Rd
+2ec707e8c6450699f244a27a35466e77 *man/QvarUC.Rd
+826c063341f27d31e4778e5ef65eee35 *man/Rcim.Rd
+cef46e6d9d73924ca7f8b9e969e55b9c *man/SURff.Rd
+01e0e6705724e5cdbcc9d5077cef7aa6 *man/Select.Rd
+ba415357209734874cf96a406bd9214b *man/SurvS4-class.Rd
 6ed5239b716d4aaef069b66f248503f0 *man/SurvS4.Rd
-21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
-eeed63e131219077a163410c683fd32e *man/UtilitiesVGAM.Rd
-6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
-8d0c54826f9afc4662dd38c89f1264f8 *man/VGAM-package.Rd
-ce8d4266cb5eeb30fbe40e28ff554f5e *man/acat.Rd
-d2407fe64af0c4369d18ff4cc5f58a34 *man/alaplace3.Rd
+d7625b064394b2c400802feb17ef400c *man/Tol.Rd
+77777a8c452a5d806a1a4e5c3378be96 *man/UtilitiesVGAM.Rd
+39394f12a25f8ae05334df307ccd10fe *man/V1.Rd
+181c34c20563e98d09b441d9916adc23 *man/VGAM-package.Rd
+421b43ebe193fc7dae0bcffdcd07227b *man/acat.Rd
+99d6e410a20c6ffa691021a8a87c5c24 *man/alaplace3.Rd
 8c0d8e4d9e634a0c2539e3a052afa9cc *man/alaplaceUC.Rd
-8e181f4f03b718c6c9825ea3b6c4b8d6 *man/amlbinomial.Rd
-f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
-cf9c3d4f8799980be2f9e965eb809b42 *man/amlnormal.Rd
-ec213548ebb41e47b727541566160dfb *man/amlpoisson.Rd
+5227bdac87a20561f26516d74684a5bf *man/amlbinomial.Rd
+48d6f12735b9acbce463730f63468db9 *man/amlexponential.Rd
+a1d58b708d9db88ecce89fe3015445e7 *man/amlnormal.Rd
+cdaeaaafe03e6137fa24b41a6308ad06 *man/amlpoisson.Rd
 9f1ddcb0af49daaec702a1284341d778 *man/auuc.Rd
-c8efe93df8799ff106b6784e1bf50597 *man/auxposbernoulli.t.Rd
-bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
-6ac5a3f07851ac3f7e19eaa977365e0f *man/beggs.Rd
-65a5426c021e0a6c90731c14786a3395 *man/benfUC.Rd
-afa1ccbe6dd6e769dc1bbbc5702148dd *man/benini.Rd
-12d28242eea600b3e6f52db5d71d871f *man/beniniUC.Rd
-4711e53251972b32ac49ca46c65dd4e4 *man/betaII.Rd
-9ea8b2b59f58e287682c7a7ebdfeb705 *man/betaR.Rd
-33d5779c489976a0c1b94cfc590d24cc *man/betabinomUC.Rd
-3dc23022db723ea07649cac674dd0e2f *man/betabinomial.Rd
-5049c8cf22a2f1e637db29a40ad3c3b1 *man/betabinomialff.Rd
-d4f5ab83c26ee7c3ffcccff3b149797e *man/betaff.Rd
-4b590ee6208b2f3025109b82c1f6d67c *man/betageomUC.Rd
-725a8c9d8b4a9facb0c3cb815d75266b *man/betageometric.Rd
-7553029f69c2be7dbb20c864b97102e5 *man/betanormUC.Rd
-4a10d6e7f4fd8eb99d7c0b14e1845433 *man/betaprime.Rd
-f41bc1b37620bca37ba4d2f16fdae05d *man/biamhcop.Rd
+c5776da4bc66a6581a053cdb7de2cf0a *man/auxposbernoulli.t.Rd
+b3936cd4a7ea085e6fdde92047c13c66 *man/backPain.Rd
+111893c26485baa2b2616a0bf87c927e *man/beggs.Rd
+fbb153319516252a8dc9f0e40282a433 *man/benfUC.Rd
+ec06d832602e25e42d8d0d3d4ff4f655 *man/benini.Rd
+f4c7eaee169de8469b521abb5504c0f3 *man/beniniUC.Rd
+1cd00f8000b3e2af73bff388590a3146 *man/betaII.Rd
+bd214a990456c1c8c29c7352e56fee90 *man/betaR.Rd
+7217fa8ae33d3b4946e5275b0e93255d *man/betabinomUC.Rd
+194cd150f9675c57214394982c0c724c *man/betabinomial.Rd
+c9d17bd3e9a704ac922eb935eade0eff *man/betabinomialff.Rd
+c3420e8dc291534ccbcfcdb6bf5ea9a5 *man/betaff.Rd
+cb0be46a372240c2416eafde863e9824 *man/betageomUC.Rd
+a321871b9722215e053297e8d8b34687 *man/betageometric.Rd
+e649861c7f8f6b151a8f95b8dce56bf9 *man/betanormUC.Rd
+92dc23b72dcd7cca817690c70cb1a434 *man/betaprime.Rd
+5cbd2127e8da755d996e032343ec982f *man/biamhcop.Rd
 495e32601db2c4f22462811e27436c9d *man/biamhcopUC.Rd
-003ba5eb60e8e27f6c9a022ae1e336d1 *man/biclaytoncop.Rd
-f1afe1e3f5c95a262b998521408ede24 *man/biclaytoncopUC.Rd
-b25a2fadd7cdb9601aa3022d25265b30 *man/bifgmcop.Rd
-faeb492060203a0d89d5cf4f40b0e4c4 *man/bifgmcopUC.Rd
-57536bc44454e58eb293b928919c92ca *man/bifgmexp.Rd
-5e0bc6b73af5b7a56805a2f7600a439d *man/bifrankcop.Rd
+8f59784ca8807108410c4f3fb1299ac1 *man/biclaytoncop.Rd
+82b67433a10d4d25ac7bd66e3f4a2f33 *man/biclaytoncopUC.Rd
+67daaae368a358fd35c714d258e54f04 *man/bifgmcop.Rd
+2c51520c0e0485360fa5ee65989dbe2f *man/bifgmcopUC.Rd
+b8a59120e83fc9edb42af25537dcc93c *man/bifgmexp.Rd
+17da2a456e6af88182e2d2fa59f189b7 *man/bifrankcop.Rd
 4e57b0a38391fdfe5e57e39799ae9d6d *man/bifrankcopUC.Rd
 24ffd4d97c8b5d9c71c6702c4ecb3316 *man/bigamma.mckay.Rd
-7a1c045834b0bd9de92a4aa97f52ab3c *man/bigumbelIexp.Rd
+913fd4c8407162fb1aa521583dc41ccc *man/bigumbelIexp.Rd
 ffcbfc72f334094f6dfd4842ab522e96 *man/bilogisUC.Rd
-df5c6274584e9a5b961b253c498c0580 *man/bilogistic.Rd
-1e3bfb0dc5eb125518194b131c78ecc3 *man/binom2.or.Rd
+cb0b07c6c1ac54aac522170d1dfb02bc *man/bilogistic.Rd
+422973a9e84550aab73f4c797dcbfd23 *man/binom2.or.Rd
 84dc66c2c5d0321ace962180382c7c59 *man/binom2.orUC.Rd
-3da84a2c9a4148aa7f062129c7b40c8d *man/binom2.rho.Rd
-20cb304b16a9073488621b104549e361 *man/binom2.rhoUC.Rd
-29a9e5aa565832fad506a6a45c7b2897 *man/binomialff.Rd
-1fb31f47df7a9bcbf36b8ee148577384 *man/binormal.Rd
-262cfd215344aee98efb116c7c1151b8 *man/binormalUC.Rd
-ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
-9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
+4cb0caf5f677d96c4bf95573d4357fbf *man/binom2.rho.Rd
+9f2ac11550d4af21ccc838f98cf04014 *man/binom2.rhoUC.Rd
+1406de1905bc201eb615eb59b7d29b16 *man/binomialff.Rd
+ed1c2cb576b135de3dde2a997ce87230 *man/binormal.Rd
+b7288a39f385659ce3fda48789c75f46 *man/binormalUC.Rd
+020d2ccd1d470d6fd84a09af53119ebc *man/binormalcop.Rd
+4d8649f87cb865fe10d55aa655749e10 *man/binormcopUC.Rd
 1d943aad478481e7bf4c4b1a9540706c *man/biplackettcop.Rd
 79d9cd96d00531b88793d55a07d29842 *man/biplackettcopUC.Rd
 bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-d04726582d80a3f32bf27f8c5d3a690f *man/bisa.Rd
-8b2718247258cfa11b0857a922c512ab *man/bisaUC.Rd
-ce60753888f08f05ba46dbd49dc0f4b8 *man/bistudentt.Rd
-0489e2ceeed7b2aaf9cbcf6cfcabae81 *man/bistudenttUC.Rd
-81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
-214e2f5b25156e937a5af65d1e6e1b58 *man/borel.tanner.Rd
+673b5f26090b304c6a87bb818bc78ad0 *man/bisa.Rd
+ce85dc490ea819c417ca847e0a80ace8 *man/bisaUC.Rd
+a7c09c2ae5e665ab7a45213b1baafb38 *man/bistudentt.Rd
+db08fad184b678ca63e8b290cec33312 *man/bistudenttUC.Rd
+40e4a5e1a4e770be48eeea6337ecca83 *man/bmi.nz.Rd
+732e21d872b4c4d96380f14e602114b8 *man/borel.tanner.Rd
 a25a019943aa0d82d35d6c46ec726c67 *man/bortUC.Rd
-3bb03f3211ee3ce9848b85060c6de4d3 *man/brat.Rd
-a20b8cf27b3285f4ffcd0648862edc46 *man/bratUC.Rd
-c7322bedb2b3d8ba4e7c0a19a2098ced *man/bratt.Rd
-f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
-b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
-22f73cce0070ea9bb785567af837e14f *man/calibrate.qrrvglm.Rd
-3d6361a83221391b92f5d29a52117c29 *man/calibrate.qrrvglm.control.Rd
-afbb7b695f652a4bccfb0e6cb80a8739 *man/cao.Rd
-4005c8bdb2b1a2e7d0ff5f1a800f4224 *man/cao.control.Rd
-10f72289cb33f5f734d39826893a280b *man/cardUC.Rd
-53ff522ff00f7bcfe443309762441150 *man/cardioid.Rd
-a458bca3e32bdc653cd924dd564ee58d *man/cauchit.Rd
-957dd50f814f492806ec05aa4c046569 *man/cauchy.Rd
-4973007c9a18278e2130994b68a2e47d *man/cdf.lmscreg.Rd
-d7acde63c3dccc32fc8d4932f6a5e314 *man/cens.gumbel.Rd
-49787b380cee2941b0b8d04b602ebadb *man/cens.normal.Rd
+e409b7d19bd012900f3edf51ce084318 *man/brat.Rd
+f2de98254c464126b697019198425e21 *man/bratUC.Rd
+4b650c9b9cc0709dcba08a9f4661e957 *man/bratt.Rd
+1147c12691451e1844fe8f5d51e61f97 *man/calibrate-methods.Rd
+cea5875b93e8f7713016dee29e7a03ce *man/calibrate.Rd
+fa8933dbceee07985324dabfdebda4b6 *man/calibrate.qrrvglm.Rd
+ba98066f822e0bf6e5601fb1efb4117e *man/calibrate.qrrvglm.control.Rd
+2364f4c577c77e261dbdcb295cf3829b *man/cao.Rd
+ca994a5e35760fa5ea5b91e5d55d7988 *man/cao.control.Rd
+26c82ae6a9216158479d3707493127b0 *man/cardUC.Rd
+711cd6465d3c6aadabb2e4b59964da79 *man/cardioid.Rd
+2b2ef87eb1e638b5278405db1067deb5 *man/cauchit.Rd
+e827a0f2a0205030364895150f391d1e *man/cauchy.Rd
+a0df4cb63aae3cdd06afc498620e8ea3 *man/cdf.lmscreg.Rd
+3c0924505405ede8f26c9e637573302d *man/cens.gumbel.Rd
+d38abec814db0e3fb01ba9ccfeabba20 *man/cens.normal.Rd
 72901f13efe7d772fc5ed78bd6c58cea *man/cens.poisson.Rd
-94e6c5ea5488d93e0400ce9675e4d692 *man/cfibrosis.Rd
-a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
-1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
+77939979aa0728370bb60b66a2c98136 *man/cfibrosis.Rd
+aa6292da8fd825045a7d21b2ce8c78cf *man/cgo.Rd
+2855aeac51aae2e795d607941b8273ac *man/chest.nz.Rd
 922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
-9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
-b52839c08d0e405351b90b44ecef37ef *man/clo.Rd
-e35c0ce37b72050ab56a340fa1d4f375 *man/cloglog.Rd
+51f11030b641d0c57db0aec5d57de538 *man/chisq.Rd
+64750de2ee96a799428e2301ab647dd8 *man/clo.Rd
+412d70832fbb40ecf9f4d8f03243b545 *man/cloglog.Rd
 b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
-eb8ba8eea01187377705b5cb7d682947 *man/coefvgam.Rd
-7ab6167f053b9ac7bb36f855293af71e *man/coefvlm.Rd
+74b5325202b9c0285519604f5705b27d *man/coefvgam.Rd
+67cbcd6d989b8dd99d95611e19b61ca0 *man/coefvlm.Rd
 1409b01c52bad85c87e9740fb003699a *man/concoef-methods.Rd
 e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
-19ee88e086b371be838206bd11b5479e *man/confintvglm.Rd
-30bff4a27550ca7e9a699e5b5cba007e *man/constraints.Rd
-523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
-5314268c4257680ac10edf26e9222944 *man/cqo.Rd
-8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
-b7742b0b5c630d48f1834fb5fefc0835 *man/cratio.Rd
-7297008d99a1c3882555652d6a5d7441 *man/cumulative.Rd
-acd3af26cffe6abaf21889775de0aae2 *man/dagum.Rd
+f3eef1b987170deac0889eec5493c907 *man/confintvglm.Rd
+ed32d3a380d9427e110f36fcc622ac83 *man/constraints.Rd
+ef1f3f2351de5cf13132fd524197dc92 *man/corbet.Rd
+218dd3b304a11f9e81c6b906ed0f1926 *man/cqo.Rd
+b85e14be939b981496370132cb1fe0eb *man/crashes.Rd
+40e3fc904c8f5774b466d592b5871509 *man/cratio.Rd
+2ed5ab6b7d565123f9690cd1ba7d1fc8 *man/cumulative.Rd
+2b5c56846f4b02069a7beeb19b21e370 *man/dagum.Rd
 12192f19751804a540e6d0852e29726c *man/dagumUC.Rd
 d5439d37875ba50990406c5c5f8595eb *man/deermice.Rd
-dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
-72353428e1c837e182bac272b2b75def *man/depvar.Rd
-bffbb780b54bd3c8c76cf546ec87e4a0 *man/df.residual.Rd
-7736a01002f56a8d42e24d984fee9a2e *man/dirichlet.Rd
-17afdbe28f8a8d93725e2747c2daa303 *man/dirmul.old.Rd
-7a63063be35f8510ea5198556bf1c192 *man/dirmultinomial.Rd
-7c78ad345e44a5b81963f0cfc744f701 *man/double.cens.normal.Rd
-7e76fa91f062dc857b0d628b6617b480 *man/double.expbinomial.Rd
+d8fbb18d62bd64d50e7896e34ffd59d1 *man/deplot.lmscreg.Rd
+522bbe5bf684007a8e8d02ec8ce3b911 *man/depvar.Rd
+5a201348194119b8d79548bf1cd9f9e0 *man/df.residual.Rd
+49407a051530fcd4424f64740e4f601c *man/diffzeta.Rd
+1b4fb2baff32165b567969ff96f22c76 *man/diffzetaUC.Rd
+bc6022d55c367cdd33b768aa20948e30 *man/dirichlet.Rd
+5d80c9aec9a30bf52ee26429deae8721 *man/dirmul.old.Rd
+affd1703898de2fce53af63eebe656c8 *man/dirmultinomial.Rd
+55033c690b7e4ef405fc9009fe7fd01d *man/double.cens.normal.Rd
+3ea7f60a4427620c3a50932ef7da33ac *man/double.expbinomial.Rd
 2b74d3ee310b347a7112ce9c0b0ccb34 *man/ducklings.Rd
-90481ad7be6cb76a82e99694a2a8e016 *man/eexpUC.Rd
-92007c408a76e89f46e756eba4724a44 *man/enormUC.Rd
-ca3e766bd344902d3b8bf05c65d6c12b *man/enzyme.Rd
+1e7015b921166b7564a38eb8c49c0436 *man/eexpUC.Rd
+34dedcca98e6434bc1e9c7deca8a49fa *man/enormUC.Rd
+62236ca35117508cb67241845240d5b3 *man/enzyme.Rd
 980efa41e75a65ef1c0a8ccf943f6398 *man/erf.Rd
-bce699d9d485230ad940142978689709 *man/erlang.Rd
-b557620d84ef23c76ac3012a8fc7c35d *man/eunifUC.Rd
-cb83f77886603d8f133964c227915d08 *man/expexpff.Rd
-772ca8da2a38dbc5a2ffcb2138f91368 *man/expexpff1.Rd
+4e888f6bc8399670553b48ae319d349f *man/erlang.Rd
+25e5bdbf1b46ac8d999ffaf284ed45e1 *man/eunifUC.Rd
+64b225658109fddba5d01002764a63bd *man/expexpff.Rd
+cb7a983158b27b6fdfe1eafbcb6cf42a *man/expexpff1.Rd
 eccfa33017118bc7314ef168695a595e *man/expgeometric.Rd
 f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
-1b6f2c2a7b9fbbe335a89fa0275733aa *man/expint3.Rd
-6ab5a59ea1b5f61fbe676577b3882529 *man/explink.Rd
+5bde1dc82f2facb1410342713dfd6407 *man/expint3.Rd
+6cd9a3c431dd04ca30c58d836d77f2e4 *man/explink.Rd
 89ce96662b931aa17182192618085ed0 *man/explogUC.Rd
 e51211ad603eeecbe72cd7f6db0e76e0 *man/explogff.Rd
-fdbbdfc5e8f244b0ec6759aa8894bced *man/exponential.Rd
+dca2e21888171a395e2b9e8ad788cd87 *man/exponential.Rd
 f3cca02f31b091259c7a8cf690f93148 *man/exppoisson.Rd
 79f43e2f29b5cca093569fd81aea3abe *man/exppoissonUC.Rd
-0712cad8a071a24a0676bbea9b09094c *man/felix.Rd
-c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
-09fc6553edb037bc708396a30fe3c8f2 *man/fff.Rd
-9d679a175cfe7165b89906441e5efebc *man/fill.Rd
+326f2e1ad8675fb4d3fc610bd4d473e4 *man/felix.Rd
+0cb64513fed0f600d4d7012733d6e71d *man/felixUC.Rd
+28102fa0c3908cda895835e03c6ca8d4 *man/fff.Rd
+02da5dbf379f99aa8f74562b1a69aad5 *man/fill.Rd
 b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
-460448c26c4268e7870bbff5f9d2fb66 *man/fisherz.Rd
-c987dc987a36b7df68a94e3309b96096 *man/fisk.Rd
+676e766de613899da4bb649d736e5fac *man/fisherz.Rd
+fe861dacc59a40590ffec9752a5c31c0 *man/fisk.Rd
 5966dbc9e396bd3cbb15b2650d885177 *man/fiskUC.Rd
-97bcdcc90669435272c5d940f0b6d967 *man/fittedvlm.Rd
-742b72298fd6b2ca944812681ad625a6 *man/flourbeetle.Rd
-c0269f789f9739dc6aeeb20b446ae751 *man/foldnormUC.Rd
-3909f1a56c381d71501b6fde8d6647fe *man/foldnormal.Rd
-e1413cdef7d5b35f976738561f60a91a *man/foldsqrt.Rd
+e2b8486d75b02113b0f249e7b2af6df0 *man/fittedvlm.Rd
+43a9d36771b0925338017e31e0e23c02 *man/flourbeetle.Rd
+56da968ac438de6793e1de751093aa2f *man/foldnormUC.Rd
+422c8e83b864b0336ef325f8f1502412 *man/foldnormal.Rd
+704cb2521df31b2924262ad97b269f73 *man/foldsqrt.Rd
 628edb6d51c54d246702e9521ba6470c *man/formulavlm.Rd
-7af865ab486ea1d5d043bdef4bbf81cc *man/frechet.Rd
+5968650ac0521408bbd7c06c842f77c8 *man/frechet.Rd
 dabb4b7cdd3422f239888fb85ca5a70b *man/frechetUC.Rd
-babdf09c0633ab6fce48345f26984611 *man/freund61.Rd
-c4aea59df1932e36cd6fb2ec38110e6d *man/gamma1.Rd
-13beda968ad3c4461042e74b89e744c5 *man/gamma2.Rd
-969c6650372ab79d1751a733754f0dac *man/gammaR.Rd
-3558584dfba54663dc4de34e21cc9aa9 *man/gammahyperbola.Rd
-edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
-1994c9a780c42db46cebfbcf23716be5 *man/gaussianff.Rd
-6bdfa23e246b5ec65b369e4e746574e9 *man/genbetaII.Rd
+fd885ddeff27c79c445bc21f1d02265f *man/freund61.Rd
+1159eecef9c45ae80bf2da78ca439985 *man/gamma1.Rd
+4a46213a615e160d1de424276dc86fb6 *man/gamma2.Rd
+5605f627059a37fca123209727ae358d *man/gammaR.Rd
+09a8f1cbdd2216433910a23f982b2943 *man/gammahyperbola.Rd
+b4b0cf65a9d6064e4d4578fc00997fa9 *man/garma.Rd
+6be60152befbb0b0c97bd1bb38934479 *man/gaussianff.Rd
+e539477266517e44c478d7a014d59e33 *man/genbetaII.Rd
 45999add2a92fc243422b25bfc8f8198 *man/genbetaIIUC.Rd
-69a758aeab4a968d9e9f74d96a43fa17 *man/gengamma.Rd
+7757fe8e94585aa266cb216c458665b3 *man/gengamma.Rd
 588e10d5c3fd9ff745c679435c5f2457 *man/gengammaUC.Rd
-0a765eb0392ad75d94c0b0f0c517f9fb *man/genpoisUC.Rd
-8cd5ee8e81b3db18715e148f372d9c15 *man/genpoisson.Rd
+e76ba09ce1f37dcaefe798265764cc0c *man/genpoisUC.Rd
+a119afc00f962b1e029606fbb432c4da *man/genpoisson.Rd
 15429ac99e67921a77cb78e47210d7fc *man/genrayleigh.Rd
 2b8ec736188410b1502ce23ba1852463 *man/genrayleighUC.Rd
-ac050e093931cbc8b783c56728350b69 *man/geometric.Rd
+47cf2f1cabda023a3a982c4f1594bc6b *man/geometric.Rd
 ea16a72ebd8739cd2133e91fd9c92662 *man/get.smart.Rd
 d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
-737ed71da7e1d3d9f50d2ecd573d043d *man/gev.Rd
-1517975a704bc6f715924c10ae148d14 *man/gevUC.Rd
-fd070015282f2cca2b0a4b8200822551 *man/gew.Rd
-7ac66cc25e3d13cc7fed08bb6b85e1db *man/golf.Rd
-9a635d01c2a0f08b71517df675b20a92 *man/gompertz.Rd
+1209ed3da3e11b7925de5eaa91a31f97 *man/gev.Rd
+b14571b6ff3acfe34ada144787605811 *man/gevUC.Rd
+b67d72038a94936de43e9c9867e42696 *man/gew.Rd
+98323d4ddee9e4389db885c5dfd9e8b5 *man/golf.Rd
+f67b351f60fea785dcb5c364bcb76439 *man/gompertz.Rd
 8170cb9545cf35f1768db069b13a893e *man/gompertzUC.Rd
-d5cc5cbc038ca4b11365bd44b3867b25 *man/gpd.Rd
-b87cedd5e790170299c3b2f9590475f2 *man/gpdUC.Rd
-7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
-6e28498b6d44f47f2663a6be72f68529 *man/grc.Rd
-586ed95a4487db2fcd9e6b90c92efcf3 *man/gumbel.Rd
-f4c347dbfde0cbe8013496d5f8ef175a *man/gumbelII.Rd
+a82c1ef0b1f4275354aa1ccdb2f3f032 *man/gpd.Rd
+1ac579be6f58aef14c0fd352e242fbb1 *man/gpdUC.Rd
+ae22c83775126740de24f9b424eb1ac8 *man/grain.us.Rd
+9e6745cde809d3f4d76d9454ed4bdfea *man/grc.Rd
+086dd47f85b86acd9cdccd841f49b2cb *man/gumbel.Rd
+50b5296cb8c594ec807e7093db9884cc *man/gumbelII.Rd
 5099d1835eebc1b4610481e77463a50c *man/gumbelIIUC.Rd
-ff165cb5cc91cc4574c32dc88c6218b8 *man/gumbelUC.Rd
-f2c855e1a1291a70604b70b0f0040963 *man/guplot.Rd
-2c4e81cce3a305291fc9493d3f128b07 *man/has.intercept.Rd
+58292f4d52c780593164af0311a765af *man/gumbelUC.Rd
+7a67665b02e62cf7c115a6f689c4b489 *man/guplot.Rd
+4b511af2b7f9ef9ebb409d803ebc09b0 *man/has.intercept.Rd
 d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
-2be497a8d77472f00279d19f735863b5 *man/hormone.Rd
-93557c7aca25514dc023773bdd045d76 *man/hspider.Rd
-fda38a6b1fd8ed628b52c6c2021dbdf3 *man/huber.Rd
-c3f293eac3d5ae8362329d28c5f2be17 *man/huberUC.Rd
-d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
-592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
-e3a9765eba431e1f55e2fdc11ff52b4b *man/hypersecant.Rd
-2bf15af91bb331e94b94dd69050589c0 *man/hzeta.Rd
-04198bb4e2bf6a230e17b4e84251887f *man/hzetaUC.Rd
-7f0e64784914835bb11c6f43643aae15 *man/iam.Rd
-ca465d4fce5e11d024c52dd64b1d0b0e *man/identitylink.Rd
-857cbf6f8c5970a18867fe560f275f6f *man/inv.binomial.Rd
-3e5254faf43189942b98ee8dafaaa06f *man/inv.gaussianff.Rd
-a78ed6bfc5949e6586975bf781ece433 *man/inv.lomax.Rd
+6fc61fd507dc4c1c75fb6c28bee0a2b2 *man/hormone.Rd
+8b6e465e5fe73a935f46daa60046c9cd *man/hspider.Rd
+62abee8f8a170a6bbe50d1ab3e56c347 *man/huber.Rd
+95e3f876501a7fd2e84e35f4894477ef *man/huberUC.Rd
+fc4d3f7180812d4d2d9795729baade82 *man/hunua.Rd
+17e69c7a5141452916dd851ff3d8da1b *man/hyperg.Rd
+1d933319c8cb063e3cb36453b97ae4c6 *man/hypersecant.Rd
+1c49eed2542b1209bc724ad32f60fabd *man/hzeta.Rd
+41cf19f4dc45e8194f64a34e7c039f1d *man/hzetaUC.Rd
+5c27582c146028bd203dc6138b22bcef *man/iam.Rd
+8f8543e4554295b88ca2d3444a28173b *man/identitylink.Rd
+3270bfcad322c6b39ea015225ec71267 *man/inv.binomial.Rd
+65dfc6df4db250babcb46684a65328cd *man/inv.gaussianff.Rd
+1be0ee6f6ed383873765d3618a82ee72 *man/inv.lomax.Rd
 4492e4a4f91d5fe7d4ec75a128bf4e07 *man/inv.lomaxUC.Rd
-c6a4f615abfb46e7961add90565f2c6f *man/inv.paralogistic.Rd
+b0fbf31decc06fbee3ecb4b78463a796 *man/inv.paralogistic.Rd
 6f740a890a174ff4ff3879fa8719ec58 *man/inv.paralogisticUC.Rd
-b2ce02b5af6709a1b2d294fcf254d393 *man/is.buggy.Rd
+7153cd3b651179845eedeb69cb2f9c7d *man/is.buggy.Rd
 a501c3d3de4a744a0e0cdbc0673b543d *man/is.parallel.Rd
 e68a1f19e55cd95da21eec0b119c0ad8 *man/is.smart.Rd
 1b33dcd08e9f444146fb7fe03a425add *man/is.zero.Rd
-5cf973ee22fcfd1442e61458a9d91ce9 *man/kendall.tau.Rd
-149c759079151bd06084810c29f6c72c *man/kumar.Rd
-255a587274163051c7c5e81b79bb24cd *man/kumarUC.Rd
-1bcedd3ac3a0c7467e5dee8ba1de9ace *man/lakeO.Rd
+e28d52ce917a7dc0a0da14ee2a44ad5c *man/kendall.tau.Rd
+bf860633c6991dbc98a3ae4a56679470 *man/kumar.Rd
+bcc3111b901e3cc621a4a49d48b66206 *man/kumarUC.Rd
+6f5588e7ecb0328d282c94b391950240 *man/lakeO.Rd
 decbd103cc5311735e70d906d170c742 *man/lambertW.Rd
-640c78cf542ad1ee952d75baa009bb83 *man/laplace.Rd
-55f7da75a7695c5f00b10d600711bab9 *man/laplaceUC.Rd
-16b21ecf83bb8fce76079502877b2fbd *man/latvar.Rd
-2cd5151baff29f9d8dd996dc48293301 *man/leipnik.Rd
-3bd268665a29f6a6edb1b3387b69b2d5 *man/lerch.Rd
+d7ffbb868815836d70d5019f20b9af95 *man/laplace.Rd
+381e036b4bb5ec7eab83aec4924ea195 *man/laplaceUC.Rd
+7a0dffd900869abd11094f369410c3cd *man/latvar.Rd
+6139f5266e9dabcd024f5323f09abec7 *man/leipnik.Rd
+f14199e5a0c4d647de4fee04f1ba7edf *man/lerch.Rd
 8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
-42550fcfd84f5f7ee4efb5886d1fe224 *man/levy.Rd
+92fe3c03e74cb0fdd584bab129416eb4 *man/levy.Rd
 d3fb68f03d6cc946da6b48772bea3297 *man/lgammaUC.Rd
-d3d35561bb39104a648833365e13bb26 *man/lgammaff.Rd
-1bb4af539f983579a19c180c3ab29aec *man/lindUC.Rd
-271536a592dedaff73d9cde20c844d76 *man/lindley.Rd
-a271c63ae1172ebdabb67a1a25a18d17 *man/linkfun.Rd
-79a20f167d06958b953c5a7a8dfe16f0 *man/linkfun.vglm.Rd
-c6df85746e6410c593e22489045a88e5 *man/lino.Rd
-f56802c0fe3ec1b61cd313c370b9ff58 *man/linoUC.Rd
-b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
-1cb54dfd175703b0fa36ff139404217f *man/lms.bcg.Rd
-1d9caf2fdc9cad915a7df45cfe4790f4 *man/lms.bcn.Rd
-2bab43fb4c3c8bc597867838aecb67df *man/lms.yjn.Rd
+058b16f16ccc46ab0bc029419c60d71a *man/lgammaff.Rd
+31b32afde6eea39dce5651070827e17f *man/lindUC.Rd
+c5b96c1bb5898f1948d946ebeabcd8b3 *man/lindley.Rd
+0dbe4623dfb32dde6bb3dd3c7fefd509 *man/linkfun.Rd
+ce4327b5fa5398367a23005373ebf851 *man/linkfun.vglm.Rd
+1923870b8d3c672d7a7736d1cb752d59 *man/lino.Rd
+f4fa53903badeb6850411bb464cbfa11 *man/linoUC.Rd
+48d52e3536f40f4bb8b80ed13523e098 *man/lirat.Rd
+e86677e152d0f11cfdd23cf85adb373f *man/lms.bcg.Rd
+02ed4ad7f248aa002395a7f1547ebfc6 *man/lms.bcn.Rd
+ec26899ccac356ad8916ab2a6136a808 *man/lms.yjn.Rd
 0dad131a129a97908dfa39adac5ca812 *man/log1mexp.Rd
-34cbd6bc583c55d2acd79a46a66e064e *man/logF.Rd
-06a1ce6e6f01fca7e7037eabc6cf3dad *man/logF.UC.Rd
+7c108f33921093160892c22505f77e79 *man/logF.Rd
+128e56816c06da9c68204c5e17f40909 *man/logF.UC.Rd
 9f80bd504e1c75b0c7b29b3449cf7362 *man/logLikvlm.Rd
-236716ee0347bd21a08aec9fec2a810b *man/logUC.Rd
-a319edb070badcf389045ee076ff0e41 *man/logc.Rd
-a3f3250120073a8ed5d34d7958234b5c *man/loge.Rd
-20cc0c73ee555790179879533cb526f7 *man/logff.Rd
-227fe95675d683b575accc2d9390755c *man/logistic.Rd
-c65e7936494787bc6fa0c31d931d8f6b *man/logit.Rd
-501f8acee0a27cb53cd02f174e37fe9e *man/logitoffsetlink.Rd
+e25ab9a04aa6fe3c1ca7e207127df654 *man/logUC.Rd
+fac0b2e9215006ccc2d19659f370038f *man/logc.Rd
+0322131a22664e2eb8df4353fff97ec6 *man/loge.Rd
+d32bf68ce529579e1afc7198e557812a *man/logff.Rd
+2d41c4483d71434df635770c3227661f *man/logistic.Rd
+3457599cfa34218ea02070a931ce37bd *man/logit.Rd
+81fbcf2fd0488f7467f10a9b04112c36 *man/logitoffsetlink.Rd
 8822ba593955e90e63a8779aaf74d29b *man/loglapUC.Rd
-0f6dd1a9c0fc77dd6521af733693f52e *man/loglaplace.Rd
-bc4fdb6ecc0913ebadab7deb1a95efed *man/loglinb2.Rd
+bac5b0432ecb0ea9a2a8813b514ba205 *man/loglaplace.Rd
+19da55b6d1195e349514a7397c2611cc *man/loglinb2.Rd
 4290a696c9eedd140e5d64489b6f29be *man/loglinb3.Rd
-f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
-7495135db74b6b1eb9646755218e7020 *man/lognormal.Rd
-25116483cd0f23a8c31bc80f91a1685f *man/logoff.Rd
-77db7395c6e627f5183464aa77a56835 *man/lomax.Rd
+fd4cd26d6ff2a4436bc76b32032697dc *man/loglog.Rd
+b60549cb1381a5e28c243336088c8d0c *man/lognormal.Rd
+61a6b92080d54adddba15f5aff318167 *man/logoff.Rd
+1d125855406e1d3298b00cbbb43f617c *man/lomax.Rd
 dbc62e15528097b42fb64d49be5f22f3 *man/lomaxUC.Rd
-ac49f1d5575295a237328c2de3cbab10 *man/lqnorm.Rd
-fc9ca61a4c495cf650cba5a458b0dae1 *man/lrtest.Rd
+6bd022246e23e920706e334289786a92 *man/lqnorm.Rd
+c928f85e7f60b7bbbe3b8e0cac382caa *man/lrtest.Rd
 f0a38f0b82c1525dcd51687a2f2768c1 *man/lvplot.Rd
-7dcf0051720ee4587304e819ecc8de71 *man/lvplot.qrrvglm.Rd
-16b238586876d84bad0a1420402b5718 *man/lvplot.rrvglm.Rd
-c5760c3960748f906230ded119478271 *man/machinists.Rd
-4df8393312f1b7ff81d4dab3d18984cd *man/makeham.Rd
-7785dc7e94e63e94e688d9553a9c7b2a *man/makehamUC.Rd
-b830a21e53610a5abfbfa7466ae0f3c3 *man/margeff.Rd
+b58baa9997db4a2d17bffd823b18d91f *man/lvplot.qrrvglm.Rd
+d49cdb5fb7d5c6802adf306c149b4b11 *man/lvplot.rrvglm.Rd
+a22ccac7d14abadd6c77d953fb7a40e9 *man/machinists.Rd
+84c2bc3a634004df81bcf51de759d69b *man/makeham.Rd
+ded9a294b923c79230f83eb84f3103e3 *man/makehamUC.Rd
+2bbcbe064963fcc8c05f47ce043b9c56 *man/margeff.Rd
 3ddf15a17c9065fcb8a97c4d4dca724d *man/marital.nz.Rd
-b2f1aa9cecaec318a14cc5d4fbb20d67 *man/maxwell.Rd
-c7fcbd341df77f76494a92836715789a *man/maxwellUC.Rd
-665ee56b876aac685d2e35853f8712b8 *man/mccullagh89.Rd
+fe8730c68060188f47c05090d696f324 *man/maxwell.Rd
+8fe9e02925adf9a10be7913a361c79b9 *man/maxwellUC.Rd
+831b3c476b91b3f79edc76e8708b8d1e *man/mccullagh89.Rd
 a98728733448bc9c8943a2fb8f3f66bc *man/melbmaxtemp.Rd
 4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
-2bcfc226edb08c7257783853ff52d87b *man/micmen.Rd
-09a21e6a1a75e5a2e0e30079a1cbdee1 *man/mix2exp.Rd
-ac6dffa8b08d6cba20464169d19e8439 *man/mix2normal.Rd
-03dead9556e4a5968333b55521a7d381 *man/mix2poisson.Rd
+620804acef2de6c732fd9bb2dc05f8df *man/micmen.Rd
+dba6353c3b5a28ab1d489fbfee0ed1a8 *man/mix2exp.Rd
+98bf54f72e45f62c7048cea667f7e1bc *man/mix2normal.Rd
+9441d372f9b21c0e0f7e649e07a685af *man/mix2poisson.Rd
 131aaa836a137554786e8bda01d8e334 *man/model.framevlm.Rd
-3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
-199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
-00846c716aa6e89baea6f08be433fe9f *man/multilogit.Rd
-c903155fb0adb75baf311a4e1b2df6b3 *man/multinomial.Rd
-c3248f9d509aecb0726bd0e6e36a13d4 *man/nakagami.Rd
-61319d756fcb8509696cc1aa55ae4ed2 *man/nakagamiUC.Rd
-879a1566439b7bec4f48b6ed57ac118c *man/nbcanlink.Rd
-0c0ef87d1221196cdc7fc0d156ac150a *man/nbolf.Rd
-045a7642ca57ea99a0196a36f077399d *man/negbinomial.Rd
-8bac5c2532f5af0a4bedf8afa437171f *man/negbinomial.size.Rd
-d2fea6d91944fd5842f5f7655f9cf278 *man/normal.vcm.Rd
-110d21511561a735015f63b126f79e95 *man/notdocumentedyet.Rd
+d9b1e066f2438d36dab43f26214070f1 *man/model.matrixvlm.Rd
+5e63d27fdf0187ee5f4163a3b17bc3e5 *man/moffset.Rd
+2eb97d7bbc97fd001eae65a243925cbf *man/multilogit.Rd
+92bb80be2226f90308850fb8aab11feb *man/multinomial.Rd
+d16ea937505bcc0f0595660c8f4477e6 *man/nakagami.Rd
+ec4fd16fac5c0c2657dfecb963394ad2 *man/nakagamiUC.Rd
+0fc885bb61c35622aac3df1d729fe85a *man/nbcanlink.Rd
+05a7844d77b65b35420f247e776ee657 *man/nbolf.Rd
+c9429d15843929a27faa844390f60942 *man/negbinomial.Rd
+8e9dc5fac4c38f9fa0cebcd8c65a33f1 *man/negbinomial.size.Rd
+4811ccf0037a161a3381c6390a54a004 *man/normal.vcm.Rd
+0d98018be4492bfea10b86cf9af929c6 *man/notdocumentedyet.Rd
 5e590acdda3ff0a9e2df0db8d233f848 *man/nparamvglm.Rd
-8489561ee3f0890992e047759c6cca94 *man/oiposbinomUC.Rd
-2902dd89d64f467803e677121e19b2c3 *man/oipospoisUC.Rd
-467d7104cd536f3f682d5c6d97a318fe *man/oipospoisson.Rd
+880a8634be7d3a1b56be1c93545fb327 *man/oalog.Rd
+d5d5aa6c773fe976386fe73c300926c8 *man/oalogUC.Rd
+1372b54544f32022ac1e7b0e0c653407 *man/oapospoisUC.Rd
+ac947e9ab52c8db4338fbb8fe74a9e65 *man/oapospoisson.Rd
+4bad12a578bf78715b8564c0d4404494 *man/oazeta.Rd
+a095d22eecb1cc08cb577d3fe8980d25 *man/oazetaUC.Rd
+5ce5c0f9814fe22b8bb29ba0c884d407 *man/oilog.Rd
+d09ac6330dc06648d12cff12a46e7301 *man/oilogUC.Rd
+188f9232c145101e2406d295a03baafd *man/oiposbinomUC.Rd
+bd35edc6b99943212cdb1bba9e7aa19b *man/oiposbinomial.Rd
+d885552fcb8421235b192fe939c77b1e *man/oipospoisUC.Rd
+2900e15a9d5d04db0c473a234fd20b80 *man/oipospoisson.Rd
+e5d5dd80195d9ca99e9119cb02a8067b *man/oizeta.Rd
+f17688c4d640452bbd9eb9a0a50e42e3 *man/oizetaUC.Rd
+e7d848e60f3a36a25df093deff2104be *man/oizipf.Rd
+a15a33c2c5565e2aa3c09c7d3b1c9546 *man/oizipfUC.Rd
 98b83e406ea1968ba3e8b17d0933b2cf *man/olym.Rd
-858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
+94ca3795c49c069e81d4abb217046057 *man/ordpoisson.Rd
+03e5915aea0d7d6d764123a245f3d8f7 *man/otlog.Rd
+f6ec078f93412c9498e1c309f2cc3761 *man/otlogUC.Rd
+6803fbdd9ae7dc3086c20e30abe93dce *man/otpospoisUC.Rd
+502cc7989515c59ce6f1fd40bf705b12 *man/otpospoisson.Rd
+244e33255edcc38590a49bab0cdc282c *man/otzeta.Rd
+4b01e8ae04198fa40c89cbf83df24e21 *man/otzetaUC.Rd
 5d9093c6c1297fb988ed0695d88ffeb2 *man/oxtemp.Rd
-e1f36164728a10785808cb359a7807e9 *man/paralogistic.Rd
+8313e214e8fb06aba430451440ad1f3a *man/paralogistic.Rd
 383805a5130a512c207a6a30c28553d3 *man/paralogisticUC.Rd
-b8a1bd0580460ec6155b7c7bb2dae503 *man/paretoIV.Rd
-9e30cad5872ffef80576a429e37cdaca *man/paretoIVUC.Rd
-c0c60830c70e697aeab8bc6d11472b78 *man/paretoff.Rd
-28a8a9fa1e219d71dcb68cfdb6f88d1b *man/perks.Rd
-a0d64aa4469a9ca70fcfa4e5af26956a *man/perksUC.Rd
-60fac0e03c8dce88e04e2c3f6def20b9 *man/persp.qrrvglm.Rd
-e4ea396d024de674ff4bfdda6975bb72 *man/pgamma.deriv.Rd
-8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
-2c3491351af8d4eb4618723f612c4f26 *man/plotdeplot.lmscreg.Rd
-cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
-24a05d0db169fb74f603b21f0b8dd7b8 *man/plotqtplot.lmscreg.Rd
-725d095cfee76e0b05a1448738d7e853 *man/plotrcim0.Rd
-0f3e17d0b1877bd81a9ae8431cd6beb7 *man/plotvgam.Rd
-72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
-823905758e645eb93ac7292316cb47fc *man/plotvglm.Rd
-40f1661d2f26cb11f54c9140c767c61b *man/pneumo.Rd
-606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
+bb3a3f7ead9224fd8c9a4b0404bca995 *man/paretoIV.Rd
+8111c66c525adc1c66cda4a6fa9a5d94 *man/paretoIVUC.Rd
+d5fc72aaf9ba9efb046c5be1f65fd11f *man/paretoff.Rd
+9b2c4dd01870f3f32ba3c15ae95587cb *man/perks.Rd
+295f59604bff838edcef5b96d0da8350 *man/perksUC.Rd
+39f6cfbf1ae8bc5222d992e92fb4f954 *man/persp.qrrvglm.Rd
+2712adc085e3af192860b8a966ca5d78 *man/pgamma.deriv.Rd
+353c3ec4b00a91d4417bff571322ac49 *man/pgamma.deriv.unscaled.Rd
+30c34e19012351e5099882d59a44baf3 *man/plotdeplot.lmscreg.Rd
+34ef2a5ed9f1bfa72b34480b77845c5a *man/plotqrrvglm.Rd
+4b406159b245829944a887876f3e713b *man/plotqtplot.lmscreg.Rd
+963dc57ab1c5ac0a61c3be106219e332 *man/plotrcim0.Rd
+d464580d44d5a26f6c77288fc76793e9 *man/plotvgam.Rd
+f25590471e0e46cdeb4f1e392f2f04c7 *man/plotvgam.control.Rd
+26fed3fa54d8840117d985ef588d081b *man/plotvglm.Rd
+38cd2c95cba3ed8b74972d53df71426b *man/pneumo.Rd
+2d5197dddcf4865060d96fcab0386ba2 *man/poisson.points.Rd
 8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
-e4e2fc2618efd51a0bc97ab8671ed82b *man/poissonff.Rd
-83497c4069d8c74dc15f0308de0dac89 *man/polf.Rd
-696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
-2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
-a746161f043ec5c5517df4b9cf71501e *man/posbernoulli.b.Rd
-04f6169b69f25ad72e088a51ef9e99b7 *man/posbernoulli.t.Rd
-12ee5b18104f163749da385de04fa175 *man/posbernoulli.tb.Rd
-04eefb1ff1ad4d9af313b5dec284d91e *man/posbinomUC.Rd
-aab909e407aa248772db0235e64890dd *man/posbinomial.Rd
-3dc1e01b8fe96fe13467d80aab3d0465 *man/posgeomUC.Rd
-8b10cb766f37fa372b6f50632752d9a7 *man/posnegbinUC.Rd
-7f73ccbe96ab4a1d68184e0b525dae86 *man/posnegbinomial.Rd
-45b528182d1c01bc352dea7b84fd7671 *man/posnormUC.Rd
-9061c33c9a5d44acc0c5c4fd1eeec22f *man/posnormal.Rd
-b2a2f2ec4eff7fb9c96b053a099f93d3 *man/pospoisUC.Rd
-2fdd9216b21961f62931871dc165375a *man/pospoisson.Rd
-cc06ad7f82789c3703e4977cc39828ed *man/powerlink.Rd
-66bad6a1a2012e256b483e1727aca7e9 *man/prats.Rd
-ee31e58dfd33c2c3b0d51eac95b553ad *man/predictqrrvglm.Rd
-cb6a8c644c31d6ec5e8977ea7b1198df *man/predictvglm.Rd
-1842dc23f02ce22f6aef3247d61965f8 *man/prentice74.Rd
+e2ed4b49bc24fcb3afe5faa9087c8907 *man/poissonff.Rd
+3fdf36e4c276e54a76645769d2ed72bc *man/polf.Rd
+694c02f146b91973590273ad5cb4fd4f *man/polonoUC.Rd
+164d34ccd72cc10ec9791bac5b1ed7fe *man/posbernUC.Rd
+e4df302db602b3c97346af14aca849d1 *man/posbernoulli.b.Rd
+71dfacd1e28b00ac6ad6647c5a51ff5d *man/posbernoulli.t.Rd
+3d033407af789f6441ea60bba129097e *man/posbernoulli.tb.Rd
+fd9833c48acf34cec92c36ca92734c1a *man/posbinomUC.Rd
+5e0a62bad63329c57c079e34816d77ee *man/posbinomial.Rd
+11c6a588bb83c54efcc4bb14d44de769 *man/posgeomUC.Rd
+3ddd0176688f1477124da0158406b0d0 *man/posnegbinUC.Rd
+ee784865b3e4d1f9ae7a632dd1fdc47e *man/posnegbinomial.Rd
+a13ee6ad182d4f914aada7041f84332e *man/posnormUC.Rd
+0cad58b1ace9b8b16c4f563c1e4af868 *man/posnormal.Rd
+f1013339a9d1c7745558d45d80f80cc4 *man/pospoisUC.Rd
+4e8e0fd1d0589cc90578092f56fe8a47 *man/pospoisson.Rd
+9fcfe365dcb6ac011a4657a1de224cfc *man/powerlink.Rd
+c4ba6c4f7daa451ff072c4dc6212482c *man/prats.Rd
+fb899ff42f1e50ad04f981036b560cd8 *man/predictqrrvglm.Rd
+7d97e6a6bdb932f940ff3d02985de1b8 *man/predictvglm.Rd
+98fd7cc94d6cac1ed9ff3196422cf116 *man/prentice74.Rd
 5f4fbb060b2d8386d8d2bfde926d9d5d *man/prinia.Rd
-889d24cbaa36abd8df4c54fbf88609e2 *man/probit.Rd
-0dc0ebdd8538489ac38a624176612691 *man/propodds.Rd
+aee50b5fbbc6e550edf241b0bf7a037e *man/probit.Rd
+603ba91498e87096beca269b31993826 *man/propodds.Rd
 241402d089ef4159f01fb4cd2c72b9a3 *man/prplot.Rd
-3a0eab7a9e21bac43d738f9ab9681f80 *man/ps.Rd
 ab1399d5d5f71707fd46960dc3efad04 *man/put.smart.Rd
-86482135c7096ece69019577ca79a2a1 *man/qrrvglm.control.Rd
+46b5a6ac57b987b42b63bb50b5ed9228 *man/qrrvglm.control.Rd
 4d9e77b96958342af0ab14eb7efe6ed3 *man/qtplot.gumbel.Rd
-b10bad72776d283be77901e730593f2e *man/qtplot.lmscreg.Rd
-6c60658fef3dc7aa5d53d1d954a65e96 *man/quasibinomialff.Rd
-06c7ef40ac06f97042d785a04e81989e *man/quasipoissonff.Rd
+70e2a9e06f485fe104196f9e8106eb42 *man/qtplot.lmscreg.Rd
+fda36bd798b4f79df236467bedfee142 *man/quasibinomialff.Rd
+024c0d00d1d04c12bd1ae9df05705ec7 *man/quasipoissonff.Rd
 bbde69d1bad346cd4ad04763c96d6ffe *man/qvar.Rd
-9941ff94abd604ccf9bf44d3819e60ee *man/rayleigh.Rd
+d2436d379f06a0ba36a56ad83f5dfe59 *man/rayleigh.Rd
 a95c0df100dedc0b4e80be0659858441 *man/rayleighUC.Rd
-6c45f58f39a63abc2ce8a0923c75cecc *man/rcqo.Rd
-97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
-585af0deb3deb7b61388d6d4557994d8 *man/rec.exp1.Rd
-dbfea987d2d41c45477fa82bd978ab5e *man/rec.normal.Rd
-1787d0e69981aab74ae789bac092722e *man/reciprocal.Rd
-8e6ffaeea6e88d46925e60f343364a0d *man/rhobit.Rd
+1809b33212eea356d82f80436bb14976 *man/rcqo.Rd
+297ed178a02d65d90eebe17ab7981127 *man/rdiric.Rd
+ecde375d79d50e892f54786fa4dd8327 *man/rec.exp1.Rd
+b2f3db92bcc08670d243983b9ccf050a *man/rec.normal.Rd
+2acf75ab67b2a465ff82cebcce58d965 *man/reciprocal.Rd
+892fc03d536bb55c2f3aeee3bf245268 *man/rhobit.Rd
 d907e0bbe40b4fb02b0763ab6076309e *man/riceUC.Rd
-4d5fb32666631b97e65f8a2324f42bcb *man/riceff.Rd
-9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
-81b8b316257fea4c5fdd2f83a251f80b *man/rlplot.gevff.Rd
-3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
-330d39b23f38eea93d453f07fcb7574b *man/rrvglm-class.Rd
-6c28407aa99813ab33175602570fbd3b *man/rrvglm.Rd
-7b5b0475883ebb8e5845dad778e39be9 *man/rrvglm.control.Rd
-7d300d5a2ba96f87a47dc8a73df5eaa8 *man/rrvglm.optim.control.Rd
+4ce494548d1104b52ff5362b6654e954 *man/riceff.Rd
+4c325e0d372cb92f3d7257978c8d0051 *man/rigff.Rd
+aa87e119b195f40e7a9db901da1cea55 *man/rlplot.gevff.Rd
+a538533f1313c946eb0a2e5716513698 *man/rrar.Rd
+17084ab97d2faf2df082ea5b5b4de6e8 *man/rrvglm-class.Rd
+1c7d21696f2e333c90412cc53b369b72 *man/rrvglm.Rd
+bbcf2bc13a6493c9e1cd55dde8f7fc29 *man/rrvglm.control.Rd
+b6726fd139c577faab7116201b46a8f0 *man/rrvglm.optim.control.Rd
 ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
-6eb25df526a146532bed84abda730335 *man/s.Rd
-c66939737b4a412d7057eaf0da8f67d9 *man/sc.studentt2.Rd
-114f55f02750721179c9fc78d93f686c *man/sc.t2UC.Rd
-c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
+4ff8684ef85a13adc80a3f9cd21cdc05 *man/s.Rd
+6341d64e2c95390be630ccedf4b1b89f *man/sc.studentt2.Rd
+307960ccb037e87c18c6df4ba774f761 *man/sc.t2UC.Rd
+8e497f59f72836001a13f44caa2127fd *man/seq2binomial.Rd
 9985ea15444cc317e3e8fc2aad7200da *man/setup.smart.Rd
-056aa6efa43e4cd79f5e07769a0c6fd9 *man/simplex.Rd
-f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
-41af17badd0ef1b17cee591a35d46a12 *man/simulate.vlm.Rd
-dad32c56e791762f4f795cd5d1fc38dc *man/sinmad.Rd
+73f90d943920dda558b022de9d7e06cc *man/simplex.Rd
+3a6df504e33f0a38210c7f07243af4b1 *man/simplexUC.Rd
+55472b86057f6827b8da81a884066eff *man/simulate.vlm.Rd
+abb6970dec2c99061594d6429dfd156a *man/sinmad.Rd
 95cbc5903a187d325c52c3d9d07ee252 *man/sinmadUC.Rd
-c5839042eff769ac461463b8a7a49428 *man/skellam.Rd
+31eae032b50794ce290bba66d7873df0 *man/skellam.Rd
 2424940e3cff6d5a3ddd0ee99565ea39 *man/skellamUC.Rd
-b62da6a60b01916a10d691e980253bc0 *man/skewnormUC.Rd
-3797084c4e552d460e8b3942a661260a *man/skewnormal.Rd
-fda97ab39e5972100e2392fd0f26432b *man/slash.Rd
-9fc90a85fdd63c0b3c49203f5e3d776f *man/slashUC.Rd
+922d2393f1195c0e0fe93a7ee58590c0 *man/skewnormUC.Rd
+236f9327ac9a3f291712e2f064dc59f7 *man/skewnormal.Rd
+60cd21a8dfbd42ab623261382b2eac34 *man/slash.Rd
+55e7c75854df8cfeeb3e871a9ed3b286 *man/slashUC.Rd
+cd2637f8050f093064fd6e85e096a5df *man/sm.os.Rd
+445ffb7a2db302f854a0d1bd7306281b *man/sm.ps.Rd
 21bada3a13aca65ba49fb28127575144 *man/smart.expression.Rd
 5726ef8bb900532df62b24bd4b7b8fe4 *man/smart.mode.is.Rd
-21a1d3bd045859ceab377610a53ba976 *man/smartpred.Rd
-81d3f84a4dc023adad8e37f46b949ae6 *man/sratio.Rd
-501d551af0419b35ef1bd47bf4d740db *man/studentt.Rd
-8f91c92bee6e12da2adea37b35535a8e *man/summaryvglm.Rd
-234bf47d30e9afe3629e4ad8c1b39b4b *man/tikuv.Rd
-ccaa57b076049fdf3cee1c321a2ab456 *man/tikuvUC.Rd
-190c660343d7f8465fc01c043c28f658 *man/tobit.Rd
-5130a86e60a3b1010b1364155a1afdd0 *man/tobitUC.Rd
+ef0defa486a8ea35cf8f0008dcacfd3c *man/smartpred.Rd
+84ef8e73613b4e648d320236a8f61720 *man/sratio.Rd
+9d461f98c11b2731f60b5d06460d2a35 *man/studentt.Rd
+c741f034c6a2a63d020f384e1057f51e *man/summarypvgam.Rd
+a1553608ed84216f359717b6aebc6079 *man/summaryvgam.Rd
+1d0c13caddbb48fc79fe09a1811475a5 *man/summaryvglm.Rd
+9c715fb5d55b290394fae63526c67851 *man/tikuv.Rd
+4777309021c9780ce7229ab29aa6ac6b *man/tikuvUC.Rd
+b36380d5c4189df2cb25a7c4644ce664 *man/tobit.Rd
+3eac65d470861c7c8492b8c996ff88f6 *man/tobitUC.Rd
+630313b8d7de3fb57ac2446cf966d652 *man/topple.Rd
+198fe709514ab9e978abb67e5016521d *man/toppleUC.Rd
 b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
-59e040af3616943e93946ddf0ba96aba *man/triangle.Rd
-4b120eb41d1983a4afbe2b45793dc11e *man/triangleUC.Rd
-1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
-deae0b3d6157ae23411419f0f64b2ef6 *man/trplot.qrrvglm.Rd
-aeaf42ac6e475f1dc3f180450d56c2ee *man/truncparetoUC.Rd
-1658b0820ef97964c22fa4f3a18d13e6 *man/truncweibull.Rd
-50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-1ef31771939fc4f99d962a282252abf5 *man/undocumented-methods.Rd
+91785ee07f0a4a097b818f850a5fc7da *man/triangle.Rd
+7d08069709e7beb97ace613a967cd70e *man/triangleUC.Rd
+3184a6345636018d37b8c6ba9680df9c *man/trplot.Rd
+9fb1ee580c26ba647e722eedb2680c93 *man/trplot.qrrvglm.Rd
+346a3abab054b4d7c524770ffce9e1c6 *man/truncparetoUC.Rd
+28b59fd1046ac92695de2e144bad7292 *man/truncweibull.Rd
+90bd59df538b418b4250e7ca95b95ccb *man/ucberk.Rd
+70f642ee2acdc8a1e8390b25c361b5b1 *man/undocumented-methods.Rd
 395bf20844e881303e4f76da27a693cd *man/uninormal.Rd
-6a60d8e09c890e47042be1203aee9547 *man/vcovvlm.Rd
-f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
-8ab09ea32a3839db780ac641218c322e *man/vgam-class.Rd
-8a51ee9d7f3c6960f4475e3dc76563b1 *man/vgam.Rd
-5bdd6fc54f66b79bdd33317fc6741e7f *man/vgam.control.Rd
-126b55b4567a63cf2edb04a8b6d91506 *man/vglm-class.Rd
-bc4f239adfeb3eba830c3738badad2d1 *man/vglm.Rd
-123a3db22c339481529926292883ec46 *man/vglm.control.Rd
-33ea80f5f411700dff4b19371517c743 *man/vglmff-class.Rd
-3c3444f49659331d0b0da1c4e28ea9c8 *man/vonmises.Rd
-25b2ef45238e3f61e82dcf52f3d17090 *man/vsmooth.spline.Rd
-c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
-9b9bdfbbf8060eb284c84e8ed9273154 *man/waldff.Rd
-8bc759f493a94c1df7477b32b35ef8a9 *man/weibull.mean.Rd
-f490b97d72a0bdd81753f2cfc45e6809 *man/weibullR.Rd
-e41e54f8623a002d20e55df65c5b6a87 *man/weightsvglm.Rd
-e7fd9c7165410545d49481aeded2b317 *man/wine.Rd
+79644baae867f75743c1e0140475976f *man/vcovvlm.Rd
+3d62484053bac86524482b090739b567 *man/venice.Rd
+8481f7f192836bfced10d8b3602fee0a *man/vgam-class.Rd
+7dc6324ef92b570ca3f14315103bc6e1 *man/vgam.Rd
+ea2c68965f70af760eecc6395c88eeaa *man/vgam.control.Rd
+63631c9b681a02e88ddf1c561a056837 *man/vglm-class.Rd
+932d3e97aa44788c2f5934d8b54fba8c *man/vglm.Rd
+f972a5160daa01f39cd570b5f54859e6 *man/vglm.control.Rd
+4f4cfc57a0e88ef20bd239bb472d3c26 *man/vglmff-class.Rd
+1f2b7e686f738759842ed3440c022a8a *man/vonmises.Rd
+1e00d80d16a246873d74d6ff82bb08ea *man/vsmooth.spline.Rd
+7977fc14291cc0edb2ecba062401f3fe *man/waitakere.Rd
+336b5a87d1c8a1487c234571776198cc *man/waldff.Rd
+74524fcccdf2a073afa7b16eb3ea2729 *man/weibull.mean.Rd
+9ce85dfd3a2c65e056a5bdde204d9a26 *man/weibullR.Rd
+926b0135ba89f142999133d8a386e018 *man/weightsvglm.Rd
+590569895fbf4c69045cd5e4c75eae19 *man/wine.Rd
 a814b37503a9534c86789482ab81333f *man/wrapup.smart.Rd
-622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
-28e8c835229f9fdbb6605917fa38e3aa *man/yip88.Rd
-225fcd19868f17b4a5d2590e834cb888 *man/yulesimon.Rd
-ef96177f3ee5b07478b717529111adea *man/yulesimonUC.Rd
-ae671324c0f93f66adc72f053ef9ebd9 *man/zabinomUC.Rd
-cb21430df0f12962f6abf34d9d0e51ce *man/zabinomial.Rd
-7d5df5fee6f78c5cf37faaf71adbbb91 *man/zageomUC.Rd
-8c0f4c29525dab1b9715b9f7fe40facc *man/zageometric.Rd
-78eef8b541d039b00e9990ff758e53e9 *man/zanegbinUC.Rd
-ce96ae4bbda9d9e1c0cbcf4b9852a3eb *man/zanegbinomial.Rd
-b4bcb3a52a6e60efbdaa5d3cfed6fbf4 *man/zapoisUC.Rd
-d5a378daf17ca29279ffce104fe40cb1 *man/zapoisson.Rd
-426432d39c7a2b0975e6cf9fc3ce520d *man/zero.Rd
-2364749f0041ab1fc22b6469bef31fe4 *man/zeta.Rd
-e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
-02dcb7552bb55e8e8b37aa55cde4a9b3 *man/zetaff.Rd
-a2a94ba506f78263c96f423740e3270f *man/zibinomUC.Rd
-6e0a43313870e96f898452981365bf90 *man/zibinomial.Rd
-7b1d2ee37f339b9a218f1db4abb30cdd *man/zigeomUC.Rd
-75b757f1586dba0d8837bc4bc682da73 *man/zigeometric.Rd
-025dd2763701ec5b6880bcd6f4a9f35a *man/zinegbinUC.Rd
-e638747f021ad82af94a1938bf33aaa0 *man/zinegbinomial.Rd
-75f01804be352529f2935da74770c4b1 *man/zipebcom.Rd
-abfe2e5adf8a4fcd610adccf060e4f45 *man/zipf.Rd
-fd2adf6acc7093de70cb3c16d3819f23 *man/zipfUC.Rd
-2751243dfcbd74edff2f05db0d841afc *man/zipoisUC.Rd
-e98f562d17c2bebb37d9937695458519 *man/zipoisson.Rd
-ca15a5a4e923e77169ed88e86877ab09 *man/zoabetaR.Rd
-bc8275fcc85b5884fcfe7a70e49e6b5f *man/zoabetaUC.Rd
+d37d3685ddda0eb2e65314d5df4d92c0 *man/yeo.johnson.Rd
+ba8a9e9e16a86f89352763dffb377942 *man/yip88.Rd
+40d1463cbadda8c9659f9de4c059997a *man/yulesimon.Rd
+e2cb133a9b12c848730f8716944ade3e *man/yulesimonUC.Rd
+b3f454f3c002fd2eb31f4b38df803626 *man/zabinomUC.Rd
+c8d7a55e87998b3baaa10545046f4f5d *man/zabinomial.Rd
+5ffb51ee3a3e86590daa4ce3ebd6b410 *man/zageomUC.Rd
+59828103410f040ea22c4eba309cbf5e *man/zageometric.Rd
+5033e639911af869f9aea3d5014067f6 *man/zanegbinUC.Rd
+f804f4552fe35142f67c3526fbfa2b54 *man/zanegbinomial.Rd
+cce8cebbf1f5b348ede3d0fe284201a1 *man/zapoisUC.Rd
+75d7a648df8e8813590ec9723fccc6ee *man/zapoisson.Rd
+6b4cfbabda746f57a831f2d762710a0f *man/zero.Rd
+1a7e90a732be5e2c8bc2d7ba617bec9e *man/zeta.Rd
+9364e9797c523a5114b6833c8452fc36 *man/zetaUC.Rd
+73cd8928268a99e4870eb722e29b7c8d *man/zetaff.Rd
+7b0a5c037cbadaa08859b1ce2cb860ec *man/zibinomUC.Rd
+47ec000d6c8d502f60e9706b88d71f49 *man/zibinomial.Rd
+070f470ac592a7d463c8fea7a239c787 *man/zigeomUC.Rd
+e83cc03487ae3b667eb4fde5647bc99a *man/zigeometric.Rd
+5b0ce56f13bb255e3ee569373403b797 *man/zinegbinUC.Rd
+028c3b0f5ae31395cbf7240a31b9d615 *man/zinegbinomial.Rd
+e5f47c53c8af2547c4cf3f867ebec4d2 *man/zipebcom.Rd
+60fcc6b3c2d2cda0a13119e3d02fad08 *man/zipf.Rd
+95724a61f6d83eb58c97246d14ddaaae *man/zipfUC.Rd
+10be2ebb9b6f81f14463acd1cd226b2f *man/zipoisUC.Rd
+24a4e6a3a3e010e6beba76edc4c34475 *man/zipoisson.Rd
+1e98238d361280f9119e993fb5480910 *man/zoabetaR.Rd
+c6979016b5a054cf8e740fdf3e2a7620 *man/zoabetaUC.Rd
 f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
 ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
 8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
-77ed63cecc681dfebc94a028d0cfc996 *src/fgam.f
+964e4ddd4ec4e99a1924ed513a3b124c *src/fgam.f
 f8fe99dcda865eceb06b66f4976f4bf2 *src/gautr.c
 dc1ca5b4e9a67b6d48c25e7107112d9c *src/lerchphi.c
 c54afdee58cf86ecaf1072c492b49001 *src/lms.f
@@ -632,19 +663,19 @@ feba7ba09eca8007392e0405c4b373a8 *src/muxr3.c
 473bc0b2f4d6757fa9b397ac0d7c9e47 *src/rgam3.c
 6aee7dc8f242ea6e9446ade5b7edeee5 *src/specfun3.c
 4814bb73b4c3eedc7507ad99511c7dc5 *src/tyeepolygamma.f
-80322c801242c7751e7bdcd0ae192744 *src/tyeepolygamma3.c
+b8589097163df4491a094baff9352a6d *src/tyeepolygamma3.c
 79cf39f1d83f25e29a6c56d344ea8d76 *src/vcall2.f
-3bc5ecda1e1216006e74ebd72b77d662 *src/vdigami.f
-3e145d8721d17dbd0e642508c2de1472 *src/veigen.f
+1875946181988bbab62f6a084c25ee12 *src/vdigami.f
+ea134ab83bbe5c5fc40bcda63e8a6728 *src/veigen.f
 5ea414b5b42454c8efa73152c45ea62b *src/vgam.f
 73b8d37419685738d4a7a151284299b4 *src/vgam3.c
 bbb4ca20dcf50cd985b411b9a65b68f2 *src/vlinpack1.f
-80c0a0f512ae74ecbed144c5f115fb16 *src/vlinpack2.f
-e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
+14d546d52123df5ffa6dd5013470c89f *src/vlinpack2.f
+5c49fbd5021e96518c4b3cc85ff82a39 *src/vlinpack3.f
 753359563526a9cd5ebac104dab2d754 *src/vmux.f
 9083b462bcc275ee6dda47e97f1ebf94 *src/vmux3.c
 b19585d2495c46800b0c95f347fe89f9 *src/zeta3.c
-bfa11dbdbff271fb20342560f2bacd53 *vignettes/categoricalVGAM.Rnw
-d7beca978b587625654f981f7dc433d0 *vignettes/categoricalVGAMbib.bib
+d0ac5e66d77fe4d6a7fa283e01b06882 *vignettes/categoricalVGAM.Rnw
+028d0e9facb7ef9ca35f376c8ac8728a *vignettes/categoricalVGAMbib.bib
 8e489008d8b8b8f769e5e93e351c9c42 *vignettes/crVGAM.Rnw
 b5d97a0617a50e3ac2396b54a6f9db67 *vignettes/crVGAM.bib
diff --git a/NAMESPACE b/NAMESPACE
index c56d112..930cd7c 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -7,7 +7,46 @@
 useDynLib(VGAM)
 
 
-export(doiposbinom, poiposbinom, qoiposbinom, roiposbinom)
+importFrom("utils", "tail")
+importFrom("stats", ".nknots.smspl")
+export(sm.os)
+export(label.cols.y)
+export(prob.munb.size.VGAM)
+export(negbinomial.initialize.yj)
+export(mroot2)
+export(psint)
+export(psintpvgam)
+export(startstoppvgam)
+export(summarypvgam, show.summary.pvgam)
+S3method(df.residual, pvgam, df.residual_pvgam)
+export(df.residual_pvgam)
+exportMethods(endf)
+export(endfpvgam)
+export(vcov.pvgam)
+S3method(vcov, pvgam, vcovpvgam)
+export(show.pvgam)
+importFrom("graphics", "polygon")
+export(model.matrixpvgam)
+S3method(model.matrix, pvgam, model.matrixpvgam)
+importFrom("stats", "ppoints")
+export(doazeta, poazeta, qoazeta, roazeta, oazeta)
+export(doapospois, poapospois, qoapospois, roapospois, oapospoisson)
+export(doalog, poalog, qoalog, roalog, oalog)
+export(ddiffzeta, pdiffzeta, qdiffzeta, rdiffzeta, diffzeta)
+export(dotzeta, potzeta, qotzeta, rotzeta, otzeta)
+export(dotpospois, potpospois, qotpospois, rotpospois, otpospoisson)
+export(dotlog, potlog, qotlog, rotlog, otlog)
+export(doilog, poilog, qoilog, roilog, oilog)
+export(doizipf, poizipf, qoizipf, roizipf, oizipf)
+export(gharmonic, gharmonic2)
+export(pzeta, qzeta, rzeta)
+export(qzipf)
+export(doizeta, poizeta, qoizeta, roizeta, oizeta)
+export(bisection.basic)
+export(Zeta.aux, deflat.limit.oizeta)
+export(topple, dtopple, ptopple, qtopple, rtopple)
+
+export(oiposbinomial, doiposbinom, poiposbinom, qoiposbinom, roiposbinom)
 export(doipospois, poipospois, qoipospois, roipospois, oipospoisson)
 export(deflat.limit.oipospois)
 export(zoabetaR)
@@ -15,7 +54,7 @@ export(zoabetaR)
 
 
 
-export(ps, Pen.psv, psv2magic)
+export(sm.ps, get.X.VLM.aug, psv2magic)
 
 export(checkwz)
 export(process.constraints)
@@ -91,7 +130,7 @@ exportMethods(responseName)
              "model.offset", "model.response", "model.weights",
              "na.fail", "napredict", "optim", "pbeta", "pbinom",
              "pgamma", "pgeom", "pnbinom", "polym", "printCoefmat",
-             "plogis", "qlogis", 
+             "plogis", "qlogis",
              "pweibull", "qbeta", "qbinom", "qchisq", "qf", "qgamma",
              "qgeom", "qnbinom", "qt", "quantile", "qweibull", "rbeta",
              "rbinom", "rgamma", "rgeom", "rlnorm", "rlogis", "rnbinom",
@@ -101,6 +140,15 @@ exportMethods(responseName)
 
 
 
+importFrom("stats4", profile)  # For S4, not S3
+export(profilevglm)  # For S4, not S3
+importFrom("stats", "approx")
+export(vplot.profile)
+export(vpairs.profile)
+importFrom("grDevices", "dev.flush", "dev.hold")
+importFrom("graphics", "frame")
+
+
 
 importFrom("stats4", confint)  # For S4, not S3
 export(confintvglm)  # For S4, not S3
@@ -240,7 +288,7 @@ exportMethods(hatplot)
 export(VGAMenv)
 
 
-export(lrtest, lrtest_vglm) 
+export(lrtest, lrtest_vglm)
 export(update_default, update_formula)
 
 
@@ -277,7 +325,7 @@ procVec,
 ResSS.vgam,
 valt.control,
 vforsub, vbacksub, vchol,
-vcontrol.expression, 
+vcontrol.expression,
 vplot, vplot.default, vplot.factor, vplot.list,
 vplot.matrix, vplot.numeric, vvplot.factor)
 
@@ -314,8 +362,9 @@ export(put.caption)
 
 export(
 cm.VGAM, cm.nointercept.VGAM, cm.zero.VGAM,
-Deviance.categorical.data.vgam, 
+Deviance.categorical.data.vgam,
 lm2qrrvlm.model.matrix,
+vlabel,
 dimm)
 
 
@@ -342,13 +391,13 @@ export(pnorm2, dnorm2)
 
 export(iam,
 fill, fill1, fill2, fill3,
-biamhcop, dbiamhcop, pbiamhcop, rbiamhcop, 
+biamhcop, dbiamhcop, pbiamhcop, rbiamhcop,
 bigamma.mckay,
 freund61,
 frechet, dfrechet, pfrechet, qfrechet, rfrechet,
-bifrankcop, dbifrankcop, pbifrankcop, rbifrankcop, 
+bifrankcop, dbifrankcop, pbifrankcop, rbifrankcop,
 biplackettcop, dbiplackcop, pbiplackcop, rbiplackcop,
-benini1, dbenini, pbenini, qbenini, rbenini, 
+benini1, dbenini, pbenini, qbenini, rbenini,
 maxwell, dmaxwell, pmaxwell, qmaxwell, rmaxwell,
 bifgmexp,
 bifgmcop, dbifgmcop, pbifgmcop, rbifgmcop,
@@ -361,13 +410,13 @@ paretoIII, dparetoIII, qparetoIII, rparetoIII, pparetoIII,
 paretoII, dparetoII, qparetoII, rparetoII, pparetoII,
 dparetoI, qparetoI, rparetoI, pparetoI,
 cens.gumbel, gumbelff, gumbel,
-dgumbel, pgumbel, qgumbel, rgumbel, 
+dgumbel, pgumbel, qgumbel, rgumbel,
 foldnormal, dfoldnorm, pfoldnorm, qfoldnorm, rfoldnorm,
 cennormal,
 cens.normal, double.cens.normal,
 rec.normal, rec.normal.control,
 rec.exp1,   rec.exp1.control,
-cens.rayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh, 
+cens.rayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh,
 drice, price, qrice, rrice, riceff, marcumQ,
 dskellam, rskellam, skellam,
 inv.gaussianff, dinv.gaussian, pinv.gaussian, rinv.gaussian, waldff,
@@ -377,9 +426,9 @@ expexpff1, expexpff)
 
 
 export(a2m,
-AICvlm, AICvgam, AICrrvglm, AICqrrvglm, # AICvglm, 
+AICvlm, AICvgam, AICrrvglm, AICqrrvglm, # AICvglm,
 anova.vgam,
-anova.vglm, 
+anova.vglm,
 bisa, dbisa, pbisa, qbisa, rbisa,
 betabinomialff, betabinomial,
 double.expbinomial,
@@ -391,14 +440,14 @@ care.exp,
 cauchy, cauchy1,
 concoef.rrvgam, concoef.Coef.rrvgam, concoef.Coef.qrrvglm, concoef.qrrvglm,
 cdf, cdf.lms.bcg, cdf.lms.bcn,
-cdf.lms.yjn, cdf.vglm, 
+cdf.lms.yjn, cdf.vglm,
 Coef.rrvgam, Coefficients,
-coefqrrvglm, 
+coefqrrvglm,
 coefvlm, coefvgam,
 coefvsmooth.spline, coefvsmooth.spline.fit,
-constraints, constraints.vlm, 
+constraints, constraints.vlm,
 deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
-deplot.lms.yjn, deplot.lms.yjn2, deplot.vglm, 
+deplot.lms.yjn, deplot.lms.yjn2, deplot.vglm,
 deviance.vlm, deviance.qrrvglm,
 df.residual_vlm,
 dirmultinomial, dirmul.old,
@@ -444,20 +493,20 @@ export(poisson.points, dpois.points)
 
 
 
-export(m2a, 
+export(m2a,
 erlang,
 dfelix, felix,
 fittedvlm, fittedvsmooth.spline, foldsqrt,
 formulavlm, formulaNA.VGAM,
 garma, gaussianff,
-hypersecant, hypersecant01, 
+hypersecant, hypersecant01,
 hyperg,
 inv.binomial, InverseBrat, inverse.gaussianff,
 is.Numeric,
 mccullagh89, leipnik,
 dlevy, plevy, qlevy, rlevy, levy,
 lms.bcg.control, lms.bcn.control, lmscreg.control,
-lms.yjn.control, 
+lms.yjn.control,
 lms.bcg, lms.bcn, lms.yjn, lms.yjn2,
 dlms.bcn, qlms.bcn,
 lqnorm,
@@ -526,10 +575,11 @@ summary.rrvgam, summary.grc,
 summary.rrvglm,
 summaryvgam, summaryvglm, summaryvlm,
 s.vam,
-terms.vlm, 
-termsvlm, 
+terms.vlm,
+termsvlm,
 Tol.Coef.qrrvglm, Tol.qrrvglm,
-triangle, dtriangle, ptriangle, qtriangle, rtriangle, 
+triangle, dtriangle, ptriangle, qtriangle, rtriangle,
+valid.vknotl2,
   vcovvlm,
 vglm.fit, vgam.fit,
 vglm.garma.control, vglm.multinomial.control,
@@ -537,14 +587,14 @@ vglm.multinomial.deviance.control, vglm.VGAMcategorical.control,
 vlm, vlm.control,
 vnonlinear.control,
 wweights, yeo.johnson,
-dzipf, pzipf, zipf,
+dzipf, pzipf, rzipf, zipf,
 zeta, zetaff,
 dzeta)
 
 
 
-export(lm2vlm.model.matrix) 
-export(vlm2lm.model.matrix) 
+export(lm2vlm.model.matrix)
+export(vlm2lm.model.matrix)
 
 
 
@@ -632,7 +682,7 @@ export(fff, fff.control,
 export(
 AA.Aa.aa, AB.Ab.aB.ab, ABO, acat,
 betaR, betaff,
-dbetageom, pbetageom, rbetageom, betageometric, 
+dbetageom, pbetageom, rbetageom, betageometric,
 dbetanorm, pbetanorm, qbetanorm, rbetanorm, # betanorm,
 betaprime,
 betaII,
@@ -643,7 +693,7 @@ binomialff, biplot.rrvglm, brat,
 bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
 calibrate, cao.control,
 cao,
-cdf.lmscreg, cgo, chisq, clo, 
+cdf.lmscreg, cgo, chisq, clo,
 concoef,
 Coef, Coef.qrrvglm, Coef.rrvglm, Coef.vlm,
 predictqrrvglm,
@@ -666,15 +716,15 @@ dbenf, pbenf, qbenf, rbenf,
 genbetaII.Loglikfun4, genbetaII, dgenbetaII,
 genpoisson,
 geometric, truncgeometric,
-dlino, plino, qlino, rlino, lino, 
+dlino, plino, qlino, rlino, lino,
 grc,
-dhzeta, phzeta, qhzeta, rhzeta, hzeta, 
+dhzeta, phzeta, qhzeta, rhzeta, hzeta,
 negidentity, identitylink,
 prentice74,
 amlnormal, amlbinomial, amlexponential, amlpoisson, Wr1, Wr2,
 dkumar, pkumar, qkumar, rkumar, kumar,
-dyules, pyules, ryules, yulesimon, 
-logff, dlog, plog, rlog,
+dyules, pyules, qyules, ryules, yulesimon,
+logff, dlog, plog, qlog, rlog,
 logF, dlogF,
 loglinb2, loglinb3,
 loglog,
@@ -710,10 +760,10 @@ negbinomial, negbinomial.size, polya, polyaR,
 uninormal, SURff, normal.vcm,
 nbcanlink,
 tobit, dtobit, ptobit, qtobit, rtobit,
-Opt, 
+Opt,
 perspqrrvglm, plotdeplot.lmscreg, plotqrrvglm, plotqtplot.lmscreg,
-plotvgam.control, plotvgam, 
-plot.vgam, 
+plotvgam.control, plotvgam,
+plot.vgam,
 cens.poisson,
 poissonff,
 dposbinom, pposbinom, qposbinom, rposbinom, posbinomial,
@@ -728,7 +778,7 @@ rrvglm.optim.control)
 
 export(eta2theta, theta2eta,
 rrvglm,
-simplex, dsimplex, rsimplex, 
+simplex, dsimplex, rsimplex,
 sratio, s,
 studentt, studentt2, studentt3, Kayfun.studentt,
 Tol, trplot.qrrvglm,
@@ -762,11 +812,11 @@ tikuv, dtikuv, ptikuv, qtikuv, rtikuv)
 
 
 exportClasses(vglmff, vlm, vglm, vgam,
-rrvglm, qrrvglm, grc,  rcim, 
+rrvglm, qrrvglm, grc,  rcim,
 vlmsmall, rrvgam,
 summary.vgam, summary.vglm, summary.vlm,
 summary.qrrvglm,
-summary.rrvgam, summary.rrvglm, 
+summary.rrvgam, summary.rrvglm,
 Coef.rrvglm, Coef.qrrvglm, Coef.rrvgam,
 vcov.qrrvglm,
 vsmooth.spline.fit, vsmooth.spline)
@@ -783,7 +833,7 @@ exportClasses(SurvS4)
 
  exportMethods(
 Coef, coefficients,
-constraints, 
+constraints,
 effects,
 predict, fitted, fitted.values,
 resid,
diff --git a/NEWS b/NEWS
index c986eff..08771ae 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,59 @@
 
 
 
+                CHANGES IN VGAM VERSION 1.0-3
+
+NEW FEATURES
+
+    o   vgam() with sm.os() and sm.ps() terms allows G2-VGAMs to be fitted.
+    o   plotvgam() has a "shade" argument.
+    o   Almost all family functions have been "validparams"-enabled, for
+        greater reliability.
+    o   confint() implements the profile likelihood method (in addition
+        to the Wald method).
+    o   New family functions: diffzeta(dpqr),
+        oilog(dpqr), oiposbinomial(dpqr), oizeta(dpqr),  oizipf(dpqr),
+        otlog(dpqr), otpospoisson(dpqr), otzeta(dpqr),
+        oalog(dpqr), oapospoisson(dpqr), oazeta(dpqr), topple(dpqr).
+    o   New functions: [pqr]zeta(), [qr]zipf().
+    o   Argument 'zero' now accepts "" or NA and interprets these as NULL,
+        i.e., no linear or additive predictors are intercept-only.
+    o   Significance stars added to summary(rrvglm.object), for very crude
+        inference.
+    o   zeta() can return the Hurwitz zeta function, via the 'shift' argument.
+    o   show.summary.vglm() will only print out any dispersion parameter
+        that is not equal to 1.
+    o   type.fitted = "quantiles" is available for
+        gevff(), negbinomial(), poissonff().
+    o   Tested okay on R 3.3.2.
+
+
+
+BUG FIXES and CHANGES
+
+    o   mills.ratio1() in tobit() did not handle very negative 'x' values
+        correctly. Thanks to Christoph Nolte for detecting this.
+    o   Renamed arguments: zetaff(d) use 'shape', not 'p'.
+    o   betabinomialff()@infos was buggy wrt 'lshape1' and 'lshape2'.
+        Thanks to Xiaodong for detecting this.
+    o   leipnik() uses logoff(offset = -1) as the default link for
+        lambda now, not "loge".
+    o   logff(dpqr) uses 'shape' instead of 'c'     as the parameter name.
+    o   yules(dpqr) uses 'shape' instead of 'rho'   as the parameter name.
+    o   hzeta(dpqr) uses 'shape' instead of 'alpha' as the parameter name.
+    o   felix(dpqr) uses 'rate'  instead of 'a'     as the parameter name.
+    o   dbetabinom.ab() handles large values of shape1 and shape2 better, via
+        the dbinom() limit. Thanks to Micha Schneider for picking up the bug.
+    o   [dpqr]posnegbin() have been improved a little.
+    o   logLik(summation = TRUE): prior weights 'w' have been
+        converted to a vector when passed in; this makes it less
+        likely to give an error.
+    o   Labelling of the colnames of the fitted values has changed for many
+        family functions, including those for multiple responses,
+        e.g., gevff(). Also "mean" had a bug or two in gevff()@linkinv.
+
+
+
                 CHANGES IN VGAM VERSION 1.0-2
 
 NEW FEATURES
diff --git a/R/Links.R b/R/Links.R
index 89f7d5e..f6bd12e 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -47,7 +47,7 @@
 
 
 
- d2theta.deta2 <- 
+ d2theta.deta2 <-
   function(theta,
            link = "identitylink",
            earg = list(theta = theta,  # Needed
diff --git a/R/aamethods.q b/R/aamethods.q
index fbdc436..911d8f9 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -24,9 +24,9 @@ VGAMenv <- new.env()
 
 
 
- 
- 
- 
+
+
+
 
 
 
@@ -80,7 +80,7 @@ valid.vglmff <- function(object) {
 }
 
 
-if (FALSE) 
+if (FALSE)
   setValidity("vglmff", valid.vglmff)
 
 
@@ -199,9 +199,9 @@ setClass("vgam", representation(
     contains = "vglm")
 
 
-setClass("psvgam", representation(
-      "psslot"              = "list"),
-    contains = "vglm")
+setClass("pvgam", representation(
+         "ospsslot"         = "list"),
+         contains = "vglm")
 
 
 
@@ -214,28 +214,47 @@ setClass("summary.vgam", representation(
         df = "numeric",
         pearson.resid = "matrix",
         sigma = "numeric"),
-    prototype(anova=data.frame()),
-    contains = "vgam")
+        prototype(anova = data.frame()),
+        contains = "vgam")
 
 
 setClass("summary.vglm", representation(
-        coef3 = "matrix",
-        cov.unscaled = "matrix",
-        correlation = "matrix",
-        df = "numeric",
-        pearson.resid = "matrix",
-        sigma = "numeric"),
-    contains = "vglm")
+         coef3 = "matrix",
+         cov.unscaled = "matrix",
+         correlation = "matrix",
+         df = "numeric",
+         pearson.resid = "matrix",
+         sigma = "numeric"),
+         contains = "vglm")
 
 
 setClass("summary.vlm", representation(
-        coef3 = "matrix",
-        cov.unscaled = "matrix",
-        correlation = "matrix",
-        df = "numeric",
-        pearson.resid = "matrix",
-        sigma = "numeric"),
-    contains = "vlm")
+         coef3 = "matrix",
+         cov.unscaled = "matrix",
+         correlation = "matrix",
+         df = "numeric",
+         pearson.resid = "matrix",
+         sigma = "numeric"),
+         contains = "vlm")
+
+
+
+
+
+
+setClass("summary.pvgam", representation(
+         anova      = "data.frame",
+         "ospsslot" = "list"),
+         prototype(anova = data.frame()),
+         contains = c("summary.vglm", "pvgam")
+         )
+
+
+
+
+
+
+
 
 
 
diff --git a/R/attrassign.R b/R/attrassign.R
index 7191109..3e7b383 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -34,7 +34,7 @@ if (!isGeneric("attrassign"))
   setGeneric("attrassign", function(object, ...)
              standardGeneric("attrassign"))
 
-setMethod("attrassign", "lm", 
+setMethod("attrassign", "lm",
          function(object, ...)
          attrassignlm(object, ...))
 
diff --git a/R/bAIC.q b/R/bAIC.q
index 98b556d..fcd0c2c 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -64,7 +64,7 @@ nparam.vgam <- function(object, dpar = TRUE,
   check.omit.constant(object)
 
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
-             length(object at misc$dispersion) else 0 
+             length(object at misc$dispersion) else 0
   nldf <- if (is.Numeric(object at nl.df)) sum(object at nl.df) else 0
 
   if (linear.only) {
@@ -82,7 +82,7 @@ nparam.rrvglm <- function(object, dpar = TRUE, ...) {
 
   estdisp <- object at misc$estimated.dispersion
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
-    length(object at misc$dispersion) else 0 
+    length(object at misc$dispersion) else 0
   str0 <- object at control$str0
   MMM <- object at misc$M
   Rank <- object at control$Rank
@@ -101,7 +101,7 @@ nparam.qrrvglm <- function(object, dpar = TRUE, ...) {
 
   estdisp <- object at misc$estimated.dispersion
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
-             length(object at misc$dispersion) else 0 
+             length(object at misc$dispersion) else 0
   str0 <- object at control$str0
   MMM <- object at misc$M
   Rank <- object at control$Rank
@@ -152,7 +152,7 @@ nparam.rrvgam <- function(object, dpar = TRUE, ...) {
 
   estdisp <- object at misc$estimated.dispersion
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
-             length(object at misc$dispersion) else 0 
+             length(object at misc$dispersion) else 0
   str0 <- object at control$str0
   MMM <- object at misc$M
   Rank <- object at control$Rank
@@ -226,7 +226,7 @@ if (!isGeneric("AIC"))
 
 
 
-AICvlm <- function(object, ..., 
+AICvlm <- function(object, ...,
                    corrected = FALSE,
                    k = 2) {
   estdisp <- object at misc$estimated.dispersion
@@ -299,7 +299,7 @@ AICqrrvglm <- function(object, ...,
 
 
 
- 
+
  AICrrvgam <- function(object, ...,
                        k = 2) {
 
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 3bf3a1a..afa43e1 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,9 +1,11 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
+
+
 if (!isGeneric("terms"))
   setGeneric("terms", function(x, ...) standardGeneric("terms"))
 
@@ -60,7 +62,7 @@ Build.terms.vlm <-
     if (M == 1)
       fit <- c(fit)
     if (cov.true) {
-      var <- ((x %*% cov) * x) %*% rep_len(1, length(coefs))
+      var <- rowSums((x %*% cov) * x)
       list(fitted.values = fit,
            se.fit = if (M == 1) c(sqrt(var)) else
                     matrix(sqrt(var), ncol = M,
@@ -72,9 +74,9 @@ Build.terms.vlm <-
 
     constant <- attr(x, "constant")
     if (!is.null(constant)) {
-      constant <- as.vector( t(coefmat) %*% constant )
+      constant <- as.vector(t(coefmat) %*% constant)
     }
-    
+
     if (missing(assign))
       assign <- attr(x, "assign")
     if (is.null(assign))
@@ -99,19 +101,19 @@ Build.terms.vlm <-
         TT <- assign[[term]]
         xt <- x[, TT]
         fit[, term] <- xt %*% coefs[TT]
-        if (cov.true)
-          se[, term] <- sqrt(drop(((xt %*% cov[TT, TT]) * xt) %*%
-                                    rep_len(1, length(TT))))
+        if (cov.true) {
+          se[, term] <- sqrt(rowSums((xt %*% cov[TT, TT]) * xt))
+        }
       }
     }
     attr(fit, "constant") <- constant
-    
+
     if (cov.true)
       list(fitted.values = fit,
            se.fit        = se) else
       fit
   }
-}
+}  # Build.terms.vlm()
 
 
 
diff --git a/R/calibrate.q b/R/calibrate.q
index c8c2daf..b3d2db1 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -49,11 +49,11 @@ if (!isGeneric("calibrate"))
                function(object, ...) standardGeneric("calibrate"))
 
 
- 
- 
- 
+
+
+
 calibrate.qrrvglm <-
-  function(object, 
+  function(object,
            newdata = NULL,
            type = c("latvar", "predictors", "response", "vcov", "all3or4"),
            initial.vals = NULL, ...) {
@@ -90,7 +90,7 @@ calibrate.qrrvglm <-
   minimize.obfunct <-
     if (Quadratic) object at control$min.criterion else
     TRUE  # Logical; TRUE for CAO objects because deviance is minimized
-  if (!is.logical(minimize.obfunct)) 
+  if (!is.logical(minimize.obfunct))
     stop("object at control$min.criterion is not a logical")
   optim.control <- calibrate.qrrvglm.control(object = object, ...)
 
@@ -121,7 +121,7 @@ calibrate.qrrvglm <-
     stop("The x1 vector must be an intercept only")
 
   nn <- nrow(newdata)
-  BestOFpar <- NULL   # It may be more efficient not to append 
+  BestOFpar <- NULL   # It may be more efficient not to append
   BestOFvalues <- NULL   # Best OF objective function values
   for (i1 in 1:nn) {
     if (optim.control$trace)
@@ -161,7 +161,7 @@ calibrate.qrrvglm <-
 
         if (optim.control$trace) {
           if (ans$convergence == 0)
-            cat("Successful convergence\n") else 
+            cat("Successful convergence\n") else
             cat("Unsuccessful convergence\n")
           flush.console()
         }
@@ -175,8 +175,8 @@ calibrate.qrrvglm <-
             (1:nrow(OFpar))[OFvalues == min(OFvalues)] else
             (1:nrow(OFpar))[OFvalues == max(OFvalues)]
      if (length(index) > 1) {
-         warning(paste("multiple solutions found for observation ", i1,
-                       ". Choosing one randomly.", sep = ""))
+         warning("multiple solutions found for observation ", i1,
+                 ". Choosing one randomly.")
          index <- sample(index, size = 1)
      } else if (length(index) == 0)
         stop("length(index) is zero")
@@ -190,7 +190,7 @@ calibrate.qrrvglm <-
 
   pretty <- function(BestOFpar, newdata, Rank) {
     if (Rank == 1) {
-      BestOFpar <- c(BestOFpar) 
+      BestOFpar <- c(BestOFpar)
       names(BestOFpar) <- dimnames(newdata)[[1]]
     } else
       dimnames(BestOFpar) <-
@@ -243,13 +243,13 @@ calibrate.qrrvglm <-
        etaValues
     } else if (type == "vcov") {
        if (Quadratic)
-         dimnames(vcValues) <- list(as.character(1:Rank), 
+         dimnames(vcValues) <- list(as.character(1:Rank),
                                     as.character(1:Rank),
                                     dimnames(newdata)[[1]])
        vcValues
     } else if (type == "all3or4") {
        if (Quadratic)
-         dimnames(vcValues) <- list(as.character(1:Rank), 
+         dimnames(vcValues) <- list(as.character(1:Rank),
                                     as.character(1:Rank),
                                     dimnames(newdata)[[1]])
        dimnames(muValues) <- dimnames(newdata)
@@ -269,7 +269,7 @@ calibrate.qrrvglm <-
 
 
 
- 
+
 
 
 .my.calib.objfunction.qrrvglm <-
@@ -289,7 +289,7 @@ calibrate.qrrvglm <-
     eta[s, 1] <- eta[s, 1] + t(bnumat) %*% temp %*% bnumat
   }
   eta <- matrix(eta, 1, M, byrow = TRUE)
-  mu <- rbind(mu.function(eta, extra))  # Make sure it has one row 
+  mu <- rbind(mu.function(eta, extra))  # Make sure it has one row
   value <- objfun(mu = mu, y = y,
                  w = 1,  # ignore prior.weights on the object
                  residuals = FALSE, eta = eta, extra = extra)
@@ -314,7 +314,7 @@ calibrate.qrrvglm <-
 
 
 
- 
+
 
 .my.calib.objfunction.rrvgam <-
   function(bnu, y, extra = NULL,
@@ -323,13 +323,13 @@ calibrate.qrrvglm <-
            everything=TRUE,
            mu.function) {
     Rank <- length(bnu)
-    NOS <- Coefs at NOS 
+    NOS <- Coefs at NOS
     eta <- matrix(NA_real_, 1, NOS)
     for (jlocal in 1:NOS) {
       eta[1, jlocal] <- predictrrvgam(object, grid = bnu, sppno = jlocal,
                                       Rank = Rank, deriv = 0)$yvals
     }
-    mu <- rbind(mu.function(eta, extra))  # Make sure it has one row 
+    mu <- rbind(mu.function(eta, extra))  # Make sure it has one row
     value <- objfun(mu = mu, y = y,
                    w = 1,  # ignore prior.weights on the object
                    residuals = FALSE, eta = eta, extra = extra)
diff --git a/R/cao.R b/R/cao.R
index 85a5ff4..18a727b 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -10,27 +10,27 @@
 
 
 cao  <- function(formula,
-                 family, data = list(), 
+                 family, data = list(),
                  weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control = cao.control(...), 
-                 offset = NULL, 
+                 control = cao.control(...),
+                 offset = NULL,
                  method = "cao.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                 contrasts = NULL, 
+                 contrasts = NULL,
                  constraints = NULL,
-                 extra = NULL, 
+                 extra = NULL,
                  qr.arg = FALSE, smart = TRUE, ...) {
   dataname <- as.character(substitute(data))  # "list" if no data=
   function.name <- "cao"
 
   ocall <- match.call()
 
-  if (smart) 
+  if (smart)
     setup.smart("write")
 
   mt <- terms(formula, data = data)
-  if (missing(data)) 
+  if (missing(data))
     data <- environment(formula)
 
   mf <- match.call(expand.dots = FALSE)
@@ -39,9 +39,9 @@ cao  <- function(formula,
     mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
   mf$coefstart <- mf$etastart <- mf$... <- NULL
   mf$smart <- NULL
-  mf$drop.unused.levels <- TRUE 
+  mf$drop.unused.levels <- TRUE
   mf[[1]] <- as.name("model.frame")
-  mf <- eval(mf, parent.frame()) 
+  mf <- eval(mf, parent.frame())
   if (method == "model.frame")
     return(mf)
   na.act <- attr(mf, "na.action")
@@ -58,7 +58,7 @@ cao  <- function(formula,
   x <- model.matrix(mt, mf, contrasts)
   attr(x, "assign") <- attrassigndefault(x, mt)
   offset <- model.offset(mf)
-  if (is.null(offset)) 
+  if (is.null(offset))
     offset <- 0  # yyy ???
   w <- model.weights(mf)
   if (!length(w)) {
@@ -165,7 +165,7 @@ cao  <- function(formula,
   } else list()  # R-1.5.0
 
   slot(answer, "iter") <- fit$iter
-  fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix 
+  fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix
   dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
                                    fit$misc$predictors.names)
   slot(answer, "predictors") <- fit$predictors
diff --git a/R/cao.fit.q b/R/cao.fit.q
index e6bd882..4c453b9 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -47,8 +47,8 @@ cao.fit <-
   intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
   y.names <- predictors.names <- NULL # May be overwritten in @initialize
 
- 
-  n.save <- n 
+
+  n.save <- n
 
 
   Rank <- control$Rank
@@ -56,7 +56,7 @@ cao.fit <-
 
   if (length(family at initialize))
     eval(family at initialize)   # Initialize mu and M (and optionally w)
-  n <- n.save 
+  n <- n.save
 
   modelno <- switch(family at vfamily[1], "poissonff" = 2,
                     "binomialff" = 1, "quasipoissonff" = 0,
@@ -155,7 +155,7 @@ cao.fit <-
     stop("not nice")
 
   ncolHlist <- unlist(lapply(Hlist, ncol))
-  latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat 
+  latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat
 
 
   rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
@@ -167,7 +167,7 @@ cao.fit <-
   NOS <- ifelse(modelno %in% c(3, 5), M/2, M)
   p1star. <- if (Nice21) ifelse(modelno %in% c(3, 5), 2, 1) else M
   p2star. <- if (Nice21) Rank else stop("not Nice21")
-  pstar. <- p1star. + p2star. 
+  pstar. <- p1star. + p2star.
   nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
   lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
 
@@ -198,7 +198,7 @@ cao.fit <-
                                   trace = as.integer(control$trace),
                                   maxit = control$Maxit.optim,
                                   REPORT = 10),
-                   etamat = eta, xmat = x, ymat = y,  # as.matrix(y), 
+                   etamat = eta, xmat = x, ymat = y,  # as.matrix(y),
                    wvec = w, modelno = modelno,
                    Control = control,
                    Nice21 = Nice21,
@@ -235,7 +235,7 @@ cao.fit <-
                     Control = control,
                     Nice21 = Nice21,
                     p1star. = p1star. , p2star. = p2star. ,
-                    n = n, M = M, 
+                    n = n, M = M,
                     othint = othint, othdbl = othdbl,
                     alldump = TRUE)
   if (!is.list(extra))
@@ -293,7 +293,7 @@ cao.fit <-
   fit <- list(
               fitted.values = mu,
               Cmatrix = Cmat,
-              terms = Terms)  # terms: This used to be done in vglm() 
+              terms = Terms)  # terms: This used to be done in vglm()
 
 
 
@@ -311,7 +311,7 @@ cao.fit <-
   crit.list$deviance <- temp9$deviance
 
 
-                                    
+
 
 
   if (w[1] != 1 || any(w != w[1]))
@@ -320,7 +320,7 @@ cao.fit <-
   if (length(family at last))
     eval(family at last)
 
-  structure(c(fit, 
+  structure(c(fit,
       temp9,
       list(
       contrasts = attr(x, "contrasts"),
@@ -457,7 +457,7 @@ cao.control <- function(Rank = 1,
     stop("Bad input for argument 'SmallNo'")
   if ((SmallNo < .Machine$double.eps) ||
       (SmallNo > .0001))
-    stop("'SmallNo' is out of range") 
+    stop("'SmallNo' is out of range")
 
     ans <- list(
      Corner = FALSE,  # A constant, not a control parameter; unneeded?
@@ -512,7 +512,7 @@ create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
     Hlist.[[rr]] <- diag(M)
   names(Hlist.) <- if (p1 == 1) c("(Intercept)", names(which)) else stop()
   if (MSratio == 2) {
-    for (r in 1:Rank) 
+    for (r in 1:Rank)
       Hlist.[[p1+r]] <- eijfun(1, M)
   }
   Hlist.
@@ -522,11 +522,11 @@ create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
 
 
 callcaoc <- function(cmatrix,
-                    etamat, xmat, ymat, wvec, modelno, 
+                    etamat, xmat, ymat, wvec, modelno,
                     Control, Nice21 = TRUE,
                     p1star. = if (modelno %in% c(3, 5)) 2 else 1,
                     p2star. = Rank,
-                    n, M, 
+                    n, M,
                     othint, othdbl,
                     alldump = FALSE) {
   flush.console()
@@ -554,7 +554,7 @@ callcaoc <- function(cmatrix,
   cmatrix <- matrix(cmatrix, p2, Rank)  # crow1C() needs a matrix as input
   cmatrix <- crow1C(cmatrix, crow1positive = control$Crow1positive)
   numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-  evnu <- eigen(var(numat))
+  evnu <- eigen(var(numat), symmetric = TRUE)
   temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
            evnu$vector %*% evnu$value^(-0.5)
   cmatrix <- cmatrix %*% temp7
@@ -598,7 +598,7 @@ callcaoc <- function(cmatrix,
                  getfromVGAMenv("beta", prefix = ".VGAM.CAO.") else
                  double(lenbeta)
   othint[5] <- inited   # Refine initialization within C
-  pstar <- NOS * pstar. 
+  pstar <- NOS * pstar.
   bnumat <- if (Nice21) matrix(0, nstar, pstar.) else
                         stop("code not written here")
 
@@ -638,7 +638,7 @@ callcaoc <- function(cmatrix,
   ncolb <- max(ncbvec)
 
   qbig. <- NOS * qbig    # == NOS * Rank; holds all the smooths
-  if (!all.equal(as.vector(ncbvec), rep_len(1, queue)))
+  if (!all(as.vector(ncbvec) == rep_len(1, queue)))
     stop("'ncbvec' not right---should be a queue-vector of ones")
   pbig <- pstar. #
 
@@ -680,7 +680,7 @@ callcaoc <- function(cmatrix,
   qr = double(nstar*pstar.), qraux = double(pstar.),
     qpivot = integer(pstar.),
   n = as.integer(n), M = as.integer(M), NOS = as.integer(NOS),
-      nstar = as.integer(nstar), dim1U = as.integer( M ),  # for U, not U. 
+      nstar = as.integer(nstar), dim1U = as.integer( M ),  # for U, not U.
   errcode = integer(1), othint = as.integer(othint),
   deviance = double(1 + NOS),  # NOS more elts added 20100413
   beta = as.double(usethisbeta),
@@ -695,8 +695,8 @@ callcaoc <- function(cmatrix,
       smomat = as.double(matrix(0, n, qbig. )),
       nu1mat = as.double(nu1mat),
   Hlist = as.double(unlist( Hlist. )),
-  as.integer(ncbvec), 
-      smap = as.integer(1:(Rank+1)),  # 
+  as.integer(ncbvec),
+      smap = as.integer(1:(Rank+1)),  #
       trivc = as.integer(trivc),
 
 
@@ -796,11 +796,11 @@ flush.console()
 
 
 calldcaoc <- function(cmatrix,
-                     etamat, xmat, ymat, wvec, modelno, 
+                     etamat, xmat, ymat, wvec, modelno,
                      Control, Nice21 = TRUE,
                      p1star. = if (modelno %in% c(3, 5)) 2 else 1,
                      p2star. = Rank,
-                     n, M, 
+                     n, M,
                      othint, othdbl,
                      alldump = FALSE) {
 
@@ -844,7 +844,7 @@ calldcaoc <- function(cmatrix,
     attr(temp.smooth.frame[,uu+1], "spar") <- 0  # any old value
     attr(temp.smooth.frame[,uu+1], "df") <- 4    # any old value
   }
-  pstar.  <- p1star.  + p2star. 
+  pstar.  <- p1star.  + p2star.
   nstar <- if (Nice21) ifelse(modelno %in% c(3, 5), n * 2, n) else n * M
   NOS <- ifelse(modelno %in% c(3, 5), M / 2, M)
   lenbeta <- pstar. * ifelse(Nice21, NOS, 1)
@@ -864,7 +864,7 @@ calldcaoc <- function(cmatrix,
 
 
 
-  pstar <- NOS * pstar. 
+  pstar <- NOS * pstar.
   bnumat <- if (Nice21)
             matrix(0, nstar, pstar) else stop("need 'Nice21'")
 
@@ -987,13 +987,13 @@ warning("20100405; this is new:")
     as.double(xmat2),
     cmat = as.double(cmatrix),
     p2 = as.integer(p2), deriv = double(p2 * Rank),
-    betasave = double(lenbeta), 
+    betasave = double(lenbeta),
     npetc = as.integer(npetc), M. = as.integer( M. ),
     dofvec = as.double(dofvec + 1.0),
     lamvec = as.double(0 * dofvec),
     smopar = as.double(smopar),
     match = as.integer(smooth.frame$matcho),
-    as.integer(smooth.frame$nef), 
+    as.integer(smooth.frame$nef),
     as.integer(which),
     smomat = as.double(matrix(0, n, qbig. )),
         nu1mat = as.double(nu1mat),
@@ -1073,7 +1073,7 @@ warning("20100405; this is new:")
     ans1$deriv
   }
   flush.console()
-  returnans 
+  returnans
 }
 
 
@@ -1129,7 +1129,7 @@ Coef.rrvgam <- function(object,
 
 
   ocontrol <- object at control
-  if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2") 
+  if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2")
   gridlen <- rep_len(gridlen, Rank)
   M <- if (any(slotNames(object) == "predictors") &&
            is.matrix(object at predictors))
@@ -1150,7 +1150,7 @@ Coef.rrvgam <- function(object,
     if (!length(ynames)) ynames <- object at misc$ynames
     if (!length(ynames)) ynames <- paste("Y", 1:NOS, sep = "")
     lp.names <- object at misc$predictors.names
-    if (!length(lp.names)) lp.names <- NULL 
+    if (!length(lp.names)) lp.names <- NULL
 
     latvar.names <-
       if (Rank == 1) "latvar" else paste("latvar", 1:Rank, sep = "")
@@ -1158,7 +1158,7 @@ Coef.rrvgam <- function(object,
     if (ConstrainedO)
       dimnames(Cmat) <- list(names(ocontrol$colx2.index), latvar.names)
     latvar.mat <- if (ConstrainedO) {
-      object at x[, ocontrol$colx2.index, drop = FALSE] %*% Cmat 
+      object at x[, ocontrol$colx2.index, drop = FALSE] %*% Cmat
     } else {
       object at latvar
     }
@@ -1172,6 +1172,7 @@ Coef.rrvgam <- function(object,
     which.species <- 1:NOS  # Do it for all species
     if (Rank == 1) {
       gridd <- cbind(seq(extents[1, 1], extents[2, 1], len = gridlen))
+      eta2matrix <- matrix(0, NOS, 1)  # Added 20160716
     } else {
       gridd <-
         expand.grid(seq(extents[1, 1], extents[2, 1], len = gridlen[1]),
@@ -1180,7 +1181,7 @@ Coef.rrvgam <- function(object,
     }
     gridd.orig <- gridd
     for (sppno in seq_along(which.species)) {
-      gridd <- gridd.orig 
+      gridd <- gridd.orig
       gridres1 <- gridd[2, 1] - gridd[1, 1]
       gridres2 <- if (Rank == 2) gridd[2, 2] - gridd[1, 2] else 0
       griditer <- 1
@@ -1200,7 +1201,8 @@ Coef.rrvgam <- function(object,
                            Rank = Rank, deriv = 0, MSratio = MSratio)
         yvals <- temp$yvals  # gridlen-vector
         xvals <- temp$xvals  # gridlen x Rank; gridd
-        if (length(temp$eta2)) eta2matrix[sppno, 1] <- temp$eta2
+        if (length(temp$eta2))
+          eta2matrix[sppno, 1] <- temp$eta2
 
         nnn <- length(yvals)
         index <- (1:nnn)[yvals == max(yvals)]
@@ -1232,7 +1234,7 @@ Coef.rrvgam <- function(object,
           gridres1 <- gridd[2, 1] - gridd[1, 1]
           griditer <- griditer + 1
         }
-      }  # of while 
+      }  # of while
 
       if (Rank == 2) {
         myfun <- function(x, object, sppno, Rank = 1,
@@ -1254,12 +1256,12 @@ Coef.rrvgam <- function(object,
              maximum[sppno] <- answer$value
           }
         }  # end of Rank = 2
-    }  # end of sppno 
+    }  # end of sppno
     myetamat <- rbind(maximum)
     if (MSratio == 2)
       myetamat <- kronecker(myetamat, matrix(1:0, 1, 2))
     maximum <- object at family@linkinv(eta = myetamat, extra = object at extra)
-    maximum <- c(maximum)  # Convert from matrix to vector 
+    maximum <- c(maximum)  # Convert from matrix to vector
     names(maximum) <- ynames
 
     ans <- new(Class = "Coef.rrvgam",
@@ -1270,9 +1272,9 @@ Coef.rrvgam <- function(object,
                latvar.order = latvar.mat,
                Maximum = maximum,
                M = M,
-               NOS = NOS, 
-               Optimum = optimum, 
-               Optimum.order = optimum, 
+               NOS = NOS,
+               Optimum = optimum,
+               Optimum.order = optimum,
                Rank = Rank,
                spar1 = object at extra$spar1)
     if (ConstrainedO) {
@@ -1284,7 +1286,7 @@ Coef.rrvgam <- function(object,
       dimnames(eta2matrix) <-
         list(object at misc$predictors.names[c(FALSE, TRUE)], " ")
       ans at eta2 <- eta2matrix
-      ans at df2.nl <- object at extra$df2.nl 
+      ans at df2.nl <- object at extra$df2.nl
       ans at spar2  <- object at extra$spar2
     }
 
@@ -1299,10 +1301,10 @@ Coef.rrvgam <- function(object,
     n <- object at misc$n
     M <- object at misc$M
     NOS <- if (length(object at y)) ncol(object at y) else M
-    pstar <- p + length(Cmat)  # Adjustment 
+    pstar <- p + length(Cmat)  # Adjustment
     adjusted.dispersion <- object at misc$dispersion *
                            (n * M - p) / (n * M - pstar)
-    ans at dispersion <- adjusted.dispersion 
+    ans at dispersion <- adjusted.dispersion
   }
   if (MSratio == 2) {
     lcoef <- object at coefficients
@@ -1311,7 +1313,7 @@ Coef.rrvgam <- function(object,
     ans at dispersion <- temp
   }
   dimnames(ans at Optimum) <- list(latvar.names, ynames)
-  ans 
+  ans
 }
 
 
@@ -1368,15 +1370,15 @@ setMethod("Coef", "rrvgam", function(object, ...) Coef.rrvgam(object, ...))
 
 
 lvplot.rrvgam <- function(object,
-          add = FALSE, show.plot = TRUE, rugplot = TRUE, y = FALSE, 
+          add = FALSE, show.plot = TRUE, rugplot = TRUE, y = FALSE,
           type = c("fitted.values", "predictors"),
           xlab = paste("Latent Variable",
                        if (Rank == 1) "" else " 1", sep = ""),
           ylab = if (Rank == 1) switch(type, predictors = "Predictors",
               fitted.values = "Fitted values") else "Latent Variable 2",
-          pcex = par()$cex, pcol = par()$col, pch = par()$pch, 
+          pcex = par()$cex, pcol = par()$col, pch = par()$pch,
           llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
-          label.arg= FALSE, adj.arg=-0.5, 
+          label.arg= FALSE, adj.arg=-0.5,
           sites= FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
           sfont = par()$font,
           which.species = NULL,
@@ -1403,7 +1405,7 @@ lvplot.rrvgam <- function(object,
 
     Coeflist <- Coef(object)
     Cmat <- Coeflist at C
-    latvarmat <- Coeflist at latvar  # n x Rank 
+    latvarmat <- Coeflist at latvar  # n x Rank
 
     if (!show.plot)
       return(latvarmat)
@@ -1445,7 +1447,7 @@ lvplot.rrvgam <- function(object,
            match(which.species[sppno], sppnames) else which.species[sppno]
         if (is.na(indexSpecies))
           stop("mismatch found in 'which.species'")
-        xx <- latvarmat 
+        xx <- latvarmat
         yy <- r.curves[, indexSpecies]
         ooo <- sort.list(xx)
         xx <- xx[ooo]
@@ -1503,7 +1505,7 @@ setMethod("lvplot", "rrvgam",
 
 
 predict.rrvgam <- function (object, newdata = NULL,
-                         type = c("link", "response", "terms"), 
+                         type = c("link", "response", "terms"),
                          deriv = 0, ...) {
   type <- match.arg(type, c("link", "response", "terms"))[1]
   if (type != "link" && deriv != 0)
@@ -1536,7 +1538,7 @@ predict.rrvgam <- function (object, newdata = NULL,
       setup.smart("read", smart.prediction = object at smart.prediction)
     }
 
-    tt <- terms(object)  # 20030811; object at terms$terms 
+    tt <- terms(object)  # 20030811; object at terms$terms
     X <- model.matrix(delete.response(tt), newdata,
                       contrasts = if (length(object at contrasts))
                                   object at contrasts else NULL,
@@ -1546,7 +1548,7 @@ predict.rrvgam <- function (object, newdata = NULL,
       as.save <- attr(X, "assign")
       X <- X[rep_len(1, nrow(newdata)),, drop = FALSE]
       dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
-      attr(X, "assign") <- as.save  # Restored 
+      attr(X, "assign") <- as.save  # Restored
     }
 
     offset <- if (!is.null(off.num <- attr(tt, "offset"))) {
@@ -1555,7 +1557,7 @@ predict.rrvgam <- function (object, newdata = NULL,
                 eval(object at call$offset, newdata)
 
     if (is.smart(object) && length(object at smart.prediction)) {
-      wrapup.smart() 
+      wrapup.smart()
     }
 
     attr(X, "assign") <- attrassigndefault(X, tt)
@@ -1595,15 +1597,15 @@ predict.rrvgam <- function (object, newdata = NULL,
                   type = ifelse(type == "response", "link", type))
      if (MSratio == 2) {
        if (any(type == c("link", "response"))) {
-         etamat[, 2*sppno-1] <- temp345$yvals 
-         etamat[, 2*sppno  ] <- temp345$eta2 
+         etamat[, 2*sppno-1] <- temp345$yvals
+         etamat[, 2*sppno  ] <- temp345$eta2
        } else {
          terms.mat[, ind8] <- temp345
          interceptvector[sppno] <- attr(temp345, "constant")
        }
      } else {
        if (any(type == c("link", "response"))) {
-         etamat[, sppno] <- temp345$yvals 
+         etamat[, sppno] <- temp345$yvals
        } else {
          terms.mat[, ind8] <- temp345
          interceptvector[sppno] <- attr(temp345, "constant")
@@ -1618,7 +1620,7 @@ predict.rrvgam <- function (object, newdata = NULL,
   if (type == "link") {
     dimnames(etamat) <-
         list(dimnames(X)[[1]],
-             if (deriv == 0) 
+             if (deriv == 0)
                object at misc$predictors.names else NULL)
     return(etamat)
   } else if (type == "response") {
@@ -1641,7 +1643,7 @@ setMethod("predict", "rrvgam", function(object, ...)
 
 
 predictrrvgam <- function(object, grid, sppno, Rank = 1,
-                       deriv = 0, MSratio = 1, type = "link") {
+                          deriv = 0, MSratio = 1, type = "link") {
   if (type != "link" && type != "terms")
     stop("'link' must be \"link\" or \"terms\"")
   if (ncol(grid <- as.matrix(grid)) != Rank)
@@ -1695,7 +1697,7 @@ predictrrvgam <- function(object, grid, sppno, Rank = 1,
   } else {
     list(xvals = grid,
          yvals = c(nlfunvalues),
-         eta2 = if (MSratio == 2) llcoef[MSratio] else NULL)
+         eta2  = if (MSratio == 2) llcoef[MSratio] else NULL)
     }
 }
 
@@ -1705,15 +1707,15 @@ predictrrvgam <- function(object, grid, sppno, Rank = 1,
 
 
 plot.rrvgam <- function(x,
-                     xlab = if (Rank == 1) "Latent Variable" else 
+                     xlab = if (Rank == 1) "Latent Variable" else
                             paste("Latent Variable", 1:Rank),
                      ylab = NULL, residuals.arg = FALSE,
                      pcol = par()$col, pcex = par()$cex, pch = par()$pch,
-                     lcol = par()$col, lwd = par()$lwd, lty = par()$lty, 
-                     add = FALSE, 
+                     lcol = par()$col, lwd = par()$lwd, lty = par()$lty,
+                     add = FALSE,
                      main = NULL,
                      center.cf = Rank > 1,
-                     WhichRank = 1:Rank, 
+                     WhichRank = 1:Rank,
                      which.species = NULL,  # a numeric or character vector
                      rugplot = TRUE, se.arg = FALSE, deriv = 0,
                      scale = 0, ylim = NULL,
@@ -1740,7 +1742,7 @@ plot.rrvgam <- function(x,
   xlab <- rep_len(xlab, Rank)
 
   if (!length(which.species)) which.species <- 1:NOS
-  if (length(ylab)) 
+  if (length(ylab))
     ylab <- rep_len(ylab, length(which.species))  # Too long if overlay
   if (length(main))
     main <- rep_len(main, length(which.species))  # Too long if overlay
@@ -1774,18 +1776,18 @@ plot.rrvgam <- function(x,
         ylim.use <- if (length(ylim)) ylim else
                     ylim.scale(range(yvals), scale)
         matplot(xvals, yvals, type = "n",
-                xlab = xlab[rindex], 
-                ylab = if (length(ylab)) ylab[sppno] else 
+                xlab = xlab[rindex],
+                ylab = if (length(ylab)) ylab[sppno] else
                        ifelse(overlay, "Fitted functions",
                                        "Fitted function"),
-                main = if (length(main)) main[sppno] else 
+                main = if (length(main)) main[sppno] else
                        ifelse(overlay, "", sppnames[thisSpecies]),
                 ylim = ylim.use,
                 ...)
       }
       if (residuals.arg) {
         stop("cannot handle residuals = TRUE yet")
-      } 
+      }
       counter <- counter + 1
       lines(xvals, yvals,
             col = lcol[counter], lwd = lwd[counter], lty = lty[counter])
@@ -1823,8 +1825,8 @@ persp.rrvgam <-
            lwd = par()$lwd,
            rugplot = FALSE,
            ...) {
-  object <- x  # don't like x as the primary argument 
-  coefobj <- Coef(object) 
+  object <- x  # don't like x as the primary argument
+  coefobj <- Coef(object)
   if ((Rank <- coefobj at Rank) > 2)
     stop("object must be a rank-1 or rank-2 model")
   fvmat <- fitted(object)
@@ -1868,7 +1870,7 @@ persp.rrvgam <-
   LP <- matrix(NA_real_, nrow(latvarmat), NOS)
   for (sppno in 1:NOS) {
     temp <- predictrrvgam(object = object, grid = latvarmat, sppno = sppno,
-                       Rank = Rank, deriv = 0, MSratio = MSratio)
+                          Rank = Rank, deriv = 0, MSratio = MSratio)
     LP[, sppno] <- temp$yval
   }
   if (MSratio == 2) {
@@ -1886,10 +1888,10 @@ persp.rrvgam <-
       lwd <- rep_len(lwd, length(which.species.numer))
       matplot(latvar1, fitvals, xlab = xlab, ylab = ylab,
               type = "n", main = main, xlim = xlim, ylim = ylim, ...)
-      if (rugplot) rug(latvar(object)) 
+      if (rugplot) rug(latvar(object))
       for (sppno in seq_along(which.species.numer)) {
         ptr2 <- which.species.numer[sppno]  # points to species column
-        lines(latvar1, fitvals[,ptr2], col = col[sppno], 
+        lines(latvar1, fitvals[,ptr2], col = col[sppno],
               lty = lty[sppno], lwd = lwd [sppno], ...)
         if (labelSpecies) {
           ptr1 <- (1:nrow(fitvals))[max(fitvals[, ptr2]) ==
@@ -1907,7 +1909,7 @@ persp.rrvgam <-
     if (length(which.species) > 1)
       for (sppno in which.species[-1]) {
         max.fitted <- pmax(max.fitted,
-                           matrix(fitvals[, sppno], 
+                           matrix(fitvals[, sppno],
                                   length(latvar1), length(latvar2)))
     }
     if (!length(zlim))
@@ -2083,7 +2085,7 @@ if (!isGeneric("calibrate"))
 setMethod("calibrate", "rrvgam", function(object, ...)
           calibrate.qrrvglm(object, ...))
 
-    
+
 setMethod("calibrate", "qrrvglm", function(object, ...)
           calibrate.qrrvglm(object, ...))
 
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 6b07534..ca74e18 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -24,11 +24,11 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
       stop("cannot have 'matrix.out = TRUE' and 'colon = TRUE'")
     if (!label)
       stop("cannot have 'label = FALSE' and 'colon = TRUE'")
-    
+
     d1 <- object at misc$colnames.x
     Hlist <- object at constraints
     M <- object at misc$M
-    ncolHlist <- unlist(lapply(Hlist, ncol)) 
+    ncolHlist <- unlist(lapply(Hlist, ncol))
     new.labs <- vlabel(xn = d1, ncolHlist, M = M, colon = colon)
     names(ans) <- new.labs
     return(ans)
@@ -41,7 +41,7 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
     return(ans)
 
 
-  
+
   ncolx <- object at misc$p  # = length(object at constraints)
   M <- object at misc$M
 
@@ -52,9 +52,9 @@ coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
     Bmat <- matrix(NA_real_, nrow = ncolx, ncol = M)
 
     if (!matrix.out)
-      return(ans) 
+      return(ans)
 
-    ncolHlist <- unlist(lapply(Hlist, ncol)) 
+    ncolHlist <- unlist(lapply(Hlist, ncol))
     nasgn <- names(Hlist)
     temp <- c(0, cumsum(ncolHlist))
     for (ii in seq_along(nasgn)) {
@@ -89,7 +89,7 @@ setMethod("coef", "vglm", function(object, ...)
 
 
 
-  
+
 setMethod("coefficients", "summary.vglm", function(object, ...)
           object at coef3)
 setMethod("coef",         "summary.vglm", function(object, ...)
@@ -128,7 +128,7 @@ Coef.vlm <- function(object, ...) {
 
     if (!is.list(use.earg <- object at misc$earg))
       use.earg <- list()
-    
+
     answer <-
       eta2theta(rbind(coefvlm(object)),
                 link = object at misc$link,
diff --git a/R/confint.vlm.R b/R/confint.vlm.R
index b068677..c6e5cee 100644
--- a/R/confint.vlm.R
+++ b/R/confint.vlm.R
@@ -1,39 +1,61 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
 
 
-confintvglm <- function(object, parm, level = 0.95, ...) {
+confintvglm <-
+  function(object, parm = "(All)", level = 0.95,
+           method = c("wald", "profile"),
+           trace = NULL,
+           ...) {
+
+  method <- match.arg(method, c("wald", "profile"))[1]
+
+
   cf <- coef(object)
   pnames <- names(cf)
-  if (missing(parm))
+  if (is.character(parm) && length(parm) == 1 && parm == "(All)")
     parm <- pnames else
   if (is.numeric(parm))
     parm <- pnames[parm]
-  a <- (1 - level)/2
-  a <- c(a, 1 - a)
   format.perc <- function(probs, digits)
-  paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits),
-        "%")
-  pct <- format.perc(a, 3)
-  fac <- qnorm(a)
-  ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct))
-  ses <- sqrt(diag(vcov(object)))[parm]
-  ci[] <- cf[parm] + ses %o% fac
-  ci
-}
+    paste(format(100 * probs, trim = TRUE, scientific = FALSE,
+          digits = digits), "%")
 
 
 
-confintrrvglm <- function(object, parm, level = 0.95, ...) {
-  stop("currently this function has not been written")
+  if (method == "wald") {
+    aa <- (1 - level) / 2
+    aa <- c(aa, 1 - aa)
+    pct <- format.perc(aa, 3)
+    fac <- qnorm(aa)
+    ci <- array(NA, dim = c(length(parm), 2L),
+                dimnames = list(parm, pct))
+    ses <- sqrt(diag(vcov(object)))[parm]
+    ci[] <- cf[parm] + ses %o% fac
+    return(ci)
+  }  # if (method == "wald")
+
+
+
+
+  ppv <- profilevglm(object, which = parm, alpha = (1 - level) / 4,
+                     trace = trace, ...)
 
 
 
-  
+  MASSconfint.profile.glm(ppv, parm = parm, level = level,
+                          trace = trace, ...)
+}  # confintvglm
+
+
+
+confintrrvglm <- function(object, parm, level = 0.95, ...) {
+  stop("currently this function has not been written")
+
 }
 
 
@@ -57,12 +79,14 @@ if (!isGeneric("confint"))
 
 setMethod("confint", "vglm",
           function(object, parm, level = 0.95, ...)
-            confintvglm(object = object, parm = parm, level = level, ...))
+            confintvglm(object = object,
+                        parm = if (missing(parm)) "(All)" else parm,
+                        level = level, ...))
 
 
 setMethod("confint", "rrvglm",
           function(object, parm, level = 0.95, ...)
-            confintrrvglm(object = object, parm = parm, level = level, ...))
+          confintrrvglm(object = object, parm = parm, level = level, ...))
 
 setMethod("confint", "vgam",
           function(object, parm, level = 0.95, ...)
@@ -71,3 +95,29 @@ setMethod("confint", "vgam",
 
 
 
+
+
+MASSconfint.profile.glm <-
+function (object, parm = seq_along(pnames), level = 0.95, ...) {
+  of <- attr(object, "original.fit")
+  pnames <- names(coef(of))
+  if (is.character(parm))
+    parm <- match(parm, pnames, nomatch = 0L)
+  a <- (1 - level)/2
+  a <- c(a, 1 - a)
+  pct <- paste(round(100 * a, 1), "%")
+  ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(pnames[parm],
+      pct))
+  cutoff <- qnorm(a)
+  for (pm in parm) {
+    pro <- object[[pnames[pm]]]
+    if (is.null(pro))
+      next
+    if (length(pnames) > 1L)
+      sp <- spline(x = pro[, "par.vals"][, pm], y = pro[, 1])
+    else sp <- spline(x = pro[, "par.vals"], y = pro[, 1])
+    ci[pnames[pm], ] <- approx(sp$y, sp$x, xout = cutoff)$y
+  }
+  drop(ci)
+}
+
diff --git a/R/cqo.R b/R/cqo.R
index 07538a5..e8f399c 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -7,27 +7,27 @@
 
 
 cqo <- function(formula,
-                family, data = list(), 
+                family, data = list(),
                 weights = NULL, subset = NULL, na.action = na.fail,
                 etastart = NULL, mustart = NULL, coefstart = NULL,
-                control = qrrvglm.control(...), 
-                offset = NULL, 
+                control = qrrvglm.control(...),
+                offset = NULL,
                 method = "cqo.fit",
                 model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                contrasts = NULL, 
+                contrasts = NULL,
                 constraints = NULL,
-                extra = NULL, 
+                extra = NULL,
                 smart = TRUE, ...) {
   dataname <- as.character(substitute(data))  # "list" if no data =
   function.name <- "cqo"
 
   ocall <- match.call()
 
-  if (smart) 
+  if (smart)
     setup.smart("write")
 
   mt <- terms(formula, data = data)
-  if (missing(data)) 
+  if (missing(data))
       data <- environment(formula)
 
   mf <- match.call(expand.dots = FALSE)
@@ -35,9 +35,9 @@ cqo <- function(formula,
     mf$control <- mf$contrasts <- mf$constraints <- mf$extra <- NULL
   mf$coefstart <- mf$etastart <- mf$... <- NULL
   mf$smart <- NULL
-  mf$drop.unused.levels <- TRUE 
+  mf$drop.unused.levels <- TRUE
   mf[[1]] <- as.name("model.frame")
-  mf <- eval(mf, parent.frame()) 
+  mf <- eval(mf, parent.frame())
   if (method == "model.frame")
     return(mf)
   na.act <- attr(mf, "na.action")
@@ -54,7 +54,7 @@ cqo <- function(formula,
   x <- model.matrix(mt, mf, contrasts)
   attr(x, "assign") <- attrassigndefault(x, mt)
   offset <- model.offset(mf)
-  if (is.null(offset)) 
+  if (is.null(offset))
     offset <- 0  # yyy ???
   w <- model.weights(mf)
   if (!length(w)) {
@@ -150,7 +150,7 @@ cqo <- function(formula,
     }
   } else list()  # R-1.5.0
   slot(answer, "iter") <- fit$iter
-  fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix 
+  fit$predictors <- as.matrix(fit$predictors)  # Must be a matrix
   dimnames(fit$predictors) <- list(dimnames(fit$predictors)[[1]],
                                    fit$misc$predictors.names)
   slot(answer, "predictors") <- fit$predictors
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index f293533..1069b3b 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -26,7 +26,7 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
   if (I.tol <- control$I.tolerances) {
     if (Rank > 1) {
       numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-      evnu <- eigen(var(numat))
+      evnu <- eigen(var(numat), symmetric = TRUE)
       cmatrix <- cmatrix %*% evnu$vector
     }
 
@@ -50,7 +50,7 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
       }
   } else {
     numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
-    evnu <- eigen(var(numat))
+    evnu <- eigen(var(numat), symmetric = TRUE)
     temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
                            evnu$vector %*% evnu$value^(-0.5)
     cmatrix <- cmatrix %*% temp7
@@ -61,9 +61,9 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
   inited <- ifelse(exists(".VGAM.CQO.etamat", envir = VGAMenv), 1, 0)
 
 
-  usethiseta <- if (inited == 1) 
+  usethiseta <- if (inited == 1)
     getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
-  usethisbeta <- if (inited == 2) 
+  usethisbeta <- if (inited == 2)
     getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
 
   othint <- c(Rank = Rank, control$eq.tol, pstar = pstar,
@@ -75,10 +75,10 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
   bnumat <- if (nice31) matrix(0,nstar,pstar) else
             cbind(matrix(0, nstar, p2star), X.vlm.1save)
 
- 
+
 
   ans1 <- if (nice31) .C("cqo_1",
-     numat = as.double(numat), as.double(ymat), 
+     numat = as.double(numat), as.double(ymat),
      as.double(if (p1) xmat[, control$colx1.index] else 999),
      as.double(wvec), etamat = as.double(usethiseta),
      moff = double(if (I.tol) n else 1),
@@ -95,7 +95,7 @@ callcqoc <- function(cmatrix, etamat, xmat, ymat, wvec,
                 iKvector = rep_len(control$iKvector, NOS),
                 iShape = rep_len(control$iShape, NOS)))) else
   .C("cqo_2",
-     numat = as.double(numat), as.double(ymat), 
+     numat = as.double(numat), as.double(ymat),
      as.double(if (p1) xmat[, control$colx1.index] else 999),
      as.double(wvec), etamat = as.double(usethiseta),
      moff = double(if (I.tol) n else 1),
@@ -162,7 +162,7 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
   if (I.tol <- control$I.tolerances) {
     if (Rank > 1) {
       numat <- xmat[, control$colx2.index, drop=FALSE] %*% cmatrix
-      evnu <- eigen(var(numat))
+      evnu <- eigen(var(numat), symmetric = TRUE)
       cmatrix <- cmatrix %*% evnu$vector
     }
 
@@ -185,7 +185,7 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
       }
   } else {
     numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
-    evnu <- eigen(var(numat))
+    evnu <- eigen(var(numat), symmetric = TRUE)
     temp7 <- if (Rank > 1)
                evnu$vector %*% diag(evnu$value^(-0.5)) else
                evnu$vector %*% evnu$value^(-0.5)
@@ -198,9 +198,9 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
                             envir = VGAMenv), 1, 0)
 
 
-    usethiseta <- if (inited == 1) 
+    usethiseta <- if (inited == 1)
       getfromVGAMenv("etamat", prefix = ".VGAM.CQO.") else t(etamat)
-    usethisbeta <- if (inited == 2) 
+    usethisbeta <- if (inited == 2)
       getfromVGAMenv("beta", prefix = ".VGAM.CQO.") else double(lenbeta)
 
     othint <- c(Rank, control$eq.tol, pstar, dimw = 1, inited = inited,
@@ -213,9 +213,9 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
              cbind(matrix(0,nstar,p2star), X.vlm.1save)
     flush.console()
 
-    ans1 <- 
+    ans1 <-
     .C("dcqo1",
-       numat = as.double(numat), as.double(ymat), 
+       numat = as.double(numat), as.double(ymat),
        as.double(if (p1) xmat[,control$colx1.index] else 999),
        as.double(wvec), etamat = as.double(usethiseta),
            moff = double(if (I.tol) n else 1),
@@ -256,7 +256,7 @@ checkCMCO <- function(Hlist, control, modelno) {
     stop("Some variables are needed in noRRR and non-noRRR arguments")
   if (all(names(colx1.index) != "(Intercept)"))
     stop("an intercept term must be in the argument 'noRRR' formula")
-  Hlist1 <- vector("list", p1) 
+  Hlist1 <- vector("list", p1)
   Hlist2 <- vector("list", p2)
   for (kk in 1:p1)
     Hlist1[[kk]] <- Hlist[[(colx1.index[kk])]]
@@ -282,7 +282,7 @@ checkCMCO <- function(Hlist, control, modelno) {
         if (!trivial.constraints(list(Hlist1[[kk]])))
           stop("the constraint matrices for some 'noRRR' ",
                "terms is not trivial")
-            
+
   nice31 <- if (control$Quadratic)
               (!control$eq.tol || control$I.tolerances) else TRUE
   as.numeric(nice31)
@@ -329,8 +329,8 @@ ny <- names(y)
   intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
   y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
- 
-    n.save <- n 
+
+    n.save <- n
 
 
 
@@ -339,7 +339,7 @@ ny <- names(y)
 
     if (length(family at initialize))
       eval(family at initialize)  # Initialize mu and M (and optionally w)
-    n <- n.save 
+    n <- n.save
 
     eval(rrr.init.expression)
 
@@ -460,18 +460,17 @@ ny <- names(y)
     Hlist <- process.constraints(constraints, x, M, specialCM = specialCM)
     nice31 <- checkCMCO(Hlist, control = control, modelno = modelno)
     ncolHlist <- unlist(lapply(Hlist, ncol))
-    dimB <- sum(ncolHlist)
 
     X.vlm.save <- if (nice31) {
-      NULL 
+      NULL
     } else {
       tmp500 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist,
                                        C = Cmat, control = control)
-      xsmall.qrr <- tmp500$new.latvar.model.matrix 
+      xsmall.qrr <- tmp500$new.latvar.model.matrix
       H.list <- tmp500$constraints
       latvar.mat <- tmp500$latvar.mat
       if (length(tmp500$offset)) {
-        offset <- tmp500$offset 
+        offset <- tmp500$offset
       }
       lm2vlm.model.matrix(xsmall.qrr, H.list, xij = control$xij)
     }
@@ -480,7 +479,7 @@ ny <- names(y)
       eta <- if (ncol(X.vlm.save) > 1)
                X.vlm.save %*% coefstart + offset else
                X.vlm.save  *  coefstart + offset
-      eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta) 
+      eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
       mu <- family at linkinv(eta, extra)
     }
 
@@ -539,9 +538,9 @@ ny <- names(y)
                 df.residual = df.residual,
                 df.total = n*M,
                 fitted.values = mu,
-                offset = offset, 
+                offset = offset,
                 residuals = residuals,
-                terms = Terms)  # terms: This used to be done in vglm() 
+                terms = Terms)  # terms: This used to be done in vglm()
 
     if (M == 1) {
       wz <- as.vector(wz)  # Convert wz into a vector
@@ -551,7 +550,7 @@ ny <- names(y)
     misc <- list(
         colnames.x = xn,
         criterion = "deviance",
-        function.name = function.name, 
+        function.name = function.name,
         intercept.only=intercept.only,
         predictors.names = predictors.names,
         M = M,
@@ -621,7 +620,7 @@ ny <- names(y)
   Crow1positive <- if (length(Crow1positive))
                    rep_len(Crow1positive, Rank) else
                    rep_len(TRUE, Rank)
-  if (epsilon <= 0) 
+  if (epsilon <= 0)
     stop("epsilon > 0 is required")
   ymat <- cbind(ymat) + epsilon  # ymat == 0 cause problems
   NOS <- ncol(ymat)
@@ -631,7 +630,7 @@ ny <- names(y)
                   Crow1positive)
     eval(sd.scale.X2.expression)
     if (NOS == 1) {
-      eval(print.CQO.expression) 
+      eval(print.CQO.expression)
       return(ans)
     } else {
       ans.save <- ans;   # ans.save contains scaled guesses
@@ -669,9 +668,9 @@ ny <- names(y)
                          colx2.index = (ncol(X1)+1):(ncol(X1) + ncol(X2)),
                          Corner = FALSE, Svd.arg = TRUE,
                          Uncorrelated.latvar = TRUE, Quadratic = FALSE)
-    
+
     ans2 <- if (Rank > 1)
-              rrr.normalize(rrcontrol = temp.control, A = alt$A, 
+              rrr.normalize(rrcontrol = temp.control, A = alt$A,
                             C = alt$C, x = cbind(X1, X2)) else
               alt
     ans <- crow1C(ans2$C, rep_len(Crow1positive, effrank))
@@ -679,7 +678,7 @@ ny <- names(y)
     Rank.save <- Rank
     Rank <- effrank
     eval(sd.scale.X2.expression)
-    Rank <- Rank.save 
+    Rank <- Rank.save
 
     if (effrank < Rank) {
       ans <- cbind(ans, ans.save[,-(1:effrank)])  # ans is better
@@ -698,7 +697,7 @@ ny <- names(y)
     }
 
     if (Rank > 1) {
-      evnu <- eigen(var(ans))
+      evnu <- eigen(var(ans), symmetric = TRUE)
       ans <- ans %*% evnu$vector
     }
 
@@ -723,7 +722,7 @@ ny <- names(y)
 
 cqo.init.derivative.expression <- expression({
   which.optimizer <- if (control$Quadratic && control$FastAlgorithm) {
-    "BFGS" 
+    "BFGS"
   } else {
     ifelse(iter <= rrcontrol$Switch.optimizer, "Nelder-Mead", "BFGS")
   }
@@ -732,7 +731,7 @@ cqo.init.derivative.expression <- expression({
   if (trace && control$OptimizeWrtC) {
     cat("\nUsing", which.optimizer, "algorithm\n")
     flush.console()
-  } 
+  }
 
 
  if (FALSE) {
@@ -749,7 +748,7 @@ cqo.init.derivative.expression <- expression({
   if (!canfitok)
     stop("cannot fit this model using fast algorithm")
 
-  p2star <- if (nice31) 
+  p2star <- if (nice31)
     ifelse(control$I.toleran, Rank, Rank + Rank*(Rank+1)/2) else
     (NOS*Rank + Rank*(Rank+1)/2 * ifelse(control$eq.tol, 1, NOS))
 
@@ -757,7 +756,7 @@ cqo.init.derivative.expression <- expression({
             (ncol(X.vlm.save) - p2star)
   X.vlm.1save <- if (p1star > 0) X.vlm.save[, -(1:p2star)] else NULL
 })
-    
+
 
 
 
@@ -799,7 +798,7 @@ cqo.derivative.expression <- expression({
   if (length(alt$offset))
     offset <- alt$offset
 
-  B1.save <- alt$B1  # Put later into extra  
+  B1.save <- alt$B1  # Put later into extra
   tmp.fitted <- alt$fitted  # contains \bI_{Rank} \bnu if Corner
 
   if (trace && control$OptimizeWrtC) {
@@ -817,9 +816,9 @@ cqo.derivative.expression <- expression({
     flush.console()
   }
 
-  Amat <- alt$Amat  # 
-  Cmat <- alt$Cmat  # 
-  Dmat <- alt$Dmat  # 
+  Amat <- alt$Amat  #
+  Cmat <- alt$Cmat  #
+  Dmat <- alt$Dmat  #
 
   eval(cqo.end.expression)  #
 })
@@ -836,7 +835,7 @@ cqo.end.expression <- expression({
     if (!length(extra))
       extra <- list()
     extra$Amat <- Amat  # Not the latest iteration ??
-    extra$Cmat <- Cmat  # Saves the latest iteration 
+    extra$Cmat <- Cmat  # Saves the latest iteration
     extra$Dmat <- Dmat     # Not the latest iteration
     extra$B1   <- B1.save  # Not the latest iteration (not good)
   } else {
@@ -849,7 +848,7 @@ cqo.end.expression <- expression({
   mu <- family at linkinv(eta, extra)
 
   if (anyNA(mu))
-    warning("there are NAs in mu") 
+    warning("there are NAs in mu")
 
   deriv.mu <- eval(family at deriv)
   wz <- eval(family at weight)
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index d9fddf1..ec81ba6 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -70,6 +70,7 @@ setMethod("deviance", "vglm", function(object, ...)
 
 
 
+
 deviance.qrrvglm <- function(object,
                              summation = TRUE,
                              history = FALSE,
@@ -97,6 +98,8 @@ setMethod("deviance", "rrvgam",  function(object, ...)
 
 
 
+
+
 df.residual_vlm <- function(object, type = c("vlm", "lm"), ...) {
   type <- type[1]
 
@@ -109,13 +112,33 @@ df.residual_vlm <- function(object, type = c("vlm", "lm"), ...) {
 
 
 
-
 setMethod("df.residual", "vlm", function(object, ...)
            df.residual_vlm(object, ...))
 
 
 
 
+
+
+df.residual_pvgam <-
+  function(object,
+           ...) {
+
+
+  nobs(object, type = "lm") * npred(object) -
+  sum(endf(object, diag.all = TRUE))
+}
+
+
+
+setMethod("df.residual", "pvgam", function(object, ...)
+           df.residual_pvgam(object, ...))
+
+
+
+
+
+
 nvar_vlm <- function(object, ...) {
 
 
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index 2989e61..5b29040 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.actuary.R b/R/family.actuary.R
index c3a3266..121990a 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -53,38 +53,38 @@ pgumbelII <- function(q, scale = 1, shape,
   # 20150121 KaiH
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   # 20150121 KaiH
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
-  LLL <- max(length(q), length(shape), length(scale)) 
+
+  LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep_len(q,       LLL)
   if (length(shape)   != LLL) shape   <- rep_len(shape,   LLL)
   if (length(scale)   != LLL) scale   <- rep_len(scale,   LLL)
-  
-  # 20150121 KaiH 
+
+  # 20150121 KaiH
   if (lower.tail) {
-    if (log.p) { 
+    if (log.p) {
       ans <- -(q / scale)^(-shape)
       ans[q <= 0 ] <- -Inf
       ans[q == Inf] <- 0
-    } else { 
+    } else {
       ans <- exp(-(q / scale)^(-shape))
-      ans[q <= 0] <- 0 
+      ans[q <= 0] <- 0
       ans[q == Inf] <- 1
     }
   } else {
     if (log.p) {
       ans <- log(-expm1(-(q / scale)^(-shape)))
-      ans[q <= 0] <- 0 
+      ans[q <= 0] <- 0
       ans[q == Inf] <- -Inf
-    } else { 
+    } else {
       ans <- -expm1(-(q / scale)^(-shape))
-      ans[q <= 0] <- 1 
+      ans[q <= 0] <- 1
       ans[q == Inf] <- 0
     }
-  } 
+  }
   ans[shape <= 0 | scale <= 0] <- NaN
   ans
 }
@@ -370,7 +370,14 @@ rgumbelII <- function(n, scale = 1, shape) {
            .escale = escale, .eshape = eshape
          ) )),
   vfamily = c("gumbelII"),
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(Shape)) && all(0 < Shape)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape) )),
 
 
   simslot = eval(substitute(
@@ -378,7 +385,7 @@ rgumbelII <- function(n, scale = 1, shape) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
@@ -453,7 +460,7 @@ dmbeard <- function(x, shape, scale = 1, rho, epsilon, log = FALSE) {
             (-epsilon * x -
             ((rho * epsilon - 1) / (rho * scale)) *
             (log1p(rho * shape) -
-             log(exp(-x * scale) + rho * shape) - scale * x)) - 
+             log(exp(-x * scale) + rho * shape) - scale * x)) -
             log(exp(-x * scale) + shape * rho)
 
   ans[index0] <- log(0)
@@ -515,7 +522,7 @@ dmperks <- function(x, scale = 1, shape, epsilon, log = FALSE) {
             (-epsilon * x -
             ((epsilon - 1) / scale) *
             (log1p(shape) -
-             log(shape + exp(-x * scale)) -x * scale)) - 
+             log(shape + exp(-x * scale)) -x * scale)) -
             log(exp(-x * scale) + shape)
 
   ans[index0] <- log(0)
@@ -945,7 +952,7 @@ perks.control <- function(save.weights = TRUE, ...) {
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape,
             .nsimEIM = nsimEIM ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -965,6 +972,17 @@ perks.control <- function(save.weights = TRUE, ...) {
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape ))),
   vfamily = c("perks"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                       .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                       .lshape , .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(Shape)) && all(0 < Shape)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape) )),
+
 
 
 
@@ -974,7 +992,7 @@ perks.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
@@ -988,7 +1006,7 @@ perks.control <- function(save.weights = TRUE, ...) {
 
 
 
- 
+
   deriv = eval(substitute(expression({
     M1 <- 2
     scale <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
@@ -1020,7 +1038,7 @@ perks.control <- function(save.weights = TRUE, ...) {
     NOS <- M / M1
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
 
-    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
 
     ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
 
@@ -1428,7 +1446,7 @@ makeham.control <- function(save.weights = TRUE, ...) {
         makeham.Loglikfun2 <- function(epsilval, y, x, w, extraargs) {
           ans <-
           sum(c(w) * dmakeham(x = y, shape = extraargs$Shape,
-                              epsilon = epsilval, 
+                              epsilon = epsilval,
                               scale = extraargs$Scale, log = TRUE))
           ans
         }
@@ -1490,7 +1508,7 @@ makeham.control <- function(save.weights = TRUE, ...) {
             .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
             .eshape = eshape, .escale = escale, .eepsil = eepsil,
             .nsimEIM = nsimEIM ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -1511,7 +1529,21 @@ makeham.control <- function(save.weights = TRUE, ...) {
   }, list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
            .eshape = eshape, .escale = escale, .eepsil = eepsil ))),
   vfamily = c("makeham"),
- 
+ # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
+                       .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
+                       .lshape , .eshape )
+    epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE), drop = FALSE],
+                       .lepsil , .eepsil )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape) &&
+             all(is.finite(epsil)) && all(0 < epsil)
+    okay1
+  }, list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+           .eshape = eshape, .escale = escale, .eepsil = eepsil ))),
+
 
 
 
@@ -1520,7 +1552,7 @@ makeham.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Scale <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
@@ -1869,7 +1901,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
           ans <-
           sum(c(w) * dgompertz(x = y, shape = extraargs$Shape,
                                scale = scaleval, log = TRUE))
-          ans 
+          ans
         }
 
         mymat <- matrix(-1, length(shape.grid), 2)
@@ -1926,7 +1958,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
   }), list( .lshape = lshape, .lscale = lscale,
             .eshape = eshape, .escale = escale,
             .nsimEIM = nsimEIM ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -1946,7 +1978,19 @@ gompertz.control <- function(save.weights = TRUE, ...) {
     }, list( .lshape = lshape, .lscale = lscale,
              .eshape = eshape, .escale = escale ))),
   vfamily = c("gompertz"),
- 
+ # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lscale ,
+                       .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                       .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .lscale = lscale,
+           .eshape = eshape, .escale = escale ))),
+
+
 
 
 
@@ -1955,7 +1999,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
@@ -1996,7 +2040,7 @@ gompertz.control <- function(save.weights = TRUE, ...) {
     NOS <- M / M1
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
 
-    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
 
     ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
 
@@ -2289,7 +2333,7 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
             .ealpha0 = ealpha0, .elambda = elambda,
             .nsimEIM = nsimEIM,
             .imethod = imethod ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -2309,7 +2353,18 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
     }, list( .lalpha0 = lalpha0, .llambda = llambda,
              .ealpha0 = ealpha0, .elambda = elambda ))),
   vfamily = c("exponential.mo"),
- 
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha0 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lalpha0 ,
+                       .ealpha0 )
+    lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+                       .elambda )
+    okay1 <- all(is.finite(alpha0)) && all(0 < alpha0) &&
+             all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .lalpha0 = lalpha0, .llambda = llambda,
+           .ealpha0 = ealpha0, .elambda = elambda ))),
+
+
   deriv = eval(substitute(expression({
     M1 <- 2
     alpha0 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lalpha0 ,
@@ -2337,7 +2392,7 @@ exponential.mo.control <- function(save.weights = TRUE, ...) {
     NOS <- M / M1
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
 
-    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
 
     ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
 
@@ -2432,47 +2487,47 @@ if (ii < 3) {
 
   if (length(lss) != 1 && !is.logical(lss))
     stop("Argument 'lss' not specified correctly")
-  
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape1.a) && !is.Numeric(ishape1.a, positive = TRUE))
     stop("Bad input for argument 'ishape1.a'")
-  
+
   if (length(ishape2.p) && !is.Numeric(ishape2.p, positive = TRUE))
     stop("Bad input for argument 'ishape2.p'")
-  
+
   if (length(ishape3.q) && !is.Numeric(ishape3.q, positive = TRUE))
     stop("Bad input for argument 'ishape3.q'")
-  
 
-  
+
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape1.a <- as.list(substitute(lshape1.a))
   eshape1.a <- link2list(lshape1.a)
   lshape1.a <- attr(eshape1.a, "function.name")
-  
+
   lshape2.p <- as.list(substitute(lshape2.p))
   eshape2.p <- link2list(lshape2.p)
   lshape2.p <- attr(eshape2.p, "function.name")
-  
+
   lshape3.q <- as.list(substitute(lshape3.q))
   eshape3.q <- link2list(lshape3.q)
   lshape3.q <- attr(eshape3.q, "function.name")
-   
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Generalized Beta II distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       ifelse (lss,
               namesof("scale"   , lscale   , earg = escale),
               namesof("shape1.a", lshape1.a, earg = eshape1.a)), ", ",
-      ifelse (lss, 
+      ifelse (lss,
               namesof("shape1.a", lshape1.a, earg = eshape1.a),
               namesof("scale"   , lscale   , earg = escale)), ", ",
       namesof("shape2.p" , lshape2.p, earg = eshape2.p), ", ",
@@ -2504,13 +2559,13 @@ if (ii < 3) {
            .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
            .lss  = lss ,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -2534,20 +2589,20 @@ if (ii < 3) {
       namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE),
       namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       aa.init <-
       pp.init <-
       qq.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
 
           gscale     <- .gscale
           gshape1.a  <- .gshape1.a
-          gshape2.p  <- .gshape2.p        
+          gshape2.p  <- .gshape2.p
           gshape3.q  <- .gshape3.q
           if (length( .iscale    )) gscale    <-  rep_len( .iscale    , NOS)
           if (length( .ishape1.a )) gshape1.a <-  rep_len( .ishape1.a , NOS)
@@ -2588,12 +2643,12 @@ if (ii < 3) {
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
             .iscale    = iscale   , .ishape1.a = ishape1.a,
-            .gscale    = gscale   , .gshape1.a = gshape1.a,           
+            .gscale    = gscale   , .gshape1.a = gshape1.a,
             .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
             .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
             .ishape2.p = ishape2.p, .ishape3.q = ishape3.q,
             .gshape2.p = gshape2.p, .gshape3.q = gshape3.q,
-            .lss = lss ))), 
+            .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 4
     NOS <- ncol(eta) / M1
@@ -2657,9 +2712,9 @@ if (ii < 3) {
             .escale    = escale   , .eshape1.a = eshape1.a,
             .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
             .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
-            .lss = lss ))), 
+            .lss = lss ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 4
       NOS <- ncol(eta)/M1
@@ -2703,7 +2758,7 @@ if (ii < 3) {
   validparams = eval(substitute(function(eta, y, extra = NULL) {
     M1 <- 4
     NOS <- ncol(eta) / M1
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 3, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
@@ -2713,7 +2768,7 @@ if (ii < 3) {
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                       .lshape2.p , earg = .eshape2.p)
     qq   <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -2739,10 +2794,10 @@ if (ii < 3) {
 
 
 
-  deriv = eval(substitute(expression({ 
+  deriv = eval(substitute(expression({
     M1 <- 4
     NOS <- ncol(eta)/M1  # Needed for summary()
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 3, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
@@ -2752,7 +2807,7 @@ if (ii < 3) {
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                       .lshape2.p , earg = .eshape2.p)
     qq   <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -2763,17 +2818,17 @@ if (ii < 3) {
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
     dl.dp <- aa * temp1 + temp3 - temp3a - temp4
     dl.dq <- temp3 - temp3b - temp4
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     da.deta     <- dtheta.deta(aa,    .lshape1.a , earg = .eshape1.a )
     dp.deta     <- dtheta.deta(parg,  .lshape2.p , earg = .eshape2.p )
     dq.deta     <- dtheta.deta(qq,    .lshape3.q , earg = .eshape3.q )
-    
+
     myderiv <- if ( .lss ) {
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.da * da.deta,
@@ -2837,7 +2892,7 @@ if (ii < 3) {
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
-            .escale    = escale   , .eshape1.a = eshape1.a, 
+            .escale    = escale   , .eshape1.a = eshape1.a,
             .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
             .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
             .lss = lss ))))
@@ -2864,7 +2919,7 @@ dgenbetaII <- function(x, scale = 1, shape1.a, shape2.p, shape3.q,
             shape1.a * shape2.p * log(scale) -
             lbeta(shape2.p, shape3.q) -
             (shape2.p + shape3.q) * log1p((abs(x)/scale)^shape1.a)
-  
+
 
   if (any(x <= 0) || any(is.infinite(x))) {
     LLL <- max(length(x),        length(scale),
@@ -2876,7 +2931,7 @@ dgenbetaII <- function(x, scale = 1, shape1.a, shape2.p, shape3.q,
     if (length(shape3.q) != LLL) shape3.q <- rep_len(shape3.q, LLL)
 
     logden[is.infinite(x)] <- log(0)
-    logden[x < 0] <- log(0)  
+    logden[x < 0] <- log(0)
     x.eq.0 <- !is.na(x) & (x == 0)
     if (any(x.eq.0)) {
       axp <- shape1.a[x.eq.0] * shape2.p[x.eq.0]
@@ -2887,7 +2942,7 @@ dgenbetaII <- function(x, scale = 1, shape1.a, shape2.p, shape3.q,
         lbeta(shape2.p[ind5], shape3.q[ind5]) -
         (shape2.p[ind5] + shape3.q[ind5]) *
         log1p((0/scale[ind5])^shape1.a[ind5])
-      logden[x.eq.0 & axp >  1] <- log(0)    
+      logden[x.eq.0 & axp >  1] <- log(0)
     }
   }
 
@@ -3328,46 +3383,46 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
   if (length(lss) != 1 && !is.logical(lss))
     stop("Argument 'lss' not specified correctly")
-  
+
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape1.a) && !is.Numeric(ishape1.a, positive = TRUE))
     stop("Bad input for argument 'ishape1.a'")
-  
+
   if (length(ishape3.q) && !is.Numeric(ishape3.q, positive = TRUE))
     stop("Bad input for argument 'ishape3.q'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape1.a <- as.list(substitute(lshape1.a))
   eshape1.a <- link2list(lshape1.a)
   lshape1.a <- attr(eshape1.a, "function.name")
-  
+
   lshape3.q <- as.list(substitute(lshape3.q))
   eshape3.q <- link2list(lshape3.q)
   lshape3.q <- attr(eshape3.q, "function.name")
-   
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Singh-Maddala distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       ifelse (lss,
               namesof("scale"   , lscale   , earg = escale),
               namesof("shape1.a", lshape1.a, earg = eshape1.a)), ", ",
-      ifelse (lss, 
+      ifelse (lss,
               namesof("shape1.a", lshape1.a, earg = eshape1.a),
               namesof("scale"   , lscale   , earg = escale)), ", ",
       namesof("shape3.q" , lshape3.q, earg = eshape3.q), "\n",
@@ -3398,13 +3453,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                                    .eshape3.q = eshape3.q,
            .lss  = lss ,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -3426,12 +3481,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
       },
       namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       aa.init <-
       qq.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
@@ -3455,7 +3510,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           aa.init[, spp.] <- try.this["Value2"]
           qq.init[, spp.] <- try.this["Value4"]
        } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           ishape3.q <- if (length( .ishape3.q )) .ishape3.q else 1
           xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
           fit0 <- lsfit(x = xvec, y = log(quantile(yvec,  qvec)))
@@ -3490,13 +3545,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
             .iscale    = iscale   , .ishape1.a = ishape1.a,
-            .gscale    = gscale   , .gshape1.a = gshape1.a,           
+            .gscale    = gscale   , .gshape1.a = gshape1.a,
                                     .lshape3.q = lshape3.q,
                                     .eshape3.q = eshape3.q,
                                     .ishape3.q = ishape3.q,
                                     .gshape3.q = gshape3.q,
             .imethod   = imethod   , .probs.y  = probs.y,
-            .lss = lss ))), 
+            .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 3
     NOS <- ncol(eta)/M1
@@ -3561,9 +3616,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .escale    = escale   , .eshape1.a = eshape1.a,
                                     .lshape3.q = lshape3.q,
                                     .eshape3.q = eshape3.q,
-            .lss = lss ))), 
+            .lss = lss ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 3
       NOS <- ncol(eta)/M1
@@ -3604,7 +3659,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     eta <- predict(object)
@@ -3669,10 +3724,10 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
 
 
-  deriv = eval(substitute(expression({ 
+  deriv = eval(substitute(expression({
     M1 <- 3
     NOS <- ncol(eta)/M1  # Needed for summary()
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
@@ -3682,7 +3737,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- 1
     qq   <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                       .lshape3.q , earg = .eshape3.q)
@@ -3692,15 +3747,15 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
     dl.dq <- temp3 - temp3b - temp4
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     da.deta     <- dtheta.deta(aa,    .lshape1.a , earg = .eshape1.a )
     dq.deta     <- dtheta.deta(qq,    .lshape3.q , earg = .eshape3.q )
-    
+
     myderiv <- if ( .lss ) {
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.da * da.deta,
@@ -3750,7 +3805,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
-            .escale    = escale   , .eshape1.a = eshape1.a, 
+            .escale    = escale   , .eshape1.a = eshape1.a,
                                     .lshape3.q = lshape3.q,
                                     .eshape3.q = eshape3.q,
             .lss = lss ))))
@@ -3788,47 +3843,47 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
   if (length(lss) != 1 && !is.logical(lss))
     stop("Argument 'lss' not specified correctly")
-  
+
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape1.a) && !is.Numeric(ishape1.a, positive = TRUE))
     stop("Bad input for argument 'ishape1.a'")
-  
+
   if (length(ishape2.p) && !is.Numeric(ishape2.p, positive = TRUE))
     stop("Bad input for argument 'ishape2.p'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape1.a <- as.list(substitute(lshape1.a))
   eshape1.a <- link2list(lshape1.a)
   lshape1.a <- attr(eshape1.a, "function.name")
-  
+
   lshape2.p <- as.list(substitute(lshape2.p))
   eshape2.p <- link2list(lshape2.p)
   lshape2.p <- attr(eshape2.p, "function.name")
-  
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Dagum distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       ifelse (lss,
               namesof("scale"   , lscale   , earg = escale),
               namesof("shape1.a", lshape1.a, earg = eshape1.a)), ", ",
-      ifelse (lss, 
+      ifelse (lss,
               namesof("shape1.a", lshape1.a, earg = eshape1.a),
               namesof("scale"   , lscale   , earg = escale)), ", ",
       namesof("shape2.p" , lshape2.p, earg = eshape2.p), "\n",
@@ -3851,21 +3906,21 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                        c("shape1.a", "scale", "shape2.p"),
          lscale    = .lscale    , lshape1.a = .lshape1.a ,
          escale    = .escale    , eshape1.a = .eshape1.a ,
-         lshape2.p = .lshape2.p ,                         
+         lshape2.p = .lshape2.p ,
          eshape2.p = .eshape2.p )
   }, list( .lscale = lscale      , .lshape1.a = lshape1.a,
            .escale = escale      , .eshape1.a = eshape1.a,
-           .lshape2.p = lshape2.p,                        
-           .eshape2.p = eshape2.p,                        
+           .lshape2.p = lshape2.p,
+           .eshape2.p = eshape2.p,
            .lss  = lss ,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -3887,12 +3942,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
       },
       namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       aa.init <-
       pp.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
@@ -3900,7 +3955,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
         if ( .imethod == 1 ) {
           gscale     <- .gscale
           gshape1.a  <- .gshape1.a
-          gshape2.p  <- .gshape2.p        
+          gshape2.p  <- .gshape2.p
           if (length( .iscale    )) gscale    <- rep_len( .iscale    , NOS)
           if (length( .ishape1.a )) gshape1.a <- rep_len( .ishape1.a , NOS)
           if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
@@ -3916,7 +3971,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           aa.init[, spp.] <- try.this["Value2"]
           pp.init[, spp.] <- try.this["Value3"]
        } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           ishape2.p <- if (length( .ishape2.p )) .ishape2.p else 1
           xvec <- log( qvec^(-1/ ishape2.p) - 1 )
           fit0 <- lsfit(x = xvec, y = log(quantile(yvec,  qvec)))
@@ -3949,13 +4004,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
             .iscale    = iscale   , .ishape1.a = ishape1.a,
-            .gscale    = gscale   , .gshape1.a = gshape1.a,           
-            .lshape2.p = lshape2.p,                        
-            .eshape2.p = eshape2.p,                        
+            .gscale    = gscale   , .gshape1.a = gshape1.a,
+            .lshape2.p = lshape2.p,
+            .eshape2.p = eshape2.p,
             .ishape2.p = ishape2.p,
             .gshape2.p = gshape2.p,
             .imethod   = imethod   , .probs.y = probs.y,
-            .lss = lss ))), 
+            .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 3
     NOS <- ncol(eta)/M1
@@ -4021,9 +4076,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .escale    = escale   , .eshape1.a = eshape1.a,
             .lshape2.p = lshape2.p,
             .eshape2.p = eshape2.p,
-            .lss = lss ))), 
+            .lss = lss ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 3
       NOS <- ncol(eta)/M1
@@ -4064,10 +4119,10 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
@@ -4077,7 +4132,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                       .lshape2.p , earg = .eshape2.p)
     qq   <- 1
@@ -4132,7 +4187,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   deriv = eval(substitute(expression({
     M1 <- 3
     NOS <- ncol(eta)/M1  # Needed for summary()
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
@@ -4142,7 +4197,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                       .lshape2.p , earg = .eshape2.p)
     qq   <- 1
@@ -4152,15 +4207,15 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
     dl.dp <- aa * temp1 + temp3 - temp3a - temp4
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     da.deta     <- dtheta.deta(aa,    .lshape1.a , earg = .eshape1.a )
     dp.deta     <- dtheta.deta(parg,  .lshape2.p , earg = .eshape2.p )
-    
+
     myderiv <- if ( .lss ) {
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.da * da.deta,
@@ -4210,7 +4265,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
-            .escale    = escale   , .eshape1.a = eshape1.a, 
+            .escale    = escale   , .eshape1.a = eshape1.a,
             .lshape2.p = lshape2.p,
             .eshape2.p = eshape2.p,
             .lss = lss ))))
@@ -4239,41 +4294,41 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
 
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
+
   if (length(iscale   ) && !is.Numeric(iscale   ,  positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape2.p) && !is.Numeric(ishape2.p, positive = TRUE))
     stop("Bad input for argument 'ishape2.p'")
-  
+
   if (length(ishape3.q) && !is.Numeric(ishape3.q, positive = TRUE))
     stop("Bad input for argument 'ishape3.q'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape2.p <- as.list(substitute(lshape2.p))
   eshape2.p <- link2list(lshape2.p)
   lshape2.p <- attr(eshape2.p, "function.name")
-  
+
   lshape3.q <- as.list(substitute(lshape3.q))
   eshape3.q <- link2list(lshape3.q)
   lshape3.q <- attr(eshape3.q, "function.name")
-   
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Beta II distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       namesof("scale"    , lscale   , earg = escale   ), ", ",
       namesof("shape2.p" , lshape2.p, earg = eshape2.p), ", ",
       namesof("shape3.q" , lshape3.q, earg = eshape3.q), "\n",
@@ -4301,13 +4356,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
            .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
            .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -4324,19 +4379,19 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
         namesof(sha2.names , .lshape2.p , earg = .eshape2.p , tag = FALSE),
         namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       pp.init <-
       qq.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
 
         if ( .imethod == 1 ) {
           gscale     <- .gscale
-          gshape2.p  <- .gshape2.p        
+          gshape2.p  <- .gshape2.p
           gshape3.q  <- .gshape3.q
           if (length( .iscale    )) gscale    <- rep_len( .iscale    , NOS)
           if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
@@ -4392,7 +4447,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .ishape2.p = ishape2.p, .ishape3.q = ishape3.q,
             .gshape2.p = gshape2.p, .gshape3.q = gshape3.q,
             .imethod   = imethod   , .probs.y = probs.y
-                       ))), 
+                       ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 3
     NOS <- ncol(eta)/M1
@@ -4440,9 +4495,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .escale    = escale   ,
             .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
             .eshape2.p = eshape2.p, .eshape3.q = eshape3.q
-                       ))), 
+                       ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 3
       NOS <- ncol(eta)/M1
@@ -4502,7 +4557,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
 
 
-  deriv = eval(substitute(expression({ 
+  deriv = eval(substitute(expression({
     M1 <- 3
     NOS <- ncol(eta)/M1  # Needed for summary()
     Scale <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
@@ -4518,15 +4573,15 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.dp <- aa * temp1 + temp3 - temp3a - temp4
     dl.dq <- temp3 - temp3b - temp4
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     dp.deta     <- dtheta.deta(parg,  .lshape2.p , earg = .eshape2.p )
     dq.deta     <- dtheta.deta(qq,    .lshape3.q , earg = .eshape3.q )
-    
+
     myderiv <-
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.dp * dp.deta,
@@ -4548,7 +4603,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     ned2l.dscalep <-  aa * qq   / (Scale*(parg+qq))
     ned2l.dscaleq <- -aa * parg / (Scale*(parg+qq))
     ned2l.dpq     <- -temp5
-    wz <- 
+    wz <-
       array(c(c(w) * ned2l.dscale * dscale.deta^2,
               c(w) * ned2l.dp * dp.deta^2,
               c(w) * ned2l.dq * dq.deta^2,
@@ -4577,7 +4632,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
            ishape3.q = NULL,
            imethod   = 1,
            gscale    = exp(-5:5),
-           gshape3.q = seq(0.75, 4, by = 0.25),  # exp(-5:5),  
+           gshape3.q = seq(0.75, 4, by = 0.25),  # exp(-5:5),
            probs.y   = c(0.25, 0.50, 0.75),
            zero      = "shape") {
 
@@ -4585,34 +4640,34 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
 
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape3.q) && !is.Numeric(ishape3.q, positive = TRUE))
     stop("Bad input for argument 'ishape3.q'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape3.q <- as.list(substitute(lshape3.q))
   eshape3.q <- link2list(lshape3.q)
   lshape3.q <- attr(eshape3.q, "function.name")
-   
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Lomax distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       namesof("scale"    , lscale   , earg = escale   ), ", ",
       namesof("shape3.q" , lshape3.q, earg = eshape3.q), "\n",
       "Mean:     scale / (shape3.q - 1)"),
@@ -4637,13 +4692,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                                    .lshape3.q = lshape3.q,
                                    .eshape3.q = eshape3.q,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -4658,11 +4713,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
       c(namesof(scaL.names , .lscale    , earg = .escale    , tag = FALSE),
         namesof(sha3.names , .lshape3.q , earg = .eshape3.q , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       qq.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
@@ -4683,7 +4738,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           sc.init[, spp.] <- try.this["Value1"]
           qq.init[, spp.] <- try.this["Value4"]
        } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           iscale <- if (length( .iscale )) .iscale else 1
           xvec <- log1p( quantile(yvec / iscale, probs = qvec) )
           fit0 <- lsfit(x = xvec, y = -log1p(-qvec), intercept = FALSE)
@@ -4714,7 +4769,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                                     .ishape3.q = ishape3.q,
                                     .gshape3.q = gshape3.q,
             .imethod   = imethod   , .probs.y  = probs.y
-                       ))), 
+                       ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta)/M1
@@ -4759,9 +4814,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .escale    = escale   ,
                                     .lshape3.q = lshape3.q,
                                     .eshape3.q = eshape3.q
-                       ))), 
+                       ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 2
       NOS <- ncol(eta)/M1
@@ -4794,7 +4849,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     eta <- predict(object)
@@ -4855,13 +4910,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.dq <- temp3 - temp3b - temp4
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     dq.deta     <- dtheta.deta(qq,    .lshape3.q , earg = .eshape3.q )
-    
+
     myderiv <-
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.dq * dq.deta)
@@ -4920,40 +4975,40 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
   if (length(lss) != 1 && !is.logical(lss))
     stop("Argument 'lss' not specified correctly")
-  
+
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape1.a) && !is.Numeric(ishape1.a, positive = TRUE))
     stop("Bad input for argument 'ishape1.a'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape1.a <- as.list(substitute(lshape1.a))
   eshape1.a <- link2list(lshape1.a)
   lshape1.a <- attr(eshape1.a, "function.name")
-  
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Fisk distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       ifelse (lss,
               namesof("scale"   , lscale   , earg = escale),
               namesof("shape1.a", lshape1.a, earg = eshape1.a)), ", ",
-      ifelse (lss, 
+      ifelse (lss,
               namesof("shape1.a", lshape1.a, earg = eshape1.a),
               namesof("scale"   , lscale   , earg = escale)), "\n",
       "Mean:     scale * gamma(1 + 1/shape1.a) * ",
@@ -4978,13 +5033,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
            .escale = escale      , .eshape1.a = eshape1.a,
            .lss  = lss ,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -5003,11 +5058,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
         namesof(scaL.names , .lscale    , earg = .escale    , tag = FALSE))
     }
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       aa.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
@@ -5029,7 +5084,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           sc.init[, spp.] <- try.this["Value1"]
           aa.init[, spp.] <- try.this["Value2"]
        } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           iscale <- if (length( .iscale )) .iscale else 1
           xvec <- log( quantile(yvec / iscale, probs = qvec) )
           fit0 <- lsfit(x = xvec, y = logit(qvec), intercept = FALSE)
@@ -5058,9 +5113,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
             .iscale    = iscale   , .ishape1.a = ishape1.a,
-            .gscale    = gscale   , .gshape1.a = gshape1.a,           
+            .gscale    = gscale   , .gshape1.a = gshape1.a,
             .imethod   = imethod   , .probs.y  = probs.y,
-            .lss = lss ))), 
+            .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta)/M1
@@ -5116,9 +5171,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     misc$multipleResponses <- TRUE
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
-            .lss = lss ))), 
+            .lss = lss ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 2
       NOS <- ncol(eta)/M1
@@ -5153,7 +5208,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     eta <- predict(object)
@@ -5178,7 +5233,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   validparams = eval(substitute(function(eta, y, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta) / M1
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -5188,7 +5243,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- 1
     qq   <- 1
 
@@ -5210,10 +5265,10 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
 
 
-  deriv = eval(substitute(expression({ 
+  deriv = eval(substitute(expression({
     M1 <- 2
     NOS <- ncol(eta)/M1  # Needed for summary()
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -5223,7 +5278,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- 1
     qq   <- 1
     temp1 <- log(y/Scale)
@@ -5232,13 +5287,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     da.deta     <- dtheta.deta(aa,    .lshape1.a , earg = .eshape1.a )
-    
+
     myderiv <- if ( .lss ) {
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.da * da.deta)
@@ -5275,7 +5330,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
-            .escale    = escale   , .eshape1.a = eshape1.a, 
+            .escale    = escale   , .eshape1.a = eshape1.a,
             .lss = lss ))))
 }
 
@@ -5305,34 +5360,34 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
 
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape2.p) && !is.Numeric(ishape2.p, positive = TRUE))
     stop("Bad input for argument 'ishape2.p'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape2.p <- as.list(substitute(lshape2.p))
   eshape2.p <- link2list(lshape2.p)
   lshape2.p <- attr(eshape2.p, "function.name")
-  
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Inverse Lomax distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       namesof("scale"    , lscale   , earg = escale),    ", ",
       namesof("shape2.p" , lshape2.p, earg = eshape2.p), "\n",
       "Mean:     does not exist"),
@@ -5350,20 +5405,20 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
          parameters.names = c("scale", "shape2.p"),
          lscale    = .lscale    ,
          escale    = .escale    ,
-         lshape2.p = .lshape2.p ,                         
+         lshape2.p = .lshape2.p ,
          eshape2.p = .eshape2.p )
   }, list( .lscale = lscale      ,
            .escale = escale      ,
-           .lshape2.p = lshape2.p,                        
-           .eshape2.p = eshape2.p,                        
+           .lshape2.p = lshape2.p,
+           .eshape2.p = eshape2.p,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -5382,14 +5437,14 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     if (!length(etastart)) {
       sc.init <-
       pp.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
 
         if ( .imethod == 1 ) {
           gscale     <- .gscale
-          gshape2.p  <- .gshape2.p        
+          gshape2.p  <- .gshape2.p
           if (length( .iscale    )) gscale    <- rep_len( .iscale    , NOS)
           if (length( .ishape2.p )) gshape2.p <- rep_len( .ishape2.p , NOS)
 
@@ -5405,7 +5460,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           sc.init[, spp.] <- try.this["Value1"]
           pp.init[, spp.] <- try.this["Value3"]
        } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           ishape2.p <- if (length( .ishape2.p )) .ishape2.p else 1
           xvec <- log( qvec^(-1/ ishape2.p) - 1 )
           fit0 <- lsfit(x = xvec, y = log(quantile(yvec,  qvec)))
@@ -5425,12 +5480,12 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .escale    = escale   ,
             .iscale    = iscale   ,
             .gscale    = gscale   ,
-            .lshape2.p = lshape2.p,                        
-            .eshape2.p = eshape2.p,                        
+            .lshape2.p = lshape2.p,
+            .eshape2.p = eshape2.p,
             .ishape2.p = ishape2.p,
             .gshape2.p = gshape2.p,
             .imethod   = imethod   , .probs.y = probs.y
-                       ))), 
+                       ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
 
     M1 <- 2
@@ -5467,9 +5522,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
             .escale    = escale   ,
             .lshape2.p = lshape2.p,
             .eshape2.p = eshape2.p
-                       ))), 
+                       ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 2
       NOS <- ncol(eta)/M1
@@ -5501,7 +5556,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
@@ -5509,7 +5564,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     parg  <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                        .lshape2.p , earg = .eshape2.p )
     aa   <- 1
-    qq   <- 1    
+    qq   <- 1
     rinv.lomax(nsim * length(Scale), scale = Scale, shape2.p = parg)
   }, list(  .lscale    = lscale   ,
             .escale    = escale   ,
@@ -5564,13 +5619,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.dp <- aa * temp1 + temp3 - temp3a - temp4
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     dp.deta     <- dtheta.deta(parg,  .lshape2.p , earg = .eshape2.p )
-    
+
     myderiv <-
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.dp * dp.deta)
@@ -5625,40 +5680,40 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
   if (length(lss) != 1 && !is.logical(lss))
     stop("Argument 'lss' not specified correctly")
-  
+
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape1.a) && !is.Numeric(ishape1.a, positive = TRUE))
     stop("Bad input for argument 'ishape1.a'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape1.a <- as.list(substitute(lshape1.a))
   eshape1.a <- link2list(lshape1.a)
   lshape1.a <- attr(eshape1.a, "function.name")
-  
+
 
   new("vglmff",
   blurb =
     c("Paralogistic distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       ifelse (lss,
               namesof("scale"   , lscale   , earg = escale),
               namesof("shape1.a", lshape1.a, earg = eshape1.a)), ", ",
-      ifelse (lss, 
+      ifelse (lss,
               namesof("shape1.a", lshape1.a, earg = eshape1.a),
               namesof("scale"   , lscale   , earg = escale)), "\n",
       "Mean:     scale * gamma(1 + 1/shape1.a) * ",
@@ -5684,13 +5739,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
            .escale = escale      , .eshape1.a = eshape1.a,
            .lss  = lss ,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -5710,11 +5765,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           namesof(scaL.names , .lscale    , earg = .escale    , tag = FALSE))
       }
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       aa.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
@@ -5743,7 +5798,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           sc.init[, spp.] <- try.this["Value1"]
           aa.init[, spp.] <- try.this["Value2"]
        } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           ishape3.q <- if (length( .ishape1.a )) .ishape1.a else 1
           xvec <- log( (1-qvec)^(-1/ ishape3.q) - 1 )
           fit0 <- lsfit(x = xvec, y = log(quantile(yvec,  qvec)))
@@ -5771,9 +5826,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
             .iscale    = iscale   , .ishape1.a = ishape1.a,
-            .gscale    = gscale   , .gshape1.a = gshape1.a,           
+            .gscale    = gscale   , .gshape1.a = gshape1.a,
             .imethod   = imethod  , .probs.y  = probs.y,
-            .lss = lss ))), 
+            .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta)/M1
@@ -5829,9 +5884,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     misc$multipleResponses <- TRUE
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
-            .lss = lss ))), 
+            .lss = lss ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 2
       NOS <- ncol(eta)/M1
@@ -5869,7 +5924,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     eta <- predict(object)
@@ -5894,7 +5949,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   validparams = eval(substitute(function(eta, y, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta) / M1
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -5904,7 +5959,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- 1
     qq   <- aa
 
@@ -5929,7 +5984,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   deriv = eval(substitute(expression({
     M1 <- 2
     NOS <- ncol(eta)/M1  # Needed for summary()
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -5939,7 +5994,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- 1
     qq   <- aa
 
@@ -5949,13 +6004,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     da.deta     <- dtheta.deta(aa,    .lshape1.a , earg = .eshape1.a )
-    
+
     myderiv <- if ( .lss ) {
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.da * da.deta)
@@ -5992,7 +6047,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
-            .escale    = escale   , .eshape1.a = eshape1.a, 
+            .escale    = escale   , .eshape1.a = eshape1.a,
             .lss = lss ))))
 }
 
@@ -6023,40 +6078,40 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
   if (length(lss) != 1 && !is.logical(lss))
     stop("Argument 'lss' not specified correctly")
-  
+
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, 
+                  integer.valued = TRUE,
                   positive = TRUE) || imethod > 2)
     stop("Bad input for argument 'imethod'")
-  
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE)) 
+
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("Bad input for argument 'iscale'")
-  
+
   if (length(ishape1.a) && !is.Numeric(ishape1.a, positive = TRUE))
     stop("Bad input for argument 'ishape1.a'")
-  
-  if (length(probs.y) < 2 || max(probs.y) > 1 || 
+
+  if (length(probs.y) < 2 || max(probs.y) > 1 ||
         !is.Numeric(probs.y, positive = TRUE))
     stop("Bad input for argument 'probs.y'")
 
-  
+
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
-  
+
   lshape1.a <- as.list(substitute(lshape1.a))
   eshape1.a <- link2list(lshape1.a)
   lshape1.a <- attr(eshape1.a, "function.name")
-  
 
-  new("vglmff", 
-  blurb = 
+
+  new("vglmff",
+  blurb =
     c("Inverse paralogistic distribution \n\n",
-      "Links:    ", 
+      "Links:    ",
       ifelse (lss,
               namesof("scale"   , lscale   , earg = escale),
               namesof("shape1.a", lshape1.a, earg = eshape1.a)), ", ",
-      ifelse (lss, 
+      ifelse (lss,
               namesof("shape1.a", lshape1.a, earg = eshape1.a),
               namesof("scale"   , lscale   , earg = escale)), "\n",
       "Mean:     scale * gamma(shape1.a + 1/shape1.a) * ",
@@ -6082,13 +6137,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
            .escale = escale      , .eshape1.a = eshape1.a,
            .lss  = lss ,
            .zero = zero ))),
-  initialize = eval(substitute(expression({ 
-    temp5 <- w.y.check(w = w, y = y, 
-                       Is.positive.y = TRUE, 
-                       ncol.w.max = Inf, 
-                       ncol.y.max = Inf, 
-                       out.wy = TRUE, 
-                       colsyperw = 1, 
+  initialize = eval(substitute(expression({
+    temp5 <- w.y.check(w = w, y = y,
+                       Is.positive.y = TRUE,
+                       ncol.w.max = Inf,
+                       ncol.y.max = Inf,
+                       out.wy = TRUE,
+                       colsyperw = 1,
                        maximize = TRUE)
     y    <- temp5$y
     w    <- temp5$w
@@ -6107,11 +6162,11 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           namesof(scaL.names , .lscale    , earg = .escale    , tag = FALSE))
       }
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-    
+
     if (!length(etastart)) {
       sc.init <-
       aa.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
@@ -6141,7 +6196,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
           sc.init[, spp.] <- try.this["Value1"]
           aa.init[, spp.] <- try.this["Value2"]
         } else {  # .imethod == 2
-          qvec <- .probs.y 
+          qvec <- .probs.y
           ishape2.p <- if (length( .ishape1.a )) .ishape1.a else 1
           xvec <- log( qvec^(-1/ ishape2.p) - 1 )
           fit0 <- lsfit(x = xvec, y = log(quantile(yvec,  qvec)))
@@ -6171,9 +6226,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
             .iscale    = iscale   , .ishape1.a = ishape1.a,
-            .gscale    = gscale   , .gshape1.a = gshape1.a,           
+            .gscale    = gscale   , .gshape1.a = gshape1.a,
             .imethod   = imethod  , .probs.y   = probs.y,
-            .lss = lss ))), 
+            .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta)/M1
@@ -6229,9 +6284,9 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     misc$multipleResponses <- TRUE
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
             .escale    = escale   , .eshape1.a = eshape1.a,
-            .lss = lss ))), 
+            .lss = lss ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, 
+    function(mu, y, w, residuals = FALSE,
              eta, extra = NULL, summation = TRUE) {
       M1  <- 2
       NOS <- ncol(eta)/M1
@@ -6269,10 +6324,10 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -6282,7 +6337,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- aa
     qq   <- 1
     rinv.paralogistic(nsim * length(Scale), shape1.a = aa, scale = Scale)
@@ -6329,7 +6384,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
   deriv = eval(substitute(expression({
     M1 <- 2
     NOS <- ncol(eta)/M1  # Needed for summary()
-    if ( .lss ) { 
+    if ( .lss ) {
       Scale <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                          .lscale    , earg = .escale   )
       aa    <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
@@ -6339,7 +6394,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
                          .lshape1.a , earg = .eshape1.a)
       Scale <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                          .lscale    , earg = .escale )
-    }  
+    }
     parg <- aa
     qq   <- 1
     temp1 <- log(y/Scale)
@@ -6348,13 +6403,13 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     temp3a <- digamma(parg)
     temp3b <- digamma(qq)
     temp4 <- log1p(temp2)
-  
+
     dl.dscale <- (aa/Scale) * (-parg + (parg+qq) / (1+1/temp2))
     dl.da <- 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
-    
+
     dscale.deta <- dtheta.deta(Scale, .lscale ,    earg = .escale )
     da.deta     <- dtheta.deta(aa,    .lshape1.a , earg = .eshape1.a )
-    
+
     myderiv <- if ( .lss ) {
       c(w) * cbind(dl.dscale * dscale.deta,
                    dl.da * da.deta)
@@ -6391,7 +6446,7 @@ dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
   }), list( .lscale    = lscale   , .lshape1.a = lshape1.a,
-            .escale    = escale   , .eshape1.a = eshape1.a, 
+            .escale    = escale   , .eshape1.a = eshape1.a,
             .lss = lss ))))
 }
 
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index e33910b..6b39cb3 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -13,6 +13,240 @@
 
 
 
+
+
+hzeta.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
+}
+
+
+
+ hzeta <- function(lshape = "loglog", ishape = NULL, nsimEIM = 100) {
+
+  stopifnot(ishape > 0)
+  stopifnot(nsimEIM > 10,
+            length(nsimEIM) == 1,
+            nsimEIM == round(nsimEIM))
+
+
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+  new("vglmff",
+  blurb = c("Haight's Zeta distribution f(y) = (2y-1)^(-shape) - ",
+            "(2y+1)^(-shape),\n",
+            "    shape>0, y = 1, 2,....\n\n",
+            "Link:    ",
+            namesof("shape", lshape, earg = eshape), "\n\n",
+            "Mean:     (1-2^(-shape)) * zeta(shape) if shape>1",
+            "\n",
+            "Variance: (1-2^(1-shape)) * zeta(shape-1) - mean^2 if shape>2"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = FALSE,
+         multipleResponses = FALSE,
+         parameters.names = c("shape"),
+         lshape = .lshape ,
+         nsimEIM = .nsimEIM )
+  }, list( .nsimEIM = nsimEIM, .lshape = lshape ))),
+
+  initialize = eval(substitute(expression({
+
+    w.y.check(w = w, y = y,
+              Is.integer.y = TRUE,
+              Is.positive.y = TRUE)
+
+
+    predictors.names <-
+      namesof("shape", .lshape , earg = .eshape , tag = FALSE)
+
+    if (!length(etastart)) {
+      a.init <- if (length( .ishape)) .ishape else {
+        if ((meany <- weighted.mean(y, w)) < 1.5) 3.0 else
+        if (meany < 2.5) 1.4 else 1.1
+      }
+      a.init <- rep_len(a.init, n)
+      etastart <- theta2eta(a.init, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .eshape = eshape, .ishape = ishape ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    mu <- (1-2^(-shape)) * zeta(shape)
+    mu[shape <= 1] <- Inf
+    mu
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$link <-    c(shape = .lshape)
+
+    misc$earg <- list(shape = .eshape )
+
+    misc$nsimEIM <- .nsimEIM
+
+  }), list( .lshape = lshape, .eshape = eshape, .nsimEIM = nsimEIM ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dhzeta(x = y, shape = shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("hzeta"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    rhzeta(nsim * length(shape), shape = shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+  deriv = eval(substitute(expression({
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+
+    d3 <- deriv3(~ log((2*y-1)^(-shape) - (2*y+1)^(-shape)),
+                 "shape", hessian = FALSE)
+    eval.d3 <- eval(d3)
+
+    dl.dshape <-  attr(eval.d3, "gradient")
+
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+    sd3 <- deriv3(~ log((2*ysim-1)^(-shape) - (2*ysim+1)^(-shape)),
+                  "shape", hessian = FALSE)
+    run.var <- 0
+    for (ii in 1:( .nsimEIM )) {
+      ysim <- rhzeta(n, shape = shape)
+      eval.sd3 <- eval(sd3)
+      dl.dshape <-  attr(eval.d3, "gradient")
+      rm(ysim)
+      temp3 <- dl.dshape
+      run.var <- ((ii-1) * run.var + temp3^2) / ii
+    }
+    wz <- if (intercept.only)
+        matrix(colMeans(cbind(run.var)),
+               n, dimm(M), byrow = TRUE) else cbind(run.var)
+
+    wz <- wz * dshape.deta^2
+    c(w) * wz
+  }), list( .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+dhzeta <- function(x, shape, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  if (!is.Numeric(shape, positive = TRUE))
+    stop("'shape' must be numeric and have positive values")
+
+  nn <- max(length(x), length(shape))
+  if (length(x)     != nn) x     <- rep_len(x,     nn)
+  if (length(shape) != nn) shape <- rep_len(shape, nn)
+
+  ox <- !is.finite(x)
+  zero <- ox | round(x) != x | x < 1
+  ans <- rep_len(0, nn)
+  ans[!zero] <- (2*x[!zero]-1)^(-shape[!zero]) -
+                (2*x[!zero]+1)^(-shape[!zero])
+  if (log.arg) log(ans) else ans
+}
+
+
+
+phzeta <- function(q, shape, log.p = FALSE) {
+
+
+  nn <- max(length(q), length(shape))
+  q     <- rep_len(q,     nn)
+  shape <- rep_len(shape, nn)
+  oq <- !is.finite(q)
+  zero <- oq | q < 1
+  q <- floor(q)
+  ans <- 0 * q
+  ans[!zero] <- 1 - (2*q[!zero]+1)^(-shape[!zero])
+
+  ans[q == -Inf] <- 0  # 20141215 KaiH
+  ans[q ==  Inf] <- 1  # 20141215 KaiH
+
+  ans[shape <= 0] <- NaN
+  if (log.p) log(ans) else ans
+}
+
+
+
+qhzeta <- function(p, shape) {
+
+  if (!is.Numeric(p, positive = TRUE) ||
+      any(p >= 1))
+    stop("argument 'p' must have values inside the interval (0,1)")
+
+  nn <- max(length(p), length(shape))
+  p     <- rep_len(p,     nn)
+  shape <- rep_len(shape, nn)
+  ans <- (((1 - p)^(-1/shape) - 1) / 2)  # p is in (0,1)
+  ans[shape <= 0] <- NaN
+  floor(ans + 1)
+}
+
+
+rhzeta <- function(n, shape) {
+
+
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+              stop("bad input for argument 'n'") else n
+
+  shape <- rep_len(shape, use.n)
+  ans <- (runif(use.n)^(-1/shape) - 1) / 2
+  ans[shape <= 0] <- NaN
+  floor(ans + 1)
+}
+
+
+
+
+
+
+
+
+
+
+
+
 dkumar <- function(x, shape1, shape2, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -136,7 +370,7 @@ pkumar <- function(q, shape1, shape2,
  kumar <-
   function(lshape1 = "loge", lshape2 = "loge",
            ishape1 = NULL,   ishape2 = NULL,
-           grid.shape1 = c(0.4, 6.0), tol12 = 1.0e-4, zero = NULL) {
+           gshape1 = exp(2*ppoints(5) - 1), tol12 = 1.0e-4, zero = NULL) {
   lshape1 <- as.list(substitute(lshape1))
   eshape1 <- link2list(lshape1)
   lshape1 <- attr(eshape1, "function.name")
@@ -152,14 +386,15 @@ pkumar <- function(q, shape1, shape2,
 
   if (!is.Numeric(tol12, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'tol12'")
-  if (!is.Numeric(grid.shape1, length.arg = 2, positive = TRUE))
-    stop("bad input for argument 'grid.shape1'")
+  if (!is.Numeric(gshape1, positive = TRUE))
+    stop("bad input for argument 'gshape1'")
 
 
   new("vglmff",
   blurb = c("Kumaraswamy distribution\n\n",
-            "Links:    ", namesof("shape1", lshape1, eshape1, tag = FALSE), ", ",
-                          namesof("shape2", lshape2, eshape2, tag = FALSE), "\n",
+            "Links:    ",
+              namesof("shape1", lshape1, eshape1, tag = FALSE), ", ",
+              namesof("shape2", lshape2, eshape2, tag = FALSE), "\n",
             "Mean:     shape2 * beta(1 + 1 / shape1, shape2)"),
  constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -204,7 +439,7 @@ pkumar <- function(q, shape1, shape2,
         sum(c(w) * dkumar(y, shape1 = shape1, shape2 = shape2, log = TRUE))
       }
 
-      shape1.grid <- seq( .grid.shape1[1], .grid.shape1[2], len = 19)
+      shape1.grid <- .gshape1
       shape1.init <- if (length( .ishape1 )) .ishape1 else
         grid.search(shape1.grid, objfun = kumar.Loglikfun,
                     y = y,  x = x, w = w)
@@ -222,7 +457,7 @@ pkumar <- function(q, shape1, shape2,
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .ishape1 = ishape1, .ishape2 = ishape2,
             .eshape1 = eshape1, .eshape2 = eshape2,
-            .grid.shape1 = grid.shape1 ))),
+            .gshape1 = gshape1 ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
     shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
@@ -256,6 +491,14 @@ pkumar <- function(q, shape1, shape2,
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = c("kumar"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
+    okay1 <- all(is.finite(shape1)) && all(0 < shape1) &&
+             all(is.finite(shape2)) && all(0 < shape2)
+    okay1
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
   simslot = eval(substitute(
   function(object, nsim) {
     eta <- predict(object)
@@ -455,7 +698,7 @@ riceff.control <- function(save.weights = TRUE, ...) {
     predictors.names <-
       c(namesof("sigma", .lsigma , earg = .esigma , tag = FALSE),
         namesof("vee",   .lvee   , earg = .evee   , tag = FALSE))
-        
+
 
 
 
@@ -523,6 +766,14 @@ riceff.control <- function(save.weights = TRUE, ...) {
   }, list( .lvee = lvee, .lsigma = lsigma,
            .evee = evee, .esigma = esigma ))),
   vfamily = c("riceff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    sigma <- eta2theta(eta[, 1], link = .lsigma , earg = .esigma )
+    vee   <- eta2theta(eta[, 2], link = .lvee   , earg = .evee )
+    okay1 <- all(is.finite(sigma)) && all(0 < sigma) &&
+             all(is.finite(vee  )) && all(0 < vee  )
+    okay1
+  }, list( .lvee = lvee, .lsigma = lsigma,
+           .evee = evee, .esigma = esigma ))),
 
 
   simslot = eval(substitute(
@@ -530,7 +781,7 @@ riceff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     sigma <- eta2theta(eta[, 1], link = .lsigma , earg = .esigma )
@@ -683,13 +934,13 @@ skellam.control <- function(save.weights = TRUE, ...) {
   new("vglmff",
   blurb = c("Skellam distribution\n\n",
          "Links:    ",
-         namesof("mu1", lmu1, earg = emu1, tag = FALSE), ", ", 
+         namesof("mu1", lmu1, earg = emu1, tag = FALSE), ", ",
          namesof("mu2", lmu2, earg = emu2, tag = FALSE), "\n",
          "Mean:     mu1-mu2", "\n",
          "Variance: mu1+mu2"),
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = TRUE)
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -789,6 +1040,15 @@ skellam.control <- function(save.weights = TRUE, ...) {
            .emu1 = emu1, .emu2 = emu2,
            .parallel = parallel ))),
   vfamily = c("skellam"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
+    mu2 <- eta2theta(eta[, 2], link = .lmu2, earg = .emu2 )
+    okay1 <- all(is.finite(mu1)) && all(0 < mu1) &&
+             all(is.finite(mu2)) && all(0 < mu2)
+    okay1
+  }, list( .lmu1 = lmu1, .lmu2 = lmu2,
+           .emu1 = emu1, .emu2 = emu2 ))),
+
 
 
 
@@ -797,7 +1057,7 @@ skellam.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     mu1 <- eta2theta(eta[, 1], link = .lmu1, earg = .emu1 )
@@ -865,66 +1125,113 @@ skellam.control <- function(save.weights = TRUE, ...) {
 
 
 
-dyules <- function(x, rho, log = FALSE) {
+
+dyules <- function(x, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
-
   if ( log.arg ) {
-    ans <- log(rho) + lbeta(abs(x), rho+1)
+    ans <- log(shape) + lbeta(abs(x), shape+1)
     ans[(x != round(x)) | (x < 1)] <- log(0)
   } else {
-    ans <- rho * beta(x, rho+1)
+    ans <- shape * beta(x, shape+1)
     ans[(x != round(x)) | (x < 1)] <- 0
   }
-  ans[!is.finite(rho) | (rho <= 0) | (rho <= 0)] <- NA
+  ans[!is.finite(shape) | (shape <= 0)] <- NaN
   ans
 }
 
 
 
 
-ryules <- function(n, rho) {
-  rgeom(n, prob = exp(-rexp(n, rate = rho))) + 1
+pyules <- function(q, shape, lower.tail = TRUE, log.p = FALSE) {
+
+
+  tq <- trunc(q)
+
+  if (lower.tail) {
+    ans <- 1 - tq * beta(abs(tq), shape+1)
+    ans[q < 1] <- 0
+    ans[is.infinite(q) & 0 < q] <- 1  # 20141215 KaiH
+  } else {
+    ans <-     tq * beta(abs(tq), shape+1)
+    ans[q < 1] <- 1
+    ans[is.infinite(q) & 0 < q] <- 0  # 20160713
+  }
+
+  ans[shape <= 0] <- NaN
+  if (log.p) log(ans) else ans
+  ans
 }
 
 
 
+ qyules <- function(p, shape) {
 
-pyules <- function(q, rho, log.p = FALSE) {
+  LLL <- max(length(p), length(shape))
+  if (length(p)     != LLL) p     <- rep_len(p,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  ans <- rep_len(0, LLL)
 
+  lo <- rep_len(1, LLL)
+  approx.ans <- lo  # True at lhs
+  hi <- 2 * lo + 10
+  dont.iterate <- p == 1 | shape <= 0
+  done <- p <= pyules(hi, shape) | dont.iterate
+  while (!all(done)) {
+    hi.save <- hi[!done]
+    hi[!done] <- 2 * lo[!done] + 10
+    lo[!done] <- hi.save
+    done[!done] <- (p[!done] <= pyules(hi[!done], shape[!done]))
+  }
+
+  foo <- function(q, shape, p)
+    pyules(q, shape) - p
+
+  lhs <- (p <= dyules(1, shape)) | dont.iterate
+
+  approx.ans[!lhs] <- bisection.basic(foo, lo[!lhs], hi[!lhs], tol = 1/16,
+                                      shape = shape[!lhs], p = p[!lhs])
+  faa <- floor(approx.ans)
+  ans <- ifelse(pyules(faa, shape) < p & p <= pyules(faa+1, shape), faa+1, faa)
+
+  ans[p == 1] <- Inf
+  ans[shape <= 0] <- NaN
 
-  tq <- trunc(q)
-  ans <- 1 - tq * beta(abs(tq), rho+1)
-  ans[q < 1] <- 0
-  ans[is.infinite(q) & q > 0] <- 1  # 20141215 KaiH
-  ans[(rho <= 0) | (rho <= 0)] <- NA
-  if (log.p) log(ans) else ans
   ans
+}  # qyules
+
+
+
+ryules <- function(n, shape) {
+
+  rgeom(n, prob = exp(-rexp(n, rate = shape))) + 1
 }
 
 
 
 
+
 yulesimon.control <- function(save.weights = TRUE, ...) {
   list(save.weights = save.weights)
 }
 
 
- yulesimon <- function(link = "loge",
-                       irho = NULL, nsimEIM = 200,
+
+ yulesimon <- function(lshape = "loge",
+                       ishape = NULL, nsimEIM = 200,
                        zero = NULL) {
 
-  if (length(irho) &&
-      !is.Numeric(irho, positive = TRUE))
-    stop("argument 'irho' must be > 0")
+  if (length(ishape) &&
+      !is.Numeric(ishape, positive = TRUE))
+    stop("argument 'ishape' must be > 0")
 
 
 
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
 
 
   if (!is.Numeric(nsimEIM, length.arg = 1,
@@ -935,13 +1242,13 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
 
 
   new("vglmff",
-  blurb = c("Yule-Simon distribution f(y) = rho * beta(y, rho + 1), ",
-            "rho > 0, y = 1, 2,..\n\n",
+  blurb = c("Yule-Simon distribution f(y) = shape * beta(y, shape + 1), ",
+            "shape > 0, y = 1, 2,..\n\n",
             "Link:    ",
-            namesof("rho", link, earg = earg), "\n\n",
-            "Mean:     rho / (rho - 1), provided rho > 1\n",
-            "Variance: rho^2 / ((rho - 1)^2 * (rho - 2)), ",
-            "provided rho > 2"),
+            namesof("shape", lshape, earg = eshape), "\n\n",
+            "Mean:     shape / (shape - 1), provided shape > 1\n",
+            "Variance: shape^2 / ((shape - 1)^2 * (shape - 2)), ",
+            "provided shape > 2"),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
@@ -951,8 +1258,10 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
   infos = eval(substitute(function(...) {
     list(M1 = 1,
          Q1 = 1,
-         nsimEIM = .nsimEIM,
-         parameters.names = c("rho"),
+         expected = TRUE,
+         multipleResponses = TRUE,
+         nsimEIM = .nsimEIM ,
+         parameters.names = c("shape"),
          zero = .zero )
   }, list( .zero = zero,
            .nsimEIM = nsimEIM ))),
@@ -982,59 +1291,63 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
     M <- M1 * ncoly
 
 
-    mynames1  <- param.names("rho", ncoly)
-    predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
+    mynames1  <- param.names("shape", ncoly)
+    predictors.names <-
+      namesof(mynames1, .lshape , earg = .eshape , tag = FALSE)
 
     if (!length(etastart)) {
       wmeany <- colSums(y * w) / colSums(w) + 1/8
 
-      rho.init <- wmeany / (wmeany - 1)
-      rho.init <- matrix(if (length( .irho )) .irho else
-                         rho.init, n, M, byrow = TRUE)
-      etastart <- theta2eta(rho.init, .link , earg = .earg )
+      shape.init <- wmeany / (wmeany - 1)
+      shape.init <- matrix(if (length( .ishape )) .ishape else
+                           shape.init, n, M, byrow = TRUE)
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
     }
-  }), list( .link = link, .earg = earg, .irho = irho ))),
+  }), list( .lshape = lshape, .eshape = eshape, .ishape = ishape ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    ans <- rho <- eta2theta(eta, .link , earg = .earg )
-    ans[rho >  1] <- rho / (rho - 1)
-    ans[rho <= 1] <- NA
+    ans <- shape <- eta2theta(eta, .lshape , earg = .eshape )
+    ans[shape >  1] <- shape / (shape - 1)
+    ans[shape <= 1] <- NA
     ans
-  }, list( .link = link, .earg = earg ))),
+  }, list( .lshape = lshape, .eshape = eshape ))),
   last = eval(substitute(expression({
     M1 <- extra$M1
-    misc$link <- c(rep_len( .link , ncoly))
+    misc$link <- c(rep_len( .lshape , ncoly))
     names(misc$link) <- mynames1
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
     for (ii in 1:ncoly) {
-      misc$earg[[ii]] <- .earg
+      misc$earg[[ii]] <- .eshape
     }
 
     misc$M1 <- M1
-    misc$irho <- .irho
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
+    misc$ishape <- .ishape
     misc$nsimEIM <- .nsimEIM
-  }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
-            .irho = irho ))),
+  }), list( .lshape = lshape, .eshape = eshape, .nsimEIM = nsimEIM,
+            .ishape = ishape ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    rho <- eta2theta(eta, .link , earg = .earg )
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dyules(x = y, rho = rho, log = TRUE)
+      ll.elts <- c(w) * dyules(x = y, shape = shape, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg ))),
+  }, list( .lshape = lshape, .eshape = eshape ))),
   vfamily = c("yulesimon"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
 
 
 
@@ -1045,12 +1358,12 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
 
    pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    rho <- eta2theta(eta, .link , earg = .earg )
-    ryules(nsim * length(rho), rho = rho)
-  }, list( .link = link, .earg = earg ))),
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    ryules(nsim * length(shape), shape = shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
 
 
 
@@ -1060,31 +1373,31 @@ yulesimon.control <- function(save.weights = TRUE, ...) {
 
   deriv = eval(substitute(expression({
     M1 <- 1
-    rho <- eta2theta(eta, .link , earg = .earg )
-    dl.drho <- 1/rho + digamma(1+rho) - digamma(1+rho+y)
-    drho.deta <- dtheta.deta(rho, .link , earg = .earg )
-    c(w) * dl.drho * drho.deta
-  }), list( .link = link, .earg = earg ))),
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    dl.dshape <- 1/shape + digamma(1+shape) - digamma(1+shape+y)
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
   weight = eval(substitute(expression({
 
     run.var <- 0
     for (ii in 1:( .nsimEIM )) {
-      ysim <- ryules(n, rho <- rho)
-      dl.drho <- 1/rho + digamma(1+rho) - digamma(1+rho+ysim)
+      ysim <- ryules(n, shape <- shape)
+      dl.dshape <- 1/shape + digamma(1+shape) - digamma(1+shape+ysim)
       rm(ysim)
-      temp3 <- dl.drho
+      temp3 <- dl.dshape
       run.var <- ((ii-1) * run.var + temp3^2) / ii
     }
     wz <- if (intercept.only)
         matrix(colMeans(cbind(run.var)),
                n, M, byrow = TRUE) else cbind(run.var)
 
-    wz <- wz * drho.deta^2
+    wz <- wz * dshape.deta^2
 
 
     c(w) * wz
   }), list( .nsimEIM = nsimEIM ))))
-}
+}  # yule.simon()
 
 
 
@@ -1153,7 +1466,7 @@ plind <- function(q, theta, lower.tail = TRUE, log.p = FALSE) {
 rlind <- function(n, theta) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           length.arg = 1, positive = TRUE)) 
+                           length.arg = 1, positive = TRUE))
              stop("bad input for argument 'n'") else n
 
 
@@ -1183,7 +1496,7 @@ rlind <- function(n, theta) {
 
   new("vglmff",
   blurb = c("Lindley distribution f(y) = ",
-            "theta^2 * (1 + y) * exp(-theta * y) / (1 + theta), ",  
+            "theta^2 * (1 + y) * exp(-theta * y) / (1 + theta), ",
             "theta > 0, y > 0,\n\n",
             "Link:    ",
             namesof("theta", link, earg = earg), "\n\n",
@@ -1280,6 +1593,12 @@ rlind <- function(n, theta) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("lindley"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    theta <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(theta)) && all(0 < theta)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
 
 
 
@@ -1288,7 +1607,7 @@ rlind <- function(n, theta) {
 
    pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     theta <- eta2theta(eta, .link , earg = .earg )
@@ -1339,240 +1658,56 @@ dpoislindley <- function(x, theta, log = FALSE) {
 }
 
 
-if (FALSE)
-rpoislindley <- function(n, theta) {
-}
 
 
-if (FALSE)
-ppoislindley <- function(q, theta) {
-}
 
 
 
-if (FALSE)
-poislindley.control <- function(save.weights = TRUE, ...) {
-  list(save.weights = save.weights)
-}
+dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
+                   smallno = .Machine$double.eps * 1000) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
 
+  if (!is.Numeric(sigma) || any(sigma <= 0))
+    stop("argument 'sigma' must be positive")
+  L <- max(length(x), length(mu), length(sigma))
+  if (length(x)     != L) x     <- rep_len(x,     L)
+  if (length(mu)    != L) mu    <- rep_len(mu,    L)
+  if (length(sigma) != L) sigma <- rep_len(sigma, L)
 
-if (FALSE)
- poissonlindley <-
-  function(link = "loge",
-           itheta = NULL, nsimEIM = 200,
-           zero = NULL) {
+  zedd <- (x-mu)/sigma
+  if (log.arg) {
+    ifelse(abs(zedd) < smallno,
+           -log(2*sigma*sqrt(2*pi)),
+           log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2))
+  } else {
+    ifelse(abs(zedd) < smallno,
+           1/(2*sigma*sqrt(2*pi)),
+           -expm1(-zedd^2/2)/(sqrt(2*pi)*sigma*zedd^2))
+  }
+}
 
-  stop("not working since rpoislindley() not written")
 
 
 
-  if (length(itheta) &&
-      !is.Numeric(itheta, positive = TRUE))
-    stop("argument 'itheta' must be > 0")
+pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000,
+                   lower.tail = TRUE, log.p = FALSE) {
+  if (anyNA(q))
+    stop("argument 'q' must have non-missing values")
+  if (!is.Numeric(mu))
+    stop("argument 'mu' must have finite and non-missing values")
+  if (!is.Numeric(sigma, positive = TRUE))
+    stop("argument 'sigma' must have positive finite non-missing values")
+  if (!is.Numeric(very.negative, length.arg = 1) ||
+     (very.negative >= 0))
+    stop("argument 'very.negative' must be quite negative")
 
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
 
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-  if (!is.Numeric(nsimEIM, length.arg = 1,
-                  integer.valued = TRUE) ||
-      nsimEIM <= 50)
-    stop("argument 'nsimEIM' should be an integer greater than 50")
-
-
-
-  new("vglmff",
-  blurb = c("Poisson-Lindley distribution f(y) = ",
-            "theta^2 * (theta + 2 + y) / (theta + 1)^(y+3), ",  
-            "theta > 0, y = 0, 1, 2,..\n\n",
-            "Link:    ",
-            namesof("theta", link, earg = earg), "\n\n",
-            "Mean:     (theta + 2) / (theta * (theta + 1)),\n",
-            "Variance: (theta^3 + 4 * theta^2 + 6 * theta + 2) / ",
-                      "(theta * (theta + 1))^2, "
-            ),
-  constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
-                                predictors.names = predictors.names,
-                                M1 = 1)
-  }), list( .zero = zero ))),
-
-  infos = eval(substitute(function(...) {
-    list(M1 = 1,
-         Q1 = 1,
-         nsimEIM = .nsimEIM ,
-         parameters.names = c("theta"),
-         zero = .zero )
-  }, list( .zero = zero,
-           .nsimEIM = nsimEIM ))),
-
-  initialize = eval(substitute(expression({
-
-
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              Is.integer.y = TRUE,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-
-
-    ncoly <- ncol(y)
-
-    M1 <- 1
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
-    M <- M1 * ncoly
-
-
-    mynames1  <- param.names("theta", ncoly)
-    predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
-
-    if (!length(etastart)) {
-      wmeany <- colSums(y * w) / colSums(w) + 1/8
-
-      MOM <- (sqrt((wmeany - 1)^2 + 8 * wmeany) -
-              wmeany + 1) / (2 * wmeany)
-      MOM[MOM < 0.01] <- 0.01
-
-
-      theta.init <- MOM
-      theta.init <- matrix(if (length( .itheta )) .itheta else
-                           theta.init, n, M, byrow = TRUE)
-      etastart <- theta2eta(theta.init, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg, .itheta = itheta ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    theta <- eta2theta(eta, .link , earg = .earg )
-    (theta + 2) / (theta * (theta + 1))
-  }, list( .link = link, .earg = earg ))),
-  last = eval(substitute(expression({
-    M1 <- extra$M1
-    misc$link <- c(rep_len( .link , ncoly))
-    names(misc$link) <- mynames1
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- mynames1
-    for (ii in 1:ncoly) {
-      misc$earg[[ii]] <- .earg
-    }
-
-    misc$M1 <- M1
-    misc$itheta <- .itheta
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
-    misc$nsimEIM <- .nsimEIM
-  }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
-            .itheta = itheta ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    theta = eta2theta(eta, .link , earg = .earg )
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <- c(w) * dpoislindley(x = y, theta = theta, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("poissonlindley"),
-  deriv = eval(substitute(expression({
-    M1 <- 1
-    theta <- eta2theta(eta, .link , earg = .earg )
-
-    dl.dtheta <- 2 / theta + 1 / (y + 2 + theta) - (y + 3) / (theta + 1)
-
-    DTHETA.DETA <- dtheta.deta(theta, .link , earg = .earg )
-
-    c(w) * dl.dtheta * DTHETA.DETA
-  }), list( .link = link, .earg = earg ))),
-  weight = eval(substitute(expression({
-
-    run.var <- 0
-    for (ii in 1:( .nsimEIM )) {
-      ysim <- rpoislindley(n, theta = theta)
-      dl.dtheta <- 2 / theta + 1 / (ysim + 2 + theta) -
-                   (ysim + 3) / (theta + 1)
-      rm(ysim)
-      temp3 <- dl.dtheta
-      run.var <- ((ii-1) * run.var + temp3^2) / ii
-    }
-    wz <- if (intercept.only)
-        matrix(colMeans(cbind(run.var)),
-               n, M, byrow = TRUE) else cbind(run.var)
-
-    wz <- wz * DTHETA.DETA^2
-
-
-    c(w) * wz
-  }), list( .nsimEIM = nsimEIM ))))
-}
-
-
-
-
-
-
-
-
-dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
-                   smallno = .Machine$double.eps * 1000) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
-
-  if (!is.Numeric(sigma) || any(sigma <= 0))
-    stop("argument 'sigma' must be positive")
-  L <- max(length(x), length(mu), length(sigma))
-  if (length(x)     != L) x     <- rep_len(x,     L)
-  if (length(mu)    != L) mu    <- rep_len(mu,    L)
-  if (length(sigma) != L) sigma <- rep_len(sigma, L)
-
-  zedd <- (x-mu)/sigma
-  if (log.arg) {
-    ifelse(abs(zedd) < smallno,
-           -log(2*sigma*sqrt(2*pi)),
-           log1p(-exp(-zedd^2/2)) - log(sqrt(2*pi)*sigma*zedd^2))
-  } else {
-    ifelse(abs(zedd) < smallno,
-           1/(2*sigma*sqrt(2*pi)),
-           -expm1(-zedd^2/2)/(sqrt(2*pi)*sigma*zedd^2))
-  }
-}
-
-
-
-
-pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000,
-                   lower.tail = TRUE, log.p = FALSE) {
-  if (anyNA(q))
-    stop("argument 'q' must have non-missing values")
-  if (!is.Numeric(mu))
-    stop("argument 'mu' must have finite and non-missing values")
-  if (!is.Numeric(sigma, positive = TRUE))
-    stop("argument 'sigma' must have positive finite non-missing values")
-  if (!is.Numeric(very.negative, length.arg = 1) ||
-     (very.negative >= 0))
-    stop("argument 'very.negative' must be quite negative")
-
-  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
-    stop("bad input for argument 'lower.tail'")
-
-  if (!is.logical(log.p) || length(log.p) != 1)
-    stop("bad input for argument 'log.p'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   L <- max(length(q), length(mu), length(sigma))
   if (length(q)     != L) q     <- rep_len(q,     L)
@@ -1630,7 +1765,7 @@ slash.control <- function(save.weights = TRUE, ...) {
 
  slash <- function(lmu = "identitylink", lsigma = "loge",
                    imu = NULL, isigma = NULL,
-                   iprobs = c(0.1, 0.9),
+                   gprobs.y = ppoints(8),
                    nsimEIM = 250, zero = NULL,
                    smallno = .Machine$double.eps * 1000) {
 
@@ -1654,10 +1789,9 @@ slash.control <- function(save.weights = TRUE, ...) {
       nsimEIM <= 50)
     stop("argument 'nsimEIM' should be an integer greater than 50")
 
-  if (!is.Numeric(iprobs, positive = TRUE) ||
-      max(iprobs) >= 1 ||
-      length(iprobs) != 2)
-    stop("bad input for argument 'iprobs'")
+  if (!is.Numeric(gprobs.y, positive = TRUE) ||
+      max(gprobs.y) >= 1)
+    stop("bad input for argument 'gprobs.y'")
   if (!is.Numeric(smallno, positive = TRUE) ||
       smallno > 0.1)
     stop("bad input for argument 'smallno'")
@@ -1723,8 +1857,8 @@ slash.control <- function(save.weights = TRUE, ...) {
                          log1p(-exp(-zedd^2/2)) -
                          log(sqrt(2*pi) * sigma * zedd^2)))
       }
-      iprobs <- .iprobs
-      mu.grid <- quantile(rep(y, w), probs=iprobs)
+      gprobs.y <- .gprobs.y
+      mu.grid <- quantile(rep(y, w), probs = gprobs.y)
       mu.grid <- seq(mu.grid[1], mu.grid[2], length=100)
       mu.init <- if (length( .imu )) .imu else
                  grid.search(mu.grid, objfun = slash.Loglikfun,
@@ -1741,7 +1875,7 @@ slash.control <- function(save.weights = TRUE, ...) {
   }), list( .lmu = lmu, .lsigma = lsigma,
             .imu = imu, .isigma = isigma,
             .emu = emu, .esigma = esigma,
-            .iprobs = iprobs, .smallno = smallno))),
+            .gprobs.y = gprobs.y, .smallno = smallno))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
       NA * eta2theta(eta[, 1], link = .lmu , earg = .emu )
   }, list( .lmu = lmu, .emu = emu ))),
@@ -1775,6 +1909,16 @@ slash.control <- function(save.weights = TRUE, ...) {
   }, list( .lmu = lmu, .lsigma = lsigma,
            .emu = emu, .esigma = esigma, .smallno = smallno ))),
   vfamily = c("slash"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mu    <- eta2theta(eta[, 1], link = .lmu    , earg = .emu    )
+    sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+
+    okay1 <- all(is.finite(mu))    &&
+             all(is.finite(sigma)) && all(0 < sigma)
+    okay1
+  }, list( .lmu = lmu, .lsigma = lsigma,
+           .emu = emu, .esigma = esigma ))),
+
 
 
 
@@ -1785,7 +1929,7 @@ slash.control <- function(save.weights = TRUE, ...) {
 
    pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     mu    <- eta2theta(eta[, 1], link = .lmu    , earg = .emu )
@@ -1896,6 +2040,15 @@ dnefghs <- function(x, tau, log = FALSE) {
             "Link:    ",
             namesof("tau", link, earg = earg), "\n\n",
             "Mean:     pi / tan(pi * tau)\n"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("tau"),
+         ltau = .link )
+  }, list( .link = link ))),
+
   initialize = eval(substitute(expression({
     temp5 <-
     w.y.check(w = w, y = y,
@@ -1908,7 +2061,7 @@ dnefghs <- function(x, tau, log = FALSE) {
 
 
     predictors.names <-
-      namesof("tau", .link , earg = .earg , tag = FALSE) 
+      namesof("tau", .link , earg = .earg , tag = FALSE)
 
 
     if (!length(etastart)) {
@@ -1930,7 +2083,7 @@ dnefghs <- function(x, tau, log = FALSE) {
     pi / tan(pi * tau)
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
-    misc$link <-    c(tau = .link)
+    misc$link <-    c(tau = .link )
 
     misc$earg <- list(tau = .earg )
 
@@ -1955,6 +2108,13 @@ dnefghs <- function(x, tau, log = FALSE) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("nefghs"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    tau <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(tau)) && all(0 < tau)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
+
   deriv = eval(substitute(expression({
     tau <- eta2theta(eta, .link , earg = .earg )
     dl.dtau <- pi / tan(pi * tau) - y
@@ -2025,6 +2185,17 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
             namesof("shape1", lshape1, earg = eshape1), ", ",
             namesof("shape2", lshape2, earg = eshape2), "\n\n",
             "Mean:     digamma(shape1) - digamma(shape2)"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("shape1", "shape2"),
+         lshape1 = .lshape1 ,
+         lshape2 = .lshape2 ,
+         imethod = .imethod )
+  }, list( .imethod = imethod, .lshape1 = lshape1, .lshape2 = lshape2 ))),
+
   initialize = eval(substitute(expression({
 
     temp5 <-
@@ -2070,7 +2241,7 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
-    misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2)
+    misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2 )
 
     misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 )
 
@@ -2099,6 +2270,15 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = c("logF"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
+    okay1 <- all(is.finite(shape1)) && all(0 < shape1) &&
+             all(is.finite(shape2)) && all(0 < shape2)
+    okay1
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
+
 
 
 
@@ -2133,10 +2313,10 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
     ned2l.dshape1shape2 <- -tmp888
 
     wz <- matrix(0, n, dimm(M))
-    wz[,iam(1, 1, M = M)] <- ned2l.dshape12 * dshape1.deta^2
-    wz[,iam(2, 2, M = M)] <- ned2l.dshape22 * dshape2.deta^2
-    wz[,iam(1, 2, M = M)] <- ned2l.dshape1shape2 * dshape1.deta *
-                                                   dshape2.deta
+    wz[, iam(1, 1, M = M)] <- ned2l.dshape12 * dshape1.deta^2
+    wz[, iam(2, 2, M = M)] <- ned2l.dshape22 * dshape2.deta^2
+    wz[, iam(1, 2, M = M)] <- ned2l.dshape1shape2 * dshape1.deta * dshape2.deta
+
     c(w) * wz
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .eshape1 = eshape1, .eshape2 = eshape2 ))))
@@ -2183,7 +2363,7 @@ rbenf <- function(n, ndigits = 1) {
   upperlimit <- ifelse(ndigits == 1, 9, 99)
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
-                           length.arg = 1, positive = TRUE)) 
+                           length.arg = 1, positive = TRUE))
              stop("bad input for argument 'n'") else n
   myrunif <- runif(use.n)
 
@@ -2309,7 +2489,7 @@ qbenf <- function(p, ndigits = 1,
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
-  if (log.p) { 
+  if (log.p) {
     bad <- ((p > 0) | is.na(p) | is.nan(p))
   } else {
     bad <- ((p < 0) | (p > 1) | is.na(p) | is.nan(p))
@@ -2551,6 +2731,11 @@ qbenf <- function(p, ndigits = 1,
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("truncgeometric"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    prob <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(prob)) && all(0 < prob & prob < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     prob <- eta2theta(eta, .link , earg = .earg )
     sss <- upper.limit <- extra$upper.limit  # Is a matrix
@@ -2603,9 +2788,17 @@ qbenf <- function(p, ndigits = 1,
   function(A = 0, B = 1,
            lmu = "logit",
            lphi = "loge",
-           imu = NULL, iphi = NULL, imethod = 1, zero = NULL) {
+           imu = NULL, iphi = NULL,  # imethod = 1,
+           gprobs.y = ppoints(8),  # (1:9)/10,
+           gphi  = exp(-3:5)/4,
+           zero = NULL) {
 
 
+
+  if (!is.Numeric(A, length.arg = 1) ||
+      !is.Numeric(B, length.arg = 1) || A >= B)
+    stop("A must be < B, and both must be of length one")
+
   stdbeta <- (A == 0 && B == 1)
 
 
@@ -2620,22 +2813,12 @@ qbenf <- function(p, ndigits = 1,
   lphi <- attr(ephi, "function.name")
 
 
-  if (!is.Numeric(A, length.arg = 1) ||
-      !is.Numeric(B, length.arg = 1) || A >= B)
-    stop("A must be < B, and both must be of length one")
-
-
-
 
   if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
      any(imu <= A) || any(imu >= B)))
     stop("bad input for argument 'imu'")
   if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
     stop("bad input for argument 'iphi'")
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
 
 
   new("vglmff",
@@ -2656,31 +2839,87 @@ qbenf <- function(p, ndigits = 1,
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("mu", "phi"),
+         A = .A ,
+         B = .B ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .A = A, .B = B ))),
+
   initialize = eval(substitute(expression({
     if (min(y) <= .A || max(y) >= .B)
       stop("data not within (A, B)")
 
 
-    w.y.check(w = w, y = y)
+    temp5 <-
+    w.y.check(w = w, y = y,
+              out.wy = TRUE,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    extra$A <- .A  # Needed for @validparams
+    extra$B <- .B
 
 
     predictors.names <- c(namesof("mu",  .lmu ,  .emu , short = TRUE),
                           namesof("phi", .lphi , .ephi, short = TRUE))
     if (!length(etastart)) {
-      mu.init <- if (is.Numeric( .imu )) .imu else {
-                   if ( .imethod == 1) weighted.mean(y, w) else
-                                       median(rep(y, w))
-                 }
-      mu1.init <- (mu.init - .A ) / ( .B - .A )  # In (0,1)
-      phi.init <- if (is.Numeric( .iphi )) .iphi else
-         max(0.01, -1 + ( .B - .A )^2 * mu1.init*(1-mu1.init)/var(y))
+      NOS <- 1
+      muu.init <-
+      phi.init <- matrix(NA_real_, n, NOS)
+      gprobs.y <- .gprobs.y
+      gphi <- if (length( .iphi )) .iphi else .gphi
+
+      betaff.Loglikfun <- function(muu, phi, y, x, w, extraargs) {
+        zedd <- (y   - extraargs$A) / ( extraargs$B - extraargs$A)
+        m1u  <- (muu - extraargs$A) / ( extraargs$B - extraargs$A)
+        shape1 <- phi * m1u
+        shape2 <- (1 - m1u) * phi
+        sum(c(w) * (dbeta(x = zedd, shape1, shape2, log = TRUE) -
+                    log(abs( extraargs$B - extraargs$A ))))
+      }
+
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+        gmuu <- if (length( .imu )) .imu else quantile(y[, jay], probs = gprobs.y)
+
+
+        try.this <-
+          grid.search2(gmuu, gphi,
+                       objfun = betaff.Loglikfun,
+                       y = y[, jay],
+                       w = w[, jay],
+                       extraargs = list(A = .A , B = .B ),
+                       ret.objfun = TRUE)  # Last value is the loglik
+        muu.init[, jay] <-  try.this["Value1"]
+        phi.init[, jay] <-  try.this["Value2"]
+      }  # for (jay ...)
+
+
+if (FALSE) {
+      mu.init <- if (is.Numeric( .imu )) .imu else {
+                   if ( .imethod == 1) weighted.mean(y, w) else
+                      (y + weighted.mean(y, w)) / 2
+                 }
+      mu1.init <- (mu.init - .A ) / ( .B - .A )  # In (0,1)
+      phi.init <- if (is.Numeric( .iphi )) .iphi else
+         max(0.01, -1 + ( .B - .A )^2 * mu1.init*(1-mu1.init)/var(y))
+  }
+
+
+
       etastart <- matrix(0, n, 2)
-      etastart[, 1] <- theta2eta(mu.init , .lmu  , earg = .emu  )
+      etastart[, 1] <- theta2eta(muu.init, .lmu  , earg = .emu  )
       etastart[, 2] <- theta2eta(phi.init, .lphi , earg = .ephi )
     }
   }), list( .lmu = lmu, .lphi = lphi, .imu = imu, .iphi = iphi,
             .A = A, .B = B, .emu = emu, .ephi = ephi,
-            .imethod = imethod ))),
+            .gprobs.y = gprobs.y, .gphi = gphi  ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
      mu <- eta2theta(eta[, 1], .lmu , .emu )
@@ -2721,7 +2960,14 @@ qbenf <- function(p, ndigits = 1,
            .emu = emu, .ephi = ephi,
            .stdbeta = stdbeta ))),
   vfamily = "betaff",
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mu  <- eta2theta(eta[, 1], .lmu  , .emu  )
+    phi <- eta2theta(eta[, 2], .lphi , .ephi )
+    okay1 <- all(is.finite(mu )) && all(extra$A < mu & mu < extra$B) &&
+             all(is.finite(phi)) && all(0 < phi)
+    okay1
+  }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+           .emu = emu, .ephi = ephi ))),
 
 
   simslot = eval(substitute(
@@ -2729,7 +2975,7 @@ qbenf <- function(p, ndigits = 1,
 
    pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     eta <- predict(object)
@@ -2749,11 +2995,11 @@ qbenf <- function(p, ndigits = 1,
 
 
   deriv = eval(substitute(expression({
-    mu <- eta2theta(eta[, 1], .lmu , .emu )
+    mu  <- eta2theta(eta[, 1], .lmu  , .emu  )
     phi <- eta2theta(eta[, 2], .lphi , .ephi )
     m1u <- if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
     dmu.deta <- dtheta.deta(mu, .lmu , .emu )
-    dmu1.dmu <- 1 / ( .B - .A)
+    dmu1.dmu <- 1 / ( .B - .A )
     dphi.deta <- dtheta.deta(phi, .lphi , .ephi )
     temp1 <- m1u*phi
     temp2 <- (1-m1u)*phi
@@ -2775,14 +3021,14 @@ qbenf <- function(p, ndigits = 1,
             .A = A, .B = B,
             .stdbeta = stdbeta ))),
   weight = eval(substitute(expression({
-    d2l.dmu12 <- (trigamma(temp1) + trigamma(temp2)) * phi^2
-    d2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
-                  trigamma(temp2) * (1-m1u)^2
-    d2l.dmu1phi <- temp1 * trigamma(temp1) - temp2 * trigamma(temp2)
+    ned2l.dmu12 <- (trigamma(temp1) + trigamma(temp2)) * phi^2
+    ned2l.dphi2 <- -trigamma(phi) + trigamma(temp1) * m1u^2 +
+                    trigamma(temp2) * (1-m1u)^2
+    ned2l.dmu1phi <- temp1 * trigamma(temp1) - temp2 * trigamma(temp2)
     wz <- matrix(NA_real_, n, dimm(M))
-    wz[, iam(1, 1, M)] <- d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
-    wz[, iam(2, 2, M)] <- d2l.dphi2 * dphi.deta^2
-    wz[, iam(1, 2, M)] <- d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
+    wz[, iam(1, 1, M)] <- ned2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
+    wz[, iam(2, 2, M)] <- ned2l.dphi2 * dphi.deta^2
+    wz[, iam(1, 2, M)] <- ned2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
     c(w) * wz
   }), list( .A = A, .B = B ))))
 }
@@ -2864,8 +3110,8 @@ qbenf <- function(p, ndigits = 1,
 
     if (!length(etastart)) {
       mu1d <- mean(y, trim = .trim )
-      uu <- (mu1d - .A) / ( .B - .A) 
-      DD <- ( .B - .A)^2 
+      uu <- (mu1d - .A) / ( .B - .A)
+      DD <- ( .B - .A)^2
       pinit <- max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
       qinit <- max(0.01, pinit * (1 - uu) / uu)
       etastart <- matrix(0, n, 2)
@@ -2883,7 +3129,7 @@ qbenf <- function(p, ndigits = 1,
     shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
                     eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
     .A + ( .B - .A ) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
     misc$link <-    c(shape1 = .lshape1 , shape2 = .lshape2 )
@@ -2911,10 +3157,16 @@ qbenf <- function(p, ndigits = 1,
         ll.elts
       }
     }
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = "betaR",
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+                    eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+    okay1 <- all(is.finite(shapes)) && all(0 < shapes)
+    okay1
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
 
 
 
@@ -2923,7 +3175,7 @@ qbenf <- function(p, ndigits = 1,
 
     pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     eta <- predict(object)
@@ -2932,7 +3184,7 @@ qbenf <- function(p, ndigits = 1,
     .A + ( .B - .A ) *
     rbeta(nsim * length(shapes[, 1]),
           shape1 = shapes[, 1], shape2 = shapes[, 2])
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
 
 
@@ -2950,12 +3202,12 @@ qbenf <- function(p, ndigits = 1,
                   digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A )
 
     c(w) * dl.dshapes * dshapes.deta
-  }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   weight = expression({
     trig.sum <- trigamma(shapes[, 1] + shapes[, 2])
-    ned2l.dshape12 <- trigamma(shapes[, 1]) - trig.sum 
-    ned2l.dshape22 <- trigamma(shapes[, 2]) - trig.sum 
+    ned2l.dshape12 <- trigamma(shapes[, 1]) - trig.sum
+    ned2l.dshape22 <- trigamma(shapes[, 2]) - trig.sum
     ned2l.dshape1shape2 <- -trig.sum
     wz <- matrix(NA_real_, n, dimm(M))  # dimm(M) == 3
     wz[, iam(1, 1, M)] <- ned2l.dshape12      * dshapes.deta[, 1]^2
@@ -2970,11 +3222,12 @@ qbenf <- function(p, ndigits = 1,
 
 
 
- betaprime <- function(link = "loge", i1 = 2, i2 = NULL, zero = NULL) {
+ betaprime <-
+  function(lshape = "loge", ishape1 = 2, ishape2 = NULL, zero = NULL) {
 
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
 
 
   new("vglmff",
@@ -2982,12 +3235,23 @@ qbenf <- function(p, ndigits = 1,
             "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
             " y>0, shape1>0, shape2>0\n\n",
             "Links:    ",
-            namesof("shape1", link, earg = earg),  ", ",
-            namesof("shape2", link, earg = earg), "\n",
+            namesof("shape1", lshape, earg = eshape),  ", ",
+            namesof("shape2", lshape, earg = eshape), "\n",
             "Mean:     shape1/(shape2-1) provided shape2>1"),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("shape1", "shape2"),
+         lshape1 = .lshape ,
+         lshape2 = .lshape ,
+         zero = .zero )
+  }, list( .zero = zero, .lshape = lshape ))),
+
   initialize = eval(substitute(expression({
 
     w.y.check(w = w, y = y,
@@ -2998,77 +3262,83 @@ qbenf <- function(p, ndigits = 1,
 
 
     predictors.names <-
-      c(namesof("shape1", .link , earg = .earg , short = TRUE),
-        namesof("shape2", .link , earg = .earg , short = TRUE))
-    if (is.numeric( .i1) && is.numeric( .i2)) {
-      vec <- c( .i1, .i2)
-      vec <- c(theta2eta(vec[1], .link , earg = .earg ),
-              theta2eta(vec[2], .link , earg = .earg ))
+      c(namesof("shape1", .lshape , earg = .eshape , short = TRUE),
+        namesof("shape2", .lshape , earg = .eshape , short = TRUE))
+    if (is.numeric( .ishape1) && is.numeric( .ishape2 )) {
+      vec <- c( .ishape1, .ishape2 )
+      vec <- c(theta2eta(vec[1], .lshape , earg = .eshape ),
+               theta2eta(vec[2], .lshape , earg = .eshape ))
       etastart <- matrix(vec, n, 2, byrow = TRUE)
     }
     if (!length(etastart)) {
-      init1 <- if (length( .i1 )) 
-        rep_len( .i1 , n) else rep_len(1, n)
-      init2 <- if (length( .i2 ))
-        rep_len( .i2 , n) else 1 + init1 / (y + 0.1)
+      init1 <- if (length( .ishape1 ))
+        rep_len( .ishape1 , n) else rep_len(1, n)
+      init2 <- if (length( .ishape2 ))
+        rep_len( .ishape2 , n) else 1 + init1 / (y + 0.1)
       etastart <-
-        matrix(theta2eta(c(init1, init2), .link , earg = .earg ),
+        matrix(theta2eta(c(init1, init2), .lshape , earg = .eshape ),
                n, 2, byrow = TRUE)
     }
-  }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))), 
+  }), list( .lshape = lshape, .eshape = eshape,
+            .ishape1 = ishape1, .ishape2 = ishape2 ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shapes <- eta2theta(eta, .link , earg = .earg )
+    shapes <- eta2theta(eta, .lshape , earg = .eshape )
     ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2] - 1), NA)
-  }, list( .link = link, .earg = earg ))),
+  }, list( .lshape = lshape, .eshape = eshape ))),
   last = eval(substitute(expression({
-    misc$link <- c(shape1 = .link , shape2 = .link)
-    misc$earg <- list(shape1 = .earg , shape2 = .earg )
-  }), list( .link = link, .earg = earg ))),
+    misc$link <-    c(shape1 = .lshape , shape2 = .lshape )
+    misc$earg <- list(shape1 = .eshape , shape2 = .eshape )
+  }), list( .lshape = lshape, .eshape = eshape ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    shapes <- eta2theta(eta, .link , earg = .earg )
+    shapes <- eta2theta(eta, .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
         c(w) * ((shapes[, 1]-1) * log(y) -
                  lbeta(shapes[, 1], shapes[, 2]) -
-                (shapes[, 2]+shapes[, 1]) * log1p(y))
+                (shapes[, 2] + shapes[, 1]) * log1p(y))
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg ))),
+  }, list( .lshape = lshape, .eshape = eshape ))),
   vfamily = "betaprime",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shapes <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shapes)) && all(0 < shapes)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
   deriv = eval(substitute(expression({
-    shapes <- eta2theta(eta, .link , earg = .earg )
-    dshapes.deta <- dtheta.deta(shapes, .link , earg = .earg )
-    dl.dshapes <- cbind(log(y) - log1p(y) - digamma(shapes[, 1]) + 
-                       digamma(shapes[, 1]+shapes[, 2]),
-                       - log1p(y) - digamma(shapes[, 2]) + 
-                       digamma(shapes[, 1]+shapes[, 2]))
+    shapes <- eta2theta(eta, .lshape , earg = .eshape )
+    dshapes.deta <- dtheta.deta(shapes, .lshape , earg = .eshape )
+    dl.dshapes <- cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
+                        digamma(shapes[, 1] + shapes[, 2]),
+                        - log1p(y) - digamma(shapes[, 2]) +
+                        digamma(shapes[, 1] + shapes[, 2]))
     c(w) * dl.dshapes * dshapes.deta
-  }), list( .link = link, .earg = earg ))),
+  }), list( .lshape = lshape, .eshape = eshape ))),
   weight = expression({
     temp2 <- trigamma(shapes[, 1] + shapes[, 2])
-    d2l.dshape12 <- temp2 - trigamma(shapes[, 1])
-    d2l.dshape22 <- temp2 - trigamma(shapes[, 2])
-    d2l.dshape1shape2 <- temp2
+    ned2l.dshape12 <- trigamma(shapes[, 1]) - temp2
+    ned2l.dshape22 <- trigamma(shapes[, 2]) - temp2
+    ned2l.dshape1shape2 <- -temp2
 
     wz <- matrix(NA_real_, n, dimm(M))  #3=dimm(M)
-    wz[, iam(1, 1, M)] <- d2l.dshape12 * dshapes.deta[, 1]^2
-    wz[, iam(2, 2, M)] <- d2l.dshape22 * dshapes.deta[, 2]^2
-    wz[, iam(1, 2, M)] <- d2l.dshape1shape2 *
+    wz[, iam(1, 1, M)] <- ned2l.dshape12 * dshapes.deta[, 1]^2
+    wz[, iam(2, 2, M)] <- ned2l.dshape22 * dshapes.deta[, 2]^2
+    wz[, iam(1, 2, M)] <- ned2l.dshape1shape2 *
                           dshapes.deta[, 1] * dshapes.deta[, 2]
 
-    -c(w) * wz
+    c(w) * wz
   }))
-}
+}  # betaprime
 
 
 
@@ -3139,17 +3409,17 @@ qbenf <- function(p, ndigits = 1,
             "0 < pobs0 < 1, 0 < pobs1 < 1 \n\n", sep = ""),
             "Links:    ",
             namesof("shape1", lshape1, earg = eshape1),  ", ",
-            namesof("shape1", lshape1, earg = eshape1),  ", ",
+            namesof("shape2", lshape2, earg = eshape2),  ", ",
             namesof("pobs0",  lprobb0, earg = eprobb0),  ", ",
             namesof("pobs1",  lprobb1, earg = eshape1)),
 
 
 
   constraints = eval(substitute(expression({
- 
+
 
     constraints.orig <- constraints
-   
+
 
     if (is.logical( .parallel.probb ) && .parallel.probb &&
         (cind0[1] + cind1[1] <= 1))
@@ -3167,8 +3437,8 @@ qbenf <- function(p, ndigits = 1,
                      apply.int = TRUE,
                      cm.default           = cmk.S,
                      cm.intercept.default = cmk.S)
-   
-    
+
+
 
     cmk.p <- kronecker(matrix(1, NOS, 1), rbind(0, 0, 1, 1))
     cmk.P <- kronecker(diag(NOS), rbind(0*diag(2), diag(2)))
@@ -3179,7 +3449,7 @@ qbenf <- function(p, ndigits = 1,
                      apply.int = TRUE,
                      cm.default           = cmk.P,
                      cm.intercept.default = cmk.P)
-   
+
     con.use <- con.s
     for (klocal in seq_along(con.s)) {
       con.use[[klocal]] <-
@@ -3195,7 +3465,7 @@ qbenf <- function(p, ndigits = 1,
       con.use[[klocal]] <- (con.use[[klocal]])[, !col.delete]
     }
 
-    
+
     constraints <- con.use
 
 
@@ -3239,14 +3509,15 @@ qbenf <- function(p, ndigits = 1,
     cind0 <- colSums(ind0 <- y == 0) > 0
     cind1 <- colSums(ind1 <- y == 1) > 0
     if (!any(cind0 | cind1))
-      stop("no 0s or 1s in the responses to perform 0- and/or 1-inflation! ",
+      stop("no 0s or 1s in the responses to perform 0- and/or ",
+           "1-inflation! ",
            "Try using betaff() or betaR() instead.")
 
     if (ncoly > 1 && !all(cind0 == cind0[1]) &&  # FALSE &&
                      !all(cind0 == cind0[1]))
       stop("with multiple responses, cannot have 0-inflation in ",
            "some responses and 1-inflation in other responses")
-    M1 <- 2 + cind0[1] + cind1[1]  # 4 when there is both 0 and 1-inflation
+    M1 <- 2 + cind0[1] + cind1[1]  # 4 when there is both 0 & 1-inflation
     M <- M1 * NOS
 
     mynames1 <- param.names("shape1", ncoly)
@@ -3265,6 +3536,7 @@ qbenf <- function(p, ndigits = 1,
           interleave.VGAM(M, M1 = M1)]
 
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
     extra$M1          <- M1  # Determined from the data
     extra$cind0       <- cind0
     extra$cind1       <- cind1
@@ -3281,8 +3553,8 @@ qbenf <- function(p, ndigits = 1,
         yy <- yy[ .A < yy & yy < .B ]
         mu1d[, jay] <- weighted.mean(yy, trim = .trim )
       }
-      uu <- (mu1d - .A ) / ( .B - .A ) 
-      DD <- ( .B - .A )^2 
+      uu <- (mu1d - .A ) / ( .B - .A )
+      DD <- ( .B - .A )^2
       p.init <- if (is.Numeric( .ishape1 ))
         matrix( .ishape1 , n, ncoly, byrow = TRUE) else
         uu^2 * (1 - uu) * DD / var(yy) - uu
@@ -3326,7 +3598,8 @@ qbenf <- function(p, ndigits = 1,
                 .lprobb0 , earg = .eprobb0 ) else 0
     probb1 <- if (cind1[1])
       eta2theta(eta[, c(FALSE, FALSE,
-                        if (cind0[1]) FALSE else NULL, TRUE), drop = FALSE],
+                        if (cind0[1]) FALSE else NULL, TRUE),
+                    drop = FALSE],
                 .lprobb1 , earg = .eprobb1 ) else 0
 
     type.fitted <- match.arg(extra$type.fitted,
@@ -3336,27 +3609,14 @@ qbenf <- function(p, ndigits = 1,
       switch(type.fitted,
              "mean"      = (1 - probb0) * shape1 / (shape1 + shape2) +
                                 probb1  * shape2 / (shape1 + shape2),
-             "beta.mean" = shape1/(shape1+shape2),  # zz Mux by (1-pobs0-pobs1)??
+      "beta.mean" = shape1/(shape1+shape2),  # zz Mux by (1-pobs0-pobs1)??
              "pobs0"     = probb0,
              "pobs1"     = probb1)
-
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans)) 
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
            .eshape1 = eshape1, .eshape2 = eshape2,
            .lprobb0 = lprobb0, .lprobb1 = lprobb1,
-           .eprobb0 = eprobb0, .eprobb1 = eprobb1,
-           .type.fitted = type.fitted ))),
+           .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
   last = eval(substitute(expression({
     misc$link <- rep_len( c( .lshape1 , .lshape2 ,
                              if (cind0[1]) .lprobb0 else NULL,
@@ -3409,7 +3669,8 @@ qbenf <- function(p, ndigits = 1,
                 .lprobb0 , earg = .eprobb0 ) else 0
     probb1 <- if (cind1[1])
       eta2theta(eta[, c(FALSE, FALSE,
-                        if (cind0[1]) FALSE else NULL, TRUE), drop = FALSE],
+                        if (cind0[1]) FALSE else NULL, TRUE),
+                    drop = FALSE],
                 .lprobb1 , earg = .eprobb1 ) else 0
 
     if (residuals) {
@@ -3426,7 +3687,7 @@ qbenf <- function(p, ndigits = 1,
         ll.elts
       }
     }
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
            .eshape1 = eshape1, .eshape2 = eshape2,
            .lprobb0 = lprobb0, .lprobb1 = lprobb1,
            .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
@@ -3462,12 +3723,12 @@ qbenf <- function(p, ndigits = 1,
 
 
 
-    okay1 <- all(is.finite(shape1)) && all(shape1 > 0) &&
-             all(is.finite(shape2)) && all(shape2 > 0) &&
-             all(is.finite(probb0)) && all(probb0 > 0) && all(probb0 < 1) &&
-             all(is.finite(probb1)) && all(probb1 > 0) && all(probb1 < 1)
+    okay1 <- all(is.finite(shape1)) && all(0 < shape1) &&
+             all(is.finite(shape2)) && all(0 < shape2) &&
+             all(is.finite(probb0)) && all(0 < probb0 & probb0 < 1) &&
+             all(is.finite(probb1)) && all(0 < probb1 & probb1 < 1)
     okay1
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
            .eshape1 = eshape1, .eshape2 = eshape2,
            .lprobb0 = lprobb0, .lprobb1 = lprobb1,
            .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
@@ -3530,7 +3791,7 @@ qbenf <- function(p, ndigits = 1,
             if (cind1[1]) dl.dprobb1 * dprobb1.deta else NULL)
     colnames(myderiv) <- NULL
     myderiv[, interleave.VGAM(M, M1 = M1)]
-  }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
             .eshape1 = eshape1, .eshape2 = eshape2,
             .lprobb0 = lprobb0, .lprobb1 = lprobb1,
             .eprobb0 = eprobb0, .eprobb1 = eprobb1 ))),
@@ -3562,7 +3823,7 @@ ned2l.dshape1probb0 <- 0
    if (cind1[1])  c(w) * ned2l.dshape1probb1 * dshape1.deta * dprobb1.deta),
                 dim = c(n, M / M1, M1*(M1+1)/2))
 
-    wz <- arwz2wz(wz, M = M, M1 = M1) # wz is tridiagonal but unexploited here
+    wz <- arwz2wz(wz, M = M, M1 = M1) # tridiagonal but unexploited here
     wz
   }))
 }  # zoabetaR
@@ -3570,3 +3831,2028 @@ ned2l.dshape1probb0 <- 0
 
 
 
+
+
+dtopple <- function(x, shape, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  L <- max(length(x), length(shape))
+  if (length(x)     != L) x     <- rep_len(x,     L)
+  if (length(shape) != L) shape <- rep_len(shape, L)
+
+  logdensity <- rep_len(log(0), L)
+  xok <- (0 <= x) & (x <= 1)
+  logdensity[xok] <-
+    log(2) + log(shape[xok]) + log1p(-x[xok]) +
+    (shape[xok] - 1) * (log(x[xok]) + log(2) + log1p(-x[xok]/2))
+  logdensity[shape >= 1] <- NaN
+  if (log.arg) logdensity else exp(logdensity)
+}
+
+
+
+ptopple <- function(q, shape, lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- shape * (log(q) + log(2) + log1p(-q/2))
+      ans[q <= 0 ] <- -Inf
+      ans[q >= 1] <- 0
+    } else {
+      ans <- (q * (2 - q))^shape
+      ans[q <= 0] <- 0
+      ans[q >= 1] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log1p(-(q * (2 - q))^shape)
+      ans[q <= 0] <- 0
+      ans[q >= 1] <- -Inf
+    } else {
+      ans <- exp(log1p(-(q * (2 - q))^shape))
+      ans[q <= 0] <- 1
+      ans[q >= 1] <- 0
+    }
+  }
+  ans[shape <= 0] <- NaN
+  ans[shape >= 1] <- NaN
+  ans
+}
+
+
+
+qtopple <- function(p, shape) {
+  ans <- -expm1(0.5 * log1p(-p^(1/shape)))
+  ans[shape <= 0] <- NaN
+  ans[shape >= 1] <- NaN
+  ans
+}
+
+
+
+rtopple <- function(n, shape) {
+  qtopple(runif(n), shape)
+}
+
+
+
+
+
+ topple <- function(lshape = "logit", zero = NULL,
+                    gshape = ppoints(8)) {
+
+
+  lshape <- as.list(substitute(lshape))  # orig
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+
+  new("vglmff",
+  blurb = c("Topp-Leone distribution F(y;shape) = (y * (2 - y))^shape, ",
+            "0 < y < 1, 0 < shape < 1\n",
+            "Link:    ",
+            namesof("shape", lshape, earg = eshape)),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = "shape",
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+
+  initialize = eval(substitute(expression({
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    if (any(y >= 1))
+      stop("response must be in (0, 1)")
+
+
+    ncoly <- ncol(y)
+    M1 <- 1
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
+
+
+    mynames1  <- param.names("shape", ncoly)
+    predictors.names <-
+      namesof(mynames1, .lshape , earg = .eshape , tag = FALSE)
+
+
+    if (!length(etastart)) {
+      shape.init <- matrix(0, nrow(x), ncoly)
+      gshape <- .gshape
+      topple.Loglikfun <- function(shape, y, x = NULL, w, extraargs = NULL) {
+        sum(c(w) * dtopple(x = y, shape = shape, log = TRUE))
+      }
+
+      for (jay in 1:ncoly) {
+        shape.init[, jay] <- grid.search(gshape, objfun = topple.Loglikfun,
+                                         y = y[, jay], w = w[, jay])
+      }
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .gshape = gshape,
+            .eshape = eshape ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    1 - (gamma(1 + shape))^2 * 4^shape / gamma(2 * (1 + shape))
+  }, list( .lshape = lshape,
+           .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- mynames1
+    for (ilocal in 1:ncoly) {
+      misc$earg[[ilocal]] <- .eshape
+    }
+
+    misc$link <- rep_len( .lshape , ncoly)
+    names(misc$link) <- mynames1
+  }), list( .lshape = lshape, .eshape = eshape ))),
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dtopple(x = y, shape = shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("topple"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape & shape < 1)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    rtopple(nsim * length(shape), shape = c(shape))
+  }, list( .lshape = lshape,
+           .eshape = eshape ))),
+
+
+  deriv = eval(substitute(expression({
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    dl.dshape <- 1 / shape + log(y) + log(2) + log1p(-y/2)
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+    ned2l.dshape2 <- 1 / shape^2
+    wz <- c(w) * ned2l.dshape2 * dshape.deta^2
+    wz
+  }), list( .lshape = lshape, .eshape = eshape ))))
+}
+
+
+
+
+dzeta <- function(x, shape, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+
+
+  LLL <- max(length(shape), length(x))
+  if (length(x)     != LLL) x     <- rep_len(x,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+
+  ox <- !is.finite(x)
+  zero <- ox | round(x) != x | x < 1
+  ans <- rep_len(if (log.arg) log(0) else 0, LLL)
+  if (any(!zero)) {
+      if (log.arg) {
+          ans[!zero] <- (-shape[!zero]-1)*log(x[!zero]) -
+                        log(zeta(shape[!zero]+1))
+      } else {
+          ans[!zero] <- x[!zero]^(-shape[!zero]-1) / zeta(shape[!zero]+1)
+      }
+  }
+  if (any(ox))
+    ans[ox] <- if (log.arg) log(0) else 0
+  ans[shape <= 0] <- NaN  # Added 20160617
+  ans
+}
+
+
+
+ pzeta <- function(q, shape, lower.tail = TRUE) {
+
+
+  LLL <- max(lenq <- length(q), lens <- length(shape))
+  if (length(q)     != LLL) q     <- rep_len(q,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  ans <- rep_len(0, LLL)
+
+  aa <- 12  # Same as Zeta.aux()
+  qfloor <- floor(q)
+  for (nn in 1:(aa-1))
+    ans <- ans + as.numeric(nn <= qfloor) / nn^(shape+1)
+
+  vecTF <- (aa-1 <= qfloor)
+  if (lower.tail) {
+    if (any(vecTF))
+      ans[vecTF] <- zeta(shape[vecTF]+1) -
+                    Zeta.aux(shape[vecTF]+1, qfloor[vecTF]+1)
+  } else {
+    ans <- zeta(shape+1) - ans
+    if (any(vecTF))
+      ans[vecTF] <- Zeta.aux(shape[vecTF]+1, qfloor[vecTF]+1)
+  }
+  ans / zeta(shape+1)
+}  # pzeta
+
+
+
+
+ qzeta <- function(p, shape) {
+
+  LLL <- max(lenp <- length(p), lens <- length(shape))
+  if (length(p)     != LLL) p     <- rep_len(p,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  ans <- rep_len(0, LLL)
+
+  lo <- rep_len(1, LLL)
+  approx.ans <- lo  # True at lhs
+  hi <- 2 * lo + 10
+  dont.iterate <- p == 1 | shape <= 0
+  done <- p <= pzeta(hi, shape) | dont.iterate
+  while (!all(done)) {
+    hi.save <- hi[!done]
+    hi[!done] <- 2 * lo[!done] + 10
+    lo[!done] <- hi.save
+    done[!done] <- (p[!done] <= pzeta(hi[!done], shape[!done]))
+  }
+
+  foo <- function(q, shape, p)
+    pzeta(q, shape) - p
+
+  lhs <- (p <= dzeta(1, shape)) | dont.iterate
+
+  approx.ans[!lhs] <- bisection.basic(foo, lo[!lhs], hi[!lhs], tol = 1/16,
+                                      shape = shape[!lhs], p = p[!lhs])
+  faa <- floor(approx.ans)
+  ans <- ifelse(pzeta(faa, shape) < p & p <= pzeta(faa+1, shape), faa+1, faa)
+
+  ans[p == 1] <- Inf
+  ans[shape <= 0] <- NaN
+
+  ans
+}  # qzeta
+
+
+
+rzeta <- function(n, shape) {
+  qzeta(runif(n), shape)
+}
+
+
+
+
+
+ zetaff <-
+    function(lshape = "loge",
+             ishape = NULL,
+             gshape = exp(-3:4)/4,
+             zero = NULL) {
+
+
+  if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+    stop("argument 'ishape' must be > 0")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+  new("vglmff",
+  blurb = c("Zeta distribution ",
+            "f(y) = 1/(y^(shape+1) zeta(shape+1)), shape>0, y = 1, 2,..\n\n",
+            "Link:    ",
+            namesof("shape", lshape, earg = eshape), "\n\n",
+            "Mean:     zeta(shape) / zeta(shape+1), provided shape>1\n",
+            "Variance: zeta(shape-1) / zeta(shape+1) - mean^2, provided shape>2"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         multipleResponses = TRUE,
+         parameters.names = "shape",
+         zero = .zero ,
+         lshape = .lshape )
+  }, list( .lshape = lshape,
+           .zero = zero ))),
+  initialize = eval(substitute(expression({
+
+   temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              Is.positive.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+    ncoly <- ncol(y)
+    mynames1 <- param.names("shape", ncoly)
+    predictors.names <-
+      namesof(mynames1, .lshape , earg = .eshape , tag = FALSE)
+
+    M1 <- 1
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
+
+
+    if (!length(etastart)) {
+      zetaff.Loglikfun <- function(shape, y, x, w, extraargs) {
+        sum(c(w) * dzeta(x = y, shape, log = TRUE))
+      }
+
+
+      gshape <- .gshape
+      if (!length( .ishape )) {
+        shape.init <- matrix(NA_real_, n, M, byrow = TRUE)
+        for (jay in 1:ncoly) {
+          shape.init[, jay] <- grid.search(gshape, objfun = zetaff.Loglikfun,
+                                           y = y[, jay], x = x, w = w[, jay])
+        }
+      } else {
+        shape.init <- matrix( .ishape , n, M, byrow = TRUE)
+      }
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .eshape = eshape,
+            .ishape = ishape, .gshape = gshape ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    ans <- pp <- eta2theta(eta, .lshape , earg = .eshape )
+    ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
+    ans[pp <= 1] <- NA
+    ans
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$link <- rep_len( .lshape , ncoly)
+    names(misc$link) <- mynames1
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- mynames1
+    for (jay in 1:ncoly) {
+      misc$earg[[jay]] <- .eshape
+    }
+
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  loglikelihood = eval(substitute( function(mu, y, w, residuals = FALSE,
+             eta, extra = NULL, summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dzeta(x = y, shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("zetaff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  deriv = eval(substitute(expression({
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+
+    fred0 <- zeta(shape+1)
+    fred1 <- zeta(shape+1, deriv = 1)
+    dl.dshape <- -log(y) - fred1 / fred0
+
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = expression({
+    NOS <- NCOL(y)
+    nd2l.dshape2 <- zeta(shape + 1, deriv = 2) / fred0 - (fred1/fred0)^2
+    wz <- nd2l.dshape2 * dshape.deta^2
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+  }))
+}
+
+
+
+gharmonic2 <- function(n, shape = 1) {
+
+
+
+  if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'n'")
+
+  LLL <- max(length(n), length(shape))
+  if (length(n)     != LLL) n     <- rep_len(n,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+
+  aa <- 12
+  ans <- rep_len(0, LLL)
+  for (ii in 1:aa)
+     ans <- ans + as.numeric(ii <= n) / ii^shape
+
+  vecTF <- (aa < n)
+  if (any(vecTF))
+    ans[vecTF] <- zeta(shape[vecTF]) - Zeta.aux(shape[vecTF], 1 + n[vecTF])
+  ans
+}
+
+
+
+gharmonic <- function(n, shape = 1, deriv = 0) {
+
+
+
+
+
+  if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
+      stop("bad input for argument 'n'")
+  if (!is.Numeric(deriv, length.arg = 1, integer.valued = TRUE) ||
+      deriv < 0)
+    stop("bad input for argument 'deriv'")
+
+  lognexponent <- deriv
+  sign <- ifelse(deriv %% 2 == 0, 1, -1)
+
+  ans <-
+  if (length(n) == 1 && length(shape) == 1) {
+    if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-shape)) else
+      sum((1:n)^(-shape))
+  } else {
+    LEN <- max(length(n), length(shape))
+    n <- rep_len(n, LEN)
+    ans <- shape <- rep_len(shape, LEN)
+    if (lognexponent != 0) {
+      for (ii in 1:LEN)
+        ans[ii] <- sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-shape[ii]))
+    } else {
+      for (ii in 1:LEN)
+        ans[ii] <- sum((1:n[ii])^(-shape[ii]))
+    }
+    ans
+  }
+  sign * ans
+}
+
+
+
+
+dzipf <- function(x, N, shape, log = FALSE) {
+  # Density of the Zipf distribution on 1:N:
+  #   P(Y = y) = y^(-shape) / sum((1:N)^(-shape)).
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  if (!is.Numeric(x))
+    stop("bad input for argument 'x'")
+  if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'N'")
+  if (!is.Numeric(shape, positive = TRUE))
+    stop("bad input for argument 'shape'")
+
+  nn <- max(length(x), length(N), length(shape))
+  x     <- rep_len(x,     nn)
+  N     <- rep_len(N,     nn)
+  shape <- rep_len(shape, nn)
+
+  # Points outside the support: non-finite, non-integer, < 1 or > N.
+  bad.x   <- !is.finite(x)
+  outside <- bad.x | round(x) != x | x < 1 | x > N
+  ans <- (if (log.arg) log(0) else 0) * x  # baseline -Inf/0; NA/NaN propagate
+  inside <- !outside
+  if (any(inside)) {
+    if (log.arg) {
+      ans[inside] <- (-shape[inside]) * log(x[inside]) -
+                     log(gharmonic2(N[inside], shape[inside]))
+    } else {
+      ans[inside] <- x[inside]^(-shape[inside]) /
+                     gharmonic2(N[inside], shape[inside])
+    }
+  }
+  ans
+}
+
+
+
+
+
+pzipf <- function(q, N, shape, log.p = FALSE) {
+  # CDF of the Zipf distribution on 1:N:
+  #   P(Y <= q) = sum_{i=1}^{floor(q)} i^(-shape) / sum_{i=1}^{N} i^(-shape).
+  if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'N'")
+
+  nn <- max(length(q), length(N), length(shape))
+  q     <- rep_len(q,     nn)
+  N     <- rep_len(N,     nn)
+  shape <- rep_len(shape, nn)
+
+  bad.q     <- !is.finite(q)
+  bad.shape <- shape <= 0
+  trivial   <- bad.q | q < 1 | N <= q | bad.shape
+  qfloor <- floor(q)
+  ans <- 0 * qfloor
+  ans[bad.q | q >= N] <- 1  # at/right of the support end (incl. Inf)
+  if (any(!trivial))
+    ans[!trivial] <- gharmonic2(qfloor[!trivial], shape[!trivial]) /
+                     gharmonic2(     N[!trivial], shape[!trivial])
+
+  ans[bad.shape] <- NaN  # invalid shape parameter
+
+  if (log.p) log(ans) else ans
+}
+
+
+
+
+
+
+qzipf <- function(p, N, shape) {
+  # Quantile function of the Zipf distribution on 1:N: smallest y in 1:N
+  # with pzipf(y, N, shape) >= p.  Uses coarse bisection on the CDF,
+  # then snaps the approximate root to the correct integer.
+  if (!is.Numeric(p))
+    stop("bad input for argument 'p'")
+  if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'N'")
+  if (!is.Numeric(shape, positive = TRUE))
+    stop("bad input for argument 'shape'")
+
+  nn <- max(length(p), length(N), length(shape))
+  if (length(p)     != nn) p     <- rep_len(p,     nn)
+  if (length(N)     != nn) N     <- rep_len(N,     nn)
+  if (length(shape) != nn) shape <- rep_len(shape, nn)
+
+  # Bisection brackets: the support is [1, N].
+  a <- rep_len(1, nn)
+  b <- rep_len(N, nn)
+  approx.ans <- a  # True at lhs
+
+  # Root function: CDF minus the target probability.
+  foo <- function(q, N, shape, p)
+    pzipf(q, N, shape) - p
+
+  dont.iterate <- p == 1 | shape <= 0
+  # lhs: answer is already 1 (p within the first atom), or no iteration wanted.
+  lhs <- (p <= dzipf(1, N, shape)) | dont.iterate
+
+  # tol = 1/16 is enough: the floor()/CDF check below fixes the integer.
+  approx.ans[!lhs] <-
+    bisection.basic(foo, a[!lhs], b[!lhs], shape = shape[!lhs], tol = 1/16,
+                    p = p[!lhs], N = N[!lhs])
+  faa <- floor(approx.ans)
+  # Snap to faa or faa+1, whichever is the true integer quantile.
+  ans <- ifelse(pzipf(faa, N, shape) < p & p <= pzipf(faa+1, N, shape),
+                faa+1, faa)
+
+  ans[shape <= 0] <- NaN
+  ans[p == 1] <- N
+
+  ans
+}  # qzipf
+
+
+
+rzipf <- function(n, N, shape) {
+  # Random generation for the Zipf distribution via CDF inversion.
+  u <- runif(n)
+  qzipf(u, N = N, shape = shape)
+}
+
+
+
+
+
+
+
+ zipf <- function(N = NULL, lshape = "loge", ishape = NULL) {
+  # VGAM family function for the Zipf distribution on 1:N; estimates the
+  # single parameter 'shape' (via link 'lshape').  If N is NULL it is
+  # taken as max(y) at initialization time.
+
+  if (length(N) &&
+    (!is.Numeric(N, positive = TRUE,
+                 integer.valued = TRUE, length.arg = 1) ||
+      N <= 1))
+    stop("bad input for argument 'N'")
+  enteredN <- length(N)
+
+  if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+      stop("argument 'ishape' must be > 0")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+  new("vglmff",
+  blurb = c("Zipf distribution f(y;s) = y^(-s) / sum((1:N)^(-s)),",
+            " s > 0, y = 1, 2,...,N",
+            ifelse(enteredN, paste(" = ", N, sep = ""), ""),
+            "\n\n",
+            "Link:    ",
+            namesof("shape", lshape, earg = eshape),
+            "\n\n",
+            "Mean:    gharmonic(N, shape-1) / gharmonic(N, shape)"),
+  infos = eval(substitute(function(...) {
+    # NOTE(review): the body uses 'enteredN' directly while the substitute
+    # list supplies '.enteredN'; this resolves via the enclosing frame but
+    # looks like a naming mismatch -- TODO confirm intent.
+    list(M1 = 1,
+         Q1 = 1,
+         multipleResponses = FALSE,
+         parameters.names = "shape",
+         N = enteredN,
+         lshape = .lshape )
+  }, list( .lshape = lshape,
+           .enteredN = enteredN
+         ))),
+  initialize = eval(substitute(expression({
+
+
+    w.y.check(w = w, y = y,
+              Is.integer.y = TRUE)
+
+
+    predictors.names <- namesof("shape", .lshape , earg = .eshape ,
+                                tag = FALSE)
+
+    # Resolve N: use the user-supplied value, else max(y).
+    NN <- .N
+    if (!is.Numeric(NN, length.arg = 1,
+                    positive = TRUE, integer.valued = TRUE))
+        NN <- max(y)
+    if (max(y) > NN)
+      stop("maximum of the response is greater than argument 'N'")
+    if (any(y < 1))
+      stop("all response values must be in 1, 2, 3,...,N( = ", NN,")")
+    extra$N <- NN
+
+    if (!length(etastart)) {
+      # Initial 'shape': user-supplied .ishape, else a 1-D grid search
+      # over the log-likelihood.
+      llfun <- function(shape, y, N, w) {
+        sum(c(w) * dzipf(x = y, N = extra$N, shape = shape, log = TRUE))
+      }
+      shape.init <- if (length( .ishape )) .ishape else
+        getInitVals(gvals = seq(0.1, 3, length.out = 19),
+                    llfun = llfun,
+                    y = y, N = extra$N, w = w)
+      shape.init <- rep_len(shape.init, length(y))
+      if ( .lshape == "loglog") shape.init[shape.init <= 1] <- 1.2
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .eshape = eshape, .ishape = ishape, .N = N ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    # Fitted mean: gharmonic2(N, shape-1) / gharmonic2(N, shape).
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    gharmonic2(extra$N, shape = shape - 1) / gharmonic2(extra$N, shape = shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$expected <- FALSE  # observed (not expected) information; see weight
+    misc$link <-    c(shape = .lshape)
+    misc$earg <- list(shape = .eshape )
+    misc$N <- extra$N
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dzipf(x = y, N = extra$N, shape = shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("zipf"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    extra <- object at extra
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    rzipf(nsim * length(shape), N = extra$N, shape = shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+  deriv = eval(substitute(expression({
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    # fred1/fred0 = d/dshape of log gharmonic2(N, shape) (see gharmonic()).
+    fred1 <- gharmonic(extra$N, shape, deriv = 1)
+
+    fred0 <- gharmonic2(extra$N, shape)
+
+    dl.dshape <- -log(y) - fred1 / fred0
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    d2shape.deta2 <- d2theta.deta2(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = expression({
+    # Observed information: includes the -d2shape.deta2 * dl.dshape term.
+    d2l.dshape <- gharmonic(extra$N, shape, deriv = 2) / fred0 -
+                  (fred1/fred0)^2
+    wz <- c(w) * (dshape.deta^2 * d2l.dshape - d2shape.deta2 * dl.dshape)
+    wz
+  }))
+}
+
+
+
+
+
+
+
+deflat.limit.oizeta  <- function(shape) {
+  # Most negative admissible 'pstr1' for the one-inflated zeta
+  # distribution (i.e. the 1-deflation boundary).
+  if (any(shape <= 0))
+    stop("argument 'shape' must be positive")
+  -dzeta(1, shape) / pzeta(1, shape, lower.tail = FALSE)
+}
+
+
+
+doizeta <- function(x, shape, pstr1 = 0, log = FALSE) {
+  # Density of the one-inflated zeta distribution:
+  #   P(Y = 1) = pstr1 + (1 - pstr1) * dzeta(1, shape),
+  #   P(Y = y) = (1 - pstr1) * dzeta(y, shape),   y > 1.
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL <- max(length(x), length(shape), length(pstr1))
+  x     <- rep_len(x,     LLL)
+  shape <- rep_len(shape, LLL)
+  pstr1 <- rep_len(pstr1, LLL)
+
+  ans <- rep(NA_real_, LLL)
+  is.one <- (x == 1)
+  if (log.arg) {
+    ans[ is.one] <- log(pstr1[ is.one] + (1 - pstr1[ is.one]) *
+                        dzeta(x[ is.one], shape[ is.one]))
+    ans[!is.one] <- log1p(-pstr1[!is.one]) +
+                    dzeta(x[!is.one], shape[!is.one], log = TRUE)
+  } else {
+    ans[ is.one] <- pstr1[ is.one] + (1 - pstr1[ is.one]) *
+                    dzeta(x[ is.one], shape[ is.one])
+    ans[!is.one] <- (1 - pstr1[!is.one]) *
+                    dzeta(x[!is.one], shape[!is.one])
+  }
+
+  # 'pstr1' values outside [deflation limit, 1] are invalid.
+  ans[pstr1 < deflat.limit.oizeta(shape)] <- NaN
+  ans[pstr1 > 1] <- NaN
+
+  ans
+}  # doizeta
+
+
+
+
+poizeta <- function(q, shape, pstr1 = 0) {
+  # CDF of the one-inflated zeta distribution: the atom at 1 plus a
+  # (1 - pstr1)-scaled zeta CDF.
+  LLL <- max(length(q), length(shape), length(pstr1))
+  q     <- rep_len(q,     LLL)
+  shape <- rep_len(shape, LLL)
+  pstr1 <- rep_len(pstr1, LLL)
+  deflat.limit <- deflat.limit.oizeta(shape)
+
+  cdf <- pzeta(q, shape)  #, lower.tail = lower.tail, log.p = log.p
+  ans <- ifelse(q < 1, 0, pstr1 + (1 - pstr1) * cdf)
+
+  # Invalid parameter combinations give NaN.
+  ans[pstr1 < deflat.limit] <- NaN
+  ans[1 < pstr1]  <- NaN
+  ans[shape <= 0] <- NaN
+
+  ans
+}  # poizeta
+
+
+
+
+
+qoizeta <- function(p, shape, pstr1 = 0) {
+  # Quantile function of the one-inflated zeta distribution: the atom at
+  # 1 absorbs probabilities up to 'pstr1'; the rest inverts qzeta().
+  LLL <- max(length(p), length(shape), length(pstr1))
+  p     <- rep_len(p,     LLL)
+  shape <- rep_len(shape, LLL)
+  pstr1 <- rep_len(pstr1, LLL)
+  ans <- rep_len(NA_real_, LLL)
+  deflat.limit <- deflat.limit.oizeta(shape)
+
+  ans[p <= pstr1] <- 1
+  use.qzeta <- (deflat.limit <= pstr1) & (pstr1 < p)
+  ans[use.qzeta] <- qzeta((p[use.qzeta] - pstr1[use.qzeta]) /
+                          (1 - pstr1[use.qzeta]),
+                          shape = shape[use.qzeta])
+
+  # Invalid parameters / probabilities give NaN.
+  ans[pstr1 < deflat.limit] <- NaN
+  ans[1 < pstr1] <- NaN
+  ans[p < 0] <- NaN
+  ans[1 < p] <- NaN
+  ans[shape <= 0] <- NaN
+
+  ans
+}  # qoizeta
+
+
+
+roizeta <- function(n, shape, pstr1 = 0) {
+  # Random generation for the one-inflated zeta distribution (inversion).
+  qoizeta(runif(n), shape = shape, pstr1 = pstr1)
+}
+
+
+
+
+
+ oizeta <-
+  function(lpstr1 = "logit", lshape = "loge",
+           type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"),
+           ishape = NULL,
+           gpstr1 = ppoints(8),
+           gshape = exp((-3:3) / 4), # grid for finding shape.init
+           zero = NULL) {
+  # VGAM family function for one-inflated zeta regression: a mixture of a
+  # point mass at 1 (probability 'pstr1') and a zeta(shape) distribution.
+  # 'pstr1' may be negative down to deflat.limit.oizeta(shape), giving
+  # 1-deflation (see validparams below).
+
+  lpstr1 <- as.list(substitute(lpstr1))
+  epstr1 <- link2list(lpstr1)
+  lpstr1 <- attr(epstr1, "function.name")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+
+  type.fitted <- match.arg(type.fitted,
+                   c("mean", "shape", "pobs1", "pstr1", "onempstr1"))[1]
+
+
+  if (length(ishape))
+    if (!is.Numeric(ishape, positive = TRUE))
+      stop("argument 'ishape' values must be positive")
+
+
+  new("vglmff",
+  blurb = c("One-inflated zeta regression\n\n",
+            "Links:    ",
+            namesof("pstr1",  lpstr1, earg = epstr1 ), ", ",
+            namesof("shape", lshape, earg = eshape ), "\n",
+            "Mean:     pstr1 + (1 - pstr1) * zeta(shape) / ",
+                       "zeta(1 + shape), if shape > 1"),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("pstr1", "shape"),
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+  initialize = eval(substitute(expression({
+    M1 <- 2
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.nonnegative.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+    NOS <- ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    mynames1 <- param.names("pstr1",  ncoly)
+    mynames2 <- param.names("shape", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lpstr1 , earg = .epstr1 , tag = FALSE),
+          namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
+          interleave.VGAM(M, M1 = M1)]
+
+
+    if (!length(etastart)) {
+      # Initial values per response column via a 2-D grid search over
+      # (pstr1, shape) maximizing the log-likelihood.
+      shape.init <-
+      pstr1.init <- matrix(NA_real_, n, NOS)
+      gpstr1 <- .gpstr1
+      gshape <- .gshape
+
+      oizeta.Loglikfun <- function(pstr1, shape, y, x, w, extraargs) {
+        sum(c(w) * doizeta(x = y, pstr1 = pstr1,
+                           shape = shape, log = TRUE))
+      }
+
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+
+
+        try.this <-
+          grid.search2(gpstr1, gshape,
+                       objfun = oizeta.Loglikfun,
+                       y = y[, jay],  # x = x[TFvec, , drop = FALSE],
+                       w = w[, jay],
+                       ret.objfun = TRUE)  # Last value is the loglik
+        pstr1.init[, jay] <-  try.this["Value1"]
+        shape.init[, jay] <-  try.this["Value2"]
+      }  # for (jay ...)
+
+      etastart <- cbind(theta2eta(pstr1.init, .lpstr1 , earg = .epstr1 ),
+                        theta2eta(shape.init, .lshape , earg = .eshape ))[,
+                        interleave.VGAM(M, M1 = M1)]
+      mustart <- NULL  # Since etastart has been computed.
+    }  # End of !length(etastart)
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape,
+                                .ishape = ishape,
+            .gpstr1 = gpstr1,
+            .gshape = gshape,
+            .type.fitted = type.fitted ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "shape", "pobs1", "pstr1", "onempstr1"))[1]
+
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+
+    # Zeta mean zeta(shape)/zeta(1+shape); finite only for shape > 1.
+    Meanfun <- function(shape) {
+      Mean <- shape
+      Mean[shape > 1] <-
+        zeta(shape[shape > 1]) / zeta(1 + shape[shape > 1])
+      Mean[shape <= 1] <- NA
+      Mean
+    }
+
+    ans <-
+      switch(type.fitted,
+             "mean"      = pstr1 + (1 - pstr1) * Meanfun(shape),
+             "shape"     = shape,
+             "pobs1" = doizeta(1, shape = shape, pstr1 = pstr1),  # P(Y=1)
+             "pstr1"     =     pstr1,
+             "onempstr1" = 1 - pstr1)
+
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$link <-
+      c(rep_len( .lpstr1 , NOS),
+        rep_len( .lshape , NOS))[interleave.VGAM(M, M1 = M1)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[M1*ii-1]] <- .epstr1
+      misc$earg[[M1*ii  ]] <- .eshape
+    }
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * doizeta(x = y, pstr1 = pstr1, shape = shape,
+                                log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+  vfamily = c("oizeta"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    roizeta(nsim * length(shape), shape = shape, pstr1 = pstr1)
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+                        earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                        earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape) &&
+             all(is.finite(pstr1)) && all(pstr1 < 1)
+    deflat.limit <- deflat.limit.oizeta(shape)
+    okay2.deflat <- TRUE
+    if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr1)))
+      warning("parameter 'pstr1' is too negative even allowing for ",
+              "1-deflation.")
+    okay1 && okay2.deflat
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+
+
+
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- M / M1
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+                       earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                       earg = .eshape )
+
+    pmf1 <- dzeta(1, shape)
+    onempmf1 <- 1 - pmf1  # doizeta(1, shape = shape, pstr1 = pstr1)
+    pobs1 <- pstr1 + (1 - pstr1) * pmf1
+    index1 <- as.matrix(y == 1)
+
+    # zeta(shape+1) and its first two derivatives w.r.t. shape.
+    zeta0 <- zeta(shape + 1)
+    zeta1 <- zeta(shape + 1, deriv = 1)
+    zeta2 <- zeta(shape + 1, deriv = 2)
+
+    dl.dpstr1 <- onempmf1 / pobs1
+    dl.dpstr1[!index1] <- -1 / (1 - pstr1[!index1])
+
+    dpmf1.dshape <- -zeta1 / zeta0^2
+
+   d2pmf1.dshape2 <- (2 * zeta1^2 / zeta0 - zeta2) / zeta0^2
+
+    dl.dshape <- (1 - pstr1) * dpmf1.dshape / pobs1  #
+    dl.dshape[!index1] <- -log(y[!index1]) - zeta1[!index1] / zeta0[!index1]
+
+    dpstr1.deta <- dtheta.deta(pstr1, .lpstr1 , earg = .epstr1 )
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+
+    myderiv <- c(w) * cbind(dl.dpstr1 * dpstr1.deta,
+                            dl.dshape * dshape.deta)
+    myderiv[, interleave.VGAM(M, M1 = M1)]
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+    # Expected information ('expected = TRUE' in infos); packed into
+    # matrix-band format by arwz2wz().
+    LHS <- ((1 - pstr1) / pobs1) * dpmf1.dshape^2 - d2pmf1.dshape2
+    RHS <- (zeta2 - zeta1^2 / zeta0) / zeta0
+    ned2l.dpstr12 <- onempmf1 / ((1 - pstr1) * pobs1)  #
+    ned2l.dpstr1shape <- dpmf1.dshape / pobs1  #
+    ned2l.dshape2 <- (1 - pstr1) * (LHS + (1 - pmf1) * RHS)
+
+    wz <- array(c(c(w) * ned2l.dpstr12 * dpstr1.deta^2,
+                  c(w) * ned2l.dshape2 * dshape.deta^2,
+                  c(w) * ned2l.dpstr1shape * dpstr1.deta * dshape.deta),
+                dim = c(n, M / M1, 3))
+    wz <- arwz2wz(wz, M = M, M1 = M1)
+    wz
+  }), list( .lshape = lshape, .eshape = eshape ))))
+}  # oizeta
+
+
+
+
+
+
+
+
+deflat.limit.oizipf  <- function(N, shape) {
+  # Most negative admissible 'pstr1' for the one-inflated Zipf
+  # distribution (i.e. the 1-deflation boundary).
+  if (any(shape <= 0))
+    stop("argument 'shape' must be positive")
+  1 / (1 - 1 / dzipf(1, N, shape))
+}
+
+
+
+
+
+doizipf <- function(x, N, shape, pstr1 = 0, log = FALSE) {
+  # Density of the one-inflated Zipf distribution:
+  #   P(Y = 1) = pstr1 + (1 - pstr1) * dzipf(1, N, shape),
+  #   P(Y = y) = (1 - pstr1) * dzipf(y, N, shape),   y > 1.
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  if (!is.Numeric(x))
+    stop("bad input for argument 'x'")
+  if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'N'")
+  if (!is.Numeric(shape, positive = TRUE))
+    stop("bad input for argument 'shape'")
+
+  nn <- max(length(x), length(N), length(shape), length(pstr1))
+  x     <- rep_len(x,     nn)
+  N     <- rep_len(N,     nn)
+  shape <- rep_len(shape, nn)
+  pstr1 <- rep_len(pstr1, nn)
+
+  ans <- rep(NA_real_, nn)
+  is.one <- (x == 1)
+  if (log.arg) {
+    ans[ is.one] <- log(pstr1[ is.one] + (1 - pstr1[ is.one]) *
+                      dzipf(x[ is.one], N[ is.one], shape[ is.one]))
+    ans[!is.one] <- log1p(-pstr1[!is.one]) +
+                      dzipf(x[!is.one], N[!is.one], shape[!is.one], log = TRUE)
+  } else {
+    ans[ is.one] <- pstr1[ is.one] + (1 - pstr1[ is.one]) *
+                      dzipf(x[ is.one], N[ is.one], shape[ is.one])
+    ans[!is.one] <- (1 - pstr1[!is.one]) *
+                      dzipf(x[!is.one], N[!is.one], shape[!is.one])
+  }
+
+  # 'pstr1' values outside [deflation limit, 1] are invalid.
+  deflat.limit <- deflat.limit.oizipf(N, shape)
+  ans[pstr1 < deflat.limit] <- NaN
+  ans[pstr1 > 1] <- NaN
+
+  ans
+}
+
+
+
+
+
+poizipf <- function(q, N, shape, pstr1 = 0) {
+  # CDF of the one-inflated Zipf distribution: the atom at 1 plus a
+  # (1 - pstr1)-scaled Zipf CDF.
+  LLL <- max(length(q), length(N), length(shape), length(pstr1))
+  if (length(q)     != LLL) q     <- rep_len(q,     LLL)
+  if (length(N)     != LLL) N     <- rep_len(N,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+  deflat.limit <- deflat.limit.oizipf(N, shape)
+
+  ans <- pzipf(q, N, shape)  #, lower.tail = lower.tail, log.p = log.p
+  ans <- ifelse(q < 1, 0, pstr1 + (1 - pstr1) * ans)
+
+  ans[pstr1 < deflat.limit] <- NaN
+  ans[1 < pstr1] <- NaN
+  # BUG FIX: was 'ans[s <= 0] <- NaN'; 's' is not defined in this
+  # function, so every call that reached this line raised an error.
+  ans[shape <= 0] <- NaN
+
+  ans
+}
+
+
+
+
+
+
+qoizipf <- function(p, N, shape, pstr1 = 0) {
+  # Quantile function of the one-inflated Zipf distribution: the atom at
+  # 1 absorbs probabilities up to 'pstr1'; the rest inverts qzipf().
+  if (!is.Numeric(p))
+    stop("bad input for argument 'p'")
+  if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'N'")
+  if (!is.Numeric(shape, positive = TRUE))
+    stop("bad input for argument 'shape'")
+
+  # BUG FIX: was 'length(s)'; 's' is not an argument of this function,
+  # so computing the recycling length always raised an error.
+  nn <- max(length(p), length(N), length(shape), length(pstr1))
+  if (length(p)     != nn) p     <- rep_len(p,     nn)
+  if (length(N)     != nn) N     <- rep_len(N,     nn)
+  if (length(shape) != nn) shape <- rep_len(shape, nn)
+  if (length(pstr1) != nn) pstr1 <- rep_len(pstr1, nn)
+
+
+  ans    <- rep_len(NA_real_, nn)
+  deflat.limit <- deflat.limit.oizipf(N, shape)
+
+  dont.iterate <- 1 < p
+  ans[p <= pstr1] <- 1
+  pindex <- (pstr1 < p) & (deflat.limit <= pstr1) & !dont.iterate
+  if (any(pindex))
+  ans[pindex] <-
+    qzipf((p[pindex] - pstr1[pindex]) / (1 - pstr1[pindex]),
+          N = N[pindex], shape = shape[pindex])
+
+  ans[pstr1 < deflat.limit] <- NaN
+  ans[1 < pstr1] <- NaN
+
+  # Aligned with qoizeta(): was 'shape < 0'; unreachable either way given
+  # the is.Numeric(shape, positive = TRUE) guard above.
+  ans[shape <= 0] <- NaN
+  ans[p < 0] <- NaN
+  ans[1 < p] <- NaN
+
+  ans
+}
+
+
+
+roizipf <- function(n, N, shape, pstr1 = 0) {
+  # Random generation for the one-inflated Zipf distribution (inversion).
+  qoizipf(runif(n), N = N, shape = shape, pstr1 = pstr1)
+}
+
+
+
+
+
+ oizipf <-
+  function(N = NULL, lpstr1 = "logit", lshape = "loge",
+           type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"),
+           ishape = NULL,
+           gpstr1 = ppoints(8),
+           gshape = exp((-3:3) / 4), # grid for finding shape.init
+           zero = NULL) {
+
+  if (length(N) &&
+     (!is.Numeric(N, positive = TRUE,
+                 integer.valued = TRUE, length.arg = 1) ||
+      N <= 1))
+    stop("bad input for argument 'N'")
+  enteredN <- length(N)
+
+  lpstr1 <- as.list(substitute(lpstr1))
+  epstr1 <- link2list(lpstr1)
+  lpstr1 <- attr(epstr1, "function.name")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+
+  type.fitted <- match.arg(type.fitted,
+                   c("mean", "shape", "pobs1", "pstr1", "onempstr1"))[1]
+
+
+  if (length(ishape))
+    if (!is.Numeric(ishape, positive = TRUE))
+      stop("argument 'ishape' values must be positive")
+
+
+  new("vglmff",
+  blurb = c("One-inflated Zipf distribution f(y; pstr1, shape) = pstr1 + ",
+            "(1 - pstr1) * y^(-shape) / sum((1:N)^(-shape)),",
+            " 0 < shape, y = 1, 2,...,N",
+            ifelse(enteredN, paste(" = ", N, sep = ""), ""),
+            "\n\n",
+            "Links:    ",
+            namesof("pstr1", lpstr1, earg = epstr1 ), ", ",
+            namesof("shape", lshape, earg = eshape ), "\n",
+            "Mean:     pstr1 + (1 - pstr1) * ",
+            "gharmonic(N, shape-1) / gharmonic(N, shape)"),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("pstr1", "shape"),
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+  initialize = eval(substitute(expression({
+    M1 <- 2
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+    NOS <- ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+
+
+    NN <- .N
+    if (!is.Numeric(NN, length.arg = 1,
+                    positive = TRUE, integer.valued = TRUE))
+      NN <- max(y)
+    if (max(y) > NN)
+      stop("maximum of the response is greater than argument 'N'")
+    extra$N <- NN
+
+
+
+    mynames1 <- param.names("pstr1", ncoly)
+    mynames2 <- param.names("shape", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lpstr1 , earg = .epstr1 , tag = FALSE),
+          namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
+          interleave.VGAM(M, M1 = M1)]
+
+
+    if (!length(etastart)) {
+
+      shape.init <-
+      pstr1.init <- matrix(NA_real_, n, NOS)
+      gpstr1 <- .gpstr1
+      gshape <- .gshape
+
+      oizipf.Loglikfun <- function(pstr1, shape, y, x, w, extraargs) {
+        sum(c(w) * doizipf(x = y, pstr1 = pstr1, N = extraargs$N,
+                           s = shape, log = TRUE))
+      }
+
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+
+
+        try.this <-
+          grid.search2(gpstr1, gshape,
+                       objfun = oizipf.Loglikfun,
+                       y = y[, jay],  # x = x[TFvec, , drop = FALSE],
+                       w = w[, jay],
+                       extraargs = list(N = extra$N),
+                       ret.objfun = TRUE)  # Last value is the loglik
+        pstr1.init[, jay] <-  try.this["Value1"]
+        shape.init[, jay] <-  try.this["Value2"]
+      }  # for (jay ...)
+
+      etastart <- cbind(theta2eta(pstr1.init, .lpstr1 , earg = .epstr1 ),
+                        theta2eta(shape.init, .lshape , earg = .eshape ))[,
+                        interleave.VGAM(M, M1 = M1)]
+      mustart <- NULL  # Since etastart has been computed.
+    }  # End of !length(etastart)
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape,
+                              .ishape = ishape,
+            .gpstr1 = gpstr1,
+            .gshape = gshape,
+            .type.fitted = type.fitted,
+            .N = N ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "shape", "pobs1", "pstr1", "onempstr1"))[1]
+
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+
+    Meanfun <- function(shape, extra) {
+      Mean <- shape
+      Mean <- ( gharmonic2(extra$N, shape = shape - 1)
+              / gharmonic2(extra$N, shape = shape))
+      Mean[shape <= 0] <- NaN
+      Mean
+    }
+
+    ans <-
+      switch(type.fitted,
+             "mean"      = pstr1 + (1 - pstr1) * Meanfun(shape, extra),
+             "shape"     = shape,
+             "pobs1"     = doizipf(1, N = extra$N, s = shape, pstr1 = pstr1),
+             "pstr1"     =     pstr1,
+             "onempstr1" = 1 - pstr1)
+
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$link <-
+      c(rep_len( .lpstr1 , NOS),
+        rep_len( .lshape , NOS))[interleave.VGAM(M, M1 = M1)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[M1*ii-1]] <- .epstr1
+      misc$earg[[M1*ii  ]] <- .eshape
+    }
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+             summation = TRUE) {
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * doizipf(x = y, pstr1 = pstr1, s = shape,
+                                N = extra$N, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+  vfamily = c("oizipf"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    roizipf(nsim * length(shape), s = shape, pstr1 = pstr1,
+            N = object at extra$N)
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+                       earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                       earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape) &&
+             all(is.finite(pstr1)) && all(pstr1 < 1)
+    deflat.limit <- deflat.limit.oizipf(N = extra$N, s = shape)
+    okay2.deflat <- TRUE
+    if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr1)))
+      warning("parameter 'pstr1' is too negative even allowing for ",
+              "1-deflation.")
+    okay1 && okay2.deflat
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+
+
+
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- M / M1
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+                       earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                       earg = .eshape )
+
+    pmf1 <- dzipf(1, N = extra$N, shape = shape)
+    onempmf1 <- 1 - pmf1  # dozeta(1, shape = shape, pstr1 = pstr1)
+    pobs1 <- pstr1 + (1 - pstr1) * pmf1
+    index1 <- as.matrix(y == 1)
+
+    ghar0 <-  gharmonic2(extra$N, shape)
+    ghar1 <-   gharmonic(extra$N, shape, deriv = 1)
+    ghar2 <-   gharmonic(extra$N, shape, deriv = 2)
+
+    dl.dpstr1 <- onempmf1 / pobs1
+    dl.dpstr1[!index1] <- -1 / (1 - pstr1[!index1])
+
+    dpmf1.dshape <- -ghar1 / ghar0^2
+
+    d2pmf1.dshape2 <- (2 * ghar1^2 / ghar0 - ghar2) / ghar0^2
+
+    dl.dshape <- (1 - pstr1) * dpmf1.dshape / pobs1  #
+    dl.dshape[!index1] <- -log(y[!index1]) - ghar1[!index1] / ghar0[!index1]
+
+    dpstr1.deta <- dtheta.deta(pstr1, .lpstr1 , earg = .epstr1 )
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+
+    myderiv <- c(w) * cbind(dl.dpstr1 * dpstr1.deta,
+                            dl.dshape * dshape.deta)
+    myderiv[, interleave.VGAM(M, M1 = M1)]
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+
+    LHS <- ((1 - pstr1) / pobs1) * dpmf1.dshape^2 - d2pmf1.dshape2
+    RHS <- (ghar2 - ghar1^2 / ghar0) / ghar0
+    ned2l.dpstr12 <- onempmf1 / ((1 - pstr1) * pobs1)  #
+    ned2l.dpstr1shape <- dpmf1.dshape / pobs1  #
+    ned2l.dshape2 <- (1 - pstr1) * (LHS + (1 - pmf1) * RHS)
+
+    wz <- array(c(c(w) * ned2l.dpstr12 * dpstr1.deta^2,
+                  c(w) * ned2l.dshape2 * dshape.deta^2,
+                  c(w) * ned2l.dpstr1shape * dpstr1.deta * dshape.deta),
+                dim = c(n, M / M1, 3))
+    wz <- arwz2wz(wz, M = M, M1 = M1)
+    wz
+  }), list( .lshape = lshape, .eshape = eshape ))))
+}  # oizipf
+
+
+
+
+
+dotzeta <- function(x, shape, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  if (log.arg) {
+    ans <- dzeta(x, shape, log = log.arg) - log1p(-dzeta(1, shape))
+    ans[x == 1] <- log(0)
+  } else {
+    ans <- dzeta(x, shape) / (1 - dzeta(1, shape))
+    ans[x == 1] <- 0
+  }
+  ans[shape < 0] <- NaN
+  ans
+}  # dotzeta
+
+
+
+potzeta <- function(q, shape, log.p = FALSE) {
+  if (log.p) log(pzeta(q, shape) - dzeta(1, shape)) -
+      log1p(-dzeta(1, shape)) else
+    (pzeta(q, shape) - dzeta(1, shape)) / (1 - dzeta(1, shape))
+}
+
+
+
+ qotzeta <- function(p, shape) {
+  ans <- qzeta((1 - dzeta(1, shape)) * p + dzeta(1, shape), shape = shape)
+
+  ans[p == 1] <- Inf
+  ans[p < 0 | 1 < p] <- NaN
+
+  ans[shape < 0] <- NaN
+  ans
+}  # qotzeta
+
+
+
+rotzeta <- function(n, shape) {
+  qotzeta(runif(n), shape)
+}
+
+
+
+
+
+ otzeta <-
+    function(lshape = "loge",
+             ishape = NULL,
+             gshape = exp((-4:3)/4),
+             zero = NULL) {
+
+  if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+    stop("argument 'ishape' must be > 0")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+  new("vglmff",
+  blurb = c("One-truncated Zeta distribution ",
+            "f(y) = 1/(y^(shape+1) * (zeta(shape+1) - 1 - 1/2^(shape+1)))",
+            " 0<shape, y = 2, 3,...\n\n",
+            "Link:    ",
+            namesof("shape", lshape, earg = eshape)),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = FALSE,  # NR == FS
+         multipleResponses = TRUE,
+         parameters.names = "shape",
+         zero = .zero ,
+         lshape = .lshape )
+  }, list( .lshape = lshape,
+           .zero = zero ))),
+  initialize = eval(substitute(expression({
+
+   temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              Is.positive.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    if (any(y <= 1))
+      stop("no 1s in the response allowed!")
+
+
+    ncoly <- ncol(y)
+    mynames1 <- param.names("shape", ncoly)
+    predictors.names <-
+      namesof(mynames1, .lshape , earg = .eshape , tag = FALSE)
+
+    M1 <- 1
+    M <- M1 * ncoly
+
+
+    if (!length(etastart)) {
+      otzetaff.Loglikfun <- function(shape, y, x, w, extraargs) {
+        sum(c(w) * dotzeta(x = y, shape, log = TRUE))
+      }
+
+
+      gshape <- .gshape
+      if (!length( .ishape )) {
+        shape.init <- matrix(NA_real_, n, M, byrow = TRUE)
+        for (jay in 1:ncoly) {
+          shape.init[, jay] <- grid.search(gshape, objfun = otzetaff.Loglikfun,
+                                           y = y[, jay], x = x, w = w[, jay])
+        }
+      } else {
+        shape.init <- matrix( .ishape , n, M, byrow = TRUE)
+      }
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .eshape = eshape,
+            .ishape = ishape, .gshape = gshape ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    ans <- pp <- eta2theta(eta, .lshape , earg = .eshape )
+    ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
+    ans[pp <= 1] <- NA
+    pmf.1 <- dzeta(1, pp)
+    (ans - pmf.1) / (1 - pmf.1)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$link <- rep_len( .lshape , ncoly)
+    names(misc$link) <- mynames1
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- mynames1
+    for (jay in 1:ncoly) {
+      misc$earg[[jay]] <- .eshape
+    }
+
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  loglikelihood = eval(substitute( function(mu, y, w, residuals = FALSE,
+             eta, extra = NULL, summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dotzeta(x = y, shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("otzeta"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  deriv = eval(substitute(expression({
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    BBBB  <- zeta(shape + 1) - 1
+    fred1 <- zeta(shape + 1, deriv = 1)
+    dl.dshape <- -log(y) - fred1 / BBBB
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = expression({
+    ned2l.dshape2 <- (zeta(shape + 1, deriv = 2) - fred1^2 / BBBB) / BBBB
+    wz <- ned2l.dshape2 * dshape.deta^2
+    c(w) * wz
+  }))
+}
+
+
+
+
+
+
+
+ddiffzeta <- function(x, shape, start = 1, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL <- max(length(shape), length(x), length(start))
+  if (length(x)     != LLL) x     <- rep_len(x,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(start) != LLL) start <- rep_len(start, LLL)
+
+  ox <- !is.finite(x)
+  zero <- ox | round(x) != x | x < start
+  ans <- rep_len(if (log.arg) log(0) else 0, LLL)
+  if (any(!zero)) {
+    ans[!zero] <- (start[!zero] /      x[!zero]) ^(shape[!zero]) -
+                  (start[!zero] / (1 + x[!zero]))^(shape[!zero])
+    if (log.arg)
+      ans[!zero] <- log(ans[!zero])
+  }
+  if (any(ox))
+    ans[ox] <- if (log.arg) log(0) else 0
+  ans[shape <= 0] <- NaN
+  ans[start != round(start) | start < 1] <- NaN
+  ans
+}
+
+
+
+ pdiffzeta <- function(q, shape, start = 1, lower.tail = TRUE) {
+
+  LLL <- max(length(shape), length(q), length(start))
+  if (length(q)     != LLL) q     <- rep_len(q,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(start) != LLL) start <- rep_len(start, LLL)
+
+  if (lower.tail) {
+    ans <- 1 - (start / floor(1 + q))^shape
+  } else {
+    ans <-     (start / floor(1 + q))^shape
+  }
+  ans[q < start] <- if (lower.tail) 0 else 1
+  ans[shape <= 0] <- NaN
+  ans[start != round(start) | start < 1] <- NaN
+  ans
+}  # pdiffzeta
+
+
+
+
+ qdiffzeta <- function(p, shape, start = 1) {
+
+  LLL <- max(length(p), length(shape), length(start))
+  if (length(p)     != LLL) p     <- rep_len(p,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(start) != LLL) start <- rep_len(start, LLL)
+
+  lo <- rep_len(start, LLL)
+  approx.ans <- lo  # True at lhs
+  hi <- 2 * lo + 10
+  dont.iterate <- p == 1 | shape <= 0 | start != round(start) | start < 1
+  done <- p <= pdiffzeta(hi, shape, start = start) | dont.iterate
+  max.iter <- 100
+  iter <- 0
+  while (!all(done) && iter < max.iter) {
+    hi.save <- hi[!done]
+    hi[!done] <- 2 * lo[!done] + 10
+    lo[!done] <- hi.save
+    done[!done] <- is.infinite(hi[!done]) |
+                   (p[!done] <= pdiffzeta(hi[!done], shape[!done], start[!done]))
+    iter <- iter + 1
+  }
+
+  foo <- function(q, shape, start, p)
+    pdiffzeta(q, shape, start) - p
+
+  lhs <- (p <= ddiffzeta(start, shape, start = start)) | dont.iterate
+
+  approx.ans[!lhs] <- bisection.basic(foo, lo[!lhs], hi[!lhs], tol = 1/16,
+                                      shape = shape[!lhs],
+                                      start = start[!lhs], p = p[!lhs])
+  faa <- floor(approx.ans)
+  ans <- ifelse(pdiffzeta(faa  , shape, start = start) <  p &
+                p <= pdiffzeta(faa+1, shape, start = start), faa+1, faa)
+
+  ans[p == 1] <- Inf
+  ans[shape <= 0] <- NaN
+  ans[start != round(start) | start < 1] <- NaN
+
+  ans
+}  # qdiffzeta
+
+
+
+
+rdiffzeta <- function(n, shape, start = 1) {
+  rr <- runif(n)
+  qdiffzeta(rr, shape, start = start)
+}
+
+
+
+ diffzeta <- function(start = 1, lshape = "loge", ishape = NULL) {
+
+
+  if (!is.Numeric(start, positive = TRUE,
+                  integer.valued = TRUE, length.arg = 1))
+    stop("bad input for argument 'start'")
+  enteredstart <- length(start)
+
+  if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+    stop("argument 'ishape' must be > 0")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+  new("vglmff",
+  blurb = c("Difference in 2 Zipf distributions ",
+            "f(y;s) = y^(-shape) / sum((1:start)^(-shape)), ",
+            "shape > 0, start, start+1,...",
+            ifelse(enteredstart, paste("start = ", start, sep = ""), ""),
+            "\n\n",
+            "Link:    ",
+            namesof("shape", lshape, earg = eshape),
+            "\n\n",
+            "Mean:    gharmonic(start, shape-1) / gharmonic(start, shape)"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         start = .start ,
+         parameters.names = "shape")
+  }, list( .start = start ))),
+  initialize = eval(substitute(expression({
+  start <- .start
+  temp5 <-
+    w.y.check(w = w, y = y,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              Is.integer.y = TRUE,
+              Is.positive.y = TRUE,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    if (any(y < start))
+      stop("some response values less than 'start'")
+
+
+    predictors.names <- namesof("shape", .lshape , earg = .eshape , tag = FALSE)
+
+    extra$start <- start
+    if (!length(etastart)) {
+      llfun <- function(shape, y, start, w) {
+        sum(c(w) * ddiffzeta(x = y, start = extra$start, shape = shape, log = TRUE))
+      }
+      shape.init <- if (length( .ishape )) .ishape else
+        getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
+                    llfun = llfun,
+                    y = y, start = extra$start, w = w)
+      shape.init <- rep_len(shape.init, length(y))
+      if ( .lshape == "loglog") shape.init[shape.init <= 1] <- 1.2
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape,
+            .eshape = eshape, .ishape = ishape, .start = start ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    aa <- extra$start
+    if (length(aa) != 1 || aa < 1 || round(aa) != aa)
+      stop("the 'start' variable must be of unit length")
+    if (aa == 1)
+      return(zeta(shape))
+    mymat <- matrix(1:aa, NROW(eta), aa, byrow = TRUE)
+    temp1 <- rowSums(1 / mymat^shape)
+    (aa^shape) * (zeta(shape) - temp1 + 1 / aa^(shape-1))
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$expected <- FALSE
+    misc$link <-    c(shape = .lshape )
+    misc$earg <- list(shape = .eshape )
+    misc$start <- extra$start
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * ddiffzeta(x = y, start = extra$start,
+                                  shape = shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("diffzeta"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+
+
+
+  deriv = eval(substitute(expression({
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    temp1 <- extra$start /  y
+    temp2 <- extra$start / (y+1)
+    AA <- temp1^shape - temp2^shape
+    Aprime <- log(temp1) * temp1^shape -
+              log(temp2) * temp2^shape
+
+    dl.dshape <- Aprime / AA
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = expression({
+    ned2l.dshape <- (Aprime / AA)^2  # Not quite FS. Half FS.
+    wz <- c(w) * ned2l.dshape * dshape.deta^2
+    wz
+  }))
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.basics.R b/R/family.basics.R
index 8cf5fb4..628e32d 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -36,7 +36,7 @@ Select <-
   if (is.character(exclude))
     if (any(nchar(prefix) == 0))
       stop("bad input for argument 'exclude'")
-  if (!is.logical(sort.arg) || 
+  if (!is.logical(sort.arg) ||
       length(sort.arg) != 1)
     stop("bad input for argument 'sort.arg'")
 
@@ -230,7 +230,7 @@ subsetc <-
 
   ind5 <- if (maximize) which.max(objvals) else which.min(objvals)
 
-  
+
   c(Value1 = allmat1[ind5, "vov1"],
     Value2 = allmat1[ind5, "vov2"],
     ObjFun = if (ret.objfun) objvals[ind5] else NULL)
@@ -263,7 +263,7 @@ subsetc <-
 
   ind5 <- if (maximize) which.max(objvals) else which.min(objvals)
 
-  
+
   c(Value1 = allmat1[ind5, "vov1"],
     Value2 = allmat1[ind5, "vov2"],
     Value3 = allmat1[ind5, "vov3"],
@@ -302,7 +302,7 @@ subsetc <-
 
   ind5 <- if (maximize) which.max(objvals) else which.min(objvals)
 
-  
+
   c(Value1 = allmat1[ind5, "vov1"],
     Value2 = allmat1[ind5, "vov2"],
     Value3 = allmat1[ind5, "vov3"],
@@ -357,7 +357,7 @@ subsetc <-
 
  cm.VGAM <-
   function(cm, x, bool, constraints,
-           apply.int = FALSE, 
+           apply.int = FALSE,
            cm.default = diag(nrow(cm)),  # 20121226
            cm.intercept.default = diag(nrow(cm))  # 20121226
           ) {
@@ -388,7 +388,7 @@ subsetc <-
 
     if (any(nasgn == "(Intercept)"))
       constraints[["(Intercept)"]] <- cm.intercept.default
-  } 
+  }
 
   if (!is.list(constraints))
     stop("argument 'constraints' must be a list")
@@ -487,12 +487,12 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
     stop("'nointercept' out of range")
   if (nasgn[1] != "(Intercept)" || M == 1)
     stop("Need an (Intercept) constraint matrix with M>1")
-  if (!all.equal(constraints[["(Intercept)"]], diag(M)))
+  if (!identical(constraints[["(Intercept)"]], diag(M)))
     warning("Constraint matrix of (Intercept) not diagonal")
 
   temp <- constraints[["(Intercept)"]]
   temp <- temp[, -nointercept, drop = FALSE]
-  constraints[["(Intercept)"]] <- temp 
+  constraints[["(Intercept)"]] <- temp
   constraints
 }
 
@@ -505,32 +505,35 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
   dotzero <- zero  # Transition
 
+  if (length(dotzero) == 1 && (dotzero == "" || is.na(dotzero)))
+    dotzero <- NULL
+
   if (is.character(dotzero)) {
 
 
 
 
-  which.numeric.all <- NULL
-  for (ii in seq_along(dotzero)) {
-    which.ones <-
-        grep(dotzero[ii], predictors.names, fixed = TRUE)
-    if (length(which.ones)) {
-      which.numeric.all <- c(which.numeric.all, which.ones)
-    } else {
-      warning("some values of argument 'zero' are unmatched. Ignoring them")
+    which.numeric.all <- NULL
+    for (ii in seq_along(dotzero)) {
+      which.ones <-
+          grep(dotzero[ii], predictors.names, fixed = TRUE)
+      if (length(which.ones)) {
+        which.numeric.all <- c(which.numeric.all, which.ones)
+      } else {
+        warning("some values of argument 'zero' are unmatched. Ignoring them")
+      }
+    }  # for ii
+    which.numeric <- unique(sort(which.numeric.all))
+
+    if (!length(which.numeric)) {
+      warning("No values of argument 'zero' were matched.")
+      which.numeric <- NULL
+    } else if (length(which.numeric.all) > length(which.numeric)) {
+      warning("There were redundant values of argument 'zero'.")
     }
-  }
-  which.numeric <- unique(sort(which.numeric.all))
-
-  if (!length(which.numeric)) {
-    warning("No values of argument 'zero' were matched.")
-    which.numeric <- NULL
-  } else if (length(which.numeric.all) > length(which.numeric)) {
-    warning("There were redundant values of argument 'zero'.")
-  }
 
     dotzero <- which.numeric
-  }
+  }  # if is.character(dotzero)
 
 
 
@@ -554,8 +557,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
   }
 
   zpos.index <- if (length(posdotzero)) posdotzero else NULL
-  z.Index <- if (!length(dotzero))
-               NULL else
+  z.Index <- if (!length(dotzero)) NULL else
                unique(sort(c(zneg.index, zpos.index)))
 
 
@@ -580,7 +582,8 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
     return(constraints)
 
   if (any(zero < 1 | zero > M))
-    stop("argument 'zero' out of range")
+    stop("argument 'zero' out of range; should have values between ",
+         "1 and ", M, " inclusive")
   if (nasgn[1] != "(Intercept)")
     stop("cannot fit an intercept to a no-intercept model")
 
@@ -590,13 +593,15 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
       Hmatk[zero, ] <- 0
       index <- NULL
       for (kk in 1:ncol(Hmatk))
-        if (all(Hmatk[, kk] == 0)) index <- c(index, kk)
-      if (length(index) == ncol(Hmatk)) 
+        if (all(Hmatk[, kk] == 0))
+          index <- c(index, kk)
+      if (length(index) == ncol(Hmatk))
         stop("constraint matrix has no columns!")
       if (!is.null(index))
         Hmatk <- Hmatk[, -index, drop = FALSE]
-      constraints[[nasgn[ii]]] <- Hmatk 
-    }
+      constraints[[nasgn[ii]]] <- Hmatk
+    }  # for ii
+
   constraints
 }
 
@@ -645,8 +650,8 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
         stop("'constraints[[", ii, "]]' is not a matrix")
     }
 
-  if (is.null(names(constraints))) 
-    names(constraints) <- rep_len(nasgn, lenconstraints) 
+  if (is.null(names(constraints)))
+    names(constraints) <- rep_len(nasgn, lenconstraints)
 
   temp <- vector("list", length(nasgn))
   names(temp) <- nasgn
@@ -680,7 +685,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
             } else {
               constraints[[ii]]
             }
-      Hlist[[jay]] <- cm 
+      Hlist[[jay]] <- cm
     }
   }
   names(Hlist) <- dimnames(x)[[2]]
@@ -709,6 +714,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
  trivial.constraints <- function(Hlist, target = diag(M)) {
 
 
@@ -719,9 +725,9 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
     Hlist <- list(Hlist)
   M <- dim(Hlist[[1]])[1]
 
-  if (!is.matrix(target)) 
+  if (!is.matrix(target))
     stop("target is not a matrix")
-  dimtar <- dim(target) 
+  dimtar <- dim(target)
 
   trivc <- rep_len(1, length(Hlist))
   names(trivc) <- names(Hlist)
@@ -742,6 +748,9 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
+
+
  add.constraints <- function(constraints, new.constraints,
                              overwrite = FALSE, check = FALSE) {
 
@@ -750,7 +759,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
   if (empty.list(constraints))
     if (is.list(new.constraints))
-      return(new.constraints) else 
+      return(new.constraints) else
       return(list())  # Both NULL probably
 
   constraints <- as.list(constraints)
@@ -773,7 +782,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
                "of the constraints")
         if (overwrite)
           constraints[[ii]] <- new.constraints[[ii]]
-      } else 
+      } else
         constraints[[ii]] <- new.constraints[[ii]]
     }
   } else {
@@ -799,15 +808,16 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
  iam <- function(j, k, M,  # hbw = M,
                  both = FALSE, diag = TRUE) {
 
 
-  jay <- j 
+  jay <- j
   kay <- k
 
   if (M == 1)
-    if (!diag) stop("cannot handle this") 
+    if (!diag) stop("cannot handle this")
 
   if (M == 1)
     if (both) return(list(row.index = 1, col.index = 1)) else return(1)
@@ -817,7 +827,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
   i2 <- lapply(i2, seq)
   i2 <- unlist(i2)
 
-  i1 <- matrix(1:M, M, M) 
+  i1 <- matrix(1:M, M, M)
   i1 <- if (diag) c(i1[row(i1) >= col(i1)]) else
                   c(i1[row(i1) >  col(i1)])
 
@@ -836,6 +846,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
  dimm <- function(M, hbw = M) {
 
   if (!is.numeric(hbw))
@@ -843,7 +854,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
   if (hbw > M || hbw < 1)
     stop("range error in argument 'hbw'")
-  hbw * (2 * M - hbw +1) / 2 
+  hbw * (2 * M - hbw +1) / 2
 }
 
 
@@ -866,16 +877,16 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
   dimm <- ncol(m)
   index <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
   if (dimm > length(index$row.index))
-    stop("bad value for 'M'; it is too small") 
+    stop("bad value for 'M'; it is too small")
   if (dimm < M) {
-    stop("bad value for 'M'; it is too big") 
+    stop("bad value for 'M'; it is too big")
   }
 
   fred <- .C("m2a", as.double(t(m)), ans=double(M*M*n),
       as.integer(dimm),
-      as.integer(index$row-1),  
-      as.integer(index$col-1),  
-      as.integer(n),  as.integer(M),  
+      as.integer(index$row-1),
+      as.integer(index$col-1),
+      as.integer(n),  as.integer(M),
       as.integer(as.numeric(upper)), NAOK = TRUE)
   dim(fred$ans) <- c(M, M, n)
   alpn <- NULL
@@ -885,6 +896,9 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
+
+
  a2m <- function(a, hbw = M) {
 
 
@@ -903,12 +917,12 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
   fred <- .C("a2m",
              as.double(a), m = double(dimm.value * n),
       as.integer(dimm.value),
-      as.integer(index$row-1),  as.integer(index$col-1),  
+      as.integer(index$row-1),  as.integer(index$col-1),
       as.integer(n),  as.integer(M), NAOK = TRUE)
   dim(fred$m) <- c(dimm.value,n)
   fred$m <- t(fred$m)
 
-  if (hbw != M) 
+  if (hbw != M)
     attr(fred$m, "hbw") <- hbw
   if (length(lpn <- dimnames(a)[[1]]) != 0)
     attr(fred$m, "predictors.names") <- lpn
@@ -917,13 +931,16 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
+
+
  vindex <- function(M, row.arg = FALSE, col.arg = FALSE,
                     length.arg = M * (M + 1) / 2) {
 
 
 
   if ((row.arg + col.arg) != 1)
-    stop("only one of row and col must be TRUE") 
+    stop("only one of row and col must be TRUE")
   if (M == 1) {
     ans <- 1
   } else {
@@ -931,19 +948,20 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
       i1 <- matrix(1:M, M, M)
       ans <- c(i1[row(i1) + col(i1) <= (M + 1)])
     } else {
-      i1 <- matrix(1:M, M, M) 
+      i1 <- matrix(1:M, M, M)
       ans <- c(i1[row(i1) >= col(i1)])
     }
   }
   if (length.arg > length(ans))
     stop("argument 'length.arg' too big")
-  rep_len(ans, length.arg) 
+  rep_len(ans, length.arg)
 }
 
 
 
 
 
+
  wweights <- function(object, matrix.arg = TRUE, deriv.arg = FALSE,
                       ignore.slot = FALSE, checkwz = TRUE) {
 
@@ -953,8 +971,8 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
-  if (length(wz <- object@weights) && !ignore.slot && !deriv.arg) { 
-    return(wz) 
+  if (length(wz <- object@weights) && !ignore.slot && !deriv.arg) {
+    return(wz)
   }
 
   M <- object@misc$M  # Done below
@@ -970,7 +988,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
   if (any(slotNames(object) == "predictors"))
     eta <- object@predictors
   mt <- terms(object)  # object@terms$terms; 20030811
-  Hlist <- constraints <- object@constraints 
+  Hlist <- constraints <- object@constraints
   new.coeffs <- object@coefficients
   if (any(slotNames(object) == "iter"))
     iter <- object@iter
@@ -1014,12 +1032,12 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
   if (any(slotNames(object) == "control"))
     for (ii in names(object@control)) {
       assign(ii, object@control[[ii]])
-    } 
+    }
 
   if (length(object@misc))
     for (ii in names(object@misc)) {
-      assign(ii, object@misc[[ii]]) 
-    } 
+      assign(ii, object@misc[[ii]])
+    }
 
   if (any(slotNames(object) == "family")) {
     expr <- object@family@deriv
@@ -1029,28 +1047,30 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
       wz <- eval(expr)
 
 
-      if (M > 1) 
+      if (M > 1)
         dimnames(wz) <- list(dimnames(wz)[[1]], NULL)  # Remove colnames
-      wz <- if (matrix.arg) as.matrix(wz) else c(wz) 
+      wz <- if (matrix.arg) as.matrix(wz) else c(wz)
     }
     if (deriv.arg) list(deriv = deriv.mu, weights = wz) else wz
   } else {
-    NULL 
+    NULL
   }
 }
 
 
 
 
+
+
  pweights <- function(object, ...) {
   ans <- object@prior.weights
   if (length(ans)) {
-    ans 
+    ans
   } else {
     temp <- object@y
     ans <- rep_len(1, nrow(temp))  # Assumed all equal and unity.
     names(ans) <- dimnames(temp)[[1]]
-    ans 
+    ans
   }
 }
 
@@ -1058,6 +1078,9 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
+
+
+
 procVec <- function(vec, yn, Default) {
 
 
@@ -1106,6 +1129,8 @@ if (FALSE) {
 
 
 
+
+
  weightsvglm <- function(object, type = c("prior", "working"),
                         matrix.arg = TRUE, ignore.slot = FALSE,
                         deriv.arg = FALSE, ...) {
@@ -1136,6 +1161,7 @@ if (FALSE) {
 }
 
 
+
 if (!isGeneric("weights"))
     setGeneric("weights", function(object, ...)
   standardGeneric("weights"))
@@ -1161,10 +1187,9 @@ setMethod("weights", "vglm",
 
 
 
-
-qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE, 
-                    trace = FALSE, reset = FALSE,
-                    effpos=.Machine$double.eps^0.75) {
+qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
+                     trace = FALSE, reset = FALSE,
+                     effpos=.Machine$double.eps^0.75) {
 
 
   if (M == 1) {
@@ -1220,7 +1245,7 @@ mbesselI0 <- function(x, deriv.arg = 0) {
 
     ans <- matrix(NA_real_, nrow = nn, ncol = deriv.arg+1)
     ans[, 1] <- besselI(x, nu = 0)
-    if (deriv.arg>=1) ans[,2] <- besselI(x, nu = 1) 
+    if (deriv.arg>=1) ans[,2] <- besselI(x, nu = 1)
     if (deriv.arg>=2) ans[,3] <- ans[,1] - ans[,2] / x
     ans
 }
@@ -1232,11 +1257,11 @@ VGAM.matrix.norm <- function(A, power = 2, suppressWarning = FALSE) {
     warning("norms should be calculated for square matrices; ",
             "'A' is not square")
   if (power == "F") {
-    sqrt(sum(A^2)) 
+    sqrt(sum(A^2))
   } else if (power == 1) {
     max(colSums(abs(A)))
   } else if (power == 2) {
-    sqrt(max(eigen(t(A) %*% A)$value))
+    sqrt(max(eigen(t(A) %*% A, symmetric = TRUE)$value))
   } else if (!is.finite(power)) {
     max(colSums(abs(A)))
   } else {
@@ -1297,7 +1322,7 @@ getfromVGAMenv <- function(varname, prefix = "") {
   get(varname, envir = VGAMenv)
 }
 
- 
+
 
 lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
   if (!is.Numeric(x) || !is.Numeric(s) || !is.Numeric(v))
@@ -1341,6 +1366,10 @@ negzero.expression.VGAM <- expression({
 
 
 
+  if (length(dotzero) == 1 && (dotzero == "" || is.na(dotzero)))
+    dotzero <- NULL
+
+
 
   if (is.character(dotzero)) {
 
@@ -1445,7 +1474,7 @@ interleave.cmat <- function(cmat1, cmat2) {
       return(cbind(cmat1[, 1], cmat2, cmat1[, -1]))
     } else {  # ncol1 == ncol2 and both are > 1.
       kronecker(cmat1, cbind(1, 0)) +
-      kronecker(cmat2, cbind(0, 1))        
+      kronecker(cmat2, cbind(0, 1))
     }
   }
 }
@@ -1594,7 +1623,7 @@ w.y.check <- function(w, y,
                             sep = ""))
     y <- matrix(y, n.lm, Ncol.max.y, dimnames = list(rn.y, cn.y))
   }
-       
+
   list(w = if (out.wy) w else NULL,
        y = if (out.wy) y else NULL)
 }
@@ -1716,3 +1745,30 @@ setMethod("familyname", "vlm",
 
 
 
+bisection.basic <- function(f, a, b, tol = 1e-9, nmax = 500, ...) {
+
+
+
+  if (!all(sign(f(a, ...)) * sign(f(b, ...)) <= 0))
+    stop("roots do not exist between 'a' and 'b'")
+
+  N <- 1
+  while (N <= nmax) {
+    mid <- (a + b) / 2
+    save.f <- f(mid, ...)
+    if (all(save.f == 0 | (b - a)/2 < tol)) {
+      return(mid)
+    }
+    N <- N + 1
+    vecTF <- sign(save.f) == sign(f(a, ...))
+    a[ vecTF] <- mid[ vecTF]
+    b[!vecTF] <- mid[!vecTF]
+  }
+
+  warning("did not converge. Returning final root")
+  mid
+}
+
+
+
+
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 8642ea6..ab952ae 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -132,8 +132,8 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
          zero = .zero )
   }, list( .lmu = lmu, .lrho = lrho,
            .imethod = imethod, .ishrinkage = ishrinkage,
-           .zero = zero,
-           .nsimEIM = nsimEIM ))),
+           .nsimEIM = nsimEIM,
+           .zero = zero ))),
 
 
   initialize = eval(substitute(expression({
@@ -145,7 +145,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
     }
 
     mustart.orig <- mustart
-    eval(binomialff()@initialize)  # Note: n,w,y,mustart is changed 
+    eval(binomialff()@initialize)  # Note: n,w,y,mustart is changed
     if (length(mustart.orig))
       mustart <- mustart.orig  # Retain it if inputted
 
@@ -203,7 +203,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
             .imethod = imethod, .ishrinkage = ishrinkage,
             .nsimEIM = nsimEIM, .irho = irho ))),
   linkinv = eval(substitute(function(eta, extra = NULL)
-    eta2theta(eta[, 1], .lmu , earg = .emu ), 
+    eta2theta(eta[, 1], .lmu , earg = .emu ),
   list( .lmu = lmu, .emu = emu ))),
   last = eval(substitute(expression({
     misc$link <-    c(mu = .lmu , rho = .lrho)
@@ -255,6 +255,14 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
   }, list( .lmu = lmu, .lrho = lrho,
            .emu = emu, .erho = erho  ))),
   vfamily = c("betabinomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta[, 1], .lmu  , earg = .emu  )
+    rho  <- eta2theta(eta[, 2], .lrho , earg = .erho )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu & mymu < 1) &&
+             all(is.finite(rho )) && all(0 < rho  & rho  < 1)
+    okay1
+  }, list( .lmu = lmu, .lrho = lrho,
+           .emu = emu, .erho = erho  ))),
 
 
 
@@ -264,7 +272,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     w <- pwts
     eta <- predict(object)
@@ -291,7 +299,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
               y * w  # Convert proportions to counts
 
     ycounts <- round(ycounts)
-    mymu <- eta2theta(eta[, 1], .lmu ,  earg = .emu )
+    mymu <- eta2theta(eta[, 1], .lmu  , earg = .emu  )
     rho  <- eta2theta(eta[, 2], .lrho , earg = .erho )
     smallno <- 100 * .Machine$double.eps
     rho  <- pmax(rho, smallno)
@@ -356,7 +364,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
                  digamma(shape1) + digamma(shape2))
         dl.drho <- (-1/rho^2) * (mymu * digamma(shape1+ysim) +
                   (1-mymu) * digamma(shape2+nvec-ysim) -
-                  digamma(shape1+shape2+nvec) - 
+                  digamma(shape1+shape2+nvec) -
                   mymu * digamma(shape1) -
                   (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
 
@@ -377,7 +385,7 @@ betabinomial.control <- function(save.weights = TRUE, ...) {
       wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
     }
   }), list( .lmu = lmu, .lrho = lrho,
-            .emu = emu, .erho = erho, 
+            .emu = emu, .erho = erho,
             .nsimEIM = nsimEIM ))))
 }
 
@@ -397,14 +405,14 @@ dbinom2.or <-
            ErrorCheck = TRUE) {
   if (ErrorCheck) {
     if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
-      stop("bad input for argument 'mu1'") 
+      stop("bad input for argument 'mu1'")
     if (!is.Numeric(mu2, positive = TRUE) || max(mu2) >= 1)
-      stop("bad input for argument 'mu2'") 
+      stop("bad input for argument 'mu2'")
     if (!is.Numeric(oratio, positive = TRUE))
-      stop("bad input for argument 'oratio'") 
+      stop("bad input for argument 'oratio'")
     if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
         tol > 0.1)
-      stop("bad input for argument 'tol'") 
+      stop("bad input for argument 'tol'")
     if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
       stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
   }
@@ -447,14 +455,14 @@ rbinom2.or <-
 
   if (ErrorCheck) {
     if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
-      stop("bad input for argument 'mu1'") 
+      stop("bad input for argument 'mu1'")
     if (!is.Numeric(mu2, positive = TRUE) || max(mu2) >= 1)
-      stop("bad input for argument 'mu2'") 
+      stop("bad input for argument 'mu2'")
     if (!is.Numeric(oratio, positive = TRUE))
-      stop("bad input for argument 'oratio'") 
+      stop("bad input for argument 'oratio'")
     if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
         tol > 0.1)
-      stop("bad input for argument 'tol'") 
+      stop("bad input for argument 'tol'")
     if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
       stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
   }
@@ -515,17 +523,17 @@ rbinom2.or <-
 
 
   if (!is.logical(exchangeable))
-    warning("argument 'exchangeable' should be a single logical") 
+    warning("argument 'exchangeable' should be a single logical")
 
   if (is.logical(exchangeable) && exchangeable &&
-     ((lmu1 != lmu2) || !all.equal(emu1, emu2)))
-    warning("exchangeable = TRUE but marginal links are not equal") 
+     ((lmu1 != lmu2) || !identical(emu1, emu2)))
+    warning("exchangeable = TRUE but marginal links are not equal")
 
 
 
   if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
       tol > 0.1)
-    stop("bad input for argument 'tol'") 
+    stop("bad input for argument 'tol'")
 
 
   new("vglmff",
@@ -574,34 +582,34 @@ rbinom2.or <-
 
 
     predictors.names <-
-       c(namesof("mu1",     .lmu1,    earg = .emu1,    short = TRUE),
-         namesof("mu2",     .lmu2,    earg = .emu2,    short = TRUE),
-         namesof("oratio",  .loratio, earg = .eoratio, short = TRUE))
+       c(namesof("mu1",    .lmu1 ,    earg = .emu1 ,    short = TRUE),
+         namesof("mu2",    .lmu2 ,    earg = .emu2 ,    short = TRUE),
+         namesof("oratio", .loratio , earg = .eoratio , short = TRUE))
 
 
     if (!length(etastart)) {
         pmargin <- cbind(mustart[, 3] + mustart[, 4],
                          mustart[, 2] + mustart[, 4])
-        ioratio <- if (length( .ioratio)) rep_len( .ioratio , n) else
+        ioratio <- if (length( .ioratio )) rep_len( .ioratio , n) else
                    mustart[, 4] * mustart[, 1] / (mustart[, 2] *
                                                   mustart[, 3])
         if (length( .imu1 )) pmargin[, 1] <- .imu1
         if (length( .imu2 )) pmargin[, 2] <- .imu2
-        etastart <- cbind(theta2eta(pmargin[, 1], .lmu1, earg = .emu1),
-                          theta2eta(pmargin[, 2], .lmu2, earg = .emu2),
-                          theta2eta(ioratio, .loratio, earg = .eoratio))
+        etastart <- cbind(theta2eta(pmargin[, 1], .lmu1 , earg = .emu1 ),
+                          theta2eta(pmargin[, 2], .lmu2 , earg = .emu2 ),
+                          theta2eta(ioratio, .loratio , earg = .eoratio ))
     }
   }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
             .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
             .imu1 = imu1, .imu2 = imu2, .ioratio = ioratio ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    pmargin <- cbind(eta2theta(eta[, 1], .lmu1, earg = .emu1),
-                     eta2theta(eta[, 2], .lmu2, earg = .emu2))
-    oratio <- eta2theta(eta[, 3], .loratio, earg = .eoratio)
-    a.temp <- 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+    pmargin <- cbind(eta2theta(eta[, 1], .lmu1 , earg = .emu1 ),
+                     eta2theta(eta[, 2], .lmu2 , earg = .emu2 ))
+    oratio <- eta2theta(eta[, 3], .loratio , earg = .eoratio )
+    a.temp <- 1 + (pmargin[, 1] + pmargin[, 2]) * (oratio - 1)
     b.temp <- -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
     temp <- sqrt(a.temp^2 + b.temp)
-    pj4 <- ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
+    pj4 <- ifelse(abs(oratio-1) < .tol , pmargin[, 1] * pmargin[, 2],
                  (a.temp-temp)/(2*(oratio-1)))
     pj2 <- pmargin[, 2] - pj4
     pj3 <- pmargin[, 1] - pj4
@@ -626,7 +634,7 @@ rbinom2.or <-
     pmargin <- cbind(mu[, 3]+mu[, 4], mu[, 2]+mu[, 4])
     oratio <- mu[, 4]*mu[, 1] / (mu[, 2]*mu[, 3])
     cbind(theta2eta(pmargin[, 1], .lmu1 , earg = .emu1),
-          theta2eta(pmargin[, 2], .lmu2 , earg = .emu2), 
+          theta2eta(pmargin[, 2], .lmu2 , earg = .emu2),
           theta2eta(oratio,      .loratio, earg = .eoratio))
   }, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
            .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
@@ -663,6 +671,15 @@ rbinom2.or <-
     }
   }, list( .more.robust = more.robust ))),
   vfamily = c("binom2.or", "binom2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pmargin <- cbind(eta2theta(eta[, 1], .lmu1 , earg = .emu1 ),
+                     eta2theta(eta[, 2], .lmu2 , earg = .emu2 ))
+    oratio <- eta2theta(eta[, 3], .loratio , earg = .eoratio )
+    okay1 <- all(is.finite(pmargin)) && all(0 < pmargin & pmargin < 1) &&
+             all(is.finite(oratio )) && all(0 < oratio)
+    okay1
+  }, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
+           .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
 
 
 
@@ -692,7 +709,7 @@ rbinom2.or <-
     coeff12 <- -0.5 + (2*oratio*pmargin - a.temp) / (2*temp9)
     dl.dmu1 <- coeff12[, 2] * (y[, 1]/mu.use[, 1]-y[, 3]/mu.use[, 3]) -
        (1+coeff12[, 2]) * (y[, 2]/mu.use[, 2]-y[, 4]/mu.use[, 4])
-    
+
     dl.dmu2 <- coeff12[, 1] * (y[, 1]/mu.use[, 1]-y[, 2]/mu.use[, 2]) -
        (1+coeff12[, 1]) * (y[, 3]/mu.use[, 3]-y[, 4]/mu.use[, 4])
 
@@ -802,11 +819,11 @@ dbinom2.rho <-
            ErrorCheck = TRUE) {
   if (ErrorCheck) {
     if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
-      stop("bad input for argument 'mu1'") 
+      stop("bad input for argument 'mu1'")
     if (!is.Numeric(mu2, positive = TRUE) || max(mu2) >= 1)
-      stop("bad input for argument 'mu2'") 
+      stop("bad input for argument 'mu2'")
     if (!is.Numeric(rho) || min(rho) <= -1 || max(rho) >= 1)
-      stop("bad input for argument 'rho'") 
+      stop("bad input for argument 'rho'")
     if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
       stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
   }
@@ -845,13 +862,13 @@ rbinom2.rho <-
   if (ErrorCheck) {
     if (!is.Numeric(mu1, positive = TRUE) ||
         max(mu1) >= 1)
-      stop("bad input for argument 'mu1'") 
+      stop("bad input for argument 'mu1'")
     if (!is.Numeric(mu2, positive = TRUE) ||
         max(mu2) >= 1)
-      stop("bad input for argument 'mu2'") 
+      stop("bad input for argument 'mu2'")
     if (!is.Numeric(rho) || min(rho) <= -1 ||
         max(rho) >= 1)
-      stop("bad input for argument 'rho'") 
+      stop("bad input for argument 'rho'")
 
 
     if (exchangeable &&
@@ -1064,7 +1081,7 @@ binom2.rho.control <- function(save.weights = TRUE, ...) {
       mustart <- NULL # Since etastart has been computed.
     }
   }), list( .lmu12 = lmu12, .lrho = lrho,
-            .emu12 = emu12, .erho = erho, 
+            .emu12 = emu12, .erho = erho,
                             .grho = grho,
                             .irho = irho,
             .imethod = imethod, .nsimEIM = nsimEIM,
@@ -1126,6 +1143,15 @@ binom2.rho.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .erho = erho ))),
   vfamily = c("binom2.rho", "binom2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+                     eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+    rhovec <- eta2theta(eta[, 3], .lrho , earg = .erho )
+    okay1 <- all(is.finite(pmargin)) && all( 0 < pmargin & pmargin < 1) &&
+             all(is.finite(rhovec )) && all(-1 < rhovec  & rhovec  < 1)
+    okay1
+  }, list( .lmu12 = lmu12, .lrho = lrho,
+           .emu12 = emu12, .erho = erho ))),
 
 
 
@@ -1427,7 +1453,7 @@ my.dbinom <- function(x,
     }
   }), list( .prob = prob, .link = link ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    nvec <- eta2theta(eta, .link)
+    nvec <- eta2theta(eta, .link )
     nvec * extra$temp2
   }, list( .link = link ))),
   last = eval(substitute(expression({
@@ -1460,10 +1486,15 @@ my.dbinom <- function(x,
     }
   }, list( .prob = prob ))),
   vfamily = c("size.binomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    nvec <- eta2theta(eta, .link )
+    okay1 <- all(is.finite(nvec)) && all( 0 < nvec)
+    okay1
+  }, list( .link = link ))),
   deriv = eval(substitute(expression({
     nvec <- mu/extra$temp2
     dldnvec <- digamma(nvec+1) - digamma(nvec-y+1) + log1p(-extra$temp2)
-    dnvecdeta <- dtheta.deta(nvec, .link)
+    dnvecdeta <- dtheta.deta(nvec, .link )
     c(w) * cbind(dldnvec * dnvecdeta)
   }), list( .link = link ))),
   weight = eval(substitute(expression({
@@ -1477,24 +1508,27 @@ my.dbinom <- function(x,
 
 
 
- dbetabinom.ab <- function(x, size, shape1, shape2, log = FALSE,
-                           Inf.shape = 1e6
-                          ) {
+ dbetabinom.ab <-
+    function(x, size, shape1, shape2, log = FALSE,
+             Inf.shape = exp(20),  # 1e6, originally
+             limit.prob = 0.5  # Strictly should be NaN
+            ) {
 
 
-  Bigg <- Inf.shape
+  Bigg  <- Inf.shape
+  Bigg2 <- Inf.shape  # big.shape  # exp(34)  # Found empirically
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-
-
   LLL <- max(length(x), length(size), length(shape1), length(shape2))
   if (length(x)      != LLL) x      <- rep_len(x,      LLL)
   if (length(size)   != LLL) size   <- rep_len(size,   LLL)
   if (length(shape1) != LLL) shape1 <- rep_len(shape1, LLL)
   if (length(shape2) != LLL) shape2 <- rep_len(shape2, LLL)
+  is.infinite.shape1 <- is.infinite(shape1)  # Includes -Inf !!
+  is.infinite.shape2 <- is.infinite(shape2)
 
   ans <- x
   ans[TRUE] <- log(0)
@@ -1503,88 +1537,87 @@ my.dbinom <- function(x,
 
 
   ok0 <- !is.na(shape1) & !is.na(shape2) & !is.na(x) & !is.na(size)
-  ok <- (round(x) == x) & (x >= 0) & (x <= size) &
-        is.finite(shape1) & is.finite(shape2) & ok0
-  if (any(ok)) {
-    ans[ok] <- lchoose(size[ok], x[ok]) +
-               lbeta(shape1[ok]            + x[ok],
-                     shape2[ok] + size[ok] - x[ok]) -
-               lbeta(shape1[ok], shape2[ok])
-
-
-    endpt <- (x == size) & ((shape1 < 1/Bigg) | (shape2 < 1/Bigg)) & ok0
-    if (any(endpt)) {
-      ans[endpt] <- lgamma(size[endpt] + shape1[endpt]) +
-                    lgamma(shape1[endpt] + shape2[endpt]) -
-                   (lgamma(size[endpt] + shape1[endpt] + shape2[endpt]) +
-                    lgamma(shape1[endpt]))
-    }
+  okk <- (round(x) == x) & (x >= 0) & (x <= size) &
+         !is.infinite.shape1 & !is.infinite.shape2 & ok0
+  if (any(okk)) {
+    ans[okk] <- lchoose(size[okk], x[okk]) +
+                lbeta(shape1[okk]             + x[okk],
+                      shape2[okk] + size[okk] - x[okk]) -
+                lbeta(shape1[okk], shape2[okk])
 
 
+    endpt1 <- (x == size) & ((shape1 < 1/Bigg) | (shape2 < 1/Bigg)) & ok0
+    if (any(endpt1)) {
+      ans[endpt1] <- lgamma(  size[endpt1] + shape1[endpt1]) +
+                     lgamma(shape1[endpt1] + shape2[endpt1]) -
+                    (lgamma(  size[endpt1] + shape1[endpt1] + shape2[endpt1]) +
+                     lgamma(shape1[endpt1]))
+    }  # endpt1
 
 
-    endpt <- (x == 0) & ((shape1 < 1/Bigg) | (shape2 < 1/Bigg)) & ok0
-    if (any(endpt)) {
-      ans[endpt] <- lgamma(size[endpt] + shape2[endpt]) +
-                    lgamma(shape1[endpt] + shape2[endpt]) -
-                   (lgamma(size[endpt] + shape1[endpt] + shape2[endpt]) +
-                    lgamma(shape2[endpt]))
-    }
 
 
+    endpt2 <- (x == 0) & ((shape1 < 1/Bigg) | (shape2 < 1/Bigg)) & ok0
+    if (any(endpt2)) {
+      ans[endpt2] <- lgamma(  size[endpt2] + shape2[endpt2]) +
+                     lgamma(shape1[endpt2] + shape2[endpt2]) -
+                    (lgamma(  size[endpt2] + shape1[endpt2] + shape2[endpt2]) +
+                     lgamma(shape2[endpt2]))
+    }  # endpt2
 
 
 
-    endpt <- ((shape1 > Bigg) | (shape2 > Bigg)) & ok0
-    if (any(endpt)) {
-      ans[endpt] <- lchoose(size[endpt], x[endpt]) +
-                    lgamma(x[endpt] + shape1[endpt]) +
-                    lgamma(size[endpt] - x[endpt] + shape2[endpt]) +
-                    lgamma(shape1[endpt] + shape2[endpt]) -
-                   (lgamma(size[endpt] + shape1[endpt] + shape2[endpt]) +
-                    lgamma(shape1[endpt]) +
-                    lgamma(shape2[endpt]))
-    }
-  }  # if (any(ok))
 
 
+    endpt3 <- ((Bigg < shape1) | (Bigg < shape2)) & ok0
+    if (any(endpt3)) {
+
+
+
+      ans[endpt3] <- lchoose(size[endpt3], x[endpt3]) +
+                     lbeta(shape1[endpt3] +                x[endpt3],
+                           shape2[endpt3] + size[endpt3] - x[endpt3]) -
+                     lbeta(shape1[endpt3], shape2[endpt3])
+    }  # endpt3
+  }  # if (any(okk))
+
 
   if (!log.arg) {
     ans <- exp(ans)
   }
 
 
+  ok1 <- !is.infinite.shape1 &  is.infinite.shape2  # rho==0 & prob==0
+  ok2 <-  is.infinite.shape1 & !is.infinite.shape2  # rho==0 & prob==1
+  ok3 <-  Bigg2 < shape1     &  Bigg2 < shape2
+  ok4 <-  is.infinite.shape1 &  is.infinite.shape2  # prob undefined
 
-  if (FALSE) {
-    ok1 <- is.na(shape1)       & is.infinite(shape2)  # rho==0 & prob==0
-    ok2 <- is.infinite(shape1) & is.na(shape2)        # rho==0 & prob==1
-    ok3 <- is.infinite(shape1) & is.infinite(shape2)  # rho==0 & 0<prob<1
-  } else {
-    ok1 <-   is.finite(shape1) & is.infinite(shape2)  # rho==0 & prob==0
-    ok2 <- is.infinite(shape1) &   is.finite(shape2)  # rho==0 & prob==1
-    ok3 <- is.infinite(shape1) & is.infinite(shape2)  # prob undefined
 
-  }
+
+  if (any(ok3)) {
+    prob1 <- shape1[ok3] / (shape1[ok3] + shape2[ok3])
+    ans[ok3] <- dbinom(x = x[ok3], size = size[ok3],
+                       prob = prob1, log = log.arg)
+
+    if (any(ok4)) {
+      ans[ok4] <- dbinom(x = x[ok4], size = size[ok4],
+                         prob = limit.prob, log = log.arg)
+    }
+  }  # ok3
 
   if (any(ok1))
     ans[ok1] <- dbinom(x = x[ok1], size = size[ok1],
-                       prob = shape1[ok1] / (shape1[ok1]+shape2[ok1]),  # 0,
+                       prob = 0,  # finite / (finite + Inf) == 0
                        log = log.arg)
   if (any(ok2))
     ans[ok2] <- dbinom(x = x[ok2], size = size[ok2],
                        prob = 1,  # Inf / (finite + Inf) == 1
                        log = log.arg)
-  if (any(ok3)) {
-    ans[ok3] <- dbinom(x = x[ok3], size = size[ok3],
-                       prob = shape1[ok3] / (shape1[ok3]+shape2[ok3]),
-                       log = log.arg)
-  }
 
 
   ans[shape1 < 0] <- NaN
   ans[shape2 < 0] <- NaN
 
-
   ans
 }
 
@@ -1764,13 +1797,13 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
 
 
   lshape1 <- as.list(substitute(lshape1))
-  earg1 <- link2list(lshape1)
-  lshape1 <- attr(earg1, "function.name")
+  eshape1 <- link2list(lshape1)
+  lshape1 <- attr(eshape1, "function.name")
 
 
   lshape2 <- as.list(substitute(lshape2))
-  earg2 <- link2list(lshape2)
-  lshape2 <- attr(earg2, "function.name")
+  eshape2 <- link2list(lshape2)
+  lshape2 <- attr(eshape2, "function.name")
 
 
   if (!is.Numeric(ishape1, positive = TRUE))
@@ -1795,8 +1828,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
   new("vglmff",
   blurb = c("Beta-binomial model\n",
             "Links:    ",
-            namesof("shape1", lshape1, earg = earg1), ", ",
-            namesof("shape2", lshape2, earg = earg2), "\n",
+            namesof("shape1", lshape1, earg = eshape1), ", ",
+            namesof("shape2", lshape2, earg = eshape2), "\n",
             "Mean:     mu = shape1 / (shape1+shape2)", "\n",
             "Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
                        "where rho = 1 / (shape1+shape2+1)"),
@@ -1815,7 +1848,9 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
          lshape1 = .lshape1 ,
          lshape2 = .lshape2 ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .lshape1 = lshape1,
+           .lshape2 = lshape2 ))),
 
 
   initialize = eval(substitute(expression({
@@ -1827,12 +1862,12 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     }
 
     mustart.orig <- mustart
-    eval(binomialff()@initialize)   # Note: n,w,y,mustart is changed 
+    eval(binomialff()@initialize)   # Note: n,w,y,mustart is changed
     if (length(mustart.orig))
       mustart <- mustart.orig  # Retain it if inputted
     predictors.names <-
-         c(namesof("shape1", .lshape1 , earg = .earg1 , tag = FALSE),
-           namesof("shape2", .lshape2 , earg = .earg2 , tag = FALSE))
+         c(namesof("shape1", .lshape1 , earg = .eshape1 , tag = FALSE),
+           namesof("shape2", .lshape2 , earg = .eshape2 , tag = FALSE))
 
     if (!length(etastart)) {
 
@@ -1859,35 +1894,35 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
          warning("the response (as counts) does not appear to ",
                  "be integer-valued. Am rounding to integer values.")
       ycounts <- round(ycounts)  # Make sure it is an integer
-      etastart <- cbind(theta2eta(shape1, .lshape1 , earg = .earg1 ),
-                        theta2eta(shape2, .lshape2 , earg = .earg2 ))
+      etastart <- cbind(theta2eta(shape1, .lshape1 , earg = .eshape1 ),
+                        theta2eta(shape2, .lshape2 , earg = .eshape2 ))
       mustart <- NULL  # Since etastart has been computed.
     }
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
-            .earg1 = earg1, .earg2 = earg2,
+            .eshape1 = eshape1, .eshape2 = eshape2,
             .ishape1 = ishape1, .ishape2 = ishape2,
             .nsimEIM = nsimEIM,
             .imethod = imethod, .ishrinkage = ishrinkage ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
-    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
     shape1 / (shape1 + shape2)
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
-           .earg1 = earg1, .earg2 = earg2 ))),
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
     misc$link <-    c("shape1" = .lshape1 , "shape2" = .lshape2 )
 
-    misc$earg <- list("shape1" = .earg1 ,   "shape2" = .earg2   )
+    misc$earg <- list("shape1" = .eshape1 , "shape2" = .eshape2 )
 
-    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
-    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
 
     misc$rho <- 1 / (shape1 + shape2 + 1)
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
     misc$zero <- .zero
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
-            .earg1 = earg1, .earg2 = earg2,
+            .eshape1 = eshape1, .eshape2 = eshape2,
             .nsimEIM = nsimEIM, .zero = zero ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
@@ -1900,8 +1935,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
       warning("converting 'ycounts' to integer in @loglikelihood")
     ycounts <- round(ycounts)
 
-    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
-    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
               round(w)
     if (residuals) {
@@ -1918,8 +1953,16 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
       }
     }
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
-           .earg1 = earg1, .earg2 = earg2 ))),
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = c("betabinomialff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
+    okay1 <- all(is.finite(shape1)) && all(0 < shape1) &&
+             all(is.finite(shape2)) && all(0 < shape2)
+    okay1
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
 
 
 
@@ -1930,20 +1973,20 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     w <- pwts
     eta <- predict(object)
     extra <- object at extra
-    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
-    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
               round(w)
     rbetabinom.ab(nsim * length(shape1), size = nvec,
                   shape1 = shape1,
                   shape2 = shape2)
-  }, list( .lshape1 = lshape1, .lshape2 = lshape2, 
-           .earg1 = earg1, .earg2 = earg2 ))),
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+           .eshape1 = eshape1, .eshape2 = eshape2 ))),
 
 
 
@@ -1954,11 +1997,11 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
               round(w)
     ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
               y * w # Convert proportions to counts
-    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
-    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
 
-    dshape1.deta <- dtheta.deta(shape1, .lshape1 , earg = .earg1 )
-    dshape2.deta <- dtheta.deta(shape2, .lshape2 , earg = .earg2 )
+    dshape1.deta <- dtheta.deta(shape1, .lshape1 , earg = .eshape1 )
+    dshape2.deta <- dtheta.deta(shape2, .lshape2 , earg = .eshape2 )
 
     dl.dshape1 <- digamma(shape1+ycounts) -
                   digamma(shape1+shape2+nvec) -
@@ -1971,7 +2014,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     cbind(dl.dshape1 * dshape1.deta,
           dl.dshape2 * dshape2.deta)
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
-            .earg1 = earg1, .earg2 = earg2 ))),
+            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   weight = eval(substitute(expression({
     if (is.null( .nsimEIM)) {
       wz <- matrix(NA_real_, n, dimm(M))  #3=dimm(2)
@@ -2017,8 +2060,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
       wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
       wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
     }
-  }), list( .lshape1 = lshape1, .lshape2 = lshape2, 
-            .earg1 = earg1, .earg2 = earg2,
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+            .eshape1 = eshape1, .eshape2 = eshape2,
             .nsimEIM = nsimEIM ))))
 }
 
@@ -2149,6 +2192,14 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
   }, list( .lprob = lprob, .lshape = lshape,
            .eprob = eprob, .eshape = eshape ))),
   vfamily = c("betageometric"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    prob  <- eta2theta(eta[, 1], .lprob ,  earg = .eprob )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(prob )) && all(0 < prob  & prob < 1) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lprob = lprob, .lshape = lshape,
+           .eprob = eprob, .eshape = eshape ))),
 
 
 
@@ -2158,7 +2209,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     prob  <- eta2theta(eta[, 1], .lprob  , earg = .eprob  )
@@ -2289,7 +2340,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     if (!is.vector(w))
       stop("the 'weights' argument must be a vector")
 
-    if (any(abs(w - round(w)) > 0.000001))
+    if (any(abs(w - round(w)) > 1e-6))
       stop("the 'weights' argument does not seem to be integer-valued")
 
 
@@ -2311,8 +2362,8 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
               " column two should be integer-valued")
 
     predictors.names <-
-        c(namesof("prob1", .lprob1,earg =  .eprob1, tag = FALSE),
-          namesof("prob2", .lprob2,earg =  .eprob2, tag = FALSE))
+        c(namesof("prob1", .lprob1 , earg =  .eprob1 , tag = FALSE),
+          namesof("prob2", .lprob2 , earg =  .eprob2 , tag = FALSE))
 
     prob1.init <- if (is.Numeric( .iprob1))
                    rep_len( .iprob1 , n) else
@@ -2323,15 +2374,15 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
 
     if (!length(etastart)) {
       etastart <-
-        cbind(theta2eta(prob1.init, .lprob1, earg = .eprob1),
-              theta2eta(prob2.init, .lprob2, earg = .eprob2))
+        cbind(theta2eta(prob1.init, .lprob1 , earg = .eprob1 ),
+              theta2eta(prob2.init, .lprob2 , earg = .eprob2 ))
     }
   }), list( .iprob1 = iprob1, .iprob2 = iprob2,
             .lprob1 = lprob1, .lprob2 = lprob2,
             .eprob1 = eprob1, .eprob2 = eprob2 ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
-    prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
+    prob1 <- eta2theta(eta[, 1], .lprob1 , earg = .eprob1 )
+    prob2 <- eta2theta(eta[, 2], .lprob2 , earg = .eprob2 )
     cbind(prob1, prob2)
   }, list( .lprob1 = lprob1, .lprob2 = lprob2,
            .eprob1 = eprob1, .eprob2 = eprob2 ))),
@@ -2352,13 +2403,13 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
-    prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
-    prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
+    prob1 <- eta2theta(eta[, 1], .lprob1 , earg = .eprob1 )
+    prob2 <- eta2theta(eta[, 2], .lprob2 , earg = .eprob2 )
 
     smallno <- 100 * .Machine$double.eps
-    prob1 <- pmax(prob1, smallno)
+    prob1 <- pmax(prob1,   smallno)
     prob1 <- pmin(prob1, 1-smallno)
-    prob2 <- pmax(prob2, smallno)
+    prob2 <- pmax(prob2,   smallno)
     prob2 <- pmin(prob2, 1-smallno)
     mvector <- w
     rvector <- w * y[, 1]
@@ -2380,9 +2431,17 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
   }, list( .lprob1 = lprob1, .lprob2 = lprob2,
            .eprob1 = eprob1, .eprob2 = eprob2 ))),
   vfamily = c("seq2binomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    prob1 <- eta2theta(eta[, 1], .lprob1 , earg = .eprob1 )
+    prob2 <- eta2theta(eta[, 2], .lprob2 , earg = .eprob2 )
+    okay1 <- all(is.finite(prob1)) && all(0 < prob1 & prob1 < 1) &&
+             all(is.finite(prob2)) && all(0 < prob2 & prob2 < 1)
+    okay1
+  }, list( .lprob1 = lprob1, .lprob2 = lprob2,
+           .eprob1 = eprob1, .eprob2 = eprob2 ))),
   deriv = eval(substitute(expression({
-    prob1 <- eta2theta(eta[, 1], .lprob1, earg = .eprob1)
-    prob2 <- eta2theta(eta[, 2], .lprob2, earg = .eprob2)
+    prob1 <- eta2theta(eta[, 1], .lprob1 , earg = .eprob1 )
+    prob2 <- eta2theta(eta[, 2], .lprob2 , earg = .eprob2 )
     smallno <- 100 * .Machine$double.eps
     prob1 <- pmax(prob1, smallno)
     prob1 <- pmin(prob1, 1-smallno)
@@ -2438,10 +2497,10 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
 
   if (!is.Numeric(tol, positive = TRUE, length.arg = 1) ||
       tol > 0.1)
-      stop("bad input for argument 'tol'") 
+      stop("bad input for argument 'tol'")
   if (!is.Numeric(addRidge, length.arg = 1, positive = TRUE) ||
       addRidge > 0.5)
-    stop("bad input for argument 'addRidge'") 
+    stop("bad input for argument 'addRidge'")
 
   if (lmu12 != "cloglog")
     warning("argument 'lmu12' should be 'cloglog'")
@@ -2506,15 +2565,16 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
             theta2eta(mu12.init,   .lmu12 ,   earg = .emu12 ),
             theta2eta(phi.init,    .lphi12,  earg = .ephi12),
             theta2eta(oratio.init, .loratio, earg = .eoratio))
+        mustart <- NULL
       }
   }), list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
             .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio,
             .imu12 = imu12, .iphi12 = iphi12, .ioratio = ioratio ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    A1vec  <- eta2theta(eta[, 1], .lmu12 ,  earg = .emu12 )
-    phivec <- eta2theta(eta[, 2], .lphi12, earg = .ephi12)
+    A1vec  <- eta2theta(eta[, 1], .lmu12  , earg = .emu12  )
+    phivec <- eta2theta(eta[, 2], .lphi12 , earg = .ephi12 )
     pmargin <- matrix((1 - phivec) * A1vec, nrow(eta), 2)
-    oratio <- eta2theta(eta[, 3], .loratio, earg = .eoratio)
+    oratio <- eta2theta(eta[, 3], .loratio , earg = .eoratio )
     a.temp <- 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
     b.temp <- -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
     temp <- sqrt(a.temp^2 + b.temp)
@@ -2527,13 +2587,13 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
            .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
            .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
   last = eval(substitute(expression({
-    misc$link <-    c("mu12"= .lmu12 ,
-                      "phi12" = .lphi12,
-                      "oratio" = .loratio)
+    misc$link <-    c("mu12"   = .lmu12 ,
+                      "phi12"  = .lphi12 ,
+                      "oratio" = .loratio )
 
-    misc$earg <- list("mu12"= .emu12 ,
-                      "phi12" = .ephi12,
-                      "oratio" = .eoratio)
+    misc$earg <- list("mu12"   = .emu12 ,
+                      "phi12"  = .ephi12 ,
+                      "oratio" = .eoratio )
 
     misc$tol <- .tol
     misc$expected <- TRUE
@@ -2570,6 +2630,19 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     }
   },
   vfamily = c("zipebcom"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    A1vec  <- eta2theta(eta[, 1], .lmu12 ,  earg = .emu12 )
+    smallno <- .Machine$double.eps^(2/4)
+    A1vec[A1vec > 1.0 - smallno] <- 1.0 - smallno
+
+    phivec <- eta2theta(eta[, 2], .lphi12 , earg = .ephi12 )
+    oratio <- eta2theta(eta[, 3], .loratio , earg = .eoratio )
+    okay1 <- all(is.finite(A1vec )) && all(0 < A1vec  & A1vec  < 1) &&
+             all(is.finite(phivec)) && all(0 < phivec & phivec < 1) &&
+             all(is.finite(oratio)) && all(0 < oratio)
+    okay1
+  }, list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
+           .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
   deriv = eval(substitute(expression({
     A1vec  <- eta2theta(eta[, 1], .lmu12 ,  earg = .emu12 )
     smallno <- .Machine$double.eps^(2/4)
@@ -2585,7 +2658,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     temp1 <- oratio * mu[, 2] + mu[, 4]
     dp11star.dp1unstar <- 2*(1-phivec)*Vab * Vabc
     dp11star.dphi1 <- -2 * A1vec * Vab * Vabc
-    dp11star.doratio <- Vab / oratio 
+    dp11star.doratio <- Vab / oratio
     yandmu <- (y[, 1]/mu[, 1] - y[, 2]/mu[, 2] - y[, 3]/mu[, 3] +
                y[, 4]/mu[, 4])
     dp11.doratio <- Vab / oratio
@@ -2633,7 +2706,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
 
 
 
- binom2.Rho <- function(rho = 0, imu1 = NULL, imu2 = NULL, 
+ binom2.Rho <- function(rho = 0, imu1 = NULL, imu2 = NULL,
                         exchangeable = FALSE, nsimEIM = NULL) {
   lmu12 <- "probit"
   emu12 <- list()
@@ -2645,6 +2718,9 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     if (nsimEIM <= 100)
       warning("'nsimEIM' should be an integer greater than 100")
   }
+  if (min(rho) <= -1 || 1 <= max(rho))
+    stop("argument 'rho' should lie in (-1, 1)")
+
 
   new("vglmff",
   blurb = c("Bivariate probit model with rho = ", format(rho), "\n",
@@ -2658,6 +2734,15 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
                            apply.int = TRUE)
   }), list( .exchangeable = exchangeable ))),
   deviance = Deviance.categorical.data.vgam,
+  infos = eval(substitute(function(...) {
+    list(M1 = 3,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("mu1", "mu2"),
+         lmu12   = .lmu12 )
+  }, list( .lmu12 = lmu12 ))),
+
+
   initialize = eval(substitute(expression({
     eval(process.binomial2.data.VGAM)
     predictors.names <- c(
@@ -2665,18 +2750,19 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
                   namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE))
 
     if (is.null( .nsimEIM )) {
-         save.weights <- control$save.weights <- FALSE
+      save.weights <- control$save.weights <- FALSE
     }
     if (is.null(etastart)) {
-      mu1.init= if (is.Numeric( .imu1 ))
-                rep_len( .imu1 , n) else
-                mu[, 3] + mu[, 4]
-      mu2.init= if (is.Numeric( .imu2 ))
-                rep_len( .imu2 , n) else
-                mu[, 2] + mu[, 4]
+      mu1.init <- if (is.Numeric( .imu1 ))
+                    rep_len( .imu1 , n) else
+                    mu[, 3] + mu[, 4]
+      mu2.init <- if (is.Numeric( .imu2 ))
+                    rep_len( .imu2 , n) else
+                    mu[, 2] + mu[, 4]
       etastart <-
         cbind(theta2eta(mu1.init, .lmu12 , earg = .emu12 ),
               theta2eta(mu2.init, .lmu12 , earg = .emu12 ))
+      mustart <- NULL
     }
   }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
             .imu1 = imu1, .imu2 = imu2 ))),
@@ -2731,6 +2817,12 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .rho = rho ))),
   vfamily = c("binom2.Rho", "binom2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+                     eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+    okay1 <- all(is.finite(pmargin)) && all(0 < pmargin & pmargin < 1)
+    okay1
+  }, list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
   deriv = eval(substitute(expression({
     pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
                      eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
@@ -2786,7 +2878,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
                              onemPhiB * (ysim[, 3]/p10-ysim[, 1]/p00)
         dl.dprob2 <- PhiA * (ysim[, 4]/p11-ysim[, 3]/p10) +
                              onemPhiA * (ysim[, 2]/p01-ysim[, 1]/p00)
-  
+
         rm(ysim)
         temp3 <- cbind(dl.dprob1, dl.dprob2)
         run.varcov <- ((ii-1) * run.varcov +
@@ -2992,7 +3084,7 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     }
     mustart <- NULL  # Since etastart has been computed and/or no @linkfun.
   }), list( .lmu12 = lmu12, .l.rho = l.rho,
-            .emu12 = emu12, .e.rho = e.rho, 
+            .emu12 = emu12, .e.rho = e.rho,
                             .grho = grho,
                             .irho = irho,
             .imethod = imethod,
@@ -3058,6 +3150,15 @@ betabinomialff.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .l.rho = l.rho, .e.rho = e.rho ))),
   vfamily = c("binom2.rho.ss", "binom2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pmargin <- cbind(eta2theta(eta[, 1], .lmu12 , earg = .emu12 ),
+                     eta2theta(eta[, 2], .lmu12 , earg = .emu12 ))
+    rhovec <-        eta2theta(eta[, 3], .l.rho , earg = .e.rho )
+    okay1 <- all(is.finite(pmargin)) && all( 0 < pmargin & pmargin < 1) &&
+             all(is.finite(rhovec )) && all(-1 < rhovec  & rhovec  < 1)
+    okay1
+  }, list( .lmu12 = lmu12, .l.rho = l.rho,
+           .emu12 = emu12, .e.rho = e.rho ))),
 
   deriv = eval(substitute(expression({
     nvec <- 1
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index f44a297..d0f4452 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -20,7 +20,7 @@ dbiclaytoncop <- function(x1, x2, apar = 0, log = FALSE) {
 
   A <- x1^(-apar) + x2^(-apar) - 1
   logdensity <- log1p(apar) -
-                (1 + apar) * (log(x1) + log(x2)) - 
+                (1 + apar) * (log(x1) + log(x2)) -
                 (2 + 1 / apar) * log(abs(A))  # Avoid warning
 
   out.square <- (x1 < 0) | (x1 > 1) | (x2 < 0) | (x2 > 1)
@@ -71,7 +71,7 @@ rbiclaytoncop <- function(n, apar = 0) {
                           imethod   = 1,
                           parallel  = FALSE,
                           zero = NULL) {
-  
+
   apply.parint <- TRUE
 
 
@@ -86,16 +86,19 @@ rbiclaytoncop <- function(n, apar = 0) {
 
 
   if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) || imethod > 3)
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
 
+
+
   new("vglmff",
-  blurb = c(" bivariate Clayton copula distribution)\n","Links:    ",
-                namesof("apar", lapar, earg = eapar)),
+  blurb = c("Bivariate Clayton copula distribution)\n",
+            "Links:    ", namesof("apar", lapar, earg = eapar)),
 
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
 
@@ -115,7 +118,7 @@ rbiclaytoncop <- function(n, apar = 0) {
          parallel = .parallel ,
          zero = .zero )
     }, list( .zero = zero,
-             .apply.parint = apply.parint, 
+             .apply.parint = apply.parint,
              .lapar = lapar,
              .parallel = parallel ))),
 
@@ -146,14 +149,11 @@ rbiclaytoncop <- function(n, apar = 0) {
     predictors.names <-
       namesof(mynames1, .lapar , earg = .eapar , short = TRUE)
 
+    extra$colnames.y  <- colnames(y)
 
-    extra$dimnamesy1 <- dimnames(y)[[1]]
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
-    
     if (!length(etastart)) {
-      
-      apar.init <- matrix(if (length( .iapar )) .iapar else 0 + NA,
+
+      apar.init <- matrix(if (length( .iapar )) .iapar else NA_real_,
                           n, M / M1, byrow = TRUE)
 
       if (!length( .iapar ))
@@ -178,7 +178,7 @@ rbiclaytoncop <- function(n, apar = 0) {
           if (anyNA(apar.init[, spp.]))
             apar.init[, spp.] <- apar.init0
         }
-          
+
       etastart <- theta2eta(apar.init, .lapar , earg = .eapar )
     }
   }), list( .imethod = imethod,
@@ -186,13 +186,10 @@ rbiclaytoncop <- function(n, apar = 0) {
             .eapar = eapar,
             .iapar = iapar ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta <- as.matrix(eta)
-    fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
-
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(extra$dimnamesy1,
-                                  extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(0.5, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }  , list( .lapar = lapar,
              .eapar = eapar ))),
 
@@ -202,7 +199,7 @@ rbiclaytoncop <- function(n, apar = 0) {
     misc$link <- rep_len( .lapar , M)
     temp.names <- mynames1
     names(misc$link) <- temp.names
-    
+
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:M) {
@@ -240,16 +237,19 @@ rbiclaytoncop <- function(n, apar = 0) {
         ll.elts
       }
     }
-  } , list( .lapar = lapar,
-            .eapar = eapar,
-            .imethod = imethod ))),
+  } , list( .lapar = lapar, .eapar = eapar, .imethod = imethod ))),
   vfamily = c("biclaytoncop"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Alpha <- eta2theta(eta, .lapar , earg = .eapar )
+    okay1 <- all(is.finite(Alpha)) && all(0 < Alpha)
+    okay1
+  } , list( .lapar = lapar, .eapar = eapar, .imethod = imethod ))),
 
   simslot = eval(substitute(
   function(object, nsim) {
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Alpha <- eta2theta(eta, .lapar , earg = .eapar )
@@ -267,13 +267,13 @@ rbiclaytoncop <- function(n, apar = 0) {
 
 
 
-    
+
     AA <- y[, Yindex1]^(-Alpha) + y[, Yindex2]^(-Alpha) - 1
     dAA.dapar <- -y[, Yindex1]^(-Alpha) * log(y[, Yindex1]) -
-                   y[, Yindex2]^(-Alpha) * log(y[, Yindex2])
+                  y[, Yindex2]^(-Alpha) * log(y[, Yindex2])
     dl.dapar <- 1 / (1 + Alpha) - log(y[, Yindex1] * y[, Yindex2]) -
-                 dAA.dapar / AA * (2 + 1 / Alpha ) + log(AA) / Alpha^2
-   
+                dAA.dapar / AA * (2 + 1 / Alpha ) + log(AA) / Alpha^2
+
 
 
     dapar.deta <- dtheta.deta(Alpha, .lapar , earg = .eapar )
@@ -285,7 +285,7 @@ rbiclaytoncop <- function(n, apar = 0) {
             .imethod = imethod ))),
 
   weight = eval(substitute(expression({
-    par <- Alpha + 1  # 20130808
+    par <- Alpha + 1
     denom1 <- (3 * par -2) * (2 * par - 1)
     denom2 <- 2 * (par - 1)
     v1 <- trigamma(1 / denom2)
@@ -293,10 +293,10 @@ rbiclaytoncop <- function(n, apar = 0) {
     v3 <- trigamma((2 * par - 1) / denom2)
     Rho. <- (1 + par  * (v1 - v2) / denom2 +
                         (v2 - v3) / denom2) / denom1
-    
-    out <- 1 / par^2 + 2 / (par * (par - 1) * (2 * par - 1)) +
-           4 * par / (3 * par - 2) - 2 * (2 * par - 1) * Rho. / (par - 1)
-    ned2l.dapar  <- out
+
+    ned2l.dapar  <- 1 / par^2 + 2 / (par * (par - 1) * (2 * par - 1)) +
+                    4 * par / (3 * par - 2) -
+                    2 * (2 * par - 1) * Rho. / (par - 1)
 
     wz <- ned2l.dapar * dapar.deta^2
     c(w) * wz
@@ -348,7 +348,7 @@ dbistudentt <- function(x1, x2, df, rho = 0, log = FALSE) {
 if (FALSE)
 bistudent.deriv.dof <-  function(u, v, nu, rho) {
 
-  
+
   t1 <- qt(u, nu, 1, 0)
   t2 <- qt(v, nu, 1, 0)
   t3 <- -(nu + 2.0) / 2.0
@@ -415,7 +415,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
 
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
 
@@ -434,7 +434,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
          parallel = .parallel ,
          zero = .zero )
   }, list( .zero = zero,
-           .apply.parint = apply.parint, 
+           .apply.parint = apply.parint,
            .parallel = parallel ))),
 
   initialize = eval(substitute(expression({
@@ -467,9 +467,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
               interleave.VGAM(M, M1 = M1)]
 
 
-    extra$dimnamesy1 <- dimnames(y)[[1]]
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
 
@@ -518,15 +516,10 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
             .erho = erho, .edof = edof,
             .idof = idof, .irho = irho ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-
-    eta <- as.matrix(eta)
-    fv.matrix <- matrix(0.0, nrow(eta), extra$ncoly)
-
-
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(extra$dimnamesy1,
-                                  extra$dimnamesy2)
-    fv.matrix
+    NOS <- ncol(eta) / c(M1 = 2)
+    Q1 <- 2
+    fv.mat <- matrix(0, nrow(eta), Q1 * NOS)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }  , list( .lrho = lrho, .ldof = ldof,
              .erho = erho, .edof = edof ))),
 
@@ -589,6 +582,17 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
            .erho = erho, .edof = edof,
            .imethod = imethod ))),
   vfamily = c("bistudentt"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                     .ldof , earg = .edof )
+    Rho <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                     .lrho , earg = .erho )
+    okay1 <- all(is.finite(Dof)) && all(0 < Dof) &&
+             all(is.finite(Rho)) && all(abs(Rho) < 1)
+    okay1
+  }, list( .lrho = lrho, .ldof = ldof,
+           .erho = erho, .edof = edof,
+           .imethod = imethod ))),
   deriv = eval(substitute(expression({
     M1 <- Q1 <- 2
     Dof <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
@@ -610,11 +614,11 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
     eval.d3 <- eval(dee3)
 
     dl.dthetas <-  attr(eval.d3, "gradient")
-   
+
     dl.ddof <- matrix(dl.dthetas[, "Dof"], n, length(Yindex1))
     dl.drho <- matrix(dl.dthetas[, "Rho"], n, length(Yindex2))
 
-  
+
   if (FALSE) {
     dd <- cbind(y, Rho, Dof)
     pp <- apply(dd, 1, function(x)
@@ -626,16 +630,8 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
                      BiCopDeriv(x[1], x[2], family = 2,
                                 x[3], x[4], "par")) / pp
 
- print("head(dl.ddof)")
- print( head(dl.ddof) )
- print("head(alt.dl.ddof)")
- print( head(alt.dl.ddof) )
 
- print("max(abs(alt.dl.drho - dl.drho))")
- print( max(abs(alt.dl.drho - dl.drho)) )
- print("max(abs(alt.dl.ddof - dl.ddof))")
- print( max(abs(alt.dl.ddof - dl.ddof)) )
-    
+
   }
 
 
@@ -661,7 +657,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
     wz22 <- (1 + Rho^2) / (1 - Rho^2)^2 +
             (Dof^2 + 2 * Dof) * Rho^2 *
              beta(3, Dof / 2) / (4 * (1 - Rho^2)^2)
-    wz22 <- wz22 + (Dof^2 + 2 * Dof) * (2 - 3 * Rho^2 + Rho^6) *   
+    wz22 <- wz22 + (Dof^2 + 2 * Dof) * (2 - 3 * Rho^2 + Rho^6) *
             beta(3, Dof / 2) / (16 * (1 - Rho^2)^4)
     wz22 <- wz22 + (Dof^2 + 2 * Dof) * (1 + Rho^2) *    # Replace - by + ???
             beta(2, Dof / 2) / (4 * (1 - Rho^2)^2)  # denom == 4 or 2 ???
@@ -683,7 +679,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
 
 
 
-  
+
 
 
 dbinormcop <- function(x1, x2, rho = 0, log = FALSE) {
@@ -771,7 +767,7 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
 
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
 
@@ -790,7 +786,7 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
          parallel = .parallel ,
          zero = .zero )
   }, list( .zero = zero,
-           .apply.parint = apply.parint, 
+           .apply.parint = apply.parint,
            .parallel = parallel ))),
 
   initialize = eval(substitute(expression({
@@ -821,9 +817,7 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
       namesof(mynames1, .lrho , earg = .erho , short = TRUE))
 
 
-    extra$dimnamesy1 <- dimnames(y)[[1]]
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
 
@@ -861,20 +855,14 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
             .erho = erho,
             .irho = irho ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-
-    eta <- as.matrix(eta)
-    fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
-
-
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(extra$dimnamesy1,
-                                  extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(0.5, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }  , list( .lrho = lrho,
              .erho = erho ))),
 
   last = eval(substitute(expression({
-
     M1 <- extra$M1
     Q1 <- extra$Q1
     misc$link <- rep_len( .lrho , M)
@@ -925,6 +913,11 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
             .erho = erho,
             .imethod = imethod ))),
   vfamily = c("binormalcop"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Rho <- eta2theta(eta, .lrho , earg = .erho )
+    okay1 <- all(is.finite(Rho)) && all(abs(Rho) < 1)
+    okay1
+  }, list( .lrho = lrho, .erho = erho, .imethod = imethod ))),
 
 
 
@@ -933,7 +926,7 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Rho <- eta2theta(eta, .lrho , earg = .erho )
@@ -1042,6 +1035,8 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
     w <- temp5$w
     y <- temp5$y
 
+    extra$colnames.y  <- colnames(y)
+
 
     predictors.names <-
       c(namesof("location1", .llocat, .elocat , tag = FALSE),
@@ -1088,7 +1083,9 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
            .elocat = elocat, .escale = escale,
            .iscale1 = iscale1, .iscale2 = iscale2))),
   linkinv = function(eta, extra = NULL) {
-    cbind(eta[, 1], eta[, 2])
+    NOS <- NCOL(eta) / c(M1 = 4)
+    fv.mat <- eta[, 1:2]
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   },
   last = eval(substitute(expression({
     misc$link <-    c(location1 = .llocat , scale1 = .lscale ,
@@ -1130,6 +1127,18 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   vfamily = c("bilogistic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
+    Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
+    locat2 <- eta2theta(eta[, 3], .llocat , .elocat )
+    Scale2 <- eta2theta(eta[, 4], .lscale , .escale )
+    okay1 <- all(is.finite(locat1)) &&
+             all(is.finite(Scale1)) && all(0 < Scale1) &&
+             all(is.finite(locat2)) &&
+             all(is.finite(Scale2)) && all(0 < Scale2)
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale,
+           .elocat = elocat, .escale = escale ))),
 
 
   simslot = eval(substitute(
@@ -1137,7 +1146,7 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     locat1 <- eta2theta(eta[, 1], .llocat , .elocat )
@@ -1233,7 +1242,7 @@ dbilogis <- function(x1, x2, loc1 = 0, scale1 = 1,
 
 
 
-  logdensity <- log(2) - zedd1 - zedd2 - log(scale1) - 
+  logdensity <- log(2) - zedd1 - zedd2 - log(scale1) -
                 log(scale1) - 3 * log1p(exp(-zedd1) + exp(-zedd2))
 
 
@@ -1352,9 +1361,9 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
 
     predictors.names <-
-      c(namesof("a",  .la  , earg = .ea  , short = TRUE), 
-        namesof("ap", .lap , earg = .eap , short = TRUE), 
-        namesof("b",  .lb  , earg = .eb  , short = TRUE), 
+      c(namesof("a",  .la  , earg = .ea  , short = TRUE),
+        namesof("ap", .lap , earg = .eap , short = TRUE),
+        namesof("b",  .lb  , earg = .eb  , short = TRUE),
         namesof("bp", .lbp , earg = .ebp , short = TRUE))
     extra$y1.lt.y2 = y[, 1] < y[, 2]
 
@@ -1391,12 +1400,14 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
             .ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
             .ia = ia, .iap = iap, .ib = ib, .ibp = ibp))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- NCOL(eta) / c(M1 = 4)
     alpha  <- eta2theta(eta[, 1], .la,  earg = .ea  )
     alphap <- eta2theta(eta[, 2], .lap, earg = .eap )
     beta   <- eta2theta(eta[, 3], .lb,  earg = .eb  )
     betap  <- eta2theta(eta[, 4], .lbp, earg = .ebp )
-    cbind((alphap + beta) / (alphap * (alpha + beta)),
-          (alpha + betap) / (betap * (alpha + beta)))
+    fv.mat <- cbind((alphap + beta) / (alphap * (alpha + beta)),
+                    (alpha + betap) / (betap * (alpha + beta)))
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
            .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
   last = eval(substitute(expression({
@@ -1437,6 +1448,18 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
   }, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
            .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
   vfamily = c("freund61"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha  <- eta2theta(eta[, 1], .la,  earg = .ea  )
+    alphap <- eta2theta(eta[, 2], .lap, earg = .eap )
+    beta   <- eta2theta(eta[, 3], .lb,  earg = .eb  )
+    betap  <- eta2theta(eta[, 4], .lbp, earg = .ebp )
+    okay1 <- all(is.finite(alpha )) && all(0 < alpha ) &&
+             all(is.finite(alphap)) && all(0 < alphap) &&
+             all(is.finite(beta  )) && all(0 < beta  ) &&
+             all(is.finite(betap )) && all(0 < betap )
+    okay1
+  }, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
+           .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
   deriv = eval(substitute(expression({
     tmp88  <- extra$y1.lt.y2
     alpha  <- eta2theta(eta[, 1], .la,  earg = .ea  )
@@ -1571,14 +1594,16 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
     y <- temp5$y
 
 
+    extra$colnames.y  <- colnames(y)
+
     if (any(y[, 1] >= y[, 2]))
       stop("the second column minus the first column must be a vector ",
            "of positive values")
 
 
     predictors.names <-
-      c(namesof("scale",  .lscale,  .escale,  short = TRUE), 
-        namesof("shape1", .lshape1, .eshape1, short = TRUE), 
+      c(namesof("scale",  .lscale,  .escale,  short = TRUE),
+        namesof("shape1", .lshape1, .eshape1, short = TRUE),
         namesof("shape2", .lshape2, .eshape2, short = TRUE))
 
     if (!length(etastart)) {
@@ -1623,11 +1648,13 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
             .iscale = iscale, .ishape1 = ishape1, .ishape2 = ishape2,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- NCOL(eta) / c(M1 = 3)
     a <- eta2theta(eta[, 1], .lscale  ,  .escale )
     p <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
     q <- eta2theta(eta[, 3], .lshape2 , .eshape2 )
-    cbind("y1" = p*a,
-          "y2" = (p+q)*a)
+    fv.mat <-  cbind("y1" = p*a,
+                     "y2" = (p+q)*a)  # Overwrite the colnames:
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
            .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
@@ -1672,6 +1699,16 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
   }, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
            .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = c("bigamma.mckay"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    aparam <- eta2theta(eta[, 1], .lscale  ,  .escale )
+    shape1 <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
+    shape2 <- eta2theta(eta[, 3], .lshape2 , .eshape2 )
+    okay1 <- all(is.finite(aparam)) && all(0 < aparam) &&
+             all(is.finite(shape1)) && all(0 < shape1) &&
+             all(is.finite(shape2)) && all(0 < shape2)
+    okay1
+  }, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+           .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
   deriv = eval(substitute(expression({
     aparam <- eta2theta(eta[, 1], .lscale  ,  .escale )
     shape1 <- eta2theta(eta[, 2], .lshape1 , .eshape1 )
@@ -1766,7 +1803,7 @@ pbifrankcop <- function(q1, q2, apar) {
   ans <- as.numeric(index)
   if (any(!index))
   ans[!index] <- logb(1 + ((apar[!index])^(x[!index]) - 1)*
-                 ((apar[!index])^(y[!index]) - 1)/(apar[!index] - 1), 
+                 ((apar[!index])^(y[!index]) - 1)/(apar[!index] - 1),
                  base = apar[!index])
   ind2 <- (abs(apar - 1) < .Machine$double.eps)
   ans[ind2] <- x[ind2] * y[ind2]
@@ -1839,6 +1876,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
 
 
 
+
  bifrankcop <- function(lapar = "loge", iapar = 2, nsimEIM = 250) {
 
   lapar <- as.list(substitute(lapar))
@@ -1864,7 +1902,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
   initialize = eval(substitute(expression({
 
     if (any(y <= 0) || any(y >= 1))
-      stop("the response must have values between 0 and 1") 
+      stop("the response must have values between 0 and 1")
 
     temp5 <-
     w.y.check(w = w, y = y,
@@ -1882,8 +1920,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     predictors.names <-
       c(namesof("apar", .lapar , earg = .eapar, short = TRUE))
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
       apar.init <- rep_len(.iapar , n)
@@ -1891,11 +1928,10 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     }
   }), list( .lapar = lapar, .eapar = eapar, .iapar = iapar))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    apar <- eta2theta(eta, .lapar , earg = .eapar )
-    fv.matrix <- matrix(0.5, length(apar), 2)
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(0.5, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lapar = lapar, .eapar = eapar ))),
   last = eval(substitute(expression({
     misc$link <-    c("apar" = .lapar )
@@ -1925,6 +1961,11 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .lapar = lapar, .eapar = eapar ))),
   vfamily = c("bifrankcop"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    apar <- eta2theta(eta, .lapar , earg = .eapar )
+    okay1 <- all(is.finite(apar)) && all(0 < apar)
+    okay1
+  }, list( .lapar = lapar, .eapar = eapar ))),
 
 
 
@@ -1933,7 +1974,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     apar <- eta2theta(eta, .lapar , earg = .eapar )
@@ -1953,7 +1994,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
 
     denom <- apar-1 + (apar^y[, 1]  - 1) * (apar^y[, 2]  - 1)
     tmp700 <- 2*apar^(y[, 1]+y[, 2]) - apar^y[, 1] - apar^y[, 2]
-    numerator <- 1 + y[, 1] * apar^(y[, 1] - 1) * (apar^y[, 2]  - 1) + 
+    numerator <- 1 + y[, 1] * apar^(y[, 1] - 1) * (apar^y[, 2]  - 1) +
                      y[, 2] * apar^(y[, 2] - 1) * (apar^y[, 1]  - 1)
     Dl.dapar <- 1/(apar - 1) + 1/(apar*log(apar)) +
                 (y[, 1]+y[, 2])/apar - 2 * numerator / denom
@@ -1983,8 +2024,8 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     c(w) * wz
   } else {
       nump <- apar^(y[, 1]+y[, 2]-2) * (2 * y[, 1] * y[, 2] +
-                    y[, 1]*(y[, 1] - 1) + y[, 2]*(y[, 2] - 1)) - 
-                    y[, 1]*(y[, 1] - 1) * apar^(y[, 1]-2) - 
+                    y[, 1]*(y[, 1] - 1) + y[, 2]*(y[, 2] - 1)) -
+                    y[, 1]*(y[, 1] - 1) * apar^(y[, 1]-2) -
                     y[, 2]*(y[, 2] - 1) * apar^(y[, 2]-2)
       D2l.dapar2 <- 1/(apar - 1)^2 + (1+log(apar))/(apar*log(apar))^2 +
                     (y[, 1]+y[, 2])/apar^2 + 2 *
@@ -2028,7 +2069,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
             namesof("theta", ltheta, etheta)),
   initialize = eval(substitute(expression({
     if (any(y[, 1] <= 0) || any(y[, 2] <= 1))
-      stop("the response has values that are out of range") 
+      stop("the response has values that are out of range")
 
     temp5 <-
     w.y.check(w = w, y = y,
@@ -2042,13 +2083,15 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     w <- temp5$w
     y <- temp5$y
 
+    extra$colnames.y  <- colnames(y)
+
 
     predictors.names <-
       c(namesof("theta", .ltheta , .etheta , short = TRUE))
 
     if (!length(etastart)) {
       theta.init <- if (length( .itheta)) {
-        rep_len( .itheta , n) 
+        rep_len( .itheta , n)
       } else {
         1 / (y[, 2] - 1 + 0.01)
       }
@@ -2057,15 +2100,17 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     }
   }), list( .ltheta = ltheta, .etheta = etheta, .itheta = itheta))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- NCOL(eta) / c(M1 = 1)
     theta <- eta2theta(eta, .ltheta , .etheta )
-    cbind(theta*exp(theta), 1+1/theta)
+    fv.mat <- cbind(theta * exp(theta), 1 + 1 / theta)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .ltheta = ltheta, .etheta = etheta ))),
   last = eval(substitute(expression({
     misc$link <-    c("theta" = .ltheta )
 
     misc$earg <- list("theta" = .etheta )
 
-    misc$expected <- .expected 
+    misc$expected <- .expected
     misc$multipleResponses <- FALSE
   }), list( .ltheta = ltheta,
             .etheta = etheta, .expected = expected ))),
@@ -2087,6 +2132,11 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .ltheta = ltheta, .etheta = etheta ))),
   vfamily = c("gammahyperbola"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    theta <- eta2theta(eta, .ltheta , .etheta )
+    okay1 <- all(is.finite(theta)) && all(0 < theta)
+    okay1
+  }, list( .ltheta = ltheta, .etheta = etheta ))),
   deriv = eval(substitute(expression({
     theta <- eta2theta(eta, .ltheta , .etheta )
     Dl.dtheta <- exp(-theta) * y[, 1] * (1+theta) / theta^2 - y[, 2]
@@ -2137,11 +2187,10 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
 
   new("vglmff",
   blurb = c("Bivariate Farlie-Gumbel-Morgenstern ",
-            "exponential distribution\n",  # Morgenstern's 
+            "exponential distribution\n",  # Morgenstern's
             "Links:    ",
             namesof("apar", lapar, earg = earg )),
   initialize = eval(substitute(expression({
-
     temp5 <-
     w.y.check(w = w, y = y,
               Is.nonnegative.y = TRUE,
@@ -2159,8 +2208,7 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     predictors.names <-
       c(namesof("apar", .lapar , earg = .earg , short = TRUE))
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 = dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
       ainit  <- if (length(.iapar))  rep_len( .iapar , n) else {
@@ -2176,11 +2224,10 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
   }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lapar , earg = .earg )
-    fv.matrix <- matrix(1, length(alpha), 2)
-    if (length(extra$dimnamesy2))
-        dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(1, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lapar = lapar, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c("apar" = .lapar )
@@ -2211,8 +2258,13 @@ bifrankcop.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .lapar = lapar, .earg = earg, .tola0 = tola0 ))),
   vfamily = c("bifgmexp"),  # morgenstern
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
+    okay1 <- all(is.finite(alpha)) && all(abs(alpha) < 1)
+    okay1
+  }, list( .lapar = lapar, .earg = earg, .tola0 = tola0 ))),
   deriv = eval(substitute(expression({
-    alpha  <- eta2theta(eta, .lapar , earg = .earg )
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
     alpha[abs(alpha) < .tola0 ] <- .tola0
     numerator <- 1 - 2*(exp(-y[, 1]) + exp(-y[, 2])) +
                  4*exp(-y[, 1] - y[, 2])
@@ -2377,8 +2429,7 @@ pbifgmcop <- function(q1, q2, apar) {
     predictors.names <-
       namesof("apar", .lapar , earg = .earg , short = TRUE)
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
       ainit  <- if (length( .iapar ))  .iapar else {
@@ -2405,11 +2456,10 @@ pbifgmcop <- function(q1, q2, apar) {
   }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lapar , earg = .earg )
-    fv.matrix <- matrix(0.5, length(alpha), 2)
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(0.5, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lapar = lapar, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c("apar" = .lapar )
@@ -2436,6 +2486,11 @@ pbifgmcop <- function(q1, q2, apar) {
     }
   }, list( .lapar = lapar, .earg = earg ))),
   vfamily = c("bifgmcop"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
+    okay1 <- all(is.finite(alpha)) && all(abs(alpha) < 1)
+    okay1
+  }, list( .lapar = lapar, .earg = earg ))),
 
 
   simslot = eval(substitute(
@@ -2443,7 +2498,7 @@ pbifgmcop <- function(q1, q2, apar) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     alpha <- eta2theta(eta, .lapar , earg = .earg )
@@ -2453,7 +2508,7 @@ pbifgmcop <- function(q1, q2, apar) {
 
 
   deriv = eval(substitute(expression({
-    alpha  <- eta2theta(eta, .lapar , earg = .earg )
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
 
     dalpha.deta <- dtheta.deta(alpha, .lapar , earg = .earg )
 
@@ -2517,6 +2572,8 @@ pbifgmcop <- function(q1, q2, apar) {
     w <- temp5$w
     y <- temp5$y
 
+    extra$colnames.y  <- colnames(y)
+
 
 
     predictors.names <-
@@ -2535,9 +2592,11 @@ pbifgmcop <- function(q1, q2, apar) {
   }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
     alpha <- eta2theta(eta, .lapar , earg = .earg )
-    cbind(rep_len(1, length(alpha)),
-          rep_len(1, length(alpha)))
+    fv.mat <- matrix(1, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lapar = lapar, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c("apar" = .lapar )
@@ -2568,7 +2627,7 @@ pbifgmcop <- function(q1, q2, apar) {
 
 
       if (summation) {
-      sum(bad) * (-1.0e10) + 
+      sum(bad) * (-1.0e10) +
       sum(w[!bad] * (-y[!bad, 1] - y[!bad, 2] +
           alpha[!bad] * y[!bad, 1] * y[!bad, 2] + log(denom[!bad])))
       } else {
@@ -2577,8 +2636,13 @@ pbifgmcop <- function(q1, q2, apar) {
     }
   }, list( .lapar = lapar, .earg = earg ))),
   vfamily = c("bigumbelIexp"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
+    okay1 <- all(is.finite(alpha))
+    okay1
+  }, list( .lapar = lapar, .earg = earg ))),
   deriv = eval(substitute(expression({
-    alpha  <- eta2theta(eta, .lapar , earg = .earg )
+    alpha <- eta2theta(eta, .lapar , earg = .earg )
     numerator <- (alpha * y[, 1] - 1) * y[, 2] +
                  (alpha * y[, 2] - 1) * y[, 1] + 1
     denom <- (alpha * y[, 1] - 1) * (alpha * y[, 2] - 1) + alpha
@@ -2733,8 +2797,7 @@ biplackettcop.control <- function(save.weights = TRUE, ...) {
     predictors.names <-
       namesof("oratio", .link , earg = .earg, short = TRUE)
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
       orinit <- if (length( .ioratio ))  .ioratio else {
@@ -2758,11 +2821,10 @@ biplackettcop.control <- function(save.weights = TRUE, ...) {
   }), list( .ioratio = ioratio, .link = link, .earg = earg,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    oratio <- eta2theta(eta, .link , earg = .earg )
-    fv.matrix <- matrix(0.5, length(oratio), 2)
-    if (length(extra$dimnamesy2))
-        dimnames(fv.matrix) <- list(dimnames(eta)[[1]], extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(0.5, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c(oratio = .link)
@@ -2792,6 +2854,11 @@ biplackettcop.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("biplackettcop"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    oratio <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(oratio)) && all(0 < oratio)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
   simslot = eval(substitute(
@@ -2799,7 +2866,7 @@ biplackettcop.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     oratio <- eta2theta(eta, .link , earg = .earg )
@@ -2809,7 +2876,7 @@ biplackettcop.control <- function(save.weights = TRUE, ...) {
 
 
   deriv = eval(substitute(expression({
-    oratio  <- eta2theta(eta, .link , earg = .earg )
+    oratio <- eta2theta(eta, .link , earg = .earg )
     doratio.deta <- dtheta.deta(oratio, .link , earg = .earg )
     y1 <- y[, 1]
     y2 <- y[, 2]
@@ -2982,8 +3049,7 @@ biamhcop.control <- function(save.weights = TRUE, ...) {
     predictors.names <-
       c(namesof("apar", .lapar, earg = .eapar, short = TRUE))
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
       ainit  <- if (length( .iapar ))  .iapar else {
@@ -3000,11 +3066,10 @@ biamhcop.control <- function(save.weights = TRUE, ...) {
   }), list( .lapar = lapar, .eapar = eapar, .iapar = iapar,
             .imethod = imethod))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    apar <- eta2theta(eta, .lapar, earg = .eapar )
-    fv.matrix <- matrix(0.5, length(apar), 2)
-    if (length(extra$dimnamesy2))
-        dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
-    fv.matrix
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(0.5, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lapar = lapar, .eapar = eapar ))),
   last = eval(substitute(expression({
     misc$link <-    c("apar" = .lapar )
@@ -3034,6 +3099,11 @@ biamhcop.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .lapar = lapar, .eapar = eapar ))),
   vfamily = c("biamhcop"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    apar <- eta2theta(eta, .lapar, earg = .eapar )
+    okay1 <- all(is.finite(apar)) && all(abs(apar) < 1)
+    okay1
+  }, list( .lapar = lapar, .eapar = eapar ))),
 
 
 
@@ -3042,7 +3112,7 @@ biamhcop.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     apar <- eta2theta(eta, .lapar , earg = .eapar )
@@ -3219,7 +3289,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
                      x = x,
                      bool = .eq.mean ,  #
                      constraints = constraints.orig,
-                     apply.int = TRUE, 
+                     apply.int = TRUE,
                      cm.default           = cmk.m,
                      cm.intercept.default = cm1.m)
 
@@ -3288,8 +3358,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
       namesof("sd2",   .lsd2 ,   earg = .esd2 ,   short = TRUE),
       namesof("rho",   .lrho ,   earg = .erho ,   short = TRUE))
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
 
     if (!length(etastart)) {
       imean1 <- rep_len(if (length( .imean1 )) .imean1 else
@@ -3320,12 +3389,11 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
             .isd1   = isd1,   .isd2   = isd2,
             .irho   = irho ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
-    mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
-    fv.matrix <- cbind(mean1, mean2)
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
-    fv.matrix
+    NOS <- ncol(eta) / c(M1 = 5)
+    mean1 <- eta2theta(eta[, 1], .lmean1 , earg = .emean1 )
+    mean2 <- eta2theta(eta[, 2], .lmean2 , earg = .emean2 )
+    fv.mat <- cbind(mean1, mean2)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }  , list( .lmean1 = lmean1, .lmean2 = lmean2,
              .emean1 = emean1, .emean2 = emean2,
              .lsd1   = lsd1  , .lsd2   = lsd2  , .lrho = lrho,
@@ -3339,7 +3407,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
                       "rho"   = .lrho )
 
     misc$earg <- list("mean1" = .emean1 ,
-                      "mean2" = .emean2 , 
+                      "mean2" = .emean2 ,
                       "sd1"   = .esd1 ,
                       "sd2"   = .esd2 ,
                       "rho"   = .erho )
@@ -3381,6 +3449,23 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
             .esd1   = esd1  , .esd2   = esd2  , .erho = erho,
             .imethod = imethod ))),
   vfamily = c("binormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
+    mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
+    sd1   <- eta2theta(eta[, 3], .lsd1  , earg = .esd1  )
+    sd2   <- eta2theta(eta[, 4], .lsd2  , earg = .esd2  )
+    Rho   <- eta2theta(eta[, 5], .lrho  , earg = .erho  )
+    okay1 <- all(is.finite(mean1)) &&
+             all(is.finite(mean2)) &&
+             all(is.finite(sd1  )) && all(0 < sd1) &&
+             all(is.finite(sd2  )) && all(0 < sd2) &&
+             all(is.finite(Rho  )) && all(abs(Rho) < 1)
+    okay1
+  } , list( .lmean1 = lmean1, .lmean2 = lmean2,
+            .emean1 = emean1, .emean2 = emean2,
+            .lsd1   = lsd1  , .lsd2   = lsd2  , .lrho = lrho,
+            .esd1   = esd1  , .esd2   = esd2  , .erho = erho,
+            .imethod = imethod ))),
 
 
 
@@ -3389,7 +3474,7 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     mean1 <- eta2theta(eta[, 1], .lmean1 , earg = .emean1 )
@@ -3432,11 +3517,11 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
                 zedd1 * zedd2 / temp5 +
                 Rho / temp5
 
-    dmean1.deta <- dtheta.deta(mean1, .lmean1) 
-    dmean2.deta <- dtheta.deta(mean2, .lmean2) 
-    dsd1.deta   <- dtheta.deta(sd1  , .lsd1  ) 
-    dsd2.deta   <- dtheta.deta(sd2  , .lsd2  ) 
-    drho.deta   <- dtheta.deta(Rho  , .lrho  ) 
+    dmean1.deta <- dtheta.deta(mean1, .lmean1)
+    dmean2.deta <- dtheta.deta(mean2, .lmean2)
+    dsd1.deta   <- dtheta.deta(sd1  , .lsd1  )
+    dsd2.deta   <- dtheta.deta(sd2  , .lsd2  )
+    drho.deta   <- dtheta.deta(Rho  , .lrho  )
     dthetas.detas  <- cbind(dmean1.deta,
                            dmean2.deta,
                            dsd1.deta,
@@ -3510,11 +3595,14 @@ gumbelI <-
           namesof("a", la, earg =  earg )),
   initialize = eval(substitute(expression({
     if (!is.matrix(y) || ncol(y) != 2)
-        stop("the response must be a 2 column matrix") 
+        stop("the response must be a 2 column matrix")
 
     if (any(y < 0))
         stop("the response must have non-negative values only")
 
+
+    extra$colnames.y  <- colnames(y)
+
     predictors.names <-
       c(namesof("a", .la, earg =  .earg , short = TRUE))
     if (!length(etastart)) {
@@ -3528,9 +3616,10 @@ gumbelI <-
       }
   }), list( .ia=ia, .la = la, .earg = earg, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .la , earg =  .earg )
-    cbind(rep_len(1, length(alpha)),
-          rep_len(1, length(alpha)))
+    NOS <- NCOL(eta) / c(M1 = 1)
+    Q1 <- 2
+    fv.mat <- matrix(1, NROW(eta), NOS * Q1)
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .la = la ))),
   last = eval(substitute(expression({
     misc$link <-    c("a" = .la )
@@ -3564,6 +3653,11 @@ gumbelI <-
     }
   }, list( .la = la, .earg = earg ))),
   vfamily = c("gumbelI"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha  <- eta2theta(eta, .la , earg = .earg )
+    okay1 <- all(is.finite(alpha))
+    okay1
+  } , list( .la = la, .earg = earg ))),
   deriv = eval(substitute(expression({
       alpha  <- eta2theta(eta, .la, earg =  .earg )
       numerator <- (alpha*y[,1] - 1)*y[,2] + (alpha*y[,2] - 1)*y[,1] + 1
@@ -3601,8 +3695,8 @@ kendall.tau <- function(x, y, exact = FALSE, max.n = 3000) {
 
   NN <- if (!exact && N > max.n) {
     cindex <- sample.int(n = N, size = max.n, replace = FALSE)
-    x <- x[cindex] 
-    y <- y[cindex] 
+    x <- x[cindex]
+    y <- y[cindex]
     max.n
   } else {
     N
@@ -3630,13 +3724,13 @@ kendall.tau <- function(x, y, exact = TRUE, max.n = 1000) {
     stop("arguments 'x' and 'y' do not have equal lengths")
   index <- iam(NA, NA, M = N, both = TRUE)
 
-  index$row.index <- index$row.index[-(1:N)] 
-  index$col.index <- index$col.index[-(1:N)] 
+  index$row.index <- index$row.index[-(1:N)]
+  index$col.index <- index$col.index[-(1:N)]
 
   NN <- if (!exact && N > max.n) {
     cindex <- sample.int(n = N, size = max.n, replace = FALSE)
-    index$row.index <- index$row.index[cindex] 
-    index$col.index <- index$col.index[cindex] 
+    index$row.index <- index$row.index[cindex]
+    index$col.index <- index$col.index[cindex]
     max.n
   } else{
     choose(N, 2)
diff --git a/R/family.categorical.R b/R/family.categorical.R
index ed06f61..9ff607f 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -43,6 +43,7 @@ process.categorical.data.VGAM <- expression({
     nn <- nrow(y)
   }
 
+
   nvec <- rowSums(y)
 
   if (min(y) < 0 || any(round(y) != y))
@@ -249,10 +250,10 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
 
-    delete.zero.colns <- TRUE 
+    delete.zero.colns <- TRUE
     eval(process.categorical.data.VGAM)
     extra$wy.prod <- TRUE
-    M <- ncol(y) - 1 
+    M <- ncol(y) - 1
 
     mynames <- if ( .reverse )
       paste("P[Y", .fillerChar, "=", .fillerChar, 2:(M+1), "|Y",
@@ -266,8 +267,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     extra$mymat <- if ( .reverse ) tapplymat1(y, "cumsum") else
                   tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
   }), list( .earg = earg, .link = link, .reverse = reverse,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -275,9 +275,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   linkinv = eval(substitute( function(eta, extra = NULL) {
     if (!is.matrix(eta))
       eta <- as.matrix(eta)
-    fv.matrix <-
-    if ( .reverse ) {
-      M <- ncol(eta)
+    fv.mat <- if ( .reverse ) {
+      M <- NCOL(eta)
       djr <- eta2theta(eta, .link , earg = .earg )
       temp <- tapplymat1(1 - djr[, M:1], "cumprod")[, M:1]
       cbind(1, djr) * cbind(temp, 1)
@@ -286,10 +285,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       temp <- tapplymat1(1 - dj, "cumprod")
       cbind(dj, 1) * cbind(1, temp)
     }
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) = list(dimnames(eta)[[1]],
-                                 extra$dimnamesy2)
-    fv.matrix
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = 1)
   }, list( .earg = earg, .link = link, .reverse = reverse) )),
   last = eval(substitute(expression({
     misc$link <- rep_len( .link , M)
@@ -305,7 +301,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     misc$fillerChar <- .fillerChar
     misc$whitespace <- .whitespace
 
-    extra <- list()  # kill what was used 
+    extra <- list()  # kill what was used
   }), list( .earg = earg, .link = link, .reverse = reverse,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -348,6 +344,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       }
     },
   vfamily = c("sratio", "VGAMordinal", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    djr <- eta2theta(eta, .link , earg = .earg )  # dj or djr
+    okay1 <- all(is.finite(djr)) && all(0 < djr & djr < 1)
+    okay1
+  }, list( .earg = earg, .link = link, .reverse = reverse) )),
   deriv = eval(substitute(expression({
     if (!length(extra$mymat)) {
       extra$mymat <- if ( .reverse ) tapplymat1(y, "cumsum") else
@@ -401,7 +402,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
   new("vglmff",
-  blurb = c("Continuation ratio model\n\n", 
+  blurb = c("Continuation ratio model\n\n",
             "Links:    ",
             namesof(if (reverse)
             ifelse(whitespace, "P[Y < j+1|Y <= j+1]",
@@ -452,9 +453,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
 
-    delete.zero.colns <- TRUE 
+    delete.zero.colns <- TRUE
     eval(process.categorical.data.VGAM)
-    M <- ncol(y) - 1 
+    M <- ncol(y) - 1
 
     mynames <- if ( .reverse )
       paste("P[Y", .fillerChar, "<", .fillerChar, 2:(M+1), "|Y",
@@ -469,8 +470,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                    tapplymat1(y, "cumsum") else
                    tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
   }), list( .earg = earg, .link = link, .reverse = reverse,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -478,7 +478,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   linkinv = eval(substitute( function(eta, extra = NULL) {
     if (!is.matrix(eta))
       eta <- as.matrix(eta)
-    fv.matrix <- if ( .reverse ) {
+    fv.mat <- if ( .reverse ) {
       M <- ncol(eta)
       djrs <- eta2theta(eta, .link , earg = .earg )
       temp <- tapplymat1(djrs[, M:1], "cumprod")[, M:1]
@@ -488,10 +488,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       temp <- tapplymat1(djs, "cumprod")
       cbind(1 - djs, 1) * cbind(1, temp)
     }
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
-                                  extra$dimnamesy2)
-    fv.matrix
+    label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = 1)
   }, list( .earg = earg, .link = link, .reverse = reverse) )),
   last = eval(substitute(expression({
 
@@ -509,7 +506,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     misc$whitespace <- .whitespace
 
 
-    extra <- list()  # kill what was used 
+    extra <- list()  # kill what was used
   }), list( .earg = earg, .link = link, .reverse = reverse,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -553,6 +550,11 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     }
   },
   vfamily = c("cratio", "VGAMordinal", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    djrs <- eta2theta(eta, .link , earg = .earg )  # djs or djrs
+    okay1 <- all(is.finite(djrs)) && all(0 < djrs & djrs < 1)
+    okay1
+  }, list( .earg = earg, .link = link, .reverse = reverse) )),
 
   deriv = eval(substitute(expression({
     if (!length(extra$mymat)) {
@@ -601,7 +603,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
  vglm.multinomial.control <-
-  function(maxit = 21, panic = FALSE, 
+  function(maxit = 21, panic = FALSE,
            criterion = c("aic1", "aic2", names( .min.criterion.VGAM )),
            ...) {
   if (mode(criterion) != "character" && mode(criterion) != "name")
@@ -678,7 +680,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
   new("vglmff",
-  blurb = c("Multinomial logit model\n\n", 
+  blurb = c("Multinomial logit model\n\n",
             "Links:    ",
          if (is.numeric(refLevel)) {
          if (refLevel < 0) {
@@ -746,9 +748,10 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
 
-    delete.zero.colns <- TRUE 
+    delete.zero.colns <- TRUE
     eval(process.categorical.data.VGAM)
 
+
     M <- ncol(y)-1
     use.refLevel <- if (is.numeric( .refLevel )) {
       if ( .refLevel < 0) M+1 else .refLevel
@@ -770,6 +773,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
             "]", .fillerChar, "/", .fillerChar, "mu[,",
             use.refLevel, "])", sep = "")
 
+    extra$colnames.y  <- colnames(y)
   }), list( .refLevel = refLevel,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -777,14 +781,15 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   linkinv = eval(substitute( function(eta, extra = NULL) {
     if (anyNA(eta))
       warning("there are NAs in eta in slot inverse")
-    ans <- multilogit(eta, refLevel = extra$use.refLevel, # .refLevel ,
+    ans <- multilogit(eta,
+                      refLevel = extra$use.refLevel,  # .refLevel ,
                       inverse = TRUE)
     if (anyNA(ans))
       warning("there are NAs here in slot linkinv")
     if (min(ans) == 0 || max(ans) == 1)
       warning("fitted probabilities numerically 0 or 1 occurred")
 
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = 1)
   }), list( .refLevel = refLevel )),
 
   last = eval(substitute(expression({
@@ -811,7 +816,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
           ))),
 
   linkfun = eval(substitute( function(mu, extra = NULL) {
-    multilogit(mu, refLevel = extra$use.refLevel)  # .refLevel 
+    multilogit(mu, refLevel = extra$use.refLevel)  # .refLevel
   }), list( .refLevel = refLevel )),
 
   loglikelihood =
@@ -842,12 +847,19 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     }
   },
   vfamily = c("multinomial", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    probs <- multilogit(eta, refLevel = extra$use.refLevel,
+                        inverse = TRUE)  # .refLevel
+    okay1 <- all(is.finite(probs)) && all(0 < probs & probs < 1)
+    okay1
+  }, list( .refLevel = refLevel ))),
+
   deriv = eval(substitute(expression({
     use.refLevel <- extra$use.refLevel  # Restore its value
     c(w) * (y[, -use.refLevel] - mu[, -use.refLevel])
   }), list( .refLevel = refLevel ))),
   weight = eval(substitute(expression({
-    mytiny <- (mu < sqrt(.Machine$double.eps)) | 
+    mytiny <- (mu < sqrt(.Machine$double.eps)) |
               (mu > 1.0 - sqrt(.Machine$double.eps))
 
     if (M == 1) {
@@ -880,7 +892,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
  cumulative <- function(link = "logit",
                         parallel = FALSE,  # Does not apply to the intercept
-                        reverse = FALSE, 
+                        reverse = FALSE,
                         multiple.responses = FALSE,
                         whitespace = FALSE) {
 
@@ -909,7 +921,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   blurb = if ( multiple.responses )
           c(paste("Multivariate cumulative", link, "model\n\n"),
           "Links:   ",
-          namesof(if (reverse) 
+          namesof(if (reverse)
                   ifelse(whitespace, "P[Y1 >= j+1]", "P[Y1>=j+1]") else
                   ifelse(whitespace, "P[Y1 <= j]",   "P[Y1<=j]"),
                   link, earg = earg),
@@ -1003,7 +1015,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       if (any(w != 1) || ncol(cbind(w)) != 1)
           stop("the 'weights' argument must be a vector of all ones")
       Llevels <- max(y)
-      delete.zero.colns <- FALSE 
+      delete.zero.colns <- FALSE
       orig.y <- cbind(y)  # Convert y into a matrix if necessary
       NOS <- ncol(cbind(orig.y))
       use.y <- use.mustart <- NULL
@@ -1033,6 +1045,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       extra$Llevels <- Llevels
   } else {
 
+
       delete.zero.colns <- TRUE
 
       eval(process.categorical.data.VGAM)
@@ -1053,8 +1066,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                 mustart[,iii] <- weighted.mean(y[,iii], w)
       }
 
-      if (length(dimnames(y)))
-        extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
   }
   }), list( .reverse = reverse, .multiple.responses = multiple.responses,
             .link = link, .earg = earg,
@@ -1067,7 +1079,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
         if ( .multiple.responses ) {
           NOS <- extra$NOS
           Llevels <- extra$Llevels
-          fv.matrix <- matrix(0, nrow(eta), NOS*Llevels)
+          fv.mat <- matrix(0, nrow(eta), NOS * Llevels)
           for (iii in 1:NOS) {
             cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
             aindex <- (iii-1)*(Llevels) + 1:(Llevels)
@@ -1075,7 +1087,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
               ccump <- cbind(1,
                              eta2theta(eta[, cindex, drop = FALSE],
                                        .link , earg = .earg ))
-              fv.matrix[,aindex] <-
+              fv.mat[, aindex] <-
                   cbind(-tapplymat1(ccump, "diff"),
                         ccump[, ncol(ccump)])
             } else {
@@ -1083,24 +1095,22 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                                       .link ,
                                       earg = .earg ),
                             1)
-              fv.matrix[,aindex] <-
+              fv.mat[, aindex] <-
                   cbind(cump[, 1], tapplymat1(cump, "diff"))
             }
           }
-          fv.matrix
+          label.cols.y(fv.mat, NOS = NOS,
+                       colnames.y = if (is.null(extra$colnames.y)) NULL else
+                         rep_len(extra$colnames.y, ncol(fv.mat)))
         } else {
-          fv.matrix <-
-          if ( .reverse ) {
+          fv.mat <- if ( .reverse ) {
             ccump <- cbind(1, eta2theta(eta, .link , earg = .earg ))
             cbind(-tapplymat1(ccump, "diff"), ccump[, ncol(ccump)])
           } else {
             cump <- cbind(eta2theta(eta, .link , earg = .earg ), 1)
             cbind(cump[, 1], tapplymat1(cump, "diff"))
           }
-          if (length(extra$dimnamesy2))
-            dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
-                                        extra$dimnamesy2)
-          fv.matrix
+          label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = 1)
         }
         answer
     }, list( .reverse = reverse,
@@ -1138,7 +1148,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
             .whitespace = whitespace ))),
 
   linkfun = eval(substitute( function(mu, extra = NULL) {
-    answer <- 
+    answer <-
     if ( .multiple.responses ) {
       NOS <- extra$NOS
       Llevels <- extra$Llevels
@@ -1146,7 +1156,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       for (iii in 1:NOS) {
         cindex <- (iii-1)*(Llevels-1) + 1:(Llevels-1)
         aindex <- (iii-1)*(Llevels) + 1:(Llevels)
-        cump <- tapplymat1(as.matrix(mu[,aindex]), "cumsum")
+        cump <- tapplymat1(as.matrix(mu[, aindex]), "cumsum")
         eta.matrix[,cindex] =
             theta2eta(if ( .reverse ) 1-cump[, 1:(Llevels-1)] else
                   cump[, 1:(Llevels-1)], .link , earg = .earg )
@@ -1192,6 +1202,29 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     }
   },
   vfamily = c("cumulative", "VGAMordinal", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    if ( .multiple.responses ) {
+      return(TRUE)
+    }
+    probs <-
+      if ( .reverse ) {
+        ccump <- cbind(1, eta2theta(eta, .link , earg = .earg ))
+        cbind(-tapplymat1(ccump, "diff"), ccump[, ncol(ccump)])
+      } else {
+        cump <- cbind(eta2theta(eta, .link , earg = .earg ), 1)
+        cbind(cump[, 1], tapplymat1(cump, "diff"))
+      }
+    okay1 <- all(is.finite(probs)) && all(0 < probs & probs < 1)
+    if (!okay1)
+      warning("It seems that the nonparallelism assumption has resulted in ",
+              "intersecting linear/additive predictors. Try propodds() or ",
+              "fitting a partial nonproportional odds model or choosing ",
+              "some other link function, etc.")
+    okay1
+  }, list( .link = link, .earg = earg,
+           .reverse = reverse,
+           .multiple.responses = multiple.responses ))),
+
   deriv = eval(substitute(expression({
     mu.use <- pmax(mu, .Machine$double.eps * 1.0e-0)
     deriv.answer <-
@@ -1209,7 +1242,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
           (y[,   aindex, drop = FALSE] / mu.use[,   aindex, drop = FALSE] -
            y[, 1+aindex, drop = FALSE] / mu.use[, 1+aindex, drop = FALSE])
       }
-      (if ( .reverse ) -c(w)  else c(w)) * dcump.deta * resmat 
+      (if ( .reverse ) -c(w)  else c(w)) * dcump.deta * resmat
     } else {
       cump <- eta2theta(eta, .link , earg = .earg )
       dcump.deta <- dtheta.deta(cump, .link , earg = .earg )
@@ -1345,7 +1378,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
 
-    delete.zero.colns <- TRUE 
+    delete.zero.colns <- TRUE
     eval(process.categorical.data.VGAM)
     M <- ncol(y) - 1
     mynames <- if ( .reverse )
@@ -1361,8 +1394,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       namesof(mynames, .link , short = TRUE, earg = .earg )
     y.names <- paste("mu", 1:(M+1), sep = "")
 
-    if (length(dimnames(y)))
-      extra$dimnamesy2 <- dimnames(y)[[2]]
+    extra$colnames.y  <- colnames(y)
   }), list( .earg = earg, .link = link, .reverse = reverse,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -1371,7 +1403,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     if (!is.matrix(eta))
       eta <- as.matrix(eta)
     M <- ncol(eta)
-    fv.matrix <- if ( .reverse ) {
+    fv.mat <- if ( .reverse ) {
       zetar <- eta2theta(eta, .link , earg = .earg )
       temp <- tapplymat1(zetar[, M:1], "cumprod")[, M:1, drop = FALSE]
       cbind(temp, 1) / drop(1 + temp %*% rep(1, ncol(temp)))
@@ -1380,10 +1412,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       temp <- tapplymat1(zeta, "cumprod")
       cbind(1, temp) / drop(1 + temp %*% rep(1, ncol(temp)))
     }
-    if (length(extra$dimnamesy2))
-      dimnames(fv.matrix) <- list(dimnames(eta)[[1]],
-                                  extra$dimnamesy2)
-    fv.matrix
+   label.cols.y(fv.mat, colnames.y = extra$colnames.y, NOS = 1)
   }, list( .earg = earg, .link = link, .reverse = reverse) )),
 
   last = eval(substitute(expression({
@@ -1436,6 +1465,22 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     }
   },
   vfamily = c("acat", "VGAMordinal", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    if (!is.matrix(eta))
+      eta <- as.matrix(eta)
+    M <- ncol(eta)
+    probs <- if ( .reverse ) {
+      zetar <- eta2theta(eta, .link , earg = .earg )
+      temp <- tapplymat1(zetar[, M:1], "cumprod")[, M:1, drop = FALSE]
+      cbind(temp, 1) / drop(1 + temp %*% rep(1, ncol(temp)))
+    } else {
+      zeta <- eta2theta(eta, .link , earg = .earg )
+      temp <- tapplymat1(zeta, "cumprod")
+      cbind(1, temp) / drop(1 + temp %*% rep(1, ncol(temp)))
+    }
+    okay1 <- all(is.finite(probs)) && all(0 < probs & probs < 1)
+    okay1
+  }, list( .earg = earg, .link = link, .reverse = reverse) )),
   deriv = eval(substitute(expression({
     zeta <- eta2theta(eta, .link , earg = .earg )  # May be zetar
 
@@ -1457,7 +1502,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     answer
   }), list( .earg = earg, .link = link, .reverse = reverse) )),
   weight = eval(substitute(expression({
-    wz <- matrix(NA_real_, n, dimm(M)) 
+    wz <- matrix(NA_real_, n, dimm(M))
 
     hess <- attr(d1, "hessian") / d1
 
@@ -1486,11 +1531,11 @@ acat.deriv <- function(zeta, reverse, M, n) {
     index <- if (reverse) ii:M else 1:ii
     vars <- paste("zeta", index, sep = "")
     txt <- paste(vars, collapse = "*")
-    alltxt <- c(alltxt, txt) 
+    alltxt <- c(alltxt, txt)
   }
   alltxt <- paste(alltxt, collapse = " + ")
   alltxt <- paste(" ~ 1 +", alltxt)
-  txt <- as.formula(alltxt) 
+  txt <- as.formula(alltxt)
 
   allvars <- paste("zeta", 1:M, sep = "")
   d1 <- deriv3(txt, allvars, hessian = TRUE)
@@ -1523,7 +1568,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
 
   new("vglmff",
-  blurb = c(paste("Bradley-Terry model (without ties)\n\n"), 
+  blurb = c(paste("Bradley-Terry model (without ties)\n\n"),
             "Links:   ",
             namesof("alpha's", "loge")),
   infos = eval(substitute(function(...) {
@@ -1619,6 +1664,22 @@ acat.deriv <- function(zeta, reverse, M, n) {
     }
   },
   vfamily = c("brat", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    probs <- NULL
+    eta <- as.matrix(eta)  # in case M = 1
+    for (ii in 1:nrow(eta)) {
+      alpha <- .brat.alpha(eta2theta(eta[ii, ], "loge",
+                                     earg = list(theta = NULL)),
+                           .refvalue , .refgp )
+      alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
+      alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
+      probs <- rbind(probs, alpha1 / (alpha1 + alpha2))
+    }
+
+
+    okay1 <- all(is.finite(probs)) && all(0 < probs & probs < 1)
+    okay1
+  }, list( .refvalue = refvalue, .refgp = refgp) )),
   deriv = eval(substitute(expression({
     ans <- NULL
     uindex <- if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
@@ -1681,14 +1742,14 @@ acat.deriv <- function(zeta, reverse, M, n) {
   if (!is.Numeric(refvalue, length.arg = 1, positive = TRUE))
     stop("'refvalue' must be a single positive value")
 
-  if (!is.character(refgp) && 
+  if (!is.character(refgp) &&
      !is.Numeric(refgp, length.arg = 1,
                  integer.valued = TRUE, positive = TRUE))
     stop("'refgp' must be a single positive integer")
 
 
   new("vglmff",
-  blurb = c(paste("Bradley-Terry model (with ties)\n\n"), 
+  blurb = c(paste("Bradley-Terry model (with ties)\n\n"),
             "Links:   ",
             namesof("alpha's", "loge"), ", log(alpha0)"),
   infos = eval(substitute(function(...) {
@@ -1797,6 +1858,23 @@ acat.deriv <- function(zeta, reverse, M, n) {
     }
   },
   vfamily = c("bratt", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    probs <- qprobs <- NULL
+    M <- ncol(eta)
+    for (ii in 1:nrow(eta)) {
+      alpha <- .brat.alpha(eta2theta(eta[ii, -M],
+                                     "loge"),
+                           .refvalue , .refgp )
+      alpha0 <- loge(eta[ii, M], inverse = TRUE)
+      alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
+      alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
+       probs <- rbind( probs, alpha1 / (alpha1 + alpha2 + alpha0))  #
+      qprobs <- rbind(qprobs, alpha0 / (alpha1 + alpha2 + alpha0))  #
+    }
+    okay1 <- all(is.finite( probs)) && all(0 <  probs &  probs < 1) &&
+             all(is.finite(qprobs)) && all(0 < qprobs & qprobs < 1)
+    okay1
+  }, list( .refvalue = refvalue, .refgp = refgp) )),
   deriv = eval(substitute(expression({
     ans <- NULL
     ties <- extra$ties
@@ -1819,7 +1897,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
                   tmat[uindex, aa]) / Daj
       }
       deriv0 <- 0 # deriv wrt eta[M]
-      for (aa in 1:(NCo-1)) 
+      for (aa in 1:(NCo-1))
         for (bb in (aa+1):NCo) {
           Dab <- alpha[aa] + alpha[bb] + alpha0
           qab <- alpha0 / Dab
@@ -1872,7 +1950,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
         jay <- uindex[sss]
         naj <- ymat[, jay] + ymat[jay, ] + tmat[, jay]
         Daj <- alpha[jay] + alpha + alpha0
-        wz[ii, iam(sss, NCo, M = NCo, diag = TRUE)] <- 
+        wz[ii, iam(sss, NCo, M = NCo, diag = TRUE)] <-
             -alpha[jay] * alpha0 * sum(naj / Daj^2)
       }
     }
@@ -1924,7 +2002,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
   ans <- ans.ties <- NULL
   for (ii in seq_along(allargs)) {
     m <- allargs[[ii]]
-    if (!is.matrix(m) || dim(m)[1] != dim(m)[2]) 
+    if (!is.matrix(m) || dim(m)[1] != dim(m)[2])
       stop("m must be a square matrix")
 
     diag(ties) <- 0
@@ -1949,8 +2027,8 @@ acat.deriv <- function(zeta, reverse, M, n) {
   }
   dimnames(ans) <- list(callit, usethis1)
   dimnames(ans.ties) <- list(callit, usethis2)
-  attr(ans, "ties") <- ans.ties 
-  attr(ans, "are.ties") <- are.ties 
+  attr(ans, "ties") <- ans.ties
+  attr(ans, "are.ties") <- are.ties
   ans
 }
 
@@ -1998,7 +2076,7 @@ InverseBrat <-
     } else {
       dimnames(ans) <- list(cal, cal)
     }
-  } 
+  }
   ans
 }
 
@@ -2041,7 +2119,7 @@ InverseBrat <-
 
 
   new("vglmff",
-  blurb = c(paste("Ordinal Poisson model\n\n"), 
+  blurb = c(paste("Ordinal Poisson model\n\n"),
             "Link:     ", namesof("mu", link, earg = earg)),
 
 
@@ -2087,7 +2165,7 @@ InverseBrat <-
 
     initmu <- if (is.Numeric( .init.mu )) rep_len( .init.mu , NOS) else NULL
     cutpoints <- rep_len( .cutpoints, sum(Levels))
-    delete.zero.colns <- FALSE 
+    delete.zero.colns <- FALSE
     use.y <- if ( .countdata ) y else matrix(0, n, sum(Levels))
     use.etastart <- matrix(0, n, M)
     cptr <- 1
@@ -2190,7 +2268,7 @@ InverseBrat <-
     cptr <- 1
     for (iii in 1:NOS) {
       for (kkk in 1:Levels[iii]) {
-        d2l.dmu2[,iii] <- d2l.dmu2[,iii] + 
+        d2l.dmu2[,iii] <- d2l.dmu2[,iii] +
             dprob.dmu[,cptr]^2 / probs.use[,cptr]
         cptr <- cptr + 1
       }
@@ -2499,8 +2577,8 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "acat"),
 
     if (!all(object at misc$link == "loge"))
       stop("currently only the 'loge' link is supported")
- 
- 
+
+
   acat.derivs <- function(jay, tee,
                           M, expcs.etamat, Thetamat,
                           prob1, probMplus1,
@@ -2646,7 +2724,7 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "cratio"),
 
 
 
-   
+
 
   vfamily <- object at family@vfamily
   c.nots <- any(vfamily == "cratio")
@@ -2709,7 +2787,7 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "cratio"),
     if (reverse) {
       A[, , M+1, M] <- ifelse(c.nots, -1, 1) * hdot[, M]
     } else {
-      for (jlocal in 1:M) { 
+      for (jlocal in 1:M) {
         for (tlocal in 1:jlocal) {
           A[, , M+1, tlocal] <- if (c.nots) {
             A[, , M+1, tlocal] - A[, , jlocal, tlocal]
@@ -2834,7 +2912,7 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "sratio"),
     if (reverse) {
       A[, , M+1, M] <- ifelse(c.nots, -1, 1) * hdot[, M]
     } else {
-      for (jlocal in 1:M) { 
+      for (jlocal in 1:M) {
         for (tlocal in 1:jlocal) {
           A[, , M+1, tlocal] <- if (c.nots) {
             A[, , M+1, tlocal] - A[, , jlocal, tlocal]
@@ -3052,8 +3130,8 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "sratio"),
   if (any(vfamily == "acat")) {
     if (!all(object at misc$link == "loge"))
       stop("currently only the 'loge' link is supported")
- 
- 
+
+
   acat.derivs <- function(jay, tee,
                           M, expcs.etamat, Thetamat,
                           prob1, probMplus1,
@@ -3129,7 +3207,7 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "sratio"),
 
 
 
-   
+
 
   c.nots <- any(vfamily == "cratio")
 
@@ -3221,7 +3299,7 @@ setMethod("margeffS4VGAM",  signature(VGAMff = "sratio"),
     if (reverse) {
       A[, , M+1, M] <- ifelse(c.nots, -1, 1) * hdot[, M]
     } else {
-      for (jlocal in 1:M) { 
+      for (jlocal in 1:M) {
         for (tlocal in 1:jlocal) {
           A[, , M+1, tlocal] <- if (c.nots) {
             A[, , M+1, tlocal] - A[, , jlocal, tlocal]
@@ -3284,7 +3362,7 @@ prplot <- function(object,
                             byrow = TRUE)
   for (ii in 1:MM) {
     use.y[, ii] <- eta2theta(use.y[, ii],
-                             link = object at misc$link[[ii]], 
+                             link = object at misc$link[[ii]],
                              earg = object at misc$earg[[ii]])
   }
   if (ncol(use.y) != MM) use.y = use.y[, 1:MM, drop = FALSE]
@@ -3324,7 +3402,7 @@ prplot <- function(object,
                            lwd = par()$lwd,
                            rlwd = par()$lwd,
                            las = par()$las,
-                           rug.arg  = FALSE, 
+                           rug.arg  = FALSE,
                            ...) {
 
     list(xlab = xlab, ylab = ylab,
diff --git a/R/family.censored.R b/R/family.censored.R
index 340c668..4ecf061 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -79,7 +79,7 @@
       stop("response have to be in a class of SurvS4")
 
       if (length( .imu )) init.mu <- 0 * y[, 1] + .imu
-  
+
       predictors.names <-
         namesof("mu", .link, earg = .earg, short = TRUE)
 
@@ -119,12 +119,17 @@
     }
   },
   vfamily = "cens.poisson",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta, link = .link , earg = .earg )
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list(  .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     cen0 <- extra$uncensored
     cenL <- extra$leftcensored
     cenU <- extra$rightcensored
     cenI <- extra$intervalcensored
-      lambda <- eta2theta(eta, link = .link, earg = .earg)
+      lambda <- eta2theta(eta, link = .link , earg = .earg )
 
       dl.dlambda <- (y[, 1] - lambda)/lambda   # uncensored
 
@@ -188,7 +193,7 @@
 
 
 if (FALSE)
- cens.exponential <- 
+ cens.exponential <-
  ecens.exponential <- function(link = "loge", location = 0) {
   if (!is.Numeric(location, length.arg = 1))
     stop("bad input for 'location'")
@@ -286,6 +291,11 @@ if (FALSE)
     exp(-rate[cenI]*(y[cenI, 1]-extra$location))))
   }, list( .link = link ))),
   vfamily = c("ecens.exponential"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    rate <- 1 / (mu - extra$location)
+    okay1 <- all(is.finite(rate)) && all(0 < rate)
+    okay1
+  }, list(  .link = link ))),
   deriv = eval(substitute(expression({
     rate <- 1 / (mu - extra$location)
     cen0 <- extra$uncensored
@@ -443,6 +453,14 @@ if (FALSE)
   }, list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd ))),
   vfamily = c("cens.normal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mum <- eta2theta(eta[, 1], .lmu )
+    sdv <- eta2theta(eta[, 2], .lsd )
+    okay1 <- all(is.finite(mum)) &&
+             all(is.finite(sdv)) && all(0 < sdv)
+    okay1
+  }, list( .lmu = lmu, .lsd = lsd,
+           .emu = emu, .esd = esd ))),
   deriv = eval(substitute(expression({
     cenL <- extra$leftcensored
     cenU <- extra$rightcensored
@@ -450,8 +468,8 @@ if (FALSE)
     Lower <- ifelse(cenL, y, -Inf)
     Upper <- ifelse(cenU, y,  Inf)
 
-    mum <- eta2theta(eta[, 1], .lmu)
-    sdv <- eta2theta(eta[, 2], .lsd)
+    mum <- eta2theta(eta[, 1], .lmu )
+    sdv <- eta2theta(eta[, 2], .lsd )
 
     dl.dmu <- (y-mum) / sdv^2
     dl.dsd <- (((y-mum)/sdv)^2 - 1) / sdv
@@ -494,7 +512,7 @@ if (FALSE)
     temp21L <- mumL / sdv
     PhiL <- pnorm(temp21L)
     phiL <- dnorm(temp21L)
-    temp31L <- ((1-PhiL) * sdv)^2 
+    temp31L <- ((1-PhiL) * sdv)^2
     wz.cenL11 <- phiL * (phiL - (1-PhiL)*temp21L) / temp31L
     wz.cenL22 <- mumL * phiL * ((1-PhiL) * (2 - temp21L^2) +
                  mumL * phiL / sdv) / (sdv * temp31L)
@@ -574,9 +592,9 @@ if (FALSE)
     Scale * sqrt(pi/2)
   }, list( .lscale = lscale, .escale = escale ))),
   last = eval(substitute(expression({
-    misc$link <-    c("scale" = .lscale)
+    misc$link <-    c("scale" = .lscale )
 
-    misc$earg <- list("scale" = .escale)
+    misc$earg <- list("scale" = .escale )
 
     misc$oim <- .oim
   }), list( .lscale = lscale, .escale = escale,
@@ -593,9 +611,13 @@ if (FALSE)
       sum(w[cen0] * (log(y[cen0]) - 2*log(Scale[cen0]) -
                      0.5*(y[cen0]/Scale[cen0])^2)) -
       sum(w[cenU] * (y[cenU]/Scale[cenU])^2) * 0.5
-  }, list( .lscale = lscale,
-           .escale = escale ))),
+  }, list( .lscale = lscale, .escale = escale ))),
   vfamily = c("cens.rayleigh"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta, .lscale , earg = .escale )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .lscale = lscale, .escale = escale ))),
   deriv = eval(substitute(expression({
     cen0 <- !extra$rightcensored   # uncensored obsns
     cenU <- extra$rightcensored
@@ -765,7 +787,7 @@ if (FALSE)
             Shape.init[, ilocal] <- 1 / fit0$coef["X"]
         }  # ilocal
 
-        etastart <- 
+        etastart <-
           cbind(theta2eta(Meann.init, .lmeann , earg = .emeann ),
                 theta2eta(Shape.init, .lshape , earg = .eshape ))[,
                 interleave.VGAM(M, M1 = M1)]
@@ -840,6 +862,14 @@ if (FALSE)
   }, list( .lmeann = lmeann, .lshape = lshape,
            .emeann = emeann, .eshape = eshape ) )),
   vfamily = c("weibull.mean"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Meann <- eta2theta(eta[, c(TRUE, FALSE)], .lmeann , earg = .emeann )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Meann)) && all(0 < Meann) &&
+             all(is.finite(Shape)) && all(0 < Shape)
+    okay1
+  }, list( .lmeann = lmeann, .lshape = lshape,
+           .emeann = emeann, .eshape = eshape ) )),
   deriv = eval(substitute(expression({
     M1 <- 2
     Meann <- eta2theta(eta[, c(TRUE, FALSE)], .lmeann , earg = .emeann )
@@ -853,7 +883,7 @@ if (FALSE)
       CC <- y * gamma(1 + 1/Shape) / Meann
       dl.dmeann <- (CC^Shape - 1) * Shape / Meann  # Agrees
       dl.dshape <- 1/Shape -
-                   (log(y/Meann) + lgamma(1 + 1/Shape)) * (CC^Shape - 1) + 
+                   (log(y/Meann) + lgamma(1 + 1/Shape)) * (CC^Shape - 1) +
                    (BB / Shape) * (CC^Shape - 1)
     }
 
@@ -973,7 +1003,7 @@ if (FALSE)
   }, list( .zero = zero, .scale.12 = scale.12, .scale.TF = scale.TF,
            .lscale = lscale ,
            .lshape = lshape ,
-           .lss = lss 
+           .lss = lss
          ))),
 
   initialize = eval(substitute(expression({
@@ -1007,7 +1037,7 @@ if (FALSE)
       predictors.names <-
           c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
             namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
-            
+
     } else {
       mynames1 <- param.names("shape", ncoly)
       mynames2 <- param.names("scale", ncoly)
@@ -1128,6 +1158,15 @@ if (FALSE)
            .escale = escale, .eshape = eshape,
            .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ) )),
   vfamily = c("weibullR"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[,    .scale.TF  ], .lscale , earg = .escale )
+    Shape <- eta2theta(eta[, !( .scale.TF )], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(Shape)) && all(0 < Shape)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape,
+           .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ) )),
   deriv = eval(substitute(expression({
     M1 <- 2
     Scale <- eta2theta(eta[,    .scale.TF  ], .lscale , earg = .escale )
@@ -1371,7 +1410,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
 
   alld <- pgamma.deriv(q = q, shape = shape)  # 6-coln matrix
   tmp3 <- alld[, 3] / I.sq  # RHS of eqn (4.5) of \cite{wing:1989}
-    
+
   G1s <- digamma(shape) + tmp3  # eqn (4.9)
   gam1 <- gam0 * G1s
 
@@ -1447,8 +1486,8 @@ pgamma.deriv.unscaled <- function(q, shape) {
   new("vglmff",
   blurb = c("Truncated weibull distribution\n\n",
             "Links:    ",
-            namesof("Alpha", lAlpha, earg = eAlpha), ", ", 
-            namesof("Betaa", lBetaa, earg = eBetaa), "\n", 
+            namesof("Alpha", lAlpha, earg = eAlpha), ", ",
+            namesof("Betaa", lBetaa, earg = eBetaa), "\n",
             if (length( lower.limit ) < 5)
               paste("Truncation point(s):     ",
                     lower.limit, sep = ", ") else
@@ -1643,6 +1682,15 @@ pgamma.deriv.unscaled <- function(q, shape) {
            .lower.limit = lower.limit ) )),
 
   vfamily = c("truncweibull"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Alpha <- eta2theta(eta[, c(TRUE, FALSE)], .lAlpha , earg = .eAlpha )
+    Betaa <- eta2theta(eta[, c(FALSE, TRUE)], .lBetaa , earg = .eBetaa )
+    okay1 <- all(is.finite(Alpha)) && all(0 < Alpha) &&
+             all(is.finite(Betaa)) && all(0 < Betaa)
+    okay1
+  }, list( .lBetaa = lBetaa, .lAlpha = lAlpha,
+           .eBetaa = eBetaa, .eAlpha = eAlpha,
+           .lower.limit = lower.limit ) )),
 
   deriv = eval(substitute(expression({
     M1 <- 2
@@ -1675,12 +1723,12 @@ pgamma.deriv.unscaled <- function(q, shape) {
     wingo3 <- pgamma.deriv.unscaled(q = aTb,
                                     shape = 2)  # 3-cols
 
- 
+
     Eyblogy <- (exp.aTb * (digamma(2) - wingo3[, 2]) -
                (aTb + 1) * log(Alpha)) / (Alpha * Betaa)
 
 
- 
+
     Eyblog2y <- (exp.aTb * (digamma(2)^2 + trigamma(2) -
                  wingo3[, 3]) - 2 * log(Alpha) *
                 (digamma(2) - wingo3[, 2])) / (Alpha * Betaa^2) +
diff --git a/R/family.circular.R b/R/family.circular.R
index 8e1be77..62ba32e 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -35,7 +35,7 @@ dcard <- function(x, mu, rho, log = FALSE) {
 
 
 pcard <- function(q, mu, rho, lower.tail = TRUE, log.p = FALSE) {
-  
+
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
@@ -61,7 +61,7 @@ pcard <- function(q, mu, rho, lower.tail = TRUE, log.p = FALSE) {
       ans[q <= 0] <- 1
       ans[q >= (2*pi)] <- 0
     }
-  } 
+  }
   ans[mu < 0 | mu > 2*pi] <- NaN  # A warning() may be a good idea here
   ans[abs(rho) > 0.5] <- NaN
   ans
@@ -84,7 +84,7 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
     stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ln.p <- p
@@ -139,14 +139,14 @@ qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
           warning("did not converge")
           break
         }
-        oldans <- ans 
+        oldans <- ans
        }
-    } else { 
+    } else {
       for (its in 1:maxits) {
         oldans <- 2 * pi - 2 * pi * p
         ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
                2*pi + 2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
-        index <- (ans < 0) | (ans > 2*pi)  
+        index <- (ans < 0) | (ans > 2*pi)
         if (any(index)) {
           ans[index] <- runif (sum(index), 0, 2*pi)
         }
@@ -225,7 +225,7 @@ cardioid.control <- function(save.weights = TRUE, ...) {
   new("vglmff",
   blurb = c("Cardioid distribution\n\n",
             "Links:    ",
-            namesof("mu",  lmu,  earg = emu,  tag = FALSE), ", ", 
+            namesof("mu",  lmu,  earg = emu,  tag = FALSE), ", ",
             namesof("rho", lrho, earg = erho, tag = FALSE), "\n",
             "Mean:     ",
             "pi + (rho/pi) *",
@@ -325,6 +325,14 @@ cardioid.control <- function(save.weights = TRUE, ...) {
   }, list( .lmu = lmu, .lrho = lrho,
            .emu = emu, .erho = erho ))),
   vfamily = c("cardioid"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mu  <- eta2theta(eta[, 1], link = .lmu  , earg = .emu  )
+    rho <- eta2theta(eta[, 2], link = .lrho , earg = .erho )
+    okay1 <- all(is.finite(mu )) && all( 0   < mu  & mu  < 2*pi) &&
+             all(is.finite(rho)) && all(-0.5 < rho & rho < 0.5)
+    okay1
+  }, list( .lmu = lmu, .lrho = lrho,
+           .emu = emu, .erho = erho ))),
 
 
 
@@ -481,6 +489,14 @@ cardioid.control <- function(save.weights = TRUE, ...) {
   }, list( .escale = escale, .lscale = lscale,
            .llocat = llocat, .elocat = elocat ))),
   vfamily = c("vonmises"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    okay1 <- all(is.finite(locat)) && all(0 < locat & locat < 2*pi) &&
+             all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .escale = escale, .lscale = lscale,
+           .llocat = llocat, .elocat = elocat ))),
   deriv = eval(substitute(expression({
     locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
diff --git a/R/family.exp.R b/R/family.exp.R
index 581d764..3fd01ca 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -20,7 +20,7 @@ qeunif <- function(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
   rm(log.p)   # 20150102 KaiH
-  
+
   if (lower.tail) {
     if (log.arg)
       p <- exp(p)
@@ -72,10 +72,10 @@ peunif <- function(q, min = 0, max = 1,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (any(min >= max))
     stop("argument 'min' has values greater or equal to argument 'max'")
 
@@ -152,7 +152,7 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
 
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   ppp <- p
   if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
       Tol.nr > 0.10)
@@ -160,7 +160,7 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
          "positive value, or is too large")
   nrok <- is.finite(ppp)
 
-  eee <-  qnorm(ppp, sd = 2/3, lower.tail = lower.tail, log.p = log.p) 
+  eee <-  qnorm(ppp, sd = 2/3, lower.tail = lower.tail, log.p = log.p)
 
 
 
@@ -208,7 +208,7 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
   } else {
     if (log.p) {
       eee[ln.ppp > 0] <- NaN
-    } else { 
+    } else {
       eee[ppp == 0] <- Inf
       eee[ppp == 1] <- -Inf
       eee[ppp <  0] <- NaN
@@ -224,7 +224,7 @@ penorm <- function(q, mean = 0, sd = 1,
                    lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -293,13 +293,13 @@ qeexp <- function(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
   rm(log.p)   # 20150102 KaiH
-  
+
   if (lower.tail) {
     if (log.arg) p <- exp(p)
   } else {
     p <- if (log.arg) -expm1(p) else 1 - p
   }
-  
+
   ppp <- p
   vsmallno <- sqrt(.Machine$double.eps)
   if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
@@ -340,7 +340,7 @@ qeexp <- function(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
 peexp <- function(q, rate = 1, lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -427,7 +427,7 @@ psc.t2 <- function(q, location = 0, scale = 1,
                    lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -438,10 +438,10 @@ psc.t2 <- function(q, location = 0, scale = 1,
     if (log.p) {
       ans <- log(0.5) + log1p(zedd / sqrt(4 + zedd^2))
       ans[q == -Inf] <- log(0)
-      ans[q == Inf] <- log(1) 
+      ans[q == Inf] <- log(1)
       } else {
       ans <- 0.5 * (1 + zedd / sqrt(4 + zedd^2))
-      ans[q == -Inf] <- 0 
+      ans[q == -Inf] <- 0
       ans[q == Inf] <- 1
       }
   } else {
@@ -451,11 +451,11 @@ psc.t2 <- function(q, location = 0, scale = 1,
       ans[q == Inf] <- log(0)
     } else {
       ans <- 0.5 * exp(log1p(-zedd / sqrt(4 + zedd^2)))
-      ans[q == -Inf] <- 1 
-      ans[q == Inf] <- 0 
+      ans[q == -Inf] <- 1
+      ans[q == Inf] <- 0
     }
   }
-  ans 
+  ans
 }
 
 
@@ -468,10 +468,10 @@ qsc.t2 <- function(p, location = 0, scale = 1,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ln.p <- p
@@ -493,7 +493,7 @@ qsc.t2 <- function(p, location = 0, scale = 1,
              exp(0.5*(ln.p - log(-expm1(ln.p))))
       ans[ln.p > 0] <- NaN
       ans
-    } else { 
+    } else {
       ans <- exp(0.5*(log1p(-p) - log(p))) -
              exp(0.5*(log(p) - log1p(-p)))
       ans[p < 0] <- NaN
@@ -529,7 +529,7 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
                      imethod = 1,
                      zero = "scale") {
 
- 
+
 
   llocat <- as.list(substitute(llocation))
   elocat <- link2list(llocat)
@@ -597,8 +597,8 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
 
 
     predictors.names <- c(
-        namesof("location", .llocat, earg = .elocat, tag = FALSE),
-        namesof("scale",    .lscale, earg = .escale, tag = FALSE))
+        namesof("location", .llocat , earg = .elocat , tag = FALSE),
+        namesof("scale",    .lscale , earg = .escale , tag = FALSE))
 
 
     if (!length(etastart)) {
@@ -611,8 +611,8 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
         diff(quantile(y, prob = c(0.25, 0.75))) / (2 * 1.155) + 1.0e-5
       locat.init <- rep_len(locat.init, length(y))
       Scale.init <- rep_len(Scale.init, length(y))
-      etastart <- cbind(theta2eta(locat.init, .llocat, earg = .elocat),
-                        theta2eta(Scale.init, .lscale, earg = .escale))
+      etastart <- cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
+                        theta2eta(Scale.init, .lscale , earg = .escale ))
     }
   }), list( .llocat = llocat, .lscale = lscale,
             .ilocat = ilocat, .iscale = iscale,
@@ -620,8 +620,8 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL){
     Perce <- .percentile
-    locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
-    Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
+    locat <- eta2theta(eta[, 1], link = .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], link = .lscale , earg = .escale )
     answer <- matrix(locat, nrow(eta), length(Perce))
     for (ii in seq_along(Perce))
       answer[, ii] <- qsc.t2(Perce[ii] / 100, loc = locat, sc = Scale)
@@ -632,9 +632,9 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
            .elocat = elocat, .escale = escale,
            .percentile = percentile ))),
   last = eval(substitute(expression({
-    misc$link <-    c("location" = .llocat, "scale" = .lscale)
+    misc$link <-    c("location" = .llocat , "scale" = .lscale )
 
-    misc$earg <- list("location" = .elocat, "scale" = .escale)
+    misc$earg <- list("location" = .elocat , "scale" = .escale )
 
     misc$expected <- TRUE
     misc$percentile <- .percentile
@@ -654,8 +654,8 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
-    locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
-    Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
+    locat <- eta2theta(eta[, 1], link = .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], link = .lscale , earg = .escale )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -670,12 +670,20 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   vfamily = c("sc.studentt2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat <- eta2theta(eta[, 1], link = .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], link = .lscale , earg = .escale )
+    okay1 <- all(is.finite(locat)) &&
+             all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale,
+           .elocat = elocat, .escale = escale ))),
   deriv = eval(substitute(expression({
-    locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
-    Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
+    locat <- eta2theta(eta[, 1], link = .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], link = .lscale , earg = .escale )
 
-    dlocat.deta <- dtheta.deta(locat, link = .llocat, earg = .elocat)
-    dscale.deta <- dtheta.deta(Scale, link = .lscale, earg = .escale)
+    dlocat.deta <- dtheta.deta(locat, link = .llocat , earg = .elocat )
+    dscale.deta <- dtheta.deta(Scale, link = .lscale , earg = .escale )
 
     zedd <- (y - locat) / Scale
 
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 1ceea48..319163f 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -17,15 +17,13 @@
 
 
 
-
-
 rgev <- function(n, location = 0, scale = 1, shape = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
                            length.arg = 1, positive = TRUE))
              stop("bad input for argument 'n'") else n
 
-  if (!is.Numeric(location)) 
+  if (!is.Numeric(location))
     stop("bad input for argument argument 'location'")
   if (!is.Numeric(shape))
     stop("bad input for argument argument 'shape'")
@@ -53,7 +51,7 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
                   tolshape0 = sqrt( .Machine$double.eps )) {
 
   oobounds.log <- -Inf   # 20160412; No longer an argument.
-  
+
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -67,7 +65,7 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
   if (length(location) != use.n) location <- rep_len(location, use.n)
   if (length(scale)    != use.n) scale    <- rep_len(scale,    use.n)
 
-  
+
 
 
   if (length(x)        != use.n) x        <- rep_len(x,        use.n)
@@ -77,7 +75,7 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
   scase <- (abs(shape) < tolshape0)
   nscase <- sum(scase)
   if (use.n - nscase) {
-    zedd <- 1 + shape * (x - location) / scale  # pmax(0, (1+shape*xc/scale))
+    zedd <- 1 + shape * (x - location) / scale
     xok <- (!scase) & (zedd > 0)
     logdensity[xok] <- -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
                        (1 + 1/shape[xok]) * log(zedd[xok])
@@ -108,7 +106,7 @@ pgev <- function(q, location = 0, scale = 1, shape = 0,
 
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   use.n <- max(length(q), length(location), length(scale), length(shape))
   if (length(shape)    != use.n) shape    <- rep_len(shape,    use.n)
   if (length(location) != use.n) location <- rep_len(location, use.n)
@@ -150,8 +148,8 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
 
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
- 
+
+
 
   use.n <- max(length(p), length(location), length(scale), length(shape))
   if (length(shape)    != use.n) shape    <- rep_len(shape,    use.n)
@@ -259,7 +257,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
   new("vglmff",
   blurb = c("Generalized extreme value distribution\n",
             "Links:    ",
-            namesof("location", llocat, earg = elocat), ", ", 
+            namesof("location", llocat, earg = elocat), ", ",
             namesof("scale",    lscale, earg = escale), ", ",
             namesof("shape",    lshape, earg = eshape)),
   constraints = eval(substitute(expression({
@@ -307,6 +305,8 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
     ncoly <- ncol(y)
     extra$ncoly <- ncoly
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+    extra$percentiles <- .percentiles
     extra$M1 <- M1
 
 
@@ -333,7 +333,6 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
     if (any(r.vec == 0))
       stop("A row contains all missing values")
 
-    extra$percentiles <- .percentiles
 
 
 
@@ -417,6 +416,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
             .imethod = imethod ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 3)
     Locat <- eta2theta(eta[, 1], .llocat , .elocat )
     sigma <- eta2theta(eta[, 2], .lscale , .escale )
     shape <- eta2theta(eta[, 3], .lshape , .eshape )
@@ -443,8 +443,9 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
         fv[, ii] <- qgev(pcent[ii] /100, loc = Locat, scale = sigma,
                          shape = shape)
       }
-      dimnames(fv) <- list(dimnames(eta)[[1]],
-                           paste(as.character(pcent), "%", sep = ""))
+      fv <- label.cols.y(fv, colnames.y = extra$colnames.y,
+                         NOS = NOS, percentiles = pcent,
+                         one.on.one = FALSE)
     } else {
       is.zero <- (abs(shape) < .tolshape0 )
       EulerM <- -digamma(1)
@@ -452,13 +453,13 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
       fv[!is.zero] <- Locat[!is.zero] + sigma[!is.zero] *
                       (gamma(1 - shape[!is.zero]) - 1) / shape[!is.zero]
       fv[shape >= 1] <- NA  # Mean exists only if shape < 1.
+      fv <- label.cols.y(fv, colnames.y = extra$colnames.y, NOS = NOS)
     }
     fv
-  }, list(
-            .llocat = llocat, .lscale = lscale, .lshape = lshape,
-            .elocat = elocat, .escale = escale, .eshape = eshape,
+  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+           .elocat = elocat, .escale = escale, .eshape = eshape,
+           .tolshape0 = tolshape0 ))),
 
-            .tolshape0 = tolshape0 ))),
   last = eval(substitute(expression({
     misc$earg <- vector("list", M)
     names(misc$earg) <- c(mynames1, mynames2, mynames3)
@@ -621,12 +622,12 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
         stop("cannot handle shape == 0 with a multivariate response")
 
       EulerM <- -digamma(1)
-      wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2)/sigma[is.zero]^2
+      wz[is.zero, iam(2, 2, M)] <- (pi^2/6 +(1-EulerM)^2)/sigma[is.zero]^2
       wz[is.zero, iam(3, 3, M)] <- 2.4236
       wz[is.zero, iam(1, 2, M)] <-
         (digamma(2) + 2 * (EulerM - 1)) / sigma[is.zero]^2
       wz[is.zero, iam(1, 3, M)] <-
-       -(trigamma(1) / 2 + digamma(1) * (digamma(1)/2 + 1)) / sigma[is.zero]
+       -(trigamma(1) / 2 + digamma(1) * (digamma(1)/2 + 1))/sigma[is.zero]
       wz[is.zero, iam(2, 3, M)] <-
         (-dgammadx(2, 3)/6 +   dgammadx(1, 1) +
                              2*dgammadx(1, 2) +
@@ -720,7 +721,7 @@ dgammadx <- function(x, deriv.arg = 1) {
   lshape <- attr(eshape, "function.name")
 
 
-  if (length(percentiles) && 
+  if (length(percentiles) &&
     (!is.Numeric(percentiles, positive = TRUE) ||
      max(percentiles) >= 100))
     stop("bad input for argument 'percentiles'")
@@ -742,7 +743,7 @@ dgammadx <- function(x, deriv.arg = 1) {
   new("vglmff",
   blurb = c("Generalized extreme value distribution\n",
           "Links:    ",
-          namesof("location", link = llocat, earg = elocat), ", ", 
+          namesof("location", link = llocat, earg = elocat), ", ",
           namesof("scale",    link = lscale, earg = escale), ", ",
           namesof("shape",    link = lshape, earg = eshape)),
   constraints = eval(substitute(expression({
@@ -762,10 +763,12 @@ dgammadx <- function(x, deriv.arg = 1) {
          lscale    = .lscale ,
          lshape    = .lshape ,
          type.fitted = .type.fitted ,
+         percentiles = .percentiles ,
          zero = .zero )
   }, list( .zero = zero,
            .llocat = llocation, .lscale = lscale, .lshape = lshape,
-           .type.fitted = type.fitted ))),
+           .type.fitted = type.fitted,
+           .percentiles = percentiles ))),
 
 
   initialize = eval(substitute(expression({
@@ -785,6 +788,8 @@ dgammadx <- function(x, deriv.arg = 1) {
     NOS <- ncoly <- ncol(y)
     extra$ncoly <- ncoly
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+    extra$percentiles <- .percentiles
     extra$M1 <- M1
     M <- M1 * ncoly  # Is now true!
 
@@ -799,7 +804,6 @@ dgammadx <- function(x, deriv.arg = 1) {
           interleave.VGAM(M, M1 = M1)]
 
 
-      extra$percentiles <- .percentiles
 
 
 
@@ -814,7 +818,7 @@ dgammadx <- function(x, deriv.arg = 1) {
     if (!length(etastart)) {
 
 
-      if ( .lshape == "extlogit" && length( .ishape ) && 
+      if ( .lshape == "extlogit" && length( .ishape ) &&
          (any( .ishape <= eshape$min | .ishape >= eshape$max)))
         stop("bad input for argument 'eshape'")
 
@@ -881,7 +885,7 @@ dgammadx <- function(x, deriv.arg = 1) {
 
       etastart <-
         cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
-              theta2eta(scale.init, .lscale , earg = .escale ), 
+              theta2eta(scale.init, .lscale , earg = .escale ),
               theta2eta(shape.init, .lshape , earg = .eshape ))
       etastart <-
         etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
@@ -890,20 +894,22 @@ dgammadx <- function(x, deriv.arg = 1) {
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .ilocat = ilocat, .iscale = iscale, .ishape = ishape,
                                                 .gshape = gshape,
-           
+
             .gprobs.y = gprobs.y, .gscale.mux = gscale.mux,
             .iprobs.y = iprobs.y,
-           
+
             .percentiles = percentiles, .tolshape0 = tolshape0,
             .imethod = imethod, .type.fitted = type.fitted ))),
 
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    M1 <- 3
-    NOS <- ncol(eta) / M1
-    Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , earg = .elocat )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , earg = .escale )
-    shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , earg = .eshape )
+    NOS <- ncol(eta) / c(M1 = 3)
+    Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)],
+                       .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)],
+                       .lscale , earg = .escale )
+    shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)],
+                       .lshape , earg = .eshape )
     type.fitted <-
       if (length(extra$type.fitted)) {
         extra$type.fitted
@@ -929,20 +935,23 @@ dgammadx <- function(x, deriv.arg = 1) {
         fv[, icol] <- qgev(pcent[ii] / 100, loc = Locat, scale = Scale,
                            shape = shape)
       }
-      colnames.fv <- rep_len(paste(as.character(pcent), "%", sep = ""), LP*NOS)
-      dimnames(fv) <- list(dimnames(eta)[[1]], colnames.fv)
+      fv <- label.cols.y(fv, colnames.y = extra$colnames.y,
+                         NOS = NOS, percentiles = pcent,
+                         one.on.one = FALSE)
     } else {
       is.zero <- (abs(shape) < .tolshape0 )
       EulerM <- -digamma(1)
-      fv <- loc + sigma * EulerM  # When shape == 0 it is Gumbel
-      fv[!is.zero] <- loc[!is.zero] + sigma[!is.zero] *
+      fv <- Locat + Scale * EulerM  # When shape == 0 it is Gumbel
+      fv[!is.zero] <- Locat[!is.zero] + Scale[!is.zero] *
                       (gamma(1 - shape[!is.zero]) - 1) / shape[!is.zero]
       fv[shape >= 1] <- NA  # Mean exists only if shape < 1.
+
+      fv <- label.cols.y(fv, colnames.y = extra$colnames.y, NOS = NOS)
     }
     fv
   }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape,
-           .type.fitted = type.fitted, .tolshape0 = tolshape0 ))),
+           .tolshape0 = tolshape0 ))),
   last = eval(substitute(expression({
     temp0303 <- c(rep_len( .llocat , NOS),
                   rep_len( .lscale , NOS),
@@ -973,9 +982,12 @@ dgammadx <- function(x, deriv.arg = 1) {
              summation = TRUE) {
     M1 <- 3
     NOS <- ncol(eta) / M1
-    Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , earg = .elocat )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , earg = .escale )
-    shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , earg = .eshape )
+    Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)],
+                       .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)],
+                       .lscale , earg = .escale )
+    shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)],
+                       .lshape , earg = .eshape )
 
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
@@ -1027,9 +1039,12 @@ dgammadx <- function(x, deriv.arg = 1) {
   deriv = eval(substitute(expression({
     M1 <- 3
     NOS <- ncol(eta) / M1
-    Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .llocat , earg = .elocat )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , earg = .escale )
-    shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lshape , earg = .eshape )
+    Locat <- eta2theta(eta[, c(TRUE, FALSE, FALSE)],
+                       .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)],
+                       .lscale , earg = .escale )
+    shape <- eta2theta(eta[, c(FALSE, FALSE, TRUE)],
+                       .lshape , earg = .eshape )
 
 
     is.zero <- (abs(shape) < .tolshape0 )
@@ -1044,7 +1059,7 @@ dgammadx <- function(x, deriv.arg = 1) {
       stop(sum(bad, na.rm = TRUE),
            " observations violating boundary constraints in '@deriv'")
 
-    AA <- 1 / (shape * A^pow)- pow / A 
+    AA <- 1 / (shape * A^pow)- pow / A
     dl.dlocat <- dA.dlocat * AA
     dl.dscale <- dA.dScale * AA - 1/Scale
     dl.dshape <- log(A)/shape^2 - pow * dA.dshape / A -
@@ -1082,7 +1097,7 @@ dgammadx <- function(x, deriv.arg = 1) {
 
     shape[abs(shape + 0.5) < .tolshape0 ] <- -0.499
     temp100 <- gamma(2 + shape)
-    pp <- (1 + shape)^2 * gamma(1 + 2*shape)  # gamma(0) is undefined so shape != -0.5
+    pp <- (1 + shape)^2 * gamma(1 + 2*shape)
     qq <- temp100 * (digamma(1 + shape) + (1 + shape)/shape)
     ned2l.dlocat2 <- pp / Scale^2
     ned2l.dscale2 <- (1 - 2*temp100 + pp) / (Scale * shape)^2
@@ -1096,8 +1111,10 @@ dgammadx <- function(x, deriv.arg = 1) {
     if (any(is.zero)) {
       ned2l.dscale2[is.zero] <- (pi^2/6 + (1-EulerM)^2) / Scale[is.zero]^2
       ned2l.dshape2[is.zero] <- 2.4236
-      ned2l.dlocsca[is.zero] <- (digamma(2) + 2*(EulerM - 1)) / Scale[is.zero]^2
-      ned2l.dscasha[is.zero] <- -(   -dgammadx(2, 3) / 6 + dgammadx(1, 1) +
+      ned2l.dlocsca[is.zero] <- (digamma(2) +
+                                 2*(EulerM - 1)) / Scale[is.zero]^2
+      ned2l.dscasha[is.zero] <- -(   -dgammadx(2, 3) / 6 +
+                                      dgammadx(1, 1) +
                                     2*dgammadx(1, 2) +
                                     2*dgammadx(1, 3) / 3) / Scale[is.zero]
       ned2l.dlocsha[is.zero] <-  (trigamma(1) / 2 + digamma(1)*
@@ -1147,7 +1164,7 @@ qgumbel <- function(p, location = 0, scale = 1,
                     lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -1165,7 +1182,7 @@ qgumbel <- function(p, location = 0, scale = 1,
       ln.p <- p
       ans <- location - scale * log(-log(-expm1(ln.p)))
       ans[ln.p > 0] <- NaN
-    } else { 
+    } else {
       ans <- location - scale * log(-log1p(-p))
       ans[p == 0] <-  Inf
       ans[p == 1] <- -Inf
@@ -1183,7 +1200,7 @@ pgumbel <- function(q, location = 0, scale = 1,
     stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ans <- -exp(-(q - location) / scale)
@@ -1204,7 +1221,7 @@ pgumbel <- function(q, location = 0, scale = 1,
       ans[q <= -Inf] <- 1
       ans[q ==  Inf] <- 0
     }
-  } 
+  }
 
   ans[scale <= 0] <- NaN
   ans
@@ -1339,12 +1356,12 @@ pgumbel <- function(q, location = 0, scale = 1,
               -log(pcent[ii] / 100)
         mu[, ii] <- loc - sigma * log(ci)
       }
-      if (mpv) 
+      if (mpv)
         mu[, ncol(mu)] <- loc - sigma * log(log(2))
 
 
     dmn2 <- paste(as.character(pcent), "%", sep = "")
-    if (mpv) 
+    if (mpv)
       dmn2 <- c(dmn2, "MPV")
     dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
   } else {
@@ -1381,7 +1398,7 @@ pgumbel <- function(q, location = 0, scale = 1,
     max.r.vec <- max(r.vec)
     for (jay in 1:max.r.vec) {
       index <- (jay <= r.vec)
-      ans[index] <- ans[index] - (y[index,jay] - loc[index]) / sigma[index]
+      ans[index] <- ans[index] - (y[index,jay] - loc[index])/sigma[index]
     }
 
 
@@ -1478,7 +1495,7 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
-  
+
   oobounds.log <- -Inf
   if (!is.Numeric(tolshape0, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'tolshape0'")
@@ -1571,7 +1588,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
   rm(log.p)
-  
+
   if (lower.tail) {
     if (log.arg) p <- exp(p)
   } else {
@@ -1620,7 +1637,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
           lshape = logoff(offset = 0.5),
           percentiles = c(90, 95),
           iscale = NULL,
-          ishape = NULL, 
+          ishape = NULL,
           tolshape0 = 0.001,
           type.fitted = c("percentiles", "mean"),
           imethod = 1,
@@ -1640,7 +1657,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
   lshape <- attr(eshape, "function.name")
 
 
-  if (!is.Numeric(threshold)) 
+  if (!is.Numeric(threshold))
     stop("bad input for argument 'threshold'")
 
   if (!is.Numeric(imethod, length.arg = 1,
@@ -1648,7 +1665,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
      imethod > 2.5)
     stop("argument 'imethod' must be 1 or 2")
 
-  if (length(percentiles) && 
+  if (length(percentiles) &&
     (!is.Numeric(percentiles, positive = TRUE) ||
      max(percentiles) >= 100))
     stop("bad input for argument 'percentiles'")
@@ -1710,6 +1727,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
     extra$y.names <- y.names
     extra$type.fitted <- .type.fitted
     extra$percentiles <- .percentiles
+    extra$colnames.y  <- colnames(y)
 
 
 
@@ -1760,8 +1778,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
 
 
       init.sig[init.sig <=  0.0] <-  0.01  # sigma > 0
-      init.xii[init.xii <= -0.5] <- -0.40  # Fisher scoring works if xi > -0.5
-      init.xii[init.xii >=  1.0] <-  0.90  # Mean/var exists if xi < 1 / 0.5
+      init.xii[init.xii <= -0.5] <- -0.40  # FS works if xi > -0.5
+      init.xii[init.xii >=  1.0] <-  0.90  # Mean/var exists if xi < 1/0.5
       if ( .lshape == "loge")
         init.xii[init.xii <= 0.0] <-  0.05
 
@@ -1803,6 +1821,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
 
 
     M1 <- 2
+    NOS <- ncol(eta) / M1
     pcent <- extra$percentiles  # Post-20140912
 
 
@@ -1821,7 +1840,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
 
 
 
-    do.one <- function(yvec, shape, scale, 
+    do.one <- function(yvec, shape, scale,
                        threshold,
                        percentiles = c(90, 95),
                        y.name = NULL,
@@ -1847,13 +1866,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
                post.name else
                paste(y.name, post.name, sep = " "))
       fv
-    }
+    }  # do.one
 
 
 
 
       fv <- matrix(-1, nrow(sigma),  LP * ncoly)
-      colnames.cumsum.fv <- NULL
       for (jlocal in 1:ncoly) {
         block.mat.fv <-
           do.one(yvec = y[, jlocal],
@@ -1864,14 +1882,15 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
                  y.name = if (ncoly > 1) y.names[jlocal] else NULL,
                  tolshape0 = .tolshape0 )
         fv[, (jlocal - 1) *  LP + (1:LP)] <- block.mat.fv
-        colnames.cumsum.fv <- c(colnames.cumsum.fv,
-                                colnames(block.mat.fv))
       }
-      colnames(fv) <- colnames.cumsum.fv
+      fv <- label.cols.y(fv, colnames.y = extra$colnames.y,
+                         NOS = NOS, percentiles = pcent,
+                         one.on.one = FALSE)
     } else {
       fv <- Threshold + sigma / (1 - shape)
       fv[shape >= 1] <- Inf  # Mean exists only if shape < 1.
-      dimnames(fv) <- list(dimnames(eta)[[1]], y.names)
+      fv <- label.cols.y(fv, colnames.y = extra$colnames.y,
+                         NOS = NOS)
     }
 
     fv
@@ -1980,8 +1999,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
     iexp <-  is.zero &  !bad
 
     dl.dShape <- dl.dsigma <- rep_len(0, length(y))
-    dl.dsigma[igpd] <- ((1 + Shape[igpd]) * ystar[igpd]     / (sigma[igpd] +
-                             Shape[igpd]  * ystar[igpd])-1) /  sigma[igpd]
+    dl.dsigma[igpd] <- ((1+Shape[igpd]) * ystar[igpd]     / (sigma[igpd] +
+                           Shape[igpd]  * ystar[igpd])-1) /  sigma[igpd]
 
     dl.dShape[igpd] <- log(A[igpd])/Shape[igpd]^2 - (1 + 1/Shape[igpd]) *
                        ystar[igpd] / (A[igpd] * sigma[igpd])
@@ -2026,7 +2045,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0,
 
 
 meplot.default <- function(y, main = "Mean Excess Plot",
-    xlab = "Threshold", ylab = "Mean Excess", lty = c(2, 1:2), 
+    xlab = "Threshold", ylab = "Mean Excess", lty = c(2, 1:2),
     conf = 0.95, col = c("blue", "black", "blue"), type = "l", ...) {
 
 
@@ -2047,7 +2066,7 @@ meplot.default <- function(y, main = "Mean Excess Plot",
   sy <- sy - sqrt( .Machine$double.eps )
 
   matplot(sy, mymat, main = main,
-          xlab = xlab, ylab = ylab, 
+          xlab = xlab, ylab = ylab,
           lty = lty, col = col, type = type, ...)
   invisible(list(threshold = sy,
                  meanExcess = me,
@@ -2059,7 +2078,7 @@ meplot.default <- function(y, main = "Mean Excess Plot",
 meplot.vlm <- function(object, ...) {
    if (!length(y <- object at y))
      stop("y slot is empty")
-   ans <- meplot(as.numeric(y), ...) 
+   ans <- meplot(as.numeric(y), ...)
    invisible(ans)
 }
 
@@ -2104,7 +2123,7 @@ guplot.default <-
 guplot.vlm <- function(object, ...) {
     if (!length(y <- object at y))
       stop("y slot is empty")
-    ans <- guplot(as.numeric(y), ...) 
+    ans <- guplot(as.numeric(y), ...)
     invisible(ans)
 }
 
@@ -2252,7 +2271,7 @@ setMethod("guplot", "vlm",
     }
   }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale,
-                              .iscale = iscale, 
+                              .iscale = iscale,
             .R = R, .mpv = mpv, .percentiles = percentiles ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
     M1 <- 2
@@ -2333,7 +2352,7 @@ setMethod("guplot", "vlm",
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <- c(w) *
-                 dgumbel(x = y, location = Locat, scale = Scale, log = TRUE)
+              dgumbel(x = y, location = Locat, scale = Scale, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -2374,9 +2393,10 @@ setMethod("guplot", "vlm",
             .elocat = elocat, .escale = escale ))),
   weight = expression({
     digamma1 <- digamma(1)
-    ned2l.dscale2 <- ((2 + digamma1) * digamma1 + trigamma(1) + 1) / Scale^2
+    ned2l.dscale2 <- ((2 + digamma1) * digamma1 +
+                         trigamma(1) + 1) / Scale^2
     ned2l.dlocat2 <- 1 / Scale^2
-    ned2l.dlocsca <- -(1 + digamma1) / Scale^2 
+    ned2l.dlocsca <- -(1 + digamma1) / Scale^2
 
     wz <- array( c(c(w) * ned2l.dlocat2 * dlocat.deta^2,
                    c(w) * ned2l.dscale2 * dscale.deta^2,
@@ -2414,7 +2434,7 @@ setMethod("guplot", "vlm",
   new("vglmff",
   blurb = c("Censored Gumbel distribution\n\n",
             "Links:    ",
-            namesof("location", llocat, earg = elocat, tag = TRUE), ", ", 
+            namesof("location", llocat, earg = elocat, tag = TRUE), ", ",
             namesof("scale",    lscale, earg = escale, tag = TRUE), "\n",
             "Mean:     location + scale*0.5772..\n",
             "Variance: pi^2 * scale^2 / 6"),
@@ -2460,7 +2480,7 @@ setMethod("guplot", "vlm",
       namesof("scale",    .lscale , earg = .escale , tag = FALSE))
 
     if (!length(etastart)) {
-      sca.init <-  if (is.Numeric( .iscale, positive = TRUE)) 
+      sca.init <-  if (is.Numeric( .iscale, positive = TRUE))
                       .iscale else 1.1 * sqrt(var(y) * 6 ) / pi
       sca.init <- rep_len(sca.init, n)
       EulerM <- -digamma(1)
@@ -2471,8 +2491,8 @@ setMethod("guplot", "vlm",
               theta2eta(sca.init, .lscale , earg = .escale ))
     }
   }), list( .lscale = lscale, .iscale = iscale,
-            .llocat = llocat, 
-            .elocat = elocat, .escale = escale ))), 
+            .llocat = llocat,
+            .elocat = elocat, .escale = escale ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
     loc  <- eta2theta(eta[, 1], .llocat)
     sc   <- eta2theta(eta[, 2], .lscale)
@@ -2489,16 +2509,16 @@ setMethod("guplot", "vlm",
       mu
     }
   }, list( .lscale = lscale, .percentiles = percentiles,
-           .llocat = llocat, 
+           .llocat = llocat,
            .elocat = elocat, .escale = escale ,
-           .mean=mean ))), 
+           .mean=mean ))),
   last = eval(substitute(expression({
-        misc$link <- c(location= .llocat,  scale = .lscale) 
+        misc$link <- c(location= .llocat,  scale = .lscale)
         misc$earg <- list(location= .elocat, scale= .escale )
-        misc$true.mu <- .mean    # if FALSE then @fitted is not a true mu 
+        misc$true.mu <- .mean    # if FALSE then @fitted is not a true mu
         misc$percentiles = .percentiles
   }), list( .lscale = lscale, .mean=mean,
-            .llocat = llocat, 
+            .llocat = llocat,
             .elocat = elocat, .escale = escale ,
             .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
@@ -2519,7 +2539,7 @@ setMethod("guplot", "vlm",
                         "implemented yet") else
       sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
   }, list( .lscale = lscale,
-           .llocat = llocat, 
+           .llocat = llocat,
            .elocat = elocat, .escale = escale ))),
   vfamily = "cens.gumbel",
   deriv = eval(substitute(expression({
@@ -2551,7 +2571,7 @@ setMethod("guplot", "vlm",
     c(w) * cbind(dl.dloc * dloc.deta,
                  dl.dsc * dsc.deta)
   }), list( .lscale = lscale,
-            .llocat = llocat, 
+            .llocat = llocat,
             .elocat = elocat, .escale = escale ))),
   weight = expression({
     A1 <- ifelse(cenL, Fy, 0)
@@ -2560,7 +2580,7 @@ setMethod("guplot", "vlm",
     digamma1 <- digamma(1)
     ed2l.dsc2 <- ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
     ed2l.dloc2 <- 1 / sc^2
-    ed2l.dlocsc <- -(1 + digamma1) / sc^2 
+    ed2l.dlocsc <- -(1 + digamma1) / sc^2
     wz <- matrix(NA_real_, n, dimm(M = 2))
     wz[, iam(1, 1, M)] <- A2 * ed2l.dloc2 * dloc.deta^2
     wz[, iam(2, 2, M)] <- A2 * ed2l.dsc2 * dsc.deta^2
@@ -2627,16 +2647,16 @@ pfrechet <- function(q, location = 0, scale = 1, shape,
     if (log.p) {
       ans <- -(rzedd^shape)
       ans[q <= location] <- -Inf
-    } else {  
+    } else {
       ans <- exp(-(rzedd^shape))
-      ans[q <= location] <- 0 
+      ans[q <= location] <- 0
       }
   } else {
     if (log.p) {
       ans <- log(-expm1(-(rzedd^shape)))
       ans[q <= location] <- 0
     } else {
-      ans <- -expm1(-(rzedd^shape)) 
+      ans <- -expm1(-(rzedd^shape))
       ans[q <= location]  <- 1
     }
   }
@@ -2669,7 +2689,7 @@ qfrechet <- function(p, location = 0, scale = 1, shape,
       ln.p <- p
       ans <- location + scale * (-log(-expm1(ln.p)))^(-1 / shape)
       ans[ln.p > 0] <- NaN
-    } else { 
+    } else {
       ans <- location + scale * (-log1p(-p))^(-1 / shape)
       ans[p < 0] <- NaN
       ans[p == 0] <- Inf
@@ -2789,7 +2809,7 @@ frechet.control <- function(save.weights = TRUE, ...) {
         myquant <- (-log(myprobs))^(-1/shapeval)
         myfit <- lsfit(x = myquant, y = myobsns, intercept = TRUE)
         sum(myfit$resid^2)
-      } 
+      }
 
       shape.grid <- c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
       shape.grid <- c(1 / shape.grid, 1, shape.grid)
@@ -3169,7 +3189,7 @@ rec.exp1.control <- function(save.weights = TRUE, ...) {
     NN <- length(eta)
     rate <- c(eta2theta(eta, .lrate , .erate ))
 
-    dl.drate <- 1 / rate 
+    dl.drate <- 1 / rate
     dl.drate[NN] <- 1/ rate[NN] - y[NN, 1]
 
     drate.deta <- dtheta.deta(rate, .lrate , .erate )
@@ -3285,7 +3305,7 @@ dpois.points <- function(x, lambda, ostatistic,
 
 
     predictors.names <-
-      namesof("density", .link, earg = .earg , tag = FALSE) 
+      namesof("density", .link, earg = .earg , tag = FALSE)
 
 
 
diff --git a/R/family.functions.R b/R/family.functions.R
index fdcc035..0132949 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,18 +1,24 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
 
-fill <- 
-fill1 <- fill2 <- fill3 <- 
+
+
+
+fill <-
+fill1 <- fill2 <- fill3 <-
   function(x, values = 0, ncolx = ncol(x)) {
   x <- as.matrix(x)
   matrix(values, nrow = nrow(x), ncol = ncolx, byrow = TRUE)
 }
 
 
+
+
+
 extract.arg <- function(a) {
   s <- substitute(a)
   as.character(s)
@@ -20,6 +26,9 @@ extract.arg <- function(a) {
 
 
 
+
+
+
 remove.arg <- function(string) {
 
   nc <- nchar(string)
@@ -33,10 +42,14 @@ remove.arg <- function(string) {
 }
 
 
+
+
+
+
 add.arg <- function(string, arg.string) {
 
   if (arg.string == "")
-    return(string) 
+    return(string)
   nc <- nchar(string)
   lastc <- substring(string, nc, nc)
   if (lastc == ")") {
@@ -53,6 +66,10 @@ add.arg <- function(string, arg.string) {
 }
 
 
+
+
+
+
 get.arg <- function(string) {
 
   nc <- nchar(string)
@@ -74,10 +91,13 @@ get.arg <- function(string) {
     cbind(as.numeric((1:n) == i))
 
 
+
  eifun <-
  I.col <- function(i, n)
     diag(n)[, i, drop = FALSE]
 
+
+
  eijfun <- function(i, n) {
   temp <- matrix(0, n, 1)
   if (length(i))
@@ -86,11 +106,7 @@ get.arg <- function(string) {
 }
 
 
-dneg.binomial <- function(x, k, prob) {
 
-  care.exp(x * log1p(-prob) + k * log(prob) + lgamma(x+k) -
-           lgamma(k) - lgamma(x + 1))
-}
 
 
 
@@ -121,6 +137,9 @@ tapplymat1 <- function(mat, function.arg = c("cumsum", "diff", "cumprod")) {
 
 
 
+
+
+
 matrix.power <- function(wz, M, power, fast = TRUE) {
 
 
@@ -134,20 +153,20 @@ matrix.power <- function(wz, M, power, fast = TRUE) {
 
 
   if (M == 1 || dimm.value == M) {
-      WW <- wz^power          # May contain NAs
-      return(t(WW))
+    WW <- wz^power  # May contain NAs
+    return(t(WW))
   }
 
   if (fast) {
     k <- veigen(t(wz), M = M)  # matrix.arg)
-    evals <- k$values           # M x n
-    evects <- k$vectors         # M x M x n
+    evals  <- k$values   # M x n
+    evects <- k$vectors  # M x M x n
   } else {
     stop("sorry, cannot handle matrix-band form yet")
     k <- unlist(apply(wz, 3, eigen), use.names = FALSE)
     dim(k) <- c(M, M+1, n)
-    evals <- k[, 1, , drop = TRUE]      # M x n
-    evects <- k[, -1, , drop = TRUE]    # M x M x n
+    evals  <- k[,  1, , drop = TRUE]  # M x n
+    evects <- k[, -1, , drop = TRUE]  # M x M x n
   }
 
   temp <- evals^power    # Some values may be NAs
@@ -158,8 +177,8 @@ matrix.power <- function(wz, M, power, fast = TRUE) {
 
   index <- (index == 0)
   if (!all(index)) {
-    warning(paste("Some weight matrices have negative",
-                  "eigenvalues. They\nwill be assigned NAs"))
+    warning("Some weight matrices have negative ",
+            "eigenvalues. They will be assigned NAs")
     temp[,!index] <- 1
   }
 
@@ -170,6 +189,9 @@ matrix.power <- function(wz, M, power, fast = TRUE) {
 
 
 
+
+
+
 ResSS.vgam <- function(z, wz, M) {
 
 
@@ -182,14 +204,17 @@ ResSS.vgam <- function(z, wz, M) {
 
 
 
+
+
+
 wweighted.mean <- function(y, w = NULL, matrix.arg = TRUE) {
   if (!matrix.arg)
-    stop("currently, matrix.arg must be TRUE")
+    stop("currently, argument 'matrix.arg' must be TRUE")
   y <- as.matrix(y)
   M <- ncol(y)
   n <- nrow(y)
   if (M == 1) {
-    if (missing(w)) mean(y) else sum(w * y)/sum(w)
+    if (missing(w)) mean(y) else sum(w * y) / sum(w)
   } else {
     if (missing(w)) y %*% rep(1, n) else {
       numer <- mux22(t(w), y, M, as.matrix = TRUE)
@@ -206,6 +231,8 @@ wweighted.mean <- function(y, w = NULL, matrix.arg = TRUE) {
 
 
 
+
+
 veigen <- function(x, M) {
 
 
@@ -242,6 +269,7 @@ veigen <- function(x, M) {
 
 
 
+
 ima <- function(j, k, M) {
   if (length(M) > 1 || M <= 0 || j <= 0 || k <= 0 ||
       j > M || k > M)
@@ -254,18 +282,88 @@ ima <- function(j, k, M) {
 
 
 
+
+
 checkwz <- function(wz, M, trace = FALSE,
                     wzepsilon = .Machine$double.eps^0.75) {
   if (wzepsilon > 0.5)
-    warning("'wzepsilon' is probably too large")
+    warning("argument 'wzepsilon' is probably too large")
   if (!is.matrix(wz))
     wz <- as.matrix(wz)
-  if ((temp <- sum(wz[, 1:M, drop = FALSE] < wzepsilon)))
-    warning(paste(temp, "elements replaced by",
-                  signif(wzepsilon, 5)))
-  wz[, 1:M] <- pmax(wzepsilon, wz[, 1:M])
+  wzsubset <- wz[, 1:M, drop = FALSE]
+  if (any(is.na(wzsubset)))
+    stop("NAs found in the working weights variable 'wz'")
+  if (any(!is.finite(wzsubset)))
+    stop("Some elements in the working weights variable 'wz' are ",
+         "not finite")
+
+  if ((temp <- sum(wzsubset < wzepsilon)))
+    warning(temp, " diagonal elements of the working weights variable ",
+            "'wz' have been replaced by ", signif(wzepsilon, 5))
+  wz[, 1:M] <- pmax(wzepsilon, wzsubset)
   wz
 }
 
 
 
+
+
+
+
+
+label.cols.y <-
+  function(answer,
+           colnames.y = NULL,
+           NOS = 1,
+           percentiles = c(25, 50, 75),
+           one.on.one = TRUE,
+           byy = TRUE) {
+  if (!is.matrix(answer))
+    answer <- as.matrix(answer)
+
+  if (one.on.one) {
+    colnames(answer) <-
+      if (length(colnames.y) == ncol(answer))
+        colnames.y else NULL
+    return(answer)
+  }
+
+
+
+  if (is.null(percentiles))
+    percentiles <- c(25, 50, 75)  # Restore to the default
+
+  if (!is.Numeric(percentiles) ||
+      min(percentiles) <= 0 ||
+      max(percentiles) >= 100)
+    stop("values of 'percentiles' should be in [0, 100]")
+
+  percentiles <- signif(percentiles, digits = 5)
+
+  ab1 <- rep(as.character(percentiles), length = ncol(answer))
+  ab1 <- paste(ab1, "%", sep = "")
+  if (NOS > 1) {
+    suffix.char <- if (length(colnames.y) == NOS)
+      colnames.y else as.character(1:NOS)
+    ab1 <- paste(ab1, rep(suffix.char, each = length(percentiles)),
+                 sep = "")
+  }
+  colnames(answer) <- ab1
+
+
+  if (byy) {
+    answer <-
+      answer[, interleave.VGAM(.M = NCOL(answer),
+                               M1 = NOS),   # length(percentiles)),
+             drop = FALSE]
+  }
+  answer
+}
+
+
+
+
+
+
+
+
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 3a9e2cb..9e0e4d4 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -35,7 +35,7 @@
             ifelse(inbreeding, "without", "with"),
             " the Hardy-Weinberg equilibrium assumption)\n\n",
             "Links:    ",
-            namesof("p1", link, earg = earg, tag = FALSE), ", ", 
+            namesof("p1", link, earg = earg, tag = FALSE), ", ",
             namesof("p2", link, earg = earg, tag = FALSE),
             if (inbreeding) paste(",",
             namesof("f",  link, earg = earg, tag = FALSE)) else
@@ -148,6 +148,20 @@
                        log = TRUE, dochecking = FALSE))
       },
   vfamily = c("A1A2A3", "vgenetic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
+    p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
+    p3 <- 1-p1-p2
+    okay1 <- all(is.finite(p1)) && all(0 < p1 & p1 < 1) &&
+             all(is.finite(p2)) && all(0 < p2 & p2 < 1) &&
+             all(is.finite(p3)) && all(0 < p3 & p3 < 1)
+    okay2 <- TRUE
+    if ( .inbreeding ) {
+      f <- eta2theta(eta[, 3], link = .link , earg = .earg )
+      okay2 <- all(is.finite(f)) && all(0 <= f)  # && all(f < 1)
+    }
+    okay1 && okay2
+  }, list( .link = link, .earg = earg, .inbreeding = inbreeding) )),
   deriv = eval(substitute(expression({
     p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
     p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
@@ -159,7 +173,7 @@
                   0, -2*(1-f)*p2, -f - 2*p3*(1-f))
       dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
                    2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
-      dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3, 
+      dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
                    p3*(1-p3))
       dl1 <- rowSums(y * dP1 / mu)
       dl2 <- rowSums(y * dP2 / mu)
@@ -167,7 +181,7 @@
       dPP.deta <- dtheta.deta(cbind(p1, p2, f),
                               link = .link , earg = .earg )
       c(w) * cbind(dPP.deta[, 1] * dl1,
-                   dPP.deta[, 2] * dl2, 
+                   dPP.deta[, 2] * dl2,
                    dPP.deta[, 3] * dl3)
     } else {
       dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 -
@@ -225,8 +239,8 @@
   new("vglmff",
   blurb = c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
             "Links:    ",
-            namesof("mS", link, earg = earg), ", ", 
-            namesof("ms", link, earg = earg), ", ", 
+            namesof("mS", link, earg = earg), ", ",
+            namesof("ms", link, earg = earg), ", ",
             namesof("nS", link, earg = earg, tag = FALSE)),
   deviance = Deviance.categorical.data.vgam,
   initialize = eval(substitute(expression({
@@ -295,6 +309,17 @@
                        log = TRUE, dochecking = FALSE))
     },
   vfamily = c("MNSs", "vgenetic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mS <- eta2theta(eta[, 1], link = .link , earg = .earg )
+    ms <- eta2theta(eta[, 2], link = .link , earg = .earg )
+    nS <- eta2theta(eta[, 3], link = .link , earg = .earg )
+    ns <- 1-mS-ms-nS
+    okay1 <- all(is.finite(mS)) && all(0 < mS & mS < 1) &&
+             all(is.finite(ms)) && all(0 < ms & ms < 1) &&
+             all(is.finite(nS)) && all(0 < nS & nS < 1) &&
+             all(is.finite(ns)) && all(0 < ns & ns < 1)
+    okay1
+  }, list( .link = link, .earg = earg) )),
   deriv = eval(substitute(expression({
     mS <- eta2theta(eta[, 1], link = .link , earg = .earg )
     ms <- eta2theta(eta[, 2], link = .link , earg = .earg )
@@ -343,7 +368,7 @@
   new("vglmff",
   blurb = c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
             "Links:    ",
-            namesof("pA", link.pA, earg = earg.pA, tag = FALSE), ", ", 
+            namesof("pA", link.pA, earg = earg.pA, tag = FALSE), ", ",
             namesof("pB", link.pB, earg = earg.pB, tag = FALSE)),
   deviance = Deviance.categorical.data.vgam,
 
@@ -381,7 +406,7 @@
         setequal(ok.col.ny, col.ny)) {
       if (!all(ok.col.ny == col.ny))
         stop("the columns of the response matrix should have names ",
-             "(output of colnames()) ordered as c('A','B','AB','O')")
+             "(output of colnames()) ordered as c('A', 'B', 'AB', 'O')")
     }
 
 
@@ -413,7 +438,7 @@
       cbind(A  = pA*(pA+2*pO),
             B  = pB*(pB+2*pO),
             AB = 2*pA*pB,
-            O  = pO*pO) 
+            O  = pO*pO)
   }, list( .link.pA = link.pA, .link.pB = link.pB,
            .earg.pA = earg.pA, .earg.pB = earg.pB ))),
 
@@ -434,11 +459,21 @@
     },
 
   vfamily = c("ABO", "vgenetic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    ppp <- eta2theta(eta[, 1], link = .link.pA , earg = .earg.pA )
+    qqq <- eta2theta(eta[, 2], link = .link.pB , earg = .earg.pB )
+    rrr <- 1 - ppp - qqq  # abs(1 - ppp - qqq) prior to 20160624
+    okay1 <- all(is.finite(ppp)) && all(0 < ppp & ppp < 1) &&
+             all(is.finite(qqq)) && all(0 < qqq & qqq < 1) &&
+             all(is.finite(rrr)) && all(0 < rrr & rrr < 1)
+    okay1
+  }, list( .link.pA = link.pA, .link.pB = link.pB,
+           .earg.pA = earg.pA, .earg.pB = earg.pB ))),
 
   deriv = eval(substitute(expression({
     ppp <- eta2theta(eta[, 1], link = .link.pA , earg = .earg.pA )
     qqq <- eta2theta(eta[, 2], link = .link.pB , earg = .earg.pB )
-    rrr <- abs(1 - ppp - qqq)
+    rrr <- 1 - ppp - qqq  # abs(1 - ppp - qqq)
 
 
     pbar <- 2*rrr + ppp
@@ -523,7 +558,7 @@
     cbind(AB = 0.5 + pp4,
           Ab = 0.25 - pp4,
           aB = 0.25 - pp4,
-          ab = pp4) 
+          ab = pp4)
   }, list( .link = link, .earg = earg))),
 
   last = eval(substitute(expression({
@@ -542,6 +577,11 @@
                        log = TRUE, dochecking = FALSE))
     },
   vfamily = c("AB.Ab.aB.ab", "vgenetic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pp <- eta2theta(eta, link = .link , earg = .earg )
+    okay1 <- all(is.finite(pp)) && all(0 < pp & pp < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     pp <- eta2theta(eta, link = .link , earg = .earg )
 
@@ -583,7 +623,7 @@
            ipA = NULL,
            ifp = NULL,
            zero = NULL) {
-    
+
   linkp <- as.list(substitute(linkp))
   eargp <- link2list(linkp)
   linkp <- attr(eargp, "function.name")
@@ -595,7 +635,7 @@
   if (!is.logical(inbreeding) || length(inbreeding) > 1)
     stop("argument 'inbreeding' must be a single logical")
 
-  
+
 
   new("vglmff",
   blurb = c("AA-Aa-aa phenotype (",
@@ -644,7 +684,7 @@
         if ( .inbreeding )
         namesof("f",  .linkf , earg = .eargf , tag = FALSE) else NULL)
     mustart <- (y + mustart) / 2
-        
+
 
     if (is.null(etastart)) {
       pA <- if (is.numeric( .ipA )) rep_len( .ipA , n) else
@@ -694,6 +734,19 @@
                          log = TRUE, dochecking = FALSE))
     },
   vfamily = c("AA.Aa.aa", "vgenetic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    eta <- as.matrix(eta)
+    pA <- eta2theta(eta[, 1], link = .linkp , earg = .eargp )
+    okay1 <- all(is.finite(pA)) && all(0 < pA & pA < 1)
+    okay2 <- TRUE
+    if ( .inbreeding ) {
+      fp <- eta2theta(eta[, 2], link = .linkf , earg = .eargf )
+      okay2 <- all(is.finite(fp)) && all(0 <= fp)  # && all(fp < 1)
+    }
+    okay1 && okay2
+  }, list( .linkp = linkp, .linkf = linkf,
+           .eargp = eargp, .eargf = eargf,
+           .inbreeding = inbreeding ))),
   deriv = eval(substitute(expression({
     eta <- as.matrix(eta)
     pA <- eta2theta(eta[, 1], link = .linkp , earg = .eargp )
@@ -714,7 +767,7 @@
       dfp.deta <- dtheta.deta(fp, link = .linkf , earg = .eargf )
 
       c(w) * cbind(dPP.deta * dl1,
-                   dfp.deta * dl2)      
+                   dfp.deta * dl2)
     } else {
       nAA <- c(w) * y[, 1]
       nAa <- c(w) * y[, 2]
@@ -722,7 +775,7 @@
       dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
       dpA.deta <- dtheta.deta(pA, link = .linkp , earg = .eargp )
       dl.dpA * dpA.deta
-    }  
+    }
   }), list( .linkp = linkp, .linkf = linkf,
             .eargp = eargp, .eargf = eargf,
             .inbreeding = inbreeding ))),
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 248cf0e..ae492e9 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -46,16 +46,16 @@
 
   ans <-
   new("vglmff",
-  blurb = if (multiple.responses) c("Multiple binomial model\n\n", 
+  blurb = if (multiple.responses) c("Multiple binomial model\n\n",
          "Link:     ", namesof("mu[,j]", link, earg = earg), "\n",
          "Variance: mu[,j]*(1-mu[,j])") else
-         c("Binomial model\n\n", 
+         c("Binomial model\n\n",
          "Link:     ", namesof("prob", link, earg = earg), "\n",
          "Variance: mu * (1 - mu)"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
-                           bool = .parallel , 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
 
@@ -120,16 +120,16 @@
 
       dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
       dn2 <- if (length(dn2)) {
-        paste("E[", dn2, "]", sep = "") 
+        paste("E[", dn2, "]", sep = "")
       } else {
-        paste(new.name, 1:M, sep = "") 
+        paste(new.name, 1:M, sep = "")
       }
       predictors.names <-
           namesof(if (M > 1) dn2 else new.name,
                   .link , earg = .earg , short = TRUE)
 
       if (!length(mustart) && !length(etastart))
-        mustart <- matrix(colMeans(y.counts), nrow = nrow(y), ncol = ncol(y),
+        mustart <- matrix(colMeans(y.counts), nrow(y), ncol = ncol(y),
                          byrow = TRUE) /
                    matrix(colMeans(w), nrow = nrow(w), ncol = ncol(w),
                          byrow = TRUE)
@@ -176,7 +176,8 @@
         } else {
           stop("for the binomialff family, response 'y' must be a ",
                "vector of 0 and 1's\n",
-               "or a factor (first level = fail, other levels = success),\n",
+               "or a factor (first level = fail, other levels = success)",
+               ",\n",
                "or a 2-column matrix where col 1 is the no. of ",
                "successes and col 2 is the no. of failures")
         }
@@ -289,6 +290,11 @@
   }, list( .multiple.responses = multiple.responses ))),
 
   vfamily = c("binomialff", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu & mymu < 1)
+    okay1
+  }, list( .link = link, .earg = earg, .bred = bred))),
 
 
 
@@ -314,7 +320,7 @@
     if (length(m <- object at model) > 0) {
       y <- model.response(m)
       if (is.factor(y)) {
-        yy <- factor(1 + rbinom(ntot, size = 1, prob = ftd), 
+        yy <- factor(1 + rbinom(ntot, size = 1, prob = ftd),
                      labels = levels(y))
         split(yy, rep(seq_len(nsim), each = n))
       } else if (is.matrix(y) && ncol(y) == 2) {
@@ -343,7 +349,7 @@
       Hvector <- hatvaluesbasic(X.vlm = X.vlm.save,
                                 diagWm = c(t(w * mu)))  # Handles M>1
 
-      varY <- mu * (1 - mu) / w  # Is a matrix if M>1. Seems the most correct.
+      varY <- mu * (1 - mu) / w  # A matrix if M>1. Seems the most correct.
       d1.ADJ <-   dtheta.deta(mu, .link , earg = .earg )
 
       temp.earg <- .earg
@@ -380,7 +386,7 @@
   weight = eval(substitute(expression({
     tmp100 <- mu * (1.0 - mu)
 
-    tmp200 <- if ( .link == "logit") {
+    ned2ldprob2 <- if ( .link == "logit") {
       cbind(c(w) * tmp100)
     } else if ( .link == "cloglog") {
       cbind(c(w) * (1.0 - mu.use) * (log1p(-mu.use))^2 / mu.use)
@@ -389,13 +395,13 @@
                                earg = .earg )^2 / tmp100)
     }
     for (ii in 1:M) {
-      index500 <- !is.finite(tmp200[, ii]) |
-                  (abs(tmp200[, ii]) < .Machine$double.eps)
+      index500 <- !is.finite(ned2ldprob2[, ii]) |
+                  (abs(ned2ldprob2[, ii]) < .Machine$double.eps)
       if (any(index500)) {  # Diagonal 0s are bad
-        tmp200[index500, ii] <- .Machine$double.eps
+        ned2ldprob2[index500, ii] <- .Machine$double.eps
       }
     }
-    tmp200
+    ned2ldprob2
   }), list( .link = link, .earg = earg))))
 
 
@@ -403,7 +409,7 @@
 
 
 
-    ans at deviance <- 
+    ans at deviance <-
       if (multiple.responses)
         function(mu, y, w, residuals = FALSE, eta, extra = NULL,
                  summation = TRUE) {
@@ -479,9 +485,9 @@
     M <- if (is.matrix(y)) ncol(y) else 1
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) {
-      paste("E[", dn2, "]", sep = "") 
+      paste("E[", dn2, "]", sep = "")
     } else {
-      paste("mu", 1:M, sep = "") 
+      paste("mu", 1:M, sep = "")
     }
 
     predictors.names <-
@@ -504,7 +510,8 @@
         dpar <- rep_len(0, M)
         for (spp in 1:M) {
           temp <- c(w) * dmu.deta[, spp]^2
-          dpar[spp] <- sum(c(w) * (y[,spp]-mu[, spp])^2 * wz[, spp]/temp) / (
+          dpar[spp] <- sum(c(w) * (y[,spp]-mu[, spp])^2 *
+                           wz[, spp]/temp) / (
                        length(mu[,spp]) - ncol(x))
         }
       }
@@ -530,6 +537,11 @@
     theta2eta(mu, link = .link , earg = .earg )
   }, list( .link = link, .earg = earg))),
   vfamily = "gammaff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- theta2eta(mu, .link , earg = .earg )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     M1 <- 1
     ncoly <- ncol(as.matrix(y))
@@ -587,6 +599,7 @@
     list(M1 = 1,
          Q1 = 1,
          parameters.names = c("mu"),
+         quasi.type = TRUE,
          dispersion = .dispersion )
   }, list( .earg = earg , .dispersion = dispersion ))),
   initialize = eval(substitute(expression({
@@ -607,9 +620,9 @@
     M <- if (is.matrix(y)) ncol(y) else 1
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) {
-      paste("E[", dn2, "]", sep = "") 
+      paste("E[", dn2, "]", sep = "")
     } else {
-      paste("mu", 1:M, sep = "") 
+      paste("mu", 1:M, sep = "")
     }
 
     predictors.names <-
@@ -646,9 +659,14 @@
             .estimated.dispersion = estimated.dispersion,
             .link = link, .earg = earg ))),
   linkfun = eval(substitute(function(mu, extra = NULL) {
-      theta2eta(mu, link = .link, earg = .earg )
+    theta2eta(mu, link = .link , earg = .earg )
   }, list( .link = link, .earg = earg ))),
   vfamily = "inverse.gaussianff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- theta2eta(mu, .link , earg = .earg )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     M1 <- 1
     ncoly <- ncol(as.matrix(y))
@@ -777,14 +795,15 @@ rinv.gaussian <- function(n, mu, lambda) {
   new("vglmff",
   blurb = c("Inverse Gaussian distribution\n\n",
             "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
-            "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); y, mu & lambda > 0",
+            "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); ",
+            "y, mu & lambda > 0",
             "Link:     ", namesof("mu",     lmu,     earg = emu), ", ",
-                          namesof("lambda", llambda, earg = elambda), "\n",
+                          namesof("lambda", llambda, earg = elambda),"\n",
             "Mean:     ", "mu\n",
             "Variance: mu^3 / lambda"),
   constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
-                           bool = .parallel , 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
 
@@ -880,7 +899,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     misc$M1 <- M1
     misc$imethod <- .imethod
-    misc$ishrinkage <- .ishrinkage 
+    misc$ishrinkage <- .ishrinkage
     misc$expected <- TRUE
     misc$multipleResponses <- FALSE
     misc$parallel <- .parallel
@@ -913,6 +932,16 @@ rinv.gaussian <- function(n, mu, lambda) {
            .emu = emu, .elambda = elambda ))),
 
   vfamily = "inv.gaussianff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu   <- eta2theta(eta[, c(TRUE, FALSE)],
+                        link = .lmu ,     earg = .emu )
+    lambda <- eta2theta(eta[, c(FALSE, TRUE)],
+                        link = .llambda , earg = .elambda )
+    okay1 <- all(is.finite(mymu  )) && all(0 < mymu  ) &&
+             all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .lmu = lmu, .llambda = llambda,
+           .emu = emu, .elambda = elambda ))),
 
   deriv = eval(substitute(expression({
     M1 <- 2
@@ -955,9 +984,14 @@ rinv.gaussian <- function(n, mu, lambda) {
                        imu = NULL, imethod = 1,
                        parallel = FALSE, zero = NULL,
                        bred = FALSE,
-                       earg.link = FALSE) {
+                       earg.link = FALSE,
+                       type.fitted = c("mean", "quantiles"),
+                       percentiles = c(25, 50, 75)) {
+
 
 
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "quantiles"))[1]
 
   if (!is.logical(bred) || length(bred) > 1)
     stop("argument 'bred' must be a single logical")
@@ -990,8 +1024,8 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Link:     ", namesof("lambda", link, earg = earg), "\n",
             "Variance: lambda"),
   constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
-                           bool = .parallel , 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints = constraints)
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
@@ -1004,20 +1038,25 @@ rinv.gaussian <- function(n, mu, lambda) {
          expected = TRUE,
          multipleResponses = TRUE,
          parameters.names = c("lambda"),
+         type.fitted = .type.fitted ,
+         percentiles = .percentiles ,
          bred = .bred ,
          zero = .zero )
   }, list( .zero = zero,
+           .type.fitted = type.fitted,
+           .percentiles = percentiles,
            .bred = bred ))),
 
 
-  deviance =
+  deviance = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
+    mupo <- eta2theta(eta, link = .link , earg = .earg )
     nz <- (y > 0)
-    devi <-  -(y - mu)
-    devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+    devi <-  -(y - mupo)
+    devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mupo[nz])
     if (residuals) {
-      sign(y - mu) * sqrt(2 * abs(devi) * c(w))
+      sign(y - mupo) * sqrt(2 * abs(devi) * c(w))
     } else {
       dev.elts <- 2 * c(w) * devi
       if (summation) {
@@ -1026,7 +1065,7 @@ rinv.gaussian <- function(n, mu, lambda) {
         dev.elts
       }
     }
-  },
+  }, list( .link = link, .earg = earg ))),
 
   initialize = eval(substitute(expression({
 
@@ -1052,9 +1091,9 @@ rinv.gaussian <- function(n, mu, lambda) {
     new.name <- "lambda"
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) {
-      paste("E[", dn2, "]", sep = "") 
+      paste("E[", dn2, "]", sep = "")
     } else {
-      paste(new.name, 1:M, sep = "") 
+      paste(new.name, 1:M, sep = "")
     }
     predictors.names <-
       namesof(if (M > 1) dn2 else new.name, # was "mu" == old.name
@@ -1068,6 +1107,9 @@ rinv.gaussian <- function(n, mu, lambda) {
       }
     }
 
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+    extra$percentiles <- .percentiles
 
 
     if (!length(etastart)) {
@@ -1084,11 +1126,43 @@ rinv.gaussian <- function(n, mu, lambda) {
       etastart <- theta2eta(mu.init, link = .link , earg = .earg )
     }
   }), list( .link = link, .estimated.dispersion = estimated.dispersion,
+            .type.fitted = type.fitted,
+            .percentiles = percentiles,
             .bred = bred,
             .imethod = imethod, .imu = imu, .earg = earg))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    mu <- eta2theta(eta, link = .link , earg = .earg )
-    mu
+    mupo <- eta2theta(eta, link = .link , earg = .earg )
+
+    type.fitted <-
+      if (length(extra$type.fitted)) {
+        extra$type.fitted
+      } else {
+        warning("cannot find 'type.fitted'. Returning 'mean'.")
+        "mean"
+      }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("mean", "quantiles"))[1]
+
+    if (type.fitted == "mean") {
+      return(label.cols.y(mupo, colnames.y = extra$colnames.y,
+                          NOS = NOS))
+    }
+
+
+    percvec <- extra$percentiles
+    lenperc <- length(percvec)
+    NOS <- NCOL(eta) / c(M1 = 1)
+    jvec <- lenperc * (0:(NOS - 1))
+    ans <- matrix(0, NROW(eta), lenperc * NOS)
+    for (kay in 1:lenperc)
+      ans[, jvec + kay] <-
+        qpois(0.01 * percvec[kay], lambda = mupo)
+
+    rownames(ans) <- rownames(eta)
+    label.cols.y(ans, colnames.y = extra$colnames.y,
+                 NOS = NOS, percentiles = percvec,
+                 one.on.one = FALSE)
   }, list( .link = link, .earg = earg))),
 
   last = eval(substitute(expression({
@@ -1097,7 +1171,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     dpar <- .dispersion
     if (!dpar) {
       temp87 <- (y-mu)^2 *
-          wz / (dtheta.deta(mu, link = .link , earg = .earg )^2)  # w cancel
+        wz / (dtheta.deta(mu, link = .link , earg = .earg )^2)  # w cancel
       if (M > 1 && ! .onedpar ) {
         dpar <- rep_len(NA_real_, M)
         temp87 <- cbind(temp87)
@@ -1114,10 +1188,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     misc$default.dispersion <- 1
     misc$estimated.dispersion <- .estimated.dispersion
 
-    misc$expected <- TRUE
     misc$imethod <- .imethod
-    misc$multipleResponses <- TRUE
-    misc$bred <- .bred
 
 
     misc$link <- rep_len( .link , M)
@@ -1137,21 +1208,27 @@ rinv.gaussian <- function(n, mu, lambda) {
     theta2eta(mu, link = .link , earg = .earg )
   }, list( .link = link, .earg = earg))),
 
-  loglikelihood =
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
+    mupo <- eta2theta(eta, link = .link , earg = .earg )
     if (residuals) {
-      c(w) * (y / mu - 1)
+      c(w) * (y / mupo - 1)
     } else {
-      ll.elts <- c(w) * dpois(x = y, lambda = mu, log = TRUE)
+      ll.elts <- c(w) * dpois(x = y, lambda = mupo, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  },
+  }, list( .link = link, .earg = earg ))),
   vfamily = "poissonff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mupo <- eta2theta(eta, link = .link , earg = .earg )
+    okay1 <- all(is.finite(mupo)) && all(0 < mupo)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
 
@@ -1162,7 +1239,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     ftd <- fitted(object)
     rpois(nsim * length(ftd), ftd)
@@ -1171,14 +1248,15 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
   deriv = eval(substitute(expression({
+    mupo <- eta2theta(eta, link = .link , earg = .earg )
     yBRED <- if ( .bred ) {
       Hvector <- hatvaluesbasic(X.vlm = X.vlm.save,
-                                diagWm = c(t(c(w) * mu)))  # Handles M>1
+                                diagWm = c(t(c(w) * mupo)))  # Handles M>1
 
 
-      varY <- mu # Is a matrix if M>1.
-      d1.BRED <-   dtheta.deta(mu, .link , earg = .earg )
-      d2.BRED <- d2theta.deta2(mu, .link , earg = .earg )
+      varY <- mupo  # Is a matrix if M>1.
+      d1.BRED <-   dtheta.deta(mupo, .link , earg = .earg )
+      d2.BRED <- d2theta.deta2(mupo, .link , earg = .earg )
       y + matrix(Hvector, n, M, byrow = TRUE) *
                  varY * d2.BRED / (2 * d1.BRED^2)
     } else {
@@ -1186,10 +1264,10 @@ rinv.gaussian <- function(n, mu, lambda) {
     }
 
 
-    answer <- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
-      c(w) * (yBRED - mu)
+    answer <- if ( .link == "loge" && (any(mupo < .Machine$double.eps))) {
+      c(w) * (yBRED - mupo)
     } else {
-      lambda <- mu
+      lambda <- mupo
       dl.dlambda <- (yBRED - lambda) / lambda
       dlambda.deta <- dtheta.deta(theta = lambda,
                                   link = .link , earg = .earg )
@@ -1200,8 +1278,8 @@ rinv.gaussian <- function(n, mu, lambda) {
   }), list( .link = link, .earg = earg, .bred = bred))),
 
   weight = eval(substitute(expression({
-    if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
-      tmp600 <- mu
+    if ( .link == "loge" && (any(mupo < .Machine$double.eps))) {
+      tmp600 <- mupo
       tmp600[tmp600 < .Machine$double.eps] <- .Machine$double.eps
       c(w) * tmp600
     } else {
@@ -1211,7 +1289,7 @@ rinv.gaussian <- function(n, mu, lambda) {
       c(w) * dlambda.deta^2 * ned2l.dlambda2
     }
   }), list( .link = link, .earg = earg))))
-}
+}  # poissonff()
 
 
 
@@ -1227,7 +1305,7 @@ rinv.gaussian <- function(n, mu, lambda) {
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
-  dispersion <- 0 # Estimated; this is the only difference with binomialff()
+  dispersion <- 0 # Estimated; this is the only difference w. binomialff()
   ans <- binomialff(link = earg, earg.link = TRUE,
                     dispersion = dispersion,
                     multiple.responses = multiple.responses,
@@ -1239,6 +1317,7 @@ rinv.gaussian <- function(n, mu, lambda) {
          Q1 = 1,
          multipleResponses = .multiple.responses ,
          parameters.names = c("prob"),
+         quasi.type = TRUE,
          zero = .zero )
   }, list( .zero = zero,
            .multiple.responses = multiple.responses )))
@@ -1270,6 +1349,7 @@ rinv.gaussian <- function(n, mu, lambda) {
          Q1 = 1,
          multipleResponses = TRUE,
          parameters.names = c("lambda"),
+         quasi.type = TRUE,
          zero = .zero )
   }, list( .zero = zero )))
 
@@ -1336,7 +1416,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     M <- if (is.matrix(y)) ncol(y) else 1
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) {
-        paste("E[", dn2, "]", sep = "") 
+        paste("E[", dn2, "]", sep = "")
     } else {
         "mu"
     }
@@ -1387,10 +1467,19 @@ rinv.gaussian <- function(n, mu, lambda) {
   }, list( .lmean = lmean, .emean = emean,
            .ldisp = ldisp, .edisp = edisp ))),
   vfamily = "double.exppoisson",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta[, 1], link = .lmean , earg = .emean )
+    Disper <- eta2theta(eta[, 2], link = .ldisp , earg = .edisp )
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda) &&
+             all(is.finite(Disper)) && all(0 < Disper & Disper < 1)
+    okay1
+  }, list( .lmean = lmean, .emean = emean,
+           .ldisp = ldisp, .edisp = edisp ))),
+
+
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
-    Disper <- eta2theta(eta[, 2], link = .ldisp,
-                        earg = .edisp)
+    lambda <- eta2theta(eta[, 1], link = .lmean , earg = .emean )
+    Disper <- eta2theta(eta[, 2], link = .ldisp , earg = .edisp )
 
     dl.dlambda <- Disper * (y / lambda - 1)
     dl.dDisper <- y * log(lambda) + y - lambda + 0.5 / Disper
@@ -1499,12 +1588,12 @@ rinv.gaussian <- function(n, mu, lambda) {
             w <- w * nvec
             init.mu <- (0.5 + nvec * y) / (1 + nvec)
         } else
-            stop("for the double.expbinomial family, response 'y' must be a ",
-                 "vector of 0 and 1's\n",
-                     "or a factor (first level = fail, ",
-                     "other levels = success),\n",
-                     "or a 2-column matrix where col 1 is the no. of ",
-                     "successes and col 2 is the no. of failures")
+          stop("for the double.expbinomial family, response 'y' must be",
+               " a vector of 0 and 1's\n",
+               "or a factor (first level = fail, ",
+               "other levels = success),\n",
+               "or a 2-column matrix where col 1 is the no. of ",
+               "successes and col 2 is the no. of failures")
 
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) paste("E[", dn2, "]", sep = "") else "mu"
@@ -1545,7 +1634,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
       temp1 <- y * log(ifelse(y > 0, y, 1))  # y*log(y)
-      temp2 <- (1.0-y) * log1p(ifelse(y < 1, -y, 0))  # (1-y)*log(1-y)
+      temp2 <- (1.0-y)* log1p(ifelse(y < 1, -y, 0))  # (1-y)*log(1-y)
 
 
       ll.elts <-
@@ -1561,6 +1650,14 @@ rinv.gaussian <- function(n, mu, lambda) {
   }, list( .lmean = lmean, .emean = emean,
            .ldisp = ldisp, .edisp = edisp ))),
   vfamily = "double.expbinomial",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    prob   <- eta2theta(eta[, 1], link = .lmean , earg = .emean )
+    Disper <- eta2theta(eta[, 2], link = .ldisp , earg = .edisp )
+    okay1 <- all(is.finite(prob  )) && all(0 < prob   & prob   < 1) &&
+             all(is.finite(Disper)) && all(0 < Disper & Disper < 1)
+    okay1
+  }, list( .lmean = lmean, .emean = emean,
+           .ldisp = ldisp, .edisp = edisp ))),
   deriv = eval(substitute(expression({
     prob   <- eta2theta(eta[, 1], link = .lmean, earg = .emean)
     Disper <- eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
@@ -1570,7 +1667,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     temp3 <- pmax(temp3, .Machine$double.eps * 10000)
 
     dl.dprob <- w * Disper * (y - prob) / temp3
-    dl.dDisper <- 0.5 / Disper + w * (y * log(prob) + 
+    dl.dDisper <- 0.5 / Disper + w * (y * log(prob) +
                  (1-y)*log1p(-prob) - temp1 - temp2)
 
     dprob.deta   <- dtheta.deta(theta = prob,   .lmean, earg = .emean)
@@ -1599,12 +1696,12 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
  augbinomial <- function(link = "logit", multiple.responses = FALSE,
-                        parallel = TRUE) {
+                         parallel = TRUE) {
 
-    if (!is.logical(parallel) ||
-        length(parallel) != 1 ||
-        !parallel)
-      warning("Argument 'parallel' should be assigned 'TRUE' only")
+  if (!is.logical(parallel) ||
+      length(parallel) != 1 ||
+      !parallel)
+    warning("Argument 'parallel' should be assigned 'TRUE' only")
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
@@ -1613,13 +1710,13 @@ rinv.gaussian <- function(n, mu, lambda) {
 
   new("vglmff",
   blurb = if (multiple.responses)
-          c("Augmented multivariate binomial model\n\n", 
+          c("Augmented multivariate binomial model\n\n",
          "Link:     ",
          namesof("mu.1[,j]", link, earg = earg), ", ",
          namesof("mu.2[,j]", link, earg = earg),
          "\n",
          "Variance: mu[,j]*(1-mu[,j])") else
-         c("Augmented binomial model\n\n", 
+         c("Augmented binomial model\n\n",
          "Link:     ",
          namesof("mu.1[,j]", link, earg = earg), ", ",
          namesof("mu.2[,j]", link, earg = earg),
@@ -1627,10 +1724,10 @@ rinv.gaussian <- function(n, mu, lambda) {
          "Variance: mu*(1-mu)"),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
                       summation = TRUE) {
-      Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu), y=cbind(y, 1-y),
-                                     w = w, residuals = residuals,
-                                     eta = eta, extra = extra,
-                                     summation = summation)
+    Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu), y=cbind(y, 1-y),
+                                   w = w, residuals = residuals,
+                                   eta = eta, extra = extra,
+                                   summation = summation)
   },
   infos = eval(substitute(function(...) {
     list(M1 = 2,
@@ -1648,9 +1745,9 @@ rinv.gaussian <- function(n, mu, lambda) {
             stop("response must contain 0's and 1's only")
         dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
         dn2 = if (length(dn2)) {
-            paste("E[", dn2, "]", sep = "") 
+            paste("E[", dn2, "]", sep = "")
         } else {
-            paste("mu", 1:M, sep = "") 
+            paste("mu", 1:M, sep = "")
         }
         predictors.names <-
           c(namesof(if (M > 1) dn2 else
@@ -1704,12 +1801,12 @@ rinv.gaussian <- function(n, mu, lambda) {
               if (!length(mustart) && !length(etastart))
                 mustart = (0.5 + nvec * y) / (1 + nvec)
           } else {
-              stop("for the binomialff family, response 'y' must be a ",
-                   "vector of 0 and 1's\n",
-                   "or a factor (first level = fail, ",
-                                 "other levels = success),\n",
-                   "or a 2-column matrix where col 1 is the no. of ",
-                   "successes and col 2 is the no. of failures")
+            stop("for the binomialff family, response 'y' must be a ",
+                 "vector of 0 and 1's\n",
+                 "or a factor (first level = fail, ",
+                 "other levels = success),\n",
+                 "or a 2-column matrix where col 1 is the no. of ",
+                 "successes and col 2 is the no. of failures")
           }
           predictors.names <-
             c(namesof("mu.1", .link , earg = .earg , short = TRUE),
@@ -1720,8 +1817,7 @@ rinv.gaussian <- function(n, mu, lambda) {
   linkinv = eval(substitute(function(eta, extra = NULL) {
     Mdiv2  =  ncol(eta) / 2
     index1 =  2*(1:Mdiv2) - 1
-    mu =  eta2theta(eta[, index1],
-                    link = .link , earg = .earg )
+    mu <-  eta2theta(eta[, index1], link = .link , earg = .earg )
     mu
   }, list( .link = link, .earg = earg  ))),
   last = eval(substitute(expression({
@@ -1771,6 +1867,13 @@ rinv.gaussian <- function(n, mu, lambda) {
     }
   },
   vfamily = c("augbinomial", "VGAMcategorical"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Mdiv2  =  ncol(eta) / 2
+    index1 =  2*(1:Mdiv2) - 1
+    mu <- eta2theta(eta[, index1], link = .link , earg = .earg )
+    okay1 <- all(is.finite(mu)) && all(0 < mu & mu < 1)
+    okay1
+  }, list( .link = link, .earg = earg))),
   deriv = eval(substitute(expression({
     M1 <- 2
     Mdiv2 <-  M / 2
@@ -1800,16 +1903,16 @@ rinv.gaussian <- function(n, mu, lambda) {
     myderiv
   }), list( .link = link, .earg = earg))),
   weight = eval(substitute(expression({
-      tmp100 <- mu * (1.0 - mu)
+    tmp100 <- mu * (1.0 - mu)
 
-      tmp200 <- if ( .link == "logit") {
-          cbind(w * tmp100)
-        } else {
-          cbind(w * dtheta.deta(mu, link = .link , earg = .earg )^2 / tmp100)
-        }
+    tmp200 <- if ( .link == "logit") {
+      cbind(w * tmp100)
+    } else {
+      cbind(w * dtheta.deta(mu, link = .link , earg = .earg )^2 / tmp100)
+    }
 
-      wk.wt1 <- (Konst1^2) * tmp200 * (1 - mu)
-      wk.wt2 <- (Konst1^2) * tmp200 *      mu
+    wk.wt1 <- (Konst1^2) * tmp200 * (1 - mu)
+    wk.wt2 <- (Konst1^2) * tmp200 *      mu
 
 
 
diff --git a/R/family.loglin.R b/R/family.loglin.R
index a1ac483..c844fd9 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -71,7 +71,7 @@
       mustart[,3] <- weighted.mean(y[,1]*(1-y[,2]), w)
       mustart[,4] <- weighted.mean(y[,1]*y[,2], w)
       if (any(mustart == 0))
-        stop("some combinations of the response not realized") 
+        stop("some combinations of the response not realized")
     }
   }),
   linkinv = function(eta, extra = NULL) {
@@ -94,10 +94,10 @@
     misc$multipleResponses <- TRUE
   }),
   linkfun = function(mu, extra = NULL)  {
-    u0 <-  log(mu[,1]) 
+    u0 <-  log(mu[,1])
     u2 <-  log(mu[,2]) - u0
     u1 <-  log(mu[,3]) - u0
-    u12 <- log(mu[,4]) - u0 - u1 - u2 
+    u12 <- log(mu[,4]) - u0 - u1 - u2
     cbind(u1, u2, u12)
   },
   loglikelihood =
@@ -121,30 +121,39 @@
     }
   },
   vfamily = c("loglinb2"),
+  validparams = function(eta, y, extra = NULL) {
+    u1 <-  eta[, 1]
+    u2 <-  eta[, 2]
+    u12 <- eta[, 3]
+    okay1 <- all(is.finite(u1 )) &&
+             all(is.finite(u2 )) &&
+             all(is.finite(u12))
+    okay1
+  },
   deriv = expression({
-    u1 <-  eta[,1]
-    u2 <-  eta[,2]
-    u12 <- eta[,3]
+    u1 <-  eta[, 1]
+    u2 <-  eta[, 2]
+    u12 <- eta[, 3]
     denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
-    du0.du1 <- -(exp(u1) + exp(u1 + u2 + u12)) / denom 
-    du0.du2 <- -(exp(u2) + exp(u1 + u2 + u12)) / denom 
-    du0.du12 <- -exp(u1 + u2 + u12) / denom 
-    c(w) * cbind(du0.du1  + y[,1], 
+    du0.du1 <- -(exp(u1) + exp(u1 + u2 + u12)) / denom
+    du0.du2 <- -(exp(u2) + exp(u1 + u2 + u12)) / denom
+    du0.du12 <- -exp(u1 + u2 + u12) / denom
+    c(w) * cbind(du0.du1  + y[,1],
                  du0.du2  + y[,2],
-                 du0.du12 + y[,1] * y[,2]) 
+                 du0.du12 + y[,1] * y[,2])
   }),
   weight = expression({
-    d2u0.du1.2 <- -(exp(u1) + exp(u1 + u2 + u12)) * (1+exp(u2)) / denom^2 
-    d2u0.du22 <-  -(exp(u2) + exp(u1 + u2 + u12)) * (1+exp(u1)) / denom^2 
-    d2u0.du122 <- -exp(u1 + u2 + u12) * (1+exp(u1)+exp(u2)) / denom^2 
-    d2u0.du1u2 <- -(exp(u1 + u2 + u12) - exp(u1 + u2)) / denom^2 
-    d2u0.du1u3 <- -(1 + exp(u2)) * exp(u1 + u2 + u12) / denom^2 
-    d2u0.du2u3 <- -(1 + exp(u1)) * exp(u1 + u2 + u12) / denom^2 
-
-    wz <- matrix(NA_real_, n, dimm(M)) 
-    wz[,iam(1,1,M)] <- -d2u0.du1.2 
+    d2u0.du1.2 <- -(exp(u1) + exp(u1 + u2 + u12)) * (1+exp(u2)) / denom^2
+    d2u0.du22 <-  -(exp(u2) + exp(u1 + u2 + u12)) * (1+exp(u1)) / denom^2
+    d2u0.du122 <- -exp(u1 + u2 + u12) * (1+exp(u1)+exp(u2)) / denom^2
+    d2u0.du1u2 <- -(exp(u1 + u2 + u12) - exp(u1 + u2)) / denom^2
+    d2u0.du1u3 <- -(1 + exp(u2)) * exp(u1 + u2 + u12) / denom^2
+    d2u0.du2u3 <- -(1 + exp(u1)) * exp(u1 + u2 + u12) / denom^2
+
+    wz <- matrix(NA_real_, n, dimm(M))
+    wz[,iam(1,1,M)] <- -d2u0.du1.2
     wz[,iam(2,2,M)] <- -d2u0.du22
-    wz[,iam(3,3,M)] <- -d2u0.du122 
+    wz[,iam(3,3,M)] <- -d2u0.du122
     wz[,iam(1,2,M)] <- -d2u0.du1u2
     wz[,iam(1,3,M)] <- -d2u0.du1u3
     wz[,iam(2,3,M)] <- -d2u0.du2u3
@@ -242,7 +251,7 @@
       mustart[,7] <- weighted.mean(y[,1]*y[,2]*(1-y[,3]), w)
       mustart[,8] <- weighted.mean(y[,1]*y[,2]*y[,3], w)
       if (any(mustart == 0))
-        stop("some combinations of the response not realized") 
+        stop("some combinations of the response not realized")
     }
   }),
   linkinv = function(eta, extra = NULL) {
@@ -316,6 +325,10 @@
     }
   },
   vfamily = c("loglinb3"),
+  validparams = function(eta, y, extra = NULL) {
+    okay1 <- all(is.finite(eta))
+    okay1
+  },
   deriv = expression({
     u1  <- eta[, 1]
     u2  <- eta[, 2]
@@ -341,7 +354,7 @@
     A23 <- exp(u2 + u3 + u23) + allterms
 
 
-    c(w) * cbind(-A1/denom + y[,1], 
+    c(w) * cbind(-A1/denom + y[,1],
                  -A2/denom + y[,2],
                  -A3/denom + y[,3],
                  -A12/denom + y[,1]*y[,2],
@@ -354,7 +367,7 @@
     dA3.du1 <- exp(u1 + u3 + u13) + allterms
     dA3.du2 <- exp(u2 + u3 + u23) + allterms
 
-    wz <- matrix(NA_real_, n, dimm(6)) 
+    wz <- matrix(NA_real_, n, dimm(6))
     expu0 <- exp(u0)
 
     wz[,iam(1,1,M)] <- A1 * (1 - expu0 * A1)
@@ -378,7 +391,7 @@
     wz[,iam(3,4,M)] <- (allterms - expu0 * A3 * A12)
     wz[,iam(3,5,M)] <- A13 * (1 - expu0 * A3)
     wz[,iam(3,6,M)] <- A23 * (1 - expu0 * A3)
-    wz <- expu0 * wz 
+    wz <- expu0 * wz
     c(w) * wz
   }))
 }
diff --git a/R/family.math.R b/R/family.math.R
index 26eb6c8..51fc1dc 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -171,7 +171,7 @@ expint <- function (x, deriv = 0) {
       stop("Bad input for argument 'deriv'")
     answer <- rep_len(0, length(x))
     if (deriv == 1) {
-      answer <- exp(x) / x  
+      answer <- exp(x) / x
     }
     if (deriv == 2) {
       answer <- exp(x) / x - exp(x) / x^2
@@ -222,7 +222,7 @@ expint.E1 <- function (x, deriv = 0) {
       stop("Bad input for argument 'deriv'")
     answer <- rep_len(0, length(x))
     if (deriv == 1) {
-      answer <- exp(-x) / x  
+      answer <- exp(-x) / x
     }
     if (deriv == 2) {
       answer <- exp(-x) / x + exp(-x) / x^2
@@ -283,6 +283,13 @@ expexpint <- function(x) {
 }
 
 
+# Disabled (wrapped in 'if (FALSE)'): rising factorial (Pochhammer symbol)
+# (x)_n = Gamma(x + n) / Gamma(x), computed on the log scale via lgamma()
+# to avoid overflow for large arguments.  Kept for reference only; it is
+# never defined at package load time.
+if (FALSE)
+pochhammer <- function (x, n) {
+  exp(lgamma(x+n) - lgamma(x))
+}
+
+
+
 
 
 
@@ -308,4 +315,134 @@ expint.E1 <- function(x) {
 
 
 
+ Zeta.aux <- function(shape, qq, shift = 1) {
+# Euler-Maclaurin tail correction used by zeta(): approximates the
+# remainder of the (shifted) zeta series after the first 'qq' terms,
+# i.e. roughly sum_{i >= qq} 1 / (shift + i)^shape.  Intended for
+# large 'qq' (the caller zeta() uses qq = 12).
+# Arguments 'shape' and 'qq' are recycled to a common length.
+
+
+
+  LLL <- max(length(shape), length(qq))
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(qq   ) != LLL) qq    <- rep_len(qq,    LLL)
+
+# NOTE(review): the test is 'qq < 11' (12-1) but the message says
+# "12 or more" -- confirm which bound is intended.
+  if (any(qq < 12-1))
+    warning("all values of argument 'q' should be 12 or more")
+  aa <- qq
+
+
+# B2 holds the even-index Bernoulli numbers B_2, B_4, ..., B_16
+# (1/6, -1/30, 1/42, ...), the coefficients of the Euler-Maclaurin
+# correction terms below.
+  B2 <- c(1/6, -1/30, 1/42, -1/30, 5/66, -691/2730, 7/6, -3617/510)
+  kk <- length(B2)  # 8
+# Leading terms: the integral approximation 1/((s-1)(shift+aa)^(s-1))
+# plus the half-weight endpoint term 0.5/(shift+aa)^s.
+  ans <- 1 / ((shape-1) * (shift + aa)^(shape-1)) +
+         0.5 / (shift + aa)^shape
+
+  term <- (shape/2) / (shift + aa)^(shape+1)
+  ans <- ans + term * B2[1]
+
+# Each pass multiplies 'term' by the next ratio of the Euler-Maclaurin
+# series rather than recomputing it from scratch.
+  for (mm in 2:kk) {
+    term <- term * (shape+2*mm-3) *
+            (shape+2*mm-2) / ((2*mm-1) * 2 * mm * (shift + aa)^2)
+    ans <- ans + term * B2[mm]
+  }
+# Since aa == qq, the condition 'aa - 1 <= qq' is always TRUE here, so
+# this ifelse() simply returns 'ans' elementwise.
+  ifelse(aa - 1 <= qq, ans, rep(0, length(ans)))  # Handled above
+}
+
+
+
+
+
+
+
+ zeta <- function(x, deriv = 0, shift = 1) {
+# (Shifted/Hurwitz-style) zeta function: sum_{i >= 0} 1/(shift + i)^x,
+# reducing to the Riemann zeta function when shift = 1.
+# 'deriv' (0, 1, or 2) selects the function itself or one of its first
+# two derivatives; derivatives are delegated to Zeta.derivative(), which
+# calls compiled C code.  Complex 'x' is partially supported (see below).
+
+
+
+
+# Keep the user's value but free the name 'deriv' so it cannot be
+# confused with the base R function deriv().
+  deriv.arg <- deriv
+  rm(deriv)
+  if (!is.Numeric(deriv.arg, length.arg = 1,
+                  integer.valued = TRUE))
+    stop("'deriv' must be a single non-negative integer")
+  if (deriv.arg < 0 || deriv.arg > 2)
+    stop("'deriv' must be 0, 1, or 2")
+
+
+# Derivatives are handled entirely by the C backend.
+  if (deriv.arg > 0)
+    return(Zeta.derivative(x, deriv.arg = deriv.arg, shift = shift))
+
+
+
+# Special-case region Re(x) <= 1, where the direct series diverges.
+# NOTE(review): points with Re(x) == 1 and Im(x) != 0 fall through to
+# the Inf assignment below -- confirm that is the intended behaviour
+# (the pole of zeta is only at x == 1 exactly).
+  if (any(special <- Re(x) <= 1)) {
+    ans <- x
+    ans[special] <- Inf  # For Re(x) == 1
+
+    special3 <- Re(x) < 1
+    ans[special3] <- NA  # For 0 < Re(x) < 1
+
+# Real arguments in the critical strip 0 < x < 1: use the C backend
+# (deriv.arg is 0 here since deriv > 0 returned earlier).
+    special4 <- (0 < Re(x)) & (Re(x) < 1) & (Im(x) == 0)
+    ans[special4] <-
+      Zeta.derivative(x[special4], deriv.arg = deriv.arg, shift = shift)
+
+
+# Re(x) < 0: functional (reflection) equation
+# zeta(x) = 2^x pi^(x-1) sin(pi x / 2) Gamma(1-x) zeta(1-x).
+# NOTE(review): this is the Riemann (shift = 1) identity; presumably
+# shift != 1 with Re(x) < 0 is not supported -- confirm.
+    special2 <- Re(x) < 0
+    if (any(special2)) {
+      x2 <- x[special2]
+      cx <- 1 - x2
+      ans[special2] <- 2^(x2) * pi^(x2-1) * sin(pi*x2/2) *
+                       gamma(cx) * Recall(cx)
+    }  # special2
+
+# Remaining elements (Re(x) > 1) are computed by the ordinary path.
+    if (any(!special)) {
+      ans[!special] <- Recall(x[!special])
+    }
+    return(ans)
+  }  # special
+
+# Ordinary path, Re(x) > 1: sum the first aa = 12 terms directly, then
+# add the Euler-Maclaurin tail from Zeta.aux().
+  aa <- 12
+  ans <- 0
+  for (ii in 0:(aa-1))
+    ans <- ans + 1 / (shift + ii)^x
+
+  ans <- ans + Zeta.aux(shape = x, aa, shift = shift)
+  ans[shift <= 0] <- NaN
+  ans
+}  # zeta
+
+
+
+ Zeta.derivative <- function(x, deriv.arg = 0, shift = 1) {
+# Backend for zeta(): evaluates the Riemann zeta function or its first
+# or second derivative (deriv.arg in 0:2) by calling the compiled C
+# routine "vzetawr".  Only real, non-negative 'x' and shift == 1 are
+# supported; elements where x is non-finite, x <= 0, or x == 1 are
+# returned as NA.
+
+
+
+  if (!all(shift == 1))
+    stop("currently 'shift' must all be 1")
+
+
+  if (!is.Numeric(deriv.arg, length.arg = 1,
+                  integer.valued = TRUE))
+    stop("'deriv.arg' must be a single non-negative integer")
+  if (deriv.arg < 0 || deriv.arg > 2)
+    stop("'deriv.arg' must be 0, 1, or 2")
+
+  if (any(Im(x) != 0))
+    stop("Sorry, currently can only handle x real, not complex")
+  if (any(x < 0))
+    stop("Sorry, currently cannot handle x < 0")
+
+  ok <- is.finite(x) & x > 0 & x != 1  # Handles NAs
+  ans <- rep_len(NA_real_, length(x))
+  nn <- sum(ok)  # Effective length (excludes x < 0 and x = 1 values)
+  if (nn)
+    ans[ok] <- .C("vzetawr", as.double(x[ok]), ans = double(nn),
+                  as.integer(deriv.arg), as.integer(nn))$ans
+
+
+
+# zeta(0) = -1/2: patch values of x that are numerically zero, which
+# the 'ok' mask above excluded (x > 0 fails at exactly 0).
+  if (deriv.arg == 0)
+    ans[is.finite(x) & abs(x) < 1.0e-12] <- -0.5
+
+  ans
+}
+
+
+
+
+
 
diff --git a/R/family.mixture.R b/R/family.mixture.R
index fa046d9..e4f44bd 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -78,7 +78,7 @@ mix2normal.control <- function(trace = TRUE, ...) {
   new("vglmff",
   blurb = c("Mixture of two univariate normals\n\n",
             "Links:    ",
-            namesof("phi", lphi, earg = ephi, tag = FALSE), ", ", 
+            namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
             namesof("mu1",  lmu, earg = emu1, tag = FALSE), ", ",
             namesof("sd1",  lsd, earg = esd1, tag = FALSE), ", ",
             namesof("mu2",  lmu, earg = emu2, tag = FALSE), ", ",
@@ -156,7 +156,7 @@ mix2normal.control <- function(trace = TRUE, ...) {
       if ( .eq.sd ) {
         init.sd1 <-
         init.sd2 <- (init.sd1 + init.sd2) / 2
-        if (!all.equal( .esd1, .esd2 ))
+        if (!identical( .esd1 , .esd2 ))
           stop("'esd1' and 'esd2' must be equal if 'eq.sd = TRUE'")
       }
       etastart <- cbind(
@@ -220,6 +220,22 @@ mix2normal.control <- function(trace = TRUE, ...) {
           .esd1 = esd1, .esd2 = esd2,
           .lsd = lsd ))),
   vfamily = c("mix2normal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+    mu1 <- eta2theta(eta[, 2], link = .lmu  , earg = .emu1 )
+    sd1 <- eta2theta(eta[, 3], link = .lsd  , earg = .esd1 )
+    mu2 <- eta2theta(eta[, 4], link = .lmu  , earg = .emu2 )
+    sd2 <- eta2theta(eta[, 5], link = .lsd  , earg = .esd2 )
+    okay1 <- all(is.finite(mu1)) &&
+             all(is.finite(mu2)) &&
+             all(is.finite(sd1)) && all(0 < sd1) &&
+             all(is.finite(sd2)) && all(0 < sd2) &&
+             all(is.finite(phi)) && all(0 < phi & phi < 1)
+    okay1
+  }, list(.lphi = lphi, .lmu = lmu,
+          .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
+          .esd1 = esd1, .esd2 = esd2,
+          .lsd = lsd ))),
   deriv = eval(substitute(expression({
     phi <- eta2theta(eta[, 1], link = .lphi , earg = .ephi )
     mu1 <- eta2theta(eta[, 2], link = .lmu  , earg = .emu1 )
@@ -338,9 +354,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
   new("vglmff",
   blurb = c("Mixture of two Poisson distributions\n\n",
             "Links:    ",
-            namesof("phi",lphi, earg = ephi), ", ", 
-            namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
-            namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
+            namesof("phi",     lphi,    earg = ephi, tag = FALSE), ", ",
+            namesof("lambda1", llambda, earg = el1,  tag = FALSE), ", ",
+            namesof("lambda2", llambda, earg = el2,  tag = FALSE), "\n",
             "Mean:     phi*lambda1 + (1 - phi)*lambda2"),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -363,8 +379,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
   }, list( .zero = zero,
            .nsimEIM = nsimEIM,
            .lphi = lphi,
-           .llambda = llambda
-         ))),
+           .llambda = llambda ))),
 
 
   initialize = eval(substitute(expression({
@@ -396,22 +411,22 @@ mix2poisson.control <- function(trace = TRUE, ...) {
       init.lambda1 <- rep_len(if (length( .il1  )) .il1  else qy[1], n)
       init.lambda2 <- rep_len(if (length( .il2  )) .il2  else qy[2], n)
 
-      if (!length(etastart))  
+      if (!length(etastart))
         etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
                           theta2eta(init.lambda1, .llambda , earg = .el1 ),
                           theta2eta(init.lambda2, .llambda , earg = .el2 ))
     }
-  }), list(.lphi = lphi, .llambda = llambda,
-           .ephi = ephi, .el1 = el1, .el2 = el2,
-           .iphi = iphi, .il1 = il1, .il2 = il2,
-           .qmu = qmu))),
+  }), list( .lphi = lphi, .llambda = llambda,
+            .ephi = ephi, .el1 = el1, .el2 = el2,
+            .iphi = iphi, .il1 = il1, .il2 = il2,
+            .qmu = qmu))),
   linkinv = eval(substitute(function(eta, extra = NULL){
     phi     <- eta2theta(eta[, 1], link = .lphi ,    earg = .ephi )
     lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1  )
     lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2  )
     phi * lambda1 + (1 - phi) * lambda2
-  }, list(.lphi = lphi, .llambda = llambda,
-          .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+  }, list( .lphi = lphi, .llambda = llambda,
+           .ephi = ephi, .el1 = el1, .el2 = el2 ))),
   last = eval(substitute(expression({
     misc$link <-
          c("phi" = .lphi , "lambda1" = .llambda , "lambda2" = .llambda )
@@ -422,9 +437,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
     misc$multipleResponses <- FALSE
-  }), list(.lphi = lphi, .llambda = llambda,
-           .ephi = ephi, .el1 = el1, .el2 = el2,
-           .nsimEIM = nsimEIM ))),
+  }), list( .lphi = lphi, .llambda = llambda,
+            .ephi = ephi, .el1 = el1, .el2 = el2,
+            .nsimEIM = nsimEIM ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
@@ -444,9 +459,19 @@ mix2poisson.control <- function(trace = TRUE, ...) {
         ll.elts
       }
     }
-  }, list(.lphi = lphi, .llambda = llambda,
+  }, list( .lphi = lphi, .llambda = llambda,
            .ephi = ephi, .el1 = el1, .el2 = el2 ))),
   vfamily = c("mix2poisson"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    phi     <- eta2theta(eta[, 1], link = .lphi    , earg = .ephi )
+    lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1  )
+    lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2  )
+    okay1 <- all(is.finite(phi))     && all(0 < phi & phi < 1) &&
+             all(is.finite(lambda1)) && all(0 < lambda1) &&
+             all(is.finite(lambda2)) && all(0 < lambda2)
+    okay1
+  }, list( .lphi = lphi, .llambda = llambda,
+           .ephi = ephi, .el1 = el1, .el2 = el2 ))),
   deriv = eval(substitute(expression({
     phi     <- eta2theta(eta[, 1], link = .lphi    , earg = .ephi )
     lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1  )
@@ -468,9 +493,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
     c(w) * cbind(dl.dphi * dphi.deta,
                  dl.dlambda1 * dlambda1.deta,
                  dl.dlambda2 * dlambda2.deta)
-  }), list(.lphi = lphi, .llambda = llambda,
-           .ephi = ephi, .el1 = el1, .el2 = el2,
-           .nsimEIM = nsimEIM ))),
+  }), list( .lphi = lphi, .llambda = llambda,
+            .ephi = ephi, .el1 = el1, .el2 = el2,
+            .nsimEIM = nsimEIM ))),
   weight = eval(substitute(expression({
     run.mean <- 0
     for (ii in 1:( .nsimEIM )) {
@@ -524,9 +549,9 @@ mix2poisson.control <- function(trace = TRUE, ...) {
                dtheta.detas[, index0$col]
 
     c(w) * wz
-  }), list(.lphi = lphi, .llambda = llambda,
-           .ephi = ephi, .el1 = el1, .el2 = el2,
-           .nsimEIM = nsimEIM ))))
+  }), list( .lphi = lphi, .llambda = llambda,
+            .ephi = ephi, .el1 = el1, .el2 = el2,
+            .nsimEIM = nsimEIM ))))
 }
 
 
@@ -577,7 +602,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
   new("vglmff",
   blurb = c("Mixture of two univariate exponentials\n\n",
             "Links:    ",
-            namesof("phi",     lphi,    earg = ephi, tag = FALSE), ", ", 
+            namesof("phi",     lphi,    earg = ephi, tag = FALSE), ", ",
             namesof("lambda1", llambda, earg = el1 , tag = FALSE), ", ",
             namesof("lambda2", llambda, earg = el2 , tag = FALSE), "\n",
             "Mean:     phi / lambda1 + (1 - phi) / lambda2\n"),
@@ -633,7 +658,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
       init.phi <-     rep_len(if (length( .iphi )) .iphi else 0.5, n)
       init.lambda1 <- rep_len(if (length( .il1 )) .il1 else 1/qy[1], n)
       init.lambda2 <- rep_len(if (length( .il2 )) .il2 else 1/qy[2], n)
-      if (!length(etastart))  
+      if (!length(etastart))
         etastart <- cbind(theta2eta(init.phi,     .lphi    , earg = .ephi ),
                           theta2eta(init.lambda1, .llambda , earg = .el1  ),
                           theta2eta(init.lambda2, .llambda , earg = .el2  ))
@@ -684,6 +709,16 @@ mix2exp.control <- function(trace = TRUE, ...) {
   }, list(.lphi = lphi, .llambda = llambda,
           .ephi = ephi, .el1 = el1, .el2 = el2 ))),
   vfamily = c("mix2exp"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    phi     <- eta2theta(eta[, 1], link = .lphi    , earg = .ephi )
+    lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1  )
+    lambda2 <- eta2theta(eta[, 3], link = .llambda , earg = .el2  )
+    okay1 <- all(is.finite(phi))     && all(0 < phi & phi < 1) &&
+             all(is.finite(lambda1)) && all(0 < lambda1) &&
+             all(is.finite(lambda2)) && all(0 < lambda2)
+    okay1
+  }, list( .lphi = lphi, .llambda = llambda,
+           .ephi = ephi, .el1 = el1, .el2 = el2 ))),
   deriv = eval(substitute(expression({
     phi     <- eta2theta(eta[, 1], link = .lphi    , earg = .ephi )
     lambda1 <- eta2theta(eta[, 2], link = .llambda , earg = .el1  )
diff --git a/R/family.nbd.R b/R/family.nbd.R
new file mode 100644
index 0000000..25a321c
--- /dev/null
+++ b/R/family.nbd.R
@@ -0,0 +1,1868 @@
+# These functions are
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Init.mu <-
+  function(y, x = cbind("(Intercept)" = rep_len(1, nrow(as.matrix(y)))),
+           w = x, imethod = 1, imu = NULL,
+           ishrinkage = 0.95,
+           pos.only = FALSE,
+           probs.y = 0.35) {
+    if (!is.matrix(x)) x <- as.matrix(x)
+    if (!is.matrix(y)) y <- as.matrix(y)
+    if (!is.matrix(w)) w <- as.matrix(w)
+    if (ncol(w) != ncol(y))
+      w <- matrix(w, nrow = nrow(y), ncol = ncol(y))
+
+    if (length(imu)) {
+      MU.INIT <- matrix(imu, nrow(y), ncol(y), byrow = TRUE)
+      return(MU.INIT)
+    }
+
+
+    if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 || ishrinkage > 1)
+     warning("bad input for argument 'ishrinkage'; ",
+             "using the value 0.95 instead")
+
+
+    if (imethod > 6) {
+      warning("argument 'imethod' should be 1 or 2 or... 6; ",
+              "using the value 1")
+      imethod <- 1
+    }
+    mu.init <- y
+    for (jay in 1:ncol(y)) {
+      TFvec <- if (pos.only) y[, jay] > 0 else TRUE
+      locn.est <- if ( imethod %in% c(1, 4)) {
+        weighted.mean(y[TFvec, jay], w[TFvec, jay]) + 1/16
+      } else if ( imethod %in% c(3, 6)) {
+        c(quantile(y[TFvec, jay], probs = probs.y ) + 1/16)
+      } else {
+        median(y[TFvec, jay]) + 1/16
+      }
+
+      if (imethod <= 3) {
+        mu.init[, jay] <-      ishrinkage   * locn.est +
+                          (1 - ishrinkage ) * y[, jay]
+      } else {
+        medabsres <- median(abs(y[, jay] - locn.est)) + 1/32
+        allowfun <- function(z, maxtol = 1)
+          sign(z) * pmin(abs(z), maxtol)
+        mu.init[, jay] <- locn.est + (1 - ishrinkage ) *
+                          allowfun(y[, jay] - locn.est, maxtol = medabsres)
+
+        mu.init[, jay] <- abs(mu.init[, jay]) + 1 / 1024
+      }
+    }  # of for (jay)
+
+    mu.init
+  }
+
+
+
+
+
+
+
+
+EIM.NB.specialp <-
+  function(mu, size,
+           y.max = NULL,  # Must be an integer
+           cutoff.prob = 0.995,
+           intercept.only = FALSE,
+           extra.bit = TRUE) {
+
+
+  if (intercept.only) {
+    mu <- mu[1]
+    size <- size[1]
+  }
+
+  y.min <- 0  # A fixed constant really
+
+  if (!is.numeric(y.max)) {
+    eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
+    y.max <- max(round(qnbinom(p = eff.p[2], mu = mu, size = size) * 1.1)) + 30
+  }
+
+  Y.mat <- if (intercept.only) y.min:y.max else
+           matrix(y.min:y.max, length(mu), y.max-y.min+1, byrow = TRUE)
+  neff.row <- ifelse(intercept.only, 1, nrow(Y.mat))
+  neff.col <- ifelse(intercept.only, length(Y.mat), ncol(Y.mat))
+
+  if (FALSE) {
+  trigg.term <- if (intercept.only) {
+    check2 <-
+     sum(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)
+         / (Y.mat + size)^2)
+    check2
+  } else {
+  check2 <-
+    rowSums(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)
+            / (Y.mat + size)^2)
+  check2
+  }
+  }  # FALSE
+
+
+  if (TRUE) {
+    answerC <- .C("eimpnbinomspecialp",
+      as.integer(intercept.only),
+      as.double(neff.row), as.double(neff.col),
+      as.double(size),
+      as.double(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)),
+      rowsums = double(neff.row))
+    trigg.term <- answerC$rowsums
+  }  # TRUE
+
+  ned2l.dk2 <- trigg.term
+  if (extra.bit)
+    ned2l.dk2 <- ned2l.dk2 - 1 / size + 1 / (size + mu)
+  ned2l.dk2
+}  # EIM.NB.specialp()
+
+
+
+
+
+
+
+EIM.NB.speciald <-
+  function(mu, size,
+           y.min = 0,  # 20160201; must be an integer
+           y.max = NULL,  # Must be an integer
+           cutoff.prob = 0.995,
+           intercept.only = FALSE,
+           extra.bit = TRUE) {
+
+
+
+
+
+  if (intercept.only) {
+    mu <- mu[1]
+    size <- size[1]
+  }
+
+  if (!is.numeric(y.max)) {
+    eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
+    y.max <- max(round(qnbinom(p = eff.p[2], mu = mu, size = size) * 1.1)) + 30
+  }
+
+  Y.mat <- if (intercept.only) y.min:y.max else
+           matrix(y.min:y.max, length(mu), y.max-y.min+1, byrow = TRUE)
+  trigg.term <- if (intercept.only) {
+     dnbinom(Y.mat, size = size, mu = mu) %*% trigamma(Y.mat + size)
+  } else {
+     rowSums(dnbinom(Y.mat, size = size, mu = mu) *
+             trigamma(Y.mat + size))
+  }
+  ned2l.dk2 <- trigamma(size) - trigg.term
+  if (extra.bit)
+    ned2l.dk2 <- ned2l.dk2 - 1 / size + 1 / (size + mu)
+  ned2l.dk2
+}  # end of EIM.NB.speciald()
+
+
+
+NBD.Loglikfun2 <- function(munbval, sizeval,
+                           y, x, w, extraargs) {
+  sum(c(w) * dnbinom(x = y, mu = munbval,
+                     size = sizeval, log = TRUE))
+}
+
+
+
+
+
+negbinomial.initialize.yj <-
+  function(yvec, wvec = rep(1, length(yvec)),
+           gprobs.y = ppoints(9),
+           wm.yj = weighted.mean(yvec, w = wvec)) {
+  try.mu <- c(quantile(yvec, probs = gprobs.y) + 1/16,
+              wm.yj)
+  if (median(try.mu) < 1) {
+    y.pos <- yvec[yvec > 0]
+    try.mu <- c(min(try.mu),  # 0.25,
+                wm.yj,
+                summary.default(y.pos)[c(1:3, 5)],
+                quantile(y.pos, probs = gprobs.y) - 1/16)
+
+  }
+  unique(sort(try.mu))
+}
+
+
+
+negbinomial.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
+}
+
+
+
+ negbinomial <-
+  function(
+           zero = "size",
+           parallel = FALSE,
+           deviance.arg = FALSE,
+           type.fitted = c("mean", "quantiles"),
+           percentiles = c(25, 50, 75),
+           mds.min = 1e-3,
+           nsimEIM = 500, cutoff.prob = 0.999,  # Maxiter = 5000,
+           eps.trig = 1e-7,
+           max.support = 4000,
+           max.chunk.MB = 30,  # max.memory = Inf is allowed
+           lmu = "loge", lsize = "loge",
+           imethod = 1,
+           imu = NULL,
+           iprobs.y = NULL,  # 0.35,
+           gprobs.y = ppoints(6),
+           isize = NULL,
+           gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
+
+
+
+
+
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+      stop("argument 'imethod' must be 1 or 2")
+
+
+  if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
+    stop("argument 'deviance.arg' must be TRUE or FALSE")
+
+
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "quantiles"))[1]
+
+
+  lmunb <- as.list(substitute(lmu))
+  emunb <- link2list(lmunb)
+  lmunb <- attr(emunb, "function.name")
+
+  imunb <- imu
+
+  lsize <- as.list(substitute(lsize))
+  esize <- link2list(lsize)
+  lsize <- attr(esize, "function.name")
+
+
+  if (!is.Numeric(eps.trig, length.arg = 1,
+                  positive = TRUE) || eps.trig > 1e-5)
+    stop("argument 'eps.trig' must be positive and smaller in value")
+
+  if (length(imunb) && !is.Numeric(imunb, positive = TRUE))
+    stop("bad input for argument 'imu'")
+  if (length(isize) && !is.Numeric(isize, positive = TRUE))
+    stop("bad input for argument 'isize'")
+
+  if (!is.Numeric(cutoff.prob, length.arg = 1) ||
+    cutoff.prob < 0.95 ||
+    cutoff.prob >= 1)
+    stop("range error in the argument 'cutoff.prob'; ",
+         "a value in [0.95, 1) is needed")
+
+    if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
+      stop("bad input for argument 'nsimEIM'")
+    if (nsimEIM <= 10)
+      warning("argument 'nsimEIM' should be an integer ",
+               "greater than 10, say")
+
+
+    if (is.logical( parallel ) && parallel  && length(zero))
+      stop("need to set 'zero = NULL' when parallel = TRUE")
+
+
+
+  ans <-
+  new("vglmff",
+
+
+  blurb = c("Negative binomial distribution\n\n",
+            "Links:    ",
+            namesof("mu",   lmunb, earg = emunb), ", ",
+            namesof("size", lsize, earg = esize), "\n",
+            "Mean:     mu\n",
+            "Variance: mu * (1 + mu / size) for NB-2"),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints)
+
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .parallel = parallel, .zero = zero ))),
+
+
+
+  infos = eval(substitute(function(...) {
+    list(M1    = 2,
+         Q1    = 1,
+         expected = TRUE,
+         imethod = .imethod ,
+         mds.min = .mds.min ,
+         multipleResponses = TRUE,
+         parameters.names = c("mu", "size"),
+         type.fitted = .type.fitted ,
+         percentiles = .percentiles ,
+         lmu   = .lmunb ,
+         lsize = .lsize ,
+         nsimEIM = .nsimEIM ,
+         eps.trig = .eps.trig ,
+         zero  = .zero ,
+         max.chunk.MB = .max.chunk.MB ,
+         cutoff.prob = .cutoff.prob
+         )
+  }, list( .zero = zero, .lsize = lsize, .lmunb = lmunb,
+           .type.fitted = type.fitted,
+           .percentiles = percentiles ,
+           .eps.trig = eps.trig,
+           .imethod = imethod,
+           .mds.min = mds.min,
+           .cutoff.prob = cutoff.prob,
+           .max.chunk.MB = max.chunk.MB,
+           .nsimEIM = nsimEIM ))),
+
+  initialize = eval(substitute(expression({
+    M1 <- 2
+
+    temp12 <-
+      w.y.check(w = w, y = y,
+                Is.nonnegative.y = TRUE,
+                Is.integer.y = TRUE,
+                ncol.w.max = Inf,
+                ncol.y.max = Inf,
+                out.wy = TRUE,
+                colsyperw = 1, maximize = TRUE)
+    w <- temp12$w
+    y <- temp12$y
+
+
+    assign("CQO.FastAlgorithm",
+          ( .lmunb == "loge") && ( .lsize == "loge"),
+           envir = VGAMenv)
+
+    if (any(function.name == c("cqo", "cao")) &&
+        ((is.Numeric( .zero , length.arg = 1) && .zero != -2) ||
+         (is.character( .zero ) && .zero != "size")))
+        stop("argument zero = 'size' or zero = -2 is required")
+
+
+    extra$type.fitted <- .type.fitted
+    extra$percentiles <- .percentiles
+    extra$colnames.y  <- colnames(y)
+    M <- M1 * ncol(y)
+    NOS <- ncoly <- ncol(y)  # Number of species
+    predictors.names <-
+     c(namesof(param.names("mu",   NOS),
+                .lmunb , earg = .emunb , tag = FALSE),
+       namesof(param.names("size", NOS),
+                .lsize , earg = .esize , tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+
+    gprobs.y <- .gprobs.y
+    imunb <- .imunb  # Default is NULL
+    if (length(imunb))
+      imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
+    if (!length(etastart)) {
+      munb.init <-
+      size.init <- matrix(NA_real_, n, NOS)
+      if (length( .iprobs.y ))
+        gprobs.y <-  .iprobs.y
+      gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+        wm.yj <- weighted.mean(y[, jay], w = w[, jay])
+        munb.init.jay <- if ( .imethod == 1 ) {
+          negbinomial.initialize.yj(y[, jay], w[, jay],
+                                    gprobs.y = gprobs.y,
+                                    wm.yj = wm.yj)
+        } else {
+          wm.yj
+        }
+        if (length(imunb))
+          munb.init.jay <- imunb[, jay]
+
+
+        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+                                    wm.yj)
+        if (length( .isize ))
+          gsize <- .isize  # isize is on an absolute scale
+
+
+        try.this <-
+          grid.search2(munb.init.jay, gsize,
+                       objfun = NBD.Loglikfun2,
+                       y = y[, jay], w = w[, jay],
+                       ret.objfun = TRUE)  # Last value is the loglik
+
+        munb.init[, jay] <- try.this["Value1"]
+        size.init[, jay] <- try.this["Value2"]
+      }  # for (jay ...)
+
+
+
+      newemu <- .emunb
+      if ( .lmunb == "nbcanlink") {
+        newemu$size <- size.init
+        testing1 <- log(munb.init / (munb.init + size.init))
+        testing2 <- theta2eta(munb.init, link = .lmunb , earg = newemu )
+      }
+
+
+      etastart <-
+        cbind(theta2eta(munb.init, link = .lmunb , earg = newemu ),
+              theta2eta(size.init, link = .lsize , earg = .esize ))
+      etastart <-
+        etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
+    }
+  }), list( .lmunb = lmunb, .lsize = lsize,
+            .emunb = emunb, .esize = esize,
+            .imunb = imunb,
+            .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+            .deviance.arg = deviance.arg,
+            .isize = isize, .iprobs.y = iprobs.y,
+            .nsimEIM = nsimEIM,
+            .zero = zero, .imethod = imethod,
+            .type.fitted = type.fitted,
+            .percentiles = percentiles ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
+    kmat <- NULL
+
+    munb <- if ( .lmunb == "nbcanlink") {
+
+      newemu <- .emunb
+      kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                        .lsize , earg = .esize )
+      newemu$size <- kmat
+      check.munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                .lmunb , earg = newemu )
+
+
+      munb <- kmat / expm1(-eta[, c(TRUE, FALSE), drop = FALSE])
+    } else {
+      eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                .lmunb , earg = .emunb )
+    }
+
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "quantiles"))[1]
+    if (type.fitted == "mean") {
+      return(label.cols.y(munb, colnames.y = extra$colnames.y,
+                          NOS = NOS))
+    }
+
+
+    if (is.null(kmat))
+      kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                        .lsize , earg = .esize )
+    percvec <- extra$percentiles
+    lenperc <- length(percvec)
+    jvec <- lenperc * (0:(NOS - 1))
+    ans <- matrix(0, nrow(eta), lenperc * NOS)
+    for (kay in 1:lenperc)
+      ans[, jvec + kay] <-
+        qnbinom(0.01 * percvec[kay], mu = munb, size = kmat)
+
+    rownames(ans) <- rownames(eta)
+
+
+    label.cols.y(ans, colnames.y = extra$colnames.y,
+                 NOS = NOS, percentiles = percvec,
+                 one.on.one = FALSE)
+  }, list( .lmunb = lmunb, .lsize = lsize,
+           .emunb = emunb, .esize = esize))),
+
+  last = eval(substitute(expression({
+    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
+        rm("CQO.FastAlgorithm", envir = VGAMenv)
+
+
+    if (function.name == "cao")
+      ind2 <- FALSE
+
+
+    save.weights <- control$save.weights <- !all(ind2)
+
+
+    temp0303 <- c(rep_len( .lmunb , NOS),
+                  rep_len( .lsize , NOS))
+    names(temp0303) <- c(param.names("mu",   NOS),
+                         param.names("size", NOS))
+    misc$link <- temp0303[interleave.VGAM(M, M1 = M1)] # Already named
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- newemu
+      misc$earg[[M1*ii  ]] <- .esize
+    }
+  }), list( .lmunb = lmunb, .lsize = lsize,
+            .emunb = emunb, .esize = esize ))),
+
+  linkfun = eval(substitute(function(mu, extra = NULL) {
+    M1 <- 2
+
+    newemu <- .emunb
+
+    eta.temp <- theta2eta(mu, .lmunb , earg = newemu)
+    eta.size <- theta2eta(if (is.numeric( .isize )) .isize else 1.0,
+                          .lsize , earg = .esize )
+    eta.size <- 0 * eta.temp + eta.size  # Right dimension now.
+
+
+
+    if ( .lmunb == "nbcanlink") {
+      newemu$size <- eta2theta(eta.size, .lsize , earg = .esize )
+    }
+
+
+
+    eta.temp <- cbind(eta.temp, eta.size)
+    eta.temp[, interleave.VGAM(ncol(eta.temp), M1 = M1), drop = FALSE]
+  }, list( .lmunb = lmunb, .lsize = lsize,
+           .emunb = emunb, .esize = esize,
+                           .isize = isize ))),
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE,  eta,
+             extra = NULL, summation = TRUE) {
+    munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                      .lmunb , earg = .emunb )
+    kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                      .lsize , earg = .esize )
+
+
+
+    newemu <- .emunb
+    if ( .lmunb == "nbcanlink") {
+      newemu$size <- kmat
+    }
+
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dnbinom(x = y, mu = munb, size = kmat, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lmunb = lmunb, .lsize = lsize,
+           .emunb = emunb, .esize = esize))),
+
+  vfamily = c("negbinomial"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    vecTF <- c(TRUE, FALSE)
+    munb <- cbind(eta2theta(eta[,  vecTF], .lmunb , earg = .emunb ))
+    size <- cbind(eta2theta(eta[, !vecTF], .lsize , earg = .esize ))
+    rnbinom(nsim * length(munb), mu = munb, size = size)
+  }, list( .lmunb = lmunb, .lsize = lsize,
+           .emunb = emunb, .esize = esize ))),
+
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                      .lmunb , earg = .emunb )
+    size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                      .lsize , earg = .esize )
+
+    smallval <- .mds.min  # .munb.div.size
+    okay1 <- all(is.finite(munb)) && all(0 < munb) &&
+             all(is.finite(size)) && all(0 < size)
+
+
+    okay0 <- if ( .lmunb == "nbcanlink") all(eta < 0) else TRUE
+
+
+    overdispersion <- if (okay1) all(smallval < munb / size) else FALSE
+    if (!overdispersion)
+      warning("parameter 'size' has very large values relative ",
+              "to 'mu'; ",
+              "try fitting a quasi-Poisson ",
+              "model instead.")
+    okay1 && overdispersion && okay0
+  }, list( .lmunb = lmunb, .emunb = emunb,
+           .lsize = lsize, .esize = esize,
+           .mds.min = mds.min))),
+
+
+
+  deriv = eval(substitute(expression({
+
+
+
+
+  odd.iter <- 1   # iter %% 2
+  even.iter <- 1  # 1 - odd.iter
+
+  if ( iter == 1 && .deviance.arg ) {
+    if (control$criterion != "coefficients" &&
+        control$half.step)
+      warning("Argument 'criterion' should be 'coefficients' ",
+               "or 'half.step' should be 'FALSE' when ",
+              "'deviance.arg = TRUE'")
+
+
+
+    low.index <- ifelse(names(constraints)[1] == "(Intercept)", 2, 1)
+    if (low.index <= length(constraints))
+    for (iii in low.index:length(constraints)) {
+      conmat <- constraints[[iii]]
+      if (any(conmat[c(FALSE, TRUE), ] != 0))
+        stop("argument 'deviance.arg' should only be TRUE for NB-2 ",
+             "models; ",
+             "non-zero elements detected for the 'size' parameter." )
+    }
+  }
+
+
+
+
+
+
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                      .lmunb , earg = .emunb )
+    kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                      .lsize , earg = .esize )
+
+
+    smallval <- .mds.min  # Something like this is needed
+    if (any(big.size <- (munb / kmat < smallval))) {
+      kmat[big.size] <- munb[big.size] / smallval
+    }
+
+
+    newemu <- .emunb
+    if ( .lmunb == "nbcanlink") {
+      newemu$size <- kmat
+    }
+
+
+    dl.dmunb <- y / munb - (1 + y/kmat) / (1 + munb/kmat)
+    dl.dsize <- digamma(y + kmat) - digamma(kmat) -
+                (y - munb) / (munb + kmat) + log1p(-munb / (kmat + munb))
+    if (any(big.size)) {
+      dl.dsize[big.size] <- 1e-8  # A small number
+    }
+
+
+    dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
+
+
+    myderiv <- if ( .lmunb == "nbcanlink") {
+      dmunb.deta1 <- 1 / nbcanlink(munb, size = kmat, wrt.param = 1,
+                                   deriv = 1)
+
+      dsize.deta1 <- 1 / nbcanlink(munb, size = kmat, wrt.param = 2,
+                                   deriv = 1)
+      c(w) * cbind(dl.dmunb * dmunb.deta1 *  odd.iter +
+                   dl.dsize * dsize.deta1 * 1 * even.iter,
+                   dl.dsize * dsize.deta  * even.iter)
+    } else {
+      dmunb.deta <- dtheta.deta(munb, .lmunb , earg = .emunb )
+      c(w) * cbind(dl.dmunb * dmunb.deta,
+                   dl.dsize * dsize.deta)
+    }
+
+
+    myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
+    myderiv
+  }), list( .lmunb = lmunb, .lsize = lsize,
+            .emunb = emunb, .esize = esize,
+            .deviance.arg = deviance.arg,
+            .mds.min = mds.min ))),
+
+
+
+  weight = eval(substitute(expression({
+    wz <- matrix(NA_real_, n, M)
+
+
+    max.support <- .max.support
+    max.chunk.MB <- .max.chunk.MB
+
+
+    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
+    for (jay in 1:NOS) {
+      eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+      Q.mins <- 0
+      Q.maxs <- round(qnbinom(p = eff.p[2],
+                              mu = munb[, jay],
+                              size = kmat[, jay]) * 1.1) + 30
+
+
+      eps.trig <- .eps.trig
+      Q.MAXS <- if ( .lsize == "loge")
+        pmax(10, ceiling(kmat[, jay] / sqrt(eps.trig))) else Inf
+      Q.maxs <- pmin(Q.maxs, Q.MAXS)
+
+
+
+      ind1 <- if (max.chunk.MB > 0)
+        (Q.maxs - Q.mins < max.support) else FALSE
+      if ((NN <- sum(ind1)) > 0) {
+        Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+        n.chunks <- if (intercept.only) 1 else
+                    max(1, ceiling( Object.Size / max.chunk.MB))
+        chunk.rows <- ceiling(NN / n.chunks)
+        ind2[, jay] <- ind1  # Save this
+        wind2 <- which(ind1)
+
+
+        upr.ptr <- 0
+        lwr.ptr <- upr.ptr + 1
+        while (lwr.ptr <= NN) {
+          upr.ptr <- min(upr.ptr + chunk.rows, NN)
+          sind2 <- wind2[lwr.ptr:upr.ptr]
+          wz[sind2, M1*jay] <-
+            EIM.NB.specialp(mu          =   munb[sind2, jay],
+                            size        = kmat[sind2, jay],
+                            y.max = max(Q.maxs[sind2]),
+                            cutoff.prob = .cutoff.prob ,
+                            intercept.only = intercept.only)
+
+
+          if (any(eim.kk.TF <- wz[sind2, M1*jay] <= 0)) {
+            ind2[sind2[eim.kk.TF], jay] <- FALSE
+          }
+
+
+          lwr.ptr <- upr.ptr + 1
+        }  # while
+      }  # if
+    }  # end of for (jay in 1:NOS)
+
+
+
+
+
+
+
+
+
+    for (jay in 1:NOS) {
+      run.varcov <- 0
+      ii.TF <- !ind2[, jay]  # Not assigned above
+      if (any(ii.TF)) {
+        kkvec <- kmat[ii.TF, jay]
+        muvec <-   munb[ii.TF, jay]
+        for (ii in 1:( .nsimEIM )) {
+          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
+          dl.dsize <- digamma(ysim + kkvec) - digamma(kkvec) -
+                      (ysim - muvec) / (muvec + kkvec) +
+                      log1p( -muvec / (kkvec + muvec))
+          run.varcov <- run.varcov + dl.dsize^2
+        }  # end of for loop
+
+        run.varcov <- c(run.varcov / .nsimEIM )
+        ned2l.dsize2 <- if (intercept.only)
+          mean(run.varcov) else run.varcov
+
+        wz[ii.TF, M1*jay] <- ned2l.dsize2
+      }
+    }
+
+
+
+    save.weights <- !all(ind2)
+
+
+
+    ned2l.dmunb2 <- 1 / munb - 1 / (munb + kmat)
+    ned2l.dsize2 <- wz[, M1*(1:NOS), drop = FALSE]
+
+
+    if ( .lmunb == "nbcanlink") {
+      wz <- cbind(wz, matrix(0, n, M-1))  # Make it tridiagonal
+
+      wz[,     M1*(1:NOS) - 1] <-
+        (ned2l.dmunb2 * (munb/kmat)^2 * odd.iter +
+         ned2l.dsize2 * even.iter * 1) *
+          (munb + kmat)^2
+
+
+
+      wz[, M + M1*(1:NOS) - 1] <-
+        -(munb + kmat) * ned2l.dsize2 * dsize.deta * even.iter
+    } else {
+      wz[, c(TRUE, FALSE)] <- ned2l.dmunb2 * dmunb.deta^2
+    }
+
+
+    wz[, M1*(1:NOS)] <- wz[, M1*(1:NOS)] * dsize.deta^2
+
+
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+  }), list( .cutoff.prob = cutoff.prob,
+            .max.support = max.support,
+            .max.chunk.MB = max.chunk.MB,
+            .lmunb = lmunb, .lsize = lsize,
+            .eps.trig = eps.trig,
+            .nsimEIM = nsimEIM ))))
+
+
+
+
+  if (deviance.arg) {
+    ans at deviance <- eval(substitute(
+      function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+               summation = TRUE) {
+
+
+
+
+
+
+    size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                      .lsize , earg = .esize )
+
+    if (residuals) {
+      stop("this part of the function has not been written yet.")
+    } else {
+      dev.elts <- 2 * c(w) *
+                  (y * log(pmax(1, y) / mu) -
+                  (y + size) * log((y + size) / (mu + size)))
+      if (summation) {
+        sum(dev.elts)
+      } else {
+        dev.elts
+      }
+    }
+  }, list( .lsize = lsize, .esize = esize,
+           .lmunb = lmunb, .emunb = emunb )))
+
+
+
+
+
+  }
+
+
+
+
+
+  ans
+}  # negbinomial()
+
+
+
+
+
+
+
+
+polya.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
+}
+
+
+
+ polya <-
+  function(
+           zero = "size",
+           type.fitted = c("mean", "prob"),
+           mds.min = 1e-3,
+           nsimEIM = 500,  cutoff.prob = 0.999,  # Maxiter = 5000,
+           eps.trig = 1e-7,
+           max.support = 4000,
+           max.chunk.MB = 30,  # max.memory = Inf is allowed
+           lprob = "logit", lsize = "loge",
+           imethod = 1,
+           iprob = NULL,
+           iprobs.y = NULL,
+           gprobs.y = ppoints(6),
+           isize = NULL,
+           gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
+           imunb = NULL) {
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+      stop("argument 'imethod' must be 1 or 2")
+
+
+  deviance.arg <- FALSE  # 20131212; for now
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "prob"))[1]
+
+
+
+  if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
+    stop("bad input for argument 'iprob'")
+  if (length(isize) && !is.Numeric(isize, positive = TRUE))
+    stop("bad input for argument 'isize'")
+
+  if (!is.Numeric(eps.trig, length.arg = 1,
+                  positive = TRUE) || eps.trig > 0.001)
+    stop("argument 'eps.trig' must be positive and smaller in value")
+
+  if (!is.Numeric(nsimEIM, length.arg = 1,
+                  integer.valued = TRUE))
+    stop("bad input for argument 'nsimEIM'")
+  if (nsimEIM <= 10)
+    warning("argument 'nsimEIM' should be an integer ",
+            "greater than 10, say")
+
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+  lsize <- as.list(substitute(lsize))
+  esize <- link2list(lsize)
+  lsize <- attr(esize, "function.name")
+
+
+
+  ans <-
+  new("vglmff",
+  blurb = c("Polya (negative-binomial) distribution\n\n",
+            "Links:    ",
+            namesof("prob", lprob, earg = eprob), ", ",
+            namesof("size", lsize, earg = esize), "\n",
+            "Mean:     size * (1 - prob) / prob\n",
+            "Variance: mean / prob"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         mds.min = .mds.min ,
+         type.fitted  = .type.fitted ,
+         eps.trig = .eps.trig ,
+         parameters.names = c("prob", "size"),
+         zero = .zero)
+  }, list( .zero = zero, .eps.trig = eps.trig,
+           .type.fitted = type.fitted,
+           .mds.min = mds.min))),
+
+  initialize = eval(substitute(expression({
+    M1 <- 2
+    if (any(function.name == c("cqo", "cao")))
+      stop("polya() does not work with cqo() or cao(). ",
+           "Try negbinomial()")
+
+
+    temp12 <- w.y.check(w = w, y = y,
+              Is.integer.y = TRUE,
+              Is.nonnegative = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1, maximize = TRUE)
+    w <- temp12$w
+    y <- temp12$y
+
+
+    M <- M1 * ncol(y)
+    NOS <- ncoly <- ncol(y)  # Number of species
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    predictors.names <-
+      c(namesof(param.names("prob", NOS), .lprob , earg = .eprob ,
+                tag = FALSE),
+        namesof(param.names("size", NOS), .lsize , earg = .esize ,
+                tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+
+    if (is.null( .nsimEIM )) {
+       save.weights <- control$save.weights <- FALSE
+    }
+
+
+    gprobs.y <- .gprobs.y
+    imunb <- .imunb  # Default in NULL
+    if (length(imunb))
+      imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
+
+
+    if (!length(etastart)) {
+      munb.init <-
+      size.init <- matrix(NA_real_, n, NOS)
+      gprobs.y  <- .gprobs.y
+      if (length( .iprobs.y ))
+        gprobs.y <-  .iprobs.y
+      gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+        munb.init.jay <- if ( .imethod == 1 ) {
+          quantile(y[, jay], probs = gprobs.y) + 1/16
+        } else {
+          weighted.mean(y[, jay], w = w[, jay])
+        }
+        if (length(imunb))
+          munb.init.jay <- imunb[, jay]
+
+
+        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+                                    weighted.mean(y[, jay], w = w[, jay]))
+        if (length( .isize ))
+          gsize <- .isize  # isize is on an absolute scale
+
+
+        try.this <-
+          grid.search2(munb.init.jay, gsize,
+                       objfun = NBD.Loglikfun2,
+                       y = y[, jay], w = w[, jay],
+                       ret.objfun = TRUE)  # Last value is the loglik
+
+        munb.init[, jay] <- try.this["Value1"]
+        size.init[, jay] <- try.this["Value2"]
+      }  # for (jay ...)
+
+
+
+
+
+
+      prob.init <- if (length( .iprob ))
+                   matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
+                   size.init / (size.init + munb.init)
+
+
+      etastart <-
+        cbind(theta2eta(prob.init, .lprob , earg = .eprob),
+              theta2eta(size.init, .lsize , earg = .esize))
+      etastart <-
+        etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
+      }
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize,
+            .iprob = iprob, .isize = isize,
+            .pinit = iprob,
+            .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+            .iprobs.y = iprobs.y,
+            .nsimEIM = nsimEIM, .zero = zero,
+            .imethod = imethod , .imunb = imunb,
+            .type.fitted = type.fitted ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
+    pmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                      .lprob , earg = .eprob )
+    kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                      .lsize , earg = .esize )
+
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "prob"))[1]
+
+    ans <- switch(type.fitted,
+                  "mean"      = kmat * (1 - pmat) / pmat,
+                  "prob"      = pmat)
+
+
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lprob = lprob, .eprob = eprob,
+           .lsize = lsize, .esize = esize))),
+  last = eval(substitute(expression({
+    temp0303 <- c(rep_len( .lprob , NOS),
+                  rep_len( .lsize , NOS))
+    names(temp0303) <- c(param.names("prob", NOS),
+                         param.names("size", NOS))
+    temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+    misc$link <- temp0303  # Already named
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .eprob
+      misc$earg[[M1*ii  ]] <- .esize
+    }
+
+    misc$isize <- .isize
+    misc$imethod <- .imethod
+    misc$nsimEIM <- .nsimEIM
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize,
+            .isize = isize,
+            .nsimEIM = nsimEIM,
+            .imethod = imethod ))),
+
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    pmat  <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                       .lprob , earg = .eprob)
+    temp300 <-         eta[, c(FALSE, TRUE), drop = FALSE]
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp300[temp300 >  bigval] <-  bigval
+      temp300[temp300 < -bigval] <- -bigval
+    }
+    kmat <- eta2theta(temp300, .lsize , earg = .esize )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dnbinom(y, prob = pmat, size = kmat, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lsize = lsize, .lprob = lprob,
+           .esize = esize, .eprob = eprob ))),
+  vfamily = c("polya"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
+    kmat <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
+    rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
+  }, list( .lprob = lprob, .lsize = lsize,
+           .eprob = eprob, .esize = esize ))),
+
+
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
+    size <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
+    munb <- size * (1 / pmat - 1)
+
+    smallval <- .mds.min  # .munb.div.size
+    okay1 <- all(is.finite(munb)) && all(0 < munb) &&
+             all(is.finite(size)) && all(0 < size) &&
+             all(is.finite(pmat)) && all(0 < pmat & pmat < 1)
+    overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
+    if (!overdispersion)
+      warning("parameter 'size' has very large values; ",
+              "try fitting a quasi-Poisson ",
+              "model instead.")
+    okay1 && overdispersion
+  }, list( .lprob = lprob, .eprob = eprob,
+           .lsize = lsize, .esize = esize,
+           .mds.min = mds.min))),
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+
+    pmat  <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                       .lprob , earg = .eprob )
+    temp3 <-           eta[, c(FALSE, TRUE), drop = FALSE]
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp3[temp3 >  bigval] <-  bigval  # pmin() collapses matrices
+      temp3[temp3 < -bigval] <- -bigval
+     }
+    kmat <- as.matrix(eta2theta(temp3, .lsize , earg = .esize ))
+
+    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
+    dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
+
+    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob )
+    dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
+
+    myderiv <- c(w) * cbind(dl.dprob * dprob.deta,
+                            dl.dkayy * dkayy.deta)
+    myderiv[, interleave.VGAM(M, M1 = M1)]
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize))),
+  weight = eval(substitute(expression({
+    wz <- matrix(0, n, M + M - 1)  # wz is 'tridiagonal'
+
+
+
+
+    max.support <- .max.support
+    max.chunk.MB <- .max.chunk.MB
+
+
+    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
+    for (jay in 1:NOS) {
+      eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+      Q.mins <- 0
+      Q.maxs <- round(qnbinom(p = eff.p[2],
+                              mu = mu[, jay],
+                              size = kmat[, jay]) * 1.1) + 30
+
+
+
+      eps.trig <- .eps.trig
+      Q.MAXS <-      pmax(10, ceiling(1 / sqrt(eps.trig)))
+      Q.maxs <- pmin(Q.maxs, Q.MAXS)
+
+
+      ind1 <- if (max.chunk.MB > 0)
+        (Q.maxs - Q.mins < max.support) else FALSE
+      if ((NN <- sum(ind1)) > 0) {
+        Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+        n.chunks <- if (intercept.only) 1 else
+                    max(1, ceiling( Object.Size / max.chunk.MB))
+        chunk.rows <- ceiling(NN / n.chunks)
+        ind2[, jay] <- ind1  # Save this
+        wind2 <- which(ind1)
+
+
+        upr.ptr <- 0
+        lwr.ptr <- upr.ptr + 1
+        while (lwr.ptr <= NN) {
+          upr.ptr <- min(upr.ptr + chunk.rows, NN)
+          sind2 <- wind2[lwr.ptr:upr.ptr]
+          wz[sind2, M1*jay] <-
+            EIM.NB.specialp(mu          =   mu[sind2, jay],
+                            size        = kmat[sind2, jay],
+                            y.max = max(Q.maxs[sind2]),
+                            cutoff.prob = .cutoff.prob ,
+                            intercept.only = intercept.only,
+                            extra.bit = FALSE)
+          lwr.ptr <- upr.ptr + 1
+        }  # while
+      }  # if
+    }  # end of for (jay in 1:NOS)
+
+
+
+
+
+
+
+
+
+    for (jay in 1:NOS) {
+      run.varcov <- 0
+      ii.TF <- !ind2[, jay]  # Not assigned above
+      if (any(ii.TF)) {
+        ppvec <- pmat[ii.TF, jay]
+        kkvec <- kmat[ii.TF, jay]
+        muvec <-   mu[ii.TF, jay]
+        for (ii in 1:( .nsimEIM )) {
+          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
+          dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) + log(ppvec)
+          run.varcov <- run.varcov + dl.dk^2
+        }  # end of for loop
+
+        run.varcov <- c(run.varcov / .nsimEIM )
+        ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+        wz[ii.TF, M1*jay] <- ned2l.dk2  # * (dk.deta2[ii.TF, jay])^2
+      }
+    }
+
+
+    wz[,     M1*(1:NOS)    ] <- wz[,      M1 * (1:NOS)] * dkayy.deta^2
+
+
+    save.weights <- !all(ind2)
+
+
+    ned2l.dprob2 <- kmat / ((1 - pmat) * pmat^2)
+    wz[,     M1*(1:NOS) - 1] <- ned2l.dprob2 * dprob.deta^2
+
+    ned2l.dkayyprob <- -1 / pmat
+    wz[, M + M1*(1:NOS) - 1] <- ned2l.dkayyprob * dkayy.deta * dprob.deta
+
+
+
+
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+  }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+            .max.support = max.support,
+            .max.chunk.MB = max.chunk.MB,
+            .nsimEIM = nsimEIM ))))
+
+
+
+
+  if (deviance.arg)
+  ans@deviance <- eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+             summation = TRUE) {
+    temp300 <-  eta[, c(FALSE, TRUE), drop = FALSE]
+
+
+    if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
+      stop("cannot handle matrix 'w' yet")
+
+
+
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp300[temp300 >  bigval] <-  bigval
+      temp300[temp300 < -bigval] <- -bigval
+    } else {
+      stop("can only handle the 'loge' link")
+    }
+    kayy <-  eta2theta(temp300, .lsize , earg = .esize)
+    devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
+                (y + kayy) * log((mu + kayy) / (kayy + y)))
+    if (residuals) {
+      sign(y - mu) * sqrt(abs(devi) * w)
+    } else {
+      dev.elts <- c(w) * devi
+      if (summation) {
+        sum(dev.elts)
+      } else {
+        dev.elts
+      }
+    }
+  }, list( .lsize = lsize, .eprob = eprob,
+           .esize = esize )))
+
+  ans
+}  # End of polya()
+
+
+
+
+
+
+
+
+
+
+
+polyaR.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
+}
+
+
+
+ polyaR <-
+  function(
+           zero = "size",
+           type.fitted = c("mean", "prob"),
+           mds.min = 1e-3,
+           nsimEIM = 500,  cutoff.prob = 0.999,  # Maxiter = 5000,
+           eps.trig = 1e-7,
+           max.support = 4000,
+           max.chunk.MB = 30,  # max.memory = Inf is allowed
+           lsize = "loge", lprob = "logit",
+           imethod = 1,
+           iprob = NULL,
+           iprobs.y = NULL,
+           gprobs.y = ppoints(6),
+           isize = NULL,
+           gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
+           imunb = NULL) {
+
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+      stop("argument 'imethod' must be 1 or 2")
+
+
+  deviance.arg <- FALSE  # 20131212; for now
+
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "prob"))[1]
+
+
+  if (!is.Numeric(eps.trig, length.arg = 1,
+                  positive = TRUE) || eps.trig > 0.001)
+    stop("argument 'eps.trig' must be positive and smaller in value")
+
+
+  if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
+    stop("bad input for argument 'iprob'")
+  if (length(isize) && !is.Numeric(isize, positive = TRUE))
+    stop("bad input for argument 'isize'")
+
+  if (!is.Numeric(nsimEIM, length.arg = 1,
+                  integer.valued = TRUE))
+    stop("bad input for argument 'nsimEIM'")
+  if (nsimEIM <= 10)
+    warning("argument 'nsimEIM' should be an integer ",
+            "greater than 10, say")
+
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+  lsize <- as.list(substitute(lsize))
+  esize <- link2list(lsize)
+  lsize <- attr(esize, "function.name")
+
+
+
+  ans <-
+  new("vglmff",
+  blurb = c("Polya (negative-binomial) distribution\n\n",
+            "Links:    ",
+            namesof("size", lsize, earg = esize), ", ",
+            namesof("prob", lprob, earg = eprob), "\n",
+            "Mean:     size * (1 - prob) / prob\n",
+            "Variance: mean / prob"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         mds.min = .mds.min ,
+         multipleResponses = TRUE,
+         type.fitted  = .type.fitted ,
+         parameters.names = c("size", "prob"),
+         eps.trig = .eps.trig ,
+         zero = .zero )
+  }, list( .zero = zero, .eps.trig = eps.trig,
+           .type.fitted = type.fitted,
+           .mds.min = mds.min))),
+
+  initialize = eval(substitute(expression({
+    M1 <- 2
+    if (any(function.name == c("cqo", "cao")))
+      stop("polyaR() does not work with cqo() or cao(). ",
+           "Try negbinomial()")
+
+
+    temp12 <- w.y.check(w = w, y = y,
+              Is.integer.y = TRUE,
+              Is.nonnegative = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1, maximize = TRUE)
+    w <- temp12$w
+    y <- temp12$y
+
+
+    M <- M1 * ncol(y)
+    NOS <- ncoly <- ncol(y)  # Number of species
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    predictors.names <-
+      c(namesof(param.names("size", NOS),
+                .lsize , earg = .esize , tag = FALSE),
+        namesof(param.names("prob", NOS),
+                .lprob , earg = .eprob , tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
+
+    if (is.null( .nsimEIM )) {
+       save.weights <- control$save.weights <- FALSE
+    }
+
+
+
+    gprobs.y <- .gprobs.y
+    imunb <- .imunb  # Default is NULL
+    if (length(imunb))
+      imunb <- matrix(imunb, n, NOS, byrow = TRUE)
+
+
+
+    if (!length(etastart)) {
+      munb.init <-
+      size.init <- matrix(NA_real_, n, NOS)
+      gprobs.y  <- .gprobs.y
+      if (length( .iprobs.y ))
+        gprobs.y <-  .iprobs.y
+      gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+        munb.init.jay <- if ( .imethod == 1 ) {
+          quantile(y[, jay], probs = gprobs.y) + 1/16
+        } else {
+          weighted.mean(y[, jay], w = w[, jay])
+        }
+        if (length(imunb))
+          munb.init.jay <- imunb[, jay]
+
+
+        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
+                                    weighted.mean(y[, jay], w = w[, jay]))
+        if (length( .isize ))
+          gsize <- .isize  # isize is on an absolute scale
+
+
+        try.this <-
+          grid.search2(munb.init.jay, gsize,
+                       objfun = NBD.Loglikfun2,
+                       y = y[, jay], w = w[, jay],
+                       ret.objfun = TRUE)  # Last value is the loglik
+
+        munb.init[, jay] <- try.this["Value1"]
+        size.init[, jay] <- try.this["Value2"]
+      }  # for (jay ...)
+
+
+
+
+
+
+      prob.init <- if (length( .iprob ))
+                   matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
+                   size.init / (size.init + munb.init)
+
+
+      etastart <-
+        cbind(theta2eta(size.init, .lsize , earg = .esize ),
+              theta2eta(prob.init, .lprob , earg = .eprob ))
+      etastart <-
+        etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
+      }
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize,
+            .iprob = iprob, .isize = isize,
+            .pinit = iprob,
+            .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
+            .iprobs.y = iprobs.y,
+            .nsimEIM = nsimEIM, .zero = zero,
+            .imethod = imethod , .imunb = imunb,
+            .type.fitted = type.fitted ))),
+
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
+    kmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                      .lsize , earg = .esize )
+    pmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                      .lprob , earg = .eprob )
+
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "prob"))[1]
+
+    ans <- switch(type.fitted,
+                  "mean"      = kmat * (1 - pmat) / pmat,
+                  "prob"      = pmat)
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lprob = lprob, .eprob = eprob,
+           .lsize = lsize, .esize = esize))),
+  last = eval(substitute(expression({
+    temp0303 <- c(rep_len( .lsize , NOS),
+                  rep_len( .lprob , NOS))
+    names(temp0303) <- c(param.names("size", NOS),
+                         param.names("prob", NOS))
+    temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
+    misc$link <- temp0303  # Already named
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .esize
+      misc$earg[[M1*ii  ]] <- .eprob
+    }
+
+    misc$isize <- .isize
+    misc$imethod <- .imethod
+    misc$nsimEIM <- .nsimEIM
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize,
+            .isize = isize,
+            .nsimEIM = nsimEIM,
+            .imethod = imethod ))),
+
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    pmat  <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                       .lprob , earg = .eprob)
+    temp300 <-         eta[, c(TRUE, FALSE), drop = FALSE]
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp300[temp300 >  bigval] <-  bigval
+      temp300[temp300 < -bigval] <- -bigval
+    }
+    kmat <- eta2theta(temp300, .lsize , earg = .esize )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dnbinom(y, prob = pmat, size = kmat, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lsize = lsize, .lprob = lprob,
+           .esize = esize, .eprob = eprob ))),
+  vfamily = c("polyaR"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    kmat <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
+    pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
+    rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
+  }, list( .lprob = lprob, .lsize = lsize,
+           .eprob = eprob, .esize = esize ))),
+
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    size <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
+    pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
+    munb <- size * (1 / pmat - 1)
+
+    smallval <- .mds.min  # .munb.div.size
+    overdispersion <- all(munb / size > smallval)
+    ans <- all(is.finite(munb)) && all(0 < munb) &&
+           all(is.finite(size)) && all(0 < size) &&
+           all(is.finite(pmat)) && all(0 < pmat & pmat < 1) &&
+           overdispersion
+    if (!overdispersion)
+      warning("parameter 'size' has very large values; ",
+              "try fitting a quasi-Poisson ",
+              "model instead.")
+    ans
+  }, list( .lprob = lprob, .eprob = eprob,
+           .lsize = lsize, .esize = esize,
+           .mds.min = mds.min))),
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+
+    pmat  <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                       .lprob , earg = .eprob)
+    temp3 <-           eta[, c(TRUE, FALSE), drop = FALSE]
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp3[temp3 >  bigval] <-  bigval  # pmin() collapses matrices
+      temp3[temp3 < -bigval] <- -bigval
+     }
+    kmat <- as.matrix(eta2theta(temp3, .lsize , earg = .esize ))
+
+    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
+    dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
+
+    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
+    dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
+
+    myderiv <- c(w) * cbind(dl.dkayy * dkayy.deta,
+                            dl.dprob * dprob.deta)
+    myderiv[, interleave.VGAM(M, M1 = M1)]
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize))),
+  weight = eval(substitute(expression({
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
+
+
+
+
+    max.support <- .max.support
+    max.chunk.MB <- .max.chunk.MB
+
+
+    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
+    for (jay in 1:NOS) {
+      eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
+      Q.mins <- 0
+      Q.maxs <- round(qnbinom(p = eff.p[2],
+                              mu = mu[, jay],
+                              size = kmat[, jay]) * 1.1) + 30
+
+
+
+      eps.trig <- .eps.trig
+      Q.MAXS <-      pmax(10, ceiling(1 / sqrt(eps.trig) - kmat[, jay]))
+      Q.maxs <- pmin(Q.maxs, Q.MAXS)
+
+
+
+      ind1 <- if (max.chunk.MB > 0)
+        (Q.maxs - Q.mins < max.support) else FALSE
+      if ((NN <- sum(ind1)) > 0) {
+        Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
+        n.chunks <- if (intercept.only) 1 else
+                    max(1, ceiling( Object.Size / max.chunk.MB))
+        chunk.rows <- ceiling(NN / n.chunks)
+        ind2[, jay] <- ind1  # Save this
+        wind2 <- which(ind1)
+
+
+        upr.ptr <- 0
+        lwr.ptr <- upr.ptr + 1
+        while (lwr.ptr <= NN) {
+          upr.ptr <- min(upr.ptr + chunk.rows, NN)
+          sind2 <- wind2[lwr.ptr:upr.ptr]
+          wz[sind2, M1*jay - 1] <-
+            EIM.NB.specialp(mu          =   mu[sind2, jay],
+                            size        = kmat[sind2, jay],
+                            y.max = max(Q.maxs[sind2]),
+                            cutoff.prob = .cutoff.prob ,
+                            intercept.only = intercept.only,
+                            extra.bit = FALSE)
+          lwr.ptr <- upr.ptr + 1
+        }  # while
+      }  # if
+    }  # end of for (jay in 1:NOS)
+
+
+
+
+
+
+
+
+
+    for (jay in 1:NOS) {
+      run.varcov <- 0
+      ii.TF <- !ind2[, jay]  # Not assigned above
+      if (any(ii.TF)) {
+        ppvec <- pmat[ii.TF, jay]
+        kkvec <- kmat[ii.TF, jay]
+        muvec <-   mu[ii.TF, jay]
+        for (ii in 1:( .nsimEIM )) {
+          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
+          dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) + log(ppvec)
+          run.varcov <- run.varcov + dl.dk^2
+        }  # end of for loop
+
+        run.varcov <- c(run.varcov / .nsimEIM )
+        ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+        wz[ii.TF, M1*jay - 1] <- ned2l.dk2  # * (dk.deta2[ii.TF, jay])^2
+      }
+    }
+
+
+    wz[, M1*(1:NOS) - 1] <- wz[, M1*(1:NOS) - 1] * dkayy.deta^2
+
+
+    save.weights <- !all(ind2)
+
+
+    ned2l.dprob2 <- kmat / ((1 - pmat) * pmat^2)
+    wz[,     M1*(1:NOS)    ] <- ned2l.dprob2 * dprob.deta^2
+
+    ned2l.dkayyprob <- -1 / pmat
+    wz[, M + M1*(1:NOS) - 1] <- ned2l.dkayyprob * dkayy.deta * dprob.deta
+
+
+
+
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+  }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
+            .max.support = max.support,
+            .max.chunk.MB = max.chunk.MB,
+            .nsimEIM = nsimEIM ))))
+
+
+
+
+  if (deviance.arg)
+  ans@deviance <- eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+             summation = TRUE) {
+    temp300 <-  eta[, c(FALSE, TRUE), drop = FALSE]
+
+
+    if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
+      stop("cannot handle matrix 'w' yet")
+
+
+
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp300[temp300 >  bigval] <-  bigval
+      temp300[temp300 < -bigval] <- -bigval
+    } else {
+      stop("can only handle the 'loge' link")
+    }
+    kayy <-  eta2theta(temp300, .lsize , earg = .esize)
+    devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
+                (y + kayy) * log((mu + kayy) / (kayy + y)))
+    if (residuals) {
+      sign(y - mu) * sqrt(abs(devi) * w)
+    } else {
+      dev.elts <- c(w) * devi
+      if (summation) {
+        sum(dev.elts)
+      } else {
+        dev.elts
+      }
+    }
+  }, list( .lsize = lsize, .eprob = eprob,
+           .esize = esize )))
+
+  ans
+}  # End of polyaR()
+
+
+
+
+
+
+
+
+
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index 377cb6f..fe2d8ae 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -238,7 +238,7 @@ micmen.control <- function(save.weights = TRUE, ...) {
     fit$df.residual <- n - rank   # Not nrow.X.vlm - rank
     fit$df.total <- n             # Not nrow.X.vlm
 
-    extra$Xm2 <- NULL             # Regressor is in control$regressor 
+    extra$Xm2 <- NULL             # Regressor is in control$regressor
     dpar <- .dispersion
     if (!dpar) {
       dpar <- sum(c(w) * (y - mu)^2) / (n - ncol.X.vlm)
@@ -267,6 +267,17 @@ micmen.control <- function(save.weights = TRUE, ...) {
   summary.dispersion = FALSE,
 
   vfamily = c("micmen", "vnonlinear"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
+    theta2 <- eta2theta(eta[, 2], .link2 , earg = .earg2 )
+    okay1 <- all(is.finite(theta1)) &&
+             all(is.finite(theta2))
+    okay1
+  }, list( .link1 = link1, .earg1 = earg1,
+           .link2 = link2, .earg2 = earg2,
+           .firstDeriv = firstDeriv,
+           .rpar = rpar, .divisor = divisor ))),
+
 
   deriv = eval(substitute(expression({
     theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
@@ -336,7 +347,7 @@ micmen.control <- function(save.weights = TRUE, ...) {
       } else {
         wz <- cbind(( dmus.dthetas[, 1] * dthetas.detas[, 1])^2,
                     ( dmus.dthetas[, 2] * dthetas.detas[, 2])^2 + rpar,
-                      dmus.dthetas[, 1] *  dmus.dthetas[, 2] * 
+                      dmus.dthetas[, 1] *  dmus.dthetas[, 2] *
                      dthetas.detas[, 1] * dthetas.detas[, 2])
       }
     } else {
@@ -598,6 +609,16 @@ skira.control <- function(save.weights = TRUE, ...) {
             .estimated.dispersion = estimated.dispersion ))),
   summary.dispersion = FALSE,
   vfamily = c("skira", "vnonlinear"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
+    theta2 <- eta2theta(eta[, 2], .link2 , earg = .earg2 )
+    okay1 <- all(is.finite(theta1)) &&
+             all(is.finite(theta2))
+    okay1
+  }, list( .link1 = link1, .earg1 = earg1,
+           .link2 = link2, .earg2 = earg2,
+           .firstDeriv = firstDeriv,
+           .rpar = rpar, .divisor = divisor ))),
   deriv = eval(substitute(expression({
     rpar <- if ( .firstDeriv == "rpar") {
       if (iter > 1) {
diff --git a/R/family.normal.R b/R/family.normal.R
index 438e923..adc4f0e 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -55,7 +55,7 @@ VGAM.weights.function <- function(w, M, n) {
 
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
+                           bool = .parallel ,
                            constraints = constraints)
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
@@ -68,7 +68,7 @@ VGAM.weights.function <- function(w, M, n) {
     wz <- VGAM.weights.function(w = w, M = M, n = n)
     if (residuals) {
       if (M > 1) {
-        U <- vchol(wz, M = M, n = n) 
+        U <- vchol(wz, M = M, n = n)
         temp <- mux22(U, y-mu, M = M, upper = TRUE, as.matrix = TRUE)
         dimnames(temp) <- dimnames(y)
         temp
@@ -83,6 +83,7 @@ VGAM.weights.function <- function(w, M, n) {
          Q1 = 1,
          expected = TRUE,
          multipleResponses = TRUE,
+         quasi.type = TRUE,
          zero = .zero )
   }, list( .zero = zero ))),
 
@@ -112,10 +113,10 @@ VGAM.weights.function <- function(w, M, n) {
     predictors.names <- if (!is.null(dy[[2]])) dy[[2]] else
                        paste("Y", 1:M, sep = "")
 
-    if (!length(etastart)) 
+    if (!length(etastart))
       etastart <- 0 * y
   }), list( .parallel = parallel, .zero = zero ))),
-  linkinv = function(eta, extra = NULL) eta, 
+  linkinv = function(eta, extra = NULL) eta,
   last = eval(substitute(expression({
     dy <- dimnames(y)
     if (!is.null(dy[[2]]))
@@ -165,25 +166,25 @@ VGAM.weights.function <- function(w, M, n) {
       stop("loglikelihood residuals not implemented yet")
     } else {
 
-      temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
+    temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
 
 
 
 
-      ll.elts <-
-      if (M == 1 || ncol(wz) == M) {
+    ll.elts <-
+    if (M == 1 || ncol(wz) == M) {
 
-        -0.5 * temp1 + 0.5 *    (log(wz)) - n * (M / 2) * log(2*pi)
-      } else {
-        if (all(wz[1, ] == apply(wz, 2, min)) &&
-            all(wz[1, ] == apply(wz, 2, max))) {
-          onewz <- m2a(wz[1, , drop = FALSE], M = M)
-          onewz <- onewz[,, 1]  # M x M
+      -0.5 * temp1 + 0.5 *    (log(wz)) - n * (M / 2) * log(2*pi)
+    } else {
+      if (all(wz[1, ] == apply(wz, 2, min)) &&
+          all(wz[1, ] == apply(wz, 2, max))) {
+        onewz <- m2a(wz[1, , drop = FALSE], M = M)
+        onewz <- onewz[,, 1]  # M x M
 
 
-          logdet <- determinant(onewz)$modulus
-          logretval <- -0.5 * temp1 + 0.5 * n * logdet -
-                       n * (M / 2) * log(2*pi)
+        logdet <- determinant(onewz)$modulus
+        logretval <- -0.5 * temp1 + 0.5 * n * logdet -
+                     n * (M / 2) * log(2*pi)
 
         distval <- stop("variable 'distval' not computed yet")
         logretval <- -(ncol(onewz) * log(2 * pi) + logdet + distval)/2
@@ -209,6 +210,10 @@ VGAM.weights.function <- function(w, M, n) {
   },
   linkfun = function(mu, extra = NULL) mu,
   vfamily = "gaussianff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    okay1 <- all(is.finite(eta))
+    okay1
+  }, list( .zero = zero ))),
   deriv = expression({
     wz <- VGAM.weights.function(w = w, M = M, n = n)
     mux22(cc = t(wz), xmat = y-mu, M = M, as.matrix = TRUE)
@@ -242,7 +247,7 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
     ifelse(x < 0, log(0), dnorm(x, mean = mean, sd = sd, log = TRUE) -
            pnorm(mean / sd, log.p = TRUE))
   } else {
-    ifelse(x < 0, 0, dnorm(x = x, mean = mean, sd = sd) / pnorm(mean / sd))
+    ifelse(x < 0, 0, dnorm(x = x, mean = mean, sd = sd) / pnorm(mean/sd))
   }
 }
 
@@ -275,7 +280,7 @@ qposnorm <- function(p, mean = 0, sd = 1,
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
   rm(log.p)   # 20150102 KaiH
-  
+
   if (lower.tail) {
     if (log.arg) p <- exp(p)
   } else {
@@ -555,6 +560,14 @@ if (FALSE)
   }, list( .lmean = lmean, .lsd = lsd,
            .emean = emean, .esd = esd ))),
   vfamily = c("posnormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmean , earg = .emean )
+    mysd <- eta2theta(eta[, c(FALSE, TRUE)], .lsd   , earg = .esd   )
+    okay1 <- all(is.finite(mymu)) &&
+             all(is.finite(mysd)) && all(0 < mysd)
+    okay1
+  }, list( .lmean = lmean, .lsd = lsd,
+           .emean = emean, .esd = esd ))),
 
 
 
@@ -562,7 +575,7 @@ if (FALSE)
   function(object, nsim) {
    pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmean , earg = .emean )
@@ -607,7 +620,7 @@ if (FALSE)
       NOS <- M / M1
       dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M1 = M1)]
 
-      wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
+      wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
 
       ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
 
@@ -624,7 +637,7 @@ if (FALSE)
         dl.dmu <- (zedd - imratio) / Mysd
         dl.dsd <- (temp0 * imratio + zedd^2 - 1) / Mysd
 
-        
+
         temp7 <- cbind(dl.dmu, dl.dsd)
         run.varcov <- run.varcov +
                       temp7[, ind1$row.index] *
@@ -663,7 +676,7 @@ if (FALSE)
       ned2l.dmusd <- imratio * (1 + temp0 * (temp0 + imratio)) / mysd^2
       ned2l.dsd2 <- (2 - imratio * (temp0 * (1 + temp0 *
                     (temp0 + imratio)))) / mysd^2
-  
+
       wz <- array(c(c(w) * ned2l.dmu2  * dmu.deta^2,
                     c(w) * ned2l.dsd2  * dsd.deta^2,
                     c(w) * ned2l.dmusd * dmu.deta * dsd.deta),
@@ -744,8 +757,8 @@ dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
 
   hh <- 2 - d
   KK <- 1 / (1 + 1/hh + 0.75/hh^2)
-  logden <- dnorm(x = x, mean = mean, sd = sigma, log = TRUE) + log(KK) +
-    2 * log1p(((x-mean)/sigma)^2 / (2*hh))
+  logden <- dnorm(x = x, mean = mean, sd = sigma, log = TRUE) +
+    log(KK) + 2 * log1p(((x-mean)/sigma)^2 / (2*hh))
   logden[is.infinite(x)] <- log(0)  # 20141209 KaiH
   if (log.arg) logden else exp(logden)
 }
@@ -795,11 +808,11 @@ ptikuv <- function(q, d, mean = 0, sigma = 1,
 
 
 
-qtikuv <- function(p, d, mean = 0, sigma = 1, 
+qtikuv <- function(p, d, mean = 0, sigma = 1,
                    lower.tail = TRUE, log.p = FALSE, ...) {
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (!is.Numeric(d, length.arg = 1) || max(d) >= 2)
     stop("bad input for argument 'd'")
 
@@ -809,7 +822,7 @@ qtikuv <- function(p, d, mean = 0, sigma = 1,
   } else {
     p <- if (log.p) -expm1(p) else 1 - p
   }
-  
+
   L <- max(length(p), length(mean), length(sigma))
   if (length(p)     != L) p     <- rep_len(p,     L)
   if (length(mean)  != L) mean  <- rep_len(mean,  L)
@@ -875,7 +888,8 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
     while (ptikuv(q = Lower, d = d, mean = mean, sigma = sigma) > Smallno)
       Lower <- Lower - sigma
     Upper <- mean + 5 * sigma
-    while (ptikuv(q = Upper, d = d, mean = mean, sigma = sigma) < 1-Smallno)
+    while (ptikuv(q = Upper, d = d,
+                  mean = mean, sigma = sigma) < 1-Smallno)
       Upper <- Upper + sigma
     x <- runif(2*use.n, min = Lower, max = Upper)
     index <- runif(2*use.n, max = ymax) <
@@ -940,7 +954,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
     w.y.check(w = w, y = y)
 
 
-    predictors.names <- 
+    predictors.names <-
       c(namesof("mean",  .lmean  , earg = .emean  , tag = FALSE),
         namesof("sigma", .lsigma , earg = .esigma , tag = FALSE))
 
@@ -952,7 +966,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
         K2 <- 1 + 3/hh + 15/(4*hh^2)
         rep_len(sqrt(var(y) / (KK*K2)), n)
       }
-      mean.init <- rep_len(weighted.mean(y, w), n) 
+      mean.init <- rep_len(weighted.mean(y, w), n)
       etastart <-
         cbind(theta2eta(mean.init,  .lmean  , earg = .emean  ),
               theta2eta(sigma.init, .lsigma , earg = .esigma ))
@@ -970,7 +984,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
       misc$earg <- list("mean" = .emean , "sigma"= .esigma )
 
       misc$expected <- TRUE
-      misc$d <- .d 
+      misc$d <- .d
   }), list( .lmean = lmean, .lsigma = lsigma, .d = d,
             .emean = emean, .esigma = esigma ))),
   loglikelihood = eval(substitute(
@@ -993,6 +1007,16 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
   }, list( .lmean = lmean, .lsigma = lsigma, .d = d,
            .emean = emean, .esigma = esigma ))),
   vfamily = c("tikuv"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu  <- eta2theta(eta[, 1], .lmean  , earg = .emean  )
+    sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
+    dee   <- .d
+    okay1 <- all(is.finite(mymu )) &&
+             all(is.finite(sigma)) && all(0 < sigma) &&
+             all(is.finite(dee  )) && all(0 < dee & dee < 2)
+    okay1
+  }, list( .lmean = lmean, .lsigma = lsigma, .d = d,
+           .emean = emean, .esigma = esigma ))),
 
 
 
@@ -1009,14 +1033,14 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
 
 
   deriv = eval(substitute(expression({
-    mymu  <- eta2theta(eta[, 1], .lmean ,  earg = .emean )
-    sigma <- eta2theta(eta[, 2], .lsigma, earg = .esigma)
+    mymu  <- eta2theta(eta[, 1], .lmean  , earg = .emean  )
+    sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
 
     dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
     dsigma.deta <- dtheta.deta(sigma, .lsigma, earg = .esigma)
 
     zedd <- (y - mymu) / sigma
-    hh <- 2 - .d 
+    hh <- 2 - .d
     gzedd <- zedd / (1 + 0.5*zedd^2 / hh)
 
     dl.dmu <- zedd / sigma - 2 * gzedd / (hh*sigma)
@@ -1076,29 +1100,29 @@ pfoldnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2 = 1,
 
   if (lower.tail) {
     if (log.p) {
-      ans <- log(pnorm(q =  q/(a1*sd) - mean/sd) - 
+      ans <- log(pnorm(q =  q/(a1*sd) - mean/sd) -
                  pnorm(q = -q/(a2*sd) - mean/sd))
       ans[q <= 0 ] <- -Inf
       ans[q == Inf] <- 0
     } else {
-      ans <- pnorm(q =  q/(a1*sd) - mean/sd) - 
+      ans <- pnorm(q =  q/(a1*sd) - mean/sd) -
              pnorm(q = -q/(a2*sd) - mean/sd)
       ans[q <= 0] <- 0
       ans[q == Inf] <- 1
     }
   } else {
     if (log.p) {
-      ans <- log(pnorm(q =  q/(a1*sd) - mean/sd, lower.tail = FALSE) + 
+      ans <- log(pnorm(q =  q/(a1*sd) - mean/sd, lower.tail = FALSE) +
                  pnorm(q = -q/(a2*sd) - mean/sd))
       ans[q <= 0] <- 0
       ans[q == Inf] <- -Inf
     } else {
-      ans <- pnorm(q =  q/(a1*sd) - mean/sd, lower.tail = FALSE) + 
+      ans <- pnorm(q =  q/(a1*sd) - mean/sd, lower.tail = FALSE) +
              pnorm(q = -q/(a2*sd) - mean/sd)
       ans[q <= 0] <- 1
       ans[q == Inf] <- 0
     }
-  } 
+  }
   ans[a1 <= 0 | a2 <= 0] <- NaN
   ans[sd <= 0] <- NaN
   ans
@@ -1112,13 +1136,13 @@ qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1,
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
   rm(log.p)
-  
+
   if (lower.tail) {
     if (log.arg) p <- exp(p)
   } else {
     p <- if (log.arg) -expm1(p) else 1 - p
   }
-  
+
   L <- max(length(p), length(mean), length(sd), length(a1), length(a2))
   if (length(p)    != L) p    <- rep_len(p,    L)
   if (length(mean) != L) mean <- rep_len(mean, L)
@@ -1321,6 +1345,18 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2 = 1) {
            .emean = emean, .esd = esd,
            .a1 = a1, .a2 = a2 ))),
   vfamily = c("foldnormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
+    mysd <- eta2theta(eta[, 2], .lsd ,   earg = .esd )
+    okay1 <- all(is.finite(mymu))  &&
+             all(is.finite(mysd))  && all(0 < mysd)  &&
+             all(is.finite( .a1 )) && all(0 <  .a1 ) &&
+             all(is.finite( .a2 )) && all(0 <  .a2 )
+    okay1
+  }, list( .lmean = lmean, .lsd = lsd,
+           .emean = emean, .esd = esd,
+           .a1 = a1, .a2 = a2 ))),
+
   deriv = eval(substitute(expression({
     M1 <- 2
     mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
@@ -1440,7 +1476,7 @@ lqnorm <- function(qpower = 2,
     if (!length(etastart))  {
       meany <- weighted.mean(y, w)
       mean.init <- rep_len(if (length( .i.mu )) .i.mu else {
-        if ( .imethod == 2) median(y) else 
+        if ( .imethod == 2) median(y) else
         if ( .imethod == 1) meany else
           .ishrinkage * meany + (1 - .ishrinkage ) * y
       }, n)
@@ -1472,6 +1508,10 @@ lqnorm <- function(qpower = 2,
     theta2eta(mu, link = .link, earg = .earg)
   }, list( .link = link, .earg = earg ))),
   vfamily = "lqnorm",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    okay1 <- all(is.finite(eta))
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     dmu.deta <- dtheta.deta(theta=mu, link = .link, earg = .earg )
     myresid <- y - mu
@@ -1542,7 +1582,7 @@ ptobit <- function(q, mean = 0, sd = 1, Lower = 0, Upper = Inf,
     stop("all(Lower < Upper) is not TRUE")
 
 
-  ans <- pnorm(q = q, mean = mean, sd = sd, 
+  ans <- pnorm(q = q, mean = mean, sd = sd,
                lower.tail = lower.tail, log.p = log.p)
   ind1 <- (q <  Lower)
   ans[ind1] <- if (lower.tail) ifelse(log.p, log(0.0), 0.0) else
@@ -1565,11 +1605,11 @@ qtobit <- function(p, mean = 0, sd = 1,
     stop("all(Lower < Upper) is not TRUE")
 
   # 20150127 KaiH; add lower.tail = lower.tail, log.p = log.p
-  ans <- qnorm(p, mean = mean, sd = sd, 
+  ans <- qnorm(p, mean = mean, sd = sd,
                lower.tail = lower.tail, log.p = log.p)
-  pnorm.Lower <- ptobit(q = Lower, mean = mean, sd = sd, 
+  pnorm.Lower <- ptobit(q = Lower, mean = mean, sd = sd,
                         lower.tail = lower.tail, log.p = log.p)
-  pnorm.Upper <- ptobit(q = Upper, mean = mean, sd = sd, 
+  pnorm.Upper <- ptobit(q = Upper, mean = mean, sd = sd,
                         lower.tail = lower.tail, log.p = log.p)
 
 if (FALSE) {
@@ -1577,12 +1617,12 @@ if (FALSE) {
     ind1 <- (p <= pnorm.Lower)
     ans[ind1] <- Lower[ind1]
     ind2 <- (pnorm.Upper <= p)
-    ans[ind2] <- Upper[ind2] 
+    ans[ind2] <- Upper[ind2]
   } else {
     ind1 <- (p >= pnorm.Lower)
     ans[ind1] <- Lower[ind1]
     ind2 <- (pnorm.Upper >= p)
-    ans[ind2] <- Upper[ind2] 
+    ans[ind2] <- Upper[ind2]
   }
 } else {
   ans <- qnorm(p = p, mean = mean, sd = sd,
@@ -1624,7 +1664,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
     ans <- pmax(ans, Lower)
     ans <- pmin(ans, Upper)
   }
-  
+
   attr(ans, "Lower") <- Lower
   attr(ans, "Upper") <- Upper
   attr(ans, "cenL") <- cenL
@@ -1635,12 +1675,13 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
 
 
 
- tobit <- function(Lower = 0, Upper = Inf,  # See the trick described below.
-                   lmu = "identitylink",  lsd = "loge",
-                   imu = NULL,        isd = NULL,
-                   type.fitted = c("uncensored", "censored", "mean.obs"),
-                   byrow.arg = FALSE,
-                   imethod = 1, zero = "sd") {
+ tobit <-
+  function(Lower = 0, Upper = Inf,  # See the trick described below.
+           lmu = "identitylink",  lsd = "loge",
+           imu = NULL,        isd = NULL,
+           type.fitted = c("uncensored", "censored", "mean.obs"),
+           byrow.arg = FALSE,
+           imethod = 1, zero = "sd") {
 
 
 
@@ -1731,10 +1772,11 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
     ncoly <- ncol(y)
     M <- M1 * ncoly
 
-    Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = .byrow.arg )
-    Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = .byrow.arg )
+    Lowmat <- matrix( .Lower , n, ncol = ncoly, byrow = .byrow.arg )
+    Uppmat <- matrix( .Upper , n, ncol = ncoly, byrow = .byrow.arg )
 
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
     extra$censoredL <- (y <= Lowmat)
     extra$censoredU <- (y >= Uppmat)
     if (any(matTF <- (y < Lowmat))) {
@@ -1763,10 +1805,11 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
       sd.init <- matrix(0.0, n, ncoly)
       for (jay in 1:ncol(y)) {
         if ( .imethod >  2) {
-          mu.init[, jay] <- (y[, jay] + weighted.mean(y[, jay], w[, jay]))/2
-          sd.init[, jay] <- pmax(weighted.mean((y[, jay] - mu.init[, jay])^2,
-                                                w[, jay])^0.5,
-                                 0.001)
+          mu.init[, jay] <-
+            (y[, jay] + weighted.mean(y[, jay], w[, jay])) / 2
+          sd.init[, jay] <-
+            pmax(weighted.mean((y[, jay] - mu.init[, jay])^2,
+                                w[, jay])^0.5, 0.001)
         } else {  # .imethod <= 2
 
           use.i11 <- i11[, jay]
@@ -1778,7 +1821,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
                           y = y[!use.i11, jay],
                           w = w[!use.i11, jay])
 
-                     
+
 
           sd.init[, jay] <- sqrt( sum(w[!use.i11, jay] * mylm$resid^2)
                                 / mylm$df.residual ) * 1.5
@@ -1809,9 +1852,10 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
            .imethod = imethod ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
     M1 <- 2
-    ncoly <- ncol(eta) / M1
+    NOS <- ncoly <- ncol(eta) / M1
     mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
                      .lmu , earg = .emu )
+    mum <- label.cols.y(mum, colnames.y = extra$colnames.y, NOS = NOS)
 
     type.fitted <-
       if (length(extra$type.fitted)) {
@@ -1834,7 +1878,7 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
     if ( type.fitted == "censored") {
       mum[mum < Lowmat] <- Lowmat[mum < Lowmat]
       mum[mum > Uppmat] <- Uppmat[mum > Uppmat]
-      mum
+      return(mum)
     } else {
 
 
@@ -1935,6 +1979,20 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
            .byrow.arg = byrow.arg,
            .Lower = Lower, .Upper = Upper ))),
   vfamily = c("tobit"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    M1 <- 2
+    ncoly <- NCOL(y)
+    mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
+                     .lmu , earg = .emu )
+    sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
+                     .lsd , earg = .esd )
+    okay1 <- all(is.finite(mum)) &&
+             all(is.finite(sdm)) && all(0 < sdm)
+    okay1
+  }, list( .lmu = lmu, .lsd = lsd,
+           .emu = emu, .esd = esd,
+           .byrow.arg = byrow.arg,
+           .Lower = Lower, .Upper = Upper ))),
 
 
 
@@ -1951,9 +2009,12 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
 
     mills.ratio1 <- function(x) {
       ans <- exp(dnorm(x, log = TRUE) - pnorm(x, log = TRUE))
-      ans[x < -1e2] <- -x / (1 - 1/x^2 + 3 / x^4)
+      if (any(vecTF <- (x < -1e2))) {
+        xvneg <- x[vecTF]
+        ans[vecTF] <- -xvneg / (1 - 1/xvneg^2 + 3 / xvneg^4)
+      }
       ans
-    }
+    }  # mills.ratio1()
 
 
   mills.ratio2 <- function(x) {
@@ -2104,7 +2165,7 @@ moment.millsratio2 <- function(zedd) {
       phicPhi.B <- mills.ratio1(-B.i)
 
 
-                         
+
 
 
       ned2l.dmumu <- Phivec.B - Phivec.A +
@@ -2117,7 +2178,7 @@ moment.millsratio2 <- function(zedd) {
                      2 * moment.k.dnorm(-B.i, k = 1) +
                      moment.k.dnorm(-B.i, k = 3) +
                      moment.millsratio2(-B.i) -
-                         
+
                      2 * moment.k.dnorm( A.i, k = 1) +
                      moment.k.dnorm( A.i, k = 3) +
                      moment.millsratio2( A.i)
@@ -2159,13 +2220,14 @@ moment.millsratio2 <- function(zedd) {
 
 
  normal1 <-
- uninormal <- function(lmean = "identitylink", lsd = "loge", lvar = "loge",
-                       var.arg = FALSE,
-                       imethod = 1,
-                       isd = NULL,
-                       parallel = FALSE,
-                       smallno = 1.0e-5,
-                       zero = "sd") {
+ uninormal <-
+  function(lmean = "identitylink", lsd = "loge", lvar = "loge",
+           var.arg = FALSE,
+           imethod = 1,
+           isd = NULL,
+           parallel = FALSE,
+           smallno = 1.0e-5,
+           zero = "sd") {
 
 
 
@@ -2310,7 +2372,7 @@ moment.millsratio2 <- function(zedd) {
     mynames2 <- param.names(if ( .var.arg ) "var" else "sd", ncoly)
     predictors.names <-
         c(namesof(mynames1, .lmean , earg = .emean , tag = FALSE),
-          if ( .var.arg ) 
+          if ( .var.arg )
           namesof(mynames2, .lvare , earg = .evare , tag = FALSE) else
           namesof(mynames2, .lsdev , earg = .esdev , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
@@ -2338,7 +2400,7 @@ moment.millsratio2 <- function(zedd) {
               sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
               sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
           } else if ( .imethod == 3) {
-            sqrt( sum(w[, jay] * 
+            sqrt( sum(w[, jay] *
                   (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
           } else {
             sqrt( sum(w[, jay] * abs(y[, jay] -
@@ -2457,6 +2519,30 @@ moment.millsratio2 <- function(zedd) {
            .smallno = smallno,
            .var.arg = var.arg ))),
   vfamily = c("uninormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    M1 <- 2
+    ncoly <- NCOL(y)
+
+    mymu <- eta2theta(  eta[, M1*(1:ncoly) - 1], .lmean , earg = .emean )
+    if ( .var.arg ) {
+      Varm <- eta2theta(eta[, M1*(1:ncoly)    ], .lvare , earg = .evare )
+      sdev <- 111
+    } else {
+      sdev <- eta2theta(eta[, M1*(1:ncoly)    ], .lsdev , earg = .esdev )
+      Varm <- 111
+    }
+    okay1 <- all(is.finite(mymu)) &&
+             all(is.finite(sdev)) && all(0 < sdev) &&
+             all(is.finite(Varm)) && all(0 < Varm)
+    okay2 <- TRUE
+    if ( .lmean == "explink") {
+      okay2 <- all(0 < eta[, M1*(1:ncoly) - 1])
+    }
+    okay1 && okay2
+  }, list( .lmean = lmean, .lsdev = lsdev, .lvare = lvare,
+           .emean = emean, .esdev = esdev, .evare = evare,
+           .smallno = smallno,
+           .var.arg = var.arg ))),
 
 
 
@@ -2466,7 +2552,7 @@ moment.millsratio2 <- function(zedd) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     mymu <- fitted(object)
     eta <- predict(object)
@@ -2640,9 +2726,9 @@ moment.millsratio2 <- function(zedd) {
   } else {
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
-                                M1 = M)  # 20151222; Okay for one response?
+                                M1 = M)  # 20151222; Okay for 1 response?
   }
-  }), list( .zero = zero 
+  }), list( .zero = zero
           ))),
 
   infos = eval(substitute(function(...) {
@@ -2717,9 +2803,10 @@ moment.millsratio2 <- function(zedd) {
     extra$earg.list <- earg.list
 
 
-    
 
-    if (any(is.multilogit <- (unlist(link.list.ordered) == "multilogit"))) {
+
+    if (any(is.multilogit <-
+       (unlist(link.list.ordered) == "multilogit"))) {
       if (sum(is.multilogit) < 2)
         stop("at least two 'multilogit' links need to be specified, ",
              "else none")
@@ -2728,7 +2815,7 @@ moment.millsratio2 <- function(zedd) {
       extra$is.multilogit <- is.multilogit
     }
 
-    
+
 
 
     temp5 <-
@@ -2761,8 +2848,8 @@ moment.millsratio2 <- function(zedd) {
 
     for (jlocal in seq_along(mynames1)) {
       mynames1[jlocal] <- namesof(mynames1[jlocal],
-                                  link = link.list[[jlocal]],
-                                  earg = earg.list[[jlocal]], short = TRUE)
+                                link = link.list[[jlocal]],
+                                earg = earg.list[[jlocal]], short = TRUE)
     }
     extra$all.mynames1 <- all.mynames1 <- mynames1
 
@@ -2773,7 +2860,7 @@ moment.millsratio2 <- function(zedd) {
     mynames2 <- param.names(if ( .var.arg ) "var" else "sd", ncoly)
     predictors.names <-
         c(mynames1,
-          if ( .var.arg ) 
+          if ( .var.arg )
           namesof(mynames2, .lvar  , earg = .evar  , tag = FALSE) else
           namesof(mynames2, .lsd   , earg = .esd   , tag = FALSE))
     extra$predictors.names <- predictors.names
@@ -2811,15 +2898,17 @@ moment.millsratio2 <- function(zedd) {
 
         if (link.list[[jlocal]] == "logoff" &&
             is.numeric(LLL <- (earg.list[[jlocal]])$offset) &&
-            jfit.coeff[jlocal] <= -LLL)
+            jfit.coeff[jlocal] <= -LLL) {
           jfit.coeff[jlocal] <- max((-LLL) * 1.05,
                                     (-LLL) * 0.95, -LLL + 1)
+        }
 
         if (link.list[[jlocal]] == "loge" &&
             jfit.coeff[jlocal] <= 0.001)
           jfit.coeff[jlocal] <- 1/8
       }
 
+
       if (!icoefficients.given) {
         if (LLL <- length(extra$is.multilogit)) {
           raw.coeffs <- jfit.coeff[extra$col.index.is.multilogit]
@@ -2851,7 +2940,7 @@ moment.millsratio2 <- function(zedd) {
           multilogit(thetamat.init[, extra$col.index.is.multilogit])
         etamat.init <- etamat.init[, -max(extra$col.index.is.multilogit)]
       }
-      
+
 
       w.sum1 <- w / sum(w)
       sdev.init <-
@@ -2868,7 +2957,8 @@ moment.millsratio2 <- function(zedd) {
 
       sd.inflation.factor <- .sd.inflation.factor
       sdev.init <- sdev.init * sd.inflation.factor
-      sdev.init <- pmax(sdev.init, ( .Machine$double.eps )^0.25)  # Limit the smallness
+      sdev.init <- pmax(sdev.init,
+                     ( .Machine$double.eps )^0.25)  # Limit the smallness
 
       if (length( .isdev )) {
         sdev.init <- matrix( .isdev , n, ncoly, byrow = TRUE)
@@ -2900,7 +2990,7 @@ moment.millsratio2 <- function(zedd) {
     sdev <- eta2theta(eta[, M1*(1:NOS)  , drop = FALSE],
                       .lsd , earg = .esd )
 
-    okay1 <- all(is.finite(sdev))  && all(sdev  > 0) &&
+    okay1 <- all(is.finite(sdev))  && all(0 < sdev) &&
              all(is.finite(eta))
     okay1
   }, list( .link.list = link.list,
@@ -2922,7 +3012,7 @@ moment.millsratio2 <- function(zedd) {
                    probs.last.multilogit = 0,
                    if (last.one == M) NULL else
                    coffs[, last.one:ncol(coffs)])
-    colnames(coffs) <- extra$all.mynames1 
+    colnames(coffs) <- extra$all.mynames1
   }
 
 
@@ -2944,7 +3034,7 @@ moment.millsratio2 <- function(zedd) {
 
     if (LLL <- length(extra$col.index.is.multilogit)) {
       coffs[, extra$col.index.is.multilogit] <-
-        multilogit(eta[, extra$col.index.is.multilogit[-LLL], drop = FALSE],
+     multilogit(eta[, extra$col.index.is.multilogit[-LLL], drop = FALSE],
                inverse = TRUE)
     }
 
@@ -3058,7 +3148,8 @@ moment.millsratio2 <- function(zedd) {
 
     if (LLL <- length(extra$col.index.is.multilogit)) {
       coffs[, extra$col.index.is.multilogit] <-
-        multilogit(eta[, extra$col.index.is.multilogit[-LLL], drop = FALSE],
+        multilogit(eta[, extra$col.index.is.multilogit[-LLL],
+                       drop = FALSE],
                inverse = TRUE)
     }
 
@@ -3072,7 +3163,7 @@ moment.millsratio2 <- function(zedd) {
         extra$earg.list[[jlocal]]
       }
       dcoffs.deta[, jlocal] <-
-        dtheta.deta(coffs[, jlocal], 
+        dtheta.deta(coffs[, jlocal],
                     link = extra$link.list[[jlocal]],
                     earg = earg.use)
     }
@@ -3092,7 +3183,7 @@ moment.millsratio2 <- function(zedd) {
       dsd.deta <- dtheta.deta(sdev, .lsd  , earg = .esd )
     }
 
-    
+
     dMu.deta <- dmu.dcoffs * dcoffs.deta  # n x pLM, but may change below
     if (LLL <- length(extra$col.index.is.multilogit)) {
       dMu.deta[, extra$col.index.is.multilogit[-LLL]] <-
@@ -3102,20 +3193,20 @@ moment.millsratio2 <- function(zedd) {
                       coffs[, extra$col.index.is.multilogit]))
       dMu.deta <- dMu.deta[, -extra$col.index.is.multilogit[LLL]]
     }
-    
+
 
     dl.deta <- if ( .var.arg )
                c(w) * cbind(dl.dmu * dMu.deta,
                             "var" = c(dl.dva * dva.deta)) else
                c(w) * cbind(dl.dmu * dMu.deta,
                             "sd"  = c(dl.dsd * dsd.deta))
- 
+
     dl.deta
   }), list( .link.list = link.list, .lsd = lsd, .lvar = lvar,
             .earg.list = earg.list, .esd = esd, .evar = evar,
             .var.arg = var.arg ))),
 
-      
+
 
 
 
@@ -3144,17 +3235,17 @@ moment.millsratio2 <- function(zedd) {
     indtw <- iam(NA, NA, M-1, both = TRUE, diag = TRUE)
     ned2l.dmu2 <- 1 / sdev^2
 
- 
 
 
 
- 
+
+
     if ((LLL <- length(extra$col.index.is.multilogit))) {
        dmu.dcoffs[, extra$col.index.is.multilogit[-LLL]] <-
          dMu.deta[, extra$col.index.is.multilogit[-LLL]]
       dcoffs.deta[, extra$col.index.is.multilogit[-LLL]] <- 1
      }
-  
+
     twz  <- crossprod(dmu.dcoffs * sqrt(c(w))) / sum(w)
 
     twz <- matrix(twz[cbind(indtw$row.index,
@@ -3288,6 +3379,14 @@ moment.millsratio2 <- function(zedd) {
   }, list( .lmulog = lmulog, .lsdlog = lsdlog,
            .emulog = emulog, .esdlog = esdlog ))),
   vfamily = c("lognormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mulog <- eta2theta(eta[, 1], .lmulog , earg = .emulog )
+    sdlog <- eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+    okay1 <- all(is.finite(mulog)) &&
+             all(is.finite(sdlog)) && all(0 < sdlog)
+    okay1
+  }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+           .emulog = emulog, .esdlog = esdlog ))),
 
 
 
@@ -3297,7 +3396,7 @@ moment.millsratio2 <- function(zedd) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     mulog <- eta2theta(eta[, c(TRUE, FALSE)], .lmulog , earg = .emulog )
@@ -3319,7 +3418,7 @@ moment.millsratio2 <- function(zedd) {
     dl.dmulog <- (log(y) - mulog) / sdlog^2
     dl.dsdlog <- -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
 
-    c(w) * cbind(dl.dmulog * dmulog.deta, 
+    c(w) * cbind(dl.dmulog * dmulog.deta,
                  dl.dsdlog * dsdlog.deta)
   }), list( .lmulog = lmulog, .lsdlog = lsdlog,
             .emulog = emulog, .esdlog = esdlog ))),
@@ -3341,7 +3440,8 @@ moment.millsratio2 <- function(zedd) {
 
 
 
-dskewnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
+dskewnorm <-
+  function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -3451,7 +3551,7 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
       etastart <- matrix(init.shape, n, ncol(y))
     }
   }), list( .lshape = lshape, .eshape = eshape,
-            .ishape = ishape ))), 
+            .ishape = ishape ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     alpha <- eta2theta(eta, .lshape , earg = .eshape )
     alpha * sqrt(2/(pi * (1+alpha^2 )))
@@ -3460,7 +3560,7 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
 
 
 
-    misc$link <-    c(shape = .lshape) 
+    misc$link <-    c(shape = .lshape)
 
     misc$earg <- list(shape = .eshape )
 
@@ -3488,8 +3588,14 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
         ll.elts
       }
     }
-  }, list( .eshape = eshape, .lshape = lshape ))), 
+  }, list( .eshape = eshape, .lshape = lshape ))),
   vfamily = c("skewnormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    alpha <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(alpha))
+    okay1
+  }, list( .eshape = eshape, .lshape = lshape ))),
+
 
 
 
@@ -3499,13 +3605,13 @@ rskewnorm <- function(n, location = 0, scale = 1, shape = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     alpha <- eta2theta(eta, .lshape , earg = .eshape )
     rskewnorm(nsim * length(alpha), location = 0, scale = 1,
               shape = alpha)
-  }, list( .eshape = eshape, .lshape = lshape ))), 
+  }, list( .eshape = eshape, .lshape = lshape ))),
 
 
 
diff --git a/R/family.oneinf.R b/R/family.oneinf.R
new file mode 100644
index 0000000..6dbc3b5
--- /dev/null
+++ b/R/family.oneinf.R
@@ -0,0 +1,2188 @@
+# These functions are
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+dlog <- function(x, shape, log = FALSE) {
+  # Density of the logarithmic (log-series) distribution:
+  #   f(x; shape) = -shape^x / (x * log1p(-shape)),  x = 1, 2, 3, ...
+  # with 0 < shape < 1.  Returns 0 off the support, NA for missing
+  # inputs, and NaN for shape outside [0, 1].
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)  # So that 'log' below is base::log, not the argument
+
+
+  N <- max(length(x), length(shape))
+  if (length(x)     != N) x     <- rep_len(x,     N)
+  if (length(shape) != N) shape <- rep_len(shape, N)
+  ox <- !is.finite(x)             # Inf, -Inf, NA, NaN
+  zero <- ox | round(x) != x | x < 1  # Off the support
+  ans <- rep_len(0.0, length(x))
+  if (log.arg) {
+    ans[ zero] <- log(0.0)  # -Inf off the support
+    ans[!zero] <- x[!zero] * log(shape[!zero]) - log(x[!zero]) -
+                  log(-log1p(-shape[!zero]))
+  } else {
+    ans[!zero] <- -(shape[!zero]^(x[!zero])) / (x[!zero] *
+                   log1p(-shape[!zero]))
+  }
+  ans[is.na(x)] <- NA_real_  # Fix: NA x used to be treated as density 0
+  # Fix: masking with an NA-containing logical subscript used to raise
+  # "NAs are not allowed in subscripted assignments" for NA shape.
+  ans[!is.na(shape) & (shape < 0 | 1 < shape)] <- NaN
+  ans[is.na(shape)] <- NA_real_
+  ans
+}
+
+
+
+plog  <- function(q, shape, log.p = FALSE) {
+  # CDF of the logarithmic (log-series) distribution:
+  #   F(q) = sum_{k=1}^{floor(q)} -shape^k / (k * log1p(-shape)).
+  # Small q: explicit partial sums accumulated by compiled C code.
+  # Large q*(1-shape): an asymptotic tail expansion is used instead
+  # (presumably Owen (1965), per the variable name -- TODO confirm).
+
+
+  if (any(is.na(q))) stop("NAs not allowed for argument 'q'")
+  if (any(is.na(shape)))
+    stop("NAs not allowed for argument 'shape'")
+
+
+  N <- max(length(q), length(shape))
+  if (length(q)     != N) q     <- rep_len(q,     N)
+  if (length(shape) != N) shape <- rep_len(shape, N)
+
+
+  # Threshold for switching from direct summation to the expansion.
+  bigno <- 10
+  owen1965 <- (q * (1 - shape) > bigno)
+  if (specialCase <- any(owen1965)) {
+    qqq <- q[owen1965]
+    ppp <- shape[owen1965]
+    pqp <- qqq * (1 - ppp)
+    bigans <- (ppp^(1+qqq) / (1-ppp)) * (1/qqq -
+              1 / (            pqp * (qqq-1)) +
+              2 / ((1-ppp)   * pqp * (qqq-1) * (qqq-2)) -
+              6 / ((1-ppp)^2 * pqp * (qqq-1) * (qqq-2) * (qqq-3)) +
+          24 / ((1-ppp)^3 * pqp * (qqq-1) * (qqq-2) * (qqq-3) * (qqq-4)))
+      bigans <- 1 + bigans / log1p(-ppp)
+  }
+
+  floorq <- pmax(1, floor(q))  # Ensures at least one element per q value
+  floorq[owen1965] <- 1        # Expansion cases need no summation terms
+  # Lay every q's run of pmf terms out in a single vector; the compiled
+  # routine accumulates each run into the corresponding 'answer' slot.
+  seqq <- sequence(floorq)
+  seqp <- rep(shape, floorq)
+  onevector <- (seqp^seqq / seqq) / (-log1p(-seqp))
+  rlist <-  .C("tyee_C_cum8sum",
+                as.double(onevector), answer = double(N),
+                as.integer(N), as.double(seqq),
+                as.integer(length(onevector)), notok=integer(1))
+  if (rlist$notok != 0)
+    stop("error in C function 'cum8sum'")
+  ans <- if (log.p) log(rlist$answer) else rlist$answer
+  if (specialCase)
+    ans[owen1965] <- if (log.p) log(bigans) else bigans
+  ans[q < 1] <- if (log.p) log(0.0) else 0.0  # Below the support
+  ans[shape < 0 | 1 < shape] <- NaN
+  ans
+}
+
+
+
+
+
+ qlog <- function(p, shape) {
+  # Quantile function of the logarithmic distribution.
+  # Strategy: bracket each quantile by repeatedly widening an upper
+  # bound, refine with bisection.basic() on the CDF, then snap the
+  # real-valued answer to the correct integer quantile.
+
+  LLL <- max(length(p), length(shape))
+  if (length(p)     != LLL) p     <- rep_len(p,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  ans <- rep_len(0, LLL)
+
+  lo <- rep_len(1, LLL)
+  approx.ans <- lo  # True at lhs
+  hi <- 2 * lo + 10
+  dont.iterate <- p == 1 | shape <= 0  # Handled explicitly at the end
+  done <- p <= plog(hi, shape) | dont.iterate
+  # Widen [lo, hi] elementwise until the CDF at 'hi' reaches p.
+  while (!all(done)) {
+    hi.save <- hi[!done]
+    hi[!done] <- 2 * lo[!done] + 10
+    lo[!done] <- hi.save
+    done[!done] <- (p[!done] <= plog(hi[!done], shape[!done]))
+  }
+
+  # Root function for the bisection: F(q) - p.
+  foo <- function(q, shape, p)
+    plog(q, shape) - p
+
+  lhs <- (p <= dlog(1, shape)) | dont.iterate  # Quantile is 1 (lhs)
+
+  approx.ans[!lhs] <-
+    bisection.basic(foo, lo[!lhs], hi[!lhs], tol = 1/16,
+                    shape = shape[!lhs], p = p[!lhs])
+  # Snap: pick faa+1 exactly when p falls in (F(faa), F(faa+1)].
+  faa <- floor(approx.ans)
+  ans <- ifelse(plog(faa, shape) < p & p <= plog(faa+1, shape),
+                faa+1, faa)
+
+  ans[p == 1] <- Inf
+  ans[shape <= 0] <- NaN
+
+  ans
+}  # qlog
+
+
+
+
+rlog <- function(n, shape) {
+  # Random generation for the logarithmic distribution, by inverting
+  # the CDF at uniform random numbers.
+  qlog(runif(n), shape = shape)
+}
+
+
+
+
+
+
+
+ logff <- function(lshape = "logit", gshape = ppoints(8), zero = NULL) {
+  # VGAM family function for the logarithmic (log-series) distribution.
+  # 'lshape': link applied to the shape parameter (0 < shape < 1).
+  # 'gshape': grid of candidate values searched for initial shapes.
+  # 'zero':   which linear predictors are modelled as intercept-only.
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+  new("vglmff",
+  blurb = c("Logarithmic distribution f(y) = a * shape^y / y, ",
+             "y = 1, 2, 3,...,\n",
+             "            0 < shape < 1, a = -1 / log(1-shape)  \n\n",
+             "Link:    ", namesof("shape", lshape, earg = eshape),
+             "\n", "\n",
+             "Mean:    a * shape / (1 - shape)", "\n"),
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 1
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = "shape",
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+
+  initialize = eval(substitute(expression({
+    # Responses must be positive integers; one shape per response column.
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+    ncoly <- ncol(y)
+    M1 <- 1
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+
+
+    mynames1  <- param.names("shape", ncoly)
+    predictors.names <- namesof(mynames1, .lshape , earg = .eshape ,
+                                tag = FALSE)
+
+
+    if (!length(etastart)) {
+      # Grid-search each response column for a good starting shape,
+      # maximizing the weighted log-likelihood over .gshape .
+      logff.Loglikfun <- function(shapeval, y, x, w, extraargs) {
+        sum(c(w) * dlog(x = y, shape = shapeval, log = TRUE))
+      }
+      Init.shape <- matrix(0, n, M)
+      shape.grid <- .gshape
+
+      for (ilocal in 1:ncoly) {
+        Init.shape[, ilocal] <- grid.search(shape.grid,
+                                            objfun = logff.Loglikfun,
+                                            y = y[, ilocal],  # x = x,
+                                            w = w[, ilocal])
+      }  # for
+      etastart <- theta2eta(Init.shape, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .eshape = eshape, .gshape = gshape ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    # Fitted value is the mean: a * shape / (1 - shape).
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    aa <- -1 / log1p(-shape)
+    aa * shape / (1 - shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+  last = eval(substitute(expression({
+    misc$link <- c(rep_len( .lshape , ncoly))
+    names(misc$link) <- mynames1
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- mynames1
+    for (ii in 1:ncoly) {
+      misc$earg[[ii]] <- .eshape
+    }
+  }), list( .lshape = lshape, .eshape = eshape ))),
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dlog(x = y, shape = shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("logff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    # NOTE(review): the "logfflink" case presumably guards a link whose
+    # domain requires positive eta -- confirm against the link's code.
+    okay0 <- if ( .lshape == "logfflink") all(0 < eta) else TRUE
+    okay1 <- if (okay0) {
+      shape <- eta2theta(eta, .lshape , earg = .eshape )
+      all(is.finite(shape)) && all(0 < shape & shape < 1)
+    } else {
+      FALSE
+    }
+    okay0 && okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    rlog(nsim * length(shape), shape = shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+  deriv = eval(substitute(expression({
+    # Score: dl/dshape = -a/(1-shape) + y/shape, with a = -1/log(1-shape).
+    M1 <- 1
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    aa <- -1 / log1p(-shape)
+    dl.dshape <- -aa / (1 - shape) + y / shape
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+    # Expected (Fisher) information for shape; diagonal since M1 == 1.
+    ned2l.dshape2 <- aa * (1 - aa * shape) / (shape * (1-shape)^2)
+    wz <- c(w) * ned2l.dshape2 * dshape.deta^2
+    wz
+  }), list( .lshape = lshape, .eshape = eshape ))))
+}
+
+
+
+
+
+
+
+
+
+deflat.limit.oilog  <- function(shape) {
+  # Lower bound on 'pstr1' for a valid 1-deflated logarithmic
+  # distribution; pstr1 may go this far below zero.
+  if (any(shape <= 0 | 1 <= shape ))
+    stop("argument 'shape' must be in (0, 1)")
+  1 / (1 - 1 / dlog(1, shape))
+}
+
+
+
+doilog <- function(x, shape, pstr1 = 0, log = FALSE) {
+  # Density of the one-inflated logarithmic distribution: the value 1
+  # receives extra probability mass 'pstr1'.  Negative pstr1 (down to
+  # deflat.limit.oilog(shape)) gives 1-deflation.
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL <- max(length(x), length(shape), length(pstr1))
+  if (length(x)     != LLL) x     <- rep_len(x,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+
+  ans <- rep(NA_real_, LLL)
+  index1 <- (x == 1)
+  if (log.arg) {
+    ans[ index1] <- log(pstr1[ index1] + (1 - pstr1[ index1]) *
+                       dlog(x[ index1], shape[ index1]))
+    ans[!index1] <- log1p(-pstr1[!index1]) +
+                          dlog(x[!index1], shape[!index1], log = TRUE)
+  } else {
+    ans[ index1] <-      pstr1[ index1] + (1 - pstr1[ index1]) *
+                        dlog(x[ index1], shape[ index1])
+    ans[!index1] <- (1 - pstr1[!index1]) *
+                        dlog(x[!index1], shape[!index1])
+  }
+
+  # Fix: deflat.limit.oilog() stop()s if ANY shape lies outside (0, 1),
+  # which made the NaN masking below unreachable for exactly the inputs
+  # it was meant to handle.  Compute the limit only for valid shapes.
+  shape.ok <- is.finite(shape) & 0 < shape & shape < 1
+  if (any(shape.ok)) {
+    deflat.limit <- deflat.limit.oilog(shape[shape.ok])
+    ans[shape.ok][pstr1[shape.ok] < deflat.limit] <- NaN
+  }
+  ans[1 < pstr1] <- NaN
+  ans[!shape.ok] <- NaN
+  ans
+}  # doilog
+
+
+
+
+poilog <- function(q, shape, pstr1 = 0) {
+  # CDF of the one-inflated logarithmic distribution:
+  #   F(q) = pstr1 + (1 - pstr1) * plog(q, shape)  for q >= 1.
+  LLL <- max(length(q), length(shape), length(pstr1))
+  if (length(q)     != LLL) q     <- rep_len(q,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+
+  ans <- plog(q, shape)  #, lower.tail = lower.tail, log.p = log.p
+  ans <- ifelse(q < 1, 0, pstr1 + (1 - pstr1) * ans)
+
+  # Fix: deflat.limit.oilog() stop()s if ANY shape lies outside (0, 1),
+  # which made the NaN masking below unreachable for exactly the inputs
+  # it was meant to handle.  Compute the limit only for valid shapes.
+  shape.ok <- is.finite(shape) & 0 < shape & shape < 1
+  if (any(shape.ok)) {
+    deflat.limit <- deflat.limit.oilog(shape[shape.ok])
+    ans[shape.ok][pstr1[shape.ok] < deflat.limit] <- NaN
+  }
+  ans[1 < pstr1] <- NaN
+  ans[!shape.ok] <- NaN
+
+  ans
+}  # poilog
+
+
+
+
+
+qoilog <- function(p, shape, pstr1 = 0) {
+  # Quantile function of the one-inflated logarithmic distribution.
+  # For p <= pstr1 the quantile is 1 (the inflated value); otherwise
+  # map p back to the uninflated scale and call qlog().
+  LLL <- max(length(p), length(shape), length(pstr1))
+  if (length(p)     != LLL) p     <- rep_len(p,     LLL)
+  if (length(shape) != LLL) shape <- rep_len(shape, LLL)
+  if (length(pstr1) != LLL) pstr1 <- rep_len(pstr1, LLL)
+  ans <- rep_len(NA_real_, LLL)
+
+  # Fix: deflat.limit.oilog() stop()s if ANY shape lies outside (0, 1),
+  # which made the NaN masking below unreachable for exactly the inputs
+  # it was meant to handle.  Compute the limit only for valid shapes.
+  shape.ok <- is.finite(shape) & 0 < shape & shape < 1
+  deflat.limit <- rep_len(NaN, LLL)
+  if (any(shape.ok))
+    deflat.limit[shape.ok] <- deflat.limit.oilog(shape[shape.ok])
+
+  ans[p <= pstr1] <- 1
+  pindex <- shape.ok & (deflat.limit <= pstr1) & (pstr1 < p)
+  ans[pindex] <-
+    qlog((p[pindex] - pstr1[pindex]) / (1 - pstr1[pindex]),
+         shape = shape[pindex])
+
+  ans[shape.ok & pstr1 < deflat.limit] <- NaN
+  ans[1 < pstr1] <- NaN
+
+  ans[p < 0] <- NaN
+  ans[1 < p] <- NaN
+  ans[!shape.ok] <- NaN
+
+  ans
+}  # qoilog
+
+
+
+roilog <- function(n, shape, pstr1 = 0) {
+  # Random variates from the one-inflated logarithmic distribution,
+  # generated by inversion of the quantile function.
+  qoilog(runif(n), shape = shape, pstr1 = pstr1)
+}
+
+
+
+
+
+ oilog <-
+  function(lpstr1 = "logit", lshape = "logit",
+         type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"),
+           ishape = NULL,
+           gpstr1 = ppoints(8),
+           gshape = ppoints(8),
+           zero = NULL) {
+  # VGAM family function for the one-inflated logarithmic distribution.
+  # Two parameters per response: 'pstr1' (probability of a structural 1;
+  # may be negative for 1-deflation) and 'shape' (0 < shape < 1).
+  # 'gpstr1'/'gshape' are the grids searched for initial values.
+
+  lpstr1 <- as.list(substitute(lpstr1))
+  epstr1 <- link2list(lpstr1)
+  lpstr1 <- attr(epstr1, "function.name")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+
+  type.fitted <- match.arg(type.fitted,
+                   c("mean", "shape", "pobs1", "pstr1", "onempstr1"))[1]
+
+
+  if (length(ishape))
+    if (!is.Numeric(ishape, positive = TRUE))
+      stop("argument 'ishape' values must be positive")
+
+
+  new("vglmff",
+  blurb = c("One-inflated logarithmic distribution\n\n",
+            "Links:    ",
+            namesof("pstr1", lpstr1, earg = epstr1 ), ", ",
+            namesof("shape", lshape, earg = eshape ), "\n",
+            "Mean:     pstr1 + (1 - pstr1) * a * shape / (1 - shape), ",
+                       "a = -1 / log(1-shape), 0 < shape < 1"),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("pstr1", "shape"),
+         type.fitted  = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted
+         ))),
+  initialize = eval(substitute(expression({
+    M1 <- 2
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+    NOS <- ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    mynames1 <- param.names("pstr1", ncoly)
+    mynames2 <- param.names("shape", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lpstr1 , earg = .epstr1 , tag = FALSE),
+          namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
+          interleave.VGAM(M, M1 = M1)]
+
+
+    if (!length(etastart)) {
+
+      # 2-D grid search over (pstr1, shape) for each response column.
+      shape.init <-
+      pstr1.init <- matrix(NA_real_, n, NOS)
+      gpstr1 <- .gpstr1
+      gshape <- .gshape
+
+      oilog.Loglikfun <- function(pstr1, shape, y, x, w, extraargs) {
+        sum(c(w) * doilog(x = y, pstr1 = pstr1,
+                          shape = shape, log = TRUE))
+      }
+
+
+      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+
+
+        try.this <-
+          grid.search2(gpstr1, gshape,
+                       objfun = oilog.Loglikfun,
+                       y = y[, jay],  # x = x[TFvec, , drop = FALSE],
+                       w = w[, jay],
+                       ret.objfun = TRUE)  # Last value is the loglik
+        pstr1.init[, jay] <-  try.this["Value1"]
+        shape.init[, jay] <-  try.this["Value2"]
+      }  # for (jay ...)
+
+      etastart <- cbind(theta2eta(pstr1.init, .lpstr1 , earg = .epstr1 ),
+                        theta2eta(shape.init, .lshape , earg = .eshape ))[,
+                        interleave.VGAM(M, M1 = M1)]
+      mustart <- NULL  # Since etastart has been computed.
+    }  # End of !length(etastart)
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape,
+                              .ishape = ishape,
+            .gpstr1 = gpstr1,
+            .gshape  = gshape,
+            .type.fitted = type.fitted ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "shape", "pobs1", "pstr1", "onempstr1"))[1]
+
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+
+    # Mean of the (uninflated) logarithmic distribution.
+    Meanfun <- function(shape) {
+      aa <- -1 / log1p(-shape)
+      Mean <- aa * shape / (1 - shape)
+      Mean[shape <= 0 | 1 <= shape] <- NaN
+      Mean
+    }
+
+    ans <-
+      switch(type.fitted,
+             "mean"      = pstr1 + (1 - pstr1) * Meanfun(shape),
+             "shape"     = shape,
+             # Fix: was doizeta(), a copy-paste from the oizeta family.
+             "pobs1" = doilog(1, shape = shape, pstr1 = pstr1),  # P(Y=1)
+             "pstr1"     =     pstr1,
+             "onempstr1" = 1 - pstr1)
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    misc$link <-
+      c(rep_len( .lpstr1 , NOS),
+        rep_len( .lshape , NOS))[interleave.VGAM(M, M1 = M1)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[M1*ii-1]] <- .epstr1
+      misc$earg[[M1*ii  ]] <- .eshape
+    }
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * doilog(x = y, pstr1 = pstr1, shape = shape,
+                               log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+  vfamily = c("oilog"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    roilog(nsim * length(shape), shape = shape, pstr1 = pstr1)
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+                       earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape  ,
+                       earg = .eshape )
+    okay1 <- all(is.finite(shape )) && all(0 < shape & shape < 1) &&
+             all(is.finite(pstr1)) && all(pstr1 < 1)
+    okay2.deflat <- TRUE
+    if (okay1) {
+      # Fix: was deflat.limit.oizeta() (wrong distribution), and it was
+      # called before okay1 was checked, so an invalid 'shape' made the
+      # helper stop() instead of this function returning FALSE.
+      deflat.limit <- deflat.limit.oilog(shape)
+      if (!(okay2.deflat <- all(deflat.limit < pstr1)))
+        warning("parameter 'pstr1' is too negative even allowing for ",
+                "1-deflation.")
+    }
+    okay1 && okay2.deflat
+  }, list( .lpstr1 = lpstr1, .lshape = lshape,
+           .epstr1 = epstr1, .eshape = eshape ))),
+
+
+
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- M / M1
+    pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
+                       earg = .epstr1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                       earg = .eshape )
+
+    pmf1 <- dlog(1, shape)
+    onempmf1 <- 1 - pmf1  # dozeta(1, shape = shape, pstr1 = pstr1)
+    pobs1 <- pstr1 + (1 - pstr1) * pmf1
+    index1 <- as.matrix(y == 1)
+
+    mraa <- log1p(-shape)
+    aaaa <- -1 / mraa
+
+    # Score wrt pstr1: different form on the inflated cell (y == 1).
+    dl.dpstr1 <- onempmf1 / pobs1
+    dl.dpstr1[!index1] <- -1 / (1 - pstr1[!index1])
+
+
+
+
+    dpmf1.dshape <- -1 / mraa - shape / ((1 - shape) * mraa^2)
+    d2pmf1.dshape2 <- -2 / ((1 - shape) * mraa^2) -
+                      shape * (2 + mraa) / ((1 - shape)^2 * mraa^3)
+
+    # Score wrt shape: again split by y == 1 versus y > 1.
+    dl.dshape <- (1 - pstr1) * dpmf1.dshape / pobs1  #
+    dl.dshape[!index1] <- y[!index1] / shape[!index1] +
+                         1 / ((1 - shape[!index1]) * mraa[!index1])
+
+    dpstr1.deta <- dtheta.deta(pstr1, .lpstr1 , earg = .epstr1 )
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+
+    myderiv <- c(w) * cbind(dl.dpstr1 * dpstr1.deta,
+                            dl.dshape * dshape.deta)
+    myderiv[, interleave.VGAM(M, M1 = M1)]
+  }), list( .lpstr1 = lpstr1, .lshape = lshape,
+            .epstr1 = epstr1, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+
+    # Expected information matrix entries (2x2 per response), using
+    # quantities computed in the deriv slot above.
+    EY.y.gt.1 <- aaaa * shape^2 / ((1 - shape) * (1 - aaaa * shape))
+    LHS <- ((1 - pstr1) / pobs1) * dpmf1.dshape^2 - d2pmf1.dshape2
+    RHS <- EY.y.gt.1 / shape^2 - (1 + mraa) / ((1 - shape) * mraa)^2
+    ned2l.dpstr12 <- onempmf1 / ((1 - pstr1) * pobs1)  #
+    ned2l.dpstr1shape <- dpmf1.dshape / pobs1  #
+    ned2l.dshape2 <- (1 - pstr1) * (LHS + (1 - pmf1) * RHS)
+
+    wz <- array(c(c(w) * ned2l.dpstr12 * dpstr1.deta^2,
+                  c(w) * ned2l.dshape2 * dshape.deta^2,
+                  c(w) * ned2l.dpstr1shape * dpstr1.deta * dshape.deta),
+                dim = c(n, M / M1, 3))
+    wz <- arwz2wz(wz, M = M, M1 = M1)
+    wz
+  }), list( .lshape = lshape, .eshape = eshape ))))
+}  # oilog
+
+
+
+
+
+
+dotlog <- function(x, shape, log = FALSE) {
+  # Density of the one-truncated logarithmic distribution: the
+  # logarithmic distribution conditioned on Y >= 2 (mass at 1 removed).
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  pmf1 <- dlog(1, shape)  # Mass at 1, to be truncated away
+  if (log.arg) {
+    ans <- dlog(x, shape, log = TRUE) - log1p(-pmf1)
+    ans[x == 1] <- log(0)
+  } else {
+    ans <- dlog(x, shape) / (1 - pmf1)
+    ans[x == 1] <- 0
+  }
+  ans
+}  # dotlog
+
+
+
+potlog  <- function(q, shape, log.p = FALSE) {
+  # CDF of the one-truncated logarithmic distribution,
+  # i.e. the logarithmic CDF conditioned on Y >= 2.
+  pmf1 <- dlog(1, shape)
+  if (log.p) {
+    log(plog(q, shape) - pmf1) - log1p(-pmf1)
+  } else {
+    (plog(q, shape) - pmf1) / (1 - pmf1)
+  }
+}
+
+
+
+ qotlog <- function(p, shape) {
+  # Quantile function of the one-truncated logarithmic distribution:
+  # map p back to the untruncated probability scale, then call qlog().
+  pmf1 <- dlog(1, shape)
+  ans <- qlog(pmf1 + (1 - pmf1) * p, shape = shape)
+
+  ans[p == 1] <- Inf
+  ans[p < 0 | 1 < p] <- NaN
+  ans[shape < 0 | 1 < shape] <- NaN
+  ans
+}  # qotlog
+
+
+
+rotlog <- function(n, shape) {
+  # Random variates from the one-truncated logarithmic distribution,
+  # by inversion of the quantile function.
+  qotlog(runif(n), shape = shape)
+}
+
+
+
+ otlog <- function(lshape = "logit", gshape = ppoints(8), zero = NULL) {
+  # VGAM family function for the one-truncated logarithmic
+  # distribution (support y = 2, 3, ...).
+  # 'lshape': link for the shape parameter (0 < shape < 1).
+  # 'gshape': grid of candidate values for initial shapes.
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+
+  new("vglmff",
+  blurb = c("One-truncated logarithmic distribution ",
+            "f(y) = shape^y / ((-shape - log1p(-shape)) * y), ",
+             "y = 2, 3,...,\n",
+             "            0 < shape < 1,\n\n",
+             "Link:    ", namesof("shape", lshape, earg = eshape)),
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 1
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = "shape",
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+
+  initialize = eval(substitute(expression({
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    # The support starts at 2: a response of 1 is impossible here.
+    if (any(y <= 1))
+      stop("cannot have any 1s in the response")
+
+
+    ncoly <- ncol(y)
+    M1 <- 1
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+
+
+    mynames1  <- param.names("shape", ncoly)
+    predictors.names <- namesof(mynames1, .lshape , earg = .eshape ,
+                                tag = FALSE)
+
+
+    if (!length(etastart)) {
+      # Grid-search each response column for a good starting shape.
+      dotlog.Loglikfun <- function(shapeval, y, x, w, extraargs) {
+        sum(c(w) * dotlog(x = y, shape = shapeval, log = TRUE))
+      }
+      Init.shape <- matrix(0, n, M)
+      shape.grid <- .gshape
+
+      for (ilocal in 1:ncoly) {
+        Init.shape[, ilocal] <- grid.search(shape.grid,
+                                            objfun = dotlog.Loglikfun,
+                                            y = y[, ilocal],  # x = x,
+                                            w = w[, ilocal])
+      }  # for
+      etastart <- theta2eta(Init.shape, .lshape , earg = .eshape )
+    }
+  }), list( .lshape = lshape, .eshape = eshape, .gshape = gshape ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    # Fitted value: mean of the truncated distribution, i.e. the
+    # logarithmic mean adjusted for the removed mass at 1.
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    aa <- -1 / log1p(-shape)
+    ((aa * shape / (1 - shape)) - dlog(1, shape)) / (1 - dlog(1, shape))
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+  last = eval(substitute(expression({
+    misc$link <- c(rep_len( .lshape , ncoly))
+    names(misc$link) <- mynames1
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- mynames1
+    for (ii in 1:ncoly) {
+      misc$earg[[ii]] <- .eshape
+    }
+  }), list( .lshape = lshape, .eshape = eshape ))),
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dotlog(x = y, shape = shape, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lshape = lshape, .eshape = eshape ))),
+  vfamily = c("otlog"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape & shape < 1)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    rotlog(nsim * length(shape), shape = shape)
+  }, list( .lshape = lshape, .eshape = eshape ))),
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 1
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    aa <- -1 / log1p(-shape)
+    dl.dshape <- y / shape +
+                shape / ((1 - shape) * (shape + log1p(-shape)))
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * dl.dshape * dshape.deta
+  }), list( .lshape = lshape, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+    EY.logff <-  aa * shape / (1 - shape)  # Untruncated mean
+
+    # Symbolic derivative of the score's second term wrt shape.
+    # NOTE(review): despite its name, 'd2pmf1.dshape2' holds the
+    # gradient (first derivative) extracted from deriv3() -- confirm.
+    d3 <- deriv3( ~ shape / ((1 - shape) * (shape + log(1 - shape))),
+                  c("shape"), hessian = FALSE)
+    eval.d3 <- eval(d3)
+    d2pmf1.dshape2 <- c(attr(eval.d3, "gradient"))
+
+    ned2l.dshape2 <-
+      (EY.logff - dlog(1, shape)) / ((1 - dlog(1, shape)) * shape^2) -
+      d2pmf1.dshape2
+    wz <- c(w) * ned2l.dshape2 * dshape.deta^2
+    wz
+  }), list( .lshape = lshape, .eshape = eshape ))))
+}  # otlog
+
+
+
+
+
+
+dotpospois <- function(x, lambda, log = FALSE) {
+  # Density of the one-truncated positive-Poisson distribution: the
+  # positive Poisson conditioned on Y >= 2 (mass at 1 removed).
+  if (!is.logical(larg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  pmf1 <- dpospois(1, lambda)  # Mass at 1, to be truncated away
+  if (larg) {
+    ans <- dpospois(x, lambda, log = TRUE) - log1p(-pmf1)
+    ans[x == 1] <- log(0)
+  } else {
+    ans <- dpospois(x, lambda) / (1 - pmf1)
+    ans[x == 1] <- 0
+  }
+  ans
+}  # dotpospois
+
+
+
+potpospois  <- function(q, lambda, log.p = FALSE) {
+  # CDF of the one-truncated positive-Poisson distribution,
+  # i.e. the positive-Poisson CDF conditioned on Y >= 2.
+  pmf1 <- dpospois(1, lambda)
+  if (log.p) {
+    log(ppospois(q, lambda) - pmf1) - log1p(-pmf1)
+  } else {
+    (ppospois(q, lambda) - pmf1) / (1 - pmf1)
+  }
+}
+
+
+
+ qotpospois <- function(p, lambda) {
+  # Quantile function of the one-truncated positive-Poisson
+  # distribution: map p back to the untruncated probability scale,
+  # then call qpospois().
+  pmf1 <- dpospois(1, lambda)
+  ans <- qpospois(pmf1 + (1 - pmf1) * p, lambda = lambda)
+
+  ans[p == 1 & 0 < lambda] <- Inf
+  ans[p < 0 | 1 < p] <- NaN
+  ans[lambda < 0] <- NaN
+  ans
+}  # qotpospois
+
+
+
+rotpospois <- function(n, lambda) {
+  # Random variates from the one-truncated positive-Poisson
+  # distribution, by inversion of the quantile function.
+  qotpospois(runif(n), lambda = lambda)
+}
+
+
+
+
+ otpospoisson <-
+    function(llambda = "loge",
+             type.fitted = c("mean", "lambda", "prob0", "prob1"),
+             ilambda = NULL, imethod = 1, zero = NULL) {
+  # VGAM family function for the one-truncated positive-Poisson
+  # distribution (support y = 2, 3, ...).
+  # 'llambda': link for the Poisson rate parameter.
+  # 'ilambda'/'imethod': optional initial value / initialization method.
+
+  llambda <- as.list(substitute(llambda))
+  elambda <- link2list(llambda)
+  llambda <- attr(elambda, "function.name")
+
+
+  if (length( ilambda) && !is.Numeric(ilambda, positive = TRUE))
+    stop("bad input for argument 'ilambda'")
+
+  type.fitted <- match.arg(type.fitted,
+                           c("mean", "lambda", "prob0", "prob1"))[1]
+
+
+  new("vglmff",
+  blurb = c("One-truncated Positive-Poisson distribution\n\n",
+            "Links:    ",
+            namesof("lambda", llambda, earg = elambda, tag = FALSE)),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 1)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("lambda"),
+         type.fitted  = .type.fitted ,
+         llambda = .llambda ,
+         elambda = .elambda )
+  }, list( .llambda = llambda, .elambda = elambda,
+           .type.fitted = type.fitted ))),
+
+  initialize = eval(substitute(expression({
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+    # The support starts at 2.
+    if (any(y < 2))
+      stop("response values must be 2 or more")
+
+    ncoly <- ncol(y)
+    M1 <- 1
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+
+    mynames1 <- param.names("lambda", ncoly)
+    predictors.names <- namesof(mynames1, .llambda , earg = .elambda ,
+                                tag = FALSE)
+
+    if (!length(etastart)) {
+      lambda.init <- Init.mu(y = y, w = w, imethod = .imethod ,
+                             imu = .ilambda )
+
+      etastart <- theta2eta(lambda.init, .llambda , earg = .elambda)
+    }
+  }), list( .llambda = llambda, .elambda = elambda,
+            .ilambda = ilambda, .imethod = imethod,
+            .type.fitted = type.fitted ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- NCOL(eta) / c(M1 = 1)
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "lambda", "prob0", "prob1"))[1]
+
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
+    # "mean": lambda / P(Y >= 2) for an ordinary Poisson Y.
+    ans <- switch(type.fitted,
+                  "mean"      = lambda / ppois(1, lambda, lower = FALSE),
+                  "lambda"    = lambda,
+                  "prob0"     = ppois(0, lambda),  # P(Y=0) as it were
+                  "prob1"     = ppois(1, lambda))  # P(Y=1) as it were
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .llambda = llambda, .elambda = elambda ))),
+  last = eval(substitute(expression({
+    misc$link <- rep_len( .llambda , M)
+    names(misc$link) <- mynames1
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- mynames1
+    for (ii in 1:M)
+      misc$earg[[ii]] <- .elambda
+  }), list( .llambda = llambda, .elambda = elambda ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dotpospois(x = y, lambda = lambda, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .llambda = llambda, .elambda = elambda ))),
+  vfamily = c("otpospoisson"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .llambda = llambda, .elambda = elambda ))),
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
+    rotpospois(nsim * length(lambda), lambda)
+  }, list( .llambda = llambda, .elambda = elambda ))),
+
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 1
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
+
+    EY.cond <- 1 / ppois(1, lambda, lower.tail = FALSE)  # 1 / P(Y >= 2)
+    temp1 <- expm1(lambda)  # NOTE(review): apparently unused below
+    temp0 <- lambda * exp(-lambda)    # P(Y = 1) for an ordinary Poisson
+    prob.geq.2 <- -expm1(-lambda) - temp0   # P(Y >= 2)
+    dl.dlambda <- y / lambda - 1 - temp0 / prob.geq.2
+
+    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+
+    c(w) * dl.dlambda * dlambda.deta
+  }), list( .llambda = llambda, .elambda = elambda ))),
+  weight = eval(substitute(expression({
+    # Expected information, using quantities from the deriv slot.
+    ned2l.dlambda2 <- EY.cond / lambda +
+        ((1 - lambda) * exp(-lambda) - temp0^2 / prob.geq.2) / prob.geq.2
+    wz <-  ned2l.dlambda2 * dlambda.deta^2
+    c(w) * wz
+  }), list( .llambda = llambda, .elambda = elambda ))))
+}  # otpospoisson
+
+
+
+
+
+
+doalog <- function(x, shape, pobs1 = 0, log = FALSE) {
+  # Density of the one-altered logarithmic distribution:
+  # P(Y = 1) = pobs1, and for y > 1 the probability mass is
+  # (1 - pobs1) * dotlog(y, shape) (1-truncated logarithmic part).
+  # Vectorized over x, shape and pobs1 (recycled to a common length).
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL   <- max(length(x), length(shape), length(pobs1))
+  x     <- rep_len(x,     LLL)
+  shape <- rep_len(shape, LLL)
+  pobs1 <- rep_len(pobs1, LLL)
+  ans   <- rep_len(0.0, LLL)
+
+  is.one <- (x == 1)
+  if (log.arg) {
+    ans[ is.one] <- log(pobs1[is.one])
+    ans[!is.one] <- log1p(-pobs1[!is.one]) +
+                    dotlog(x[!is.one], shape[!is.one], log = TRUE)
+  } else {
+    ans[ is.one] <- pobs1[is.one]
+    ans[!is.one] <- (1 - pobs1[!is.one]) *
+                    dotlog(x[!is.one], shape[!is.one])
+  }
+  # A mixing probability outside [0, 1] is invalid.
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans
+}
+
+
+
+poalog <- function(q, shape, pobs1 = 0) {
+  # Distribution function of the one-altered logarithmic
+  # distribution: 0 below 1, pobs1 at 1, and for q > 1
+  # pobs1 + (1 - pobs1) * potlog(q, shape).
+  LLL   <- max(length(q), length(shape), length(pobs1))
+  q     <- rep_len(q,     LLL)
+  shape <- rep_len(shape, LLL)
+  pobs1 <- rep_len(pobs1, LLL)
+
+  ans <- rep_len(0.0, LLL)  # q < 1 entries stay 0
+  above <- (q > 1)
+  ans[above]  <- pobs1[above] +
+                 (1 - pobs1[above]) * potlog(q[above], shape[above])
+  ans[q == 1] <- pobs1[q == 1]
+
+  # Clamp to [0, 1] before flagging invalid mixing probabilities.
+  ans <- pmin(1, pmax(0, ans))
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+
+  ans
+}
+
+
+
+qoalog <- function(p, shape, pobs1 = 0) {
+  # Quantile function of the one-altered logarithmic distribution.
+  # Probabilities p <= pobs1 map to the atom at 1; larger p are
+  # passed, rescaled, to the 1-truncated logarithmic quantiles.
+  LLL   <- max(length(p), length(shape), length(pobs1))
+  p     <- rep_len(p,     LLL)
+  shape <- rep_len(shape, LLL)
+  pobs1 <- rep_len(pobs1, LLL)
+
+  ans <- rep_len(NaN, LLL)
+  in.tail <- (pobs1 < p)
+  ans[!in.tail] <- 1
+  ans[ in.tail] <- qotlog((p[in.tail] - pobs1[in.tail]) /
+                          (1 - pobs1[in.tail]),
+                          shape = shape[in.tail])
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[p < 0 | 1 < p] <- NaN
+  ans
+}
+
+
+
+roalog <- function(n, shape, pobs1 = 0) {
+  # Random generation for the one-altered logarithmic distribution,
+  # by inversion of the quantile function on Uniform(0, 1) draws.
+  qoalog(runif(n), shape = shape, pobs1 = pobs1)
+}
+
+
+
+
+
+
+# One-altered logarithmic distribution VGAM family function.
+# A Bernoulli component models P(Y = 1) = pobs1; with probability
+# 1 - pobs1 the response follows a 1-truncated logarithmic(shape)
+# distribution.  Two linear predictors per response column:
+# eta1 = lpobs1(pobs1) and eta2 = lshape(shape).
+# Arguments: lpobs1/lshape are link functions; ipobs1 an optional
+# initial value for pobs1; gshape the grid searched for an initial
+# shape; zero selects intercept-only linear predictors.
+ oalog <-
+  function(lpobs1 = "logit",
+           lshape = "logit",
+           type.fitted = c("mean", "shape", "pobs1", "onempobs1"),
+           ipobs1 = NULL,
+           gshape = ppoints(8),
+           zero = NULL) {
+
+
+  # Standard VGAM idiom: resolve each link argument into its link
+  # name plus a list of extra link arguments.
+  lpobs1 <- as.list(substitute(lpobs1))
+  epobs1 <- link2list(lpobs1)
+  lpobs1 <- attr(epobs1, "function.name")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+ type.fitted <- match.arg(type.fitted,
+                           c("mean", "shape", "pobs1", "onempobs1"))[1]
+
+
+  new("vglmff",
+  blurb = c("One-altered logarithmic distribution \n",
+            "(Bernoulli and 1-truncated logarithmic distribution model)",
+            "\n\n",
+            "Links:    ",
+            namesof("pobs1",  lpobs1, earg = epobs1, tag = FALSE), ", ",
+            namesof("shape",  lshape, earg = eshape, tag = FALSE)),
+  # Constraint matrices; honours the 'zero' argument.
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("pobs1", "shape"),
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+
+  # Checks the response (positive integers), builds predictor names,
+  # and computes starting values: pobs1 from the shrunken observed
+  # proportion of ones, shape by a grid search over .gshape that
+  # maximizes the logarithmic log-likelihood on the y > 1 rows.
+  initialize = eval(substitute(expression({
+    M1 <- 2
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+    extra$y1 <- y1 <- ifelse(y == 1, 1, 0)
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$skip.these <- skip.these <- matrix(as.logical(y1), n, NOS)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    mynames1 <- param.names("pobs1", ncoly)
+    mynames2 <- param.names("shape", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lpobs1 , earg = .epobs1 , tag = FALSE),
+          namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
+          interleave.VGAM(M1*NOS, M1 = M1)]
+
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+
+
+    if (!length(etastart)) {
+      dotlog.Loglikfun <- function(shapeval, y, x, w, extraargs) {
+        sum(c(w) * dotlog(x = y, shape = shapeval, log = TRUE))
+      }
+      Init.shape <- matrix(0, n, ncoly)
+      shape.grid <- .gshape
+
+      for (jlocal in 1:ncoly) {
+        index1 <- y[, jlocal] > 1
+        Init.shape[, jlocal] <-
+          grid.search(shape.grid,
+                      objfun = dotlog.Loglikfun,
+                      y = y[index1, jlocal],  # x = x,
+                      w = w[index1, jlocal])
+      }  # for
+      etastart <-
+        cbind(theta2eta(if (length( .ipobs1 )) .ipobs1 else
+                        (0.5 + w * y1) / (1 + w),
+                        .lpobs1 , earg = .epobs1 ),
+              theta2eta(Init.shape, .lshape , earg = .eshape ))
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
+    }
+  }), list( .lshape = lshape, .eshape = eshape, .gshape = gshape,
+            .lpobs1 = lpobs1, .epobs1 = epobs1,
+            .ipobs1 = ipobs1,  # .ishape = ishape,
+            .type.fitted = type.fitted
+           ))),
+
+
+  # Fitted values; "mean" combines the atom at 1 with the mean of the
+  # 1-truncated logarithmic part.
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "shape", "pobs1", "onempobs1"))[1]
+
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+
+    # Odd eta columns hold pobs1, even columns hold shape.
+    pobs1 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+                             .lpobs1 , earg = .epobs1 ))
+    shape <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+                             .lshape , earg = .eshape ))
+
+    aa <- -1 / log1p(-shape)
+    otlog.mean <- ((aa * shape / (1 - shape)) -
+                   dlog(1, shape)) / (1 - dlog(1, shape))
+
+    ans <- switch(type.fitted,
+                  "mean"      = pobs1 + (1 - pobs1) * otlog.mean,
+                  "shape"     = shape,
+                  "pobs1"     =      pobs1,  # P(Y=1)
+                  "onempobs1" =  1 - pobs1)  # P(Y>1)
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+  # Post-fitting bookkeeping: record links and extra link arguments.
+  last = eval(substitute(expression({
+    temp.names <- c(rep_len( .lpobs1 , NOS),
+                    rep_len( .lshape , NOS))
+    temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
+    misc$link  <- temp.names
+    names(misc$link) <-
+      c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+
+    misc$earg <- vector("list", M1 * NOS)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .epobs1
+      misc$earg[[M1*ii  ]] <- .eshape
+    }
+  }), list( .lpobs1 = lpobs1, .lshape = lshape,
+            .epobs1 = epobs1, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+
+    pobs1 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                             .lpobs1, earg = .epobs1 ))
+    shape <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                             .lshape, earg = .eshape ))
+
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * doalog(x = y, pobs1 = pobs1, shape = shape,
+                               log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+  vfamily = c("oalog"),
+
+
+
+  # Parameter sanity check used by the iteration: both parameters
+  # must be finite and strictly inside (0, 1).
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    TFvec <- c(TRUE, FALSE)
+    pobs1 <- eta2theta(eta[,  TFvec, drop = FALSE],
+                       .lpobs1 , earg = .epobs1 )
+    shape <- eta2theta(eta[, !TFvec, drop = FALSE],
+                       .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape & shape < 1) &&
+             all(is.finite(pobs1)) && all(0 < pobs1 & pobs1 < 1)
+    okay1
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+
+
+  # simulate() support: draws via roalog() at the fitted parameters.
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pobs1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpobs1 , earg = .epobs1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    roalog(nsim * length(shape), shape = shape, pobs1 = pobs1)
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+
+
+
+  # Score vector; rows with y == 1 contribute only through pobs1
+  # (their shape derivative is zeroed in the skip loop below).
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- ncol(eta) / M1  # extra$NOS
+    y1 <- extra$y1
+    skip <- extra$skip.these
+
+    TFvec <- c(TRUE, FALSE)
+    pobs1 <- eta2theta(eta[,  TFvec, drop = FALSE],
+                       .lpobs1 , earg = .epobs1 )
+    shape <- eta2theta(eta[, !TFvec, drop = FALSE],
+                       .lshape , earg = .eshape )
+
+    aa <- -1 / log1p(-shape)
+    dl.dshape <- y / shape +
+                 shape / ((1 - shape) * (shape + log1p(-shape)))
+
+    dl.dpobs1 <- -1 / (1 - pobs1)  # For y > 1 obsns
+
+    for (spp. in 1:NOS) {
+      dl.dpobs1[skip[, spp.], spp.] <- 1 / pobs1[skip[, spp.], spp.]
+      dl.dshape[skip[, spp.], spp.] <- 0
+    }
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    mu.phi1 <- pobs1
+
+    # logit link admits the simpler canonical-form score for pobs1.
+    temp3 <- if ( .lpobs1 == "logit") {
+      c(w) * (y1 - mu.phi1)
+    } else {
+      c(w) * dtheta.deta(mu.phi1, link = .lpobs1 , earg = .epobs1 ) *
+             dl.dpobs1
+    }
+
+    ans <- cbind(temp3,
+                 c(w) * dl.dshape * dshape.deta)
+    ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
+    ans
+  }), list( .lpobs1 = lpobs1, .lshape = lshape,
+            .epobs1 = epobs1, .eshape = eshape ))),
+  # Expected information; the shape part uses a symbolic derivative
+  # (deriv3, "gradient" attribute) of the truncation-correction term.
+  weight = eval(substitute(expression({
+    wz <- matrix(0, n, M1 * NOS)  # EIM is diagonal
+
+
+    EY.logff <-  aa * shape / (1 - shape)
+    d3 <- deriv3( ~ shape / ((1 - shape) * (shape + log(1 - shape))),
+                  c("shape"), hessian = FALSE)
+    eval.d3 <- eval(d3)
+    d2pmf1.dshape2 <- c(attr(eval.d3, "gradient"))
+
+    ned2l.dshape2 <-
+      (EY.logff - dlog(1, shape)) / ((1 - dlog(1, shape)) * shape^2) -
+      d2pmf1.dshape2
+
+
+
+    ned2l.dshape2 <- (1-pobs1) * ned2l.dshape2  #+stop("another quantity")
+    wz[, NOS+(1:NOS)] <- c(w) * ned2l.dshape2 * dshape.deta^2
+
+
+    tmp100 <- mu.phi1 * (1 - mu.phi1)
+    tmp200 <- if ( .lpobs1 == "logit" && is.empty.list( .epobs1 )) {
+        cbind(c(w) * tmp100)
+    } else {
+      cbind(c(w) * (1 / tmp100) *
+            dtheta.deta(mu.phi1, link = .lpobs1 , earg = .epobs1 )^2)
+    }
+    wz[, 1:NOS] <-  tmp200
+
+    wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
+
+    wz
+  }), list( .lpobs1 = lpobs1,
+            .epobs1 = epobs1 ))))
+}  # End of oalog
+
+
+
+
+
+
+
+
+
+doapospois <- function(x, lambda, pobs1 = 0, log = FALSE) {
+  # Density of the one-altered positive-Poisson distribution:
+  # P(Y = 1) = pobs1, and for y > 1 the probability mass is
+  # (1 - pobs1) * dotpospois(y, lambda) (1-truncated part).
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL    <- max(length(x), length(lambda), length(pobs1))
+  x      <- rep_len(x,      LLL)
+  lambda <- rep_len(lambda, LLL)
+  pobs1  <- rep_len(pobs1,  LLL)
+  ans    <- rep_len(0.0, LLL)
+
+  is.one <- (x == 1)
+  if (log.arg) {
+    ans[ is.one] <- log(pobs1[is.one])
+    ans[!is.one] <- log1p(-pobs1[!is.one]) +
+                    dotpospois(x[!is.one], lambda[!is.one], log = TRUE)
+  } else {
+    ans[ is.one] <- pobs1[is.one]
+    ans[!is.one] <- (1 - pobs1[!is.one]) *
+                    dotpospois(x[!is.one], lambda[!is.one])
+  }
+  # Invalid parameter values give NaN.
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[lambda < 0] <- NaN
+  ans
+}
+
+
+
+poapospois <- function(q, lambda, pobs1 = 0) {
+  # Distribution function of the one-altered positive-Poisson
+  # distribution: 0 below 1, pobs1 at 1, and for q > 1
+  # pobs1 + (1 - pobs1) * potpospois(q, lambda).
+  LLL    <- max(length(q), length(lambda), length(pobs1))
+  q      <- rep_len(q,      LLL)
+  lambda <- rep_len(lambda, LLL)
+  pobs1  <- rep_len(pobs1,  LLL)
+
+  ans <- rep_len(0.0, LLL)  # q < 1 entries stay 0
+  above <- (q > 1)
+  ans[above]  <- pobs1[above] +
+                 (1 - pobs1[above]) * potpospois(q[above], lambda[above])
+  ans[q == 1] <- pobs1[q == 1]
+
+  # Clamp to [0, 1] before flagging invalid parameters.
+  ans <- pmin(1, pmax(0, ans))
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[lambda < 0] <- NaN
+
+  ans
+}
+
+
+
+qoapospois <- function(p, lambda, pobs1 = 0) {
+  # Quantile function of the one-altered positive-Poisson
+  # distribution.  Probabilities p <= pobs1 map to the atom at 1;
+  # larger p are rescaled and sent to the 1-truncated quantiles.
+  LLL    <- max(length(p), length(lambda), length(pobs1))
+  p      <- rep_len(p,      LLL)
+  lambda <- rep_len(lambda, LLL)
+  pobs1  <- rep_len(pobs1,  LLL)
+
+  ans <- rep_len(NaN, LLL)
+  in.tail <- (pobs1 < p)
+  ans[!in.tail] <- 1
+  ans[ in.tail] <-
+    qotpospois((p[in.tail] - pobs1[in.tail]) / (1 - pobs1[in.tail]),
+               lambda = lambda[in.tail])
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[p < 0 | 1 < p] <- NaN
+  ans[lambda < 0] <- NaN
+  ans
+}
+
+
+
+roapospois <- function(n, lambda, pobs1 = 0) {
+  # Random generation for the one-altered positive-Poisson
+  # distribution, by inversion on Uniform(0, 1) draws.
+  qoapospois(runif(n), lambda = lambda, pobs1 = pobs1)
+}
+
+
+
+
+
+
+# One-altered positive-Poisson VGAM family function.
+# A Bernoulli component models P(Y = 1) = pobs1; with probability
+# 1 - pobs1 the response follows a 1-truncated positive-Poisson
+# distribution with rate 'lambda'.  Two linear predictors per
+# response column: eta1 = lpobs1(pobs1), eta2 = llambda(lambda).
+# ipobs1 optionally supplies an initial pobs1; zero selects
+# intercept-only linear predictors.
+# Fix: argument names 'lower.tail' (ppois) and 'lambda' (roapospois)
+# are now written out in full instead of relying on R's partial
+# argument matching ('lower', 'lambd'), which is fragile and warns
+# under options(warnPartialMatchArgs = TRUE).
+ oapospoisson <-
+  function(lpobs1 = "logit",
+           llambda = "loge",
+           type.fitted = c("mean", "lambda", "pobs1", "onempobs1"),
+           ipobs1 = NULL,
+           zero = NULL) {
+
+
+  # Resolve link arguments into (name, extra-arguments) pairs.
+  lpobs1 <- as.list(substitute(lpobs1))
+  epobs1 <- link2list(lpobs1)
+  lpobs1 <- attr(epobs1, "function.name")
+
+  llambd <- as.list(substitute(llambda))
+  elambd <- link2list(llambd)
+  llambd <- attr(elambd, "function.name")
+
+
+ type.fitted <- match.arg(type.fitted,
+                           c("mean", "lambda", "pobs1", "onempobs1"))[1]
+
+
+  new("vglmff",
+  blurb = c("One-altered positive-Poisson distribution \n",
+            "(Bernoulli and 1-truncated positive-Poisson ",
+            "distribution model)\n\n",
+            "Links:    ",
+            namesof("pobs1",  lpobs1, earg = epobs1, tag = FALSE), ", ",
+            namesof("lambda", llambd, earg = elambd, tag = FALSE)),
+  # Constraint matrices; honours the 'zero' argument.
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("pobs1", "lambda"),
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+
+  # Checks the response (positive integers), builds predictor names,
+  # and computes starting values: pobs1 from the shrunken observed
+  # proportion of ones, lambda crudely from y - 0.25.
+  initialize = eval(substitute(expression({
+    M1 <- 2
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+    extra$y1 <- y1 <- ifelse(y == 1, 1, 0)
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$skip.these <- skip.these <- matrix(as.logical(y1), n, NOS)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    mynames1 <- param.names("pobs1",  ncoly)
+    mynames2 <- param.names("lambda", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lpobs1 , earg = .epobs1 , tag = FALSE),
+          namesof(mynames2, .llambd , earg = .elambd , tag = FALSE))[
+          interleave.VGAM(M1*NOS, M1 = M1)]
+
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+
+
+    if (!length(etastart)) {
+      Init.lambda <- y - 0.25
+      etastart <-
+        cbind(theta2eta(if (length( .ipobs1 )) .ipobs1 else
+                        (0.5 + w * y1) / (1 + w),
+                        .lpobs1 , earg = .epobs1 ),
+              theta2eta(Init.lambda, .llambd , earg = .elambd ))
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
+    }
+  }), list( .llambd = llambd, .elambd = elambd,
+            .lpobs1 = lpobs1, .epobs1 = epobs1,
+            .ipobs1 = ipobs1,  # .ilambd = ilambd,
+            .type.fitted = type.fitted
+           ))),
+
+
+  # Fitted values; "mean" combines the atom at 1 with the mean of
+  # the 1-truncated positive-Poisson part, lambda / P(Y >= 2).
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "lambda", "pobs1", "onempobs1"))[1]
+
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+
+    # Odd eta columns hold pobs1, even columns hold lambda.
+    pobs1 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+                             .lpobs1 , earg = .epobs1 ))
+    lambd <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+                             .llambd , earg = .elambd ))
+
+
+    # 'lower.tail' spelt out (was 'lower' via partial matching).
+    ans <- switch(type.fitted,
+                  "mean"      = pobs1 + (1 - pobs1) *
+                                lambd / ppois(1, lambd,
+                                              lower.tail = FALSE),
+                  "lambda"      = lambd,
+                  "pobs1"     =      pobs1,  # P(Y=1)
+                  "onempobs1" =  1 - pobs1)  # P(Y>1)
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lpobs1 = lpobs1, .llambd = llambd,
+           .epobs1 = epobs1, .elambd = elambd ))),
+  # Post-fitting bookkeeping: record links and extra link arguments.
+  last = eval(substitute(expression({
+    temp.names <- c(rep_len( .lpobs1 , NOS),
+                    rep_len( .llambd , NOS))
+    temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
+    misc$link  <- temp.names
+    names(misc$link) <-
+      c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+
+    misc$earg <- vector("list", M1 * NOS)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .epobs1
+      misc$earg[[M1*ii  ]] <- .elambd
+    }
+  }), list( .lpobs1 = lpobs1, .llambd = llambd,
+            .epobs1 = epobs1, .elambd = elambd ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    pobs1  <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                              .lpobs1, earg = .epobs1))
+    lambd <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                              .llambd, earg = .elambd ))
+
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * doapospois(x = y, pobs1 = pobs1, lambda = lambd,
+                                   log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lpobs1 = lpobs1, .llambd = llambd,
+           .epobs1 = epobs1, .elambd = elambd ))),
+  vfamily = c("oapospoisson"),
+
+
+
+  # Parameter sanity check: lambda > 0 and pobs1 in (0, 1).
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    TFvec <- c(TRUE, FALSE)
+    pobs1 <- eta2theta(eta[,  TFvec, drop = FALSE],
+                       .lpobs1 , earg = .epobs1 )
+    lambd <- eta2theta(eta[, !TFvec, drop = FALSE],
+                       .llambd , earg = .elambd )
+    okay1 <- all(is.finite(lambd)) && all(0 < lambd) &&
+             all(is.finite(pobs1)) && all(0 < pobs1 & pobs1 < 1)
+    okay1
+  }, list( .lpobs1 = lpobs1, .llambd = llambd,
+           .epobs1 = epobs1, .elambd = elambd ))),
+
+
+  # simulate() support: draws via roapospois() at fitted parameters.
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pobs1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpobs1 , earg = .epobs1 )
+    lambd <- eta2theta(eta[, c(FALSE, TRUE)], .llambd , earg = .elambd )
+    # Full argument name 'lambda' (was 'lambd = lambd', which only
+    # worked via partial argument matching).
+    roapospois(nsim * length(lambd), lambda = lambd, pobs1 = pobs1)
+  }, list( .lpobs1 = lpobs1, .llambd = llambd,
+           .epobs1 = epobs1, .elambd = elambd ))),
+
+
+
+  # Score vector; rows with y == 1 contribute only through pobs1
+  # (their lambda derivative is zeroed in the skip loop below).
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- ncol(eta) / M1  # extra$NOS
+    y1 <- extra$y1
+    skip <- extra$skip.these
+
+    TFvec <- c(TRUE, FALSE)
+    pobs1  <- eta2theta(eta[,  TFvec, drop = FALSE],
+                        .lpobs1 , earg = .epobs1 )
+    lambda <- eta2theta(eta[, !TFvec, drop = FALSE],
+                        .llambd , earg = .elambd )
+
+    EY.cond <- 1 / ppois(1, lambda, lower.tail = FALSE)
+    temp1 <- expm1(lambda)
+    temp0 <- lambda * exp(-lambda)
+    shape.geq.2 <- -expm1(-lambda) - temp0
+    dl.dlambd <- y / lambda - 1 - temp0 / shape.geq.2
+
+
+    dl.dpobs1 <- -1 / (1 - pobs1)  # For y > 1 obsns
+
+    for (spp. in 1:NOS) {
+      dl.dpobs1[skip[, spp.], spp.] <- 1 / pobs1[skip[, spp.], spp.]
+      dl.dlambd[skip[, spp.], spp.] <- 0
+    }
+    dlambd.deta <- dtheta.deta(lambda, .llambd , earg = .elambd )
+    mu.phi1 <- pobs1
+
+    # logit link admits the simpler canonical-form score for pobs1.
+    temp3 <- if ( .lpobs1 == "logit") {
+      c(w) * (y1 - mu.phi1)
+    } else {
+      c(w) * dtheta.deta(mu.phi1, link = .lpobs1 , earg = .epobs1 ) *
+             dl.dpobs1
+    }
+
+    ans <- cbind(temp3,
+                 c(w) * dl.dlambd * dlambd.deta)
+    ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
+    ans
+  }), list( .lpobs1 = lpobs1, .llambd = llambd,
+            .epobs1 = epobs1, .elambd = elambd ))),
+  # Expected information; working weights for both parameters.
+  weight = eval(substitute(expression({
+    wz <- matrix(0, n, M1 * NOS)  # EIM is diagonal
+
+    ned2l.dlambd2 <- EY.cond / lambda +
+        ((1 - lambda) *
+         exp(-lambda) - temp0^2 / shape.geq.2) / shape.geq.2
+
+    ned2l.dlambd2 <- (1 - pobs1) * ned2l.dlambd2
+    wz[, NOS+(1:NOS)] <- c(w) * ned2l.dlambd2 * dlambd.deta^2
+
+
+    tmp100 <- mu.phi1 * (1 - mu.phi1)
+    tmp200 <- if ( .lpobs1 == "logit" && is.empty.list( .epobs1 )) {
+        cbind(c(w) * tmp100)
+    } else {
+      cbind(c(w) * (1 / tmp100) *
+            dtheta.deta(mu.phi1, link = .lpobs1 , earg = .epobs1 )^2)
+    }
+    wz[, 1:NOS] <-  tmp200
+
+    wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
+
+    wz
+  }), list( .lpobs1 = lpobs1,
+            .epobs1 = epobs1 ))))
+}  # End of oapospoisson
+
+
+
+
+
+
+
+
+doazeta <- function(x, shape, pobs1 = 0, log = FALSE) {
+  # Density of the one-altered zeta distribution:
+  # P(Y = 1) = pobs1, and for y > 1 the probability mass is
+  # (1 - pobs1) * dotzeta(y, shape) (1-truncated zeta part).
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  LLL   <- max(length(x), length(shape), length(pobs1))
+  x     <- rep_len(x,     LLL)
+  shape <- rep_len(shape, LLL)
+  pobs1 <- rep_len(pobs1, LLL)
+  ans   <- rep_len(0.0, LLL)
+
+  is.one <- (x == 1)
+  if (log.arg) {
+    ans[ is.one] <- log(pobs1[is.one])
+    ans[!is.one] <- log1p(-pobs1[!is.one]) +
+                    dotzeta(x[!is.one], shape[!is.one], log = TRUE)
+  } else {
+    ans[ is.one] <- pobs1[is.one]
+    ans[!is.one] <- (1 - pobs1[!is.one]) *
+                    dotzeta(x[!is.one], shape[!is.one])
+  }
+  # Invalid parameter values give NaN.
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[shape <= 0] <- NaN
+  ans
+}
+
+
+
+poazeta <- function(q, shape, pobs1 = 0) {
+  # Distribution function of the one-altered zeta distribution:
+  # 0 below 1, pobs1 at 1, and for q > 1
+  # pobs1 + (1 - pobs1) * potzeta(q, shape).
+  LLL   <- max(length(q), length(shape), length(pobs1))
+  q     <- rep_len(q,     LLL)
+  shape <- rep_len(shape, LLL)
+  pobs1 <- rep_len(pobs1, LLL)
+
+  ans <- rep_len(0.0, LLL)  # q < 1 entries stay 0
+  above <- (q > 1)
+  ans[above]  <- pobs1[above] +
+                 (1 - pobs1[above]) * potzeta(q[above], shape[above])
+  ans[q == 1] <- pobs1[q == 1]
+
+  # Clamp to [0, 1] before flagging invalid parameters.
+  ans <- pmin(1, pmax(0, ans))
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[shape <= 0] <- NaN
+
+  ans
+}
+
+
+
+qoazeta <- function(p, shape, pobs1 = 0) {
+  # Quantile function of the one-altered zeta distribution.
+  # Probabilities p <= pobs1 map to the atom at 1; larger p are
+  # rescaled and sent to the 1-truncated zeta quantiles.
+  LLL   <- max(length(p), length(shape), length(pobs1))
+  p     <- rep_len(p,     LLL)
+  shape <- rep_len(shape, LLL)
+  pobs1 <- rep_len(pobs1, LLL)
+
+  ans <- rep_len(NaN, LLL)
+  in.tail <- (pobs1 < p)
+  ans[!in.tail] <- 1
+  ans[ in.tail] <- qotzeta((p[in.tail] - pobs1[in.tail]) /
+                           (1 - pobs1[in.tail]),
+                           shape = shape[in.tail])
+  ans[pobs1 < 0 | 1 < pobs1] <- NaN
+  ans[p < 0 | 1 < p] <- NaN
+  ans[shape <= 0] <- NaN
+  ans
+}
+
+
+
+roazeta <- function(n, shape, pobs1 = 0) {
+  # Random generation for the one-altered zeta distribution,
+  # by inversion of the quantile function on Uniform(0, 1) draws.
+  qoazeta(runif(n), shape = shape, pobs1 = pobs1)
+}
+
+
+
+
+
+
+ oazeta <-
+  function(lpobs1 = "logit",
+           lshape = "loge",
+           type.fitted = c("mean", "shape", "pobs1", "onempobs1"),
+           gshape = exp((-4:3)/4),
+           ishape = NULL,
+           ipobs1 = NULL,
+           zero = NULL) {
+
+
+  lpobs1 <- as.list(substitute(lpobs1))
+  epobs1 <- link2list(lpobs1)
+  lpobs1 <- attr(epobs1, "function.name")
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+
+ type.fitted <- match.arg(type.fitted,
+                          c("mean", "shape", "pobs1", "onempobs1"))[1]
+
+
+  new("vglmff",
+  blurb = c("One-altered zeta distribution \n",
+            "(Bernoulli and 1-truncated zeta distribution model)\n\n",
+            "Links:    ",
+            namesof("pobs1", lpobs1, earg = epobs1, tag = FALSE), ", ",
+            namesof("shape", lshape, earg = eshape, tag = FALSE)),
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         parameters.names = c("pobs1", "shape"),
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+
+  initialize = eval(substitute(expression({
+    M1 <- 2
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+    extra$y1 <- y1 <- ifelse(y == 1, 1, 0)
+    extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
+    extra$skip.these <- skip.these <- matrix(as.logical(y1), n, NOS)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
+    mynames1 <- param.names("pobs1", ncoly)
+    mynames2 <- param.names("shape", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lpobs1 , earg = .epobs1 , tag = FALSE),
+          namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
+          interleave.VGAM(M1*NOS, M1 = M1)]
+
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    M <- M1 * ncoly
+
+
+    if (!length(etastart)) {
+      otzetaff.Loglikfun <- function(shape, y, x, w, extraargs) {
+        sum(c(w) * dotzeta(x = y, shape, log = TRUE))
+      }
+
+      gshape <- .gshape
+      if (!length( .ishape )) {
+        shape.init <- matrix(NA_real_, n, M/M1, byrow = TRUE)
+        for (jay in 1:ncoly) {
+          index1 <- y[, jay] > 1
+          shape.init[, jay] <-
+            grid.search(gshape, objfun = otzetaff.Loglikfun,  # x = x,
+                        y = y[index1, jay], w = w[index1, jay])
+        }
+      } else {
+        shape.init <- matrix( .ishape , n, M, byrow = TRUE)
+      }
+
+      etastart <-
+        cbind(theta2eta(if (length( .ipobs1 )) .ipobs1 else
+                        (0.5 + w * y1) / (1 + w),
+                        .lpobs1 , earg = .epobs1 ),
+              theta2eta(shape.init, .lshape , earg = .eshape ))
+      etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
+    }
+  }), list( .lshape = lshape, .eshape = eshape,
+            .lpobs1 = lpobs1, .epobs1 = epobs1,
+            .ipobs1 = ipobs1, .ishape = ishape,
+                              .gshape = gshape,
+            .type.fitted = type.fitted
+           ))),
+
+
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning the 'mean'.")
+                     "mean"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                     c("mean", "shape", "pobs1", "onempobs1"))[1]
+
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+
+    pobs1 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+                             .lpobs1 , earg = .epobs1 ))
+    shape <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
+                             .lshape , earg = .eshape ))
+    if (type.fitted == "mean") {
+      ans <- shape
+      ans[shape > 1] <- zeta(shape[shape > 1])/zeta(shape[shape > 1] + 1)
+      ans[shape <= 1] <- NA
+      pmf.1 <- dzeta(1, shape)
+      mean.otzeta <- (ans - pmf.1) / (1 - pmf.1)
+    }
+
+    ans <- switch(type.fitted,
+                  "mean"      = pobs1 + (1 - pobs1) * mean.otzeta,
+                  "shape"     = shape,
+                  "pobs1"     =      pobs1,  # P(Y=1)
+                  "onempobs1" =  1 - pobs1)  # P(Y>1)
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+  last = eval(substitute(expression({
+    temp.names <- c(rep_len( .lpobs1 , NOS),
+                    rep_len( .lshape , NOS))
+    temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
+    misc$link  <- temp.names
+    names(misc$link) <-
+      c(mynames1, mynames2)[interleave.VGAM(M1*NOS, M1 = M1)]
+
+    misc$earg <- vector("list", M1 * NOS)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .epobs1
+      misc$earg[[M1*ii  ]] <- .eshape
+    }
+  }), list( .lpobs1 = lpobs1, .lshape = lshape,
+            .epobs1 = epobs1, .eshape = eshape ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    pobs1 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
+                             .lpobs1, earg = .epobs1 ))
+    shape <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                             .lshape, earg = .eshape ))
+
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * doazeta(x = y, pobs1 = pobs1, shape = shape,
+                                log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+  vfamily = c("oazeta"),
+
+
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    TFvec <- c(TRUE, FALSE)
+    pobs1 <- eta2theta(eta[,  TFvec, drop = FALSE],
+                       .lpobs1 , earg = .epobs1 )
+    shape <- eta2theta(eta[, !TFvec, drop = FALSE],
+                       .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape) &&
+             all(is.finite(pobs1)) && all(0 < pobs1 & pobs1 < 1)
+    okay1
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pobs1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpobs1 , earg = .epobs1 )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+    roazeta(nsim * length(shape), shape = shape, pobs1 = pobs1)
+  }, list( .lpobs1 = lpobs1, .lshape = lshape,
+           .epobs1 = epobs1, .eshape = eshape ))),
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- ncol(eta) / M1  # extra$NOS
+    y1 <- extra$y1
+    skip <- extra$skip.these
+
+    TFvec <- c(TRUE, FALSE)
+    pobs1 <- eta2theta(eta[,  TFvec, drop = FALSE],
+                       .lpobs1 , earg = .epobs1 )
+    shape <- eta2theta(eta[, !TFvec, drop = FALSE],
+                       .lshape , earg = .eshape )
+
+    BBBB  <- zeta(shape + 1) - 1
+    fred1 <- zeta(shape + 1, deriv = 1)
+    dl.dshape <- -log(y) - fred1 / BBBB
+
+    dl.dpobs1 <- -1 / (1 - pobs1)  # For y > 1 obsns
+
+    for (spp. in 1:NOS) {
+      dl.dpobs1[skip[, spp.], spp.] <- 1 / pobs1[skip[, spp.], spp.]
+      dl.dshape[skip[, spp.], spp.] <- 0
+    }
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    mu.phi1 <- pobs1
+
+    temp3 <- if ( .lpobs1 == "logit") {
+      c(w) * (y1 - mu.phi1)
+    } else {
+      c(w) * dtheta.deta(mu.phi1, link = .lpobs1 , earg = .epobs1 ) *
+             dl.dpobs1
+    }
+
+    ans <- cbind(temp3,
+                 c(w) * dl.dshape * dshape.deta)
+    ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
+    ans
+  }), list( .lpobs1 = lpobs1, .lshape = lshape,
+            .epobs1 = epobs1, .eshape = eshape ))),
+  weight = eval(substitute(expression({
+    wz <- matrix(0, n, M1 * NOS)  # EIM is diagonal
+
+    ned2l.dshape2 <- (zeta(shape + 1, deriv = 2) - fred1^2 / BBBB) / BBBB
+
+    ned2l.dshape2 <- (1 - pobs1) * ned2l.dshape2
+    wz[, NOS+(1:NOS)] <- c(w) * ned2l.dshape2 * dshape.deta^2
+
+
+    tmp100 <- mu.phi1 * (1 - mu.phi1)
+    tmp200 <- if ( .lpobs1 == "logit" && is.empty.list( .epobs1 )) {
+        cbind(c(w) * tmp100)
+    } else {
+      cbind(c(w) * (1 / tmp100) *
+            dtheta.deta(mu.phi1, link = .lpobs1 , earg = .epobs1 )^2)
+    }
+    wz[, 1:NOS] <-  tmp200
+
+    wz <- wz[, interleave.VGAM(ncol(wz), M1 = M1)]
+
+    wz
+  }), list( .lpobs1 = lpobs1,
+            .epobs1 = epobs1 ))))
+}  # End of oazeta
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.others.R b/R/family.others.R
index 89bc491..618a973 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -32,12 +32,12 @@ dexppois <- function(x, rate = 1, shape, log = FALSE) {
   logdensity <- rep_len(log(0), N)
 
   xok <- (0 < x)
- 
+
   logdensity[xok] <- log(shape[xok]) + log(rate[xok]) -
-                     log1p(-exp(-shape[xok])) - shape[xok] - 
-                     rate[xok] * x[xok] + shape[xok] * 
+                     log1p(-exp(-shape[xok])) - shape[xok] -
+                     rate[xok] * x[xok] + shape[xok] *
                      exp(-rate[xok] * x[xok])
-   
+
   logdensity[shape <= 0] <- NaN
   logdensity[rate <= 0] <- NaN
   if (log.arg) logdensity else exp(logdensity)
@@ -48,14 +48,14 @@ dexppois <- function(x, rate = 1, shape, log = FALSE) {
 
 
 
-qexppois<- function(p, rate = 1, shape, 
-                    lower.tail = TRUE, log.p = FALSE) { 
+qexppois<- function(p, rate = 1, shape,
+                    lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ln.p <- p
@@ -71,7 +71,7 @@ qexppois<- function(p, rate = 1, shape,
       ln.p <- p
       ans <- -log(log(expm1(ln.p) * expm1(shape) + exp(shape)) / shape) / rate
       ans[ln.p > 0] <- NaN
-    } else { 
+    } else {
       ans <- -log(log(p * expm1(shape) + 1) / shape) / rate
       ans[p < 0] <- NaN
       ans[p > 1] <- NaN
@@ -94,14 +94,14 @@ qexppois<- function(p, rate = 1, shape,
 
 
 
-pexppois<- function(q, rate = 1, shape, 
+pexppois<- function(q, rate = 1, shape,
                     lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ans <- log((exp(shape * exp(-rate * q)) -
@@ -123,7 +123,7 @@ pexppois<- function(q, rate = 1, shape,
       ans[q <= 0] <- 1
       ans[q == Inf] <- 0
     }
-  } 
+  }
   ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans
 }
@@ -147,7 +147,7 @@ rexppois <- function(n, rate = 1, shape) {
 
 
  exppoisson <- function(lrate = "loge", lshape = "loge",
-                        irate = 2.0, ishape = 1.1,   
+                        irate = 2.0, ishape = 1.1,
                         zero = NULL) {
 
   lshape <- as.list(substitute(lshape))
@@ -221,21 +221,21 @@ rexppois <- function(n, rate = 1, shape) {
               stop("Need to input a value into argument 'iratee'")
       shape.init <- if (length( .ishape ))
                       rep_len( .ishape , n) else
-                      (1/ratee.init - mean(y)) / ((y * 
+                      (1/ratee.init - mean(y)) / ((y *
                       exp(-ratee.init * y))/n)
 
 
       ratee.init <- rep_len(weighted.mean(ratee.init, w = w), n)
-      
+
       etastart <-
         cbind(theta2eta(ratee.init, .lratee , earg = .eratee ),
               theta2eta(shape.init, .lshape , earg = .eshape ))
-              
+
 
     }
-  }), list( .lshape = lshape, .lratee = lratee, 
-            .ishape = ishape, .iratee = iratee, 
-            .eshape = eshape, .eratee = eratee))), 
+  }), list( .lshape = lshape, .lratee = lratee,
+            .ishape = ishape, .iratee = iratee,
+            .eshape = eshape, .eratee = eratee))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
     ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
@@ -246,8 +246,8 @@ rexppois <- function(n, rate = 1, shape) {
 
 
     qexppois(p = 0.5, rate = ratee, shape = shape)
-  }, list( .lshape = lshape, .lratee = lratee, 
-           .eshape = eshape, .eratee = eratee))), 
+  }, list( .lshape = lshape, .lratee = lratee,
+           .eshape = eshape, .eratee = eratee))),
 
   last = eval(substitute(expression({
     misc$link <-    c( rate = .lratee , shape = .lshape )
@@ -256,7 +256,7 @@ rexppois <- function(n, rate = 1, shape) {
     misc$expected <- TRUE
     misc$multipleResponses <- FALSE
   }), list( .lshape = lshape, .lratee = lratee,
-            .eshape = eshape, .eratee = eratee))), 
+            .eshape = eshape, .eratee = eratee))),
 
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
@@ -276,10 +276,18 @@ rexppois <- function(n, rate = 1, shape) {
         ll.elts
       }
     }
-  }, list( .lratee = lratee , .lshape = lshape , 
-           .eshape = eshape , .eratee = eratee ))), 
+  }, list( .lratee = lratee , .lshape = lshape ,
+           .eshape = eshape , .eratee = eratee ))),
 
   vfamily = c("exppoisson"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(ratee)) && all(0 < ratee) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lratee = lratee , .lshape = lshape ,
+           .eshape = eshape , .eratee = eratee ))),
 
   deriv = eval(substitute(expression({
     ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
@@ -292,22 +300,22 @@ rexppois <- function(n, rate = 1, shape) {
     c(w) * cbind(dl.dratee * dratee.deta,
                  dl.dshape * dshape.deta)
   }), list( .lshape = lshape, .lratee = lratee,
-            .eshape = eshape, .eratee = eratee ))), 
+            .eshape = eshape, .eratee = eratee ))),
 
   weight = eval(substitute(expression({
-    
+
     temp1 <- -expm1(-shape)
-    
+
     ned2l.dshape2 <- (1 + exp(2 * shape) - shape^2 * exp(shape) - 2 *
                       exp(shape)) / (shape * temp1)^2
 
 
-    ned2l.dratee2 <- 1 / ratee^2 - (shape^2 * exp(-shape) / (4 * 
-                    ratee^2 * temp1)) * 
-                    genhypergeo(c(2, 2, 2), c(3, 3, 3), shape) 
+    ned2l.dratee2 <- 1 / ratee^2 - (shape^2 * exp(-shape) / (4 *
+                    ratee^2 * temp1)) *
+                    genhypergeo(c(2, 2, 2), c(3, 3, 3), shape)
 
     ned2l.drateeshape <- (shape * exp(-shape) / (4 * ratee * temp1)) *
-                           genhypergeo(c(2, 2), c(3, 3), shape)   
+                           genhypergeo(c(2, 2), c(3, 3), shape)
 
     wz <- matrix(0, n, dimm(M))
     wz[, iam(1, 1, M)] <- dratee.deta^2 * ned2l.dratee2
@@ -359,14 +367,14 @@ dgenray <- function(x, scale = 1, shape, log = FALSE) {
 
 
 
-pgenray <- function(q, scale = 1, shape, 
+pgenray <- function(q, scale = 1, shape,
                     lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ans <- log((-expm1(-(q/scale)^2))^shape)
@@ -383,7 +391,7 @@ pgenray <- function(q, scale = 1, shape,
       ans <- -expm1(shape*log(-expm1(-(q/scale)^2)))
       ans[q <= 0] <- 1
     }
-  } 
+  }
   ans[(shape <= 0) | (scale <= 0)] <- NaN
   ans
 }
@@ -396,14 +404,14 @@ pgenray <- function(q, scale = 1, shape,
 
 
 
-qgenray <- function(p, scale = 1, shape, 
+qgenray <- function(p, scale = 1, shape,
                     lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ln.p <- p
@@ -419,7 +427,7 @@ qgenray <- function(p, scale = 1, shape,
       ln.p <- p
       ans <- scale * sqrt(-log1p(-((-expm1(ln.p))^(1/shape))))
       ans[ln.p > 0] <- NaN
-    } else { 
+    } else {
       ans <- scale * sqrt(-log1p(-exp((1/shape)*log1p(-p))))
       ans[p < 0] <- NaN
       ans[p > 1] <- NaN
@@ -451,7 +459,7 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
  genrayleigh <-
   function(lscale = "loge", lshape = "loge",
            iscale = NULL,   ishape = NULL,
-           tol12 = 1.0e-05, 
+           tol12 = 1.0e-05,
            nsimEIM = 300, zero = 2) {
 
   lshape <- as.list(substitute(lshape))
@@ -466,7 +474,7 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
       !is.Numeric(ishape, positive = TRUE))
     stop("bad input for argument 'ishape'")
   if (length(iscale) &&
-      !is.Numeric(iscale, positive = TRUE)) 
+      !is.Numeric(iscale, positive = TRUE))
     stop("bad input for argument 'iscale'")
 
   if (!is.Numeric(nsimEIM, length.arg = 1,
@@ -534,7 +542,7 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
                     grid.search(scale.grid, objfun = genrayleigh.Loglikfun,
                                 y = y, x = x, w = w)
       scale.init <- rep_len(scale.init, length(y))
- 
+
       shape.init <- if (length( .ishape )) .ishape else
                     -1 / weighted.mean(log1p(-exp(-(y/scale.init)^2)),
                      w = w)
@@ -542,17 +550,17 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
 
       etastart <- cbind(theta2eta(scale.init, .lscale , earg = .escale ),
                         theta2eta(shape.init, .lshape , earg = .eshape ))
-                        
+
         }
     }), list( .lscale = lscale, .lshape = lshape,
               .iscale = iscale, .ishape = ishape,
-              .escale = escale, .eshape = eshape))), 
+              .escale = escale, .eshape = eshape))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     qgenray(p = 0.5, shape = shape, scale = Scale)
-  }, list( .lshape = lshape, .lscale = lscale, 
+  }, list( .lshape = lshape, .lscale = lscale,
            .eshape = eshape, .escale = escale ))),
 
   last = eval(substitute(expression({
@@ -585,10 +593,18 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .lshape = lshape , .lscale = lscale , 
-           .eshape = eshape , .escale = escale ))), 
-      
+  }, list( .lshape = lshape , .lscale = lscale ,
+           .eshape = eshape , .escale = escale ))),
+
   vfamily = c("genrayleigh"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape , .lscale = lscale ,
+           .eshape = eshape , .escale = escale ))),
 
   deriv = eval(substitute(expression({
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -671,7 +687,7 @@ dexpgeom <- function(x, scale = 1, shape, log = FALSE) {
 
   if (any(xok <- (x > 0))) {
     temp1 <- -x[xok] / scale[xok]
-    logdensity[xok] <- -log(scale[xok]) + log1p(-shape[xok]) + 
+    logdensity[xok] <- -log(scale[xok]) + log1p(-shape[xok]) +
                        temp1 - 2 * log1p(-shape[xok] * exp(temp1))
   }
 
@@ -692,7 +708,7 @@ pexpgeom <- function(q, scale = 1, shape) {
   ans
 }
 
- 
+
 qexpgeom <- function(p, scale = 1, shape) {
   ans <- (-scale) * log((p - 1) / (p * shape - 1))
   ans[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
@@ -721,7 +737,7 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
 
 
  expgeometric <- function(lscale = "loge", lshape = "logit",
-                          iscale = NULL,   ishape = NULL, 
+                          iscale = NULL,   ishape = NULL,
                           tol12 = 1.0e-05, zero = 1,
                           nsimEIM = 400) {
 
@@ -759,8 +775,8 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
             namesof("scale", lscale, earg = escale), ", ",
             namesof("shape", lshape, earg = eshape), "\n",
             "Mean:     ", "(shape - 1) * log(1 - ",
-            "shape) / (shape / scale)"), 
-                           
+            "shape) / (shape / scale)"),
+
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
@@ -811,23 +827,23 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
       shape.init[shape.init >= 0.95] <- 0.95
       shape.init[shape.init <= 0.05] <- 0.05
 
-      
+
       etastart <-
         cbind(theta2eta(scale.init, .lscale , earg = .escale ),
               theta2eta(shape.init, .lshape , earg = .eshape ))
 
     }
-   }), list( .lscale = lscale, .lshape = lshape, 
-             .iscale = iscale, .ishape = ishape, 
-             .escale = escale, .eshape = eshape))), 
+   }), list( .lscale = lscale, .lshape = lshape,
+             .iscale = iscale, .ishape = ishape,
+             .escale = escale, .eshape = eshape))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-    
+
     (shape - 1) * log1p(-shape) / (shape / Scale)
 
-  }, list( .lscale = lscale, .lshape = lshape, 
+  }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape ))),
 
   last = eval(substitute(expression({
@@ -848,7 +864,7 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
 
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-    
+
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -860,10 +876,18 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .lscale = lscale , .lshape = lshape , 
-           .escale = escale , .eshape = eshape ))), 
-      
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape))),
+
   vfamily = c("expgeometric"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape & shape < 1)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape))),
 
   deriv = eval(substitute(expression({
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -875,7 +899,7 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
      dl.dscale <-  -1 / Scale + temp4 + 2 * temp4 * temp3 / (1 - temp3)
      dl.dshape <- -1 / (1 - shape)    + 2 * temp2 / (1 - temp3)
 
-    dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )            
+    dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
     dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
     dthetas.detas <- cbind(dscale.deta, dshape.deta)
 
@@ -885,7 +909,7 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
             .escale = escale,  .eshape = eshape ))),
 
   weight = eval(substitute(expression({
-  
+
 
 
 
@@ -904,9 +928,9 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
           temp2 <- exp(-ysim / Scale)
           temp3 <- shape * temp2
           temp4 <- ysim / Scale^2
-          dl.dscale <-  -1 / Scale + temp4 + 
+          dl.dscale <-  -1 / Scale + temp4 +
                        2 * temp4 * temp3 / (1 - temp3)
-          dl.dshape <- -1 / (1 - shape) + 
+          dl.dshape <- -1 / (1 - shape) +
                        2 * temp2 / (1 - temp3)
 
           temp6 <- cbind(dl.dscale, dl.dshape)
@@ -924,7 +948,7 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
                  dthetas.detas[, ind1$col]
     }
 
-    c(w) * wz      
+    c(w) * wz
   }), list( .nsimEIM = nsimEIM ))))
 }
 
@@ -952,8 +976,8 @@ dexplog <- function(x, scale = 1, shape, log = FALSE) {
   logdensity <- rep_len(log(0), N)
   if (any(xok <- (x > 0))) {
     temp1 <- -x[xok] / scale[xok]
-    logdensity[xok] <- -log(-log(shape[xok])) - log(scale[xok]) + 
-                       log1p(-shape[xok]) + temp1 - 
+    logdensity[xok] <- -log(-log(shape[xok])) - log(scale[xok]) +
+                       log1p(-shape[xok]) + temp1 -
                        log1p(-(1-shape[xok]) * exp(temp1))
   }
 
@@ -1088,7 +1112,7 @@ explogff.control <- function(save.weights = TRUE, ...) {
       scale.init <- if (is.Numeric( .iscale , positive = TRUE)) {
                      rep_len( .iscale , n)
                    } else {
-                     stats::sd(c(y))  
+                     stats::sd(c(y))
                    }
 
       shape.init <- if (is.Numeric( .ishape , positive = TRUE)) {
@@ -1115,7 +1139,7 @@ explogff.control <- function(save.weights = TRUE, ...) {
 
 
 
-    qexplog(p = 0.5, shape = shape, scale = scale)  
+    qexplog(p = 0.5, shape = shape, scale = scale)
 
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape ))),
@@ -1139,7 +1163,7 @@ explogff.control <- function(save.weights = TRUE, ...) {
 
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-    
+
 
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
@@ -1152,10 +1176,18 @@ explogff.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .lscale = lscale , .lshape = lshape ,
-           .escale = escale , .eshape = eshape ))),
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape))),
 
   vfamily = c("explogff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape & shape < 1)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape))),
 
   deriv = eval(substitute(expression({
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -1228,7 +1260,7 @@ explogff.control <- function(save.weights = TRUE, ...) {
 
 
 
-  
+
 dweibull3 <- function(x, location = 0, scale = 1, shape,
                       log = FALSE) {
 
@@ -1263,7 +1295,7 @@ rweibull3 <- function(n, location = 0, scale = 1, shape) {
 
 
 
-   ### Two-piece normal (TPN) family 
+   ### Two-piece normal (TPN) family
 
 
 dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
@@ -1282,15 +1314,15 @@ dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
   if (length(scale)    != N) scale     <- rep_len(scale,    N)
   if (length(location) != N) location  <- rep_len(location, N)
   if (length(skewpar)  != N) skewpar   <- rep_len(skewpar,  N)
-    
+
   zedd <- (x - location) / scale
 
   log.s1 <-  -zedd^2 / (8 * skewpar^2)
   log.s2 <-  -zedd^2 / (8 * (1 - skewpar)^2)
-            
+
   logdensity <- log.s1
   logdensity[zedd > 0] <- log.s2[zedd > 0]
-  
+
   logdensity <- logdensity -log(scale) - log(sqrt(2 * pi))
 
   if (log.arg) logdensity else exp(logdensity)
@@ -1310,7 +1342,7 @@ ptpn <- function(q, location = 0, scale = 1, skewpar = 0.5) {
   s1 <- 2 * skewpar * pnorm(zedd, sd = 2 * skewpar)  #/ scale
   s2 <- skewpar + (1 - skewpar) *
         pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
- 
+
 ans <- rep_len(0.0, length(zedd))
 ans[zedd <= 0] <- s1[zedd <= 0]
 ans[zedd > 0] <- s2[zedd > 0]
@@ -1321,7 +1353,7 @@ ans
 
 
 pos <- function(x) ifelse(x > 0, x, 0.0)
- 
+
 
 qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5) {
 
@@ -1340,15 +1372,15 @@ qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5) {
   if (length(location) != LLL) location <- rep_len(location, LLL)
   if (length(scale)    != LLL) scale    <- rep_len(scale,    LLL)
   if (length(skewpar)  != LLL) skewpar  <- rep_len(skewpar,  LLL)
-       
+
   qtpn <- rep_len(NA_real_, length(LLL))
   qtpn <- qnorm(pp / (2 * skewpar), sd = 2 * skewpar)
-  qtpn[pp > skewpar] <- sqrt(8 * ( 1 - skewpar)^2 * 
-                        qgamma(pos( pp - skewpar) / ( 
+  qtpn[pp > skewpar] <- sqrt(8 * ( 1 - skewpar)^2 *
+                        qgamma(pos( pp - skewpar) / (
                         1 - skewpar),.5))[pp > skewpar]
-        
+
    qtpn * scale + location
-  
+
 }
 
 
@@ -1493,6 +1525,15 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
            .elocat = elocat, .escale = escale,
            .pp      = pp ))),
   vfamily = c("tpnff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mylocat <- eta2theta(eta[, 1], .llocat ,  earg = .elocat )
+    myscale <- eta2theta(eta[, 2], .lscale ,  earg = .escale )
+    okay1 <- all(is.finite(mylocat)) &&
+             all(is.finite(myscale)) && all(0 < myscale)
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale,
+           .elocat = elocat, .escale = escale,
+           .pp      = pp ))),
   deriv = eval(substitute(expression({
     mylocat <- eta2theta(eta[, 1], .llocat ,  earg = .elocat )
     myscale <- eta2theta(eta[, 2], .lscale ,  earg = .escale )
@@ -1512,7 +1553,7 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
 
     #dl.dpp        <-  zedd^2 /  (4 * mypp^3)
     #dl.dpp[cond2] <- -zedd^2 /  (4 * (1 - mypp)^3)[cond2]
-    
+
     dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
     dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
 
@@ -1527,7 +1568,7 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
     temp10 <- mypp * (1 - mypp)
     ned2l.dlocat2        <- 1 / ((4 * temp10) * myscale^2)
     ned2l.dscale2        <- 2 /  myscale^2
-     
+
 
     wz[, iam(1, 1, M)] <- ned2l.dlocat2 * dlocat.deta^2
     wz[, iam(2, 2, M)] <- ned2l.dscale2 * dscale.deta^2
@@ -1637,7 +1678,7 @@ tpnff3 <- function(llocation = "identitylink",
     }
   }), list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
             .elocat = elocat, .escale = escale, .eskewp = eskewp,
-            
+
             .method.init=method.init ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     eta2theta(eta[, 1], .llocat, earg = .elocat)
@@ -1645,7 +1686,7 @@ tpnff3 <- function(llocation = "identitylink",
            .elocat = elocat, .escale = escale ))),
   last = eval(substitute(expression({
     misc$link     <-     c("location" = .llocat,
-                           "scale"    = .lscale, 
+                           "scale"    = .lscale,
                            "skewpar"  = .lskewp)
 
     misc$earg     <-  list("location" = .elocat,
@@ -1681,11 +1722,22 @@ tpnff3 <- function(llocation = "identitylink",
            .elocat = elocat, .escale = escale, .eskewp = eskewp
            ))),
   vfamily = c("tpnff3"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mylocat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    myskew  <- eta2theta(eta[, 3], .lskewp , earg = .eskewp )
+    okay1 <- all(is.finite(mylocat)) &&
+             all(is.finite(myscale)) && all(0 < myscale) &&
+             all(is.finite(myskew ))
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
+           .elocat = elocat, .escale = escale, .eskewp = eskewp
+         ))),
   deriv = eval(substitute(expression({
     mylocat <- eta2theta(eta[, 1], .llocat,   earg = .elocat)
     myscale <- eta2theta(eta[, 2], .lscale,   earg = .escale)
     myskew  <- eta2theta(eta[, 3], .lskewp, earg = .eskewp)
-  
+
 
     zedd <- (y - mylocat) / myscale
    cond2 <-    (zedd > 0)
@@ -1700,7 +1752,7 @@ tpnff3 <- function(llocation = "identitylink",
 
     dl.dskewpar      <-     zedd^2 /  (4 * myskew^3)
     dl.dskewpar[cond2] <- (-zedd^2 /  (4 * (1 - myskew)^3))[cond2]
-    
+
 
 
     dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
@@ -1717,7 +1769,7 @@ tpnff3 <- function(llocation = "identitylink",
             ))),
   weight = eval(substitute(expression({
     wz <- matrix(NA_real_, n, dimm(M))  # diag matrix; y is one-col too
-   
+
     temp10 <- myskew * (1 - myskew)
 
     ned2l.dlocat2        <- 1 / ((4 * temp10) * myscale^2)
@@ -1725,13 +1777,13 @@ tpnff3 <- function(llocation = "identitylink",
     ned2l.dskewpar2      <- 3 / temp10
     ned2l.dlocatdskewpar <- (-2 * sqrt(2)) / (temp10 * sqrt(pi) *
                              myscale)
-     
+
     wz[, iam(1, 1,M)] <- ned2l.dlocat2 * dlocat.deta^2
     wz[, iam(2, 2,M)] <- ned2l.dscale2 * dscale.deta^2
     wz[, iam(3, 3,M)] <- ned2l.dskewpar2 * dskewpar.deta^2
     wz[, iam(1, 3,M)] <- ned2l.dlocatdskewpar * dskewpar.deta *
                          dlocat.deta
-  
+
     ans
     c(w) * wz
   }))))
@@ -1757,9 +1809,9 @@ dzoabeta <- function(x, shape1, shape2, pobs0 = 0,
   k1 <- (pobs0 < -tol | pobs1 < -tol |
     (pobs0 + pobs1) > (1 + tol))
   k4 <- is.na(pobs0) | is.na(pobs1)
-  ans[!k4 & !k1] <- dbeta(x[!k4 & !k1], 
-                          shape1[!k4 & !k1], 
-                          shape2[!k4 & !k1], log = TRUE) + 
+  ans[!k4 & !k1] <- dbeta(x[!k4 & !k1],
+                          shape1[!k4 & !k1],
+                          shape2[!k4 & !k1], log = TRUE) +
                     log1p(-(pobs0[!k4 & !k1] + pobs1[!k4 & !k1]))
   k2 <- x == 0 & pobs0 > 0 & !is.na(x)
   k3 <- x == 1 & pobs1 > 0 & !is.na(x)
@@ -1780,7 +1832,7 @@ rzoabeta <- function(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
   use.n <- if ((length.n <- length(n)) > 1) {
     length.n
   } else {
-    if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1, 
+    if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1,
                     positive = TRUE)) {
       stop("bad input for argument 'n'")
     } else {
@@ -1823,15 +1875,15 @@ pzoabeta <- function(q, shape1, shape2, pobs0 = 0, pobs1 = 0,
   k4 <- is.na(pobs0) | is.na(pobs1)
   ans <- rep_len(NA_real_, LLL)
   ans[!k3 & !k4] <- pbeta(q[!k3 & !k4],
-                          shape1[!k3 & !k4], 
+                          shape1[!k3 & !k4],
                           shape2[!k3 & !k4], log.p = TRUE) +
     log1p(-(pobs0[!k3 & !k4] + pobs1[!k3 & !k4]))
   ans <- exp(ans)
   k1 <- q >= 0 & !is.na(q)
   k2 <- q >= 1 & !is.na(q)
-  ans[k1 & !k3 & !k4] <- ans[k1 & !k3 & !k4] + 
+  ans[k1 & !k3 & !k4] <- ans[k1 & !k3 & !k4] +
     pobs0[k1 & !k3 & !k4]
-  ans[k2 & !k3 & !k4] <- ans[k2 & !k3 & !k4] + 
+  ans[k2 & !k3 & !k4] <- ans[k2 & !k3 & !k4] +
     pobs1[k2 & !k3 & !k4]
   if (!lower.tail & log.p) {
     ans <- log1p(-ans)
@@ -1881,7 +1933,7 @@ qzoabeta <- function(p, shape1, shape2, pobs0 = 0, pobs1 = 0,
     qbeta((p[k2 & !k0 & !k4] -
            pobs0[k2 & !k0 & !k4]) / (1 - pobs0[k2 & !k0 & !k4] -
            pobs1[k2 & !k0 & !k4]),
-           shape1 = shape1[k2 & !k0 & !k4], 
+           shape1 = shape1[k2 & !k0 & !k4],
            shape2 = shape2[k2 & !k0 & !k4])
   ans[k3 & !k0 & !k4] <- 1
   if (any(k0 & !k4)) {
@@ -1903,7 +1955,7 @@ log1mexp <- function(x) {
 
 
 log1pexp <- function(x){
-  
+
   ifelse(x <= -37, exp(x),
          ifelse(x <= 18, log1p(exp(x)),
                 ifelse(x <= 33, x + exp(-x), x)))
@@ -1934,12 +1986,12 @@ dzoibetabinom.ab <- function(x, size, shape1, shape2, pstr0 = 0,
   if (sum(!k & !k1) > 0) {
     ans[!k & !k1] <-
       dbetabinom.ab(x[!k & !k1], size[!k & !k1], shape1[!k & !k1],
-                    shape2[!k & !k1], log = TRUE) + 
+                    shape2[!k & !k1], log = TRUE) +
       log1p(-(pstr0[!k & !k1]+pstrsize[!k & !k1]))
     if (!log.arg) ans <- exp(ans)
   }
-  k2 <- x == 0 & pstr0 > 0 
-  k3 <- x == size & pstrsize > 0 
+  k2 <- x == 0 & pstr0 > 0
+  k3 <- x == size & pstrsize > 0
   if (sum(k2 & !k & !k1) > 0)
     ans[k2 & !k & !k1] <- pstr0[k2 & !k & !k1] +
       ans[k2 & !k & !k1]
@@ -1958,18 +2010,18 @@ dzoibetabinom.ab <- function(x, size, shape1, shape2, pstr0 = 0,
 dzoibetabinom <- function(x, size, prob, rho = 0, pstr0 = 0,
                           pstrsize = 0, log = FALSE) {
   dzoibetabinom.ab(x, size, shape1 = prob * (1 - rho) / rho,
-                   shape2 = (1 - prob) * (1 - rho) / rho, 
+                   shape2 = (1 - prob) * (1 - rho) / rho,
                    pstr0 = pstr0, pstrsize = pstrsize, log = log)
 }
 
 
 
-rzoibetabinom.ab <- function(n, size, shape1, shape2, 
+rzoibetabinom.ab <- function(n, size, shape1, shape2,
                              pstr0 = 0, pstrsize = 0) {
   use.n <- if ((length.n <- length(n)) > 1) {
     length.n
   } else {
-    if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1, 
+    if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1,
                     positive = TRUE)) {
       stop("bad input for argument 'n'")
     } else {
@@ -1988,15 +2040,15 @@ rzoibetabinom.ab <- function(n, size, shape1, shape2,
     (pstr0 + pstrsize) > 1
   random.number <- runif(use.n)
   k2 <- random.number[!k] < pstr0[!k]
-  k3 <- pstr0[!k] <= random.number[!k] & 
+  k3 <- pstr0[!k] <= random.number[!k] &
     random.number[!k] <= (1 - pstrsize[!k])
   k4 <- (1 - pstrsize[!k]) < random.number[!k]
   if (sum(k2 & !k1 & !k) > 0)
     ans[k2 & !k1 & !k] <- 0
   if (sum(k3 & !k1 & !k) > 0)
-    ans[k3 & !k1 & !k] <- rbetabinom.ab(sum(k3 & !k1 & !k), 
+    ans[k3 & !k1 & !k] <- rbetabinom.ab(sum(k3 & !k1 & !k),
                                         size =  size[k3 & !k1 & !k],
-                                        shape1 = shape1[k3 & !k1 & !k], 
+                                        shape1 = shape1[k3 & !k1 & !k],
                                         shape2 = shape2[k3 & !k1 & !k])
   if (sum(k4 & !k1 & !k) > 0)
     ans[k4 & !k1 & !k] <- size[k4 & !k1 & !k]
@@ -2008,7 +2060,7 @@ rzoibetabinom.ab <- function(n, size, shape1, shape2,
 rzoibetabinom <- function(n, size, prob, rho = 0, pstr0 = 0,
                           pstrsize = 0) {
   rzoibetabinom.ab(n, size, shape1 = prob * (1 - rho) / rho,
-                   shape2 = (1 - prob) * (1 - rho) / rho, 
+                   shape2 = (1 - prob) * (1 - rho) / rho,
                    pstr0 = pstr0,
                    pstrsize = pstrsize)
 }
@@ -2033,17 +2085,17 @@ pzoibetabinom.ab <- function(q, size, shape1, shape2, pstr0 = 0,
     (pstr0 + pstrsize) > 1
   if (sum(!k1 & !k) > 0)
     ans[!k & !k1] <-
-      pbetabinom.ab(q[!k & !k1], size[!k & !k1], 
+      pbetabinom.ab(q[!k & !k1], size[!k & !k1],
                     shape1[!k & !k1], shape2[!k & !k1], log.p = TRUE) +
       log1p(-(pstr0[!k & !k1] + pstrsize[!k & !k1]))
   ans <- exp(ans)
-  k2 <- q >= 0 
-  k3 <- q >= size 
+  k2 <- q >= 0
+  k3 <- q >= size
   if (sum(k2 & !k1 & !k) > 0)
-    ans[k2 & !k & !k1] <- ans[k2 & !k & !k1] + 
+    ans[k2 & !k & !k1] <- ans[k2 & !k & !k1] +
       pstr0[k2 & !k & !k1]
   if (sum(k3 & !k1 & !k) > 0)
-    ans[k3 & !k & !k1] <- ans[k3 & !k & !k1] + 
+    ans[k3 & !k & !k1] <- ans[k3 & !k & !k1] +
       pstrsize[k3 & !k & !k1]
   if (!lower.tail & log.p) {
     ans <- log1p(-ans)
@@ -2061,11 +2113,11 @@ pzoibetabinom.ab <- function(q, size, shape1, shape2, pstr0 = 0,
 }
 
 
-pzoibetabinom <- function(q, size, prob, rho, 
+pzoibetabinom <- function(q, size, prob, rho,
                           pstr0 = 0, pstrsize = 0,
                         lower.tail = TRUE, log.p = FALSE) {
   pzoibetabinom.ab(q, size, shape1 = prob * (1 - rho) / rho,
-                 shape2 = (1 - prob) * (1 - rho) / rho, 
+                 shape2 = (1 - prob) * (1 - rho) / rho,
                  pstr0 = pstr0, pstrsize = pstrsize,
                  lower.tail = lower.tail, log.p = log.p)
 }
@@ -2084,148 +2136,148 @@ pzoibetabinom <- function(q, size, prob, rho,
 
 
   AR1EIM<- function(x = NULL,
-                    var.arg   = NULL, 
-                    p.drift   = NULL, 
-                    WNsd      = NULL, 
+                    var.arg   = NULL,
+                    p.drift   = NULL,
+                    WNsd      = NULL,
                     ARcoeff1  = NULL,
                     eps.porat = 1e-2) {
-  
+
     if (!is.matrix(x))
       stop("Argument 'x' must be a matrix.")
-    
+
     yy   <- x
     M    <- 3
     nn   <- nrow(x)
     nn0  <- numeric(0)
     NOS  <- ncol(x)
-    
+
     if (!is.matrix(WNsd))
       WNsd <- matrix(WNsd, nrow = nn, ncol = NOS, byrow = TRUE)
-  
+
     if (!is.matrix(ARcoeff1))
       ARcoeff1 <- matrix(ARcoeff1, nrow = nn, ncol = NOS, byrow = TRUE)
-    
+
     if (!is.Numeric(eps.porat, length.arg = 1) || eps.porat < 0 ||
         eps.porat > 1e-2)
       stop("Bad input for argument 'eps.porat'.")
-    
+
     sdTSR   <- colMeans(WNsd)
     sdTSv   <- colMeans(WNsd)
     drift.v <- rep(p.drift, NOS)[1:NOS]
     Aux11   <- (NOS > 1)
     the1v   <- colMeans(ARcoeff1)
     JFin    <- array(0.0, dim = c(nn, NOS, M + (M - 1) + (M - 2) ))
-    
+
     for (spp in 1:NOS) {
-      
+
       x <- yy[, spp]
       the1    <- the1v[spp]
       drift.p <- drift.v[spp]
       sdTS    <- sdTSv[spp]
-      
+
       r <- numeric(nn)
-      r <- AR1.gammas(x = x, lags = nn - 1) 
+      r <- AR1.gammas(x = x, lags = nn - 1)
       r[nn] <- r[1]
-      
+
       s0 <- numeric(nn)
       s1 <- numeric(nn)
-      s1 <- if (var.arg) (the1^(0:(nn - 1))) / (1 - the1^2) else 
+      s1 <- if (var.arg) (the1^(0:(nn - 1))) / (1 - the1^2) else
              2 * (the1^(0:(nn - 1))) * sdTS / (1 - the1^2)
-      
+
       s2    <- numeric(nn)
       help1 <- c(0:(nn - 1))
       s2    <- help1 * (the1^(help1 - 1)) * (sdTS^2) / (1 - the1^2) +
                    2 * (sdTS^2) * (the1^(help1 + 1)) / (1 - the1^2)^2
       sMat <- cbind(s0, s1, s2)
-      
-      J  <- array(NA_real_, 
+
+      J  <- array(NA_real_,
                   dim = c(length(the1) + 2, length(the1) + 2, nn))
-      Jp <- array(NA_real_, 
+      Jp <- array(NA_real_,
                   dim = c(length(the1) + 2, length(the1) + 2, nn))
-      
+
       alpha    <- numeric(nn)
       alpha[1] <- 1
       delta    <- r[1]
       eta      <- matrix(NA_real_, nrow = nn, ncol = M)
       eta[1, ] <- cbind(s0[1], s1[1], s2[1])
-      
+
       psi      <- matrix(0, nrow = nn, ncol = length(the1) + 2)
       psi[1, ] <- cbind(s0[1], s1[1], s2[1]) / r[1]
-      
+
       u0   <- rep(1/(1 - sign(the1v[1]) * min(0.975, abs(the1v[1]))), nn )
       u1   <- rep(drift.p/(1 - the1)^2, nn)
       uMat <- cbind(u0, rep(0, nn), u1)
-      
-      aux1 <- matrix(sMat[1, ], 
-                     nrow = 2 + length(the1), 
+
+      aux1 <- matrix(sMat[1, ],
+                     nrow = 2 + length(the1),
                      ncol = 2 + length(the1), byrow = TRUE)
       diag(aux1) <- sMat[1, ]
       J[, , 1]   <- Jp[, , 1] <- aux1 * t(aux1) / (2 * r[1]^2)
       J[1, 1, 1] <- Jp[1, 1, 1] <- 1 / sdTS^2
       JFin[1, spp, 1:M] <- Jp[, , 1][row(Jp[, , 1]) == col(Jp[, , 1])]
       Neps.porat <- 1.819*eps.porat*(1e-10)
-      
+
       dk   <- matrix(NA_real_, nrow = 1, ncol = length(the1) + 2)
       eR   <- matrix(NA_real_, nrow = 1, ncol = length(the1) + 2)
       cAux2 <- d55 <- numeric(nn); d55[1] <- 0.1
-      
+
       for (jay in 1:(nn - 1)) {
-        
-        cAux <- as.numeric(alpha[1:jay] %*% 
+
+        cAux <- as.numeric(alpha[1:jay] %*%
                     r[2:(jay + 1)][length(r[2:(jay + 1)]):1])/delta
-        
-        dk <- alpha[1:jay] %*% 
+
+        dk <- alpha[1:jay] %*%
          sMat[2:(jay + 1), , drop = FALSE][length(sMat[2:(jay + 1)]):1, ]
-        
+
         delta        <- delta * (1 - cAux^2)
         d55[jay + 1] <- cAux^2
-        
+
         if ((d55[jay + 1] < eps.porat*1e-2) || (jay > 1e1)) {
           nn0 <- jay
           break
         }
-        
+
         eta[jay + 1, ] <- dk
         tAux <- numeric(jay + 1)
-        tAux <- alpha[1:(jay + 1)] - 
+        tAux <- alpha[1:(jay + 1)] -
                       cAux * alpha[1:(jay + 1)][(jay + 1):1]
         alpha[1:(jay + 1)] <- tAux[1:(jay + 1)]
-        
-        eR <- alpha[1:(jay + 1)][(jay + 1):1] %*% 
+
+        eR <- alpha[1:(jay + 1)][(jay + 1):1] %*%
           eta[1:(jay + 1), , drop = FALSE]
-        
-        tAux <- eta[1:(jay + 1), ] - 
+
+        tAux <- eta[1:(jay + 1), ] -
           cAux * eta[1:(jay + 1), ][(jay + 1):1, ]
-        
+
         eta[1:(jay + 1), ] <- tAux
-        
+
         AuxE <- matrix(eR, nrow = jay + 1, ncol = M, byrow = TRUE)
         Aux3 <- matrix(alpha[1:(jay + 1)][(jay + 1):1],
                        nrow = jay + 1, ncol = M, byrow = FALSE)
         Aux4 <- matrix(alpha[1:(jay + 1)],
                        nrow = jay + 1, ncol = M, byrow = FALSE)
-                  tAux <- psi[1:(jay + 1), ] - 
-                      cAux * psi[1:(jay + 1), ][(jay + 1):1, ] + 
+                  tAux <- psi[1:(jay + 1), ] -
+                      cAux * psi[1:(jay + 1), ][(jay + 1):1, ] +
                          AuxE * (Aux3 - cAux * Aux4) / delta
-        
+
         if (any(dim(psi[1:(jay + 1), ])) != any(dim(tAux)) )
           stop("Invalids 'psi' and 'tAux'.")
-        
+
         psi[1:(jay + 1), ] <- tAux
         fk <- alpha[1:(jay + 1)] %*% eta[1:(jay + 1), ]
         gk <- alpha[1:(jay + 1)][(jay + 1):1] %*% uMat[1:(jay + 1), ]
-        
+
         Auxf <- matrix(fk, nrow = M, ncol = M, byrow = FALSE)
         Auxg <- matrix(gk, nrow = M, ncol = M, byrow = FALSE)
         J[, , jay + 1] <-
           J[, , jay] + t(eta[1:(jay + 1), ]) %*% psi[1:(jay + 1), ] /
-                      delta - 0.5 * Auxf * t(Auxf) / delta^2 + 
-                          Auxg * t(Auxg) / delta  
-        
-        Jp[, , jay + 1] <- J[, , jay + 1] - J[, , jay] 
-        JFin[jay + 1, spp , 1:M ] <- 
+                      delta - 0.5 * Auxf * t(Auxf) / delta^2 +
+                          Auxg * t(Auxg) / delta
+
+        Jp[, , jay + 1] <- J[, , jay + 1] - J[, , jay]
+        JFin[jay + 1, spp , 1:M ] <-
           Jp[, , jay + 1][col(Jp[, , jay + 1]) == row(Jp[, , jay + 1])]
-        
+
         helpC <- numeric(0)
         for (kk in 1:(M - 1))  {
           TF1 <- ( col(Jp[, , jay + 1]) >= row(Jp[, , jay + 1]) )
@@ -2233,18 +2285,18 @@ pzoibetabinom <- function(q, size, prob, rho,
           helpC <- c(helpC, Jp[, , jay + 1][TF1 & TF2])
         }
         rm(TF1, TF2)
-        
+
         JFin[jay + 1, spp , -(1:M) ] <- helpC
       }
-      
+
       if (length(nn0))
         for (kk in nn0:(nn - 1)) {
           J[, , kk + 1] <- J[, , nn0] + (kk - nn0 + 1) * Jp[, , nn0]
           Jp[, , kk + 1] <- J[, , kk + 1] - J[, , kk]
-          
-          JFin[kk + 1, spp , 1:M ] <- 
+
+          JFin[kk + 1, spp , 1:M ] <-
             Jp[, , kk + 1][col(Jp[, , kk + 1]) == row(Jp[, , kk + 1])]
-          
+
           helpC <- numeric(0)
           for (ll in 1:(M - 1))  {
            TF1 <- ( col(Jp[, , kk + 1]) >= row(Jp[, , kk + 1]) )
@@ -2254,12 +2306,12 @@ pzoibetabinom <- function(q, size, prob, rho,
           rm(TF1, TF2)
           JFin[kk + 1, spp , -(1:M) ] <- helpC
         }
-      JFin[which(JFin <= Neps.porat)] <- 
+      JFin[which(JFin <= Neps.porat)] <-
         abs( JFin[which(JFin <= Neps.porat)])
     }
 
     JFin
-    
+
   } # End
 
 
@@ -2270,7 +2322,7 @@ pzoibetabinom <- function(q, size, prob, rho,
 AR1.gammas <- function(x, y = NULL, lags = 1) {
   xx  <- matrix(x, ncol = 1)
   nx  <- nrow(xx)
-  
+
   if (lags < 0 || !(is.Numeric(lags, integer.valued = TRUE)))
     stop("'lags' must be a positive integer.")
 
@@ -2286,9 +2338,9 @@ AR1.gammas <- function(x, y = NULL, lags = 1) {
   }
 
   myD <- numeric(lags + 1)
-  myD[1] <- if (length(y)) cov(xx, yy) else cov(xx, xx)  # i.e. var(xx)    
+  myD[1] <- if (length(y)) cov(xx, yy) else cov(xx, xx)  # i.e. var(xx)
   if (lags > 0)
-    for (ii in 1:lags) 
+    for (ii in 1:lags)
       myD[ii + 1]  <- cov(xx[-(1:ii), 1], yy[1:(n - ii) , 1])
 
   myD
diff --git a/R/family.positive.R b/R/family.positive.R
index c1425a1..47cb087 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -32,7 +32,7 @@ N.hat.posbernoulli <-
   model.type <- match.arg(model.type, c("0", "b", "t", "tb"))[1]
   if (!is.matrix(eta))
     eta <- as.matrix(eta)  # May be needed for "0"
- 
+
   tau <-
     switch(model.type,
            "0"  = extra$tau,
@@ -54,7 +54,7 @@ N.hat.posbernoulli <-
   prc <- as.matrix(prc)  # Might be needed for Mtb(tau=2).
 
 
- 
+
   if (FALSE && model.type == "tb") {
     if (tau == 2)
       prc <- cbind(prc, 1 - prc)
@@ -62,7 +62,7 @@ N.hat.posbernoulli <-
       stop("cannot handle tau > 3 yet")
     jay.index <- 1:tau  # 'Restore' it coz its used below. zz??
   }
-  
+
   QQQ <- exp(rowSums(log1p(-prc)))
   pibbeta <- exp(log1p(-QQQ))  # One.minus.QQQ
   N.hat <- sum(1 / pibbeta)  # Point estimate
@@ -112,7 +112,7 @@ N.hat.posbernoulli <-
     covun <- covun[vecTF.index, vecTF.index, drop = FALSE]
     dvect <- dvect[vecTF.index, drop = FALSE]
   }
- 
+
   list(N.hat    = N.hat,
        SE.N.hat = if (length(R))
                     c(sqrt(ss2 + t(dvect) %*% covun %*% dvect)) else
@@ -203,7 +203,7 @@ rposbern <-
     stop("argument 'pvars' must be at least one")
   if (pvars > length(xcoeff))
     stop("argument 'pvars' is too high")
-  
+
 
   if (earg.link) {
     earg <- link
@@ -234,7 +234,7 @@ rposbern <-
                        matrix(runif(n = use.n * (pvars-1)),
                               use.n, pvars - 1,
                               dimnames = list(as.character(1:use.n),
-                                              paste("x", 2:pvars, sep = ""))))
+                                         paste("x", 2:pvars, sep = ""))))
   }
 
 
@@ -305,7 +305,7 @@ rposbern <-
 
 
 
-  
+
 
 dposbern <- function(x, prob, prob0 = prob, log = FALSE) {
 
@@ -336,11 +336,27 @@ dposbern <- function(x, prob, prob0 = prob, log = FALSE) {
 
 
 
+
+prob.munb.size.VGAM <- function(munb, size) {
+  prob <- size / (size + munb)
+  inf.munb <- is.infinite(munb)
+  inf.size <- is.infinite(size)
+  prob[inf.munb] <- 0
+  prob[inf.size] <- 1
+  prob[inf.munb & inf.size] <- NaN
+  prob[size < 0 | munb < 0] <- NaN
+  prob
+}
+
+
+
 dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
   if (length(munb)) {
     if (length(prob))
-      stop("'prob' and 'munb' both specified")
-    prob <- size / (size + munb)
+      stop("arguments 'prob' and 'munb' both specified")
+  } else {
+    if (!length(prob))
+      stop("Only one of 'prob' or 'munb' must be specified")
   }
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
@@ -348,51 +364,88 @@ dposnegbin <- function(x, size, prob = NULL, munb = NULL, log = FALSE) {
   rm(log)
 
 
-  LLL <- max(length(x), length(prob), length(size))
+  LLL <- max(length(x), length(prob), length(munb), length(size))
   if (length(x)    != LLL) x    <- rep_len(x,    LLL)
-  if (length(prob) != LLL) prob <- rep_len(prob, LLL)
   if (length(size) != LLL) size <- rep_len(size, LLL)
-
-  ans <- dnbinom(x = x, size = size, prob = prob, log = log.arg)
-  index0 <- (x == 0)
-
-  if (log.arg) {
-    ans[ index0] <- log(0.0)
-    ans[!index0] <- ans[!index0] - log1p(-dnbinom(x = 0 * x[!index0],
-                    size = size[!index0], prob = prob[!index0]))
+  ans <- if (length(munb)) {
+    if (length(munb) != LLL) munb <- rep_len(munb, LLL)
+    dnbinom(x = x, size = size, mu   = munb, log = TRUE)
   } else {
-    ans[ index0] <- 0.0
-    ans[!index0] <- ans[!index0] / pnbinom(q = 0 * x[!index0],
-                    size = size[!index0], prob = prob[!index0],
-                    lower.tail = FALSE)
+    if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+    dnbinom(x = x, size = size, prob = prob, log = TRUE)
   }
 
-  ans[prob == 0] <- NaN
-  ans[prob == 1] <- NaN
+  index0 <- (x == 0) & !is.na(size)  # & (!is.na(prob) |  !is.na(munb))
+  ans[ index0] <- log(0.0)
+  ans[!index0] <- ans[!index0] - (
+    if (length(prob))
+      pnbinom(0, size = size[!index0], prob = prob[!index0],
+              lower.tail = FALSE, log.p = TRUE) else
+      pnbinom(0, size = size[!index0], mu   = munb[!index0],
+              lower.tail = FALSE, log.p = TRUE))
+
+
+  if (!log.arg)
+    ans <- exp(ans)
+
+  if (!length(prob))
+    prob <- prob.munb.size.VGAM(munb, size)
+  ans[prob == 0 | prob == 1] <- NaN
 
   ans
 }
 
 
 
-pposnegbin <- function(q, size, prob = NULL, munb = NULL) {
+pposnegbin <- function(q, size, prob = NULL, munb = NULL,
+                       lower.tail = TRUE, log.p = FALSE) {
 
   if (length(munb)) {
     if (length(prob))
-      stop("'prob' and 'munb' both specified")
-    prob <- size / (size + munb)
+      stop("arguments 'prob' and 'munb' both specified")
+  } else {
+    if (!length(prob))
+      stop("Only one of 'prob' or 'munb' must be specified")
   }
-  L <- max(length(q), length(prob), length(size))
-  if (length(q)    != L) q    <- rep_len(q,    L)
-  if (length(prob) != L) prob <- rep_len(prob, L)
-  if (length(size) != L) size <- rep_len(size, L)
 
-  ans <- ifelse(q < 1, 0, (pnbinom(q, size = size, prob = prob) -
-                           dnbinom(0, size = size, prob = prob))
-       / pnbinom(0, size = size, prob = prob, lower.tail = FALSE))
+  LLL <- max(length(q), length(prob), length(munb), length(size))
+  if (length(q)    != LLL) q    <- rep_len(q,    LLL)
+  if (length(size) != LLL) size <- rep_len(size, LLL)
+  if (length(munb)) {
+    if (length(munb) != LLL) munb <- rep_len(munb, LLL)
+  } else {
+    if (length(prob) != LLL) prob <- rep_len(prob, LLL)
+  }
 
-  ans[prob == 0] <- NaN
-  ans[prob == 1] <- NaN
+  tail.prob <-
+    if (length(prob)) dnbinom(0, size = size, prob = prob) else
+                      dnbinom(0, size = size, mu   = munb)
+
+  vall <- rep_len(ifelse(lower.tail, log(0), log(1)), LLL)
+  ans <- if (length(prob)) {
+    ifelse(q < 1,
+           vall,
+           (if (lower.tail)
+           log(pnbinom(q, size = size, prob = prob) - tail.prob) else
+           pnbinom(q, size = size, prob = prob,
+                   lower.tail = FALSE, log.p = TRUE)) -
+           log1p(-tail.prob))
+  } else {
+    ifelse(q < 1,
+           vall,
+           (if (lower.tail)
+           log(pnbinom(q, size = size, mu   = munb) - tail.prob) else
+           pnbinom(q, size = size, mu   = munb,
+                       lower.tail = FALSE, log.p = TRUE)) -
+           log1p(-tail.prob))
+  }
+
+  if (!log.p)
+    ans <- exp(ans)
+
+  if (!length(prob))
+    prob <- prob.munb.size.VGAM(munb, size)
+  ans[prob == 0 | prob == 1] <- NaN
 
   ans
 }
@@ -404,36 +457,68 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
 
   if (length(munb)) {
     if (length(prob))
-      stop("'prob' and 'munb' both specified")
-    prob <- size / (size + munb)
+      stop("arguments 'prob' and 'munb' both specified")
+  } else {
+    if (!length(prob))
+      stop("Only one of 'prob' or 'munb' must be specified")
+  }
+
+  ans <- if (length(munb)) {
+    qnbinom(pnbinom(0, size = size, mu   = munb,
+                    lower.tail = FALSE) * p +
+            dnbinom(0, size = size, mu   = munb),
+            size = size, mu   = munb)
+  } else {
+    qnbinom(pnbinom(0, size = size, prob = prob,
+                    lower.tail = FALSE) * p +
+            dnbinom(0, size = size, prob = prob),
+            size = size, prob = prob)
   }
 
-  ans <- qnbinom(pnbinom(q = 0, size = size, prob = prob,
-                         lower.tail = FALSE) * p +
-                 dnbinom(x = 0, size = size, prob = prob),
-                 size = size, prob = prob)
   ans[p == 1] <- Inf
+  ans[p < 0 | 1 < p] <- NaN
 
-  ans[prob == 0] <- NaN
-  ans[prob == 1] <- NaN
+  if (!length(prob))
+    prob <- prob.munb.size.VGAM(munb, size)
+  ans[prob == 0 | prob == 1] <- NaN
 
-  ans[p < 0] <- NaN
-  ans[1 < p] <- NaN
   ans
 }
 
 
 
+rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+  } else {
+    if (!length(prob))
+      stop("Only one of 'prob' or 'munb' must be specified")
+  }
+
+  ans <- if (length(munb)) {
+    qnbinom(runif(n, min = dnbinom(0, size = size, mu   = munb)),
+            size = size, mu   = munb)
+  } else {
+    qnbinom(runif(n, min = dnbinom(0, size = size, prob = prob)),
+            size = size, prob = prob)
+  }
+  if (!length(prob))
+    prob <- prob.munb.size.VGAM(munb, size)
+  ans[prob == 0 | prob == 1] <- NaN
+  ans
+}
 
 
 
 
-    EIM.posNB.specialp <- function(munb, size,
-                                   y.max = NULL,  # Must be an integer
-                                   cutoff.prob = 0.995,
-                                   prob0, df0.dkmat, df02.dkmat2,
-                                   intercept.only = FALSE,
-                                   second.deriv = TRUE) {
+ EIM.posNB.specialp <-
+  function(munb, size,
+           y.max = NULL,  # Must be an integer
+           cutoff.prob = 0.995,
+           prob0, df0.dkmat, df02.dkmat2,
+           intercept.only = FALSE,
+           second.deriv = TRUE) {
 
 
       if (intercept.only) {
@@ -444,15 +529,17 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
         df02.dkmat2 <- df02.dkmat2[1]
       }
 
-      y.min <- 0  # Same as negbinomial() actually. A fixed constant really
+      y.min <- 0  # Same as negbinomial() actually. A fixed const really
 
       if (!is.numeric(y.max)) {
         eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
-        y.max <- max(qposnegbin(p = eff.p[2], munb = munb, size = size)) + 10
+        y.max <- max(qposnegbin(p = eff.p[2],
+                                munb = munb, size = size)) + 10
       }
 
       Y.mat <- if (intercept.only) y.min:y.max else
-               matrix(y.min:y.max, length(munb), y.max-y.min+1, byrow = TRUE)
+               matrix(y.min:y.max, length(munb), y.max-y.min+1,
+                      byrow = TRUE)
   neff.row <- ifelse(intercept.only, 1, nrow(Y.mat))
   neff.col <- ifelse(intercept.only, length(Y.mat), ncol(Y.mat))
 
@@ -467,13 +554,14 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
       }
 
 
-  trigg.term <- 
+  trigg.term <-
   if (TRUE) {
     answerC <- .C("eimpnbinomspecialp",
       as.integer(intercept.only),
       as.double(neff.row), as.double(neff.col),
       as.double(size),
-      as.double(1 - pposnegbin(Y.mat, size = size, munb = munb)),
+      as.double(pposnegbin(Y.mat, size = size, munb = munb,
+                           lower.tail = FALSE)),
       rowsums = double(neff.row))
       answerC$rowsums
   }
@@ -496,13 +584,14 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
 
 
 
-    EIM.posNB.speciald <- function(munb, size,
-                                   y.min = 1,  # 20160201; must be an integer
-                                   y.max = NULL,  # Must be an integer
-                                   cutoff.prob = 0.995,
-                                   prob0, df0.dkmat, df02.dkmat2,
-                                   intercept.only = FALSE,
-                                   second.deriv = TRUE) {
+ EIM.posNB.speciald <-
+  function(munb, size,
+           y.min = 1,  # 20160201; must be an integer
+           y.max = NULL,  # Must be an integer
+           cutoff.prob = 0.995,
+           prob0, df0.dkmat, df02.dkmat2,
+           intercept.only = FALSE,
+           second.deriv = TRUE) {
 
 
       if (intercept.only) {
@@ -515,13 +604,16 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
 
       if (!is.numeric(y.max)) {
         eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
-        y.max <- max(qposnegbin(p = eff.p[2], munb = munb, size = size)) + 10
+        y.max <- max(qposnegbin(p = eff.p[2],
+                                munb = munb, size = size)) + 10
       }
 
       Y.mat <- if (intercept.only) y.min:y.max else
-               matrix(y.min:y.max, length(munb), y.max-y.min+1, byrow = TRUE)
+               matrix(y.min:y.max, length(munb),
+                      y.max-y.min+1, byrow = TRUE)
       trigg.term <- if (intercept.only) {
-         dposnegbin(Y.mat, size = size, munb = munb) %*% trigamma(Y.mat + size)
+         dposnegbin(Y.mat, size = size, munb = munb) %*%
+             trigamma(Y.mat + size)
       } else {
          rowSums(dposnegbin(Y.mat, size = size, munb = munb) *
                  trigamma(Y.mat + size))
@@ -568,7 +660,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
            imethod = 1,
            imunb = NULL,
            iprobs.y = NULL,  # 0.35,
-           gprobs.y = (0:9)/10,  # 20160709; grid for finding munb.init
+           gprobs.y = ppoints(8),
            isize = NULL,
            gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
 
@@ -609,8 +701,8 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
   new("vglmff",
   blurb = c("Positive-negative binomial distribution\n\n",
             "Links:    ",
-            namesof("munb", lmunb, earg = emunb ), ", ",
-            namesof("size", lsize, earg = esize ), "\n",
+            namesof("munb", lmunb, earg = emunb), ", ",
+            namesof("size", lsize, earg = esize), "\n",
             "Mean:     munb / (1 - (size / (size + munb))^size)"),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -657,14 +749,14 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
 
 
 
-    M <- M1 * ncol(y) 
+    M <- M1 * ncol(y)
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$type.fitted <- .type.fitted
-    extra$dimnamesy   <- dimnames(y)
+    extra$colnames.y  <- colnames(y)
 
     predictors.names <- c(
-      namesof(param.names("munb", NOS), .lmunb , earg = .emunb , tag = FALSE),
-      namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE))
+    namesof(param.names("munb", NOS), .lmunb , earg = .emunb , tag=FALSE),
+    namesof(param.names("size", NOS), .lsize , earg = .esize , tag=FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
 
     gprobs.y <- .gprobs.y
@@ -672,7 +764,6 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
     if (length(imunb))
       imunb <- matrix(imunb, n, NOS, byrow = TRUE)
 
-
     if (!length(etastart)) {
 
 
@@ -682,19 +773,24 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
       if (length( .iprobs.y ))
         gprobs.y <-  .iprobs.y
       gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
+
       for (jay in 1:NOS) {  # For each response 'y_jay'... do:
+        wm.yj <- weighted.mean(y[, jay], w = w[, jay])
         munb.init.jay <- if ( .imethod == 1 ) {
-          quantile(y[, jay], probs = gprobs.y) - 1/2  # + 1/16
+
+          negbinomial.initialize.yj(y[, jay] - 1,
+                                    w[, jay], gprobs.y = gprobs.y,
+                                    wm.yj = wm.yj) + 1 - 1/4
         } else {
-          weighted.mean(y[, jay], w = w[, jay]) - 1/2
+          wm.yj - 1/2
+        }
+        if (length(imunb)) {
+          munb.init.jay <- sample(x = imunb[, jay],
+                                  size = 10, replace = TRUE)
+          munb.init.jay <- unique(sort(munb.init.jay))
         }
-        if (length(imunb))
-          munb.init.jay <- imunb[, jay]
-
 
-        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
-                                    weighted.mean(y[, jay], w = w[, jay]))
+        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) + wm.yj)
         if (length( .isize ))
           gsize <- .isize  # isize is on an absolute scale
 
@@ -729,6 +825,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
             .type.fitted = type.fitted ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -741,14 +838,20 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
     TF <- c(TRUE, FALSE)
     munb <- eta2theta(eta[,  TF, drop = FALSE], .lmunb , earg = .emunb )
     kmat <- eta2theta(eta[, !TF, drop = FALSE], .lsize , earg = .esize )
+   small.size <- 1e-10
+   if (any(ind4 <- (kmat < small.size))) {
+     warning("estimates of 'size' are very small. Taking evasive action.")
+     kmat[ind4] <- small.size
+   }
 
     tempk <- 1 / (1 + munb / kmat)  # kmat / (kmat + munb)
     prob0  <- tempk^kmat
     oneminusf0  <- 1 - prob0
 
     smallval <- .mds.min  # Something like this is needed
-    if (any(big.size <- munb / kmat < smallval)) {
-      prob0[big.size]  <- exp(-munb[big.size])  # The limit as kmat --> Inf
+
+    if (any(big.size <- (munb / kmat < smallval))) {
+      prob0[big.size]  <- exp(-munb[big.size])  # The limit as kmat-->Inf
       oneminusf0[big.size] <- -expm1(-munb[big.size])
     }
 
@@ -756,18 +859,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
                   "mean"      = munb / oneminusf0,
                   "munb"      = munb,
                   "prob0"     = prob0)  # P(Y=0)
-     if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))       
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-   ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lsize = lsize, .lmunb = lmunb,
            .esize = esize, .emunb = emunb,
            .mds.min = mds.min ))),
@@ -788,7 +880,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
 
     misc$max.chunk.MB <- .max.chunk.MB
     misc$cutoff.prob <- .cutoff.prob
-    misc$imethod <- .imethod 
+    misc$imethod <- .imethod
     misc$nsimEIM <- .nsimEIM
     misc$expected <- TRUE
     misc$multipleResponses <- TRUE
@@ -828,7 +920,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
@@ -847,12 +939,16 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
     size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
                      .lsize , earg = .esize )
 
+   small.size.absolute <- 1e-14  # 20160909
+
     smallval <- .mds.min  # .munb.div.size
-    okay1 <- all(is.finite(munb)) && all(munb > 0) &&
-             all(is.finite(size)) && all(size > 0)
-    overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
+    okay1 <- all(is.finite(munb)) && all(0 < munb) &&
+             all(is.finite(size)) && all(0 < size) &&
+             all(small.size.absolute < size)
+    overdispersion <- if (okay1) all(smallval < munb / size) else FALSE
     if (!overdispersion)
-      warning("parameter 'size' has very large values; ",
+      warning("parameter 'size' has very large values relative ",
+              "to 'munb'; ",
               "try fitting a positive-Poisson ",
               "model instead.")
     okay1 && overdispersion
@@ -898,7 +994,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
 
 
     if (any(big.size)) {
-      prob0[big.size]  <- exp(-munb[big.size])  # The limit as kmat --> Inf
+      prob0[big.size]  <- exp(-munb[big.size])  # The limit as kmat-->Inf
       oneminusf0[big.size] <- -expm1(-munb[big.size])
       df0.dmunb[big.size] <- -tempk[big.size] * prob0[big.size]
       df0.dkmat[big.size] <-  prob0[big.size] * AA16[big.size]
@@ -936,7 +1032,7 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
     }
 
 
-    
+
     myderiv <- c(w) * cbind(dl.dmunb * dmunb.deta,
                             dl.dsize * dsize.deta)
     myderiv[, interleave.VGAM(M, M1 = M1)]
@@ -970,7 +1066,8 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
       Q.maxs <- pmin(Q.maxs, Q.MAXS)
 
 
-      ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
+      ind1 <- if (max.chunk.MB > 0)
+          (Q.maxs - Q.mins < max.support) else FALSE
       if ((NN <- sum(ind1)) > 0) {
         Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
         n.chunks <- if (intercept.only) 1 else
@@ -994,14 +1091,14 @@ posnegbinomial.control <- function(save.weights = TRUE, ...) {
                                df0.dkmat   =   df0.dkmat[sind2, jay],
                                df02.dkmat2 = df02.dkmat2[sind2, jay],
                                intercept.only = intercept.only)
-          
-          
+
+
           if (any(eim.kk.TF <-       wz[sind2, M1*jay] <= 0 |
                                is.na(wz[sind2, M1*jay]))) {
             ind2[sind2[eim.kk.TF], jay] <- FALSE
           }
-          
-          
+
+
           lwr.ptr <- upr.ptr + 1
         }  # while
 
@@ -1193,28 +1290,6 @@ rpospois <- function(n, lambda) {
 
 
 
-rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
-  ans <- if (!is.null(munb)) {
-    if (!is.null(prob))
-        stop("'prob' and 'mu' both specified")
- 
-    prob <- size / (size + munb)
-
-    qnbinom(p = runif(n,
-                      min = dnbinom(0, size,              mu = munb)),
-            size,              mu = munb)
-  } else {
-    qnbinom(p = runif(n,
-                      min = dnbinom(0, size, prob = prob           )),
-            size, prob = prob           )
-  }
-  ans[prob == 0] <- NaN
-  ans[prob == 1] <- NaN
-  ans  
-}
-
-
-
 
  pospoisson <- function(link = "loge",
                         type.fitted = c("mean", "lambda", "prob0"),
@@ -1279,12 +1354,12 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
     extra$ncoly <- ncoly
     extra$M1 <- M1
     M <- M1 * ncoly
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
-
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("lambda", ncoly)
-    predictors.names <- namesof(mynames1, .link , earg = .earg, tag = FALSE)
+    predictors.names <- namesof(mynames1, .link , earg = .earg,
+                                tag = FALSE)
 
     if (!length(etastart)) {
       lambda.init <- Init.mu(y = y, w = w, imethod = .imethod ,
@@ -1296,6 +1371,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
             .ilambda = ilambda, .imethod = imethod,
             .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- NCOL(eta) / c(M1 = 1)
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -1309,19 +1385,8 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
     ans <- switch(type.fitted,
                   "mean"      = -lambda / expm1(-lambda),
                   "lambda"    = lambda,
-                  "prob0"     = exp(-lambda))  # P(Y=0)
-     if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))       
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-   ans
+                  "prob0"     = exp(-lambda))  # P(Y=0) as it were
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <- rep_len( .link , M)
@@ -1340,7 +1405,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    lambda <- eta2theta(eta, .link , earg = .earg ) 
+    lambda <- eta2theta(eta, .link , earg = .earg )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -1353,6 +1418,11 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("pospoisson"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
   simslot = eval(substitute(
@@ -1360,10 +1430,10 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    lambda <- eta2theta(eta, .link , earg = .earg ) 
+    lambda <- eta2theta(eta, .link , earg = .earg )
     rpospois(nsim * length(lambda), lambda)
   }, list( .link = link, .earg = earg ))),
 
@@ -1371,7 +1441,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
 
 
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta, .link , earg = .earg ) 
+    lambda <- eta2theta(eta, .link , earg = .earg )
 
     temp6 <- expm1(lambda)
     dl.dlambda <- y / lambda - 1 - 1 / temp6
@@ -1431,11 +1501,11 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
 
 
-pposbinom <- function(q, size, prob 
+pposbinom <- function(q, size, prob
                      ) {
 
 
-  if (!is.Numeric(prob, positive = TRUE)) 
+  if (!is.Numeric(prob, positive = TRUE))
     stop("no zero or non-numeric values allowed for argument 'prob'")
   L <- max(length(q), length(size), length(prob))
   if (length(q)      != L) q      <- rep_len(q,      L)
@@ -1517,8 +1587,8 @@ rposbinom <- function(n, size, prob) {
               namesof("prob", link, earg = earg, tag = FALSE),
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
-                           bool = .parallel , 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            constraints = constraints)
 
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -1632,7 +1702,7 @@ rposbinom <- function(n, size, prob) {
   list( .link = link, .earg = earg,
         .multiple.responses = multiple.responses ))),
   last = eval(substitute(expression({
-    extra$w <- NULL  # Kill it off 
+    extra$w <- NULL  # Kill it off
 
 
     misc$link <- rep_len( .link , M)
@@ -1646,8 +1716,8 @@ rposbinom <- function(n, size, prob) {
     misc$expected <- TRUE
     misc$omit.constant <- .omit.constant
     misc$needto.omit.constant <- TRUE  # Safety mechanism
-    
-    
+
+
     misc$multiple.responses   <- .multiple.responses
     w <- as.numeric(w)
 
@@ -1665,7 +1735,7 @@ rposbinom <- function(n, size, prob) {
       extra$SE.N.hat <- tmp6$SE.N.hat
     }
 
-    
+
   }), list( .link = link, .earg = earg,
             .multiple.responses = multiple.responses,
             .omit.constant = omit.constant ))),
@@ -1710,6 +1780,12 @@ rposbinom <- function(n, size, prob) {
           .omit.constant = omit.constant ))),
 
   vfamily = c("posbinomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    binprob <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(binprob)) && all(0 < binprob & binprob < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
 
 
 
@@ -1838,8 +1914,8 @@ rposbinom <- function(n, size, prob) {
             namesof("probM", link, earg = earg, tag = FALSE),
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
-                           bool = .parallel.t , 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel.t ,
                            constraints = constraints,
                            apply.int = .apply.parint ,  #  TRUE,
                            cm.default = diag(M),
@@ -1858,7 +1934,7 @@ rposbinom <- function(n, size, prob) {
          parallel.t = .parallel.t )
   }, list( .parallel.t   = parallel.t,
            .p.small    = p.small,
-           .no.warning = no.warning,          
+           .no.warning = no.warning,
            .apply.parint = apply.parint ))),
 
   initialize = eval(substitute(expression({
@@ -1873,7 +1949,7 @@ rposbinom <- function(n, size, prob) {
 
     extra$p.small    <- .p.small
     extra$no.warning <- .no.warning
-    
+
 
     w <- matrix(w, n, ncoly)
     mustart <- matrix(colSums(y) / colSums(w),
@@ -1935,7 +2011,7 @@ rposbinom <- function(n, size, prob) {
     fv
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
-    extra$w   <- NULL   # Kill it off 
+    extra$w   <- NULL   # Kill it off
 
 
     misc$link <- rep_len( .link , M)
@@ -1996,6 +2072,13 @@ rposbinom <- function(n, size, prob) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("posbernoulli.t"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    probs <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(probs)) && all(0 < probs & probs < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
+
   deriv = eval(substitute(expression({
     probs <- eta2theta(eta, .link , earg = .earg )
     dprobs.deta <- dtheta.deta(probs, .link , earg = .earg )
@@ -2029,9 +2112,9 @@ rposbinom <- function(n, size, prob) {
       for (tlocal in (slocal+1):M)
         wz[, iam(slocal, tlocal, M = M)] <- dprobs.deta[, slocal] *
                                             dprobs.deta[, tlocal] *
-                                            (B.st[,slocal,tlocal] +
-                                             B.s [,slocal] *
-                                             B.s [,tlocal] / AAA) / (-AAA)
+                                            (B.st[, slocal,tlocal] +
+                                             B.s [, slocal] *
+                                             B.s [, tlocal] / AAA) / (-AAA)
 
 
 
@@ -2089,7 +2172,7 @@ rposbinom <- function(n, size, prob) {
   if (!is.Numeric(p.small, positive = TRUE, length.arg = 1))
     stop("bad input for argument 'p.small'")
 
- 
+
 
 
   new("vglmff",
@@ -2107,7 +2190,7 @@ rposbinom <- function(n, size, prob) {
     constraints <- cm.VGAM(matrix(1, 2, 1), x = x,
                            bool = .drop.b ,
                            constraints = constraints,
-                           apply.int = .apply.parint.b ,  # TRUE, 
+                           apply.int = .apply.parint.b ,  # TRUE,
                            cm.default = cm.intercept.default,  # diag(2),
                            cm.intercept.default = cm.intercept.default)
   }), list( .drop.b = drop.b,
@@ -2142,15 +2225,15 @@ rposbinom <- function(n, size, prob) {
     extra$orig.w <- w
     extra$tau     <- tau   <- ncol(y)
     extra$ncoly   <- ncoly <- ncol(y)
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
 
     extra$p.small    <- .p.small
     extra$no.warning <- .no.warning
 
 
-    
+
 
     mustart.orig <- mustart
     M <- 2
@@ -2234,38 +2317,21 @@ rposbinom <- function(n, size, prob) {
 
 
     if ( type.fitted == "likelihood.cond") {
-      probs.numer <- prr 
+      probs.numer <- prr
       mat.index <- cbind(1:nrow(prc), extra$cap1)
       probs.numer[mat.index] <- prc[mat.index]
       probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
       fv <- probs.numer / AAA
-
     } else {
 
-
       fv <- prc - prr
       for (jay in 2:tau)
         fv[, jay] <- fv[, jay-1] * (1 - cap.probs)
       fv <- (fv + prr) / AAA
     }
 
-
-
-    ans <- fv
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
-  }, list( .link = link,
-           .type.fitted = type.fitted,
-           .earg = earg ))),
+    label.cols.y(fv, colnames.y = extra$colnames.y, NOS = tau)
+  }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
 
     misc$link <- c( .link , .link )
@@ -2320,7 +2386,7 @@ rposbinom <- function(n, size, prob) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      probs.numer <- prr 
+      probs.numer <- prr
       mat.index <- cbind(1:nrow(prc), extra$cap1)
       probs.numer[mat.index] <- prc[mat.index]
       probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
@@ -2337,6 +2403,16 @@ rposbinom <- function(n, size, prob) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("posbernoulli.b"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    cap.probs <- eta2theta(eta[, 1], .link , earg = .earg )
+    rec.probs <- eta2theta(eta[, 2], .link , earg = .earg )
+    okay1 <- all(is.finite(cap.probs)) &&
+             all(0 < cap.probs & cap.probs < 1) &&
+             all(is.finite(rec.probs)) &&
+             all(0 < rec.probs & rec.probs < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
 
 
 
@@ -2409,7 +2485,7 @@ rposbinom <- function(n, size, prob) {
               rec.probs * (1 - rec.probs) * (1 - QQQ))
     wz[, iam(2, 2, M = M)] <- wz.pr * drecprobs.deta^2
 
-  
+
 
 
     wz <- c(w) * wz
@@ -2429,7 +2505,7 @@ rposbinom <- function(n, size, prob) {
            type.fitted = c("likelihood.cond", "mean.uncond"),
            imethod = 1,
            iprob = NULL,
-           p.small = 1e-4, no.warning = FALSE,  
+           p.small = 1e-4, no.warning = FALSE,
            ridge.constant = 0.01,
            ridge.power = -4) {
 
@@ -2471,19 +2547,20 @@ rposbinom <- function(n, size, prob) {
     stop("bad input for argument 'p.small'")
 
 
-  
+
   new("vglmff",
   blurb = c("Positive-Bernoulli (capture-recapture) model\n",
             "with temporal and behavioural effects (M_{tb}/M_{tbh})\n\n",
             "Links:    ",
             namesof("pcapture.1",     link, earg = earg, tag = FALSE),
             ", ..., ",
-            namesof("pcapture.tau",   link, earg = earg, tag = FALSE), ", ",
+            namesof("pcapture.tau",   link, earg = earg, tag = FALSE),
+            ", ",
             namesof("precapture.2",   link, earg = earg, tag = FALSE),
             ", ..., ",
             namesof("precapture.tau", link, earg = earg, tag = FALSE)),
   constraints = eval(substitute(expression({
- 
+
 
     constraints.orig <- constraints
     cm1.d <-
@@ -2491,10 +2568,10 @@ rposbinom <- function(n, size, prob) {
     con.d <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .drop.b ,
                            constraints = constraints.orig,
-                           apply.int = .apply.parint.d ,  # FALSE,  
+                           apply.int = .apply.parint.d ,  # FALSE,
                            cm.default           = cmk.d,
                            cm.intercept.default = cm1.d)
-   
+
 
 
     cm1.t <-
@@ -2502,11 +2579,11 @@ rposbinom <- function(n, size, prob) {
     con.t <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel.t ,  # Same as .parallel.b
                            constraints = constraints.orig,
-                           apply.int = .apply.parint.t ,  # FALSE,  
+                           apply.int = .apply.parint.t ,  # FALSE,
                            cm.default           = cmk.t,
                            cm.intercept.default = cm1.t)
-   
-    
+
+
 
     cm1.b <-
     cmk.b <- rbind(matrix(0, tau, tau-1), diag(tau-1))
@@ -2514,10 +2591,10 @@ rposbinom <- function(n, size, prob) {
                               rep_len(1, tau-1)), M, 1), x = x,
                            bool = .parallel.b ,  # Same as .parallel.b
                            constraints = constraints.orig,
-                           apply.int = .apply.parint.b ,  # FALSE,  
+                           apply.int = .apply.parint.b ,  # FALSE,
                            cm.default           = cmk.b,
                            cm.intercept.default = cm1.b)
-   
+
     con.use <- con.b
     for (klocal in seq_along(con.b)) {
       con.use[[klocal]] <-
@@ -2526,14 +2603,13 @@ rposbinom <- function(n, size, prob) {
 
     }
 
-    
+
     constraints <- con.use
-    
+
   }), list( .parallel.t = parallel.t,
             .parallel.b = parallel.b,
             .drop.b     = drop.b,
             .apply.parint.b = apply.parint.b,
-            .type.fitted    = type.fitted,
             .apply.parint.d = apply.parint.d,
             .apply.parint.t = apply.parint.t ))),
   infos = eval(substitute(function(...) {
@@ -2579,7 +2655,7 @@ rposbinom <- function(n, size, prob) {
     extra$orig.w  <- w
     extra$ycounts <- y
     extra$type.fitted <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
+    extra$colnames.y  <- colnames(y)
     M <- M1 * tau - 1  # recap.prob.1 is unused
 
 
@@ -2592,11 +2668,11 @@ rposbinom <- function(n, size, prob) {
 
 
 
- 
+
     extra$p.small    <- .p.small
     extra$no.warning <- .no.warning
 
-   
+
 
 
 
@@ -2606,7 +2682,7 @@ rposbinom <- function(n, size, prob) {
 
     tmp3 <- aux.posbernoulli.t(y)
     cap.hist1  <- extra$cap.hist1  <- tmp3$cap.hist1
-    
+
 
     dn2.cap   <- paste("pcapture.",   1:ncoly, sep = "")
     dn2.recap <- paste("precapture.", 2:ncoly, sep = "")
@@ -2663,7 +2739,7 @@ rposbinom <- function(n, size, prob) {
 
 
     if ( type.fitted == "likelihood.cond") {
-      probs.numer <- prr 
+      probs.numer <- prr
       mat.index <- cbind(1:nrow(prc), extra$cap1)
       probs.numer[mat.index] <- prc[mat.index]
       probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
@@ -2685,25 +2761,10 @@ rposbinom <- function(n, size, prob) {
         fv[, 3:tau] <- fv[, 3:tau] / AAA
       }
     }
-
-
-
-    ans <- fv
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
-  }, list( .link = link,
-           .earg = earg ))),
+    label.cols.y(fv, colnames.y = extra$colnames.y, NOS = NOS)
+  }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
-    extra$w   <- NULL   # Kill it off 
+    extra$w   <- NULL   # Kill it off
 
 
     misc$link <- rep_len( .link , M)
@@ -2768,7 +2829,7 @@ rposbinom <- function(n, size, prob) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      probs.numer <- prr 
+      probs.numer <- prr
       mat.index <- cbind(1:nrow(prc), extra$cap1)
       probs.numer[mat.index] <- prc[mat.index]
       probs.numer[extra$cap.hist1 == 0] <- prc[extra$cap.hist1 == 0]
@@ -2785,6 +2846,12 @@ rposbinom <- function(n, size, prob) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("posbernoulli.tb"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    probs <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(probs)) &&  all(0 < probs & probs < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
   deriv = eval(substitute(expression({
     tau <- extra$ncoly
     taup1 <- tau + 1
@@ -2795,9 +2862,9 @@ rposbinom <- function(n, size, prob) {
                  probs[, taup1:ncol(probs)])  # 1st coln ignored
     logQQQ <- rowSums(log1p(-prc))
     QQQ <- exp(logQQQ)
- 
-    
-    
+
+
+
     dprobs.deta <- dtheta.deta(probs, .link , earg = .earg )
     dQ.dprc   <- -QQQ / (1 - prc)
     d2Q.dprc <- array(0, c(n, tau, tau))
@@ -2807,7 +2874,7 @@ rposbinom <- function(n, size, prob) {
         d2Q.dprc[, kay, jay] <-  QQQ / ((1 - prc[, jay]) *
                                         (1 - prc[, kay]))
 
-    dl.dpc <- dl.dpr <- matrix(0, n, tau)  # First coln of dl.dpr is ignored
+    dl.dpc <- dl.dpr <- matrix(0, n, tau)  # 1st coln of dl.dpr is ignored
     for (jay in 1:tau) {
       dl.dpc[, jay] <- (1 - extra$cap.hist1[, jay]) *
         (    y[, jay]  /      prc[, jay]   -
@@ -2834,12 +2901,12 @@ rposbinom <- function(n, size, prob) {
     wz.pc <- (QQQcummat / prc - QQQ / (1 - QQQ)) / ((1 - QQQ) *
               (1 - prc)^2)
     wz[, 1:tau] <- wz.pc
-  
+
 
     wz.pr <- as.matrix((1 - QQQcummat / (1 - prc)) / (
                         prr * (1 - prr) * (1 - QQQ)))
     wz[, taup1:M] <- wz.pr[, -1]
-  
+
 
     for (jay in 1:(tau-1))
       for (kay in (jay+1):tau)
@@ -2897,10 +2964,11 @@ setMethod("showsummaryvglmS4VGAM",  signature(VGAMff = "posbernoulli.tb"),
  if (length(object at extra$N.hat) == 1 &&
       is.numeric(object at extra$N.hat)) {
     cat("\nEstimate of N: ", round(object at extra$N.hat, digits = 3), "\n")
-    cat("\nStd. Error of N: ", round(object at extra$SE.N.hat, digits = 3), "\n")
+    cat("\nStd. Error of N: ", round(object at extra$SE.N.hat, digits = 3),
+        "\n")
 
-    confint.N <- object at extra$N.hat + c(Lower = -1, Upper = 1) *
-                                      qnorm(0.975) * object at extra$SE.N.hat
+    confint.N <- object at extra$N.hat +
+        c(Lower = -1, Upper = 1) * qnorm(0.975) * object at extra$SE.N.hat
     cat("\nApproximate 95 percent confidence interval for N:\n")
     print(round(confint.N, digits = 2))
   }
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 8879d42..ec97be3 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -60,7 +60,7 @@ qlms.bcn <- function(p, lambda = 1, mu = 0, sigma = 1) {
 lms.bcn.control <-
 lms.bcg.control <-
 lms.yjn.control <- function(trace = TRUE, ...)
-   list(trace = trace) 
+   list(trace = trace)
 
 
 
@@ -165,22 +165,22 @@ lms.yjn.control <- function(trace = TRUE, ...)
                 theta2eta(sigma.init,  .lsigma,  earg = .esigma))
     }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-            .elambda = elambda, .emu = emu, .esigma = esigma, 
+            .elambda = elambda, .emu = emu, .esigma = esigma,
             .idf.mu = idf.mu,
             .idf.sigma = idf.sigma,
             .ilambda = ilambda, .isigma = isigma ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-      eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
-      eta[, 2] <- eta2theta(eta[, 2], .lmu,     earg = .emu)
-      eta[, 3] <- eta2theta(eta[, 3], .lsigma,  earg = .esigma)
+    eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
+    eta[, 2] <- eta2theta(eta[, 2], .lmu,     earg = .emu)
+    eta[, 3] <- eta2theta(eta[, 3], .lsigma,  earg = .esigma)
       qtplot.lms.bcn(percentiles = .percentiles, eta = eta)
   }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-           .elambda = elambda, .emu = emu, .esigma = esigma, 
+           .elambda = elambda, .emu = emu, .esigma = esigma,
            .percentiles = percentiles ))),
   last = eval(substitute(expression({
-    misc$links <-    c(lambda = .llambda, mu = .lmu, sigma = .lsigma )
+    misc$links <-    c(lambda = .llambda , mu = .lmu , sigma = .lsigma )
 
-    misc$earg  <- list(lambda = .elambda, mu = .emu, sigma = .esigma )
+    misc$earg  <- list(lambda = .elambda , mu = .emu , sigma = .esigma )
 
     misc$tol0 <- .tol0
     misc$percentiles  <- .percentiles  # These are argument values
@@ -192,7 +192,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
                                   dimnames = list(dimnames(x)[[1]], NULL)))
     }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-            .elambda = elambda, .emu = emu, .esigma = esigma, 
+            .elambda = elambda, .emu = emu, .esigma = esigma,
             .percentiles = percentiles,
             .tol0 = tol0 ))),
   loglikelihood = eval(substitute(
@@ -201,8 +201,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
              summation = TRUE) {
 
     lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
-    muvec  <- eta2theta(eta[, 2], .lmu     , earg = .emu )
-    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma )
+    muvec  <- eta2theta(eta[, 2], .lmu     , earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
 
 
     if (residuals) {
@@ -220,10 +220,21 @@ lms.yjn.control <- function(trace = TRUE, ...)
            .elambda = elambda, .emu = emu, .esigma = esigma,
            .tol0 = tol0 ))),
   vfamily = c("lms.bcn", "lmscreg"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <- eta2theta(eta[, 2], .lmu     , earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
+    okay1 <- all(is.finite(mymu  )) &&
+             all(is.finite(sigma )) && all(0 < sigma) &&
+             all(is.finite(lambda))
+    okay1
+  }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+           .elambda = elambda, .emu = emu, .esigma = esigma,
+           .tol0 = tol0 ))),
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
-    mymu   <- eta2theta(eta[, 2], .lmu, earg = .emu)
-    sigma  <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <- eta2theta(eta[, 2], .lmu     , earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
 
     zedd <- ((y / mymu)^lambda - 1) / (lambda * sigma)
     z2m1 <- zedd * zedd - 1
@@ -353,7 +364,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
                     theta2eta(sigma.init,   .lsigma ,  earg = .esigma ))
         }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-            .elambda = elambda, .emu = emu, .esigma = esigma, 
+            .elambda = elambda, .emu = emu, .esigma = esigma,
             .idf.mu = idf.mu,
             .idf.sigma = idf.sigma,
             .ilambda = ilambda, .isigma = isigma ))),
@@ -363,7 +374,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
     eta[, 3] <- eta2theta(eta[, 3], .lsigma ,  earg = .esigma )
     qtplot.lms.bcg(percentiles = .percentiles, eta = eta)
   }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-           .elambda = elambda, .emu = emu, .esigma = esigma, 
+           .elambda = elambda, .emu = emu, .esigma = esigma,
            .percentiles = percentiles ))),
   last = eval(substitute(expression({
     misc$link <-    c(lambda = .llambda, mu = .lmu, sigma = .lsigma )
@@ -377,7 +388,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
           ncol = 3, dimnames = list(dimnames(x)[[1]], NULL)))
     }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-            .elambda = elambda, .emu = emu, .esigma = esigma, 
+            .elambda = elambda, .emu = emu, .esigma = esigma,
             .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
@@ -402,10 +413,20 @@ lms.yjn.control <- function(trace = TRUE, ...)
   }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
            .elambda = elambda, .emu = emu, .esigma = esigma ))),
   vfamily = c("lms.bcg", "lmscreg"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <- eta2theta(eta[, 2], .lmu     , earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
+    okay1 <- all(is.finite(mymu  )) &&
+             all(is.finite(sigma )) && all(0 < sigma) &&
+             all(is.finite(lambda))
+    okay1
+  }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+           .elambda = elambda, .emu = emu, .esigma = esigma ))),
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
-    mymu   <- eta2theta(eta[, 2], .lmu,     earg = .emu)
-    sigma  <- eta2theta(eta[, 3], .lsigma,  earg = .esigma)
+    lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda )
+    mymu   <- eta2theta(eta[, 2], .lmu,     earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma,  earg = .esigma  )
 
     Gee <- (y / mymu)^lambda
     theta <- 1 / (sigma * lambda)^2
@@ -555,19 +576,19 @@ dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
     answer <- matrix(NA_real_, L, derivative+1)
     CC <- psi >= 0
     BB <- ifelse(CC, lambda, -2+lambda)
-    AA <- psi * BB 
+    AA <- psi * BB
     temp8 <- if (derivative > 0) {
         answer[,1:derivative] <-
             Recall(psi = psi, lambda = lambda, mymu = mymu, sigma = sigma,
-                   derivative = derivative-1, smallno = smallno) 
+                   derivative = derivative-1, smallno = smallno)
         answer[,derivative] * derivative
-    } else { 
+    } else {
         0
     }
     answer[,1+derivative] <- ((AA+1) * (log1p(AA)/BB)^derivative - temp8) / BB
 
     pos <- (CC & abs(lambda) <= smallno) | (!CC & abs(lambda-2) <= smallno)
-    if (any(pos)) 
+    if (any(pos))
       answer[pos,1+derivative] =
         (answer[pos, 1]^(1+derivative))/(derivative+1)
     answer
@@ -581,12 +602,12 @@ gh.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
       ((derivmat[, 2]/sigma)^2 +
         sqrt(2) * z * derivmat[, 3] / sigma) / sqrt(pi)
     } else {
-        # Long-winded way 
+        # Long-winded way
         psi <- mymu + sqrt(2) * sigma * z
         (1 / sqrt(pi)) *
         (dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
                           derivative = 1)[, 2]^2 +
-        (psi - mymu) * 
+        (psi - mymu) *
         dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
                          derivative = 2)[, 3]) / sigma^2
     }
@@ -628,7 +649,7 @@ glag.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
     (1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
     (1 / sqrt(pi)) *
     (dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]^2 +
-    (psi - mymu) * 
+    (psi - mymu) *
     dpsi.dlambda.yjn(psi, lambda, mymu,
                      sigma, derivative = 2)[, 3]) / sigma^2
   }
@@ -673,7 +694,7 @@ gleg.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
     psi <- mymu + sqrt(2) * sigma * z
     (exp(-z^2) / sqrt(pi)) *
     (dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]^2 +
-    (psi - mymu) * 
+    (psi - mymu) *
     dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
                      derivative = 2)[, 3]) / sigma^2
   }
@@ -780,7 +801,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
         namesof("sigma",  .lsigma, earg = .esigma,  short= TRUE))
 
       y.save <- y
-      yoff <- if (is.Numeric( .yoffset)) .yoffset else -median(y) 
+      yoff <- if (is.Numeric( .yoffset)) .yoffset else -median(y)
       extra$yoffset <- yoff
       y <- y + yoff
 
@@ -818,7 +839,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
 
       }
   }), list(.llambda = llambda, .lmu = lmu, .lsigma = lsigma,
-           .elambda = elambda, .emu = emu, .esigma = esigma, 
+           .elambda = elambda, .emu = emu, .esigma = esigma,
            .idf.mu = idf.mu,
            .idf.sigma = idf.sigma,
            .ilambda = ilambda,
@@ -848,11 +869,11 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
 
     if (control$cdf) {
         post$cdf <- cdf.lms.yjn(y + misc$yoffset,
-            eta0=matrix(c(lambda,mymu,sigma), 
+            eta0=matrix(c(lambda,mymu,sigma),
             ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
     }
   }), list(.percentiles = percentiles,
-           .elambda = elambda, .emu = emu, .esigma = esigma, 
+           .elambda = elambda, .emu = emu, .esigma = esigma,
            .nsimEIM=nsimEIM,
            .llambda = llambda, .lmu = lmu, .lsigma = lsigma ))),
   loglikelihood = eval(substitute(
@@ -874,27 +895,38 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .elambda = elambda, .emu = emu, .esigma = esigma, 
+  }, list( .elambda = elambda, .emu = emu, .esigma = esigma,
            .llambda = llambda, .lmu = lmu,
            .lsigma = lsigma ))),
   vfamily = c("lms.yjn2", "lmscreg"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <- eta2theta(eta[, 2], .lmu     , earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
+    okay1 <- all(is.finite(mymu  )) &&
+             all(is.finite(sigma )) && all(0 < sigma) &&
+             all(is.finite(lambda))
+    okay1
+  }, list( .elambda = elambda, .emu = emu, .esigma = esigma,
+           .llambda = llambda, .lmu = lmu,
+           .lsigma = lsigma ))),
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
-    mymu <- eta2theta(eta[, 2], .lmu, earg = .emu)
-    sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <- eta2theta(eta[, 2], .lmu     , earg = .emu     )
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
     dlambda.deta <- dtheta.deta(lambda, link = .llambda, earg = .elambda)
     dmu.deta <- dtheta.deta(mymu, link = .lmu, earg = .emu)
     dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
 
     psi <- yeo.johnson(y, lambda)
     d1 <- yeo.johnson(y, lambda, deriv = 1)
-    AA <- (psi - mymu) / sigma 
+    AA <- (psi - mymu) / sigma
     dl.dlambda <- -AA * d1 /sigma + sign(y) * log1p(abs(y))
-    dl.dmu <- AA / sigma 
+    dl.dmu <- AA / sigma
     dl.dsigma <- (AA^2 -1) / sigma
     dthetas.detas <- cbind(dlambda.deta, dmu.deta, dsigma.deta)
     c(w) * cbind(dl.dlambda, dl.dmu, dl.dsigma) * dthetas.detas
-  }), list( .elambda = elambda, .emu = emu, .esigma = esigma, 
+  }), list( .elambda = elambda, .emu = emu, .esigma = esigma,
             .llambda = llambda, .lmu = lmu,
                .lsigma = lsigma ))),
   weight = eval(substitute(expression({
@@ -906,9 +938,9 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
         psi <- rnorm(n, mymu, sigma)
         ysim <- yeo.johnson(y=psi, lam=lambda, inv = TRUE)
         d1 <- yeo.johnson(ysim, lambda, deriv = 1)
-        AA <- (psi - mymu) / sigma 
+        AA <- (psi - mymu) / sigma
         dl.dlambda <- -AA * d1 /sigma + sign(ysim) * log1p(abs(ysim))
-        dl.dmu <- AA / sigma 
+        dl.dmu <- AA / sigma
         dl.dsigma <- (AA^2 -1) / sigma
         rm(ysim)
         temp3 <- cbind(dl.dlambda, dl.dmu, dl.dsigma)
@@ -932,7 +964,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
 
 
  lms.yjn <- function(percentiles = c(25, 50, 75),
-                    zero = c("lambda", "sigma"), 
+                    zero = c("lambda", "sigma"),
                     llambda = "identitylink",
                     lsigma = "loge",
                     idf.mu = 4,
@@ -999,7 +1031,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
         namesof("sigma",  .lsigma, earg = .esigma ,  short = TRUE))
 
     y.save <- y
-    yoff <- if (is.Numeric( .yoffset )) .yoffset else -median(y) 
+    yoff <- if (is.Numeric( .yoffset )) .yoffset else -median(y)
     extra$yoffset <- yoff
     y <- y + yoff
 
@@ -1071,7 +1103,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
     if (control$cdf) {
         post$cdf =
           cdf.lms.yjn(y + misc$yoffset,
-                      eta0 = matrix(c(lambda,mymu,sigma), 
+                      eta0 = matrix(c(lambda,mymu,sigma),
                       ncol = 3,
                       dimnames = list(dimnames(x)[[1]], NULL)))
     }
@@ -1102,17 +1134,27 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
   }, list( .esigma = esigma, .elambda = elambda,
            .lsigma = lsigma, .llambda = llambda))),
   vfamily = c("lms.yjn", "lmscreg"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <-           eta[, 2]
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
+    okay1 <- all(is.finite(mymu  )) &&
+             all(is.finite(sigma )) && all(0 < sigma) &&
+             all(is.finite(lambda))
+    okay1
+  }, list( .esigma = esigma, .elambda = elambda,
+           .lsigma = lsigma, .llambda = llambda))),
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta[, 1], .llambda, earg = .elambda)
-    mymu <- eta[, 2]
-    sigma <- eta2theta(eta[, 3], .lsigma, earg = .esigma)
+    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
+    mymu   <-           eta[, 2]
+    sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma  )
 
     psi <- yeo.johnson(y, lambda)
     d1 <- yeo.johnson(y, lambda, deriv = 1)
-    AA <- (psi - mymu) / sigma 
+    AA <- (psi - mymu) / sigma
 
     dl.dlambda <- -AA * d1 /sigma + sign(y) * log1p(abs(y))
-    dl.dmu <- AA / sigma 
+    dl.dmu <- AA / sigma
     dl.dsigma <- (AA^2 -1) / sigma
     dlambda.deta <- dtheta.deta(lambda, link = .llambda, earg = .elambda)
     dsigma.deta <- dtheta.deta(sigma, link = .lsigma, earg = .esigma)
@@ -1137,7 +1179,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
                  11.8437858379,16.2792578314,
                  21.996585812, 29.9206970123)
     glag.wts = c(0.308441115765, 0.401119929155, 0.218068287612,
-                 0.0620874560987, 0.00950151697517, 0.000753008388588, 
+                 0.0620874560987, 0.00950151697517, 0.000753008388588,
                  2.82592334963e-5,
                  4.24931398502e-7, 1.83956482398e-9, 9.91182721958e-13)
     } else {
@@ -1197,7 +1239,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
     if (FALSE) {
       AA <- (UU-LL)/2
       for (kk in seq_along(gleg.wts)) {
-        temp1 <- AA * gleg.wts[kk] 
+        temp1 <- AA * gleg.wts[kk]
         abscissae <- (UU+LL)/2 + AA * gleg.abs[kk]
         psi <- mymu + sqrt(2) * sigma * abscissae
         temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
@@ -1232,11 +1274,11 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
       psi <- mymu + sqrt(2) * sigma * abscissae   # abscissae = z
       temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
                                  derivative = 2)
-      wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + sgh.wts[kk] * 
+      wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + sgh.wts[kk] *
             gh.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
-      wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + sgh.wts[kk] * 
+      wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + sgh.wts[kk] *
             gh.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
-      wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + sgh.wts[kk] * 
+      wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + sgh.wts[kk] *
             gh.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
     }
 
@@ -1245,16 +1287,16 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
       abscissae <- sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
       psi <- mymu + sqrt(2) * sigma * abscissae
       temp9 <- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
-      temp9 <- cbind(temp9, 
+      temp9 <- cbind(temp9,
                     1 / (2 * sqrt((abscissae-discontinuity^2)^2 +
                     discontinuity^2) *
                     sqrt(pi) * sigma^2))
       temp7 <- temp1 * glag.wts[kk]
-      wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + temp7 * 
+      wz[,iam(1, 1, M)] <- wz[,iam(1, 1, M)] + temp7 *
           glag.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
-      wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + temp7 * 
+      wz[,iam(1, 2, M)] <- wz[,iam(1, 2, M)] + temp7 *
           glag.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
-      wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + temp7 * 
+      wz[,iam(1, 3, M)] <- wz[,iam(1, 3, M)] + temp7 *
           glag.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
     }
 
@@ -1448,10 +1490,16 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
   }), list( .lexpectile = lexpectile,
             .eexpectile = eexpectile, .parallel = parallel ))),
   vfamily = c("amlnormal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta, .lexpectile , earg = .eexpectile )
+    okay1 <- all(is.finite(mymu))
+    okay1
+  }, list( .lexpectile = lexpectile,
+           .eexpectile = eexpectile ))),
 
   deriv = eval(substitute(expression({
-    mymu <- eta2theta(eta, .lexpectile, earg = .eexpectile)
-    dexpectile.deta <- dtheta.deta(mymu, .lexpectile, earg = .eexpectile)
+    mymu <- eta2theta(eta, .lexpectile , earg = .eexpectile )
+    dexpectile.deta <- dtheta.deta(mymu, .lexpectile , earg = .eexpectile )
     myresid <- matrix(y,extra$n,extra$M) - cbind(mu)
     wor1 <- Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
                                    byrow = TRUE))
@@ -1595,6 +1643,11 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
     theta2eta(mu, link =  .link , earg = .earg )
   }, list( .link = link, .earg = earg ))),
   vfamily = c("amlpoisson"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     mymu <- eta2theta(eta, .link , earg = .earg )
     dexpectile.deta <- dtheta.deta(mymu, .link , earg = .earg )
@@ -1763,6 +1816,11 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
     theta2eta(mu, link =  .link , earg = .earg )
   }, list( .link = link, .earg = earg ))),
   vfamily = c("amlbinomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu & mymu < 1)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     mymu <- eta2theta(eta, .link , earg = .earg )
     use.mu <- mymu
@@ -1924,6 +1982,11 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
     theta2eta(mu, link =  .link , earg = .earg )
   }, list( .link = link, .earg = earg ))),
   vfamily = c("amlexponential"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(mymu)) && all(0 < mymu)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     mymu <- eta2theta(eta, .link , earg = .earg )
     bigy <- matrix(y,extra$n,extra$M)
@@ -2007,7 +2070,7 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -2043,7 +2106,7 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
       ans[index1] <- (1 + (kappa[index1])^2 *
                      (-expm1(exponent[index1]))) / (1+(kappa[index1])^2)
       }
-  } 
+  }
   indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
   ans
@@ -2057,7 +2120,7 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -2074,7 +2137,7 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
   temp5 <- kappa^2 / (1 + kappa^2)
   if (lower.tail) {
     if (log.p) {
-      ans <- exp(p) 
+      ans <- exp(p)
       index1 <- (exp(p) <= temp5)
       exponent <- exp(p[index1]) / temp5[index1]
       ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
@@ -2083,7 +2146,7 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
         (log1p((kappa[!index1])^2) +
            log(-expm1(p[!index1]))) / sqrt(2)
     } else {
-      ans <- p 
+      ans <- p
       index1 <- (p <= temp5)
       exponent <- p[index1] / temp5[index1]
       ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
@@ -2094,7 +2157,7 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
     }
   } else {
     if (log.p) {
-      ans <- -expm1(p) 
+      ans <- -expm1(p)
       index1 <- (-expm1(p)  <= temp5)
       exponent <- -expm1(p[index1]) / temp5[index1]
       ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
@@ -2103,8 +2166,8 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
         (log1p((kappa[!index1])^2) +
            p[!index1]) / sqrt(2)
     } else {
-      ans <- exp(log1p(-p)) 
-      index1 <- (p >= (1 / (1+kappa^2))) 
+      ans <- exp(log1p(-p))
+      index1 <- (p >= (1 / (1+kappa^2)))
       exponent <- exp(log1p(-p[index1])) / temp5[index1]
       ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
         log(exponent) / sqrt(2)
@@ -2116,7 +2179,7 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
 
   indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
-  ans  
+  ans
 }
 
 
@@ -2184,7 +2247,7 @@ qloglap <- function(p, location.ald = 0, scale.ald = 1,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -2207,12 +2270,12 @@ qloglap <- function(p, location.ald = 0, scale.ald = 1,
   if (lower.tail) {
     if (log.p) {
       ln.p <- p
-      ans <- ifelse((exp(ln.p) > Alpha / temp9), 
+      ans <- ifelse((exp(ln.p) > Alpha / temp9),
                     Delta * (-expm1(ln.p) * temp9 / Beta)^(-1/Alpha),
                     Delta * (exp(ln.p) * temp9 / Alpha)^(1/Beta))
       ans[ln.p > 0] <- NaN
     } else {
-      ans <- ifelse((p > Alpha / temp9), 
+      ans <- ifelse((p > Alpha / temp9),
                     Delta * exp((-1/Alpha) * (log1p(-p) + log(temp9/Beta))),
                     Delta * (p * temp9 / Alpha)^(1/Beta))
       ans[p <  0] <- NaN
@@ -2223,12 +2286,12 @@ qloglap <- function(p, location.ald = 0, scale.ald = 1,
   } else {
     if (log.p) {
       ln.p <- p
-      ans <- ifelse((-expm1(ln.p) > Alpha / temp9), 
+      ans <- ifelse((-expm1(ln.p) > Alpha / temp9),
                     Delta * (exp(ln.p) * temp9 / Beta)^(-1/Alpha),
                     Delta * (-expm1(ln.p) * temp9 / Alpha)^(1/Beta))
       ans[ln.p > 0] <- NaN
-    } else { 
-      ans <- ifelse((p < (temp9 - Alpha) / temp9), 
+    } else {
+      ans <- ifelse((p < (temp9 - Alpha) / temp9),
                     Delta * (p * temp9 / Beta)^(-1/Alpha),
                     Delta * exp((1/Beta)*(log1p(-p) + log(temp9/Alpha))))
       ans[p <  0] <- NaN
@@ -2250,10 +2313,10 @@ ploglap <- function(q, location.ald = 0, scale.ald = 1,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   NN <- max(length(q), length(location.ald), length(scale.ald),
             length(kappa))
   location <- rep_len(location.ald, NN)
@@ -2294,7 +2357,7 @@ ploglap <- function(q, location.ald = 0, scale.ald = 1,
       ans[q <= 0] <- 1
       ans[q == Inf] <- 0
     }
-  } 
+  }
 
   indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
@@ -2659,31 +2722,23 @@ alaplace2.control <- function(maxit = 100, ...) {
     ishrinkage < 0 ||
     ishrinkage > 1)
     stop("bad input for argument 'ishrinkage'")
-
   if (length(tau) &&
       max(abs(kappa - sqrt(tau / (1 - tau)))) > 1.0e-6)
     stop("arguments 'kappa' and 'tau' do not match")
 
 
 
-
-
   fittedMean <- FALSE
   if (!is.logical(fittedMean) || length(fittedMean) != 1)
     stop("bad input for argument 'fittedMean'")
 
 
 
-
-
   new("vglmff",
   blurb = c("Two-parameter asymmetric Laplace distribution\n\n",
             "Links:      ",
-            namesof("location1", llocat, earg = elocat), ", ",
-            namesof("scale1",    lscale, earg = escale), ", ",
-            namesof("location2", llocat, earg = elocat), ", ",
-            namesof("scale2",    lscale, earg = escale),
-            ", ..., ",
+            namesof("location",  llocat, earg = elocat), ", ",
+            namesof("scale",     lscale, earg = escale),  # ", ",
             "\n\n",
             "Mean:       ",
             "location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
@@ -2694,10 +2749,8 @@ alaplace2.control <- function(maxit = 100, ...) {
 
 
   constraints = eval(substitute(expression({
- 
 
- print("Mdiv2")
- print( Mdiv2 )
+
     onemat <- matrix(1, Mdiv2, 1)
     constraints.orig <- constraints
 
@@ -2707,12 +2760,11 @@ alaplace2.control <- function(maxit = 100, ...) {
     con.locat <- cm.VGAM(cmk.locat,
                          x = x, bool = .parallel.locat ,
                          constraints = constraints.orig,
+                         apply.int = .apply.parint.locat ,
                          cm.default           = cm1.locat,
                          cm.intercept.default = cm1.locat)
- print("con.locat[[1]]")
- print( con.locat[[1]] )
 
-    
+
 
     cm1.scale <- kronecker(diag(Mdiv2), rbind(0, 1))
     cmk.scale <- kronecker(onemat,      rbind(0, 1))
@@ -2722,25 +2774,19 @@ alaplace2.control <- function(maxit = 100, ...) {
                          apply.int = .apply.parint.scale ,
                          cm.default           = cm1.scale,
                          cm.intercept.default = cm1.scale)
- print("con.scale[[1]],,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,")
- print( con.scale[[1]] )
-   
+
     con.use <- con.scale
     for (klocal in seq_along(con.scale)) {
       con.use[[klocal]] <- cbind(con.locat[[klocal]],
                                  con.scale[[klocal]])
     }
- print("con.use[[1]],,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,")
- print( con.use[[1]] )
 
-    
+
     constraints <- con.use
 
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
-                                M1 = 2)
- print("names(constraints),,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,")
- print( names(constraints) )
+                                M1 = M1)
   }), list( .parallel.locat = parallel.locat,
             .parallel.scale = parallel.scale,
             .zero = zero,
@@ -2752,13 +2798,21 @@ alaplace2.control <- function(maxit = 100, ...) {
     list(M1 = 2,
          Q1 = 1,
          summary.pvalues = FALSE,
-         multipleResponses = FALSE,
-         parameters.names = c("location1", "scale1", "location2", "scale2"),
-         zero = .zero )
-  }, list( .zero = zero ))),
-  initialize = eval(substitute(expression({
-    extra$M1 <- M1 <- 2
+         expected = TRUE,   # 20161117
+         multipleResponses = TRUE,  # FALSE,
+         parameters.names = c("location", "scale"),
+         true.mu = .fittedMean ,
+         zero = .zero ,
+         tau  = .tau ,
+         kappa = .kappa )
+  }, list( .tau   = tau,
+           .kappa = kappa,
+           .fittedMean = fittedMean,
+           .zero = zero ))),
+
 
+  initialize = eval(substitute(expression({
+    M1 <- 2
 
     temp5 <-
     w.y.check(w = w, y = y,
@@ -2801,8 +2855,8 @@ alaplace2.control <- function(maxit = 100, ...) {
     mynames1 <- param.names("location", Mdiv2)
     mynames2 <- param.names("scale",    Mdiv2)
     predictors.names <-
-        c(namesof(mynames1, .llocat , earg = .elocat, tag = FALSE),
-          namesof(mynames2, .lscale , earg = .escale, tag = FALSE))
+        c(namesof(mynames1, .llocat , earg = .elocat , tag = FALSE),
+          namesof(mynames2, .lscale , earg = .escale , tag = FALSE))
     predictors.names <-
     predictors.names[interleave.VGAM(M, M1 = M1)]
 
@@ -2831,7 +2885,8 @@ alaplace2.control <- function(maxit = 100, ...) {
                                         sum(w[, Jay]) * 2))
         } else {
           use.this <- weighted.mean(y.use, w[, Jay])
-          locat.init[, jay] <- (1 - .ishrinkage ) * y.use + .ishrinkage * use.this
+          locat.init[, jay] <- (1 - .ishrinkage ) * y.use +
+                                    .ishrinkage   * use.this
           scale.init[, jay] <-
             sqrt(sum(c(w[, Jay]) *
             abs(y.use - median(y.use ))) / (sum(w[, Jay]) * 2))
@@ -2858,17 +2913,17 @@ alaplace2.control <- function(maxit = 100, ...) {
             .elocat = elocat, .escale = escale,
             .llocat = llocat, .lscale = lscale, .kappa = kappa,
             .ilocat = ilocat, .iscale = iscale ))),
+
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    Mdiv2 <- extra$Mdiv2
     M1 <- 2
-    locat <- eta2theta(eta[, M1 * (1:Mdiv2) - 1, drop = FALSE],
-                      .llocat , earg = .elocat )
+    Mdiv2 <- ncol(eta) / M1  # extra$Mdiv2
+    vTF <- c(TRUE, FALSE)
+    locat <- eta2theta(eta[,  vTF, drop = FALSE], .llocat , earg = .elocat )
     dimnames(locat) <- list(dimnames(eta)[[1]], extra$y.names)
     myans <- if ( .fittedMean ) {
       kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2,
                          byrow = TRUE)
-      Scale <- eta2theta(eta[, M1 * (1:Mdiv2)    , drop = FALSE],
-                         .lscale , earg = .escale )
+      Scale <- eta2theta(eta[, !vTF, drop = FALSE], .lscale , earg = .escale )
       locat + Scale * (1/kappamat - kappamat)
     } else {
       locat
@@ -2880,16 +2935,16 @@ alaplace2.control <- function(maxit = 100, ...) {
            .fittedMean = fittedMean,
            .kappa = kappa ))),
   last = eval(substitute(expression({
-    M1 <- extra$M1
+    M1 <- 2  # extra$M1
+    Mdiv2 <- ncol(eta) / M1  # extra$Mdiv2
 
     tmp34 <- c(rep_len( .llocat , Mdiv2),
                rep_len( .lscale , Mdiv2))
-    names(tmp34) <- c(mynames1, mynames2) 
+    names(tmp34) <- c(mynames1, mynames2)
     tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
     misc$link <- tmp34  # Already named
 
     misc$earg <- vector("list", M)
-    misc$M1 <- M1
     for (ii in 1:Mdiv2) {
       misc$earg[[M1 * ii - 1]] <- .elocat
       misc$earg[[M1 * ii    ]] <- .escale
@@ -2897,11 +2952,8 @@ alaplace2.control <- function(maxit = 100, ...) {
     names(misc$earg) <- names(misc$link)
 
 
-    misc$multipleResponses <- TRUE
-    misc$expected <- TRUE
     extra$kappa <- misc$kappa <- .kappa
     extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
-    misc$true.mu <- .fittedMean  # @fitted is not a true mu?
 
     extra$percentile <- numeric(Mdiv2)  # length(misc$kappa)
     locat <- as.matrix(locat)
@@ -2922,14 +2974,13 @@ alaplace2.control <- function(maxit = 100, ...) {
              extra = NULL,
              summation = TRUE) {
     M1 <- 2
-    Mdiv2 <- extra$Mdiv2
+    Mdiv2 <- ncol(eta) / M1  # extra$Mdiv2
     ymat <- matrix(y, extra$n, extra$Mdiv2)
     kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
 
-    locat <- eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
-                       .llocat , earg = .elocat )
-    Scale <- eta2theta(eta[, 2 * (1:Mdiv2)    , drop = FALSE],
-                       .lscale , earg = .escale )
+    vTF <- c(TRUE, FALSE)
+    locat <- eta2theta(eta[,  vTF, drop = FALSE], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, !vTF, drop = FALSE], .lscale , earg = .escale )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -2946,6 +2997,16 @@ alaplace2.control <- function(maxit = 100, ...) {
            .escale = escale, .lscale = lscale,
            .kappa = kappa ))),
   vfamily = c("alaplace2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    vTF <- c(TRUE, FALSE)
+    locat <- eta2theta(eta[,  vTF, drop = FALSE], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, !vTF, drop = FALSE], .lscale , earg = .escale )
+    okay1 <- all(is.finite(locat)) &&
+             all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .elocat = elocat, .llocat = llocat,
+           .escale = escale, .lscale = lscale,
+           .kappa = kappa ))),
 
 
 
@@ -2955,12 +3016,13 @@ alaplace2.control <- function(maxit = 100, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     extra    <- object at extra
-    locat    <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , .elocat )
-    Scale    <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    vTF <- c(TRUE, FALSE)
+    locat <- eta2theta(eta[,  vTF, drop = FALSE], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, !vTF, drop = FALSE], .lscale , earg = .escale )
     kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
     ralap(nsim * length(Scale), location = c(locat),
           scale = c(Scale), kappa = c(kappamat))
@@ -2975,15 +3037,11 @@ alaplace2.control <- function(maxit = 100, ...) {
 
   deriv = eval(substitute(expression({
     M1 <- 2
-    Mdiv2 <- extra$Mdiv2
+    Mdiv2 <- ncol(eta) / M1  # extra$Mdiv2
     ymat <- matrix(y, n, Mdiv2)
-
-    locat <- eta2theta(eta[, M1 * (1:(Mdiv2)) - 1, drop = FALSE],
-                      .llocat , earg = .elocat )
-    Scale <- eta2theta(eta[, M1 * (1:(Mdiv2))    , drop = FALSE],
-                      .lscale , earg = .escale )
-
-
+    vTF <- c(TRUE, FALSE)
+    locat <- eta2theta(eta[,  vTF, drop = FALSE], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, !vTF, drop = FALSE], .lscale , earg = .escale )
     kappamat <- matrix(extra$kappa, n, Mdiv2, byrow = TRUE)
     zedd <- abs(ymat - locat) / Scale
     dl.dlocat <- sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
@@ -2995,20 +3053,16 @@ alaplace2.control <- function(maxit = 100, ...) {
 
     ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
                         dl.dscale * dscale.deta)
-    ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
-    ans
+    ans[, interleave.VGAM(ncol(ans), M1 = M1)]
   }), list( .escale = escale, .lscale = lscale,
             .elocat = elocat, .llocat = llocat,
             .kappa = kappa ))),
   weight = eval(substitute(expression({
     wz <- matrix(NA_real_, n, M)
-
     d2l.dlocat2 <- 2 / Scale^2
     d2l.dscale2 <- 1 / Scale^2
-
-    wz[, M1*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
-    wz[, M1*(1:Mdiv2)    ] <- d2l.dscale2 * dscale.deta^2
-
+    wz[,  vTF] <- d2l.dlocat2 * dlocat.deta^2
+    wz[, !vTF] <- d2l.dscale2 * dscale.deta^2
     c(w) * wz
   }), list( .escale = escale, .lscale = lscale,
             .elocat = elocat, .llocat = llocat ))))
@@ -3055,7 +3109,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
 
 
-  
+
   if (!is.Numeric(kappa, positive = TRUE))
     stop("bad input for argument 'kappa'")
   if (length(tau) &&
@@ -3121,8 +3175,8 @@ alaplace1.control <- function(maxit = 100, ...) {
                          apply.int = .apply.parint.locat ,
                          cm.default           = cm1.locat,
                          cm.intercept.default = cm1.locat)
-   
-    
+
+
     constraints <- con.locat
 
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -3250,7 +3304,7 @@ alaplace1.control <- function(maxit = 100, ...) {
     misc$multipleResponses <- TRUE
 
     tmp34 <- c(rep_len( .llocat , M))
-    names(tmp34) <- mynames1 
+    names(tmp34) <- mynames1
     misc$link <- tmp34 # Already named
 
     misc$earg <- vector("list", M)
@@ -3306,6 +3360,13 @@ alaplace1.control <- function(maxit = 100, ...) {
            .llocat = llocat,
            .Scale.arg = Scale.arg, .kappa = kappa ))),
   vfamily = c("alaplace1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    okay1 <- all(is.finite(locat))
+    okay1
+  }, list( .elocat = elocat,
+           .llocat = llocat,
+           .Scale.arg = Scale.arg, .kappa = kappa ))),
 
 
   simslot = eval(substitute(
@@ -3313,7 +3374,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     extra    <- object at extra
@@ -3429,7 +3490,7 @@ alaplace3.control <- function(maxit = 100, ...) {
 
 
 
-    predictors.names <- 
+    predictors.names <-
       c(namesof("location", .llocat , earg = .elocat, tag = FALSE),
         namesof("scale",    .lscale , earg = .escale, tag = FALSE),
         namesof("kappa",    .lkappa , earg = .ekappa, tag = FALSE))
@@ -3503,10 +3564,21 @@ alaplace3.control <- function(maxit = 100, ...) {
            .escale = escale, .lscale = lscale,
            .ekappa = ekappa, .lkappa = lkappa ))),
   vfamily = c("alaplace3"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    kappa <- eta2theta(eta[, 3], .lkappa , earg = .ekappa )
+    okay1 <- all(is.finite(locat)) &&
+             all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(kappa)) && all(0 < kappa)
+    okay1
+  }, list( .elocat = elocat, .llocat = llocat,
+           .escale = escale, .lscale = lscale,
+           .ekappa = ekappa, .lkappa = lkappa ))),
   deriv = eval(substitute(expression({
     locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
-    kappa <- eta2theta(eta[, 3], .lkappa, earg = .ekappa)
+    kappa <- eta2theta(eta[, 3], .lkappa , earg = .ekappa )
 
     zedd <- abs(y - locat) / Scale
     dl.dlocat <- sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
@@ -3515,7 +3587,7 @@ alaplace3.control <- function(maxit = 100, ...) {
                  zedd / Scale - 1 / Scale
     dl.dkappa <-  1 / kappa - 2 * kappa / (1+kappa^2) -
                  (sqrt(2) / Scale) *
-                 ifelse(y > locat, 1, -1/kappa^2) * abs(y-locat)  
+                 ifelse(y > locat, 1, -1/kappa^2) * abs(y-locat)
 
     dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
     dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
@@ -3572,7 +3644,7 @@ plaplace <- function(q, location = 0, scale = 1,
     stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   L <- max(length(q), length(location), length(scale))
   if (length(q)        != L) q        <- rep_len(q,        L)
   if (length(location) != L) location <- rep_len(location, L)
@@ -3602,7 +3674,7 @@ qlaplace <- function(p, location = 0, scale = 1,
                      lower.tail = TRUE, log.p = FALSE) {
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -3627,14 +3699,14 @@ qlaplace <- function(p, location = 0, scale = 1,
       ans <- location - sign(0.5 - exp(ln.p)) * scale *
              log(2 * ifelse(-expm1(ln.p) < 0.5, -expm1(ln.p), exp(ln.p)))
      # ans[ln.p > 0] <- NaN
-    } else { 
+    } else {
       ans <- location - sign(0.5 - p) * scale *
              log(2 * ifelse(p > 0.5, 1 - p, p))
     }
   }
 
   ans[scale <= 0] <- NaN
-  ans  
+  ans
 }
 
 
@@ -3662,7 +3734,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
 
 
 
-  
+
  laplace <- function(llocation = "identitylink", lscale = "loge",
                      ilocation = NULL, iscale = NULL,
                      imethod = 1,
@@ -3722,7 +3794,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
 
 
 
-    predictors.names <- 
+    predictors.names <-
       c(namesof("location", .llocat , earg = .elocat, tag = FALSE),
         namesof("scale",    .lscale , earg = .escale, tag = FALSE))
 
@@ -3785,6 +3857,14 @@ rlaplace <- function(n, location = 0, scale = 1) {
   }, list( .escale = escale, .lscale = lscale,
            .elocat = elocat, .llocat = llocat ))),
   vfamily = c("laplace"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    okay1 <- all(is.finite(Locat)) &&
+             all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .escale = escale, .lscale = lscale,
+           .elocat = elocat, .llocat = llocat ))),
   deriv = eval(substitute(expression({
     Locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
@@ -3936,8 +4016,15 @@ fff.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg, .ncp=ncp ))),
+  }, list( .link = link, .earg = earg, .ncp = ncp ))),
   vfamily = c("fff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    df1 <- eta2theta(eta[, 1], .link , earg = .earg )
+    df2 <- eta2theta(eta[, 2], .link , earg = .earg )
+    okay1 <- all(is.finite(df1)) && all(0 < df1) &&
+             all(is.finite(df2)) && all(0 < df2)
+    okay1
+  }, list( .link = link, .earg = earg, .ncp = ncp ))),
   deriv = eval(substitute(expression({
     df1 <- eta2theta(eta[, 1], .link , earg = .earg )
     df2 <- eta2theta(eta[, 2], .link , earg = .earg )
@@ -3945,7 +4032,7 @@ fff.control <- function(save.weights = TRUE, ...) {
               0.5*log(y) - 0.5*digamma(0.5*df1) -
               0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
               0.5*log1p(df1*y/df2)
-    dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 - 
+    dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
               0.5*digamma(0.5*df2) -
               0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
               0.5*log1p(df1*y/df2)
@@ -3963,7 +4050,7 @@ fff.control <- function(save.weights = TRUE, ...) {
                 0.5*log(ysim) - 0.5*digamma(0.5*df1) -
                 0.5*(df1+df2)*(ysim/df2) / (1 + df1*ysim/df2) -
                 0.5*log1p(df1*ysim/df2)
-      dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 - 
+      dl.ddf2 <- 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
                 0.5*digamma(0.5*df2) -
                 0.5*(df1+df2) * (-df1*ysim/df2^2)/(1 + df1*ysim/df2) -
                 0.5*log1p(df1*ysim/df2)
@@ -4046,14 +4133,14 @@ fff.control <- function(save.weights = TRUE, ...) {
 
     }
   }), list( .lprob = lprob, .earg = earg, .N = N, .D = D,
-            .iprob = iprob ))), 
+            .iprob = iprob ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta, .lprob, earg = .earg )
+    eta2theta(eta, .lprob , earg = .earg )
   }, list( .lprob = lprob, .earg = earg ))),
   last = eval(substitute(expression({
-    misc$link <-    c("prob" = .lprob) 
+    misc$link <-    c("prob" = .lprob)
 
-    misc$earg <- list("prob" = .earg ) 
+    misc$earg <- list("prob" = .earg )
 
     misc$Dvector <- .D
     misc$Nvector <- .N
@@ -4091,8 +4178,13 @@ fff.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .lprob = lprob, .earg = earg ))), 
+  }, list( .lprob = lprob, .earg = earg ))),
   vfamily = c("hyperg"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    prob <- eta2theta(eta, .lprob , earg = .earg )
+    okay1 <- all(is.finite(prob)) && all(0 < prob & prob < 1)
+    okay1
+  }, list( .lprob = lprob, .earg = earg ))),
   deriv = eval(substitute(expression({
     prob <- mu   # equivalently, eta2theta(eta, .lprob, earg = .earg )
     dprob.deta <- dtheta.deta(prob, .lprob, earg = .earg )
@@ -4117,7 +4209,7 @@ fff.control <- function(save.weights = TRUE, ...) {
     if (extra$Nunknown) {
       tmp722 <- tmp72^2
       tmp13 <- 2*Dvec / prob^3
-      d2l.dprob2 <- tmp722 * (trigamma(1 + tmp12) + 
+      d2l.dprob2 <- tmp722 * (trigamma(1 + tmp12) +
                    trigamma(1 + Dvec/prob - w) -
                    trigamma(1 + tmp12 - w + yvec) -
                    trigamma(1 + Dvec/prob)) +
@@ -4202,7 +4294,7 @@ pbenini <- function(q, y0, shape, lower.tail = TRUE, log.p = FALSE) {
       ans[ok] <- exp(-shape[ok] * (log(q[ok]/y0[ok]))^2)
       ans[q <= y0] <- 1
     }
-  } 
+  }
 
   ans
 }
@@ -4216,7 +4308,7 @@ qbenini <- function(p, y0, shape, lower.tail = TRUE, log.p = FALSE) {
     stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   if (lower.tail) {
     if (log.p) {
       ln.p <- p
@@ -4228,7 +4320,7 @@ qbenini <- function(p, y0, shape, lower.tail = TRUE, log.p = FALSE) {
     if (log.p) {
       ln.p <- p
       ans <- y0 * exp(sqrt(-ln.p / shape))
-    } else { 
+    } else {
       ans <-  y0 * exp(sqrt(-log(p) / shape))
     }
   }
@@ -4375,6 +4467,11 @@ rbenini <- function(n, y0, shape) {
     }
   }, list( .lshape = lshape, .eshape = eshape ))),
   vfamily = c("benini1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
 
 
 
@@ -4385,7 +4482,7 @@ rbenini <- function(n, y0, shape) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     extra <- object at extra
@@ -4425,7 +4522,7 @@ rbenini <- function(n, y0, shape) {
     if (abs(x) > floor(x)) { # zero prob for -ve or non-integer
       0
     } else
-    if (x == Inf) { # 20141215 KaiH  
+    if (x == Inf) { # 20141215 KaiH
       0
     } else
     if (x > bigx) {
@@ -4549,10 +4646,10 @@ qtriangle <- function(p, theta, lower = 0, upper = 1,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  
+
   N <- max(length(p), length(theta), length(lower), length(upper))
   if (length(p)     != N) p     <- rep_len(p,     N)
   if (length(theta) != N) theta <- rep_len(theta, N)
@@ -4582,7 +4679,7 @@ qtriangle <- function(p, theta, lower = 0, upper = 1,
       Pos <- (exp(ln.p) <= (upper- theta) / (upper - lower))
       pstar <- (-expm1(ln.p) - (theta - lower) / (upper - lower)) /
                ((upper - theta) / (upper - lower))
-    } else { 
+    } else {
       Neg <- (p >= (upper- theta) / (upper - lower))
       temp1 <- (1 - p) * (upper - lower) * (theta - lower)
       Pos <- (p <= (upper- theta) / (upper - lower))
@@ -4599,14 +4696,14 @@ qtriangle <- function(p, theta, lower = 0, upper = 1,
                     qstar[, 2])
     ans[Pos] <- theta[Pos] + qstar * (upper - theta)[Pos]
   }
-  
+
   ans[theta < lower | theta > upper] <- NaN
   ans
 }
 
 
 
-ptriangle <- function(q, theta, lower = 0, upper = 1, 
+ptriangle <- function(q, theta, lower = 0, upper = 1,
                       lower.tail = TRUE, log.p = FALSE) {
 
   N <- max(length(q), length(theta), length(lower), length(upper))
@@ -4617,7 +4714,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
     stop("bad input for argument 'lower.tail'")
-  
+
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -4638,11 +4735,11 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
     } else {
       1 - qstar[Neg]
     }
-  } 
+  }
 
   Pos <- (theta <= q & q <= upper)
   qstar <- (q - theta) / (upper-theta)
-  
+
   if (lower.tail) {
     if (log.p) {
       ans[Pos] <- log(((theta-lower)/(upper-lower))[Pos] +
@@ -4667,7 +4764,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
       ans[q <= lower] <- 1
       ans[q >= upper] <- 0
     }
-  } 
+  }
 
   ans[theta < lower | theta > upper] <- NaN
   ans
@@ -4763,7 +4860,7 @@ triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
     lower <- extra$lower
     upper <- extra$upper
 
-    mu1<-  (lower + upper + Theta) / 3
+    mu1 <- (lower + upper + Theta) / 3
 
     mu1
   }, list( .link = link, .earg = earg ))),
@@ -4793,6 +4890,12 @@ triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("triangle"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Theta <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(Theta)) &&
+             all(extra$lower < Theta & Theta < extra$upper)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
 
@@ -4801,7 +4904,7 @@ triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     extra <- object at extra
@@ -5100,6 +5203,14 @@ loglaplace1.control <- function(maxit = 300, ...) {
            .rep0 = rep0,
            .Scale.arg = Scale.arg, .kappa = kappa ))),
   vfamily = c("loglaplace1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat.w <- eta
+    locat.y <- eta2theta(locat.w, .llocat , earg = .elocat )
+    okay1 <- all(is.finite(locat.y))
+    okay1
+  }, list( .elocat = elocat, .llocat = llocat,
+           .rep0 = rep0,
+           .Scale.arg = Scale.arg, .kappa = kappa ))),
   deriv = eval(substitute(expression({
     ymat <- matrix(y, n, M)
     Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
@@ -5273,7 +5384,7 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
       paste("tau = ", round(extra$tau, digits = .digt), sep = "")
     extra$individual = FALSE
 
-    predictors.names <- 
+    predictors.names <-
         c(namesof(paste("quantile(", y.names, ")", sep = ""),
                   .llocat , earg = .elocat, tag = FALSE),
           namesof(if (M == 2) "scale" else
@@ -5388,11 +5499,22 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
 
 
   vfamily = c("loglaplace2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale.w <- eta2theta(eta[, (1+extra$M/2):extra$M],
+                        .lscale , earg = .escale )
+    locat.w <- eta[, 1:(extra$M/2), drop = FALSE]
+    locat.y <- eta2theta(locat.w, .llocat , earg = .elocat )
+    okay1 <- all(is.finite(locat.y)) &&
+             all(is.finite(Scale.w)) && all(0 < Scale.w)
+    okay1
+  }, list( .elocat = elocat, .llocat = llocat,
+           .escale = escale, .lscale = lscale,
+           .rep0 = rep0, .kappa = kappa ))),
   deriv = eval(substitute(expression({
     ymat <- matrix(y, n, M/2)
-    Scale.w <- eta2theta(eta[,(1+extra$M/2):extra$M],
+    Scale.w <- eta2theta(eta[, (1+extra$M/2):extra$M],
                         .lscale , earg = .escale )
-    locat.w <- eta[,1:(extra$M/2), drop = FALSE]
+    locat.w <- eta[, 1:(extra$M/2), drop = FALSE]
     locat.y <- eta2theta(locat.w, .llocat , earg = .elocat )
     kappamat <- matrix(extra$kappa, n, M/2, byrow = TRUE)
     w.mat <- ymat
@@ -5703,22 +5825,34 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
 
 
   vfamily = c("logitlaplace1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat.w <- eta
+    okay1 <- all(is.finite(locat.w))
+    okay1
+  }, list( .Scale.arg = Scale.arg, .rep01 = rep01,
+           .elocat = elocat,
+           .llocat = llocat,
+
+           .elocat.identity = elocat.identity,
+           .llocat.identity = llocat.identity,
+
+           .kappa = kappa ))),
   deriv = eval(substitute(expression({
     ymat <- matrix(y, n, M)
     Scale.w <- matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
     locat.w <- eta
     kappamat <- matrix(extra$kappa, n, M, byrow = TRUE)
     ymat <- adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
-                                  rep01 = .rep01)
+                                   rep01 = .rep01 )
     w.mat <- theta2eta(ymat, .llocat , earg = .elocat )  # e.g., logit()
-    zedd <- abs(w.mat-locat.w) / Scale.w
+    zedd <- abs(w.mat - locat.w) / Scale.w
     dl.dlocat <- ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
-                   sqrt(2) * sign(w.mat-locat.w) / Scale.w
+                 sqrt(2) * sign(w.mat-locat.w) / Scale.w
 
 
     dlocat.deta <- dtheta.deta(locat.w,
-                              "identitylink",
-                              earg = .elocat.identity )
+                               "identitylink",
+                               earg = .elocat.identity )
 
 
     c(w) * cbind(dl.dlocat * dlocat.deta)
diff --git a/R/family.rcim.R b/R/family.rcim.R
index eddda91..fed70ae 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -44,7 +44,7 @@
 
 
            ...) {
-                           
+
 
 
 
@@ -208,9 +208,9 @@
   str1 <- paste(if (has.intercept) "~ 1 + " else "~ -1 + ", rprefix,
                 as.character(min.row.val),  # "2",
                 sep = "")
-  
 
-  if (nrow(y) > 2) 
+
+  if (nrow(y) > 2)
     str1 <- paste(str1,
                   paste(rprefix, rindex[-1], sep = "", collapse = " + "),
                   sep = " + ")
@@ -320,7 +320,7 @@
   answer <- if (summary.arg) {
     if (Rank > 0) {
       summary.rrvglm(as(answer, "rrvglm"), h.step = h.step)
-    } else { 
+    } else {
       summary(answer)
     }
   } else {
@@ -385,7 +385,7 @@ setMethod("summary", "rcim",
   mat <- as.matrix(mat)
   RRR <- dim(mat)[1]
   CCC <- dim(mat)[2]
-    
+
   rnames <- if (is.null(rownames(mat))) {
     paste("X", 1:RRR, sep = "")
   } else {
@@ -398,13 +398,13 @@ setMethod("summary", "rcim",
     colnames(mat)
   }
 
-  r.index <- if (is.character(rbaseline))  
+  r.index <- if (is.character(rbaseline))
                which(rownames(mat) == rbaseline) else
                      if (is.numeric(rbaseline)) rbaseline else
-                         stop("argement 'rbaseline' must be numeric", 
+                         stop("argement 'rbaseline' must be numeric",
                                "or character of the level of row")
- 
-  c.index <- if (is.character(cbaseline))  
+
+  c.index <- if (is.character(cbaseline))
                which(colnames(mat) == cbaseline) else
                      if (is.numeric(cbaseline)) cbaseline else
                          stop("argement 'cbaseline' must be numeric",
@@ -424,11 +424,11 @@ setMethod("summary", "rcim",
 
   new.rnames <- rnames[c(r.index:RRR,
                          if (r.index > 1) 1:(r.index - 1) else NULL)]
-  new.cnames <- cnames[c(c.index:CCC, 
+  new.cnames <- cnames[c(c.index:CCC,
                          if (c.index > 1) 1:(c.index - 1) else NULL)]
   colnames(yswap) <- new.cnames
   rownames(yswap) <- new.rnames
-  
+
   yswap
 }
 
@@ -461,7 +461,7 @@ setMethod("summary", "rcim",
      no.warning = FALSE,
      ...) {
 
- 
+
   nparff <- if (is.numeric(object at family@infos()$M1)) {
     object at family@infos()$M1
   } else {
@@ -491,16 +491,16 @@ setMethod("summary", "rcim",
 
 
   orig.raxisl  <- rownames(object at y)
-  orig.caxisl  <- colnames(object at y) 
+  orig.caxisl  <- colnames(object at y)
   if (is.null(orig.raxisl))
     orig.raxisl <- as.character(1:nrow(object at y))
   if (is.null(orig.caxisl))
     orig.caxisl <- as.character(1:ncol(object at y))
-    
-  roweff.orig <- 
+
+  roweff.orig <-
   roweff <- orig.roweff[c(rfirst:last.r,
                           if (rfirst > 1) 1:(rfirst-1) else NULL)]
-  coleff.orig <- 
+  coleff.orig <-
   coleff <- orig.coleff[c(cfirst:last.c,
                           if (cfirst > 1) 1:(cfirst-1) else NULL)]
 
@@ -512,17 +512,17 @@ setMethod("summary", "rcim",
   raxisl <- orig.raxisl[c(rfirst:last.r,
                           if (rfirst > 1) 1:(rfirst-1) else NULL)]
 
-  caxisl <- orig.caxisl[c(cfirst:last.c, 
+  caxisl <- orig.caxisl[c(cfirst:last.c,
                           if (cfirst > 1) 1:(cfirst-1) else NULL)]
 
 
   if (any(which.plots == 1, na.rm = TRUE)) {
-    plot(roweff, type = rtype, 
+    plot(roweff, type = rtype,
          axes = FALSE, col = rcol, main = rmain,
          sub  = rsub, xlab = rxlab, ylab = rylab, ...)
 
     axis(1, at = seq_along(raxisl),
-         cex.lab = rcex.lab,  
+         cex.lab = rcex.lab,
          cex.axis = rcex.axis,
          labels = raxisl)
     axis(2, cex.lab = rcex.lab, ...)  # las = rlas)
@@ -533,7 +533,7 @@ setMethod("summary", "rcim",
 
 
   if (any(which.plots == 2, na.rm = TRUE)) {
-    plot(coleff, type = ctype, 
+    plot(coleff, type = ctype,
          axes = FALSE, col = ccol, main = cmain,  # lwd = 2, xpd = FALSE,
          sub  = csub, xlab = cxlab, ylab = cylab, ...)
 
@@ -542,7 +542,7 @@ setMethod("summary", "rcim",
          cex.axis = ccex.axis,
          labels = caxisl)
     axis(2, cex.lab = ccex.lab, ...)  # las = clas)
-    
+
     if (hline0)
       abline(h = 0, lty = hlty, col = hcol, lwd = hlwd)
   }
@@ -773,7 +773,8 @@ Confint.nb1 <- function(nb1, level = 0.95) {
 
 plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
                     se.eachway = c(5, 5),  # == c(LHS, RHS),
-                    trace.arg = TRUE, ...) {
+                    trace.arg = TRUE,
+                    lwd = 2, ...) {
 
 
 
@@ -859,7 +860,7 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
            col = "darkorange", lty = "dashed")
 
     abline(v = a21.hat +  c(-1, 1) * 1.96 * SE.a21.hat,
-           col = "gray50", lty = "dashed", lwd = 2.0)
+           col = "gray50", lty = "dashed", lwd = lwd)
 
   }  # End of (show.plot)
 
@@ -1362,8 +1363,8 @@ qvplot   <-  function(object,
 
 
   if (any(slotNames(object) == "post")) {
-    object at post$estimates  <- estimates 
-    object at post$xvalues    <- xvalues  
+    object at post$estimates  <- estimates
+    object at post$xvalues    <- xvalues
     if (is.numeric(interval.width)) {
       object at post$tails <- tails
       object at post$tops  <- tops
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index 316a5b4..dc77345 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -398,7 +398,7 @@ getInitVals <- function(gvals, llfun, ...) {
   ff <- function(myx, ...) LLFUN(myx, ...)
   objFun <- gvals
   for (ii in seq_along(gvals))
-    objFun[ii] <- ff(myx = gvals[ii], ...) 
+    objFun[ii] <- ff(myx = gvals[ii], ...)
   try.this <- gvals[objFun == max(objFun)]  # Usually scalar, maybe vector
   try.this
 }
diff --git a/R/family.robust.R b/R/family.robust.R
index 8b7f37d..fd3a644 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -97,14 +97,14 @@ qhuber <- function (p, k = 0.862, mu = 0, sigma = 1,
                     lower.tail = TRUE, log.p = FALSE ) {
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
-    stop("bad input for argument 'lower.tail'") 
+    stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
   cnorm <- sqrt(2 * pi) * ((2 * pnorm(k) - 1) + 2 * dnorm(k) / k)
   if (lower.tail) {
     if (log.p) {
-      ln.p <- p 
+      ln.p <- p
       x <- pmin(exp(ln.p), -expm1(ln.p))
     } else {
       x <- pmin(p, 1 - p)
@@ -113,7 +113,7 @@ qhuber <- function (p, k = 0.862, mu = 0, sigma = 1,
     if (log.p) {
       ln.p <- p
       x <- pmin(-expm1(ln.p), exp(ln.p))
-    } else { 
+    } else {
       x <- pmin(1 - p, p)
     }
   }
@@ -129,7 +129,7 @@ qhuber <- function (p, k = 0.862, mu = 0, sigma = 1,
     } } else {
       if (log.p) {
         ifelse(exp(ln.p) > 0.5, mu + q * sigma, mu - q * sigma)
-      } else { 
+      } else {
         ifelse(p > 0.5, mu + q * sigma, mu - q * sigma)
       }
     }
@@ -144,7 +144,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
                    lower.tail = TRUE, log.p = FALSE ) {
 
   if (!is.logical(lower.tail) || length(lower.tail ) != 1)
-    stop("bad input for argument 'lower.tail'") 
+    stop("bad input for argument 'lower.tail'")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
 
@@ -155,7 +155,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
   p <- ifelse(x <= -k ,
               exp(k^2 / 2) / k * exp(k * x) / sqrt(2 * pi),
               dnorm(k) / k + pnorm(x) - pnorm(-k))
-  
+
   if (lower.tail) {
     if (log.p) {
       ans <- ifelse(zedd <= 0, log(p) + log1p(-eps),
@@ -172,7 +172,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
       ans <- ifelse(zedd <= 0, -expm1(log(p) + log1p(-eps)),
                                exp(log(p) + log1p(-eps)))
     }
-  } 
+  }
   ans
 }
 
@@ -308,6 +308,15 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
            .elocat = elocat, .escale = escale,
            .k      = k ))),
   vfamily = c("huber2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mylocat <- eta2theta(eta[, 1], .llocat ,  earg = .elocat )
+    myscale <- eta2theta(eta[, 2], .lscale ,  earg = .escale )
+    okay1 <- all(is.finite(mylocat)) &&
+             all(is.finite(myscale)) && all(0 < myscale)
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale,
+           .elocat = elocat, .escale = escale,
+           .k      = k ))),
   deriv = eval(substitute(expression({
     mylocat <- eta2theta(eta[, 1], .llocat ,  earg = .elocat )
     myscale <- eta2theta(eta[, 2], .lscale ,  earg = .escale )
@@ -456,6 +465,13 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
            .elocat = elocat,
            .k      = k ))),
   vfamily = c("huber1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mylocat <- eta2theta(eta, .llocat ,  earg = .elocat )
+    okay1 <- all(is.finite(mylocat))
+    okay1
+  }, list( .llocat = llocat,
+           .elocat = elocat,
+           .k      = k ))),
   deriv = eval(substitute(expression({
     mylocat <- eta2theta(eta, .llocat ,  earg = .elocat )
     myk     <- .k
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 88c31ea..1791edf 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -32,7 +32,7 @@ replace.constraints <- function(Hlist, cm, index) {
   Criterion <- match.arg(Criterion, c("ResSS", "coefficients"))[1]
 
   list(Alphavec = Alphavec,
-       Criterion = Criterion, 
+       Criterion = Criterion,
        Linesearch = Linesearch,
        Maxit = Maxit,
        Suppress.warning = Suppress.warning,
@@ -46,7 +46,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
   Rank <- ncol(numat)
   moff <- NULL
   ans <- if (Quadratic) {
-           index <- iam(NA, NA, M = Rank, diag = TRUE, both = TRUE) 
+           index <- iam(NA, NA, M = Rank, diag = TRUE, both = TRUE)
            temp1 <- cbind(numat[, index$row] * numat[, index$col])
            if (I.tolerances) {
              moff <- 0
@@ -64,7 +64,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
 
 
  valt <- function(x, z, U, Rank = 1,
-                  Hlist = NULL, 
+                  Hlist = NULL,
                   Cinit = NULL,
                   Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
                                60, 80, 100, 125, 2^(8:12)),
@@ -72,18 +72,18 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
                   Crow1positive = rep_len(TRUE, Rank),
                   colx1.index,
                   Linesearch = FALSE,
-                  Maxit = 20, 
+                  Maxit = 20,
                   str0 = NULL,
                   sd.Cinit = 0.02,
                   Suppress.warning = FALSE,
-                  Tolerance = 1e-6, 
+                  Tolerance = 1e-6,
                   trace = FALSE,
                   xij = NULL) {
 
 
 
 
-                 
+
 
 
   if (mode(Criterion) != "character" && mode(Criterion) != "name")
@@ -91,7 +91,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
   Criterion <- match.arg(Criterion, c("ResSS", "coefficients"))[1]
 
   if (any(diff(Alphavec) <= 0))
-    stop("'Alphavec' must be an increasing sequence") 
+    stop("'Alphavec' must be an increasing sequence")
 
   if (!is.matrix(z))
     z <- as.matrix(z)
@@ -135,7 +135,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
   C <- Cinit  # This is input for the main iter loop
   old.crit <- switch(Criterion, coefficients = C, ResSS = fit$ResSS)
 
-  recover <- 0  # Allow a few iterations between different line searches 
+  recover <- 0  # Allow a few iterations between different line searches
   for (iter in 1:Maxit) {
     iter.save <- iter
 
@@ -155,7 +155,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
            solve(t(A) %*% A)
 
       numat <- x[, colx2.index, drop = FALSE] %*% C
-      evnu <- eigen(var(numat))
+      evnu <- eigen(var(numat), symmetric = TRUE)
       temp7 <- if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
                  evnu$vector %*% evnu$value^(-0.5)
       C <- C %*% temp7
@@ -192,7 +192,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
           xnew <- C
 
           direction1 <- (xnew - xold)  # / sqrt(1 + sum((xnew-xold)^2))
-          ftemp <- fit$ResSS  # Most recent objective function 
+          ftemp <- fit$ResSS  # Most recent objective function
           use.alpha <- 0  # The current step relative to (xold, yold)
           for (itter in seq_along(Alphavec)) {
             CC <- xold + Alphavec[itter] * direction1
@@ -208,9 +208,9 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
                             xij = xij)
             if (try$ResSS < ftemp) {
               use.alpha <- Alphavec[itter]
-              fit <- try 
+              fit <- try
               ftemp <- try$ResSS
-              C <- CC 
+              C <- CC
               A <- t(fit$mat.coef[1:Rank, , drop = FALSE])
               latvar.mat <- x[, colx2.index, drop = FALSE] %*% C
               recover <- iter  # Give it some altg iters to recover
@@ -222,8 +222,8 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
               }
               fini.linesearch <- TRUE
             }
-          if (fini.linesearch) break 
-        }  # End of itter loop 
+          if (fini.linesearch) break
+        }  # End of itter loop
     }
 
     xold <- C # Do not take care of drift
@@ -281,9 +281,9 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
     if (!no.thrills) {
       i63 <- iam(NA, NA, M=Rank, both = TRUE)
       names(clist2) <- c(
-             if (NoA) NULL else paste("(latvar", 1:Rank, ")", sep = ""), 
+             if (NoA) NULL else paste("(latvar", 1:Rank, ")", sep = ""),
              if (Quadratic && Rank == 1 && !I.tolerances)
-                 "(latvar^2)" else 
+                 "(latvar^2)" else
              if (Quadratic && Rank>1 && !I.tolerances)
                  paste("(latvar", i63$row, ifelse(i63$row == i63$col, "^2",
                  paste("*latvar", i63$col, sep = "")), ")", sep = "") else NULL,
@@ -328,7 +328,7 @@ valt.2iter <- function(x, z, U, Hlist, A, control) {
 
 
   clist1 <- replace.constraints(Hlist, A, control$colx2.index)
-  fit <- vlm.wfit(xmat = x, z, Hlist = clist1, U = U, matrix.out = TRUE, 
+  fit <- vlm.wfit(xmat = x, z, Hlist = clist1, U = U, matrix.out = TRUE,
                   is.vlmX = FALSE, ResSS = TRUE, qr = FALSE, xij = control$xij)
   C <- fit$mat.coef[control$colx2.index, , drop = FALSE] %*%
        A %*% solve(t(A) %*% A)
@@ -356,7 +356,7 @@ valt.1iter <- function(x, z, U, Hlist, C, control,
     Qoffset <- if (Quadratic) ifelse(I.tolerances, 0, sum(1:Rank)) else 0
     tmp833 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist, C = C,
                                      control = control)
-    new.latvar.model.matrix <- tmp833$new.latvar.model.matrix 
+    new.latvar.model.matrix <- tmp833$new.latvar.model.matrix
     clist2 <- tmp833$constraints # Does not contain \bI_{Rank}
     latvar.mat <- tmp833$latvar.mat
     if (Corner)
@@ -397,7 +397,7 @@ valt.1iter <- function(x, z, U, Hlist, C, control,
     A <- if (tmp833$NoA) matrix(0, M, Rank) else
         t(fit$mat.coef[1:Rank,, drop = FALSE])
     if (Corner)
-        A[Index.corner,] <- diag(Rank)     
+        A[Index.corner,] <- diag(Rank)
 
     B1 <- if (p1)
       fit$mat.coef[-(1:(tmp833$Aoffset+Qoffset)),, drop = FALSE] else
@@ -411,7 +411,7 @@ valt.1iter <- function(x, z, U, Hlist, C, control,
                 tmp800[if (MSratio == 2) c(TRUE, FALSE) else
                        TRUE, 1:Rank] <- -0.5
                 tmp800
-            } else 
+            } else
                 t(fit$mat.coef[(tmp833$Aoffset+1):
                   (tmp833$Aoffset+Qoffset),, drop = FALSE])
     } else
@@ -483,7 +483,7 @@ rrr.alternating.expression <- expression({
     ans2 <- rrr.normalize(rrcontrol = rrcontrol, A=alt$A, C=alt$C, x = x)
 
     Amat <- ans2$A           # Fed into Hlist below (in rrr.end.expression)
-    tmp.fitted <- alt$fitted # Also fed; was alt2$fitted 
+    tmp.fitted <- alt$fitted # Also fed; was alt2$fitted
 
     rrcontrol$Cinit <- ans2$C   # For next valt() call
 
@@ -504,7 +504,7 @@ rrr.alternating.expression <- expression({
         Dk <- matrix(Dk, Rank, Rank)
         Dk <- t(Mmat) %*% Dk  %*% Mmat  # 20030822; Not diagonal in general.
         Dmat[kay, ] <- Dk[cbind(ind0$row.index[1:ncol(Dmat)],
-                                ind0$col.index[1:ncol(Dmat)])] 
+                                ind0$col.index[1:ncol(Dmat)])]
       }
     }
     Dmat
@@ -527,7 +527,7 @@ rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
       Mmat <- solve(tmp87)  # The normalizing matrix
       C <- C %*% t(tmp87)
       A <- A %*% Mmat
-      A[Index.corner,] <- diag(Rank)  # Make sure 
+      A[Index.corner,] <- diag(Rank)  # Make sure
 
       Dmat <- adjust.Dmat.expression(Mmat = Mmat, Rank = Rank,
                                      Dmat = Dmat, M = M)
@@ -536,7 +536,7 @@ rrr.normalize <- function(rrcontrol, A, C, x, Dmat = NULL) {
     if (rrcontrol$Svd.arg) {
       temp <- svd(C %*% t(A))
       if (!is.matrix(temp$v))
-        temp$v <- as.matrix(temp$v) 
+        temp$v <- as.matrix(temp$v)
       C <- temp$u[, 1:Rank, drop = FALSE] %*%
            diag(temp$d[1:Rank]^(1-rrcontrol$Alpha), nrow = Rank)
       A <- diag(temp$d[1:Rank]^(  rrcontrol$Alpha), nrow = Rank) %*%
@@ -598,7 +598,7 @@ rrr.end.expression <- expression({
   if (control$Quadratic) {
     if (!length(extra))
       extra <- list()
-    extra$Cmat <- Cmat      # Saves the latest iteration 
+    extra$Cmat <- Cmat      # Saves the latest iteration
     extra$Dmat <- Dmat      # Not the latest iteration
     extra$B1   <- B1.save   # Not the latest iteration (not good)
   } else {
@@ -626,7 +626,7 @@ rrr.end.expression <- expression({
     mu <- family at linkinv(eta, extra)
 
     if (anyNA(mu))
-      warning("there are NAs in mu") 
+      warning("there are NAs in mu")
 
     deriv.mu <- eval(family at deriv)
     wz <- eval(family at weight)
@@ -651,7 +651,7 @@ rrr.derivative.expression <- expression({
 
 
     which.optimizer <- if (control$Quadratic && control$FastAlgorithm) {
-      "BFGS" 
+      "BFGS"
     } else {
       if (iter <= rrcontrol$Switch.optimizer) "Nelder-Mead" else "BFGS"
     }
@@ -659,7 +659,7 @@ rrr.derivative.expression <- expression({
       cat("\n\n")
       cat("Using", which.optimizer, "\n")
       flush.console()
-    } 
+    }
 
     constraints <- replace.constraints(constraints, diag(M),
                                        rrcontrol$colx2.index)
@@ -686,7 +686,7 @@ rrr.derivative.expression <- expression({
                 }
 
           }
-    
+
           if (iter == 2 || quasi.newton$convergence) {
               NOS <- ifelse(modelno == 3 || modelno == 5, M/2, M)
 
@@ -695,7 +695,7 @@ rrr.derivative.expression <- expression({
                 get("CQO.FastAlgorithm", envir = VGAMenv))
               if (!canfitok)
                 stop("cannot fit this model using fast algorithm")
-              p2star <- if (nice31) 
+              p2star <- if (nice31)
                         ifelse(control$I.toleran,
                                Rank,
                                Rank+0.5*Rank*(Rank+1)) else
@@ -707,7 +707,7 @@ rrr.derivative.expression <- expression({
               X.vlm.1save <- if (p1star > 0)
                              X.vlm.save[,-(1:p2star)] else NULL
               quasi.newton <-
-                optim(par = Cmat, fn = callcqof, 
+                optim(par = Cmat, fn = callcqof,
                       gr <- if (control$GradientFunction) calldcqo else NULL,
                       method = which.optimizer,
                       control = list(fnscale = 1,
@@ -736,13 +736,13 @@ rrr.derivative.expression <- expression({
               rm(".VGAM.offset", envir = VGAMenv)
           }
         } else {
-          use.reltol <- if (length(rrcontrol$Reltol) >= iter) 
+          use.reltol <- if (length(rrcontrol$Reltol) >= iter)
               rrcontrol$Reltol[iter] else rev(rrcontrol$Reltol)[1]
           quasi.newton <-
             optim(par = theta0,
-                  fn = rrr.derivC.ResSS, 
+                  fn = rrr.derivC.ResSS,
                   method = which.optimizer,
-                  control = list(fnscale = rrcontrol$Fnscale, 
+                  control = list(fnscale = rrcontrol$Fnscale,
                                  maxit = rrcontrol$Maxit,
                                  abstol = rrcontrol$Abstol,
                                  reltol = use.reltol),
@@ -758,7 +758,7 @@ rrr.derivative.expression <- expression({
 
       if (Rank > 1 && rrcontrol$I.tolerances) {
         numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
-        evnu <- eigen(var(numat))
+        evnu <- eigen(var(numat), symmetric = TRUE)
         Cmat <- Cmat %*% evnu$vector
         numat <- x[, rrcontrol$colx2.index, drop = FALSE] %*% Cmat
         offset <- if (Rank > 1) -0.5*rowSums(numat^2) else -0.5*numat^2
@@ -775,7 +775,7 @@ rrr.derivative.expression <- expression({
     if (length(alt$offset))
         offset <- alt$offset
 
-    B1.save <- alt$B1 # Put later into extra  
+    B1.save <- alt$B1 # Put later into extra
     tmp.fitted <- alt$fitted  # contains \bI_{Rank} \bnu if Corner
 
     if (modelno != 33 && control$OptimizeWrtC)
@@ -799,9 +799,9 @@ rrr.derivative.expression <- expression({
 
 
 
-    Amat <- alt$Amat  # Needed in rrr.end.expression 
-    Cmat <- alt$Cmat  # Needed in rrr.end.expression if Quadratic 
-    Dmat <- alt$Dmat  # Put later into extra  
+    Amat <- alt$Amat  # Needed in rrr.end.expression
+    Cmat <- alt$Cmat  # Needed in rrr.end.expression if Quadratic
+    Dmat <- alt$Dmat  # Put later into extra
 
     eval(rrr.end.expression)    # Put Amat into Hlist, and create new z
 })
@@ -818,7 +818,7 @@ rrr.derivC.ResSS <- function(theta, U, z, M, xmat, Hlist, rrcontrol,
   alreadyThere <- exists(".VGAM.dot.counter", envir = VGAMenv)
   if (alreadyThere) {
     VGAM.dot.counter <- get(".VGAM.dot.counter", envir = VGAMenv)
-    VGAM.dot.counter <- VGAM.dot.counter + 1 
+    VGAM.dot.counter <- VGAM.dot.counter + 1
     assign(".VGAM.dot.counter", VGAM.dot.counter, envir = VGAMenv)
     if (VGAM.dot.counter > max(50, options()$width - 5)) {
       if (rrcontrol$trace) {
@@ -858,16 +858,16 @@ rrr.derivC.ResSS <- function(theta, U, z, M, xmat, Hlist, rrcontrol,
 
 
 rrvglm.optim.control <- function(Fnscale = 1,
-                                 Maxit = 100, 
+                                 Maxit = 100,
                                  Switch.optimizer = 3,
-                                 Abstol = -Inf, 
+                                 Abstol = -Inf,
                                  Reltol = sqrt(.Machine$double.eps),
                                  ...) {
 
 
 
 
-    list(Fnscale = Fnscale, 
+    list(Fnscale = Fnscale,
          Maxit = Maxit,
          Switch.optimizer = Switch.optimizer,
          Abstol = Abstol,
@@ -906,7 +906,7 @@ Coef.qrrvglm <-
 
 
 
-  if (length(varI.latvar) != 1 || !is.logical(varI.latvar)) 
+  if (length(varI.latvar) != 1 || !is.logical(varI.latvar))
     stop("'varI.latvar' must be TRUE or FALSE")
   if (length(refResponse) > 1)
     stop("argument 'refResponse' must be of length 0 or 1")
@@ -919,8 +919,8 @@ Coef.qrrvglm <-
     stop("cannot determine whether the model is constrained or not")
 
   ocontrol <- object at control
-  coef.object <- object at coefficients 
-  Rank <- ocontrol$Rank 
+  coef.object <- object at coefficients
+  Rank <- ocontrol$Rank
   M <- object at misc$M
   NOS <- if (length(object at y)) ncol(object at y) else M
   MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
@@ -946,12 +946,12 @@ Coef.qrrvglm <-
   if (!length(ynames)) ynames <- object at misc$ynames
   if (!length(ynames)) ynames <- paste("Y", 1:NOS, sep = "")
   lp.names <- object at misc$predictors.names
-  if (!length(lp.names)) lp.names <- NULL 
+  if (!length(lp.names)) lp.names <- NULL
 
   dzero.vector <- rep_len(FALSE, M)
   if (length(Dzero))
     dzero.vector[Dzero] <- TRUE
-  names(dzero.vector) <- ynames 
+  names(dzero.vector) <- ynames
   latvar.names <- if (Rank == 1)
     "latvar" else
     paste("latvar", 1:Rank, sep = "")
@@ -964,11 +964,11 @@ Coef.qrrvglm <-
     Tolerance <- Darray <- m2a(Dmat, M = Rank)
     for (ii in 1:M)
       if (length(Dzero) && any(Dzero == ii)) {
-        Tolerance[, , ii] <- NA   # Darray[,,ii] == O 
-        bellshaped[ii] <- FALSE 
+        Tolerance[, , ii] <- NA   # Darray[,,ii] == O
+        bellshaped[ii] <- FALSE
       } else {
         Tolerance[, , ii] <- -0.5 * solve(Darray[, , ii])
-        bellshaped[ii] <- all(eigen(Tolerance[, , ii])$values > 0)
+        bellshaped[ii] <- all(eigen(Tolerance[, , ii], symmetric = TRUE)$values > 0)
       }
     optimum <- matrix(NA_real_, Rank, M)
     for (ii in 1:M)
@@ -1006,9 +1006,9 @@ Coef.qrrvglm <-
   elts <- Dmat[this.spp,, drop = FALSE]
       if (length(elts) < Rank)
         elts <- matrix(elts, 1, Rank)
-      Dk <- m2a(elts, M = Rank)[, , 1]  # Hopefully negative-def 
-      temp400 <- eigen(Dk)
-      ptr1 <- ptr1 + 1 
+      Dk <- m2a(elts, M = Rank)[, , 1]  # Hopefully negative-def
+      temp400 <- eigen(Dk, symmetric = TRUE)
+      ptr1 <- ptr1 + 1
       if (all(temp400$value < 0))
         break
       if (ptr1 > length(candidates))
@@ -1048,7 +1048,7 @@ Coef.qrrvglm <-
 
 
     } else {
-      if (length(refResponse) == 1) 
+      if (length(refResponse) == 1)
         stop("tolerance matrix specified by 'refResponse' ",
              "is not positive-definite") else
         warning("could not find any positive-definite ",
@@ -1061,7 +1061,7 @@ Coef.qrrvglm <-
       if (!length(xmat <- object at x))
         stop("cannot obtain the model matrix")
       numat <- xmat[,ocontrol$colx2.index, drop = FALSE] %*% Cmat
-      evnu <- eigen(var(numat))
+      evnu <- eigen(var(numat), symmetric = TRUE)
       Mmat <- solve(t(evnu$vector))
       Cmat <- Cmat %*% evnu$vector  # == Cmat %*% solve(t(Mmat))
       Amat <- Amat %*% Mmat
@@ -1120,19 +1120,19 @@ Coef.qrrvglm <-
   maximum <- if (length(cx1i) == 1 && names(cx1i) == "(Intercept)") {
       eta.temp <- B1
       for (ii in 1:M)
-        eta.temp[ii] <- eta.temp[ii] + 
+        eta.temp[ii] <- eta.temp[ii] +
             Amat[ii, , drop = FALSE] %*% optimum[, ii, drop = FALSE] +
             t(optimum[, ii, drop = FALSE]) %*%
             Darray[,, ii, drop = TRUE] %*% optimum[, ii, drop = FALSE]
-      mymax <- object at family@linkinv(rbind(eta.temp), extra = object at extra)  
-      c(mymax)  # Convert from matrix to vector 
+      mymax <- object at family@linkinv(rbind(eta.temp), extra = object at extra)
+      c(mymax)  # Convert from matrix to vector
     } else {
       5 * rep_len(NA_real_, M)  # Make "numeric"
   }
   names(maximum) <- ynames
-    
+
   latvar.mat <- if (ConstrainedQO) {
-    object at x[, ocontrol$colx2.index, drop = FALSE] %*% Cmat 
+    object at x[, ocontrol$colx2.index, drop = FALSE] %*% Cmat
   } else {
     object at latvar
   }
@@ -1143,14 +1143,14 @@ Coef.qrrvglm <-
   if (!length(xmat <- object at x)) stop("cannot obtain the model matrix")
   dimnames(latvar.mat) <- list(dimnames(xmat)[[1]], latvar.names)
 
-  ans <- 
+  ans <-
   new(Class <- if (ConstrainedQO) "Coef.qrrvglm" else "Coef.uqo",
        A = Amat, B1 = B1, Constrained = ConstrainedQO, D = Darray,
        NOS = NOS, Rank = Rank,
        latvar = latvar.mat,
        latvar.order = latvar.mat,
-       Optimum = optimum, 
-       Optimum.order = optimum, 
+       Optimum = optimum,
+       Optimum.order = optimum,
        bellshaped = bellshaped,
        Dzero = dzero.vector,
        Maximum = maximum,
@@ -1169,10 +1169,10 @@ Coef.qrrvglm <-
     M <- object at misc$M
     NOS <- if (length(object at y)) ncol(object at y) else M
     pstar <- if (ConstrainedQO) (p + length(Cmat)) else
-             p + n*Rank # Adjustment; not sure about UQO 
+             p + n*Rank # Adjustment; not sure about UQO
     adjusted.dispersion <- object at misc$dispersion * (n*M - p) /
             (n*M - pstar)
-    ans at dispersion <- adjusted.dispersion 
+    ans at dispersion <- adjusted.dispersion
   }
 
   if (MSratio > 1) {
@@ -1185,10 +1185,10 @@ Coef.qrrvglm <-
   } else {
     dimnames(ans at D) <- list(latvar.names, latvar.names, ynames)
   }
-  names(ans at bellshaped) <- ynames 
+  names(ans at bellshaped) <- ynames
   dimnames(ans at Optimum) <- list(latvar.names, ynames)
   dimnames(ans at Tolerance) <- list(latvar.names, latvar.names, ynames)
-  ans 
+  ans
 }  # End of Coef.qrrvglm
 
 
@@ -1229,7 +1229,7 @@ setClass(Class = "Coef.qrrvglm", representation(
 
 show.Coef.qrrvglm <- function(x, ...) {
 
-  object <- x 
+  object <- x
   Rank <- object at Rank
   M <- nrow(object at A)
   NOS <- object at NOS
@@ -1248,7 +1248,7 @@ show.Coef.qrrvglm <- function(x, ...) {
                                   sep = ""))
     } else {
       for (ii in 1:NOS) {
-        fred <- eigen(object at Tolerance[, , ii])
+        fred <- eigen(object at Tolerance[, , ii], symmetric = TRUE)
           if (all(fred$value > 0))
               mymat[ii, ] <- sqrt(fred$value)
       }
@@ -1321,7 +1321,7 @@ predictqrrvglm <-
            se.fit = FALSE,
            deriv = 0,
            dispersion = NULL,
-           extra = object at extra, 
+           extra = object at extra,
            varI.latvar = FALSE, refResponse = NULL, ...) {
   if (se.fit)
     stop("cannot handle se.fit == TRUE yet")
@@ -1492,7 +1492,7 @@ show.rrvglm <- function(x, ...) {
             seq_along(vecOfBetas), sep = "")
     cat("\nCoefficients: (", sum(nas),
         " not defined because of singularities)\n", sep = "")
-  } else 
+  } else
       cat("\nCoefficients:\n")
   print.default(vecOfBetas, ...)    # used to be print()
 
@@ -1542,12 +1542,13 @@ setMethod("show", "rrvglm", function(object) show.rrvglm(object))
 
 
 summary.rrvglm <- function(object, correlation = FALSE,
-                           dispersion = NULL, digits = NULL, 
+                           dispersion = NULL, digits = NULL,
                            numerical = TRUE,
-                           h.step = 0.0001, 
+                           h.step = 0.0001,
                            kill.all = FALSE, omit13 = FALSE,
-                           fixA = FALSE, 
-                           presid = TRUE, 
+                           fixA = FALSE,
+                           presid = TRUE,
+                           signif.stars = getOption("show.signif.stars"),
                            nopredictors = FALSE, ...) {
 
 
@@ -1597,13 +1598,13 @@ summary.rrvglm <- function(object, correlation = FALSE,
 
     tmp5 <- get.rrvglm.se1(object, omit13 = omit13,
                            numerical = numerical, h.step = h.step,
-                           kill.all = kill.all, fixA = fixA, ...) 
+                           kill.all = kill.all, fixA = fixA, ...)
     if (any(diag(tmp5$cov.unscaled) <= 0) ||
-       any(eigen(tmp5$cov.unscaled)$value <= 0)) {
-        warning("cov.unscaled is not positive definite") 
+       any(eigen(tmp5$cov.unscaled, symmetric = TRUE)$value <= 0)) {
+        warning("cov.unscaled is not positive definite")
     }
 
-    answer at cov.unscaled <- tmp5$cov.unscaled 
+    answer at cov.unscaled <- tmp5$cov.unscaled
 
     od <- if (is.numeric(object at misc$disper))
         object at misc$disper else
@@ -1616,12 +1617,12 @@ summary.rrvglm <- function(object, correlation = FALSE,
       dispersion <- if (is.numeric(od)) od else 1
     }
 
-    tmp8 <- object at misc$M - object at control$Rank - 
+    tmp8 <- object at misc$M - object at control$Rank -
             length(object at control$str0)
     answer at df[1] <- answer at df[1] + tmp8 * object at control$Rank
     answer at df[2] <- answer at df[2] - tmp8 * object at control$Rank
     if (dispersion == 0) {
-      dispersion <- tmp5$ResSS / answer at df[2]  # Estimate 
+      dispersion <- tmp5$ResSS / answer at df[2]  # Estimate
     }
 
     answer at coef3 <- get.rrvglm.se2(answer at cov.unscaled,
@@ -1632,6 +1633,7 @@ summary.rrvglm <- function(object, correlation = FALSE,
     answer at sigma <- dispersion^0.5
 
 
+    answer at misc$signif.stars <- signif.stars  # 20160629
     answer at misc$nopredictors <- nopredictors  # 20150925
 
     answer
@@ -1660,14 +1662,14 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     stop("fix at x is empty. Run rrvglm(... , x = TRUE)")
 
   colx1.index <- fit at control$colx1.index  # May be NULL
-  colx2.index <- fit at control$colx2.index 
+  colx2.index <- fit at control$colx2.index
   Hlist <- fit at constraints
   ncolHlist <- unlist(lapply(Hlist, ncol))
 
   p1 <- length(colx1.index)  # May be 0
   p2 <- length(colx2.index)
 
-  Rank <- fit at control$Rank  # fit at misc$Nested.Rank   
+  Rank <- fit at control$Rank  # fit at misc$Nested.Rank
 
   Amat <- fit at constraints[[colx2.index[1]]]
   B1mat <- if (p1)
@@ -1695,7 +1697,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
   delct.da <- if (numerical) {
     num.deriv.rrr(fit, M = M, r = Rank,
-                  x1mat = x1mat, x2mat = x2mat, p2 = p2, 
+                  x1mat = x1mat, x2mat = x2mat, p2 = p2,
                   Index.corner, Aimat = Amat,
                   B1mat = B1mat, Cimat = Cmat,
                   h.step = h.step,
@@ -1721,13 +1723,13 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
 
 
-  sfit2233 <- summaryvglm(newobject) 
+  sfit2233 <- summaryvglm(newobject)
   d8 <-  dimnames(sfit2233 at cov.unscaled)[[1]]
   cov2233 <- solve(sfit2233 at cov.unscaled)  # Includes any intercepts
   dimnames(cov2233) <- list(d8, d8)
 
-  log.vec33 <- NULL 
-  nassign <- names(fit at constraints) 
+  log.vec33 <- NULL
+  nassign <- names(fit at constraints)
   choose.from <-  varassign(fit at constraints, nassign)
   for (ii in nassign)
     if (any(ii == names(colx2.index))) {
@@ -1739,14 +1741,14 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
 
     latvar.mat <- x2mat %*% Cmat
-    offs <- matrix(0, n, M)  # The "0" handles str0's 
+    offs <- matrix(0, n, M)  # The "0" handles str0's
     offs[, Index.corner] <- latvar.mat
     if (M == (Rank + length(str0)))
       stop("cannot handle full-rank models yet")
     cm <- matrix(0, M, M - Rank - length(str0))
     cm[-c(Index.corner, str0), ] <- diag(M - Rank - length(str0))
 
-    Hlist <- vector("list", length(colx1.index)+1) 
+    Hlist <- vector("list", length(colx1.index)+1)
     names(Hlist) <- c(names(colx1.index), "I(latvar.mat)")
     for (ii in names(colx1.index))
       Hlist[[ii]] <- fit at constraints[[ii]]
@@ -1755,7 +1757,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
     if (p1) {
       ooo <- fit at assign
-      bb <- NULL 
+      bb <- NULL
       for (ii in seq_along(ooo)) {
         if (any(ooo[[ii]][1] == colx1.index))
           bb <- c(bb, names(ooo)[ii])
@@ -1815,7 +1817,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
     cov13 <- delct.da %*% cov33
 
 
-    if (omit13) 
+    if (omit13)
       cov13 <- cov13 * 0   # zero it
 
     if (kill.all) {
@@ -1851,17 +1853,18 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
 
   d8 <-  dimnames(cov.unscaled)[[1]]
-  ans <- matrix(coefficients, length(coefficients), 3) 
+  ans <- matrix(coefficients, length(coefficients), 4)
   ans[, 2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
   ans[, 3] <- ans[, 1] / ans[, 2]
-  dimnames(ans) <- list(d8, c("Estimate", "Std. Error", "z value"))
+  ans[, 4] <- pnorm(-abs(ans[, 3]))
+  dimnames(ans) <- list(d8, c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))
   ans
 }
 
 
 
 num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
-                          p2, Index.corner, Aimat, B1mat, Cimat, 
+                          p2, Index.corner, Aimat, B1mat, Cimat,
                           h.step = 0.0001, colx2.index,
                           xij = NULL, str0 = NULL) {
 
@@ -1894,7 +1897,7 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
       fit at predictors <- neweta
 
 
-      newmu <- fit at family@linkinv(neweta, fit at extra) 
+      newmu <- fit at family@linkinv(neweta, fit at extra)
       fit at fitted.values <- as.matrix(newmu)  # 20100909
 
       fred <- weights(fit, type = "w", deriv = TRUE, ignore.slot = TRUE)
@@ -1946,12 +1949,12 @@ dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
   temp <- aperm(temp, c(2, 1, 3))  # M by p2*r by nn
   temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
   temp <- m2a(temp, M = p2 * r)  # Note M != M here!
-  G <- solve(rowSums(temp, dims = 2))  # p2*r by p2*r 
+  G <- solve(rowSums(temp, dims = 2))  # p2*r by p2*r
 
   dc.da <- array(NA_real_, c(p2, r, M, r))  # different from other functions
   if (length(Index.corner) == M)
       stop("cannot handle full rank models yet")
-  cbindex <- (1:M)[-Index.corner]  # complement of Index.corner 
+  cbindex <- (1:M)[-Index.corner]  # complement of Index.corner
   resid2 <- if (length(x1mat))
     mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
           upper = FALSE, as.matrix = TRUE) else
@@ -1968,7 +1971,7 @@ dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
                       M = M, upper = FALSE,
                       as.matrix = TRUE)  # nn * M
         wxx <- Wiak[,ttt] * x2mat
-        blocki <- t(x2mat) %*% wxx 
+        blocki <- t(x2mat) %*% wxx
         temp4a <- blocki %*% Cimat[,kkk]
         if (kkk == 1) {
             temp4b <- blocki %*% Cimat[,sss]
@@ -1976,10 +1979,10 @@ dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
         temp2 <- temp2 - kronecker(I.col(sss, r), temp4a) -
                          kronecker(I.col(kkk, r), temp4b)
       }
-      dc.da[,,ttt,sss] <- G %*% temp2 
+      dc.da[,,ttt,sss] <- G %*% temp2
     }
-  ans1 <- dc.da[,,cbindex,, drop = FALSE]  # p2 x r x (M-r) x r 
-  ans1 <- aperm(ans1, c(2, 1, 3, 4))  # r x p2 x (M-r) x r 
+  ans1 <- dc.da[,,cbindex,, drop = FALSE]  # p2 x r x (M-r) x r
+  ans1 <- aperm(ans1, c(2, 1, 3, 4))  # r x p2 x (M-r) x r
 
   ans1 <- matrix(c(ans1), r*p2, (M-r)*r)
   ans1 <- t(ans1)
@@ -2016,7 +2019,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
 
 
   int.vec <- if (intercept) c3[, 1] else 0  # \boldeta_0
-  Cimat <- if (intercept) t(c3[Index.corner,-1, drop = FALSE]) else 
+  Cimat <- if (intercept) t(c3[Index.corner,-1, drop = FALSE]) else
            t(c3[Index.corner,, drop = FALSE])
   if (nrow(Cimat)!=pp || ncol(Cimat)!=r)
     stop("Cimat wrong shape")
@@ -2042,7 +2045,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
   for (s in 1:r)
     for (tt in cbindex) {
       fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
-               t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
+               t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE)
       temp2 <- kronecker(I.col(s, r), rowSums(fred))
 
       temp4 <- rep_len(0, pp)
@@ -2060,8 +2063,8 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
       }
       dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
     }
-  ans1 <- dc.da[,,cbindex,, drop = FALSE]  # pp x r x (M-r) x r 
-  ans1 <- aperm(ans1, c(2, 1, 3, 4))   # r x pp x (M-r) x r 
+  ans1 <- dc.da[,,cbindex,, drop = FALSE]  # pp x r x (M-r) x r
+  ans1 <- aperm(ans1, c(2, 1, 3, 4))   # r x pp x (M-r) x r
 
   ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow = TRUE)
 
@@ -2082,7 +2085,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
 
   deta0.da <- array(0,c(M,M,r))
   AtWi <- kronecker(matrix(1, nn, 1), Aimat)
-  AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE)  # matrix.arg= TRUE, 
+  AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE)  # matrix.arg= TRUE,
   AtWi <- array(t(AtWi), c(r, M, nn))
   for (ss in 1:r) {
     temp90 <- (m2a(t(colSums(etastar[, ss]*wz)), M = M))[, , 1]  # MxM
@@ -2093,7 +2096,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
   }
   ans2 <- deta0.da[-(1:r), , , drop = FALSE]  # (M-r) x M x r
   ans2 <- aperm(ans2, c(1, 3, 2))  # (M-r) x r x M
-  ans2 <- matrix(c(ans2), (M-r)*r, M) 
+  ans2 <- matrix(c(ans2), (M-r)*r, M)
 
   list(dc.da = ans1, dint.da = ans2)
 }
@@ -2182,7 +2185,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
   for (s in 1:r)
     for (tt in cbindex) {
       fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
-               t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
+               t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE)
       temp2 <- kronecker(I.col(s, r), rowSums(fred))
 
       temp4 <- rep_len(0, pp)
@@ -2193,7 +2196,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
         wxx <- Wiak[,tt] * (if (intercept)
                             xmat[, -1, drop = FALSE] else xmat)
         blocki <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
-                  t(xmat)) %*% wxx 
+                  t(xmat)) %*% wxx
         temp4 <- temp4 + blocki %*% Cimat[, k]
       }
       dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
@@ -2216,7 +2219,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
   deta0.da <- array(0, c(M, M, r))
 
   AtWi <- kronecker(matrix(1, nn, 1), Aimat)
-  AtWi <- mux111(t(wz), AtWi, M = M, upper = FALSE)  # matrix.arg= TRUE, 
+  AtWi <- mux111(t(wz), AtWi, M = M, upper = FALSE)  # matrix.arg= TRUE,
   AtWi <- array(t(AtWi), c(r, M, nn))
 
   for (ss in 1:r) {
@@ -2274,16 +2277,16 @@ biplot.qrrvglm <- function(x, ...) {
 
  lvplot.qrrvglm <-
   function(object, varI.latvar = FALSE, refResponse = NULL,
-           add = FALSE, show.plot = TRUE, rug = TRUE, y = FALSE, 
+           add = FALSE, show.plot = TRUE, rug = TRUE, y = FALSE,
            type = c("fitted.values", "predictors"),
            xlab = paste("Latent Variable",
                         if (Rank == 1) "" else " 1", sep = ""),
-           ylab = if (Rank == 1) switch(type, predictors = "Predictors", 
+           ylab = if (Rank == 1) switch(type, predictors = "Predictors",
               fitted.values = "Fitted values") else "Latent Variable 2",
-          pcex = par()$cex, pcol = par()$col, pch = par()$pch, 
+          pcex = par()$cex, pcol = par()$col, pch = par()$pch,
           llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
-          label.arg = FALSE, adj.arg = -0.1, 
-          ellipse = 0.95, Absolute = FALSE, 
+          label.arg = FALSE, adj.arg = -0.1,
+          ellipse = 0.95, Absolute = FALSE,
               elty = par()$lty, ecol = par()$col, elwd = par()$lwd,
               egrid = 200,
           chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd,
@@ -2291,7 +2294,7 @@ biplot.qrrvglm <- function(x, ...) {
           C = FALSE,
               OriginC = c("origin", "mean"),
               Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd,
-              Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1, 
+              Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1,
           sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
           sfont = par()$font,
           check.ok = TRUE, ...) {
@@ -2327,11 +2330,11 @@ biplot.qrrvglm <- function(x, ...) {
     Coef.list <- Coef(object, varI.latvar = varI.latvar,
                       refResponse = refResponse)
     if ( C) Cmat <- Coef.list at C
-    nustar <- Coef.list at latvar  # n x Rank 
+    nustar <- Coef.list at latvar  # n x Rank
 
     if (!show.plot) return(nustar)
 
-    r.curves <- slot(object, type)   # n times M (\boldeta or \boldmu) 
+    r.curves <- slot(object, type)   # n times M (\boldeta or \boldmu)
     if (!add) {
       if (Rank == 1) {
         matplot(nustar,
@@ -2368,7 +2371,7 @@ biplot.qrrvglm <- function(x, ...) {
 
     if (Rank == 1) {
       for (i in 1:ncol(r.curves)) {
-        xx <- nustar 
+        xx <- nustar
         yy <- r.curves[,i]
         o <- sort.list(xx)
         xx <- xx[o]
@@ -2379,10 +2382,10 @@ biplot.qrrvglm <- function(x, ...) {
           if (ncol(as.matrix(ypts)) == ncol(r.curves))
             points(xx, ypts[o,i], col = pcol[i],
                    cex = pcex[i], pch = pch[i])
-        } 
-      } 
+        }
+      }
       if (rug)
-        rug(xx) 
+        rug(xx)
     } else {
      for (i in 1:ncol(r.curves))
       points(Coef.list at Optimum[1, i], Coef.list at Optimum[2, i],
@@ -2390,7 +2393,7 @@ biplot.qrrvglm <- function(x, ...) {
      if (label.arg) {
       for (i in 1:ncol(r.curves))
           text(Coef.list at Optimum[1, i], Coef.list at Optimum[2, i],
-               labels = (dimnames(Coef.list at Optimum)[[2]])[i], 
+               labels = (dimnames(Coef.list at Optimum)[[2]])[i],
                adj = adj.arg[i], col = pcol[i], cex = pcex[i])
     }
     if (chull.arg) {
@@ -2414,7 +2417,7 @@ biplot.qrrvglm <- function(x, ...) {
         cutpoint <- object at family@linkfun( if (Absolute) ellipse.temp
                         else Coef.list at Maximum[i] * ellipse.temp,
                         extra = object at extra)
-        if (MSratio > 1) 
+        if (MSratio > 1)
           cutpoint <- cutpoint[1, 1]
 
           cutpoint <- object at family@linkfun(Coef.list at Maximum[i],
@@ -2422,7 +2425,7 @@ biplot.qrrvglm <- function(x, ...) {
           if (is.finite(cutpoint) && cutpoint > 0) {
             Mmat <- diag(rep_len(ifelse(object at control$Crow1positive, 1, -1),
                                  Rank))
-            etoli <- eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat)
+            etoli <- eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat, symmetric = TRUE)
             A <- ifelse(etoli$val[1]>0, sqrt(2*cutpoint*etoli$val[1]), Inf)
             B <- ifelse(etoli$val[2]>0, sqrt(2*cutpoint*etoli$val[2]), Inf)
             if (ellipse < 0)
@@ -2490,9 +2493,9 @@ lvplot.rrvglm <- function(object,
                           Clabels=rownames(Cmat),
                           Cadj = par()$adj,
                           Ccex = par()$cex,
-                          Ccol = par()$col, 
-                          Clty = par()$lty, 
-                          Clwd = par()$lwd, 
+                          Ccol = par()$col,
+                          Clty = par()$lty,
+                          Clwd = par()$lwd,
                           chull.arg = FALSE,
                           ccex = par()$cex,
                           ccol = par()$col,
@@ -2515,7 +2518,7 @@ lvplot.rrvglm <- function(object,
     Cmat <- Coef.list at C
 
     Amat <- Amat * scaleA
-    dimnames(Amat) <- list(object at misc$predictors.names, NULL) 
+    dimnames(Amat) <- list(object at misc$predictors.names, NULL)
     Cmat <- Cmat / scaleA
 
     if (!length(object at x)) {
@@ -2526,8 +2529,8 @@ lvplot.rrvglm <- function(object,
     if (!show.plot) return(as.matrix(nuhat))
 
     index.nosz <- 1:M
-    allmat <- rbind(if (A) Amat else NULL, 
-                   if (C) Cmat else NULL, 
+    allmat <- rbind(if (A) Amat else NULL,
+                   if (C) Cmat else NULL,
                    if (scores) nuhat else NULL)
 
     plot(allmat[, 1], allmat[, 2], type = "n",
@@ -2686,11 +2689,11 @@ show.Coef.rrvglm <- function(x, ...) {
   }
 
   invisible(object)
-} 
+}
 
 
  if (!isGeneric("biplot"))
-    setGeneric("biplot", function(x, ...) standardGeneric("biplot")) 
+    setGeneric("biplot", function(x, ...) standardGeneric("biplot"))
 
 
 setMethod("Coef", "qrrvglm", function(object, ...)
@@ -2726,13 +2729,13 @@ summary.qrrvglm <-
     answer <- object
     answer at post$Coef <- Coef(object,
                              varI.latvar = varI.latvar,
-                             refResponse = refResponse, 
+                             refResponse = refResponse,
                              ...)  # Store it here; non-elegant
 
   if (length((answer at post$Coef)@dispersion) &&
      length(object at misc$estimated.dispersion) &&
      object at misc$estimated.dispersion)
-      answer at dispersion <- 
+      answer at dispersion <-
       answer at misc$dispersion <- (answer at post$Coef)@dispersion
 
   as(answer, "summary.qrrvglm")
@@ -2752,7 +2755,7 @@ show.summary.qrrvglm <- function(x, ...) {
   if (length(x at dispersion) > 1) {
     cat("\nDispersion parameters:\n")
     if (length(x at misc$ynames)) {
-      names(x at dispersion) <- x at misc$ynames 
+      names(x at dispersion) <- x at misc$ynames
       print(x at dispersion, ...)
     } else {
       cat(x at dispersion, fill = TRUE)
@@ -2799,7 +2802,7 @@ setMethod("show", "Coef.rrvglm", function(object)
  grc <- function(y, Rank = 1, Index.corner = 2:(1+Rank),
                  str0 = 1,
                  summary.arg = FALSE, h.step = 0.0001, ...) {
-                           
+
 
 
     myrrcontrol <- rrvglm.control(Rank = Rank,
@@ -2848,7 +2851,7 @@ setMethod("show", "Coef.rrvglm", function(object)
     }
 
     dimnames(.grc.df) <- list(if (length(dimnames(y)[[1]]))
-                              dimnames(y)[[1]] else 
+                              dimnames(y)[[1]] else
                               as.character(1:nrow(y)),
                               dimnames(.grc.df)[[2]])
 
@@ -2867,7 +2870,7 @@ setMethod("show", "Coef.rrvglm", function(object)
 
     warn.save <- options()$warn
     options(warn = -3)  # Suppress the warnings (hopefully, temporarily)
-    answer <- if (is(object.save, "rrvglm")) object.save else 
+    answer <- if (is(object.save, "rrvglm")) object.save else
               rrvglm(as.formula(str2), family = poissonff,
                      constraints = cms, control = myrrcontrol,
                      data = .grc.df)
@@ -2877,7 +2880,7 @@ setMethod("show", "Coef.rrvglm", function(object)
       answer <- as(answer, "rrvglm")
 
       answer <- summary.rrvglm(answer, h.step = h.step)
-    } else { 
+    } else {
       answer <- as(answer, "grc")
     }
 
@@ -2899,16 +2902,16 @@ trplot.qrrvglm <-
   function(object,
            which.species = NULL,
            add = FALSE, show.plot = TRUE,
-           label.sites = FALSE, 
+           label.sites = FALSE,
            sitenames = rownames(object at y),
            axes.equal = TRUE,
            cex = par()$cex,
            col = 1:(nos*(nos-1)/2),
-           log = "", 
+           log = "",
            lty  = rep_len(par()$lty, nos*(nos-1)/2),
            lwd  = rep_len(par()$lwd, nos*(nos-1)/2),
            tcol = rep_len(par()$col, nos*(nos-1)/2),
-           xlab = NULL, ylab = NULL, 
+           xlab = NULL, ylab = NULL,
            main = "",   # "Trajectory plot",
            type = "b",
            check.ok = TRUE, ...) {
@@ -2919,7 +2922,7 @@ trplot.qrrvglm <-
   modelno <- object at control$modelno  # 1, 2, 3, or 0
   NOS <- ncol(fv)   # Number of species
   M <- object at misc$M  #
-  nn <- nrow(fv)  # Number of sites 
+  nn <- nrow(fv)  # Number of sites
   if (length(sitenames))
     sitenames <- rep_len(sitenames, nn)
   sppNames <- dimnames(object at y)[[2]]
@@ -2994,7 +2997,7 @@ trplot.qrrvglm <-
                    labels = sitenames[oo], cex = cex, col = tcol[ii])
         }
     }
-  invisible(list(species.names = species.names, 
+  invisible(list(species.names = species.names,
                  sitenames     = sitenames[oo]))
 }
 
@@ -3011,7 +3014,7 @@ setMethod("trplot", "rrvgam",
 
 
 vcovrrvglm <- function(object, ...) {
-  summary.rrvglm(object, ...)@cov.unscaled 
+  summary.rrvglm(object, ...)@cov.unscaled
 }
 
 
@@ -3108,7 +3111,7 @@ model.matrix.qrrvglm <- function(object,
 
   switch(type,
          latvar  = Coef(object, ...)@latvar,
-         vlm = object at x) 
+         vlm = object at x)
 }
 
 
@@ -3136,20 +3139,20 @@ perspqrrvglm <-
       labelSpecies = FALSE,  # For Rank == 1 only
       stretch = 1.05,  # quick and dirty, Rank == 1 only
       main = "",
-      ticktype = "detailed", 
+      ticktype = "detailed",
       col = if (Rank == 1) par()$col else "white",
       llty = par()$lty, llwd = par()$lwd,
       add1 = FALSE,
       ...) {
   oylim <- ylim
-  object <- x  # Do not like x as the primary argument 
+  object <- x  # Do not like x as the primary argument
   coef.obj <- Coef(object, varI.latvar = varI.latvar,
                    refResponse = refResponse)
   if ((Rank <- coef.obj at Rank) > 2)
     stop("object must be a rank-1 or rank-2 model")
   fv <- fitted(object)
   NOS <- ncol(fv)  # Number of species
-  M <- object at misc$M # 
+  M <- object at misc$M #
 
   xlim <- rep_len(if (length(xlim)) xlim else range(coef.obj at latvar[, 1]), 2)
   if (!length(oylim)) {
@@ -3208,7 +3211,7 @@ perspqrrvglm <-
         if (!add1)
           matplot(latvar1, fitvals, xlab = xlab, ylab = ylab,
                   type = "n",
-                  main = main, xlim = xlim, ylim = ylim, ...) 
+                  main = main, xlim = xlim, ylim = ylim, ...)
         for (jloc in seq_along(which.species.numer)) {
           ptr2 <- which.species.numer[jloc]  # points to species column
           lines(latvar1, fitvals[, ptr2],
@@ -3375,9 +3378,9 @@ Tol.Coef.qrrvglm <- function(object, ...) {
  if (!isGeneric("ccoef"))
     setGeneric("ccoef", function(object, ...) {
     .Deprecated("concoef")
-    
+
     standardGeneric("ccoef")
-    }) 
+    })
 
 setMethod("ccoef",  "rrvglm",
   function(object, ...) concoef.qrrvglm(object, ...))
@@ -3392,7 +3395,7 @@ setMethod("ccoef", "Coef.qrrvglm",
 
  if (!isGeneric("concoef"))
     setGeneric("concoef", function(object, ...)
-    standardGeneric("concoef")) 
+    standardGeneric("concoef"))
 
 setMethod("concoef",  "rrvglm",
   function(object, ...) concoef.qrrvglm(object, ...))
@@ -3420,36 +3423,36 @@ setMethod("coefficients", "qrrvglm",
     setGeneric("lv",
   function(object, ...) {
     .Deprecated("latvar")
-    
+
     standardGeneric("lv")
   })
 
 
 setMethod("lv",  "rrvglm",
   function(object, ...) {
-    
+
     latvar.rrvglm(object, ...)
   })
 setMethod("lv", "qrrvglm",
   function(object, ...) {
-    
+
     latvar.qrrvglm(object, ...)
   })
 setMethod("lv",  "Coef.rrvglm",
   function(object, ...) {
-    
+
     latvar.Coef.qrrvglm(object, ...)
   })
 setMethod("lv", "Coef.qrrvglm",
   function(object, ...) {
-    
+
     latvar.Coef.qrrvglm(object, ...)
   })
 
 
  if (!isGeneric("latvar"))
      setGeneric("latvar",
-  function(object, ...) standardGeneric("latvar")) 
+  function(object, ...) standardGeneric("latvar"))
 setMethod("latvar",  "rrvglm",
   function(object, ...) latvar.rrvglm(object, ...))
 setMethod("latvar", "qrrvglm",
@@ -3462,7 +3465,7 @@ setMethod("latvar", "Coef.qrrvglm",
 
  if (!isGeneric("Max"))
     setGeneric("Max",
-  function(object, ...) standardGeneric("Max")) 
+  function(object, ...) standardGeneric("Max"))
 setMethod("Max", "qrrvglm",
   function(object, ...) Max.qrrvglm(object, ...))
 setMethod("Max", "Coef.qrrvglm",
@@ -3493,7 +3496,7 @@ setMethod("Opt", "rrvgam",
 
  if (!isGeneric("Tol"))
     setGeneric("Tol",
-  function(object, ...) standardGeneric("Tol")) 
+  function(object, ...) standardGeneric("Tol"))
 setMethod("Tol", "qrrvglm",
   function(object, ...) Tol.qrrvglm(object, ...))
 setMethod("Tol", "Coef.qrrvglm",
diff --git a/R/family.sur.R b/R/family.sur.R
index 6ec9324..3a5469c 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -15,7 +15,7 @@
  SURff <-
   function(mle.normal = FALSE,
            divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
-           parallel = FALSE, 
+           parallel = FALSE,
            Varcov = NULL,
            matrix.arg = FALSE) {
 
@@ -59,7 +59,7 @@
   blurb = c("Seemingly unrelated regressions"),
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
+                           bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
   }), list( .parallel = parallel,
@@ -153,9 +153,9 @@
       }  # jay
     }  # !length(etastart)
   }), list(
-            .parallel = parallel 
+            .parallel = parallel
           ))),
-  linkinv = function(eta, extra = NULL) eta, 
+  linkinv = function(eta, extra = NULL) eta,
   last = eval(substitute(expression({
 
     M1 <- extra$M1
@@ -181,6 +181,11 @@
           ))),
 
   vfamily = "SURff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu <- eta
+    okay1 <- all(is.finite(mymu))
+    okay1
+  }, list( .lmean = lmean, .emean = emean, .divisor = divisor ))),
 
 
   deriv = eval(substitute(expression({
@@ -220,9 +225,7 @@
     dmu.deta <- dtheta.deta(mymu,   .lmean , earg = .emean )
 
     c(w) * dl.dmu * dmu.deta
-  }), list( .lmean = lmean,
-            .emean = emean,
-            .divisor = divisor ))),
+  }), list( .lmean = lmean, .emean = emean, .divisor = divisor ))),
 
 
   weight = eval(substitute(expression({
diff --git a/R/family.survival.R b/R/family.survival.R
index c892b14..44a1847 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -56,8 +56,7 @@
          lmu = .lmu ,
          lsd = .lsd ,
          zero = .zero )
-  }, list( .zero = zero, .lmu = lmu, .lsd = lsd
-         ))),
+  }, list( .zero = zero, .lmu = lmu, .lsd = lsd ))),
 
   initialize = eval(substitute(expression({
     predictors.names <-
@@ -83,14 +82,14 @@
       }
       yyyy.est <- rep_len(yyyy.est , n)
       sd.y.est <- rep_len(sd.y.est , n)
-      etastart <- cbind(mu = theta2eta(yyyy.est, .lmu , earg =.emu ),
-                        sd = theta2eta(sd.y.est, .lsd , earg =.esd ))
+      etastart <- cbind(mu = theta2eta(yyyy.est, .lmu , earg = .emu ),
+                        sd = theta2eta(sd.y.est, .lsd , earg = .esd ))
     }
   }) , list( .lmu = lmu, .lsd = lsd,
              .emu = emu, .esd = esd,
              .imu = imu, .isd = isd,
              .r1 = r1, .r2 = r2 ))),
-  linkinv = function(eta, extra = NULL) eta[, 1], 
+  linkinv = function(eta, extra = NULL) eta[, 1],
   last = eval(substitute(expression({
     misc$link <-    c(mu = .lmu , sd = .lsd )
 
@@ -106,7 +105,7 @@
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
-    sd <- eta2theta(eta[, 2], .lsd, earg = .esd )
+    sd <- eta2theta(eta[, 2], .lsd , earg = .esd )
 
     if (!summation)
       stop("cannot handle 'summation = FALSE' yet")
@@ -128,6 +127,15 @@
             .emu = emu, .esd = esd,
             .r1 = r1, .r2 = r2 ))),
   vfamily = c("double.cens.normal"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mu <- eta[, 1]
+    sd <- eta2theta(eta[, 2], .lsd , earg = .esd )
+    okay1 <- all(is.finite(mu)) &&
+             all(is.finite(sd)) && all(0 < sd)
+    okay1
+  }, list( .lmu = lmu, .lsd = lsd,
+           .emu = emu, .esd = esd,
+           .r1 = r1, .r2 = r2 ))),
   deriv = eval(substitute(expression({
     sd <- eta2theta(eta[, 2], .lsd, earg =.esd)
 
@@ -159,11 +167,11 @@
     Q.1 <- ifelse(q1 == 0, 1, q1)  # Saves division by 0 below; not elegant
     Q.2 <- ifelse(q2 == 0, 1, q2)  # Saves division by 0 below; not elegant
 
-    ed2l.dmu2 <- 1 / (sd^2) + 
+    ed2l.dmu2 <- 1 / (sd^2) +
                  ((fz1*(z1+fz1/Q.1) - fz2*(z2-fz2/Q.2)) / sd^2) / (pee*w)
     ed2l.dmusd <- ((fz1-fz2 + z1*fz1*(z1+fz1/Q.1) -
                   z2*fz2*(z2-fz2/Q.2)) / sd^2) / (pee*w)
-    ed2l.dsd2 <- 2 / (sd^2) + 
+    ed2l.dsd2 <- 2 / (sd^2) +
                  ((z1*fz1-z2*fz2 + z1^2 *fz1 *(z1+fz1/Q.1) -
                  z2^2 *fz2*(z2-fz2/Q.2)) / sd^2) / (pee*w)
 
@@ -211,7 +219,7 @@ dbisa <- function(x, scale = 1, shape, log = FALSE) {
 pbisa <- function(q, scale = 1, shape,
                   lower.tail = TRUE, log.p = FALSE) {
 
-  
+
   ans <- pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape,
                lower.tail = lower.tail, log.p = log.p)
   ans[scale < 0 | shape < 0] <- NaN
@@ -260,7 +268,7 @@ qbisa <- function(p, scale = 1, shape,
       ans[ln.p == -Inf] <- Inf
       ans[ln.p == 0] <- 0
      #ans[ln.p > 0] <- NaN
-    } else { 
+    } else {
       ans <- ifelse(p > 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
      #ans[p < 0] <- NaN
       ans[p == 0] <- Inf
@@ -413,11 +421,19 @@ rbisa <- function(n, scale = 1, shape) {
            .eshape = eshape, .escale = escale ))),
 
   vfamily = c("bisa"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    sc <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    sh <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(sc)) && all(0 < sc) &&
+             all(is.finite(sh)) && all(0 < sh)
+    okay1
+  }, list( .lshape = lshape, .lscale = lscale,
+           .eshape = eshape, .escale = escale ))),
   deriv = eval(substitute(expression({
     sc <- eta2theta(eta[, 1], .lscale , earg = .escale )
     sh <- eta2theta(eta[, 2], .lshape , earg = .eshape )
 
-    dl.dsh <- ((y/sc - 2 + sc/y) / sh^2 - 1) / sh 
+    dl.dsh <- ((y/sc - 2 + sc/y) / sh^2 - 1) / sh
     dl.dsc <- -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
              (sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
 
diff --git a/R/family.ts.R b/R/family.ts.R
index 0975e7b..1dde9a0 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -35,7 +35,7 @@ rrar.Ak1 <- function(MM, coeffs, Ranks., aa) {
 
 
 rrar.Di <- function(i, Ranks.) {
-  if (Ranks.[1] == Ranks.[i]) diag(Ranks.[i]) else 
+  if (Ranks.[1] == Ranks.[i]) diag(Ranks.[i]) else
   rbind(diag(Ranks.[i]),
         matrix(0, Ranks.[1] - Ranks.[i], Ranks.[i]))
 }
@@ -157,7 +157,7 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
       nn <- nrow(x)  # original n
       indices <- 1:plag
 
-      copy.X.vlm <- TRUE  # X.vlm.save matrix changes at each iteration 
+      copy.X.vlm <- TRUE  # X.vlm.save matrix changes at each iteration
 
       dsrank <- -sort(-Ranks.)  # ==rev(sort(Ranks.))
       if (any(dsrank != Ranks.))
@@ -189,7 +189,7 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
                         runif(aa+sum(Ranks.)*MM)
       temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
                          aa, uu, nn, new.coeffs)
-      X.vlm.save <- temp8$UU %*% temp8$Ht 
+      X.vlm.save <- temp8$UU %*% temp8$Ht
 
       if (!length(etastart)) {
         etastart <- X.vlm.save %*% new.coeffs
@@ -208,7 +208,7 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
       y <- y[-indices, , drop = FALSE]
       w <- w[-indices]
       n.save <- n <- nn - plag
-  }), list( .Ranks = Ranks, .coefstart = coefstart ))), 
+  }), list( .Ranks = Ranks, .coefstart = coefstart ))),
 
   linkinv = function(eta, extra = NULL) {
     aa <- extra$aa
@@ -243,13 +243,17 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
       misc$Phimatrices[[ii]] <- Ak1 %*% Dmatrices[[ii]] %*%
                                 t(Cmatrices[[ii]])
     }
-    misc$Z <- y.save %*% t(solve(Ak1)) 
+    misc$Z <- y.save %*% t(solve(Ak1))
   }),
   vfamily = "rrar",
+  validparams = function(eta, y, extra = NULL) {
+    okay1 <- TRUE
+    okay1
+  },
   deriv = expression({
     temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
                        aa, uu, nn, new.coeffs)
-    X.vlm.save <- temp8$UU %*% temp8$Ht 
+    X.vlm.save <- temp8$UU %*% temp8$Ht
 
     extra$coeffs <- new.coeffs
 
@@ -322,7 +326,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
     tt.index <- (1 + plag):nrow(x)
     p.lm <- ncol(x)
 
-    copy.X.vlm <- TRUE  # x matrix changes at each iteration 
+    copy.X.vlm <- TRUE  # x matrix changes at each iteration
 
     if ( .link == "logit"   || .link == "probit" ||
          .link == "cloglog" || .link == "cauchit") {
@@ -341,7 +345,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
     new.coeffs <- .coefstart  # Needed for iter = 1 of @weight
     new.coeffs <- if (length(new.coeffs))
                     rep_len(new.coeffs, p.lm + plag) else
-                    c(rnorm(p.lm, sd = 0.1), rep_len(0, plag)) 
+                    c(rnorm(p.lm, sd = 0.1), rep_len(0, plag))
 
     if (!length(etastart)) {
       etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:p.lm]
@@ -349,8 +353,8 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
 
     x <- cbind(x, matrix(NA_real_, n, plag))  # Right size now
     dx <- dimnames(x.save)
-    morenames <- paste("(lag", 1:plag, ")", sep = "") 
-    dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames)) 
+    morenames <- paste("(lag", 1:plag, ")", sep = "")
+    dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames))
 
     x <- x[-indices, , drop = FALSE]
     class(x) <- "matrix"
@@ -364,7 +368,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
       more[[ii]] <- ii + max(unlist(attr(x.save, "assign")))
     attr(x, "assign") <- c(attr(x.save, "assign"), more)
   }), list( .link = link, .p.ar.lag = p.ar.lag,
-            .coefstart = coefstart, .earg = earg ))), 
+            .coefstart = coefstart, .earg = earg ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     eta2theta(eta, link = .link , earg = .earg)
   }, list( .link = link, .earg = earg ))),
@@ -410,10 +414,15 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
          new.coeffs[1:p.lm])  # +
     }
 
-    true.eta <- realfv + offset  
+    true.eta <- realfv + offset
     mu <- family at linkinv(true.eta, extra)  # overwrite mu with correct one
   }), list( .link = link, .earg = earg ))),
   vfamily = c("garma", "vglmgam"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mu <- eta2theta(eta, link = .link , earg = .earg )
+    okay1 <- all(is.finite(mu))
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     dl.dmu <- switch( .link ,
                   identitylink = y-mu,
@@ -429,7 +438,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
             .earg = earg ))),
 
   weight = eval(substitute(expression({
-    x[, 1:p.lm] <- x.save[tt.index, 1:p.lm]  # Reinstate 
+    x[, 1:p.lm] <- x.save[tt.index, 1:p.lm]  # Reinstate
 
     for (ii in 1:plag) {
         temp <- theta2eta(y.save[tt.index-ii], .link , earg = .earg )
@@ -443,7 +452,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
     class(x) <- "matrix" # Added 20020227; 20040226
 
     if (iter == 1)
-      old.coeffs <- new.coeffs 
+      old.coeffs <- new.coeffs
 
     X.vlm.save <- lm2vlm.model.matrix(x, Hlist, xij = control$xij)
 
@@ -526,12 +535,12 @@ dAR1 <- function(x,
                  var.error = 1, ARcoef1 = 0.0,
                  type.likelihood = c("exact", "conditional"),
                  log = FALSE) {
-  
+
   type.likelihood <- match.arg(type.likelihood,
                                c("exact", "conditional"))[1]
-  
+
   is.vector.x <- is.vector(x)
-  
+
   x <- as.matrix(x)
   drift <- as.matrix(drift)
   var.error <- as.matrix(var.error)
@@ -542,29 +551,29 @@ dAR1 <- function(x,
   drift      <- matrix(drift,     LLL, UUU)
   var.error  <- matrix(var.error, LLL, UUU)
   rho        <- matrix(ARcoef1,   LLL, UUU)
-  
+
   if (any(abs(rho) > 1))
     warning("Values of argument 'ARcoef1' are greater ",
             "than 1 in absolute value")
-  
-  if (!is.logical(log.arg <- log) || length(log) != 1) 
+
+  if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("Bad input for argument 'log'")
   rm(log)
-  
+
   ans <- matrix(0.0, LLL, UUU)
-  
+
   var.noise <- var.error / (1 - rho^2)
-  
+
   ans[ 1, ] <- dnorm(x    = x[1, ],
-                     mean = drift[ 1, ] / (1 - rho[1, ]), 
+                     mean = drift[ 1, ] / (1 - rho[1, ]),
                      sd   = sqrt(var.noise[1, ]), log = log.arg)
   ans[-1, ] <- dnorm(x    = x[-1, ],
                      mean = drift[-1, ] + rho[-1, ] * x[-nrow(x), ],
                      sd   = sqrt(var.error[-1, ]), log = log.arg)
-  
+
   if (type.likelihood == "conditional")
     ans[1, ] <- NA
-  
+
   if (is.vector.x) as.vector(ans) else ans
 }
 
@@ -582,7 +591,7 @@ AR1.control <- function(epsilon  = 1e-6,
 
 
 
-AR1 <-
+ AR1 <-
   function(ldrift = "identitylink",
            lsd  = "loge",
            lvar = "loge",
@@ -600,61 +609,61 @@ AR1 <-
            print.EIM = FALSE,
            zero = c(if (var.arg) "var" else "sd", "rho")  # "ARcoeff1"
            ) {
-    
+
     type.likelihood <- match.arg(type.likelihood,
                                  c("exact", "conditional"))[1]
-    
+
     if (length(isd) && !is.Numeric(isd, positive = TRUE))
       stop("Bad input for argument 'isd'")
-    
+
     if (length(ivar) && !is.Numeric(ivar, positive = TRUE))
       stop("Bad input for argument 'ivar'")
-    
+
     if (length(irho) &&
           (!is.Numeric(irho) || any(abs(irho) > 1.0)))
       stop("Bad input for argument 'irho'")
-    
+
     type.EIM <- match.arg(type.EIM, c("exact", "approximate"))[1]
     poratM   <- (type.EIM == "exact")
-    
+
     if (!is.logical(nodrift) ||
           length(nodrift) != 1)
       stop("argument 'nodrift' must be a single logical")
-    
+
     if (!is.logical(var.arg) ||
           length(var.arg) != 1)
       stop("argument 'var.arg' must be a single logical")
-    
+
     if (!is.logical(print.EIM))
       stop("Invalid 'print.EIM'.")
-    
+
     ismn <- idrift
     lsmn <- as.list(substitute(ldrift))
     esmn <- link2list(lsmn)
-    lsmn <- attr(esmn, "function.name")     
-    
+    lsmn <- attr(esmn, "function.name")
+
     lsdv <- as.list(substitute(lsd))
     esdv <- link2list(lsdv)
     lsdv <- attr(esdv, "function.name")
-    
+
     lvar  <- as.list(substitute(lvar))
     evar  <- link2list(lvar)
     lvar  <- attr(evar, "function.name")
-    
+
     lrho <- as.list(substitute(lrho))
     erho <- link2list(lrho)
-    lrho <- attr(erho, "function.name")     
-    
+    lrho <- attr(erho, "function.name")
+
     n.sc <- if (var.arg) "var" else "sd"
     l.sc <- if (var.arg) lvar else lsdv
     e.sc <- if (var.arg) evar else esdv
-    
-    new("vglmff", 
+
+    new("vglmff",
         blurb = c(ifelse(nodrift, "Two", "Three"),
                   "-parameter autoregressive process of order-1\n\n",
                   "Links:       ",
                   if (nodrift) "" else
-                    paste(namesof("drift", lsmn, earg = esmn), ", ", 
+                    paste(namesof("drift", lsmn, earg = esmn), ", ",
                           sep = ""),
                   namesof(n.sc , l.sc, earg = e.sc), ", ",
                   namesof("rho", lrho, earg = erho), "\n",
@@ -667,24 +676,24 @@ AR1 <-
                   "Mean:        drift / (1 - rho)", "\n",
                   "Correlation: rho = ARcoef1", "\n",
                   "Variance:    sd^2 / (1 - rho^2)"),
-        
+
      constraints = eval(substitute(expression({
-       
+
        M1 <- 3 - .nodrift
        dotzero <- .zero
        # eval(negzero.expression.VGAM)
-       constraints <- 
+       constraints <-
          cm.zero.VGAM(constraints, x = x, zero = .zero , M = M,
                       predictors.names = predictors.names,
                       M1 = M1)
-       
+
         }), list( .zero = zero,
                   .nodrift = nodrift ))),
-     
+
      infos = eval(substitute(function(...) {
-       list(M1 = 3 - .nodrift , 
-            Q1 = 1, 
-            expected = TRUE, 
+       list(M1 = 3 - .nodrift ,
+            Q1 = 1,
+            expected = TRUE,
             multipleResponse = TRUE,
             type.likelihood = .type.likelihood ,
             ldrift = if ( .nodrift ) NULL else .lsmn ,
@@ -700,7 +709,7 @@ AR1 <-
               .esmn = esmn, .evar = evar, .esdv = esdv, .erho = erho,
               .type.likelihood = type.likelihood,
               .nodrift = nodrift, .zero = zero))),
-     
+
      initialize = eval(substitute(expression({
        extra$M1 <- M1 <- 3 - .nodrift
        check <- w.y.check(w = w, y = y,
@@ -708,55 +717,55 @@ AR1 <-
                           ncol.w.max = Inf,
                           ncol.y.max = Inf,
                           out.wy = TRUE,
-                          colsyperw = 1, 
+                          colsyperw = 1,
                           maximize = TRUE)
        w <- check$w
        y <- check$y
        if ( .type.likelihood == "conditional") {
-         w[1, ] <- 1.0e-6 
+         w[1, ] <- 1.0e-6
        } else {
          if (!(.nodrift ))
            w[1, ] <- 1.0e-1
        }
-         
+
        NOS <- ncoly <- ncol(y)
        n <- nrow(y)
        M <- M1*NOS
-       
+
        var.names <- param.names("var", NOS)
        sdv.names <- param.names("sd",  NOS)
        smn.names <- if ( .nodrift ) NULL else
          param.names("drift",   NOS)
        rho.names <- param.names("rho", NOS)
-       
+
        mynames1 <- smn.names
-       mynames2 <- if ( .var.arg ) var.names else sdv.names 
+       mynames2 <- if ( .var.arg ) var.names else sdv.names
        mynames3 <- rho.names
-       
+
        predictors.names <-
          c(if ( .nodrift ) NULL else
            namesof(smn.names, .lsmn , earg = .esmn , tag = FALSE),
-           if ( .var.arg ) 
+           if ( .var.arg )
              namesof(var.names, .lvar , earg = .evar , tag = FALSE) else
                namesof(sdv.names, .lsdv , earg = .esdv , tag = FALSE),
            namesof(rho.names, .lrho , earg = .erho , tag = FALSE))
-       
+
        predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-       
-       if ( .nodrift ) 
+
+       if ( .nodrift )
         y <- scale(y, scale = FALSE)
-       
+
        if (!length(etastart)) {
          init.smn <- Init.mu(y = y, w = w, imethod = .imethod ,  # x = x,
                              imu = .ismn , ishrinkage = .ishrinkage ,
                              pos.only = FALSE)
-             
-         init.rho <- matrix(if (length( .irho )) .irho else 0.1, 
-                            n, NOS, byrow = TRUE)           
-         init.sdv <- matrix(if (length( .isdv )) .isdv else 1.0,  
-                            n, NOS, byrow = TRUE)          
+
+         init.rho <- matrix(if (length( .irho )) .irho else 0.1,
+                            n, NOS, byrow = TRUE)
+         init.sdv <- matrix(if (length( .isdv )) .isdv else 1.0,
+                            n, NOS, byrow = TRUE)
          init.var <- matrix(if (length( .ivar )) .ivar else 1.0,
-                            n, NOS, byrow = TRUE)           
+                            n, NOS, byrow = TRUE)
          for (jay in 1: NOS) {
            mycor <-  cor(y[-1, jay], y[-n, jay])
            init.smn[ , jay] <- mean(y[, jay]) * (1 - mycor)
@@ -767,27 +776,27 @@ AR1 <-
            if (!length( .isdv ))
              init.sdv[, jay] <- sqrt(init.var[, jay])
          }  # for
-         
+
          etastart <-
            cbind(if ( .nodrift ) NULL else
              theta2eta(init.smn, .lsmn , earg = .esmn ),
-             if ( .var.arg ) 
+             if ( .var.arg )
                theta2eta(init.var, .lvar , earg = .evar ) else
                  theta2eta(init.sdv, .lsdv , earg = .esdv ),
              theta2eta(init.rho, .lrho , earg = .erho ))
-         
+
          etastart <- etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
        }  # end of etastart
-       
+
      }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
                .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
                .ismn = ismn, .irho = irho, .isdv = isd , .ivar = ivar,
-               .type.likelihood = type.likelihood, 
+               .type.likelihood = type.likelihood,
                .ishrinkage = ishrinkage, .poratM  = poratM,
                .var.arg = var.arg,
                .nodrift = nodrift,
                .imethod = imethod ))),
-     
+
      linkinv = eval(substitute(function(eta, extra = NULL) {
        M1  <- 3 - .nodrift
        NOS <- ncol(eta)/M1
@@ -797,22 +806,22 @@ AR1 <-
        ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                            .lrho , earg = .erho )
        ar.smn / (1 - ar.rho)
-       
+
      }, list ( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
                .var.arg = var.arg, .type.likelihood = type.likelihood,
                .nodrift = nodrift,
                .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
-     
+
      last = eval(substitute(expression({
-       if (any(abs(ar.rho) > 1)) 
+       if (any(abs(ar.rho) > 1))
          warning("Regularity conditions are violated at the final",
                  "IRLS iteration, since 'abs(rho) > 1")
-       
+
        M1 <- extra$M1
-       
+
        temp.names <- c(mynames1, mynames2, mynames3)
        temp.names <- temp.names[interleave.VGAM(M1 * ncoly, M1 = M1)]
-       
+
        misc$link <- rep_len( .lrho , M1 * ncoly)
        misc$earg <- vector("list", M1 * ncoly)
        names(misc$link) <-
@@ -827,7 +836,7 @@ AR1 <-
          misc$earg[[M1*ii-1]] <- if ( .var.arg ) .evar else .esdv
          misc$earg[[M1*ii  ]] <- .erho
        }
-       
+
        misc$type.likelihood <- .type.likelihood
        misc$var.arg <- .var.arg
        misc$M1 <- M1
@@ -835,20 +844,20 @@ AR1 <-
        misc$imethod <- .imethod
        misc$multipleResponses <- TRUE
        misc$nodrift <- .nodrift
-       
+
      }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
                .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
                .irho = irho, .isdv = isd , .ivar = ivar,
-               .nodrift = nodrift, .poratM = poratM, 
+               .nodrift = nodrift, .poratM = poratM,
                .var.arg = var.arg, .type.likelihood = type.likelihood,
                .imethod = imethod ))),
-     
+
      loglikelihood = eval(substitute(
-       function(mu, y, w, residuals= FALSE, eta, 
+       function(mu, y, w, residuals= FALSE, eta,
                 extra = NULL, summation = TRUE) {
          M1  <- 3 - .nodrift
          NOS <- ncol(eta)/M1
-         
+
          if ( .var.arg ) {
            ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                                .lvar , earg = .evar )
@@ -857,25 +866,25 @@ AR1 <-
            ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                                .lsdv , earg = .esdv )
            ar.var <- ar.sdv^2
-         }  
+         }
          ar.smn <- if ( .nodrift ) 0 else
            eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                      .lsmn , earg = .esmn )
          ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                              .lrho , earg = .erho )
-         
+
          if (residuals) {
            stop("Loglikelihood not implemented yet to handle",
                 "residuals.")
          } else {
-           loglik.terms <- 
+           loglik.terms <-
              c(w) * dAR1(x = y,
                          drift = ar.smn,
                          var.error = ar.var,
                          type.likelihood = .type.likelihood ,
                          ARcoef1 = ar.rho, log = TRUE)
            loglik.terms <- as.matrix(loglik.terms)
-           
+
            if (summation) {
              sum(if ( .type.likelihood == "exact") loglik.terms else
                loglik.terms[-1, ] )
@@ -883,27 +892,56 @@ AR1 <-
              loglik.terms
            }
          }
-         
+
        }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
                 .var.arg = var.arg, .type.likelihood = type.likelihood,
                 .nodrift = nodrift,
-                .esmn = esmn, .erho = erho , 
+                .esmn = esmn, .erho = erho ,
                 .esdv = esdv, .evar = evar ))),
-     
+
      vfamily = c("AR1"),
-        
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+       M1    <- 3 - .nodrift
+       n     <- nrow(eta)
+       NOS   <- ncol(eta)/M1
+       ncoly <- ncol(as.matrix(y))
+
+       if ( .var.arg ) {
+         ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+                             .lvar , earg = .evar )
+         ar.sdv <- sqrt(ar.var)
+       } else {
+         ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+                             .lsdv , earg = .esdv )
+         ar.var <- ar.sdv^2
+       }
+       ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
+         eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+                   .lsmn , earg = .esmn )
+       ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
+                           .lrho , earg = .erho )
+    okay1 <- all(is.finite(ar.sdv)) && all(0 < ar.sdv) &&
+             all(is.finite(ar.smn)) &&
+             all(is.finite(ar.rho))
+    okay1
+   }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
+            .var.arg = var.arg, .type.likelihood = type.likelihood,
+            .nodrift = nodrift,
+            .esmn = esmn, .erho = erho ,
+            .esdv = esdv, .evar = evar ))),
+
      simslot = eval(substitute(
        function(object, nsim) {
-         
+
          pwts <- if (length(pwts <- object at prior.weights) > 0)
            pwts else weights(object, type = "prior")
          if (any(pwts != 1))
            warning("ignoring prior weights")
          eta <- predict(object)
-         fva <- fitted(object)      
+         fva <- fitted(object)
          M1  <- 3 - .nodrift
          NOS <- ncol(eta)/M1
-         
+
          if ( .var.arg ) {
            ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                                .lvar , earg = .evar )
@@ -912,13 +950,13 @@ AR1 <-
            ar.sdv <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                                .lsdv , earg = .esdv )
            ar.var <- ar.sdv^2
-         }  
+         }
          ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
            eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                      .lsmn , earg = .esmn )
          ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                              .lrho , earg = .erho )
-         
+
          ans <- array(0, c(nrow(eta), NOS, nsim))
          for (jay in 1:NOS) {
            ans[1, jay, ] <- rnorm(nsim, m = fva[1, jay],  # zz
@@ -930,19 +968,19 @@ AR1 <-
          }
          ans <- matrix(c(ans), c(nrow(eta) * NOS, nsim))
          ans
-         
+
        }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
                 .var.arg = var.arg, .type.likelihood = type.likelihood,
                 .nodrift = nodrift,
-                .esmn = esmn, .erho = erho , 
+                .esmn = esmn, .erho = erho ,
                 .esdv = esdv, .evar = evar ))),
-        
-        
+
+
      deriv = eval(substitute(expression({
        M1    <- 3 - .nodrift
        NOS   <- ncol(eta)/M1
        ncoly <- ncol(as.matrix(y))
-       
+
        if ( .var.arg ) {
          ar.var <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
                              .lvar , earg = .evar )
@@ -952,48 +990,48 @@ AR1 <-
                              .lsdv , earg = .esdv )
          ar.var <- ar.sdv^2
        }
-       
+
        ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
              eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
                        .lsmn , earg = .esmn )
-       
+
        ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                            .lrho , earg = .erho )
-       
+
        if (any(abs(ar.rho) < 1e-2))
          warning("Estimated values of 'rho' are too close to zero.")
-       
+
        help2   <- (length(colnames(x)) >= 2)
        myMeans <- matrix(colMeans(y), nrow = n, ncol = NOS, by = TRUE)
        yLag    <- matrix(y, ncol = NOS)
        temp4   <- matrix(0.0, nrow = n, ncol = NOS)
        temp4[-1, ] <- y[-1, , drop = FALSE] - ar.smn[-1, , drop = FALSE]
        yLag[-1, ]  <- y[-n, ]
-       
+
        temp1  <- matrix(0.0, nrow = n, ncol = NOS)
        temp1[-1, ] <- y[-1, , drop = FALSE] - (ar.smn[-1, ,drop = FALSE] +
                       ar.rho[-1, , drop = FALSE] * y[-n, , drop = FALSE])
        temp1[1, ]   <- y[1, ] - ar.smn[1, ]
        dl.dsmn      <- temp1 / ar.var
-       dl.dsmn[1, ] <- ( (y[1, ] - myMeans[1, ]) * 
+       dl.dsmn[1, ] <- ( (y[1, ] - myMeans[1, ]) *
                            (1 + ar.rho[1, ]) ) / ar.var[1, ]
-       
+
        if ( .var.arg ) {
          dl.dvarSD <-  temp1^2 / ( 2 * ar.var^2) - 1 / (2 * ar.var)
-         dl.dvarSD[1, ] <- ( (1 - ar.rho[1, ]^2) * (y[1, ] - 
+         dl.dvarSD[1, ] <- ( (1 - ar.rho[1, ]^2) * (y[1, ] -
             myMeans[1, ])^2 ) /(2 * ar.var[1, ]^2) - 1 / (2 * ar.var[1, ])
        } else {
          dl.dvarSD <- temp1^2 / ar.sdv^3 - 1 / ar.sdv
-         dl.dvarSD[1, ] <- ( (1 - ar.rho[1, ]^2) * 
+         dl.dvarSD[1, ] <- ( (1 - ar.rho[1, ]^2) *
             (y[1, ] - myMeans[1, ])^2 ) / ar.sdv[1, ]^3 - 1/ar.sdv[1, ]
        }
-       
-       dl.drho <- rbind(rep_len(0, 1), 
-                      ( (y[-n, , drop = FALSE] - myMeans[-n, ]) * 
+
+       dl.drho <- rbind(rep_len(0, 1),
+                      ( (y[-n, , drop = FALSE] - myMeans[-n, ]) *
                        temp1[-1, , drop = FALSE ] )/  ar.var[-1, ] )
        dl.drho[1, ] <- (ar.rho[1, ] * (y[1, ] - myMeans[1, ])^2 ) /
-         ar.var[1, ] - ar.rho[1, ] / (1 - ar.rho[1, ]^2) 
-       
+         ar.var[1, ] - ar.rho[1, ] / (1 - ar.rho[1, ]^2)
+
        dsmn.deta <- dtheta.deta(ar.smn, .lsmn , earg = .esmn )
        drho.deta <- dtheta.deta(ar.rho, .lrho , earg = .erho )
        if ( .var.arg ) {
@@ -1001,71 +1039,71 @@ AR1 <-
        } else {
          dvarSD.deta <- dtheta.deta(ar.sdv, .lsdv , earg = .esdv )
        }
-       
+
        myderiv <-
          c(w) * cbind(if ( .nodrift ) NULL else dl.dsmn * dsmn.deta,
-                      dl.dvarSD * dvarSD.deta, 
+                      dl.dvarSD * dvarSD.deta,
                       dl.drho * drho.deta)
        myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
        myderiv
-       
+
      }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
                .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
                .nodrift = nodrift ,
-               .var.arg = var.arg, 
+               .var.arg = var.arg,
                .type.likelihood = type.likelihood ))),
-        
+
      weight = eval(substitute(expression({
-       
+
        ned2l.dsmn   <- 1 / ar.var
-       ned2l.dsmn[1, ] <- ( (1 + ar.rho[1, ]) / (1 - ar.rho[1, ]) ) * 
-                                  (1 / ar.var[1, ])   
+       ned2l.dsmn[1, ] <- ( (1 + ar.rho[1, ]) / (1 - ar.rho[1, ]) ) *
+                                  (1 / ar.var[1, ])
        # Here, same results for the first and t > 1 observations.
        ned2l.dvarSD <- if ( .var.arg ) 1 / (2 * ar.var^2) else 2 / ar.var
-       gamma0  <- (1 - help2) * ar.var/(1 - ar.rho^2) + 
-                          help2 * (yLag - myMeans)^2       
+       gamma0  <- (1 - help2) * ar.var/(1 - ar.rho^2) +
+                          help2 * (yLag - myMeans)^2
        ned2l.drho  <- gamma0 / ar.var
-       ned2l.drho[1, ] <- 2 * ar.rho[1, ]^2 / (1 - ar.rho[1, ]^2)^2 
+       ned2l.drho[1, ] <- 2 * ar.rho[1, ]^2 / (1 - ar.rho[1, ]^2)^2
        ned2l.drdv  <- matrix(0.0, nrow = n, ncol = NOS)
-       ned2l.drdv[1, ] <- 2 * temp4[1, ] / 
+       ned2l.drdv[1, ] <- 2 * temp4[1, ] /
                             ((1 - temp4[1, ]^2) * ar.sdv[1, ])
        ncol.wz <- M + (M - 1) + ifelse( .nodrift , 0, M - 2)
        ncol.pf <- 3 * (M + ( .nodrift ) ) - 3
        wz      <- matrix(0, nrow = n, ncol = ncol.wz)
        helpPor <- .poratM
-       
-       pf.mat  <- if (helpPor) 
+
+       pf.mat  <- if (helpPor)
          AR1EIM(x = scale(y, scale = FALSE),
                 var.arg  = .var.arg ,
                 p.drift  = 0,
                 WNsd     = ar.sdv,
-                ARcoeff1 = ar.rho ) else 
+                ARcoeff1 = ar.rho ) else
                   array(0.0, dim= c(n, NOS, ncol.pf))
-       
+
        if (!( .nodrift ))
          wz[, M1*(1:NOS) - 2] <-  ( (helpPor) * pf.mat[, , 1] +
                     (1 - (helpPor)) * ned2l.dsmn) * dsmn.deta^2
-       wz[, M1*(1:NOS) - 1]  <- ( (helpPor) * pf.mat[, , 2 ] + 
+       wz[, M1*(1:NOS) - 1]  <- ( (helpPor) * pf.mat[, , 2 ] +
                       (1 - (helpPor)) * ned2l.dvarSD) * dvarSD.deta^2
-       wz[, M1*(1:NOS)    ]   <- ( (helpPor) * pf.mat[, , 3] + 
+       wz[, M1*(1:NOS)    ]   <- ( (helpPor) * pf.mat[, , 3] +
                       (1 - (helpPor)) * ned2l.drho) * drho.deta^2
-         wz[, M1*(1:NOS) + (M - 1) ] <- ((helpPor) * pf.mat[, , 4] + 
+         wz[, M1*(1:NOS) + (M - 1) ] <- ((helpPor) * pf.mat[, , 4] +
                 (1 - (helpPor)) * ned2l.drdv) * drho.deta * dvarSD.deta
-       
+
        wz <- w.wz.merge(w = w, wz = wz, n = n,
                         M = ncol.wz, ndepy = NOS)
-       
+
        if ( .print.EIM ) {
          wz2 <- matrix(0, nrow = n, ncol = ncol.wz)
          if (!(.nodrift ))
            wz2[, M1*(1:NOS) - 2] <- ned2l.dsmn
-         wz2[, M1*(1:NOS) - 1] <- 
+         wz2[, M1*(1:NOS) - 1] <-
                    if ( .var.arg ) 1 / (2 * ar.var^2) else 2 / ar.var
-         wz2[, M1*(1:NOS)    ] <- ned2l.drho 
-         
+         wz2[, M1*(1:NOS)    ] <- ned2l.drho
+
          wz2 <- wz2[, interleave.VGAM( M1 * NOS, M1)]
          if (NOS > 1) {
-           
+
            matAux1  <- matAux2  <- matrix(NA_real_, nrow = n, ncol = NOS)
            approxMat <- array(wz2[, 1:(M1*NOS)], dim = c(n, M1, NOS))
            for (kk in 1:NOS) {
@@ -1076,25 +1114,25 @@ AR1 <-
            colnames(matAux) <- c(paste("ApproxEIM.R",1:NOS, sep = ""),
                                  if (!(.poratM )) NULL else
                                    paste("ExactEIM.R",1:NOS, sep = ""))
-           
+
            matAux <- matAux[, interleave.VGAM( (1 + .poratM) * NOS,
                                                M1 = 1 + .poratM)]
          } else {
-           
-           matAux <- cbind(rowSums(wz2), 
-                           if (helpPor) 
+
+           matAux <- cbind(rowSums(wz2),
+                           if (helpPor)
                              rowSums(pf.mat[, 1, ][, 1:3]) else NULL)
-           colnames(matAux) <- c("Approximate", 
+           colnames(matAux) <- c("Approximate",
                                  if (helpPor) "Exact" else NULL)
-           
+
          }
          print(matAux[1:10, , drop = FALSE])
-       }   
-       
+       }
+
        wz
-       
+
      }), list( .var.arg = var.arg, .type.likelihood = type.likelihood,
-               .nodrift = nodrift, .poratM  = poratM, 
+               .nodrift = nodrift, .poratM  = poratM,
                .print.EIM = print.EIM )))
     )
   }
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 0a78f3b..c215cc7 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -62,6 +62,19 @@
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("theta", "nu"),
+         ltheta = .ltheta ,
+         lnu    = .lnu ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .ltheta = ltheta,
+           .lnu = lnuvec ))),
+
   initialize = eval(substitute(expression({
     w.y.check(w, y)
 
@@ -107,9 +120,7 @@
            .etheta = etheta, .enuvec = enuvec ))),
   last = eval(substitute(expression({
     misc$link <-    c("theta" = .ltheta , "nu" = .lnuvec )
-
     misc$earg <- list("theta" = .etheta , "nu" = .enuvec )
-
   }), list( .ltheta = ltheta, .lnuvec = lnuvec,
             .etheta = etheta, .enuvec = enuvec ))),
   loglikelihood = eval(substitute(
@@ -134,6 +145,16 @@
   }, list( .ltheta = ltheta, .lnuvec = lnuvec,
            .etheta = etheta, .enuvec = enuvec ))),
   vfamily = c("mccullagh89"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
+    nuvec <- eta2theta(eta[, 2], .lnuvec , earg = .enuvec )
+    okay1 <- all(is.finite(Theta)) && all(abs(Theta) < 1) &&
+             all(is.finite(nuvec)) && all(-0.5 < nuvec)
+    okay1
+  }, list( .ltheta = ltheta, .lnuvec = lnuvec,
+           .etheta = etheta, .enuvec = enuvec ))),
+
+
   deriv = eval(substitute(expression({
     Theta <- eta2theta(eta[, 1], .ltheta , earg = .etheta )
     nuvec <- eta2theta(eta[, 2], .lnuvec , earg = .enuvec )
@@ -150,12 +171,12 @@
   }), list( .ltheta = ltheta, .lnuvec = lnuvec,
             .etheta = etheta, .enuvec = enuvec ))),
   weight = eval(substitute(expression({
-    d2l.dTheta2 <- (2 * nuvec^2 / (1+nuvec)) / (1-Theta^2)
-    d2l.dnuvec2 <- trigamma(nuvec+0.5) - trigamma(nuvec+1)
+    ned2l.dTheta2 <- (2 * nuvec^2 / (1+nuvec)) / (1-Theta^2)
+    ned2l.dnuvec2 <- trigamma(nuvec+0.5) - trigamma(nuvec+1)
 
     wz <- matrix(NA_real_, n, M)  # diagonal matrix
-    wz[, iam(1, 1, M)] <- d2l.dTheta2 * dTheta.deta^2
-    wz[, iam(2, 2, M)] <- d2l.dnuvec2 * dnuvec.deta^2
+    wz[, iam(1, 1, M)] <- ned2l.dTheta2 * dTheta.deta^2
+    wz[, iam(2, 2, M)] <- ned2l.dnuvec2 * dnuvec.deta^2
 
     c(w) * wz
   }), list( .ltheta = ltheta, .lnuvec = lnuvec ))))
@@ -164,215 +185,6 @@
 
 
 
-hzeta.control <- function(save.weights = TRUE, ...) {
-  list(save.weights = save.weights)
-}
-
-
-
- hzeta <- function(link = "loglog", ialpha = NULL, nsimEIM = 100) {
-
-  stopifnot(ialpha > 0)
-  stopifnot(nsimEIM > 10,
-            length(nsimEIM) == 1,
-            nsimEIM == round(nsimEIM))
-
-
-
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-  new("vglmff",
-  blurb = c("Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - ",
-            "(2y+1)^(-alpha),\n",
-            "    alpha>0, y = 1, 2,....\n\n",
-            "Link:    ",
-            namesof("alpha", link, earg = earg), "\n\n",
-            "Mean:     (1-2^(-alpha)) * zeta(alpha) if alpha>1",
-            "\n",
-            "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
-  initialize = eval(substitute(expression({
-
-    w.y.check(w = w, y = y,
-              Is.integer.y = TRUE,
-              Is.positive.y = TRUE)
-
-
-    predictors.names <-
-      namesof("alpha", .link , earg = .earg , tag = FALSE)
-
-    if (!length(etastart)) {
-      a.init <- if (length( .ialpha)) .ialpha else {
-        if ((meany <- weighted.mean(y, w)) < 1.5) 3.0 else
-        if (meany < 2.5) 1.4 else 1.1 
-      }
-      a.init <- rep_len(a.init, n) 
-      etastart <- theta2eta(a.init, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg, .ialpha = ialpha ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .link , earg = .earg )
-    mu <- (1-2^(-alpha)) * zeta(alpha)
-    mu[alpha <= 1] <- Inf
-    mu
-  }, list( .link = link, .earg = earg ))),
-  last = eval(substitute(expression({
-    misc$link <-    c(alpha = .link)
-
-    misc$earg <- list(alpha = .earg )
-
-    misc$nsimEIM <- .nsimEIM
-
-  }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    alpha <- eta2theta(eta, .link , earg = .earg )
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <- c(w) * dhzeta(x = y, alpha = alpha, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("hzeta"),
-
-
-
-
-  simslot = eval(substitute(
-  function(object, nsim) {
-
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    eta <- predict(object)
-    alpha <- eta2theta(eta, .link , earg = .earg ) 
-    rhzeta(nsim * length(alpha), alpha = alpha)
-  }, list( .link = link, .earg = earg ))),
-
-
-
-  deriv = eval(substitute(expression({
-    alpha <- eta2theta(eta, .link , earg = .earg ) 
-
-    dalpha.deta <- dtheta.deta(alpha, .link , earg = .earg )
-
-    d3 <- deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
-                 "alpha", hessian = FALSE)
-    eval.d3 <- eval(d3)
-
-    dl.dalpha <-  attr(eval.d3, "gradient")
-
-    c(w) * dl.dalpha * dalpha.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = eval(substitute(expression({
-    sd3 <- deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
-                  "alpha", hessian = FALSE)
-    run.var <- 0
-    for (ii in 1:( .nsimEIM )) {
-      ysim <- rhzeta(n, alpha = alpha)
-      eval.sd3 <- eval(sd3)
-      dl.dalpha <-  attr(eval.d3, "gradient")
-      rm(ysim)
-      temp3 <- dl.dalpha
-      run.var <- ((ii-1) * run.var + temp3^2) / ii
-    }
-    wz <- if (intercept.only)
-        matrix(colMeans(cbind(run.var)),
-               n, dimm(M), byrow = TRUE) else cbind(run.var)
-
-    wz <- wz * dalpha.deta^2
-    c(w) * wz
-  }), list( .nsimEIM = nsimEIM ))))
-}
-
-
-
-
-dhzeta <- function(x, alpha, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
-
-  if (!is.Numeric(alpha, positive = TRUE))
-    stop("'alpha' must be numeric and have positive values")
-
-  nn <- max(length(x), length(alpha))
-  if (length(x)     != nn) x     <- rep_len(x,     nn)
-  if (length(alpha) != nn) alpha <- rep_len(alpha, nn)
-
-  ox <- !is.finite(x)
-  zero <- ox | round(x) != x | x < 1
-  ans <- rep_len(0, nn)
-  ans[!zero] <- (2*x[!zero]-1)^(-alpha[!zero]) -
-                (2*x[!zero]+1)^(-alpha[!zero])
-  if (log.arg) log(ans) else ans
-}
-
-
-
-phzeta <- function(q, alpha, log.p = FALSE) {
-
-
-  nn <- max(length(q), length(alpha))
-  q     <- rep_len(q,     nn)
-  alpha <- rep_len(alpha, nn)
-  oq <- !is.finite(q)
-  zero <- oq | q < 1
-  q <- floor(q)
-  ans <- 0 * q
-  ans[!zero] <- 1 - (2*q[!zero]+1)^(-alpha[!zero])
-
-  ans[q == -Inf] <- 0  # 20141215 KaiH
-  ans[q ==  Inf] <- 1  # 20141215 KaiH
-
-  ans[alpha <= 0] <- NaN
-  if (log.p) log(ans) else ans
-}
-
-
-
-qhzeta <- function(p, alpha) {
-
-  if (!is.Numeric(p, positive = TRUE) ||
-      any(p >= 1))
-    stop("argument 'p' must have values inside the interval (0,1)")
-
-  nn <- max(length(p), length(alpha))
-  p     <- rep_len(p,     nn)
-  alpha <- rep_len(alpha, nn)
-  ans <- (((1 - p)^(-1/alpha) - 1) / 2)  # p is in (0,1)
-  ans[alpha <= 0] <- NaN
-  floor(ans + 1)
-}
-
-
-rhzeta <- function(n, alpha) {
-
-
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           length.arg = 1, positive = TRUE))
-              stop("bad input for argument 'n'") else n
-
-  alpha <- rep_len(alpha, use.n)
-  ans <- (runif(use.n)^(-1/alpha) - 1) / 2
-  ans[alpha <= 0] <- NaN
-  floor(ans + 1)
-}
-
-
-
-
 
 
  dirmultinomial <- function(lphi = "logit",
@@ -417,6 +229,17 @@ rhzeta <- function(n, alpha) {
                            constraints, apply.int = TRUE)
     constraints <- cm.zero.VGAM(constraints, x = x, .ZERO , M)
   }), list( .parallel = parallel, .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = NA,
+         Q1 = NA,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("phi"),
+         lphi = .lphi ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .lphi = lphi ))),
+
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
 
@@ -544,6 +367,15 @@ rhzeta <- function(n, alpha) {
     }
   }, list( .ephi = ephi, .lphi = lphi ))),
   vfamily = c("dirmultinomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    probs <- cbind(exp(eta[, -M]), 1)
+    probs <- prop.table(probs, 1)
+    phi <- eta2theta(eta[, M], .lphi , earg = .ephi )
+    okay1 <- all(is.finite(probs)) && all(0 < probs & probs < 1) &&
+             all(is.finite(phi  )) && all(0 < phi   & phi   < 1)
+    okay1
+  }, list( .ephi = ephi, .lphi = lphi ))),
+
   deriv = eval(substitute(expression({
     probs <- cbind(exp(eta[, -M]), 1)
     probs <- prop.table(probs, 1)
@@ -567,7 +399,8 @@ rhzeta <- function(n, alpha) {
           if (ycount[iii, jay] > 0) {
             PHI <- phi[iii]
             dl.dphi[iii] <- dl.dphi[iii] +
- sum((rrr-1-probs[iii, jay]) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI))
+              sum((rrr-1-probs[iii, jay]) / ((1-PHI)*probs[iii, jay] +
+                  (rrr-1)*PHI))
 
             tmp9 <- (1-PHI) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)
             if (jay < M) {
@@ -587,10 +420,10 @@ rhzeta <- function(n, alpha) {
             (rrr-1)*PHI)
           tmp9 <- (1-PHI) / ((1-PHI)*probs[index, jay] + (rrr-1)*PHI)
           if (jay < M) {
-              dl.dprobs[index, jay] <- dl.dprobs[index, jay] + tmp9
+            dl.dprobs[index, jay] <- dl.dprobs[index, jay] + tmp9
           } else {
-              for (jay2 in 1:(M-1))
-                  dl.dprobs[index, jay2] <- dl.dprobs[index, jay2] - tmp9
+            for (jay2 in 1:(M-1))
+              dl.dprobs[index, jay2] <- dl.dprobs[index, jay2] - tmp9
           }
         }
       }
@@ -621,82 +454,82 @@ rhzeta <- function(n, alpha) {
       wz <- matrix(0, n, dimm(M))
       loopOveri <- (n < maxomega)
       if (loopOveri) {
-          for (iii in 1:n) {
-              rrr <- 1:omega[iii]  # A vector
-              PHI <- phi[iii]
-              pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+        for (iii in 1:n) {
+          rrr <- 1:omega[iii]  # A vector
+          PHI <- phi[iii]
+          pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+                                           size = omega[iii],
+          shape1 <- probs[iii, M]*(1/PHI-1),
+          shape2 <- (1-probs[iii, M])*(1/PHI-1))  # A vector
+          denomM <- ((1-PHI)*probs[iii, M] + (rrr-1)*PHI)^2  # A vector
+          wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
+            sum(probs[iii, M]^2 * pYiM.ge.rrr / denomM) -
+            sum(1 / (1 + (rrr-2)*PHI)^2)
+          for (jay in 1:(M-1)) {
+              denomj <- ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)^2
+              pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
                                                size = omega[iii],
-                  shape1<-probs[iii, M]*(1/PHI-1),
-                  shape2<-(1-probs[iii, M])*(1/PHI-1))  # A vector
-              denomM <- ((1-PHI)*probs[iii, M] + (rrr-1)*PHI)^2  # A vector
+                  shape1<-probs[iii, jay]*(1/PHI-1),
+                  shape2<-(1-probs[iii, jay])*(1/PHI-1))
+              wz[iii, iam(jay, jay, M)] <- wz[iii, iam(jay, jay, M)] +
+                  sum(pYij.ge.rrr / denomj) +
+                  sum(pYiM.ge.rrr / denomM)
+              for (kay in jay:(M-1)) if (kay > jay) {
+                wz[iii, iam(jay, kay, M)] <- wz[iii, iam(jay, kay, M)] +
+                    sum(pYiM.ge.rrr / denomM)
+              }
+              wz[iii, iam(jay, M, M)] <- wz[iii, iam(jay, M, M)] +
+                      sum(probs[iii, jay] * pYij.ge.rrr / denomj) -
+                      sum(probs[iii, M]   * pYiM.ge.rrr / denomM)
               wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
-                      sum(probs[iii, M]^2 * pYiM.ge.rrr / denomM) -
-                      sum(1 / (1 + (rrr-2)*PHI)^2)
-              for (jay in 1:(M-1)) {
-                  denomj <- ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)^2
-                  pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
-                                                   size = omega[iii],
-                      shape1<-probs[iii, jay]*(1/PHI-1),
-                      shape2<-(1-probs[iii, jay])*(1/PHI-1))
-                  wz[iii, iam(jay, jay, M)] <- wz[iii, iam(jay, jay, M)] + 
-                      sum(pYij.ge.rrr / denomj) + 
-                      sum(pYiM.ge.rrr / denomM)
-                  for (kay in jay:(M-1)) if (kay > jay) {
-                    wz[iii, iam(jay, kay, M)] <- wz[iii, iam(jay, kay, M)] +
-                        sum(pYiM.ge.rrr / denomM)
-                  }
-                  wz[iii, iam(jay, M, M)] <- wz[iii, iam(jay, M, M)] +
-                          sum(probs[iii, jay] * pYij.ge.rrr / denomj) -
-                          sum(probs[iii, M]   * pYiM.ge.rrr / denomM)
-                  wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
-                          sum(probs[iii, jay]^2 * pYij.ge.rrr / denomj)
-              }  # end of jay loop
-          }  # end of iii loop
-      } else {
-          for (rrr in 1:maxomega) {
-              ind5 <- rrr <= omega
-              PHI <- phi[ind5]
-              pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+                      sum(probs[iii, jay]^2 * pYij.ge.rrr / denomj)
+          }  # end of jay loop
+      }  # end of iii loop
+  } else {
+      for (rrr in 1:maxomega) {
+          ind5 <- rrr <= omega
+          PHI <- phi[ind5]
+          pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+                                           size = omega[ind5],
+                                           shape1 <- probs[ind5, M]*(1/PHI-1),
+                                           shape2 <- (1-probs[ind5, M])*(1/PHI-1))
+          denomM <- ((1-PHI)*probs[ind5, M] + (rrr-1)*PHI)^2
+          wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
+              probs[ind5, M]^2 * pYiM.ge.rrr / denomM -
+              1 / (1 + (rrr-2)*PHI)^2
+          for (jay in 1:(M-1)) {
+              denomj <- ((1-PHI)*probs[ind5, jay] + (rrr-1)*PHI)^2
+              pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
                                                size = omega[ind5],
-                  shape1<-probs[ind5, M]*(1/PHI-1),
-                  shape2<-(1-probs[ind5, M])*(1/PHI-1))
-              denomM <- ((1-PHI)*probs[ind5, M] + (rrr-1)*PHI)^2
+                  shape1<-probs[ind5, jay]*(1/PHI-1),
+                  shape2<-(1-probs[ind5, jay])*(1/PHI-1))
+              wz[ind5, iam(jay, jay, M)] <- wz[ind5, iam(jay, jay, M)] +
+                  pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM
+              for (kay in jay:(M-1)) if (kay > jay) {
+                wz[ind5, iam(jay, kay, M)] <- wz[ind5, iam(jay, kay, M)] +
+                    pYiM.ge.rrr / denomM
+              }
+              wz[ind5, iam(jay, M, M)] <- wz[ind5, iam(jay, M, M)] +
+                  probs[ind5, jay] * pYij.ge.rrr / denomj -
+                  probs[ind5, M]   * pYiM.ge.rrr / denomM
               wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
-                      probs[ind5, M]^2 * pYiM.ge.rrr / denomM -
-                      1 / (1 + (rrr-2)*PHI)^2
-              for (jay in 1:(M-1)) {
-                  denomj <- ((1-PHI)*probs[ind5, jay] + (rrr-1)*PHI)^2
-                  pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
-                                                   size = omega[ind5],
-                      shape1<-probs[ind5, jay]*(1/PHI-1),
-                      shape2<-(1-probs[ind5, jay])*(1/PHI-1))
-                  wz[ind5, iam(jay, jay, M)] <- wz[ind5, iam(jay, jay, M)] + 
-                      pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM 
-                  for (kay in jay:(M-1)) if (kay > jay) {
-                    wz[ind5, iam(jay, kay, M)] <- wz[ind5, iam(jay, kay, M)] +
-                        pYiM.ge.rrr / denomM 
-                  }
-                  wz[ind5, iam(jay, M, M)] <- wz[ind5, iam(jay, M, M)] +
-                      probs[ind5, jay] * pYij.ge.rrr / denomj -
-                      probs[ind5, M]   * pYiM.ge.rrr / denomM
-                  wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
-                      probs[ind5, jay]^2 * pYij.ge.rrr / denomj
-              }  # end of jay loop
-          }  # end of rrr loop
-      }
+                  probs[ind5, jay]^2 * pYij.ge.rrr / denomj
+          }  # end of jay loop
+        }  # end of rrr loop
+    }
 
-      for (jay in 1:(M-1))
-        for (kay in jay:(M-1))
-          wz[, iam(jay, kay, M)] <- wz[, iam(jay, kay, M)] * (1-phi)^2
-      for (jay in 1:(M-1))
-        wz[, iam(jay, M, M)] <- wz[, iam(jay, M, M)] * (phi-1) / phi
-      wz[, iam(M, M, M)] <- wz[, iam(M, M, M)] / phi^2
-
-      d1Thetas.deta <- cbind(dprobs.deta,
-                             dphi.deta)
-      index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
-      wz <- wz * d1Thetas.deta[, index$row] * d1Thetas.deta[, index$col]
-      wz
+    for (jay in 1:(M-1))
+      for (kay in jay:(M-1))
+        wz[, iam(jay, kay, M)] <- wz[, iam(jay, kay, M)] * (1-phi)^2
+    for (jay in 1:(M-1))
+      wz[, iam(jay, M, M)] <- wz[, iam(jay, M, M)] * (phi-1) / phi
+    wz[, iam(M, M, M)] <- wz[, iam(M, M, M)] / phi^2
+
+    d1Thetas.deta <- cbind(dprobs.deta,
+                           dphi.deta)
+    index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+    wz <- wz * d1Thetas.deta[, index$row] * d1Thetas.deta[, index$col]
+    wz
   }), list( .ephi = ephi, .lphi = lphi ))))
 }
 
@@ -787,6 +620,11 @@ dirmul.old <- function(link = "loge", ialpha = 0.01,
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("dirmul.old"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
     shape <- eta2theta(eta, .link , earg = .earg )
 
@@ -846,7 +684,7 @@ rdiric <- function(n, shape, dimension = NULL,
 
     ans <- rgamma(use.n * n.shape * dimension,
                   shape)
-    dim(ans) <- c(use.n * n.shape, dimension) 
+    dim(ans) <- c(use.n * n.shape, dimension)
   } else {
     if (!is.numeric(dimension))
       dimension <- length(shape)
@@ -856,7 +694,7 @@ rdiric <- function(n, shape, dimension = NULL,
 
     ans <- rgamma(use.n * dimension,
                   rep(shape, rep(use.n, dimension)))
-    dim(ans) <- c(use.n, dimension) 
+    dim(ans) <- c(use.n, dimension)
   }
 
 
@@ -893,7 +731,7 @@ rdiric <- function(n, shape, dimension = NULL,
   new("vglmff",
   blurb = c("Dirichlet distribution\n\n",
             "Links:     ",
-            namesof("shapej", link, earg = earg), "\n\n",
+            namesof("shape_j", link, earg = earg), "\n\n",
             "Mean:     shape_j/(1 + sum(shape_j)), j = 1,..,ncol(y)"),
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
@@ -901,6 +739,17 @@ rdiric <- function(n, shape, dimension = NULL,
                            constraints, apply.int = TRUE)
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = NA,
+         Q1 = NA,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("shape"),
+         link = .link ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .link = link ))),
+
   initialize = eval(substitute(expression({
     y <- as.matrix(y)
     M <- ncol(y)
@@ -943,7 +792,6 @@ rdiric <- function(n, shape, dimension = NULL,
     for (ii in 1:M)
       misc$earg[[ii]] <- .earg
 
-    misc$expected <- TRUE
     misc$imethod <- .imethod
   }), list( .link = link, .earg = earg,
             .imethod = imethod ))),
@@ -969,7 +817,11 @@ rdiric <- function(n, shape, dimension = NULL,
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("dirichlet"),
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
   simslot = eval(substitute(
@@ -977,7 +829,7 @@ rdiric <- function(n, shape, dimension = NULL,
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     M <- ncol(as.matrix(eta))
@@ -1002,9 +854,9 @@ rdiric <- function(n, shape, dimension = NULL,
   }), list( .link = link, .earg = earg ))),
   weight = expression({
     index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
-    wz <- matrix(trigamma(sumshape), nrow = n, ncol = dimm(M))
-    wz[, 1:M] <- wz[, 1:M] - trigamma(shape)
-    wz <- -c(w) * wz * dsh.deta[, index$row] * dsh.deta[, index$col]
+    wz <- matrix(-trigamma(sumshape), nrow = n, ncol = dimm(M))
+    wz[, 1:M] <- trigamma(shape) + wz[, 1:M]
+    wz <- c(w) * wz * dsh.deta[, index$row] * dsh.deta[, index$col]
     wz
   }))
 }
@@ -1012,434 +864,363 @@ rdiric <- function(n, shape, dimension = NULL,
 
 
 
- zeta <- function(x, deriv = 0) {
-
-
-
-  deriv.arg <- deriv
-  rm(deriv)
-  if (!is.Numeric(deriv.arg, length.arg = 1,
-                  integer.valued = TRUE))
-    stop("'deriv' must be a single non-negative integer")
-  if (deriv.arg < 0 || deriv.arg > 2)
-    stop("'deriv' must be 0, 1, or 2")
-
 
-  if (deriv.arg > 0)
-    return(Zeta.derivative(x, deriv.arg = deriv.arg))
 
+cauchy.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
+}
 
 
-  if (any(special <- Re(x) <= 1)) {
-    ans <- x
-    ans[special] <- Inf   # For Re(x) == 1
-
-    special3 <- Re(x) < 1
-    ans[special3] <- NA # For 0 < Re(x) < 1
-
-    special4 <- (0 < Re(x)) & (Re(x) < 1) & (Im(x) == 0)
-    ans[special4] <- Zeta.derivative(x[special4], deriv.arg = deriv.arg)
-
-
-    special2 <- Re(x) < 0
-    if (any(special2)) {
-      x2 <- x[special2]
-      cx <- 1-x2
-      ans[special2] <- 2^(x2) * pi^(x2-1) * sin(pi*x2/2) *
-                      gamma(cx) * Recall(cx)
-    }
-
-    if (any(!special)) {
-      ans[!special] <- Recall(x[!special])
-    }
-    return(ans)
-  }
-
-  a <- 12; k <- 8
-  B <- c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
-  ans <- 0
-  for (ii in 1:(a-1))
-     ans <- ans + 1.0 / ii^x
-  ans <- ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
-
-  term <- (x/2) / a^(x+1)
-  ans <- ans + term * B[1]
-
-  for (mm in 2:k) {
-    term <- term * (x+2*mm-2) * (x+2*mm-3) / (a * a * 2 * mm * (2*mm-1))
-    ans <- ans + term * B[mm]
-  }
-  ans
-}
+ cauchy <- function(llocation = "identitylink", lscale = "loge",
+                    ilocation = NULL, iscale = NULL,
+                    iprobs = seq(0.2, 0.8, by = 0.2),
+                    imethod = 1, nsimEIM = NULL,
+                    zero = "scale") {
 
+  llocat <- as.list(substitute(llocation))
+  elocat <- link2list(llocat)
+  llocat <- attr(elocat, "function.name")
+  ilocat <- ilocation
 
+  lscale <- as.list(substitute(lscale))
+  escale <- link2list(lscale)
+  lscale <- attr(escale, "function.name")
 
- Zeta.derivative <- function(x, deriv.arg = 0) {
 
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
 
-    if (!is.Numeric(deriv.arg, length.arg = 1,
-                    integer.valued = TRUE))
-        stop("'deriv.arg' must be a single non-negative integer")
-    if (deriv.arg < 0 || deriv.arg > 2)
-        stop("'deriv.arg' must be 0, 1, or 2")
 
-    if (any(Im(x) != 0))
-        stop("Sorry, currently can only handle x real, not complex")
-    if (any(x < 0))
-        stop("Sorry, currently cannot handle x < 0")
+  if (length(nsimEIM) &&
+     (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE) ||
+      nsimEIM <= 50))
+    stop("argument 'nsimEIM' should be an integer greater than 50")
+  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+    stop("bad input for argument 'iscale'")
+  if (!is.Numeric(iprobs, positive = TRUE) || max(iprobs) >= 1)
+    stop("bad input for argument 'iprobs'")
 
-    ok <- is.finite(x) & x > 0 & x != 1   # Handles NAs
-    ans <- rep_len(NA_real_, length(x))
-    nn <- sum(ok)  # Effective length (excludes x < 0 and x = 1 values)
-    if (nn)
-        ans[ok] <- .C("vzetawr", as.double(x[ok]), ans = double(nn),
-                  as.integer(deriv.arg), as.integer(nn))$ans
 
 
+  new("vglmff",
+  blurb = c("Two-parameter Cauchy distribution ",
+            "(location & scale unknown)\n\n",
+            "Link:    ",
+            namesof("location", llocat, earg = elocat), "\n",
+            namesof("scale",    lscale,    earg = escale), "\n\n",
+            "Mean:     NA\n",
+            "Variance: NA"),
+ constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 2)
+  }), list( .zero = zero ))),
 
-    if (deriv.arg == 0)
-        ans[is.finite(x) & abs(x) < 1.0e-12] <- -0.5
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("location", "scale"),
+         llocation = .llocat ,
+         lscale    = .lscale ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .llocat = llocat,
+           .lscale = lscale ))),
 
-    ans
-}
+  initialize = eval(substitute(expression({
+    predictors.names <- c(
+      namesof("location", .llocat , earg = .elocat , tag = FALSE),
+      namesof("scale",    .lscale , earg = .escale , tag = FALSE))
 
 
 
-dzeta <- function(x, p, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+    w.y.check(w = w, y = y)
 
 
-  if (!is.Numeric(p, positive = TRUE))  # || min(p) <= 1
-      stop("'p' must be numeric and > 0")
-  LLL <- max(length(p), length(x))
-  if (length(x)     != LLL) x     <- rep_len(x,     LLL)
-  if (length(p)     != LLL) p     <- rep_len(p,     LLL)
-
-  ox <- !is.finite(x)
-  zero <- ox | round(x) != x | x < 1
-  if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
-  ans <- rep_len(if (log.arg) log(0) else 0, LLL)
-  if (any(!zero)) {
-      if (log.arg) {
-          ans[!zero] <- (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
-      } else {
-          ans[!zero] <- x[!zero]^(-p[!zero]-1) / zeta(p[!zero]+1)
-      }
-  }
-  if (any(ox))
-    ans[ox] <- 0.0  # 20141215 KaiH
-  ans
-}
-
-
-
- zetaff <-
-    function(link = "loge",
-             ishape = NULL,
-             gshape = exp(-3:4)/4,
-             zero = NULL) {
-
-
-  if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
-    stop("argument 'ishape' must be > 0")
-
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-  new("vglmff",
-  blurb = c("Zeta distribution ",
-            "f(y) = 1/(y^(shape+1) zeta(shape+1)), shape>0, y = 1, 2,..\n\n",
-            "Link:    ",
-            namesof("shape", link, earg = earg), "\n\n",
-            "Mean:     zeta(shape) / zeta(shape+1), provided shape>1\n",
-            "Variance: zeta(shape-1) / zeta(shape+1) - mean^2, provided shape>2"),
-  infos = eval(substitute(function(...) {
-    list(M1 = 1,
-         Q1 = 1,
-         multipleResponses = TRUE,
-         zero = .zero ,
-         link = .link )
-  }, list( .link = link,
-           .zero = zero ))),
-  initialize = eval(substitute(expression({
-
-   temp5 <-
-    w.y.check(w = w, y = y,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              Is.integer.y = TRUE,
-              Is.positive.y = TRUE,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-
-    ncoly <- ncol(y)
-    mynames1 <- param.names("shape", ncoly)
-    predictors.names <-
-      namesof(mynames1, .link , earg = .earg , tag = FALSE)
-
-    M1 <- 1
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
-    M <- M1 * ncoly
-
 
     if (!length(etastart)) {
-      zetaff.Loglikfun <- function(pp, y, x, w, extraargs) {
-        sum(c(w) * dzeta(x = y, p = pp, log = TRUE))
-      }
+      loc.init <- if (length( .ilocat)) .ilocat else {
+        if ( .imethod == 2) median(rep(y, w)) else
+        if ( .imethod == 3) y else {
+            cauchy2.Loglikfun <- function(loc, y, x, w, extraargs) {
+                 iprobs <- .iprobs
+                 qy <- quantile(rep(y, w), probs = iprobs)
+                 ztry <- tan(pi*(iprobs-0.5))
+                 btry <- (qy - loc) / ztry
+                 scal <- median(btry, na.rm = TRUE)
+                 if (scal <= 0)
+                   scal <- 0.1
+                 sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
+                                    log = TRUE))
+             }
+             loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by = 0.05)))
+             try.this <- grid.search(loc.grid, objfun = cauchy2.Loglikfun,
+                                     y = y,  x = x, w = w)
+                try.this <- rep_len(c(try.this), n)
+                try.this
+            }
+        }
+        loc.init <- rep_len(c(loc.init), n)
 
 
-      gshape <- .gshape
-      shape.init <- matrix(if (length( .ishape )) .ishape else -1,
-                           n, M, byrow = TRUE)
-      if (!length( .ishape ))
-        for (jay in 1:ncoly) {
-          shape.init[, jay] <- grid.search(gshape, objfun = zetaff.Loglikfun,
-                                         y = y[, jay], x = x, w = w[, jay])
-        }
+            sca.init <- if (length( .iscale )) .iscale else {
+                iprobs <- .iprobs
+                qy <- quantile(rep(y, w), probs = iprobs)
+                ztry <- tan(pi*(iprobs-0.5))
+                btry <- (qy - loc.init[1]) / ztry
+                sca.init <- median(btry, na.rm = TRUE)
+                if (sca.init <= 0) sca.init <- 0.01
+                sca.init
+            }
 
-      etastart <- theta2eta(shape.init, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg,
-            .ishape = ishape, .gshape = gshape ))),
+            sca.init <- rep_len(c(sca.init), n)
+            if ( .llocat == "loge") loc.init <- abs(loc.init)+0.01
+            etastart <-
+              cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+                    theta2eta(sca.init, .lscale ,    earg = .escale ))
+        }
+  }), list( .ilocat = ilocat,
+            .elocat = elocat, .llocat = llocat,
+            .iscale = iscale, .escale = escale, .lscale = lscale,
+            .iprobs = iprobs, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    ans <- pp <- eta2theta(eta, .link , earg = .earg )
-    ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
-    ans[pp <= 1] <- NA
-    ans
-  }, list( .link = link, .earg = earg ))),
+    eta2theta(eta[, 1], .llocat , earg = .elocat )
+  }, list( .llocat = llocat,
+           .elocat = elocat ))),
   last = eval(substitute(expression({
-    misc$link <- rep_len( .link , ncoly)
-    names(misc$link) <- mynames1
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- mynames1
-    for (jay in 1:ncoly) {
-      misc$earg[[jay]] <- .earg
-    }
-
-  }), list( .link = link, .earg = earg ))),
+    misc$expected <- TRUE
+    misc$link <-    c("location" = .llocat , "scale" =.lscale)
+    misc$earg <- list("location" = .elocat , "scale" = .escale )
+    misc$imethod <- .imethod
+  }), list( .escale = escale, .elocat = elocat,
+            .imethod = imethod,
+            .llocat = llocat, .lscale = lscale ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
+  function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    pp <- eta2theta(eta, .link , earg = .earg )
+    locat    <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    myscale  <- eta2theta(eta[, 2], .lscale , earg = .escale )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dzeta(x = y, p = pp, log = TRUE)
+      ll.elts <-
+        c(w) * dcauchy(x = y, loc = locat, sc = myscale, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("zetaff"),
-  deriv = eval(substitute(expression({
-    shape <- eta2theta(eta, .link , earg = .earg )
-
-    fred1 <- zeta(shape+1)
-    fred2 <- zeta(shape+1, deriv = 1)
-    dl.dshape <- -log(y) - fred2 / fred1
+  }, list( .escale = escale, .lscale = lscale,
+           .elocat = elocat, .llocat = llocat ))),
+  vfamily = c("cauchy"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    myscale  <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    okay1 <- all(is.finite(location)) &&
+             all(is.finite(myscale )) && all(0 < myscale)
+    okay1
+  }, list( .escale = escale, .lscale = lscale,
+           .elocat = elocat, .llocat = llocat ))),
 
-    dshape.deta <- dtheta.deta(shape, .link , earg = .earg )
 
-    c(w) * dl.dshape * dshape.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = expression({
-    NOS <- NCOL(y)
-    nd2l.dshape2 <- zeta(shape + 1, deriv = 2) / fred1 - (fred2/fred1)^2
-    wz <- nd2l.dshape2 * dshape.deta^2
-    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }))
-}
 
 
 
-gharmonic <- function(n, s = 1, lognexponent = 0) {
 
-  if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
-      stop("bad input for argument 'n'")
-  if (!is.Numeric(lognexponent, length.arg = 1))
-      stop("bad input for argument 'lognexponent'")
-  if (length(n) == 1 && length(s) == 1) {
-        if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
-          sum((1:n)^(-s))
-  } else {
-    LEN <- max(length(n), length(s))
-    n <- rep_len(n, LEN)
-    ans <- s <- rep_len(s, LEN)
-    if (lognexponent != 0) {
-      for (ii in 1:LEN)
-        ans[ii] <- sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
-    } else {
-      for (ii in 1:LEN)
-        ans[ii] <- sum((1:n[ii])^(-s[ii]))
-    }
-    ans
-  }
-}
+  simslot = eval(substitute(
+  function(object, nsim) {
 
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    locat   <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    rcauchy(nsim * length(myscale), loc = locat, sc = myscale)
+  }, list( .escale = escale, .lscale = lscale,
+           .elocat = elocat, .llocat = llocat ))),
 
 
-rzipf <- function(n, N, s) {
- r <- runif(n)
- sapply(r, function(x) {min(which(pzipf(1:N, N, s) > x))})
-}
 
 
 
 
 
 
-dzipf <- function(x, N, s, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+  deriv = eval(substitute(expression({
+    location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    myscale  <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    dlocation.deta <- dtheta.deta(location, .llocat , earg = .elocat )
+    dscale.deta    <- dtheta.deta(myscale, .lscale , earg = .escale )
+    Z <- (y-location) / myscale
+    dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
+    dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
+    c(w) * cbind(dl.dlocation * dlocation.deta,
+                 dl.dscale * dscale.deta)
+  }), list( .escale = escale, .lscale = lscale,
+            .elocat = elocat, .llocat = llocat ))),
+  weight = eval(substitute(expression({
+    run.varcov <- 0
+    ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+    dthetas.detas = cbind(dlocation.deta, dscale.deta)
+    if (length( .nsimEIM )) {
+      for (ii in 1:( .nsimEIM )) {
+        ysim <- rcauchy(n, loc = location, scale = myscale)
+        Z <- (ysim-location) / myscale
+        dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
+        dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
+        rm(ysim)
+        temp3 <- matrix(c(dl.dlocation, dl.dscale), n, 2)
+        run.varcov <- ((ii-1) * run.varcov +
+                   temp3[, ind1$row.index] *
+                   temp3[, ind1$col.index]) / ii
+      }
+      wz <- if (intercept.only)
+          matrix(colMeans(run.varcov),
+                 n, ncol(run.varcov), byrow = TRUE) else run.varcov
 
+      wz <- wz * dthetas.detas[, ind1$row] *
+                dthetas.detas[, ind1$col]
+      wz <- c(w) * matrix(wz, n, dimm(M))
+    } else {
+      wz <- cbind(matrix(0.5 / myscale^2, n, 2), matrix(0, n, 1)) *
+           dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+      wz <- c(w) * wz[, 1:M]  # diagonal wz
+    }
 
-    if (!is.Numeric(x))
-      stop("bad input for argument 'x'")
-    if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
-      stop("bad input for argument 'N'")
-    if (!is.Numeric(s, positive = TRUE))
-      stop("bad input for argument 's'")
-    nn <- max(length(x), length(N), length(s))
-    x <- rep_len(x, nn)
-    N <- rep_len(N, nn)
-    s <- rep_len(s, nn)
-    ox <- !is.finite(x)
-    zero <- ox | round(x) != x | x < 1 | x > N
-    ans <- (if (log.arg) log(0) else 0) * x
-    if (any(!zero))
-        if (log.arg) {
-          ans[!zero] <- (-s[!zero]) * log(x[!zero]) -
-                       log(gharmonic(N[!zero], s[!zero]))
-        } else {
-          ans[!zero] <- x[!zero]^(-s[!zero]) / gharmonic(N[!zero], s[!zero])
-        }
-    ans
+    wz
+  }), list( .escale = escale, .lscale = lscale, .nsimEIM = nsimEIM,
+            .elocat = elocat, .llocat = llocat ))))
 }
 
 
 
-pzipf <- function(q, N, s, log.p = FALSE) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'N'")
-  if (!is.Numeric(s, positive = TRUE))
-    stop("bad input for argument 's'")
-
-  nn <- max(length(q), length(N), length(s))
-  if (length(q)     != nn) q     <- rep_len(q,     nn)
-  if (length(N)     != nn) N     <- rep_len(N,     nn)
-  if (length(s)     != nn) s     <- rep_len(s,     nn)
-    oq <- !is.finite(q)
-    zeroOR1 <- oq | q < 1 | q >= N
-    floorq <- floor(q)
-    ans <- 0 * floorq
-    ans[oq | q >= N] <- 1
-    if (any(!zeroOR1))
-        ans[!zeroOR1] <- gharmonic(floorq[!zeroOR1], s[!zeroOR1]) /
-                        gharmonic(N[!zeroOR1], s[!zeroOR1])
-    if (log.p) log(ans) else ans
-}
 
 
 
- zipf <- function(N = NULL, link = "loge", init.s = NULL) {
 
-  if (length(N) &&
-    (!is.Numeric(N, positive = TRUE,
-                 integer.valued = TRUE, length.arg = 1) ||
-      N <= 1))
-    stop("bad input for argument 'N'")
-  enteredN <- length(N)
-  if (length(init.s) && !is.Numeric(init.s, positive = TRUE))
-      stop("argument 'init.s' must be > 0")
+ cauchy1 <- function(scale.arg = 1, llocation = "identitylink",
+                     ilocation = NULL, imethod = 1) {
+
+
+  llocat <- as.list(substitute(llocation))
+  elocat <- link2list(llocat)
+  llocat <- attr(elocat, "function.name")
+  ilocat <- ilocation
+
+
+
+  if (!is.Numeric(scale.arg, positive = TRUE))
+    stop("bad input for 'scale.arg'")
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
 
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
 
 
   new("vglmff",
-  blurb = c("Zipf distribution f(y;s) = y^(-s) / sum((1:N)^(-s)),",
-            " s > 0, y = 1, 2,...,N",
-            ifelse(enteredN, paste(" = ",N,sep = ""), ""),
-            "\n\n",
+  blurb = c("One-parameter Cauchy distribution ",
+            "(location unknown, scale known)\n\n",
             "Link:    ",
-            namesof("s", link, earg = earg),
-            "\n\n",
-            "Mean:    gharmonic(N,s-1) / gharmonic(N,s)"),
+            namesof("location", llocat, earg = elocat), "\n\n",
+            "Mean:     NA\n",
+            "Variance: NA"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("location"),
+         llocation = .llocat ,
+         imethod = .imethod )
+  }, list( .llocat = llocat,
+           .imethod = imethod ))),
+
   initialize = eval(substitute(expression({
+    predictors.names <- namesof("location", .llocat ,
+                                earg = .elocat , tag = FALSE)
 
 
-    w.y.check(w = w, y = y,
-              Is.integer.y = TRUE)
+    w.y.check(w = w, y = y)
 
 
-    predictors.names <- namesof("s", .link , earg = .earg , tag = FALSE)
 
-    NN <- .N
-    if (!is.Numeric(NN, length.arg = 1,
-                    positive = TRUE, integer.valued = TRUE))
-        NN <- max(y)
-    if (max(y) > NN)
-        stop("maximum of the response is greater than argument 'N'")
-    if (any(y < 1))
-        stop("all response values must be in 1, 2, 3,...,N( = ", NN,")")
-    extra$N <- NN
-    if (!length(etastart)) {
-        llfun <- function(ss, y, N, w) {
-            sum(c(w) * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
-        }
-        ss.init <- if (length( .init.s )) .init.s else
-            getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
-                        llfun=llfun,
-                        y = y, N=extra$N, w = w)
-        ss.init <- rep_len(ss.init, length(y))
-        if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
-        etastart <- theta2eta(ss.init, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg, .init.s = init.s, .N = N ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    ss <- eta2theta(eta, .link , earg = .earg )
-    gharmonic(extra$N, s=ss - 1) / gharmonic(extra$N, s=ss)
-  }, list( .link = link, .earg = earg ))),
-  last = eval(substitute(expression({
-    misc$expected <- FALSE
-    misc$link <-    c(s = .link)
-    misc$earg <- list(s = .earg )
-    misc$N <- extra$N
-  }), list( .link = link, .earg = earg ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    ss <- eta2theta(eta, .link , earg = .earg )
-    if (residuals) {
+        if (!length(etastart)) {
+          loc.init <- if (length( .ilocat)) .ilocat else {
+            if ( .imethod == 2) median(rep(y, w)) else
+            if ( .imethod == 3) y else {
+              cauchy1.Loglikfun <- function(loc, y, x, w, extraargs) {
+                 scal <- extraargs
+                 sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
+                                    log = TRUE))
+               }
+               loc.grid <- quantile(y, probs = seq(0.1, 0.9,
+                                                  by = 0.05))
+                 try.this <- grid.search(loc.grid,
+                                         objfun = cauchy1.Loglikfun,
+                                         y = y,  x = x, w = w,
+                                         extraargs = .scale.arg )
+              try.this <- rep_len(try.this, n)
+              try.this
+            }
+          }
+          loc.init <- rep_len(loc.init, n)
+          if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
+          etastart <-
+            theta2eta(loc.init, .llocat , earg = .elocat )
+        }
+    }), list( .scale.arg = scale.arg, .ilocat = ilocat,
+              .elocat = elocat, .llocat = llocat,
+              .imethod = imethod ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    eta2theta(eta, .llocat , earg = .elocat )
+  }, list( .llocat = llocat,
+           .elocat = elocat ))),
+  last = eval(substitute(expression({
+    misc$link <-    c("location" = .llocat )
+    misc$earg <- list("location" = .elocat )
+
+    misc$expected <- TRUE
+    misc$scale.arg <- .scale.arg
+  }), list( .scale.arg = scale.arg, .elocat = elocat,
+           .llocat = llocat ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dzipf(x = y, N = extra$N, s = ss, log = TRUE)
+      ll.elts <-
+        c(w) * dcauchy(x = y, loc = locat, scale = .scale.arg ,
+                       log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("zipf"),
+  }, list( .elocat = elocat, .scale.arg = scale.arg,
+           .llocat = llocat ))),
+  vfamily = c("cauchy1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    okay1 <- all(is.finite(locat))
+    okay1
+  }, list( .elocat = elocat, .scale.arg = scale.arg,
+           .llocat = llocat ))),
+
+
+
 
 
 
@@ -1448,189 +1229,126 @@ pzipf <- function(q, N, s, log.p = FALSE) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    extra <- object at extra
-    ss <- eta2theta(eta, .link , earg = .earg )
-    rzipf(nsim * length(ss), N = extra$N, s = ss)
-  }, list( .link = link, .earg = earg ))),
-
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    rcauchy(nsim * length(locat), loc = locat, sc = .scale.arg )
+  }, list( .elocat = elocat, .scale.arg = scale.arg,
+           .llocat = llocat ))),
 
 
   deriv = eval(substitute(expression({
-    ss <- eta2theta(eta, .link , earg = .earg )
-    fred1 <- gharmonic(extra$N, ss)
-    fred2 <- gharmonic(extra$N, ss, lognexp = 1)
-    dl.dss <- -log(y) + fred2 / fred1
-    dss.deta <- dtheta.deta(ss, .link , earg = .earg )
-    d2ss.deta2 <- d2theta.deta2(ss, .link , earg = .earg )
-    c(w) * dl.dss * dss.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = expression({
-    d2l.dss <- gharmonic(extra$N, ss, lognexp = 2) / fred1 - (fred2/fred1)^2
-    wz <- c(w) * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
-    wz
-  }))
-}
-
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    temp <- (y-locat)/.scale.arg
+    dl.dlocat <- 2 * temp / ((1 + temp^2) * .scale.arg)
 
+    dlocation.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
 
-cauchy.control <- function(save.weights = TRUE, ...) {
-    list(save.weights = save.weights)
+    c(w) * dl.dlocat * dlocation.deta
+  }), list( .elocat = elocat, .scale.arg = scale.arg,
+            .llocat = llocat ))),
+  weight = eval(substitute(expression({
+    wz <- c(w) * dlocation.deta^2 / ( .scale.arg^2 * 2)
+    wz
+  }), list( .elocat = elocat, .scale.arg = scale.arg,
+            .llocat = llocat ))))
 }
 
 
- cauchy <- function(llocation = "identitylink", lscale = "loge",
-                    ilocation = NULL, iscale = NULL,
-                    iprobs = seq(0.2, 0.8, by = 0.2),
-                    imethod = 1, nsimEIM = NULL,
-                    zero = "scale") {
 
-  llocat <- as.list(substitute(llocation))
-  elocat <- link2list(llocat)
-  llocat <- attr(elocat, "function.name")
-  ilocat <- ilocation
 
-  lscale <- as.list(substitute(lscale))
-  escale <- link2list(lscale)
-  lscale <- attr(escale, "function.name")
 
 
+ logistic1 <- function(llocation = "identitylink",
+                       scale.arg = 1, imethod = 1) {
+  if (!is.Numeric(scale.arg, length.arg = 1, positive = TRUE))
+    stop("'scale.arg' must be a single positive number")
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-     imethod > 3)
-    stop("argument 'imethod' must be 1 or 2 or 3")
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
 
 
-  if (length(nsimEIM) &&
-     (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE) ||
-      nsimEIM <= 50))
-    stop("argument 'nsimEIM' should be an integer greater than 50")
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
-    stop("bad input for argument 'iscale'")
-  if (!is.Numeric(iprobs, positive = TRUE) || max(iprobs) >= 1)
-    stop("bad input for argument 'iprobs'")
+  llocat <- as.list(substitute(llocation))
+  elocat <- link2list(llocat)
+  llocat <- attr(elocat, "function.name")
 
 
 
   new("vglmff",
-  blurb = c("Two-parameter Cauchy distribution ",
-            "(location & scale unknown)\n\n",
+  blurb = c("One-parameter logistic distribution ",
+            "(location unknown, scale known)\n\n",
             "Link:    ",
-            namesof("location", llocat, earg = elocat), "\n",
-            namesof("scale",    lscale,    earg = escale), "\n\n",
-            "Mean:     NA\n",
-            "Variance: NA"),
- constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
-                                predictors.names = predictors.names,
-                                M1 = 2)
-  }), list( .zero = zero ))),
-
+            namesof("location", llocat, earg = elocat), "\n\n",
+            "Mean:     location", "\n",
+            "Variance: (pi*scale)^2 / 3"),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          Q1 = 1,
          expected = TRUE,
          multipleResponses = FALSE,
-         parameters.names = c("location", "scale"),
-         llocation = .llocat ,
-         lscale    = .lscale ,
-         zero = .zero )
-  }, list( .zero = zero,
-           .llocat = llocat,
-           .lscale = lscale ))),
-
+         parameters.names = c("location"),
+         scale.arg = .scale.arg ,
+         llocation = .llocation )
+  }, list( .llocation = llocation,
+           .scale.arg = scale.arg ))),
   initialize = eval(substitute(expression({
-    predictors.names <- c(
-      namesof("location", .llocat , earg = .elocat , tag = FALSE),
-      namesof("scale",    .lscale , earg = .escale , tag = FALSE))
-
-
 
     w.y.check(w = w, y = y)
 
 
+    predictors.names <- namesof("location", .llocat ,
+                                earg = .elocat , tag = FALSE)
 
-    if (!length(etastart)) {
-      loc.init <- if (length( .ilocat)) .ilocat else {
-        if ( .imethod == 2) median(rep(y, w)) else 
-        if ( .imethod == 3) y else {
-            cauchy2.Loglikfun <- function(loc, y, x, w, extraargs) {
-                 iprobs <- .iprobs
-                 qy <- quantile(rep(y, w), probs = iprobs)
-                 ztry <- tan(pi*(iprobs-0.5))
-                 btry <- (qy - loc) / ztry
-                 scal <- median(btry, na.rm = TRUE)
-                 if (scal <= 0)
-                   scal <- 0.1
-                 sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
-                                    log = TRUE))
-             }
-             loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by = 0.05)))
-             try.this <- grid.search(loc.grid, objfun = cauchy2.Loglikfun,
-                                     y = y,  x = x, w = w)
-                try.this <- rep_len(c(try.this), n)
-                try.this
-            }
-        }
-        loc.init <- rep_len(c(loc.init), n)
-
-
-            sca.init <- if (length( .iscale )) .iscale else {
-                iprobs <- .iprobs
-                qy <- quantile(rep(y, w), probs = iprobs)
-                ztry <- tan(pi*(iprobs-0.5))
-                btry <- (qy - loc.init[1]) / ztry
-                sca.init <- median(btry, na.rm = TRUE)
-                if (sca.init <= 0) sca.init <- 0.01
-                sca.init
-            }
 
-            sca.init <- rep_len(c(sca.init), n)
-            if ( .llocat == "loge") loc.init <- abs(loc.init)+0.01
-            etastart <-
-              cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
-                    theta2eta(sca.init, .lscale ,    earg = .escale ))
-        }
-  }), list( .ilocat = ilocat,
-            .elocat = elocat, .llocat = llocat,
-            .iscale = iscale, .escale = escale, .lscale = lscale,
-            .iprobs = iprobs, .imethod = imethod ))),
+    if (!length(etastart)) {
+      locat.init <- if ( .imethod == 1) y else median(rep(y, w))
+      locat.init <- rep_len(locat.init, n)
+      if ( .llocat == "loge")
+        locat.init <- abs(locat.init) + 0.001
+      etastart <-
+        theta2eta(locat.init, .llocat , earg = .elocat )
+    }
+  }), list( .imethod = imethod, .llocat = llocat,
+            .elocat = elocat ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta[, 1], .llocat , earg = .elocat )
+    eta2theta(eta, .llocat , earg = .elocat )
   }, list( .llocat = llocat,
            .elocat = elocat ))),
   last = eval(substitute(expression({
-    misc$expected <- TRUE
-    misc$link <-    c("location" = .llocat , "scale" =.lscale)
-    misc$earg <- list("location" = .elocat , "scale" = .escale )
-    misc$imethod <- .imethod
-  }), list( .escale = escale, .elocat = elocat,
-            .imethod = imethod,
-            .llocat = llocat, .lscale = lscale ))),
+    misc$link <-    c(location = .llocat)
+    misc$earg <- list(location = .elocat )
+    misc$scale.arg <- .scale.arg
+  }), list( .llocat = llocat,
+            .elocat = elocat, .scale.arg = scale.arg ))),
   loglikelihood = eval(substitute(
-  function(mu, y, w, residuals = FALSE, eta,
+    function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    locat    <- eta2theta(eta[, 1], .llocat , earg = .elocat )
-    myscale  <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    zedd <- (y-locat) / .scale.arg
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * dcauchy(x = y, loc = locat, sc = myscale, log = TRUE)
+        c(w) * dlogis(x = y, locat = locat,
+                      scale = .scale.arg , log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .escale = escale, .lscale = lscale,
-           .elocat = elocat, .llocat = llocat ))),
-  vfamily = c("cauchy"),
-
-
+  }, list( .llocat = llocat,
+           .elocat = elocat, .scale.arg = scale.arg ))),
+  vfamily = c("logistic1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    okay1 <- all(is.finite(locat))
+    okay1
+  }, list( .elocat = elocat, .scale.arg = scale.arg,
+           .llocat = llocat ))),
 
 
 
@@ -1639,169 +1357,178 @@ cauchy.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    locat   <- eta2theta(eta[, 1], .llocat , earg = .elocat )
-    myscale <- eta2theta(eta[, 2], .lscale , earg = .escale )
-    rcauchy(nsim * length(myscale), loc = locat, sc = myscale)
-  }, list( .escale = escale, .lscale = lscale,
-           .elocat = elocat, .llocat = llocat ))),
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    rlogis(nsim * length(locat),
+           location = locat, scale = .scale.arg )
+  }, list( .llocat = llocat,
+           .elocat = elocat, .scale.arg = scale.arg ))),
 
 
 
+  deriv = eval(substitute(expression({
+    locat <- eta2theta(eta, .llocat , earg = .elocat )
 
+    ezedd <- exp(-(y-locat) / .scale.arg )
+    dl.dlocat <- (1 - ezedd) / ((1 + ezedd) * .scale.arg)
+    dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
 
+    c(w) * dl.dlocat * dlocat.deta
+  }), list( .llocat = llocat,
+            .elocat = elocat, .scale.arg = scale.arg ))),
+  weight = eval(substitute(expression({
+    wz <- c(w) * dlocat.deta^2 / ( .scale.arg^2 * 3)
+    wz
+  }), list( .scale.arg = scale.arg ))))
+}
 
 
 
-  deriv = eval(substitute(expression({
-    location <- eta2theta(eta[, 1], .llocat , earg = .elocat )
-    myscale  <- eta2theta(eta[, 2], .lscale , earg = .escale )
-    dlocation.deta <- dtheta.deta(location, .llocat , earg = .elocat )
-    dscale.deta    <- dtheta.deta(myscale, .lscale , earg = .escale )
-    Z <- (y-location) / myscale
-    dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
-    dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
-    c(w) * cbind(dl.dlocation * dlocation.deta,
-                 dl.dscale * dscale.deta)
-  }), list( .escale = escale, .lscale = lscale,
-            .elocat = elocat, .llocat = llocat ))),
-  weight = eval(substitute(expression({
-    run.varcov <- 0
-    ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-    dthetas.detas = cbind(dlocation.deta, dscale.deta)
-    if (length( .nsimEIM )) {
-      for (ii in 1:( .nsimEIM )) {
-        ysim <- rcauchy(n, loc = location, scale = myscale)
-        Z <- (ysim-location) / myscale
-        dl.dlocation <- 2 * Z / ((1 + Z^2) * myscale)
-        dl.dscale <- (Z^2 - 1) / ((1 + Z^2) * myscale)
-        rm(ysim)
-        temp3 <- matrix(c(dl.dlocation, dl.dscale), n, 2)
-        run.varcov <- ((ii-1) * run.varcov +
-                   temp3[, ind1$row.index] *
-                   temp3[, ind1$col.index]) / ii
-      }
-      wz <- if (intercept.only)
-          matrix(colMeans(run.varcov),
-                 n, ncol(run.varcov), byrow = TRUE) else run.varcov
-
-      wz <- wz * dthetas.detas[, ind1$row] *
-                dthetas.detas[, ind1$col]
-      wz <- c(w) * matrix(wz, n, dimm(M))
-    } else {
-      wz <- cbind(matrix(0.5 / myscale^2, n, 2), matrix(0, n, 1)) *
-           dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
-      wz <- c(w) * wz[, 1:M]  # diagonal wz
-    }
 
-    wz
-  }), list( .escale = escale, .lscale = lscale, .nsimEIM = nsimEIM,
-            .elocat = elocat, .llocat = llocat ))))
-}
+ erlang <-
+  function(shape.arg, lscale = "loge",
+           imethod = 1, zero = NULL) {
 
+  if (!is.Numeric(shape.arg,  # length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE))
+      stop("'shape' must be a positive integer")
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+      stop("argument 'imethod' must be 1 or 2 or 3")
 
 
+  lscale <- as.list(substitute(lscale))
+  escale <- link2list(lscale)
+  lscale <- attr(escale, "function.name")
 
 
 
 
- cauchy1 <- function(scale.arg = 1, llocation = "identitylink",
-                     ilocation = NULL, imethod = 1) {
 
+  new("vglmff",
+  blurb = c("Erlang distribution\n\n",
+            "Link:    ", namesof("scale", lscale, earg = escale), "\n", "\n",
+            "Mean:     shape * scale", "\n",
+            "Variance: shape * scale^2"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
+                                predictors.names = predictors.names,
+                                M1 = 1)
 
-  llocat <- as.list(substitute(llocation))
-  elocat <- link2list(llocat)
-  llocat <- attr(elocat, "function.name")
-  ilocat <- ilocation
 
 
 
-  if (!is.Numeric(scale.arg, positive = TRUE))
-    stop("bad input for 'scale.arg'")
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 3)
-    stop("argument 'imethod' must be 1 or 2 or 3")
+  }), list( .zero = zero ))),
 
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         multipleResponses = TRUE,
+         shape.arg = .shape.arg ,
+         parameters.names = c("scale"),
+         expected = TRUE,
+         zero = .zero )
+  }, list( .zero = zero,
+           .shape.arg = shape.arg ))),
 
 
-  new("vglmff",
-  blurb = c("One-parameter Cauchy distribution ",
-            "(location unknown, scale known)\n\n",
-            "Link:    ",
-            namesof("location", llocat, earg = elocat), "\n\n",
-            "Mean:     NA\n",
-            "Variance: NA"),
   initialize = eval(substitute(expression({
-    predictors.names <- namesof("location", .llocat ,
-                                earg = .elocat , tag = FALSE)
 
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.nonnegative.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
-    w.y.check(w = w, y = y)
 
+    ncoly <- ncol(y)
+    M1 <- 1
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
 
 
-        if (!length(etastart)) {
-          loc.init <- if (length( .ilocat)) .ilocat else {
-            if ( .imethod == 2) median(rep(y, w)) else 
-            if ( .imethod == 3) y else {
-              cauchy1.Loglikfun <- function(loc, y, x, w, extraargs) {
-                 scal <- extraargs
-                 sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
-                                    log = TRUE))
-               }
-               loc.grid <- quantile(y, probs = seq(0.1, 0.9,
-                                                  by = 0.05))
-                 try.this <- grid.search(loc.grid,
-                                         objfun = cauchy1.Loglikfun,
-                                         y = y,  x = x, w = w,
-                                         extraargs = .scale.arg )
-              try.this <- rep_len(try.this, n)
-              try.this
-            }
-          }
-          loc.init <- rep_len(loc.init, n)
-          if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
-          etastart <-
-            theta2eta(loc.init, .llocat , earg = .elocat )
-        }
-    }), list( .scale.arg = scale.arg, .ilocat = ilocat,
-              .elocat = elocat, .llocat = llocat,
-              .imethod = imethod ))),
+    parameters.names <- param.names("scale", ncoly)
+    predictors.names <-
+      namesof(parameters.names, .lscale , earg = .escale , tag = FALSE)
+
+
+    shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
+                        byrow = TRUE)
+
+    if (!length(etastart)) {
+      sc.init <- if ( .imethod == 1) {
+        y / shape.mat
+      } else if ( .imethod == 2) {
+        (colSums(y * w) / colSums(w)) / shape.mat
+      } else if ( .imethod == 3) {
+        matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) / shape.mat
+      }
+
+      if ( !is.matrix(sc.init))
+        sc.init <- matrix(sc.init, n, M, byrow = TRUE)
+
+      etastart <- theta2eta(sc.init, .lscale , earg = .escale )
+    }
+  }), list( .lscale = lscale, .escale = escale,
+            .shape.arg = shape.arg, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta, .llocat , earg = .elocat )
-  }, list( .llocat = llocat,
-           .elocat = elocat ))),
+    eta <- as.matrix(eta)
+    SC <- eta2theta(eta, .lscale , earg = .escale )
+    shape.mat <- matrix( .shape.arg , nrow(eta), ncol(eta), byrow = TRUE)
+    shape.mat * SC
+  }, list( .lscale = lscale, .escale = escale, .shape.arg = shape.arg ))),
   last = eval(substitute(expression({
-    misc$link <-    c("location" = .llocat)
-    misc$earg <- list("location" = .elocat )
+    M1 <- extra$M1
+    misc$link <- c(rep_len( .lscale , ncoly))
+    names(misc$link) <- parameters.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- parameters.names
+    for (ii in 1:ncoly) {
+      misc$earg[[ii]] <- .escale
+    }
 
+    misc$M1 <- M1
     misc$expected <- TRUE
-    misc$scale.arg <- .scale.arg 
-  }), list( .scale.arg = scale.arg, .elocat = elocat,
-           .llocat = llocat ))),
+    misc$multipleResponses <- TRUE
+    misc$shape.arg <- .shape.arg
+  }), list( .lscale = lscale, .escale = escale, .shape.arg = shape.arg ))),
+
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    locat <- eta2theta(eta, .llocat , earg = .elocat )
+    sc <- eta2theta(eta, .lscale , earg = .escale )
+    shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
+                        byrow = TRUE)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * dcauchy(x = y, loc = locat, scale = .scale.arg ,
-                       log = TRUE)
+        c(w) * (( shape.mat - 1) * log(y) - y / sc -
+                  shape.mat * log(sc) - lgamma( shape.mat ))
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .scale.arg = scale.arg, .elocat = elocat,
-           .llocat = llocat ))),
-  vfamily = c("cauchy1"),
+  }, list( .lscale = lscale, .escale = escale, .shape.arg = shape.arg ))),
+  vfamily = c("erlang"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    sc <- eta2theta(eta, .lscale , earg = .escale )
+    okay1 <- all(is.finite(sc)) && all(0 < sc)
+    okay1
+  }, list( .lscale = lscale, .escale = escale, .shape.arg = shape.arg ))),
 
 
 
@@ -1812,283 +1539,196 @@ cauchy.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    locat <- eta2theta(eta, .llocat , earg = .elocat )
-    rcauchy(nsim * length(locat), loc = locat, sc = .scale.arg )
-  }, list( .scale.arg = scale.arg, .elocat = elocat,
-           .llocat = llocat ))),
+    Scale <- eta2theta(eta, .lscale , earg = .escale )
+    shape.mat <- matrix( .shape.arg , nrow(cbind(eta)), ncol(cbind(eta)),
+                        byrow = TRUE)
+    rgamma(nsim * length(Scale), shape = shape.mat , scale = Scale )
+  }, list( .lscale = lscale, .escale = escale, .shape.arg = shape.arg ))),
 
 
-  deriv = eval(substitute(expression({
-    locat <- eta2theta(eta, .llocat , earg = .elocat )
-    temp <- (y-locat)/.scale.arg
-    dl.dlocat <- 2 * temp / ((1 + temp^2) * .scale.arg)
 
-    dlocation.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
 
-    c(w) * dl.dlocat * dlocation.deta
-  }), list( .scale.arg = scale.arg, .elocat = elocat,
-            .llocat = llocat ))),
+
+  deriv = eval(substitute(expression({
+    sc <- eta2theta(eta, .lscale , earg = .escale )
+    shape.mat <- matrix( .shape.arg , nrow(cbind(eta)), ncol(cbind(eta)),
+                        byrow = TRUE)
+    dl.dsc <- (y / sc - shape.mat) / sc
+    dsc.deta <- dtheta.deta(sc, .lscale , earg = .escale )
+    c(w) * dl.dsc * dsc.deta
+  }), list( .lscale = lscale, .escale = escale, .shape.arg = shape.arg ))),
   weight = eval(substitute(expression({
-    wz <- c(w) * dlocation.deta^2 / ( .scale.arg^2 * 2)
+    ned2l.dsc2 <- shape.mat / sc^2
+    wz <- c(w) * dsc.deta^2 * ned2l.dsc2
     wz
-  }), list( .scale.arg = scale.arg, .elocat = elocat,
-            .llocat = llocat ))))
-}
+  }), list( .escale = escale, .shape.arg = shape.arg ))))
+}  # erlang
 
 
 
 
 
+dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
 
- logistic1 <- function(llocation = "identitylink",
-                       scale.arg = 1, imethod = 1) {
-  if (!is.Numeric(scale.arg, length.arg = 1, positive = TRUE))
-    stop("'scale.arg' must be a single positive number")
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
+  if (!is.Numeric(x))
+    stop("bad input for argument 'x'")
+  if (!is.Numeric(Qsize, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'Qsize'")
+  if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
+    stop("bad input for argument 'a'")
+  N <- max(length(x), length(Qsize), length(a))
+  if (length(x)     != N) x     <- rep_len(x,     N)
+  if (length(a)     != N) a     <- rep_len(a,     N)
+  if (length(Qsize) != N) Qsize <- rep_len(Qsize, N)
 
+  xok <- (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
+  ans <- rep_len(if (log.arg) log(0) else 0, N)  # loglikelihood
+  ans[xok] <- log(Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
+             (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
+             (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
+  if (!log.arg) {
+    ans[xok] <- exp(ans[xok])
+  }
+  ans
+}
 
-  llocat <- as.list(substitute(llocation))
-  elocat <- link2list(llocat)
-  llocat <- attr(elocat, "function.name")
 
 
+rbort <- function(n, Qsize = 1, a = 0.5) {
 
-  new("vglmff",
-  blurb = c("One-parameter logistic distribution ",
-            "(location unknown, scale known)\n\n",
-            "Link:    ",
-            namesof("location", llocat, earg = elocat), "\n\n",
-            "Mean:     location", "\n",
-            "Variance: (pi*scale)^2 / 3"),
-  initialize = eval(substitute(expression({
-
-    w.y.check(w = w, y = y)
-
-
-    predictors.names <- namesof("location", .llocat , 
-                                earg = .elocat , tag = FALSE)
-
-
-    if (!length(etastart)) {
-      locat.init <- if ( .imethod == 1) y else median(rep(y, w))
-      locat.init <- rep_len(locat.init, n)
-      if ( .llocat == "loge")
-        locat.init <- abs(locat.init) + 0.001
-      etastart <-
-        theta2eta(locat.init, .llocat , earg = .elocat )
-    }
-  }), list( .imethod = imethod, .llocat = llocat,
-            .elocat = elocat ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta, .llocat , earg = .elocat )
-  }, list( .llocat = llocat,
-           .elocat = elocat ))),
-  last = eval(substitute(expression({
-    misc$expected <- TRUE
-    misc$link <-    c(location = .llocat)
-    misc$earg <- list(location = .elocat )
-    misc$scale.arg <- .scale.arg 
-  }), list( .llocat = llocat, 
-            .elocat = elocat, .scale.arg = scale.arg ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    locat <- eta2theta(eta, .llocat , earg = .elocat )
-    zedd <- (y-locat) / .scale.arg
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <-
-        c(w) * dlogis(x = y, locat = locat,
-                      scale = .scale.arg , log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .llocat = llocat,
-           .elocat = elocat, .scale.arg = scale.arg ))),
-  vfamily = c("logistic1"),
-
-
-  simslot = eval(substitute(
-  function(object, nsim) {
-
-    pwts <- if (length(pwts <- object@prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    eta <- predict(object)
-    locat <- eta2theta(eta, .llocat , earg = .elocat )
-    rlogis(nsim * length(locat),
-           location = locat, scale = .scale.arg )
-  }, list( .llocat = llocat,
-           .elocat = elocat, .scale.arg = scale.arg ))),
-
-
-
-  deriv = eval(substitute(expression({
-    locat <- eta2theta(eta, .llocat , earg = .elocat )
-
-    ezedd <- exp(-(y-locat) / .scale.arg )
-    dl.dlocat <- (1 - ezedd) / ((1 + ezedd) * .scale.arg)
-    dlocat.deta <- dtheta.deta(locat, .llocat ,
-                                 earg = .elocat )
+  use.n <- if ((length.n <- length(n)) > 1) length.n else
+           if (!is.Numeric(n, integer.valued = TRUE,
+                           length.arg = 1, positive = TRUE))
+              stop("bad input for argument 'n'") else n
+  if (!is.Numeric(Qsize, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE))
+    stop("bad input for argument 'Qsize'")
+  if (!is.Numeric(a, positive = TRUE) ||
+      max(a) >= 1)
+    stop("bad input for argument 'a'")
 
-    c(w) * dl.dlocat * dlocat.deta
-  }), list( .llocat = llocat,
-            .elocat = elocat, .scale.arg = scale.arg ))),
-  weight = eval(substitute(expression({
-    wz <- c(w) * dlocat.deta^2 / ( .scale.arg^2 * 3) 
-    wz
-  }), list( .scale.arg = scale.arg ))))
+  N <- use.n
+  qsize <- rep_len(Qsize, N)
+  a     <- rep_len(a,     N)
+  totqsize <- qsize
+  fini <- (qsize < 1)
+  while (any(!fini)) {
+    additions <- rpois(sum(!fini), a[!fini])
+    qsize[!fini] <- qsize[!fini] + additions
+    totqsize[!fini] <- totqsize[!fini] + additions
+    qsize <- qsize - 1
+    fini <- fini | (qsize < 1)
+  }
+  totqsize
 }
 
 
 
+ borel.tanner <- function(Qsize = 1, link = "logit",
+                          imethod = 1) {
 
- erlang <-
-  function(shape.arg, link = "loge",
-           imethod = 1, zero = NULL) {
 
-  if (!is.Numeric(shape.arg,  # length.arg = 1,
+  if (!is.Numeric(Qsize, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
-      stop("'shape' must be a positive integer")
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 3)
-      stop("argument 'imethod' must be 1 or 2 or 3")
-
+    stop("bad input for argument 'Qsize'")
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
-
-
-
-  new("vglmff",
-  blurb = c("Erlang distribution\n\n",
-            "Link:    ", namesof("scale", link, earg = earg), "\n", "\n",
-            "Mean:     shape * scale", "\n",
-            "Variance: shape * scale^2"),
-  constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
-                                predictors.names = predictors.names,
-                                M1 = 1)
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 4)
+    stop("argument 'imethod' must be 1 or 2, 3 or 4")
 
 
 
 
-  }), list( .zero = zero ))),
+  new("vglmff",
+  blurb = c("Borel-Tanner distribution\n\n",
+            "Link:    ",
+            namesof("a", link, earg = earg), "\n\n",
+            "Mean:     Qsize / (1-a)",
+            "\n",
+            "Variance: Qsize * a / (1 - a)^3"),
 
   infos = eval(substitute(function(...) {
     list(M1 = 1,
          Q1 = 1,
-         multipleResponses = TRUE,
-         expected = TRUE,
-         zero = .zero )
-  }, list( .zero = zero ))),
-
+         Qsize = .Qsize ,
+         link = .link ,
+         multipleResponses = FALSE )
+  }, list( .Qsize  = Qsize,
+           .link = link ))),
 
   initialize = eval(substitute(expression({
-
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.nonnegative.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-
-    ncoly <- ncol(y)
-    M1 <- 1
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
-    M <- M1 * ncoly
+    if (any(y < .Qsize ))
+      stop("all y values must be >= ", .Qsize )
 
 
-    parameters.names <- param.names("scale", ncoly)
-    predictors.names <-
-      namesof(parameters.names, .link , earg = .earg , tag = FALSE)
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              Is.integer.y = TRUE)
 
 
-    shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
-                        byrow = TRUE)
+    predictors.names <- namesof("a", .link , earg = .earg , tag = FALSE)
 
     if (!length(etastart)) {
-      sc.init <- if ( .imethod == 1) {
-        y / shape.mat
-      } else if ( .imethod == 2) {
-        (colSums(y * w) / colSums(w)) / shape.mat
-      } else if ( .imethod == 3) {
-        matrix(apply(y, 2, median), n, ncoly, byrow = TRUE) / shape.mat
-      }
-
-      if ( !is.matrix(sc.init))
-        sc.init <- matrix(sc.init, n, M, byrow = TRUE)
-
-      etastart <- theta2eta(sc.init, .link , earg = .earg )
+      a.init <- switch(as.character( .imethod ),
+              "1" = 1 - .Qsize / (y + 1/8),
+              "2" = rep_len(1 - .Qsize / weighted.mean(y, w), n),
+              "3" = rep_len(1 - .Qsize / median(y), n),
+              "4" = rep_len(0.5, n))
+      etastart <-
+          theta2eta(a.init, .link , earg = .earg )
     }
-  }), list( .link = link, .earg = earg,
-            .shape.arg = shape.arg, .imethod = imethod ))),
+  }), list( .link = link, .earg = earg, .Qsize = Qsize,
+            .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta <- as.matrix(eta)
-    SC <- eta2theta(eta, .link , earg = .earg )
-    shape.mat <- matrix( .shape.arg , nrow(eta), ncol(eta), byrow = TRUE)
-    shape.mat * SC
-  }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+    aa <- eta2theta(eta, .link , earg = .earg )
+    .Qsize / (1 - aa)
+  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
   last = eval(substitute(expression({
-    M1 <- extra$M1
-    misc$link <- c(rep_len( .link , ncoly))
-    names(misc$link) <- parameters.names
+    misc$link <-    c(a = .link)
 
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- parameters.names
-    for (ii in 1:ncoly) {
-      misc$earg[[ii]] <- .earg
-    }
+    misc$earg <- list(a = .earg )
 
-    misc$M1 <- M1
     misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
-    misc$shape.arg <- .shape.arg 
-  }), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
-
+    misc$Qsize <- .Qsize
+  }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    sc <- eta2theta(eta, .link , earg = .earg )
-    shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
-                        byrow = TRUE)
+    aa <- eta2theta(eta, .link , earg = .earg )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <-
-        c(w) * (( shape.mat - 1) * log(y) - y / sc -
-                  shape.mat * log(sc) - lgamma( shape.mat ))
+      ll.elts <- c(w) * dbort(x = y, Qsize = .Qsize , a = aa, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
-  vfamily = c("erlang"),
+  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
+  vfamily = c("borel.tanner"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    aa <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(aa)) && all(0 < aa)
+    okay1
+  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
+
+
+
 
 
 
@@ -2097,60 +1737,51 @@ cauchy.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    Scale <- eta2theta(eta, .link , earg = .earg )
-    shape.mat <- matrix( .shape.arg , nrow(cbind(eta)), ncol(cbind(eta)),
-                        byrow = TRUE)
-    rgamma(nsim * length(Scale), shape = shape.mat , scale = Scale )
-  }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
-
+    aa <- eta2theta(eta, .link , earg = .earg )
+    rbort(nsim * length(aa), Qsize = .Qsize , a = aa)
+  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
 
 
 
 
   deriv = eval(substitute(expression({
-    sc <- eta2theta(eta, .link , earg = .earg )
-    shape.mat <- matrix( .shape.arg , nrow(cbind(eta)), ncol(cbind(eta)),
-                        byrow = TRUE)
-    dl.dsc <- (y / sc - shape.mat) / sc
-    dsc.deta <- dtheta.deta(sc, .link , earg = .earg )
-    c(w) * dl.dsc * dsc.deta
-  }), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+    aa <- eta2theta(eta, .link , earg = .earg )
+    dl.da <- (y - .Qsize ) / aa - y
+    da.deta <- dtheta.deta(aa, .link , earg = .earg )
+    c(w) * dl.da * da.deta
+  }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
   weight = eval(substitute(expression({
-    ned2l.dsc2 <- shape.mat / sc^2
-    wz <- c(w) * dsc.deta^2 * ned2l.dsc2
+    ned2l.da2 <- .Qsize / (aa * (1 - aa))
+    wz <- c(w) * ned2l.da2 * da.deta^2
     wz
-  }), list( .earg = earg, .shape.arg = shape.arg ))))
+  }), list( .Qsize = Qsize ))))
 }
 
 
 
 
 
-dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
+dfelix <- function(x, rate = 0.25, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
   if (!is.Numeric(x))
     stop("bad input for argument 'x'")
-  if (!is.Numeric(Qsize, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'Qsize'")
-  if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
-    stop("bad input for argument 'a'")
-  N <- max(length(x), length(Qsize), length(a))
-  if (length(x)     != N) x     <- rep_len(x,     N)
-  if (length(a)     != N) a     <- rep_len(a,     N)
-  if (length(Qsize) != N) Qsize <- rep_len(Qsize, N)
+  if (!is.Numeric(rate, positive = TRUE))
+    stop("bad input for argument 'rate'")
+  N <- max(length(x), length(rate))
+  if (length(x)    != N) x    <- rep_len(x,    N)
+  if (length(rate) != N) rate <- rep_len(rate, N)
 
-  xok <- (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
+  xok <- (x %% 2 == 1) & (x == round(x)) & (x >= 1) &
+         (rate > 0) & (rate < 0.5)
   ans <- rep_len(if (log.arg) log(0) else 0, N)  # loglikelihood
-  ans[xok] <- log(Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
-             (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
-             (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
+  ans[xok] <- ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(rate[xok]) -
+             lgamma(x[xok]/2 + 0.5) - rate[xok] * x[xok]
   if (!log.arg) {
     ans[xok] <- exp(ans[xok])
   }
@@ -2159,204 +1790,35 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
 
 
 
-rbort <- function(n, Qsize = 1, a = 0.5) {
-
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           length.arg = 1, positive = TRUE))
-              stop("bad input for argument 'n'") else n
-  if (!is.Numeric(Qsize, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'Qsize'")
-  if (!is.Numeric(a, positive = TRUE) ||
-      max(a) >= 1)
-    stop("bad input for argument 'a'")
-
-  N <- use.n
-  qsize <- rep_len(Qsize, N)
-  a     <- rep_len(a,     N)
-  totqsize <- qsize
-  fini <- (qsize < 1)
-  while (any(!fini)) {
-    additions <- rpois(sum(!fini), a[!fini])
-    qsize[!fini] <- qsize[!fini] + additions
-    totqsize[!fini] <- totqsize[!fini] + additions
-    qsize <- qsize - 1
-    fini <- fini | (qsize < 1)
-  }
-  totqsize
-}
-
-
-
- borel.tanner <- function(Qsize = 1, link = "logit",
-                          imethod = 1) {
-
-
-  if (!is.Numeric(Qsize, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE))
-    stop("bad input for argument 'Qsize'")
+ felix <- function(lrate = extlogit(min = 0, max = 0.5), imethod = 1) {
 
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
+  lrate <- as.list(substitute(lrate))
+  erate <- link2list(lrate)
+  lrate <- attr(erate, "function.name")
 
 
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-      imethod > 4)
-    stop("argument 'imethod' must be 1 or 2, 3 or 4")
-
-
+     imethod > 4)
+      stop("argument 'imethod' must be 1 or 2, 3 or 4")
 
 
   new("vglmff",
-  blurb = c("Borel-Tanner distribution\n\n",
+  blurb = c("Felix distribution\n\n",
             "Link:    ",
-            namesof("a", link, earg = earg), "\n\n",
-            "Mean:     Qsize / (1-a)",
-            "\n",
-            "Variance: Qsize * a / (1 - a)^3"),
-
+            namesof("rate", lrate, earg = erate), "\n\n",
+            "Mean:     1/(1-2*rate)"),
   infos = eval(substitute(function(...) {
     list(M1 = 1,
          Q1 = 1,
-         Qsize = .Qsize ,
-         link = .link ,
-         multipleResponses = FALSE )
-  }, list( .Qsize  = Qsize,
-           .link = link ))),
-
-  initialize = eval(substitute(expression({
-    if (any(y < .Qsize ))
-      stop("all y values must be >= ", .Qsize )
-
-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              Is.integer.y = TRUE)
-
-
-    predictors.names <- namesof("a", .link , earg = .earg , tag = FALSE)
-
-    if (!length(etastart)) {
-      a.init <- switch(as.character( .imethod ),
-              "1" = 1 - .Qsize / (y + 1/8),
-              "2" = rep_len(1 - .Qsize / weighted.mean(y, w), n),
-              "3" = rep_len(1 - .Qsize / median(y), n),
-              "4" = rep_len(0.5, n))
-      etastart <-
-          theta2eta(a.init, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg, .Qsize = Qsize,
-            .imethod = imethod ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    aa <- eta2theta(eta, .link , earg = .earg )
-    .Qsize / (1 - aa)
-  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
-  last = eval(substitute(expression({
-    misc$link <-    c(a = .link)
-
-    misc$earg <- list(a = .earg )
-
-    misc$expected <- TRUE
-    misc$Qsize <- .Qsize 
-  }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    aa <- eta2theta(eta, .link , earg = .earg )
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <- c(w) * dbort(x = y, Qsize = .Qsize , a = aa, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
-  vfamily = c("borel.tanner"),
-
-
-
-  simslot = eval(substitute(
-  function(object, nsim) {
-
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    eta <- predict(object)
-    aa <- eta2theta(eta, .link , earg = .earg )
-    rbort(nsim * length(aa), Qsize = .Qsize , a = aa)
-  }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
-
-
-
-
-  deriv = eval(substitute(expression({
-    aa <- eta2theta(eta, .link , earg = .earg )
-    dl.da <- (y - .Qsize ) / aa - y 
-    da.deta <- dtheta.deta(aa, .link , earg = .earg )
-    c(w) * dl.da * da.deta
-  }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
-  weight = eval(substitute(expression({
-    ned2l.da2 <- .Qsize / (aa * (1 - aa))
-    wz <- c(w) * ned2l.da2 * da.deta^2
-    wz
-  }), list( .Qsize = Qsize ))))
-}
-
-
-
-
-
-dfelix <- function(x, a = 0.25, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
-
-  if (!is.Numeric(x))
-    stop("bad input for argument 'x'")
-  if (!is.Numeric(a, positive = TRUE))
-    stop("bad input for argument 'a'")
-  N <- max(length(x), length(a))
-  if (length(x)     != N) x     <- rep_len(x,     N)
-  if (length(a)     != N) a     <- rep_len(a,     N)
-
-  xok <- (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
-  ans <- rep_len(if (log.arg) log(0) else 0, N)  # loglikelihood
-  ans[xok] <- ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
-             lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
-  if (!log.arg) {
-    ans[xok] <- exp(ans[xok])
-  }
-  ans
-}
-
-
-
- felix <- function(link = extlogit(min = 0, max = 0.5), imethod = 1) {
-
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 4)
-      stop("argument 'imethod' must be 1 or 2, 3 or 4")
-
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("rate"),
+         lrate    = .lrate ,
+         imethod = .imethod )
+  }, list( .imethod = imethod,
+           .lrate = lrate ))),
 
-  new("vglmff",
-  blurb = c("Felix distribution\n\n",
-            "Link:    ",
-            namesof("a", link, earg = earg), "\n\n",
-            "Mean:     1/(1-2*a)"),
   initialize = eval(substitute(expression({
     if (any(y < 1) ||
         any((y+1)/2 != round((y+1)/2)))
@@ -2367,7 +1829,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
       predictors.names <-
-        namesof("a", .link , earg = .earg , tag = FALSE)
+        namesof("rate", .lrate , earg = .erate , tag = FALSE)
 
       if (!length(etastart)) {
           wymean <- weighted.mean(y, w)
@@ -2376,1125 +1838,279 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
               "2" = rep_len((wymean-1+1/8) / (2*(wymean+1/8)+1/8), n),
               "3" = rep_len((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8), n),
               "4" = rep_len(0.25, n))
-          etastart <-
-            theta2eta(a.init, .link , earg = .earg )
-      }
-  }), list( .link = link, .earg = earg,
-            .imethod = imethod ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    aa <- eta2theta(eta, .link , earg = .earg )
-    1 / (1 - 2 * aa)
-  }, list( .link = link, .earg = earg ))),
-  last = eval(substitute(expression({
-    misc$expected <- TRUE
-
-    misc$link <-    c(a = .link)
-
-    misc$earg <- list(a = .earg )
-  }), list( .link = link, .earg = earg ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    aa <- eta2theta(eta, .link , earg = .earg )
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <- c(w) * dfelix(x = y, a = aa, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("felix"),
-  deriv = eval(substitute(expression({
-    aa <- eta2theta(eta, .link , earg = .earg )
-    dl.da <- (y - 1) / (2 * aa) - y 
-    da.deta <- dtheta.deta(aa, .link , earg = .earg )
-    c(w) * dl.da * da.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = eval(substitute(expression({
-    ned2l.da2 <- 1 / (aa * (1 - 2 * aa))
-    wz <- c(w) * da.deta^2 * ned2l.da2
-    wz
-  }), list( .link = link ))))
-}
-
-
-
-
-
-
-
-simple.exponential <- function() {
-  new("vglmff",
-  blurb = c("Simple exponential distribution\n",
-            "Link:    log(rate)\n"),
-  deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-                      summation = TRUE) {
-    devy <- -log(y) - 1
-    devmu <- -log(mu) - y / mu
-    devi <- 2 * (devy - devmu)
-    if (residuals) {
-      sign(y - mu) * sqrt(abs(devi) * c(w))
-    } else {
-      dev.elts <- c(w) * devi
-      if (summation) sum(dev.elts) else dev.elts
-    }
-  },
-  loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-                           summation = TRUE) {
-    if (residuals) return(NULL)
-    if (summation) sum(c(w) * dexp(y, rate  = 1 / mu, log = TRUE)) else
-      c(w) * dexp(y, rate  = 1 / mu, log = TRUE)
-  },
-  initialize = expression({
-    predictors.names <- "loge(rate)"
-    mustart <- y + (y == 0) / 8
-  }),
-  linkinv = function(eta, extra = NULL) exp(-eta),
-  linkfun = function(mu,  extra = NULL) -log(mu),
-  vfamily = "simple.exponential",
-  deriv = expression({
-    rate <- 1 / mu
-    dl.drate <- mu - y
-    drate.deta <- dtheta.deta(rate, "loge")
-    c(w) * dl.drate * drate.deta
-  }),
-  weight = expression({
-    ned2l.drate2 <- 1 / rate^2  # EIM
-    wz <- c(w) * drate.deta^2 * ned2l.drate2
-    wz
-  }))
-}
-
-
-
-
-
-
-
-
-
-
-
- better.exponential <-
-  function(link = "loge", location = 0, expected = TRUE,
-           ishrinkage = 0.95, parallel = FALSE, zero = NULL) {
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-  new("vglmff",
-  blurb = c("Exponential distribution\n\n",
-            "Link:     ", namesof("rate", link, earg, tag = TRUE), "\n",
-            "Mean:     ", "mu = ", if (all(location == 0)) "1 / rate" else
-            if (length(unique(location)) == 1)
-            paste(location[1], "+ 1 / rate") else "location + 1 / rate"),
-  constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
-                           constraints = constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
-  }), list( .parallel = parallel, .zero = zero ))),
-  infos = eval(substitute(function(...) {
-    list(M1 = 1, Q1 = 1, multipleResponses = TRUE, zero = .zero )
-  }, list( .zero = zero ))),
-  deviance = function(mu, y, w, residuals = FALSE, eta,
-                      extra = NULL, summation = TRUE) {
-    location <- extra$location
-    devy <- -log(y - location) - 1
-    devmu <- -log(mu - location) - (y - location ) / (mu - location)
-    devi <- 2 * (devy - devmu)
-    if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else {
-      dev.elts <- c(w) * devi
-      if (summation) sum(dev.elts) else dev.elts
-    }
-  },
-  initialize = eval(substitute(expression({
-    checklist <- w.y.check(w = w, y = y, ncol.w.max = Inf, ncol.y.max = Inf,
-                           out.wy = TRUE, colsyperw = 1, maximize = TRUE)
-    w <- checklist$w  # So ncol(w) == ncol(y)
-    y <- checklist$y
-
-    extra$ncoly <- ncoly <- ncol(y)
-    extra$M1 <- M1 <- 1
-    M <- M1 * ncoly
-
-    extra$location <- matrix( .location , n, ncoly, byrow = TRUE)  # By row!
-    if (any(y <= extra$location))
-      stop("all responses must be greater than argument 'location'")
-
-    mynames1 <- param.names("rate", M)
-    predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
-
-    if (length(mustart) + length(etastart) == 0)
-      mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
-                 .ishrinkage + (1 - .ishrinkage ) * y + 1 / 8
-    if (!length(etastart))
-      etastart <- theta2eta(1 / (mustart - extra$location), .link , .earg )
-  }), list( .location = location, .link = link, .earg = earg,
-            .ishrinkage = ishrinkage ))),
-  linkinv = eval(substitute(function(eta, extra = NULL)
-    extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
-  list( .link = link, .earg = earg ))),
-  last = eval(substitute(expression({
-    misc$link <- rep_len( .link , M)
-    misc$earg <- vector("list", M)
-    names(misc$link) <- names(misc$earg) <- mynames1
-    for (ii in 1:M)
-      misc$earg[[ii]] <- .earg
-    misc$location <- .location
-    misc$expected <- .expected
-  }), list( .link = link, .earg = earg,
-            .expected = expected, .location = location ))),
-  linkfun = eval(substitute(function(mu, extra = NULL) 
-    theta2eta(1 / (mu - extra$location), .link , earg = .earg ),
-  list( .link = link, .earg = earg ))),
-  loglikelihood =
-  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE)
-    if (residuals) stop("loglikelihood residuals not implemented yet") else {
-      rate <- 1 / (mu - extra$location)
-      ll.elts <- c(w) * dexp(y - extra$location, rate = rate, log = TRUE)
-      if (summation) sum(ll.elts) else ll.elts
-    },
-  vfamily = c("better.exponential"),
-  simslot = eval(substitute(function(object, nsim) {
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) warning("ignoring prior weights")
-    mu <- fitted(object)
-    rate <- 1 / (mu - object at extra$location)
-    rexp(nsim * length(rate), rate = rate)
-  }, list( .link = link, .earg = earg ))),
-  deriv = eval(substitute(expression({
-    rate <- 1 / (mu - extra$location)
-    dl.drate <- mu - y
-    drate.deta <- dtheta.deta(rate, .link , earg = .earg )
-    c(w) * dl.drate * drate.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = eval(substitute(expression({
-    ned2l.drate2 <- (mu - extra$location)^2
-    wz <- ned2l.drate2 * drate.deta^2  # EIM
-    if (! .expected ) {  # Use the OIM, not the EIM
-      d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
-      wz <- wz - dl.drate * d2rate.deta2
-    }
-    c(w) * wz
-  }), list( .link = link, .expected = expected, .earg = earg ))))
-}
-
-
-
-
-
-
- exponential <-
-  function(link = "loge", location = 0, expected = TRUE,
-           ishrinkage = 0.95, parallel = FALSE, zero = NULL) {
-  if (!is.logical(expected) || length(expected) != 1)
-    stop("bad input for argument 'expected'")
-
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-  if (!is.Numeric(ishrinkage, length.arg = 1) ||
-      ishrinkage < 0 || ishrinkage > 1)
-    stop("bad input for argument 'ishrinkage'")
-
-
-  new("vglmff",
-  blurb = c("Exponential distribution\n\n",
-            "Link:     ",
-            namesof("rate", link, earg, tag = TRUE), "\n",
-            "Mean:     ", "mu = ", 
-            if (all(location == 0)) "1 / rate" else
-            if (length(unique(location)) == 1)
-            paste(location[1], "+ 1 / rate") else
-            "location + 1 / rate"),
-  constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
-                           constraints = constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
-  }), list( .parallel = parallel, .zero = zero ))),
-  infos = eval(substitute(function(...) {
-    list(M1 = 1,
-         Q1 = 1,
-         zero = .zero )
-  }, list( .zero = zero ))),
-  deviance = function(mu, y, w, residuals = FALSE, eta,
-                      extra = NULL, summation = TRUE) {
-    location <- extra$location
-    devy <- -log(y - location) - 1
-    devmu <- -log(mu - location) - (y - location ) / (mu - location)
-    devi <- 2 * (devy - devmu)
-    if (residuals) {
-      sign(y - mu) * sqrt(abs(devi) * w)
-    } else {
-      dev.elts <- c(w) * devi
-      if (summation) {
-        sum(dev.elts)
-      } else {
-        dev.elts
-      }
-    }
-  },
-  initialize = eval(substitute(expression({
-    checklist <-
-    w.y.check(w = w, y = y,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- checklist$w
-    y <- checklist$y
-
-    ncoly <- ncol(y)
-    M1 <- 1
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
-    M <- M1 * ncoly
-
-    extra$location <- matrix( .location , n, ncoly, byrow = TRUE)  # By row!
-
-    if (any(y <= extra$location))
-      stop("all responses must be greater than ", extra$location)
-
-    mynames1 <- param.names("rate", M)
-    predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
-
-    if (length(mustart) + length(etastart) == 0)
-      mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
-                 .ishrinkage + (1 - .ishrinkage ) * y + 1 / 8
-    if (!length(etastart))
-      etastart <- theta2eta(1 / (mustart - extra$location),
-                            .link , earg = .earg )
-  }), list( .location = location,
-            .link = link, .earg = earg,
-            .ishrinkage = ishrinkage ))),
-  linkinv = eval(substitute(function(eta, extra = NULL)
-    extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
-  list( .link = link, .earg = earg ))),
-  last = eval(substitute(expression({
-    misc$link <- rep_len( .link , M)
-    names(misc$link) <- mynames1
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- mynames1
-    for (ii in 1:M)
-      misc$earg[[ii]] <- .earg
-    misc$location <- .location
-    misc$expected <- .expected
-    misc$multipleResponses <- TRUE
-    misc$M1 <- M1
-  }), list( .link = link, .earg = earg,
-            .expected = expected, .location = location ))),
-  linkfun = eval(substitute(function(mu, extra = NULL) 
-    theta2eta(1 / (mu - extra$location), .link , earg = .earg ),
-  list( .link = link, .earg = earg ))),
-  loglikelihood =
-  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE)
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      rate <- 1 / (mu - extra$location)
-      ll.elts <- c(w) * dexp(x = y - extra$location, rate = rate, log = TRUE)
-      if (summation) sum(ll.elts) else ll.elts
-  },
-  vfamily = c("exponential"),
-  simslot = eval(substitute(
-  function(object, nsim) {
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1))
-      warning("ignoring prior weights")
-    mu <- fitted(object)
-    rate <- 1 / (mu - object at extra$location)
-    rexp(nsim * length(rate), rate = rate)
-  }, list( .link = link, .earg = earg ))),
-  deriv = eval(substitute(expression({
-    rate <- 1 / (mu - extra$location)
-    dl.drate <- mu - y
-    drate.deta <- dtheta.deta(rate, .link , earg = .earg )
-    c(w) * dl.drate * drate.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = eval(substitute(expression({
-    ned2l.drate2 <- (mu - extra$location)^2
-    wz <- ned2l.drate2 * drate.deta^2
-    if (! .expected ) {  # Use the OIM, not the EIM
-      d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
-      wz <- wz - dl.drate * d2rate.deta2
-    }
-    c(w) * wz
-  }), list( .link = link, .expected = expected, .earg = earg ))))
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- gamma1 <- function(link = "loge", zero = NULL) {
-
-
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-
-
-
-  new("vglmff",
-  blurb = c("1-parameter Gamma distribution\n",
-            "Link:     ",
-            namesof("shape", link, earg = earg, tag = TRUE), "\n", 
-            "Mean:       mu (=shape)\n",
-            "Variance:   mu (=shape)"),
-  constraints = eval(substitute(expression({
-    dotzero <- .zero
-    M1 <- 1
-    eval(negzero.expression.VGAM)
-  }), list( .zero = zero ))),
-
-  infos = eval(substitute(function(...) {
-    list(M1 = 1,
-         Q1 = 1,
-         zero = .zero )
-  }, list( .zero = zero ))),
-
-  initialize = eval(substitute(expression({
-
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-
-    M <- if (is.matrix(y)) ncol(y) else 1
-    M1 <- 1
-
-    mynames1 <- param.names("shape", M)
-    predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
-
-    if (!length(etastart))
-      etastart <- cbind(theta2eta(y + 1/8, .link , earg = .earg ))
-  }), list( .link = link, .earg = earg ))), 
-  linkinv = eval(substitute(function(eta, extra = NULL)
-    eta2theta(eta, .link , earg = .earg )),
-  list( .link = link, .earg = earg )),
-  last = eval(substitute(expression({
-    misc$link <- rep_len( .link , M)
-    names(misc$link) <- mynames1
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- names(misc$link)
-    for (ii in 1:M)
-      misc$earg[[ii]] <- .earg
-
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
-    misc$M1 <- M1
-  }), list( .link = link, .earg = earg ))),
-  linkfun = eval(substitute(function(mu, extra = NULL)
-    theta2eta(mu, .link , earg = .earg )),
-  list( .link = link, .earg = earg )),
-  loglikelihood =
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE)
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <- c(w) * dgamma(x = y, shape = mu, scale = 1, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-  },
-  vfamily = c("gamma1"),
-
-
-
-
-  simslot = eval(substitute(
-  function(object, nsim) {
-
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    mu <- fitted(object)
-    rgamma(nsim * length(shape), shape = mu, scale = 1)
-  }, list( .link = link, .earg = earg ))),
-
-
-
-
-
-  deriv = eval(substitute(expression({
-    shape <- mu
-    dl.dshape <- log(y) - digamma(shape)
-    dshape.deta <- dtheta.deta(shape, .link , earg = .earg )
-    ans <- c(w) * dl.dshape * dshape.deta
-    ans
-    c(w) * dl.dshape * dshape.deta
-  }), list( .link = link, .earg = earg ))),
-  weight = expression({
-    ned2l.dshape <- trigamma(shape)
-    wz <- ned2l.dshape * dshape.deta^2
-    c(w) * wz
-  }))
-}
-
-
-
-
-
-
-
-
-
-
- gammaR <-
-  function(lrate = "loge", lshape = "loge", 
-           irate = NULL,   ishape = NULL,
-           lss = TRUE,
-           zero = "shape"
-          ) {
-
-
-  expected <- TRUE  # FALSE does not work well
-
-  iratee <- irate
-
-  lratee <- as.list(substitute(lrate))
-  eratee <- link2list(lratee)
-  lratee <- attr(eratee, "function.name")
-
-  lshape <- as.list(substitute(lshape))
-  eshape <- link2list(lshape)
-  lshape <- attr(eshape, "function.name")
-
-
-  if (length( iratee) && !is.Numeric(iratee, positive = TRUE))
-    stop("bad input for argument 'irate'")
-  if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
-    stop("bad input for argument 'ishape'")
-
-
-  if (!is.logical(expected) || length(expected) != 1)
-    stop("bad input for argument 'expected'")
-
-
-  ratee.TF <- if (lss) c(TRUE, FALSE) else c(FALSE, TRUE)
-  scale.12 <- if (lss) 1:2 else 2:1
-  blurb.vec <- c(namesof("rate",  lratee, earg = eratee),
-                 namesof("shape", lshape, earg = eshape))
-  blurb.vec <- blurb.vec[scale.12]
-
-
-
-  new("vglmff",
-  blurb = c("2-parameter Gamma distribution\n",
-            "Links:    ",
-            blurb.vec[1], ", ",
-            blurb.vec[2], "\n",
-            "Mean:     mu = shape/rate\n",
-            "Variance: (mu^2)/shape = shape/rate^2"),
-  constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
-                                predictors.names = predictors.names,
-                                M1 = 2)
-  }), list( .zero = zero ))),
-
-  infos = eval(substitute(function(...) {
-    list(M1 = 2,
-         Q1 = 1,
-         expected = .expected ,
-         multipleResponses = TRUE,
-         zero = .zero )
-  }, list( .zero = zero, .scale.12 = scale.12, .ratee.TF = ratee.TF,
-           .expected = expected
-         ))),
-
-  initialize = eval(substitute(expression({
-
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-    ncoly <- ncol(y)
-    M1 <- 2
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
-    M <- M1 * ncoly
-
-
-    if ( .lss ) {
-      mynames1 <- param.names("rate",  ncoly)
-      mynames2 <- param.names("shape", ncoly)
-      predictors.names <-
-          c(namesof(mynames1, .lratee , earg = .eratee , tag = FALSE),
-            namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
-
-    } else {
-      mynames1 <- param.names("shape", ncoly)
-      mynames2 <- param.names("rate",  ncoly)
-      predictors.names <-
-          c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
-            namesof(mynames2, .lratee , earg = .eratee , tag = FALSE))
-    }
-    parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
-    predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-
-
-
-    Ratee.init <- matrix(if (length( .iratee )) .iratee else 0 + NA,
-                         n, ncoly, byrow = TRUE)
-    Shape.init <- matrix(if (length( .ishape )) .iscale else 0 + NA,
-                         n, ncoly, byrow = TRUE)
-
-
-    if (!length(etastart)) {
-      mymu <- y + 0.167 * (y == 0)
-
-
-      for (ilocal in 1:ncoly) {
-        junk <- lsfit(x, y[, ilocal], wt = w[, ilocal], intercept = FALSE)
-        var.y.est <- sum(c(w[, ilocal]) * junk$resid^2) / (nrow(x) -
-                     length(junk$coef))
-
-        if (!is.Numeric(Shape.init[, ilocal]))
-          Shape.init[, ilocal] <- (mymu[, ilocal])^2 / var.y.est
-
-        if (!is.Numeric(Ratee.init[, ilocal]))
-          Ratee.init[, ilocal] <- Shape.init[, ilocal] / mymu[, ilocal]
-      }
-
-      if ( .lshape == "loglog")
-        Shape.init[Shape.init <= 1] <- 3.1  # Hopefully value is big enough
-      etastart <- if ( .lss )
-        cbind(theta2eta(Ratee.init, .lratee , earg = .eratee ),
-              theta2eta(Shape.init, .lshape , earg = .eshape ))[,
-              interleave.VGAM(M, M1 = M1)] else
-        cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
-              theta2eta(Ratee.init, .lratee , earg = .eratee ))[,
-              interleave.VGAM(M, M1 = M1)]
-    }
-  }), list( .lratee = lratee, .lshape = lshape,
-            .iratee = iratee, .ishape = ishape,
-            .eratee = eratee, .eshape = eshape,
-            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
-    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
-    Shape / Ratee
-  }, list( .lratee = lratee, .lshape = lshape,
-           .eratee = eratee, .eshape = eshape,
-            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
-  last = eval(substitute(expression({
-    misc$multipleResponses <- TRUE
-
-    M1 <- extra$M1
-    avector <- if ( .lss ) c(rep_len( .lratee , ncoly),
-                             rep_len( .lshape , ncoly)) else
-                           c(rep_len( .lshape , ncoly),
-                             rep_len( .lratee , ncoly))
-    misc$link <- avector[interleave.VGAM(M, M1 = M1)]
-    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
-    names(misc$link) <- temp.names
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- temp.names
-    for (ii in 1:ncoly) {
-      misc$earg[[M1*ii-1]] <- if ( .lss ) .eratee else .eshape
-      misc$earg[[M1*ii  ]] <- if ( .lss ) .eshape else .eratee
-    }
-
-    misc$M1 <- M1
-  }), list( .lratee = lratee, .lshape = lshape,
-            .eratee = eratee, .eshape = eshape,
-            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
-    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <- c(w) * dgamma(x=y, shape = Shape, rate = Ratee, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .lratee = lratee, .lshape = lshape,
-           .eratee = eratee, .eshape = eshape,
-           .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
-  vfamily = c("gammaR"),
-
-
-
-
-  simslot = eval(substitute(
-  function(object, nsim) {
-
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    eta <- predict(object)
-    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
-    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
-    rgamma(nsim * length(Shape), shape = Shape, rate = Ratee)
-  }, list( .lratee = lratee, .lshape = lshape,
-           .eratee = eratee, .eshape = eshape,
-           .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
-
-
-  deriv = eval(substitute(expression({
-    M1 <- 2
-    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
-    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
-    dl.dratee <- mu - y
-    dl.dshape <- log(y * Ratee) - digamma(Shape)
-    dratee.deta <- dtheta.deta(Ratee, .lratee , earg = .eratee )
-    dshape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
-
-    myderiv <- if ( .lss )
-                 c(w) * cbind(dl.dratee * dratee.deta,
-                              dl.dshape * dshape.deta) else
-                 c(w) * cbind(dl.dshape * dshape.deta,
-                              dl.dratee * dratee.deta)
-    myderiv[, interleave.VGAM(M, M1 = M1)]
-  }), list( .lratee = lratee, .lshape = lshape,
-            .eratee = eratee, .eshape = eshape,
-            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
-  weight = eval(substitute(expression({
-    ned2l.dratee2 <- Shape / (Ratee^2)
-    ned2l.drateeshape <- -1/Ratee
-    ned2l.dshape2 <- trigamma(Shape)
-
-    if ( .expected ) {
-     ratee.adjustment <-  0
-     shape.adjustment <-  0
-    } else {
-      d2ratee.deta2 <- d2theta.deta2(Ratee, .lratee , earg = .eratee )
-      d2shape.deta2 <- d2theta.deta2(Shape, .lshape , earg = .eshape )
-      ratee.adjustment <- dl.dratee * d2ratee.deta2
-      shape.adjustment <- dl.dshape * d2shape.deta2
-    }
-
-    wz <- if ( .lss )
-            array(c(c(w) * (ned2l.dratee2 * dratee.deta^2 - ratee.adjustment),
-                    c(w) * (ned2l.dshape2 * dshape.deta^2 - shape.adjustment),
-                    c(w) * (ned2l.drateeshape * dratee.deta * dshape.deta)),
-                  dim = c(n, M / M1, 3)) else
-            array(c(c(w) * (ned2l.dshape2 * dshape.deta^2 - shape.adjustment),
-                    c(w) * (ned2l.dratee2 * dratee.deta^2 - ratee.adjustment),
-                    c(w) * (ned2l.drateeshape * dratee.deta * dshape.deta)),
-                  dim = c(n, M / M1, 3))
-    wz <- arwz2wz(wz, M = M, M1 = M1)
-    wz
-  }), list( .lratee = lratee, .lshape = lshape,
-            .eratee = eratee, .eshape = eshape, .expected = expected,
-            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss  ))))
-}
-
-
-
-
-
-
- gamma2 <-
-  function(lmu = "loge", lshape = "loge",
-           imethod = 1,  ishape = NULL,
-           parallel = FALSE,
-           deviance.arg = FALSE,
-           zero = "shape") {
-
-
-
-  if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
-    stop("argument 'deviance.arg' must be TRUE or FALSE")
-
-
-  apply.parint <- FALSE
-
-  lmu <- as.list(substitute(lmu))
-  emu <- link2list(lmu)
-  lmu <- attr(emu, "function.name")
-
-  lshape <- as.list(substitute(lshape))
-  eshape <- link2list(lshape)
-  lshape <- attr(eshape, "function.name")
-
-
-
-  if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
-    stop("bad input for argument 'ishape'")
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
-
-
-  if (!is.logical(apply.parint) ||
-      length(apply.parint) != 1)
-    stop("argument 'apply.parint' must be a single logical")
-
-
-  if (is.logical(parallel) && parallel && length(zero))
-    stop("set 'zero = NULL' if 'parallel = TRUE'")
-
-
-    ans <- 
-    new("vglmff",
-    blurb = c("2-parameter gamma distribution",
-              " (McCullagh and Nelder 1989 parameterization)\n",
-              "Links:    ",
-              namesof("mu",    lmu,    earg = emu), ", ", 
-              namesof("shape", lshape, earg = eshape), "\n",
-              "Mean:     mu\n",
-              "Variance: (mu^2)/shape"),
-    constraints = eval(substitute(expression({
-
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
-                           constraints = constraints,
-                           apply.int = .apply.parint )
-
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
-                                predictors.names = predictors.names,
-                                M1 = 2)
-  }), list( .zero = zero,
-            .parallel = parallel, .apply.parint = apply.parint ))),
-
-  infos = eval(substitute(function(...) {
-    list(M1 = 2,
-         Q1 = 1,
-         expected = TRUE,
-         multipleResponses = TRUE,
-         parameters.names = c("mu", "shape"),
-         zero = .zero )
-  }, list( .zero = zero ))),
-
-
-  initialize = eval(substitute(expression({
-    M1 <- 2
-
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-
-      assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
-             envir = VGAMenv)
-      if (any(function.name == c("cqo", "cao")) &&
-         is.Numeric( .zero , length.arg = 1) && .zero != -2)
-        stop("argument zero = -2 is required")
-
-      M <- M1 * ncol(y)
-      NOS <- ncoly <- ncol(y)  # Number of species
-
-
-      temp1.names <- param.names("mu",    NOS)
-      temp2.names <- param.names("shape", NOS)
-      predictors.names <-
-          c(namesof(temp1.names, .lmu ,    earg = .emu ,    tag = FALSE),
-            namesof(temp2.names, .lshape , earg = .eshape , tag = FALSE))
-      predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-
-
-
-
-    if (is.logical( .parallel ) & .parallel & ncoly > 1)
-      warning("the constraint matrices may not be correct with ",
-              "multiple responses")
-
-
-
-      if (!length(etastart)) {
-        init.shape <- matrix(1.0, n, NOS)
-        mymu <- y # + 0.167 * (y == 0)  # imethod == 1 (the default)
-        if ( .imethod == 2) {
-            for (ii in 1:ncol(y)) {
-              mymu[, ii] <- weighted.mean(y[, ii], w = w[, ii])
-            }
-        }
-        for (spp in 1:NOS) {
-          junk <- lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
-          var.y.est <- sum(w[, spp] * junk$resid^2) / (n - length(junk$coef))
-          init.shape[, spp] <- if (length( .ishape )) .ishape else
-              mymu[, spp]^2 / var.y.est
-          if ( .lshape == "loglog")
-              init.shape[init.shape[, spp] <= 1,spp] <- 3.1
-        }
-        etastart <-
-              cbind(theta2eta(mymu, .lmu , earg = .emu ),
-                    theta2eta(init.shape, .lshape , earg = .eshape ))
-        etastart <-
-            etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
-      }
-  }), list( .lmu = lmu, .lshape = lshape, .ishape = ishape,
-            .emu = emu, .eshape = eshape,
-            .parallel = parallel, .apply.parint = apply.parint,
-            .zero = zero, .imethod = imethod ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    M1 <- 2
-    NOS <- ncol(eta) / M1
-    eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
-              .lmu , earg = .emu )
-  }, list( .lmu = lmu, .emu = emu ))),
-  last = eval(substitute(expression({
-    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
-        rm("CQO.FastAlgorithm", envir = VGAMenv)
-
-    tmp34 <- c(rep_len( .lmu ,    NOS),
-               rep_len( .lshape , NOS))
-    names(tmp34) <- c(param.names("mu",    NOS), 
-                      param.names("shape", NOS))
-    tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
-    misc$link <- tmp34 # Already named
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- names(misc$link)
-    for (ii in 1:NOS) {
-      misc$earg[[M1*ii-1]] <- .emu
-      misc$earg[[M1*ii  ]] <- .eshape
-    }
-
-    misc$M1 <- M1
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
-    misc$parallel <- .parallel
-    misc$apply.parint <- .apply.parint
-  }), list( .lmu = lmu, .lshape = lshape,
-            .emu = emu, .eshape = eshape,
-            .parallel = parallel, .apply.parint = apply.parint ))),
-  linkfun = eval(substitute(function(mu, extra = NULL) {
-    temp <- theta2eta(mu, .lmu , earg = .emu )
-    temp <- cbind(temp, NA * temp)
-    temp[, interleave.VGAM(ncol(temp), M1 = M1), drop = FALSE]
-  }, list( .lmu = lmu, .emu = emu ))),
+          etastart <-
+            theta2eta(a.init, .lrate , earg = .erate )
+      }
+  }), list( .lrate = lrate, .erate = erate,
+            .imethod = imethod ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    rate <- eta2theta(eta, .lrate , earg = .erate )
+    1 / (1 - 2 * rate)
+  }, list( .lrate = lrate, .erate = erate ))),
+  last = eval(substitute(expression({
+    misc$link <-    c(rate = .lrate)
+    misc$earg <- list(rate = .erate )
+  }), list( .lrate = lrate, .erate = erate ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    M1 <- 2
-    NOS <- ncol(eta) / M1
-    mymu <- mu  # eta2theta(eta[, 2*(1:NOS)-1], .lmu , earg = .emu )
-    shapemat <- eta2theta(eta[, M1 * (1:NOS), drop = FALSE],
-                         .lshape , earg = .eshape )
+    rate <- eta2theta(eta, .lrate , earg = .erate )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <-
-        c(w) * dgamma(x = y,
-                      shape = c(shapemat),
-                      scale = c(mymu / shapemat),
-                      log = TRUE)
+      ll.elts <- c(w) * dfelix(x = y, rate = rate, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lmu = lmu, .lshape = lshape,
-           .emu = emu, .eshape = eshape))),
-  vfamily = c("gamma2"),
-
+  }, list( .lrate = lrate, .erate = erate ))),
+  vfamily = c("felix"),
+  deriv = eval(substitute(expression({
+    rate <- eta2theta(eta, .lrate , earg = .erate )
+    dl.da <- (y - 1) / (2 * rate) - y
+    da.deta <- dtheta.deta(rate, .lrate , earg = .erate )
+    c(w) * dl.da * da.deta
+  }), list( .lrate = lrate, .erate = erate ))),
+  weight = eval(substitute(expression({
+    ned2l.da2 <- 1 / (rate * (1 - 2 * rate))
+    wz <- c(w) * da.deta^2 * ned2l.da2
+    wz
+  }), list( .lrate = lrate ))))
+}
 
 
 
-  simslot = eval(substitute(
-  function(object, nsim) {
 
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    eta <- predict(object)
-    mymu  <- (eta2theta(eta[, c(TRUE, FALSE)], .lmu    , earg = .emu    ))
-    shape <- (eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape ))
-    rgamma(nsim * length(shape),
-           shape = c(shape),
-           scale = c(mymu/shape))
-  }, list( .lmu = lmu, .lshape = lshape,
-           .emu = emu, .eshape = eshape))),
 
 
 
+simple.exponential <- function() {
+  new("vglmff",
+  blurb = c("Simple exponential distribution\n",
+            "Link:    log(rate)\n"),
+  deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+                      summation = TRUE) {
+    devy <- -log(y) - 1
+    devmu <- -log(mu) - y / mu
+    devi <- 2 * (devy - devmu)
+    if (residuals) {
+      sign(y - mu) * sqrt(abs(devi) * c(w))
+    } else {
+      dev.elts <- c(w) * devi
+      if (summation) sum(dev.elts) else dev.elts
+    }
+  },
+  loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+                           summation = TRUE) {
+    if (residuals) return(NULL)
+    if (summation) sum(c(w) * dexp(y, rate  = 1 / mu, log = TRUE)) else
+      c(w) * dexp(y, rate  = 1 / mu, log = TRUE)
+  },
+  initialize = expression({
+    predictors.names <- "loge(rate)"
+    mustart <- y + (y == 0) / 8
+  }),
+  linkinv = function(eta, extra = NULL) exp(-eta),
+  linkfun = function(mu,  extra = NULL) -log(mu),
+  vfamily = "simple.exponential",
+  deriv = expression({
+    rate <- 1 / mu
+    dl.drate <- mu - y
+    drate.deta <- dtheta.deta(rate, "loge")
+    c(w) * dl.drate * drate.deta
+  }),
+  weight = expression({
+    ned2l.drate2 <- 1 / rate^2  # EIM
+    wz <- c(w) * drate.deta^2 * ned2l.drate2
+    wz
+  }))
+}
 
 
-  deriv = eval(substitute(expression({
-    M1 <- 2
-    NOS <- ncol(eta) / M1
 
-    mymu  <- eta2theta(eta[, M1 * (1:NOS) - 1],
-                       .lmu ,    earg = .emu    )
-    shape <- eta2theta(eta[, M1 * (1:NOS)],
-                       .lshape , earg = .eshape )
 
-    dl.dmu <- shape * (y / mymu - 1) / mymu
-    dl.dshape <- log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
-                y / mymu
 
-    dmu.deta    <- dtheta.deta(mymu,  .lmu ,    earg = .emu )
-    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
 
-    myderiv <- c(w) * cbind(dl.dmu    * dmu.deta,
-                            dl.dshape * dshape.deta)
-    myderiv[, interleave.VGAM(M, M1 = M1)]
-  }), list( .lmu = lmu, .lshape = lshape,
-            .emu = emu, .eshape = eshape))),
-  weight = eval(substitute(expression({
-    ned2l.dmu2 <- shape / (mymu^2)
-    ned2l.dshape2 <- trigamma(shape) - 1 / shape
-    wz <- matrix(NA_real_, n, M)  # 2 = M1; diagonal!
 
-    wz[, M1*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
-    wz[, M1*(1:NOS)  ] <- ned2l.dshape2 * dshape.deta^2
 
 
-    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }), list( .lmu = lmu ))))
 
 
+ better.exponential <-
+  function(link = "loge", location = 0, expected = TRUE,
+           ishrinkage = 0.95, parallel = FALSE, zero = NULL) {
+  link <- as.list(substitute(link))
+  earg <- link2list(link)
+  link <- attr(earg, "function.name")
 
-  if (deviance.arg)
-    ans at deviance <- eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-             summation = TRUE) {
+  new("vglmff",
+  blurb = c("Exponential distribution\n\n",
+            "Link:     ", namesof("rate", link, earg, tag = TRUE), "\n",
+            "Mean:     ", "mu = ", if (all(location == 0)) "1 / rate" else
+            if (length(unique(location)) == 1)
+            paste(location[1], "+ 1 / rate") else "location + 1 / rate"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
+                           constraints = constraints, apply.int = TRUE)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+  }), list( .parallel = parallel, .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1, Q1 = 1, multipleResponses = TRUE, zero = .zero )
+  }, list( .zero = zero ))),
+  deviance = function(mu, y, w, residuals = FALSE, eta,
+                      extra = NULL, summation = TRUE) {
+    location <- extra$location
+    devy <- -log(y - location) - 1
+    devmu <- -log(mu - location) - (y - location ) / (mu - location)
+    devi <- 2 * (devy - devmu)
+    if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else {
+      dev.elts <- c(w) * devi
+      if (summation) sum(dev.elts) else dev.elts
+    }
+  },
+  initialize = eval(substitute(expression({
+    checklist <- w.y.check(w = w, y = y, ncol.w.max = Inf, ncol.y.max = Inf,
+                           out.wy = TRUE, colsyperw = 1, maximize = TRUE)
+    w <- checklist$w  # So ncol(w) == ncol(y)
+    y <- checklist$y
 
+    extra$ncoly <- ncoly <- ncol(y)
+    extra$M1 <- M1 <- 1
+    M <- M1 * ncoly
 
-    if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
-      stop("cannot handle matrix 'w' yet")
+    extra$location <- matrix( .location , n, ncoly, byrow = TRUE)  # By row!
+    if (any(y <= extra$location))
+      stop("all responses must be greater than argument 'location'")
 
+    mynames1 <- param.names("rate", M)
+    predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
 
-    M1 <- 2
-    NOS <- ncol(eta) / 2
-    temp300 <-  eta[, 2*(1:NOS), drop = FALSE]
-    shape <-  eta2theta(temp300, .lshape , earg = .eshape )
-    devi <- -2 * (log(y/mu) - y/mu + 1)
-    if (residuals) {
-      warning("not 100% sure about these deviance residuals!")
-      sign(y - mu) * sqrt(abs(devi) * w)
-    } else {
-      dev.elts <- c(w) * devi
-      if (summation) {
-        sum(dev.elts)
-      } else {
-        dev.elts
-      }
+    if (length(mustart) + length(etastart) == 0)
+      mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
+                 .ishrinkage + (1 - .ishrinkage ) * y + 1 / 8
+    if (!length(etastart))
+      etastart <- theta2eta(1 / (mustart - extra$location), .link , .earg )
+  }), list( .location = location, .link = link, .earg = earg,
+            .ishrinkage = ishrinkage ))),
+  linkinv = eval(substitute(function(eta, extra = NULL)
+    extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
+  list( .link = link, .earg = earg ))),
+  last = eval(substitute(expression({
+    misc$link <- rep_len( .link , M)
+    misc$earg <- vector("list", M)
+    names(misc$link) <- names(misc$earg) <- mynames1
+    for (ii in 1:M)
+      misc$earg[[ii]] <- .earg
+    misc$location <- .location
+    misc$expected <- .expected
+  }), list( .link = link, .earg = earg,
+            .expected = expected, .location = location ))),
+  linkfun = eval(substitute(function(mu, extra = NULL)
+    theta2eta(1 / (mu - extra$location), .link , earg = .earg ),
+  list( .link = link, .earg = earg ))),
+  loglikelihood =
+  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE)
+    if (residuals) stop("loglikelihood residuals not implemented yet") else {
+      rate <- 1 / (mu - extra$location)
+      ll.elts <- c(w) * dexp(y - extra$location, rate = rate, log = TRUE)
+      if (summation) sum(ll.elts) else ll.elts
+    },
+  vfamily = c("better.exponential"),
+  simslot = eval(substitute(function(object, nsim) {
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1)) warning("ignoring prior weights")
+    mu <- fitted(object)
+    rate <- 1 / (mu - object at extra$location)
+    rexp(nsim * length(rate), rate = rate)
+  }, list( .link = link, .earg = earg ))),
+  deriv = eval(substitute(expression({
+    rate <- 1 / (mu - extra$location)
+    dl.drate <- mu - y
+    drate.deta <- dtheta.deta(rate, .link , earg = .earg )
+    c(w) * dl.drate * drate.deta
+  }), list( .link = link, .earg = earg ))),
+  weight = eval(substitute(expression({
+    ned2l.drate2 <- (mu - extra$location)^2
+    wz <- ned2l.drate2 * drate.deta^2  # EIM
+    if (! .expected ) {  # Use the OIM, not the EIM
+      d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
+      wz <- wz - dl.drate * d2rate.deta2
     }
-  }, list( .lshape = lshape )))
-  ans
+    c(w) * wz
+  }), list( .link = link, .expected = expected, .earg = earg ))))
 }
 
 
 
- geometric <- function(link = "logit", expected = TRUE,
-                       imethod = 1, iprob = NULL, zero = NULL) {
 
+
+
+ exponential <-
+  function(link = "loge", location = 0, expected = TRUE,
+           ishrinkage = 0.95, parallel = FALSE, zero = NULL) {
   if (!is.logical(expected) || length(expected) != 1)
     stop("bad input for argument 'expected'")
 
-
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 3)
-    stop("argument 'imethod' must be 1 or 2 or 3")
-
-
-
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 || ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
   new("vglmff",
-  blurb = c("Geometric distribution ",
-            "(P[Y=y] = prob * (1 - prob)^y, y = 0, 1, 2,...)\n",
+  blurb = c("Exponential distribution\n\n",
             "Link:     ",
-            namesof("prob", link, earg = earg), "\n",
-            "Mean:     mu = (1 - prob) / prob\n",
-            "Variance: mu * (1 + mu) = (1 - prob) / prob^2"),
+            namesof("rate", link, earg, tag = TRUE), "\n",
+            "Mean:     ", "mu = ",
+            if (all(location == 0)) "1 / rate" else
+            if (length(unique(location)) == 1)
+            paste(location[1], "+ 1 / rate") else
+            "location + 1 / rate"),
   constraints = eval(substitute(expression({
-    dotzero <- .zero
-    M1 <- 1
-    eval(negzero.expression.VGAM)
-  }), list( .zero = zero ))),
-
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
+                           constraints = constraints, apply.int = TRUE)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
+  }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 1,
          Q1 = 1,
          zero = .zero )
   }, list( .zero = zero ))),
-
-
+  deviance = function(mu, y, w, residuals = FALSE, eta,
+                      extra = NULL, summation = TRUE) {
+    location <- extra$location
+    devy <- -log(y - location) - 1
+    devmu <- -log(mu - location) - (y - location ) / (mu - location)
+    devi <- 2 * (devy - devmu)
+    if (residuals) {
+      sign(y - mu) * sqrt(abs(devi) * w)
+    } else {
+      dev.elts <- c(w) * devi
+      if (summation) {
+        sum(dev.elts)
+      } else {
+        dev.elts
+      }
+    }
+  },
   initialize = eval(substitute(expression({
-
-
-    temp5 <-
+    checklist <-
     w.y.check(w = w, y = y,
-              Is.nonnegative.y = TRUE,
-              Is.integer.y = TRUE,
               ncol.w.max = Inf,
               ncol.y.max = Inf,
               out.wy = TRUE,
               colsyperw = 1,
               maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
+    w <- checklist$w
+    y <- checklist$y
 
     ncoly <- ncol(y)
     M1 <- 1
@@ -3502,177 +2118,77 @@ simple.exponential <- function() {
     extra$M1 <- M1
     M <- M1 * ncoly
 
+    extra$location <- matrix( .location , n, ncoly, byrow = TRUE)  # By row!
 
-    mynames1  <- param.names("prob", ncoly)
-    predictors.names <-
-      namesof(mynames1, .link , earg = .earg , tag = FALSE)
-
-
-    if (!length(etastart)) {
-      prob.init <- if ( .imethod == 2)
-                      1 / (1 + y + 1/16) else
-                  if ( .imethod == 3)
-                      1 / (1 + apply(y, 2, median) + 1/16) else
-                      1 / (1 + colSums(y * w) / colSums(w) + 1/16)
-
-      if (!is.matrix(prob.init))
-        prob.init <- matrix(prob.init, n, M, byrow = TRUE)
-
-
-      if (length( .iprob ))
-        prob.init <- matrix( .iprob , n, M, byrow = TRUE)
-
+    if (any(y <= extra$location))
+      stop("all responses must be greater than ", extra$location)
 
-        etastart <- theta2eta(prob.init, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg,
-            .imethod = imethod, .iprob = iprob ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    prob <- eta2theta(eta, .link , earg = .earg )
-    (1 - prob) / prob 
-  }, list( .link = link, .earg = earg ))),
+    mynames1 <- param.names("rate", M)
+    predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
 
+    if (length(mustart) + length(etastart) == 0)
+      mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
+                 .ishrinkage + (1 - .ishrinkage ) * y + 1 / 8
+    if (!length(etastart))
+      etastart <- theta2eta(1 / (mustart - extra$location),
+                            .link , earg = .earg )
+  }), list( .location = location,
+            .link = link, .earg = earg,
+            .ishrinkage = ishrinkage ))),
+  linkinv = eval(substitute(function(eta, extra = NULL)
+    extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
+  list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
-    M1 <- extra$M1
-    misc$link <- c(rep_len( .link , ncoly))
+    misc$link <- rep_len( .link , M)
     names(misc$link) <- mynames1
-
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for (ii in 1:ncoly) {
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
-    }
-
-    misc$M1 <- M1
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
+    misc$location <- .location
     misc$expected <- .expected
-    misc$imethod <- .imethod
-    misc$iprob <- .iprob
+    misc$multipleResponses <- TRUE
+    misc$M1 <- M1
   }), list( .link = link, .earg = earg,
-            .iprob = iprob,
-            .expected = expected, .imethod = imethod ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    prob <- eta2theta(eta, .link , earg = .earg )
+            .expected = expected, .location = location ))),
+  linkfun = eval(substitute(function(mu, extra = NULL)
+    theta2eta(1 / (mu - extra$location), .link , earg = .earg ),
+  list( .link = link, .earg = earg ))),
+  loglikelihood =
+  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dgeom(x = y, prob = prob, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("geometric"),
-
-
+      rate <- 1 / (mu - extra$location)
+      ll.elts <- c(w) * dexp(x = y - extra$location, rate = rate, log = TRUE)
+      if (summation) sum(ll.elts) else ll.elts
+  },
+  vfamily = c("exponential"),
   simslot = eval(substitute(
   function(object, nsim) {
-
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
-    eta <- predict(object)
-    prob <- eta2theta(eta, .link , earg = .earg )
-    rgeom(nsim * length(prob), prob = prob)
+    mu <- fitted(object)
+    rate <- 1 / (mu - object at extra$location)
+    rexp(nsim * length(rate), rate = rate)
   }, list( .link = link, .earg = earg ))),
-
-
-
-
   deriv = eval(substitute(expression({
-    prob <- eta2theta(eta, .link , earg = .earg )
-
-    dl.dprob <- -y / (1 - prob) + 1 / prob 
-
-    dprobdeta <- dtheta.deta(prob, .link , earg = .earg )
-    c(w) * cbind(dl.dprob * dprobdeta)
-  }), list( .link = link, .earg = earg, .expected = expected ))),
+    rate <- 1 / (mu - extra$location)
+    dl.drate <- mu - y
+    drate.deta <- dtheta.deta(rate, .link , earg = .earg )
+    c(w) * dl.drate * drate.deta
+  }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
-    ned2l.dprob2 <- if ( .expected ) {
-      1 / (prob^2 * (1 - prob))
-    } else {
-      y / (1 - prob)^2 + 1 / prob^2
+    ned2l.drate2 <- (mu - extra$location)^2
+    wz <- ned2l.drate2 * drate.deta^2
+    if (! .expected ) {  # Use the OIM, not the EIM
+      d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
+      wz <- wz - dl.drate * d2rate.deta2
     }
-    wz <- ned2l.dprob2 * dprobdeta^2
-    if ( !( .expected ))
-      wz <- wz - dl.dprob * d2theta.deta2(prob, .link , earg = .earg )
     c(w) * wz
-  }), list( .link = link, .earg = earg,
-            .expected = expected ))))
-}
-
-
-
-
-dbetageom <- function(x, shape1, shape2, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
-
-  if (!is.Numeric(x))
-    stop("bad input for argument 'x'")
-  if (!is.Numeric(shape1, positive = TRUE))
-    stop("bad input for argument 'shape1'")
-  if (!is.Numeric(shape2, positive = TRUE))
-    stop("bad input for argument 'shape2'")
-  N <- max(length(x), length(shape1), length(shape2))
-  if (length(x)      != N) x      <- rep_len(x,      N)
-  if (length(shape1) != N) shape1 <- rep_len(shape1, N)
-  if (length(shape2) != N) shape2 <- rep_len(shape2, N)
-
-  loglik <- lbeta(1+shape1, shape2 + abs(x)) - lbeta(shape1, shape2)
-  xok <- (x == round(x) & x >= 0)
-  loglik[!xok] <- log(0)
-  if (log.arg) {
-    loglik
-  } else {
-    exp(loglik)
-  }
-}
-
-
-pbetageom <- function(q, shape1, shape2, log.p = FALSE) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(shape1, positive = TRUE))
-    stop("bad input for argument 'shape1'")
-  if (!is.Numeric(shape2, positive = TRUE))
-    stop("bad input for argument 'shape2'")
-  N <- max(length(q), length(shape1), length(shape2))
-  if (length(q)      != N) q      <- rep_len(q,      N)
-  if (length(shape1) != N) shape1 <- rep_len(shape1, N)
-  if (length(shape2) != N) shape2 <- rep_len(shape2, N)
-  ans <- q * 0  # Retains names(q)
-  if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
-     max(abs(shape2-shape2[1])) < 1.0e-08) {
-      qstar <- floor(q)
-      temp <- if (max(qstar) >= 0) dbetageom(x = 0:max(qstar), 
-             shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
-      unq <- unique(qstar)
-      for (ii in unq) {
-        index <- (qstar == ii)
-        ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
-      }
-  } else {
-    for (ii in 1:N) {
-      qstar <- floor(q[ii])
-      ans[ii] <- if (qstar >= 0) sum(dbetageom(x = 0:qstar,
-                 shape1 = shape1[ii], shape2 = shape2[ii])) else 0
-    }
-  }
-  if (log.p) log(ans) else ans
-}
-
-
-rbetageom <- function(n, shape1, shape2) {
-  rgeom(n = n, prob = rbeta(n = n, shape1 = shape1, shape2 = shape2))
+  }), list( .link = link, .expected = expected, .earg = earg ))))
 }
 
 
@@ -3683,62 +2199,10 @@ rbetageom <- function(n, shape1, shape2) {
 
 
 
- Init.mu <-
-  function(y, x = cbind("(Intercept)" = rep_len(1, nrow(as.matrix(y)))),
-           w = x, imethod = 1, imu = NULL,
-           ishrinkage = 0.95,
-           pos.only = FALSE,
-           probs.y = 0.35) {
-    if (!is.matrix(x)) x <- as.matrix(x)
-    if (!is.matrix(y)) y <- as.matrix(y)
-    if (!is.matrix(w)) w <- as.matrix(w)
-    if (ncol(w) != ncol(y))
-      w <- matrix(w, nrow = nrow(y), ncol = ncol(y))
 
-    if (length(imu)) {
-      MU.INIT <- matrix(imu, nrow(y), ncol(y), byrow = TRUE)
-      return(MU.INIT)
-    }
-
-
-    if (!is.Numeric(ishrinkage, length.arg = 1) ||
-     ishrinkage < 0 || ishrinkage > 1)
-     warning("bad input for argument 'ishrinkage'; ",
-             "using the value 0.95 instead")
-    
-
-    if (imethod > 6) {
-      warning("argument 'imethod' should be 1 or 2 or... 6; ",
-              "using the value 1")
-      imethod <- 1
-    }
-    mu.init <- y
-    for (jay in 1:ncol(y)) {
-      TFvec <- if (pos.only) y[, jay] > 0 else TRUE
-      locn.est <- if ( imethod %in% c(1, 4)) {
-        weighted.mean(y[TFvec, jay], w[TFvec, jay]) + 1/16
-      } else if ( imethod %in% c(3, 6)) {
-        c(quantile(y[TFvec, jay], probs = probs.y ) + 1/16)
-      } else {
-        median(y[TFvec, jay]) + 1/16
-      }
 
-      if (imethod <= 3) {
-        mu.init[, jay] <-      ishrinkage   * locn.est +
-                          (1 - ishrinkage ) * y[, jay]
-      } else {
-        medabsres <- median(abs(y[, jay] - locn.est)) + 1/32
-        allowfun <- function(z, maxtol = 1)
-          sign(z) * pmin(abs(z), maxtol)
-        mu.init[, jay] <- locn.est + (1 - ishrinkage ) *
-                          allowfun(y[, jay] - locn.est, maxtol = medabsres)
 
-        mu.init[, jay] <- abs(mu.init[, jay]) + 1 / 1024
-      }
-    }  # of for (jay)
 
-    mu.init
-  }
 
 
 
@@ -3747,436 +2211,325 @@ rbetageom <- function(n, shape1, shape2) {
 
 
 
-EIM.NB.specialp <- function(mu, size,
-                            y.max = NULL,  # Must be an integer
-                            cutoff.prob = 0.995,
-                            intercept.only = FALSE,
-                            extra.bit = TRUE) {
 
 
-  if (intercept.only) {
-    mu <- mu[1]
-    size <- size[1]
-  }
+ gamma1 <- function(link = "loge", zero = NULL) {
 
-  y.min <- 0  # A fixed constant really
 
-  if (!is.numeric(y.max)) {
-    eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
-    y.max <- max(qnbinom(p = eff.p[2], mu = mu, size = size)) + 10
-  }
+  link <- as.list(substitute(link))
+  earg <- link2list(link)
+  link <- attr(earg, "function.name")
 
-  Y.mat <- if (intercept.only) y.min:y.max else
-           matrix(y.min:y.max, length(mu), y.max-y.min+1, byrow = TRUE)
-  neff.row <- ifelse(intercept.only, 1, nrow(Y.mat))
-  neff.col <- ifelse(intercept.only, length(Y.mat), ncol(Y.mat))
-
-  if (FALSE) {
-  trigg.term <- if (intercept.only) {
-    check2 <-
-     sum(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)
-         / (Y.mat + size)^2)
-    check2
-  } else {
-  check2 <-
-    rowSums(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)
-            / (Y.mat + size)^2)
-  check2
-  }
-  }
 
 
-  trigg.term <- 
-  if (TRUE) {
-    answerC <- .C("eimpnbinomspecialp",
-      as.integer(intercept.only),
-      as.double(neff.row), as.double(neff.col),
-      as.double(size),
-      as.double(pnbinom(Y.mat, size = size, mu = mu, lower.tail = FALSE)),
-      rowsums = double(neff.row))
-      answerC$rowsums
-  }
 
-  ned2l.dk2 <- trigg.term
-  if (extra.bit)
-    ned2l.dk2 <- ned2l.dk2 - 1 / size + 1 / (size + mu)
-  ned2l.dk2
-}  # end of EIM.NB.specialp()
 
+  new("vglmff",
+  blurb = c("1-parameter Gamma distribution\n",
+            "Link:     ",
+            namesof("shape", link, earg = earg, tag = TRUE), "\n",
+            "Mean:       mu (=shape)\n",
+            "Variance:   mu (=shape)"),
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 1
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero ))),
 
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         zero = .zero )
+  }, list( .zero = zero ))),
 
+  initialize = eval(substitute(expression({
 
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
 
+    M <- if (is.matrix(y)) ncol(y) else 1
+    M1 <- 1
 
-EIM.NB.speciald <- function(mu, size,
-                            y.min = 0,  # 20160201; must be an integer
-                            y.max = NULL,  # Must be an integer
-                            cutoff.prob = 0.995,
-                            intercept.only = FALSE,
-                            extra.bit = TRUE) {
+    mynames1 <- param.names("shape", M)
+    predictors.names <- namesof(mynames1, .link , earg = .earg , short = TRUE)
 
+    if (!length(etastart))
+      etastart <- cbind(theta2eta(y + 1/8, .link , earg = .earg ))
+  }), list( .link = link, .earg = earg ))),
+  linkinv = eval(substitute(function(eta, extra = NULL)
+    eta2theta(eta, .link , earg = .earg )),
+  list( .link = link, .earg = earg )),
+  last = eval(substitute(expression({
+    misc$link <- rep_len( .link , M)
+    names(misc$link) <- mynames1
 
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:M)
+      misc$earg[[ii]] <- .earg
 
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+    misc$M1 <- M1
+  }), list( .link = link, .earg = earg ))),
+  linkfun = eval(substitute(function(mu, extra = NULL)
+    theta2eta(mu, .link , earg = .earg )),
+  list( .link = link, .earg = earg )),
+  loglikelihood =
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dgamma(x = y, shape = mu, scale = 1, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+  },
+  vfamily = c("gamma1"),
 
 
-  if (intercept.only) {
-    mu <- mu[1]
-    size <- size[1]
-  }
 
-  if (!is.numeric(y.max)) {
-    eff.p <- sort(c(cutoff.prob, 1 - cutoff.prob))
-    y.max <- max(qnbinom(p = eff.p[2], mu = mu, size = size)) + 10
-  }
 
-  Y.mat <- if (intercept.only) y.min:y.max else
-           matrix(y.min:y.max, length(mu), y.max-y.min+1, byrow = TRUE)
-  trigg.term <- if (intercept.only) {
-     dnbinom(Y.mat, size = size, mu = mu) %*% trigamma(Y.mat + size)
-  } else {
-     rowSums(dnbinom(Y.mat, size = size, mu = mu) *
-             trigamma(Y.mat + size))
-  }
-  ned2l.dk2 <- trigamma(size) - trigg.term
-  if (extra.bit)
-    ned2l.dk2 <- ned2l.dk2 - 1 / size + 1 / (size + mu)
-  ned2l.dk2
-}  # end of EIM.NB.speciald()
+  simslot = eval(substitute(
+  function(object, nsim) {
 
+    pwts <- if (length(pwts <- object@prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1))
+      warning("ignoring prior weights")
+    mu <- fitted(object)
+    rgamma(nsim * length(shape), shape = mu, scale = 1)
+  }, list( .link = link, .earg = earg ))),
 
 
-NBD.Loglikfun2 <- function(munbval, sizeval,
-                           y, x, w, extraargs) {
-  sum(c(w) * dnbinom(x = y, mu = munbval,
-                     size = sizeval, log = TRUE))
-}
 
 
 
-negbinomial.control <- function(save.weights = TRUE, ...) {
-    list(save.weights = save.weights)
+  deriv = eval(substitute(expression({
+    shape <- mu
+    dl.dshape <- log(y) - digamma(shape)
+    dshape.deta <- dtheta.deta(shape, .link , earg = .earg )
+    ans <- c(w) * dl.dshape * dshape.deta
+    ans
+    c(w) * dl.dshape * dshape.deta
+  }), list( .link = link, .earg = earg ))),
+  weight = expression({
+    ned2l.dshape <- trigamma(shape)
+    wz <- ned2l.dshape * dshape.deta^2
+    c(w) * wz
+  }))
 }
 
 
 
- negbinomial <-
-  function(
-           zero = "size",
-           parallel = FALSE,
-           deviance.arg = FALSE,
-           mds.min = 1e-3,
-           nsimEIM = 500, cutoff.prob = 0.999,  # Maxiter = 5000,
-           eps.trig = 1e-7,
-           max.support = 4000,
-           max.chunk.MB = 30,  # max.memory = Inf is allowed
-           lmu = "loge", lsize = "loge",
-           imethod = 1,
-           imu = NULL,
-           iprobs.y = NULL,  # 0.35,
-           gprobs.y = (0:9)/10,  # 20160709; grid for finding munb.init
-           isize = NULL,
-           gsize.mux = exp(c(-30, -20, -15, -10, -6:3))) {
-
-
 
 
 
 
 
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-      stop("argument 'imethod' must be 1 or 2")
 
 
-  if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
-    stop("argument 'deviance.arg' must be TRUE or FALSE")
+ gammaR <-
+  function(lrate = "loge", lshape = "loge",
+           irate = NULL,   ishape = NULL,
+           lss = TRUE,
+           zero = "shape"
+          ) {
 
 
+  expected <- TRUE  # FALSE does not work well
 
-  lmunb <- as.list(substitute(lmu))
-  emunb <- link2list(lmunb)
-  lmunb <- attr(emunb, "function.name")
-  
-  imunb <- imu
+  iratee <- irate
 
-  lsize <- as.list(substitute(lsize))
-  esize <- link2list(lsize)
-  lsize <- attr(esize, "function.name")
+  lratee <- as.list(substitute(lrate))
+  eratee <- link2list(lratee)
+  lratee <- attr(eratee, "function.name")
 
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
 
-  if (!is.Numeric(eps.trig, length.arg = 1,
-                  positive = TRUE) || eps.trig > 1e-5)
-    stop("argument 'eps.trig' must be positive and smaller in value")
 
-  if (length(imunb) && !is.Numeric(imunb, positive = TRUE))
-    stop("bad input for argument 'imu'")
-  if (length(isize) && !is.Numeric(isize, positive = TRUE))
-    stop("bad input for argument 'isize'")
+  if (length( iratee) && !is.Numeric(iratee, positive = TRUE))
+    stop("bad input for argument 'irate'")
+  if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
+    stop("bad input for argument 'ishape'")
 
-  if (!is.Numeric(cutoff.prob, length.arg = 1) ||
-    cutoff.prob < 0.95 ||
-    cutoff.prob >= 1)
-    stop("range error in the argument 'cutoff.prob'; ",
-         "a value in [0.95, 1) is needed")
 
-    if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
-      stop("bad input for argument 'nsimEIM'")
-    if (nsimEIM <= 10)
-      warning("argument 'nsimEIM' should be an integer ",
-               "greater than 10, say")
+  if (!is.logical(expected) || length(expected) != 1)
+    stop("bad input for argument 'expected'")
 
 
-    if (is.logical( parallel ) && parallel  && length(zero))
-      stop("need to set 'zero = NULL' when parallel = TRUE")
+  ratee.TF <- if (lss) c(TRUE, FALSE) else c(FALSE, TRUE)
+  scale.12 <- if (lss) 1:2 else 2:1
+  blurb.vec <- c(namesof("rate",  lratee, earg = eratee),
+                 namesof("shape", lshape, earg = eshape))
+  blurb.vec <- blurb.vec[scale.12]
 
 
 
-  ans <- 
   new("vglmff",
-
-
-  blurb = c("Negative binomial distribution\n\n",
+  blurb = c("2-parameter Gamma distribution\n",
             "Links:    ",
-            namesof("mu",   lmunb, earg = emunb), ", ",
-            namesof("size", lsize, earg = esize), "\n",
-            "Mean:     mu\n",
-            "Variance: mu * (1 + mu / size) for NB-2"),
-
+            blurb.vec[1], ", ",
+            blurb.vec[2], "\n",
+            "Mean:     mu = shape/rate\n",
+            "Variance: (mu^2)/shape = shape/rate^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
-                           bool = .parallel , 
-                           constraints = constraints)
-
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
                                 M1 = 2)
-  }), list( .parallel = parallel, .zero = zero ))),
-
-
+  }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
-    list(M1    = 2,
-         Q1    = 1,
-         expected = TRUE,
-         mds.min = .mds.min ,
+    list(M1 = 2,
+         Q1 = 1,
+         expected = .expected ,
          multipleResponses = TRUE,
-         parameters.names = c("mu", "size"),
-         lmu   = .lmunb ,  
-         lsize = .lsize ,
-         eps.trig  = .eps.trig ,
-         zero  = .zero )
-  }, list( .zero = zero, .lsize = lsize, .lmunb = lmunb,
-           .eps.trig = eps.trig,
-           .mds.min = mds.min))),
+         zero = .zero )
+  }, list( .zero = zero, .scale.12 = scale.12, .ratee.TF = ratee.TF,
+           .expected = expected
+         ))),
 
   initialize = eval(substitute(expression({
-    M1 <- 2
-
-    temp12 <-
-      w.y.check(w = w, y = y,
-                Is.nonnegative.y = TRUE,
-                Is.integer.y = TRUE,
-                ncol.w.max = Inf,
-                ncol.y.max = Inf,
-                out.wy = TRUE,
-                colsyperw = 1, maximize = TRUE)
-    w <- temp12$w
-    y <- temp12$y
 
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
-    assign("CQO.FastAlgorithm",
-          ( .lmunb == "loge") && ( .lsize == "loge"),
-           envir = VGAMenv)
+    ncoly <- ncol(y)
+    M1 <- 2
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
 
-    if (any(function.name == c("cqo", "cao")) &&
-        ((is.Numeric( .zero , length.arg = 1) && .zero != -2) ||
-         (is.character( .zero ) && .zero != "size")))
-        stop("argument zero = 'size' or zero = -2 is required")
 
+    if ( .lss ) {
+      mynames1 <- param.names("rate",  ncoly)
+      mynames2 <- param.names("shape", ncoly)
+      predictors.names <-
+          c(namesof(mynames1, .lratee , earg = .eratee , tag = FALSE),
+            namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
 
-    M <- M1 * ncol(y) 
-    NOS <- ncoly <- ncol(y)  # Number of species
-    predictors.names <-
-     c(namesof(param.names("mu",   NOS),
-                .lmunb , earg = .emunb , tag = FALSE),
-       namesof(param.names("size", NOS),
-                .lsize , earg = .esize , tag = FALSE))
+    } else {
+      mynames1 <- param.names("shape", ncoly)
+      mynames2 <- param.names("rate",  ncoly)
+      predictors.names <-
+          c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
+            namesof(mynames2, .lratee , earg = .eratee , tag = FALSE))
+    }
+    parameters.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
     predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
 
-    gprobs.y <- .gprobs.y
-    imunb <- .imunb  # Default in NULL
-    if (length(imunb))
-      imunb <- matrix(imunb, n, NOS, byrow = TRUE)
-
-    if (!length(etastart)) {
-      munb.init <-
-      size.init <- matrix(NA_real_, n, NOS)
-      gprobs.y  <- .gprobs.y
-      if (length( .iprobs.y ))
-        gprobs.y <-  .iprobs.y
-      gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
-      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
-        munb.init.jay <- if ( .imethod == 1 ) {
-          quantile(y[, jay], probs = gprobs.y) + 1/16
-        } else {
-          weighted.mean(y[, jay], w = w[, jay])
-        }
-        if (length(imunb))
-          munb.init.jay <- imunb[, jay]
 
 
-        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
-                                    weighted.mean(y[, jay], w = w[, jay]))
-        if (length( .isize ))
-          gsize <- .isize  # isize is on an absolute scale
+    Ratee.init <- matrix(if (length( .iratee )) .iratee else 0 + NA,
+                         n, ncoly, byrow = TRUE)
+    Shape.init <- matrix(if (length( .ishape )) .iscale else 0 + NA,
+                         n, ncoly, byrow = TRUE)
 
 
-        try.this <-
-          grid.search2(munb.init.jay, gsize,
-                       objfun = NBD.Loglikfun2,
-                       y = y[, jay], w = w[, jay],
-                       ret.objfun = TRUE)  # Last value is the loglik
+    if (!length(etastart)) {
+      mymu <- y + 0.167 * (y == 0)
 
-        munb.init[, jay] <- try.this["Value1"]
-        size.init[, jay] <- try.this["Value2"]
-      }  # for (jay ...)
 
+      for (ilocal in 1:ncoly) {
+        junk <- lsfit(x, y[, ilocal], wt = w[, ilocal], intercept = FALSE)
+        var.y.est <- sum(c(w[, ilocal]) * junk$resid^2) / (nrow(x) -
+                     length(junk$coef))
 
+        if (!is.Numeric(Shape.init[, ilocal]))
+          Shape.init[, ilocal] <- (mymu[, ilocal])^2 / var.y.est
 
-      newemu <- .emunb
-      if ( .lmunb == "nbcanlink") {
-        newemu$size <- size.init
-        testing1 <- log(munb.init / (munb.init + size.init))
-        testing2 <- theta2eta(munb.init, link = .lmunb , earg = newemu )
+        if (!is.Numeric(Ratee.init[, ilocal]))
+          Ratee.init[, ilocal] <- Shape.init[, ilocal] / mymu[, ilocal]
       }
 
-
-      etastart <-
-        cbind(theta2eta(munb.init, link = .lmunb , earg = newemu ),
-              theta2eta(size.init, link = .lsize , earg = .esize ))
-      etastart <-
-        etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
-    }
-  }), list( .lmunb = lmunb, .lsize = lsize,
-            .emunb = emunb, .esize = esize,
-            .imunb = imunb,
-            .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
-            .deviance.arg = deviance.arg,
-            .isize = isize, .iprobs.y = iprobs.y,
-            .nsimEIM = nsimEIM,
-            .zero = zero, .imethod = imethod ))),
-
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    if ( .lmunb == "nbcanlink") {
-      eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
-      kmat <- eta2theta(eta.k, .lsize , earg = .esize )
-
- 
-      newemu <- .emunb
-      newemu$size <- kmat
-      check.munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                .lmunb , earg = newemu )
-
- 
-      munb <- kmat / expm1(-eta[, c(TRUE, FALSE), drop = FALSE])
-      munb
-    } else {
-      eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                .lmunb , earg = .emunb )
+      if ( .lshape == "loglog")
+        Shape.init[Shape.init <= 1] <- 3.1  # Hopefully value is big enough
+      etastart <- if ( .lss )
+        cbind(theta2eta(Ratee.init, .lratee , earg = .eratee ),
+              theta2eta(Shape.init, .lshape , earg = .eshape ))[,
+              interleave.VGAM(M, M1 = M1)] else
+        cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
+              theta2eta(Ratee.init, .lratee , earg = .eratee ))[,
+              interleave.VGAM(M, M1 = M1)]
     }
-  }, list( .lmunb = lmunb, .lsize = lsize,
-           .emunb = emunb, .esize = esize))),
-
+  }), list( .lratee = lratee, .lshape = lshape,
+            .iratee = iratee, .ishape = ishape,
+            .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
+    Shape / Ratee
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   last = eval(substitute(expression({
-    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
-        rm("CQO.FastAlgorithm", envir = VGAMenv)
-
-
-    save.weights <- control$save.weights <- !all(ind2)
-
-    
-    temp0303 <- c(rep_len( .lmunb , NOS),
-                  rep_len( .lsize , NOS))
-    names(temp0303) <- c(param.names("mu",   NOS),
-                         param.names("size", NOS))
-    temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
-    misc$link <- temp0303  # Already named
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- names(misc$link)
-    for (ii in 1:NOS) {
-      misc$earg[[M1*ii-1]] <- newemu
-      misc$earg[[M1*ii  ]] <- .esize
-    }
-
-    misc$max.chunk.MB <- .max.chunk.MB
-    misc$cutoff.prob <- .cutoff.prob
-    misc$imethod <- .imethod 
-    misc$nsimEIM <- .nsimEIM
-    misc$expected <- TRUE
     misc$multipleResponses <- TRUE
-  }), list( .lmunb = lmunb, .lsize = lsize,
-            .emunb = emunb, .esize = esize,
-            .cutoff.prob = cutoff.prob,
-            .max.chunk.MB = max.chunk.MB,
-            .nsimEIM = nsimEIM,
-            .imethod = imethod ))),
-
-  linkfun = eval(substitute(function(mu, extra = NULL) {
-    M1 <- 2
-
-    newemu <- .emunb
-
-    eta.temp <- theta2eta(mu, .lmunb , earg = newemu)
-    eta.kayy <- theta2eta(if (is.numeric( .isize )) .isize else 1.0,
-                     .lsize , earg = .esize )
-    eta.kayy <- 0 * eta.temp + eta.kayy  # Right dimension now.
-
 
+    M1 <- extra$M1
+    avector <- if ( .lss ) c(rep_len( .lratee , ncoly),
+                             rep_len( .lshape , ncoly)) else
+                           c(rep_len( .lshape , ncoly),
+                             rep_len( .lratee , ncoly))
+    misc$link <- avector[interleave.VGAM(M, M1 = M1)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M1 = M1)]
+    names(misc$link) <- temp.names
 
-    if ( .lmunb == "nbcanlink") {
-      newemu$size <- eta2theta(eta.kayy, .lsize , earg = .esize )
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[M1*ii-1]] <- if ( .lss ) .eratee else .eshape
+      misc$earg[[M1*ii  ]] <- if ( .lss ) .eshape else .eratee
     }
 
-
-
-    eta.temp <- cbind(eta.temp, eta.kayy)
-    eta.temp[, interleave.VGAM(ncol(eta.temp), M1 = M1), drop = FALSE]
-  }, list( .lmunb = lmunb, .lsize = lsize,
-           .emunb = emunb, .esize = esize,
-                           .isize = isize ))),
-
+    misc$M1 <- M1
+  }), list( .lratee = lratee, .lshape = lshape,
+            .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
-    kmat <- eta2theta(eta.k, .lsize , earg = .esize )
-
-
-
-    newemu <- .emunb
-    if ( .lmunb == "nbcanlink") {
-      newemu$size <- kmat
-    }
-
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE)
+      ll.elts <- c(w) * dgamma(x=y, shape = Shape, rate = Ratee, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lsize = lsize,
-           .lmunb = lmunb, .emunb = emunb, .esize = esize))),
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eratee = eratee, .eshape = eshape,
+           .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
+  vfamily = c("gammaR"),
 
-  vfamily = c("negbinomial"),
 
 
 
@@ -4185,1287 +2538,601 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    muuuu <- cbind(eta2theta(eta[, c(TRUE, FALSE)], .lmunb , earg = .emunb ))
-    eta.k <- cbind(eta2theta(eta[, c(FALSE, TRUE)], .lsize , earg = .esize ))
-    rnbinom(nsim * length(muuuu), mu = muuuu, size = eta.k)
-  }, list( .lmunb = lmunb, .lsize = lsize,
-           .emunb = emunb, .esize = esize ))),
-
-
-  validparams = eval(substitute(function(eta, y, extra = NULL) {
-    munb <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                     .lmunb , earg = .emunb )
-    size <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
-                     .lsize , earg = .esize )
-
-    smallval <- .mds.min  # .munb.div.size
-    okay1 <- all(is.finite(munb)) && all(munb > 0) &&
-             all(is.finite(size)) && all(size > 0)
-    overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
-    if (!overdispersion)
-      warning("parameter 'size' has very large values; ",
-              "try fitting a quasi-Poisson ",
-              "model instead.")
-    okay1 && overdispersion
-  }, list( .lmunb = lmunb, .emunb = emunb,
-           .lsize = lsize, .esize = esize,
-           .mds.min = mds.min))),
-
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
+    rgamma(nsim * length(Shape), shape = Shape, rate = Ratee)
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eratee = eratee, .eshape = eshape,
+           .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
 
 
   deriv = eval(substitute(expression({
-
-
-
-
-  odd.iter <- 1   # iter %% 2
-  even.iter <- 1  # 1 - odd.iter
-
-  if ( iter == 1 && .deviance.arg ) {
-    if (control$criterion != "coefficients" &&
-        control$half.step)
-      warning("Argument 'criterion' should be 'coefficients' ",
-               "or 'half.step' should be 'FALSE' when ",
-              "'deviance.arg = TRUE'")
-
-
-
-    low.index <- ifelse(names(constraints)[1] == "(Intercept)", 2, 1)
-    if (low.index <= length(constraints))
-    for (iii in low.index:length(constraints)) {
-      conmat <- constraints[[iii]]
-      if (any(conmat[c(FALSE, TRUE), ] != 0))
-        stop("argument 'deviance.arg' should only be TRUE for NB-2 models; ",
-             "non-zero elements detected for the 'size' parameter." )
-    }
-  }
-
-
-
-
-
-
     M1 <- 2
-    NOS <- ncol(eta) / M1
-    eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
-    kmat <- eta2theta(eta.k, .lsize , earg = .esize )
-
-
-    smallval <- .mds.min  # Something like this is needed
-    if (any(big.size <- mu / kmat < smallval)) {
-      if (FALSE)
-        warning("parameter 'size' has very large values; ",
-                "try fitting a quasi-Poisson ",
-                "model instead.")
-        kmat[big.size] <- mu[big.size] / smallval
-    }
-
-
-    newemu <- .emunb
-    if ( .lmunb == "nbcanlink") {
-      newemu$size <- kmat
-    }
-
-
-    dl.dmunb <- y / mu - (1 + y/kmat) / (1 + mu/kmat)
-    dl.dsize <- digamma(y + kmat) - digamma(kmat) -
-                (y - mu) / (mu + kmat) + log1p(-mu / (kmat + mu))
-    if (any(big.size)) {
-      dl.dsize[big.size] <- 1e-8  # A small number
-    }
-  
-
-    dsize.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
-
-
-    myderiv <- if ( .lmunb == "nbcanlink") {
-      dmunb.deta1 <- 1 / nbcanlink(mu, size = kmat, wrt.param = 1, deriv = 1)
-
-      dsize.deta1 <- 1 / nbcanlink(mu, size = kmat, wrt.param = 2, deriv = 1)
-
-
-      c(w) * cbind(dl.dmunb * dmunb.deta1 *  odd.iter +
-                   dl.dsize * dsize.deta1 * 1 * even.iter,
-                   dl.dsize * dsize.deta  * even.iter)
-    } else {
-      dmunb.deta <- dtheta.deta(mu,   .lmunb , earg = .emunb )
-      c(w) * cbind(dl.dmunb * dmunb.deta,
-                   dl.dsize * dsize.deta)
-    }
-
-
-    myderiv <- myderiv[, interleave.VGAM(M, M1 = M1)]
-    myderiv
-  }), list( .lmunb = lmunb, .lsize = lsize,
-            .emunb = emunb, .esize = esize,
-            .deviance.arg = deviance.arg,
-            .mds.min = mds.min ))),
-
-
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
+    dl.dratee <- mu - y
+    dl.dshape <- log(y * Ratee) - digamma(Shape)
+    dratee.deta <- dtheta.deta(Ratee, .lratee , earg = .eratee )
+    dshape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
 
+    myderiv <- if ( .lss )
+                 c(w) * cbind(dl.dratee * dratee.deta,
+                              dl.dshape * dshape.deta) else
+                 c(w) * cbind(dl.dshape * dshape.deta,
+                              dl.dratee * dratee.deta)
+    myderiv[, interleave.VGAM(M, M1 = M1)]
+  }), list( .lratee = lratee, .lshape = lshape,
+            .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   weight = eval(substitute(expression({
-    wz <- matrix(NA_real_, n, M)
-
-
-    max.support <- .max.support
-    max.chunk.MB <- .max.chunk.MB
-
-
-    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
-    for (jay in 1:NOS) {
-      eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
-      Q.mins <- 0
-      Q.maxs <- qnbinom(p = eff.p[2],
-                        mu = mu[, jay],
-                        size = kmat[, jay]) + 10
-
-
-      eps.trig <- .eps.trig
-      Q.MAXS <-      if ( .lsize == "loge")
-        pmax(10, ceiling(kmat[, jay] / sqrt(eps.trig))) else Inf
-      Q.maxs <- pmin(Q.maxs, Q.MAXS)
-
-
-
-      ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
-      if ((NN <- sum(ind1)) > 0) {
-        Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
-        n.chunks <- if (intercept.only) 1 else
-                    max(1, ceiling( Object.Size / max.chunk.MB))
-        chunk.rows <- ceiling(NN / n.chunks)
-        ind2[, jay] <- ind1  # Save this
-        wind2 <- which(ind1)
-
-
-        upr.ptr <- 0
-        lwr.ptr <- upr.ptr + 1
-        while (lwr.ptr <= NN) {
-          upr.ptr <- min(upr.ptr + chunk.rows, NN)
-          sind2 <- wind2[lwr.ptr:upr.ptr]
-          if (FALSE)
-          wz[sind2, M1*jay] <-
-            EIM.NB.speciald(mu          =   mu[sind2, jay],
-                            size        = kmat[sind2, jay],
-                            y.min = min(Q.mins[sind2]),  # 20160130
-                            y.max = max(Q.maxs[sind2]),
-                            cutoff.prob = .cutoff.prob ,
-                            intercept.only = intercept.only)
-          wz[sind2, M1*jay] <-
-            EIM.NB.specialp(mu          =   mu[sind2, jay],
-                            size        = kmat[sind2, jay],
-                            y.max = max(Q.maxs[sind2]),
-                            cutoff.prob = .cutoff.prob ,
-                            intercept.only = intercept.only)
-
-
-          if (any(eim.kk.TF <- wz[sind2, M1*jay] <= 0)) {
-            ind2[sind2[eim.kk.TF], jay] <- FALSE
-          }
-          
-
-          lwr.ptr <- upr.ptr + 1
-        }  # while
-      }  # if
-    }  # end of for (jay in 1:NOS)
-
-
-
-
-
-
-
-
-
-
-    for (jay in 1:NOS) {
-      run.varcov <- 0
-      ii.TF <- !ind2[, jay]  # Not assigned above
-      if (any(ii.TF)) {
-        kkvec <- kmat[ii.TF, jay]
-        muvec <-   mu[ii.TF, jay]
-        for (ii in 1:( .nsimEIM )) {
-          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
-          dl.dsize <- digamma(ysim + kkvec) - digamma(kkvec) -
-                      (ysim - muvec) / (muvec + kkvec) +
-                      log1p( -muvec / (kkvec + muvec))
-          run.varcov <- run.varcov + dl.dsize^2
-        }  # end of for loop
-
-        run.varcov <- c(run.varcov / .nsimEIM )
-        ned2l.dsize2 <- if (intercept.only) mean(run.varcov) else run.varcov
-
-        wz[ii.TF, M1*jay] <- ned2l.dsize2 
-      }
-    }
-
-
-
-    save.weights <- !all(ind2)
-
-
-    
-    ned2l.dmunb2 <- 1 / mu - 1 / (mu + kmat)
-    ned2l.dsize2 <- wz[, M1*(1:NOS), drop = FALSE]
-
-
-    if ( .lmunb == "nbcanlink") {
-      wz <- cbind(wz, matrix(0, n, M-1))  # Make it tridiagonal
-
-      wz[,     M1*(1:NOS) - 1] <-
-        (ned2l.dmunb2 * (mu/kmat)^2 * odd.iter +
-         ned2l.dsize2 * even.iter * 1) *
-          (mu + kmat)^2
-
-
-
-      wz[, M + M1*(1:NOS) - 1] <-
-        -(mu + kmat) * ned2l.dsize2 * dsize.deta * even.iter
-    } else {
-      wz[, c(TRUE, FALSE)] <- ned2l.dmunb2 * dmunb.deta^2
-    }
-
-
-    wz[, M1*(1:NOS)] <- wz[, M1*(1:NOS)] * dsize.deta^2
-
-
-    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }), list( .cutoff.prob = cutoff.prob,
-            .max.support = max.support,
-            .max.chunk.MB = max.chunk.MB,
-            .lmunb = lmunb, .lsize = lsize,
-            .eps.trig = eps.trig,
-            .nsimEIM = nsimEIM ))))
-
-  
-
-
-  if (deviance.arg) {
-    ans at deviance <- eval(substitute(
-      function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-               summation = TRUE) {
-
-
-
-
-
-
-    eta.k <- eta[, c(FALSE, TRUE), drop = FALSE]
-    kmat <- eta2theta(eta.k, .lsize , earg = .esize )
+    ned2l.dratee2 <- Shape / (Ratee^2)
+    ned2l.drateeshape <- -1/Ratee
+    ned2l.dshape2 <- trigamma(Shape)
 
-    if (residuals) {
-      stop("this part of the function has not been written yet.")
+    if ( .expected ) {
+     ratee.adjustment <-  0
+     shape.adjustment <-  0
     } else {
-      size <- kmat
-      dev.elts <- 2 * c(w) *
-                  (y * log(pmax(1, y) / mu) -
-                  (y + size) * log((y + size) / (mu + size)))
-      if (summation) {
-        sum(dev.elts)
-      } else {
-        dev.elts
-      }
+      d2ratee.deta2 <- d2theta.deta2(Ratee, .lratee , earg = .eratee )
+      d2shape.deta2 <- d2theta.deta2(Shape, .lshape , earg = .eshape )
+      ratee.adjustment <- dl.dratee * d2ratee.deta2
+      shape.adjustment <- dl.dshape * d2shape.deta2
     }
-  }, list( .lsize = lsize, .esize = esize,
-           .lmunb = lmunb, .emunb = emunb )))
-
-
-
-
-
-  }
-
 
+    wz <- if ( .lss )
+            array(c(c(w) * (ned2l.dratee2 * dratee.deta^2 - ratee.adjustment),
+                    c(w) * (ned2l.dshape2 * dshape.deta^2 - shape.adjustment),
+                    c(w) * (ned2l.drateeshape * dratee.deta * dshape.deta)),
+                  dim = c(n, M / M1, 3)) else
+            array(c(c(w) * (ned2l.dshape2 * dshape.deta^2 - shape.adjustment),
+                    c(w) * (ned2l.dratee2 * dratee.deta^2 - ratee.adjustment),
+                    c(w) * (ned2l.drateeshape * dratee.deta * dshape.deta)),
+                  dim = c(n, M / M1, 3))
+    wz <- arwz2wz(wz, M = M, M1 = M1)
+    wz
+  }), list( .lratee = lratee, .lshape = lshape,
+            .eratee = eratee, .eshape = eshape, .expected = expected,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss  ))))
+}
 
 
 
-  ans
-}  # End of negbinomial()
 
 
 
+ gamma2 <-
+  function(lmu = "loge", lshape = "loge",
+           imethod = 1,  ishape = NULL,
+           parallel = FALSE,
+           deviance.arg = FALSE,
+           zero = "shape") {
 
 
 
+  if (!is.logical( deviance.arg ) || length( deviance.arg ) != 1)
+    stop("argument 'deviance.arg' must be TRUE or FALSE")
 
 
-polya.control <- function(save.weights = TRUE, ...) {
-    list(save.weights = save.weights)
-}
+  apply.parint <- FALSE
 
+  lmu <- as.list(substitute(lmu))
+  emu <- link2list(lmu)
+  lmu <- attr(emu, "function.name")
 
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
 
- polya <-
-  function(
-           zero = "size",
-           type.fitted = c("mean", "prob"),
-           mds.min = 1e-3,
-           nsimEIM = 500,  cutoff.prob = 0.999,  # Maxiter = 5000,
-           eps.trig = 1e-7,
-           max.support = 4000,
-           max.chunk.MB = 30,  # max.memory = Inf is allowed
-           lprob = "logit", lsize = "loge",
-           imethod = 1,
-           iprob = NULL,
-           iprobs.y = NULL,
-           gprobs.y = (0:9)/10,  # 20160709; grid for finding munb.init
-           isize = NULL,
-           gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
-           imunb = NULL) {
 
 
+  if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
+    stop("bad input for argument 'ishape'")
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 2)
-      stop("argument 'imethod' must be 1 or 2")
-
-
-  deviance.arg <- FALSE  # 20131212; for now
-      
-  type.fitted <- match.arg(type.fitted,
-                           c("mean", "prob"))[1]
-
-
-
-  if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
-    stop("bad input for argument 'iprob'")
-  if (length(isize) && !is.Numeric(isize, positive = TRUE))
-    stop("bad input for argument 'isize'")
+    stop("argument 'imethod' must be 1 or 2")
 
-  if (!is.Numeric(eps.trig, length.arg = 1,
-                  positive = TRUE) || eps.trig > 0.001)
-    stop("argument 'eps.trig' must be positive and smaller in value")
 
-  if (!is.Numeric(nsimEIM, length.arg = 1,
-                  integer.valued = TRUE))
-    stop("bad input for argument 'nsimEIM'")
-  if (nsimEIM <= 10)
-    warning("argument 'nsimEIM' should be an integer ",
-            "greater than 10, say")
+  if (!is.logical(apply.parint) ||
+      length(apply.parint) != 1)
+    stop("argument 'apply.parint' must be a single logical")
 
 
-  lprob <- as.list(substitute(lprob))
-  eprob <- link2list(lprob)
-  lprob <- attr(eprob, "function.name")
+  if (is.logical(parallel) && parallel && length(zero))
+    stop("set 'zero = NULL' if 'parallel = TRUE'")
 
-  lsize <- as.list(substitute(lsize))
-  esize <- link2list(lsize)
-  lsize <- attr(esize, "function.name")
 
+    ans <-
+    new("vglmff",
+    blurb = c("2-parameter gamma distribution",
+              " (McCullagh and Nelder 1989 parameterization)\n",
+              "Links:    ",
+              namesof("mu",    lmu,    earg = emu), ", ",
+              namesof("shape", lshape, earg = eshape), "\n",
+              "Mean:     mu\n",
+              "Variance: (mu^2)/shape"),
+    constraints = eval(substitute(expression({
 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
+                           constraints = constraints,
+                           apply.int = .apply.parint )
 
-  ans <-
-  new("vglmff",
-  blurb = c("Polya (negative-binomial) distribution\n\n",
-            "Links:    ",
-            namesof("prob", lprob, earg = eprob), ", ",
-            namesof("size", lsize, earg = esize), "\n",
-            "Mean:     size * (1 - prob) / prob\n",
-            "Variance: mean / prob"),
-  constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
                                 predictors.names = predictors.names,
                                 M1 = 2)
-  }), list( .zero = zero ))),
+  }), list( .zero = zero,
+            .parallel = parallel, .apply.parint = apply.parint ))),
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          Q1 = 1,
          expected = TRUE,
          multipleResponses = TRUE,
-         mds.min = .mds.min ,
-         type.fitted  = .type.fitted ,
-         eps.trig = .eps.trig ,
-         parameters.names = c("prob", "size"),
-         zero = .zero)
-  }, list( .zero = zero, .eps.trig = eps.trig,
-           .type.fitted = type.fitted,
-           .mds.min = mds.min))),
+         parameters.names = c("mu", "shape"),
+         zero = .zero )
+  }, list( .zero = zero ))),
+
 
   initialize = eval(substitute(expression({
     M1 <- 2
-    if (any(function.name == c("cqo", "cao")))
-      stop("polya() does not work with cqo() or cao(). ",
-           "Try negbinomial()")
-
 
-    temp12 <- w.y.check(w = w, y = y,
-              Is.integer.y = TRUE,
-              Is.nonnegative = TRUE,
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
               ncol.w.max = Inf,
               ncol.y.max = Inf,
               out.wy = TRUE,
-              colsyperw = 1, maximize = TRUE)
-    w <- temp12$w
-    y <- temp12$y
-
-
-    M <- M1 * ncol(y)
-    NOS <- ncoly <- ncol(y)  # Number of species
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
-
-    predictors.names <-
-      c(namesof(param.names("prob", NOS), .lprob , earg = .eprob , tag = FALSE),
-        namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE))
-    predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-
-    if (is.null( .nsimEIM )) {
-       save.weights <- control$save.weights <- FALSE
-    }
-
-
-    gprobs.y <- .gprobs.y
-    imunb <- .imunb  # Default in NULL
-    if (length(imunb))
-      imunb <- matrix(imunb, n, NOS, byrow = TRUE)
-
-    
-
-    if (!length(etastart)) {
-      munb.init <-
-      size.init <- matrix(NA_real_, n, NOS)
-      gprobs.y  <- .gprobs.y
-      if (length( .iprobs.y ))
-        gprobs.y <-  .iprobs.y
-      gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
-      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
-        munb.init.jay <- if ( .imethod == 1 ) {
-          quantile(y[, jay], probs = gprobs.y) + 1/16
-        } else {
-          weighted.mean(y[, jay], w = w[, jay])
-        }
-        if (length(imunb))
-          munb.init.jay <- imunb[, jay]
-
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
-        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
-                                    weighted.mean(y[, jay], w = w[, jay]))
-        if (length( .isize ))
-          gsize <- .isize  # isize is on an absolute scale
 
+      assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
+             envir = VGAMenv)
+      if (any(function.name == c("cqo", "cao")) &&
+         is.Numeric( .zero , length.arg = 1) && .zero != -2)
+        stop("argument zero = -2 is required")
 
-        try.this <-
-          grid.search2(munb.init.jay, gsize,
-                       objfun = NBD.Loglikfun2,
-                       y = y[, jay], w = w[, jay],
-                       ret.objfun = TRUE)  # Last value is the loglik
+      M <- M1 * ncol(y)
+      NOS <- ncoly <- ncol(y)  # Number of species
 
-        munb.init[, jay] <- try.this["Value1"]
-        size.init[, jay] <- try.this["Value2"]
-      }  # for (jay ...)
 
+      temp1.names <- param.names("mu",    NOS)
+      temp2.names <- param.names("shape", NOS)
+      predictors.names <-
+          c(namesof(temp1.names, .lmu ,    earg = .emu ,    tag = FALSE),
+            namesof(temp2.names, .lshape , earg = .eshape , tag = FALSE))
+      predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
 
 
 
 
+    if (is.logical( .parallel ) & .parallel & ncoly > 1)
+      warning("the constraint matrices may not be correct with ",
+              "multiple responses")
 
-      prob.init <- if (length( .iprob ))
-                   matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
-                   size.init / (size.init + munb.init)
 
 
-      etastart <-
-        cbind(theta2eta(prob.init, .lprob , earg = .eprob),
-              theta2eta(size.init, .lsize , earg = .esize))
-      etastart <-
-        etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
+      if (!length(etastart)) {
+        init.shape <- matrix(1.0, n, NOS)
+        mymu <- y # + 0.167 * (y == 0)  # imethod == 1 (the default)
+        if ( .imethod == 2) {
+            for (ii in 1:ncol(y)) {
+              mymu[, ii] <- weighted.mean(y[, ii], w = w[, ii])
+            }
+        }
+        for (spp in 1:NOS) {
+          junk <- lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
+          var.y.est <- sum(w[, spp] * junk$resid^2) / (n - length(junk$coef))
+          init.shape[, spp] <- if (length( .ishape )) .ishape else
+              mymu[, spp]^2 / var.y.est
+          if ( .lshape == "loglog")
+              init.shape[init.shape[, spp] <= 1,spp] <- 3.1
+        }
+        etastart <-
+              cbind(theta2eta(mymu, .lmu , earg = .emu ),
+                    theta2eta(init.shape, .lshape , earg = .eshape ))
+        etastart <-
+            etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
       }
-  }), list( .lprob = lprob, .lsize = lsize,
-            .eprob = eprob, .esize = esize,
-            .iprob = iprob, .isize = isize,
-            .pinit = iprob, 
-            .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
-            .iprobs.y = iprobs.y,
-            .nsimEIM = nsimEIM, .zero = zero,
-            .imethod = imethod , .imunb = imunb,
-            .type.fitted = type.fitted ))),
+  }), list( .lmu = lmu, .lshape = lshape, .ishape = ishape,
+            .emu = emu, .eshape = eshape,
+            .parallel = parallel, .apply.parint = apply.parint,
+            .zero = zero, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    pmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                     .lprob , earg = .eprob )
-    kmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
-                     .lsize , earg = .esize )
-
-   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
-                     warning("cannot find 'type.fitted'. ",
-                             "Returning the 'mean'.")
-                     "mean"
-                   }
-
-    type.fitted <- match.arg(type.fitted,
-                     c("mean", "prob"))[1]
-
-    ans <- switch(type.fitted,
-                  "mean"      = kmat * (1 - pmat) / pmat,
-                  "prob"      = pmat)
-     if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))       
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-   ans
-  }, list( .lprob = lprob, .eprob = eprob,
-           .lsize = lsize, .esize = esize))),
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
+              .lmu , earg = .emu )
+  }, list( .lmu = lmu, .emu = emu ))),
   last = eval(substitute(expression({
-    temp0303 <- c(rep_len( .lprob , NOS),
-                  rep_len( .lsize , NOS))
-    names(temp0303) <- c(param.names("prob", NOS),
-                         param.names("size", NOS))
-    temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
-    misc$link <- temp0303  # Already named
+    if (exists("CQO.FastAlgorithm", envir = VGAMenv))
+        rm("CQO.FastAlgorithm", envir = VGAMenv)
+
+    tmp34 <- c(rep_len( .lmu ,    NOS),
+               rep_len( .lshape , NOS))
+    names(tmp34) <- c(param.names("mu",    NOS),
+                      param.names("shape", NOS))
+    tmp34 <- tmp34[interleave.VGAM(M, M1 = M1)]
+    misc$link <- tmp34 # Already named
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
     for (ii in 1:NOS) {
-      misc$earg[[M1*ii-1]] <- .eprob
-      misc$earg[[M1*ii  ]] <- .esize
+      misc$earg[[M1*ii-1]] <- .emu
+      misc$earg[[M1*ii  ]] <- .eshape
     }
 
-    misc$isize <- .isize  
-    misc$imethod <- .imethod 
-    misc$nsimEIM <- .nsimEIM
-  }), list( .lprob = lprob, .lsize = lsize,
-            .eprob = eprob, .esize = esize,
-            .isize = isize,
-            .nsimEIM = nsimEIM,
-            .imethod = imethod ))),
-
-
+    misc$M1 <- M1
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+    misc$parallel <- .parallel
+    misc$apply.parint <- .apply.parint
+  }), list( .lmu = lmu, .lshape = lshape,
+            .emu = emu, .eshape = eshape,
+            .parallel = parallel, .apply.parint = apply.parint ))),
+  linkfun = eval(substitute(function(mu, extra = NULL) {
+    temp <- theta2eta(mu, .lmu , earg = .emu )
+    temp <- cbind(temp, NA * temp)
+    temp[, interleave.VGAM(ncol(temp), M1 = M1), drop = FALSE]
+  }, list( .lmu = lmu, .emu = emu ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    pmat  <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                       .lprob , earg = .eprob)
-    temp300 <-         eta[, c(FALSE, TRUE), drop = FALSE]
-    if ( .lsize == "loge") {
-      bigval <- 68
-      temp300[temp300 >  bigval] <-  bigval
-      temp300[temp300 < -bigval] <- -bigval
-    }
-    kmat <- eta2theta(temp300, .lsize , earg = .esize )
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    mymu <- mu  # eta2theta(eta[, 2*(1:NOS)-1], .lmu , earg = .emu )
+    shapemat <- eta2theta(eta[, M1 * (1:NOS), drop = FALSE],
+                         .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dnbinom(y, prob = pmat, size = kmat, log = TRUE)
+      ll.elts <-
+        c(w) * dgamma(x = y,
+                      shape = c(shapemat),
+                      scale = c(mymu / shapemat),
+                      log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lsize = lsize, .lprob = lprob,
-           .esize = esize, .eprob = eprob ))),
-  vfamily = c("polya"),
+  }, list( .lmu = lmu, .lshape = lshape,
+           .emu = emu, .eshape = eshape))),
+  vfamily = c("gamma2"),
+
 
 
 
   simslot = eval(substitute(
   function(object, nsim) {
+
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
-    kmat <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
-    rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
-  }, list( .lprob = lprob, .lsize = lsize,
-           .eprob = eprob, .esize = esize ))),
+    mymu  <- (eta2theta(eta[, c(TRUE, FALSE)], .lmu    , earg = .emu    ))
+    shape <- (eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape ))
+    rgamma(nsim * length(shape),
+           shape = c(shape),
+           scale = c(mymu/shape))
+  }, list( .lmu = lmu, .lshape = lshape,
+           .emu = emu, .eshape = eshape))),
 
 
 
-  validparams = eval(substitute(function(eta, y, extra = NULL) {
-    pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
-    size <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
-    munb <- size * (1 / pmat - 1)
-
-    smallval <- .mds.min  # .munb.div.size
-    okay1 <- all(is.finite(munb)) && all(munb > 0) &&
-             all(is.finite(size)) && all(size > 0) &&
-             all(is.finite(pmat)) && all(pmat > 0 & pmat < 1)
-    overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
-    if (!overdispersion)
-      warning("parameter 'size' has very large values; ",
-              "try fitting a quasi-Poisson ",
-              "model instead.")
-    okay1 && overdispersion
-  }, list( .lprob = lprob, .eprob = eprob,
-           .lsize = lsize, .esize = esize,
-           .mds.min = mds.min))),
 
 
   deriv = eval(substitute(expression({
     M1 <- 2
     NOS <- ncol(eta) / M1
 
-    pmat  <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                       .lprob , earg = .eprob )
-    temp3 <-           eta[, c(FALSE, TRUE), drop = FALSE]
-    if ( .lsize == "loge") {
-      bigval <- 68
-      temp3[temp3 >  bigval] <-  bigval  # pmin() collapses matrices
-      temp3[temp3 < -bigval] <- -bigval
-     }
-    kmat <- as.matrix(eta2theta(temp3, .lsize , earg = .esize ))
+    mymu  <- eta2theta(eta[, M1 * (1:NOS) - 1],
+                       .lmu ,    earg = .emu    )
+    shape <- eta2theta(eta[, M1 * (1:NOS)],
+                       .lshape , earg = .eshape )
 
-    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
-    dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
+    dl.dmu <- shape * (y / mymu - 1) / mymu
+    dl.dshape <- log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
+                y / mymu
 
-    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob )
-    dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize )
+    dmu.deta    <- dtheta.deta(mymu,  .lmu ,    earg = .emu )
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
 
-    myderiv <- c(w) * cbind(dl.dprob * dprob.deta,
-                            dl.dkayy * dkayy.deta)
+    myderiv <- c(w) * cbind(dl.dmu    * dmu.deta,
+                            dl.dshape * dshape.deta)
     myderiv[, interleave.VGAM(M, M1 = M1)]
-  }), list( .lprob = lprob, .lsize = lsize,
-            .eprob = eprob, .esize = esize))),
+  }), list( .lmu = lmu, .lshape = lshape,
+            .emu = emu, .eshape = eshape))),
   weight = eval(substitute(expression({
-    wz <- matrix(0, n, M + M - 1)  # wz is 'tridiagonal' 
-
-
-
-
-    max.support <- .max.support
-    max.chunk.MB <- .max.chunk.MB
-
-
-    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
-    for (jay in 1:NOS) {
-      eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
-      Q.mins <- 0
-      Q.maxs <-      qnbinom(p = eff.p[2],
-                             mu = mu[, jay],
-                             size = kmat[, jay]) + 10
-
-
-
-      eps.trig <- .eps.trig
-      Q.MAXS <-      pmax(10, ceiling(1 / sqrt(eps.trig)))
-      Q.maxs <- pmin(Q.maxs, Q.MAXS)
-
-
-      ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
-      if ((NN <- sum(ind1)) > 0) {
-        Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
-        n.chunks <- if (intercept.only) 1 else
-                    max(1, ceiling( Object.Size / max.chunk.MB))
-        chunk.rows <- ceiling(NN / n.chunks)
-        ind2[, jay] <- ind1  # Save this
-        wind2 <- which(ind1)
-
-
-        upr.ptr <- 0
-        lwr.ptr <- upr.ptr + 1
-        while (lwr.ptr <= NN) {
-          upr.ptr <- min(upr.ptr + chunk.rows, NN)
-          sind2 <- wind2[lwr.ptr:upr.ptr]
-          wz[sind2, M1*jay] <-
-            EIM.NB.specialp(mu          =   mu[sind2, jay],
-                            size        = kmat[sind2, jay],
-                            y.max = max(Q.maxs[sind2]),
-                            cutoff.prob = .cutoff.prob ,
-                            intercept.only = intercept.only,
-                            extra.bit = FALSE)
-          lwr.ptr <- upr.ptr + 1
-        }  # while
-      }  # if
-    }  # end of for (jay in 1:NOS)
-
-
-
-
-
-
-
-
-
-    for (jay in 1:NOS) {
-      run.varcov <- 0
-      ii.TF <- !ind2[, jay]  # Not assigned above
-      if (any(ii.TF)) {
-        ppvec <- pmat[ii.TF, jay]
-        kkvec <- kmat[ii.TF, jay]
-        muvec <-   mu[ii.TF, jay]
-        for (ii in 1:( .nsimEIM )) {
-          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
-          dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) + log(ppvec)
-          run.varcov <- run.varcov + dl.dk^2
-        }  # end of for loop
-
-        run.varcov <- c(run.varcov / .nsimEIM )
-        ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
-
-        wz[ii.TF, M1*jay] <- ned2l.dk2  # * (dk.deta2[ii.TF, jay])^2
-      }
-    }
-
-
-    wz[,     M1*(1:NOS)    ] <- wz[,      M1 * (1:NOS)] * dkayy.deta^2
-
-
-    save.weights <- !all(ind2)
-
-
-    ned2l.dprob2 <- kmat / ((1 - pmat) * pmat^2)
-    wz[,     M1*(1:NOS) - 1] <- ned2l.dprob2 * dprob.deta^2
-
-    ned2l.dkayyprob <- -1 / pmat
-    wz[, M + M1*(1:NOS) - 1] <- ned2l.dkayyprob * dkayy.deta * dprob.deta
-
+    ned2l.dmu2 <- shape / (mymu^2)
+    ned2l.dshape2 <- trigamma(shape) - 1 / shape
+    wz <- matrix(NA_real_, n, M)  # 2 = M1; diagonal!
 
+    wz[, M1*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
+    wz[, M1*(1:NOS)  ] <- ned2l.dshape2 * dshape.deta^2
 
 
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
-            .max.support = max.support,
-            .max.chunk.MB = max.chunk.MB,
-            .nsimEIM = nsimEIM ))))
-
+  }), list( .lmu = lmu ))))
 
 
 
   if (deviance.arg)
-  ans at deviance <- eval(substitute(
+    ans at deviance <- eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
-    temp300 <-  eta[, c(FALSE, TRUE), drop = FALSE]
 
 
     if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
       stop("cannot handle matrix 'w' yet")
 
 
-
-    if ( .lsize == "loge") {
-      bigval <- 68
-      temp300[temp300 >  bigval] <-  bigval
-      temp300[temp300 < -bigval] <- -bigval
-    } else {
-      stop("can only handle the 'loge' link")
-    }
-    kayy <-  eta2theta(temp300, .lsize , earg = .esize)
-    devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
-                (y + kayy) * log((mu + kayy) / (kayy + y)))
+    M1 <- 2
+    NOS <- ncol(eta) / 2
+    temp300 <-  eta[, 2*(1:NOS), drop = FALSE]
+    shape <-  eta2theta(temp300, .lshape , earg = .eshape )
+    devi <- -2 * (log(y/mu) - y/mu + 1)
     if (residuals) {
+      warning("not 100% sure about these deviance residuals!")
       sign(y - mu) * sqrt(abs(devi) * w)
     } else {
-      dev.elts <- sum(c(w) * devi)
+      dev.elts <- c(w) * devi
       if (summation) {
         sum(dev.elts)
       } else {
         dev.elts
       }
     }
-  }, list( .lsize = lsize, .eprob = eprob,
-           .esize = esize )))
-
+  }, list( .lshape = lshape )))
   ans
-}  # End of polya()
-
-
-
-
-
-
-
-
+}
 
 
 
-polyaR.control <- function(save.weights = TRUE, ...) {
-    list(save.weights = save.weights)
-}
+ geometric <- function(link = "logit", expected = TRUE,
+                       imethod = 1, iprob = NULL, zero = NULL) {
 
+  if (!is.logical(expected) || length(expected) != 1)
+    stop("bad input for argument 'expected'")
 
 
- polyaR <-
-  function(
-           zero = "size",
-           type.fitted = c("mean", "prob"),
-           mds.min = 1e-3,
-           nsimEIM = 500,  cutoff.prob = 0.999,  # Maxiter = 5000,
-           eps.trig = 1e-7,
-           max.support = 4000,
-           max.chunk.MB = 30,  # max.memory = Inf is allowed
-           lsize = "loge", lprob = "logit", 
-           imethod = 1,
-           iprob = NULL,
-           iprobs.y = NULL,
-           gprobs.y = (0:9)/10,  # 20160709; grid for finding munb.init
-           isize = NULL,
-           gsize.mux = exp(c(-30, -20, -15, -10, -6:3)),
-           imunb = NULL) {
+  link <- as.list(substitute(link))
+  earg <- link2list(link)
+  link <- attr(earg, "function.name")
 
 
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-     imethod > 2)
-      stop("argument 'imethod' must be 1 or 2")
-
-
-  deviance.arg <- FALSE  # 20131212; for now
-      
-     
-  type.fitted <- match.arg(type.fitted,
-                           c("mean", "prob"))[1]
-
-
-  if (!is.Numeric(eps.trig, length.arg = 1,
-                  positive = TRUE) || eps.trig > 0.001)
-    stop("argument 'eps.trig' must be positive and smaller in value")
-
-
-  if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
-    stop("bad input for argument 'iprob'")
-  if (length(isize) && !is.Numeric(isize, positive = TRUE))
-    stop("bad input for argument 'isize'")
-
-  if (!is.Numeric(nsimEIM, length.arg = 1,
-                  integer.valued = TRUE))
-    stop("bad input for argument 'nsimEIM'")
-  if (nsimEIM <= 10)
-    warning("argument 'nsimEIM' should be an integer ",
-            "greater than 10, say")
-
+     imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
 
-  lprob <- as.list(substitute(lprob))
-  eprob <- link2list(lprob)
-  lprob <- attr(eprob, "function.name")
 
-  lsize <- as.list(substitute(lsize))
-  esize <- link2list(lsize)
-  lsize <- attr(esize, "function.name")
 
 
 
-  ans <-
   new("vglmff",
-  blurb = c("Polya (negative-binomial) distribution\n\n",
-            "Links:    ",
-            namesof("size", lsize, earg = esize), ", ",
-            namesof("prob", lprob, earg = eprob), "\n",
-            "Mean:     size * (1 - prob) / prob\n",
-            "Variance: mean / prob"),
+  blurb = c("Geometric distribution ",
+            "(P[Y=y] = prob * (1 - prob)^y, y = 0, 1, 2,...)\n",
+            "Link:     ",
+            namesof("prob", link, earg = earg), "\n",
+            "Mean:     mu = (1 - prob) / prob\n",
+            "Variance: mu * (1 + mu) = (1 - prob) / prob^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
-                                predictors.names = predictors.names,
-                                M1 = 2)
+    dotzero <- .zero
+    M1 <- 1
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
-    list(M1 = 2,
+    list(M1 = 1,
          Q1 = 1,
-         expected = TRUE,
-         mds.min = .mds.min ,
-         multipleResponses = TRUE,
-         type.fitted  = .type.fitted ,
-         parameters.names = c("size", "prob"),
-         eps.trig = .eps.trig ,
          zero = .zero )
-  }, list( .zero = zero, .eps.trig = eps.trig,
-           .type.fitted = type.fitted,
-           .mds.min = mds.min))),
+  }, list( .zero = zero ))),
+
 
   initialize = eval(substitute(expression({
-    M1 <- 2
-    if (any(function.name == c("cqo", "cao")))
-      stop("polyaR() does not work with cqo() or cao(). ",
-           "Try negbinomial()")
 
 
-    temp12 <- w.y.check(w = w, y = y,
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.nonnegative.y = TRUE,
               Is.integer.y = TRUE,
-              Is.nonnegative = TRUE,
               ncol.w.max = Inf,
               ncol.y.max = Inf,
               out.wy = TRUE,
-              colsyperw = 1, maximize = TRUE)
-    w <- temp12$w
-    y <- temp12$y
-
-
-    M <- M1 * ncol(y)
-    NOS <- ncoly <- ncol(y)  # Number of species
-    extra$type.fitted <- .type.fitted
-    extra$dimnamesy   <- dimnames(y)
-
-    predictors.names <-
-      c(namesof(param.names("size", NOS), .lsize , earg = .esize , tag = FALSE),
-        namesof(param.names("prob", NOS), .lprob , earg = .eprob , tag = FALSE))
-    predictors.names <- predictors.names[interleave.VGAM(M, M1 = M1)]
-
-    if (is.null( .nsimEIM )) {
-       save.weights <- control$save.weights <- FALSE
-    }
-
-    
-
-    gprobs.y <- .gprobs.y
-    imunb <- .imunb  # Default in NULL
-    if (length(imunb))
-      imunb <- matrix(imunb, n, NOS, byrow = TRUE)
-
-    
-
-    if (!length(etastart)) {
-      munb.init <-
-      size.init <- matrix(NA_real_, n, NOS)
-      gprobs.y  <- .gprobs.y
-      if (length( .iprobs.y ))
-        gprobs.y <-  .iprobs.y
-      gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
-      for (jay in 1:NOS) {  # For each response 'y_jay'... do:
-        munb.init.jay <- if ( .imethod == 1 ) {
-          quantile(y[, jay], probs = gprobs.y) + 1/16
-        } else {
-          weighted.mean(y[, jay], w = w[, jay])
-        }
-        if (length(imunb))
-          munb.init.jay <- imunb[, jay]
-
-
-        gsize <- gsize.mux * 0.5 * (mean(munb.init.jay) +
-                                    weighted.mean(y[, jay], w = w[, jay]))
-        if (length( .isize ))
-          gsize <- .isize  # isize is on an absolute scale
-
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
-        try.this <-
-          grid.search2(munb.init.jay, gsize,
-                       objfun = NBD.Loglikfun2,
-                       y = y[, jay], w = w[, jay],
-                       ret.objfun = TRUE)  # Last value is the loglik
 
-        munb.init[, jay] <- try.this["Value1"]
-        size.init[, jay] <- try.this["Value2"]
-      }  # for (jay ...)
+    ncoly <- ncol(y)
+    M1 <- 1
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
 
 
+    mynames1  <- param.names("prob", ncoly)
+    predictors.names <-
+      namesof(mynames1, .link , earg = .earg , tag = FALSE)
 
 
+    if (!length(etastart)) {
+      prob.init <- if ( .imethod == 2)
+                      1 / (1 + y + 1/16) else
+                  if ( .imethod == 3)
+                      1 / (1 + apply(y, 2, median) + 1/16) else
+                      1 / (1 + colSums(y * w) / colSums(w) + 1/16)
 
+      if (!is.matrix(prob.init))
+        prob.init <- matrix(prob.init, n, M, byrow = TRUE)
 
-      prob.init <- if (length( .iprob ))
-                   matrix( .iprob , nrow(y), ncol(y), byrow = TRUE) else
-                   size.init / (size.init + munb.init)
 
+      if (length( .iprob ))
+        prob.init <- matrix( .iprob , n, M, byrow = TRUE)
 
-      etastart <-
-        cbind(theta2eta(size.init, .lsize , earg = .esize ),
-              theta2eta(prob.init, .lprob , earg = .eprob ))
-      etastart <-
-        etastart[, interleave.VGAM(M, M1 = M1), drop = FALSE]
-      }
-  }), list( .lprob = lprob, .lsize = lsize,
-            .eprob = eprob, .esize = esize,
-            .iprob = iprob, .isize = isize,
-            .pinit = iprob, 
-            .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
-            .iprobs.y = iprobs.y,
-            .nsimEIM = nsimEIM, .zero = zero,
-            .imethod = imethod , .imunb = imunb,
-            .type.fitted = type.fitted ))),
 
+        etastart <- theta2eta(prob.init, .link , earg = .earg )
+    }
+  }), list( .link = link, .earg = earg,
+            .imethod = imethod, .iprob = iprob ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    kmat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                     .lsize , earg = .esize )
-    pmat <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
-                     .lprob , earg = .eprob )
-
-   type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
-                     warning("cannot find 'type.fitted'. ",
-                             "Returning the 'mean'.")
-                     "mean"
-                   }
+    prob <- eta2theta(eta, .link , earg = .earg )
+    (1 - prob) / prob
+  }, list( .link = link, .earg = earg ))),
 
-    type.fitted <- match.arg(type.fitted,
-                     c("mean", "prob"))[1]
-
-    ans <- switch(type.fitted,
-                  "mean"      = kmat * (1 - pmat) / pmat,
-                  "prob"      = pmat)
-     if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))       
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-   ans
-  }, list( .lprob = lprob, .eprob = eprob,
-           .lsize = lsize, .esize = esize))),
   last = eval(substitute(expression({
-    temp0303 <- c(rep_len( .lprob , NOS),
-                  rep_len( .lsize , NOS))
-    names(temp0303) <- c(param.names("size", NOS),
-                         param.names("prob", NOS))
-    temp0303 <- temp0303[interleave.VGAM(M, M1 = M1)]
-    misc$link <- temp0303  # Already named
+    M1 <- extra$M1
+    misc$link <- c(rep_len( .link , ncoly))
+    names(misc$link) <- mynames1
 
     misc$earg <- vector("list", M)
-    names(misc$earg) <- names(misc$link)
-    for (ii in 1:NOS) {
-      misc$earg[[M1*ii-1]] <- .esize
-      misc$earg[[M1*ii  ]] <- .eprob
+    names(misc$earg) <- mynames1
+    for (ii in 1:ncoly) {
+      misc$earg[[ii]] <- .earg
     }
 
-    misc$isize <- .isize  
-    misc$imethod <- .imethod 
-    misc$nsimEIM <- .nsimEIM
-  }), list( .lprob = lprob, .lsize = lsize,
-            .eprob = eprob, .esize = esize,
-            .isize = isize,
-            .nsimEIM = nsimEIM,
-            .imethod = imethod ))),
-
-
+    misc$M1 <- M1
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+    misc$expected <- .expected
+    misc$imethod <- .imethod
+    misc$iprob <- .iprob
+  }), list( .link = link, .earg = earg,
+            .iprob = iprob,
+            .expected = expected, .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    pmat  <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
-                       .lprob , earg = .eprob)
-    temp300 <-         eta[, c(TRUE, FALSE), drop = FALSE]
-    if ( .lsize == "loge") {
-      bigval <- 68
-      temp300[temp300 >  bigval] <-  bigval
-      temp300[temp300 < -bigval] <- -bigval
-    }
-    kmat <- eta2theta(temp300, .lsize , earg = .esize)
+    prob <- eta2theta(eta, .link , earg = .earg )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dnbinom(y, prob = pmat, size = kmat, log = TRUE)
+      ll.elts <- c(w) * dgeom(x = y, prob = prob, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lsize = lsize, .lprob = lprob,
-           .esize = esize, .eprob = eprob ))),
-  vfamily = c("polyaR"),
-
+  }, list( .link = link, .earg = earg ))),
+  vfamily = c("geometric"),
 
 
   simslot = eval(substitute(
   function(object, nsim) {
+
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    kmat <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
-    pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
-    rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
-  }, list( .lprob = lprob, .lsize = lsize,
-           .eprob = eprob, .esize = esize ))),
+    prob <- eta2theta(eta, .link , earg = .earg )
+    rgeom(nsim * length(prob), prob = prob)
+  }, list( .link = link, .earg = earg ))),
 
 
-  validparams = eval(substitute(function(eta, y, extra = NULL) {
-    size <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
-    pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
-    munb <- size * (1 / pmat - 1)
-
-    smallval <- .mds.min  # .munb.div.size
-    overdispersion <- all(munb / size > smallval)
-    ans <- all(is.finite(munb)) && all(munb > 0) &&
-           all(is.finite(size)) && all(size > 0) &&
-           all(is.finite(pmat)) && all(pmat > 0 & pmat < 1) &&
-           overdispersion
-    if (!overdispersion)
-      warning("parameter 'size' has very large values; ",
-              "try fitting a quasi-Poisson ",
-              "model instead.")
-    ans
-  }, list( .lprob = lprob, .eprob = eprob,
-           .lsize = lsize, .esize = esize,
-           .mds.min = mds.min))),
 
 
   deriv = eval(substitute(expression({
-    M1 <- 2
-    NOS <- ncol(eta) / M1
-
-    pmat  <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
-                       .lprob , earg = .eprob)
-    temp3 <-           eta[, c(TRUE, FALSE), drop = FALSE]
-    if ( .lsize == "loge") {
-      bigval <- 68
-      temp3[temp3 >  bigval] <-  bigval  # pmin() collapses matrices
-      temp3[temp3 < -bigval] <- -bigval
-     }
-    kmat <- as.matrix(eta2theta(temp3, .lsize , earg = .esize ))
-
-    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
-    dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
+    prob <- eta2theta(eta, .link , earg = .earg )
 
-    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
-    dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
+    dl.dprob <- -y / (1 - prob) + 1 / prob
 
-    myderiv <- c(w) * cbind(dl.dkayy * dkayy.deta,
-                            dl.dprob * dprob.deta)
-    myderiv[, interleave.VGAM(M, M1 = M1)]
-  }), list( .lprob = lprob, .lsize = lsize,
-            .eprob = eprob, .esize = esize))),
+    dprobdeta <- dtheta.deta(prob, .link , earg = .earg )
+    c(w) * cbind(dl.dprob * dprobdeta)
+  }), list( .link = link, .earg = earg, .expected = expected ))),
   weight = eval(substitute(expression({
-    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
-
-
-
-
-    max.support <- .max.support
-    max.chunk.MB <- .max.chunk.MB
-
-
-    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
-    for (jay in 1:NOS) {
-      eff.p <- sort(c( .cutoff.prob , 1 - .cutoff.prob ))
-      Q.mins <- 0
-      Q.maxs <-      qnbinom(p = eff.p[2],
-                             mu = mu[, jay],
-                             size = kmat[, jay]) + 10
-
-
-
-      eps.trig <- .eps.trig
-      Q.MAXS <-      pmax(10, ceiling(1 / sqrt(eps.trig) - kmat[, jay]))
-      Q.maxs <- pmin(Q.maxs, Q.MAXS)
-
-
-
-      ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
-      if ((NN <- sum(ind1)) > 0) {
-        Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
-        n.chunks <- if (intercept.only) 1 else
-                    max(1, ceiling( Object.Size / max.chunk.MB))
-        chunk.rows <- ceiling(NN / n.chunks)
-        ind2[, jay] <- ind1  # Save this
-        wind2 <- which(ind1)
-
-
-        upr.ptr <- 0
-        lwr.ptr <- upr.ptr + 1
-        while (lwr.ptr <= NN) {
-          upr.ptr <- min(upr.ptr + chunk.rows, NN)
-          sind2 <- wind2[lwr.ptr:upr.ptr]
-          wz[sind2, M1*jay - 1] <-
-            EIM.NB.specialp(mu          =   mu[sind2, jay],
-                            size        = kmat[sind2, jay],
-                            y.max = max(Q.maxs[sind2]),
-                            cutoff.prob = .cutoff.prob ,
-                            intercept.only = intercept.only,
-                            extra.bit = FALSE)
-          lwr.ptr <- upr.ptr + 1
-        }  # while
-      }  # if
-    }  # end of for (jay in 1:NOS)
-
-
-
-
-
-
-
-
-
-    for (jay in 1:NOS) {
-      run.varcov <- 0
-      ii.TF <- !ind2[, jay]  # Not assigned above
-      if (any(ii.TF)) {
-        ppvec <- pmat[ii.TF, jay]
-        kkvec <- kmat[ii.TF, jay]
-        muvec <-   mu[ii.TF, jay]
-        for (ii in 1:( .nsimEIM )) {
-          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
-          dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) + log(ppvec)
-          run.varcov <- run.varcov + dl.dk^2
-        }  # end of for loop
-
-        run.varcov <- c(run.varcov / .nsimEIM )
-        ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
-
-        wz[ii.TF, M1*jay - 1] <- ned2l.dk2  # * (dk.deta2[ii.TF, jay])^2
-      }
+    ned2l.dprob2 <- if ( .expected ) {
+      1 / (prob^2 * (1 - prob))
+    } else {
+      y / (1 - prob)^2 + 1 / prob^2
     }
-
-
-    wz[, M1*(1:NOS) - 1] <- wz[, M1*(1:NOS) - 1] * dkayy.deta^2
-
-
-    save.weights <- !all(ind2)
-
-
-    ned2l.dprob2 <- kmat / ((1 - pmat) * pmat^2)
-    wz[,     M1*(1:NOS)    ] <- ned2l.dprob2 * dprob.deta^2
-
-    ned2l.dkayyprob <- -1 / pmat
-    wz[, M + M1*(1:NOS) - 1] <- ned2l.dkayyprob * dkayy.deta * dprob.deta
-
-
-
-
-    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }), list( .cutoff.prob = cutoff.prob, .eps.trig = eps.trig,
-            .max.support = max.support,
-            .max.chunk.MB = max.chunk.MB,
-            .nsimEIM = nsimEIM ))))
-
+    wz <- ned2l.dprob2 * dprobdeta^2
+    if ( !( .expected ))
+      wz <- wz - dl.dprob * d2theta.deta2(prob, .link , earg = .earg )
+    c(w) * wz
+  }), list( .link = link, .earg = earg,
+            .expected = expected ))))
+}
 
 
 
-  if (deviance.arg)
-  ans at deviance <- eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-             summation = TRUE) {
-    temp300 <-  eta[, c(FALSE, TRUE), drop = FALSE]
 
+dbetageom <- function(x, shape1, shape2, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
 
-    if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
-      stop("cannot handle matrix 'w' yet")
+  if (!is.Numeric(x))
+    stop("bad input for argument 'x'")
+  if (!is.Numeric(shape1, positive = TRUE))
+    stop("bad input for argument 'shape1'")
+  if (!is.Numeric(shape2, positive = TRUE))
+    stop("bad input for argument 'shape2'")
+  N <- max(length(x), length(shape1), length(shape2))
+  if (length(x)      != N) x      <- rep_len(x,      N)
+  if (length(shape1) != N) shape1 <- rep_len(shape1, N)
+  if (length(shape2) != N) shape2 <- rep_len(shape2, N)
 
+  loglik <- lbeta(1+shape1, shape2 + abs(x)) - lbeta(shape1, shape2)
+  xok <- (x == round(x) & x >= 0)
+  loglik[!xok] <- log(0)
+  if (log.arg) {
+    loglik
+  } else {
+    exp(loglik)
+  }
+}
 
 
-    if ( .lsize == "loge") {
-      bigval <- 68
-      temp300[temp300 >  bigval] <-  bigval
-      temp300[temp300 < -bigval] <- -bigval
-    } else {
-      stop("can only handle the 'loge' link")
-    }
-    kayy <-  eta2theta(temp300, .lsize , earg = .esize)
-    devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
-                (y + kayy) * log((mu + kayy) / (kayy + y)))
-    if (residuals) {
-      sign(y - mu) * sqrt(abs(devi) * w)
-    } else {
-      dev.elts <- sum(c(w) * devi)
-      if (summation) {
-        sum(dev.elts)
-      } else {
-        dev.elts
+pbetageom <- function(q, shape1, shape2, log.p = FALSE) {
+  if (!is.Numeric(q))
+    stop("bad input for argument 'q'")
+  if (!is.Numeric(shape1, positive = TRUE))
+    stop("bad input for argument 'shape1'")
+  if (!is.Numeric(shape2, positive = TRUE))
+    stop("bad input for argument 'shape2'")
+  N <- max(length(q), length(shape1), length(shape2))
+  if (length(q)      != N) q      <- rep_len(q,      N)
+  if (length(shape1) != N) shape1 <- rep_len(shape1, N)
+  if (length(shape2) != N) shape2 <- rep_len(shape2, N)
+  ans <- q * 0  # Retains names(q)
+  if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
+     max(abs(shape2-shape2[1])) < 1.0e-08) {
+      qstar <- floor(q)
+      temp <- if (max(qstar) >= 0) dbetageom(x = 0:max(qstar),
+             shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
+      unq <- unique(qstar)
+      for (ii in unq) {
+        index <- (qstar == ii)
+        ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
       }
+  } else {
+    for (ii in 1:N) {
+      qstar <- floor(q[ii])
+      ans[ii] <- if (qstar >= 0) sum(dbetageom(x = 0:qstar,
+                 shape1 = shape1[ii], shape2 = shape2[ii])) else 0
     }
-  }, list( .lsize = lsize, .eprob = eprob,
-           .esize = esize )))
+  }
+  if (log.p) log(ans) else ans
+}
 
-  ans
-}  # End of polyaR()
+
+rbetageom <- function(n, shape1, shape2) {
+  rgeom(n = n, prob = rbeta(n = n, shape1 = shape1, shape2 = shape2))
+}
 
 
 
@@ -5505,7 +3172,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
     if (!length(etastart))
       etastart <- log(mu)
-  }), 
+  }),
   linkinv = function(eta, extra = NULL)
     exp(eta),
   last = expression({
@@ -5600,7 +3267,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
                           length(y))
     }
   }), list( .ldof = ldof, .edof = edof, .idof = idof,
-            .tol1 = tol1, .imethod = imethod ))), 
+            .tol1 = tol1, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     Dof <- eta2theta(eta, .ldof , earg = .edof )
     ans <- 0 * eta
@@ -5629,9 +3296,13 @@ polyaR.control <- function(save.weights = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .ldof = ldof, .edof = edof ))), 
+  }, list( .ldof = ldof, .edof = edof ))),
   vfamily = c("studentt"),
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Dof <- eta2theta(eta, .ldof , earg = .edof )
+    okay1 <- all(is.finite(Dof)) && all(0 < Dof)
+    okay1
+  }, list( .ldof = ldof, .edof = edof ))),
 
 
 
@@ -5641,12 +3312,12 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Dof <-  eta2theta(eta, .ldof , earg = .edof )
     rt(nsim * length(Dof), df = Dof)
-  }, list( .ldof = ldof, .edof = edof ))), 
+  }, list( .ldof = ldof, .edof = edof ))),
 
 
 
@@ -5655,7 +3326,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
   deriv = eval(substitute(expression({
     Dof <- eta2theta(eta, .ldof , earg = .edof )
-    ddf.deta <-  dtheta.deta(theta = Dof, .ldof , earg = .edof )
+    ddf.deta <-  dtheta.deta(Dof, .ldof , earg = .edof )
 
     DDS  <- function(df)          digamma((df + 1) / 2) -  digamma(df / 2)
     DDSp <- function(df)  0.5 * (trigamma((df + 1) / 2) - trigamma(df / 2))
@@ -5673,7 +3344,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
     tmp6 <- DDS(Dof)
     nedl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
- 
+
     wz <- c(w) * nedl2.dnu2 * ddf.deta^2
     wz
   }), list( .ldof = ldof, .edof = edof ))))
@@ -5732,7 +3403,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
   iloc <- ilocation
   isca <- iscale
   idof <- idf
- 
+
 
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
@@ -5834,7 +3505,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
   }), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
             .lsca = lsca, .esca = esca, .isca = isca,
             .ldof = ldof, .edof = edof, .idof = idof,
-            .imethod = imethod ))), 
+            .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     NOS    <- extra$NOS
     M1 <- extra$M1
@@ -5862,7 +3533,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
       misc$earg[[M1*ii-1]] <- .esca
       misc$earg[[M1*ii  ]] <- .edof
     }
- 
+
     misc$M1 <- M1
     misc$imethod <- .imethod
     misc$expected <- TRUE
@@ -5893,8 +3564,23 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     }
   }, list(  .lloc = lloc, .eloc = eloc,
             .lsca = lsca, .esca = esca,
-            .ldof = ldof, .edof = edof ))), 
+            .ldof = ldof, .edof = edof ))),
   vfamily = c("studentt3"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    M1 <- extra$M1
+    NOS <- extra$NOS
+    Loc <- eta2theta(eta[, M1*(1:NOS)-2], .lloc , earg = .eloc )
+    Sca <- eta2theta(eta[, M1*(1:NOS)-1], .lsca , earg = .esca )
+    Dof <- eta2theta(eta[, M1*(1:NOS)-0], .ldof , earg = .edof )
+    okay1 <- all(is.finite(Loc)) &&
+             all(is.finite(Sca)) && all(0 < Sca) &&
+             all(is.finite(Dof)) && all(0 < Dof)
+    okay1
+  }, list(  .lloc = lloc, .eloc = eloc,
+            .lsca = lsca, .esca = esca,
+            .ldof = ldof, .edof = edof ))),
+
+
 
 
 
@@ -5904,7 +3590,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Loc <-  eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lloc , earg = .eloc )
@@ -5914,7 +3600,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     Loc + Sca * rt(nsim * length(Dof), df = Dof)
   }, list(  .lloc = lloc, .eloc = eloc,
             .lsca = lsca, .esca = esca,
-            .ldof = ldof, .edof = edof ))), 
+            .ldof = ldof, .edof = edof ))),
 
 
 
@@ -5939,7 +3625,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     dl.ddof <- 0.5 * (-temp0 - log1p(temp1) +
                      (Dof+1) * zedd^2 / (Dof^2 * (1 + temp1)) +
                      digamma((Dof+1)/2) - digamma(Dof/2))
- 
+
     ans <- c(w) * cbind(dl.dloc * dloc.deta,
                         dl.dsca * dsca.deta,
                         dl.ddof * ddof.deta)
@@ -6122,7 +3808,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
   }), list( .lloc = lloc, .eloc = eloc, .iloc = iloc,
             .lsca = lsca, .esca = esca, .isca = isca,
             .doff = doff,
-            .imethod = imethod ))), 
+            .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     NOS <- extra$NOS
     M1 <- extra$M1
@@ -6146,7 +3832,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
       misc$earg[[M1*ii-1]] <- .eloc
       misc$earg[[M1*ii-0]] <- .esca
     }
- 
+
     misc$M1 <- M1
     misc$simEIM <- TRUE
     misc$df <- .doff
@@ -6179,8 +3865,22 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     }
   }, list(  .lloc = lloc, .eloc = eloc,
             .lsca = lsca, .esca = esca,
-            .doff = doff ))), 
+            .doff = doff ))),
   vfamily = c("studentt2"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    M1 <- extra$M1
+    NOS <- extra$NOS
+    Loc <- eta2theta(eta[, M1*(1:NOS)-1], .lloc , earg = .eloc )
+    Sca <- eta2theta(eta[, M1*(1:NOS)-0], .lsca , earg = .esca )
+    Dof <- .doff
+    okay1 <- all(is.finite(Loc)) &&
+             all(is.finite(Sca)) && all(0 < Sca) &&
+             all(is.finite(Dof)) && all(0 < Dof)
+    okay1
+  }, list(  .lloc = lloc, .eloc = eloc,
+            .lsca = lsca, .esca = esca,
+            .doff = doff ))),
+
 
 
 
@@ -6190,7 +3890,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     extra <- object at extra
@@ -6202,7 +3902,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     Loc + Sca * rt(nsim * length(Sca), df = Dof)
   }, list(  .lloc = lloc, .eloc = eloc,
             .lsca = lsca, .esca = esca,
-            .doff = doff ))), 
+            .doff = doff ))),
 
 
 
@@ -6224,7 +3924,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     dl.dlocat <- (Dof + 1) * zedd / (Sca * (Dof + zedd^2))
     dl.dlocat[!is.finite(Dof)] <- zedd / Sca  # Adjust for df=Inf
     dl.dscale <- zedd * dl.dlocat - 1 / Sca
- 
+
     ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
                         dl.dscale * dscale.deta)
     ans <- ans[, interleave.VGAM(ncol(ans), M1 = M1)]
@@ -6258,7 +3958,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
 
 
- 
+
  chisq <- function(link = "loge", zero = NULL) {
 
   link <- as.list(substitute(link))
@@ -6355,6 +4055,11 @@ polyaR.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = "chisq",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mydf <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(mydf)) && all(0 < mydf)
+    okay1
+  }, list(  .link = link, .earg = earg ))),
 
 
 
@@ -6363,7 +4068,7 @@ polyaR.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     Dof <- eta2theta(eta, .link , earg = .earg )
@@ -6396,7 +4101,7 @@ dsimplex <- function(x, mu = 0.5, dispersion = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
-  sigma <- dispersion 
+  sigma <- dispersion
 
   deeFun <- function(y, mu)
       (((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
@@ -6601,6 +4306,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   }, list( .lsigma = lsigma, .emu = emu,
            .esigma = esigma ))),
   vfamily = c("simplex"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu  <- eta2theta(eta[, 1], .lmu    , earg = .emu )
+    sigma <- eta2theta(eta[, 2], .lsigma , earg = .esigma )
+    okay1 <- all(is.finite(mymu )) &&
+             all(is.finite(sigma)) && all(0 < sigma)
+    okay1
+  }, list( .lmu = lmu, .lsigma = lsigma,
+           .emu = emu, .esigma = esigma ))),
 
 
 
@@ -6609,7 +4322,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     mymu  <- eta2theta(eta[, 1], .lmu    , earg = .emu )
@@ -6690,7 +4403,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
-    predictors.names <- 
+    predictors.names <-
       c(namesof("mu",     .lmu ,    earg = .emu ,     tag = FALSE),
         namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
     if (!length(etastart)) {
@@ -6736,6 +4449,15 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
            .elambda = elambda,
            .emu = emu ))),
   vfamily = c("rigff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu   <- eta2theta(eta[, 1], .lmu    , earg = .emu )
+    lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
+    okay1 <- all(is.finite(mymu )) &&
+             all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .lmu = lmu, .llambda = llambda,
+           .emu = emu, .elambda = elambda ))),
+
   deriv = eval(substitute(expression({
     if (iter == 1) {
       d3 <- deriv3( ~ w *
@@ -6848,6 +4570,11 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     }
   }, list( .link.theta = link.theta , .earg = earg ))),
   vfamily = c("hypersecant"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    theta <- eta2theta(eta, .link.theta , earg = .earg )
+    okay1 <- all(is.finite(theta)) && all(abs(theta) < pi/2)
+    okay1
+  }, list( .link.theta = link.theta , .earg = earg ))),
   deriv = eval(substitute(expression({
     theta <- eta2theta(eta, .link.theta , earg = .earg )
     dl.dthetas <-  y - tan(theta)
@@ -6934,9 +4661,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     }
   }, list( .link.theta = link.theta , .earg = earg ))),
   vfamily = c("hypersecant01"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    theta <- eta2theta(eta, .link.theta , earg = .earg )
+    okay1 <- all(is.finite(theta)) && all(abs(theta) < pi/2)
+    okay1
+  }, list( .link.theta = link.theta , .earg = earg ))),
   deriv = eval(substitute(expression({
     theta <- eta2theta(eta, .link.theta , earg = .earg )
-    dl.dthetas <-  -tan(theta) + log(y/(1-y)) / pi 
+    dl.dthetas <-  -tan(theta) + logit(y) / pi
     dparam.deta <- dtheta.deta(theta, .link.theta , earg = .earg )
     c(w) * dl.dthetas * dparam.deta
   }), list( .link.theta = link.theta , .earg = earg ))),
@@ -6949,7 +4681,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- leipnik <- function(lmu = "logit", llambda = "loge",
+ leipnik <- function(lmu = "logit", llambda = logoff(offset = 1),
                      imu = NULL,    ilambda = NULL) {
 
 
@@ -7040,6 +4772,14 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   }, list( .llambda = llambda,
            .emu = emu, .elambda = elambda ))),
   vfamily = c("leipnik"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    mymu   <- eta2theta(eta[, 1], .lmu ,     earg = .emu     )
+    lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
+    okay1 <- all(is.finite(mymu  )) && all( 0 < mymu & mymu < 1) &&
+             all(is.finite(lambda)) && all(-1 < lambda)
+    okay1
+  }, list( .lmu = lmu, .llambda = llambda,
+           .emu = emu, .elambda = elambda ))),
   deriv = eval(substitute(expression({
     lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
     dl.dthetas =
@@ -7059,7 +4799,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     denominator <- y*(1-y) + (y-mu)^2
     d2l.dthetas2 <-  array(NA_real_, c(n, 2, 2))
     d2l.dthetas2[, 1, 1] <- c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
-    d2l.dthetas2[, 1, 2] <- 
+    d2l.dthetas2[, 1, 2] <-
     d2l.dthetas2[, 2, 1] <- c(w) * (y-mu) / denominator
     d2l.dthetas2[, 2, 2] <- c(w) * (-0.25*trigamma((lambda+1)/2) +
                                  0.25*trigamma(1+lambda/2))
@@ -7118,8 +4858,8 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   new("vglmff",
   blurb = c("Inverse binomial distribution\n\n",
             "Links:    ",
-            namesof("rho", lrho, earg = erho), ", ", 
-            namesof("lambda", llambda, earg = elambda), "\n", 
+            namesof("rho", lrho, earg = erho), ", ",
+            namesof("lambda", llambda, earg = elambda), "\n",
             "Mean:     lambda*(1-rho)/(2*rho-1)\n",
             "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
   constraints = eval(substitute(expression({
@@ -7187,7 +4927,15 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     }
   }, list( .llambda = llambda, .lrho = lrho,
            .elambda = elambda, .erho = erho ))),
-  vfamily = c("inv.binomial"),
+  vfamily = c("inv.binomial"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    rho    <- eta2theta(eta[, 1], .lrho    , earg = .erho )
+    lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
+    okay1 <- all(is.finite(rho   )) && all(0.5 < rho & rho < 1) &&
+             all(is.finite(lambda)) && all(0   < lambda)
+    okay1
+  }, list( .llambda = llambda, .lrho = lrho,
+           .elambda = elambda, .erho = erho ))),
   deriv = eval(substitute(expression({
     rho    <- eta2theta(eta[, 1], .lrho    , earg = .erho )
     lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
@@ -7305,8 +5053,8 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
   new("vglmff",
   blurb = c("Generalized Poisson distribution\n\n",
             "Links:    ",
-            namesof("lambda", llambda, earg = elambda), ", ", 
-            namesof("theta",  ltheta,  earg = etheta ), "\n", 
+            namesof("lambda", llambda, earg = elambda), ", ",
+            namesof("theta",  ltheta,  earg = etheta ), "\n",
             "Mean:     theta / (1-lambda)\n",
             "Variance: theta / (1-lambda)^3"),
  constraints = eval(substitute(expression({
@@ -7441,7 +5189,21 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
     }
   }, list( .ltheta = ltheta, .llambda = llambda,
            .etheta = etheta, .elambda = elambda ))),
-  vfamily = c("genpoisson"),
+   vfamily = c("genpoisson"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta[, c(TRUE, FALSE)], .llambda , earg = .elambda )
+    theta  <- eta2theta(eta[, c(FALSE, TRUE)], .ltheta  , earg = .etheta  )
+    mmm <- ifelse(lambda < 0, floor(-theta/lambda), Inf)
+    if (any(mmm < 4)) {
+      warning("the lower bound is less than 4; choosing 4")
+      mmm <- pmax(mmm, 4)
+    }
+    Lbnd <- pmax(-1, -theta / mmm)
+    okay1 <- all(is.finite(lambda)) && all(Lbnd < lambda & lambda < 1) &&
+             all(is.finite(theta )) && all(0 < theta)
+    okay1
+  }, list( .ltheta = ltheta, .llambda = llambda,
+           .etheta = etheta, .elambda = elambda ))),
   deriv = eval(substitute(expression({
     M1  <- 2
     NOS <- ncol(eta)/M1
@@ -7472,7 +5234,7 @@ dgenpois <- function(x, lambda = 0, theta, log = FALSE) {
     } else {
       d2l.dlambda2 <- -y^2 * (y-1) / (theta+y*lambda)^2
       d2l.dtheta2 <- -(y-1)/(theta+y*lambda)^2 - 1 / theta^2
-      d2l.dthetalambda <-  -y * (y-1) / (theta+y*lambda)^2 
+      d2l.dthetalambda <-  -y * (y-1) / (theta+y*lambda)^2
       wz[, M1*(1:NOS) - 1    ] <- -d2l.dlambda2 * dlambda.deta^2
       wz[, M1*(1:NOS)        ] <- -d2l.dtheta2 * dTHETA.deta^2
       wz[, M1*(1:NOS) + M - 1] <- -d2l.dthetalambda * dTHETA.deta * dlambda.deta
@@ -7572,7 +5334,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
 
 
     predictors.names <-
-      namesof("shape", .link , earg = .earg , tag = FALSE) 
+      namesof("shape", .link , earg = .earg , tag = FALSE)
 
     if (!length(etastart)) {
       k.init <- if (length( .init.k))
@@ -7611,7 +5373,11 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
     }
   }, list( .link = link, .earg = earg ))),
   vfamily = c("lgamma1"),
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    kk <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(kk)) && all(0 < kk)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
   simslot = eval(substitute(
@@ -7619,7 +5385,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     kay <- eta2theta(eta, .link , earg = .earg )
@@ -7629,7 +5395,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
 
 
   deriv = eval(substitute(expression({
-    kk <- eta2theta(eta, .link , earg = .earg ) 
+    kk <- eta2theta(eta, .link , earg = .earg )
     dl.dk <- y - digamma(kk)
     dk.deta <- dtheta.deta(kk, .link , earg = .earg )
     c(w) * dl.dk * dk.deta
@@ -7779,6 +5545,17 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
   }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape))),
   vfamily = c("lgamma3"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    bb <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    kk <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(kk)) && all(0 < kk) &&
+             all(is.finite(bb)) && all(0 < bb) &&
+             all(is.finite(aa))
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+           .elocat = elocat, .escale = escale, .eshape = eshape))),
+
 
 
 
@@ -7789,7 +5566,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
@@ -7974,6 +5751,16 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
   }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape))),
   vfamily = c("prentice74"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    bb <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    kk <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(kk)) &&
+             all(is.finite(bb)) && all(0 < bb) &&
+             all(is.finite(aa))
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+           .elocat = elocat, .escale = escale, .eshape = eshape))),
   deriv = eval(substitute(expression({
     a <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     b <- eta2theta(eta[, 2], .lscale , earg = .escale )
@@ -8170,391 +5957,97 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
       sc.init <-
       dd.init <-
       kk.init <- matrix(NA_real_, n, NOS)
-          
+
       for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
         yvec <- y[, spp.]
         wvec <- w[, spp.]
 
           gscale     <- .gscale
           gshape1.d  <- .gshape1.d
-          gshape2.k  <- .gshape2.k        
+          gshape2.k  <- .gshape2.k
           if (length( .iscale )) gscale    <- rep_len( .iscale , NOS)
-          if (length( .id     )) gshape1.d <- rep_len( .id     , NOS)
-          if (length( .ik     )) gshape2.p <- rep_len( .ik     , NOS)
-
-
-          ll.gstacy3 <- function(scaleval, shape1.d, shape2.k,
-                                 x = x, y = y, w = w, extraargs) { 
-            ans <- sum(c(w) * dgengamma.stacy(x = y,
-                                              scale    = scaleval,
-                                              d        = shape1.d,
-                                              k        = shape2.k,
-                                              log = TRUE))
-            ans
-          }
-        try.this <-
-          grid.search3(gscale, gshape1.d, gshape2.k,
-                       objfun = ll.gstacy3,
-                       y = yvec, w = wvec,
-                       ret.objfun = TRUE)  # Last value is the loglik
-
-          sc.init[, spp.] <- try.this["Value1" ]
-          dd.init[, spp.] <- try.this["Value2" ]
-          kk.init[, spp.] <- try.this["Value3" ]
-      }  # End of for (spp. ...)
-
-
-      etastart <-
-        cbind(theta2eta(sc.init,  .lscale    , earg = .escale  ),
-              theta2eta(dd.init , .ld        , earg = .ed      ),
-              theta2eta(kk.init , .lk        , earg = .ek      ))
-    }  # End of etastart.
-  }), list( .lscale = lscale, .ld = ld, .lk = lk,
-            .escale = escale, .ed = ed, .ek = ek,
-            .iscale = iscale, .id = id, .ik = ik,
-            .gscale = gscale, .gshape1.d = gshape1.d,           
-                              .gshape2.k = gshape2.k
-           ))),
-  linkinv = eval(substitute(function(eta, extra = NULL) {
-    b <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
-    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
-    b * gamma(k + 1 / d) / gamma(k)
-  }, list( .lscale = lscale, .lk = lk, .ld = ld,
-           .escale = escale, .ek = ek, .ed = ed ))),
-  last = eval(substitute(expression({
-    misc$link <-    c(scale = .lscale , d = .ld , k = .lk )
-    misc$earg <- list(scale = .escale , d = .ed , k = .ek )
-    misc$expected <- TRUE
-  }), list( .lscale = lscale, .ld = ld, .lk = lk,
-            .escale = escale, .ed = ed, .ek = ek ))),
-  loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
-    b <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
-    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
-
-    if (residuals) {
-      stop("loglikelihood residuals not implemented yet")
-    } else {
-      ll.elts <-
-        c(w) * dgengamma.stacy(x = y, scale = b, d = d, k = k, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
-    }
-  }, list( .lscale = lscale, .ld = ld, .lk = lk,
-           .escale = escale, .ed = ed, .ek = ek ))),
-  vfamily = c("gengamma.stacy"),
-
-
-
-
-
-  simslot = eval(substitute(
-  function(object, nsim) {
-
-    pwts <- if (length(pwts <- object at prior.weights) > 0)
-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
-    eta <- predict(object)
-    bbb <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    ddd <- eta2theta(eta[, 2], .ld     , earg = .ed )
-    kkk <- eta2theta(eta[, 3], .lk     , earg = .ek )
-    rgengamma.stacy(nsim * length(kkk), scale = bbb, d = ddd, k = kkk)
-  }, list( .lscale = lscale, .ld = ld, .lk = lk,
-           .escale = escale, .ed = ed, .ek = ek ))),
-
-
-
-
-
-  deriv = eval(substitute(expression({
-    b <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
-    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
-
-    tmp22 <- (y/b)^d
-    tmp33 <- log(y/b)
-    dl.db <- d * (tmp22 - k) / b
-    dl.dd <- 1/d + tmp33 * (k - tmp22)
-    dl.dk <- d * tmp33 - digamma(k)
-
-    db.deta <- dtheta.deta(b, .lscale , earg = .escale )
-    dd.deta <- dtheta.deta(d, .ld     , earg = .ed )
-    dk.deta <- dtheta.deta(k, .lk     , earg = .ek )
-
-    c(w) * cbind(dl.db * db.deta,
-                 dl.dd * dd.deta,
-                 dl.dk * dk.deta)
-  }), list( .lscale = lscale, .ld = ld, .lk = lk,
-            .escale = escale, .ed = ed, .ek = ek ))),
-  weight = eval(substitute(expression({
-    ned2l.db2 <- k * (d/b)^2
-    ned2l.dd2 <- (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2 
-    ned2l.dk2 <- trigamma(k)
-    ned2l.dbdd <- -(1 + k*digamma(k)) / b
-    ned2l.dbdk <- d / b
-    ned2l.dddk <- -digamma(k) / d
-
-    wz <- matrix(NA_real_, n, dimm(M))
-    wz[, iam(1, 1, M)] <- ned2l.db2 * db.deta^2
-    wz[, iam(2, 2, M)] <- ned2l.dd2 * dd.deta^2
-    wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
-    wz[, iam(1, 2, M)] <- ned2l.dbdd * db.deta * dd.deta
-    wz[, iam(1, 3, M)] <- ned2l.dbdk * db.deta * dk.deta
-    wz[, iam(2, 3, M)] <- ned2l.dddk * dd.deta * dk.deta
-
-    wz <- c(w) * wz
-    wz
-  }), list( .lscale = lscale, .ld = ld, .lk = lk,
-            .escale = escale, .ed = ed, .ek = ek ))))
-}
-
-
-
-
-dlog <- function(x, prob, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
-
-  if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
-      stop("bad input for argument 'prob'")
-  N <- max(length(x), length(prob))
-  if (length(x)    != N) x    <- rep_len(x,    N)
-  if (length(prob) != N) prob <- rep_len(prob, N)
-  ox <- !is.finite(x)
-  zero <- ox | round(x) != x | x < 1
-  ans <- rep_len(0.0, length(x))
-  if (log.arg) {
-    ans[ zero] <- log(0.0)
-    ans[!zero] <- x[!zero] * log(prob[!zero]) - log(x[!zero]) -
-                  log(-log1p(-prob[!zero]))
-    ans[ox] <- log(0)  # 20141212 KaiH
-  } else {
-    ans[!zero] <- -(prob[!zero]^(x[!zero])) / (x[!zero] *
-                   log1p(-prob[!zero]))
-    ans[ox] <- 0.0  # 20141212 KaiH
-  }
-  ans
-}
-
-
-
-plog  <- function(q, prob, log.p = FALSE) {
-  if (!is.Numeric(q)) stop("bad input for argument 'q'")
-  if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
-    stop("bad input for argument 'prob'")
-  N <- max(length(q), length(prob))
-  if (length(q)    != N) q    <- rep_len(q,    N)
-  if (length(prob) != N) prob <- rep_len(prob, N)
-
-
-    bigno <- 10
-    owen1965 <- (q * (1 - prob) > bigno)
-    if (specialCase <- any(owen1965)) {
-        qqq <- q[owen1965]
-        ppp <- prob[owen1965]
-        pqp <- qqq * (1 - ppp)
-        bigans <- (ppp^(1+qqq) / (1-ppp)) * (1/qqq -
-                 1 / (            pqp * (qqq-1)) +
-                 2 / ((1-ppp)   * pqp * (qqq-1) * (qqq-2)) -
-                 6 / ((1-ppp)^2 * pqp * (qqq-1) * (qqq-2) * (qqq-3)) +
-                24 / ((1-ppp)^3 * pqp * (qqq-1) * (qqq-2) * (qqq-3) * (qqq-4)))
-        bigans <- 1 + bigans / log1p(-ppp)
-    }
-
-    floorq <- pmax(1, floor(q))  # Ensures at least one element per q value
-    floorq[owen1965] <- 1
-    seqq <- sequence(floorq)
-    seqp <- rep(prob, floorq)
-    onevector <- (seqp^seqq / seqq) / (-log1p(-seqp))
-    rlist <-  .C("tyee_C_cum8sum",
-                  as.double(onevector), answer = double(N),
-                  as.integer(N), as.double(seqq),
-                  as.integer(length(onevector)), notok=integer(1))
-    if (rlist$notok != 0) stop("error in 'cum8sum'")
-    ans <- if (log.p) log(rlist$answer) else rlist$answer
-    if (specialCase)
-        ans[owen1965] <- if (log.p) log(bigans) else bigans
-    ans[q < 1] <- if (log.p) log(0.0) else 0.0
-    ans
-}
-
-
-
-
-
-
-
-rlog <- function(n, prob, Smallno = 1.0e-6) {
-
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           length.arg = 1, positive = TRUE))
-               stop("bad input for argument 'n'") else n
-
-  if (!is.Numeric(prob, length.arg = 1, positive = TRUE) ||
-      max(prob) >= 1)
-    stop("bad input for argument 'prob'")
-  if (!is.Numeric(Smallno, positive = TRUE, length.arg = 1) ||
-      Smallno > 0.01 ||
-     Smallno < 2 * .Machine$double.eps)
-    stop("bad input for argument 'Smallno'")
-  ans <- rep_len(0.0, use.n)
-
-  ptr1 <- 1; ptr2 <- 0
-  a <- -1 / log1p(-prob)
-  mean <- a*prob/(1-prob)    # E(Y)
-  sigma <- sqrt(a * prob * (1 - a * prob)) / (1 - prob)  # sd(Y)
-  ymax <- dlog(x = 1, prob)
-  while (ptr2 < use.n) {
-    Lower <- 0.5 # A continuity correction is used = 1 - 0.5.
-    Upper <- mean + 5 * sigma
-    while (plog(q = Upper, prob) < 1 - Smallno)
-      Upper <- Upper + sigma
-    Upper <- Upper + 0.5
-    x <- round(runif(2 * use.n, min = Lower, max = Upper))
-    index <- runif(2 * use.n, max = ymax) < dlog(x,prob)
-    sindex <- sum(index)
-    if (sindex) {
-      ptr2 <- min(use.n, ptr1 + sindex - 1)
-      ans[ptr1:ptr2] <- (x[index])[1:(1+ptr2-ptr1)]
-      ptr1 <- ptr2 + 1
-    }
-  }
-  ans
-}
-
-
-
-
-
-
-
-
- logff <- function(link = "logit", init.c = NULL, zero = NULL) {
-  if (length(init.c) &&
-     (!is.Numeric(init.c, positive = TRUE) || max(init.c) >= 1))
-    stop("init.c must be in (0,1)")
-
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
-
-
-
-
-
-  new("vglmff",
-  blurb = c("Logarithmic distribution f(y) = a * c^y / y, ",
-             "y = 1, 2, 3,...,\n",
-             "            0 < c < 1, a = -1 / log(1-c)  \n\n",
-             "Link:    ", namesof("c", link, earg = earg), "\n", "\n",
-             "Mean:    a * c / (1 - c)", "\n"),
-  constraints = eval(substitute(expression({
-    dotzero <- .zero
-    M1 <- 1
-    eval(negzero.expression.VGAM)
-  }), list( .zero = zero ))),
-
-  infos = eval(substitute(function(...) {
-    list(M1 = 1,
-         Q1 = 1,
-         zero = .zero )
-  }, list( .zero = zero ))),
-
-
-  initialize = eval(substitute(expression({
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              Is.integer.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
-
-    ncoly <- ncol(y)
-    M1 <- 1
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
-    M <- M1 * ncoly
-
+          if (length( .id     )) gshape1.d <- rep_len( .id     , NOS)
+          if (length( .ik     )) gshape2.p <- rep_len( .ik     , NOS)
 
-    mynames1  <- param.names("c", ncoly)
-    predictors.names <- namesof(mynames1, .link , earg = .earg , tag = FALSE)
 
+          ll.gstacy3 <- function(scaleval, shape1.d, shape2.k,
+                                 x = x, y = y, w = w, extraargs) {
+            ans <- sum(c(w) * dgengamma.stacy(x = y,
+                                              scale    = scaleval,
+                                              d        = shape1.d,
+                                              k        = shape2.k,
+                                              log = TRUE))
+            ans
+          }
+        try.this <-
+          grid.search3(gscale, gshape1.d, gshape2.k,
+                       objfun = ll.gstacy3,
+                       y = yvec, w = wvec,
+                       ret.objfun = TRUE)  # Last value is the loglik
 
-    if (!length(etastart)) {
-      logff.Loglikfun <- function(probval, y, x, w, extraargs) {
-        sum(c(w) * dlog(x = y, prob = probval, log = TRUE))
-      }
-      Init.c <- matrix(if (length( .init.c )) .init.c else 0,
-                       n, M, byrow = TRUE)
+          sc.init[, spp.] <- try.this["Value1" ]
+          dd.init[, spp.] <- try.this["Value2" ]
+          kk.init[, spp.] <- try.this["Value3" ]
+      }  # End of for (spp. ...)
 
-      if (!length( .init.c ))
-        for (ilocal in 1:ncoly) {
-          prob.grid <- seq(0.05, 0.95, by = 0.05)
-          Init.c[, ilocal] <- grid.search(prob.grid,
-                                          objfun = logff.Loglikfun,
-                                          y = y[, ilocal], x = x,
-                                          w = w[, ilocal])
 
-        }
-      etastart <- theta2eta(Init.c, .link , earg = .earg )
-    }
-  }), list( .link = link, .earg = earg, .init.c = init.c ))),
+      etastart <-
+        cbind(theta2eta(sc.init,  .lscale    , earg = .escale  ),
+              theta2eta(dd.init , .ld        , earg = .ed      ),
+              theta2eta(kk.init , .lk        , earg = .ek      ))
+    }  # End of etastart.
+  }), list( .lscale = lscale, .ld = ld, .lk = lk,
+            .escale = escale, .ed = ed, .ek = ek,
+            .iscale = iscale, .id = id, .ik = ik,
+            .gscale = gscale, .gshape1.d = gshape1.d,
+                              .gshape2.k = gshape2.k
+           ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    cc <- eta2theta(eta, .link , earg = .earg )
-    aa <- -1 / log1p(-cc)
-    aa * cc / (1 - cc)
-  }, list( .link = link, .earg = earg ))),
-
+    b <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
+    b * gamma(k + 1 / d) / gamma(k)
+  }, list( .lscale = lscale, .lk = lk, .ld = ld,
+           .escale = escale, .ek = ek, .ed = ed ))),
   last = eval(substitute(expression({
-    M1 <- extra$M1
-    misc$link <- c(rep_len( .link , ncoly))
-    names(misc$link) <- mynames1
-
-    misc$earg <- vector("list", M)
-    names(misc$earg) <- mynames1
-    for (ii in 1:ncoly) {
-      misc$earg[[ii]] <- .earg
-    }
-
-    misc$M1 <- M1
+    misc$link <-    c(scale = .lscale , d = .ld , k = .lk )
+    misc$earg <- list(scale = .escale , d = .ed , k = .ek )
     misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
-  }), list( .link = link, .earg = earg ))),
-
+  }), list( .lscale = lscale, .ld = ld, .lk = lk,
+            .escale = escale, .ed = ed, .ek = ek ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    cc <- eta2theta(eta, .link , earg = .earg )
-    aa <- -1 / log1p(-cc)
+    b <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
+
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dlog(x = y, prob = -expm1(-1/aa), log = TRUE)
+      ll.elts <-
+        c(w) * dgengamma.stacy(x = y, scale = b, d = d, k = k, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .link = link, .earg = earg ))),
-  vfamily = c("logff"),
+  }, list( .lscale = lscale, .ld = ld, .lk = lk,
+           .escale = escale, .ed = ed, .ek = ek ))),
+  vfamily = c("gengamma.stacy"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    bb <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    dd <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    kk <- eta2theta(eta[, 3], .lk     , earg = .ek )
+    okay1 <- all(is.finite(kk)) && all(0 < kk) &&
+             all(is.finite(bb)) && all(0 < bb) &&
+             all(is.finite(dd)) && all(0 < dd)
+    okay1
+  }, list( .lscale = lscale, .ld = ld, .lk = lk,
+           .escale = escale, .ed = ed, .ek = ek ))),
+
 
 
 
@@ -8563,34 +6056,67 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
-    cc <- eta2theta(eta, .link , earg = .earg )
-    aa <- -1 / log1p(-cc)
-    rlog(nsim * length(aa), prob = -expm1(-1/aa))
-  }, list( .link = link, .earg = earg ))),
+    bbb <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    ddd <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    kkk <- eta2theta(eta[, 3], .lk     , earg = .ek )
+    rgengamma.stacy(nsim * length(kkk), scale = bbb, d = ddd, k = kkk)
+  }, list( .lscale = lscale, .ld = ld, .lk = lk,
+           .escale = escale, .ed = ed, .ek = ek ))),
+
+
 
 
 
   deriv = eval(substitute(expression({
-    M1 <- 1
-    cc <- eta2theta(eta, .link , earg = .earg )
-    aa <- -1 / log1p(-cc)
-    dl.dc <- 1 / ((1 - cc) * log1p(-cc)) + y / cc
-    dc.deta <- dtheta.deta(cc, .link , earg = .earg )
-    c(w) * dl.dc * dc.deta
-  }), list( .link = link, .earg = earg ))),
+    b <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
+
+    tmp22 <- (y/b)^d
+    tmp33 <- log(y/b)
+    dl.db <- d * (tmp22 - k) / b
+    dl.dd <- 1/d + tmp33 * (k - tmp22)
+    dl.dk <- d * tmp33 - digamma(k)
+
+    db.deta <- dtheta.deta(b, .lscale , earg = .escale )
+    dd.deta <- dtheta.deta(d, .ld     , earg = .ed )
+    dk.deta <- dtheta.deta(k, .lk     , earg = .ek )
+
+    c(w) * cbind(dl.db * db.deta,
+                 dl.dd * dd.deta,
+                 dl.dk * dk.deta)
+  }), list( .lscale = lscale, .ld = ld, .lk = lk,
+            .escale = escale, .ed = ed, .ek = ek ))),
   weight = eval(substitute(expression({
-    ned2l.dc2 <- aa * (1 - aa * cc) / (cc * (1-cc)^2)
-    wz <- c(w) * dc.deta^2 * ned2l.dc2
+    ned2l.db2 <- k * (d/b)^2
+    ned2l.dd2 <- (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
+    ned2l.dk2 <- trigamma(k)
+    ned2l.dbdd <- -(1 + k*digamma(k)) / b
+    ned2l.dbdk <- d / b
+    ned2l.dddk <- -digamma(k) / d
+
+    wz <- matrix(NA_real_, n, dimm(M))
+    wz[, iam(1, 1, M)] <- ned2l.db2 * db.deta^2
+    wz[, iam(2, 2, M)] <- ned2l.dd2 * dd.deta^2
+    wz[, iam(3, 3, M)] <- ned2l.dk2 * dk.deta^2
+    wz[, iam(1, 2, M)] <- ned2l.dbdd * db.deta * dd.deta
+    wz[, iam(1, 3, M)] <- ned2l.dbdk * db.deta * dk.deta
+    wz[, iam(2, 3, M)] <- ned2l.dddk * dd.deta * dk.deta
+
+    wz <- c(w) * wz
     wz
-  }), list( .link = link, .earg = earg ))))
+  }), list( .lscale = lscale, .ld = ld, .lk = lk,
+            .escale = escale, .ed = ed, .ek = ek ))))
 }
 
 
 
 
+
+
 dlevy <- function(x, location = 0, scale = 1, log.arg = FALSE) {
   logdensity <- 0.5 * log(scale / (2*pi)) - 1.5 * log(x - location) -
                       0.5 * scale / (x - location)
@@ -8652,10 +6178,10 @@ rlevy <- function(n, location = 0, scale = 1)
             "          location < y < Inf, scale > 0",
             if (delta.known) "Link:    " else "Links:   ",
             namesof("scale", link.gamma, earg = earg),
-            if (! delta.known) 
+            if (! delta.known)
                 c(", ", namesof("delta", "identitylink", earg = list())),
             "\n\n",
-            "Mean:    NA", 
+            "Mean:    NA",
             "\n"),
   initialize = eval(substitute(expression({
 
@@ -8668,7 +6194,7 @@ rlevy <- function(n, location = 0, scale = 1)
 
     predictors.names <-
       c(namesof("scale", .link.gamma , earg = .earg , tag = FALSE),
-        if ( .delta.known) NULL else 
+        if ( .delta.known) NULL else
         namesof("delta", "identitylink", earg = list(), tag = FALSE))
 
 
@@ -8676,7 +6202,7 @@ rlevy <- function(n, location = 0, scale = 1)
       delta.init <- if ( .delta.known) {
                      if (min(y, na.rm = TRUE) <= .delta )
                          stop("'location' must be < min(y)")
-                     .delta 
+                     .delta
                    } else {
                      if (length( .idelta )) .idelta else
                          min(y,na.rm = TRUE) - 1.0e-4 *
@@ -8688,7 +6214,7 @@ rlevy <- function(n, location = 0, scale = 1)
       etastart <-
         cbind(theta2eta(gamma.init, .link.gamma , earg = .earg ),
                         if ( .delta.known ) NULL else delta.init)
-                       
+
     }
   }), list( .link.gamma = link.gamma, .earg = earg,
             .delta.known = delta.known,
@@ -8737,6 +6263,15 @@ rlevy <- function(n, location = 0, scale = 1)
            .delta.known = delta.known,
            .delta = delta ))),
   vfamily = c("levy"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    eta <- as.matrix(eta)
+    mygamma <- eta2theta(eta[, 1], .link.gamma , earg = .earg )
+    okay1 <- all(is.finite(mygamma)) && all(0 < mygamma)
+    okay1
+  }, list( .link.gamma = link.gamma, .earg = earg,
+           .delta.known = delta.known,
+           .delta = delta ))),
+
   deriv = eval(substitute(expression({
     eta <- as.matrix(eta)
     mygamma <- eta2theta(eta[, 1], .link.gamma , earg = .earg )
@@ -8745,7 +6280,7 @@ rlevy <- function(n, location = 0, scale = 1)
       dl.ddelta  <- (3 - mygamma / (y-delta)) / (2 * (y-delta))
     dl.dgamma <- 0.5 * (1 / mygamma - 1 / (y-delta))
     dgamma.deta <- dtheta.deta(mygamma, .link.gamma , earg = .earg )
-    c(w) * cbind(dl.dgamma * dgamma.deta, 
+    c(w) * cbind(dl.dgamma * dgamma.deta,
                  if ( .delta.known ) NULL else dl.ddelta)
   }), list( .link.gamma = link.gamma, .earg = earg,
             .delta.known = delta.known,
@@ -8757,7 +6292,7 @@ rlevy <- function(n, location = 0, scale = 1)
       wz[, iam(1, 2, M)] <-  3 * dgamma.deta
       wz[, iam(2, 2, M)] <-  21
     }
-    wz <- c(w) * wz / (2 * mygamma^2) 
+    wz <- c(w) * wz / (2 * mygamma^2)
     wz
   }), list( .link.gamma = link.gamma, .earg = earg,
            .delta.known = delta.known,
@@ -8765,7 +6300,7 @@ rlevy <- function(n, location = 0, scale = 1)
 }
 
 
-        
+
 
 
 
@@ -8841,9 +6376,9 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
   new("vglmff",
   blurb = c("Generalized Beta distribution (Libby and Novick, 1982)\n\n",
             "Links:    ",
-            namesof("shape1", lshape1, earg = eshape1), ", ", 
-            namesof("shape2", lshape2, earg = eshape2), ", ", 
-            namesof("lambda", llambda, earg = elambda), "\n", 
+            namesof("shape1", lshape1, earg = eshape1), ", ",
+            namesof("shape2", lshape2, earg = eshape2), ", ",
+            namesof("lambda", llambda, earg = elambda), "\n",
             "Mean:     something complicated"),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
@@ -8925,7 +6460,16 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
   }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
            .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
   vfamily = c("lino"),
-
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
+    lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
+    okay1 <- all(is.finite(shape1)) && all(0 < shape1) &&
+             all(is.finite(shape2)) && all(0 < shape2) &&
+             all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+           .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
 
 
   simslot = eval(substitute(
@@ -8933,7 +6477,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
@@ -8948,8 +6492,8 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
 
 
   deriv = eval(substitute(expression({
-    sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
-    sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+    sh1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1 )
+    sh2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2 )
     lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
 
     temp1 <- log1p(-(1-lambda) * y)
@@ -9159,9 +6703,13 @@ rmaxwell <- function(n, rate) {
         ll.elts
       }
     }
-  }, list( .link = link,
-           .earg = earg ))),
+  }, list( .link = link, .earg = earg ))),
   vfamily = c("maxwell"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .link = link, .earg = earg ))),
 
 
 
@@ -9171,7 +6719,7 @@ rmaxwell <- function(n, rate) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     aa <- eta2theta(eta, .link , earg = .earg )
@@ -9398,6 +6946,16 @@ rnaka <- function(n, scale = 1, shape, Smallno = 1.0e-6) {
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape))),
   vfamily = c("nakagami"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape))),
+
+
 
 
 
@@ -9640,6 +7198,11 @@ rrayleigh <- function(n, scale = 1) {
   }, list( .lscale = lscale, .escale = escale))),
 
   vfamily = c("rayleigh"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- eta2theta(eta, .lscale , earg = .escale )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .lscale = lscale, .escale = escale))),
 
 
 
@@ -9648,7 +7211,7 @@ rrayleigh <- function(n, scale = 1) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
 
     Scale <- fitted(object) / sqrt(pi / 2)
@@ -9714,7 +7277,7 @@ dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
   zedd <- (x - location) / scale
   logdensity[xok] <- log(shape[xok]) -
                     log(scale[xok]) -  log(inequality[xok]) +
-                    (1/inequality[xok]-1) * log(zedd[xok]) - 
+                    (1/inequality[xok]-1) * log(zedd[xok]) -
                     (shape[xok]+1) *
                       log1p(zedd[xok]^(1/inequality[xok]))
   logdensity[is.infinite(x)] <- log(0)  # 20141208 KaiH
@@ -9805,7 +7368,7 @@ qparetoIV <-
 
 rparetoIV <-
   function(n, location = 0, scale = 1, inequality = 1, shape = 1) {
-  if (!is.Numeric(inequality, positive = TRUE)) 
+  if (!is.Numeric(inequality, positive = TRUE))
     stop("bad input for argument 'inequality'")
   ans <- location + scale * (-1 + runif(n)^(-1/shape))^inequality
   ans[scale <= 0] <- NaN
@@ -10027,10 +7590,24 @@ rparetoI <- function(n, scale = 1, shape = 1)
            .lshape = lshape,
            .eshape = eshape))),
   vfamily = c("paretoIV"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    location <- extra$location
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    inequ <- eta2theta(eta[, 2], .linequ , earg = .einequ )
+    shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(inequ)) && all(0 < inequ) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lscale = lscale, .linequ = linequ,
+           .escale = escale, .einequ = einequ,
+           .lshape = lshape,
+           .eshape = eshape))),
+
   deriv = eval(substitute(expression({
-    location = extra$location
+    location <- extra$location
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+    inequ <- eta2theta(eta[, 2], .linequ , earg = .einequ )
     shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
     zedd <- (y - location) / Scale
     temp100 <- 1 + zedd^(1/inequ)
@@ -10042,7 +7619,7 @@ rparetoI <- function(n, scale = 1, shape = 1)
     dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
     dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
     c(w) * cbind(dl.dscale * dscale.deta,
-                 dl.dinequ * dinequ.deta, 
+                 dl.dinequ * dinequ.deta,
                  dl.dshape * dshape.deta)
   }), list( .lscale = lscale, .linequ = linequ,
             .lshape = lshape,
@@ -10183,29 +7760,38 @@ rparetoI <- function(n, scale = 1, shape = 1)
     }, list( .lscale = lscale, .linequ = linequ,
              .escale = escale, .einequ = einequ ))),
   vfamily = c("paretoIII"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    location <- extra$location
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(inequ)) && all(0 < inequ)
+    okay1
+  }, list( .lscale = lscale, .linequ = linequ,
+           .escale = escale, .einequ = einequ ))),
   deriv = eval(substitute(expression({
-      location <- extra$location
-      Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
-      inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
-      shape <- 1
-      zedd <- (y - location) / Scale
-      temp100 <- 1 + zedd^(1/inequ)
-      dl.dscale <- (shape  - (1+shape) / temp100) / (inequ * Scale)
-      dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
-                       inequ - 1) / inequ
-      dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
-      dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
-      c(w) * cbind(dl.dscale * dscale.deta,
-                   dl.dinequ * dinequ.deta)
+    location <- extra$location
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+    shape <- 1
+    zedd <- (y - location) / Scale
+    temp100 <- 1 + zedd^(1/inequ)
+    dl.dscale <- (shape  - (1+shape) / temp100) / (inequ * Scale)
+    dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
+                     inequ - 1) / inequ
+    dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+    dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
+    c(w) * cbind(dl.dscale * dscale.deta,
+                 dl.dinequ * dinequ.deta)
   }), list( .lscale = lscale, .linequ = linequ,
             .escale = escale, .einequ = einequ ))),
   weight = eval(substitute(expression({
-      d2scale.deta2 <- 1 / ((inequ*Scale)^2 * 3)
-      d2inequ.deta2 <- (1 + 2* trigamma(1)) / (inequ^2 * 3)
-      wz <- matrix(0, n, M)  # It is diagonal
-      wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
-      wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
-      c(w) * wz
+    d2scale.deta2 <- 1 / ((inequ*Scale)^2 * 3)
+    d2inequ.deta2 <- (1 + 2* trigamma(1)) / (inequ^2 * 3)
+    wz <- matrix(0, n, M)  # It is diagonal
+    wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+    wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
+    c(w) * wz
   }), list( .lscale = lscale, .linequ = linequ,
             .escale = escale, .einequ = einequ ))))
 }
@@ -10283,7 +7869,7 @@ rparetoI <- function(n, scale = 1, shape = 1)
                 theta2eta(rep_len(shape.init, n), .lshape , earg = .eshape ))
     }
   }), list( .location = location, .lscale = lscale,
-            .escale = escale, .eshape = eshape, 
+            .escale = escale, .eshape = eshape,
             .lshape = lshape, .iscale = iscale, .ishape = ishape ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     location <- extra$location
@@ -10325,6 +7911,15 @@ rparetoI <- function(n, scale = 1, shape = 1)
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape))),
   vfamily = c("paretoII"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    location <- extra$location
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lscale = lscale, .lshape = lshape,
+           .escale = escale, .eshape = eshape))),
   deriv = eval(substitute(expression({
     location <- extra$location
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -10363,7 +7958,7 @@ dpareto <- function(x, scale = 1, shape, log = FALSE) {
     stop("bad input for argument 'log'")
   rm(log)
 
-  L <- max(length(x), length(scale), length(shape)) 
+  L <- max(length(x), length(scale), length(shape))
   if (length(x)     != L) x     <- rep_len(x,     L)
   if (length(scale) != L) scale <- rep_len(scale, L)
   if (length(shape) != L) shape <- rep_len(shape, L)
@@ -10474,14 +8069,11 @@ rpareto <- function(n, scale = 1, shape) {
   lshape <- attr(eshape, "function.name")
 
 
-  earg <- eshape
-
-
   new("vglmff",
   blurb = c("Pareto distribution ",
             "f(y) = shape * scale^shape / y^(shape+1),",
-            " 0<scale<y, shape>0\n",
-            "Link:    ", namesof("shape", lshape, earg = earg),
+            " 0<scale<y, 0<shape\n",
+            "Link:    ", namesof("shape", lshape, earg = eshape),
             "\n", "\n",
             "Mean:    scale*shape/(shape-1) for shape>1"),
   initialize = eval(substitute(expression({
@@ -10492,7 +8084,7 @@ rpareto <- function(n, scale = 1, shape) {
 
 
     predictors.names <-
-      namesof("shape", .lshape , earg = .earg , tag = FALSE)
+      namesof("shape", .lshape , earg = .eshape , tag = FALSE)
 
 
     scalehat <- if (!length( .scale )) {
@@ -10510,54 +8102,61 @@ rpareto <- function(n, scale = 1, shape) {
 
     if (!length(etastart)) {
       k.init <- (y + 1/8) / (y - scalehat + 1/8)
-      etastart <- theta2eta(k.init, .lshape , earg = .earg )
+      etastart <- theta2eta(k.init, .lshape , earg = .eshape )
     }
-  }), list( .lshape = lshape, .earg = earg,
+  }), list( .lshape = lshape, .eshape = eshape,
             .scale = scale ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    k <- eta2theta(eta, .lshape , earg = .earg )
+    k <- eta2theta(eta, .lshape , earg = .eshape )
     scale <- extra$scale
     ifelse(k > 1, k * scale / (k-1), NA)
-  }, list( .lshape = lshape, .earg = earg ))),
+  }, list( .lshape = lshape, .eshape = eshape ))),
   last = eval(substitute(expression({
     misc$link <-    c(k = .lshape)
 
-    misc$earg <- list(k = .earg )
+    misc$earg <- list(k = .eshape )
 
     misc$scale <- extra$scale # Use this for prediction
-  }), list( .lshape = lshape, .earg = earg ))),
+  }), list( .lshape = lshape, .eshape = eshape ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    k <- eta2theta(eta, .lshape , earg = .earg )
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
     scale <- extra$scale
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
 
 
-      ll.elts <- c(w) * (log(k) + k * log(scale) - (k+1) * log(y))
+      ll.elts <- c(w) * dpareto(x = y, scale = scale, shape = shape, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lshape = lshape, .earg = earg ))),
+  }, list( .lshape = lshape, .eshape = eshape ))),
   vfamily = c("paretoff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    Scale <- extra$scale
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(Scale)) && all(0 < Scale & Scale <= y) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .eshape = eshape ))),
   deriv = eval(substitute(expression({
     scale <- extra$scale
-    k <- eta2theta(eta, .lshape , earg = .earg )
+    k <- eta2theta(eta, .lshape , earg = .eshape )
     dl.dk <- 1/k + log(scale/y)
-    dk.deta <- dtheta.deta(k, .lshape , earg = .earg )
+    dk.deta <- dtheta.deta(k, .lshape , earg = .eshape )
     c(w) * dl.dk * dk.deta
-  }), list( .lshape = lshape, .earg = earg ))),
+  }), list( .lshape = lshape, .eshape = eshape ))),
   weight = eval(substitute(expression({
-    ed2l.dk2 <- 1 / k^2
-    wz <- c(w) * dk.deta^2 * ed2l.dk2
+    ned2l.dk2 <- 1 / k^2
+    wz <- c(w) * dk.deta^2 * ned2l.dk2
     wz
-  }), list( .lshape = lshape, .earg = earg ))))
+  }), list( .lshape = lshape, .eshape = eshape ))))
 }
 
 
@@ -10612,7 +8211,7 @@ ptruncpareto <- function(q, lower, upper, shape,
     stop("bad input for argument 'log.p'")
   rm(log.p)   # 20141231 KaiH
 
-  L <- max(length(q), length(lower), length(upper), length(shape)) 
+  L <- max(length(q), length(lower), length(upper), length(shape))
   if (length(q)     != L) q     <- rep_len(q,     L)
   if (length(shape) != L) shape <- rep_len(shape, L)
   if (length(lower) != L) lower <- rep_len(lower, L)
@@ -10697,7 +8296,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
   blurb = c("Truncated Pareto distribution f(y) = shape * lower^shape /",
             "(y^(shape+1) * (1-(lower/upper)^shape)),",
             " 0 < lower < y < upper < Inf, shape>0\n",
-            "Link:    ", namesof("shape", lshape, earg = earg), "\n", "\n",
+            "Link:    ", namesof("shape", lshape, earg = eshape), "\n", "\n",
             "Mean:    shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
                       " ((1-shape) * (1-(lower/upper)^shape))"),
   initialize = eval(substitute(expression({
@@ -10709,7 +8308,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
 
-    predictors.names <- namesof("shape", .lshape , earg = .earg ,
+    predictors.names <- namesof("shape", .lshape , earg = .eshape ,
                                 tag = FALSE)
     if (any(y <= .lower))
       stop("the value of argument 'lower' is too high ",
@@ -10737,34 +8336,34 @@ rtruncpareto <- function(n, lower, upper, shape) {
         try.this <- rep_len(try.this, n)
         try.this
       }
-      etastart <- theta2eta(shape.init, .lshape , earg = .earg )
+      etastart <- theta2eta(shape.init, .lshape , earg = .eshape )
     }
-  }), list( .lshape = lshape, .earg = earg,
+  }), list( .lshape = lshape, .eshape = eshape,
             .ishape = ishape,
             .imethod = imethod,
             .lower = lower, .upper = upper ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape <- eta2theta(eta, .lshape , earg = .earg )
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
     myratio <- .lower / .upper
     constprop <- shape * .lower^shape / (1 - myratio^shape)
     constprop * ( .upper^(1-shape) - .lower^(1-shape)) / (1-shape)
-  }, list( .lshape = lshape, .earg = earg,
-             .lower = lower, .upper = upper ))),
+  }, list( .lshape = lshape, .lower = lower,
+           .eshape = eshape, .upper = upper ))),
   last = eval(substitute(expression({
-    misc$link <-    c(shape = .lshape)
+    misc$link <-    c(shape = .lshape )
 
-    misc$earg <- list(shape = .earg )
+    misc$earg <- list(shape = .eshape )
 
     misc$lower <- extra$lower
     misc$upper <- extra$upper
     misc$expected <- TRUE
-  }), list( .lshape = lshape, .earg = earg,
-            .lower = lower, .upper = upper ))),
+  }), list( .lshape = lshape, .lower = lower,
+            .eshape = eshape, .upper = upper ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    shape <- eta2theta(eta, .lshape , earg = .earg )
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -10777,51 +8376,63 @@ rtruncpareto <- function(n, lower, upper, shape) {
         ll.elts
       }
     }
-  }, list( .lshape = lshape, .earg = earg,
-           .lower = lower, .upper = upper ))),
+  }, list( .lshape = lshape, .lower = lower,
+           .eshape = eshape, .upper = upper ))),
   vfamily = c("truncpareto"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
+    okay1 <- all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lshape = lshape, .lower = lower,
+           .eshape = eshape, .upper = upper ))),
   deriv = eval(substitute(expression({
-    shape <- eta2theta(eta, .lshape , earg = .earg )
+    shape <- eta2theta(eta, .lshape , earg = .eshape )
     myratio <- .lower / .upper
     myratio2 <-  myratio^shape
     tmp330 <- myratio2 * log(myratio) / (1 - myratio2)
 
-    dl.dshape <- 1 / shape + log( .lower) - log(y) + tmp330 
+    dl.dshape <- 1 / shape + log( .lower) - log(y) + tmp330
 
-    dshape.deta <- dtheta.deta(shape, .lshape , earg = .earg )
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
 
     c(w) * dl.dshape * dshape.deta
-  }), list( .lshape = lshape, .earg = earg,
-            .lower = lower, .upper = upper ))),
+  }), list( .lshape = lshape, .lower = lower,
+            .eshape = eshape, .upper = upper ))),
   weight = eval(substitute(expression({
     ned2l.dshape2 <- 1 / shape^2 - tmp330^2 / myratio2
     wz <- c(w) * dshape.deta^2 * ned2l.dshape2
     wz
-  }), list( .lshape = lshape, .earg = earg,
-            .lower = lower, .upper = upper ))))
+  }), list( .lshape = lshape, .lower = lower,
+            .eshape = eshape, .upper = upper ))))
 }
 
 
 
 
 
- waldff <- function(link.lambda = "loge", init.lambda = NULL) {
-
-  link.lambda <- as.list(substitute(link.lambda))
-  earg <- link2list(link.lambda)
-  link.lambda <- attr(earg, "function.name")
+ waldff <- function(llambda = "loge", ilambda = NULL) {
 
+  llambda <- as.list(substitute(llambda))
+  elambda <- link2list(llambda)
+  llambda <- attr(elambda, "function.name")
 
 
   new("vglmff",
   blurb = c("Standard Wald distribution\n\n",
            "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
-           "exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
-           "\n", 
-           "Link:     ",
-                         namesof("lambda", link.lambda, earg = earg), "\n",
+           "exp(-lambda*(y-1)^2/(2*y)), y&lambda>0", "\n",
+           "Link:     ", namesof("lambda", llambda, earg = elambda), "\n",
            "Mean:     ", "1\n",
            "Variance: 1 / lambda"),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = FALSE,
+         parameters.names = c("lambda"),
+         llambda = .llambda )
+  }, list( .llambda = llambda ))),
+
   initialize = eval(substitute(expression({
 
     w.y.check(w = w, y = y,
@@ -10831,33 +8442,28 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
     predictors.names <-
-      namesof("lambda", .link.lambda , earg = .earg , short = TRUE)
+      namesof("lambda", .llambda , earg = .elambda , short = TRUE)
 
 
     if (!length(etastart)) {
-      initlambda <- if (length( .init.lambda )) .init.lambda else
+      initlambda <- if (length( .ilambda )) .ilambda else
                     1 / (0.01 + (y-1)^2)
       initlambda <- rep_len(initlambda, n)
       etastart <-
-        cbind(theta2eta(initlambda,
-                        link = .link.lambda , earg = .earg ))
+        cbind(theta2eta(initlambda, link = .llambda , earg = .elambda ))
       }
-  }), list( .link.lambda = link.lambda, .earg = earg,
-           .init.lambda = init.lambda ))),
+  }), list( .llambda = llambda, .elambda = elambda, .ilambda = ilambda ))),
   linkinv = function(eta, extra = NULL) {
       0 * eta + 1
   },
   last = eval(substitute(expression({
-    misc$link <-    c(lambda = .link.lambda )
-
-    misc$earg <- list(lambda = .earg )
-
-  }), list( .link.lambda = link.lambda, .earg = earg ))),
+    misc$link <-    c(lambda = .llambda )
+    misc$earg <- list(lambda = .elambda )
+  }), list( .llambda = llambda, .elambda = elambda ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
-    lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
+    lambda <- eta2theta(eta, link = .llambda , earg = .elambda )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -10869,19 +8475,24 @@ rtruncpareto <- function(n, lower, upper, shape) {
         ll.elts
       }
     }
-  }, list( .link.lambda = link.lambda, .earg = earg ))),
+  }, list( .llambda = llambda, .elambda = elambda ))),
   vfamily = "waldff",
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .llambda = llambda, .elambda = elambda ))),
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta, link=.link.lambda, earg = .earg )
+    lambda <- eta2theta(eta, .llambda , earg = .elambda )
     dl.dlambda <- 0.5 / lambda + 1 - 0.5 * (y + 1/y)
-    dlambda.deta <- dtheta.deta(lambda, .link.lambda , earg = .earg )
+    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
     c(w) * cbind(dl.dlambda * dlambda.deta)
-  }), list( .link.lambda = link.lambda, .earg = earg ))),
+  }), list( .llambda = llambda, .elambda = elambda ))),
   weight = eval(substitute(expression({
-    d2l.dlambda2 <- 0.5 / (lambda^2)
+    d2l.dlambda2 <- 0.5 / lambda^2
     c(w) * cbind(dlambda.deta^2 * d2l.dlambda2)
-  }), list( .link.lambda = link.lambda, .earg = earg ))))
-}
+  }), list( .llambda = llambda, .elambda = elambda ))))
+}  # waldff
 
 
 
@@ -10938,7 +8549,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
       predictors.names <-
         c(namesof("rate",  .lratee , earg = .eratee , short = TRUE),
-          namesof("shape", .lshape , earg = .eshape , short = TRUE)) 
+          namesof("shape", .lshape , earg = .eshape , short = TRUE))
 
 
       if (!length(etastart)) {
@@ -10951,7 +8562,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
         etastart <-
           cbind(theta2eta(ratee.init, .lratee , earg = .eratee ),
                 theta2eta(shape.init, .lshape , earg = .eshape ))
-                
+
     }
   }), list( .lshape = lshape, .lratee = lratee,
             .iratee = iratee, .ishape = ishape,
@@ -10979,7 +8590,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * (log(shape) + log(ratee) + 
+        c(w) * (log(shape) + log(ratee) +
                (shape-1)*log1p(-exp(-ratee*y)) - ratee*y)
       if (summation) {
         sum(ll.elts)
@@ -10990,6 +8601,14 @@ rtruncpareto <- function(n, lower, upper, shape) {
   }, list( .lratee = lratee, .lshape = lshape,
            .eshape = eshape, .eratee = eratee))),
   vfamily = c("expexpff"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    okay1 <- all(is.finite(ratee)) && all(0 < ratee) &&
+             all(is.finite(shape)) && all(0 < shape)
+    okay1
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eshape = eshape, .eratee = eratee))),
   deriv = eval(substitute(expression({
     ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
@@ -11096,7 +8715,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
              stop("argument 'ishape' must be positive") else
              rep_len( .ishape , n)
       rateinit <- if (length( .irate )) rep_len( .irate , n) else
-                  (digamma(shape.init+1) - digamma(1)) / (y+1/8)  
+                  (digamma(shape.init+1) - digamma(1)) / (y+1/8)
       etastart <- cbind(theta2eta(rateinit, .lrate , earg = .erate ))
     }
   }), list( .lrate = lrate, .irate = irate, .ishape = ishape,
@@ -11128,7 +8747,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * (log(shape) + log(rate) + 
+        c(w) * (log(shape) + log(rate) +
                (shape-1)*log1p(-exp(-rate*y)) - rate*y)
       if (summation) {
         sum(ll.elts)
@@ -11138,6 +8757,11 @@ rtruncpareto <- function(n, lower, upper, shape) {
     }
   }, list( .lrate = lrate, .erate = erate))),
   vfamily = c("expexpff1"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    rate <- eta2theta(eta, .lrate , earg = .erate )
+    okay1 <- all(is.finite(rate)) && all(0 < rate)
+    okay1
+  }, list( .lrate = lrate, .erate = erate))),
   deriv = eval(substitute(expression({
     rate <- eta2theta(eta, .lrate , earg = .erate )
 
@@ -11296,7 +8920,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M <- ncol(eta)
     M1 <- 2
-    ncoly <- M / M1 
+    ncoly <- M / M1
     eta2theta(eta[, (1:ncoly) * M1 - 1], .llocat , earg = .elocat )
   }, list( .llocat = llocat,
            .elocat = elocat ))),
@@ -11329,7 +8953,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
              summation = TRUE) {
     M <- ncol(eta)
     M1 <- 2
-    ncoly <- M / M1 
+    ncoly <- M / M1
 
     locat <- eta2theta(eta[, (1:ncoly)*M1-1], .llocat , earg = .elocat )
     Scale <- eta2theta(eta[, (1:ncoly)*M1  ], .lscale , earg = .escale )
@@ -11347,6 +8971,16 @@ rtruncpareto <- function(n, lower, upper, shape) {
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale))),
   vfamily = c("logistic"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    M1 <- 2; M <- NCOL(eta)
+    ncoly <- M / M1
+    locat <- eta2theta(eta[, (1:ncoly)*M1-1], .llocat , earg = .elocat )
+    Scale <- eta2theta(eta[, (1:ncoly)*M1  ], .lscale , earg = .escale )
+    okay1 <- all(is.finite(locat)) &&
+             all(is.finite(Scale)) && all(0 < Scale)
+    okay1
+  }, list( .llocat = llocat, .lscale = lscale,
+           .elocat = elocat, .escale = escale))),
 
 
 
@@ -11355,7 +8989,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     locat <- eta2theta(eta[, c(TRUE, FALSE)], .llocat , earg = .elocat )
@@ -11369,7 +9003,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
   deriv = eval(substitute(expression({
     M1 <- 2
-    ncoly <- M / M1 
+    ncoly <- M / M1
 
     locat <- eta2theta(eta[, (1:ncoly)*M1-1], .llocat , earg = .elocat )
     Scale <- eta2theta(eta[, (1:ncoly)*M1  ], .lscale , earg = .escale )
@@ -11447,7 +9081,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
 
-  ans <- 
+  ans <-
   new("vglmff",
 
   blurb = c("Negative-binomial distribution with size known\n\n",
@@ -11493,7 +9127,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
 
-    M <- M1 * ncol(y) 
+    M <- M1 * ncol(y)
     NOS <- ncoly <- ncol(y)  # Number of species
     mynames1 <- param.names("mu", NOS)
     predictors.names <- namesof(mynames1, .lmu , earg = .emu , tag = FALSE)
@@ -11585,7 +9219,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
     }
 
 
-    misc$imethod <- .imethod 
+    misc$imethod <- .imethod
     misc$expected <- TRUE
     misc$ishrinkage <- .ishrinkage
     misc$size <- kmat
@@ -11634,6 +9268,18 @@ rtruncpareto <- function(n, lower, upper, shape) {
   }, list( .size = size ))),
 
   vfamily = c("negbinomial.size"),
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    eta <- as.matrix(eta)
+    newemu <- .emu
+    if ( .lmu == "nbcanlink") {
+      newemu$size <- matrix( .size , nrow(eta), ncol(eta), byrow = TRUE)
+    }
+    munb <- eta2theta(eta, .lmu , earg = newemu )
+    okay1 <- all(is.finite(munb))    && all(0 < munb) &&
+                                        all(0 < .size )
+    okay1
+  }, list( .lmu = lmu, .emu = emu, .size = size ))),
+
 
 
 
@@ -11641,7 +9287,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
   function(object, nsim) {
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     muuu <- fitted(object)
     n   <- nrow(as.matrix(muuu))
@@ -11675,12 +9321,12 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
     myderiv <- c(w) * dl.dmu * dmu.deta
     myderiv
-  }), list( .lmu = lmu, 
+  }), list( .lmu = lmu,
             .emu = emu,
            .size = size ))),
 
   weight = eval(substitute(expression({
-    wz <- matrix(NA_real_, n, M)  # wz is 'diagonal' 
+    wz <- matrix(NA_real_, n, M)  # wz is 'diagonal'
 
     ned2l.dmunb2 <- 1 / mu - 1 / (mu + kmat)
     wz <- dmu.deta^2 * ned2l.dmunb2
diff --git a/R/family.vglm.R b/R/family.vglm.R
index 4fe021c..3a66030 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,12 +1,12 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
 
 if (FALSE)
-family.vglm <- function(object, ...) 
+family.vglm <- function(object, ...)
     object$vfamily
 
 
@@ -20,7 +20,7 @@ print.vfamily <- function(x, ...) {
   if (is.null(nn))
     invisible(return(x))
 
-  cat("Family: ", f[1], "\n") 
+  cat("Family: ", f[1], "\n")
   if (length(f)>1)
     cat("Classes:", paste(f, collapse=", "), "\n")
   cat("\n")
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 6095299..cf1d1c3 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -299,7 +299,7 @@ qzipois <- function(p, lambda, pstr0 = 0) {
   ans    <- rep_len(NA_real_, LLL)
   deflat.limit <- -1 / expm1(lambda)
 
-  ans[p <= pstr0] <- 0 
+  ans[p <= pstr0] <- 0
   pindex <- (pstr0 < p) & (deflat.limit <= pstr0)
   ans[pindex] <-
     qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
@@ -309,6 +309,7 @@ qzipois <- function(p, lambda, pstr0 = 0) {
   ans[1 < pstr0] <- NaN
 
 
+  ans[lambda < 0] <- NaN
   ans[p < 0] <- NaN
   ans[1 < p] <- NaN
   ans
@@ -353,7 +354,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     if (any(zero)) {
       if (length(extra)) extra$sumw <- sum(w) else
         extra <- list(sumw=sum(w))
-      if (is.numeric(.n.arg) && extra$sumw != .n.arg) 
+      if (is.numeric(.n.arg) && extra$sumw != .n.arg)
         stop("value of 'n.arg' conflicts with data ",
              "(it need not be specified anyway)")
       warning("trimming out the zero observations")
@@ -365,10 +366,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
       w <- w[!zero]
       y <- y[!zero]
     } else {
-      if (!is.numeric(.n.arg)) 
+      if (!is.numeric(.n.arg))
         stop("n.arg must be supplied")
     }
-        
+
   }), list( .n.arg = n.arg ))),
 
   initialize = eval(substitute(expression({
@@ -441,6 +442,13 @@ rzipois <- function(n, lambda, pstr0 = 0) {
   }, list( .link = link, .earg = earg ))),
 
   vfamily = c("yip88"),
+
+  validparams = eval(substitute(function(eta, y, extra = NULL) {
+    lambda <- eta2theta(eta, .link , earg = .earg )
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda)
+    okay1
+  }, list( .link = link, .earg = earg ))),
+
   deriv = eval(substitute(expression({
     lambda <- eta2theta(eta, .link , earg = .earg )
     temp5 <- exp(-lambda)
@@ -528,20 +536,20 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
-    extra$dimnamesy <- dimnames(y)
-    extra$type.fitted      <- .type.fitted
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("pobs0",  ncoly)
     mynames2 <- param.names("lambda", ncoly)
     predictors.names <-
-        c(namesof(mynames1, .lpobs.0, earg = .epobs.0, tag = FALSE),
-          namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
+        c(namesof(mynames1, .lpobs.0 , earg = .epobs.0 , tag = FALSE),
+          namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
           interleave.VGAM(M1*NOS, M1 = M1)]
 
     if (!length(etastart)) {
       lambda.init <- Init.mu(y = y, w = w, imethod = .imethod ,  # x = x,
-                             imu = .ilambda,
-                             ishrinkage = .ishrinkage,
+                             imu = .ilambda ,
+                             ishrinkage = .ishrinkage ,
                              pos.only = TRUE,
                              probs.y = .probs.y )
 
@@ -557,7 +565,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
             .ipobs0 = ipobs0, .ilambda = ilambda,
             .ishrinkage = ishrinkage, .probs.y = probs.y,
             .imethod = imethod,
-            .type.fitted = type.fitted ))), 
+            .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -573,9 +581,9 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
     pobs.0 <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
-                              .lpobs.0, earg = .epobs.0 ))
+                              .lpobs.0 , earg = .epobs.0 ))
     lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
-                              .llambda, earg = .elambda ))
+                              .llambda , earg = .elambda ))
 
 
     ans <- switch(type.fitted,
@@ -583,24 +591,10 @@ rzipois <- function(n, lambda, pstr0 = 0) {
                   "lambda"    = lambda,
                   "pobs0"     =      pobs.0,  # P(Y=0)
                   "onempobs0" =  1 - pobs.0)  # P(Y>0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))       
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpobs.0 = lpobs.0, .llambda = llambda,
            .epobs.0 = epobs.0, .elambda = elambda ))),
   last = eval(substitute(expression({
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
-
     temp.names <- c(rep_len( .lpobs.0 , NOS),
                     rep_len( .llambda , NOS))
     temp.names <- temp.names[interleave.VGAM(M1*NOS, M1 = M1)]
@@ -659,7 +653,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     pobs0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpobs.0 , earg = .epobs.0 )
@@ -676,10 +670,9 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     y0 <- extra$y0
     skip <- extra$skip.these
 
-    phimat <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
-                              .lpobs.0, earg = .epobs.0 ))
-    lambda <- cbind(eta2theta(eta[, M1*(1:NOS)-0, drop = FALSE],
-                              .llambda, earg = .elambda ))
+    TFvec <- c(TRUE, FALSE)
+    phimat <- eta2theta(eta[,  TFvec, drop = FALSE], .lpobs.0 , earg = .epobs.0 )
+    lambda <- eta2theta(eta[, !TFvec, drop = FALSE], .llambda , earg = .elambda )
 
     dl.dlambda <- y / lambda + 1 / expm1(-lambda)
     dl.dphimat <- -1 / (1 - phimat)  # For y > 0 obsns
@@ -691,7 +684,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
     mu.phi0 <- phimat
 
-    temp3 <- if (.lpobs.0 == "logit") {
+    temp3 <- if ( .lpobs.0 == "logit") {
       c(w) * (y0 - mu.phi0)
     } else {
       c(w) * dtheta.deta(mu.phi0, link = .lpobs.0 , earg = .epobs.0 ) *
@@ -820,8 +813,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
-    extra$dimnamesy   <- dimnames(y)
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("lambda",    ncoly)
     mynames2 <- param.names("onempobs0", ncoly)
@@ -849,7 +842,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
                                       .ilambda = ilambda,
             .ishrinkage = ishrinkage, .probs.y = probs.y,
             .type.fitted = type.fitted,
-            .imethod = imethod ))), 
+            .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -874,18 +867,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
                   "lambda"    =    lambda,
                   "pobs0"     = 1 - onempobs0,  # P(Y=0)
                   "onempobs0" =     onempobs0)  # P(Y>0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lonempobs0 = lonempobs0, .llambda = llambda,
            .eonempobs0 = eonempobs0, .elambda = elambda ))),
   last = eval(substitute(expression({
@@ -954,7 +936,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     lambda    <- eta2theta(eta[, c(TRUE, FALSE)], .llambda    ,
@@ -1167,8 +1149,8 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     M <- M1 * ncoly
 
-    extra$dimnamesy   <- dimnames(y)
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("pobs0", NOS)
     mynames2 <- param.names("munb",  NOS)
@@ -1196,7 +1178,7 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
       if (length( .iprobs.y ))
         gprobs.y <-  .iprobs.y
       gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
+
       for (jay in 1:NOS) {  # For each response 'y_jay'... do:
         TFvec <- y[, jay] > 0  # Important to exclude the 0s
         posyvec <- y[TFvec, jay]
@@ -1291,18 +1273,7 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
                   "mean"      = (1 - phi0) * munb / oneminusf0,
                   "munb"      = munb,
                   "pobs0"     = phi0)  # P(Y=0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpobs0 = lpobs0, .lsize = lsize, .lmunb = lmunb,
            .epobs0 = epobs0, .emunb = emunb, .esize = esize,
            .mds.min = mds.min ))),
@@ -1367,7 +1338,7 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     phi0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lpobs0 , earg = .epobs0 )
@@ -1389,8 +1360,8 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
     size <- eta2theta(eta[, M1*(1:NOS)  , drop = FALSE],
                       .lsize , earg = .esize )
 
-    okay1 <- all(is.finite(munb))  && all(munb > 0) &&
-             all(is.finite(size))  && all(size > 0) &&
+    okay1 <- all(is.finite(munb))  && all(0 < munb) &&
+             all(is.finite(size))  && all(0 < size) &&
              all(is.finite(phi0))  && all(0 < phi0 & phi0 < 1)
     smallval <- .mds.min  # .munb.div.size
     overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
@@ -1604,14 +1575,14 @@ zanegbinomial.control <- function(save.weights = TRUE, ...) {
                                is.na(wz[sind2, M1*jay]))) {
             ind2[sind2[eim.kk.TF], jay] <- FALSE
           }
-          
-          
+
+
           lwr.ptr <- upr.ptr + 1
         }  # while
       }  # if
     }  # end of for (jay in 1:NOS)
 
-    
+
 
 
 
@@ -1799,8 +1770,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     M <- M1 * ncoly
 
-    extra$dimnamesy   <- dimnames(y)
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("munb",      NOS)
     mynames2 <- param.names("size",      NOS)
@@ -1831,7 +1802,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
       if (length( .iprobs.y ))
         gprobs.y <-  .iprobs.y
       gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
+
       for (jay in 1:NOS) {  # For each response 'y_jay'... do:
         TFvec <- y[, jay] > 0  # Important to exclude the 0s
         posyvec <- y[TFvec, jay]
@@ -1891,7 +1862,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
             .gprobs.y = gprobs.y, .gsize.mux = gsize.mux,
             .ipobs0.small = ipobs0.small,
             .imethod = imethod,
-            .iprobs.y = iprobs.y, .type.fitted = type.fitted ))), 
+            .iprobs.y = iprobs.y, .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -1926,18 +1897,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                   "munb"      =    munb,
                   "pobs0"     = 1 - onempobs0,  # P(Y=0)
                   "onempobs0" =     onempobs0)  # P(Y>0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))        
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lonempobs0 = lonempobs0, .lsize = lsize, .lmunb = lmunb,
            .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
            .mds.min = mds.min ))),
@@ -2003,7 +1963,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     munb      <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lmunb , earg = .emunb )
@@ -2027,8 +1987,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     onempobs0 <- eta2theta(eta[, M1*(1:NOS)  , drop = FALSE],
                            .lonempobs0 , earg = .eonempobs0 )
 
-    okay1 <- all(is.finite(munb))       && all(munb  > 0) &&
-             all(is.finite(size))       && all(size  > 0) &&
+    okay1 <- all(is.finite(munb))       && all(0 < munb) &&
+             all(is.finite(size))       && all(0 < size) &&
              all(is.finite(onempobs0))  && all(0 < onempobs0 & onempobs0 < 1)
     smallval <- .mds.min  # .munb.div.size
     overdispersion <- if (okay1) all(munb / size > smallval) else FALSE
@@ -2199,7 +2159,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
 
 
 
-      
+
       ind1 <- if (max.chunk.MB > 0) (Q.maxs - Q.mins < max.support) else FALSE
       if ((NN <- sum(ind1)) > 0) {
         Object.Size <- NN * 8 * max(Q.maxs - Q.mins) / (2^20)
@@ -2243,8 +2203,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                                is.na(wz[sind2, M1*jay - 1]))) {
             ind2[sind2[eim.kk.TF], jay] <- FALSE
           }
-          
-          
+
+
           lwr.ptr <- upr.ptr + 1
         }  # while
       }  # if
@@ -2408,9 +2368,9 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     ncoly <- ncol(y)
     extra$ncoly <- ncoly
     extra$M1 <- M1
-    extra$dimnamesy <- dimnames(y)
     M <- M1 * ncoly
-    extra$type.fitted      <- .type.fitted
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("pstr0",  ncoly)
     mynames2 <- param.names("lambda", ncoly)
@@ -2452,7 +2412,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                dpois(0, matL[, jay]))
         }
         if (mean(Phi.init == ipstr0.small) > 0.95 &&
-            .lpstr0 != "identitylink")
+            .lpstr00 != "identitylink")
           warning("from the initial values only, the data appears to ",
                   "have little or no 0-inflation, and possibly ",
                   "0-deflation.")
@@ -2473,6 +2433,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
             .type.fitted = type.fitted,
             .ishrinkage = ishrinkage ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -2485,28 +2446,16 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
     lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
 
-    
+
     ans <- switch(type.fitted,
                   "mean"      = (1 - phimat) * lambda,
                   "lambda"    = lambda,
                   "pobs0"     = phimat + (1-phimat)*exp(-lambda),  # P(Y=0)
                   "pstr0"     =     phimat,
                   "onempstr0" = 1 - phimat)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans)) 
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpstr00 = lpstr00, .llambda = llambda,
-           .epstr00 = epstr00, .elambda = elambda,
-           .type.fitted = type.fitted
+           .epstr00 = epstr00, .elambda = elambda
          ))),
   last = eval(substitute(expression({
     M1 <- extra$M1
@@ -2541,7 +2490,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   }), list( .lpstr00 = lpstr00, .llambda = llambda,
             .epstr00 = epstr00, .elambda = elambda,
             .imethod = imethod ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -2570,7 +2519,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
     lambda <- eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
 
-    okay1 <- all(is.finite(lambda)) && all(lambda > 0) &&
+    okay1 <- all(is.finite(lambda)) && all(0 < lambda) &&
              all(is.finite(phimat)) && all(phimat < 1)
     deflat.limit <- -1 / expm1(lambda)
     okay2.deflat <- TRUE
@@ -2587,7 +2536,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
@@ -2669,7 +2618,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   function(llambda = "loge", lonempstr0 = "logit",
            type.fitted = c("mean", "lambda", "pobs0", "pstr0", "onempstr0"),
            ilambda = NULL,   ionempstr0 = NULL,
-           gonempstr0 = NULL,  # (1:9) / 10, 
+           gonempstr0 = NULL,  # (1:9) / 10,
            imethod = 1,
            ishrinkage = 0.95, probs.y = 0.35,
            zero = "onempstr0") {
@@ -2746,8 +2695,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     extra$ncoly <- ncoly
     extra$M1 <- M1
     M <- M1 * ncoly
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("lambda",    ncoly)
     mynames2 <- param.names("onempstr0", ncoly)
@@ -2820,7 +2769,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                      c("mean", "lambda", "pobs0", "pstr0", "onempstr0"))[1]
 
     M1 <- 2
-    ncoly <- ncol(eta) / M1
+    NOS <- ncoly <- ncol(eta) / M1
     lambda    <- eta2theta(eta[, M1*(1:ncoly) - 1], .llambda ,
                            earg = .elambda )
     onempstr0 <- eta2theta(eta[, M1*(1:ncoly)    ], .lonempstr0 ,
@@ -2833,21 +2782,9 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                   "pobs0"     = 1 + onempstr0 * expm1(-lambda),  # P(Y=0)
                   "pstr0"     = 1 - onempstr0,
                   "onempstr0" =     onempstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
-           .eonempstr0 = eonempstr0, .elambda = elambda,
-           .type.fitted = type.fitted ))),
+           .eonempstr0 = eonempstr0, .elambda = elambda ))),
   last = eval(substitute(expression({
     M1 <- extra$M1
     misc$link <-
@@ -2884,7 +2821,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   }), list( .lonempstr0 = lonempstr0, .llambda = llambda,
             .eonempstr0 = eonempstr0, .elambda = elambda,
             .imethod = imethod ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -2917,7 +2854,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     lambda    <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
@@ -3033,7 +2970,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   lprob <- attr(eprob, "function.name")
 
   type.fitted <- match.arg(type.fitted,
-                           c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
+                   c("mean", "prob", "pobs0", "pstr0", "onempstr0"))[1]
 
 
   if (is.Numeric(ipstr0))
@@ -3116,9 +3053,12 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
     if ( .imethod == 1)
       mustart <- (mustart + y) / 2
 
+    if ( .imethod == 2)
+      mustart <- mean(mustart) + 0 * y
+
 
     extra$type.fitted <- .type.fitted
-    extra$dimnamesy   <- dimnames(y)
+    extra$colnames.y  <- colnames(y)
 
 
 
@@ -3138,9 +3078,8 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
         }
     }
 
-    phi.init[phi.init <= -0.10] <- 0.10  # Lots of sample variation
-    phi.init[phi.init <=  0.05] <- 0.15  # Last resort
-    phi.init[phi.init >=  0.80] <- 0.80  # Last resort
+    phi.init[phi.init <=  0.05] <- 0.05  # Last resort
+    phi.init[phi.init >=  0.95] <- 0.95  # Last resort
 
     if ( length(mustart) && !length(etastart))
       mustart <- cbind(rep_len(phi.init, n),
@@ -3148,9 +3087,10 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   }), list( .lpstr0 = lpstr0, .lprob = lprob,
             .epstr0 = epstr0, .eprob = eprob,
             .ipstr0 = ipstr0,
-            .type.fitted = type.fitted,          
+            .type.fitted = type.fitted,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
     mubin <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
 
@@ -3176,20 +3116,9 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                   "pobs0"     = pstr0 + (1-pstr0)*(1-mubin)^nvec,  # P(Y=0)
                   "pstr0"     =     pstr0,
                   "onempstr0" = 1 - pstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpstr0 = lpstr0, .lprob = lprob,
-           .epstr0 = epstr0, .eprob = eprob,
-           .type.fitted = type.fitted ))),
+           .epstr0 = epstr0, .eprob = eprob ))),
   last = eval(substitute(expression({
     misc$link <-    c("pstr0" = .lpstr0 , "prob" = .lprob )
 
@@ -3206,7 +3135,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
           theta2eta(mu[, 2], .lprob  , earg = .eprob  ))
   }, list( .lpstr0 = lpstr0, .lprob = lprob,
            .epstr0 = epstr0, .eprob = eprob ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -3384,7 +3313,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   }, list( .zero = zero,
            .type.fitted = type.fitted
          ))),
-      
+
   initialize = eval(substitute(expression({
     if (!all(w == 1))
       extra$orig.w <- w
@@ -3433,7 +3362,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
 
 
     extra$type.fitted <- .type.fitted
-    extra$dimnamesy   <- dimnames(y)
+    extra$colnames.y  <- colnames(y)
 
 
 
@@ -3464,9 +3393,10 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
   }), list( .lonempstr0 = lonempstr0, .lprob = lprob,
             .eonempstr0 = eonempstr0, .eprob = eprob,
             .ionempstr0 = ionempstr0,
-            .type.fitted = type.fitted,          
+            .type.fitted = type.fitted,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     mubin     <- eta2theta(eta[, 1], .lprob      , earg = .eprob      )
     onempstr0 <- eta2theta(eta[, 2], .lonempstr0 , earg = .eonempstr0 )
 
@@ -3492,20 +3422,9 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
                   "pobs0"     = 1 - onempstr0 + (onempstr0)*(1-mubin)^nvec,  # P(Y=0)
                   "pstr0"     = 1 - onempstr0,
                   "onempstr0" =     onempstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
-           .eonempstr0 = eonempstr0, .eprob = eprob,
-           .type.fitted = type.fitted ))),
+           .eonempstr0 = eonempstr0, .eprob = eprob ))),
   last = eval(substitute(expression({
     misc$link <-    c("prob" = .lprob , "onempstr0" = .lonempstr0 )
 
@@ -3524,7 +3443,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
           theta2eta(mu[, 2], .lonempstr0 , earg = .eonempstr0 ))
   }, list( .lonempstr0 = lonempstr0, .lprob = lprob,
            .eonempstr0 = eonempstr0, .eprob = eprob ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -3709,8 +3628,8 @@ qzibinom <- function(p, size, prob, pstr0 = 0
   pstr0 <- rep_len(pstr0, LLL)
 
 
-  ans <- p 
-  ans[p <= pstr0] <- 0 
+  ans <- p
+  ans[p <= pstr0] <- 0
   ans[p >  pstr0] <-
     qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
            size[p > pstr0],
@@ -3723,7 +3642,7 @@ qzibinom <- function(p, size, prob, pstr0 = 0
   ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
   if (any(ind0)) {
     pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
-    ans[p[ind0] <= pobs0] <- 0 
+    ans[p[ind0] <= pobs0] <- 0
     pindex <- (1:LLL)[ind0 & (p > pobs0)]
     Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
     ans[pindex] <- qposbinom((p[pindex] - Pobs0) / (1 - Pobs0),
@@ -3973,7 +3892,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
            .type.fitted = type.fitted,
            .mds.min = mds.min))),
 
-      
+
   initialize = eval(substitute(expression({
     M1 <- 3
 
@@ -3993,11 +3912,11 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
 
 
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
 
 
-    
     mynames1 <- param.names("pstr0", NOS)
     mynames2 <- param.names("munb",  NOS)
     mynames3 <- param.names("size",  NOS)
@@ -4023,7 +3942,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
       if (length( .iprobs.y ))
         gprobs.y <- .iprobs.y
       gsize.mux <- .gsize.mux  # gsize.mux is on a relative scale
-          
+
       for (jay in 1:NOS) {  # For each response 'y_jay'... do:
         TFvec <- y[, jay] > 0  # Important to exclude the 0s
         posyvec <- y[TFvec, jay]
@@ -4057,7 +3976,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
 
 
 
-      
+
         if (length( .ipstr0 )) {
           pstr0.init <- matrix( .ipstr0 , n, ncoly, byrow = TRUE)
         } else {
@@ -4076,7 +3995,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
             pstr0.init[, jay] <- Phi.init
           }  # for (jay)
         }
-          
+
 
 
 
@@ -4097,8 +4016,9 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
             .iprobs.y = iprobs.y,
             .ipstr0.small = ipstr0.small,
             .imethod = imethod ))),
-      
+
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 3)
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -4133,22 +4053,11 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
                   "pobs0"     = pstr0 + (1 - pstr0) * prob0,  # P(Y=0)
                   "pstr0"     =     pstr0,
                   "onempstr0" = 1 - pstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))        
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
            .epstr0 = epstr0, .esize = esize, .emunb = emunb,
-           .type.fitted = type.fitted, .mds.min = mds.min ))),
-      
+           .mds.min = mds.min ))),
+
   last = eval(substitute(expression({
     misc$link <-
       c(rep_len( .lpstr0 , NOS),
@@ -4173,7 +4082,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
 
     misc$max.chunk.MB <- .max.chunk.MB
     misc$cutoff.prob <- .cutoff.prob
-    misc$imethod <- .imethod 
+    misc$imethod <- .imethod
     misc$nsimEIM <- .nsimEIM
     misc$expected <- TRUE
     misc$multipleResponses <- TRUE
@@ -4215,7 +4124,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
    pstr0 <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lpstr0 , earg = .epstr0 )
@@ -4543,7 +4452,7 @@ zinegbinomial.control <- function(save.weights = TRUE, ...) {
     wz[, M +       M1*(1:NOS) - 1] <- ned2l.dmunbsize * dmunb.deta *
                                                         dsize.deta
 
-    
+
 
 
 
@@ -4568,9 +4477,9 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
 
 
  zinegbinomialff <-
-  function(lmunb = "loge", lsize = "loge", lonempstr0 = "logit", 
+  function(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
            type.fitted = c("mean", "munb", "pobs0", "pstr0", "onempstr0"),
-           imunb = NULL, isize = NULL, ionempstr0 = NULL,  
+           imunb = NULL, isize = NULL, ionempstr0 = NULL,
            zero = c("size", "onempstr0"),
            imethod = 1,
 
@@ -4661,7 +4570,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
            .mds.min = mds.min
          ))),
 
-      
+
   initialize = eval(substitute(expression({
     M1 <- 3
 
@@ -4680,13 +4589,13 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
 
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$type.fitted <- .type.fitted
-    extra$dimnamesy   <- dimnames(y)
+    extra$colnames.y  <- colnames(y)
+
 
 
-    
     mynames1 <- param.names("munb",       NOS)
     mynames2 <- param.names("size",       NOS)
-    mynames3 <- param.names("onempstr0",  NOS) 
+    mynames3 <- param.names("onempstr0",  NOS)
     predictors.names <-
       c(namesof(mynames1, .lmunb  , earg = .emunb  , tag = FALSE),
         namesof(mynames2, .lsize  , earg = .esize  , tag = FALSE),
@@ -4742,7 +4651,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
 
 
 
-      
+
         if (length( .ionempstr0 )) {
           onempstr0.init <- matrix( .ionempstr0 , n, ncoly, byrow = TRUE)
         } else {
@@ -4759,7 +4668,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
             onempstr0.init[, jay] <- 1 - Phi.init
           }  # for (jay)
         }
-          
+
 
 
 
@@ -4780,7 +4689,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
             .ipstr0.small = ipstr0.small,
             .iprobs.y = iprobs.y,
             .imethod = imethod ))),
-      
+
   linkinv = eval(substitute(function(eta, extra = NULL) {
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -4820,22 +4729,11 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
                   "pobs0"     = 1 - onempstr0 + onempstr0 * prob0,  # P(Y=0)
                   "pstr0"     = 1 - onempstr0,
                   "onempstr0" =     onempstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans))        
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lonempstr0 = lonempstr0, .lsize = lsize, .lmunb = lmunb,
            .eonempstr0 = eonempstr0, .esize = esize, .emunb = emunb,
            .type.fitted = type.fitted, .mds.min = mds.min ))),
-      
+
   last = eval(substitute(expression({
     misc$link <-
       c(rep_len( .lmunb      , NOS),
@@ -4902,7 +4800,7 @@ zinegbinomialff.control <- function(save.weights = TRUE, ...) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     munb <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lmunb , earg = .emunb )
@@ -5297,7 +5195,7 @@ qzigeom <- function(p, prob, pstr0 = 0) {
   ans <- p <- rep_len(p,     LLL)
   prob     <- rep_len(prob,  LLL)
   pstr0    <- rep_len(pstr0, LLL)
-  ans[p <= pstr0] <- 0 
+  ans[p <= pstr0] <- 0
   ind1 <- (p > pstr0)
   ans[ind1] <-
     qgeom((p[ind1] - pstr0[ind1]) / (1 - pstr0[ind1]),
@@ -5309,7 +5207,7 @@ qzigeom <- function(p, prob, pstr0 = 0) {
   ind0 <- (deflat.limit <= pstr0) & (pstr0 <  0)
   if (any(ind0)) {
     pobs0 <- pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
-    ans[p[ind0] <= pobs0] <- 0 
+    ans[p[ind0] <= pobs0] <- 0
     pindex <- (1:LLL)[ind0 & (p > pobs0)]
     Pobs0 <- pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
     ans[pindex] <- 1 + qgeom((p[pindex] - Pobs0) / (1 - Pobs0),
@@ -5422,8 +5320,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     w <- temp5$w
     y <- temp5$y
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
 
     mynames1 <- param.names("pstr0", ncoly)
@@ -5483,6 +5381,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
             .bias.red = bias.red,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     pstr0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
     prob   <- eta2theta(eta[, c(FALSE, TRUE)], .lprob  , earg = .eprob  )
 
@@ -5501,20 +5400,9 @@ rzigeom <- function(n, prob, pstr0 = 0) {
                   "pobs0"     = pstr0 + (1 - pstr0) * prob,  # P(Y=0)
                   "pstr0"     =     pstr0,
                   "onempstr0" = 1 - pstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lprob = lprob, .lpstr0 = lpstr0,
-           .eprob = eprob, .epstr0 = epstr0,
-           .type.fitted = type.fitted ))),
+           .eprob = eprob, .epstr0 = epstr0 ))),
   last = eval(substitute(expression({
     temp.names <- c(rep_len( .lpstr0 , NOS),
                     rep_len( .lprob  , NOS))
@@ -5538,10 +5426,9 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     misc$bias.red <- .bias.red
     misc$expected <- .expected
     misc$ipstr0 <- .ipstr0
-    misc$type.fitted <- .type.fitted
 
 
-    misc$pobs0 <- pobs0 
+    misc$pobs0 <- pobs0
     if (length(dimnames(y)[[2]]) > 0)
       dimnames(misc$pobs0) <- dimnames(y)
     misc$pstr0 <- pstr0
@@ -5552,7 +5439,6 @@ rzigeom <- function(n, prob, pstr0 = 0) {
                             .ipstr0 = ipstr0,
             .zero = zero,
             .expected = expected,
-            .type.fitted = type.fitted,
             .bias.red = bias.red,
             .imethod = imethod ))),
   loglikelihood = eval(substitute(
@@ -5584,7 +5470,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     pstr0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
@@ -5596,7 +5482,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     pstr0  <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr0 , earg = .epstr0 )
     prob   <- eta2theta(eta[, c(FALSE, TRUE)], .lprob  , earg = .eprob  )
 
-    okay1 <- all(is.finite(prob )) && all(0 < prob) && all(prob < 1) &&
+    okay1 <- all(is.finite(prob )) && all(0 < prob & prob < 1) &&
              all(is.finite(pstr0)) && all(pstr0 < 1)
     prob0 <- prob
     deflat.limit <- -prob0 / (1 - prob0)
@@ -5770,8 +5656,8 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     w <- temp5$w
     y <- temp5$y
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
-    extra$type.fitted      <- .type.fitted
-    extra$dimnamesy <- dimnames(y)
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
 
     mynames1 <- param.names("prob",      ncoly)
@@ -5831,6 +5717,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
             .bias.red = bias.red,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     prob      <- eta2theta(eta[, c(TRUE, FALSE)], .lprob      ,
                            earg = .eprob  )
     onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
@@ -5851,20 +5738,9 @@ rzigeom <- function(n, prob, pstr0 = 0) {
                   "pobs0"     = 1 - onempstr0 + onempstr0 * prob,  # P(Y=0)
                   "pstr0"     = 1 - onempstr0,
                   "onempstr0" =     onempstr0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lprob = lprob, .lonempstr0 = lonempstr0,
-           .eprob = eprob, .eonempstr0 = eonempstr0,
-           .type.fitted = type.fitted ))),
+           .eprob = eprob, .eonempstr0 = eonempstr0 ))),
   last = eval(substitute(expression({
     temp.names <- c(rep_len( .lprob      , NOS),
                     rep_len( .lonempstr0 , NOS))
@@ -5890,7 +5766,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     misc$ionempstr0   <- .ionempstr0
 
 
-    misc$pobs0 <- pobs0 
+    misc$pobs0 <- pobs0
     if (length(dimnames(y)[[2]]) > 0)
       dimnames(misc$pobs0) <- dimnames(y)
     misc$onempstr0 <- onempstr0
@@ -5935,7 +5811,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     prob       <- eta2theta(eta[, c(TRUE, FALSE)], .lprob      ,
@@ -6354,14 +6230,14 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
-    extra$dimnamesy <- dimnames(y)
-    extra$type.fitted      <- .type.fitted
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
 
     predictors.names <-
         c(namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE),
           namesof("prob" , .lprob  , earg = .eprob  , tag = FALSE))
-          
+
 
 
     orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
@@ -6391,7 +6267,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
       etastart <-
         cbind(theta2eta(phi.init, .lpobs0, earg = .epobs0 ),
               theta2eta( mustart, .lprob,  earg = .eprob  ))
-              
+
 
       mustart <- NULL
     }
@@ -6402,6 +6278,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             .type.fitted = type.fitted ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -6410,9 +6287,9 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     type.fitted <- match.arg(type.fitted,
                              c("mean", "prob", "pobs0"))[1]
-    
-    phi0  <- eta2theta(eta[, 1], .lpobs0, earg = .epobs0 )
-    prob  <- eta2theta(eta[, 2], .lprob,  earg = .eprob  )
+
+    phi0  <- eta2theta(eta[, 1], .lpobs0 , earg = .epobs0 )
+    prob  <- eta2theta(eta[, 2], .lprob  , earg = .eprob  )
     orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
     new.w  <- if (length(extra$new.w))  extra$new.w  else 1
     Size <- new.w / orig.w
@@ -6421,17 +6298,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
                   "mean"      = (1 - phi0) * prob / (1 - (1 - prob)^Size),
                   "prob"      = prob,
                   "pobs0"     = phi0)  # P(Y=0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lprob = lprob, .lpobs0 = lpobs0,
            .eprob = eprob, .epobs0 = epobs0 ))),
 
@@ -6518,8 +6385,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     ans <- cbind(c(orig.w) * dl.dphi0 * dphi0.deta,
                              dl.dprob * dprob.deta)
-                 
-                 
+
+
     ans
   }), list( .lprob = lprob, .lpobs0 = lpobs0,
             .eprob = eprob, .epobs0 = epobs0 ))),
@@ -6676,8 +6543,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
-    extra$dimnamesy   <- dimnames(y)
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
 
     predictors.names <-
@@ -6722,6 +6589,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             .type.fitted = type.fitted ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -6730,7 +6598,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     type.fitted <- match.arg(type.fitted,
                      c("mean", "prob", "pobs0", "onempobs0"))[1]
-    
+
     prob      <- eta2theta(eta[, 1], .lprob      , earg = .eprob  )
     onempobs0 <- eta2theta(eta[, 2], .lonempobs0 , earg = .eonempobs0 )
     orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
@@ -6742,23 +6610,13 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
                   "prob"      = prob,
                   "pobs0"     = 1 - onempobs0,  # P(Y=0)
                   "onempobs0" =     onempobs0)  # P(Y>0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lprob = lprob, .lonempobs0 = lonempobs0,
            .eprob = eprob, .eonempobs0 = eonempobs0 ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(prob = .lprob, onempobs0 = .lonempobs0 )
-    misc$earg <- list(prob = .eprob, onempobs0 = .eonempobs0 )
+    misc$link <-    c(prob = .lprob , onempobs0 = .lonempobs0 )
+    misc$earg <- list(prob = .eprob , onempobs0 = .eonempobs0 )
 
     misc$imethod  <- .imethod
     misc$zero     <- .zero
@@ -6848,7 +6706,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     ans <- cbind(            dl.dprob      * dprob.deta,
                  c(orig.w) * dl.donempobs0 * donempobs0.deta)
-                 
+
     ans
   }), list( .lprob = lprob, .lonempobs0 = lonempobs0,
             .eprob = eprob, .eonempobs0 = eonempobs0 ))),
@@ -6977,10 +6835,10 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
-    extra$dimnamesy <- dimnames(y)
-    extra$type.fitted      <- .type.fitted
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
 
-    
     mynames1 <- param.names("pobs0", ncoly)
     mynames2 <- param.names("prob",  ncoly)
     predictors.names <-
@@ -7020,7 +6878,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             .epobs0 = epobs0, .eprob = eprob,
             .ipobs0 = ipobs0, .iprob = iprob,
             .imethod = imethod,
-            .type.fitted = type.fitted ))), 
+            .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -7044,17 +6902,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
                   "prob"      = prob,
                   "pobs0"     =      phi0,  # P(Y=0)
                   "onempobs0" =  1 - phi0)  # P(Y>0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpobs0 = lpobs0, .lprob = lprob,
            .epobs0 = epobs0, .eprob = eprob ))),
   last = eval(substitute(expression({
@@ -7119,7 +6967,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     phi0 <- cbind(eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
@@ -7293,10 +7141,10 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     extra$NOS <- NOS <- ncoly <- ncol(y)  # Number of species
     extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
 
-    extra$dimnamesy   <- dimnames(y)
     extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
+
 
-    
     mynames1 <- param.names("prob",       ncoly)
     mynames2 <- param.names("onempobs0",  ncoly)
     predictors.names <-
@@ -7331,14 +7179,14 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
       etastart <-
         cbind(theta2eta(    prob.init, .lprob      , earg = .eprob      ),
               theta2eta(1 - phi0.init, .lonempobs0 , earg = .eonempobs0 ))
-                        
+
       etastart <- etastart[, interleave.VGAM(ncol(etastart), M1 = M1)]
     }
   }), list( .lonempobs0 = lonempobs0, .lprob = lprob,
             .eonempobs0 = eonempobs0, .eprob = eprob,
             .ionempobs0 = ionempobs0, .iprob = iprob,
             .imethod = imethod,
-            .type.fitted = type.fitted ))), 
+            .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -7349,8 +7197,8 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
     type.fitted <- match.arg(type.fitted,
                      c("mean", "prob", "pobs0", "onempobs0"))[1]
 
-    NOS <- extra$NOS
     M1 <- 2
+    NOS <- ncol(eta) / M1
 
     prob      <- cbind(eta2theta(eta[, M1*(1:NOS)-1, drop = FALSE],
                                  .lprob  , earg = .eprob ))
@@ -7363,17 +7211,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
                   "prob"      =  prob,
                   "pobs0"     =  1 - onempobs0,  # P(Y=0)
                   "onempobs0" =      onempobs0)  # P(Y>0)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lonempobs0 = lonempobs0, .lprob = lprob,
            .eonempobs0 = eonempobs0, .eprob = eprob ))),
   last = eval(substitute(expression({
@@ -7439,7 +7277,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     onempobs0 <- cbind(eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
@@ -7562,7 +7400,7 @@ doipospois <- function(x, lambda, pstr1 = 0, log = FALSE) {
   if (length(pstr1)  != LLL) pstr1  <- rep_len(pstr1,  LLL)
 
   ans <- rep(NA_real_, LLL)
-  index1 <- (x == 1) 
+  index1 <- (x == 1)
   if (log.arg) {
     ans[ index1] <- log(pstr1[ index1] + (1 - pstr1[ index1]) *
                         dpospois(x[ index1], lambda[ index1]))
@@ -7631,7 +7469,7 @@ qoipospois <- function(p, lambda, pstr1 = 0) {
   ans[p < 0] <- NaN
   ans[1 < p] <- NaN
   ans[lambda <= 0] <- NaN
-  
+
   ans
 }  # qoipospois
 
@@ -7690,7 +7528,7 @@ roipospois <- function(n, lambda, pstr1 = 0) {
             "Links:    ",
             namesof("pstr1",  lpstr10, earg = epstr10 ), ", ",
             namesof("lambda", llambda, earg = elambda ), "\n",
-            "Mean:     (1 - pstr1) * lambda / (1 - exp(-lambda))"),
+            "Mean:     pstr1 + (1 - pstr1) * lambda / (1 - exp(-lambda))"),
 
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M,
@@ -7731,9 +7569,9 @@ roipospois <- function(n, lambda, pstr1 = 0) {
 
     NOS <- ncoly <- ncol(y)
     extra$ncoly <- ncoly
-    extra$dimnamesy <- dimnames(y)
     M <- M1 * ncoly
-    extra$type.fitted      <- .type.fitted
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("pstr1",  ncoly)
     mynames2 <- param.names("lambda", ncoly)
@@ -7798,6 +7636,7 @@ roipospois <- function(n, lambda, pstr1 = 0) {
             .imethod = imethod,  # .probs.y = probs.y,
             .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -7817,21 +7656,9 @@ roipospois <- function(n, lambda, pstr1 = 0) {
              "pobs1"     = doipospois(1, lambda = lambda, pstr1 = phimat), # Pr(Y=1)
              "pstr1"     =     phimat,
              "onempstr1" = 1 - phimat)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans)) 
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpstr10 = lpstr10, .llambda = llambda,
-           .epstr10 = epstr10, .elambda = elambda,
-           .type.fitted = type.fitted
+           .epstr10 = epstr10, .elambda = elambda
          ))),
   last = eval(substitute(expression({
     misc$link <-
@@ -7848,7 +7675,7 @@ roipospois <- function(n, lambda, pstr1 = 0) {
     }
   }), list( .lpstr10 = lpstr10, .llambda = llambda,
             .epstr10 = epstr10, .elambda = elambda ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -7875,7 +7702,7 @@ roipospois <- function(n, lambda, pstr1 = 0) {
   function(object, nsim) {
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     phimat <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr10 , earg = .epstr10 )
@@ -7949,7 +7776,7 @@ roipospois <- function(n, lambda, pstr1 = 0) {
     ned2l.dlambda2 <-
      (((1 - phimat) * dpmf1.dlambda)^2) / pobs1 -
        (1 - phimat) * d2pmf1.dlambda2 +
-       (1 - phimat) * (1/lambda - exp(-lambda) * 
+       (1 - phimat) * (1/lambda - exp(-lambda) *
        (1 - exp(-lambda) - lambda * exp(-lambda)) / (expm1(-lambda))^3)
 
     wz <- array(c(c(w) * ned2l.dphimat2 * dphimat.deta^2,
@@ -8066,26 +7893,16 @@ roiposbinom <- function(n, size, prob, pstr1 = 0) {
 
 
 
-if (FALSE)
  oiposbinomial <-
-  function(lpstr1 = "logit", lprob = "loge",
+  function(lpstr1 = "logit", lprob = "logit",
            type.fitted = c("mean", "prob", "pobs1", "pstr1", "onempstr1"),
            iprob = NULL,
-           gpstr1 = (1:19)/20,
-           gprobs.y = (1:19)/20,  # 20160518; grid for finding lambd.init
-           imethod = 1,
+           gpstr1 = ppoints(9),  # (1:19)/20,
+           gprob  = ppoints(9),  # (1:19)/20,  # 20160613; grid for finding prob
+           multiple.responses = FALSE,
            zero = NULL) {
 
-
-
-  stop("this family function is not working yet")
-
-
-  if (!is.Numeric(imethod, length.arg = 1,
-                  integer.valued = TRUE, positive = TRUE) ||
-      imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
-
+  gprobb <- gprob
 
   lpstr1 <- as.list(substitute(lpstr1))
   epstr1 <- link2list(lpstr1)
@@ -8102,8 +7919,9 @@ if (FALSE)
 
   iprobb <- iprob
   if (length(iprobb))
-    if (!is.Numeric(iprobb, positive = TRUE))
-      stop("argument 'iprob' values must be positive")
+    if (!is.Numeric(iprobb, positive = TRUE) ||
+        any(iprobb >= 1))
+      stop("argument 'iprob' values must be in (0, 1)")
 
 
   new("vglmff",
@@ -8124,38 +7942,62 @@ if (FALSE)
     list(M1 = 2,
          Q1 = 1,
          expected = TRUE,
-         imethod = .imethod ,
-         multipleResponses = TRUE,
+         multipleResponses = .multiple.responses ,  # FALSE,  # TRUE,
          parameters.names = c("pstr1", "prob"),
          type.fitted  = .type.fitted ,
          zero = .zero )
   }, list( .zero = zero,
-           .imethod = imethod,
+           .multiple.responses = multiple.responses,
            .type.fitted = type.fitted
          ))),
   initialize = eval(substitute(expression({
     M1 <- 2
+    multiple.responses <- .multiple.responses
+    y <- as.matrix(y)
+    w.orig <- as.matrix(w)  # zz this may be of a weird dimension
 
     temp5 <-
     w.y.check(w = w, y = y,
               Is.positive.y = TRUE,
-              Is.nonnegative.y = TRUE,
-              Is.integer.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
+              Is.integer.y =  if (multiple.responses) FALSE else FALSE,
+              ncol.w.max =  if (multiple.responses) ncol(y) else 1,
+              ncol.y.max =  if (multiple.responses) ncol(y) else ncol(y),
               out.wy = TRUE,
-              colsyperw = 1,
+              colsyperw = if (multiple.responses) 1 else ncol(y),
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
 
 
+    if (multiple.responses) {
+      if (!all(w == round(w)))
+        stop("the 'weights' argument must be integer valued")
+      if (min(y) < 0 || max(y) > 1)
+        stop("the response must be a proportion")
+      Nvec <- w
+    } else {
+      if (ncol(y) > 1) {
+        Nvec <- rowSums(y)
+        y[, 1] <- y[, 1] / Nvec
+        y <- y[, 1, drop = FALSE]
+        w[, 1] <- w[, 1] * Nvec  # == w.orig * Nvec
+        w <- w[, 1, drop = FALSE]
+      } else {
+        Nvec <- w  # rep_len(1, nrow(x))
+        if (!all(Nvec == round(Nvec)))
+          stop("number of trials is not integer-valued")
+      }
+    }
+    extra$Nvec <- Nvec
+    w.orig <- matrix(w.orig, n, ncol(y))  # Now no longer of a weird dimension
+
+
 
     NOS <- ncoly <- ncol(y)
     extra$ncoly <- ncoly
-    extra$dimnamesy <- dimnames(y)
     M <- M1 * ncoly
-    extra$type.fitted      <- .type.fitted
+    extra$type.fitted <- .type.fitted
+    extra$colnames.y  <- colnames(y)
 
     mynames1 <- param.names("pstr1", ncoly)
     mynames2 <- param.names("prob",  ncoly)
@@ -8166,60 +8008,47 @@ if (FALSE)
 
 
     if (!length(etastart)) {
-
-      lambd.init <-
+      probb.init <-
       pstr1.init <- matrix(NA_real_, n, NOS)
       gpstr1 <- .gpstr1
-      gprobs.y  <- .gprobs.y
+      gprobb <- .gprobb
       iprobb <- .iprobb
+      if (length(iprobb))
+        gprobb <- iprobb
 
       oiposbinom.Loglikfun <- function(pstr1, prob, y, x, w, extraargs) {
-        sum(c(w) * doiposbinom(x = y, pstr1 = pstr1,
-                               prob = probb, log = TRUE))
+        sum(c(w) * doiposbinom(x = y, pstr1 = pstr1, size = extraargs$size,
+                               prob = prob, log = TRUE))
       }
 
 
       for (jay in 1:NOS) {  # For each response 'y_jay'... do:
-        TFvec <- y[, jay] > 1  # Important to exclude the 1s
-        posyvec <- y[TFvec, jay]  # Variable name unchanged (lazy)
-        lambd.init.jay <- if ( .imethod == 1) {
-          quantile(posyvec, probs = gprobs.y) - 1/2  # + 1/16
-        } else if ( .imethod == 2) {
-          weighted.mean(posyvec, w = w[TFvec, jay]) - 1/2
-        } else {
-          warning("argument 'imethod' should have the value 1 or 2")
-        }
-        if (length(iprobb)) { # zz
-          lambd.init.jay <- iprobb[jay]
-        } else {
-        }
-
 
 
         try.this <-
-          grid.search2(gpstr1, lambd.init.jay,
+          grid.search2(gpstr1, gprobb,
                        objfun = oiposbinom.Loglikfun,
-                       y = y[, jay],  # x = x[TFvec, , drop = FALSE],
-                       w = w[, jay],
+                       y = round(y[, jay] * Nvec[, jay]),
+                       w = 1,  # w.orig[, jay], or 1, or w[, jay], possibly
+                       extraargs = list(size = Nvec),
                        ret.objfun = TRUE)  # Last value is the loglik
         pstr1.init[, jay] <-  try.this["Value1"]
-        lambd.init[, jay] <- (try.this["Value2"] + y[, jay]) / 2
-        lambd.init[, jay] <-  try.this["Value2"]
+        probb.init[, jay] <-  try.this["Value2"]
       }  # for (jay ...)
 
       etastart <- cbind(theta2eta(pstr1.init, .lpstr1 , earg = .epstr1 ),
-                        theta2eta(lambd.init, .lprobb , earg = .eprobb ))[,
+                        theta2eta(probb.init, .lprobb , earg = .eprobb ))[,
                         interleave.VGAM(M, M1 = M1)]
       mustart <- NULL  # Since etastart has been computed.
     }  # End of !length(etastart)
   }), list( .lpstr1 = lpstr1, .lprobb = lprobb,
             .epstr1 = epstr1, .eprobb = eprobb,
-                                .iprobb = iprobb,
-            .gpstr1 = gpstr1,
-            .gprobs.y = gprobs.y,
-            .imethod = imethod,  # .probs.y = probs.y,
+                              .iprobb = iprobb,
+            .gpstr1 = gpstr1, .gprobb = gprobb,
+            .multiple.responses = multiple.responses,
             .type.fitted = type.fitted ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
+    NOS <- ncol(eta) / c(M1 = 2)
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
                              "Returning the 'mean'.")
@@ -8231,29 +8060,22 @@ if (FALSE)
 
     pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
     probb <- eta2theta(eta[, c(FALSE, TRUE)], .lprobb , earg = .eprobb )
+    Nvec  <- extra$Nvec
+    if (!is.numeric(Nvec))
+      stop("something gone wrong with 'Nvec'")
 
     ans <-
       switch(type.fitted,
-             "mean"      = pstr1 - (1 - pstr1) * probb / expm1(-probb),
-             "prob"    = probb,
-             "pobs1"     = doipospois(1, prob = probb, pstr1 = pstr1), # Pr(Y=1)
+             "mean"      = pstr1 + (1 - pstr1) * Nvec *
+                           probb / (1 - (1-probb)^Nvec),
+             "prob"      = probb,
+             "pobs1"     = doiposbinom(1, prob = probb,
+                                       size = Nvec, pstr1 = pstr1), # Pr(Y=1)
              "pstr1"     =     pstr1,
              "onempstr1" = 1 - pstr1)
-    if (length(extra$dimnamesy) &&
-        is.matrix(ans) &&
-        length(extra$dimnamesy[[2]]) == ncol(ans) &&
-        length(extra$dimnamesy[[2]]) > 0) {
-      if (length(extra$dimnamesy[[1]]) == nrow(ans)) 
-        dimnames(ans) <- extra$dimnamesy
-    } else
-    if (NCOL(ans) == 1 &&
-        is.matrix(ans)) {
-      colnames(ans) <- NULL
-    }
-    ans
+    label.cols.y(ans, colnames.y = extra$colnames.y, NOS = NOS)
   }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
-           .epstr1 = epstr1, .eprobb = eprobb,
-           .type.fitted = type.fitted
+           .epstr1 = epstr1, .eprobb = eprobb
          ))),
   last = eval(substitute(expression({
     misc$link <-
@@ -8270,7 +8092,7 @@ if (FALSE)
     }
   }), list( .lpstr1 = lpstr1, .lprobb = lprobb,
             .epstr1 = epstr1, .eprobb = eprobb ))),
-  loglikelihood = eval(substitute( 
+  loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
@@ -8279,8 +8101,10 @@ if (FALSE)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * doipospois(x = y, pstr1 = pstr1, probb = probb,
-                                   log = TRUE)
+      ll.elts <- c(w) * doiposbinom(x = round(extra$Nvec * y),
+                                    size = extra$Nvec,  # w,
+                                    pstr1 = pstr1, prob = probb,
+                                    log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -8297,12 +8121,14 @@ if (FALSE)
   function(object, nsim) {
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
+    if (any(pwts != 1))
       warning("ignoring prior weights")
     eta <- predict(object)
     pstr1 <- eta2theta(eta[, c(TRUE, FALSE)], .lpstr1 , earg = .epstr1 )
     probb <- eta2theta(eta[, c(FALSE, TRUE)], .lprobb , earg = .eprobb )
-    roipospois(nsim * length(probb), probb = probb, pstr1 = pstr1)
+    Nvec <- object at extra$Nvec
+    roiposbinom(nsim * length(probb), size = Nvec,
+                probb = probb, pstr1 = pstr1)
   }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
            .epstr1 = epstr1, .eprobb = eprobb ))),
 
@@ -8311,13 +8137,15 @@ if (FALSE)
                         earg = .epstr1 )
     probb <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lprobb ,
                         earg = .eprobb )
+    size <- extra$Nvec
     okay1 <- all(is.finite(pstr1)) && all(pstr1 < 1) &&
-             all(is.finite(probb)) && all(0 < probb) && all(prob < 1)
-    deflat.limit <- size * prob / (1 + (size-1) * prob - 1 / (1-prob)^(size-1))
+             all(is.finite(probb)) && all(0 < probb & probb < 1)
+    deflat.limit <- size * probb / (1 + (size-1) * probb -
+                                    1 / (1-probb)^(size-1))
     okay2.deflat <- TRUE
     if (okay1 && !(okay2.deflat <- all(deflat.limit < pstr1)))
       warning("parameter 'pstr1' is too negative even allowing for ",
-              "0-deflation.")
+              "1-deflation.")
     okay1 && okay2.deflat
   }, list( .lpstr1 = lpstr1, .lprobb = lprobb,
            .epstr1 = epstr1, .eprobb = eprobb ))),
@@ -8331,52 +8159,87 @@ if (FALSE)
     M1 <- 2
     NOS <- M / M1
     pstr1 <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr1 ,
-                        earg = .epstr1 )
+                       earg = .epstr1 )
     probb <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lprobb ,
-                        earg = .eprobb )
+                       earg = .eprobb )
+
+    size <- extra$Nvec
+
+    Qn <- function(n, prob) (1 - prob)^n
+    pmf1 <- size * probb * Qn(size-1, probb) / (1 - Qn(size, probb))
 
-    pmf1 <- -probb * exp(-probb) / expm1(-probb)
-    onempmf1 <- 1 - pmf1  # doipospois(1, probb = probb, pstr1 = pstr1)
+
+    onempmf1 <- 1 - pmf1  # doiposbinom(1, probb = probb, pstr1 = pstr1)
     pobs1 <- pstr1 + (1 - pstr1) * pmf1
-    index1 <- as.matrix(y == 1)
+    index1 <- as.matrix(round(w * y) == 1)
 
     dl.dpstr1 <- onempmf1 / pobs1
     dl.dpstr1[!index1] <- -1 / (1 - pstr1[!index1])
 
-    dpmf1.dprobb <- exp(-probb) *
-        (1 - probb - exp(-probb)) / (expm1(-probb))^2
 
-    d3 <- deriv3( ~ exp(-probb) * probb / (1 - exp(-probb)),
+    d3 <- deriv3( ~ size * probb * ((1 - probb)^(size-1))
+                    / (1 - (1 - probb)^size),
                   c("probb"), hessian = TRUE)
     eval.d3 <- eval(d3)
-    d2pmf1.dprobb2 <- attr(eval.d3, "hessian")
+    dpmf1.dprobb   <- attr(eval.d3, "gradient")  # For checking only
+    d2pmf1.dprobb2 <- attr(eval.d3, "hessian")   #
+    dim(dpmf1.dprobb)   <- c(n, NOS)  # Matrix it, even for NOS==1
     dim(d2pmf1.dprobb2) <- c(n, NOS)  # Matrix it, even for NOS==1
 
-    dl.dprobb <- (1 - pstr1) * dpmf1.dprobb / pobs1  #
-    dl.dprobb[!index1] <- y[!index1] / probb[!index1] - 1 -
-                           1 / expm1(probb[!index1])
+
+
+    dl.dprobb <-  size *
+      (      y  /      probb   -
+       (1  - y) / (1 - probb)  -
+       Qn(size-1, probb) / (1 - Qn(size, probb)))
+    dl.dprobb[index1] <- (1 - pstr1[index1]) *
+                         dpmf1.dprobb[index1] / pobs1[index1]
 
     dpstr1.deta <- dtheta.deta(pstr1, .lpstr1 , earg = .epstr1 )
     dprobb.deta <- dtheta.deta(probb, .lprobb , earg = .eprobb )
 
-    myderiv <- c(w) * cbind(dl.dpstr1 * dpstr1.deta,
-                            dl.dprobb * dprobb.deta)
+    myderiv <- cbind(dl.dpstr1 * dpstr1.deta,  # * c(w),
+                     dl.dprobb * dprobb.deta)
     myderiv[, interleave.VGAM(M, M1 = M1)]
   }), list( .lpstr1 = lpstr1, .lprobb = lprobb,
             .epstr1 = epstr1, .eprobb = eprobb ))),
+
+
+
+
   weight = eval(substitute(expression({
 
-    ned2l.dpstr12 <- onempmf1 / ((1 - pstr1) * pobs1)  #
-    ned2l.dpstr1probb <- dpmf1.dprobb / pobs1  #
-    ned2l.dprobb2 <-
-     (((1 - pstr1) * dpmf1.dprobb)^2) / pobs1 -
-       (1 - pstr1) * d2pmf1.dprobb2 +
-       (1 - pstr1) * (1/probb - exp(-probb) * 
-       (1 - exp(-probb) - probb * exp(-probb)) / (expm1(-probb))^3)
-
-    wz <- array(c(c(w) * ned2l.dpstr12 * dpstr1.deta^2,
-                  c(w) * ned2l.dprobb2 * dprobb.deta^2,
-                  c(w) * ned2l.dpstr1probb * dpstr1.deta * dprobb.deta),
+
+    d4 <- deriv3( ~ size * ((1 - probb)^(size-1)) / (1 - (1 - probb)^size),
+                 c("probb"), hessian = FALSE)
+    eval.d4 <- eval(d4)
+    d2logonempmf0.dprobb2 <- attr(eval.d4, "gradient")
+    dim(d2logonempmf0.dprobb2) <- c(n, NOS)  # Matrix it, even for NOS==1
+
+
+    E2 <- function(size, prob) {
+      size *
+      prob * (1 - Qn(size-1, prob)) /
+     (1 - Qn(size, prob) - size * prob * Qn(size-1, prob))
+    }
+
+    E2mat <- E2(size, probb)
+    RHS <- onempmf1 * (        E2mat  /    probb^2 +
+                       (size - E2mat) / (1-probb)^2 +
+                       d2logonempmf0.dprobb2)
+
+
+    LHS <- -d2pmf1.dprobb2 + ((1-pstr1) / pobs1) * dpmf1.dprobb^2
+
+
+    ned2l.dpstr12 <- onempmf1 / ((1 - pstr1) * pobs1)
+    ned2l.dpstr1probb <- dpmf1.dprobb / pobs1
+    ned2l.dprobb2 <- (1 - pstr1) * (LHS + RHS)
+
+
+    wz <- array(c(ned2l.dpstr12 * dpstr1.deta^2,
+                  ned2l.dprobb2 * dprobb.deta^2,
+                  ned2l.dpstr1probb * dpstr1.deta * dprobb.deta),
                 dim = c(n, M / M1, 3))
     wz <- arwz2wz(wz, M = M, M1 = M1)
     wz
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 02bde96..0003c1b 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -16,10 +16,12 @@
 fittedvlm <- function(object,
                       drop = FALSE,
                       type.fitted = NULL,
+                      percentiles = NULL,
                       ...) {
 
 
-  if (is.null(type.fitted)) {
+  if (is.null(type.fitted) &&
+      is.null(percentiles)) {
     answer <- if (drop) {
       if (!is.matrix(object at fitted.values) ||
           !length(object at fitted.values))
@@ -31,13 +33,22 @@ fittedvlm <- function(object,
         c(object@fitted.values)
       }
     } else {
-        object@fitted.values
+      object@fitted.values
     }
   } else {
     linkinv <- object@family@linkinv
     new.extra <- object@extra
-    new.extra$type.fitted <- type.fitted
+
+
+    if (length(percentiles)) {
+      new.extra$percentiles <- percentiles
+    }
+    if (length(type.fitted)) {
+      new.extra$type.fitted <- type.fitted
+    }
+
     answer <- linkinv(eta = predict(object), extra = new.extra)
+    linkinv <- object@family@linkinv
 
     answer <- if (drop) {
       c(answer)
@@ -103,7 +114,7 @@ predictors.vglm <- function(object, matrix = TRUE, ...) {
 }
 
 
-if (!isGeneric("predictors")) 
+if (!isGeneric("predictors"))
     setGeneric("predictors",
       function(object, ...)
         standardGeneric("predictors"))
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 7b71a07..13a16d8 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/generic.q b/R/generic.q
index 72c0ba0..f606248 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/getxvlmaug.R b/R/getxvlmaug.R
new file mode 100644
index 0000000..d0e91cf
--- /dev/null
+++ b/R/getxvlmaug.R
@@ -0,0 +1,199 @@
+# These functions are
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+mroot2 <- function(A) {
+  if (!isTRUE(all.equal(A, t(A))))
+    stop("Supplied matrix not symmetric")
+
+  U <- chol(A, pivot = TRUE, tol = 0)
+
+  opiv <- order(attr(U, "pivot"))
+  r <- attr(U, "rank")
+  p <- ncol(U)
+  if (r < p) U[(r+1):p, (r+1):p] <- 0
+  rank <- r
+  U <- U[, opiv, drop = FALSE]
+  U
+}  # mroot2
+
+
+
+
+
+mroot3 <- function(A, rank = NULL, transpose = FALSE) {
+  if (is.null(rank)) rank <- 0 
+  if (!isTRUE(all.equal(A, t(A))))
+    stop("Supplied matrix not symmetric")
+  U <- suppressWarnings(chol(A, pivot = TRUE, tol = 0))
+  piv <- order(attr(U, "pivot"))
+  r <- attr(U, "rank")
+  p <- ncol(U)
+  if (r < p) U[(r+1):p, (r+1):p] <- 0
+  if (rank < 1) rank <- r
+  U <- U[, piv, drop = FALSE]
+  if (transpose) t(U[1:rank, , drop = FALSE]) else
+                   U[1:rank, , drop = FALSE]
+}  # mroot3
+
+
+
+
+
+get.X.VLM.aug <-
+  function(constraints = constraints, sm.osps.list = sm.osps.list) {
+  assignx <- sm.osps.list$assignx
+  nassignx <- names(assignx)
+  indexterms <- sm.osps.list$indexterms
+
+
+  which.X.sm.osps <- sm.osps.list$which.X.sm.osps
+
+  S.arg <- sm.osps.list$S.arg
+
+  sparlist <- sm.osps.list$sparlist
+
+  ridge.adj <- sm.osps.list$ridge.adj
+
+  term.labels <- sm.osps.list$term.labels
+
+
+
+  spar.new <- list()
+  pen.new.list <- list()
+  ncol.X.sm.osps <- sapply(which.X.sm.osps, length)
+  ncolHlist.model <- unlist(lapply(constraints, ncol))
+
+
+  ncolHlist.new <- ncolHlist.model
+  if (names(constraints)[[1]] == "(Intercept)") {
+    ncolHlist.new <- ncolHlist.new[-1]
+    nassignx <- nassignx[-1]
+  }
+
+
+
+  ncol.H.sm.osps <- ncolHlist.new[indexterms]
+  nsm.osps <- nassignx[indexterms]
+
+
+
+  sparlen <- sapply(sparlist, length)
+
+
+
+
+
+
+  for (ii in seq_along(ncol.H.sm.osps)) {
+    nspar <- sparlen[ii]  # sparlen[[ii]]
+
+
+
+    sparlist.use <- sparlist[[ii]]
+    sparlist.use[sparlist.use < 0] <- 0
+
+
+
+    spar.new[[ii]] <- if (nspar == ncol.H.sm.osps[ii]) {
+      sparlist.use
+    } else {
+      if (ncol.H.sm.osps[ii] < nspar)
+        warning("too many 'spar' values; using the first few")
+      rep_len(sparlist.use, ncol.H.sm.osps[ii])
+    }
+   
+
+    names(spar.new)[[ii]] <- nsm.osps[ii]  # nsm.osps[[ii]]
+
+
+
+
+
+
+
+    if (ridge.adj[[ii]] == 0) {
+      spar.diag <- diag(sqrt(spar.new[[ii]]))
+      pen.noridge <- kronecker(spar.diag, S.arg[[ii]])
+      ooo <- matrix(1:(ncol.H.sm.osps[ii] * ncol.X.sm.osps[ii]),
+                    ncol = ncol.X.sm.osps[ii], byrow = TRUE)
+      pen.new.list[[ii]] <- pen.noridge[, ooo]
+      names(pen.new.list)[[ii]] <- nsm.osps[ii]  # nsm.osps[[ii]]
+    } else {
+      ioffset <- 0
+      joffset <- 0
+      Dmat1 <-
+        matrix(0,
+               ncol.H.sm.osps[ii] * (ncol(S.arg[[ii]]) + nrow(S.arg[[ii]])),
+               ncol.H.sm.osps[ii] *  ncol(S.arg[[ii]]))
+      for (jay in 1:(ncol.H.sm.osps[ii])) {
+
+
+
+        pen.set <- mroot2(sqrt(spar.new[[ii]][jay]) * S.arg[[ii]] +
+                          sqrt(ridge.adj[[ii]]) * diag(ncol(S.arg[[ii]])))
+        pen.ridge <- rbind(pen.set,
+                           sqrt(ridge.adj[[ii]]) * diag(ncol(S.arg[[ii]])))
+        Dmat1[ioffset + 1:nrow(pen.ridge),
+              joffset + 1:ncol(pen.ridge)] <- pen.ridge
+        ioffset <- ioffset + nrow(pen.ridge)
+        joffset <- joffset + ncol(pen.ridge)
+      }  # for jay
+
+
+      ooo <- matrix(1:(ncol.H.sm.osps[ii] * ncol.X.sm.osps[ii]),
+                    nrow = ncol.H.sm.osps[ii],  # Redundant really
+                    ncol = ncol.X.sm.osps[ii], byrow = TRUE)
+      pen.new.list[[ii]] <- Dmat1[, c(ooo), drop = FALSE]
+      names(pen.new.list)[[ii]] <- nsm.osps[ii]  # nsm.osps[[ii]]
+      ioffset <- 0
+      joffset <- 0
+    }  # if-else ridge.adj
+  }  # for
+
+
+
+
+
+  ncol.allterms <- sapply(assignx, length)
+
+  ncol.model <- if (names(constraints)[[1]] == "(Intercept)")
+                  ncol.allterms[-1] else  ncol.allterms
+  nrowpen.new.list <- sapply(pen.new.list, nrow)
+  nrowPen <- sum(nrowpen.new.list)
+  ncolPen <- sum(ncol.allterms * ncolHlist.model)
+  iioffset <- 0
+  Dmat2 <- matrix(0, nrowPen, ncolPen)
+  jay <- 0
+
+
+  jjoffset <- if (names(constraints)[[1]] == "(Intercept)")
+                ncolHlist.model[1] else 0
+
+  for (ii in seq_along(term.labels)) {
+    if (indexterms[ii]) {
+      jay <- jay + 1
+      ind.x <- iioffset + 1:nrow(pen.new.list[[jay]])
+      ind.y <- jjoffset + 1:ncol(pen.new.list[[jay]])
+      Dmat2[ind.x, ind.y] <- pen.new.list[[jay]]
+      iioffset <- iioffset + nrow(pen.new.list[[jay]])
+      jjoffset <- jjoffset + ncol(pen.new.list[[jay]])
+    } else {
+      jjoffset <- jjoffset + ncolHlist.new[ii] * ncol.model[ii]
+    }
+  }  # ii
+
+
+  Xvlm.aug <- Dmat2
+
+  attr(Xvlm.aug, "spar.vlm") <- spar.new
+  Xvlm.aug
+}  # get.X.VLM.aug
+
+
+
+
diff --git a/R/links.q b/R/links.q
index 88c3ddd..c84f7cd 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -58,7 +58,9 @@ if (FALSE) {
            parallel = TRUE,
            ishrinkage = 0.95,
            nointercept = NULL, imethod = 1,
-           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+           type.fitted = c("mean", "quantiles",
+                           "pobs0", "pstr0", "onempstr0"),
+           percentiles = c(25, 50, 75),
            probs.x = c(0.15, 0.85),
            probs.y = c(0.25, 0.50, 0.75),
            multiple.responses = FALSE, earg.link = FALSE,
@@ -186,8 +188,8 @@ care.exp <- function(x,
             "+",
             as.char.expression(theta),
             ")", sep = "")
-    if (tag) 
-      string <- paste("Log with offset:", string) 
+    if (tag)
+      string <- paste("Log with offset:", string)
     return(string)
   }
 
@@ -235,8 +237,8 @@ care.exp <- function(x,
   if (is.character(theta)) {
     theta <- as.char.expression(theta)
     string <- paste("-", theta, sep = "")
-    if (tag) 
-      string <- paste("Negative-identity:", string) 
+    if (tag)
+      string <- paste("Negative-identity:", string)
     return(string)
   }
 
@@ -257,7 +259,7 @@ care.exp <- function(x,
            inverse = FALSE, deriv = 0,
            short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("logit(",
                theta,
               ")", sep = "") else
@@ -266,8 +268,8 @@ care.exp <- function(x,
               "/(1-",
               as.char.expression(theta),
               "))", sep = "")
-    if (tag) 
-      string <- paste("Logit:", string) 
+    if (tag)
+      string <- paste("Logit:", string)
     return(string)
   }
 
@@ -304,11 +306,11 @@ care.exp <- function(x,
                     inverse = FALSE, deriv = 0,
                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("loglog(",  theta, ")",  sep = "") else
         paste("log(log(", theta, "))", sep = "")
-    if (tag) 
-      string <- paste("Log-Log:", string) 
+    if (tag)
+      string <- paste("Log-Log:", string)
     return(string)
   }
 
@@ -348,8 +350,8 @@ care.exp <- function(x,
         paste("log(-log(1-",
               as.char.expression(theta),
               "))", sep = "")
-    if (tag) 
-      string <- paste("Complementary log-log:", string) 
+    if (tag)
+      string <- paste("Complementary log-log:", string)
     return(string)
   }
 
@@ -360,8 +362,7 @@ care.exp <- function(x,
 
   if (inverse) {
     switch(deriv+1, {
-           junk <- exp(theta)
-           -expm1(-junk)
+           -expm1(-exp(theta))
            },
            -((1 - theta) * log1p(-theta)),
            {  junk <- log1p(-theta)
@@ -386,11 +387,11 @@ care.exp <- function(x,
                     inverse = FALSE, deriv = 0,
                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("probit(", theta, ")", sep = "") else
         paste("qnorm(",  theta, ")", sep = "")
-    if (tag) 
-      string <- paste("Probit:", string) 
+    if (tag)
+      string <- paste("Probit:", string)
     return(string)
   }
 
@@ -468,11 +469,11 @@ care.exp <- function(x,
                      inverse = FALSE, deriv = 0,
                      short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("explink(", theta, ")", sep = "") else
         paste("exp(", theta, ")", sep = "")
-    if (tag) 
-      string <- paste("Exp:", string) 
+    if (tag)
+      string <- paste("Exp:", string)
     return(string)
   }
 
@@ -502,8 +503,8 @@ care.exp <- function(x,
   if (is.character(theta)) {
     theta <- as.char.expression(theta)
     string <- paste("1/", theta, sep = "")
-    if (tag) 
-      string <- paste("Reciprocal:", string) 
+    if (tag)
+      string <- paste("Reciprocal:", string)
     return(string)
   }
 
@@ -532,11 +533,11 @@ care.exp <- function(x,
                    inverse = FALSE, deriv = 0,
                    short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-      string <- if (short) 
+      string <- if (short)
           paste("negloge(", theta, ")", sep = "") else
           paste("-log(",  theta, ")", sep = "")
-      if (tag) 
-        string <- paste("Negative log:", string) 
+      if (tag)
+        string <- paste("Negative log:", string)
       return(string)
   }
 
@@ -567,8 +568,8 @@ care.exp <- function(x,
   if (is.character(theta)) {
     theta <- as.char.expression(theta)
     string <- paste("-1/", theta, sep = "")
-    if (tag) 
-      string <- paste("Negative reciprocal:", string) 
+    if (tag)
+      string <- paste("Negative reciprocal:", string)
     return(string)
   }
 
@@ -600,8 +601,8 @@ care.exp <- function(x,
   if (is.character(theta)) {
     theta <- as.char.expression(theta)
     string <- paste("-1/", theta, sep = "")
-    if (tag) 
-      string <- paste("Negative inverse:", string) 
+    if (tag)
+      string <- paste("Negative inverse:", string)
     return(string)
   }
 
@@ -636,8 +637,8 @@ care.exp <- function(x,
               ")/(1-",
               as.char.expression(theta),
               "))", sep = "")
-    if (tag) 
-      string <- paste("Rhobit:", string) 
+    if (tag)
+      string <- paste("Rhobit:", string)
     return(string)
   }
 
@@ -669,15 +670,15 @@ care.exp <- function(x,
                      inverse = FALSE, deriv = 0,
                      short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("fisherz(", theta, ")", sep = "") else
         paste("(1/2) * log((1+",
               as.char.expression(theta),
               ")/(1-",
               as.char.expression(theta),
               "))", sep = "")
-    if (tag) 
-      string <- paste("Fisher's Z transformation:", string) 
+    if (tag)
+      string <- paste("Fisher's Z transformation:", string)
     return(string)
   }
 
@@ -713,7 +714,7 @@ care.exp <- function(x,
            bvalue = NULL,
            inverse = FALSE, deriv = 0,
            short = TRUE, tag = FALSE) {
- 
+
 
   fillerChar <- ifelse(whitespace, " ", "")
 
@@ -868,7 +869,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
     stop("'min' >= 'max' is not allowed")
 
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
       paste("foldsqrt(", theta, ")", sep = "") else {
     theta <- as.char.expression(theta)
       if (abs(mux-sqrt(2)) < 1.0e-10)
@@ -879,8 +880,8 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
             max, "-", theta, "))",
             sep = "")
     }
-    if (tag) 
-      string <- paste("Folded square root:", string) 
+    if (tag)
+      string <- paste("Folded square root:", string)
     return(string)
   }
 
@@ -920,13 +921,13 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
       stop("use the 'loge' link")
 
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("powerlink(", theta, ", power = ",
               as.character(exponent), ")",
               sep = "") else
         paste(as.char.expression(theta),
               "^(", as.character(exponent), ")", sep = "")
-    if (tag) 
+    if (tag)
       string <- paste("Power link:", string)
     return(string)
   }
@@ -977,8 +978,8 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
             as.char.expression(theta),
             "))", sep = "")
     }
-    if (tag) 
-      string <- paste("Extended logit:", string) 
+    if (tag)
+      string <- paste("Extended logit:", string)
     return(string)
   }
 
@@ -1006,13 +1007,13 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                   inverse = FALSE, deriv = 0,
                   short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("logc(", theta, ")", sep = "") else {
         theta <- as.char.expression(theta)
         paste("log(1-", theta, ")", sep = "")
     }
-    if (tag) 
-      string <- paste("Log Complementary:", string) 
+    if (tag)
+      string <- paste("Log Complementary:", string)
     return(string)
   }
 
@@ -1045,13 +1046,13 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                      inverse = FALSE, deriv = 0,
                      short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("cauchit(", theta, ")", sep = "") else {
         theta <- as.char.expression(theta)
         paste("tan(pi*(", theta, "-0.5))", sep = "")
     }
-    if (tag) 
-      string <- paste("Cauchit:", string) 
+    if (tag)
+      string <- paste("Cauchit:", string)
     return(string)
   }
 
@@ -1127,8 +1128,8 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
               ")/(3*sqrt(lambda)))", sep = "")
       }
     }
-    if (tag) 
-      string <- paste("Gamma-ordinal link function:", string) 
+    if (tag)
+      string <- paste("Gamma-ordinal link function:", string)
     return(string)
   }
 
@@ -1214,14 +1215,14 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
             if (lenc) "c(" else "",
             ToString(cutpoint),
             if (lenc) ")" else "",
-            ")", sep = "") 
+            ")", sep = "")
     } else {
       theta <- as.char.expression(theta)
       paste("2*log(0.5*qnorm(", theta,
             ") + sqrt(cutpoint+7/8))", sep = "")
     }
-    if (tag) 
-      string <- paste("Poisson-ordinal link function:", string) 
+    if (tag)
+      string <- paste("Poisson-ordinal link function:", string)
     return(string)
   }
 
@@ -1242,10 +1243,10 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
   if (inverse) {
       switch(deriv+1, {
  # deriv == 0
-          origans <- 
+          origans <-
           if (any(cp.index <- cutpoint == 0)) {
               tmp <- theta
-              tmp[cp.index] <- 
+              tmp[cp.index] <-
               cloglog(theta = theta[cp.index],
                       inverse = inverse, deriv = deriv)
               tmp[!cp.index] <-
@@ -1264,13 +1265,13 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
              )
 
 
-      
+
   } else {
     if (any(cp.index <- cutpoint == 0)) {
         cloglog(theta = theta,
                 inverse = inverse, deriv = deriv)
 
-        
+
     } else {
       smallno <- 1 * .Machine$double.eps
       SMALLNO <- 1 * .Machine$double.xmin
@@ -1279,7 +1280,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
       Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
       Ql <- qnorm(Theta)
 
-      
+
       switch(deriv+1, {
       temp <- 0.5 * Ql + sqrt(cutpoint + 7/8)
       temp <- pmax(temp, SMALLNO)
@@ -1289,7 +1290,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
       origans <- (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql)
       1 / origans
       },
-             
+
       {  stop('cannot currently handle deriv = 2') },
       stop("argument 'deriv' unmatched"))
     }
@@ -1343,7 +1344,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
               ")/(2*sqrt(k)) + ",
               "asinh(sqrt(cutpoint/k))))", sep = "")
       }
-      if (tag) 
+      if (tag)
         string <- paste("Negative binomial-ordinal link function:",
                         string)
       return(string)
@@ -1383,7 +1384,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
 
 
 
-      
+
   } else {
     smallno <- 1 * .Machine$double.eps
     SMALLNO <- 1 * .Machine$double.xmin
@@ -1469,7 +1470,7 @@ warning("20150711; this function has not been updated")
     theta <- as.char.expression(theta)
     paste("3*log(<a complicated expression>)", sep = "")
   }
-  if (tag) 
+  if (tag)
     string <- paste("Negative binomial-ordinal link function 2:",
                    string)
   return(string)
@@ -1748,7 +1749,7 @@ setMethod("linkfun", "vglm", function(object, ...)
            inverse = FALSE, deriv = 0,
            short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("logitoffsetlink(",
                theta,
               ", ", offset[1],
@@ -1760,8 +1761,8 @@ setMethod("linkfun", "vglm", function(object, ...)
               ")",
               " - ", offset[1],
               ")", sep = "")
-    if (tag) 
-      string <- paste("Logit-with-offset:", string) 
+    if (tag)
+      string <- paste("Logit-with-offset:", string)
     return(string)
   }
 
@@ -1770,7 +1771,7 @@ setMethod("linkfun", "vglm", function(object, ...)
 
   if (inverse) {
     switch(deriv+1, {
-           exp.eta <- exp(theta) 
+           exp.eta <- exp(theta)
            (exp.eta + offset) / (1 + exp.eta + offset)
            },
            1 / Recall(theta = theta,
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index d7e375d..2412665 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -9,7 +9,7 @@
 
 
 
- 
+
 logLik.vlm <- function(object,
                        summation = TRUE,
                        ...) {
@@ -27,7 +27,7 @@ logLik.vlm <- function(object,
 
     object@family@loglikelihood(mu = fitted(object),
                                 y = depvar(object),
-                                w = weights(object, type = "prior"),
+                          w = as.vector(weights(object, type = "prior")),
                                 residuals = FALSE,
                                 eta = predict(object),
                                 extra = object@extra,
@@ -37,7 +37,7 @@ logLik.vlm <- function(object,
 
 
 
- 
+
 logLik.qrrvglm <- function(object,
                            summation = TRUE,
                            ...) {
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index 845f342..9232285 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -14,7 +14,7 @@
 
 
 update_default <- function (object, formula., ..., evaluate = TRUE) {
-  if (is.null(call <- getCall(object))) 
+  if (is.null(call <- getCall(object)))
     stop("need an object with call component")
 
   extras <- match.call(expand.dots = FALSE)$...
@@ -80,8 +80,8 @@ print_anova <- function (x, digits = max(getOption("digits") - 2, 3),
   tst.i <- i
   if (length(i <- grep("Df$", cn)))
     zap.i <- zap.i[!(zap.i %in% i)]
-  stats::printCoefmat(x, digits = digits, signif.stars = signif.stars, 
-      has.Pvalue = has.P, P.values = has.P, cs.ind = NULL, 
+  stats::printCoefmat(x, digits = digits, signif.stars = signif.stars,
+      has.Pvalue = has.P, P.values = has.P, cs.ind = NULL,
       zap.ind = zap.i, tst.ind = tst.i, na.print = "", ...)
   invisible(x)
 }
@@ -101,7 +101,10 @@ setClass("VGAManova", representation(
 
 
 
-lrtest_vglm <- function(object, ..., name = NULL) {
+lrtest_vglm <-
+  function(object, ...,
+           no.warning = FALSE,  # 20160802
+           name = NULL) {
 
 
 
@@ -122,19 +125,20 @@ lrtest_vglm <- function(object, ..., name = NULL) {
     name <- function(x) paste(deparse(formula(x)), collapse = "\n")
 
 
-
-  modelUpdate <- function(fm, update) {
+  modelUpdate <- function(fm, update, no.warning = FALSE) {
 
     if (is.numeric(update)) {
       if (any(update < 1)) {
+        if (!no.warning)
         warning("for numeric model specifications all values ",
                 "have to be >=1")
         update <- abs(update)[abs(update) > 0]
       }
       if (any(update > length(tlab(fm)))) {
-        warning(paste("more terms specified than existent in the model:",
+        if (!no.warning)
+        warning("more terms specified than existent in the model: ",
                 paste(as.character(update[update > length(tlab(fm))]),
-                      collapse = ", ")))
+                      collapse = ", "))
         update <- update[update <= length(tlab(fm))]
       }
       update <- tlab(fm)[update]
@@ -142,12 +146,13 @@ lrtest_vglm <- function(object, ..., name = NULL) {
 
     if (is.character(update)) {
       if (!all(update %in% tlab(fm))) {
-        warning(paste("terms specified that are not in the model:",
+        if (!no.warning)
+        warning("terms specified that are not in the model:",
                 paste(dQuote(update[!(update %in% tlab(fm))]),
-                      collapse = ", ")))
+                      collapse = ", "))
         update <- update[update %in% tlab(fm)]
       }
-      if (length(update) < 1) stop("empty model specification")  
+      if (length(update) < 1) stop("empty model specification")
       update <- as.formula(paste(". ~ . -",
                            paste(update, collapse = " - ")))
     }
@@ -156,9 +161,9 @@ lrtest_vglm <- function(object, ..., name = NULL) {
       update <- update_default(fm, update)
     }
     if (!inherits(update, cls)) {
-      warning(paste("original model was of class \"", cls,
-                    "\", updated model is of class \"",
-                    class(update)[1], "\"", sep = ""))
+      if (!no.warning)
+        warning("original model was of class '", cls,
+                "', updated model is of class '", class(update)[1], "'")
     }
     return(update)
   }
@@ -170,11 +175,12 @@ lrtest_vglm <- function(object, ..., name = NULL) {
     objects <- c(objects, . ~ 1)
     nmodels <- 2
   }
-  
+
   no.update <- sapply(objects, function(obj) inherits(obj, cls))
-  
+
   for (i in 2:nmodels) {
-    objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
+    objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]],
+                                no.warning = no.warning)
   }
 
 
@@ -202,7 +208,7 @@ lrtest_vglm <- function(object, ..., name = NULL) {
   rval <- matrix(rep_len(NA_real_, 5 * nmodels), ncol = 5)
   colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
   rownames(rval) <- 1:nmodels
-  
+
   logLlist <- lapply(objects, logLik)
 
   dflist <- lapply(objects, df.residual)
@@ -307,29 +313,28 @@ lrtest.default <- function(object, ..., name = NULL) {
         update <- abs(update)[abs(update) > 0]
       }
       if (any(update > length(tlab(fm)))) {
-        warning(paste("more terms specified than existent in the model:",
+        warning("more terms specified than existent in the model: ",
                 paste(as.character(update[update > length(tlab(fm))]),
-                      collapse = ", ")))
+                      collapse = ", "))
         update <- update[update <= length(tlab(fm))]
       }
       update <- tlab(fm)[update]
     }
     if (is.character(update)) {
       if (!all(update %in% tlab(fm))) {
-        warning(paste("terms specified that are not in the model:",
+        warning("terms specified that are not in the model: ",
                 paste(dQuote(update[!(update %in% tlab(fm))]),
-                      collapse = ", ")))
+                      collapse = ", "))
         update <- update[update %in% tlab(fm)]
       }
-      if (length(update) < 1) stop("empty model specification")  
+      if (length(update) < 1) stop("empty model specification")
       update <- as.formula(paste(". ~ . -",
                            paste(update, collapse = " - ")))
     }
     if (inherits(update, "formula")) update <- update(fm, update)
     if (!inherits(update, cls))
-      warning(paste("original model was of class \"", cls,
-                    "\", updated model is of class \"",
-                    class(update)[1], "\"", sep = ""))
+      warning("original model was of class '", cls,
+              "', updated model is of class '", class(update)[1], "'")
     return(update)
   }
 
@@ -342,11 +347,11 @@ lrtest.default <- function(object, ..., name = NULL) {
  print( objects )
     nmodels <- 2
   }
-  
+
   no.update <- sapply(objects, function(obj) inherits(obj, cls))
  print("no.update")
  print( no.update )
-  
+
   for (i in 2:nmodels)
     objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
 
@@ -377,9 +382,9 @@ lrtest.default <- function(object, ..., name = NULL) {
   rval <- matrix(rep_len(NA_real_, 5 * nmodels), ncol = 5)
   colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
   rownames(rval) <- 1:nmodels
-  
+
   logL <- lapply(objects, logLik)
-  rval[,1] <- as.numeric(sapply(logL, function(x) attr(x, "df")))  
+  rval[,1] <- as.numeric(sapply(logL, function(x) attr(x, "df")))
   rval[,2] <- sapply(logL, as.numeric)
   rval[2:nmodels, 3] <- rval[2:nmodels, 1] - rval[1:(nmodels-1), 1]
   rval[2:nmodels, 4] <- 2 * abs(rval[2:nmodels, 2] - rval[1:(nmodels-1), 2])
@@ -422,7 +427,7 @@ waldtest_formula <- function(object, ..., data = list()) {
          environment(object))) else
     eval(call("lm", formula = as.formula(deparse(substitute(object))),
          data = as.name(deparse(substitute(data))), environment(data)))
- 
+
 }
 
 
@@ -453,29 +458,28 @@ waldtest_default <- function(object, ..., vcov = NULL,
         update <- abs(update)[abs(update) > 0]
       }
       if (any(update > length(tlab(fm)))) {
-        warning(paste("more terms specified than existent in the model:",
+        warning("more terms specified than existent in the model: ",
                 paste(as.character(update[update > length(tlab(fm))]),
-                      collapse = ", ")))
+                      collapse = ", "))
         update <- update[update <= length(tlab(fm))]
       }
       update <- tlab(fm)[update]
     }
     if (is.character(update)) {
       if (!all(update %in% tlab(fm))) {
-        warning(paste("terms specified that are not in the model:",
+        warning("terms specified that are not in the model: ",
                 paste(dQuote(update[!(update %in% tlab(fm))]),
-                      collapse = ", ")))
+                      collapse = ", "))
         update <- update[update %in% tlab(fm)]
       }
-      if (length(update) < 1) stop("empty model specification")  
+      if (length(update) < 1) stop("empty model specification")
       update <- as.formula(paste(". ~ . -",
                            paste(update, collapse = " - ")))
     }
     if (inherits(update, "formula")) update <- update(fm, update)
     if (!inherits(update, cls))
-      stop(paste("original model was of class \"", cls,
-                 "\", updated model is of class \"",
-                 class(update)[1], "\"", sep = ""))
+      stop("original model was of class '", cls,
+           "', updated model is of class '", class(update)[1], "'")
     return(update)
   }
 
@@ -511,9 +515,9 @@ waldtest_default <- function(object, ..., vcov = NULL,
     objects <- c(objects, . ~ 1)
     nmodels <- 2
   }
-  
+
   no.update <- sapply(objects, function(obj) inherits(obj, cls))
-  
+
   for (i in 2:nmodels)
     objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
 
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index fca1ab6..a751058 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -31,6 +31,8 @@ attrassignlm <- function(object, ...)
 
 
 
+
+
  vlabel <- function(xn, ncolHlist, M, separator = ":", colon = FALSE) {
 
   if (length(xn) != length(ncolHlist))
@@ -54,12 +56,14 @@ attrassignlm <- function(object, ...)
 
 
 
+
+
  vlm2lm.model.matrix <-
-  function(x.vlm, Hlist = NULL, 
-           which.linpred = 1, 
+  function(x.vlm, Hlist = NULL,
+           which.linpred = 1,
            M = NULL) {
 
- 
+
 
 
 
@@ -83,7 +87,7 @@ attrassignlm <- function(object, ...)
     stop("'n.lm' does not seem to be an integer")
   linpred.index <- which.linpred
   vecTF <- Hmatrices[linpred.index, ] != 0
-  X.lm.jay <- x.vlm[(0:(n.lm - 1)) * M + linpred.index, vecTF, 
+  X.lm.jay <- x.vlm[(0:(n.lm - 1)) * M + linpred.index, vecTF,
                     drop = FALSE]
   X.lm.jay
 }
@@ -92,8 +96,10 @@ attrassignlm <- function(object, ...)
 
 
 
+
+
  lm2vlm.model.matrix <-
-  function(x, Hlist = NULL, assign.attributes = TRUE, 
+  function(x, Hlist = NULL, assign.attributes = TRUE,
            M = NULL, xij = NULL, Xm2 = NULL) {
 
 
@@ -131,7 +137,7 @@ attrassignlm <- function(object, ...)
   dn <- labels(x)
   yn <- dn[[1]]
   xn <- dn[[2]]
-  dimnames(X.vlm) <- list(vlabel(yn, rep(M, nrow.X.lm), M), 
+  dimnames(X.vlm) <- list(vlabel(yn, rep(M, nrow.X.lm), M),
                           vlabel(xn, ncolHlist, M))
 
   if (assign.attributes) {
@@ -141,7 +147,7 @@ attrassignlm <- function(object, ...)
       attr(X.vlm, "class")       <- attr(x, "class")
       attr(X.vlm, "order")       <- attr(x, "order")
       attr(X.vlm, "term.labels") <- attr(x, "term.labels")
-    
+
       nasgn <- oasgn <- attr(x, "assign")
       lowind <- 0
       for (ii in seq_along(oasgn)) {
@@ -152,7 +158,7 @@ attrassignlm <- function(object, ...)
       if (lowind != ncol(X.vlm))
         stop("something gone wrong")
       attr(X.vlm, "assign") <- nasgn
-    
+
 
       fred <- unlist(lapply(nasgn, length)) / unlist(lapply(oasgn, length))
       vasgn <- vector("list", sum(fred))
@@ -188,7 +194,7 @@ attrassignlm <- function(object, ...)
 
   for (ii in seq_along(xij)) {
     form.xij <- xij[[ii]]
-    if (length(form.xij) != 3) 
+    if (length(form.xij) != 3)
       stop("xij[[", ii, "]] is not a formula with a response")
     tform.xij <- terms(form.xij)
     aterm.form <- attr(tform.xij, "term.labels")  # Does not include response
@@ -218,7 +224,7 @@ attrassignlm <- function(object, ...)
       tmp44 <- array(t(tmp44), c(M, Rsum.k, nrow.X.lm))
       tmp44 <- aperm(tmp44, c(1, 3, 2))  # c(M, n, Rsum.k)
       rep.index <- cols.X.vlm[((bbb-1)*Rsum.k+1):(bbb*Rsum.k)]
-      X.vlm[, rep.index] <- c(tmp44) 
+      X.vlm[, rep.index] <- c(tmp44)
     }  # End of bbb
   }  # End of for (ii in seq_along(xij))
 
@@ -245,9 +251,9 @@ model.matrix.vlm <- function(object, ...)
 
 
 
- model.matrixvlm <- function(object, 
-                             type = c("vlm", "lm", "lm2", "bothlmlm2"), 
-                             linpred.index = NULL, 
+ model.matrixvlm <- function(object,
+                             type = c("vlm", "lm", "lm2", "bothlmlm2"),
+                             linpred.index = NULL,
                             ...) {
 
 
@@ -258,11 +264,11 @@ model.matrix.vlm <- function(object, ...)
 
   if (length(linpred.index) &&
       type != "lm")
-    stop("Must set 'type = \"lm\"' when 'linpred.index' is ", 
+    stop("Must set 'type = \"lm\"' when 'linpred.index' is ",
          "assigned a value")
   if (length(linpred.index) &&
       length(object at control$xij))
-    stop("Currently cannot handle 'xij' models when 'linpred.index' is ", 
+    stop("Currently cannot handle 'xij' models when 'linpred.index' is ",
          "assigned a value")
 
 
@@ -272,29 +278,41 @@ model.matrix.vlm <- function(object, ...)
   Xm2 <- if (any(slotNames(object) == "Xm2")) slot(object, "Xm2") else
          numeric(0)
 
+
+  form2 <- if (any(slotNames(object) == "misc")) object at misc$form2 else NULL
+  if (type == "lm2" && !length(form2))
+    return(Xm2)
+
+
   if (!length(x)) {
-    data <- model.frame(object, xlev = object at xlevels, ...) 
+    data <- model.frame(object, xlev = object at xlevels, ...)
 
     kill.con <- if (length(object at contrasts)) object at contrasts else NULL
 
-    x <- vmodel.matrix.default(object, data = data, 
+    x <- vmodel.matrix.default(object, data = data,
                                contrasts.arg = kill.con)
     tt <- terms(object)
     attr(x, "assign") <- attrassigndefault(x, tt)
   }
 
+
+
+
   if ((type == "lm2" || type == "bothlmlm2") &&
       !length(Xm2)) {
     object.copy2 <- object
-    data <- model.frame(object.copy2, xlev = object.copy2 at xlevels, ...) 
+    data <- model.frame(object.copy2, xlev = object.copy2 at xlevels, ...)
 
     kill.con <- if (length(object.copy2 at contrasts))
                 object.copy2 at contrasts else NULL
 
-    Xm2 <- vmodel.matrix.default(object.copy2, data = data, 
+    Xm2 <- vmodel.matrix.default(object.copy2, data = data,
                                  contrasts.arg = kill.con)
-    ttXm2 <- terms(object.copy2 at misc$form2)
-    attr(Xm2, "assign") <- attrassigndefault(Xm2, ttXm2)
+
+
+    if (length(form2)) {
+      attr(Xm2, "assign") <- attrassigndefault(Xm2, terms(form2))
+    }
   }
 
 
@@ -310,19 +328,19 @@ model.matrix.vlm <- function(object, ...)
   }
 
 
-  M <- object at misc$M  
+  M <- object at misc$M
   Hlist <- object at constraints  # == constraints(object, type = "lm")
-  X.vlm <- lm2vlm.model.matrix(x = x, Hlist = Hlist, 
+  X.vlm <- lm2vlm.model.matrix(x = x, Hlist = Hlist,
                                xij = object at control$xij, Xm2 = Xm2)
 
   if (type == "vlm") {
     return(X.vlm)
   } else if (type == "lm" && length(linpred.index)) {
-    if (!is.Numeric(linpred.index, integer.valued = TRUE, positive = TRUE, 
+    if (!is.Numeric(linpred.index, integer.valued = TRUE, positive = TRUE,
                     length.arg = 1))
       stop("bad input for argument 'linpred.index'")
     if (!length(intersect(linpred.index, 1:M)))
-      stop("argument 'linpred.index' should have ", 
+      stop("argument 'linpred.index' should have ",
            "a single value from the set 1:", M)
 
     Hlist <- Hlist
@@ -348,12 +366,12 @@ setMethod("model.matrix",  "vlm", function(object, ...)
 
 
  model.matrixvgam <-
-  function(object, 
-           type = c("lm", "vlm", "lm", "lm2", "bothlmlm2"), 
-           linpred.index = NULL, 
+  function(object,
+           type = c("lm", "vlm", "lm", "lm2", "bothlmlm2"),
+           linpred.index = NULL,
            ...) {
-  model.matrixvlm(object = object, 
-                  type = type[1], 
+  model.matrixvlm(object = object,
+                  type = type[1],
                   linpred.index = linpred.index, ...)
 }
 setMethod("model.matrix",  "vgam", function(object, ...)
@@ -364,8 +382,8 @@ setMethod("model.matrix",  "vgam", function(object, ...)
 
 
 
- model.framevlm <- function(object, 
-                            setupsmart = TRUE, 
+ model.framevlm <- function(object,
+                            setupsmart = TRUE,
                             wrapupsmart = TRUE, ...) {
 
   dots <- list(...)
@@ -382,7 +400,7 @@ setMethod("model.matrix",  "vgam", function(object, ...)
 
     fcall[names(nargs)] <- nargs
     env <- environment(object at terms$terms)  # @terms or @terms$terms ??
-    if (is.null(env)) 
+    if (is.null(env))
       env <- parent.frame()
     ans <- eval(fcall, env, parent.frame())
 
@@ -406,13 +424,13 @@ setMethod("model.frame",  "vlm", function(formula, ...)
 
 
  vmodel.matrix.default <-
-  function(object, data = environment(object), 
+  function(object, data = environment(object),
            contrasts.arg = NULL, xlev = NULL, ...) {
 
   t <- if (missing(data)) terms(object) else terms(object, data = data)
   if (is.null(attr(data, "terms")))
     data <- model.frame(object, data, xlev = xlev) else {
-    reorder <- match(sapply(attr(t, "variables"), deparse, 
+    reorder <- match(sapply(attr(t, "variables"), deparse,
                      width.cutoff = 500)[-1], names(data))
     if (anyNA(reorder))
       stop("model frame and formula mismatch in model.matrix()")
@@ -425,7 +443,7 @@ setMethod("model.frame",  "vlm", function(formula, ...)
     namD <- names(data)
     for (i in namD) if (is.character(data[[i]])) {
       data[[i]] <- factor(data[[i]])
-      warning(gettextf("variable '%s' converted to a factor", i), 
+      warning(gettextf("variable '%s' converted to a factor", i),
               domain = NA)
     }
     isF <- sapply(data, function(x) is.factor(x) || is.logical(x))
@@ -439,7 +457,7 @@ setMethod("model.frame",  "vlm", function(formula, ...)
       for (nn in namC) {
         if (is.na(ni <- match(nn, namD)))
           warning(gettextf(
-            "variable '%s' is absent, its contrast will be ignored", 
+            "variable '%s' is absent, its contrast will be ignored",
             nn), domain = NA) else {
           ca <- contrasts.arg[[nn]]
           if (is.matrix(ca))
@@ -470,9 +488,9 @@ setMethod("model.frame",  "vlm", function(formula, ...)
 
 
 depvar.vlm <-
-  function(object, 
-           type = c("lm", "lm2"), 
-           drop = FALSE, 
+  function(object,
+           type = c("lm", "lm2"),
+           drop = FALSE,
            ...) {
   type <- match.arg(type, c("lm", "lm2"))[1]
   ans <- if (type == "lm") {
@@ -486,9 +504,9 @@ depvar.vlm <-
 
 
 if (!isGeneric("depvar"))
-    setGeneric("depvar", 
+    setGeneric("depvar",
                function(object, ...)
-                 standardGeneric("depvar"), 
+                 standardGeneric("depvar"),
                package = "VGAM")
 
 
@@ -507,15 +525,15 @@ setMethod("depvar",  "rcim", function(object, ...)
 
 
 
-npred.vlm <- function(object, 
-                      type = c("total", "one.response"), 
+npred.vlm <- function(object,
+                      type = c("total", "one.response"),
                       ...) {
   if (!missing(type))
     type <- as.character(substitute(type))
   type.arg <- match.arg(type, c("total", "one.response"))[1]
 
 
-  MM <- 
+  MM <-
     if (length(object at misc$M))
       object at misc$M else
     if (ncol(as.matrix(predict(object))) > 0)
@@ -549,7 +567,7 @@ npred.vlm <- function(object,
 
 
 if (!isGeneric("npred"))
-    setGeneric("npred", function(object, ...) standardGeneric("npred"), 
+    setGeneric("npred", function(object, ...) standardGeneric("npred"),
                package = "VGAM")
 
 
@@ -570,7 +588,7 @@ setMethod("npred",  "rcim", function(object, ...)
 
 
 hatvaluesvlm <-
-  function(model, 
+  function(model,
            type = c("diagonal", "matrix", "centralBlocks"), ...) {
 
 
@@ -638,7 +656,7 @@ hatvaluesvlm <-
     H.ss <- matrix(H.ss, nn, MMp1d2, byrow = TRUE)
     H.ss
   }
-}
+}  # hatvaluesvlm
 
 
 
@@ -665,10 +683,10 @@ setMethod("hatvalues",  "rcim", function(model, ...)
 
 
 hatplot.vlm <-
-  function(model, multiplier = c(2, 3), 
-           lty = "dashed", 
-           xlab = "Observation", 
-           ylab = "Hat values", 
+  function(model, multiplier = c(2, 3),
+           lty = "dashed",
+           xlab = "Observation",
+           ylab = "Hat values",
            ylim = NULL, ...) {
 
   if (is(model, "vlm")) {
@@ -698,8 +716,8 @@ hatplot.vlm <-
   if (is.null(ylim))
     ylim <- c(0, max(hatval))
   for (jay in 1:M) {
-    plot(hatval[, jay], type = "n", main = predictors.names[jay], 
-         ylim = ylim, xlab = xlab, ylab = ylab, 
+    plot(hatval[, jay], type = "n", main = predictors.names[jay],
+         ylim = ylim, xlab = xlab, ylab = ylab,
          ...)
     points(1:N, hatval[, jay], ...)
     abline(h = multiplier * ncol.X.vlm / (N * M), lty = lty, ...)
@@ -739,10 +757,10 @@ setMethod("hatplot",  "rcim", function(model, ...)
 
 
 dfbetavlm <-
-  function(model, 
-           maxit.new = 1, 
-           trace.new = FALSE, 
-           smallno = 1.0e-8, 
+  function(model,
+           maxit.new = 1,
+           trace.new = FALSE,
+           smallno = 1.0e-8,
            ...) {
 
   if (!is(model, "vlm"))
@@ -786,22 +804,22 @@ dfbetavlm <-
                 orig.w
     w.orig[ii] <- w.orig[ii] * smallno  # Relative
 
-    fit <- vglm.fit(x = X.lm, 
+    fit <- vglm.fit(x = X.lm,
                     X.vlm.arg = X.vlm,  # Should be more efficient
                     y = if (y.integer)
                       round(depvar(model) * c(pweights) / c(orig.w)) else
-                           (depvar(model) * c(pweights) / c(orig.w)), 
+                           (depvar(model) * c(pweights) / c(orig.w)),
                     w = w.orig,  # Set to zero so that it is 'deleted'.
-                    Xm2 = NULL, Ym2 = NULL, 
-                    etastart = etastart,  # coefstart = NULL, 
-                    offset = offset, 
-                    family = model at family, 
-                    control = new.control, 
-                    criterion =  new.control$criterion,  # "coefficients", 
-                    qr.arg = FALSE, 
-                    constraints = constraints(model, type = "term"), 
-                    extra = model at extra, 
-                    Terms = Terms.zz, 
+                    Xm2 = NULL, Ym2 = NULL,
+                    etastart = etastart,  # coefstart = NULL,
+                    offset = offset,
+                    family = model at family,
+                    control = new.control,
+                    criterion =  new.control$criterion,  # "coefficients",
+                    qr.arg = FALSE,
+                    constraints = constraints(model, type = "term"),
+                    extra = model at extra,
+                    Terms = Terms.zz,
                     function.name = "vglm")
 
     dfbeta[ii, ] <- coef.model - fit$coeff
@@ -840,8 +858,8 @@ setMethod("dfbeta",  "rcim", function(model, ...)
 
 
 
-hatvaluesbasic <- function(X.vlm, 
-                           diagWm, 
+hatvaluesbasic <- function(X.vlm,
+                           diagWm,
                            M = 1) {
 
 
@@ -877,3 +895,37 @@ hatvaluesbasic <- function(X.vlm,
 
 
 
+
+
+
+ model.matrixpvgam <-
+  function(object,
+           type = c("vlm", "lm", "lm2", "bothlmlm2",
+                    "augmentedvlm", "penalty"),  # This line is new
+           linpred.index = NULL,
+           ...) {
+  type <- match.arg(type, c("vlm", "lm", "lm2", "bothlmlm2",
+                            "augmentedvlm", "penalty"))[1]
+
+  if (type == "augmentedvlm" ||
+      type == "penalty") {
+    rbind(if (type == "penalty") NULL else
+          model.matrixvlm(object, type = "vlm",
+                          linpred.index = linpred.index, ...),
+          get.X.VLM.aug(constraints  = constraints(object, type = "term"),
+                        sm.osps.list = object at ospsslot$sm.osps.list))
+  } else {
+    model.matrixvlm(object, type = type, linpred.index = linpred.index, ...)
+  }
+}
+
+
+
+setMethod("model.matrix",  "pvgam", function(object, ...)
+           model.matrixpvgam(object, ...))
+
+
+
+
+
+
diff --git a/R/mux.q b/R/mux.q
index 0cf28f7..65bf9e4 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -101,8 +101,8 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
 
 
 
-  dimx <- dim(x) 
-  dimcc <- dim(cc) 
+  dimx <- dim(x)
+  dimcc <- dim(cc)
   r <- dimx[2]
 
   if (matrix.arg) {
@@ -115,7 +115,7 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
         dimx[1]  != dimcc[1] ||
         (length(dimx) == 3 && dimx[3] != dimcc[3]))
       stop('input nonconformable')
-    neltscci <- M*(M+1)/2 
+    neltscci <- M*(M+1)/2
   }
 
   if (is.matrix(x))
@@ -128,15 +128,15 @@ mux5 <- function(cc, x, M, matrix.arg = FALSE) {
                ans = double(size),
                as.integer(M), as.integer(n), as.integer(r),
                as.integer(neltscci),
-               as.integer(dimm(r)), 
+               as.integer(dimm(r)),
                as.integer(as.numeric(matrix.arg)),
-               double(M*M), double(r*r), 
+               double(M*M), double(r*r),
                as.integer(index.M$row), as.integer(index.M$col),
-               as.integer(index.r$row), as.integer(index.r$col), 
+               as.integer(index.r$row), as.integer(index.r$col),
                ok3 = as.integer(1), NAOK = TRUE)
   if (fred$ok3 == 0)
     stop("can only handle matrix.arg == 1")
- 
+
 
   if (matrix.arg) {
     ans <- fred$ans
@@ -163,7 +163,7 @@ mux55 <- function(evects, evals, M) {
   fred <- .C("mux55", as.double(evects), as.double(evals),
              ans = double(MMp1d2 * n),
              double(M*M), double(M*M),
-             as.integer(index$row), as.integer(index$col), 
+             as.integer(index$row), as.integer(index$col),
              as.integer(M), as.integer(n), NAOK = TRUE)
   dim(fred$ans) <- c(MMp1d2, n)
   fred$ans
@@ -174,8 +174,8 @@ mux55 <- function(evects, evals, M) {
 
 mux7 <- function(cc, x) {
 
-  dimx <- dim(x) 
-  dimcc <- dim(cc) 
+  dimx <- dim(x)
+  dimcc <- dim(cc)
   if (dimx[1]!= dimcc[2] ||
      (length(dimx) == 3 && dimx[3]!= dimcc[3]))
     stop('input nonconformable')
@@ -202,8 +202,8 @@ mux9 <- function(cc, xmat) {
 
   if (is.vector(xmat))
     xmat <- cbind(xmat)
-  dimxmat <- dim(xmat) 
-  dimcc <- dim(cc) 
+  dimxmat <- dim(xmat)
+  dimcc <- dim(cc)
 
   if (dimcc[1]   != dimcc[2] ||
       dimxmat[1] != dimcc[3] ||
@@ -380,7 +380,7 @@ vchol <- function(cc, M, n, silent = FALSE, callno = 0) {
   }
   dim(ans) <- c(MM, n)  # Make sure
 
-  ans 
+  ans
 }
 
 
@@ -402,7 +402,7 @@ vchol.greenstadt <- function(cc, M, silent = FALSE,
 
 
 
-  temp <- veigen(cc, M = M)  # , mat = TRUE) 
+  temp <- veigen(cc, M = M)  # , mat = TRUE)
   dim(temp$vectors) <- c(M, M, n)  # Make sure (when M = 1) for mux5
   dim(temp$values)  <- c(M, n)    # Make sure (when M = 1) for mux5
 
@@ -437,7 +437,7 @@ vchol.greenstadt <- function(cc, M, silent = FALSE,
   temp3 <- mux55(temp$vectors, temp$values, M = M)  #, matrix.arg = TRUE)
   ans <- vchol(t(temp3), M = M, n = n, silent = silent,
                callno = callno + 1)  #, matrix.arg = TRUE)
-                                   
+
 
 
   if (nrow(ans) == MM) ans else ans[1:MM, , drop = FALSE]
diff --git a/R/nobs.R b/R/nobs.R
index c685677..d15fbc3 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/penvps.R b/R/penvps.R
deleted file mode 100644
index 26d9de0..0000000
--- a/R/penvps.R
+++ /dev/null
@@ -1,139 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-
-Pen.psv <-
-  function(constraints = constraints, ps.list = ps.list) {
-  assignx <- ps.list$assignx
-  nassignx <- names(assignx)
-  indexterms <- ps.list$indexterms
-
-
-  which.X.ps <- ps.list$which.X.ps
-
-  S.arg <- ps.list$S.arg
-
-  lambdalist <- ps.list$lambdalist
-
-  ridge.adj <- ps.list$ridge.adj
-
-  term.labels <- ps.list$term.labels
-
-
-
-  index <- numeric()
-  lambda.new <- list()
-  pen.new.list <- list()
-  ncol.X.ps <- sapply(which.X.ps, length)
-  ncolHlist.model <- unlist(lapply(constraints, ncol))
-
-
-  ncolHlist.new <- ncolHlist.model
-  if (names(constraints)[[1]] == "(Intercept)") {
-    ncolHlist.new <- ncolHlist.new[-1]
-    nassignx <- nassignx[-1]
-  }
-
-
-
-  ncol.H.ps <- ncolHlist.new[indexterms]
-  nps <- nassignx[indexterms]
-
-
-
-  lambdalen <- sapply(lambdalist, length)
-
-
-
-
-
-  for (ii in seq_along(ncol.H.ps)) {
-    nlambda <- lambdalen[ii]  # lambdalen[[ii]]
-    if (nlambda == ncol.H.ps[ii]) {
-      lambda.new[[ii]] <- lambdalist[[ii]]
-    } else {
-      if (nlambda > ncol.H.ps[ii])
-        warning("too many lambdas; using the first few")
-      lambda.new[[ii]] <- rep_len(lambdalist[[ii]], ncol.H.ps[ii])
-    }
-
-    names(lambda.new)[[ii]] <- nps[ii]  # nps[[ii]]
-
-
-
-
-
-
-    if (ridge.adj[[ii]] == 0) {
-      lambda.diag <- diag(sqrt(lambda.new[[ii]]))
-      pen.noridge <- kronecker(lambda.diag, S.arg[[ii]])
-      ooo <- matrix(1:(ncol.H.ps[ii] * ncol.X.ps[ii]),
-                    ncol = ncol.X.ps[ii], byrow = TRUE)
-      pen.new.list[[ii]] <- pen.noridge[, ooo]
-      names(pen.new.list)[[ii]] <- nps[ii]  # nps[[ii]]
-    } else {
-      ioffset <- 0
-      joffset <- 0
-      Dmat1 <- matrix(0,
-                      ncol.H.ps[ii] * (ncol(S.arg[[ii]]) + nrow(S.arg[[ii]])),
-                      ncol.H.ps[ii] *  ncol(S.arg[[ii]]))
-      for (jay in 1:ncol.H.ps[ii]) {
-        pen.set <- sqrt(lambda.new[[ii]][jay]) * S.arg[[ii]]
-        pen.ridge <- rbind(pen.set,
-                           sqrt(ridge.adj[[ii]]) * diag(ncol(S.arg[[ii]])))
-        Dmat1[ioffset + 1:nrow(pen.ridge),
-              joffset + 1:ncol(pen.ridge)] <- pen.ridge
-        ioffset <- ioffset + nrow(pen.ridge)
-        joffset <- joffset + ncol(pen.ridge)
-      }  # for jay
-      ooo <- matrix(1:(ncol.H.ps[ii] * ncol.X.ps[ii]),
-                    ncol = ncol.X.ps[ii], byrow = TRUE)
-      pen.new.list[[ii]] <- Dmat1[, ooo]
-      names(pen.new.list)[[ii]] <- nps[ii]  # nps[[ii]]
-      ioffset <- 0
-      joffset <- 0
-    }  # if-else ridge.adj
-  }  # for
-
-
-
-
-  ncol.allterms <- sapply(assignx, length)
-
-  ncol.model <- if (names(constraints)[[1]] == "(Intercept)")
-                  ncol.allterms[-1] else  ncol.allterms
-  nrowpen.new.list <- sapply(pen.new.list, nrow)
-  nrowPen <- sum(nrowpen.new.list)
-  ncolPen <- sum(ncol.allterms * ncolHlist.model)
-  iioffset <- 0
-  Dmat2 <- matrix(0, nrowPen, ncolPen)
-  jay <- 0
-
-
-  jjoffset <- if (names(constraints)[[1]] == "(Intercept)")
-                ncolHlist.model[1] else 0
-
-  for (ii in seq_along(term.labels)) {
-    if (indexterms[ii]) {
-      jay <- jay + 1
-      ind.x <- iioffset + 1:nrow(pen.new.list[[jay]])
-      ind.y <- jjoffset + 1:ncol(pen.new.list[[jay]])
-      Dmat2[ind.x, ind.y] <- pen.new.list[[jay]]
-      iioffset <- iioffset + nrow(pen.new.list[[jay]])
-      jjoffset <- jjoffset + ncol(pen.new.list[[jay]])
-    } else {
-      jjoffset <- jjoffset + ncolHlist.new[ii] * ncol.model[ii]
-    }
-  }  # ii
-
-  Xvlm.aug <- Dmat2
-
-  attr(Xvlm.aug, "lambda.vlm") <- lambda.new
-  Xvlm.aug 
-}
-
-
-
-
diff --git a/R/plot.vgam.R b/R/plot.vgam.R
index 184aaea..2a1e71d 100644
--- a/R/plot.vgam.R
+++ b/R/plot.vgam.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -17,11 +17,11 @@
 plotvgam <-
 plot.vgam <-
   function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRUE,
-           se = FALSE, scale = 0, 
+           se = FALSE, scale = 0,
            raw = TRUE, offset.arg = 0, deriv.arg = 0, overlay = FALSE,
            type.residuals = c("deviance", "working", "pearson", "response"),
-           plot.arg = TRUE, which.term = NULL, which.cf = NULL, 
-           control = plotvgam.control(...), 
+           plot.arg = TRUE, which.term = NULL, which.cf = NULL,
+           control = plotvgam.control(...),
            varxij = 1, ...) {
 
   missing.control <- missing(control)
@@ -88,7 +88,7 @@ plot.vgam <-
                 control, plotvgam.control(...))
   }
 
-  x at post$plotvgam.control <- control  # Add it to the object 
+  x at post$plotvgam.control <- control  # Add it to the object
 
   if (plot.arg)
     plotpreplotvgam(preplot.object, residuals = residuals,
@@ -113,7 +113,7 @@ ylim.scale <- function(ylim, scale = 0) {
   try <- ylim[2] - ylim[1]
   if (try > scale) ylim else
     c(ylim[1] + ylim[2] - scale,
-      ylim[1] + ylim[2] + scale) / 2 
+      ylim[1] + ylim[2] + scale) / 2
 }
 
 
@@ -139,20 +139,20 @@ getallresponses <- function(xij) {
            terms = attr((object at terms)$terms, "term.labels"),
            raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
            varxij = 1) {
-  Terms <- terms(object)  # 20030811; object at terms$terms 
+  Terms <- terms(object)  # 20030811; object at terms$terms
   aa <- attributes(Terms)
   all.terms <- labels(Terms)
   xvars <- parse(text = all.terms)
 
 
 
- 
+
   names(xvars) <- all.terms
   terms <- sapply(terms, match.arg, all.terms)
 
   Interactions <- aa$order > 1
   if (any(Interactions)) {
-    stop("cannot handle interactions") 
+    stop("cannot handle interactions")
   }
 
   xvars <- xvars[terms]
@@ -182,8 +182,8 @@ getallresponses <- function(xij) {
     }
     xvars[[term]] <- evars
   }
-    
-    
+
+
   xvars <- c(as.name("list"), xvars)
   mode(xvars) <- "call"
   if (length(newdata)) {
@@ -289,7 +289,7 @@ preplotvgam <-
     gamplot[[term]] <- TT
   }
   attr(gamplot, "Constant") <- Constant
-  invisible(gamplot) 
+  invisible(gamplot)
 }
 
 
@@ -324,7 +324,7 @@ preplotvgam <-
   } else {
     dummy <- function(residuals = NULL, rugplot = TRUE,
                       se = FALSE, scale = 0,
-                      offset.arg = 0, deriv.arg = 0, overlay = FALSE, 
+                      offset.arg = 0, deriv.arg = 0, overlay = FALSE,
                       which.cf = NULL, control = plotvgam.control())
      c(list(residuals = residuals, rugplot = rugplot,
             se = se, scale = scale,
@@ -335,7 +335,7 @@ preplotvgam <-
                 se = se, scale = scale,
                 offset.arg = offset.arg, deriv.arg = deriv.arg,
                 overlay = overlay,
-                which.cf = which.cf, 
+                which.cf = which.cf,
                 control = control)
 
     uniq.comps <- unique(c(names(x), names(dd)))
@@ -348,7 +348,7 @@ preplotvgam <-
 
 vplot.default <- function(x, y, se.y = NULL, xlab = "", ylab = "",
                           residuals = NULL, rugplot = FALSE,
-                          scale = 0, se = FALSE, 
+                          scale = 0, se = FALSE,
                           offset.arg = 0, deriv.arg = 0, overlay = FALSE,
                           which.cf = NULL, ...) {
   switch(data.class(x)[1],
@@ -357,7 +357,7 @@ vplot.default <- function(x, y, se.y = NULL, xlab = "", ylab = "",
                                 offset.arg = offset.arg,
                                 overlay = overlay, ...),
          if (is.numeric(x)) {
-           vplot.numeric(as.vector(x), y, se.y, xlab, ylab, 
+           vplot.numeric(as.vector(x), y, se.y, xlab, ylab,
                          residuals, rugplot, scale, se,
                          offset.arg = offset.arg, overlay = overlay, ...)
          } else {
@@ -370,18 +370,18 @@ vplot.default <- function(x, y, se.y = NULL, xlab = "", ylab = "",
 
 
 vplot.list <-
-  function(x, y, se.y = NULL, xlab, ylab, 
-           residuals = NULL, rugplot = FALSE, scale = 0, se = FALSE, 
-           offset.arg = 0, deriv.arg = 0, overlay = FALSE, 
+  function(x, y, se.y = NULL, xlab, ylab,
+           residuals = NULL, rugplot = FALSE, scale = 0, se = FALSE,
+           offset.arg = 0, deriv.arg = 0, overlay = FALSE,
            which.cf = NULL, ...) {
 
   if (is.numeric(x[[1]])) {
-    vplot.numeric(x[[1]], y, se.y, xlab, ylab, 
-                  residuals, rugplot, scale, se, 
+    vplot.numeric(x[[1]], y, se.y, xlab, ylab,
+                  residuals, rugplot, scale, se,
                   offset.arg = offset.arg, deriv.arg = deriv.arg,
                   overlay = overlay, ...)
   } else {
-    stop("this function has not been written yet") 
+    stop("this function has not been written yet")
   }
 }
 
@@ -402,13 +402,14 @@ vplot.list <-
            llwd = par()$lwd,
            slwd = par()$lwd,
            add.arg = FALSE,
-           one.at.a.time = FALSE, 
+           one.at.a.time = FALSE,
            .include.dots = TRUE,
            noxmean = FALSE,
+           shade = FALSE, shcol = "gray80",
            ...) {
 
 
-  ans <- 
+  ans <-
   list(which.cf = which.cf,
        xlim = xlim, ylim = ylim,
        llty = llty, slty = slty,
@@ -417,7 +418,8 @@ vplot.list <-
        llwd = llwd, slwd = slwd,
        add.arg = add.arg,
        noxmean = noxmean,
-       one.at.a.time = one.at.a.time)
+       one.at.a.time = one.at.a.time,
+       shade = shade, shcol = shcol)
 
   if (.include.dots) {
     c(list(...), ans)
@@ -427,10 +429,9 @@ vplot.list <-
     for (ii in names(default.vals)) {
       replace.val <- !((length(ans[[ii]]) == length(default.vals[[ii]])) &&
             (length(default.vals[[ii]]) > 0) &&
-            (is.logical(all.equal(ans[[ii]], default.vals[[ii]]))) &&
-                        all.equal(ans[[ii]], default.vals[[ii]]))
+            identical(ans[[ii]], default.vals[[ii]]))
 
-      if (replace.val) 
+      if (replace.val)
         return.list[[ii]] <- ans[[ii]]
     }
     if (length(return.list)) {
@@ -465,9 +466,10 @@ vplot.numeric <-
            llwd = par()$lwd,
            slwd = par()$lwd,
            add.arg = FALSE,
-           one.at.a.time = FALSE, 
-           noxmean = FALSE, 
+           one.at.a.time = FALSE,
+           noxmean = FALSE,
            separator = ":",
+           shade = FALSE, shcol = "gray80",
            ...) {
 
 
@@ -477,7 +479,7 @@ vplot.numeric <-
 
     if (length(y)/length(x)  != round(length(y)/length(x)))
       stop("length of 'x' and 'y' do not seem to match")
-    y <- as.matrix(y) 
+    y <- as.matrix(y)
     if (!length(which.cf))
       which.cf <- 1:ncol(y)  # Added 20040807
 
@@ -544,7 +546,7 @@ vplot.numeric <-
                 range(c(ylim, residuals[, which.cf]), na.rm = TRUE)
       } else {
         residuals <- NULL
-        warning("Residuals do not match 'x' in \"", ylab, 
+        warning("Residuals do not match 'x' in \"", ylab,
                 "\" preplot object")
       }
     }
@@ -562,9 +564,9 @@ vplot.numeric <-
     if (!length(which.cf))
       which.cf <- 1:ncol(uy)  # Added 20040807
     if (!add.arg) {
-      matplot(ux, uy[, which.cf], type = "n", 
-              xlim = xlim, ylim = ylim, 
-              xlab = xlab, ylab = ylab, ...) 
+      matplot(ux, uy[, which.cf], type = "n",
+              xlim = xlim, ylim = ylim,
+              xlab = xlab, ylab = ylab, ...)
     }
     matlines(ux, uy[, which.cf],
              lwd = llwd, col = lcol, lty = llty)
@@ -583,7 +585,7 @@ vplot.numeric <-
      matlines(ux, se.lower[, which.cf], lty =  slty, lwd = slwd, col = scol)
     }
   } else {
-    YLAB <- ylab 
+    YLAB <- ylab
 
     pcex <- rep_len(pcex,  ncol(uy))
     pch  <- rep_len(pch ,  ncol(uy))
@@ -598,54 +600,62 @@ vplot.numeric <-
 
     for (ii in 1:ncol(uy)) {
       if (!length(which.cf) ||
-         (length(which.cf) && any(which.cf == ii))) {
+         ( length(which.cf) && any(which.cf == ii))) {
+
+        if (is.Numeric(ylim0, length.arg = 2)) {
+          ylim <- ylim0
+        } else {
+          ylim <- range(ylim0, uy[, ii], na.rm = TRUE)
+          if (se && !is.null(se.y))
+            ylim <- range(ylim0, se.lower[, ii], se.upper[, ii],
+                          na.rm = TRUE)
+          if (!is.null(residuals))
+            ylim <- range(c(ylim, residuals[, ii]), na.rm = TRUE)
+          ylim <- ylim.scale(ylim, scale)
+        }
+        if (ncol(uy) > 1 && length(separator))
+          YLAB <- paste(ylab, separator, ii, sep = "")
 
-          if (is.Numeric(ylim0, length.arg = 2)) {
-            ylim <- ylim0
-          } else {
-            ylim <- range(ylim0, uy[, ii], na.rm = TRUE)
-            if (se && !is.null(se.y))
-              ylim <- range(ylim0, se.lower[, ii], se.upper[, ii],
-                            na.rm = TRUE)
-            if (!is.null(residuals))
-              ylim <- range(c(ylim, residuals[, ii]), na.rm = TRUE)
-            ylim <- ylim.scale(ylim, scale)
+        if (!add.arg) {
+          if (one.at.a.time) {
+            readline("Hit return for the next plot ")
           }
-          if (ncol(uy) > 1 && length(separator))
-            YLAB <- paste(ylab, separator, ii, sep = "")  
-            if (!add.arg) {
-              if (one.at.a.time) {
-                readline("Hit return for the next plot ")
-              }
-              plot(ux, uy[, ii], type = "n", 
-                   xlim = xlim, ylim = ylim, 
-                   xlab = xlab, ylab = YLAB, ...)
-            }
-            lines(ux, uy[, ii], 
-                  lwd = llwd[ii], col = lcol[ii], lty = llty[ii])
-            if (!is.null(residuals))
-              points(x, residuals[, ii], pch = pch[ii],
-                     col = pcol[ii], cex = pcex[ii]) 
-            if (rugplot)
-              rug(jx, col = rcol[ii])
-
-            if (se && !is.null(se.y)) {
-              lines(ux, se.upper[, ii], lty = slty[ii], lwd = slwd[ii],
-                    col = scol[ii])
-              lines(ux, se.lower[, ii], lty = slty[ii], lwd = slwd[ii],
-                    col = scol[ii])
-            }
+           plot(ux, uy[, ii], type = "n",
+                xlim = xlim, ylim = ylim,
+                xlab = xlab, ylab = YLAB, ...)
         }
-    }
-  }
-}
+
+        lines(ux, uy[, ii], lwd = llwd[ii], col = lcol[ii], lty = llty[ii])
+        if (!is.null(residuals))
+          points(x, residuals[, ii], pch = pch[ii],
+                 col = pcol[ii], cex = pcex[ii])
+        if (rugplot)
+          rug(jx, col = rcol[ii])
+
+        if (se && !is.null(se.y)) {
+          if (shade) {
+            polygon(c(ux, rev(ux), ux[1]),
+                    c(se.upper[, ii], rev(se.lower[, ii]), se.upper[1, ii]),
+                    col = shcol, border = NA)
+            lines(ux, uy[, ii], lwd = llwd[ii], col = lcol[ii], lty = llty[ii])
+          } else {
+            lines(ux, se.upper[, ii], lty = slty[ii], lwd = slwd[ii],
+                  col = scol[ii])
+            lines(ux, se.lower[, ii], lty = slty[ii], lwd = slwd[ii],
+                  col = scol[ii])
+          }  # !shade
+        }  # se && !is.null(se.y))
+      }
+    }  # for()
+  }  # overlay
+}  # vplot.numeric()
 
 
 
 vplot.matrix <-
   function(x, y, se.y = NULL, xlab, ylab,
            residuals = NULL, rugplot = FALSE, scale = 0, se = FALSE,
-           offset.arg = 0, deriv.arg = 0, overlay = FALSE, 
+           offset.arg = 0, deriv.arg = 0, overlay = FALSE,
            which.cf = NULL, ...) {
   stop("You shouldn't ever call this function!")
 }
@@ -675,17 +685,17 @@ add.hookey <- function(ch, deriv.arg = 0) {
 
 
 vplot.factor <-
-  function(x, y, se.y = NULL, xlab, ylab, 
-           residuals = NULL, rugplot = FALSE, scale = 0, 
-           se = FALSE, xlim = NULL, ylim = NULL, 
-           offset.arg = 0, deriv.arg = 0, overlay = FALSE, 
+  function(x, y, se.y = NULL, xlab, ylab,
+           residuals = NULL, rugplot = FALSE, scale = 0,
+           se = FALSE, xlim = NULL, ylim = NULL,
+           offset.arg = 0, deriv.arg = 0, overlay = FALSE,
            which.cf = NULL, ...) {
   if (deriv.arg > 0)
     return(NULL)
 
   if (length(y)/length(x)  != round(length(y)/length(x)))
     stop("length of 'x' and 'y' do not seem to match")
-  y <- as.matrix(y) 
+  y <- as.matrix(y)
 
   if (!is.null(se.y))
     se.y <- as.matrix(se.y)
@@ -706,7 +716,7 @@ vplot.factor <-
                     xlab = xlab, ylab = ylab,
                     residuals = residuals,
                     rugplot = rugplot, scale = scale,
-                    se = se, xlim = xlim, ylim = ylim, ...) 
+                    se = se, xlim = xlim, ylim = ylim, ...)
   } else {
     for (ii in 1:ncol(y)) {
       ylab <- rep_len(ylab, ncol(y))
@@ -714,15 +724,15 @@ vplot.factor <-
         ylab <- dimnames(y)[[2]]
       vvplot.factor(x, y[, ii,drop = FALSE],
                     se.y = if (is.null(se.y)) NULL else
-                           se.y[, ii,drop = FALSE], 
+                           se.y[, ii,drop = FALSE],
                     xlab = xlab, ylab = ylab[ii],
                     residuals = if (is.null(residuals))
                         NULL else residuals[, ii,drop = FALSE],
                     rugplot = rugplot, scale = scale,
-                    se = se, xlim = xlim, ylim = ylim, ...) 
+                    se = se, xlim = xlim, ylim = ylim, ...)
 
     }
-  } 
+  }
   invisible(NULL)
 }
 
@@ -733,7 +743,7 @@ vplot.factor <-
 vvplot.factor <-
   function(x, y, se.y = NULL, xlab, ylab,
            residuals = NULL, rugplot = FALSE, scale = 0,
-           se = FALSE, xlim = NULL, ylim = NULL, 
+           se = FALSE, xlim = NULL, ylim = NULL,
            ...) {
 
   M <- ncol(y)
@@ -766,7 +776,7 @@ vvplot.factor <-
       ylim <- range(c(ylim, residuals))
     } else {
       residuals <- NULL
-      warning("Residuals do not match 'x' in \"", ylab, 
+      warning("Residuals do not match 'x' in \"", ylab,
               "\" preplot object")
     }
   }
@@ -811,7 +821,7 @@ vvplot.factor <-
       jux <- uxx[, ii]
       jux <- jux[codex]
       jux <- jux + runif(length(jux), -0.7*min(delta), 0.7*min(delta))
-      if (M == 1) points(jux, residuals[, ii]) else 
+      if (M == 1) points(jux, residuals[, ii]) else
                   points(jux, residuals[, ii], pch = as.character(ii))
     }
   }
@@ -859,7 +869,7 @@ setMethod("plot", "vgam",
 
 
 plotqrrvglm <- function(object,
-               rtype = c("response", "pearson", "deviance", "working"), 
+               rtype = c("response", "pearson", "deviance", "working"),
                ask = FALSE,
                main = paste(Rtype, "residuals vs latent variable(s)"),
                xlab = "Latent Variable",
@@ -873,7 +883,7 @@ plotqrrvglm <- function(object,
                      c("response", "pearson", "deviance", "working"))[1]
   res <- resid(object, type = rtype)
 
-  my.ylab <- if (length(object at misc$ynames)) object at misc$ynames else 
+  my.ylab <- if (length(object at misc$ynames)) object at misc$ynames else
              rep_len(" ", M)
   Rtype <- switch(rtype, pearson = "Pearson", response = "Response",
                   deviance = "Deviance", working = "Working")
@@ -922,7 +932,7 @@ put.caption <- function(text.arg = "(a)",
 
 
 
-setMethod("plot", "psvgam",
+setMethod("plot", "pvgam",
            function(x, y, ...) {
            if (!missing(y))
              stop("cannot process the 'y' argument")
diff --git a/R/plot.vglm.R b/R/plot.vglm.R
index 13c038f..1b34492 100644
--- a/R/plot.vglm.R
+++ b/R/plot.vglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -36,6 +36,8 @@ plotvglm <-
 
 
   presid <- resid(x, type = "pearson")
+  if (!is.matrix(presid) == 1)
+    presid <- as.matrix(presid)
   lapred <- predict(x)
   M <- ncol(lapred)
   for (jay in 1:M) {
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 9b63b6e..9a102ea 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -86,7 +86,7 @@ predict.vgam <-
           return(untransformVGAM(object, answer)) else
           return(answer)
       }
-    } else 
+    } else
     if (type == "response") {
       if (se.fit) {
         stop("cannot handle this option (se.fit = TRUE) currently")
@@ -113,7 +113,7 @@ predict.vgam <-
 
   } else {
 
-    temp.type <- if (type == "link") "response" else type 
+    temp.type <- if (type == "link") "response" else type
 
 
     predictor <- predict.vlm(object, newdata,
@@ -121,7 +121,7 @@ predict.vgam <-
                              se.fit = se.fit,
                              terms.arg = terms.arg,
                              raw = raw,
-                             all = all, offset = offset, 
+                             all = all, offset = offset,
                              dispersion = dispersion,
                              ...)  # deriv.arg = deriv.arg,
   }
@@ -159,7 +159,7 @@ predict.vgam <-
       ncolHlist <- unlist(lapply(Hlist, ncol))
       if (intercept)
         ncolHlist <- ncolHlist[-1]
-    
+
       cs <- if (raw) cumsum(c(1, ncolHlist)) else
                      cumsum(c(1, M + 0 * ncolHlist))
       tmp6 <- vector("list", length(ncolHlist))
@@ -187,7 +187,7 @@ predict.vgam <-
         if (type == "terms") {
           hhh <- tmp6[[ii]]
           if (se.fit) {
-            predictor$fitted.values[, hhh] <- 
+            predictor$fitted.values[, hhh] <-
             predictor$fitted.values[, hhh] + eta.mat
 
             TS <- predictor$sigma^2
@@ -207,7 +207,7 @@ predict.vgam <-
                 }
         } else {
           if (se.fit) {
-            predictor$fitted.values <- predictor$fitted.values + eta.mat 
+            predictor$fitted.values <- predictor$fitted.values + eta.mat
 
             TS <- 1  # out$residual.scale^2
             TS <- predictor$sigma^2
@@ -216,7 +216,7 @@ predict.vgam <-
             predictor$se.fit <- sqrt(predictor$se.fit^2 +
                                      TS * object at var %*% rep_len(1, TT))
           } else {
-            predictor <- predictor + eta.mat 
+            predictor <- predictor + eta.mat
           }
         }
       }
@@ -260,7 +260,7 @@ predict.vgam <-
 
 
     if (deriv.arg >= 1) {
-      v <- attr(if (se.fit) predictor$fitted.values else 
+      v <- attr(if (se.fit) predictor$fitted.values else
           predictor, "vterm.assign")
       is.lin <- is.linear.term(names(v))
         coefmat <- coefvlm(object, matrix.out = TRUE)
@@ -293,14 +293,14 @@ predict.vgam <-
               ans <- coefmat[ii, 1:lindex]
               if (se.fit) {
                 predictor$fitted.values[, index] <-
-                predictor$fitted.values[, index] + 
+                predictor$fitted.values[, index] +
                      (if (deriv.arg == 1)
                       matrix(ans, nrow = nrow(predictor$fitted.values),
                              ncol = lindex, byrow = TRUE) else 0)
               } else {
                 predictor[, index] <- predictor[, index] +
                      (if (deriv.arg == 1)
-                      matrix(ans, nrow = nrow(predictor), 
+                      matrix(ans, nrow = nrow(predictor),
                        ncol = lindex, byrow = TRUE) else 0)
               }
           } else {
@@ -341,7 +341,7 @@ setMethod("predict", "vgam",
 
 
 
-varassign <- function(constraints, n.s.xargument) { 
+varassign <- function(constraints, n.s.xargument) {
 
   if (!length(n.s.xargument))
     stop("length(n.s.xargument) must be > 0")
@@ -357,7 +357,7 @@ varassign <- function(constraints, n.s.xargument) {
     ans[[ii]] <- ptr:(ptr + temp - 1)
     ptr <- ptr + temp
   }
-  ans 
+  ans
 }
 
 
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index baa24cb..fa737e9 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -15,14 +15,25 @@ predictvglm <-
            deriv = 0,
            dispersion = NULL,
            untransform = FALSE,
-           extra = object at extra, ...) {
+           type.fitted = NULL,
+           percentiles = NULL,
+           ...) {
   na.act <- object at na.action
   object at na.action <- list()
 
-  if (missing(extra)) {
+
+
+
+  new.extra <- object at extra
+  if (length(percentiles)) {
+    new.extra$percentiles <- percentiles
+  }
+  if (length(type.fitted)) {
+    new.extra$type.fitted <- type.fitted
   }
 
 
+
   if (deriv != 0)
     stop("'deriv' must be 0 for predictvglm()")
 
@@ -47,9 +58,9 @@ predictvglm <-
                se.fit <- FALSE
                predictor <- predict.vlm(object, newdata = newdata,
                                         type = type, se.fit = se.fit,
-                                        deriv = deriv, 
-                                        dispersion = dispersion, ...) 
-               fv <- object at family@linkinv(predictor, extra)
+                                        deriv = deriv,
+                                        dispersion = dispersion, ...)
+               fv <- object at family@linkinv(predictor, extra = new.extra)
 
 
                fv <- as.matrix(fv)
@@ -58,34 +69,35 @@ predictvglm <-
                if (nrow(fv) == length(dn1) &&
                    ncol(fv) == length(dn2))
                  dimnames(fv) <- list(dn1, dn2)
-
-
                fv
              },
              link = {
                predict.vlm(object, newdata = newdata,
                            type = "response", se.fit = se.fit,
-                           deriv = deriv, dispersion = dispersion, ...) 
+                           deriv = deriv, dispersion = dispersion, ...)
              },
              terms = {
                predict.vlm(object, newdata = newdata,
                            type = type, se.fit = se.fit,
-                           deriv = deriv, dispersion = dispersion, ...) 
+                           deriv = deriv, dispersion = dispersion, ...)
              })  # End of switch
   } else {
     if (is.null(newdata)) {
-      switch(type, 
-             link = object at predictors, 
-             response = object at fitted.values,
+      switch(type,
+             link = object at predictors,
+             response = {
+               object at family@linkinv(eta = object at predictors,
+                                     extra = new.extra)
+             },
              terms = {
                  predict.vlm(object, newdata = newdata,
                              type = type, se.fit = se.fit,
-                             deriv = deriv, dispersion = dispersion, ...) 
+                             deriv = deriv, dispersion = dispersion, ...)
              })
     } else {
       if (!(length(object at offset) == 1 && object at offset == 0))
-        warning("zero offset used") 
-      switch(type, 
+        warning("zero offset used")
+      switch(type,
              response = {
 
 
@@ -93,25 +105,25 @@ predictvglm <-
 
                predictor <- predict.vlm(object, newdata = newdata,
                                         type = type, se.fit = se.fit,
-                                        deriv = deriv, 
+                                        deriv = deriv,
                                         dispersion = dispersion, ...)
 
 
 
                M <- object at misc$M
 
-               fv <- object at family@linkinv(predictor, extra)
-               if (M > 1 && is.matrix(fv)) {
-
-               fv <- as.matrix(fv)
-               dn1 <- dimnames(fv)[[1]]
-               dn2 <- dimnames(object at fitted.values)[[2]]
-               if (nrow(fv) == length(dn1) &&
-                   ncol(fv) == length(dn2))
-                 dimnames(fv) <- list(dn1, dn2)
+               fv <- object at family@linkinv(predictor, extra = new.extra)
+               double.check <- is.null(new.extra$type.fitted)
+               if (M > 1 && is.matrix(fv) && double.check) {
+                 fv <- as.matrix(fv)
+                 dn1 <- dimnames(fv)[[1]]
+                 dn2 <- dimnames(object at fitted.values)[[2]]
+                 if (nrow(fv) == length(dn1) &&
+                     ncol(fv) == length(dn2))
+                   dimnames(fv) <- list(dn1, dn2)
                } else {
                }
-                 fv
+               fv
                },
                link = {
                  predict.vlm(object, newdata = newdata,
@@ -121,7 +133,7 @@ predictvglm <-
                terms = {
                  predict.vlm(object, newdata = newdata,
                              type = type, se.fit = se.fit,
-                             deriv = deriv, dispersion = dispersion, ...) 
+                             deriv = deriv, dispersion = dispersion, ...)
                })  # End of switch
         }
   }  # End of se.fit == FALSE
@@ -129,6 +141,7 @@ predictvglm <-
 
 
 
+
   try.this <- findFirstMethod("predictvglmS4VGAM", object at family@vfamily)
   if (length(try.this)) {
     predn <-
@@ -156,14 +169,14 @@ predictvglm <-
       predn <- napredict(na.act[[1]], predn)
     }
   }
-  
+
   if (untransform) untransformVGAM(object, predn) else predn
 }  # predictvglm
 
 
 
 
-setMethod("predict", "vglm", function(object, ...) 
+setMethod("predict", "vglm", function(object, ...)
   predictvglm(object, ...))
 
 
@@ -173,17 +186,17 @@ setMethod("predict", "vglm", function(object, ...)
 
 
 predict.rrvglm <-
-  function(object, 
-           newdata = NULL, 
+  function(object,
+           newdata = NULL,
            type = c("link", "response", "terms"),
-           se.fit = FALSE, 
+           se.fit = FALSE,
            deriv = 0,
-           dispersion = NULL, 
+           dispersion = NULL,
            extra = object at extra, ...) {
 
   if (se.fit) {
-    stop("20030811; predict.rrvglm(..., se.fit=TRUE) not complete yet") 
-    pred <- 
+    stop("20030811; predict.rrvglm(..., se.fit=TRUE) not complete yet")
+    pred <-
     switch(type,
            response = {
              warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
@@ -191,9 +204,9 @@ predict.rrvglm <-
              se.fit <- FALSE
                predictor <- predict.vlm(object, newdata = newdata,
                                         type = type, se.fit = se.fit,
-                                        deriv = deriv, 
-                                        dispersion = dispersion, ...) 
-             fv <- object at family@linkinv(predictor, extra)
+                                        deriv = deriv,
+                                        dispersion = dispersion, ...)
+             fv <- object at family@linkinv(predictor, extra = extra)
 
 
              fv <- as.matrix(fv)
@@ -210,18 +223,18 @@ predict.rrvglm <-
              type <- "response"
              predict.vlm(object, newdata = newdata,
                          type = type, se.fit = se.fit,
-                         deriv = deriv, dispersion = dispersion, ...) 
+                         deriv = deriv, dispersion = dispersion, ...)
            },
            terms = {
              predict.vlm(object, newdata = newdata,
                          type = type, se.fit = se.fit,
-                         deriv = deriv, dispersion = dispersion, ...) 
+                         deriv = deriv, dispersion = dispersion, ...)
            }
           )
   } else {
     return(predictvglm(object, newdata = newdata,
                        type = type, se.fit = se.fit,
-                       deriv = deriv, 
+                       deriv = deriv,
                        dispersion = dispersion, ...))
   }
 
@@ -240,7 +253,7 @@ predict.rrvglm <-
 }
 
 
-setMethod("predict", "rrvglm", function(object, ...) 
+setMethod("predict", "rrvglm", function(object, ...)
   predict.rrvglm(object, ...))
 
 
@@ -249,7 +262,7 @@ setMethod("predict", "rrvglm", function(object, ...)
 
 
 untransformVGAM <- function(object, pred) {
- 
+
 
   M <- object at misc$M
   Links <- object at misc$link
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index e3b73d3..c5351a1 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -74,7 +74,7 @@ predict.vlm <- function(object,
       as.save <- attr(X, "assign")
       X <- X[rep_len(1, nrow(newdata)), , drop = FALSE]
       dimnames(X) <- list(dimnames(newdata)[[1]], "(Intercept)")
-      attr(X, "assign") <- as.save  # Restored 
+      attr(X, "assign") <- as.save  # Restored
     }
 
     offset <- if (!is.null(off.num <- attr(ttob, "offset"))) {
@@ -83,7 +83,7 @@ predict.vlm <- function(object,
       eval(object at call$offset, newdata)
 
     if (is.smart(object) && length(object at smart.prediction)) {
-      wrapup.smart() 
+      wrapup.smart()
     }
 
     attr(X, "assign") <- attrassigndefault(X, ttob)
@@ -143,7 +143,7 @@ predict.vlm <- function(object,
     coefs <- coefvlm(object)
     vasgn <- attr(X_vlm, "vassign")
 
- 
+
     if (type == "terms") {
       nv <- names(vasgn)
       if (hasintercept)
@@ -164,7 +164,7 @@ predict.vlm <- function(object,
     dname2 <- object at misc$predictors.names
     if (se.fit) {
       object <- as(object, "vlm")  # Coerce
-      fit.summary <- summaryvlm(object, dispersion=dispersion)
+      fit.summary <- summaryvlm(object, dispersion = dispersion)
       sigma <- if (is.numeric(fit.summary at sigma))
         fit.summary at sigma else
         sqrt(deviance(object) / object at df.residual)  # was @ResSS
@@ -223,7 +223,7 @@ predict.vlm <- function(object,
         } else {
           pred <- pred[, cs[-length(cs)], drop = FALSE]
         }
-      
+
         pp <- if (se.fit) ncol(pred$fitted.values) else ncol(pred)
         if (se.fit) {
           dimnames(pred$fitted.values) <- dimnames(pred$se.fit) <- NULL
@@ -240,7 +240,7 @@ predict.vlm <- function(object,
 
       if (raw) {
         kindex <- NULL
-        for (ii in 1:pp) 
+        for (ii in 1:pp)
           kindex <- c(kindex, (ii-1) * M + (1:ncolHlist[ii]))
         if (se.fit) {
           pred$fitted.values <- pred$fitted.values[, kindex, drop = FALSE]
@@ -248,17 +248,19 @@ predict.vlm <- function(object,
         } else {
           pred <- pred[, kindex, drop = FALSE]
         }
-      } 
+      }
 
       temp <- if (raw) ncolHlist else rep_len(M, length(ncolHlist))
       dd <- vlabel(names(ncolHlist), temp, M)
       if (se.fit) {
-        dimnames(pred$fitted.values) <- 
-        dimnames(pred$se.fit) <- list(if (length(newdata))
-                                      dimnames(newdata)[[1]] else dx1, dd)
+        dimnames(pred$fitted.values) <-
+        dimnames(pred$se.fit) <-
+          list(if (length(newdata)) dimnames(newdata)[[1]] else dx1,
+               dd)
       } else {
-        dimnames(pred) <- list(if (length(newdata))
-                               dimnames(newdata)[[1]] else dx1, dd)
+        dimnames(pred) <-
+          list(if (length(newdata)) dimnames(newdata)[[1]] else dx1,
+               dd)
       }
 
       if (!length(newdata) && length(na.act)) {
@@ -293,12 +295,13 @@ predict.vlm <- function(object,
   }
 
   pred
-}
+}  # predict.vlm()
+
 
 
 
 
-setMethod("predict", "vlm", 
+setMethod("predict", "vlm",
           function(object, ...)
           predict.vlm(object, ...))
 
@@ -380,7 +383,7 @@ is.linear.term <- function(ch) {
                    x != "/" & x != "*" & x != "^")
   }
   names(ans) <- ch
-  ans 
+  ans
 }
 
 
diff --git a/R/print.vglm.q b/R/print.vglm.q
index d3381c7..fe64caa 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -8,18 +8,188 @@
 
 
 
+
+
+
+endfpvgam <- function(object,
+                      nonlinear.edf = TRUE,
+                      diag.all = FALSE,
+                      return.endf = TRUE, ...) {
+
+
+
+
+
+  M <- npred(object)
+  n <- nobs(object, type = "lm")
+  wz <- weights(object, type = "working")
+  X.vlm.save <- model.matrix(object, type = "vlm")
+  U <- vchol(wz, M = M, n = n)
+  X.vlm <- mux111(U, X.vlm.save, M = M)
+  X.vlm.aug <- rbind(X.vlm,
+                 model.matrix(object, type = "penalty"))
+
+
+  poststuff <-
+    mgcv::magic.post.proc(X.vlm.aug,
+                          object = object at ospsslot$magicfit, w = NULL)
+
+
+  if (!return.endf)
+    return(poststuff)
+
+
+  which.X.sm.osps <- object at ospsslot$sm.osps.list$which.X.sm.osps
+  all.ncol.Hk <- unlist(lapply(constraints(object, type = "term"), ncol))
+  names.which.X.sm.osps <- names(which.X.sm.osps)
+  endf <- rep_len(NA_real_, sum(all.ncol.Hk[names.which.X.sm.osps]))
+  names(endf) <- vlabel(names.which.X.sm.osps,
+                        all.ncol.Hk[names.which.X.sm.osps],
+                        M = npred(object))
+  use.index <- NULL
+
+
+
+
+  qr1 <- qr(X.vlm.aug)
+  qr2 <- qr(X.vlm)
+  endf.all <-  diag(solve(crossprod(qr.R(qr1)), crossprod(qr.R(qr2))))
+  if (diag.all)
+    return(endf.all)
+
+
+
+
+  startstop <- startstoppvgam(object)
+  for (iterm in 1:length(startstop)) {
+    endf[iterm] <- sum(endf.all[(startstop[[iterm]])])
+  }
+  endf[endf < 1] <- 1  # Cannot be smoother than linear
+
+
+  if (nonlinear.edf) endf - 1 else endf
+}  # endfpvgam()
+
+
+
+
+
+show.pvgam <- function(object) {
+
+  digits <- 3
+
+
+  if (!is.null(cl <- object at call)) {
+    cat("\nCall:\n", paste(deparse(cl), sep = "\n", collapse = "\n"),
+        "\n\n", sep = "")
+  }
+
+
+
+  magicfit <- object at ospsslot$magicfit
+
+
+
+
+  if (FALSE) {
+  XX <- model.matrix(object, type = "vlm")
+  poststuff <-
+    mgcv::magic.post.proc(XX,
+                          object = object at ospsslot$magicfit, w = NULL)
+  }
+
+
+
+  if (FALSE) {
+    edf <- rep_len(NA_real_, n.smooth)
+    cat("\nEstimated degrees of freedom:\n")
+    for (i in 1:n.smooth)
+      edf[i] <- sum(x$edf[x$smooth[[i]]$first.para:x$smooth[[i]]$last.para])
+    edf.str <- format(round(edf, digits = 4), digits = 3, scientific = FALSE)
+    for (i in 1:n.smooth) {
+      cat(edf.str[i], " ", sep = "")
+      if (i%%7 == 0)
+        cat("\n")
+    }
+    cat(" total =", round(sum(poststuff$edf), digits = 2), "\n")
+  }
+
+
+  endf <- endfpvgam(object)
+  cat("\nEstimated nonlinear degrees of freedom:\n")  # based on endfpvgam()
+  print(round(endf, digits = digits + 2), digits = digits, scientific = FALSE)
+
+  if (length(endf) > 1)
+  cat("Total:",
+      format(sum(endf), digits = digits), "\n")
+
+  object at post$endf <- endf  # Good to save this on the object
+
+
+  if (FALSE)
+  cat("\nEstimated degrees of freedom based on poststuff:",
+      format(poststuff$edf, digits = digits),
+      "\nTotal:",
+      format(round(sum(poststuff$edf), digits = digits)), "\n")
+
+
+  cat("\nUBRE score:", format(magicfit$score, digits = digits + 1), "\n\n")
+
+
+  if (length(deviance(object)))
+    cat("Residual deviance:", format(deviance(object)), "\n")
+
+
+  llx <- logLik.vlm(object = object)
+  if (length(llx))
+    cat("Log-likelihood:", format(llx), "\n")
+
+
+
+
+  invisible(object)
+}
+
+
+
+setMethod("show", "pvgam", function(object) show.pvgam(object))
+
+
+
+
+
+
+
+if (!isGeneric("endf"))
+    setGeneric("endf", function(object, ...)
+    standardGeneric("endf"))
+
+
+setMethod("endf", "pvgam", function(object, ...)
+          endfpvgam(object, ...))
+
+setMethod("endf", "summary.pvgam", function(object, ...)
+          endfpvgam(object, ...))
+
+
+
+
+
+
+
+
 show.vglm <- function(object) {
 
   if (!is.null(cl <- object at call)) {
-    cat("Call:\n")
-    dput(cl)
+    cat("\nCall:\n", paste(deparse(cl), sep = "\n", collapse = "\n"),
+        "\n\n", sep = "")
   }
 
   coef <- object at coefficients
   if (any(nas <- is.na(coef))) {
     if (is.null(names(coef)))
-      names(coef) <- paste("b", seq_along(coef), sep = "")  
-    cat("\nCoefficients: (", sum(nas), 
+      names(coef) <- paste("b", seq_along(coef), sep = "")
+    cat("\nCoefficients: (", sum(nas),
         " not defined because of singularities)\n", sep = "")
   } else {
     cat("\nCoefficients:\n")
@@ -74,18 +244,14 @@ show.vglm <- function(object) {
 
 
 
-
-
-
-
 show.vgam <- function(object) {
 
   digits <- 2
 
 
   if (!is.null(cl <- object at call)) {
-    cat("Call:\n")
-    dput(cl)
+    cat("\nCall:\n", paste(deparse(cl), sep = "\n", collapse = "\n"),
+        "\n\n", sep = "")
   }
 
   coef <- object at coefficients
@@ -145,107 +311,4 @@ setMethod("show", "vgam", function(object) show.vgam(object))
 
 
 
- if (FALSE)
-print.vglm <- function(x, ...) {
-  if (!is.null(cl <- x at call)) {
-    cat("Call:\n")
-    dput(cl)
-  }
-
-  coef <- x at coefficients
-  if (any(nas <- is.na(coef))) {
-    if (is.null(names(coef)))
-      names(coef) <- paste("b", seq_along(coef), sep = "")  
-    cat("\nCoefficients: (", sum(nas), 
-        " not defined because of singularities)\n", sep = "")
-  } else {
-    cat("\nCoefficients:\n")
-  }
-  print.default(coef, ...)
-
-  rank <- x at rank
-  if (!length(rank))
-    rank <- sum(!nas)
-  nobs <- if (length(x at df.total)) x at df.total else
-          length(x at residuals)
-  rdf <- x at df.residual
-  if (!length(rdf))
-    rdf <- nobs - rank
-  cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
-
-  if (length(deviance(x)))
-    cat("Residual deviance:", format(deviance(x)), "\n")
-  llx <- logLik.vlm(object = x)
-
-  if (length(llx))
-    cat("Log-likelihood:", format(llx), "\n")
-
-  if (length(x at criterion)) {
-    ncrit <- names(x at criterion)
-    for (ii in ncrit)
-      if (ii != "loglikelihood" && ii != "deviance")
-          cat(paste(ii, ":", sep = ""),
-              format(x at criterion[[ii]]), "\n")
-  }
-
-  invisible(x)
-}
-
-
-
- if (FALSE)
-print.vgam <- function(x, digits = 2, ...) {
-
-  if (!is.null(cl <- x at call)) {
-    cat("Call:\n")
-    dput(cl)
-  }
-
-  coef <- x at coefficients
-  nas <- is.na(coef)
-
-  rank <- x at rank
-  if (is.null(rank))
-      rank <- sum(!nas)
-  nobs <- if (length(x at df.total)) x at df.total else
-          length(x at residuals)
-  rdf <- x at df.residual
-  if (is.null(rdf))
-    rdf <- nobs - rank
-  cat("\nDegrees of Freedom:", nobs, "Total;",
-      format(round(rdf, dig = digits)), "Residual\n")
-
-  if (length(deviance(x)))
-    cat("Residual deviance:", format(deviance(x)), "\n")
-
-  llx <- logLik.vlm(object = x)
-
-  if (length(llx))
-    cat("Log-likelihood:", format(llx), "\n")
-
-  criterion <- attr(terms(x), "criterion")  # 20030811; x at terms$terms,
-  if (!is.null(criterion) &&
-      criterion != "coefficients")
-    cat(paste(criterion, ":", sep = ""), format(x[[criterion]]), "\n")
-
-  invisible(x)
-}
-
-
-
- if (FALSE) {
-
-setMethod("print",  "vlm", function(x, ...)  print.vlm(x, ...))
-setMethod("print", "vglm", function(x, ...) print.vglm(x, ...))
-setMethod("print", "vgam", function(x, ...) print.vgam(x, ...))
-
-
-
-    setMethod("show",  "vlm", function(object)  print.vlm(object))
-    setMethod("show", "vglm", function(object) print.vglm(object))
-    setMethod("show", "vgam", function(object) print.vgam(object))
-
-}
-
-
 
diff --git a/R/print.vlm.q b/R/print.vlm.q
index 66c8f1b..bdfae9d 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -22,8 +22,8 @@ show.vlm <- function(object) {
   rank <- object at rank
   if (is.null(rank))
     rank <- sum(!is.na(coef))
-  n <- object at misc$n 
-  M <- object at misc$M 
+  n <- object at misc$n
+  M <- object at misc$M
   nobs <- if (length(object at df.total)) object at df.total else n * M
   rdf <- object at df.residual
   if (is.null(rdf))
@@ -67,8 +67,8 @@ print.vlm <- function(x, ...) {
   rank <- x at rank
   if (is.null(rank))
     rank <- sum(!is.na(coef))
-  n <- x at misc$n 
-  M <- x at misc$M 
+  n <- x at misc$n
+  M <- x at misc$M
   nobs <- if (length(x at df.total)) x at df.total else n * M
   rdf <- x at df.residual
   if (is.null(rdf))
diff --git a/R/profilevglm.R b/R/profilevglm.R
new file mode 100644
index 0000000..282da8c
--- /dev/null
+++ b/R/profilevglm.R
@@ -0,0 +1,303 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+profilevglm <-
+  function(object, which = 1:p.vlm, alpha = 0.01,
+           maxsteps = 10, del = zmax/5, trace = NULL, ...) {
+
+
+
+
+  Pnames <- names(B0 <- coef(object))
+  nonA <- !is.na(B0)
+  if (any(is.na(B0)))
+    stop("currently cannot handle NA-valued regression coefficients")
+  pv0 <- t(as.matrix(B0))  # 1 x p.vlm
+
+
+
+  p.vlm <- length(Pnames)
+  if (is.character(which))
+    which <- match(which, Pnames)
+  summ <- summary(object)
+  std.err <- coef(summ)[, "Std. Error", drop = FALSE]
+
+
+
+  M <- npred(object)
+  Xm2 <- model.matrix(object, type = "lm2")  # Could be a 0 x 0 matrix
+  if (!length(Xm2))
+     Xm2 <- NULL  # Make sure. This is safer
+  clist <- constraints(object, type = "lm")  # type = c("lm", "term")
+
+
+
+  mf <- model.frame(object)
+
+  Y <- model.response(mf)
+  if (!is.factor(Y))
+    Y <- as.matrix(Y)
+
+
+  n.lm <- nobs(object, type = "lm")
+  OOO <- object@offset
+  if (!length(OOO) || all(OOO == 0))
+    OOO <- matrix(0, n.lm, M)
+
+
+
+  mt <- attr(mf, "terms")
+
+
+
+
+  Wts <- model.weights(mf)
+  if (length(Wts) == 0L)
+    Wts <- rep(1, n.lm)  # Safest (uses recycling and is a vector)
+  Original.de <- deviance(object)  # Could be NULL
+  if (!(use.de <- is.Numeric(Original.de)))
+    Original.ll <- logLik(object)
+  DispersionParameter <- summ@dispersion
+  if (!all(DispersionParameter == 1))
+    stop("Currently can only handle dispersion parameters ",
+         "that are equal to 1")
+  X.lm  <- model.matrix(object, type =  "lm")
+  X.vlm <- model.matrix(object, type = "vlm")
+  fam <- object@family
+
+
+
+
+
+  quasi.type <- if (length(tmp3 <- fam at infos()$quasi.type))
+    tmp3 else FALSE
+  if (quasi.type)
+    stop("currently this function cannot handle quasi-type models",
+         " or models with an estimated dispersion parameter")
+
+
+  zmax <- sqrt(qchisq(1 - alpha, 1))
+  profName <- "z"
+
+
+  prof <- vector("list", length = length(which))
+  names(prof) <- Pnames[which]
+
+
+  for (i in which) {
+    zi <- 0
+    pvi <- pv0
+    aa <- nonA
+    aa[i] <- FALSE
+    X.vlm.i <- X.vlm[, aa, drop = FALSE]
+    X.lm.i  <-  X.lm  # Try this
+
+
+ # This is needed by vglm.fit():
+    attr(X.vlm.i, "assign") <- attr(X.vlm, "assign")  # zz; this is wrong!
+    attr( X.lm.i, "assign") <- attr( X.lm, "assign")
+
+
+    if (is.logical(trace))
+      object@control$trace <- trace
+
+
+    pnamesi <- Pnames[i]
+    for (sgn in c(-1, 1)) {
+      if (is.logical(trace) && trace)
+        message("\nParameter: ", pnamesi, " ",
+                c("down", "up")[(sgn + 1)/2 + 1])
+      step <- 0
+      zedd <- 0
+      LPmat <- matrix(c(X.vlm[, nonA, drop = FALSE] %*% B0[nonA]),
+                      n.lm, M, byrow = TRUE) + OOO
+
+
+      while ((step <- step + 1) < maxsteps &&
+             abs(zedd) < zmax) {
+        betai <- B0[i] + sgn * step * del * std.err[Pnames[i], 1]
+        ooo <- OOO + matrix(X.vlm[, i] * betai, n.lm, M, byrow = TRUE)
+
+
+
+
+        fm <- vglm.fit(x = X.lm.i,  # Possibly use X.lm.i or else X.lm
+                       y = Y, w = Wts,
+                       X.vlm.arg = X.vlm.i,  # X.vlm,
+                       Xm2 = Xm2, Terms = mt,
+                       constraints = clist, extra = object@extra,
+                       etastart = LPmat,
+                       offset = ooo, family = fam,
+                       control = object@control)
+
+
+
+        fmc <- fm$coefficients
+        LPmat <- matrix(X.vlm.i %*% fmc, n.lm, M, byrow = TRUE) + ooo
+        ri <- pv0
+        ri[, names(fmc)] <- fmc  # coef(fm)
+        ri[, pnamesi] <- betai
+        pvi <- rbind(pvi, ri)
+        zee <- if (use.de) {
+          fm$crit.list[["deviance"]] - Original.de
+        } else {
+          2 * (Original.ll - fm$crit.list[["loglikelihood"]])
+        }
+        if (zee > -1e-3) {
+          zee <- max(zee, 0)
+        } else {
+          stop("profiling has found a better solution, ",
+               "so original fit had not converged")
+        }
+        zedd <- sgn * sqrt(zee)
+        zi <- c(zi, zedd)
+      }  # while
+    }  # for sgn
+    si. <- order(zi)
+    prof[[pnamesi]] <- structure(data.frame(zi[si.]), names = profName)
+    prof[[pnamesi]]$par.vals <- pvi[si., ,drop = FALSE]
+  }  # for i
+
+
+  val <- structure(prof, original.fit = object, summary = summ)
+  class(val) <- c("profile.glm", "profile")
+  val
+}
+
+
+
+
+
+
+if (!isGeneric("profile"))
+    setGeneric("profile",
+               function(fitted, ...)
+               standardGeneric("profile"),
+           package = "VGAM")
+
+
+setMethod("profile", "vglm",
+          function(fitted, ...)
+          profilevglm(object = fitted, ...))
+
+
+
+
+
+
+
+vplot.profile <-
+  function(x, ...) {
+  nulls <- sapply(x, is.null)
+  if (all(nulls)) return(NULL)
+  x <- x[!nulls]
+  nm <- names(x)
+  nr <- ceiling(sqrt(length(nm)))
+  oldpar <- par(mfrow = c(nr, nr))
+  on.exit(par(oldpar))
+  for (nm in names(x)) {
+    tau <- x[[nm]][[1L]]
+    parval <- x[[nm]][[2L]][, nm]
+    dev.hold()
+    plot(parval, tau, xlab = nm, ylab = "tau", type = "n")
+    if (sum(tau == 0) == 1) points(parval[tau == 0], 0, pch = 3)
+    splineVals <- spline(parval, tau)
+    lines(splineVals$x, splineVals$y)
+    dev.flush()
+  }
+}
+
+
+
+vpairs.profile <-
+function(x, colours = 2:3, ...) {
+  parvals <- lapply(x, "[[", "par.vals")
+
+
+  rng <- apply(do.call("rbind", parvals), 2L, range, na.rm = TRUE)
+  Pnames <- colnames(rng)
+  npar <- length(Pnames)
+  coefs <- coef(attr(x, "original.fit"))
+  form <- paste(as.character(formula(attr(x, "original.fit")))[c(2, 1, 3)],
+                collapse = "")
+  oldpar <- par(mar = c(0, 0, 0, 0), mfrow = c(1, 1),
+                oma = c(3, 3, 6, 3), las = 1)
+  on.exit(par(oldpar))
+  fin <- par("fin")
+  dif <- (fin[2L] - fin[1L])/2
+  adj <- if (dif > 0) c(dif, 0, dif, 0) else c(0, -dif, 0, -dif)
+  par(omi = par("omi") + adj)
+  cex <- 1 + 1/npar
+  frame()
+  mtext(form, side = 3, line = 3, cex = 1.5, outer = TRUE)
+  del <- 1/npar
+  for (i in 1L:npar) {
+    ci <- npar - i
+    pi <- Pnames[i]
+    for (j in 1L:npar) {
+      dev.hold()
+      pj <- Pnames[j]
+      par(fig = del * c(j - 1, j, ci, ci + 1))
+      if (i == j) {
+        par(new = TRUE)
+        plot(rng[, pj], rng[, pi], axes = FALSE,
+             xlab = "", ylab = "", type = "n")
+        op <- par(usr = c(-1, 1, -1, 1))
+        text(0, 0, pi, cex = cex, adj = 0.5)
+        par(op)
+      } else {
+        col <- colours
+        if (i < j) col <- col[2:1]
+        if (!is.null(parvals[[pj]])) {
+          par(new = TRUE)
+          plot(spline(x <- parvals[[pj]][, pj],
+                      y <- parvals[[pj]][, pi]),
+               type = "l", xlim = rng[, pj],
+               ylim = rng[, pi], axes = FALSE,
+               xlab = "", ylab = "", col = col[2L])
+          pu <- par("usr")
+          smidge <- 2/100 * (pu[4L] - pu[3L])
+          segments(x, pmax(pu[3L], y - smidge),
+                   x, pmin(pu[4L], y + smidge))
+        } else
+          plot(rng[, pj], rng[, pi], axes = FALSE,
+               xlab = "", ylab = "", type = "n")
+        if (!is.null(parvals[[pi]])) {
+          lines(x <- parvals[[pi]][, pj],
+                y <- parvals[[pi]][, pi],
+                type = "l", col = col[1L])
+          pu <- par("usr")
+          smidge <- 2/100 * (pu[2L] - pu[1L])
+          segments(pmax(pu[1L], x - smidge), y,
+                   pmin(pu[2L], x + smidge), y)
+        }
+        points(coefs[pj], coefs[pi], pch = 3, cex = 3)
+      }
+      if (i == npar) axis(1)
+      if (j == 1) axis(2)
+      if (i == 1) axis(3)
+      if (j == npar) axis(4)
+      dev.flush()
+    }
+  }
+  par(fig = c(0, 1, 0, 1))
+  invisible(x)
+}
+
+
+
+
+
diff --git a/R/psfun.R b/R/psfun.R
deleted file mode 100644
index 198435c..0000000
--- a/R/psfun.R
+++ /dev/null
@@ -1,183 +0,0 @@
-# These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
-# All rights reserved.
-
-
-
-
-
-
-
-ps <-
-  function(x,
-           ...,
-           ps.intervals = NULL,
-           lambda = 0, degree = 2, order = 2,
-           ridge.adj = 1e-5, ridge.inv = 0.0001,
-           spillover = 0.01, maxlambda = 1e4) {
-
-
-  xs <- substitute(x)
-  ans <- as.character(xs)
-  x.index <- as.vector(x)
-
-
-
-
-  x.orig <- x.index
-  xdots <- list(...)
-  uses.xij <- length(xdots) > 0
-  if (uses.xij)
-    x.index <- as.vector(c(x.index, unlist(xdots)))
-  if (is.null(ps.intervals))
-    ps.intervals <- ceiling(1.5 * log(length(unique(x.index))))
-
-
-
-  number.knots <- ps.intervals + 2 * degree + 1
-  xl <- min(x.index)
-  xr <- max(x.index)
-
-
-
- if (smart.mode.is("read")) {
-    smartlist  <- get.smart()
-    xl <- smartlist$xl  # Overwrite its value
-    xr <- smartlist$xr  # Overwrite its value
-    ps.intervals <- smartlist$ps.intervals  # Ditto
-    number.knots <- ps.intervals + 2 * degree + 1  # Redo
-    lambda    <- smartlist$lambda
-    degree    <- smartlist$degree
-    order     <- smartlist$order
-    ridge.adj <- smartlist$ridge.adj
-    ridge.inv <- smartlist$ridge.inv
-    spillover <- smartlist$spillover
-    maxlambda <- smartlist$maxlambda
-    maXX      <- smartlist$maXX
-    Cmat      <- smartlist$Cmat
-  } else {
-    maXX      <- NULL
-    Cmat      <- NULL
-  }
-
-  xmax <- xr + spillover * (xr - xl)
-  xmin <- xl - spillover * (xr - xl)
-  dx <- (xmax - xmin) / ps.intervals
-  nx <- names(x.index)
-  nax <- is.na(x.index)
-  if (nas <- any(nax))
-    x.index <- x[!nax]
-  sorder <- degree + 1
-  if (length(ps.intervals)) {
-    nAknots <- ps.intervals - 1
-    if (nAknots < 1) {
-      nAknots <- 1
-      warning("ps.intervals was too small; have used 2")
-    }
-
-
-
-
-
-
-    if (nAknots > 0) {
-      Aknots <- seq(from = xmin - degree * dx,
-                    to   = xmax + degree * dx, by = dx)
-    } else {
-      knots <- NULL
-    }
-  }
-  basis <- splineDesign(Aknots, x.index, sorder, 0 * x.index)
-  n.col <- ncol(basis)
-  if (nas) {
-    nmat <- matrix(NA_real_, length(nax), n.col)
-    nmat[!nax, ] <- basis
-    basis <- nmat
-  }
-  dimnames(basis) <- list(1:nrow(basis), 1:n.col)
-  if ((order - n.col + 1) > 0) {
-    order <- n.col - 1
-    warning("order was too large; have used ", n.col - 1)
-  }
-  if (any(lambda < 0)) {
-    lambda[lambda < 0] <- 0
-    warning("some lambda values are negative : have used lambda = ",
-            paste(lambda, collapse = ", "))
-  }
-
-  if (any(lambda > maxlambda)) {
-    lambda[lambda > maxlambda] <- maxlambda
-    warning("some lambda values are > ", maxlambda, ": ",
-                  "for stability have used lambda = ",
-            paste(lambda, collapse = ", "))
-  }
-  aug <- if (order > 0) diff(diag(n.col), diff = order) else diag(n.col)
-
-  
-  Pen <- t(aug) %*% aug
-  pen.aug <- (Pen + t(Pen))/2
-
-
-
-  if (is.null(maXX))
-    maXX <- mean(abs(t(basis) %*% basis))
- maS <- mean(abs(pen.aug))/maXX
-
-
-
-  pen.aug <- pen.aug / maS
-  kk <- ncol(basis)
-  if (is.null(Cmat))
-    Cmat <- matrix(colSums(basis), 1, kk)
-
-
-  qrCt <- qr(t(Cmat))
-  jay <- nrow(Cmat)  # 1
-  XZ <- t(qr.qty(qrCt, t(basis))[(jay+1):kk, ])
-
-
-  ZtSZ <- qr.qty(qrCt, t(qr.qty(qrCt, t(pen.aug))))[(jay+1):kk,
-                                                    (jay+1):kk]
-
-
-  basis <- XZ
-
-
-  if (smart.mode.is("write"))
-    put.smart(list(xl        = xl,
-                   xr        = xr,
-                   ps.intervals = ps.intervals,
-                   lambda    = lambda,
-                   degree    = degree,
-                   order     = order,
-                   ridge.adj = ridge.adj,
-                   ridge.inv = ridge.inv,
-                   spillover = spillover,
-                   maxlambda = maxlambda,
-                   maXX      = maXX,
-                   Cmat      = Cmat))
-
-
-
-
-  basis <- basis[seq_along(x.orig), , drop = FALSE]
-
-
-
-
-  attr(basis, "S.arg") <- ZtSZ
-
-  attr(basis, "degree") <- degree
-  attr(basis, "knots") <- Aknots
-  attr(basis, "lambda") <- lambda  # Vector
-  attr(basis, "order") <- order
-  attr(basis, "ps.intervals") <- ps.intervals
-  attr(basis, "ps.xargument") <- ans
-  attr(basis, "ridge.adj") <- ridge.adj
-  attr(basis, "ridge.inv") <- ridge.inv
-  basis
-}
-
-
-
-
diff --git a/R/psv2magic.R b/R/psv2magic.R
index 67acbaf..f4dbe2b 100644
--- a/R/psv2magic.R
+++ b/R/psv2magic.R
@@ -1,37 +1,38 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
 
  psv2magic <-
-    function(x.VLM, constraints, lambda.vlm, ps.list) {
+    function(x.VLM, constraints, spar.vlm, sm.osps.list) {
 
 
 
 
   colperm <- function(x, from, to) {
+
     ncx <- ncol(x)
     if (length(from) != length(to) ||
         any(from != round(from)) ||
-        any(from < 1 | from > ncx) ||
+        any(from < 1 | ncx < from) ||
         any(duplicated(from)) ||
         any(sort(from) != sort(to)))
       stop("invalid column permutation indices")
-    perm <- seq(length = ncx)
+    perm <- seq_len(ncx)
     perm[to] <- perm[from]
     x[, perm]
   }
 
 
 
-  assignx <- ps.list$assignx
+  assignx <- sm.osps.list$assignx
   nassignx <- names(assignx)
-  indexterms <- ps.list$indexterms
-  which.X.ps <- ps.list$which.X.ps
-  term.labels <- ps.list$term.labels
-  ncol.X.ps <- sapply(which.X.ps, length)
+  indexterms <- sm.osps.list$indexterms
+  which.X.sm.osps <- sm.osps.list$which.X.sm.osps
+  term.labels <- sm.osps.list$term.labels
+  ncol.X.sm.osps <- sapply(which.X.sm.osps, length)
   ncolHlist.model <- unlist(lapply(constraints, ncol))
 
 
@@ -43,10 +44,9 @@
 
 
   ncol.H.ps <- ncolHlist.new[indexterms]
-  num.ps.terms <- length(which.X.ps)
+  num.osps.terms <- length(which.X.sm.osps)
 
 
-  allterms <- length(term.labels)
   ncol.allterms <- sapply(assignx, length)
 
   ncol.model <- if (names(constraints)[[1]] == "(Intercept)")
@@ -59,39 +59,40 @@
     if (indexterms[ii]) {
       jay <- jay + 1
       perm.list[[jay]] <-
-        matrix(jjoffset + 1:(ncol.X.ps[jay] * ncol.H.ps[jay]),
+        matrix(jjoffset + 1:(ncol.X.sm.osps[jay] * ncol.H.ps[jay]),
+               nrow = ncol.X.sm.osps[jay],  # Redundant really
                ncol = ncol.H.ps[jay], byrow = TRUE)
-      jjoffset <- jjoffset +  ncol.H.ps[[jay]] * ncol.X.ps[[jay]]
+      jjoffset <- jjoffset + ncol.H.ps[[jay]] * ncol.X.sm.osps[[jay]]
     } else {
       jjoffset <- jjoffset + ncolHlist.new[ii] * ncol.model[ii]
     }
-  }
+  }  # for ii
   vindex.min <- sapply(perm.list, min)  # function(x) min(x)
   vindex.max <- sapply(perm.list, max)  # function(x) max(x)
-  o1 <- vector("list", length(ncol.H.ps))  # list()
+  oo1 <- vector("list", length(ncol.H.ps))  # list()
   for (ii in seq_along(ncol.H.ps)) {
-    o1[[ii]] <- vindex.min[ii]:vindex.max[ii]
+    oo1[[ii]] <- seq.int(vindex.min[ii], vindex.max[ii])
   }
-  ooo <- unlist(o1)  # do.call("c", o1)
-  ppp <- unlist(perm.list)  # do.call("c", perm.list)
+  ooo <- unlist(oo1, use.names = FALSE)  # do.call("c", oo1)
+  ppp <- unlist(perm.list, use.names = FALSE)  # do.call("c", perm.list)
 
 
-  off.list <- vector("list", num.ps.terms)  # list()
-  for (ii in 1:num.ps.terms) {
+  OFF.list <- vector("list", num.osps.terms)  # list()
+  for (ii in 1:num.osps.terms) {
     index <- 0
-    off.list[[ii]] <- numeric()
-    for (jay in 1: ncol.H.ps[ii]) {
-      off.list[[ii]][jay] <- vindex.min[ii] + index
-      index <- ncol.X.ps[ii] * jay
+    OFF.list[[ii]] <- numeric()
+    for (jay in 1:(ncol.H.ps[ii])) {
+      OFF.list[[ii]][jay] <- vindex.min[ii] + index
+      index <- ncol.X.sm.osps[ii] * jay
     }
   }
 
-  rl <-
-    list(x.VLM.new = colperm(x.VLM, ppp, ooo),
-         sp = unlist(lambda.vlm),
-         S.arg = rep(ps.list$S.arg, ncol.H.ps),  # Argument 'S' of magic()
-         off = unlist(off.list))
-  rl
-}
+
+  list(x.VLM.new = if (identical(ppp, ooo)) x.VLM else
+                   colperm(x.VLM, ppp, ooo),
+       sp = unlist(spar.vlm),
+       S.arg = rep(sm.osps.list$S.arg, ncol.H.ps),  # Argument 'S' of magic()
+       OFF = unlist(OFF.list))
+}  # psv2magic
 
 
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index 4b81d1a..de9cceb 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -94,24 +94,24 @@ qrrvglm.control <- function(Rank = 1,
     if (length(isd.latvar) > 1 && any(diff(isd.latvar) > 0))
         stop("successive isd.latvar values must not increase")
     if (!is.Numeric(epsilon, positive = TRUE,
-                    length.arg = 1)) 
+                    length.arg = 1))
         stop("bad input for 'epsilon'")
     if (!is.Numeric(Etamat.colmax, positive = TRUE,
                     length.arg = 1) ||
         Etamat.colmax < Rank)
         stop("bad input for 'Etamat.colmax'")
-    if (!is.Numeric(Hstep, positive = TRUE, 
-                   length.arg = 1)) 
+    if (!is.Numeric(Hstep, positive = TRUE,
+                   length.arg = 1))
         stop("bad input for 'Hstep'")
     if (!is.Numeric(maxitl, positive = TRUE,
-                    length.arg = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE))
         stop("bad input for 'maxitl'")
     if (!is.Numeric(imethod, positive = TRUE,
-                    length.arg = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE))
         stop("bad input for 'imethod'")
     if (!is.Numeric(Maxit.optim, integer.valued = TRUE, positive = TRUE))
         stop("Bad input for 'Maxit.optim'")
-    if (!is.Numeric(MUXfactor, positive = TRUE)) 
+    if (!is.Numeric(MUXfactor, positive = TRUE))
         stop("bad input for 'MUXfactor'")
     if (any(MUXfactor < 1 | MUXfactor > 10))
         stop("MUXfactor values must lie between 1 and 10")
@@ -119,15 +119,15 @@ qrrvglm.control <- function(Rank = 1,
                     integer.valued = TRUE, positive = TRUE))
         stop("Bad input for 'optim.maxit'")
     if (!is.Numeric(Rank, positive = TRUE,
-                    length.arg = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE))
         stop("bad input for 'Rank'")
     if (!is.Numeric(sd.Cinit, positive = TRUE,
-                    length.arg = 1)) 
+                    length.arg = 1))
         stop("bad input for 'sd.Cinit'")
     if (I.tolerances && !eq.tolerances)
         stop("'eq.tolerances' must be TRUE if 'I.tolerances' is TRUE")
     if (!is.Numeric(Bestof, positive = TRUE,
-                    length.arg = 1, integer.valued = TRUE)) 
+                    length.arg = 1, integer.valued = TRUE))
         stop("bad input for 'Bestof'")
 
 
@@ -137,9 +137,9 @@ qrrvglm.control <- function(Rank = 1,
 
     if ((SmallNo < .Machine$double.eps) ||
        (SmallNo > .0001))
-      stop("SmallNo is out of range") 
+      stop("SmallNo is out of range")
     if (any(Parscale <= 0))
-       stop("Parscale must contain positive numbers only") 
+       stop("Parscale must contain positive numbers only")
 
     if (!is.logical(checkwz) ||
         length(checkwz) != 1)
@@ -169,7 +169,7 @@ qrrvglm.control <- function(Rank = 1,
            maxitl = maxitl,
            imethod = imethod,
            Maxit.optim = Maxit.optim,
-           min.criterion = TRUE,  # needed for calibrate 
+           min.criterion = TRUE,  # needed for calibrate
            MUXfactor = rep_len(MUXfactor, Rank),
            noRRR = noRRR,
            optim.maxit = optim.maxit,
diff --git a/R/qtplot.q b/R/qtplot.q
index 43929e7..e868265 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -12,9 +12,9 @@
 
 
 
- 
- 
- 
+
+
+
 qtplot.lms.bcn <- function(percentiles = c(25, 50, 75),
                            eta = NULL, yoffset = 0) {
 
@@ -28,11 +28,11 @@ qtplot.lms.bcn <- function(percentiles = c(25, 50, 75),
                              mu     = eta[, 2],
                              sigma  = eta[, 3])
   }
-  answer 
+  answer
 }
- 
- 
- 
+
+
+
 qtplot.lms.bcg <- function(percentiles = c(25,50,75),
                            eta = NULL, yoffset = 0) {
 
@@ -50,11 +50,11 @@ qtplot.lms.bcg <- function(percentiles = c(25,50,75),
     answer[, ii] <- eta[, 2] *
                     (qgamma(ccc, shape = shape)/shape)^(1/lambda)
   }
-  answer 
+  answer
 }
- 
- 
-qtplot.lms.yjn2 <- 
+
+
+qtplot.lms.yjn2 <-
 qtplot.lms.yjn <- function(percentiles = c(25,50,75),
                            eta = NULL, yoffset = 0) {
 
@@ -70,10 +70,10 @@ qtplot.lms.yjn <- function(percentiles = c(25,50,75),
     ccc <- mu + sigma * qnorm(cc[ii]/100)
     answer[, ii] <- yeo.johnson(ccc, lambda, inverse= TRUE) - yoffset
   }
-  answer 
+  answer
 }
- 
- 
+
+
 qtplot.default <- function(object, ...) {
 
   warning("no methods function. Returning the object")
@@ -85,14 +85,14 @@ qtplot.default <- function(object, ...) {
 "qtplot.vglm" <- function(object, Attach= TRUE, ...) {
 
   LL <- length(object@family@vfamily)
-  newcall <- paste("qtplot.", object@family@vfamily[LL], 
+  newcall <- paste("qtplot.", object@family@vfamily[LL],
                    "(object, ...)", sep = "")
   newcall <- parse(text = newcall)[[1]]
 
   if (Attach) {
     object@post$qtplot <- eval(newcall)
     invisible(object)
-  } else 
+  } else
     eval(newcall)
 }
 
@@ -109,7 +109,7 @@ qtplot.lmscreg <- function(object,
   if (same) {
     fitted.values <- if (!length(newdata))
       object@fitted.values else {
-      predict(object, newdata = newdata, type = "response") 
+      predict(object, newdata = newdata, type = "response")
     }
     fitted.values <- as.matrix(fitted.values)
   } else {
@@ -154,13 +154,13 @@ qtplot.lmscreg <- function(object,
 
   list(fitted.values = fitted.values, percentiles = percentiles)
 }
-    
- 
+
+
 
 plotqtplot.lmscreg <-
   function(fitted.values, object,
            newdata = NULL,
-           percentiles = object@misc$percentiles, 
+           percentiles = object@misc$percentiles,
            lp = NULL,
            add.arg = FALSE,
            y = if (length(newdata)) FALSE else TRUE,
@@ -173,14 +173,14 @@ plotqtplot.lmscreg <-
            xlim = NULL, ylim = NULL,
            llty.arg = par()$lty,
            lcol.arg = par()$col, llwd.arg = par()$lwd,
-           tcol.arg = par()$col, 
+           tcol.arg = par()$col,
            tadj = 1, ...) {
 
 
 
   if (!length(newdata)) {
     X <- model.matrixvlm(object, type = "lm")
-    if (is.matrix(X) && length(object@y) && ncol(X)==2 && 
+    if (is.matrix(X) && length(object@y) && ncol(X)==2 &&
        dimnames(X)[[2]][1] == "(Intercept)") {
       xx <- X[, 2]
       if (is.null(xlab)) {
@@ -198,7 +198,7 @@ plotqtplot.lmscreg <-
         if (!is.numeric(ylim))
           ylim <- c(min(fred), max(fred))
         matplot(x = xx, y = fred,
-                xlab = xlab, ylab = ylab, type = "n", 
+                xlab = xlab, ylab = ylab, type = "n",
                 xlim = xlim, ylim = ylim, ...)
       }
 
@@ -208,7 +208,7 @@ plotqtplot.lmscreg <-
     } else {
       warning("there is not a single covariate. ",
               "Returning the object.")
-      return(fitted.values) 
+      return(fitted.values)
     }
   } else {
 
@@ -218,13 +218,13 @@ plotqtplot.lmscreg <-
        length(object@s.xargument[firstterm]))
       firstterm <-  object@s.xargument[firstterm]
 
-    xx <- newdata[[firstterm]] 
+    xx <- newdata[[firstterm]]
     if (!is.numeric(xx))
       stop("couldn't extract the 'primary' variable from newdata")
 
     if (!add.arg) {
-      if (is.null(xlab)) 
-        xlab <- firstterm 
+      if (is.null(xlab))
+        xlab <- firstterm
       if (!is.numeric(xlim))
         xlim <- if (label)
             c(min(xx), max(xx)+size.label*diff(range(xx))) else
@@ -232,7 +232,7 @@ plotqtplot.lmscreg <-
       if (!is.numeric(ylim))
         ylim <- c(min(fitted.values), max(fitted.values))
       matplot(x = xx, y = fitted.values,
-              xlab = xlab, ylab = ylab, type = "n", 
+              xlab = xlab, ylab = ylab, type = "n",
                   xlim = xlim, ylim = ylim, col = pcol.arg)
     }
     if (y && length(object@y))
@@ -266,7 +266,7 @@ plotqtplot.lmscreg <-
 
     invisible(fitted.values)
 }
- 
+
 
 if (TRUE) {
   if (!isGeneric("qtplot"))
@@ -275,10 +275,10 @@ if (TRUE) {
 
 
   setMethod("qtplot", signature(object = "vglm"),
-            function(object, ...) 
+            function(object, ...)
             invisible(qtplot.vglm(object, ...)))
   setMethod("qtplot", signature(object = "vgam"),
-            function(object, ...) 
+            function(object, ...)
             invisible(qtplot.vglm(object, ...)))
 }
 
@@ -296,8 +296,8 @@ if (TRUE) {
   newcall <- parse(text = newcall)[[1]]
   eval(newcall)
 }
-    
- 
+
+
 qtplot.gumbelff <-
 qtplot.gumbel <-
     function(object, show.plot = TRUE, y.arg = TRUE,
@@ -334,7 +334,7 @@ qtplot.gumbel <-
   extra$mpv <- mpv  # Overwrite if necessary
   extra$R <- R
   extra$percentiles <- percentiles
-  fitted.values <- object@family@linkinv(eta = eta, extra = extra) 
+  fitted.values <- object@family@linkinv(eta = eta, extra = extra)
 
   answer <- list(fitted.values = fitted.values,
                  percentiles   = percentiles)
@@ -351,10 +351,10 @@ qtplot.gumbel <-
   llty.arg <- rep_len(llty.arg, lp+mpv)
 
   X <- model.matrixvlm(object, type = "lm")
-  if (is.matrix(X) && length(object@y) && ncol(X)==2 && 
+  if (is.matrix(X) && length(object@y) && ncol(X)==2 &&
      dimnames(X)[[2]][1] == "(Intercept)") {
     xx <- X[, 2]
-    if (!length(xlab)) 
+    if (!length(xlab))
       xlab <- if (object@misc$nonparametric &&
                  length(object@s.xargument))
                   object@s.xargument else names(object@assign)[2]
@@ -364,7 +364,7 @@ qtplot.gumbel <-
               xlab = xlab, ylab = ylab, type = "n", ...)
 
     if (y.arg) {
-       matpoints(x = xx, y = object@y, pch = pch, col = pcol.arg) 
+       matpoints(x = xx, y = object@y, pch = pch, col = pcol.arg)
     }
   } else {
     warning("there is not a single covariate.")
@@ -399,14 +399,14 @@ qtplot.gumbel <-
 
 deplot.lms.bcn <- function(object,
                            newdata,
-                           y.arg, 
+                           y.arg,
                            eta0) {
-  if (!any(object@family@vfamily == "lms.bcn")) 
+  if (!any(object@family@vfamily == "lms.bcn"))
     warning("I think you've called the wrong function")
 
   Zvec <- ((y.arg/eta0[, 2])^(eta0[, 1]) -1) / (eta0[, 1] * eta0[, 3])
   dZ.dy <- ((y.arg/eta0[, 2])^(eta0[, 1]-1)) / (eta0[, 2] * eta0[, 3])
-  yvec <- dnorm(Zvec) * abs(dZ.dy) 
+  yvec <- dnorm(Zvec) * abs(dZ.dy)
 
   list(newdata = newdata, y = y.arg, density = yvec)
 }
@@ -415,9 +415,9 @@ deplot.lms.bcn <- function(object,
 
 deplot.lms.bcg <- function(object,
                            newdata,
-                           y.arg, 
+                           y.arg,
                            eta0) {
-  if (!any(object@family@vfamily == "lms.bcg")) 
+  if (!any(object@family@vfamily == "lms.bcg"))
     warning("I think you've called the wrong function")
 
   Zvec <- (y.arg/eta0[, 2])^(eta0[, 1])  # different from lms.bcn
@@ -434,7 +434,7 @@ deplot.lms.bcg <- function(object,
 deplot.lms.yjn2 <-
 deplot.lms.yjn <- function(object,
                            newdata,
-                           y.arg, 
+                           y.arg,
                            eta0) {
 
   if (!length(intersect(object@family@vfamily, c("lms.yjn","lms.yjn2"))))
@@ -445,12 +445,12 @@ deplot.lms.yjn <- function(object,
                eta0[, 2]) / eta0[, 3]
   dZ.dy <- dyj.dy.yeojohnson(y.arg+object@misc$yoffset,
                              lambda = eta0[, 1]) / eta0[, 3]
-  yvec <- dnorm(Zvec) * abs(dZ.dy) 
+  yvec <- dnorm(Zvec) * abs(dZ.dy)
 
   list(newdata = newdata, y = y.arg, density = yvec)
 }
 
- 
+
 deplot.default <- function(object, ...) {
 
   warning("no methods function. Returning the object")
@@ -462,7 +462,7 @@ deplot.default <- function(object, ...) {
 
 "deplot.vglm" <- function(object, Attach= TRUE, ...) {
   LL <- length(object@family@vfamily)
-  newcall <- paste("deplot.", object@family@vfamily[LL], 
+  newcall <- paste("deplot.", object@family@vfamily[LL],
                    "(object, ...)", sep = "")
   newcall <- parse(text = newcall)[[1]]
 
@@ -484,10 +484,10 @@ deplot.default <- function(object, ...) {
 
   if (!length(newdata)) {
     newdata <- data.frame(x0=x0)
-    var1name <- attr(terms(object), "term.labels")[1] 
+    var1name <- attr(terms(object), "term.labels")[1]
     names(newdata) <- var1name
 
-    ii <- if (object@misc$nonparametric) 
+    ii <- if (object@misc$nonparametric)
           slot(object, "s.xargument") else NULL
     if (length(ii) && any(logic.vec <-
         names(slot(object, "s.xargument")) == var1name))
@@ -502,16 +502,16 @@ deplot.default <- function(object, ...) {
   eta0 <- eta2theta(eta0, link = object@misc$link,
                     earg = double.check.earg)  # lambda, mu, sigma
 
-  newcall <- paste("deplot.", object@family@vfamily[1], 
+  newcall <- paste("deplot.", object@family@vfamily[1],
                    "(object, newdata, y.arg = y.arg, eta0 = eta0)",
                    sep = "")
   newcall <- parse(text = newcall)[[1]]
   answer <- eval(newcall)
 
-  if (show.plot) 
+  if (show.plot)
     plotdeplot.lmscreg(answer, y.arg=y.arg, ...)
 
-  invisible(answer) 
+  invisible(answer)
 }
 
 
@@ -533,7 +533,7 @@ plotdeplot.lmscreg <- function(answer,
     if (!is.numeric(ylim))
       ylim <- c(min(yvec), max(yvec))
     matplot(x = xx, y = yvec,
-            xlab = xlab, ylab = ylab, type = "n", 
+            xlab = xlab, ylab = ylab, type = "n",
             xlim = xlim, ylim = ylim, ...)
   }
 
@@ -545,8 +545,8 @@ plotdeplot.lmscreg <- function(answer,
 
   invisible(answer)
 }
- 
- 
+
+
 
 
 if (TRUE) {
@@ -555,10 +555,10 @@ if (TRUE) {
   setGeneric("deplot", function(object, ...) standardGeneric("deplot"))
 
   setMethod("deplot", signature(object = "vglm"),
-            function(object, ...) 
+            function(object, ...)
             invisible(deplot.vglm(object, ...)))
   setMethod("deplot", signature(object = "vgam"),
-            function(object, ...) 
+            function(object, ...)
             invisible(deplot.vglm(object, ...)))
 }
 
@@ -571,18 +571,18 @@ if (TRUE) {
     setGeneric("cdf", function(object, ...) standardGeneric("cdf"))
 
   setMethod("cdf", signature(object = "vglm"),
-            function(object, ...) 
+            function(object, ...)
             cdf.vglm(object, ...))
 
   setMethod("cdf", signature(object = "vgam"),
-            function(object, ...) 
+            function(object, ...)
             cdf.vglm(object, ...))
 }
 
 
 "cdf.vglm" <- function(object, newdata = NULL, Attach = FALSE, ...) {
   LL <- length(object@family@vfamily)
-  newcall <- paste("cdf.", object@family@vfamily[LL], 
+  newcall <- paste("cdf.", object@family@vfamily[LL],
                   "(object, newdata, ...)", sep = "")
   newcall <- parse(text = newcall)[[1]]
 
@@ -611,9 +611,9 @@ if (TRUE) {
   eta0 <- eta2theta(eta0, link = object@misc$link,
                     earg = double.check.earg)  # lambda, mu, sigma
 
-  y <- vgety(object, newdata)   # Includes yoffset 
+  y <- vgety(object, newdata)   # Includes yoffset
 
-  newcall <- paste("cdf.", object@family@vfamily[1], 
+  newcall <- paste("cdf.", object@family@vfamily[1],
                    "(y, eta0, ... )", sep = "")
   newcall <- parse(text = newcall)[[1]]
   eval(newcall)
@@ -696,9 +696,9 @@ vgety <- function(object, newdata = NULL) {
   newcall <- parse(text = newcall)[[1]]
   eval(newcall)
 }
-    
-    
- 
+
+
+
 rlplot.gevff <-
 rlplot.gev <-
   function(object, show.plot = TRUE,
@@ -745,10 +745,10 @@ rlplot.gev <-
       plot(log(1/yp), zp, log = "", type = "n",
            ylim = if (length(ylim)) ylim else
                 c(min(c(ydata, zp)), max(c(ydata, zp))),
-           xlab = xlab, ylab = ylab, main = main, 
-           cex.axis = par()$cex.axis, 
-           cex.main = par()$cex.main, 
-           cex.lab  = par()$cex.lab, 
+           xlab = xlab, ylab = ylab, main = main,
+           cex.axis = par()$cex.axis,
+           cex.main = par()$cex.main,
+           cex.lab  = par()$cex.lab,
            ...)
     points(log(-1/log((1:n)/(n+1))), ydata, col = pcol.arg,
            pch = pch, cex = pcex)
@@ -761,9 +761,9 @@ rlplot.gev <-
                   c(min(c(ydata, zp)),
                     max(c(ydata, zp))),
            xlab = xlab, ylab = ylab, main = main,
-           cex.axis = par()$cex.axis, 
-           cex.main = par()$cex.main, 
-           cex.lab  = par()$cex.lab, 
+           cex.axis = par()$cex.axis,
+           cex.main = par()$cex.main,
+           cex.lab  = par()$cex.lab,
            ...)
     points(-1/log((1:n)/(n+1)), ydata, col = pcol.arg,
            pch = pch, cex = pcex)
@@ -860,9 +860,9 @@ explot.lms.bcn <- function(percentiles = c(25, 50, 75),
     answer[, ii] <- eta[, 2] * (1 + eta[, 1] * eta[, 3] *
                     qenorm(percentiles[ii]/100))^(1/eta[, 1])
   }
-  answer 
+  answer
 }
- 
+
 
 
 
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index a99a79f..bde8582 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -27,7 +27,7 @@ residualsvlm  <-
   if (is.null(pooled.weight))
     pooled.weight <- FALSE
 
-  answer <- 
+  answer <-
   switch(type,
     working = if (pooled.weight) NULL else object at residuals,
     pearson = {
@@ -40,17 +40,17 @@ residualsvlm  <-
 
         if (M == 1) {
           if (any(wz < 0))
-            warning(paste("some weights are negative.",
-                          "Their residual will be assigned NA"))
+            warning("some weights are negative. ",
+                    "Their residual will be assigned NA")
           ans <- sqrt(c(wz)) * c(object at residuals)
           names(ans) <- names(object at residuals)
-          ans 
+          ans
         } else {
           wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
           ans <- mux22(wz.sqrt, object at residuals,
                        M = M, upper = FALSE)
-          dim(ans) <- c(M, n) 
-          ans <- t(ans) 
+          dim(ans) <- c(M, n)
+          ans <- t(ans)
           dimnames(ans) <- dimnames(object at residuals)  # n x M
           ans
         }
@@ -91,7 +91,7 @@ residualsvglm  <-
   if (is.null(pooled.weight))
     pooled.weight <- FALSE
 
-  answer <- 
+  answer <-
   switch(type,
     working = if (pooled.weight) NULL else object at residuals,
     pearson = {
@@ -103,17 +103,17 @@ residualsvglm  <-
 
       if (M == 1) {
         if (any(wz < 0))
-          warning(paste("some weights are negative.",
-                        "Their residual will be assigned NA"))
+          warning("some weights are negative. ",
+                  "Their residual will be assigned NA")
         ans <- sqrt(c(wz)) * c(object at residuals)
         names(ans) <- names(object at residuals)
-        ans 
+        ans
       } else {
         wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
         ans <- mux22(wz.sqrt, object at residuals,
                      M = M, upper = FALSE)
         dim(ans) <- c(M, n)
-        ans <- t(ans) 
+        ans <- t(ans)
         dimnames(ans) <- dimnames(object at residuals)   # n x M
         ans
       }
@@ -147,7 +147,7 @@ residualsvglm  <-
         }
         ans
       } else {
-        NULL 
+        NULL
       }
     },
     ldot = {
@@ -168,7 +168,7 @@ residualsvglm  <-
         }
         ans
       } else {
-        NULL 
+        NULL
       }
     },
     response = {
@@ -185,8 +185,8 @@ residualsvglm  <-
 
       if (!matrix.arg && length(ans)) {
         if (ncol(ans) == 1) {
-          names.ans <- dimnames(ans)[[1]] 
-          ans <- c(ans) 
+          names.ans <- dimnames(ans)[[1]]
+          ans <- c(ans)
           names(ans) <- names.ans
           ans
         } else {
@@ -225,7 +225,7 @@ residualsqrrvglm  <- function(object,
   if (is.null(pooled.weight))
     pooled.weight <- FALSE
 
-  answer <- 
+  answer <-
   switch(type,
     working = if (pooled.weight) NULL else object at residuals,
     pearson = {
@@ -250,8 +250,8 @@ residualsqrrvglm  <- function(object,
 
       if (!matrix.arg && length(ans)) {
         if (ncol(ans) == 1) {
-          names.ans <- dimnames(ans)[[1]] 
-          ans <- c(ans) 
+          names.ans <- dimnames(ans)[[1]]
+          ans <- c(ans)
           names(ans) <- names.ans
           ans
         } else {
@@ -274,29 +274,29 @@ residualsqrrvglm  <- function(object,
 
 
 setMethod("residuals",  "vlm",
-          function(object, ...) 
+          function(object, ...)
           residualsvlm(object, ...))
 setMethod("residuals",  "vglm",
-          function(object, ...) 
+          function(object, ...)
           residualsvglm(object, ...))
 setMethod("residuals",  "vgam",
-          function(object, ...) 
+          function(object, ...)
           residualsvglm(object, ...))
 setMethod("residuals",  "qrrvglm",
-          function(object, ...) 
+          function(object, ...)
           residualsqrrvglm(object, ...))
 
 setMethod("resid",  "vlm",
-          function(object, ...) 
+          function(object, ...)
           residualsvlm(object, ...))
 setMethod("resid",  "vglm",
-          function(object, ...) 
+          function(object, ...)
           residualsvglm(object, ...))
 setMethod("resid",  "vgam",
-          function(object, ...) 
+          function(object, ...)
           residualsvglm(object, ...))
 setMethod("resid",  "qrrvglm",
-          function(object, ...) 
+          function(object, ...)
           residualsqrrvglm(object, ...))
 
 
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 897010c..bf5d517 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -9,27 +9,27 @@
 
 
 rrvglm <- function(formula,
-                 family, data=list(), 
+                 family, data=list(),
                  weights = NULL, subset = NULL, na.action=na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control=rrvglm.control(...), 
-                 offset = NULL, 
+                 control=rrvglm.control(...),
+                 offset = NULL,
                  method="rrvglm.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                 contrasts = NULL, 
+                 contrasts = NULL,
                  constraints = NULL,
-                 extra = NULL, 
+                 extra = NULL,
                  qr.arg = FALSE, smart = TRUE, ...) {
     dataname <- as.character(substitute(data))  # "list" if no data=
     function.name <- "rrvglm"
 
     ocall <- match.call()
 
-    if (smart) 
+    if (smart)
         setup.smart("write")
 
     mt <- terms(formula, data = data)
-    if (missing(data)) 
+    if (missing(data))
         data <- environment(formula)
 
     mf <- match.call(expand.dots = FALSE)
@@ -38,9 +38,9 @@ rrvglm <- function(formula,
     mf$qr.arg <- NULL
     mf$coefstart <- mf$etastart <- mf$... <- NULL
     mf$smart <- NULL
-    mf$drop.unused.levels <- TRUE 
+    mf$drop.unused.levels <- TRUE
     mf[[1]] <- as.name("model.frame")
-    mf <- eval(mf, parent.frame()) 
+    mf <- eval(mf, parent.frame())
     if (method == "model.frame")
         return(mf)
     na.act <- attr(mf, "na.action")
@@ -60,7 +60,7 @@ rrvglm <- function(formula,
 
 
     offset <- model.offset(mf)
-    if (is.null(offset)) 
+    if (is.null(offset))
       offset <- 0  # yyy ???
     w <- model.weights(mf)
     if (!length(w)) {
@@ -92,7 +92,7 @@ rrvglm <- function(formula,
     fit <- rrvglm.fitter(x = x, y = y, w = w, offset = offset,
                         etastart = etastart, mustart = mustart,
                         coefstart = coefstart,
-                        family = family, 
+                        family = family,
                         control = control,
                         constraints = constraints,
                         criterion = control$criterion,
@@ -106,10 +106,10 @@ rrvglm <- function(formula,
          if (control$trace && (control$Bestof>1))
            cat(paste("\n========================= Fitting model", tries,
                        "=========================\n\n"))
-         it <- rrvglm.fitter(x = x, y = y, w = w, offset = offset, 
+         it <- rrvglm.fitter(x = x, y = y, w = w, offset = offset,
                    etastart = etastart, mustart = mustart,
                    coefstart = coefstart,
-                   family = family, 
+                   family = family,
                    control = control,
                    constraints = constraints,
                    criterion = control$criterion,
@@ -138,7 +138,7 @@ rrvglm <- function(formula,
       "constraints"  = fit$constraints,
       "criterion"    = fit$crit.list,
       "df.residual"  = fit$df.residual,
-      "df.total"     = fit$df.total, 
+      "df.total"     = fit$df.total,
       "dispersion"   = 1,
       "effects"      = fit$effects,
       "family"       = fit$family,
@@ -184,7 +184,7 @@ rrvglm <- function(formula,
     } else list()  # R-1.5.0
 
     slot(answer, "iter") = fit$iter
-    fit$predictors = as.matrix(fit$predictors)  # Must be a matrix 
+    fit$predictors = as.matrix(fit$predictors)  # Must be a matrix
     dimnames(fit$predictors) = list(dimnames(fit$predictors)[[1]],
                                     fit$misc$predictors.names)
     slot(answer, "predictors") = fit$predictors
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index f073cbd..9210288 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -29,7 +29,7 @@ rrvglm.control <-
            Norrr = NA,
 
            noWarning = FALSE,
- 
+
            trace = FALSE,
            Use.Init.Poisson.QO = FALSE,
            checkwz = TRUE,
@@ -55,7 +55,7 @@ rrvglm.control <-
   Algorithm <- match.arg(Algorithm, c("alternating", "derivative"))[1]
 
   if (Svd.arg)
-    Corner <- FALSE 
+    Corner <- FALSE
 
   if (!is.Numeric(Rank, positive = TRUE,
                   length.arg = 1, integer.valued = TRUE))
@@ -159,7 +159,7 @@ rrvglm.control <-
   }
 
 
-  ans$half.stepsizing <- FALSE  # Turn it off 
+  ans$half.stepsizing <- FALSE  # Turn it off
   ans
 }
 
@@ -185,8 +185,9 @@ setMethod("summary", "rrvglm",
 
 
 
-show.summary.rrvglm <- function(x, digits = NULL,
-                                quote = TRUE, prefix = "") {
+show.summary.rrvglm <-
+  function(x, digits = NULL, quote = TRUE, prefix = "",
+           signif.stars = NULL) {
 
 
   show.summary.vglm(x, digits = digits, quote = quote, prefix = prefix)
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 1b915c6..e7404a5 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -9,6 +9,7 @@
 
 
 
+
 rrvglm.fit <-
   function(x, y, w = rep_len(1, nrow(x)),
            etastart = NULL, mustart = NULL, coefstart = NULL,
@@ -57,8 +58,8 @@ rrvglm.fit <-
     intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
     y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
- 
-    n.save <- n 
+
+    n.save <- n
 
 
 
@@ -69,7 +70,7 @@ rrvglm.fit <-
       eval(slot(family, "initialize"))  # Initlz mu & M (and optionally w)
 
 
-    eval(rrr.init.expression)  
+    eval(rrr.init.expression)
 
 
     if (length(etastart)) {
@@ -95,7 +96,7 @@ rrvglm.fit <-
     M <- if (is.matrix(eta)) ncol(eta) else 1
 
     if (is.character(rrcontrol$Dzero)) {
-      index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]]) 
+      index <- match(rrcontrol$Dzero, dimnames(as.matrix(y))[[2]])
       if (anyNA(index))
         stop("Dzero argument didn't fully match y-names")
       if (length(index) == M)
@@ -112,7 +113,7 @@ rrvglm.fit <-
       eval(family at constraints)
 
 
-    special.matrix <- matrix(-34956.125, M, M)  # An unlikely used matrix 
+    special.matrix <- matrix(-34956.125, M, M)  # An unlikely used matrix
     just.testing <- cm.VGAM(special.matrix, x, rrcontrol$noRRR,
                             constraints)
 
@@ -144,7 +145,7 @@ rrvglm.fit <-
                                      colx1.index  # Save it on the object
     colx2.index <- 1:ncol(x)
     names(colx2.index) <- dx2
-    if (length(colx1.index)) 
+    if (length(colx1.index))
       colx2.index <- colx2.index[-colx1.index]
 
     p1 <- length(colx1.index)
@@ -200,13 +201,12 @@ rrvglm.fit <-
 
 
     ncolHlist <- unlist(lapply(Hlist, ncol))
-    dimB <- sum(ncolHlist)
 
 
     X.vlm.save <- if (control$Quadratic) {
       tmp500 <- lm2qrrvlm.model.matrix(x = x, Hlist = Hlist,
                      C = Cmat, control = control)
-      xsmall.qrr <- tmp500$new.latvar.model.matrix 
+      xsmall.qrr <- tmp500$new.latvar.model.matrix
       H.list <- tmp500$constraints
       if (FALSE && modelno == 3) {
         H.list[[1]] <- (H.list[[1]])[, c(TRUE, FALSE), drop = FALSE]  # Amat
@@ -215,11 +215,11 @@ rrvglm.fit <-
 
       latvar.mat <- tmp500$latvar.mat
       if (length(tmp500$offset)) {
-        offset <- tmp500$offset 
+        offset <- tmp500$offset
       }
       lm2vlm.model.matrix(xsmall.qrr, H.list, xij = control$xij)
     } else {
-      latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat 
+      latvar.mat <- x[, colx2.index, drop = FALSE] %*% Cmat
       lm2vlm.model.matrix(x, Hlist, xij = control$xij)
     }
 
@@ -288,7 +288,7 @@ rrvglm.fit <-
         if (control$Corner)
           zedd[, Index.corner] <- zedd[, Index.corner] - latvar.mat
       } else {
-        zedd <- z 
+        zedd <- z
       }
 
       if (!nice31)
@@ -300,14 +300,14 @@ rrvglm.fit <-
         rrcontrol$Ainit <- control$Ainit <- Amat  # Good for valt()
         rrcontrol$Cinit <- control$Cinit <- Cmat  # Good for valt()
       }
-    
+
       if (!nice31)
-        c.list$coeff <- tfit$coefficients 
-    
+        c.list$coeff <- tfit$coefficients
+
       if (control$Quadratic) {
         if (control$Corner)
           tfit$fitted.values[, Index.corner] <-
-          tfit$fitted.values[, Index.corner] + latvar.mat 
+          tfit$fitted.values[, Index.corner] + latvar.mat
       }
 
       if (!nice31)
@@ -336,7 +336,7 @@ rrvglm.fit <-
         eval(family at middle2)
 
       old.crit <- new.crit
-      new.crit <- 
+      new.crit <-
         switch(criterion,
                coefficients = new.coeffs,
                tfun(mu = mu, y = y, w = w,
@@ -351,8 +351,8 @@ rrvglm.fit <-
                        coefficients =
                          format(new.crit,
                                 dig = round(1 - log10(epsilon))),
-                         format(new.crit, 
-                                dig = max(4, 
+                         format(new.crit,
+                                dig = max(4,
                                           round(-0 - log10(epsilon) +
                                                 log10(sqrt(eff.n))))))
 
@@ -375,7 +375,7 @@ rrvglm.fit <-
       if (take.half.step) {
         stepsize <- 2 * min(orig.stepsize, 2*stepsize)
         new.coeffs.save <- new.coeffs
-        if (trace) 
+        if (trace)
           cat("Taking a modified step")
         repeat {
           if (trace) {
@@ -403,19 +403,19 @@ rrvglm.fit <-
             eval(family at middle2)
 
 
-          new.crit <- 
+          new.crit <-
             switch(criterion,
                    coefficients = new.coeffs,
                    tfun(mu = mu,y = y,w = w,res = FALSE,
                         eta = eta,extra))
 
-          if ((criterion == "coefficients") || 
+          if ((criterion == "coefficients") ||
               ( minimize.criterion && new.crit < old.crit) ||
               (!minimize.criterion && new.crit > old.crit))
             break
           }
 
-          if (trace) 
+          if (trace)
             cat("\n")
           if (too.small) {
             warning("iterations terminated because ",
@@ -430,8 +430,8 @@ rrvglm.fit <-
                        coefficients =
                          format(new.crit,
                                 dig = round(1 - log10(epsilon))),
-                         format(new.crit, 
-                                dig = max(4, 
+                         format(new.crit,
+                                dig = max(4,
                                           round(-0 - log10(epsilon) +
                                                 log10(sqrt(eff.n))))))
 
@@ -582,13 +582,13 @@ rrvglm.fit <-
                 constraints = if (control$Quadratic) H.list else Hlist,
                 df.residual = df.residual,
                 df.total = n*M,
-                effects = effects, 
+                effects = effects,
                 fitted.values = mu,
-                offset = offset, 
+                offset = offset,
                 rank = rank,
                 residuals = residuals,
                 R = R,
-                terms = Terms)  # terms: This used to be done in vglm() 
+                terms = Terms)  # terms: This used to be done in vglm()
 
     if (qr.arg && !nice31) {
       fit$qr <- tfit$qr
@@ -605,7 +605,7 @@ rrvglm.fit <-
         colnames.x = xn,
         colnames.X.vlm = xnrow.X.vlm,
         criterion = criterion,
-        function.name = function.name, 
+        function.name = function.name,
         intercept.only=intercept.only,
         predictors.names = predictors.names,
         M = M,
diff --git a/R/s.q b/R/s.q
index 2b13cd8..0fdb083 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -15,7 +15,7 @@ s <- function(x, df = 4, spar = 0, ...) {
   call <- deparse(sys.call())
 
   if (ncol(as.matrix(x)) > 1)
-    stop("argument 'x' must be a vector") 
+    stop("argument 'x' must be a vector")
   if (!is.null(levels(x))) {
     x <- if (is.ordered(x)) {
       as.vector(x)
@@ -27,7 +27,7 @@ s <- function(x, df = 4, spar = 0, ...) {
   attr(x, "call") <- call
   attr(x, "class") <- "smooth"
   attr(x, "s.xargument") <- ans   # Needed for prediction and constraints
- 
+
 
   a <- is.na(x)
   if (any(a))
diff --git a/R/s.vam.q b/R/s.vam.q
index 0fc9f2a..352c2a7 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -35,7 +35,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
     osparv <- lapply(data, attr, "spar")  # "o" for original
     odfvec <- lapply(data, attr, "df")
     s.xargument <- lapply(data, attr, "s.xargument")
-    
+
     for (kk in seq_along(nwhich)) {
       ii <- nwhich[kk]
 
@@ -48,7 +48,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
                 "'spar' are used for variable '", s.xargument, "'")
       }
       osparv[[ii]] <- rep_len(temp, ncolHlist[ii])  # Recycle
-    
+
       temp <- odfvec[[ii]]
       if (!is.numeric(temp) || any(temp < 1)) {
         stop("argument 'df' is non-numeric or less than 1")
@@ -73,12 +73,12 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
     odfvec <- unlist(odfvec)
     smooth.frame$osparv <- osparv  # Original
     smooth.frame$odfvec <- odfvec  # Original
-    
+
     if (sum(smooth.frame$dfvec[smooth.frame$osparv == 0]) + pbig >
       smooth.frame$n.lm * sum(ncolHlist[nwhich])) {
       stop("too many parameters/dof for data on hand")
     }
-    
+
     xnrow.X.vlm <- labels(X.vlm.save)[[2]]
     asgn <- attr(X.vlm.save, "assign")
     aa <- NULL
@@ -185,7 +185,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
 
   if (exists("flush.console"))
     flush.console()
- 
+
 
 
 
@@ -269,8 +269,8 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
     nl.df = nl.df,
     qr = list(qr = fit$qr, rank = qrank,
               qraux = fit$qraux, pivot = fit$qpivot),
-    R = R, 
-    rank = qrank, 
+    R = R,
+    rank = qrank,
     residuals = fit$y - fit$etamat,
     ResSS = fit$doubvec[2],
     smomat = fit$smomat,
diff --git a/R/simulate.vglm.R b/R/simulate.vglm.R
index 58d178b..7efa756 100644
--- a/R/simulate.vglm.R
+++ b/R/simulate.vglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -41,7 +41,7 @@ simulate.vlm <- function (object, nsim = 1, seed = NULL, ...) {
     class(val) <- "data.frame"
   }
   names(val) <- paste("sim", seq_len(nsim), sep = "_")
-  if (!is.null(nm)) 
+  if (!is.null(nm))
     row.names(val) <- nm
   attr(val, "seed") <- RNGstate
   val
diff --git a/R/sm.os.R b/R/sm.os.R
new file mode 100644
index 0000000..53f0a5c
--- /dev/null
+++ b/R/sm.os.R
@@ -0,0 +1,333 @@
+# These functions are
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+ sm.os <-
+  function(x, ...,
+           niknots = 6,  # NULL if 'alg.niknots' is to be used.
+           spar = -1,  # was 0 prior to 20160810
+           o.order = 2,
+           alg.niknots = c("s", ".nknots.smspl")[1],
+           all.knots = FALSE,  # 20161013
+           ridge.adj = 1e-5,
+           spillover = 0.01, maxspar = 1e12,
+           outer.ok = FALSE,
+           fixspar = FALSE) {
+
+
+
+
+
+
+  niknots.orig <- niknots
+  if (all.knots && length(niknots))
+    warning("ignoring 'niknots' because 'all.knots = TRUE'")
+
+
+
+
+
+
+Penalty.os <- function(a, b, intKnots, o.order = 2) {
+
+
+
+  if (any(diff(intKnots) <= 0))
+    stop("argument 'intKnots' must be sorted in increasing order")
+  if (length(unique(intKnots)) != (KK <- length(intKnots)))
+    stop("argument 'intKnots' must have unique values")
+  if (KK == 0)
+    stop("no interior knots (intKnots == 0)")
+
+
+  allKnots <- c(rep(a, 2 * o.order), intKnots, rep(b, 2 * o.order))
+  mkmat <- matrix(c(1, rep(NA, 6),
+           1/3, 4/3, 1/3, rep(NA, 4),
+           14/45, 64/45, 8/15, 64/45, 14/45, NA, NA,
+           41/140, 54/35, 27/140, 68/35, 27/140, 54/35, 41/140),
+           4, 7, byrow = TRUE)
+
+  vec.ell  <- 1:(KK + 4 * o.order - 1)  # length(allKnots) - 1
+  vec.ellp <- 0:(2 * o.order - 2)
+
+  hmell <- if (o.order == 1)
+    diff(allKnots) else diff(allKnots) /  (2 * o.order - 2)
+
+  xtilde <- wts <- numeric((2*o.order - 1) * (KK * 4*o.order - 1))
+  index1 <- (2*o.order - 1) * (vec.ell - 1) + 1
+
+  for (ellp in vec.ellp) {
+    xtilde[index1 + ellp] <- hmell * ellp + allKnots[vec.ell]
+       wts[index1 + ellp] <- hmell * mkmat[o.order, ellp + 1]
+  }
+
+  Bdd <- splineDesign(allKnots, xtilde,
+                      ord = 2 * o.order,
+                      derivs = rep(o.order, length(xtilde)),
+                      outer.ok = TRUE)
+  Omega <- crossprod(Bdd * wts, Bdd)
+  attr(Omega, "allKnots") <- allKnots
+  Omega
+}
+
+
+
+
+
+
+  xs <- substitute(x)
+  ans <- as.character(xs)
+  x.index <- as.vector(x)
+
+
+  alg.niknots <- match.arg(alg.niknots,
+                          c("s", ".nknots.smspl"))[1]
+  if (!is.Numeric(o.order, length.arg = 1, integer.valued = TRUE,
+                  positive = TRUE) ||
+      o.order > 4)
+    stop("argument 'o.order' must be one value from the set 1:4")
+
+
+
+  x.orig <- x.index
+  xdots <- list(...)
+  uses.xij <- length(xdots) > 0
+  if (uses.xij)
+    x.index <- as.vector(c(x.index, unlist(xdots)))
+
+
+  xl <- min(x.index)
+  xr <- max(x.index)
+
+
+ if (smart.mode.is("read")) {
+    smartlist  <- get.smart()
+    xl <- smartlist$xl  # Overwrite its value
+    xr <- smartlist$xr  # Overwrite its value
+    alg.niknots <- smartlist$alg.niknots  # Ditto
+    spar       <- smartlist$spar
+    o.order     <- smartlist$o.order
+    all.knots  <- smartlist$all.knots
+    ridge.adj  <- smartlist$ridge.adj
+    spillover  <- smartlist$spillover
+    maxspar    <- smartlist$maxspar
+    maXX       <- smartlist$maXX
+    Cmat       <- smartlist$Cmat
+    intKnots   <- smartlist$intKnots
+    outer.ok   <- smartlist$outer.ok
+    fixspar    <- smartlist$fixspar
+  } else {
+    intKnots   <- NULL
+    maXX       <- NULL
+    Cmat       <- NULL
+  }
+
+
+
+
+  xmax <- xr + spillover * (xr - xl)
+  xmin <- xl - spillover * (xr - xl)
+  nx <- names(x.index)
+  nax <- is.na(x.index)
+  if (nas <- any(nax))
+    x.index <- x[!nax]
+
+
+
+
+  usortx <- unique(sort(as.vector(x.index)))
+  neff <- length(usortx)
+  if (neff < 2) {
+    stop("not enough unique 'x' values (need 2 or more)")
+  }
+
+  noround <- TRUE   # Improvement 20020803
+
+
+  if (all.knots) {
+    xbar <- (usortx - usortx[1]) / (usortx[neff] - usortx[1])
+    knot <- if (noround) {
+      valid.vknotl2(c(rep_len(xbar[   1], 2 * o.order - 1),  # 3
+                      xbar,
+                      rep_len(xbar[neff], 2 * o.order - 1)))  # 3
+    } else {
+      c(rep_len(xbar[   1], 2 * o.order - 1),
+        xbar,
+        rep_len(xbar[neff], 2 * o.order - 1))
+    }
+    if (length(niknots.orig)) {
+      warning("overriding 'niknots' by 'all.knots = TRUE'")
+    }
+    niknots <- length(knot) - 2 * o.order  # TWYee
+  } else if (is.null(niknots.orig)) {
+    xbar <- (usortx - usortx[1]) / (usortx[neff] - usortx[1])
+    if (alg.niknots == "s") {
+      chosen <- length(niknots)
+      if (chosen && (niknots > neff + 2 || niknots <= 5)) {
+        stop("bad value for 'niknots'")
+      }
+      if (!chosen) {
+        niknots <- 0
+      }
+      knot.list <-
+        .C("vknootl2", as.double(xbar),
+           as.integer(neff),
+           knot = double(neff + 4 * o.order - 2),  # (neff+6), zz unsure
+           k = as.integer(niknots + 2 * o.order),  # (niknots+4), zz unsure
+           chosen = as.integer(chosen))
+      if (noround) {
+        knot <- valid.vknotl2(knot.list$knot[1:(knot.list$k)])
+        knot.list$k <- length(knot)
+      } else {
+        knot <- knot.list$knot[1:(knot.list$k)]
+      }
+      niknots <- knot.list$k - 2 * o.order  # TWYee
+    } else {
+      niknots <- .nknots.smspl(neff)
+    }
+
+  }  # !all.knots
+
+
+
+  if (!is.Numeric(niknots, positive = TRUE, integer.valued = TRUE,
+                  length.arg = 1)) {
+    stop("bad value of 'niknots'")
+  }
+
+
+
+
+
+
+  numIntKnots <- niknots
+
+
+  if (is.null(intKnots))
+    intKnots <- quantile(usortx,  # unique(x.index),
+                         probs = seq(0, 1, length = numIntKnots + 2)[
+                                 -c(1, numIntKnots + 2)])
+
+
+
+
+
+  Basis <- bs(x, knots = intKnots,
+              degree = 2 * o.order - 1,  # 3 by default
+              Boundary.knots = c(a = xmin, b = xmax),  # zz not sure
+              intercept = TRUE)
+
+
+
+
+  n.col <- ncol(Basis)
+  if (nas) {
+    nmat <- matrix(NA_real_, length(nax), n.col)
+    nmat[!nax, ] <- Basis
+    Basis <- nmat
+  }
+  dimnames(Basis) <- list(1:nrow(Basis), 1:n.col)
+
+
+  fixspar <- rep_len(fixspar, max(length(fixspar), length(spar)))
+     spar <- rep_len(   spar, max(length(fixspar), length(spar)))
+
+  if (any(spar < 0 & fixspar)) {
+     spar[spar < 0 & fixspar] <- 0
+     warning("some 'spar' values are negative : have used 'spar' = ",
+             paste(spar, collapse = ", "))
+  }
+
+  if (any(maxspar < spar)) {
+     spar[maxspar < spar] <- maxspar
+     warning("some 'spar' values are > ", maxspar, ": ",
+                   "for stability have used 'spar' = ",
+             paste(spar, collapse = ", "))
+  }
+
+
+
+
+
+
+  pen.aug <- Penalty.os(a = xmin, b = xmax, intKnots, o.order = o.order)
+  allKnots <- attr(pen.aug, "allKnots")  # Retrieved
+
+
+
+
+
+  if (is.null(maXX))
+    maXX <- mean(abs(crossprod(Basis)))
+ maS <- mean(abs(pen.aug)) / maXX
+
+  pen.aug <- pen.aug / maS
+
+
+
+
+  kk <- ncol(Basis)
+  if (is.null(Cmat))
+    Cmat <- matrix(colSums(Basis), 1, kk)
+
+  qrCt <- qr(t(Cmat))
+  jay <- nrow(Cmat)  # 1
+  XZ <- t(qr.qty(qrCt, t(Basis))[(jay+1):kk, ])
+
+  Basis <- XZ
+
+
+  ZtSZ <- qr.qty(qrCt, t(qr.qty(qrCt, t(pen.aug))))[(jay+1):kk,
+                                                    (jay+1):kk]
+
+
+
+
+
+  if (smart.mode.is("write"))
+    put.smart(list(xl          = xl,
+                   xr          = xr,
+                   alg.niknots = alg.niknots,
+                   spar        = spar,
+                   o.order     = o.order,
+                   all.knots   = all.knots,
+                   ridge.adj   = ridge.adj,
+                   spillover   = spillover,
+                   maxspar     = maxspar,
+                   maXX        = maXX,
+                   Cmat        = Cmat,
+                   intKnots    = intKnots,
+                   outer.ok    = outer.ok,
+                   fixspar     = fixspar))
+
+
+
+
+  Basis <- Basis[seq_along(x.orig), , drop = FALSE]
+
+
+
+
+
+  attr(Basis, "S.arg") <- ZtSZ
+
+  attr(Basis, "knots") <- allKnots  # zz might be intKnots
+  attr(Basis, "intKnots") <- intKnots
+  attr(Basis, "spar") <- spar  # Vector
+  attr(Basis, "o.order") <- o.order  # Save argument
+  attr(Basis, "ps.int") <- NA_real_  # For the psint() methods function
+  attr(Basis, "all.knots") <- all.knots  # Save logical argument
+  attr(Basis, "alg.niknots") <- alg.niknots  # Save argument
+  attr(Basis, "ridge.adj") <- ridge.adj  # Save argument
+  attr(Basis, "outer.ok") <- outer.ok  # Save argument
+  attr(Basis, "fixspar") <- fixspar  # Save argument
+
+  Basis
+}
+
+
+
diff --git a/R/sm.ps.R b/R/sm.ps.R
new file mode 100644
index 0000000..433abfd
--- /dev/null
+++ b/R/sm.ps.R
@@ -0,0 +1,206 @@
+# These functions are
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+# sm.ps(): smart P-spline (penalized B-spline) basis for VGAM formulas.
+#
+# Builds a B-spline design matrix on equally-spaced knots covering the
+# data range (plus a small 'spillover'), together with a difference
+# penalty of order 'p.order'.  A centering constraint (Cmat %*% beta = 0,
+# by default a sum-to-zero constraint) is absorbed via a QR
+# decomposition of t(Cmat), and the constrained penalty matrix is
+# returned in the "S.arg" attribute.  All design decisions are stored
+# with put.smart() so the identical basis is reconstructed at
+# prediction time ("read" mode).
+#
+# Arguments:
+#   x        covariate vector.
+#   ...      optional extra covariate values (the xij facility); these
+#            widen the knot range but their rows are dropped from the
+#            returned matrix.
+#   ps.int   number of knot intervals; chosen automatically if NULL.
+#   spar     smoothing parameter(s); a negative value means it is to be
+#            estimated.
+#   degree   B-spline degree (3 == cubic).
+#   p.order  order of the difference penalty.
+#   The remaining arguments are tuning constants recorded for smart
+#   prediction; see the sm.ps() help file.
+ sm.ps <-
+  function(x,
+           ...,
+           ps.int = NULL,
+           spar = -1,  # was 0 prior to 20160810
+           degree = 3, p.order = 2,
+           ridge.adj = 1e-5,  # ridge.inv = 0.0001,
+           spillover = 0.01, maxspar = 1e12,
+           outer.ok = FALSE,
+           mux = NULL,  # 1.25,
+           fixspar = FALSE) {
+
+
+  xs <- substitute(x)
+  ans <- as.character(xs)
+  x.index <- as.vector(x)
+
+  x.orig <- x.index  # Only these rows are returned at the end
+  xdots <- list(...)
+  uses.xij <- length(xdots) > 0
+  if (uses.xij)
+    x.index <- as.vector(c(x.index, unlist(xdots)))
+
+  # Default number of intervals: either mux * log(no. of unique x
+  # values), or a small fixed rule bounded by the sample size.
+  if (is.null(ps.int)) {
+    ps.int <- if (length(mux)) {
+      nux <- length(unique(x.index))
+      ceiling(mux * log(nux))
+    } else {
+      min(max(degree, 7), length(x.index) - 2)
+    }
+  }  # if (is.null(ps.int))
+  if (length(x.index) - 1 <= ps.int)
+    stop("argument 'ps.int' is too large")
+
+
+  xl <- min(x.index)
+  xr <- max(x.index)
+
+
+  # Smart prediction: restore every fit-time design decision so that
+  # exactly the same basis is rebuilt for new data.
+ if (smart.mode.is("read")) {
+    smartlist  <- get.smart()
+    xl <- smartlist$xl  # Overwrite its value
+    xr <- smartlist$xr  # Overwrite its value
+    ps.int    <- smartlist$ps.int  # Ditto
+    spar      <- smartlist$spar
+    degree    <- smartlist$degree
+    p.order   <- smartlist$p.order
+    ridge.adj <- smartlist$ridge.adj
+    spillover <- smartlist$spillover
+    maxspar   <- smartlist$maxspar
+    maXX      <- smartlist$maXX
+    Cmat      <- smartlist$Cmat
+    outer.ok  <- smartlist$outer.ok
+    mux       <- smartlist$mux
+    fixspar   <- smartlist$fixspar
+  } else {
+    maXX      <- NULL
+    Cmat      <- NULL
+  }
+
+  # Equally-spaced knots on a range extended a little beyond the data.
+  xmax <- xr + spillover * (xr - xl)
+  xmin <- xl - spillover * (xr - xl)
+  dx <- (xmax - xmin) / ps.int
+  nx <- names(x.index)
+  nax <- is.na(x.index)
+  if (nas <- any(nax))
+    x.index <- x.index[!nax]  # Was 'x[!nax]': wrong once '...' extends x.index
+  s.order <- degree + 1
+  if (length(ps.int)) {
+    nAknots <- ps.int - 1
+    if (nAknots < 1) {
+      nAknots <- 1
+      warning("'ps.int' was too small; have used 2")
+    }
+
+
+  if (FALSE && # nux < 6 &&
+      smart.mode.is("write"))
+    warning("smoothing when there are less than 6 distinct 'x' values",
+            " is not advised")
+
+
+    if (nAknots > 0) {
+      Aknots <- seq(from = xmin - degree * dx,
+                    to   = xmax + degree * dx, by = dx)
+    } else {
+      knots <- NULL  # NOTE(review): unreachable, since nAknots >= 1 here
+    }
+  }  # length(ps.int)
+
+
+  basis <- splineDesign(Aknots, x.index, s.order, 0 * x.index,
+                        outer.ok = outer.ok)
+  n.col <- ncol(basis)
+  if (nas) {  # Reinsert rows of NAs where x.index was NA
+    nmat <- matrix(NA_real_, length(nax), n.col)
+    nmat[!nax, ] <- basis
+    basis <- nmat
+  }
+  dimnames(basis) <- list(1:nrow(basis), 1:n.col)
+  if ((p.order - n.col + 1) > 0) {
+    p.order <- n.col - 1
+    warning("argument 'p.order' was too large; have used ", n.col - 1)
+  }
+
+  # Recycle 'spar' and 'fixspar' to a common length, then clamp
+  # nonsensical values.
+  fixspar <- rep_len(fixspar, max(length(fixspar), length(spar)))
+     spar <- rep_len(   spar, max(length(fixspar), length(spar)))
+
+  if (any(spar < 0 & fixspar)) {
+    spar[spar < 0 & fixspar] <- 0
+    warning("some 'spar' values are negative : have used 'spar' = ",
+            paste(spar, collapse = ", "))
+  }
+
+  if (any(spar > maxspar)) {
+    spar[spar > maxspar] <- maxspar
+    warning("some 'spar' values are > ", maxspar, ": ",
+                  "for stability have used 'spar' = ",
+            paste(spar, collapse = ", "))
+  }
+
+  # Difference-penalty matrix t(D) %*% D, rescaled so its magnitude is
+  # comparable with that of t(X) %*% X.
+  aug <- if (p.order > 0) diff(diag(n.col), diff = p.order) else diag(n.col)
+
+  pen.aug <- crossprod(aug)
+
+  if (is.null(maXX))
+    maXX <- mean(abs(crossprod(basis)))
+ maS <- mean(abs(pen.aug)) / maXX
+
+  pen.aug <- pen.aug / maS
+
+  # Absorb the constraint Cmat %*% beta = 0 by working in the null
+  # space of Cmat (via the QR of t(Cmat)); this transforms both the
+  # basis and the penalty.
+  kk <- ncol(basis)
+  if (is.null(Cmat))
+    Cmat <- matrix(colSums(basis), 1, kk)
+
+
+  qrCt <- qr(t(Cmat))
+  jay <- nrow(Cmat)  # 1
+  XZ <- t(qr.qty(qrCt, t(basis))[(jay+1):kk, ])
+
+
+  ZtSZ <- qr.qty(qrCt, t(qr.qty(qrCt, t(pen.aug))))[(jay+1):kk,
+                                                    (jay+1):kk]
+
+
+  basis <- XZ
+
+
+  if (smart.mode.is("write"))
+    put.smart(list(xl        = xl,
+                   xr        = xr,
+                   ps.int    = ps.int,
+                   spar      = spar,
+                   degree    = degree,
+                   p.order   = p.order,
+                   ridge.adj = ridge.adj,
+                   spillover = spillover,
+                   maxspar   = maxspar,
+                   maXX      = maXX,
+                   Cmat      = Cmat,
+                   outer.ok  = outer.ok,
+                   mux       = mux,
+                   fixspar   = fixspar))
+
+
+  # Keep only the rows for the original x (drop any xij-only rows).
+  basis <- basis[seq_along(x.orig), , drop = FALSE]
+
+
+  attr(basis, "S.arg") <- ZtSZ  # Constrained penalty matrix
+
+  attr(basis, "degree") <- degree
+  attr(basis, "knots") <- Aknots
+  attr(basis, "spar") <- spar  # Vector
+  attr(basis, "p.order") <- p.order
+  attr(basis, "ps.int") <- ps.int
+  attr(basis, "ridge.adj") <- ridge.adj
+  attr(basis, "outer.ok") <- outer.ok
+  attr(basis, "mux") <- mux
+  attr(basis, "fixspar") <- fixspar
+  basis
+}
+
+
+
+
diff --git a/R/smart.R b/R/smart.R
index ba9b4d4..e2ea0dd 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -92,7 +92,7 @@ get.smart.prediction <- function() {
       for(i in max.smart:(smart.prediction.counter + 1))
         smart.prediction[[i]] <- NULL
     smart.prediction
-  } else 
+  } else
     NULL
 }
 
@@ -139,7 +139,7 @@ smart.expression <- expression({
   assign(".smart.prediction.mode", "neutral", envir = smartpredenv)
 
   .smart.match.call <- as.character(smart$match.call)
-  smart$match.call <- NULL  # Kill it off for the do.call 
+  smart$match.call <- NULL  # Kill it off for the do.call
 
   ans.smart <- do.call(.smart.match.call[1], c(list(x=x), smart))
   assign(".smart.prediction.mode", "read", envir = smartpredenv)
@@ -180,7 +180,7 @@ is.smart <- function(object) {
 
 
  sm.bs <-
-  function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE, 
+  function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
             Boundary.knots = range(x)) {
   x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
   if (smart.mode.is("read")) {
@@ -190,15 +190,15 @@ is.smart <- function(object) {
   nx <- names(x)
   x <- as.vector(x)
   nax <- is.na(x)
-  if (nas <- any(nax)) 
+  if (nas <- any(nax))
     x <- x[!nax]
   if (!missing(Boundary.knots)) {
     Boundary.knots <- sort(Boundary.knots)
-    outside <- (ol <- x < Boundary.knots[1]) | (or <- x > 
+    outside <- (ol <- x < Boundary.knots[1]) | (or <- x >
         Boundary.knots[2L])
   } else outside <- FALSE
   ord <- 1 + (degree <- as.integer(degree))
-  if (ord <= 1) 
+  if (ord <= 1)
     stop("'degree' must be integer >= 1")
   if (!missing(df) && missing(knots)) {
     nIknots <- df - ord + (1 - intercept)
@@ -231,10 +231,10 @@ is.smart <- function(object) {
         tt <- splines::splineDesign(Aknots, rep(k.pivot, ord), ord, derivs)
         basis[or, ] <- xr %*% (tt/scalef)
       }
-      if (any(inside <- !outside)) 
+      if (any(inside <- !outside))
         basis[inside, ] <- splines::splineDesign(Aknots, x[inside], ord)
   } else basis <- splines::splineDesign(Aknots, x, ord)
-  if (!intercept) 
+  if (!intercept)
     basis <- basis[, -1L, drop = FALSE]
   n.col <- ncol(basis)
   if (nas) {
@@ -244,7 +244,7 @@ is.smart <- function(object) {
   }
   dimnames(basis) <- list(nx, 1L:n.col)
   a <- list(degree = degree,
-            knots = if (is.null(knots)) numeric(0L) else knots, 
+            knots = if (is.null(knots)) numeric(0L) else knots,
             Boundary.knots = Boundary.knots,
             intercept = intercept,
             Aknots = Aknots)
@@ -279,11 +279,11 @@ attr( sm.bs, "smart") <- TRUE
   nx <- names(x)
   x <- as.vector(x)
   nax <- is.na(x)
-  if (nas <- any(nax)) 
+  if (nas <- any(nax))
     x <- x[!nax]
   if (!missing(Boundary.knots)) {
     Boundary.knots <- sort(Boundary.knots)
-    outside <- (ol <- x < Boundary.knots[1L]) | (or <- x > 
+    outside <- (ol <- x < Boundary.knots[1L]) | (or <- x >
         Boundary.knots[2L])
   } else outside <- FALSE
   if (!missing(df) && missing(knots)) {
@@ -312,7 +312,7 @@ attr( sm.bs, "smart") <- TRUE
         tt <- splines::splineDesign(Aknots, rep(k.pivot, 2L), 4, c(0, 1))
         basis[or, ] <- xr %*% tt
       }
-      if (any(inside <- !outside)) 
+      if (any(inside <- !outside))
         basis[inside, ] <- splines::splineDesign(Aknots, x[inside], 4)
     } else basis <- splines::splineDesign(Aknots, x, 4)
   const <- splines::splineDesign(Aknots, Boundary.knots, 4, c(2, 2))
@@ -331,7 +331,7 @@ attr( sm.bs, "smart") <- TRUE
   }
   dimnames(basis) <- list(nx, 1L:n.col)
   a <- list(degree = 3,
-            knots = if (is.null(knots)) numeric(0) else knots, 
+            knots = if (is.null(knots)) numeric(0) else knots,
             Boundary.knots = Boundary.knots,
             intercept = intercept,
             Aknots = Aknots)
@@ -376,7 +376,7 @@ attr( sm.ns, "smart") <- TRUE
       m <- unclass(as.data.frame(cbind(x, ...)))
       return(do.call("polym", c(m, degree = degree, raw = raw)))
     }
-    if (degree < 1) 
+    if (degree < 1)
       stop("'degree' must be at least 1")
 
 
@@ -401,7 +401,7 @@ attr( sm.ns, "smart") <- TRUE
       return(Z)
     }
     if (is.null(coefs)) {
-      if (degree >= length(unique(x))) 
+      if (degree >= length(unique(x)))
         stop("'degree' must be less than number of unique points")
       xbar <- mean(x)
       x <- x - xbar
@@ -428,8 +428,8 @@ attr( sm.ns, "smart") <- TRUE
       Z <- matrix(, length(x), n)
       Z[, 1] <- 1
       Z[, 2] <- x - alpha[1L]
-      if (degree > 1) 
-        for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) * 
+      if (degree > 1)
+        for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) *
             Z[, i] - (norm2[i + 1]/norm2[i]) * Z[, i - 1]
       Z <- Z/rep(sqrt(norm2[-1L]), each = length(x))
       colnames(Z) <- 0:degree
@@ -467,7 +467,7 @@ attr(sm.poly, "smart") <- TRUE
       center <- colMeans(x, na.rm = TRUE)
       x <- sweep(x, 2L, center, check.margin = FALSE)
     }
-  } else if (is.numeric(center) && (length(center) == nc)) 
+  } else if (is.numeric(center) && (length(center) == nc))
     x <- sweep(x, 2L, center, check.margin = FALSE) else
     stop("length of 'center' must equal the number of columns of 'x'")
   if (is.logical(scale)) {
@@ -479,12 +479,12 @@ attr(sm.poly, "smart") <- TRUE
       scale <- apply(x, 2L, f)
       x <- sweep(x, 2L, scale, "/", check.margin = FALSE)
     }
-  } else if (is.numeric(scale) && length(scale) == nc) 
-    x <- sweep(x, 2L, scale, "/", check.margin = FALSE) else 
+  } else if (is.numeric(scale) && length(scale) == nc)
+    x <- sweep(x, 2L, scale, "/", check.margin = FALSE) else
     stop("length of 'scale' must equal the number of columns of 'x'")
-  if (is.numeric(center)) 
+  if (is.numeric(center))
     attr(x, "scaled:center") <- center
-  if (is.numeric(scale)) 
+  if (is.numeric(scale))
     attr(x, "scaled:scale") <- scale
 
   if (smart.mode.is("write")) {
@@ -501,7 +501,7 @@ attr(sm.scale.default, "smart") <- TRUE
 
 
 
- sm.scale <- function (x, center = TRUE, scale = TRUE) 
+ sm.scale <- function (x, center = TRUE, scale = TRUE)
   UseMethod("sm.scale")
 
 
@@ -542,7 +542,7 @@ sm.min2 <- function(x, .minx = min(x)) {
   x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
   if (smart.mode.is("read")) {  # Use recursion
     return(eval(smart.expression))
-  } else 
+  } else
   if (smart.mode.is("write"))
     put.smart(list( .minx = .minx , match.call = match.call()))
   .minx
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 065d35f..4dbaf8c 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,14 +1,16 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
 
 
-summaryvgam <- function(object, dispersion = NULL,
-                        digits = options()$digits-2,
-                        presid = TRUE) {
+summaryvgam <-
+  function(object, dispersion = NULL,
+           digits = options()$digits-2,
+           presid = TRUE,
+           nopredictors = FALSE) {
 
 
 
@@ -19,10 +21,10 @@ summaryvgam <- function(object, dispersion = NULL,
          "sum of squares) for computing the dispersion parameter")
   }
 
-  newobject <- object 
+  newobject <- object
   class(newobject) <- "vglm"
   stuff <- summaryvglm(newobject, dispersion = dispersion)
-  rdf <- stuff at df[2] <- object at df.residual  # NA 
+  rdf <- stuff at df[2] <- object at df.residual  # NA
 
   M <- object at misc$M
   nrow.X.vlm <- object at misc$nrow.X.vlm
@@ -34,7 +36,6 @@ summaryvgam <- function(object, dispersion = NULL,
 
 
 
-  # Overwrite some of the stuff with the correct stuff
 
 
   useF <- object at misc$useF
@@ -51,18 +52,18 @@ summaryvgam <- function(object, dispersion = NULL,
       aod <- cbind(aod, NA, NA, NA)
       nl.chisq <- object at nl.chisq / object at dispersion
 
-      special <- abs(nldf) < 0.1  # This was the quick fix in s.vam()  
-      nldf[special] <- 1          # Give it a plausible value for pchisq & pf
+      special <- abs(nldf) < 0.1  # This was the quick fix in s.vam()
+      nldf[special] <- 1  # Give it a plausible value for pchisq & pf
 
       snames <- names(nldf)
       aod[snames, 2] <- round(nldf, 1)
       aod[snames, 3] <- if (useF) nl.chisq/nldf  else nl.chisq
       aod[snames, 4] <- if (useF)
-          pf(nl.chisq / nldf, nldf, rdf, lower.tail = FALSE) else 
+          pf(nl.chisq / nldf, nldf, rdf, lower.tail = FALSE) else
           pchisq(nl.chisq, nldf, lower.tail = FALSE)
 
       if (any(special)) {
-        aod[snames[special], 2:4] <- NA 
+        aod[snames[special], 2:4] <- NA
       }
 
       rnames <- c("Df", "Npar Df", "Npar Chisq", "P(Chi)")
@@ -70,9 +71,9 @@ summaryvgam <- function(object, dispersion = NULL,
           rnames[3:4] <- c("Npar F", "Pr(F)")
       dimnames(aod) <- list(names(df), rnames)
       heading <- if (useF)
-      "\nDF for Terms and Approximate F-values for Nonparametric Effects\n"
+  "\nDF for Terms and Approximate F-values for Nonparametric Effects\n"
       else
-      "\nDF for Terms and Approximate Chi-squares for Nonparametric Effects\n"
+  "\nDF for Terms and Approximate Chi-squares for Nonparametric Effects\n"
     } else {
       heading <- "DF for Terms\n\n"
     }
@@ -102,7 +103,9 @@ summaryvgam <- function(object, dispersion = NULL,
       answer at pearson.resid <- as.matrix(Presid)
   }
 
-  slot(answer, "anova") <- aod 
+  answer at misc$nopredictors <- nopredictors
+
+  slot(answer, "anova") <- aod
 
   answer
 }
@@ -110,14 +113,17 @@ summaryvgam <- function(object, dispersion = NULL,
 
 
 
-show.summary.vgam <- function(x, quote = TRUE, prefix = "",
-                              digits = options()$digits-2) {
+show.summary.vgam <-
+  function(x, quote = TRUE, prefix = "",
+           digits = options()$digits-2,
+           nopredictors = NULL) {
 
   M <- x at misc$M
 
 
-  cat("\nCall:\n")
-  dput(x at call)
+  cat("\nCall:\n", paste(deparse(x at call), sep = "\n", collapse = "\n"),
+      "\n\n", sep = "")
+
 
   Presid <- x at pearson.resid
   rdf <- x at df[2]
@@ -136,21 +142,39 @@ show.summary.vgam <- function(x, quote = TRUE, prefix = "",
     }
   }
 
+
+
+  use.nopredictors <- if (is.logical(nopredictors))
+    nopredictors else x at misc$nopredictors  # 20140728
+  if (!is.logical(use.nopredictors)) {
+    warning("cannot determine 'nopredictors'; choosing FALSE")
+    use.nopredictors <- FALSE
+  }
+
+
+
   cat("\nNumber of linear predictors:   ", M, "\n")
 
-  if (!is.null(x at misc$predictors.names))
-  if (M == 1)
-    cat("\nName of linear predictor:",
-        paste(x at misc$predictors.names, collapse = ", "), "\n") else
-  if (M <= 5)
-    cat("\nNames of linear predictors:",
-        paste(x at misc$predictors.names, collapse = ", "), "\n")
+
+
+
+  if (!is.null(x at misc$predictors.names) && !use.nopredictors) {
+    if (M == 1) {
+      cat("\nName of linear predictor:",
+          paste(x at misc$predictors.names, collapse = ", "), "\n")
+    } else
+    if (M <= 5) {
+      cat("\nNames of linear predictors:",
+        paste(x at misc$predictors.names, collapse = ", "), fill = TRUE)
+    }
+  }
+
 
   prose <- ""
   if (length(x at dispersion)) {
     if (is.logical(x at misc$estimated.dispersion) &&
         x at misc$estimated.dispersion) {
-      prose <- "(Estimated) " 
+      prose <- "(Estimated) "
     } else {
       if (is.numeric(x at misc$default.dispersion) &&
           x at dispersion == x at misc$default.dispersion)
@@ -206,10 +230,10 @@ setMethod("show", "summary.vgam",
 
 
 
- 
- 
+
+
 show.vanova <- function(x, digits = .Options$digits, ...) {
-  rrr <- row.names(x) 
+  rrr <- row.names(x)
   heading <- attr(x, "heading")
   if (!is.null(heading))
     cat(heading, sep = "\n")
@@ -231,7 +255,7 @@ show.vanova <- function(x, digits = .Options$digits, ...) {
 as.vanova <- function(x, heading) {
   if (!is.data.frame(x))
     stop("x must be a data frame")
-  rrr <- row.names(x) 
+  rrr <- row.names(x)
   attr(x, "heading") <- heading
   x <- as.data.frame(x, row.names = rrr)
   x
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index c405f00..5b0ca5f 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -39,8 +39,8 @@ summaryvglm <-
 
 
   if (length(dispersion) &&
-      dispersion == 0 && 
-      length(object at family@summary.dispersion) && 
+      dispersion == 0 &&
+      length(object at family@summary.dispersion) &&
       !object at family@summary.dispersion) {
     stop("cannot use the general VGLM formula (based on a residual ",
          "sum of squares) for computing the dispersion parameter")
@@ -52,7 +52,7 @@ summaryvglm <-
                       object,
 
                       presid = FALSE,
- 
+
                       correlation = correlation,
                       dispersion = dispersion)
 
@@ -137,7 +137,7 @@ setMethod("summaryvglmS4VGAM",  signature(VGAMff = "cumulative"),
                     object = object,
                     ...)
   object at post$reverse <- object at misc$reverse
- 
+
 
   cfit <- coef(object, matrix = TRUE)
   M <- ncol(cfit)
@@ -231,19 +231,24 @@ show.summary.vglm <-
            quote = TRUE,
            prefix = "",
            presid = TRUE,
-           signif.stars = NULL,  # Use this if logical; 20140728
+           signif.stars = NULL,   # Use this if logical; 20140728
            nopredictors = NULL,   # Use this if logical; 20150831
+           top.half.only = FALSE,  # Added 20160803
            ...  # Added 20151214
            ) {
 
+
+
   M <- x at misc$M
   coef <- x at coef3  # icients
   correl <- x at correlation
 
   digits <- if (is.null(digits)) options()$digits - 2 else digits
 
-  cat("\nCall:\n")
-  dput(x at call)
+  cat("\nCall:\n", paste(deparse(x at call), sep = "\n", collapse = "\n"),
+      "\n\n", sep = "")
+
+
 
   Presid <- x at pearson.resid
   rdf <- x at df[2]
@@ -280,10 +285,17 @@ show.summary.vglm <-
   }
 
 
-  cat("\nCoefficients:\n")
-  printCoefmat(coef, digits = digits,
-               signif.stars = use.signif.stars,
-               na.print = "NA")
+
+  if (length(coef)) {
+    cat(if (top.half.only) "\nParametric coefficients:" else
+        "\nCoefficients:", "\n")
+    printCoefmat(coef, digits = digits,
+                 signif.stars = use.signif.stars,
+                 na.print = "NA")
+  }
+
+  if (top.half.only)
+    return(invisible(NULL))
 
 
 
@@ -292,7 +304,7 @@ show.summary.vglm <-
   if (!is.null(x at misc$predictors.names) && !use.nopredictors) {
     if (M == 1) {
       cat("\nName of linear predictor:",
-          paste(x at misc$predictors.names, collapse = ", "), "\n") 
+          paste(x at misc$predictors.names, collapse = ", "), "\n")
     } else
     if (M <= 5) {
       cat("\nNames of linear predictors:",
@@ -314,6 +326,7 @@ show.summary.vglm <-
           x at dispersion != x at misc$default.dispersion)
         prose <- "(Pre-specified) "
     }
+    if (any(x at dispersion != 1))
     cat(paste("\n", prose, "Dispersion Parameter for ",
               x at family@vfamily[1],
               " family:   ", yformat(x at dispersion, digits), "\n",
@@ -456,7 +469,7 @@ function(object, dispersion = NULL, untransform = FALSE) {
 
   so <- summaryvlm(object, correlation = FALSE,
                    dispersion = dispersion)
-  d <- if (any(slotNames(so) == "dispersion") && 
+  d <- if (any(slotNames(so) == "dispersion") &&
            is.Numeric(so at dispersion))
        so at dispersion else 1
   answer <- d * so at cov.unscaled
@@ -472,7 +485,7 @@ function(object, dispersion = NULL, untransform = FALSE) {
 
 
 
-  new.way <- TRUE 
+  new.way <- TRUE
 
 
 
@@ -493,7 +506,7 @@ function(object, dispersion = NULL, untransform = FALSE) {
 
   tvector <- numeric(M)
   etavector <- predict(object)[1, ]  # Contains transformed parameters
-  LINK <- object at misc$link  # link.names # This should be a character vector.
+  LINK <- object at misc$link
   EARG <- object at misc$earg  # This could be a NULL
   if (is.null(EARG))
     EARG <- list(theta = NULL)
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index ba06f3c..886aed4 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -17,17 +17,17 @@ summaryvlm <-
   function(object, correlation = FALSE, dispersion = NULL,
            Colnames = c("Estimate", "Std. Error", "z value", "Pr(>|z|)"),
            presid = TRUE) {
-                         
+
 
 
   if (is.logical(object at misc$BFGS) && object at misc$BFGS)
-      warning(paste("the estimated variance-covariance matrix is",
-         "usually inaccurate as the working weight matrices are a",
-         "crude BFGS quasi-Newton approximation"))
+    warning("the estimated variance-covariance matrix is ",
+            "usually inaccurate because the working weight matrices are ",
+            "obtained by a crude BFGS quasi-Newton approximation")
 
   M <- object at misc$M
   n <- object at misc$n
-  nrow.X.vlm <- object at misc$nrow.X.vlm 
+  nrow.X.vlm <- object at misc$nrow.X.vlm
   ncol.X.vlm <- object at misc$ncol.X.vlm  # May be NULL for CQO objects
 
   Coefs <- object at coefficients
@@ -41,11 +41,11 @@ summaryvlm <-
   }
 
   if (anyNA(Coefs)) {
-    warning(paste("Some NAs in the coefficients---no summary",
-                  " provided; returning object\n"))
+    warning("Some NAs in the coefficients---no summary is ",
+            " provided; returning 'object'")
     return(object)
   }
-  rdf <- object at df.residual   
+  rdf <- object at df.residual
 
   if (!length(dispersion)) {
     if (is.numeric(object at misc$dispersion)) {
@@ -70,7 +70,7 @@ summaryvlm <-
       warning("overriding the value of object at misc$dispersion")
     object at misc$estimated.dispersion <- FALSE
   }
-  sigma <- sqrt(dispersion)  # Can be a vector 
+  sigma <- sqrt(dispersion)  # Can be a vector
 
   if (is.Numeric(ncol.X.vlm)) {
     R <- object at R
@@ -90,7 +90,7 @@ summaryvlm <-
   dimnames(coef3) <- list(cnames, Colnames)
   SEs <- sqrt(diag(covun))
   if (length(sigma) == 1 && is.Numeric(ncol.X.vlm)) {
-    coef3[, 2] <- SEs %o% sigma  # Fails here when sigma is a vector 
+    coef3[, 2] <- SEs %o% sigma  # Fails here when sigma is a vector
     coef3[, 3] <- coef3[, 1] / coef3[, 2]
     pvalue <- 2 * pnorm(-abs(coef3[, 3]))
     coef3[, 4] <- pvalue
@@ -126,7 +126,7 @@ summaryvlm <-
 
   if (is.Numeric(ncol.X.vlm))
     answer at cov.unscaled <- covun
-  answer at dispersion <- dispersion  # Overwrite this 
+  answer at dispersion <- dispersion  # Overwrite this
 
   if (length(Presid))
     answer at pearson.resid <- as.matrix(Presid)
@@ -143,7 +143,7 @@ show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
                              prefix = "") {
 
 
-  M <- x at misc$M 
+  M <- x at misc$M
   coef3 <- x at coef3 # ficients
   correl <- x at correlation
 
@@ -184,11 +184,11 @@ show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
   if (length(x at misc$predictors.names))
   if (M == 1) {
     cat("\nName of response:",
-        paste(x at misc$predictors.names, collapse = ", "), "\n") 
+        paste(x at misc$predictors.names, collapse = ", "), "\n")
   } else {
     UUU <- paste(x at misc$predictors.names, collapse = ", ")
     UUU <- x at misc$predictors.names
-    cat("\nNames of responses:\n") 
+    cat("\nNames of responses:\n")
     cat(UUU, fill = TRUE, sep = ", ")
   }
 
diff --git a/R/vcov.pvgam.R b/R/vcov.pvgam.R
new file mode 100644
index 0000000..3c9ced0
--- /dev/null
+++ b/R/vcov.pvgam.R
@@ -0,0 +1,526 @@
+# These functions are
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# S3-style bridge: the real work is done by vcovpvgam().
+vcov.pvgam <- function(object, ...) vcovpvgam(object, ...)
+
+
+
+# vcovpvgam(): variance-covariance matrix of a "pvgam" object.
+#
+# With special = FALSE (the default) this simply delegates to
+# vcovvlm().  The special = TRUE branch computes either a frequentist
+# (sandwich) or a Bayesian covariance matrix from QR decompositions of
+# the weighted VLM design matrix and its penalty-augmented version,
+# cross-checking the Bayesian result against the rV factor returned by
+# mgcv::magic.  NOTE(review): the author flags this branch as only 50%
+# finished and it emits diagnostic print() output; do not rely on it.
+ vcovpvgam <-
+ function(object,
+          special = FALSE,
+          frequentist = FALSE, dispersion = NULL, unconditional = FALSE,
+          ...) {
+
+
+
+
+
+ if (!special) {
+   return(vcovvlm(object, ...))
+  }
+
+
+
+ warning("vcovpvgam() is only 50% finished")
+
+
+ print("in vcovpvgam; hi 2a")
+ print("class(object)")
+ print( class(object) )
+
+
+
+
+  M <- npred(object)  # Number of linear predictors
+  n <- nobs(object, type = "lm")
+  wz <- weights(object, type = "working")
+  X.vlm.save <- model.matrix(object, type = "vlm")
+  U <- vchol(wz, M = M, n = n)  # presumably a Cholesky-type factor of wz
+  X.vlm <- mux111(U, X.vlm.save, M = M)  # weighted design matrix
+  X.vlm.aug <- rbind(X.vlm,
+                     model.matrix(object, type = "penalty"))
+
+
+  qr1 <- qr(X.vlm.aug)  # QR of the penalty-augmented design matrix
+  qr2 <- qr(X.vlm)      # QR of the unpenalized (weighted) design matrix
+   poststuff <-
+     mgcv::magic.post.proc(X.vlm.aug,
+                           object = object at ospsslot$magicfit, w = NULL)
+  magicfit <- object at ospsslot$magicfit
+  rV <- magicfit$rV
+  Vb   <- poststuff$Vb
+  Ve   <- poststuff$Ve
+  hhat <- poststuff$hat  # currently unused below
+  eedf <- poststuff$edf  # currently unused below
+
+
+  scale.param <- 1  # Assumed
+
+
+  vc <- if (frequentist) {
+
+    # Frequentist sandwich: (X'X + S)^{-1} X'X (X'X + S)^{-1}.
+    mat1 <- solve(crossprod(qr.R(qr1)))
+    scale.param *
+        (mat1 %*% crossprod(qr.R(qr2)) %*% mat1)
+  } else {
+    Vc <- NULL  # Corrected ML or REML is not available.
+
+    # Bayesian posterior covariance: scale * (X'X + S)^{-1}.
+    Vp  <- scale.param * tcrossprod(solve(qr.R(qr1)))
+
+    Vp2 <- rV %*% t(rV)  # * sig2  # For checking
+
+ print("max(abs(Vp - Vp2)); should be 0")
+ print( max(abs(Vp - Vp2)) )
+
+
+    if (FALSE) {  # dead code kept for reference; SemiParFit is undefined here
+    He <- SemiParFit$fit$hessian
+    He.eig <- eigen(He, symmetric=TRUE)
+    Vb <- He.eig$vectors %*%
+          tcrossprod(diag(1/He.eig$values),
+                     He.eig$vectors)  # this could be taken from magic as well
+    Vb <- (Vb + t(Vb) ) / 2
+
+
+    HeSh <- He - SemiParFit$fit$S.h
+    F <- Vb%*%HeSh  # diag(SemiParFit$magpp$edf)
+
+
+    HeSh <- He
+    Ve <- Vb
+    F <- F1 <- diag(rep(1,dim(Vb)[1]))
+    R <- SemiParFit$bs.mgfit$R
+    }
+
+
+
+    if (unconditional && !is.null(Vc))
+      Vc else Vp
+  }  # Bayesian
+  # Rescale by the dispersion parameter (taken from summary() when the
+  # caller does not supply one).
+  if (is.null(dispersion)) {
+    sig2 <- 1  # zz
+    vc <- summary(object)@dispersion * vc / sig2
+  } else {
+    sig2 <- summary(object)@dispersion  # 1  # zz
+    vc <- dispersion * vc / sig2
+  }
+
+
+
+ print("head(sort(diag(vc)))")
+ print( head(sort(diag(vc))) )
+ print("head(sort(diag(Ve)))")
+ print( head(sort(diag(Ve))) )
+
+
+ print("tail(sort(diag(vc)))")
+ print( tail(sort(diag(vc))) )
+ print("tail(sort(diag(Ve)))")
+ print( tail(sort(diag(Ve))) )
+
+
+
+ print("head(sort(diag(vc))) / head(sort(diag(Ve)))")
+ print( head(sort(diag(vc))) / head(sort(diag(Ve))) )
+
+
+
+
+ print("max(abs(sort(diag(vc)) - sort(diag(Ve))))")
+ print( max(abs(sort(diag(vc)) - sort(diag(Ve)))) )
+
+
+
+
+
+  vc
+}
+
+
+
+
+
+
+# Register vcovpvgam() as the S4 "vcov" method for class "pvgam".
+setMethod("vcov", "pvgam",
+         function(object, ...)
+         vcovpvgam(object, ...))
+
+
+
+
+
+
+
+
+# startstoppvgam(): for each component function of the sm.os()/sm.ps()
+# smooth terms in a pvgam object, compute the indices of its
+# coefficients within coef(object).  Returns a named list of integer
+# index vectors; the names come from vlabel() and match the names used
+# for the EDF vector (e.g. by summarypvgam()).
+# Stops with an error if the object has no sm.os()/sm.ps() term.
+startstoppvgam <-
+  function(object, ...) {
+
+  which.X.sm.osps <- object@ospsslot$sm.osps.list$which.X.sm.osps
+  if (!length(which.X.sm.osps))
+    stop("no 'sm.os()' or 'sm.ps()' term in 'object'")
+
+  # Number of columns of each term's constraint matrix H_k.
+  all.ncol.Hk <- unlist(lapply(constraints(object, type = "term"), ncol))
+  names.which.X.sm.osps <- names(which.X.sm.osps)
+  endf <- rep_len(NA_real_, sum(all.ncol.Hk[names.which.X.sm.osps]))
+  names(endf) <- vlabel(names.which.X.sm.osps,
+                        all.ncol.Hk[names.which.X.sm.osps],
+                        M = npred(object))
+  stopstart <- NULL
+
+
+  iptr <- 1  # Current position within the full coefficient vector
+  for (ii in names(all.ncol.Hk)) {
+    if (length(which.X.sm.osps[[ii]])) {
+      # A smooth term: its coefficients occupy a contiguous run whose
+      # length is ncol(H_k) times the number of spline basis columns.
+      temp3 <- -1 + iptr + all.ncol.Hk[ii] * length(which.X.sm.osps[[ii]])
+      new.index <- iptr:temp3  # Includes all component functions wrt xk
+      iptr <- iptr + length(new.index)  # temp3
+      # Column jay of 'mat.index' holds the coefficient indices of the
+      # jay-th component function (coefficients are stored interleaved,
+      # hence byrow = TRUE).
+      mat.index <- matrix(new.index, ncol = all.ncol.Hk[ii], byrow = TRUE)
+      for (jay in 1:all.ncol.Hk[ii]) {
+        stopstart <- c(stopstart, list(mat.index[, jay]))
+      }  # for (jay)
+    } else {
+      iptr <- iptr + all.ncol.Hk[ii]  # Skip over a non-smooth term
+    }
+  }  # ii
+  names(stopstart) <- names(endf)
+  stopstart
+}
+
+
+
+
+
+
+
+
+
+
+
+summarypvgam <-
+  function(object, dispersion = NULL,
+           digits = options()$digits-2,
+           presid = TRUE) {
+
+  stuff <- summaryvglm(object, dispersion = dispersion,
+           digits = digits,
+           presid = presid)
+
+  answer <-
+  new("summary.pvgam",
+      object,
+      call = stuff at call,
+      cov.unscaled = stuff at cov.unscaled,
+      correlation = stuff at correlation,
+      df = stuff at df,
+      sigma = stuff at sigma)
+
+  answer at misc$nopredictors <- stuff at misc$nopredictors
+  answer at ospsslot <- object at ospsslot
+
+
+  slot(answer, "coefficients") <- stuff at coefficients  # Replace
+
+  coef3 <- stuff at coef3
+  aassign <- attr(model.matrix(object, type = "vlm"),  "assign")
+  myterms <- names(object at ospsslot$sm.osps.list$which.X.sm.osps)
+  index.exclude <- NULL
+  for (ii in myterms) {
+    index.exclude <- c(index.exclude, unlist(aassign[[ii]]))
+  }
+  slot(answer, "coef3") <- coef3[-index.exclude, , drop = FALSE]
+
+
+
+  if (is.numeric(stuff at dispersion))
+    slot(answer, "dispersion") <- stuff at dispersion
+
+  if (presid) {
+    Presid <- residuals(object, type = "pearson")
+    if (length(Presid))
+      answer at pearson.resid <- as.matrix(Presid)
+  }
+
+
+
+
+
+
+
+  # pinv(): pseudo-inverse of a symmetric matrix V, truncated to at
+  # most rank M.  Eigenvalues below rank.tol times the largest are
+  # treated as zero.  The achieved rank is returned in the "rank"
+  # attribute.  (Appears adapted from mgcv's summary code -- verify.)
+  pinv <- function(V, M, rank.tol = 1e-6) {
+    D <- eigen(V, symmetric = TRUE)
+    # Numerical rank: eigenvalues above rank.tol * largest eigenvalue.
+    M1 <- length(D$values[D$values > rank.tol * D$values[1]])
+    if (M > M1)
+      M <- M1  # avoid problems with zero eigen-values
+
+    # Temporarily set the discarded eigenvalues to 1 so that 1/values
+    # stays finite, then zero them out after inverting.
+    if (M+1 <= length(D$values))
+      D$values[(M+1):length(D$values)] <- 1
+    D$values <- 1 / D$values
+    if (M+1 <= length(D$values))
+      D$values[(M+1):length(D$values)] <- 0
+    res <- D$vectors %*% (D$values * t(D$vectors))  ##D$u%*%diag(D$d)%*%D$v
+    attr(res, "rank") <- M
+    res
+  }  ## end of pinv
+
+
+
+  startstop <- startstoppvgam(object)
+  m <- length(startstop)
+
+  df <- edf1 <- edf <- s.pv <- chi.sq <- array(0, m)
+  names(chi.sq) <- names(startstop)
+  p.type <- 5  # Frequentist
+  est.disp <- if (is.logical(object at misc$estimated.dispersion))
+    object at misc$estimated.dispersion else FALSE
+
+  pvgam.residual.df <- df.residual_pvgam(object)
+
+
+
+  for (i in 1:m) {
+
+    p <- coef(as(object, "pvgam"))[(startstop[[i]])]  # params for smooth
+
+    endf <- endfpvgam(object, diag.all = TRUE)  # This is ENDF+1 actually
+
+
+    edf1[i] <- edf[i] <- sum(endf[(startstop[[i]])])
+    if (FALSE && !is.null(object$edf1))
+      edf1[i] <- sum(object$edf1[(startstop[[i]])])
+
+    V <- if (p.type == 5) {
+      Ve <- vcov(object, special = FALSE)
+      Ve[(startstop[[i]]), (startstop[[i]]), drop = FALSE]
+    } else {
+      Vp <- vcov(object, special = TRUE, frequentist = FALSE)
+      Vp[(startstop[[i]]), (startstop[[i]]), drop = FALSE]
+    }
+
+    if (p.type == 5) {
+      M1 <- length(startstop[[i]])  # zz
+      M <- min(M1,
+               ceiling(2*sum(endf[(startstop[[i]])]))
+               )
+      V <- pinv(V, M)
+      chi.sq[i] <- t(p) %*% V %*% p
+      df[i] <- attr(V, "rank")
+    }
+
+
+
+    if (p.type == 5) {
+      s.pv[i] <- if (est.disp) {
+        pf(chi.sq[i] / df[i], df1 = df[i], df2 = pvgam.residual.df,
+           lower.tail = FALSE)
+      } else {
+        pchisq(chi.sq[i], df = df[i], lower.tail = FALSE)
+      }
+      if (df[i] < 0.1)
+        s.pv[i] <- NA
+    }
+
+
+    if (est.disp) {
+      if (p.type == 5) {
+        s.table <- cbind(edf, df, chi.sq / df, s.pv)
+        dimnames(s.table) <- list(names(chi.sq),
+                                  c("edf", "Est.rank", "F", "p-value"))
+      } else {
+        s.table <- cbind(edf, df, chi.sq/df, s.pv)
+        dimnames(s.table) <- list(names(chi.sq),
+                                  c("edf", "Ref.df", "F", "p-value"))
+      }
+    } else {
+      if (p.type == 5) {
+ # This case is commonly executed
+        s.table <- cbind(edf, df, chi.sq, s.pv)
+        dimnames(s.table) <- list(names(chi.sq),
+                                  c("edf", "Est.rank", "Chi.sq", "p-value"))
+      } else {
+        s.table <- cbind(edf, df, chi.sq, s.pv)
+        dimnames(s.table) <- list(names(chi.sq),
+                                  c("edf", "Ref.df", "Chi.sq", "p-value"))
+      }
+    }  # else
+  }  # for (i)
+  answer at post$s.table <- s.table
+
+
+
+
+
+  aod <- data.frame(message = 'this does not work yet')
+  slot(answer, "anova") <- aod
+
+  answer
+}  # summarypvgam()
+
+
+
+
+
+
+
+show.summary.pvgam <-
+  function(x, quote = TRUE, prefix = "",
+           digits = options()$digits-2,
+           signif.stars = getOption("show.signif.stars")) {
+
+
+
+
+  show.summary.vglm(x, quote = quote, prefix = prefix,
+                    digits = digits, top.half.only = TRUE)
+
+
+
+  startstop <- startstoppvgam(x)
+  m <- length(startstop)
+  s.table <- x at post$s.table
+  if (0 < m && length(s.table)) {
+    cat("\nApproximate significance of smooth terms:\n")
+    printCoefmat(s.table, digits = digits,
+                 signif.stars = signif.stars, has.Pvalue = TRUE,
+                 na.print = "NA", cs.ind = 1)
+  }
+
+
+
+
+
+
+  M <- x at misc$M
+
+
+  Presid <- x at pearson.resid
+  rdf <- x at df[2]
+
+
+  cat("\nNumber of linear/additive predictors:   ", M, "\n")
+
+  if (!is.null(x at misc$predictors.names))
+  if (M == 1)
+    cat("\nName of linear/additive predictor:",
+        paste(x at misc$predictors.names, collapse = ", "), "\n") else
+  if (M <= 5)
+    cat("\nNames of linear/additive predictors:",
+        paste(x at misc$predictors.names, collapse = ", "), "\n")
+
+  prose <- ""
+  if (length(x at dispersion)) {
+    if (is.logical(x at misc$estimated.dispersion) &&
+        x at misc$estimated.dispersion) {
+      prose <- "(Estimated) "
+    } else {
+      if (is.numeric(x at misc$default.dispersion) &&
+          x at dispersion == x at misc$default.dispersion)
+        prose <- "(Default) "
+
+      if (is.numeric(x at misc$default.dispersion) &&
+          x at dispersion != x at misc$default.dispersion)
+        prose <- "(Pre-specified) "
+    }
+    cat(paste("\n", prose, "Dispersion Parameter for ",
+        x at family@vfamily[1],
+        " family:   ",
+        format(round(x at dispersion, digits)), "\n", sep = ""))
+  }
+
+  if (length(deviance(x)))
+    cat("\nResidual deviance: ", format(round(deviance(x), digits)),
+        "on", format(round(rdf, 3)), "degrees of freedom\n")
+
+  if (length(logLik.vlm(x)))
+    cat("\nLog-likelihood:", format(round(logLik.vlm(x), digits)),
+        "on", format(round(rdf, 3)), "degrees of freedom\n")
+
+  if (length(x at criterion)) {
+    ncrit <- names(x at criterion)
+    for (ii in ncrit)
+      if (ii != "loglikelihood" && ii != "deviance")
+        cat(paste(ii, ":", sep = ""), format(x at criterion[[ii]]), "\n")
+  }
+
+
+
+
+  if (is.Numeric(x at ospsslot$iter.outer)) {
+    cat("\nNumber of outer iterations: ", x at ospsslot$iter.outer, "\n")
+    cat("\nNumber of IRLS iterations at final outer iteration: ", x at iter,
+        "\n")
+  } else {
+    cat("\nNumber of IRLS iterations: ", x at iter, "\n")
+  }
+
+
+  if (FALSE && length(x at anova)) {
+    show.vanova(x at anova, digits = digits)   # ".vanova" for Splus6
+  }
+
+  invisible(NULL)
+}  # show.summary.pvgam()
+
+
+
+
+
+
+
+
+setMethod("summary", "pvgam",
+          function(object, ...)
+          summarypvgam(object, ...))
+
+
+
+setMethod("show", "summary.pvgam",
+          function(object)
+          show.summary.pvgam(object))
+
+
+
+
+
+
+psintpvgam <- function(object, ...) {
+  object at ospsslot$sm.osps.list$ps.int
+}
+
+
+if (!isGeneric("psint"))
+  setGeneric("psint", function(object, ...)
+              standardGeneric("psint"),
+             package = "VGAM")
+
+
+setMethod("psint", "pvgam",
+         function(object, ...)
+         psintpvgam(object, ...))
+
+
+
+
+
+
diff --git a/R/vgam.R b/R/vgam.R
index 70c82a5..f386f90 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -10,12 +10,13 @@
 
 
 
-vgam <- function(formula, 
-                 family, data = list(), 
+
+vgam <- function(formula,
+                 family, data = list(),
                  weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
                  control = vgam.control(...),
-                 offset = NULL, 
+                 offset = NULL,
                  method = "vgam.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
                  contrasts = NULL,
@@ -23,7 +24,7 @@ vgam <- function(formula,
                  extra = list(),
                  form2 = NULL,  # Added 20130730
                  qr.arg = FALSE, smart = TRUE, ...) {
-  dataname <- as.character(substitute(data))  # "list" if no data= 
+  dataname <- as.character(substitute(data))  # "list" if no data=
   function.name <- "vgam"
 
   ocall <- match.call()
@@ -34,7 +35,8 @@ vgam <- function(formula,
   if (missing(data))
     data <- environment(formula)
 
-  mtsave <- terms(formula, specials = c("s", "ps"), data = data)
+  mtsave <- terms(formula, specials = c("s", "sm.os", "sm.ps"),
+                  data = data)
 
 
 
@@ -108,11 +110,11 @@ vgam <- function(formula,
 
   mf2 <- mf
   if (!missing(subset)) {
-    mf2$subset <- NULL 
-    mf2 <- eval(mf2, parent.frame())  # mf2 is the full data frame. 
-    spars2 <-  lapply(mf2, attr, "spar") 
-    dfs2   <-  lapply(mf2, attr, "df") 
-    sx2 <-  lapply(mf2, attr, "s.xargument") 
+    mf2$subset <- NULL
+    mf2 <- eval(mf2, parent.frame())  # mf2 is the full data frame.
+    spars2 <-  lapply(mf2, attr, "spar")
+    dfs2   <-  lapply(mf2, attr, "df")
+    sx2 <-  lapply(mf2, attr, "s.xargument")
     for (ii in seq_along(mf)) {
       if (length(sx2[[ii]])) {
         attr(mf[[ii]], "spar") <- spars2[[ii]]
@@ -120,7 +122,7 @@ vgam <- function(formula,
         attr(mf[[ii]], "s.xargument") <- sx2[[ii]]
       }
     }
-    rm(mf2) 
+    rm(mf2)
   }
 
 
@@ -154,12 +156,18 @@ vgam <- function(formula,
   aa <- attributes(mtsave)
   smoothers <- aa$specials
 
-  mgcv.ps <- length(smoothers$ps) > 0
-  mgcv.PS <- length(smoothers$PS) > 0
-  any.ps.terms <- mgcv.ps || mgcv.PS
+  mgcv.sm.os <- length(smoothers$sm.os) > 0
+  mgcv.sm.ps <- length(smoothers$sm.ps) > 0
+  mgcv.sm.PS <- length(smoothers$sm.PS) > 0
+  any.sm.os.terms <- mgcv.sm.os
+  any.sm.ps.terms <- mgcv.sm.ps || mgcv.sm.PS
   mgcv.s <- length(smoothers$s) > 0
-  if (any.ps.terms && mgcv.s)
-    stop("cannot include both s() and ps() (or PS()) terms in the formula")
+  if ((any.sm.os.terms || any.sm.ps.terms) && mgcv.s)
+    stop("cannot include both s() and any of sm.os() or ",
+         "sm.ps() (or sm.PS()) terms in the formula")
+  if (any.sm.os.terms && any.sm.ps.terms)
+    stop("cannot include both sm.os() and ",
+         "sm.ps() (or sm.PS()) terms in the formula")
 
 
 
@@ -173,23 +181,37 @@ vgam <- function(formula,
 
     smooth.labels <- aa$term.labels[unlist(smoothers)]
   } else {
-    function.name <- "vglm"  # This is effectively so 
+    function.name <- "vglm"  # This is effectively so
   }
-  
 
 
 
 
 
 
-  are.ps.terms <- (length(smoothers$ps) + length(smoothers$PS)) > 0
-  if (are.ps.terms) {
+
+  are.sm.os.terms <-  length(smoothers$sm.os) > 0
+  are.sm.ps.terms <- (length(smoothers$sm.ps) +
+                      length(smoothers$sm.PS)) > 0
+  if (are.sm.os.terms || are.sm.ps.terms) {
     control$criterion <- "coefficients"  # Overwrite if necessary
 
-    if (length(smoothers$ps) > 0) {
-      ff.ps <- apply(aa$factors[smoothers[["ps"]],,drop = FALSE], 2, any)
-      smoothers[["ps"]] <-
-        if (any(ff.ps)) seq(along = ff.ps)[aa$order == 1 & ff.ps] else NULL
+
+    if (length(smoothers$sm.os) > 0) {
+      ff.sm.os <- apply(aa$factors[smoothers[["sm.os"]],,drop = FALSE],
+                        2, any)
+      smoothers[["sm.os"]] <-
+        if (any(ff.sm.os))
+          seq(along = ff.sm.os)[aa$order == 1 & ff.sm.os] else NULL
+      smooth.labels <- aa$term.labels[unlist(smoothers)]
+    }
+
+    if (length(smoothers$sm.ps) > 0) {
+      ff.sm.ps <- apply(aa$factors[smoothers[["sm.ps"]],,drop = FALSE],
+                        2, any)
+      smoothers[["sm.ps"]] <-
+        if (any(ff.sm.ps))
+          seq(along = ff.sm.ps)[aa$order == 1 & ff.sm.ps] else NULL
       smooth.labels <- aa$term.labels[unlist(smoothers)]
     }
 
@@ -199,12 +221,15 @@ vgam <- function(formula,
 
 
     assignx <- attr(x, "assign")
-    which.X.ps <- assignx[smooth.labels]
-    data <- mf[, names(which.X.ps), drop = FALSE]
-    attr(data, "class") <- NULL
-    S.arg <- lapply(data, attr, "S.arg")
-    lambdalist <- lapply(data, attr, "lambda")
-    ridge.adj <- lapply(data, attr, "ridge.adj")
+    which.X.sm.osps <- assignx[smooth.labels]
+    Data <- mf[, names(which.X.sm.osps), drop = FALSE]
+    attr(Data, "class") <- NULL
+    S.arg     <- lapply(Data, attr, "S.arg")
+    sparlist  <- lapply(Data, attr, "spar")
+    ridge.adj <- lapply(Data, attr, "ridge.adj")
+    fixspar   <- lapply(Data, attr, "fixspar")
+    ps.int    <- lapply(Data, attr, "ps.int")  # FYI only; for sm.ps()
+    knots     <- lapply(Data, attr, "knots")   # FYI only; for sm.os()
     term.labels <- aa$term.labels
 
 
@@ -212,28 +237,33 @@ vgam <- function(formula,
 
 
 
-  ps.list <- if (any.ps.terms)
-               list(indexterms = ff.ps,
-                    intercept = aa$intercept,
-                    which.X.ps = which.X.ps,
-                    S.arg = S.arg,
-                    lambdalist = lambdalist,
-                    ridge.adj = ridge.adj,
-                    term.labels = term.labels,
-                    assignx = assignx) else
-               NULL
+  sm.osps.list <-
+    if (any.sm.os.terms || any.sm.ps.terms)
+      list(indexterms = if (any.sm.os.terms)
+                        ff.sm.os else ff.sm.ps,
+           intercept = aa$intercept,
+           which.X.sm.osps = which.X.sm.osps,
+           S.arg = S.arg,
+           sparlist = sparlist,
+           ridge.adj = ridge.adj,
+           term.labels = term.labels,
+           fixspar = fixspar,
+           orig.fixspar = fixspar,  # For posterity
+           ps.int = ps.int,  # FYI only
+           knots  = knots,   # FYI only
+           assignx = assignx) else
+      NULL
 
 
   fit <- vgam.fit(x = x, y = y, w = w, mf = mf,
                   Xm2 = Xm2, Ym2 = Ym2,  # Added 20130730
       etastart = etastart, mustart = mustart, coefstart = coefstart,
       offset = offset, family = family, control = control,
-      criterion = control$criterion,
       constraints = constraints, extra = extra, qr.arg = qr.arg,
       Terms = mtsave,
       nonparametric = nonparametric, smooth.labels = smooth.labels,
       function.name = function.name,
-      ps.list = ps.list,
+      sm.osps.list = sm.osps.list,
       ...)
 
 
@@ -251,9 +281,9 @@ vgam <- function(formula,
 
   fit$smomat <- NULL  # Not needed
 
-  fit$call <- ocall 
+  fit$call <- ocall
   if (model)
-    fit$model <- mf 
+    fit$model <- mf
   if (!x.arg)
     fit$x <- NULL
   if (!y.arg)
@@ -276,7 +306,7 @@ vgam <- function(formula,
 
   answer <-
   new(
-    if (any.ps.terms) "psvgam" else "vgam",
+    if (any.sm.os.terms || any.sm.ps.terms) "pvgam" else "vgam",
 
     "assign"       = attr(x, "assign"),
     "call"         = fit$call,
@@ -320,13 +350,15 @@ vgam <- function(formula,
 
 
   if (length(fit$misc$Xvlm.aug)) {
-    slot(answer, "psslot") <-
-      list(Xvlm.aug = fit$misc$Xvlm.aug,
-           ps.list  = fit$misc$ps.list,
-           magicfit = fit$misc$magicfit)
-    fit$misc$Xvlm.aug <- NULL
-    fit$misc$ps.list  <- NULL
-    fit$misc$magicfit <- NULL
+    slot(answer, "ospsslot") <-
+      list(Xvlm.aug     = fit$misc$Xvlm.aug,
+           sm.osps.list = fit$misc$sm.osps.list,
+           magicfit     = fit$misc$magicfit,
+           iter.outer   = fit$misc$iter.outer)
+    fit$misc$Xvlm.aug     <- NULL
+    fit$misc$sm.osps.list <- NULL
+    fit$misc$magicfit     <- NULL
+    fit$misc$iter.outer   <- NULL
   }
 
 
@@ -393,7 +425,7 @@ vgam <- function(formula,
   if (nonparametric && is.buggy.vlm(answer)) {
     warning("some s() terms have constraint matrices that have columns",
             " which are not orthogonal;",
-            " try using ps() instead of s().")
+            " try using sm.os() or sm.ps() instead of s().")
   } else {
   }
 
@@ -402,7 +434,7 @@ vgam <- function(formula,
 
   answer
 }
-attr(vgam, "smart") <- TRUE 
+attr(vgam, "smart") <- TRUE
 
 
 
@@ -410,24 +442,24 @@ attr(vgam, "smart") <- TRUE
 
 shadowvgam <-
         function(formula,
-                 family, data = list(), 
+                 family, data = list(),
                  weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control = vgam.control(...), 
-                 offset = NULL, 
+                 control = vgam.control(...),
+                 offset = NULL,
                  method = "vgam.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                 contrasts = NULL, 
+                 contrasts = NULL,
                  constraints = NULL,
-                 extra = list(), 
+                 extra = list(),
                  qr.arg = FALSE, ...) {
     dataname <- as.character(substitute(data))  # "list" if no data=
     function.name <- "shadowvgam"
 
     ocall <- match.call()
 
-    if (missing(data)) 
-        data <- environment(formula)
+    if (missing(data))
+      data <- environment(formula)
 
     mf <- match.call(expand.dots = FALSE)
     m <- match(c("formula", "data", "subset", "weights", "na.action",
@@ -440,7 +472,7 @@ shadowvgam <-
            stop("invalid 'method': ", method))
     mt <- attr(mf, "terms")
 
-    x <- y <- NULL 
+    x <- y <- NULL
 
     xlev <- .getXlevels(mt, mf)
     y <- model.response(mf, "any")  # model.extract(mf, "response")
@@ -449,7 +481,7 @@ shadowvgam <-
     attr(x, "assign") <- attrassigndefault(x, mt)
 
     list(Xm2 = x, Ym2 = y, call = ocall)
-}
+}  # shadowvgam
 
 
 
@@ -461,7 +493,7 @@ shadowvgam <-
 is.buggy.vlm <- function(object, each.term = FALSE, ...) {
 
 
-    
+
   Hk.list <- constraints(object)
   ncl <- names(Hk.list)
   TFvec <- rep_len(FALSE, length(ncl))
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 0f5db47..8b52582 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -7,13 +7,19 @@
 
 
 
+
+
 vgam.control <- function(all.knots = FALSE,
                          bf.epsilon = 1e-7,
-                         bf.maxit = 30, 
+                         bf.maxit = 30,
                          checkwz = TRUE,
-                         criterion = names(.min.criterion.VGAM), 
+                         Check.rank = TRUE,
+                         Check.cm.rank = TRUE,
+                         criterion = names(.min.criterion.VGAM),
                          epsilon = 1e-7,
                          maxit = 30,
+                         Maxit.outer = 20,
+                         noWarning = FALSE,
                          na.action=na.fail,
                          nk = NULL,
                          save.weights = FALSE,
@@ -21,6 +27,7 @@ vgam.control <- function(all.knots = FALSE,
                          trace = FALSE,
                          wzepsilon = .Machine$double.eps^0.75,
                          xij = NULL,
+                         gamma.arg = 1,
                          ...) {
 
 
@@ -58,6 +65,13 @@ vgam.control <- function(all.knots = FALSE,
     maxit <- 30
   }
 
+  if (!is.Numeric(Maxit.outer, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE)) {
+    warning("bad input for argument 'Maxit.outer'; ",
+            "using 20 instead")
+    Maxit.outer <- 20
+  }
+
   convergence <- expression({
     switch(criterion,
            coefficients =
@@ -65,28 +79,39 @@ vgam.control <- function(all.knots = FALSE,
                (iter < maxit &&
                 max(abs(new.coeffs - old.coeffs) / (
                     abs(old.coeffs) + epsilon)) > epsilon),
+           iter < maxit &&
            sqrt(sqrt(eff.n)) *
            abs(old.crit - new.crit) / (
-           abs(old.crit) + epsilon) > epsilon &&
-           iter < maxit)
+           abs(old.crit) + epsilon) > epsilon)
   })
 
 
+  if (!is.Numeric(gamma.arg, length.arg = 1))
+    stop("bad input for argument 'gamma.arg'")
+  if (gamma.arg < 0.5 || 3 < gamma.arg)
+    warning("input for argument 'gamma.arg' looks dubious")
+
+
   list(all.knots = as.logical(all.knots)[1],
-       bf.epsilon = bf.epsilon, 
-       bf.maxit = bf.maxit, 
+       bf.epsilon = bf.epsilon,
+       bf.maxit = bf.maxit,
        checkwz = checkwz,
+       Check.rank = Check.rank,
+       Check.cm.rank = Check.cm.rank,
        convergence = convergence,
        criterion = criterion,
-       epsilon = epsilon, 
-       maxit = maxit, 
+       epsilon = epsilon,
+       maxit = maxit,
+       Maxit.outer = Maxit.outer,
+       noWarning = as.logical(noWarning)[1],
        nk = nk,
        min.criterion = .min.criterion.VGAM,
        save.weights = as.logical(save.weights)[1],
        se.fit = as.logical(se.fit)[1],
        trace = as.logical(trace)[1],
        xij = if (is(xij, "formula")) list(xij) else xij,
-       wzepsilon = wzepsilon)
+       wzepsilon = wzepsilon,
+       gamma.arg = gamma.arg)
 }
 
 
@@ -94,7 +119,7 @@ vgam.control <- function(all.knots = FALSE,
 
 vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
                          assign, M, n, constraints) {
-  attr(qr, "class") <- "qr" 
+  attr(qr, "class") <- "qr"
   class(qr) <- "qr"
 
   if (!is.matrix(smomat)) smomat <- as.matrix(smomat)
@@ -133,7 +158,7 @@ vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
   names(ans) <- dimnames(smomat)[[2]]
   ans
 }
-    
+
 
 
 
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 9e138f3..dd1c64e 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -10,29 +10,33 @@
 
 
 vgam.fit <-
-  function(x, y, w = rep_len(1, nrow(x)), mf,
+  function(x, y, w = rep_len(1, nrow(x)),
+           mf,  # No X.vlm.arg, but mf happens to be in its position
            Xm2 = NULL, Ym2 = NULL,  # Added 20130730
            etastart = NULL, mustart = NULL, coefstart = NULL,
-           offset = 0, family, control = vgam.control(),
-           criterion = "coefficients",
-           constraints = NULL, extra= NULL, qr.arg = FALSE,
+           offset = 0, family,
+           control = vgam.control(),
+           qr.arg = FALSE,
+           constraints = NULL, extra = NULL,
            Terms,
            nonparametric, smooth.labels,
            function.name = "vgam",
-           ps.list = NULL,  # mf,
+           sm.osps.list = NULL,  # mf,
            ...) {
 
 
-  mgcvvgam <- length(ps.list) > 0  # iff \exists ps() or PS() term
+  mgcvvgam <- length(sm.osps.list) > 0
 
 
+  if (is.null(criterion <- control$criterion))
+    criterion <- "coefficients"
+
 
   eff.n <- nrow(x)  # + sum(abs(w[1:nrow(x)]))
 
   specialCM <- NULL
   post <- list()
-    check.rank <- TRUE
-    check.rank <- control$Check.rank
+  check.rank <- control$Check.rank
   epsilon <- control$epsilon
   maxit <- control$maxit
   save.weights <- control$save.weights
@@ -48,21 +52,13 @@ vgam.fit <-
 
 
 
-  n <- dim(x)[1]
-
-
-
-
-
-
-
-
+  n <- nrow(x)
 
 
 
   old.coeffs <- coefstart
 
-  intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
+  intercept.only <- ncol(x) == 1 && colnames(x) == "(Intercept)"
   y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
   n.save <- n
@@ -72,36 +68,47 @@ vgam.fit <-
   if (length(etastart)) {
     eta <- etastart
     mu <- if (length(mustart)) mustart else
-          if (length(body(slot(family, "linkinv"))))
-            slot(family, "linkinv")(eta, extra) else
-            warning("argument 'etastart' assigned a value ",
-                    "but there is no 'linkinv' slot to use it")
+            slot(family, "linkinv")(eta, extra = extra)
   }
 
   if (length(mustart)) {
     mu <- mustart
     if (length(body(slot(family, "linkfun")))) {
-      eta <- slot(family, "linkfun")(mu, extra)
+      eta <- slot(family, "linkfun")(mu, extra = extra)
     } else {
       warning("argument 'mustart' assigned a value ",
-              "but there is no 'link' slot to use it")
+              "but there is no 'linkfun' slot to use it")
     }
   }
 
-  M <- if (is.matrix(eta)) ncol(eta) else 1
+
+  validparams <- validfitted <- TRUE
+  if (length(body(slot(family, "validparams"))))
+    validparams <- slot(family, "validparams")(eta, y = y, extra = extra)
+  if (length(body(slot(family, "validfitted"))))
+    validfitted <- slot(family, "validfitted")(mu, y = y, extra = extra)
+  if (!(validparams && validfitted))
+    stop("could not obtain valid initial values. ",
+         "Try using 'etastart', 'coefstart' or 'mustart', else ",
+         "family-specific arguments such as 'imethod'.")
+
+
+
+
+  M <- NCOL(eta)
 
 
   if (length(family at constraints))
-    eval(family at constraints)
-  Hlist <- process.constraints(constraints, x, M, specialCM = specialCM)
+    eval(slot(family, "constraints"))
+  Hlist <- process.constraints(constraints, x = x, M = M,
+                               specialCM = specialCM,
+                               Check.cm.rank = control$Check.cm.rank)
 
   ncolHlist <- unlist(lapply(Hlist, ncol))
-  dimB <- sum(ncolHlist)
 
 
     if (nonparametric) {
 
-
       smooth.frame <- mf
       assignx <- attr(x, "assign")
       which <- assignx[smooth.labels]
@@ -125,10 +132,10 @@ vgam.fit <-
 
       tfit <- list(smomat = smomat, smooth.frame = smooth.frame)
     } else {
-      bf.call <- expression(vlm.wfit(xmat = X.vlm.save, z,
-                                     Hlist = NULL, U = U,
-                                     matrix.out = FALSE, is.vlmX = TRUE,
-                                     qr = qr.arg, xij = NULL))
+      bf.call <-
+        expression(vlm.wfit(xmat = X.vlm.save, z, Hlist = NULL, U = U,
+                            matrix.out = FALSE, is.vlmX = TRUE,
+                            qr = qr.arg, xij = NULL))
       bf <- "vlm.wfit"
     }
 
@@ -143,17 +150,23 @@ vgam.fit <-
 
 
   if (mgcvvgam) {
-    Xvlm.aug <- Pen.psv(constraints = constraints, ps.list = ps.list)
-    first.ps <- TRUE
+    Xvlm.aug <- get.X.VLM.aug(constraints  = constraints,
+                              sm.osps.list = sm.osps.list)
+    first.sm.osps <- TRUE  # Useless actually
   }
 
 
 
+
     if (length(coefstart)) {
-      eta <- if (ncol(X.vlm.save) > 1) X.vlm.save %*% coefstart +
-               offset else X.vlm.save * coefstart + offset
-      eta <- if (M > 1) matrix(eta, ncol = M, byrow = TRUE) else c(eta)
-      mu <- family at linkinv(eta, extra)
+      eta <- if (ncol(X.vlm.save) > 1) {
+        matrix(X.vlm.save %*% coefstart, n, M, byrow = TRUE) + offset
+      } else {
+        matrix(X.vlm.save  *  coefstart, n, M, byrow = TRUE) + offset
+      }
+      if (M == 1)
+        eta <- c(eta)
+      mu <- slot(family, "linkinv")(eta, extra = extra)
     }
 
 
@@ -165,31 +178,26 @@ vgam.fit <-
     new.crit <- switch(criterion,
                        coefficients = 1,
                        tfun(mu = mu, y = y, w = w, res = FALSE,
-                            eta = eta, extra))
+                            eta = eta, extra = extra))
     old.crit <- ifelse(minimize.criterion,  10 * new.crit + 10,
                                            -10 * new.crit - 10)
 
-    deriv.mu <- eval(family at deriv)
-    wz <- eval(family at weight)
+    deriv.mu <- eval(slot(family, "deriv"))
+    wz <- eval(slot(family, "weight"))
     if (control$checkwz)
-      wz <- checkwz(wz, M = M, trace = trace,
-                    wzepsilon = control$wzepsilon)
+      wz <- checkwz(wz, M = M, trace = trace, wzepsilon = control$wzepsilon)
 
     U <- vchol(wz, M = M, n = n, silent = !trace)
     tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
     z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
 
-    c.list <- list(wz = as.double(wz), z = as.double(z),
-                   fit = as.double(t(eta)),
-                   one.more = TRUE, U = as.double(U),
-                   coeff = as.double(rep_len(1, ncol(X.vlm.save))))
 
 
-    dX.vlm <- as.integer(dim(X.vlm.save))
-    nrow.X.vlm <- dX.vlm[[1]]
-    ncol.X.vlm <- dX.vlm[[2]]
-    if (nrow.X.vlm < ncol.X.vlm)
-      stop(ncol.X.vlm, " parameters but only ", nrow.X.vlm, " observations")
+    nrow.X.vlm <- nrow(X.vlm.save)
+    ncol.X.vlm <- ncol(X.vlm.save)
+    if (!nonparametric && nrow.X.vlm < ncol.X.vlm)
+      stop("There are ", ncol.X.vlm, " parameters but only ",
+           nrow.X.vlm, " observations")
 
 
 
@@ -200,36 +208,46 @@ vgam.fit <-
         Hlist = Hlist, U = U, matrix.out = FALSE, is.vlmX = TRUE,
         qr = qr.arg, xij = NULL,
         Xvlm.aug = Xvlm.aug,
-        ps.list = ps.list, constraints = constraints,
-        first.ps = first.ps,
+        sm.osps.list = sm.osps.list, constraints = constraints,
+        first.sm.osps = first.sm.osps,
+        control = control,  # 20160813
         trace = trace))
+    bf <- "vlm.wfit"
   }
 
 
-    while (c.list$one.more) {
-      tfit <- eval(bf.call)  # fit$smooth.frame is new
-
 
-      if (mgcvvgam) {
-        first.ps <- tfit$first.ps
-        Xvlm.aug <- tfit$Xvlm.aug
-        ps.list  <- tfit$ps.list
-        magicfit <- tfit$magicfit
-      }
+  fully.cvged <- FALSE
+  for (iter.outer in 1:control$Maxit.outer) {
+    if (fully.cvged)
+      break
+    if (trace && mgcvvgam) {
+      cat("VGAM outer iteration ", iter.outer,
+          " =============================================\n")
+      flush.console()
+    }
 
 
 
+    iter <- 1  # This is a reset for iter.outer > 1.
+    one.more <- TRUE
+    sm.osps.list$fixspar <- sm.osps.list$orig.fixspar
 
 
 
+    while (one.more) {
 
-      c.list$coeff <- tfit$coefficients
-      tfit$predictors <- tfit$fitted.values + offset
+      tfit <- eval(bf.call)  # fit$smooth.frame is new
 
-      c.list$fit <- tfit$fitted.values
 
-      if (!c.list$one.more) {
-        break
+      if (mgcvvgam) {
+        first.sm.osps <- tfit$first.sm.osps
+        Xvlm.aug <- tfit$Xvlm.aug
+        sm.osps.list <- tfit$sm.osps.list
+        if (control$Maxit.outer > 1)
+          sm.osps.list$fixspar <-
+            rep_len(TRUE, length(sm.osps.list$fixspar))
+        magicfit <- tfit$magicfit
       }
 
 
@@ -237,30 +255,29 @@ vgam.fit <-
 
 
 
-      fv <- c.list$fit
+      fv <- tfit$fitted.values  # c.list$fit
 
       if (mgcvvgam) {
-        fv <- head(fv, n*M)
+        fv <- head(fv, n * M)
       }
 
 
-      new.coeffs <- c.list$coeff
+      new.coeffs <- tfit$coefficients  # c.list$coeff
 
-      if (length(family at middle))
-        eval(family at middle)
+      if (length(slot(family, "middle")))
+        eval(slot(family, "middle"))
 
       eta <- fv + offset
-      mu <- family at linkinv(eta, extra)
+      mu <- slot(family, "linkinv")(eta, extra = extra)
 
       if (length(family at middle2))
         eval(family at middle2)
 
       old.crit <- new.crit
-
       new.crit <- switch(criterion,
                          coefficients = new.coeffs,
                          tfun(mu = mu, y = y, w = w,
-                              res = FALSE, eta = eta, extra))
+                              res = FALSE, eta = eta, extra = extra))
       if (trace) {
         cat("VGAM ", bf, " loop ", iter, ": ", criterion, "= ")
 
@@ -268,8 +285,8 @@ vgam.fit <-
                        coefficients =
                          format(new.crit,
                                 dig = round(1 - log10(epsilon))),
-                         format(new.crit, 
-                                dig = max(4, 
+                         format(new.crit,
+                                dig = max(4,
                                           round(-0 - log10(epsilon) +
                                           log10(sqrt(eff.n))))))
 
@@ -283,58 +300,76 @@ vgam.fit <-
 
       flush.console()
 
-      if (!is.finite(one.more) || !is.logical(one.more))
+      if (!is.logical(one.more))
         one.more <- FALSE
 
 
 
       if (one.more) {
         iter <- iter + 1
-        deriv.mu <- eval(family at deriv)
-        wz <- eval(family at weight)
+        deriv.mu <- eval(slot(family, "deriv"))
+        wz <- eval(slot(family, "weight"))
         if (control$checkwz)
-          wz <- checkwz(wz, M = M, trace = trace,
-                        wzepsilon = control$wzepsilon)
+          wz <- checkwz(wz, M = M, trace = trace, wzepsilon = control$wzepsilon)
 
         U <- vchol(wz, M = M, n = n, silent = !trace)
         tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
         z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
 
-        c.list$z <- z
-        c.list$wz <- wz
-        c.list$U <- U
+      } else {
+        fully.cvged <- if (mgcvvgam) (iter <= 2) else TRUE
       }
 
-      c.list$one.more <- one.more
-      c.list$coeff <- runif(length(new.coeffs))  # 20030312; twist needed!
       old.coeffs <- new.coeffs
 
     }  # End of while()
+  }  # End of for()
+
+
+
+
+    if (maxit > 1 && iter >= maxit && !control$noWarning)
+      warning("convergence not obtained in ", maxit, " IRLS iterations")
+    if (control$Maxit.outer > 1 && iter.outer >= control$Maxit.outer &&
+        !control$noWarning)
+      warning("convergence not obtained in ", control$Maxit.outer,
+              " outer iterations")
 
-    if (maxit > 1 && iter >= maxit)
-      warning("convergence not obtained in ", maxit, " iterations")
 
 
     dnrow.X.vlm <- labels(X.vlm.save)
     xnrow.X.vlm <- dnrow.X.vlm[[2]]
     ynrow.X.vlm <- dnrow.X.vlm[[1]]
 
-    if (length(family at fini))
-      eval(family at fini)
+  if (length(slot(family, "fini")))
+    eval(slot(family, "fini"))
+
+
+  if (M > 1)
+    fv <- matrix(fv, n, M)
+
+  final.coefs <- new.coeffs  # Was tfit$coefficients prior to 20160317
+  asgn <- attr(X.vlm.save, "assign")
+
+  names(final.coefs) <- xnrow.X.vlm
+
+
 
-    coefs <- tfit$coefficients
-    asgn <- attr(X.vlm.save, "assign")  # 20011129 was x 
 
-    names(coefs) <- xnrow.X.vlm
-    cnames <- xnrow.X.vlm
 
     if (!is.null(tfit$rank)) {
       rank <- tfit$rank
-      if (rank < ncol(x)) 
-        stop("rank < ncol(x) is bad")
     } else {
-      rank <- ncol(x)
+      rank <- NCOL(x)
     }
+    cnames <- xnrow.X.vlm
+
+    if (!nonparametric &&  # The first condition needed for vgam()
+        check.rank && rank < ncol.X.vlm)
+      stop("vgam() only handles full-rank models (currently)")
+
+
+
 
     R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
     R[lower.tri(R)] <- 0
@@ -342,25 +377,38 @@ vgam.fit <-
                           dimnames = list(cnames, cnames), rank = rank)
 
 
+    dim(fv) <- c(n, M)
     dn <- labels(x)
     yn <- dn[[1]]
     xn <- dn[[2]]
 
 
+  wresiduals <- z - fv  # Replaced by fv 20160408
+  if (M == 1) {
+    fv <- as.vector(fv)
+    wresiduals <- as.vector(wresiduals)
+    names(wresiduals) <- names(fv) <- yn
+  } else {
+    dimnames(wresiduals) <-
+    dimnames(fv)         <- list(yn, predictors.names)
+  }
 
-    if (is.matrix(mu)) {
-      if (length(dimnames(mu)[[2]])) {
-        y.names <- dimnames(mu)[[2]]
-      } else
-      if (length(dimnames(y)[[2]])) {
-        y.names <- dimnames(y)[[2]]
-      }
-      dimnames(mu) <- list(yn, y.names)
-    } else {
-      names(mu) <- names(fv)
+  if (is.matrix(mu)) {
+    if (length(dimnames(y)[[2]])) {
+      y.names <- dimnames(y)[[2]]
     }
+    if (length(dimnames(mu)[[2]])) {
+      y.names <- dimnames(mu)[[2]]
+    }
+    dimnames(mu) <- list(yn, y.names)
+  } else {
+    names(mu) <- names(fv)
+  }
+
 
     tfit$fitted.values <- NULL  # Have to kill it off  20011203
+
+
     fit <- structure(c(tfit,
            list(assign = asgn,
                 constraints = Hlist,
@@ -373,7 +421,13 @@ vgam.fit <-
                 R = R,
                 terms = Terms)))
 
-    df.residual <- nrow.X.vlm - rank 
+
+
+  if (qr.arg) {
+    fit$qr <- tfit$qr
+    dimnames(fit$qr$qr) <- dnrow.X.vlm
+  }
+
 
     if (!mgcvvgam && !se.fit) {
       fit$varmat <- NULL
@@ -386,14 +440,6 @@ vgam.fit <-
 
 
 
-    if (M == 1) {
-      fit$predictors <- as.vector(fit$predictors)
-      fit$residuals <- as.vector(fit$residuals)
-      names(fit$residuals) <- names(fit$predictors) <- yn
-    } else {
-      dimnames(fit$residuals)  <-
-      dimnames(fit$predictors) <- list(yn, predictors.names)
-    }
 
     NewHlist <- process.constraints(constraints, x, M,
                                     specialCM = specialCM, by.col = FALSE)
@@ -403,7 +449,7 @@ vgam.fit <-
         colnames.X.vlm = xnrow.X.vlm,
         criterion = criterion,
         function.name = function.name,
-        intercept.only=intercept.only,
+        intercept.only = intercept.only,
         predictors.names = predictors.names,
         M = M,
         n = n,
@@ -413,11 +459,9 @@ vgam.fit <-
         orig.assign = attr(x, "assign"),
         p = ncol(x),
         ncol.X.vlm = ncol.X.vlm,
-        ynames = dimnames(y)[[2]])
+        ynames = colnames(y))
 
 
-    if (criterion != "coefficients")
-      fit[[criterion]] <- new.crit
 
 
 
@@ -426,13 +470,18 @@ vgam.fit <-
     }
 
 
-
     if (nonparametric) {
       misc$smooth.labels <- smooth.labels
     }
 
 
 
+  if (mgcvvgam) {
+    misc$Xvlm.aug     <- Xvlm.aug
+    misc$sm.osps.list <- sm.osps.list
+    misc$magicfit     <- magicfit
+    misc$iter.outer   <- iter.outer
+  }
 
 
 
@@ -441,19 +490,6 @@ vgam.fit <-
 
 
 
-
-
-
-
-  if (mgcvvgam) {
-    misc$Xvlm.aug <- Xvlm.aug
-    misc$ps.list  <- ps.list
-    misc$magicfit <- magicfit
-  }
-
-
-
-
     crit.list <- list()
     if (criterion != "coefficients")
       crit.list[[criterion]] <- fit[[criterion]] <- new.crit
@@ -464,39 +500,19 @@ vgam.fit <-
         fit[[ii]] <-
         crit.list[[ii]] <-
           (slot(family, ii))(mu = mu, y = y, w = w, res = FALSE,
-                             eta = eta, extra)
+                             eta = eta, extra = extra)
         }
     }
 
 
 
 
-    if (M == 1) {
-      fit$predictors <- as.vector(fit$predictors)
-      fit$residuals  <- as.vector(fit$residuals)
-      names(fit$residuals)  <-
-      names(fit$predictors) <- yn
-    } else {
-      dimnames(fit$residuals)  <-
-      dimnames(fit$predictors) <- list(yn, predictors.names)
-    }
-
-
-
-
-
-
-
-
-
-
-
-
     if (w[1] != 1 || any(w != w[1]))
       fit$prior.weights <- w
 
-    if (length(family at last))
-      eval(family at last)
+
+  if (length(slot(family, "last")))
+    eval(slot(family, "last"))
 
 
     if (!is.null(fit$smomat)) {
@@ -508,28 +524,27 @@ vgam.fit <-
     }
 
 
-    if (!qr.arg) { 
+    if (!qr.arg) {
       fit$qr <- NULL
     }
 
 
-
-
     fit$misc <- NULL
 
-    structure(c(fit, list(
-        contrasts = attr(x, "contrasts"),
-        control = control,
-        crit.list = crit.list,
-        extra = extra,
-        family = family,
-        iter = iter,
-        misc = misc,
-        post = post,
-        x = x,
-        y = y)),
-        vclass = family at vfamily)
-}
+    structure(c(fit,
+      list(predictors = fv,  # tfit$predictors,
+           contrasts = attr(x, "contrasts"),
+           control = control,
+           crit.list = crit.list,
+           extra = extra,
+           family = family,
+           iter = iter,
+           misc = misc,
+           post = post,
+           x = x,
+           y = y)),
+           vclass = slot(family, "vfamily"))
+}  # vgam.fit()
 
 
 
@@ -546,7 +561,7 @@ new.assign <- function(X, Hlist) {
   lasgn <- unlist(lapply(asgn, length))
 
   ncolHlist <- unlist(lapply(Hlist, ncol))
-  names(ncolHlist) <- NULL  # This is necessary for below to work 
+  names(ncolHlist) <- NULL  # This is necessary for below to work
 
   temp2 <- vlabel(nasgn, ncolHlist, M)
   L <- length(temp2)
@@ -565,7 +580,7 @@ new.assign <- function(X, Hlist) {
 
   names(newasgn) <- temp2
   newasgn
-}
+}  # new.assign
 
 
 
diff --git a/R/vgam.match.q b/R/vgam.match.q
index ae1c189..2120620 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -21,7 +21,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
     knots <- vector("list", nvar)
     knots[[1]] <- temp$knots
 
-    if (nvar > 1) 
+    if (nvar > 1)
       for (ii in 2:nvar) {
         temp <- vgam.match(x[[ii]], all.knots = all.knots, nk = nk[ii])
         ooo[, ii] <- temp$matcho
@@ -31,7 +31,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
         xmin[ii] <- temp$xmin
         xmax[ii] <- temp$xmax
       }
-    names(nknots) <- names(knots) <- 
+    names(nknots) <- names(knots) <-
     names(neffec) <- names(xmin) <- names(xmax) <- names(x)
     dimnames(ooo) <- list(NULL, names(x))
 
@@ -40,7 +40,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
   }
 
   if (!is.null(attributes(x)$NAs) || anyNA(x))
-    stop("cannot smooth on variables with NAs") 
+    stop("cannot smooth on variables with NAs")
 
   sx <- unique(sort(as.vector(x)))  # "as.vector()" strips off attributes
   ooo <- match(x, sx)  # as.integer(match(x, sx))      # sx[o]==x
@@ -49,7 +49,7 @@ vgam.match <- function(x, all.knots = FALSE, nk = NULL) {
   if (neffec < 7)
     stop("smoothing variables must have at least 7 unique values")
 
-  xmin <- sx[1]     # Don't use rounded value 
+  xmin <- sx[1]     # Don't use rounded value
   xmax <- sx[neffec]
   xbar <- (sx - xmin) / (xmax - xmin)
 
diff --git a/R/vglm.R b/R/vglm.R
index 4b554fe..734efe8 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -7,17 +7,17 @@
 
 
 vglm <- function(formula,
-                 family, data = list(), 
+                 family, data = list(),
                  weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control = vglm.control(...), 
-                 offset = NULL, 
+                 control = vglm.control(...),
+                 offset = NULL,
                  method = "vglm.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                 contrasts = NULL, 
+                 contrasts = NULL,
                  constraints = NULL,
-                 extra = list(), 
-                 form2 = NULL, 
+                 extra = list(),
+                 form2 = NULL,
                  qr.arg = TRUE, smart = TRUE, ...) {
   dataname <- as.character(substitute(data))  # "list" if no data=
   function.name <- "vglm"
@@ -25,10 +25,10 @@ vglm <- function(formula,
 
   ocall <- match.call()
 
-  if (smart) 
+  if (smart)
     setup.smart("write")
 
-  if (missing(data)) 
+  if (missing(data))
     data <- environment(formula)
 
   mf <- match.call(expand.dots = FALSE)
@@ -85,7 +85,7 @@ vglm <- function(formula,
 
 
   offset <- model.offset(mf)
-  if (is.null(offset)) 
+  if (is.null(offset))
     offset <- 0 # yyy ???
   w <- model.weights(mf)
   if (!length(w)) {
@@ -112,7 +112,7 @@ vglm <- function(formula,
   fit <- vglm.fitter(x = x, y = y, w = w, offset = offset,
            Xm2 = Xm2, Ym2 = Ym2,
            etastart = etastart, mustart = mustart, coefstart = coefstart,
-           family = family, 
+           family = family,
            control = control,
            constraints = constraints,
            extra = extra,
@@ -127,19 +127,19 @@ vglm <- function(formula,
   }
 
   answer <-
-  new(Class = "vglm", 
+  new(Class = "vglm",
     "assign"       = attr(x, "assign"),
     "call"         = ocall,
     "coefficients" = fit$coefficients,
     "constraints"  = fit$constraints,
     "criterion"    = fit$crit.list,
     "df.residual"  = fit$df.residual,
-    "df.total"     = fit$df.total, 
+    "df.total"     = fit$df.total,
     "dispersion"   = 1,
     "effects"      = fit$effects,
     "family"       = fit$family,
     "misc"         = fit$misc,
-    "model"        = if (model) mf else data.frame(), 
+    "model"        = if (model) mf else data.frame(),
     "R"            = fit$R,
     "rank"         = fit$rank,
     "residuals"    = as.matrix(fit$residuals),
@@ -217,23 +217,23 @@ attr(vglm, "smart") <- TRUE
 
 shadowvglm <-
         function(formula,
-                 family, data = list(), 
+                 family, data = list(),
                  weights = NULL, subset = NULL, na.action = na.fail,
                  etastart = NULL, mustart = NULL, coefstart = NULL,
-                 control = vglm.control(...), 
-                 offset = NULL, 
+                 control = vglm.control(...),
+                 offset = NULL,
                  method = "vglm.fit",
                  model = FALSE, x.arg = TRUE, y.arg = TRUE,
-                 contrasts = NULL, 
+                 contrasts = NULL,
                  constraints = NULL,
-                 extra = list(), 
+                 extra = list(),
                  qr.arg = FALSE, ...) {
     dataname <- as.character(substitute(data))  # "list" if no data=
     function.name <- "shadowvglm"
 
     ocall <- match.call()
 
-    if (missing(data)) 
+    if (missing(data))
         data <- environment(formula)
 
     mf <- match.call(expand.dots = FALSE)
@@ -247,7 +247,7 @@ shadowvglm <-
            stop("invalid 'method': ", method))
     mt <- attr(mf, "terms")
 
-    x <- y <- NULL 
+    x <- y <- NULL
 
     xlev <- .getXlevels(mt, mf)
     y <- model.response(mf, "any")  # model.extract(mf, "response")
diff --git a/R/vglm.control.q b/R/vglm.control.q
index d426584..586ed60 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -9,17 +9,17 @@
 .min.criterion.VGAM <-
   c("deviance"      = TRUE,
     "loglikelihood" = FALSE,
-    "AIC"           = TRUE, 
+    "AIC"           = TRUE,
     "Likelihood"    = FALSE,
     "ResSS"        = TRUE,
     "coefficients"  = TRUE)
 
 
- 
+
 
 vlm.control <- function(save.weights = TRUE,
                         tol = 1e-7,
-                        method = "qr", 
+                        method = "qr",
                         checkwz = TRUE,
                         wzepsilon = .Machine$double.eps^0.75,
                         ...) {
@@ -45,12 +45,12 @@ vlm.control <- function(save.weights = TRUE,
 vglm.control <- function(checkwz = TRUE,
                          Check.rank = TRUE,
                          Check.cm.rank = TRUE,
-                         criterion = names(.min.criterion.VGAM), 
+                         criterion = names(.min.criterion.VGAM),
                          epsilon = 1e-7,
                          half.stepsizing = TRUE,
-                         maxit = 30, 
+                         maxit = 30,
                          noWarning = FALSE,
-                         stepsize = 1, 
+                         stepsize = 1,
                          save.weights = FALSE,
                          trace = FALSE,
                          wzepsilon = .Machine$double.eps^0.75,
@@ -60,62 +60,62 @@ vglm.control <- function(checkwz = TRUE,
 
 
 
-    if (mode(criterion) != "character" && mode(criterion) != "name")
-      criterion <- as.character(substitute(criterion))
-    criterion <- pmatch(criterion[1], names(.min.criterion.VGAM),
+  if (mode(criterion) != "character" && mode(criterion) != "name")
+    criterion <- as.character(substitute(criterion))
+  criterion <- pmatch(criterion[1], names(.min.criterion.VGAM),
                         nomatch = 1)
-    criterion <- names(.min.criterion.VGAM)[criterion]
+  criterion <- names(.min.criterion.VGAM)[criterion]
 
 
 
-    if (!is.logical(checkwz) || length(checkwz) != 1)
-      stop("bad input for argument 'checkwz'")
-    if (!is.Numeric(wzepsilon, length.arg = 1, positive = TRUE))
-      stop("bad input for argument 'wzepsilon'")
+  if (!is.logical(checkwz) || length(checkwz) != 1)
+    stop("bad input for argument 'checkwz'")
+  if (!is.Numeric(wzepsilon, length.arg = 1, positive = TRUE))
+    stop("bad input for argument 'wzepsilon'")
 
-    convergence <- expression({
+  convergence <- expression({
 
 
-      switch(criterion,
-             coefficients = if (iter == 1) iter < maxit else
-                            (iter < maxit &&
-                            max(abs(new.crit - old.crit) / (
-                                abs(old.crit) + epsilon)) > epsilon),
-             iter < maxit &&
-             sqrt(eff.n) *
-             abs(old.crit - new.crit) / (
-             abs(old.crit) + epsilon)  > epsilon)
-    })
+    switch(criterion,
+           coefficients = if (iter == 1) iter < maxit else
+                          (iter < maxit &&
+                          max(abs(new.crit - old.crit) / (
+                              abs(old.crit) + epsilon)) > epsilon),
+           iter < maxit &&
+           sqrt(eff.n) *
+           abs(old.crit - new.crit) / (
+           abs(old.crit) + epsilon)  > epsilon)
+  })
 
-    if (!is.Numeric(epsilon, length.arg = 1, positive = TRUE)) {
-      warning("bad input for argument 'epsilon'; using 0.00001 instead")
-      epsilon <- 0.00001
-    }
-    if (!is.Numeric(maxit, length.arg = 1,
-                    positive = TRUE, integer.valued = TRUE)) {
-      warning("bad input for argument 'maxit'; using 30 instead")
-      maxit <- 30
-    }
-    if (!is.Numeric(stepsize, length.arg = 1, positive = TRUE)) {
-      warning("bad input for argument 'stepsize'; using 1 instead")
-      stepsize <- 1
-    }
+  if (!is.Numeric(epsilon, length.arg = 1, positive = TRUE)) {
+    warning("bad input for argument 'epsilon'; using 0.00001 instead")
+    epsilon <- 0.00001
+  }
+  if (!is.Numeric(maxit, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE)) {
+    warning("bad input for argument 'maxit'; using 30 instead")
+    maxit <- 30
+  }
+  if (!is.Numeric(stepsize, length.arg = 1, positive = TRUE)) {
+    warning("bad input for argument 'stepsize'; using 1 instead")
+    stepsize <- 1
+  }
 
-    list(checkwz = checkwz,
-         Check.rank = Check.rank, 
-         Check.cm.rank = Check.cm.rank,
-         convergence = convergence, 
-         criterion = criterion,
-         epsilon = epsilon,
-         half.stepsizing = as.logical(half.stepsizing)[1],
-         maxit = maxit,
-         noWarning = as.logical(noWarning)[1],
-         min.criterion = .min.criterion.VGAM,
-         save.weights = as.logical(save.weights)[1],
-         stepsize = stepsize,
-         trace = as.logical(trace)[1],
-         wzepsilon = wzepsilon,
-         xij = if (is(xij, "formula")) list(xij) else xij)
+  list(checkwz = checkwz,
+       Check.rank = Check.rank,
+       Check.cm.rank = Check.cm.rank,
+       convergence = convergence,
+       criterion = criterion,
+       epsilon = epsilon,
+       half.stepsizing = as.logical(half.stepsizing)[1],
+       maxit = maxit,
+       noWarning = as.logical(noWarning)[1],
+       min.criterion = .min.criterion.VGAM,
+       save.weights = as.logical(save.weights)[1],
+       stepsize = stepsize,
+       trace = as.logical(trace)[1],
+       wzepsilon = wzepsilon,
+       xij = if (is(xij, "formula")) list(xij) else xij)
 }
 
 
@@ -164,7 +164,7 @@ vcontrol.expression <- expression({
   for (ii in 1:2) {
     temp <- paste(if (ii == 1) "" else
                   paste(function.name, ".", sep = ""),
-                  family at vfamily[1], 
+                  family at vfamily[1],
                   ".", control$criterion, ".control", sep = "")
     if (exists(temp, inherit = TRUE)) {
       temp <- get(temp)
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 455d575..fe1feb4 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -26,13 +26,13 @@ vglm.fit <-
 
   specialCM <- NULL
   post <- list()
-  check.rank <- TRUE  # Set this to FALSE for family functions vppr() etc.
   check.rank <- control$Check.rank
   nonparametric <- FALSE
   epsilon <- control$epsilon
   maxit <- control$maxit
   save.weights <- control$save.weights
   trace <- control$trace
+
   orig.stepsize <- control$stepsize
   minimize.criterion <- control$min.criterion
 
@@ -54,7 +54,7 @@ vglm.fit <-
   intercept.only <- ncol(x) == 1 && colnames(x) == "(Intercept)"
   y.names <- predictors.names <- NULL  # May be overwritten in @initialize
 
-  n.save <- n 
+  n.save <- n
 
 
   if (length(slot(family, "initialize")))
@@ -98,7 +98,7 @@ vglm.fit <-
 
 
 
-  M <- if (is.matrix(eta)) ncol(eta) else 1
+  M <- NCOL(eta)
 
 
 
@@ -133,7 +133,7 @@ vglm.fit <-
              matrix(X.vlm.save  *  coefstart, n, M, byrow = TRUE) + offset
            }
     if (M == 1)
-      eta <- c(eta) 
+      eta <- c(eta)
     mu <- slot(family, "linkinv")(eta, extra = extra)
   }
 
@@ -145,8 +145,8 @@ vglm.fit <-
   iter <- 1
   new.crit <- switch(criterion,
                      coefficients = 1,
-                     tfun(mu = mu, y = y, w = w,
-                          res = FALSE, eta = eta, extra))
+                     tfun(mu = mu, y = y, w = w, res = FALSE,
+                          eta = eta, extra = extra))
 
   deriv.mu <- eval(slot(family, "deriv"))
   wz <- eval(slot(family, "weight"))
@@ -176,9 +176,9 @@ vglm.fit <-
                      Hlist = NULL, U = U,
                      matrix.out = FALSE, is.vlmX = TRUE,
                      qr = qr.arg, xij = NULL)
-    
 
-    
+
+
 
 
 
@@ -200,7 +200,7 @@ vglm.fit <-
         switch(criterion,
             coefficients = new.coeffs,
             tfun(mu = mu, y = y, w = w,
-                 res = FALSE, eta = eta, extra))
+                 res = FALSE, eta = eta, extra = extra))
 
 
     if (trace && orig.stepsize == 1) {
@@ -208,11 +208,11 @@ vglm.fit <-
       UUUU <- switch(criterion,
                      coefficients =
                        format(new.crit,
-                              dig = round(1 - log10(epsilon))),
+                              digits = round(1 - log10(epsilon))),
                        format(new.crit,
-                              dig = max(4,
-                                        round(-0 - log10(epsilon) +
-                                              log10(sqrt(eff.n))))))
+                              digits = max(4,
+                                           round(-0 - log10(epsilon) +
+                                                 log10(sqrt(eff.n))))))
       switch(criterion,
              coefficients = {if (length(new.crit) > 2) cat("\n");
              cat(UUUU, fill = TRUE, sep = ", ")},
@@ -238,7 +238,7 @@ vglm.fit <-
       if (length(body(slot(family, "validfitted"))))
         validfitted <- slot(family, "validfitted")(mu, y = y, extra = extra)
       take.half.step <- !(validparams && validfitted)
-                        
+
 
      if (FALSE && take.half.step) {
        stepsize <- orig.stepsize / 4
@@ -276,11 +276,11 @@ vglm.fit <-
         if (length(slot(family, "middle2")))
           eval(slot(family, "middle2"))
 
-        new.crit <- 
+        new.crit <-
           switch(criterion,
                  coefficients = new.coeffs,
                  tfun(mu = mu, y = y, w = w,
-                      res = FALSE, eta = eta, extra))
+                      res = FALSE, eta = eta, extra = extra))
 
 
         validparams <- validfitted <- TRUE
@@ -291,13 +291,13 @@ vglm.fit <-
 
         if (validparams && validfitted &&
            (is.finite(new.crit)) &&  # 20160321
-           (criterion == "coefficients" || 
+           (criterion == "coefficients" ||
            (( minimize.criterion && new.crit < old.crit) ||
             (!minimize.criterion && new.crit > old.crit))))
           break
       }  # of repeat
 
-      if (trace) 
+      if (trace)
         cat("\n")
 
       if (too.small) {
@@ -312,11 +312,11 @@ vglm.fit <-
           UUUU <- switch(criterion,
                          coefficients =
                            format(new.crit,
-                                  dig = round(1 - log10(epsilon))),
-                           format(new.crit, 
-                                  dig = max(4,
-                                            round(-0 - log10(epsilon) +
-                                                  log10(sqrt(eff.n))))))
+                                  digits = round(1 - log10(epsilon))),
+                           format(new.crit,
+                                  digits = max(4,
+                                               round(-0 - log10(epsilon) +
+                                                     log10(sqrt(eff.n))))))
 
           switch(criterion,
                  coefficients = {
@@ -361,7 +361,7 @@ vglm.fit <-
 
 
   if (maxit > 1 && iter >= maxit && !control$noWarning)
-    warning("convergence not obtained in ", maxit, " iterations")
+    warning("convergence not obtained in ", maxit, " IRLS iterations")
 
 
 
@@ -385,7 +385,7 @@ vglm.fit <-
   cnames <- xnrow.X.vlm
 
   if (check.rank && rank < ncol.X.vlm)
-    stop("vglm only handles full-rank models (currently)")
+    stop("vglm() only handles full-rank models (currently)")
 
 
   R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
@@ -427,19 +427,18 @@ vglm.fit <-
   }
 
 
-  df.residual <- nrow.X.vlm - rank
   fit <- list(assign = asgn,
               coefficients = final.coefs,
-              constraints = Hlist, 
-              df.residual = df.residual,
+              constraints = Hlist,
+              df.residual = nrow.X.vlm - rank,
               df.total = n * M,
-              effects = effects, 
-              fitted.values = mu,
-              offset = offset, 
-              rank = rank,
+              effects = effects,   # this is good
+              fitted.values = mu,   # this is good
+              offset = offset,
+              rank = rank,   # this is good
               residuals = wresiduals,
               R = R,
-              terms = Terms)  # terms: This used to be done in vglm() 
+              terms = Terms)  # terms: This used to be done in vglm()
 
   if (qr.arg) {
     fit$qr <- tfit$qr
@@ -456,7 +455,7 @@ vglm.fit <-
       colnames.x = xn,
       colnames.X.vlm = xnrow.X.vlm,
       criterion = criterion,
-      function.name = function.name, 
+      function.name = function.name,
       intercept.only = intercept.only,
       predictors.names = predictors.names,
       M = M,
@@ -479,7 +478,8 @@ vglm.fit <-
         length(body(slot(family, ii)))) {
       fit[[ii]] <-
       crit.list[[ii]] <- (slot(family, ii))(mu = mu, y = y, w = w,
-                                            res = FALSE, eta = eta, extra)
+                                            res = FALSE, eta = eta,
+                                            extra = extra)
     }
   }
 
@@ -505,6 +505,6 @@ vglm.fit <-
         x = x,
         y = y)),
         vclass = slot(family, "vfamily"))
-}
+}  # vglm.fit()
 
 
diff --git a/R/vlm.R b/R/vlm.R
index 55354ed..28aa113 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,20 +1,20 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
 
 
 vlm <- function(formula,
-                data = list(), 
+                data = list(),
                 weights = NULL, subset = NULL, na.action = na.fail,
-                prior.weights = NULL, 
-                control = vlm.control(...), 
+                prior.weights = NULL,
+                control = vlm.control(...),
                 method = "qr",
                 model = FALSE, x.arg = FALSE, y.arg = TRUE, qr.arg = TRUE,
-                contrasts = NULL, 
+                contrasts = NULL,
                 constraints = NULL,
-                extra = NULL, offset = NULL,  
+                extra = NULL, offset = NULL,
                 smart = TRUE, ...) {
   dataname <- as.character(substitute(data))  # "list" if no data=
   function.name <- "vlm"
@@ -85,7 +85,7 @@ vlm <- function(formula,
     identity.wts <- FALSE
     temp <- ncol(as.matrix(wz))
     if (temp < M || temp > M*(M+1)/2)
-      stop("input 'w' must have between ", M, " and ", M*(M+1)/2, 
+      stop("input 'w' must have between ", M, " and ", M*(M+1)/2,
            " columns")
     wz <- prior.weights * wz
   }
@@ -129,25 +129,25 @@ vlm <- function(formula,
         p = ncol(x),
         ncol.X.vlm = ncol.X.vlm,
         ynames = dimnames(y)[[2]])
-    
+
     fit$misc <- misc
 
     fit$misc$dataname <- dataname
-    
 
-    
+
+
     if (smart) {
       fit$smart.prediction <- get.smart.prediction()
       wrapup.smart()
     }
 
     answer <-
-    new("vlm", 
+    new("vlm",
       "assign"       = attr(x, "assign"),
       "call"         = ocall,
       "coefficients" = fit$coefficients,
       "constraints"  = fit$constraints,
-      "control"      = control, 
+      "control"      = control,
       "criterion"    = list(deviance = fit$ResSS),
       "dispersion"   = 1,
       "df.residual"  = fit$df.residual,
@@ -190,7 +190,7 @@ vlm <- function(formula,
 
   answer
 }
-attr(vlm, "smart") <- TRUE    
+attr(vlm, "smart") <- TRUE
 
 
 
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index a6f7004..a3ee7de 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -12,14 +12,15 @@
 
 
 
+
 vlm.wfit <-
-  function(xmat, zmat, Hlist, wz = NULL, U = NULL, 
+  function(xmat, zmat, Hlist, wz = NULL, U = NULL,
            matrix.out = FALSE, is.vlmX = FALSE, ResSS = TRUE, qr = FALSE,
            x.ret = FALSE,
            offset = NULL,
            omit.these = NULL, only.ResSS = FALSE,
            ncolx = if (matrix.out && is.vlmX) {
-                     stop("need argument 'ncolx'") 
+                     stop("need argument 'ncolx'")
                    } else {
                      ncol(xmat)
                    },
@@ -27,13 +28,15 @@ vlm.wfit <-
            lp.names = NULL, Eta.range = NULL, Xm2 = NULL,
 
            Xvlm.aug = NULL,
-           ps.list = NULL,
-           constraints = NULL, first.ps = FALSE,
+           sm.osps.list = NULL,
+           constraints = NULL, first.sm.osps = FALSE,
+           control = list(),  # This is vgam.control()
            trace = FALSE,
 
            ...) {
-  mgcvvgam <- length(ps.list)
+  mgcvvgam <- length(sm.osps.list)
 
+  fixspar <- unlist(sm.osps.list$fixspar)
   missing.Hlist <- missing(Hlist)
   zmat <- as.matrix(zmat)
   n <- nrow(zmat)
@@ -55,23 +58,23 @@ vlm.wfit <-
   }
 
   X.vlm.save <- if (is.vlmX) {
-        xmat 
-      } else {
-        if (missing.Hlist || !length(Hlist)) {
-          Hlist <- replace.constraints(vector("list", ncol(xmat)),
-                                       diag(M), 1:ncol(xmat))  # NULL
-        }
-        lm2vlm.model.matrix(x = xmat, Hlist = Hlist, M = M,
-                            assign.attributes = FALSE,
-                            xij = xij,
-                            Xm2 = Xm2)
-      }
+    xmat
+  } else {
+    if (missing.Hlist || !length(Hlist)) {
+      Hlist <- replace.constraints(vector("list", ncol(xmat)),
+                                   diag(M), 1:ncol(xmat))  # NULL
+    }
+    lm2vlm.model.matrix(x = xmat, Hlist = Hlist, M = M,
+                        assign.attributes = FALSE,
+                        xij = xij,
+                        Xm2 = Xm2)
+  }
   X.vlm <- mux111(U, X.vlm.save, M = M)
   z.vlm <- mux22(U, zmat, M = M, upper = TRUE, as.matrix = FALSE)
 
 
   if (length(omit.these)) {
-    X.vlm <- X.vlm[!omit.these, , drop = FALSE] 
+    X.vlm <- X.vlm[!omit.these, , drop = FALSE]
     z.vlm <- z.vlm[!omit.these]
   }
 
@@ -85,53 +88,66 @@ vlm.wfit <-
 
 
   if (mgcvvgam) {
+ # The matrix components of m.objects have colns reordered, for magic().
     m.objects <- psv2magic(x.VLM = X.vlm,
                            constraints = constraints,
-                           lambda.vlm = attr(Xvlm.aug, "lambda.vlm"),
-                           ps.list = ps.list)
+                           spar.vlm = attr(Xvlm.aug, "spar.vlm"),
+                           sm.osps.list = sm.osps.list)
+    fixspar <- rep_len(fixspar, length(m.objects$sp))
     if (FALSE && trace) {
       cat("m.objects$sp \n")
       print( m.objects$sp )
-      cat("m.objects$off \n")
-      print( m.objects$off )
+      cat("m.objects$OFF \n")
+      print( m.objects$OFF )
+      flush.console()
     }
 
-    if (first.ps) {
-      m.objects$sp <- rep_len(-1, length(m.objects$sp))
+    if (first.sm.osps) {
+
     }
 
 
 
-    magicfit <- mgcv::magic(y   = z.vlm,
-                            X   = m.objects$x.VLM.new,
-                            sp  = m.objects$sp,
-                            S   = m.objects$S.arg,
-                            off = m.objects$off,
-                            gcv = FALSE)
-    SP <- magicfit$sp
+
+
+    magicfit <-
+      mgcv::magic(y   = z.vlm,
+                  X = m.objects$x.VLM.new,  # Cols reordered if necessary
+                  sp  = m.objects$sp,
+                  S   = m.objects$S.arg,
+                  off = m.objects$OFF,
+                  gamma = control$gamma.arg,
+                  gcv = FALSE)
+    SP <- ifelse(fixspar, m.objects$sp, magicfit$sp)
     if (FALSE && trace) {
       cat("SP \n")
       print( SP )
+      flush.console()
     }
+  magicfit$sp <- SP  # Make sure; 20160809
+
 
-    length.lambda.vlm <- sapply(attr(Xvlm.aug, "lambda.vlm"), length)  # lambda.new
-    sp.opt <- vector("list", length(length.lambda.vlm))  # list()
+    length.spar.vlm <-
+      sapply(attr(Xvlm.aug, "spar.vlm"), length)  # spar.new
+    sp.opt <- vector("list", length(length.spar.vlm))  # list()
     iioffset <- 0
-    for (ii in seq_along(length.lambda.vlm)) {
-      sp.opt[[ii]] <- SP[iioffset + 1:length.lambda.vlm[ii]]
-      iioffset <- iioffset + length.lambda.vlm[ii]
+    for (ii in seq_along(length.spar.vlm)) {
+      sp.opt[[ii]] <- SP[iioffset + 1:length.spar.vlm[ii]]
+      iioffset <- iioffset + length.spar.vlm[ii]
     }
-    names(sp.opt) <- names(ps.list$which.X.ps)
+    names(sp.opt) <- names(sm.osps.list$which.X.sm.osps)
     if (FALSE && trace) {
       cat("sp.opt \n")
       print( sp.opt )
+      flush.console()
     }
 
-    ps.list$lambdalist <- sp.opt
-    Xvlm.aug <- Pen.psv(constraints = constraints, ps.list = ps.list)
+    sm.osps.list$sparlist <- sp.opt
+    Xvlm.aug <- get.X.VLM.aug(constraints  = constraints,
+                              sm.osps.list = sm.osps.list)
 
 
-    first.ps <- FALSE  # May have been TRUE on entry but is FALSE on exit
+    first.sm.osps <- FALSE
 
 
     X.vlm <- rbind(X.vlm, Xvlm.aug)
@@ -202,10 +218,10 @@ vlm.wfit <-
 
 
   if (mgcvvgam) {
-    ans$first.ps <- first.ps  # Updated.
-    ans$ps.list  <- ps.list   # Updated wrt "lambdalist" component.
-    ans$Xvlm.aug <- Xvlm.aug  # Updated matrix.
-    ans$magicfit <- magicfit  # Updated.
+    ans$first.sm.osps <- first.sm.osps  # Updated.
+    ans$sm.osps.list  <- sm.osps.list   # Updated wrt "sparlist" component
+    ans$Xvlm.aug      <- Xvlm.aug       # Updated matrix.
+    ans$magicfit      <- magicfit       # Updated.
   }
 
 
@@ -232,10 +248,10 @@ vlm.wfit <-
   if (is.null(Hlist)) {
     Hlist <- replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
   }
-  ncolHlist <- unlist(lapply(Hlist, ncol)) 
+  ncolHlist <- unlist(lapply(Hlist, ncol))
   temp <- c(0, cumsum(ncolHlist))
   for (ii in 1:ncolx) {
-    index <- (temp[ii]+1):temp[ii+1]
+    index <- (temp[ii]+1):(temp[ii+1])
     cm <- Hlist[[ii]]
     B[, ii] <- cm %*% ans$coef[index]
   }
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index ea06895..4e88e26 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2016 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2017 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -112,16 +112,16 @@ vsmooth.spline <-
            x.constraint = diag(M),
            constraints = list("(Intercepts)" = i.constraint,
                               x = x.constraint),
-           all.knots = FALSE, 
+           all.knots = FALSE,
            var.arg = FALSE,
            scale.w = TRUE,
            nk = NULL,
            control.spar = list()) {
 
- 
-    
+
+
   if (var.arg) {
-    warning("@var will be returned, but no use will be made of it") 
+    warning("@var will be returned, but no use will be made of it")
   }
 
 
@@ -169,7 +169,7 @@ vsmooth.spline <-
   xvector <- x
   n_lm <- length(xvector)
   ymat <- as.matrix(y)
-  ny2 <- dimnames(ymat)[[2]]  # NULL if vector 
+  ny2 <- dimnames(ymat)[[2]]  # NULL if vector
   M <- ncol(ymat)
   if (n_lm != nrow(ymat)) {
     stop("lengths of arguments 'x' and 'y' must match")
@@ -197,7 +197,7 @@ vsmooth.spline <-
   }
   dim2wz <- ncol(wzmat)
 
- 
+
   if (missing.constraints) {
     constraints <- list("(Intercepts)" = eval(i.constraint),
                         "x"            = eval(x.constraint))
@@ -210,9 +210,9 @@ vsmooth.spline <-
   if (!is.list(constraints) || length(constraints) != 2) {
     stop("'constraints' must equal a list (of length 2) or a matrix")
   }
-  for (ii in 1:2) 
+  for (ii in 1:2)
     if (!is.numeric(constraints[[ii]]) ||
-        !is.matrix (constraints[[ii]]) || 
+        !is.matrix (constraints[[ii]]) ||
         nrow(constraints[[ii]]) != M   ||
         ncol(constraints[[ii]]) >  M)
       stop("something wrong with argument 'constraints'")
@@ -231,7 +231,7 @@ vsmooth.spline <-
     collaps <- .C("vsuff9",
       as.integer(n_lm), as.integer(neff), as.integer(ooo),
       as.double(xvector), as.double(ymat), as.double(wzmat),
-                    
+
       xbar = double(neff), ybar = double(neff * M),
           wzbar = double(neff * dim2wz),
       uwzbar = double(1), wzybar = double(neff * M), okint = as.integer(0),
@@ -305,10 +305,10 @@ vsmooth.spline <-
           "y"            = lfit at fitted.values,
           "yin"          = yinyin)
 
-    
+
       return(object)
   }
-    
+
 
   xbar <- (usortx - usortx[1]) / (usortx[neff] - usortx[1])
   noround <- TRUE   # Improvement 20020803
@@ -316,7 +316,7 @@ vsmooth.spline <-
   if (all.knots) {
     knot <- if (noround) {
       valid.vknotl2(c(rep_len(xbar[1], 3), xbar, rep_len(xbar[neff], 3)))
-    } else { 
+    } else {
       c(rep_len(xbar[1], 3), xbar, rep_len(xbar[neff], 3))
     }
     if (length(nknots)) {
@@ -347,7 +347,7 @@ vsmooth.spline <-
     stop("not enough distinct knots found")
   }
 
- 
+
   conmat <- (constraints[[2]])[, nonlin, drop = FALSE]
   ncb <- sum(nonlin)
   trivc <- trivial.constraints(conmat)
@@ -364,7 +364,7 @@ vsmooth.spline <-
   collaps <- .C("vsuff9",
       as.integer(neff), as.integer(neff), as.integer(ooo),
       as.double(collaps$xbar), as.double(resmat), as.double(collaps$wzbar),
-                  
+
       xbar = double(neff), ybar = double(neff * ncb),
           wzbar = double(neff * dim2wzbar),
       uwzbar = double(1), wzybar = double(neff * ncb), okint = as.integer(0),
@@ -382,9 +382,9 @@ vsmooth.spline <-
   dim(collaps$wzbar) <- c(neff, dim2wzbar)
 
 
- 
 
- 
+
+
 
 
   wzyb.c <-
@@ -401,14 +401,14 @@ vsmooth.spline <-
     wzyb.c[ii, ] <- one.Wmat.c %*% zedd.c[ii, ]
   }
 
- 
 
 
 
 
 
 
- 
+
+
 
   ldk <- 3 * ncb + 1  # 20020710; Previously 4 * ncb
   varmat <- if (var.arg) matrix(0, neff, ncb) else double(1)
@@ -420,7 +420,7 @@ vsmooth.spline <-
   vsplin <- .C("Yee_spline",
      xs = as.double(xbar),
      yyy = as.double(collaps$wzybar),  # zz
-                 
+
          as.double(collaps$wzbar), xknot = as.double(knot),
      n = as.integer(neff), nknots = as.integer(nknots), as.integer(ldk),
          M = as.integer(ncb), dim2wz = as.integer(dim2wzbar),
@@ -428,7 +428,7 @@ vsmooth.spline <-
      spar.nl = as.double(spar.nl), lamvec = as.double(spar.nl),
 
          iinfo = integer(1), fv = double(neff * ncb),
-     Bcoef = double(nknots * ncb), varmat = as.double(varmat), 
+     Bcoef = double(nknots * ncb), varmat = as.double(varmat),
 
      levmat = double(neff * ncb), as.double(dofr.nl),
 
@@ -445,7 +445,7 @@ vsmooth.spline <-
 
 
 
- 
+
   if (vsplin$ierror != 0) {
     stop("vsplin$ierror == ", vsplin$ierror,
          ". Something gone wrong in 'vsplin'")
@@ -462,11 +462,11 @@ vsmooth.spline <-
       dim(vsplin$varmat) <- c(neff, ncb)
   }
 
-  dofr.nl <- colSums(vsplin$levmat)  # Actual EDF used 
+  dofr.nl <- colSums(vsplin$levmat)  # Actual EDF used
+
 
 
 
- 
   fv <- lfit at fitted.values + vsplin$fv %*% t(conmat)
   if (M > 1) {
     dimnames(fv) <- list(NULL, ny2)
@@ -484,7 +484,7 @@ vsmooth.spline <-
                    "knots"         = knot,
                    "xmax"          = usortx[neff],
                    "xmin"          = usortx[1])
- 
+
   object <-
   new("vsmooth.spline",
       "call"         = my.call,
@@ -514,11 +514,11 @@ show.vsmooth.spline <- function(x, ...) {
   }
 
   ncb <- if (length(x at nlfit)) ncol(x at nlfit@Bcoefficients) else NULL
-  cat("\nSmoothing Parameter (Spar):", 
+  cat("\nSmoothing Parameter (Spar):",
     if (length(ncb) && ncb == 1) format(x at spar) else
         paste(format(x at spar), collapse = ", "), "\n")
 
-  cat("\nEquivalent Degrees of Freedom (Df):", 
+  cat("\nEquivalent Degrees of Freedom (Df):",
     if (length(ncb) && ncb == 1) format(x at df) else
         paste(format(x at df), collapse = ", "), "\n")
 
@@ -532,7 +532,7 @@ show.vsmooth.spline <- function(x, ...) {
 
 
 coefvsmooth.spline.fit <- function(object, ...) {
-  object at Bcoefficients 
+  object at Bcoefficients
 }
 
 
@@ -665,8 +665,8 @@ predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
       }
     } else if (deriv == 1) {
       end.slopes <- Recall(object, xrange, 1)$y * drangex
-      y[bad.left,]  <- rep(end.slopes[1,], rep(sum(bad.left),  ncb)) 
-      y[bad.right,] <- rep(end.slopes[2,], rep(sum(bad.right), ncb)) 
+      y[bad.left,]  <- rep(end.slopes[1,], rep(sum(bad.left),  ncb))
+      y[bad.right,] <- rep(end.slopes[2,], rep(sum(bad.right), ncb))
     } else
       y[!good,] <- 0
   }
diff --git a/build/vignette.rds b/build/vignette.rds
index 88bcc61..87c1f70 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index d0d3f98..8c362b0 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
index d56d6dd..de82a66 100644
Binary files a/data/Huggins89table1.rda and b/data/Huggins89table1.rda differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index 6018317..a96ea7d 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index b78f322..7b31c6e 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index ef07801..8b8a294 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index d193471..9f0f13d 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/beggs.rda b/data/beggs.rda
index c5c483a..f9da1a3 100644
Binary files a/data/beggs.rda and b/data/beggs.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 0e7cbd8..289786f 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/cfibrosis.rda b/data/cfibrosis.rda
index 3a2968c..ff69e08 100644
Binary files a/data/cfibrosis.rda and b/data/cfibrosis.rda differ
diff --git a/data/corbet.rda b/data/corbet.rda
index 393c32a..1e1ff98 100644
Binary files a/data/corbet.rda and b/data/corbet.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index 9ea59d9..c42042c 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 17cbc5a..77b2b20 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index 7cfcbfa..40b2ba2 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 750fd44..f42aab6 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index ebb43dc..f7d8cbd 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 028fb69..4e9f6b9 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/deermice.rda b/data/deermice.rda
index 541ff0d..fb4106d 100644
Binary files a/data/deermice.rda and b/data/deermice.rda differ
diff --git a/data/ducklings.rda b/data/ducklings.rda
index e859522..949c8de 100644
Binary files a/data/ducklings.rda and b/data/ducklings.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index b335fab..cd0127b 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/flourbeetle.rda b/data/flourbeetle.rda
index 0b78e46..a8b8442 100644
Binary files a/data/flourbeetle.rda and b/data/flourbeetle.rda differ
diff --git a/data/hspider.rda b/data/hspider.rda
index 16ad1ef..16a6450 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/lakeO.rda b/data/lakeO.rda
index 67ce7ab..fd71eb8 100644
Binary files a/data/lakeO.rda and b/data/lakeO.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index 96e579a..10875f3 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index fd555a3..f6ba57b 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/melbmaxtemp.rda b/data/melbmaxtemp.rda
index dc45051..c273666 100644
Binary files a/data/melbmaxtemp.rda and b/data/melbmaxtemp.rda differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index cfba086..280e990 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/prinia.rda b/data/prinia.rda
index 993474a..05bcbaf 100644
Binary files a/data/prinia.rda and b/data/prinia.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index 2c0a632..d49f559 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index 8720537..b648b5e 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index 2a970cf..aad21cb 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index aa8fbc2..6dc23ac 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wine.rda b/data/wine.rda
index 89d3408..0a6c8b1 100644
Binary files a/data/wine.rda and b/data/wine.rda differ
diff --git a/inst/CITATION b/inst/CITATION
index 207d42d..aed02ff 100644
--- a/inst/CITATION
+++ b/inst/CITATION
@@ -82,14 +82,14 @@ citEntry(entry = "Manual",
          author = personList(as.person("Thomas W. Yee")),
          year = year,
          note = note,
-         url = "http://CRAN.R-project.org/package=VGAM",
+         url = "https://CRAN.R-project.org/package=VGAM",
 
          textVersion =
          paste("Thomas W. Yee",
    	       sprintf("(%s).", year),	 
                "VGAM: Vector Generalized Linear and Additive Models.",
 	       paste(note, ".", sep = ""),
-               "URL http://CRAN.R-project.org/package=VGAM"),
+               "URL https://CRAN.R-project.org/package=VGAM"),
 	 header = "and/or"
 )
 
diff --git a/inst/doc/categoricalVGAM.R b/inst/doc/categoricalVGAM.R
index badcc3c..8d99af4 100644
--- a/inst/doc/categoricalVGAM.R
+++ b/inst/doc/categoricalVGAM.R
@@ -20,7 +20,7 @@ fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
 
 
 ###################################################
-### code chunk number 3: categoricalVGAM.Rnw:903-907
+### code chunk number 3: categoricalVGAM.Rnw:906-910
 ###################################################
 journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
 squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
@@ -29,37 +29,37 @@ dimnames(squaremat) <- list(winner = journal, loser = journal)
 
 
 ###################################################
-### code chunk number 4: categoricalVGAM.Rnw:1007-1011
+### code chunk number 4: categoricalVGAM.Rnw:1013-1017 (eval = FALSE)
 ###################################################
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
-coef(fit, matrix = TRUE)
-Coef(fit)  # Estimated pA and pB
+## abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
+## fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
+## coef(fit, matrix = TRUE)
+## Coef(fit)  # Estimated pA and pB
 
 
 ###################################################
-### code chunk number 5: categoricalVGAM.Rnw:1289-1291
+### code chunk number 5: categoricalVGAM.Rnw:1297-1299
 ###################################################
 head(marital.nz, 4)
 summary(marital.nz)
 
 
 ###################################################
-### code chunk number 6: categoricalVGAM.Rnw:1294-1296
+### code chunk number 6: categoricalVGAM.Rnw:1302-1304
 ###################################################
 fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
                data = marital.nz)
 
 
 ###################################################
-### code chunk number 7: categoricalVGAM.Rnw:1300-1302
+### code chunk number 7: categoricalVGAM.Rnw:1308-1310
 ###################################################
 head(depvar(fit.ms), 4)
 colSums(depvar(fit.ms))
 
 
 ###################################################
-### code chunk number 8: categoricalVGAM.Rnw:1311-1323
+### code chunk number 8: categoricalVGAM.Rnw:1319-1331
 ###################################################
 # Plot output
 mycol <- c("red", "darkgreen", "blue")
@@ -76,7 +76,7 @@ plot(fit.ms, se = TRUE, scale = 12,
 
 
 ###################################################
-### code chunk number 9: categoricalVGAM.Rnw:1366-1379
+### code chunk number 9: categoricalVGAM.Rnw:1374-1387
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -95,13 +95,13 @@ plot(fit.ms, se = TRUE, scale = 12,
 
 
 ###################################################
-### code chunk number 10: categoricalVGAM.Rnw:1399-1400
+### code chunk number 10: categoricalVGAM.Rnw:1407-1408
 ###################################################
 plot(fit.ms, deriv=1, lcol=mycol, scale=0.3)
 
 
 ###################################################
-### code chunk number 11: categoricalVGAM.Rnw:1409-1413
+### code chunk number 11: categoricalVGAM.Rnw:1417-1421
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -111,7 +111,7 @@ plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
 
 
 ###################################################
-### code chunk number 12: categoricalVGAM.Rnw:1436-1448
+### code chunk number 12: categoricalVGAM.Rnw:1444-1456
 ###################################################
 foo <- function(x, elbow = 50)
   poly(pmin(x, elbow), 2)
@@ -128,13 +128,13 @@ fit2.ms <-
 
 
 ###################################################
-### code chunk number 13: categoricalVGAM.Rnw:1451-1452
+### code chunk number 13: categoricalVGAM.Rnw:1459-1460
 ###################################################
 coef(fit2.ms, matrix = TRUE)
 
 
 ###################################################
-### code chunk number 14: categoricalVGAM.Rnw:1456-1463
+### code chunk number 14: categoricalVGAM.Rnw:1464-1471
 ###################################################
 par(mfrow = c(2, 2))
 plotvgam(fit2.ms, se = TRUE, scale = 12,
@@ -146,7 +146,7 @@ plotvgam(fit2.ms, se = TRUE, scale = 12,
 
 
 ###################################################
-### code chunk number 15: categoricalVGAM.Rnw:1474-1483
+### code chunk number 15: categoricalVGAM.Rnw:1482-1491
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -161,25 +161,25 @@ plotvgam(fit2.ms, se = TRUE, scale = 12,
 
 
 ###################################################
-### code chunk number 16: categoricalVGAM.Rnw:1501-1502
+### code chunk number 16: categoricalVGAM.Rnw:1509-1510
 ###################################################
 deviance(fit.ms) - deviance(fit2.ms)
 
 
 ###################################################
-### code chunk number 17: categoricalVGAM.Rnw:1508-1509
+### code chunk number 17: categoricalVGAM.Rnw:1516-1517
 ###################################################
 (dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
 
 
 ###################################################
-### code chunk number 18: categoricalVGAM.Rnw:1512-1513
+### code chunk number 18: categoricalVGAM.Rnw:1520-1521
 ###################################################
 pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
 
 
 ###################################################
-### code chunk number 19: categoricalVGAM.Rnw:1526-1537
+### code chunk number 19: categoricalVGAM.Rnw:1534-1545
 ###################################################
 ooo <- with(marital.nz, order(age))
 with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
@@ -195,7 +195,7 @@ abline(v = seq(10,90,by = 5), h = seq(0,1,by = 0.1), col = "gray", lty = "dashed
 
 
 ###################################################
-### code chunk number 20: categoricalVGAM.Rnw:1552-1565
+### code chunk number 20: categoricalVGAM.Rnw:1560-1573
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
  par(mfrow = c(1,1))
@@ -214,7 +214,7 @@ abline(v = seq(10,90,by = 5), h = seq(0,1,by = 0.1), col = "gray", lty = "dashed
 
 
 ###################################################
-### code chunk number 21: categoricalVGAM.Rnw:1599-1603
+### code chunk number 21: categoricalVGAM.Rnw:1607-1611
 ###################################################
 # Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
 head(backPain, 4)
@@ -223,32 +223,34 @@ backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale
 
 
 ###################################################
-### code chunk number 22: categoricalVGAM.Rnw:1607-1608
+### code chunk number 22: categoricalVGAM.Rnw:1615-1617
 ###################################################
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
+bp.rrmlm1 <- rrvglm(factor(pain, ordered = FALSE) ~ sx1 + sx2 + sx3,
+                    multinomial, data = backPain)
 
 
 ###################################################
-### code chunk number 23: categoricalVGAM.Rnw:1611-1612
+### code chunk number 23: categoricalVGAM.Rnw:1620-1621
 ###################################################
 Coef(bp.rrmlm1)
 
 
 ###################################################
-### code chunk number 24: categoricalVGAM.Rnw:1640-1641
+### code chunk number 24: categoricalVGAM.Rnw:1649-1650
 ###################################################
 set.seed(123)
 
 
 ###################################################
-### code chunk number 25: categoricalVGAM.Rnw:1644-1646
+### code chunk number 25: categoricalVGAM.Rnw:1653-1656
 ###################################################
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
+bp.rrmlm2 <- rrvglm(factor(pain, ordered = FALSE) ~ sx1 + sx2 + sx3,
+                   multinomial, data = backPain, Rank = 2,
                    Corner = FALSE, Uncor = TRUE)
 
 
 ###################################################
-### code chunk number 26: categoricalVGAM.Rnw:1654-1658
+### code chunk number 26: categoricalVGAM.Rnw:1664-1668
 ###################################################
 biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
 #      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
@@ -257,7 +259,7 @@ biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
 
 
 ###################################################
-### code chunk number 27: categoricalVGAM.Rnw:1690-1698
+### code chunk number 27: categoricalVGAM.Rnw:1700-1708
 ###################################################
 getOption("SweaveHooks")[["fig"]]()
 # Plot output
@@ -271,7 +273,7 @@ biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
 
 
 ###################################################
-### code chunk number 28: categoricalVGAM.Rnw:1812-1813
+### code chunk number 28: categoricalVGAM.Rnw:1822-1823
 ###################################################
 iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
 
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
index 8394144..79c156c 100644
--- a/inst/doc/categoricalVGAM.Rnw
+++ b/inst/doc/categoricalVGAM.Rnw
@@ -124,7 +124,7 @@ model and \texttt{multinom()}
 \citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
 logit model. However, both of these can be considered `one-off'
 modeling functions rather than providing a unified offering for CDA.
-The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
+The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2016}
 has greater functionality: it can fit the proportional odds model
 (and the forward continuation ratio model upon preprocessing). Neither
 \texttt{polr()} or \texttt{lrm()} appear able to fit the nonproportional
@@ -248,6 +248,9 @@ background to this article include
 \cite{agre:2010},
 \cite{agre:2013},
 \cite{fahr:tutz:2001},
+\cite{full:xu:2016},
+\cite{harr:2015},
+\cite{hens:rose:gree:2015},
 \cite{leon:2000},
 \cite{lloy:1999},
 \cite{long:1997},
@@ -1003,13 +1006,18 @@ Any $g$ from Table \ref{tab:jsscat.links} appropriate for
 a parameter $\theta \in (0,1)$ will do.
 
 
+
 A toy example where $p=p_A$ and $q=p_B$ is
-<<>>=
+
+
+<<eval=F>>=
 abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
 fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
 coef(fit, matrix = TRUE)
 Coef(fit)  # Estimated pA and pB
 @
+
+
 The function \texttt{Coef()}, which applies only to intercept-only models,
 applies to $g_{j}(\theta_{j})=\eta_{j}$
 the inverse link function $g_{j}^{-1}$ to $\widehat{\eta}_{j}$
@@ -1605,7 +1613,8 @@ backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale
 displays the six ordered categories.
 Now a rank-1 stereotype model can be fitted with
 <<>>=
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
+bp.rrmlm1 <- rrvglm(factor(pain, ordered = FALSE) ~ sx1 + sx2 + sx3,
+                    multinomial, data = backPain)
 @
 Then
 <<>>=
@@ -1642,7 +1651,8 @@ set.seed(123)
 @
 A rank-2 model fitted \textit{with a different normalization}
 <<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
+bp.rrmlm2 <- rrvglm(factor(pain, ordered = FALSE) ~ sx1 + sx2 + sx3,
+                   multinomial, data = backPain, Rank = 2,
                    Corner = FALSE, Uncor = TRUE)
 @
 produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index 3da21d4..7e572a4 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/inst/doc/crVGAM.pdf b/inst/doc/crVGAM.pdf
index 0f9b60d..b1e41f2 100644
Binary files a/inst/doc/crVGAM.pdf and b/inst/doc/crVGAM.pdf differ
diff --git a/man/A1A2A3.Rd b/man/A1A2A3.Rd
index 4932e5b..0a7a366 100644
--- a/man/A1A2A3.Rd
+++ b/man/A1A2A3.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The A1A2A3 Blood Group System }
 \description{
-  Estimates the three independent parameters of the 
+  Estimates the three independent parameters of the
   the A1A2A3 blood group system.
 
 }
@@ -57,16 +57,16 @@ Lange, K. (2002)
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The input can be a 6-column matrix of counts,
-  with columns corresponding to   
+  with columns corresponding to
   \code{A1A1},
   \code{A1A2},
   \code{A1A3},
   \code{A2A2},
   \code{A2A3},
-  \code{A3A3} (in order). 
-  Alternatively, the input can be a 6-column matrix of 
+  \code{A3A3} (in order).
+  Alternatively, the input can be a 6-column matrix of
   proportions (so each row adds to 1) and the \code{weights}
   argument is used to specify the total number of counts for each row.
 
@@ -90,7 +90,7 @@ ymat <- cbind(108, 196, 429, 143, 513, 559)
 fit <- vglm(ymat ~ 1, A1A2A3(link = probit), trace = TRUE, crit = "coef")
 fit <- vglm(ymat ~ 1, A1A2A3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
             trace = TRUE, crit = "coef")
-Coef(fit)  # Estimated p1 and p2 
+Coef(fit)  # Estimated p1 and p2
 rbind(ymat, sum(ymat) * fitted(fit))
 sqrt(diag(vcov(fit)))
 }
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index a7b1e0b..2379370 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The AA-Aa-aa Blood Group System }
 \description{
-   Estimates the parameter of the 
+   Estimates the parameter of the
    AA-Aa-aa blood group system,
    with or without Hardy Weinberg equilibrium.
 
@@ -64,7 +64,7 @@ AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = FALSE,
 
 
 }
-\references{ 
+\references{
 
 
 Weir, B. S. (1996)
@@ -75,11 +75,11 @@ Sunderland, MA: Sinauer Associates, Inc.
 
 }
 \author{ T. W. Yee }
-\note{ 
-  The input can be a 3-column matrix of counts, where the columns 
+\note{
+  The input can be a 3-column matrix of counts, where the columns
   are AA, Ab and aa
   (in order).
-  Alternatively, the input can be a 3-column matrix of 
+  Alternatively, the input can be a 3-column matrix of
   proportions (so each row adds to 1) and the \code{weights}
   argument is used to specify the total number of counts for each row.
 
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index 0903854..a37587d 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The AB-Ab-aB-ab Blood Group System }
 \description{
-   Estimates the parameter of the 
+   Estimates the parameter of the
    AB-Ab-aB-ab blood group system.
 
 }
@@ -41,11 +41,11 @@ Lange, K. (2002)
 
 }
 \author{ T. W. Yee }
-\note{ 
-  The input can be a 4-column matrix of counts, where the columns 
+\note{
+  The input can be a 4-column matrix of counts, where the columns
   are AB, Ab, aB and ab
   (in order).
-  Alternatively, the input can be a 4-column matrix of 
+  Alternatively, the input can be a 4-column matrix of
   proportions (so each row adds to 1) and the \code{weights}
   argument is used to specify the total number of counts for each row.
 
diff --git a/man/ABO.Rd b/man/ABO.Rd
index 2db7930..e489b94 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The ABO Blood Group System }
 \description{
-  Estimates the two independent parameters of the 
+  Estimates the two independent parameters of the
   the ABO blood group system.
 
 }
@@ -28,7 +28,7 @@ ABO(link.pA = "logit", link.pB = "logit", ipA = NULL, ipB = NULL,
   \item{zero}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
-  
+
   }
 
 }
@@ -61,10 +61,10 @@ ABO(link.pA = "logit", link.pB = "logit", ipA = NULL, ipB = NULL,
 
 }
 \author{ T. W. Yee }
-\note{ 
-  The input can be a 4-column matrix of counts, where the columns 
+\note{
+  The input can be a 4-column matrix of counts, where the columns
   are A, B, AB, O (in order).
-  Alternatively, the input can be a 4-column matrix of 
+  Alternatively, the input can be a 4-column matrix of
   proportions (so each row adds to 1) and the \code{weights}
   argument is used to specify the total number of counts for each row.
 
diff --git a/man/AICvlm.Rd b/man/AICvlm.Rd
index 1f04b05..d54d3ed 100644
--- a/man/AICvlm.Rd
+++ b/man/AICvlm.Rd
@@ -124,7 +124,7 @@ Regression and time series model selection in small samples,
 \section{Warning }{
   This code has not been double-checked.
   The general applicability of \code{AIC} for the VGLM/VGAM classes
-  has not been developed fully. 
+  has not been developed fully.
   In particular, \code{AIC} should not be run on some \pkg{VGAM} family
   functions because of violation of certain regularity conditions, etc.
 
diff --git a/man/AR1.Rd b/man/AR1.Rd
index b680a26..1f14273 100644
--- a/man/AR1.Rd
+++ b/man/AR1.Rd
@@ -12,7 +12,7 @@
 AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
     idrift  = NULL, isd  = NULL, ivar = NULL, irho = NULL, imethod = 1,
     ishrinkage = 0.95, type.likelihood = c("exact", "conditional"),
-    type.EIM  = c("exact", "approximate"), var.arg = FALSE, nodrift = FALSE, 
+    type.EIM  = c("exact", "approximate"), var.arg = FALSE, nodrift = FALSE,
     print.EIM = FALSE, zero = c(if (var.arg) "var" else "sd", "rho"))
 
 }
@@ -30,7 +30,7 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
     it is a scaled mean.
     See \code{\link{Links}} for more choices.
 
-  
+
   }
   \item{idrift, isd, ivar, irho}{
   Optional initial values for the parameters.
@@ -67,26 +67,26 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
 
   }
   \item{type.EIM}{
-   What type of expected information matrix (EIM) is used in 
+   What type of expected information matrix (EIM) is used in
    Fisher scoring. By default, this family function calls
    \code{\link[VGAM:AR1EIM]{AR1EIM}}, which recursively
    computes the exact EIM for the AR process with Gaussian
-   white noise. See Porat and Friedlander (1986) for further 
+   white noise. See Porat and Friedlander (1986) for further
    details on the exact EIM.
-   
+
    If \code{type.EIM = "approximate"} then
    approximate expression for the EIM of Autoregressive processes
    is used; this approach holds when the number of observations
    is large enough. Succinct details about the approximate EIM
    are delineated at Porat and Friedlander (1987).
 
-  
+
   }
   \item{print.EIM}{
-  Logical. If \code{TRUE}, then the first few EIMs are printed. 
+  Logical. If \code{TRUE}, then the first few EIMs are printed.
   Here, the result shown is the sum of each EIM.
-  
-  
+
+
   }
   \item{type.likelihood}{
     What type of likelihood function is maximized.
@@ -102,8 +102,8 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
   }
 %  \item{epsilon, maxit, stepsize,...}{
 %  Same as \code{\link[VGAM:vglm.control]{vglm.control}}.
-%  
-%  
+%
+%
 %  }
 
 
@@ -127,42 +127,42 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
   these are returned as the fitted values.
   (3). The correlation of all the \eqn{Y_i}{Y(i)} with \eqn{Y_{i-1}}{Y(i-1)}
   is \eqn{\rho}{rho}.
-  (4). The default link function ensures that 
+  (4). The default link function ensures that
                          \eqn{-1 < \rho < 1}{-1 < rho < 1}.
 
 
 
 % (1). ...  whether \eqn{\mu^*}{mu^*} is intercept-only.
 
-  
+
 }
 \section{Warning}{
   Monitoring convergence is urged, i.e., set \code{trace = TRUE}.
-  
+
   Moreover, if the exact EIMs are used, set \code{print.EIM = TRUE}
   to compare the computed exact to the approximate EIM.
-  
-  Under the VGLM/VGAM approach, parameters can be modelled in terms 
-  of covariates. Particularly, if the standard deviation of 
-  the white noise is modelled in this way, then 
-  \code{type.EIM = "exact"} may certainly lead to unstable 
+
+  Under the VGLM/VGAM approach, parameters can be modelled in terms
+  of covariates. Particularly, if the standard deviation of
+  the white noise is modelled in this way, then
+  \code{type.EIM = "exact"} may certainly lead to unstable
   results. The reason is that white noise is a stationary
-  process, and consequently, its variance must remain as a constant. 
+  process, and consequently, its variance must remain constant.
   Consequently, the use of variates to model
   this parameter contradicts the assumption of
   stationary random components to compute the exact EIMs proposed
   by Porat and Friedlander (1987).
-  
-  To prevent convergence issues in such cases, this family function 
-  internally verifies whether the variance of the white noise remains 
+
+  To prevent convergence issues in such cases, this family function
+  internally verifies whether the variance of the white noise remains
   as a constant at each Fisher scoring iteration.
-  If this assumption is violated and \code{type.EIM = "exact"} is set, 
+  If this assumption is violated and \code{type.EIM = "exact"} is set,
   then \code{AR1} automatically shifts to
   \code{type.EIM = "approximate"}.
   Also, a warning is accordingly displayed.
 
-  
-  %Thus, if modelling the standard deviation of the white noise 
+
+  %Thus, if modelling the standard deviation of the white noise
   %is required, the use of \code{type.EIM = "approximate"} is
   %highly recommended.
 
@@ -202,17 +202,17 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
 
   Multiple responses are handled.
   The mean is returned as the fitted values.
-  
+
 
 % Argument \code{zero} can be either a numeric or a character
-% vector. It must specify the position(s) or name(s) of the 
+% vector. It must specify the position(s) or name(s) of the
 % parameters to be modeled as intercept-only. If names are used,
 % notice that parameter names in this family function are
 
-  
+
 % \deqn{c("drift", "var" or "sd", "rho").}
 
-  
+
   %Practical experience has shown that half-stepping is a very
   %good idea. The default options use step sizes that are
   %about one third the usual step size. Consequently,
@@ -221,7 +221,7 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge", lrho = "rhobit",
 
 
 }
-  
+
 \seealso{
   \code{\link{AR1EIM}},
   \code{\link{vglm.control}},
@@ -243,7 +243,7 @@ tsdata  <- transform(tsdata,
                               sd = exp(1.5)),
               TS2 = arima.sim(nn, model = list(ar = ar.coef.2),
                               sd = exp(1.0 + 1.5 * x2)))
-                              
+
 ### An autoregressive intercept--only model.   ###
 ### Using the exact EIM, and "nodrift = TRUE"  ###
 fit1a <- vglm(TS1 ~ 1, data = tsdata, trace = TRUE,
@@ -254,10 +254,11 @@ fit1a <- vglm(TS1 ~ 1, data = tsdata, trace = TRUE,
 Coef(fit1a)
 summary(fit1a)
 
+\dontrun{
 ### Two responses. Here, the white noise standard deviation of TS2   ###
 ### is modelled in terms of 'x2'. Also, 'type.EIM = exact'.  ###
-fit1b <- vglm(cbind(TS1, TS2) ~ x2, 
-              AR1(zero = NULL, nodrift = TRUE, 
+fit1b <- vglm(cbind(TS1, TS2) ~ x2,
+              AR1(zero = NULL, nodrift = TRUE,
                   var.arg = FALSE,
                   type.EIM = "exact"),
               constraints = list("(Intercept)" = diag(4),
@@ -266,7 +267,6 @@ fit1b <- vglm(cbind(TS1, TS2) ~ x2,
 coef(fit1b, matrix = TRUE)
 summary(fit1b)
 
-\dontrun{
 ### Example 2: another stationary time series
 nn     <- 500
 my.rho <- rhobit(1.0, inverse = TRUE)
@@ -276,7 +276,7 @@ tsdata  <- data.frame(index = 1:nn, TS3 = runif(nn))
 
 set.seed(2)
 for (ii in 2:nn)
-  tsdata$TS3[ii] <- my.mu/(1 - my.rho) + 
+  tsdata$TS3[ii] <- my.mu/(1 - my.rho) +
                     my.rho * tsdata$TS3[ii-1] + rnorm(1, sd = my.sd)
 tsdata <- tsdata[-(1:ceiling(nn/5)), ]  # Remove the burn-in data:
 
diff --git a/man/AR1EIM.Rd b/man/AR1EIM.Rd
index 3ea23c3..e4f0b43 100644
--- a/man/AR1EIM.Rd
+++ b/man/AR1EIM.Rd
@@ -2,7 +2,7 @@
 \alias{AR1EIM}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Computation of the Exact EIM of an Order-1 Autoregressive Process
-      
+
 
 }
 \description{Computation of the exact Expected Information Matrix of
@@ -13,23 +13,23 @@
 }
 
 \usage{
-AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL, 
+AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
        WNsd = NULL, ARcoeff1 = NULL, eps.porat = 1e-2)
 }
 
 \arguments{
   \item{x}{
-  A vector of quantiles. The gaussian time series for which the EIMs 
-  are computed. 
+  A vector of quantiles. The Gaussian time series for which the EIMs
+  are computed.
 
 
-  If multiple time series are being analyzed, then \code{x} must be 
-  a matrix where each column allocates a response. 
+  If multiple time series are being analyzed, then \code{x} must be
+  a matrix where each column allocates a response.
   That is, the number of columns (denoted as \eqn{NOS}) must match
   the number of responses.
 
 
-  
+
   }
   \item{var.arg}{
   Logical. Same as with \code{\link[VGAM:AR1]{AR1}}.
@@ -47,11 +47,11 @@ AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
   }
   \item{WNsd, ARcoeff1}{
   Matrices.
-  The standard deviation of the white noise, and the 
-  correlation (coefficient) of the AR(\eqn{1}) model, 
-  for \bold{each} observation. 
+  The standard deviation of the white noise, and the
+  correlation (coefficient) of the AR(\eqn{1}) model,
+  for \bold{each} observation.
+
 
-  
   That is, the dimension for each matrix is \eqn{N \times NOS}{N x NOS},
   where \eqn{N} is the number of observations and \eqn{NOS} is the
   number of responses. Else, these arguments are recycled.
@@ -60,7 +60,7 @@ AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
 
   }
   \item{eps.porat}{
-  A very small positive number to test whether the standar deviation 
+  A very small positive number to test whether the standard deviation
   (\code{WNsd}) is close enough to its value estimated in this function.
 
   See below for further details.
@@ -77,163 +77,163 @@ AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
   random components.
 
 
-  By default, when the VGLM/VGAM family function 
+  By default, when the VGLM/VGAM family function
   \code{\link[VGAM:AR1]{AR1}} is used to fit an AR(\eqn{1}) model
   via \code{\link[VGAM:vglm]{vglm}}, Fisher scoring is executed using
-  the \bold{approximate} EIM for the AR process. However, this model 
-  can also be fitted using the \bold{exact} EIMs computed by 
+  the \bold{approximate} EIM for the AR process. However, this model
+  can also be fitted using the \bold{exact} EIMs computed by
   \code{AR1EIM}.
 
 
   Given \eqn{N} consecutive data points,
-  \eqn{ {y_{0}, y_{1}, \ldots, y_{N - 1} } }{ {y[0], y[1], \ldots, 
-  y[N - 1]} } with probability density \eqn{f(\boldsymbol{y})}{f(y)}, 
+  \eqn{ {y_{0}, y_{1}, \ldots, y_{N - 1} } }{ {y[0], y[1], \ldots,
+  y[N - 1]} } with probability density \eqn{f(\boldsymbol{y})}{f(y)},
   the Porat and Friedlander algorithm
   calculates the EIMs
   \eqn{ [J_{n-1}(\boldsymbol{\theta})] }{J(n-1)[\theta]},
-  for all \eqn{1 \leq n \leq N}{1 \le n \le N}. This is done based on the 
-  Levinson-Durbin algorithm for computing the orthogonal polynomials of 
+  for all \eqn{1 \leq n \leq N}{1 \le n \le N}. This is done based on the
+  Levinson-Durbin algorithm for computing the orthogonal polynomials of
   a Toeplitz matrix.
-  In particular, for the AR(\eqn{1}) model, the vector of parameters 
+  In particular, for the AR(\eqn{1}) model, the vector of parameters
   to be estimated under the VGAM/VGLM approach is
-  
+
   \deqn{   \boldsymbol{\eta} = (\mu^{*}, loge(\sigma^2), rhobit(\rho)),}{
            \eta = ( mu^*, loge(sigma^2), rhobit(rho)),
-  } 
+  }
   where \eqn{\sigma^2}{sigma^2} is the variance of the white noise
   and \eqn{mu^{*}}{mu^*} is the drift parameter
   (See \code{\link[VGAM:AR1]{AR1}} for further details on this).
-  
-  Consequently, for each observation \eqn{n = 1, \ldots, N}, the EIM, 
+
+  Consequently, for each observation \eqn{n = 1, \ldots, N}, the EIM,
   \eqn{J_{n}(\boldsymbol{\theta})}{Jn[\theta]}, has dimension
   \eqn{3 \times 3}{3 x 3}, where the diagonal elements are:
-  %Notice, however, that the Porat and Friedlander algorithm considers 
-  %\eqn{ { y_t } }{ {y[t]}} as a zero-mean process. 
-  %Then, for each \eqn{n = 1, \ldots, N}, 
-  %\eqn{ [J_{n}(\boldsymbol{\theta})] }{Jn[\theta]} is a 
+  %Notice, however, that the Porat and Friedlander algorithm considers
+  %\eqn{ { y_t } }{ {y[t]}} as a zero-mean process.
+  %Then, for each \eqn{n = 1, \ldots, N},
+  %\eqn{ [J_{n}(\boldsymbol{\theta})] }{Jn[\theta]} is a
   %\eqn{2 \times 2}{2 x 2} matrix, with elements
-  \deqn{ J_{[n, 1, 1]} = 
+  \deqn{ J_{[n, 1, 1]} =
      E[ -\partial^2 \log f(\boldsymbol{y}) / \partial ( \mu^{*} )^2 ], }{
         J[n, 1, 1] = E[ -\delta^2 log f(y) / \delta (mu^*)^2 ], }
-        
-  \deqn{ J_{[n, 2, 2]} = 
+
+  \deqn{ J_{[n, 2, 2]} =
      E[ -\partial^2 \log f(\boldsymbol{y}) / \partial (\sigma^2)^2 ], }{
         J[n, 2, 2] = E[ - \delta^2 log f(y) / \delta (\sigma^2)^2 ],}
-        
+
   and
-  
-   \deqn{ J_{[n, 3, 3]} = 
+
+   \deqn{ J_{[n, 3, 3]} =
      E[ -\partial^2 \log f(\boldsymbol{y}) / \partial ( \rho )^2 ]. }{
         J[n, 3, 3] = E[ -\delta^2 log f(y) / \delta (rho)^2]. }
-        
+
   As for the off-diagonal elements, one has the usual entries, i.e.,
-   \deqn{ J_{[n, 1, 2]} = J_{[n, 2, 1]} = 
+   \deqn{ J_{[n, 1, 2]} = J_{[n, 2, 1]} =
      E[ -\partial^2 \log f(\boldsymbol{y}) / \partial \sigma^2
            \partial \rho], }{
-        J[n, 1, 2] = J[n, 2, 1] = 
+        J[n, 1, 2] = J[n, 2, 1] =
             E[ -\delta^2 log f(y) / \delta \sigma^2 \delta rho ],}
  etc.
 
- If \code{var.arg = FALSE}, then \eqn{\sigma} instead of \eqn{\sigma^2} 
+ If \code{var.arg = FALSE}, then \eqn{\sigma} instead of \eqn{\sigma^2}
  is estimated. Therefore, \eqn{J_{[n, 2, 2]}}{J[n, 2, 2]},
  \eqn{J_{[n, 1, 2]}}{J[n, 1, 2]}, etc., are correspondingly replaced.
 
 
   Once these expected values are internally computed, they are returned
-  in an array of dimension \eqn{N \times 1 \times 6}{N x 1 x 6}, 
+  in an array of dimension \eqn{N \times 1 \times 6}{N x 1 x 6},
   of the form
-  
-  \deqn{J[, 1, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]}, J_{[ , 3, 3]}, 
+
+  \deqn{J[, 1, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]}, J_{[ , 3, 3]},
                    J_{[ , 1, 2]}, J_{[, 2, 3]}, J_{[ , 1, 3]}  ].  }{
           J[, 1, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3],
                        J[ , 1, 2], J[ , 2, 3], J[ , 1, 3] ].
     }
 
-  \code{AR1EIM} handles multiple time series, say \eqn{NOS}. 
-  If this happens, then it accordingly returns an array of 
-  dimension \eqn{N \times NOS \times 6 }{N x NOS x 6}. Here, 
+  \code{AR1EIM} handles multiple time series, say \eqn{NOS}.
+  If this happens, then it accordingly returns an array of
+  dimension \eqn{N \times NOS \times 6 }{N x NOS x 6}. Here,
   \eqn{J[, k, ]}, for \eqn{k = 1, \ldots, NOS}, is a matrix
   of dimension \eqn{N \times 6}{N x 6}, which
-   stores the EIMs for the \eqn{k^{th}}{k}th response, as 
+   stores the EIMs for the \eqn{k^{th}}{k}th response, as
    above, i.e.,
-  
+
 
     \deqn{J[, k, ] = [ J_{[ , 1, 1]}, J_{[ , 2, 2]},
                        J_{[ , 3, 3]}, \ldots ], }{
-          J[, k, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3], \ldots ], 
+          J[, k, ] = [ J[ , 1, 1], J[ , 2, 2], J[ , 3, 3], \ldots ],
     }
 
 
-  the \emph{bandwith} form, as per required by 
+  the \emph{bandwidth} form, as required by
   \code{\link[VGAM:AR1]{AR1}}.
 
 
 }
 \value{
-  An array of dimension \eqn{N \times NOS \times 6}{N x NOS x 6}, 
+  An array of dimension \eqn{N \times NOS \times 6}{N x NOS x 6},
   as above.
 
 
   This array stores the EIMs calculated from the joint density as
-  a function of 
+  a function of
   \deqn{\boldsymbol{\theta} = (\mu^*, \sigma^2, \rho). }{
        \theta = (mu^*, sigma^2, rho).
   }
 
 
   Nevertheless, note that, under the VGAM/VGLM approach, the EIMs
-  must be correspondingly calculated in terms of the linear 
+  must be correspondingly calculated in terms of the linear
   predictors, \eqn{\boldsymbol{\eta}}{\eta}.
 
 
 }
 \note{
-  For simplicity, one can assume that the time series analyzed has 
-  a 0-mean. Consequently, where the family function 
-  \code{\link[VGAM:AR1]{AR1}} calls \code{AR1EIM} to compute 
-  the EIMs, the argument \code{p.drift} is internally set 
-  to zero-vector, whereas \code{x} is \emph{centered} by 
+  For simplicity, one can assume that the time series analyzed has
+  a 0-mean. Consequently, where the family function
+  \code{\link[VGAM:AR1]{AR1}} calls \code{AR1EIM} to compute
+  the EIMs, the argument \code{p.drift} is internally set
+  to zero-vector, whereas \code{x} is \emph{centered} by
   subtracting its mean value.
 
 
 }
 \section{Asymptotic behaviour of the algorithm}{
-  For large enough \eqn{n}, the EIMs, 
+  For large enough \eqn{n}, the EIMs,
   \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)},
   become approximately linear in \eqn{n}. That is, for some
-  \eqn{n_0}{n0}, 
- 
-  \deqn{ J_n(\boldsymbol{\theta}) \equiv 
-         J_{n_0}(\boldsymbol{\theta}) + (n - n_0) 
+  \eqn{n_0}{n0},
+
+  \deqn{ J_n(\boldsymbol{\theta}) \equiv
+         J_{n_0}(\boldsymbol{\theta}) + (n - n_0)
          \bar{J}(\boldsymbol{\theta}),~~~~~~(**) }{
          Jn(\theta) -> Jn0(\theta) + (n - n0) * Jbar(\theta),   (*)
          }
-  where \eqn{ \bar{J}(\boldsymbol{\theta})  }{ Jbar(\theta)} is 
+  where \eqn{ \bar{J}(\boldsymbol{\theta})  }{ Jbar(\theta)} is
   a constant matrix.
 
 
   This relationsihip is internally considered if a proper value
   of \eqn{n_0}{n0} is determined. Different ways can be adopted to
-  find \eqn{n_0}{n0}. In \code{AR1EIM}, this is done by checking 
+  find \eqn{n_0}{n0}. In \code{AR1EIM}, this is done by checking
   the difference between the internally estimated variances and the
-  entered ones at \code{WNsd}. 
+  entered ones at \code{WNsd}.
   If this difference is less than
-  \code{eps.porat} at some iteration, say at iteration \eqn{n_0}{n0}, 
+  \code{eps.porat} at some iteration, say at iteration \eqn{n_0}{n0},
   then \code{AR1EIM} takes
-  \eqn{ \bar{J}(\boldsymbol{\theta})}{Jbar(\theta)} 
-  as the last computed increment of 
-  \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)}, and extraplotates 
-  \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)}, for all 
+  \eqn{ \bar{J}(\boldsymbol{\theta})}{Jbar(\theta)}
+  as the last computed increment of
+  \eqn{J_n(\boldsymbol{\theta})}{Jn(\theta)}, and extrapolates
+  \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)}, for all
   \eqn{k \geq n_0 }{k \ge n0} using \eqn{(*)}.
-  Else, the algorithm will complete the iterations for 
+  Else, the algorithm will complete the iterations for
    \eqn{1 \leq n \leq N}{1 \le n \le N}.
 
 
-  Finally, note that the rate of convergence reasonably decreases if 
-  the asymptotic relationship \eqn{(*)} is used to compute 
+  Finally, note that the rate of convergence reasonably decreases if
+  the asymptotic relationship \eqn{(*)} is used to compute
   \eqn{J_k(\boldsymbol{\theta})}{Jk(\theta)},
   \eqn{k \geq n_0 }{k \ge n0}. Normally, the number
-  of operations involved on this algorithm is proportional to 
+  of operations involved in this algorithm is proportional to
   \eqn{N^2}.
 
 
@@ -272,28 +272,28 @@ AR1EIM(x = NULL, var.arg = NULL, p.drift = NULL,
 \examples{
   set.seed(1)
   nn <- 500
-  ARcoeff1 <- c(0.3, 0.25)        # Will be recycled. 
+  ARcoeff1 <- c(0.3, 0.25)        # Will be recycled.
   WNsd     <- c(exp(1), exp(1.5)) # Will be recycled.
   p.drift  <- c(0, 0)             # Zero-mean gaussian time series.
-  
+
   ### Generate two (zero-mean) AR(1) processes ###
-  ts1 <- p.drift[1]/(1 - ARcoeff1[1]) + 
+  ts1 <- p.drift[1]/(1 - ARcoeff1[1]) +
                    arima.sim(model = list(ar = ARcoeff1[1]), n = nn,
                    sd = WNsd[1])
-  ts2 <- p.drift[2]/(1 - ARcoeff1[2]) + 
+  ts2 <- p.drift[2]/(1 - ARcoeff1[2]) +
                    arima.sim(model = list(ar = ARcoeff1[2]), n = nn,
                    sd = WNsd[2])
-  
+
   ARdata <- matrix(cbind(ts1, ts2), ncol = 2)
-  
-  
+
+
   ### Compute the exact EIMs: TWO responses. ###
   ExactEIM <- AR1EIM(x = ARdata, var.arg = FALSE, p.drift = p.drift,
                            WNsd = WNsd, ARcoeff1 = ARcoeff1)
-                       
-  ### For response 1:                     
+
+  ### For response 1:
   head(ExactEIM[, 1 ,])      # NOTICE THAT THIS IS A (nn x 6) MATRIX!
-  
+
   ### For response 2:
   head(ExactEIM[, 2 ,])      # NOTICE THAT THIS IS A (nn x 6) MATRIX!
 }
diff --git a/man/AR1UC.Rd b/man/AR1UC.Rd
index 2a936b3..70edd75 100644
--- a/man/AR1UC.Rd
+++ b/man/AR1UC.Rd
@@ -49,7 +49,7 @@ dAR1(x, drift = 0, var.error = 1, ARcoef1 = 0.0,
   \code{dAR1} gives the density.
 
 
-  
+
 % \code{pAR1} gives the distribution function, and
 % \code{qAR1} gives the quantile function, and
 % \code{rAR1} generates random deviates.
diff --git a/man/BICvlm.Rd b/man/BICvlm.Rd
index 10e8879..0a0307f 100644
--- a/man/BICvlm.Rd
+++ b/man/BICvlm.Rd
@@ -69,7 +69,7 @@ BICvlm(object, \dots, k = log(nobs(object)))
 \section{Warning }{
   Like \code{\link{AICvlm}}, this code has not been double-checked.
   The general applicability of \code{BIC} for the VGLM/VGAM classes
-  has not been developed fully. 
+  has not been developed fully.
   In particular, \code{BIC} should not be run on some \pkg{VGAM} family
   functions because of violation of certain regularity conditions, etc.
 
diff --git a/man/Coef.qrrvglm-class.Rd b/man/Coef.qrrvglm-class.Rd
index 424d73a..d29004b 100644
--- a/man/Coef.qrrvglm-class.Rd
+++ b/man/Coef.qrrvglm-class.Rd
@@ -2,7 +2,7 @@
 \docType{class}
 \alias{Coef.qrrvglm-class}
 \title{Class ``Coef.qrrvglm'' }
-\description{  
+\description{
    The most pertinent matrices and other quantities pertaining to a
    QRR-VGLM (CQO model).
 
@@ -19,7 +19,7 @@ linear predictors and \eqn{n} is the number of observations.
 \section{Slots}{
   \describe{
     \item{\code{A}:}{Of class \code{"matrix"}, \bold{A}, which are the
-    linear `coefficients' of the matrix of latent variables. 
+    linear `coefficients' of the matrix of latent variables.
     It is \eqn{M} by \eqn{R}. }
     \item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}.
     These correspond to terms of the argument \code{noRRR}. }
@@ -27,34 +27,34 @@ linear predictors and \eqn{n} is the number of observations.
     canonical coefficients. It has \eqn{R} columns. }
     \item{\code{Constrained}:}{Logical. Whether the model is
     a constrained ordination model. }
-    \item{\code{D}:}{Of class \code{"array"}, 
+    \item{\code{D}:}{Of class \code{"array"},
     \code{D[,,j]} is an order-\code{Rank} matrix, for
     \code{j} = 1,\dots,\eqn{M}.
     Ideally, these are negative-definite in order to make the response
-    curves/surfaces bell-shaped. 
+    curves/surfaces bell-shaped.
     }
-    \item{\code{Rank}:}{The rank (dimension, number of latent variables) 
+    \item{\code{Rank}:}{The rank (dimension, number of latent variables)
     of the RR-VGLM. Called \eqn{R}. }
     \item{\code{latvar}:}{\eqn{n} by \eqn{R} matrix
           of latent variable values. }
     \item{\code{latvar.order}:}{Of class \code{"matrix"}, the permutation
-          returned when the function 
+          returned when the function
           \code{\link{order}} is applied to each column of \code{latvar}.
-          This enables each column of \code{latvar} to be easily sorted. 
+          This enables each column of \code{latvar} to be easily sorted.
           }
-    \item{\code{Maximum}:}{Of class \code{"numeric"}, the 
-          \eqn{M} maximum fitted values. That is, the fitted values 
+    \item{\code{Maximum}:}{Of class \code{"numeric"}, the
+          \eqn{M} maximum fitted values. That is, the fitted values
           at the optimums for \code{noRRR = ~ 1} models.
     If \code{noRRR} is not \code{~ 1} then these will be \code{NA}s. }
     \item{\code{NOS}:}{Number of species.}
     \item{\code{Optimum}:}{Of class \code{"matrix"}, the values
-          of the latent variables where the optimums are. 
+          of the latent variables where the optimums are.
           If the curves are not bell-shaped, then the value will
           be \code{NA} or \code{NaN}.}
     \item{\code{Optimum.order}:}{Of class \code{"matrix"}, the permutation
-          returned when the function 
+          returned when the function
           \code{\link{order}} is applied to each column of \code{Optimum}.
-          This enables each row of \code{Optimum} to be easily sorted. 
+          This enables each row of \code{Optimum} to be easily sorted.
           }
 %   \item{\code{Diagonal}:}{Vector of logicals: are the
 %         \code{D[,,j]} diagonal? }
@@ -72,7 +72,7 @@ linear predictors and \eqn{n} is the number of observations.
           tolerances (squared if on the diagonal).
           These are denoted by \bold{T} in Yee (2004).
     Ideally, these are positive-definite in order to make the response
-    curves/surfaces bell-shaped. 
+    curves/surfaces bell-shaped.
 The tolerance matrices satisfy
 \eqn{T_s = -\frac12 D_s^{-1}}{T_s = -(0.5 D_s^(-1)}.
 
@@ -83,7 +83,7 @@ The tolerance matrices satisfy
 %\section{Methods}{
 %No methods defined with class "Coef.qrrvglm" in the signature.
 %}
-\references{ 
+\references{
 
 
 Yee, T. W. (2004)
@@ -99,7 +99,7 @@ canonical Gaussian ordination.
 
 % ~Make other sections like Warning with \section{Warning }{....} ~
 
-\seealso{ 
+\seealso{
     \code{\link{Coef.qrrvglm}},
     \code{\link{cqo}},
     \code{print.Coef.qrrvglm}.
diff --git a/man/Coef.qrrvglm.Rd b/man/Coef.qrrvglm.Rd
index adb1f29..9b36571 100644
--- a/man/Coef.qrrvglm.Rd
+++ b/man/Coef.qrrvglm.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Returns Important Matrices etc. of a QO Object }
 \description{
-  This methods function returns important matrices etc. of a 
+  This methods function returns important matrices etc. of a
   QO object.
 
 
@@ -88,7 +88,7 @@ Coef.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL, ...)
 
 
 }
-\references{ 
+\references{
 Yee, T. W. (2004)
 A new technique for maximum-likelihood
 canonical Gaussian ordination.
@@ -121,7 +121,7 @@ about how much information the parameters contain.
 
 }
 
-% ~Make other sections like Warning with \section{Warning }{....} ~ 
+% ~Make other sections like Warning with \section{Warning }{....} ~
 \seealso{
 \code{\link{cqo}},
 \code{\link{Coef.qrrvglm-class}},
diff --git a/man/Coef.rrvglm-class.Rd b/man/Coef.rrvglm-class.Rd
index 9ce80a5..c4f650d 100644
--- a/man/Coef.rrvglm-class.Rd
+++ b/man/Coef.rrvglm-class.Rd
@@ -18,12 +18,12 @@ and \eqn{n} is the number of observations.
     \item{\code{B1}:}{Of class \code{"matrix"}, \bold{B1}. }
     \item{\code{C}:}{Of class \code{"matrix"}, \bold{C}. }
     \item{\code{Rank}:}{The rank of the RR-VGLM. }
-    \item{\code{colx1.index}:}{Index of the columns of the 
+    \item{\code{colx1.index}:}{Index of the columns of the
           \code{"vlm"}-type model matrix corresponding to the variables
           in \bold{x1}. These correspond to \bold{B1}.
     }
     \item{\code{colx2.index}:}{
-          Index of the columns of the 
+          Index of the columns of the
           \code{"vlm"}-type model matrix corresponding to the variables
           in \bold{x2}. These correspond to the reduced-rank regression.
     }
diff --git a/man/Coef.rrvglm.Rd b/man/Coef.rrvglm.Rd
index 2fe0164..c3159a6 100644
--- a/man/Coef.rrvglm.Rd
+++ b/man/Coef.rrvglm.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Returns Important Matrices etc. of a RR-VGLM Object }
 \description{
-  This methods function returns important matrices etc. of a 
+  This methods function returns important matrices etc. of a
   RR-VGLM object.
 }
 \usage{
@@ -22,12 +22,12 @@ Coef.rrvglm(object, ...)
 
 }
 \value{
-  An object of class \code{"Coef.rrvglm"} 
+  An object of class \code{"Coef.rrvglm"}
 (see \code{\link{Coef.rrvglm-class}}).
 
 
 }
-\references{ 
+\references{
 
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
@@ -43,8 +43,8 @@ This function is an alternative to \code{coef.rrvglm}.
 
 }
 
-% ~Make other sections like Warning with \section{Warning }{....} ~ 
-\seealso{ 
+% ~Make other sections like Warning with \section{Warning }{....} ~
+\seealso{
   \code{\link{Coef.rrvglm-class}},
   \code{print.Coef.rrvglm},
   \code{\link{rrvglm}}.
diff --git a/man/Coef.vlm.Rd b/man/Coef.vlm.Rd
index baa2a6f..49a7aa1 100644
--- a/man/Coef.vlm.Rd
+++ b/man/Coef.vlm.Rd
@@ -5,7 +5,7 @@
 \description{
   Amongst other things, this function applies inverse
   link functions to the parameters of intercept-only
-  VGLMs. 
+  VGLMs.
 }
 \usage{
 Coef.vlm(object, ...)
@@ -13,7 +13,7 @@ Coef.vlm(object, ...)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{object}{ A fitted model. }
-  \item{\dots}{ Arguments which may be passed into 
+  \item{\dots}{ Arguments which may be passed into
   \code{\link[stats]{coef}}.
   }
 }
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 3cd89e4..8cd366b 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -5,7 +5,7 @@
 \description{
   Here is a description of some common and typical arguments found
   in many \pkg{VGAM} family functions, e.g.,
-  \code{lsigma}, 
+  \code{lsigma},
   \code{isigma},
   \code{gsigma},
   \code{nsimEI},
@@ -30,7 +30,9 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
                           parallel = TRUE,
                           ishrinkage = 0.95,
                           nointercept = NULL, imethod = 1,
-                          type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
+                          type.fitted = c("mean", "quantiles",
+                                          "pobs0", "pstr0", "onempstr0"),
+                          percentiles = c(25, 50, 75),
                           probs.x = c(0.15, 0.85),
                           probs.y = c(0.25, 0.50, 0.75),
                           multiple.responses = FALSE, earg.link = FALSE,
@@ -69,7 +71,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
   to be inputted more easily.
   One has something like
   \code{link.list = list("(Default)" = "identitylink", x2 = "loge", x3 = "logoff")}
-  and 
+  and
   \code{earg.list = list("(Default)" = list(), x2 = list(), x3 = "list(offset = -1)")}.
   Then any unnamed terms will have the default link with its
   corresponding extra argument.
@@ -122,7 +124,8 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
 
 
 
-% Then the actual search values will be \code{unique(sort(c(gshape, 1/gshape)))}, etc.
+% Then the actual search values will be
+% \code{unique(sort(c(gshape, 1/gshape)))}, etc.
 
 
   }
@@ -157,7 +160,7 @@ except for \eqn{X_2}.
 
 
   This argument is common in \pkg{VGAM} family functions for categorical
-  responses, e.g., \code{\link{cumulative}},  \code{\link{acat}}, 
+  responses, e.g., \code{\link{cumulative}},  \code{\link{acat}},
   \code{\link{cratio}}, \code{\link{sratio}}.
   For the proportional odds model (\code{\link{cumulative}}) having
   parallel constraints applied to each explanatory variable (except for
@@ -232,6 +235,18 @@ except for \eqn{X_2}.
 
 
   }
+  \item{percentiles}{
+  Numeric vector, with values between 0 and 100
+  (although it is not recommended that exactly 0 or 100 be inputted).
+  Used only if \code{type.fitted = "quantiles"} or
+  \code{type.fitted = "percentiles"}, then
+  this argument specifies the values of these quantiles.
+  The argument name tries to reinforce that the values
+  lie between 0 and 100.
+  See \code{\link{fittedvlm}} for more details.
+
+
+  }
   \item{probs.x, probs.y}{
   Numeric, with values in (0, 1).
   The probabilites that define quantiles with respect to some vector,
@@ -291,7 +306,7 @@ except for \eqn{X_2}.
   Either an integer vector, or a vector of character strings.
 
 
-  If an integer, then it specifies which 
+  If an integer, then it specifies which
   linear/additive predictor is modelled as \emph{intercept-only}.
   That is, the regression coefficients are
   set to zero for all covariates except for the intercept.
@@ -342,7 +357,7 @@ except for \eqn{X_2}.
   \code{fixed = TRUE}, meaning that wildcards \code{"*"} are not useful.
   See the example below---all the variants work;
   those with \code{LOCAT} issue a warning that that value is unmatched.
-Importantly, the parameter names 
+Importantly, the parameter names
 are \code{c("location1", "scale1", "location2", "scale2")}
 because there are 2 responses.
 Yee (2015) described \code{zero} for only numerical input.
@@ -445,7 +460,7 @@ In the future, most \pkg{VGAM} family functions might be converted
   \item{bred}{
   Logical.
   Some \pkg{VGAM} family functions will allow bias-reduction based
-  on the work by Kosmidis and Firth. 
+  on the work by Kosmidis and Firth.
   Sometimes half-stepping is a good idea; set \code{stepsize = 0.5}
   and monitor convergence by setting \code{trace = TRUE}.
 
diff --git a/man/Huggins89.t1.Rd b/man/Huggins89.t1.Rd
index 6c41801..f3bfeb0 100644
--- a/man/Huggins89.t1.Rd
+++ b/man/Huggins89.t1.Rd
@@ -35,7 +35,7 @@ data(Huggins89.t1)
 
 
   Both \code{Huggins89table1} and \code{Huggins89.t1} are identical.
-  The latter used variables beginning with \code{z}, 
+  The latter used variables beginning with \code{z},
   not \code{t}, and may be withdrawn very soon.
 
 
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index b8ea845..8a8631c 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -33,7 +33,7 @@ rinv.gaussian(n, mu, lambda)
 }
 \value{
   \code{dinv.gaussian} gives the density,
-  \code{pinv.gaussian} gives the distribution function, and 
+  \code{pinv.gaussian} gives the distribution function, and
   \code{rinv.gaussian} generates random deviates.
 
 
diff --git a/man/Links.Rd b/man/Links.Rd
index 6952148..79038e1 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -18,7 +18,7 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
   This is usually \eqn{\theta}{theta} (default) but can sometimes
   be \eqn{\eta}{eta},
   depending on the other arguments.
-  If \code{theta} is character then \code{inverse} and 
+  If \code{theta} is character then \code{inverse} and
   \code{deriv} are ignored.
   The name \code{theta} should always be the name of the first argument.
 
@@ -60,7 +60,7 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
 % For \pkg{VGAM} family functions with more than one link
 % function there usually will be an \code{earg}-type argument for
 % each link. For example, if there are two links called
-% \code{lshape} and \code{lscale} then 
+% \code{lshape} and \code{lscale} then
 % the \code{earg}-type arguments for these might be called
 % \code{eshape} and \code{escale}, say.
 %
@@ -116,7 +116,7 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
 
 
   If \code{inverse = FALSE} and \code{deriv = 1} then it is
-  \eqn{d\eta / d\theta}{d eta / d theta} 
+  \eqn{d\eta / d\theta}{d eta / d theta}
   \emph{as a function of} \eqn{\theta}{theta}.
   If \code{inverse = FALSE} and \code{deriv = 2} then it is
   \eqn{d^2\eta / d\theta^2}{d^2 eta / d theta^2}
@@ -124,7 +124,7 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
 
 
   If \code{inverse = TRUE} and \code{deriv = 1} then it is
-  \eqn{d\theta / d\eta}{d theta / d eta} 
+  \eqn{d\theta / d\eta}{d theta / d eta}
   \emph{as a function of} \eqn{\theta}{theta}.
   If \code{inverse = TRUE} and \code{deriv = 2} then it is
   \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2}
@@ -146,9 +146,9 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
 
 
 % Prior to 20150711; this was what it was:
-  
+
 % If \code{inverse = FALSE} and \code{deriv = 1} then it is
-% \eqn{d\theta / d\eta}{d theta / d eta} 
+% \eqn{d\theta / d\eta}{d theta / d eta}
 % \emph{as a function of} \eqn{\theta}{theta}.
 % If \code{inverse = FALSE} and \code{deriv = 2} then it is
 % \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2}
@@ -158,11 +158,11 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
 % If \code{inverse = TRUE} and \code{deriv = 0} then the inverse
 % link function is returned, hence \code{theta} is really
 % \eqn{\eta}{eta}.
-% If \code{inverse = TRUE} and \code{deriv} is positive then the 
+% If \code{inverse = TRUE} and \code{deriv} is positive then the
 % \emph{reciprocal} of the same link function with
 % \code{(theta = theta, someParameter, inverse = TRUE, deriv = deriv)}
 % is returned.
-  
+
 
 }
 \details{
@@ -252,12 +252,12 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
   Altogether, these are big changes and the user should beware!
 
 
-  
+
   One day in the future, \emph{all} \pkg{VGAM} link functions
   may be renamed to end in the characters \code{"link"}.
 
 
-  
+
 }
 
 \seealso{
@@ -286,7 +286,7 @@ TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
   all \pkg{VGAM} family functions will only
   contain one default value for each link argument rather than giving a
   vector of choices.
-  For example, rather than 
+  For example, rather than
   \code{binomialff(link = c("logit", "probit", "cloglog",
   "cauchit", "identitylink"), ...)}
   it is now
diff --git a/man/MNSs.Rd b/man/MNSs.Rd
index 153e803..9203ec9 100644
--- a/man/MNSs.Rd
+++ b/man/MNSs.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The MNSs Blood Group System }
 \description{
-  Estimates the three independent parameters of the 
+  Estimates the three independent parameters of the
   MNSs blood group system.
 }
 \usage{
@@ -18,15 +18,15 @@ MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
   }
   \item{imS, ims, inS}{
   Optional initial value for \code{mS}, \code{ms}
-  and \code{nS} respectively. 
+  and \code{nS} respectively.
   A \code{NULL} means they are computed internally.
 
   }
 }
 \details{
   There are three independent
-  parameters: \code{m_S}, \code{m_s}, \code{n_S}, say, so that 
-  \code{n_s = 1 - m_S - m_s - n_S}.  
+  parameters: \code{m_S}, \code{m_s}, \code{n_S}, say, so that
+  \code{n_s = 1 - m_S - m_s - n_S}.
   We let the eta vector (transposed) be
   \code{(g(m_S), g(m_s), g(n_S))} where \code{g} is the
   link function.
@@ -40,7 +40,7 @@ MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
 
 
 }
-\references{ 
+\references{
   Elandt-Johnson, R. C. (1971)
   \emph{Probability Models and Statistical Methods in Genetics},
   New York: Wiley.
@@ -48,10 +48,10 @@ MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The input can be a 6-column matrix of counts, where the columns are
   MS, Ms, MNS, MNs, NS, Ns (in order).
-  Alternatively, the input can be a 6-column matrix of 
+  Alternatively, the input can be a 6-column matrix of
   proportions (so each row adds to 1) and the \code{weights}
   argument is used to specify the total number of counts for each row.
 
@@ -70,10 +70,10 @@ MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
 }
 \examples{
 # Order matters only:
-y <- cbind(MS = 295, Ms = 107, MNS = 379, MNs = 322, NS = 102, Ns = 214) 
+y <- cbind(MS = 295, Ms = 107, MNS = 379, MNs = 322, NS = 102, Ns = 214)
 fit <- vglm(y ~ 1, MNSs("logit", .25, .28, .08), trace = TRUE)
 fit <- vglm(y ~ 1, MNSs(link = logit), trace = TRUE, crit = "coef")
-Coef(fit) 
+Coef(fit)
 rbind(y, sum(y)*fitted(fit))
 sqrt(diag(vcov(fit)))
 }
diff --git a/man/Opt.Rd b/man/Opt.Rd
index cf2f9a2..a4434b7 100644
--- a/man/Opt.Rd
+++ b/man/Opt.Rd
@@ -35,7 +35,7 @@ Opt(object, ...)
   For quadratic ordination models there is a formula
   for the optimum but for additive ordination models the
   optimum must be searched for numerically. If it occurs
-  on the boundary, then the optimum is undefined. 
+  on the boundary, then the optimum is undefined.
   At an optimum, the fitted value of the response is
   called the \emph{maximum}.
 
diff --git a/man/ParetoUC.Rd b/man/ParetoUC.Rd
index c21d8f0..fca397b 100644
--- a/man/ParetoUC.Rd
+++ b/man/ParetoUC.Rd
@@ -65,7 +65,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 }
 %%\note{
-%%  The Pareto distribution is 
+%%  The Pareto distribution is
 %%}
 
 \seealso{
diff --git a/man/QvarUC.Rd b/man/QvarUC.Rd
index eeedaa2..90a86f6 100644
--- a/man/QvarUC.Rd
+++ b/man/QvarUC.Rd
@@ -125,7 +125,7 @@ Qvar(object, factorname = NULL, which.linpred = 1,
   and \eqn{j}. The diagonal elements are arbitrary and are set
   to zero.
 
-  
+
   The matrix has an attribute that corresponds to the prior
   weight matrix; it is accessed by \code{\link{uninormal}}
   and replaces the usual \code{weights} argument.
diff --git a/man/Rcim.Rd b/man/Rcim.Rd
index aabcd59..63b0a67 100644
--- a/man/Rcim.Rd
+++ b/man/Rcim.Rd
@@ -2,7 +2,7 @@
 \alias{Rcim}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{
-  Mark the Baseline of Row and Column on a Matrix data 
+  Mark the Baseline of Row and Column on a Matrix data
 
 }
 \description{
@@ -14,7 +14,7 @@
 
 }
 \usage{
-  Rcim(mat, rbaseline = 1, cbaseline = 1) 
+  Rcim(mat, rbaseline = 1, cbaseline = 1)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -28,7 +28,7 @@
 \item{rbaseline, cbaseline}{
   Numeric (row number of the matrix \code{mat}) or
   character (matching a row name of \code{mat}) that the user
-  wants as the row baseline or reference level. 
+  wants as the row baseline or reference level.
   Similarly \code{cbaseline} for the column.
 
 
@@ -46,7 +46,7 @@
 
 \value{
   Matrix of the same dimension as the input,
-  with \code{rbaseline} and \code{cbaseline} specifying the 
+  with \code{rbaseline} and \code{cbaseline} specifying the
   first rows and columns.
   The default is no change in \code{mat}.
 
@@ -64,9 +64,9 @@ Alfian F. Hadi and T. W. Yee.
   \code{rbaseline}
   and
   \code{cbaseline}
-  differ from arguments 
-  \code{roffset} 
-  and 
+  differ from arguments
+  \code{roffset}
+  and
   \code{coffset}
   in \code{\link{moffset}}
   by 1 (when elements of the matrix agree).
diff --git a/man/SURff.Rd b/man/SURff.Rd
index fbb2d5b..e5c9b01 100644
--- a/man/SURff.Rd
+++ b/man/SURff.Rd
@@ -82,7 +82,7 @@ a warning or an error will result.
   \emph{Zellner's two-stage Aitken estimator})
   can be obtained by setting
   \code{maxit = 1}
-  (and possibly \code{divisor = "sqrt"} or 
+  (and possibly \code{divisor = "sqrt"} or
   \code{divisor = "n-max"}).
 
 
diff --git a/man/Select.Rd b/man/Select.Rd
index 98b2dfd..a24ed01 100644
--- a/man/Select.Rd
+++ b/man/Select.Rd
@@ -5,14 +5,14 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Select Variables for a Formula Response or the RHS of a Formula
 
-  
+
 %%  ~~function to do ... ~~
 }
 \description{
   Select variables from a data frame whose names
   begin with a certain character string.
 
-  
+
 %%  ~~ A concise (1-5 lines) description of what the function does. ~~
 }
 \usage{
@@ -26,7 +26,7 @@ Select(data = list(), prefix = "y",
   \item{data}{
     A data frame or a matrix.
 
-    
+
 %%     ~~Describe \code{data} here~~
 }
 \item{prefix}{
@@ -39,14 +39,14 @@ Select(data = list(), prefix = "y",
   in \code{data} are chosen.
 
 
-  
+
 %%     ~~Describe \code{prefix} here~~
 }
 \item{lhs}{
   A character string.
   The response of a formula.
 
-  
+
 %%     ~~Describe \code{lhs} here~~
 }
 \item{rhs}{
@@ -54,7 +54,7 @@ Select(data = list(), prefix = "y",
   Included as part of the RHS a formula.
   Set \code{rhs = "0"} to suppress the intercept.
 
-  
+
 %%     ~~Describe \code{rhs} here~~
 }
 \item{rhs2, rhs3}{
@@ -66,21 +66,21 @@ Select(data = list(), prefix = "y",
 
 
 
-  
+
 %%     ~~Describe \code{rhs} here~~
 }
 \item{as.character}{
   Logical.
   Return the answer as a character string?
 
-  
+
 %%     ~~Describe \code{as.character} here~~
 }
 \item{as.formula.arg}{
   Logical.
   Is the answer a formula?
 
-  
+
 %%     ~~Describe \code{as.formula.arg} here~~
 }
 \item{tilde}{
@@ -89,20 +89,20 @@ Select(data = list(), prefix = "y",
   are both \code{TRUE}
   then include the tilde in the formula?
 
-  
+
 }
 \item{exclude}{
   Vector of character strings.
   Exclude these variables explicitly.
 
-  
+
 %%     ~~Describe \code{exclude} here~~
 }
 \item{sort.arg}{
   Logical.
   Sort the variables?
 
-  
+
 %%     ~~Describe \code{sort.arg} here~~
 }
 }
@@ -127,7 +127,7 @@ Select(data = list(), prefix = "y",
   as \code{"cbind(y1, y2, y3)"}.
 
 
-  
+
   If \code{as.character = FALSE} and
   \code{as.formula.arg = TRUE} then a \code{\link[stats]{formula}} such
   as \code{lhs ~ y1 + y2 + y3}.
@@ -141,7 +141,7 @@ Select(data = list(), prefix = "y",
 
 
 
-  
+
 %%  ~Describe the value returned
 %%  If it is a LIST, use
 %%  \item{comp1 }{Description of 'comp1'}
@@ -154,7 +154,7 @@ Select(data = list(), prefix = "y",
 \author{
   T. W. Yee.
 
-  
+
 %%  ~~who you are~~
 }
 \note{
@@ -182,7 +182,7 @@ Select(data = list(), prefix = "y",
   One of these functions might be withdrawn in the future.
 
 
-  
+
 %%  ~~further notes~~
 }
 
@@ -197,7 +197,7 @@ Select(data = list(), prefix = "y",
 
 
 
-  
+
 %% ~~objects to See Also as \code{\link{help}}, ~~~
 }
 \examples{
diff --git a/man/SurvS4-class.Rd b/man/SurvS4-class.Rd
index 2d833c8..f2319b4 100644
--- a/man/SurvS4-class.Rd
+++ b/man/SurvS4-class.Rd
@@ -44,7 +44,7 @@ Class \code{"\linkS4class{vector}"}, by class "matrix", distance 4, with explici
 
 \section{Warning }{
   This code has not been thoroughly tested.
-  
+
 }
 
 \seealso{
diff --git a/man/Tol.Rd b/man/Tol.Rd
index ee7b20c..dd9deb1 100644
--- a/man/Tol.Rd
+++ b/man/Tol.Rd
@@ -65,7 +65,7 @@ Constrained additive ordination.
 
 
 \note{
-  Tolerances are undefined for `linear' and additive 
+  Tolerances are undefined for `linear' and additive
   ordination models.
   They are well-defined for quadratic ordination models.
 
diff --git a/man/UtilitiesVGAM.Rd b/man/UtilitiesVGAM.Rd
index 13b98cf..923c68a 100644
--- a/man/UtilitiesVGAM.Rd
+++ b/man/UtilitiesVGAM.Rd
@@ -70,7 +70,7 @@ interleave.VGAM(.M, M1, inverse = FALSE)
   to be stored for each of the working weight matrices.
   They are represented as columns in the matrix \code{wz} in
   e.g., \code{vglm.fit()}.
-  See  the \emph{matrix-band} format described in 
+  See  the \emph{matrix-band} format described in
   Section 18.3.5 of Yee (2015).
 
 
diff --git a/man/V1.Rd b/man/V1.Rd
index 5df158b..c7daaf9 100644
--- a/man/V1.Rd
+++ b/man/V1.Rd
@@ -9,7 +9,7 @@
   (Pas-de-Calais) and Dutch coasts towards London.
   The number of hits per square grid around London were recorded.
 
-  
+
 
 }
 \usage{
@@ -19,7 +19,7 @@ data(V1)
   A data frame with the following variables.
 
   \describe{
-   
+
     \item{hits}{
       Values between 0 and 4, and 7.
       Actually, the 7 is really imputed from the paper
@@ -60,7 +60,7 @@ data(V1)
 
 }
 \source{
- 
+
   Clarke, R. D. (1946).
   An application of the Poisson distribution.
   \emph{Journal of the Institute of Actuaries},
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index dbb2ba3..6660e0d 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -17,7 +17,6 @@ Vector Generalized Linear and Additive Models
 
 }
 \details{
-
 This package centers on the \emph{iteratively reweighted least squares} (IRLS)
 algorithm.
 Other key words include Fisher scoring, additive models, penalized
@@ -30,7 +29,7 @@ The central modelling functions are
 \code{\link{cqo}},
 \code{\link{cao}}.
 For detailed control of fitting,
-each of these has its own control function, e.g., 
+each of these has its own control function, e.g.,
 \code{\link{vglm.control}}.
 The package uses S4 (see \code{\link[methods]{methods-package}}).
 A companion package called \pkg{VGAMdata} contains some larger
@@ -59,10 +58,30 @@ problems.
 
 
 
-VGAMs are to VGLMs what GAMs are to GLMs.
-Vector smoothing (see \code{\link{vsmooth.spline}}) allows several
-additive predictors to be estimated as a sum of smooth functions of
-the covariates.
+Crudely, VGAMs are to VGLMs what GAMs are to GLMs.
+Two types of VGAMs are implemented:
+1st-generation VGAMs with \code{\link{s}} use vector backfitting,
+while
+2nd-generation VGAMs with \code{\link{sm.os}} and
+\code{\link{sm.ps}} use O-splines and P-splines,
+do not use the backfitting algorithm,
+and have automatic smoothing parameter selection.
+The former is older and is based on Yee and Wild (1996).
+The latter is more modern
+(Yee, Somchit and Wild, 2017)
+but it requires a reasonably large number of observations
+to work well.
+
+
+
+%(e.g., \eqn{n > 500}, say); and it does not always converge
+%and is not entirely reliable.
+
+
+
+%Vector smoothing (see \code{\link{vsmooth.spline}}) allows several
+%additive predictors to be estimated as a sum of smooth functions of
+%the covariates.
 
 
 
@@ -174,6 +193,12 @@ The \pkg{VGAM} package for categorical data analysis.
 
 
 
+Yee, T. W. and Somchit, C. and Wild, C. J. (2017)
+Penalized vector generalized additive models.
+Manuscript in preparation.
+
+
+
 My website for the \pkg{VGAM} package and book is at
 \url{https://www.stat.auckland.ac.nz/~yee}
 and I hope to put more resources there in the future,
@@ -192,7 +217,7 @@ especially as relating to my book.
 }
 
 
-\keyword{ package }
+\keyword{package}
 \keyword{models}
 \keyword{regression}
 \seealso{
@@ -203,7 +228,8 @@ especially as relating to my book.
   \code{\link{cqo}},
   \code{\link{TypicalVGAMfamilyFunction}},
   \code{\link{CommonVGAMffArguments}},
-  \code{\link{Links}}.
+  \code{\link{Links}},
+  \url{https://CRAN.R-project.org/package=VGAM}.
 
 
 
diff --git a/man/acat.Rd b/man/acat.Rd
index d433556..51d305c 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -14,8 +14,8 @@ acat(link = "loge", parallel = FALSE, reverse = FALSE,
 \arguments{
 
   \item{link}{
-  Link function applied to the ratios of the 
-  adjacent categories probabilities. 
+  Link function applied to the ratios of the
+  adjacent categories probabilities.
   See \code{\link{Links}} for more choices.
 
   }
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 1242dbf..0c16679 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -189,12 +189,13 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
    location parameter \eqn{\xi}{xi} corresponds to the regression
    quantile estimate of the classical quantile regression approach of
    Koenker and Bassett (1978). An important property of the ALD is that
-   \eqn{P(Y \leq \xi) = \tau}{P(Y <=   xi) = tau} where 
+   \eqn{P(Y \leq \xi) = \tau}{P(Y <=   xi) = tau} where
    \eqn{\tau = \kappa^2 / (1 + \kappa^2)}{tau = kappa^2 / (1 + kappa^2)}
    so that
    \eqn{\kappa =  \sqrt{\tau / (1-\tau)}}{kappa = sqrt(tau / (1-tau))}.
    Thus \code{alaplace2()} might be used as an alternative to \code{rq}
-   in the \pkg{quantreg} package.
+   in the \pkg{quantreg} package, although scoring is really
+   an unsuitable algorithm for estimation here.
 
 
    Both \code{alaplace1()} and \code{alaplace2()} can handle
@@ -233,13 +234,6 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
   Boston: Birkhauser.
 
 
-Yee, T. W. (2015)
-Vector Generalized Linear and Additive Models: With an
-Implementation in R.
-\emph{Monograph to appear}.
-
-
-
 
 %  Yee, T. W. (2014)
 %  Quantile regression for counts and proportions.
@@ -264,10 +258,10 @@ Implementation in R.
 
 
 }
-\note{ 
+\note{
 % Commented out 20090326
 % The function \code{alaplace2()} is recommended over \code{alaplace1()}
-% for quantile regression because the solution is 
+% for quantile regression because the solution is
 % invariant to location and scale,
 % i.e., linear transformation of the response produces the
 % same linear transformation of the fitted quantiles.
@@ -298,7 +292,7 @@ Implementation in R.
   A second method for solving the noncrossing quantile problem is
   illustrated below in Example 3.
   This is called the \emph{accumulative quantile method} (AQM)
-  and details are in Yee (2014).
+  and details are in Yee (2015).
   It does not make the strong parallelism assumption.
 
 
@@ -337,7 +331,7 @@ fit <- vgam(y ~ s(x2, df = mydof), data = adata, trace = TRUE, maxit = 900,
                       parallel.locat = FALSE))
 fitp <- vgam(y ~ s(x2, df = mydof), data = adata, trace = TRUE, maxit = 900,
              alaplace2(tau = mytau, llocat = "loge", parallel.locat = TRUE))
- 
+
 par(las = 1); mylwd <- 1.5
 with(adata, plot(x2, jitter(y, factor = 0.5), col = "orange",
                  main = "Example 1; green: parallel.locat = TRUE",
@@ -365,7 +359,7 @@ fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[, 1]),
 with(adata, plot(x2, jitter(y, factor = 0.5), col = "orange",
                pch = "o", cex = 0.75, ylab = "y",
                main = "Example 2; parallel.locat = TRUE"))
-with(adata, matlines(x2, fitted(fitp2), col = "blue", 
+with(adata, matlines(x2, fitted(fitp2), col = "blue",
                      lty = 1, lwd = mylwd))
 with(adata, matlines(x2, fitted(fitp3), col = "black",
                      lty = 1, lwd = mylwd))
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index 7e05a01..69f6df9 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -107,7 +107,7 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit")
   all the \code{w.aml} values.
   See Equation (1.6) of Efron (1992).
 
-} 
+}
 \seealso{
   \code{\link{amlpoisson}},
   \code{\link{amlexponential}},
@@ -139,7 +139,7 @@ with(mydat, matlines(x, 100 * fitted(fit), lwd = 2, col = "blue", lty = 1))
 
 
 # Compare the fitted expectiles with the quantiles
-with(mydat, plot(x, jitter(y), col = "blue", las = 1, main = 
+with(mydat, plot(x, jitter(y), col = "blue", las = 1, main =
      paste(paste(round(fit at extra$percentile, digits = 1), collapse = ", "),
            "percentile curves are red")))
 with(mydat, matlines(x, 100 * fitted(fit), lwd = 2, col = "blue", lty = 1))
diff --git a/man/amlexponential.Rd b/man/amlexponential.Rd
index f91f02a..668db4b 100644
--- a/man/amlexponential.Rd
+++ b/man/amlexponential.Rd
@@ -117,7 +117,7 @@ amlexponential(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
   See Equation (1.6) of Efron (1992).
 
 
-} 
+}
 \seealso{
   \code{\link{exponential}},
   \code{\link{amlbinomial}},
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index 9bca196..d146f99 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -126,7 +126,7 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identitylink",
 % loss (2.5).
 % If \code{w} has more than one value then the value returned by
 % \code{loglikelihood} is the sum taken over all the \code{w} values.
-%} 
+%}
 
 \seealso{
   \code{\link{amlpoisson}},
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index 1ce7476..2c83bae 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -132,7 +132,7 @@ amlpoisson(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
   all the \code{w.aml} values.
   See Equation (1.6) of Efron (1992).
 
-} 
+}
 \seealso{
   \code{\link{amlnormal}},
   \code{\link{amlbinomial}},
diff --git a/man/auxposbernoulli.t.Rd b/man/auxposbernoulli.t.Rd
index c86a9a7..7d32dc8 100644
--- a/man/auxposbernoulli.t.Rd
+++ b/man/auxposbernoulli.t.Rd
@@ -25,7 +25,7 @@ aux.posbernoulli.t(y, check.y = FALSE, rename = TRUE, name = "bei")
 
   }
   \item{rename, name}{
-  If \code{rename = TRUE} then the behavioural effects indicator 
+  If \code{rename = TRUE} then the behavioural effects indicator
   are named using the value of \code{name} as the prefix.
   If \code{FALSE} then use the same column names as \code{y}.
 
@@ -49,12 +49,12 @@ aux.posbernoulli.t(y, check.y = FALSE, rename = TRUE, name = "bei")
       In any particular row there are 0s up to
       the first capture. Then there are 1s thereafter.
 
-      
+
     }
     \item{cap1}{
       A vector specifying which time occasion the animal
       was first captured.
-      
+
 
     }
     \item{y0i}{
@@ -63,12 +63,12 @@ aux.posbernoulli.t(y, check.y = FALSE, rename = TRUE, name = "bei")
     }
     \item{yr0i}{
       Number of noncaptures after the first capture.
-      
+
 
     }
     \item{yr1i}{
       Number of recaptures after the first capture.
-      
+
 
     }
   }
@@ -94,7 +94,7 @@ aux.posbernoulli.t(y, check.y = FALSE, rename = TRUE, name = "bei")
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{posbernoulli.t}},
   \code{\link{deermice}}.
 
diff --git a/man/backPain.Rd b/man/backPain.Rd
index be1a3a5..f5d096f 100644
--- a/man/backPain.Rd
+++ b/man/backPain.Rd
@@ -41,7 +41,7 @@ Yee, T. W. (2010)
 The \pkg{VGAM} package for categorical data analysis.
 \emph{Journal of Statistical Software},
 \bold{32}, 1--34.
-\url{http://www.jstatsoft.org/v32/i10/}. 
+\url{http://www.jstatsoft.org/v32/i10/}.
 
 
 }
diff --git a/man/beggs.Rd b/man/beggs.Rd
index d7e75da..28b149e 100644
--- a/man/beggs.Rd
+++ b/man/beggs.Rd
@@ -13,7 +13,7 @@ data(beggs)
   Data frame of a two way table.
 
   \describe{
-   
+
     \item{b0, b1, b2, b3, b4}{
     The \code{b} refers to bacon.
     The number of times bacon was purchased was 0, 1, 2, 3, or 4.
@@ -46,17 +46,17 @@ trips, were counted.
 %    Data from Bell and Latin (1998).
 %    Also see Danaher and Hardie (2005).
 
-     
+
 }
 \source{
- 
+
   Bell, D. R. and Lattin, J. M. (1998)
   Shopping Behavior and Consumer Preference
   for Store Price Format: Why `Large Basket' Shoppers Prefer EDLP.
   \emph{Marketing Science},
   \bold{17}, 66--88.
 
- 
+
 }
 \references{
 
diff --git a/man/benfUC.Rd b/man/benfUC.Rd
index 6da797a..020c8ad 100644
--- a/man/benfUC.Rd
+++ b/man/benfUC.Rd
 fraud detection in accounting and the design of computers.
 
 
 }
-\references{ 
+\references{
 
 Benford, F. (1938)
 The Law of Anomalous Numbers.
@@ -111,7 +111,7 @@ Note on the Frequency of Use of the Different Digits in Natural Numbers.
 
 }
 \author{ T. W. Yee and Kai Huang }
-%\note{ 
+%\note{
 %  Currently only the leading digit is handled.
 %  The first two leading digits would be the next simple extension.
 %
diff --git a/man/benini.Rd b/man/benini.Rd
index ee4893f..ac36e7d 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -14,7 +14,7 @@ benini1(y0 = stop("argument 'y0' must be specified"), lshape = "loge",
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{y0}{
-  Positive scale parameter. 
+  Positive scale parameter.
 
   }
   \item{lshape}{
@@ -70,7 +70,7 @@ benini1(y0 = stop("argument 'y0' must be specified"), lshape = "loge",
 
 
 }
-\references{ 
+\references{
 
 Kleiber, C. and Kotz, S. (2003)
 \emph{Statistical Size Distributions in Economics and Actuarial Sciences},
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
index d941b4c..825cd86 100644
--- a/man/beniniUC.Rd
+++ b/man/beniniUC.Rd
@@ -74,7 +74,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 %\note{
-%  
+%
 %}
 \seealso{
   \code{\link{benini1}}.
diff --git a/man/betaII.Rd b/man/betaII.Rd
index 877acfe..a21258d 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -3,12 +3,12 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Beta Distribution of the Second Kind }
 \description{
-  Maximum likelihood estimation of the 3-parameter 
+  Maximum likelihood estimation of the 3-parameter
   beta II distribution.
 }
 \usage{
-betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge", 
-       iscale = NULL, ishape2.p = NULL, ishape3.q = NULL, imethod = 1, 
+betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+       iscale = NULL, ishape2.p = NULL, ishape3.q = NULL, imethod = 1,
        gscale = exp(-5:5), gshape2.p = exp(-5:5),
        gshape3.q = seq(0.75, 4, by = 0.25),
        probs.y = c(0.25, 0.5, 0.75), zero = "shape")
@@ -54,7 +54,7 @@ The beta II distribution has density
   for \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}.
 Here, \eqn{b} is the scale parameter \code{scale},
 and the others are shape parameters.
-The mean is 
+The mean is
   \deqn{E(Y) = b \, \Gamma(p + 1) \, \Gamma(q - 1) / (\Gamma(p) \, \Gamma(q))}{%
         E(Y) = b  gamma(p + 1)  gamma(q - 1) / ( gamma(p)  gamma(q))}
 provided \eqn{q > 1}; these are returned as the fitted values.
diff --git a/man/betaR.Rd b/man/betaR.Rd
index 5148f31..3451fc9 100644
--- a/man/betaR.Rd
+++ b/man/betaR.Rd
@@ -14,7 +14,7 @@ betaR(lshape1 = "loge", lshape2 = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape1, lshape2, i1, i2}{ 
+  \item{lshape1, lshape2, i1, i2}{
   Details at \code{\link{CommonVGAMffArguments}}.
   See \code{\link{Links}} for more choices.
 
@@ -28,7 +28,7 @@ betaR(lshape1 = "loge", lshape2 = "loge",
 
 
   }
-  \item{A, B}{ 
+  \item{A, B}{
   Lower and upper limits of the distribution.
   The defaults correspond to the \emph{standard beta distribution}
   where the response lies between 0 and 1.
@@ -44,7 +44,7 @@ betaR(lshape1 = "loge", lshape2 = "loge",
 \details{
   The two-parameter beta distribution is given by
   \eqn{f(y) =}
-    \deqn{(y-A)^{shape1-1} \times (B-y)^{shape2-1} / [Beta(shape1,shape2) 
+    \deqn{(y-A)^{shape1-1} \times (B-y)^{shape2-1} / [Beta(shape1,shape2)
                  \times (B-A)^{shape1+shape2-1}]}{%
           (y-A)^(shape1-1) * (B-y)^(shape2-1) / [Beta(shape1,shape2) *
                  (B-A)^(shape1+shape2-1)]}
@@ -86,9 +86,9 @@ betaR(lshape1 = "loge", lshape2 = "loge",
 
 
 }
-\references{ 
-  Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) 
-  Chapter 25 of: 
+\references{
+  Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+  Chapter 25 of:
   \emph{Continuous Univariate Distributions},
   2nd edition, Volume 2, New York: Wiley.
 
@@ -118,7 +118,7 @@ betaR(lshape1 = "loge", lshape2 = "loge",
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{betaff}},
 % \code{\link{zoibetaR}},
   \code{\link[stats:Beta]{Beta}},
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index d0e4196..dbe1289 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -29,11 +29,12 @@
 dbetabinom(x, size, prob, rho = 0, log = FALSE)
 pbetabinom(q, size, prob, rho, log.p = FALSE)
 rbetabinom(n, size, prob, rho = 0)
-dbetabinom.ab(x, size, shape1, shape2, log = FALSE, Inf.shape = 1e6)
+dbetabinom.ab(x, size, shape1, shape2, log = FALSE, Inf.shape = exp(20),
+              limit.prob = 0.5)
 pbetabinom.ab(q, size, shape1, shape2, log.p = FALSE)
 rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
 dzoibetabinom(x, size, prob, rho = 0, pstr0 = 0, pstrsize = 0, log = FALSE)
-pzoibetabinom(q, size, prob, rho, pstr0 = 0, pstrsize = 0, 
+pzoibetabinom(q, size, prob, rho, pstr0 = 0, pstrsize = 0,
               lower.tail = TRUE, log.p = FALSE)
 rzoibetabinom(n, size, prob, rho = 0, pstr0 = 0, pstrsize = 0)
 dzoibetabinom.ab(x, size, shape1, shape2, pstr0 = 0, pstrsize = 0, log = FALSE)
@@ -85,34 +86,44 @@ rzoibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
   \item{Inf.shape}{
   Numeric. A large value such that,
   if \code{shape1} or \code{shape2} exceeds this, then
-  it is taken to be \code{Inf}.
+  special measures are taken, e.g., calling \code{\link[stats]{dbinom}}.
   Also, if \code{shape1} or \code{shape2} is less than its reciprocal,
-  then it might be loosely thought of as being effectively \code{0}
-  (although not treated exactly as so in the code).
+  then special measures are also taken.
   This feature/approximation is needed to avoid numerical
   problem with catastrophic cancellation of
   multiple \code{\link[base:Special]{lbeta}} calls.
 
 
   }
+  \item{limit.prob}{
+  If either shape parameters are \code{Inf} then the binomial limit is
+  taken, with \code{shape1 / (shape1 + shape2)} as the probability of
+  success. In the case where both are \code{Inf} this probability
+  will be a \code{NaN = Inf/Inf}, however,
+  the value \code{limit.prob} is used instead. Hence the default is to
+  assume that both shape parameters are equal as the limit is taken.
+  Purists may assign \code{NaN} to this argument.
+
+
+  }
   \item{.dontuse.prob}{
   An argument that should be ignored and unused.
 
 
   }
-  
+
   \item{pstr0}{
-  Probability of a structual zero (i.e., ignoring the beta-binomial distribution). 
+  Probability of a structural zero (i.e., ignoring the beta-binomial distribution).
   The default value of \code{pstr0} corresponds to the response having a
   beta-binomial distribution inflated only at \code{size}.
-  
+
   }
-  
+
   \item{pstrsize}{
   Probability of a structural maximum value \code{size}. The default value of
   \code{pstrsize} corresponds to the response having a beta-binomial distribution
   inflated only at 0.
-  
+
   }
 
 
@@ -147,16 +158,16 @@ rzoibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
   estimating the parameters, for the formula of the probability density
   function and other details.
 
-  For the inflated beta-binomial distribution, the probability mass 
+  For the inflated beta-binomial distribution, the probability mass
   function is
   \deqn{P(Y = y) =(1 - pstr0 - pstrsize) \times BB(y) + pstr0 \times I[y = 0] +
         pstrsize \times I[y = size]}{%
         F(Y = y) =(1 -  pstr0 - pstrsize) * BB(y) +  pstr0 * I[y = 0] +
         pstrsize * I[y = size]}
-        
+
   where \eqn{BB(y)} is the probability mass function
   of the beta-binomial distribution with the same shape parameters
-  (\code{\link[VGAM]{pbetabinom.ab}}), 
+  (\code{\link[VGAM]{pbetabinom.ab}}),
   \code{pstr0} is the inflated probability at 0
   and \code{pstrsize} is the inflated probability at \code{size}.
   The default values of \code{pstr0} and \code{pstrsize} mean that these
@@ -166,7 +177,7 @@ rzoibetabinom.ab(n, size, shape1, shape2, pstr0 = 0, pstrsize = 0)
 
 }
 \note{
-  \code{pzoibetabinom}, \code{pzoibetabinom.ab}, 
+  \code{pzoibetabinom}, \code{pzoibetabinom.ab},
   \code{pbetabinom} and \code{pbetabinom.ab} can be particularly slow.
   The functions here ending in \code{.ab} are called from those
   functions which don't.
@@ -201,7 +212,7 @@ sum(dy * xx)  # Check expected values are equal
 sum(dbinom(xx, size = N, prob = s1 / (s1+s2)) * xx)
 cumsum(dy) - pbetabinom.ab(xx, N, shape1 = s1, shape2 = s2)  # Should be all 0
 
-y <- rbetabinom.ab(n = 10000, size = N, shape1 = s1, shape2 = s2)
+y <- rbetabinom.ab(n = 1e4, size = N, shape1 = s1, shape2 = s2)
 ty <- table(y)
 barplot(rbind(dy, ty / sum(ty)),
         beside = TRUE, col = c("blue", "orange"), las = 1,
@@ -209,22 +220,34 @@ barplot(rbind(dy, ty / sum(ty)),
                      ", shape2=", s2, ") (blue) vs\n",
         " Random generated beta-binomial(size=", N, ", prob=", s1/(s1+s2),
         ") (orange)", sep = ""), cex.main = 0.8,
-        names.arg = as.character(xx)) 
+        names.arg = as.character(xx))
 
-set.seed(208); N <- 1000000; size = 20;
-pstr0 <- 0.2; pstrsize <- 0.2
-k <- rzoibetabinom.ab(N, size, s1, s2, pstr0, pstrsize)
-hist(k, probability = TRUE, border = "blue",
-     main = "Blue = inflated; orange = ordinary beta-binomial",
+N <- 1e5; size <- 20; pstr0 <- 0.2; pstrsize <- 0.2
+kk <- rzoibetabinom.ab(N, size, s1, s2, pstr0, pstrsize)
+hist(kk, probability = TRUE, border = "blue", ylim = c(0, 0.25),
+     main = "Blue/green = inflated; orange = ordinary beta-binomial",
      breaks = -0.5 : (size + 0.5))
-sum(k == 0) / N  # Proportion of 0
-sum(k == size) / N  # Proportion of size
+sum(kk == 0) / N  # Proportion of 0
+sum(kk == size) / N  # Proportion of size
 lines(0 : size,
       dbetabinom.ab(0 : size, size, s1, s2), col = "orange")
-lines(0 : size, col = "blue",
+lines(0 : size, col = "green", type = "b",
       dzoibetabinom.ab(0 : size, size, s1, s2, pstr0, pstrsize))
 }
 }
 \keyword{distribution}
 
 
+% \item{Inf.shape}{
+% Numeric. A large value such that,
+% if \code{shape1} or \code{shape2} exceeds this, then
+% it is taken to be \code{Inf}.
+% Also, if \code{shape1} or \code{shape2} is less than its reciprocal,
+% then it might be loosely thought of as being effectively \code{0}
+% (although not treated exactly as so in the code).
+% This feature/approximation is needed to avoid numerical
+% problem with catastrophic cancellation of
+% multiple \code{\link[base:Special]{lbeta}} calls.
+% }
+
+
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index 11a82fc..288e761 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -15,7 +15,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
 %- maybe also 'usage' for other objects documented here.
 %            ishrinkage = 0.95, nsimEIM = NULL, zero = 2
 \arguments{
-  \item{lmu, lrho}{ 
+  \item{lmu, lrho}{
   Link functions applied to the two parameters.
   See \code{\link{Links}} for more choices.
   The defaults ensure the parameters remain in \eqn{(0,1)},
@@ -23,7 +23,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
 
 
   }
-  \item{irho}{ 
+  \item{irho}{
   Optional initial value for the correlation parameter.
   If given, it must be in \eqn{(0,1)}, and is recycled to the necessary
   length. Assign this argument a value if a convergence failure occurs.
@@ -40,7 +40,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   Specifies which
   linear/additive predictor is to be modelled as an intercept only.
   If assigned, the single value can be either \code{1} or \code{2}.
@@ -52,7 +52,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
 
 
   }
-  \item{ishrinkage, nsimEIM}{ 
+  \item{ishrinkage, nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for more information.
   The argument \code{ishrinkage} is used only if \code{imethod = 2}.
   Using the argument \code{nsimEIM} may offer large advantages for large
@@ -65,7 +65,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
   There are several parameterizations of the beta-binomial distribution.
   This family function directly models the mean and correlation
   parameter, i.e.,
-  the probability of success. 
+  the probability of success.
   The model can be written
   \eqn{T|P=p \sim Binomial(N,p)}{T|P=p ~ Binomial(N,p)}
   where \eqn{P} has a beta distribution with shape parameters
@@ -76,7 +76,7 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
   That is, \eqn{Y} is the \emph{proportion} of successes. Like
   \code{\link{binomialff}}, the fitted values are the
   estimated probability
-  of success (i.e., \eqn{E[Y]} and not \eqn{E[T]}) 
+  of success (i.e., \eqn{E[Y]} and not \eqn{E[T]})
   and the prior weights \eqn{N} are attached separately on the
   object in a slot.
 
@@ -94,9 +94,9 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
   The default model is \eqn{\eta_1 = logit(\mu)}{eta1 =logit(mu)}
   and \eqn{\eta_2 = logit(\rho)}{eta2 = logit(rho)} because both
   parameters lie between 0 and 1.
-  The mean (of \eqn{Y}) is 
+  The mean (of \eqn{Y}) is
   \eqn{p = \mu = \alpha / (\alpha + \beta)}{p = mu = alpha / (alpha + beta)}
-  and the variance (of \eqn{Y}) is 
+  and the variance (of \eqn{Y}) is
   \eqn{\mu(1-\mu)(1+(N-1)\rho)/N}{mu(1-mu)(1+(N-1)rho)/N}.
   Here, the correlation \eqn{\rho}{rho} is given by
   \eqn{1/(1 + \alpha + \beta)}{1/(1 + alpha + beta)}
@@ -108,9 +108,9 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
 
   This family function uses Fisher scoring.
   Elements of the second-order expected
-  derivatives with respect to \eqn{\alpha}{alpha} and 
-  \eqn{\beta}{beta} are computed numerically, which may 
-  fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta}, 
+  derivatives with respect to \eqn{\alpha}{alpha} and
+  \eqn{\beta}{beta} are computed numerically, which may
+  fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
   \eqn{N} or else take a long time.
 
 
@@ -150,10 +150,10 @@ betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
 \author{ T. W. Yee }
 \note{
   This function processes the input in the same way
-  as \code{\link{binomialff}}. But it does not handle 
+  as \code{\link{binomialff}}. But it does not handle
   the case \eqn{N=1} very well because there are two
   parameters to estimate, not one, for each row of the input.
-  Cases where \eqn{N=1} can be omitted via the 
+  Cases where \eqn{N=1} can be omitted via the
   \code{subset} argument of \code{\link{vglm}}.
 
 
diff --git a/man/betabinomialff.Rd b/man/betabinomialff.Rd
index 67c8ea9..a625240 100644
--- a/man/betabinomialff.Rd
+++ b/man/betabinomialff.Rd
@@ -16,14 +16,14 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape1, lshape2}{ 
+  \item{lshape1, lshape2}{
   Link functions for the two (positive) shape parameters
   of the beta distribution.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{ishape1, ishape2}{ 
+  \item{ishape1, ishape2}{
   Initial value for the shape parameters.
   The first must be positive, and is recycled to the necessary length.
   The second is optional.
@@ -32,7 +32,7 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   Can be
   an integer specifying which linear/additive predictor is to be modelled
   as an intercept only. If assigned, the single value should be either
@@ -56,7 +56,7 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
   There are several parameterizations of the beta-binomial distribution.
   This family function directly models the two shape parameters of the
   associated beta distribution rather than the probability of
-  success (however, see \bold{Note} below). 
+  success (however, see \bold{Note} below).
   The model can be written
   \eqn{T|P=p \sim Binomial(N,p)}{T|P=p ~ Binomial(N,p)}
   where \eqn{P} has a beta distribution with shape parameters
@@ -67,7 +67,7 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
   That is, \eqn{Y} is the \emph{proportion} of successes. Like
   \code{\link{binomialff}}, the fitted values are the
   estimated probability
-  of success (i.e., \eqn{E[Y]} and not \eqn{E[T]}) 
+  of success (i.e., \eqn{E[Y]} and not \eqn{E[T]})
   and the prior weights \eqn{N} are attached separately on the
   object in a slot.
 
@@ -80,13 +80,13 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
    with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
    Recall \eqn{Y = T/N} is the real response being modelled.
 
-   
+
   The default model is \eqn{\eta_1 = \log(\alpha)}{eta1 = log(alpha)}
   and \eqn{\eta_2 = \log(\beta)}{eta2 = log(beta)} because both
   parameters are positive.
-  The mean (of \eqn{Y}) is 
+  The mean (of \eqn{Y}) is
   \eqn{p = \mu = \alpha / (\alpha + \beta)}{p = mu = alpha / (alpha + beta)}
-  and the variance (of \eqn{Y}) is 
+  and the variance (of \eqn{Y}) is
   \eqn{\mu(1-\mu)(1+(N-1)\rho)/N}{mu(1-mu)(1+(N-1)rho)/N}.
   Here, the correlation \eqn{\rho}{rho} is given by
   \eqn{1/(1 + \alpha + \beta)}{1/(1 + alpha + beta)}
@@ -98,9 +98,9 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
 
   This family function uses Fisher scoring. The two diagonal
   elements of the second-order expected
-  derivatives with respect to \eqn{\alpha}{alpha} and 
-  \eqn{\beta}{beta} are computed numerically, which may 
-  fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta}, 
+  derivatives with respect to \eqn{\alpha}{alpha} and
+  \eqn{\beta}{beta} are computed numerically, which may
+  fail for large \eqn{\alpha}{alpha}, \eqn{\beta}{beta},
   \eqn{N} or else take a long time.
 
 
@@ -140,10 +140,10 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
 \author{ T. W. Yee }
 \note{
   This function processes the input in the same way
-  as \code{\link{binomialff}}. But it does not handle 
+  as \code{\link{binomialff}}. But it does not handle
   the case \eqn{N=1} very well because there are two
   parameters to estimate, not one, for each row of the input.
-  Cases where \eqn{N=1} can be omitted via the 
+  Cases where \eqn{N=1} can be omitted via the
   \code{subset} argument of \code{\link{vglm}}.
 
 
@@ -161,7 +161,7 @@ betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
   is currently not implemented in the \pkg{VGAM} package as it has
   range-restrictions for the correlation parameter that are currently
   too difficult to handle in this package.
-  
+
 
 }
 \section{Warning }{
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 483ed19..3531c97 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -8,21 +8,22 @@
 }
 \usage{
 betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
-       imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
+       imu = NULL, iphi = NULL,
+       gprobs.y = ppoints(8), gphi  = exp(-3:5)/4, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
 
 
-  \item{A, B}{ 
+  \item{A, B}{
   Lower and upper limits of the distribution.
   The defaults correspond to the \emph{standard beta distribution}
   where the response lies between 0 and 1.
   }
 
 
-  \item{lmu, lphi}{ 
-  Link function for the mean and precision parameters. 
+  \item{lmu, lphi}{
+  Link function for the mean and precision parameters.
   The values \eqn{A} and \eqn{B} are extracted from the
   \code{min} and \code{max} arguments of \code{\link{extlogit}}.
   Consequently, only \code{\link{extlogit}} is allowed.
@@ -40,14 +41,14 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
 
 
   }
-  \item{imethod, zero}{
+  \item{gprobs.y, gphi, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
   }
 }
 \details{
-  The two-parameter beta distribution can be written 
+  The two-parameter beta distribution can be written
   \eqn{f(y) =}
     \deqn{(y-A)^{\mu_1 \phi-1} \times (B-y)^{(1-\mu_1) \phi-1} / [beta(\mu_1
           \phi,(1-\mu_1) \phi) \times (B-A)^{\phi-1}]}{%
@@ -93,7 +94,7 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
 
 
 }
-\references{ 
+\references{
   Ferrari, S. L. P. and Francisco C.-N. (2004)
   Beta regression for modelling rates and proportions.
   \emph{Journal of Applied Statistics},
@@ -111,12 +112,12 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
   The response must have values in the interval (\eqn{A}, \eqn{B}).
   The user currently needs to manually choose \code{lmu} to match
   the input of arguments \code{A} and \code{B}, e.g.,
-  with \code{\link{extlogit}}; see the example below. 
+  with \code{\link{extlogit}}; see the example below.
 
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{betaR}},
 % \code{\link{zoibetaR}},
   \code{\link[stats:Beta]{Beta}},
@@ -155,3 +156,7 @@ coef(fit, matrix = TRUE)
 }
 \keyword{models}
 \keyword{regression}
+%      imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
+
+
+
diff --git a/man/betageomUC.Rd b/man/betageomUC.Rd
index 10a9aff..4e32c87 100644
--- a/man/betageomUC.Rd
+++ b/man/betageomUC.Rd
@@ -62,7 +62,7 @@ rbetageom(n, shape1, shape2)
 
 
 % See zz code{link{betageomzz}}, the \pkg{VGAM} family function
-% for estimating the parameters, 
+% for estimating the parameters,
 % for the formula of the probability density function and other details.
 
 }
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
index daba7df..c13c460 100644
--- a/man/betageometric.Rd
+++ b/man/betageometric.Rd
@@ -13,7 +13,7 @@ betageometric(lprob = "logit", lshape = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lprob, lshape}{ 
+  \item{lprob, lshape}{
   Parameter link functions applied to the
   parameters \eqn{p}{prob} and \eqn{\phi}{phi}
   (called \code{prob} and \code{shape} below).
@@ -22,15 +22,15 @@ betageometric(lprob = "logit", lshape = "loge",
 
 
   }
-  \item{iprob, ishape}{ 
-  Numeric. 
+  \item{iprob, ishape}{
+  Numeric.
   Initial values for the two parameters.
   A \code{NULL} means a value is computed internally.
 
 
   }
-  \item{moreSummation}{ 
-  Integer, of length 2. 
+  \item{moreSummation}{
+  Integer, of length 2.
   When computing the expected information matrix a series summation from
   0 to \code{moreSummation[1]*max(y)+moreSummation[2]} is made, in which the
   upper limit is an approximation to infinity.
@@ -38,8 +38,8 @@ betageometric(lprob = "logit", lshape = "loge",
 
 
   }
-  \item{tolerance}{ 
-  Positive numeric. 
+  \item{tolerance}{
+  Positive numeric.
   When all terms are less than this then the series is deemed to have
   converged.
 
@@ -60,13 +60,13 @@ betageometric(lprob = "logit", lshape = "loge",
   \eqn{p}{prob} are generated from a standard beta distribution with
   shape parameters \code{shape1} and \code{shape2}.
   The parameterization here is to focus on the parameters
-  \eqn{p}{prob} and 
+  \eqn{p}{prob} and
   \eqn{\phi = 1/(shape1+shape2)}{phi = 1/(shape1+shape2)},
   where \eqn{\phi}{phi} is \code{shape}.
   The default link functions for these ensure that the appropriate range
   of the parameters is maintained.
   The mean of \eqn{Y} is
-  \eqn{E(Y) = shape2 / (shape1-1) = (1-p) / (p-\phi)}{E(Y) = 
+  \eqn{E(Y) = shape2 / (shape1-1) = (1-p) / (p-\phi)}{E(Y) =
        shape2 / (shape1-1) = (1-prob) / (prob-phi)}
   if \code{shape1 > 1}, and if so, then this is returned as
   the fitted values.
@@ -113,7 +113,7 @@ betageometric(lprob = "logit", lshape = "loge",
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{geometric}},
   \code{\link{betaff}},
   \code{\link{rbetageom}}.
diff --git a/man/betanormUC.Rd b/man/betanormUC.Rd
index f32402a..d8ab2b8 100644
--- a/man/betanormUC.Rd
+++ b/man/betanormUC.Rd
@@ -75,7 +75,7 @@ rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
 \author{ T. W. Yee }
 \details{
   The function \code{betauninormal}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   has not yet been written.
 
 
diff --git a/man/betaprime.Rd b/man/betaprime.Rd
index f466bd7..75b364d 100644
--- a/man/betaprime.Rd
+++ b/man/betaprime.Rd
@@ -9,32 +9,34 @@
 
 }
 \usage{
-betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
+betaprime(lshape = "loge", ishape1 = 2, ishape2 = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{ 
+  \item{lshape}{
   Parameter link function applied to the two (positive) shape parameters.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{i1, i2}{
-  Initial values for the first and second shape parameters.
-  A \code{NULL} value means it is obtained in the \code{initialize} slot.
-  Note that \code{i2} is obtained using \code{i1}.
+  \item{ishape1, ishape2, zero}{
+  See
+  \code{\link{CommonVGAMffArguments}}.
 
 
-  }
-  \item{zero}{
-  An integer-valued vector specifying which linear/additive predictors
-  are modelled as intercepts only.  The value must be from the set
-  \{1,2\} corresponding respectively to \code{shape1} and \code{shape2}
-  respectively.  If \code{zero=NULL} then both parameters are modelled
-  with the explanatory variables.
+% Initial values for the first and second shape parameters.
+% A \code{NULL} value means it is obtained in the \code{initialize} slot.
+% Note that \code{ishape2} is obtained using \code{ishape1}.
 
 
   }
+% \item{zero}{
+% An integer-valued vector specifying which linear/additive predictors
+% are modelled as intercepts only.  The value must be from the set
+% \{1,2\} corresponding respectively to \code{shape1} and \code{shape2}
+% respectively.  If \code{zero=NULL} then both parameters are modelled
+% with the explanatory variables.
+% }
 }
 
 %% what is the mean if shape2 < 1?
@@ -50,6 +52,7 @@ betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
   these are returned as the fitted values.
 
 
+
   If \eqn{Y} has a \eqn{Beta(shape1,shape2)} distribution then
   \eqn{Y/(1-Y)} and \eqn{(1-Y)/Y} have a \eqn{Betaprime(shape1,shape2)}
   and \eqn{Betaprime(shape2,shape1)} distribution respectively.
@@ -59,6 +62,7 @@ betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
   distribution.
 
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -70,10 +74,10 @@ betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
 }
 
 %% zz not sure about the JKB reference.
-\references{ 
+\references{
 
-Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995) 
-Chapter 25 of: 
+Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
+Chapter 25 of:
 \emph{Continuous Univariate Distributions},
 2nd edition,
 Volume 2,
@@ -98,7 +102,7 @@ New York: Wiley.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{betaff}},
   \code{\link[stats]{Beta}}.
 
diff --git a/man/biamhcop.Rd b/man/biamhcop.Rd
index f68fe84..9f0dbdb 100644
--- a/man/biamhcop.Rd
+++ b/man/biamhcop.Rd
@@ -46,7 +46,7 @@ biamhcop(lapar = "rhobit", iapar = NULL, imethod = 1, nsimEIM = 250)
   The cumulative distribution function is
   \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = y_1 y_2
           / ( 1 - \alpha (1 - y_1) (1 - y_2) ) }{%
-        P(Y1 < =  y1, Y2 < =  y2)  = 
+        P(Y1 < =  y1, Y2 < =  y2)  =
         y1 * y2 / ( 1 - alpha * (1 - y1) * (1 - y2) ) }
   for \eqn{-1 < \alpha < 1}{-1 < alpha < 1}.
   The support of the function is the unit square.
diff --git a/man/biclaytoncop.Rd b/man/biclaytoncop.Rd
index 2ecc230..73a0ff3 100644
--- a/man/biclaytoncop.Rd
+++ b/man/biclaytoncop.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Clayton Copula (Bivariate) Family Function }
 \description{
-  Estimate the correlation parameter of 
+  Estimate the correlation parameter of
   the (bivariate) Clayton copula
   distribution by maximum likelihood estimation.
 
diff --git a/man/biclaytoncopUC.Rd b/man/biclaytoncopUC.Rd
index 1e51b3a..5b192c4 100644
--- a/man/biclaytoncopUC.Rd
+++ b/man/biclaytoncopUC.Rd
@@ -5,7 +5,7 @@
 \title{Clayton Copula (Bivariate) Distribution}
 \description{
   Density and random generation
-  for the (one parameter) bivariate 
+  for the (one parameter) bivariate
   Clayton copula distribution.
 
 
diff --git a/man/bifgmcop.Rd b/man/bifgmcop.Rd
index 9a4f424..2817633 100644
--- a/man/bifgmcop.Rd
+++ b/man/bifgmcop.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Farlie-Gumbel-Morgenstern's Bivariate Distribution Family Function }
 \description{
-  Estimate the association parameter of 
+  Estimate the association parameter of
   Farlie-Gumbel-Morgenstern's bivariate
   distribution by maximum likelihood estimation.
 
@@ -24,7 +24,7 @@ bifgmcop(lapar = "rhobit", iapar = NULL, imethod = 1)
   The cumulative distribution function is
   \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = y_1 y_2
              ( 1 + \alpha (1 - y_1) (1 - y_2) ) }{%
-        P(Y1 <= y1, Y2 <= y2) = 
+        P(Y1 <= y1, Y2 <= y2) =
         y1 * y2 * ( 1 + alpha * (1 - y1) * (1 - y2) ) }
   for \eqn{-1 < \alpha < 1}{-1 < alpha < 1}.
   The support of the function is the unit square.
diff --git a/man/bifgmcopUC.Rd b/man/bifgmcopUC.Rd
index b9ecffb..ebcb1be 100644
--- a/man/bifgmcopUC.Rd
+++ b/man/bifgmcopUC.Rd
@@ -6,7 +6,7 @@
 \title{Farlie-Gumbel-Morgenstern's Bivariate Distribution}
 \description{
   Density, distribution function, and random
-  generation for the (one parameter) bivariate 
+  generation for the (one parameter) bivariate
   Farlie-Gumbel-Morgenstern's distribution.
 
 
diff --git a/man/bifgmexp.Rd b/man/bifgmexp.Rd
index d4bc2e1..52d761f 100644
--- a/man/bifgmexp.Rd
+++ b/man/bifgmexp.Rd
@@ -52,7 +52,7 @@ bifgmexp(lapar = "rhobit", iapar = NULL, tola0 = 0.01, imethod = 1)
   \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = e^{-y_1-y_2}
              ( 1 + \alpha [1 - e^{-y_1}] [1 - e^{-y_2}] ) + 1 -
                e^{-y_1} - e^{-y_2} }{%
-        P(Y1 <= y1, Y2 <= y2) = 
+        P(Y1 <= y1, Y2 <= y2) =
         exp(-y1-y2) * ( 1 + alpha * [1 - exp(-y1)] * [1 - exp(-y2)] ) + 1 -
            exp(-y1) - exp(-y2) }
   for \eqn{\alpha}{alpha} between \eqn{-1} and \eqn{1}.
diff --git a/man/bifrankcop.Rd b/man/bifrankcop.Rd
index be37172..6cbc209 100644
--- a/man/bifrankcop.Rd
+++ b/man/bifrankcop.Rd
@@ -36,7 +36,7 @@ bifrankcop(lapar = "loge", iapar = 2, nsimEIM = 250)
   \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = H_{\alpha}(y_1,y_2) =
              \log_{\alpha} [1 + (\alpha^{y_1}-1)(\alpha^{y_2}-1)/
                      (\alpha-1)] }{%
-        P(Y1 <= y1, Y2 <= y2) = 
+        P(Y1 <= y1, Y2 <= y2) =
         H_{alpha}(y1,y2) =  log_{alpha} [1 + (alpha^(y1)-1)*(alpha^(y2)-1)/
                      (alpha-1)] }
   for \eqn{\alpha \ne 1}{alpha != 1}.
@@ -44,7 +44,7 @@ bifrankcop(lapar = "loge", iapar = 2, nsimEIM = 250)
   The support of the function is the unit square.
 
 
-  When \eqn{0 < \alpha < 1}{0<alpha<1} the probability density function 
+  When \eqn{0 < \alpha < 1}{0<alpha<1} the probability density function
   \eqn{h_{\alpha}(y_1,y_2)}{h_{alpha}(y_1,y_2)}
   is symmetric with respect to the lines \eqn{y_2=y_1}{y2=y1}
   and \eqn{y_2=1-y_1}{y2=1-y1}.
diff --git a/man/bigumbelIexp.Rd b/man/bigumbelIexp.Rd
index ee267fe..e8eaaef 100644
--- a/man/bigumbelIexp.Rd
+++ b/man/bigumbelIexp.Rd
@@ -40,7 +40,7 @@ bigumbelIexp(lapar = "identitylink", iapar = NULL, imethod = 1)
   The cumulative distribution function is
   \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = e^{-y_1-y_2+\alpha y_1 y_2}
              + 1  - e^{-y_1} - e^{-y_2} }{%
-        P(Y1 <= y1, Y2 <= y2) = 
+        P(Y1 <= y1, Y2 <= y2) =
         exp(-y1-y2+alpha*y1*y2) + 1 - exp(-y1) - exp(-y2) }
   for real \eqn{\alpha}{alpha}.
   The support of the function is for \eqn{y_1>0}{y1>0} and
diff --git a/man/bilogistic.Rd b/man/bilogistic.Rd
index 6bb81f3..868508c 100644
--- a/man/bilogistic.Rd
+++ b/man/bilogistic.Rd
@@ -29,7 +29,7 @@ bilogistic(llocation = "identitylink", lscale = "loge",
   \item{lscale}{
   Parameter link function applied to both
   (positive) scale parameters \eqn{s_1}{s1} and \eqn{s_2}{s2}.
-  See \code{\link{Links}} for more choices. 
+  See \code{\link{Links}} for more choices.
 
 
   }
@@ -60,7 +60,7 @@ bilogistic(llocation = "identitylink", lscale = "loge",
   }
 }
 \details{
-  The four-parameter bivariate logistic distribution 
+  The four-parameter bivariate logistic distribution
   has a density that can be written as
   \deqn{f(y_1,y_2;l_1,s_1,l_2,s_2) = 2 \frac{\exp[-(y_1-l_1)/s_1 -
       (y_2-l_2)/s_2]}{
@@ -69,20 +69,20 @@ bilogistic(llocation = "identitylink", lscale = "loge",
         f(y1,y2;l1,s1,l2,s2) = 2 * exp[-(y1-l1)/s1 - (y1-l1)/s1] /
       [s1 * s2 * ( 1 + exp[-(y1-l1)/s1] + exp[-(y2-l2)/s2] )^3]
  }
- where \eqn{s_1>0}{s1>0} \eqn{s_2>0}{s2>0} are the scale parameters,
+ where \eqn{s_1>0}{s1>0} and \eqn{s_2>0}{s2>0} are the scale parameters,
  and \eqn{l_1}{l1} and \eqn{l_2}{l2} are the location parameters.
  Each of the two responses are unbounded, i.e.,
  \eqn{-\infty<y_j<\infty}{-Inf<y_j<Inf}.
  The mean of \eqn{Y_1}{Y1} is \eqn{l_1}{l1} etc.
  The fitted values are returned in a 2-column matrix.
  The cumulative distribution function is
-  \deqn{F(y_1,y_2;l_1,s_1,l_2,s_2) = 
+  \deqn{F(y_1,y_2;l_1,s_1,l_2,s_2) =
       \left( 1 + \exp[-(y_1-l_1)/s_1] + \exp[-(y_2-l_2)/s_2]
       \right)^{-1}}{%
         F(y1,y2;l1,s1,l2,s2) = 1 / (1 + exp[-(y1-l1)/s1] + exp[-(y2-l2)/s2])
  }
  The marginal distribution of \eqn{Y_1}{Y1} is
-  \deqn{P(Y_1 \leq y_1) = F(y_1;l_1,s_1) = 
+  \deqn{P(Y_1 \leq y_1) = F(y_1;l_1,s_1) =
       \left( 1 + \exp[-(y_1-l_1)/s_1] \right)^{-1} .}{%
         P(Y1 <= y1) = F(y1;l1,s1) = 1 / (1 + exp[-(y1-l1)/s1]).
  }
@@ -93,7 +93,7 @@ bilogistic(llocation = "identitylink", lscale = "loge",
  \eqn{\eta_3=l_2}{eta3=l2},
  \eqn{\eta_4=\log(s_2)}{eta4=log(s2)} are the linear/additive predictors.
 
- 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 484f929..4d0768c 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -19,7 +19,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lmu}{
-  Link function applied to the two marginal probabilities. 
+  Link function applied to the two marginal probabilities.
   See \code{\link{Links}} for more choices.
   See the note below.
 
@@ -131,7 +131,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
   generic function.
 
 
-} 
+}
 \references{
   McCullagh, P. and Nelder, J. A. (1989)
   \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
@@ -144,8 +144,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
 
 
   Palmgren, J. (1989)
-  \emph{Regression Models for Bivariate Binary Responses}. 
-  Technical Report no. 101, Department of Biostatistics, 
+  \emph{Regression Models for Bivariate Binary Responses}.
+  Technical Report no. 101, Department of Biostatistics,
   University of Washington, Seattle.
 
 
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
index 5529a3e..5504649 100644
--- a/man/binom2.rho.Rd
+++ b/man/binom2.rho.Rd
@@ -71,7 +71,8 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
   \item{rho}{
   Numeric vector.
   Values are recycled to the needed length,
-  and ought to be in range.
+  and ought to be in range, which is \eqn{(-1, 1)}.
+
 
 
   }
@@ -200,7 +201,7 @@ Freedman, D. A. and Sekhon, J. S. (2010)
 \seealso{
   \code{\link{rbinom2.rho}},
   \code{\link{rhobit}},
-  \code{\link{pnorm2}},
+  \code{\link{pbinorm}},
   \code{\link{binom2.or}},
   \code{\link{loglinb2}},
   \code{\link{coalminers}},
@@ -210,6 +211,7 @@ Freedman, D. A. and Sekhon, J. S. (2010)
 
 
 
+% \code{\link{pnorm2}},
 
 }
 \examples{
diff --git a/man/binom2.rhoUC.Rd b/man/binom2.rhoUC.Rd
index b397787..24ee910 100644
--- a/man/binom2.rhoUC.Rd
+++ b/man/binom2.rhoUC.Rd
@@ -42,12 +42,12 @@ dbinom2.rho(mu1,
     The correlation parameter.
     Must be numeric and lie between \eqn{-1} and \eqn{1}.
     The default value of zero means the responses are uncorrelated.
-    
+
   }
   \item{exchangeable}{
    Logical. If \code{TRUE}, the two marginal probabilities are constrained
    to be equal.
-    
+
   }
   \item{twoCols}{
    Logical.
@@ -103,7 +103,7 @@ dbinom2.rho(mu1,
 \examples{
 (myrho <- rhobit(2, inverse = TRUE))  # Example 1
 ymat <- rbinom2.rho(nn <- 2000, mu1 = 0.8, rho = myrho, exch = TRUE)
-(mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))                                     
+(mytab <- table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))
 fit <- vglm(ymat ~ 1, binom2.rho(exch = TRUE))
 coef(fit, matrix = TRUE)
 
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index 00e80b5..82119b5 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -25,7 +25,7 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 
 
   }
-  \item{dispersion}{ 
+  \item{dispersion}{
   Dispersion parameter. By default, maximum likelihood is used to
   estimate the model because it is known.  However, the user can specify
   \code{dispersion = 0} to have it estimated, or else specify a known
@@ -33,7 +33,7 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 
 
   }
-  \item{multiple.responses}{ 
+  \item{multiple.responses}{
   Multivariate response? If \code{TRUE}, then the response is interpreted
   as \eqn{M} independent binary responses, where \eqn{M} is the number
   of columns of the response matrix. In this case, the response matrix
@@ -52,7 +52,7 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 
 
   }
-  \item{onedpar}{ 
+  \item{onedpar}{
   One dispersion parameter? If \code{multiple.responses}, then a separate dispersion
   parameter will be computed for each response (column), by default.
   Setting \code{onedpar = TRUE} will pool them so that there is only one
@@ -60,7 +60,7 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 
 
   }
-  \item{parallel}{ 
+  \item{parallel}{
   A logical or formula. Used only if \code{multiple.responses} is \code{TRUE}.  This
   argument allows for the parallelism assumption whereby the regression
   coefficients for a variable is constrained to be equal over the \eqn{M}
@@ -70,7 +70,7 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   An integer-valued vector specifying which linear/additive predictors
   are modelled as intercepts only.  The values must be from the set
   \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
@@ -79,7 +79,7 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 
 
   }
-  \item{earg.link}{ 
+  \item{earg.link}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
 
diff --git a/man/binormal.Rd b/man/binormal.Rd
index 861a8cc..0e3faa2 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -30,12 +30,12 @@ binormal(lmean1 = "identitylink", lmean2 = "identitylink",
 
 
   }
-  \item{imean1, imean2, isd1, isd2, irho, imethod, zero}{ 
+  \item{imean1, imean2, isd1, isd2, irho, imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
   }
-  \item{eq.mean, eq.sd}{ 
+  \item{eq.mean, eq.sd}{
   Logical or formula.
   Constrains the means or the standard deviations to be equal.
 
@@ -99,10 +99,11 @@ binormal(lmean1 = "identitylink", lmean2 = "identitylink",
 \seealso{
   \code{\link{uninormal}},
   \code{\link{gaussianff}},
-  \code{\link{pnorm2}},
+  \code{\link{pbinorm}},
   \code{\link{bistudentt}}.
 
 
+% \code{\link{pnorm2}},
 }
 \examples{
 set.seed(123); nn <- 1000
@@ -119,10 +120,10 @@ summary(fit1)
 var1  <- loge(2 * predict(fit1)[, "loge(sd1)"], inverse = TRUE)
 var2  <- loge(2 * predict(fit1)[, "loge(sd2)"], inverse = TRUE)
 cov12 <- rhobit(predict(fit1)[, "rhobit(rho)"], inverse = TRUE)
-head(with(bdata, pnorm2(y1, y2,
-                        mean1 = predict(fit1)[, "mean1"],
-                        mean2 = predict(fit1)[, "mean2"],
-                        var1 = var1, var2 = var2, cov12 = cov12)))
+head(with(bdata, pbinorm(y1, y2,
+                         mean1 = predict(fit1)[, "mean1"],
+                         mean2 = predict(fit1)[, "mean2"],
+                         var1 = var1, var2 = var2, cov12 = cov12)))
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/binormalUC.Rd b/man/binormalUC.Rd
index a990b78..9c0f986 100644
--- a/man/binormalUC.Rd
+++ b/man/binormalUC.Rd
@@ -49,7 +49,7 @@ rbinorm(n,      mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 
 % \item{rho}{
 % See \code{\link{binormal}}.
-% } 
+% }
 
 
 
@@ -124,11 +124,11 @@ rbinorm(n,      mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 
 
 % \code{dbinorm()}'s arguments might change!
-% Currently they differ from \code{pbinorm()} 
+% Currently they differ from \code{pbinorm()}
 % and \code{rbinorm()}, so use the full argument name
 % to future-proof possible changes!
 
- 
+
 }
 
 
diff --git a/man/binormalcop.Rd b/man/binormalcop.Rd
index 0eb1d64..8c5778b 100644
--- a/man/binormalcop.Rd
+++ b/man/binormalcop.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Gaussian Copula (Bivariate) Family Function }
 \description{
-  Estimate the correlation parameter of 
+  Estimate the correlation parameter of
   the (bivariate) Gaussian copula
   distribution by maximum likelihood estimation.
 
@@ -33,7 +33,7 @@ binormalcop(lrho = "rhobit", irho = NULL, imethod = 1,
   The cumulative distribution function is
   \deqn{P(Y_1 \leq y_1, Y_2 \leq y_2) = \Phi_2
              ( \Phi^{-1}(y_1), \Phi^{-1}(y_2); \rho ) }{%
-        P(Y1 <= y1, Y2 <= y2) = 
+        P(Y1 <= y1, Y2 <= y2) =
         Phi_2(\Phi^(-1)(y_1), \Phi^(-1)(y_2); \rho)}
   for \eqn{-1 < \rho < 1}{-1 < rho < 1},
   \eqn{\Phi_2}{Phi_2} is the cumulative distribution function
diff --git a/man/binormcopUC.Rd b/man/binormcopUC.Rd
index e057634..243660f 100644
--- a/man/binormcopUC.Rd
+++ b/man/binormcopUC.Rd
@@ -7,7 +7,7 @@
 \description{
   Density, distribution function,
   and random generation
-  for the (one parameter) bivariate 
+  for the (one parameter) bivariate
   Gaussian copula distribution.
 
 
diff --git a/man/bisa.Rd b/man/bisa.Rd
index 88defe3..146dbcf 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -13,7 +13,7 @@ bisa(lscale = "loge", lshape = "loge", iscale = 1,
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{nowarning}{ Logical. Suppress a warning? 
+  \item{nowarning}{ Logical. Suppress a warning?
   Ignored for \pkg{VGAM} 0.9-7 and higher.
 
 
@@ -38,8 +38,8 @@ bisa(lscale = "loge", lshape = "loge", iscale = 1,
   \item{imethod}{
   An integer with value \code{1} or \code{2} or \code{3} which
   specifies the initialization method. If failure to converge occurs
-  try the other value, or else specify a value for 
-  \code{ishape} and/or \code{iscale}. 
+  try the other value, or else specify a value for
+  \code{ishape} and/or \code{iscale}.
 
 
   }
@@ -57,11 +57,11 @@ bisa(lscale = "loge", lshape = "loge", iscale = 1,
   }
 }
 \details{
-  The (two-parameter) Birnbaum-Saunders distribution 
+  The (two-parameter) Birnbaum-Saunders distribution
   has a cumulative distribution function that can be written as
   \deqn{F(y;a,b) = \Phi[ \xi(y/b)/a] }{%
         F(y;a,k) = pnorm[xi(y/b)/a] }
-  where \eqn{\Phi(\cdot)}{pnorm()} is the 
+  where \eqn{\Phi(\cdot)}{pnorm()} is the
   cumulative distribution function of a standard normal
   (see \code{\link[stats:Normal]{pnorm}}),
   \eqn{\xi(t) = \sqrt{t} - 1 / \sqrt{t}}{xi(t) = t^(0.5) - t^(-0.5)},
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index fa42efd..1bc4e8d 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -55,7 +55,7 @@ rbisa(n, scale = 1, shape)
   The Birnbaum-Saunders distribution
   is a distribution which is used in survival analysis.
   See \code{\link{bisa}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   for more details.
 
 
diff --git a/man/bistudentt.Rd b/man/bistudentt.Rd
index 376820d..863225e 100644
--- a/man/bistudentt.Rd
+++ b/man/bistudentt.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Bivariate Student-t Family Function }
 \description{
-  Estimate the degrees of freedom and correlation parameters of 
+  Estimate the degrees of freedom and correlation parameters of
   the (bivariate) Student-t
   distribution by maximum likelihood estimation.
 
diff --git a/man/bistudenttUC.Rd b/man/bistudenttUC.Rd
index de7b1dc..4577750 100644
--- a/man/bistudenttUC.Rd
+++ b/man/bistudenttUC.Rd
@@ -42,7 +42,7 @@ dbistudentt(x1, x2, df, rho = 0, log = FALSE)
 
 % \item{rho}{
 % See \code{\link{bistudenttal}}.
-% } 
+% }
 
 
 
diff --git a/man/bmi.nz.Rd b/man/bmi.nz.Rd
index 8bba559..8e71d5d 100644
--- a/man/bmi.nz.Rd
+++ b/man/bmi.nz.Rd
@@ -3,7 +3,7 @@
 \docType{data}
 \title{ Body Mass Index of New Zealand Adults Data}
 \description{
-  The body mass indexes and ages from an approximate random 
+  The body mass indexes and ages from an approximate random
   sample of 700 New Zealand adults.
 
 }
@@ -12,17 +12,17 @@
   A data frame with 700 observations on the following 2 variables.
   \describe{
     \item{age}{a numeric vector; their age (years). }
-    \item{BMI}{a numeric vector; their body mass indexes, which is 
+    \item{BMI}{a numeric vector; their body mass indexes, which is
     their weight divided by the square of their height
     (kg / \eqn{m^2}{m^2}).}
   }
 }
 \details{
   They are a random sample from the Fletcher Challenge/Auckland Heart and
-  Health survey conducted in the early 1990s. 
+  Health survey conducted in the early 1990s.
 
 
-  There are some outliers in the data set. 
+  There are some outliers in the data set.
 
 
   A variable \code{gender} would be useful, and may be added later.
diff --git a/man/borel.tanner.Rd b/man/borel.tanner.Rd
index 95c6d3d..692771b 100644
--- a/man/borel.tanner.Rd
+++ b/man/borel.tanner.Rd
@@ -48,7 +48,7 @@ borel.tanner(Qsize = 1, link = "logit", imethod = 1)
   \deqn{f(y;a) =
   \frac{ Q }{(y-Q)!} y^{y-Q-1} a^{y-Q}  \exp(-ay)
   }{%
-  f(y;a) = 
+  f(y;a) =
   (Q / (y-Q)!) * y^(y-Q-1) * a^(y-Q) * exp(-ay)}
   where \eqn{y=Q,Q+1,Q+2,\ldots}{y=Q,Q+1,Q+2,...}.
   The case \eqn{Q=1} corresponds to the \emph{Borel} distribution
@@ -107,7 +107,7 @@ Boston, MA, USA: Birkhauser.
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{rbort}},
   \code{\link{poissonff}},
   \code{\link{felix}}.
diff --git a/man/brat.Rd b/man/brat.Rd
index 61ed918..3737247 100644
--- a/man/brat.Rd
+++ b/man/brat.Rd
@@ -27,7 +27,7 @@ brat(refgp = "last", refvalue = 1, ialpha = 1)
 
   }
   \item{ialpha}{
-  Initial values for the \eqn{\alpha}{alpha}s. 
+  Initial values for the \eqn{\alpha}{alpha}s.
   These are recycled to the appropriate length.
 
   }
@@ -73,7 +73,7 @@ brat(refgp = "last", refvalue = 1, ialpha = 1)
 
 
 }
-\references{ 
+\references{
 Agresti, A. (2013)
 \emph{Categorical Data Analysis},
 3rd ed. Hoboken, NJ, USA: Wiley.
@@ -92,7 +92,7 @@ than this function.
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The function \code{\link{Brat}} is useful for coercing
   a \eqn{M+1} by \eqn{M+1} matrix of counts into a one-row
   matrix suitable for \code{brat}. Diagonal elements are
@@ -148,8 +148,8 @@ summary(fit)
 c(0, coef(fit))  # Log-abilities (in order of "journal")
 c(1, Coef(fit))  # Abilities (in order of "journal")
 fitted(fit)     # Probabilities of winning in awkward form
-(check <- InverseBrat(fitted(fit)))  # Probabilities of winning 
-check + t(check)  # Should be 1's in the off-diagonals 
+(check <- InverseBrat(fitted(fit)))  # Probabilities of winning
+check + t(check)  # Should be 1's in the off-diagonals
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/bratUC.Rd b/man/bratUC.Rd
index b941fb8..7fd7abc 100644
--- a/man/bratUC.Rd
+++ b/man/bratUC.Rd
@@ -32,7 +32,7 @@ Brat(mat, ties = 0 * mat, string = c(">", "=="), whitespace = FALSE)
 
   }
   \item{string}{
-  Character. 
+  Character.
   The matrices are labelled with the first value of the
   descriptor, e.g., \code{"NZ > Oz"} `means' NZ beats
   Australia in rugby.  Suggested alternatives include
@@ -46,7 +46,7 @@ Brat(mat, ties = 0 * mat, string = c(">", "=="), whitespace = FALSE)
     and after \code{string}; it generally enhances readability.
     See \code{\link{CommonVGAMffArguments}} for some similar-type
     information.
- 
+
 
   }
 }
@@ -88,7 +88,7 @@ Agresti, A. (2013)
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{brat}},
   \code{\link{bratt}},
   \code{InverseBrat}.
diff --git a/man/bratt.Rd b/man/bratt.Rd
index 0bd5381..2070b30 100644
--- a/man/bratt.Rd
+++ b/man/bratt.Rd
@@ -26,13 +26,13 @@ bratt(refgp = "last", refvalue = 1, ialpha = 1, i0 = 0.01)
 
   }
   \item{ialpha}{
-  Initial values for the \eqn{\alpha}{alpha}s. 
+  Initial values for the \eqn{\alpha}{alpha}s.
   These are recycled to the appropriate length.
 
 
   }
   \item{i0}{
-  Initial value for \eqn{\alpha_0}{alpha_0}. 
+  Initial value for \eqn{\alpha_0}{alpha_0}.
   If convergence fails, try another positive value.
 
 
@@ -84,7 +84,7 @@ bratt(refgp = "last", refvalue = 1, ialpha = 1, i0 = 0.01)
 
 
 }
-\references{ 
+\references{
 
   Torsney, B. (2004)
   Fitting Bradley Terry models using a multiplicative algorithm.
@@ -95,7 +95,7 @@ bratt(refgp = "last", refvalue = 1, ialpha = 1, i0 = 0.01)
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The function \code{\link{Brat}} is useful for coercing
   a \eqn{M} by \eqn{M} matrix of counts into a one-row
   matrix suitable for \code{bratt}.  Diagonal elements
@@ -124,10 +124,10 @@ bratt(refgp = "last", refvalue = 1, ialpha = 1, i0 = 0.01)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{brat}},
   \code{\link{Brat}},
-  \code{\link{binomialff}}. 
+  \code{\link{binomialff}}.
 
 
 }
@@ -157,10 +157,10 @@ fit at misc$alpha0  # alpha_0
 
 fitted(fit)  # Probabilities of winning and tying, in awkward form
 predict(fit)
-(check <- InverseBrat(fitted(fit)))    # Probabilities of winning 
-qprob <- attr(fitted(fit), "probtie")  # Probabilities of a tie 
+(check <- InverseBrat(fitted(fit)))    # Probabilities of winning
+qprob <- attr(fitted(fit), "probtie")  # Probabilities of a tie
 qprobmat <- InverseBrat(c(qprob), NCo = nrow(ties))  # Probabilities of a tie
-check + t(check) + qprobmat  # Should be 1s in the off-diagonals 
+check + t(check) + qprobmat  # Should be 1s in the off-diagonals
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/calibrate-methods.Rd b/man/calibrate-methods.Rd
index 5146fb1..57efef4 100644
--- a/man/calibrate-methods.Rd
+++ b/man/calibrate-methods.Rd
@@ -4,7 +4,7 @@
 \alias{calibrate,Coef.qrrvglm-method}
 \title{ Calibration for Constrained Regression Models }
 \description{
-  \code{calibrate} is a generic function applied to QRR-VGLMs and 
+  \code{calibrate} is a generic function applied to QRR-VGLMs and
   RR-VGAMs etc.
 
 }
diff --git a/man/calibrate.Rd b/man/calibrate.Rd
index 25c44ca..d2972dc 100644
--- a/man/calibrate.Rd
+++ b/man/calibrate.Rd
@@ -36,7 +36,7 @@ calibrate(object, ...)
   It can be loosely thought of as the opposite of \code{\link{predict}}
   (which takes an \bold{X} and returns a \bold{Y}.)
 
-  
+
 }
 \value{
   In general, given a new response \bold{Y}, the explanatory
@@ -74,7 +74,7 @@ calibrate(object, ...)
 hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 set.seed(123)
 p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
-          WaterCon + BareSand + FallTwig + 
+          WaterCon + BareSand + FallTwig +
           CoveMoss + CoveHerb + ReflLux,
           family = poissonff, data = hspider, Rank = 1,
           df1.nl = c(Zoraspin = 2, 1.9),
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
index 084cb4c..5870e83 100644
--- a/man/calibrate.qrrvglm.Rd
+++ b/man/calibrate.qrrvglm.Rd
@@ -124,12 +124,12 @@ Cambridge.
 }
 \author{T. W. Yee}
 \note{
-  Despite the name of this function, CAO models are handled 
+  Despite the name of this function, CAO models are handled
   as well.
 
 
 
-% Despite the name of this function, UQO and CAO models are handled 
+% Despite the name of this function, UQO and CAO models are handled
 
 
 }
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index e46b1da..5e262f9 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -21,20 +21,20 @@ calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
 
 
   }
-  \item{trace}{ 
+  \item{trace}{
     Logical indicating if output should be produced for each iteration. It
     is a good idea to set this argument to be \code{TRUE} since the
     computations are expensive.
 
 
   }
-\item{Method.optim}{ 
+\item{Method.optim}{
   Character. Fed into the \code{method} argument of
   \code{\link[stats]{optim}}.
 
 
   }
-\item{gridSize}{ 
+\item{gridSize}{
   Numeric, recycled to length \code{Rank}.  Controls the resolution
   of the grid used for initial values.  For each latent variable,
   an equally spaced grid of length \code{gridSize} is cast from the
@@ -46,13 +46,13 @@ calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
 
 
   }
-  \item{varI.latvar}{ 
+  \item{varI.latvar}{
   Logical. For CQO objects only, this argument is fed into
   \code{\link{Coef.qrrvglm}}.
 
 
   }
-  \item{\dots}{ 
+  \item{\dots}{
   Avoids an error message for extraneous arguments.
 
 
@@ -103,7 +103,7 @@ On constrained and unconstrained quadratic ordination.
 \examples{
 \dontrun{ hspider[, 1:6] <- scale(hspider[, 1:6])  # Needed when I.tol = TRUE
 set.seed(123)
-p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr, 
+p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
                 Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig +
           CoveMoss + CoveHerb + ReflLux,
diff --git a/man/cao.Rd b/man/cao.Rd
index bc30be3..32b65c5 100644
--- a/man/cao.Rd
+++ b/man/cao.Rd
@@ -20,7 +20,7 @@ cao(formula, family, data = list(),
 %- maybe also 'usage' for other objects documented here.
 \arguments{
 
-  \item{formula}{ 
+  \item{formula}{
     a symbolic description of the model to be fit.  The RHS of the
     formula is used to construct the latent variables, upon which the
     smooths are applied.  All the variables in the formula are used
@@ -31,7 +31,7 @@ cao(formula, family, data = list(),
 
 
   }
-  \item{family}{ 
+  \item{family}{
   a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
   describing what statistical model is to be fitted. This is called a
   ``\pkg{VGAM} family function''.  See \code{\link{CommonVGAMffArguments}}
@@ -49,20 +49,20 @@ cao(formula, family, data = list(),
 
 
   }
-  \item{weights}{ 
+  \item{weights}{
     an optional vector or matrix of (prior) weights to be used in the
     fitting process.  For \code{cao}, this argument currently should
     not be used.
 
 
   }
-  \item{subset}{ 
+  \item{subset}{
     an optional logical vector specifying a subset of observations to
     be used in the fitting process.
 
 
   }
-  \item{na.action}{ 
+  \item{na.action}{
     a function which indicates what should happen when the data contain
     \code{NA}s.  The default is set by the \code{na.action} setting of
     \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
@@ -70,33 +70,33 @@ cao(formula, family, data = list(),
 
 
   }
-  \item{etastart}{ 
+  \item{etastart}{
     starting values for the linear predictors.  It is a \eqn{M}-column
     matrix. If \eqn{M=1} then it may be a vector.  For \code{cao},
     this argument currently should not be used.
 
 
     }
-  \item{mustart}{ 
+  \item{mustart}{
     starting values for the fitted values. It can be a vector or a
     matrix.  Some family functions do not make use of this argument.
     For \code{cao}, this argument currently should not be used.
 
 
   }
-  \item{coefstart}{ 
+  \item{coefstart}{
     starting values for the coefficient vector.  For \code{cao}, this
     argument currently should not be used.
 
 
   }
-  \item{control}{ 
+  \item{control}{
     a list of parameters for controlling the fitting process.
     See \code{\link{cao.control}} for details.
 
 
   }
-  \item{offset}{ 
+  \item{offset}{
     a vector or \eqn{M}-column matrix of offset values.  These are
     \emph{a priori} known and are added to the linear predictors during
     fitting.  For \code{cao}, this argument currently should not be used.
@@ -111,13 +111,13 @@ cao(formula, family, data = list(),
 
 
   }
-  \item{model}{ 
+  \item{model}{
     a logical value indicating whether the \emph{model frame} should be
     assigned in the \code{model} slot.
 
 
   }
-  \item{x.arg, y.arg}{ 
+  \item{x.arg, y.arg}{
     logical values indicating whether the model matrix and response
     vector/matrix used in the fitting process should be assigned in the
     \code{x} and \code{y} slots.  Note the model matrix is the linear
@@ -125,13 +125,13 @@ cao(formula, family, data = list(),
 
 
   }
-  \item{contrasts}{ 
+  \item{contrasts}{
     an optional list. See the \code{contrasts.arg} of
     \code{\link{model.matrix.default}}.
 
 
   }
-  \item{constraints}{ 
+  \item{constraints}{
     an optional list  of constraint matrices.  For \code{cao}, this
     argument currently should not be used.  The components of the list
     must be named with the term it corresponds to (and it must match
@@ -144,25 +144,25 @@ cao(formula, family, data = list(),
 
 
   }
-  \item{extra}{ 
+  \item{extra}{
     an optional list with any extra information that might be needed by
     the family function.  For \code{cao}, this argument currently should
     not be used.
 
 
   }
-  \item{qr.arg}{ 
+  \item{qr.arg}{
     For \code{cao}, this argument currently should not be used.
 
 
   }
-  \item{smart}{ 
+  \item{smart}{
     logical value indicating whether smart prediction
     (\code{\link{smartpred}}) will be used.
 
 
   }
-  \item{\dots}{ 
+  \item{\dots}{
     further arguments passed into \code{\link{cao.control}}.
 
 
@@ -264,7 +264,7 @@ Constrained additive ordination.
 
 
 
-}   
+}
 \author{T. W. Yee}
 \note{
   CAO models are computationally expensive, therefore
@@ -282,7 +282,7 @@ Constrained additive ordination.
 
 }
 \section{Warning }{
-  CAO is very costly to compute. With version 0.7-8 it took 28 minutes on 
+  CAO is very costly to compute. With version 0.7-8 it took 28 minutes on
   a fast machine. I hope to look at ways of speeding things up in the
   future.
 
@@ -332,7 +332,7 @@ Constrained additive ordination.
 \examples{
 \dontrun{
 hspider[, 1:6] <- scale(hspider[, 1:6])  # Standardized environmental vars
-set.seed(149)  # For reproducible results 
+set.seed(149)  # For reproducible results
 ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
            WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
            family = poissonff, data = hspider, Rank = 1,
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
index 3081140..520b2f0 100644
--- a/man/cao.control.Rd
+++ b/man/cao.control.Rd
@@ -25,7 +25,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 %- maybe also 'usage' for other objects documented here.
 \arguments{
 
-  \item{Rank}{ 
+  \item{Rank}{
     The numerical rank \eqn{R} of the model, i.e., the number of latent
     variables.  Currently only \code{Rank = 1} is implemented.
 
@@ -38,7 +38,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
     expense. See \code{\link{vgam.control}} for details.
 
   }
-  \item{criterion}{ 
+  \item{criterion}{
     Convergence criterion. Currently, only one is supported:
     the deviance is minimized.
 
@@ -48,7 +48,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
     convergence.
 
   }
-  \item{Crow1positive}{ 
+  \item{Crow1positive}{
     Logical vector of length \code{Rank} (recycled if
     necessary): are the elements of the first row of \bold{C}
     positive?  For example, if \code{Rank} is 4, then specifying
@@ -78,7 +78,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 %   Logical.
 %   Whether compiled code is used.
 %   For \code{\link{cao}} this must be \code{TRUE}.
-% 
+%
 % }
 
 \item{GradientFunction}{
@@ -100,9 +100,9 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 %   \code{\link[stats]{optim}}.
 %   Used only if \code{GradientFunction} is \code{TRUE}.
 %
-%  } 
+%  }
 
-% \item{Kinit}{ 
+% \item{Kinit}{
 %  Initial values for the index parameters \code{k} in the negative
 %  binomial distribution (one per species).  In general, a smaller number
 %  is preferred over a larger number.  The vector is recycled to the
@@ -127,7 +127,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 
 
 
-% \item{Parscale}{ 
+% \item{Parscale}{
 %  Numerical and positive-valued vector of length \bold{C}
 %  (recycled if necessary).  Passed into \code{optim(...,
 %  control = list(parscale = Parscale))}; the elements of \bold{C} become
@@ -156,7 +156,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 
   }
 
-  \item{Bestof}{ 
+  \item{Bestof}{
     Integer. The best of \code{Bestof} models fitted is returned. This
     argument helps guard against local solutions by (hopefully) finding
     the global solution from many fits. The argument works only when
@@ -167,7 +167,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 
 
   }
-  \item{maxitl}{ 
+  \item{maxitl}{
     Positive integer. Maximum number of
     Newton-Raphson/Fisher-scoring/local-scoring iterations allowed.
 
@@ -177,22 +177,22 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
   See \code{\link{qrrvglm.control}}.
 
   }
-  \item{bf.epsilon}{ 
+  \item{bf.epsilon}{
     Positive numeric. Tolerance used by the modified vector backfitting
     algorithm for testing convergence.
 
   }
-  \item{bf.maxit}{ 
+  \item{bf.maxit}{
     Positive integer.
     Number of backfitting iterations allowed in the compiled code.
   }
-  \item{Maxit.optim}{ 
+  \item{Maxit.optim}{
     Positive integer.
     Number of iterations given to the function \code{\link[stats]{optim}}
     at each of the \code{optim.maxit} iterations.
 
   }
-  \item{optim.maxit}{ 
+  \item{optim.maxit}{
     Positive integer.
     Number of times \code{\link[stats]{optim}} is invoked.
 
@@ -200,13 +200,13 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 %   is fed into \code{\link[stats]{optim}}.
 
     }
-% \item{se.fit}{ 
+% \item{se.fit}{
 %   Logical indicating whether approximate
 %   pointwise standard errors are to be saved on the object.
 %   Currently this argument must have the value \code{FALSE}.
 
 % }
-  \item{sd.sitescores}{ 
+  \item{sd.sitescores}{
     Numeric. Standard deviation of the
     initial values of the site scores, which are generated from
     a normal distribution.
@@ -214,25 +214,25 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
 
 
     }
-  \item{sd.Cinit}{ 
+  \item{sd.Cinit}{
     Standard deviation of the initial values for the elements
     of \bold{C}.
     These are normally distributed with mean zero.
     This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
 
-  }   
-  \item{suppress.warnings}{ 
+  }
+  \item{suppress.warnings}{
     Logical. Suppress warnings?
- 
 
-  }   
-  \item{trace}{ 
+
+  }
+  \item{trace}{
     Logical indicating if output should be produced for each
     iteration. Having the value \code{TRUE} is a good idea for large
     data sets.
 
   }
-  \item{df1.nl, df2.nl}{ 
+  \item{df1.nl, df2.nl}{
     Numeric and non-negative, recycled to length \emph{S}.
     Nonlinear degrees
     of freedom for smooths of the first and second latent variables.
@@ -244,7 +244,7 @@ cao.control(Rank = 1, all.knots = FALSE, criterion = "deviance", Cinit = NULL,
     for \code{df2.nl}.
 
   }
-  \item{spar1, spar2}{ 
+  \item{spar1, spar2}{
     Numeric and non-negative, recycled to length \emph{S}.
     Smoothing parameters of the
     smooths of the first and second latent variables. The larger the value, the
@@ -311,7 +311,7 @@ London: Chapman & Hall.
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{cao}}.
 
 
@@ -360,13 +360,13 @@ persp(ap1, label = TRUE, col = 1:4)
 %            iKvector = 0.1,
 %            iShape = 0.1,
 %            noRRR = ~1,
-%%           Parscale = 1, 
+%%           Parscale = 1,
 %            SmallNo = 5e-13,
-%            Use.Init.Poisson.QO = TRUE, 
+%            Use.Init.Poisson.QO = TRUE,
 %            Bestof = if (length(Cinit)) 1 else 10, maxitl = 40,
 %            bf.epsilon = 1.0e-7, bf.maxit = 40,
 %            Maxit.optim = 250, optim.maxit = 20,
-%%           se.fit = FALSE, 
+%%           se.fit = FALSE,
 %            sd.sitescores = 1,
 %            sd.Cinit = 0.02, trace = TRUE,
 %%            df1.nl = 2.5, spar1 = 0, ...)
diff --git a/man/cardUC.Rd b/man/cardUC.Rd
index ec3eda8..00704db 100644
--- a/man/cardUC.Rd
+++ b/man/cardUC.Rd
@@ -73,13 +73,13 @@ rcard(n, mu, rho, ...)
 }
 %\references{ }
 \author{ Thomas W. Yee and Kai Huang }
-\note{ 
+\note{
   Convergence problems might occur with \code{rcard}.
 
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{cardioid}}.
 
 
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index c6af013..0164cf8 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -35,13 +35,13 @@ cardioid(lmu = extlogit(min = 0, max = 2*pi),
 
 }
 \details{
-  The two-parameter cardioid distribution 
+  The two-parameter cardioid distribution
   has a density that can be written as
   \deqn{f(y;\mu,\rho) = \frac{1}{2\pi}
         \left(1 + 2\, \rho \cos(y - \mu) \right) }{%
         f(y;mu,rho) = (1 + 2*rho*cos(y-mu)) / (2*pi)}
   where \eqn{0 < y < 2\pi}{0 < y < 2*pi},
-  \eqn{0 < \mu < 2\pi}{0 < mu < 2*pi}, and 
+  \eqn{0 < \mu < 2\pi}{0 < mu < 2*pi}, and
   \eqn{-0.5 < \rho < 0.5}{-0.5 < rho < 0.5} is the concentration parameter.
  The default link functions enforce the range constraints
  of the parameters.
@@ -64,7 +64,7 @@ cardioid(lmu = extlogit(min = 0, max = 2*pi),
 
 
 }
-\references{ 
+\references{
 
 Jammalamadaka, S. R. and SenGupta, A. (2001)
 \emph{Topics in Circular Statistics},
@@ -93,7 +93,7 @@ Singapore: World Scientific.
 
 
   \pkg{CircStats} and \pkg{circular} currently have a lot more
-  R functions for circular data than the \pkg{VGAM} package. 
+  R functions for circular data than the \pkg{VGAM} package.
 
 
 }
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
index 4b4c098..6db0d62 100644
--- a/man/cauchit.Rd
+++ b/man/cauchit.Rd
@@ -78,7 +78,7 @@ cauchit(theta, bvalue = .Machine$double.eps,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{logit}},
     \code{\link{probit}},
     \code{\link{cloglog}},
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index 5cb17d9..53dca53 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -18,20 +18,20 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{llocation, lscale}{ 
+  \item{llocation, lscale}{
   Parameter link functions for the location parameter \eqn{a}{a}
   and the scale parameter \eqn{b}{b}.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{ilocation, iscale}{ 
+  \item{ilocation, iscale}{
   Optional initial value for \eqn{a}{a} and \eqn{b}{b}.
   By default, an initial value is chosen internally for each.
 
 
   }
-  \item{imethod}{ 
+  \item{imethod}{
   Integer, either 1 or 2 or 3.
   Initial method, three algorithms are implemented.
   The user should try all possible values to help avoid converging
@@ -59,7 +59,7 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
   }
 }
 \details{
-  The Cauchy distribution has density function 
+  The Cauchy distribution has density function
  \deqn{f(y;a,b) = \left\{ \pi  b [1 + ((y-a)/b)^2] \right\}^{-1} }{%
        f(y;a,b) = 1 / [pi * b * [1 + ((y-a)/b)^2]] }
   where \eqn{y} and \eqn{a} are real and finite,
@@ -96,7 +96,7 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
 
 }
 
-\references{ 
+\references{
 
 Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
@@ -136,7 +136,7 @@ Observed versus expected Fisher information.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link[stats:Cauchy]{Cauchy}},
   \code{\link{cauchit}},
   \code{\link{studentt}},
diff --git a/man/cdf.lmscreg.Rd b/man/cdf.lmscreg.Rd
index 24c0dd4..a470a9d 100644
--- a/man/cdf.lmscreg.Rd
+++ b/man/cdf.lmscreg.Rd
@@ -11,7 +11,7 @@ cdf.lmscreg(object, newdata = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{object}{ A \pkg{VGAM} quantile regression model, i.e., 
+  \item{object}{ A \pkg{VGAM} quantile regression model, i.e.,
   an object produced by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}} with a family function beginning with
   \code{"lms."}.
@@ -24,14 +24,14 @@ cdf.lmscreg(object, newdata = NULL, ...)
 
   }
   \item{\dots}{ Parameters which are passed into functions such as
-  \code{cdf.lms.yjn}. 
+  \code{cdf.lms.yjn}.
 
 
   }
 }
 \details{
   The CDFs returned here are values lying in [0,1] giving the relative
-  probabilities associated with the quantiles \code{newdata}. 
+  probabilities associated with the quantiles \code{newdata}.
   For example, a value near 0.75 means it is close to the upper quartile
   of the distribution.
 
@@ -42,7 +42,7 @@ cdf.lmscreg(object, newdata = NULL, ...)
 
 
 }
-\references{ 
+\references{
 
 
 Yee, T. W. (2004)
@@ -59,19 +59,19 @@ Quantile regression via vector generalized additive models.
 
 }
 \author{ Thomas W. Yee }
-\note{ 
+\note{
  The data are treated like quantiles, and the percentiles
 are returned. The opposite is performed by
 \code{\link{qtplot.lmscreg}}.
 
 
 The CDF values of the model have been placed in
-\code{@post$cdf} when the model was fitted. 
+\code{@post$cdf} when the model was fitted.
 
 
 }
 
-\seealso{ 
+\seealso{
 \code{\link{deplot.lmscreg}},
 \code{\link{qtplot.lmscreg}},
 \code{\link{lms.bcn}},
@@ -83,7 +83,7 @@ The CDF values of the model have been placed in
 \examples{
 fit <- vgam(BMI ~ s(age, df=c(4, 2)), lms.bcn(zero = 1), data = bmi.nz)
 head(fit at post$cdf)
-head(cdf(fit))  # Same 
+head(cdf(fit))  # Same
 head(depvar(fit))
 head(fitted(fit))
 
diff --git a/man/cens.gumbel.Rd b/man/cens.gumbel.Rd
index 79666f0..f0483cf 100644
--- a/man/cens.gumbel.Rd
+++ b/man/cens.gumbel.Rd
@@ -24,7 +24,7 @@ cens.gumbel(llocation = "identitylink", lscale = "loge", iscale = NULL,
 
   }
   \item{iscale}{
-  Numeric and positive. 
+  Numeric and positive.
   Initial value for \eqn{scale}. Recycled to the appropriate length.
   In general, a larger value is better than a smaller value.
   The default is to choose the value internally.
@@ -92,7 +92,7 @@ London: Springer-Verlag.
 
 }
 
-\note{ 
+\note{
   See \code{\link{gumbel}} for details about the Gumbel distribution.
   The initial values are based on assuming all uncensored observations,
   therefore could be improved upon.
diff --git a/man/cens.normal.Rd b/man/cens.normal.Rd
index f705e9e..022363d 100644
--- a/man/cens.normal.Rd
+++ b/man/cens.normal.Rd
@@ -20,7 +20,7 @@ cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = "sd")
   Parameter link functions
   applied to the mean and standard deviation parameters.
   See \code{\link{Links}} for more choices.
-  The standard deviation is a positive quantity, therefore a log link 
+  The standard deviation is a positive quantity, therefore a log link
   is the default.
 
 
@@ -69,7 +69,7 @@ cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = "sd")
 %}
 
 \author{ T. W. Yee }
-\note{ 
+\note{
   This function, which is an alternative to \code{\link{tobit}},
   cannot handle a matrix response
   and uses different working weights.
diff --git a/man/cfibrosis.Rd b/man/cfibrosis.Rd
index 8fe2e6c..8f24b5b 100644
--- a/man/cfibrosis.Rd
+++ b/man/cfibrosis.Rd
@@ -44,7 +44,7 @@ The likelihood has similarities with \code{\link{seq2binomial}}.
   The data is originally from Crow (1965) and
   appears as Table 2.3 of Lange (2002).
 
-  
+
 Crow, J. F. (1965)
 Problems of ascertainment in the analysis of family data.
 Epidemiology and Genetics of Chronic Disease.
diff --git a/man/cgo.Rd b/man/cgo.Rd
index fc564b6..223fd91 100644
--- a/man/cgo.Rd
+++ b/man/cgo.Rd
@@ -13,7 +13,7 @@ cgo(...)
   \item{\dots}{ Ignored. }
 }
 \details{
-The former function \code{cgo} has been renamed \code{\link{cqo}} 
+The former function \code{cgo} has been renamed \code{\link{cqo}}
 because CGO (for \emph{canonical Gaussian ordination}) is a confusing
 and inaccurate name.
 CQO (for \emph{constrained quadratic ordination}) is better.
@@ -26,12 +26,12 @@ This new nomenclature described in Yee (2006).
 
 
 }
-\references{ 
+\references{
 Yee, T. W. (2004)
 A new technique for maximum-likelihood
 canonical Gaussian ordination.
-\emph{Ecological Monographs}, 
-\bold{74}, 685--701. 
+\emph{Ecological Monographs},
+\bold{74}, 685--701.
 
 Yee, T. W. (2006)
 Constrained additive ordination.
@@ -47,7 +47,7 @@ The code, therefore, in Yee (2004) will not run without changing the
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{cqo}}.
 
 }
diff --git a/man/chest.nz.Rd b/man/chest.nz.Rd
index c5e602a..ffb0f47 100644
--- a/man/chest.nz.Rd
+++ b/man/chest.nz.Rd
@@ -17,7 +17,7 @@
   }
 }
 \details{
-  Each adult was asked their age and whether they experienced any 
+  Each adult was asked their age and whether they experienced any
   pain or discomfort in their chest over the last six months.
   If yes, they indicated whether it
   was on their LHS and/or RHS of their chest.
@@ -36,7 +36,7 @@
 }
 \examples{
 \dontrun{
-fit <- vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4, 3)), 
+fit <- vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4, 3)),
             binom2.or(exchan = TRUE, zero = NULL), data = chest.nz)
 coef(fit, matrix = TRUE)
 }
diff --git a/man/chisq.Rd b/man/chisq.Rd
index 7bd8124..de34f19 100644
--- a/man/chisq.Rd
+++ b/man/chisq.Rd
@@ -41,7 +41,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 
 \author{ T. W. Yee }
-\note{ 
+\note{
   Multiple responses are permitted.
   There may be convergence problems if the degrees of freedom
   is very large or close to zero.
diff --git a/man/clo.Rd b/man/clo.Rd
index 66c093f..4e2a288 100644
--- a/man/clo.Rd
+++ b/man/clo.Rd
@@ -34,7 +34,7 @@ The new CLO/CQO/CAO nomenclature described in Yee (2006).
 
 
 }
-\references{ 
+\references{
 
 Yee, T. W. (2006)
 Constrained additive ordination.
@@ -48,7 +48,7 @@ Reduced-rank vector generalized linear models.
 }
 \author{Thomas W. Yee}
 
-\seealso{ 
+\seealso{
   \code{\link{rrvglm}},
   \code{\link{cqo}}.
 }
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index a7a5c62..ad51769 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -6,7 +6,7 @@
   Computes the complementary log-log transformation,
   including its inverse and the
   first two derivatives.
-  
+
 }
 \usage{
 cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
@@ -17,7 +17,7 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
   \item{theta}{
     Numeric or character.
     See below for further details.
-    
+
   }
   \item{bvalue}{
   See \code{\link{Links}} for general information about links.
@@ -50,14 +50,14 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 
   Here, all logarithms are natural logarithms, i.e., to base \eqn{e}.
-  
+
 
 }
 \references{
     McCullagh, P. and Nelder, J. A. (1989)
     \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
-    
-    
+
+
 }
 \author{ Thomas W. Yee }
 
@@ -85,7 +85,7 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{logitoffsetlink}},
     \code{\link{logit}},
diff --git a/man/coefvgam.Rd b/man/coefvgam.Rd
index 7116c97..c7f4404 100644
--- a/man/coefvgam.Rd
+++ b/man/coefvgam.Rd
@@ -15,7 +15,7 @@ coefvgam(object, type = c("linear", "nonlinear"), ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{object}{ A 
+  \item{object}{ A
     \code{\link{vgam}} object.
 
 
diff --git a/man/coefvlm.Rd b/man/coefvlm.Rd
index eaa72e6..72b4245 100644
--- a/man/coefvlm.Rd
+++ b/man/coefvlm.Rd
@@ -13,7 +13,7 @@ coefvlm(object, matrix.out = FALSE, label = TRUE, colon = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{object}{ An object for which the extraction of 
+  \item{object}{ An object for which the extraction of
     coefficients is meaningful.
     This will usually be a \code{\link{vglm}} object.
 
diff --git a/man/confintvglm.Rd b/man/confintvglm.Rd
index 202abb3..eba5a63 100644
--- a/man/confintvglm.Rd
+++ b/man/confintvglm.Rd
@@ -1,19 +1,23 @@
 \name{confintvglm}
 %\name{confint}
-\alias{confint}
+% \alias{confint}
 \alias{confintvglm}
 \alias{confintrrvglm}
 \alias{confintvgam}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Confidence Intervals for VGLM Parameters }
+\title{ Confidence Intervals for Parameters of VGLMs }
 \description{
-  Computes confidence intervals for one or more parameters in a fitted
-  model.  Currently the object must be a
+  Computes confidence intervals (CIs)
+  for one or more parameters in a fitted model.
+  Currently the object must be a
   \code{"\link{vglm}"} object.
 
+
 }
+% confint(object, parm, level = 0.95, \dots)
 \usage{
-confint(object, parm, level = 0.95, \dots)
+confintvglm(object, parm, level = 0.95, method = c("wald", "profile"),
+            trace = NULL, \dots)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -21,33 +25,75 @@ confint(object, parm, level = 0.95, \dots)
 
 
   }
- \item{parm, level, \dots}{Same as \code{\link[stats]{confint}}.
- }
+  \item{parm, level, \dots}{Same as \code{\link[stats]{confint}}.
+  }
+  \item{method}{Character.
+  The default is the first method.
+  Abbreviations are allowed.
+  Currently \code{"profile"} is basically working;
+  and it is likely to be more accurate especially for
+  small samples, as it is based on a profile log likelihood,
+  however it is computationally intensive.
+
+
+  }
+  \item{trace}{
+    Logical. If \code{TRUE} then one can monitor the
+    computation as it progresses (because it is expensive).
+    The default is the original model's \code{trace} value
+    (see \code{\link{vglm.control}}).
+    Setting \code{FALSE} suppresses all intermediate output.
+
+
+  }
 }
 \details{
-  This methods function is based on \code{\link[stats]{confint.default}}
+  The default for
+  this methods function is based on \code{\link[stats]{confint.default}}
   and assumes
   asymptotic normality. In particular,
   the \code{\link[VGAM:coefvlm]{coef}} and
   \code{vcov} methods functions are used for
   \code{\link[VGAM]{vglm-class}} objects.
-  Unlike for \code{\link[stats]{glm}} objects, there is no
-  profiling currently implemented.
+
+
+
+  When \code{method = "profile"} the function \code{profilevglm()}
+  is called to do the profiling. The code is very heavily
+  based on \code{\link[MASS]{profile.glm}}
+  which was originally written by
+  D. M. Bates and W. N. Venables (for S in 1996)
+  and subsequently corrected by B. D. Ripley.
+  Sometimes the profiling method can give problems, for
+  example, \code{\link{cumulative}} requires the \eqn{M}
+  linear predictors not to intersect in the data cloud.
+  Such numerical problems are less common when
+  \code{method = "wald"}, however, it is well-known
+  that inference based on profile likelihoods is generally
+  more accurate than Wald, especially when the sample size
+  is small.
+  The deviance (\code{deviance(object)}) is used if possible,
+  else the difference
+  \code{2 * (logLik(object) - ell)} is computed,
+  where \code{ell} are the values of the loglikelihood on a grid.
 
 
 
   For
+  Wald CIs and
   \code{\link[VGAM]{rrvglm-class}}
   objects, currently an error message is produced because
   I haven't gotten around to write the methods function;
   it's not too hard, but am too busy!
-  An interim measure is to 
+  An interim measure is to
   coerce the object into a \code{"\link{vglm}"} object,
   but then the confidence intervals will tend to be too narrow because
   the estimated constraint matrices are treated as known.
-  
-  
+
+
+
   For
+  Wald CIs and
   \code{\link[VGAM]{vgam-class}}
   objects, currently an error message is produced because
   the theory is undeveloped.
@@ -63,17 +109,45 @@ confint(object, parm, level = 0.95, \dots)
 }
 %\references{
 %}
-\author{ Thomas W. Yee }
+\author{
+  Thomas Yee adapted \code{\link[stats]{confint.lm}}
+  to handle \code{"vglm"} objects, for Wald-type
+  confidence intervals.
+  Also, \code{\link[MASS]{profile.glm}}
+  was originally written by
+  D. M. Bates and W. N. Venables (for S in 1996)
+  and subsequently corrected by B. D. Ripley.
+  This function effectively calls \code{confint.profile.glm()}
+  in \pkg{MASS}.
 
-%\note{
-%}
+
+
+  }
+
+\note{
+  The order of the values of argument \code{method} may change
+  in the future without notice.
+  The functions
+  \code{plot.profile.glm}
+  and
+  \code{pairs.profile.glm}
+  from \pkg{MASS}
+  appear to work with output from this function.
+
+
+}
 %\section{Warning }{
 %}
 
+
 \seealso{
   \code{\link{vcovvlm}},
   \code{\link{summaryvglm}},
-  \code{\link[stats]{confint}}.
+  \code{\link[stats]{confint}},
+  \code{\link[MASS]{profile.glm}},
+  \code{plot.profile.glm},
+  \code{pairs.profile.glm}.
+
 
 
 }
@@ -88,16 +162,22 @@ confint(glm.D93) # needs MASS to be present on the system
 confint.default(glm.D93)  # based on asymptotic normality
 confint(vglm.D93)
 confint(vglm.D93) - confint(glm.D93)    # Should be all 0s
-confint(vglm.D93) - confint.default(glm.D93)  # based on asymptotic normality
+confint(vglm.D93) - confint.default(glm.D93)  # based on asympt. normality
 
 # Example 2: simulated negative binomial data with multiple responses
-ndata <- data.frame(x2 = runif(nn <- 300))
+ndata <- data.frame(x2 = runif(nn <- 100))
 ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
                           y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
 fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
+coef(fit1)
 coef(fit1, matrix = TRUE)
 confint(fit1)
 confint(fit1, "x2:1")  #  This might be improved to "x2" some day...
+\dontrun{
+confint(fit1, method = "profile")  # Computationally expensive
+confint(fit1, "x2:1", method = "profile", trace = FALSE)
+}
+
 fit2 <- rrvglm(y1 ~ x2, negbinomial(zero = NULL), data = ndata)
 confint(as(fit2, "vglm"))  # Too narrow (SEs are biased downwards)
 }
diff --git a/man/constraints.Rd b/man/constraints.Rd
index c8446c5..50123f2 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -176,7 +176,7 @@ abs(max(coef(fit1, matrix = TRUE) -
 
 # Fit a rank-1 stereotype (RR-multinomial logit) model:
 fit <- rrvglm(Country ~ Width + Height + HP, multinomial, data = car.all)
-constraints(fit)  # All except the first are the estimated A matrix 
+constraints(fit)  # All except the first are the estimated A matrix
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/corbet.Rd b/man/corbet.Rd
index 8be6ca8..ed41ed3 100644
--- a/man/corbet.Rd
+++ b/man/corbet.Rd
@@ -47,7 +47,7 @@
   The Relation Between the Number of Species and
      the Number of Individuals in a Random Sample of an Animal
      Population.
-\emph{Journal of Animal Ecology},   
+\emph{Journal of Animal Ecology},
 \bold{12}, 42--58.
 
 }
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 46e6bf0..25d2371 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -23,11 +23,11 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
   \item{formula}{ a symbolic description of the model to be fit.
   The RHS of the formula is applied to each linear predictor. Different
   variables in each linear predictor can be chosen by specifying
-  constraint matrices. 
+  constraint matrices.
 
 
   }
-  \item{family}{ 
+  \item{family}{
   a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
   describing what statistical model is to be fitted. This is called a
   ``\pkg{VGAM} family function''.  See \code{\link{CommonVGAMffArguments}}
@@ -58,7 +58,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
 
   }
-  \item{weights}{ an optional vector or matrix of (prior) weights 
+  \item{weights}{ an optional vector or matrix of (prior) weights
     to be used in the fitting process.
     Currently, this argument should not be used.
 
@@ -70,7 +70,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
 
   }
-  \item{na.action}{ 
+  \item{na.action}{
   a function which indicates what should happen when the data contain
   \code{NA}s.  The default is set by the \code{na.action} setting of
   \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
@@ -87,8 +87,8 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
   }
   \item{mustart}{
-  starting values for the 
-  fitted values. It can be a vector or a matrix. 
+  starting values for the
+  fitted values. It can be a vector or a matrix.
   Some family functions do not make use of this argument.
   Currently, this argument probably should not be used.
 
@@ -107,7 +107,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
 
   }
-  \item{offset}{ 
+  \item{offset}{
   This argument must not be used.
 
 %   especially when \code{I.tolerances = TRUE}.
@@ -138,7 +138,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
 %    ; to get the VGLM
 %    model matrix type \code{model.matrix(vglmfit)} where
-%    \code{vglmfit} is a \code{vglm} object. 
+%    \code{vglmfit} is a \code{vglm} object.
 
 
   }
@@ -151,11 +151,11 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
   \item{constraints}{
   an optional list  of constraint matrices.
   The components of the list must be named with the term it corresponds
-  to (and it must match in character format). 
+  to (and it must match in character format).
   Each constraint matrix must have \eqn{M} rows, and be of full-column
   rank. By default, constraint matrices are the \eqn{M} by \eqn{M}
   identity
-  matrix unless arguments in the family function itself override these values. 
+  matrix unless arguments in the family function itself override these values.
   If \code{constraints} is used it must contain \emph{all} the
   terms; an incomplete list is not accepted.
   Constraint matrices for \eqn{x_2}{x_2} variables are taken as the
@@ -179,7 +179,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
 
   }
-  \item{\dots}{ 
+  \item{\dots}{
   further arguments passed into \code{\link{qrrvglm.control}}.
 
 
@@ -275,7 +275,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 
 }
 \value{
-  An object of class \code{"qrrvglm"}. 
+  An object of class \code{"qrrvglm"}.
 
 
 % Note that the slot \code{misc} has a list component called
@@ -301,7 +301,7 @@ A theory of gradient analysis.
 
 
 %Yee, T. W. (2005)
-%On constrained and unconstrained 
+%On constrained and unconstrained
 %quadratic ordination.
 %\emph{Manuscript in preparation}.
 
@@ -318,7 +318,7 @@ Thanks to Alvin Sou for converting a lot of the
 original FORTRAN code into C.
 
 
-} 
+}
 
 \note{
   The input requires care, preparation and
@@ -386,7 +386,7 @@ original FORTRAN code into C.
   Try \code{I.tolerance = TRUE} or \code{eq.tolerance = FALSE}
   if the inputted data set is large,
   so as to reduce the computational expense.
-  That's because the default, \code{I.tolerance = FALSE} and 
+  That's because the default, \code{I.tolerance = FALSE} and
   \code{eq.tolerance = TRUE}, is very memory hungry.
 
 
@@ -422,10 +422,10 @@ original FORTRAN code into C.
   %\code{eq.tolerances = TRUE} may help, especially when the number of sites,
   %\eqn{n}, is small.
 
-  %If the negative binomial family function \code{\link{negbinomial}} is 
+  %If the negative binomial family function \code{\link{negbinomial}} is
   %used for \code{cqo} then set \code{negbinomial(deviance = TRUE)}
   %is necessary. This means to minimize the deviance, which the fast
-  %algorithm can handle. 
+  %algorithm can handle.
 
 
   Many of the arguments applicable to \code{cqo} are common to
@@ -433,7 +433,7 @@ original FORTRAN code into C.
   The most important arguments are
   \code{Rank},
   \code{noRRR},
-  \code{Bestof}, 
+  \code{Bestof},
   \code{I.tolerances},
   \code{eq.tolerances},
   \code{isd.latvar}, and
@@ -631,7 +631,7 @@ p2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
 sort(deviance(p2, history = TRUE))  # A history of all the iterations
 if (deviance(p2) > 1127) warning("suboptimal fit obtained")
 lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
-       C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey", 
+       C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey",
        pcol = "blue", pch = "+", chull = TRUE, ccol = "grey")
 
 
diff --git a/man/crashes.Rd b/man/crashes.Rd
index 91d0c22..8f3062f 100644
--- a/man/crashes.Rd
+++ b/man/crashes.Rd
@@ -32,7 +32,7 @@ data(alclevels)
   The \code{alclevels} dataset has hourly times and alcohol levels.
 
   \describe{
-   
+
     \item{Mon, Tue, Wed, Thu, Fri, Sat, Sun}{
     Day of the week.
 
@@ -56,13 +56,13 @@ data(alclevels)
     The \code{rownames} of each data frame is the
     start time (hourly from midnight onwards) on a 24 hour clock,
     e.g., 21 means 9.00pm to 9.59pm.
-     
-     
+
+
   For crashes,
      \code{chrashi} are the number of injuries by car,
      \code{crashf}  are the number of fatalities by car
      (not included in \code{chrashi}),
-     \code{crashtr} are the number of crashes involving trucks, 
+     \code{crashtr} are the number of crashes involving trucks,
      \code{crashmc} are the number of crashes involving motorcyclists,
      \code{crashbc} are the number of crashes involving bicycles,
      and
@@ -73,10 +73,10 @@ data(alclevels)
      and
      \code{alclevels} are the blood alcohol levels of fatally injured drivers.
 
-     
+
 }
 \source{
-  
+
   \url{http://www.transport.govt.nz/research/Pages/Motor-Vehicle-Crashes-in-New-Zealand-2009.aspx}.
   Thanks to Warwick Goold and Alfian F. Hadi for assistance.
 
diff --git a/man/cratio.Rd b/man/cratio.Rd
index 0590a9b..3737d04 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -61,7 +61,7 @@ cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
   There are a number of definitions for the \emph{continuation ratio}
   in the literature. To make life easier, in the \pkg{VGAM} package,
   we use \emph{continuation} ratios and \emph{stopping} ratios
-  (see \code{\link{sratio}}). 
+  (see \code{\link{sratio}}).
   Stopping ratios deal with quantities such as
   \code{logit(P[Y=j|Y>=j])}.
 
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index 4a25961..b4baddd 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -5,7 +5,7 @@
 \title{ Ordinal Regression with Cumulative Probabilities }
 \description{
   Fits a cumulative link
-  regression model to a (preferably ordered) factor response. 
+  regression model to a (preferably ordered) factor response.
 
 }
 \usage{
@@ -20,7 +20,7 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
 \arguments{
 
   \item{link}{
-  Link function applied to the \eqn{J} cumulative probabilities. 
+  Link function applied to the \eqn{J} cumulative probabilities.
   See \code{\link{Links}} for more choices,
   e.g., for the cumulative
   \code{\link{probit}}/\code{\link{cloglog}}/\code{\link{cauchit}}/\ldots
@@ -61,7 +61,7 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
   By default, the cumulative probabilities used are
   \eqn{P(Y\leq 1)}{P(Y<=1)}, \eqn{P(Y\leq 2)}{P(Y<=2)},
   \dots, \eqn{P(Y\leq J)}{P(Y<=J)}.
-  If \code{reverse} is \code{TRUE} then 
+  If \code{reverse} is \code{TRUE} then
   \eqn{P(Y\geq 2)}{P(Y>=2)}, \eqn{P(Y\geq 3)}{P(Y>=3)}, \dots,
   \eqn{P(Y\geq J+1)}{P(Y>=J+1)} are used.
 
@@ -119,6 +119,7 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
 % and for \code{scumulative()} \eqn{M=2J}.
 
 
+
   This \pkg{VGAM} family function fits the class of
   \emph{cumulative link models} to (hopefully) an ordinal response.
   By default, the \emph{non-parallel} cumulative logit model is fitted, i.e.,
@@ -127,11 +128,12 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
   where \eqn{j=1,2,\dots,M} and
   the \eqn{\eta_j}{eta_j} are not constrained to be parallel.
   This is also known as the \emph{non-proportional odds model}.
-  If the logit link is replaced by a complementary log-log link 
+  If the logit link is replaced by a complementary log-log link
   (\code{\link{cloglog}}) then
   this is known as the \emph{proportional-hazards model}.
 
 
+
   In almost all the literature, the constraint matrices associated
   with this family of models are known. For example, setting
   \code{parallel = TRUE} will make all constraint matrices
@@ -144,6 +146,7 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
   (RR-VGAMs) have not been implemented here.
 
 
+
 % The scaled version of \code{cumulative()}, called \code{scumulative()},
 % has \eqn{J} positive scaling factors.
 % They are described in pages 154 and 177 of McCullagh and Nelder (1989);
@@ -151,6 +154,7 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
 % which they call the \emph{generalized rational model}.
 
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -222,7 +226,7 @@ Vector generalized additive models.
 
   With the logit link, setting \code{parallel = TRUE} will fit a
   proportional odds model. Note that the \code{TRUE} here does
-  not apply to the intercept term. 
+  not apply to the intercept term.
   In practice, the validity of the proportional odds assumption
   needs to be checked, e.g., by a likelihood ratio test (LRT).
   If acceptable on the data,
@@ -246,14 +250,14 @@ Vector generalized additive models.
   and the self-starting initial values are not good enough then
   try using
   \code{mustart},
-  \code{coefstart} and/or 
+  \code{coefstart} and/or
   \code{etatstart}.
   See the example below.
 
 
   To fit the proportional odds model one can use the
   \pkg{VGAM} family function \code{\link{propodds}}.
-  Note that \code{propodds(reverse)} is equivalent to 
+  Note that \code{propodds(reverse)} is equivalent to
   \code{cumulative(parallel = TRUE, reverse = reverse)} (which is
   equivalent to
   \code{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}).
diff --git a/man/dagum.Rd b/man/dagum.Rd
index a1da353..e46e537 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -3,12 +3,12 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Dagum Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 3-parameter 
+  Maximum likelihood estimation of the 3-parameter
   Dagum distribution.
 }
 \usage{
-dagum(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge", 
-      iscale = NULL, ishape1.a = NULL, ishape2.p = NULL, imethod = 1, 
+dagum(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
+      iscale = NULL, ishape1.a = NULL, ishape2.p = NULL, imethod = 1,
       lss = TRUE, gscale = exp(-5:5), gshape1.a = seq(0.75, 4, by = 0.25),
       gshape2.p = exp(-5:5), probs.y = c(0.25, 0.5, 0.75), zero = "shape")
 }
@@ -16,8 +16,8 @@ dagum(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
 %     zero = ifelse(lss, -(2:3), -c(1, 3))
 \arguments{
   \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
-  
-  
+
+
   }
 
  \item{lshape1.a, lscale, lshape2.p}{
@@ -28,7 +28,7 @@ dagum(lscale = "loge", lshape1.a = "loge", lshape2.p = "loge",
   }
   \item{iscale, ishape1.a, ishape2.p, imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
-  For \code{imethod = 2} a good initial value for 
+  For \code{imethod = 2} a good initial value for
   \code{ishape2.p} is needed to obtain a good estimate for
   the other parameter.
 
diff --git a/man/deplot.lmscreg.Rd b/man/deplot.lmscreg.Rd
index 135e96a..ebbed65 100644
--- a/man/deplot.lmscreg.Rd
+++ b/man/deplot.lmscreg.Rd
@@ -29,7 +29,7 @@ deplot.lmscreg(object, newdata = NULL, x0, y.arg, show.plot = TRUE, ...)
 
 
   }
-  \item{y.arg}{ Numerical vector. The values of the response variable 
+  \item{y.arg}{ Numerical vector. The values of the response variable
   at which to evaluate the density. This should be a grid that is fine
   enough to ensure the plotted curves are smooth.  }
   \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will
@@ -50,7 +50,7 @@ deplot.lmscreg(object, newdata = NULL, x0, y.arg, show.plot = TRUE, ...)
 
 }
 \value{
-  The original \code{object} but with a list 
+  The original \code{object} but with a list
   placed in the slot \code{post}, called
   \code{@post$deplot}. The list has components
   \item{newdata }{ The argument \code{newdata} above, or a one-row
diff --git a/man/depvar.Rd b/man/depvar.Rd
index 08ef307..1cb7f45 100644
--- a/man/depvar.Rd
+++ b/man/depvar.Rd
@@ -50,7 +50,7 @@ depvar(object, ...)
 %}
 
 %\section{Warning }{
-%  This 
+%  This
 
 
 %}
diff --git a/man/df.residual.Rd b/man/df.residual.Rd
index de14e1a..1acc3d4 100644
--- a/man/df.residual.Rd
+++ b/man/df.residual.Rd
@@ -36,10 +36,10 @@ df.residual_vlm(object, type = c("vlm", "lm"), \dots)
   The number of rows is \eqn{M} times the `ordinary' number
   of rows of the LM-type model: \eqn{nM}.
   Here, \eqn{M} is the number of linear/additive predictors.
-  So the formula for the VLM-type residual degrees-of-freedom 
+  So the formula for the VLM-type residual degrees-of-freedom
   is \eqn{nM - p^{*}} where \eqn{p^{*}} is the number of
   columns of the `big' VLM matrix.
-  The formula for the LM-type residual degrees-of-freedom 
+  The formula for the LM-type residual degrees-of-freedom
   is \eqn{n - p_{j}} where \eqn{p_{j}} is the number of
   columns of the `ordinary' LM matrix corresponding
   to the \eqn{j}th linear/additive predictor.
diff --git a/man/diffzeta.Rd b/man/diffzeta.Rd
new file mode 100644
index 0000000..0294014
--- /dev/null
+++ b/man/diffzeta.Rd
@@ -0,0 +1,85 @@
+\name{diffzeta}
+\alias{diffzeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Differenced Zeta Distribution Family Function }
+\description{
+  Estimates the parameter of the differenced zeta distribution.
+
+}
+\usage{
+diffzeta(start = 1, lshape = "loge", ishape = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lshape, ishape}{
+  Same as \code{\link{zetaff}}.
+
+
+  }
+  \item{start}{
+    Smallest value of the support of the distribution.
+    Must be a positive integer.
+
+
+  }
+}
+\details{
+  The PMF is
+  \deqn{P(Y=y) = (a/y)^{s} - (a/(1+y))^{s},\ \ s>0,\ \ y=a,a+1,\ldots,}{%
+        P(Y=y) = (a/y)^(s) - (a/(1+y))^(s), s>0, y=a,a+1,...,}
+  where \eqn{s} is the positive shape parameter, and \eqn{a} is \code{start}.
+  According to Moreno-Sanchez et al. (2016), this model
+  fits quite well to about 40 percent of all the English books in the
+  Project Gutenberg data base (about 30,000 texts).
+  Multiple responses are handled.
+
+
+
+}
+
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+
+}
+
+\references{
+Moreno-Sanchez, I. and Font-Clos, F. and Corral, A.
+Large-Scale Analysis of Zipf's Law in English Texts,
+2016.
+PLoS ONE, \bold{11}(1), 1--19.
+
+
+
+}
+\author{ T. W. Yee }
+%\note{
+%  The \code{\link{zeta}} function may be used to compute values
+%  of the zeta function.
+%
+%
+%}
+
+\seealso{
+  \code{\link{Diffzeta}},
+  \code{\link{zetaff}},
+  \code{\link{zeta}},
+  \code{\link{zipf}},
+  \code{\link{zipf}}.
+
+
+}
+\examples{
+odata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
+odata <- transform(odata, shape = loge(-0.25 + x2, inverse = TRUE))
+odata <- transform(odata, y1 = rdiffzeta(nn, shape))
+with(odata, table(y1))
+ofit <- vglm(y1 ~ x2, diffzeta, data = odata, trace = TRUE, crit = "coef")
+coef(ofit, matrix = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+%
diff --git a/man/diffzetaUC.Rd b/man/diffzetaUC.Rd
new file mode 100644
index 0000000..1e617c8
--- /dev/null
+++ b/man/diffzetaUC.Rd
@@ -0,0 +1,98 @@
+\name{Diffzeta}
+\alias{Diffzeta}
+\alias{ddiffzeta}
+\alias{pdiffzeta}
+\alias{qdiffzeta}
+\alias{rdiffzeta}
+\title{ Differenced Zeta Distribution }
+\description{
+  Density, distribution function,
+  quantile function,
+  and random generation
+  for the differenced zeta distribution.
+
+
+
+}
+\usage{
+ddiffzeta(x, shape, start = 1, log = FALSE)
+pdiffzeta(q, shape, start = 1, lower.tail = TRUE)
+qdiffzeta(p, shape, start = 1)
+rdiffzeta(n, shape, start = 1)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, p, n}{
+  Same as in \code{\link[stats]{runif}}.
+
+
+  }
+  \item{shape, start}{
+   Details at \code{\link{diffzeta}}.
+
+
+%   For \code{rdiffzeta()} this pa%arameter must be of length 1.
+
+
+  }
+  \item{log, lower.tail}{
+  Same as in \code{\link[stats]{runif}}.
+
+
+  }
+}
+\details{
+  This distribution appears to model well the distribution
+  of English words in large text corpora.
+  Some more details are given in \code{\link{diffzeta}}.
+
+
+}
+\value{
+  \code{ddiffzeta} gives the density,
+  \code{pdiffzeta} gives the distribution function,
+  \code{qdiffzeta} gives the quantile function, and
+  \code{rdiffzeta} generates random deviates.
+
+
+
+
+}
+%\references{
+%}
+\author{ T. W. Yee }
+\note{
+  Given some response data, the \pkg{VGAM} family function
+  \code{\link{diffzeta}} estimates the parameter \code{shape}.
+
+
+  Function \code{pdiffzeta()} suffers from the problems that
+  \code{\link{plog}} sometimes has, i.e., when \code{p}
+  is very close to 1.
+
+
+
+}
+
+\seealso{
+  \code{\link{diffzeta}},
+  \code{\link{zetaff}},
+  \code{\link{zipf}},
+  \code{\link{Oizeta}}.
+
+
+}
+\examples{
+ddiffzeta(1:20, 0.5, start = 2)
+rdiffzeta(20, 0.5)
+
+\dontrun{ shape <- 0.8; x <- 1:10
+plot(x, ddiffzeta(x, shape = shape), type = "h", ylim = 0:1,
+     sub = "shape=0.8", las = 1, col = "blue", ylab = "Probability",
+     main = "Differenced zeta distribution: blue=PMF; orange=CDF")
+lines(x + 0.1, pdiffzeta(x, shape = shape), col = "orange", lty = 3, type = "h") }
+}
+\keyword{distribution}
+
+
+
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index 3842567..ccb666d 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -62,7 +62,7 @@ dirichlet(link = "loge", parallel = FALSE, zero = NULL, imethod = 1)
 
   The Dirichlet distribution can be motivated by considering the random variables
   \eqn{(G_1,\ldots,G_{M})^T}{(G_1,\ldots,G_M)^T} which are each independent
-  and identically distributed as a gamma distribution with density 
+  and identically distributed as a gamma distribution with density
   \eqn{f(g_j)=g_j^{\alpha_j - 1} e^{-g_j} / \Gamma(\alpha_j)}{f(g_j)=
               g_j^(alpha_j - 1) e^(-g_j) / gamma(alpha_j)}.
   Then the Dirichlet distribution arises when
@@ -78,7 +78,7 @@ dirichlet(link = "loge", parallel = FALSE, zero = NULL, imethod = 1)
 
 
   When fitted, the \code{fitted.values} slot of the object contains the
-  \eqn{M}-column matrix of means. 
+  \eqn{M}-column matrix of means.
 
 
 }
diff --git a/man/dirmul.old.Rd b/man/dirmul.old.Rd
index 4db7120..5a4a1b9 100644
--- a/man/dirmul.old.Rd
+++ b/man/dirmul.old.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Fitting a Dirichlet-Multinomial Distribution }
 \description{
-  Fits a Dirichlet-multinomial distribution to a matrix of 
+  Fits a Dirichlet-multinomial distribution to a matrix of
   non-negative integers.
 }
 \usage{
@@ -13,13 +13,13 @@ dirmul.old(link = "loge", ialpha = 0.01, parallel = FALSE, zero = NULL)
 \arguments{
   \item{link}{
   Link function applied to each of the \eqn{M} (positive)
-  shape parameters \eqn{\alpha_j}{alpha_j} for \eqn{j=1,\ldots,M}. 
+  shape parameters \eqn{\alpha_j}{alpha_j} for \eqn{j=1,\ldots,M}.
   See \code{\link{Links}} for more choices.
   Here, \eqn{M} is the number of columns of the response matrix.
 
   }
   \item{ialpha}{
-  Numeric vector. Initial values for the 
+  Numeric vector. Initial values for the
   \code{alpha} vector. Must be positive.
   Recycled to length \eqn{M}.
 
@@ -41,7 +41,7 @@ dirmul.old(link = "loge", ialpha = 0.01, parallel = FALSE, zero = NULL)
   The Dirichlet-multinomial distribution, which is somewhat similar to a
   Dirichlet distribution, has probability function
   \deqn{P(Y_1=y_1,\ldots,Y_M=y_M) =
-     {2y_{*} \choose {y_1,\ldots,y_M}} 
+     {2y_{*} \choose {y_1,\ldots,y_M}}
         \frac{\Gamma(\alpha_{+})}{\Gamma(2y_{*}+\alpha_{+})}
         \prod_{j=1}^M \frac{\Gamma(y_j+\alpha_{j})}{\Gamma(\alpha_{j})}}{%
         P(Y_1=y_1,\ldots,Y_M=y_M) = C_{y_1,\ldots,y_M}^{2y_{*}}
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index 04a4df5..5f8bac3 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Fitting a Dirichlet-Multinomial Distribution }
 \description{
-  Fits a Dirichlet-multinomial distribution to a matrix response. 
+  Fits a Dirichlet-multinomial distribution to a matrix response.
 
 }
 \usage{
@@ -12,7 +12,7 @@ dirmultinomial(lphi = "logit", iphi = 0.10, parallel = FALSE, zero = "M")
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lphi}{
-  Link function applied to the \eqn{\phi}{phi} 
+  Link function applied to the \eqn{\phi}{phi}
   parameter, which lies in the open unit interval \eqn{(0,1)}.
   See \code{\link{Links}} for more choices.
 
@@ -56,7 +56,7 @@ dirmultinomial(lphi = "logit", iphi = 0.10, parallel = FALSE, zero = "M")
   multivariate distribution called the Dirichlet distribution.
   The Dirichlet-multinomial distribution has probability function
   \deqn{P(Y_1=y_1,\ldots,Y_M=y_M) =
-     {N_{*} \choose {y_1,\ldots,y_M}} 
+     {N_{*} \choose {y_1,\ldots,y_M}}
         \frac{
         \prod_{j=1}^{M}
         \prod_{r=1}^{y_{j}}
@@ -87,10 +87,10 @@ dirmultinomial(lphi = "logit", iphi = 0.10, parallel = FALSE, zero = "M")
   applied to \eqn{\phi}{phi}.
 
 
-  Note that \eqn{E(Y_j) = N_* \pi_j}{E(Y_j) = N_* pi_j} but 
+  Note that \eqn{E(Y_j) = N_* \pi_j}{E(Y_j) = N_* pi_j} but
   the probabilities (returned as the fitted values)
   \eqn{\pi_j}{pi_j} are bundled together as a \eqn{M}-column matrix.
-  The quantities \eqn{N_*} are returned as the prior weights. 
+  The quantities \eqn{N_*} are returned as the prior weights.
 
 
   The beta-binomial distribution is a special case of
@@ -197,25 +197,26 @@ the Dirichlet-Multinomial Log-Likelihood Function.
 }
 
 \examples{
-nn <- 10; M <- 5
-ydata <- data.frame(round(matrix(runif(nn * M, max = 10), nn, M)))  # Integer counts
+nn <- 5; M <- 4; set.seed(1)
+ydata <- data.frame(round(matrix(runif(nn * M, max = 100), nn, M)))  # Integer counts
 colnames(ydata) <- paste("y", 1:M, sep = "")
 
-fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial,
+fit <- vglm(cbind(y1, y2, y3, y4) ~ 1, dirmultinomial,
             data = ydata, trace = TRUE)
 head(fitted(fit))
 depvar(fit)  # Sample proportions
 weights(fit, type = "prior", matrix = FALSE)  # Total counts per row
 
+\dontrun{
 ydata <- transform(ydata, x2 = runif(nn))
-fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial,
+fit <- vglm(cbind(y1, y2, y3, y4) ~ x2, dirmultinomial,
             data = ydata, trace = TRUE)
-\dontrun{ # This does not work:
-Coef(fit) }
+Coef(fit)
 coef(fit, matrix = TRUE)
 (sfit <- summary(fit))
 vcov(sfit)
 }
+}
 \keyword{models}
 \keyword{regression}
 
@@ -225,3 +226,11 @@ vcov(sfit)
 % for \eqn{j=1,\ldots,M}.
 
 % Currently, initial values can be improved upon.
+
+
+% \dontrun{ # This does not work:
+
+
+
+
+
diff --git a/man/double.cens.normal.Rd b/man/double.cens.normal.Rd
index 560a205..0370762 100644
--- a/man/double.cens.normal.Rd
+++ b/man/double.cens.normal.Rd
@@ -84,7 +84,7 @@ double.cens.normal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
 \examples{\dontrun{ # Repeat the simulations described in Harter and Moore (1966)
 SIMS <- 100  # Number of simulations (change this to 1000)
 mu.save <- sd.save <- rep(NA, len = SIMS)
-r1 <- 0; r2 <- 4; nn <- 20  
+r1 <- 0; r2 <- 4; nn <- 20
 for (sim in 1:SIMS) {
   y <- sort(rnorm(nn))
   y <- y[(1+r1):(nn-r2)]  # Delete r1 smallest and r2 largest
diff --git a/man/double.expbinomial.Rd b/man/double.expbinomial.Rd
index 3799030..32e7a7e 100644
--- a/man/double.expbinomial.Rd
+++ b/man/double.expbinomial.Rd
@@ -16,22 +16,22 @@ double.expbinomial(lmean = "logit", ldispersion = "logit",
 %                  idispersion = 0.25, zero = 2
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lmean, ldispersion}{ 
+  \item{lmean, ldispersion}{
   Link functions applied to the two parameters, called
   \eqn{\mu}{mu} and \eqn{\theta}{theta} respectively below.
   See \code{\link{Links}} for more choices.
-  The defaults cause the parameters to be restricted to \eqn{(0,1)}. 
+  The defaults cause the parameters to be restricted to \eqn{(0,1)}.
 
 
   }
-  \item{idispersion}{ 
+  \item{idispersion}{
   Initial value for the dispersion parameter.
   If given, it must be in range, and is recyled to the necessary length.
   Use this argument if convergence failure occurs.
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   A vector specifying which
   linear/additive predictor is to be modelled as intercept-only.
   If assigned, the single value can be either \code{1} or \code{2}.
diff --git a/man/eexpUC.Rd b/man/eexpUC.Rd
index ccbdf64..7c3b9fc 100644
--- a/man/eexpUC.Rd
+++ b/man/eexpUC.Rd
@@ -79,7 +79,7 @@ very close to 0 or 1.
 
 }
 
-%\references{ 
+%\references{
 %
 %Jones, M. C. (1994)
 %Expectiles and M-quantiles are quantiles.
@@ -89,7 +89,7 @@ very close to 0 or 1.
 %}
 \author{ T. W. Yee and Kai Huang }
 
-%\note{ 
+%\note{
 %The ``\code{q}'', as the first character of ``\code{qeunif}'',
 %may be changed to ``\code{e}'' in the future,
 %the reason being to emphasize that the expectiles are returned.
diff --git a/man/enormUC.Rd b/man/enormUC.Rd
index 7f18c7f..f54e208 100644
--- a/man/enormUC.Rd
+++ b/man/enormUC.Rd
@@ -80,7 +80,7 @@ very close to 0 or 1.
 
 }
 
-%\references{ 
+%\references{
 %
 %Jones, M. C. (1994)
 %Expectiles and M-quantiles are quantiles.
@@ -90,7 +90,7 @@ very close to 0 or 1.
 %}
 \author{ T. W. Yee and Kai Huang }
 
-%\note{ 
+%\note{
 %The ``\code{q}'', as the first character of ``\code{qeunif}'',
 %may be changed to ``\code{e}'' in the future,
 %the reason being to emphasize that the expectiles are returned.
diff --git a/man/enzyme.Rd b/man/enzyme.Rd
index 3280227..106a49b 100644
--- a/man/enzyme.Rd
+++ b/man/enzyme.Rd
@@ -3,7 +3,7 @@
 \docType{data}
 \title{ Enzyme Data}
 \description{
-  Enzyme velocity and substrate concentration. 
+  Enzyme velocity and substrate concentration.
 }
 \usage{data(enzyme)}
 \format{
diff --git a/man/erlang.Rd b/man/erlang.Rd
index 3c26974..2cd3a55 100644
--- a/man/erlang.Rd
+++ b/man/erlang.Rd
@@ -9,7 +9,7 @@
 
 }
 \usage{
-erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
+erlang(shape.arg, lscale = "loge", imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,7 +20,7 @@ erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
 
 
   }
-  \item{link}{
+  \item{lscale}{
   Link function applied to the (positive) \eqn{scale} parameter.
   See \code{\link{Links}} for more choices.
 
@@ -48,7 +48,7 @@ erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
         f(y) =  exp(-y/scale) y^(shape-1) scale^(-shape) / gamma(shape)}
   for known positive integer \eqn{shape},
   unknown \eqn{scale > 0}  and \eqn{y > 0}.
-  Here, 
+  Here,
   \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
   function, as in \code{\link[base:Special]{gamma}}.
   The mean of \emph{Y}
@@ -99,7 +99,7 @@ rate <- exp(2); myshape <- 3
 edata <- data.frame(y = rep(0, nn <- 1000))
 for (ii in 1:myshape)
   edata <- transform(edata, y = y + rexp(nn, rate = rate))
-fit <- vglm(y ~ 1, erlang(shape = myshape), data = edata, trace = TRUE) 
+fit <- vglm(y ~ 1, erlang(shape = myshape), data = edata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)  # Answer = 1/rate
 1/rate
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index b178eb3..6a793c1 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -118,7 +118,7 @@ very close to 0 or 1.
 
 
 }
-\references{ 
+\references{
 
 
 Jones, M. C. (1994)
@@ -137,7 +137,7 @@ quantile and expectile regression.
 }
 \author{ T. W. Yee and Kai Huang }
 
-%\note{ 
+%\note{
 %The ``\code{q}'', as the first character of ``\code{qeunif}'',
 %may be changed to ``\code{e}'' in the future,
 %the reason being to emphasize that the expectiles are returned.
diff --git a/man/expexpff.Rd b/man/expexpff.Rd
index f3aa8b5..3e88745 100644
--- a/man/expexpff.Rd
+++ b/man/expexpff.Rd
@@ -23,7 +23,7 @@ expexpff(lrate = "loge", lshape = "loge",
 
   }
   \item{ishape}{
-  Initial value for the \eqn{\alpha}{shape} 
+  Initial value for the \eqn{\alpha}{shape}
   parameter. If convergence fails try setting a different
   value for this argument.
 
diff --git a/man/expexpff1.Rd b/man/expexpff1.Rd
index ba219a5..a788411 100644
--- a/man/expexpff1.Rd
+++ b/man/expexpff1.Rd
@@ -73,7 +73,7 @@ expexpff1(lrate = "loge", irate = NULL, ishape = 1)
   The estimate of  \eqn{\alpha}{shape} is attached to the
   \code{misc} slot of the object, which is a list and contains
   the component \code{shape}.
-  
+
 
   As Newton-Raphson is used, the working weights are sometimes
   negative, and some adjustment is made to these to make them
diff --git a/man/expint3.Rd b/man/expint3.Rd
index c443bed..a3db2eb 100644
--- a/man/expint3.Rd
+++ b/man/expint3.Rd
@@ -40,8 +40,8 @@ expint.E1(x, deriv = 0)
   The function \eqn{E_1(x)} is the integral of
   \eqn{\exp(-t) / t}{exp(-t) / t}
   from \eqn{x} to infinity, for positive real \eqn{x}.
-  
-  
+
+
 
 
 }
@@ -49,10 +49,10 @@ expint.E1(x, deriv = 0)
   Function \code{expint(x, deriv = n)} returns the
   \eqn{n}th derivative of \eqn{Ei(x)} (up to the 3rd),
   function \code{expexpint(x, deriv = n)} returns the
-  \eqn{n}th derivative of 
+  \eqn{n}th derivative of
   \eqn{\exp(-x) \times Ei(x)}{exp(-x) * Ei(x)} (up to the 3rd),
-  function \code{expint.E1(x, deriv = n)} returns the \eqn{n}th derivative of 
-  \eqn{E_1(x)}(up to the 3rd).
+  function \code{expint.E1(x, deriv = n)} returns the \eqn{n}th
+  derivative of \eqn{E_1(x)} (up to the 3rd).
 
 
 }
@@ -69,7 +69,7 @@ NETLIB FORTRAN code.
 Xiangjie Xue modified the functions to calculate derivatives.
 Higher derivatives can actually be calculated---please let me
 know if you need it.
- 
+
 
 
 }
diff --git a/man/explink.Rd b/man/explink.Rd
index 0fda797..e133b8e 100644
--- a/man/explink.Rd
+++ b/man/explink.Rd
@@ -82,7 +82,7 @@ explink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FA
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{loge}},
     \code{\link{rcim}},
diff --git a/man/exponential.Rd b/man/exponential.Rd
index 6b26d06..7d58569 100644
--- a/man/exponential.Rd
+++ b/man/exponential.Rd
@@ -66,9 +66,9 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 
 \author{ T. W. Yee }
-\note{ 
+\note{
   Suppose \eqn{A = 0}.
-  For a fixed time interval, the number of events is 
+  For a fixed time interval, the number of events is
   Poisson with mean \eqn{\lambda}{rate} if the time
   between events has a
   geometric distribution with mean \eqn{\lambda^{-1}}{1/rate}.
diff --git a/man/felix.Rd b/man/felix.Rd
index db1c44e..ae9961d 100644
--- a/man/felix.Rd
+++ b/man/felix.Rd
@@ -8,12 +8,13 @@
 
 }
 \usage{
-felix(link = extlogit(min = 0, max = 0.5), imethod = 1)
+felix(lrate = extlogit(min = 0, max = 0.5), imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{
-  Link function for the parameter;
+  \item{lrate}{
+  Link function for the parameter,
+  called \eqn{a} below;
   see \code{\link{Links}} for more choices and for general information.
 
 
@@ -31,7 +32,7 @@ felix(link = extlogit(min = 0, max = 0.5), imethod = 1)
   \deqn{f(y;a) =
   \frac{ 1 }{((y-1)/2)!} y^{(y-3)/2} a^{(y-1)/2}  \exp(-ay)
   }{%
-  f(y;a) = 
+  f(y;a) =
   (1 / ((y-1)/2)!) * y^((y-3)/2) * a^((y-1)/2) * exp(-ay)}
   where \eqn{y=1,3,5,\ldots} and
   \eqn{0 < a < 0.5}.
@@ -60,7 +61,7 @@ Boston, USA: Birkhauser.
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{dfelix}},
   \code{\link{borel.tanner}}.
 
diff --git a/man/felixUC.Rd b/man/felixUC.Rd
index 32d1265..ca0cb07 100644
--- a/man/felixUC.Rd
+++ b/man/felixUC.Rd
@@ -17,19 +17,19 @@
 
 }
 \usage{
-dfelix(x, a = 0.25, log = FALSE)
+dfelix(x, rate = 0.25, log = FALSE)
 }
 
 
-%pfelix(q, a = 0.25)
-%qfelix(p, a = 0.25)
-%rfelix(n, a = 0.25)
+%pfelix(q, rate = 0.25)
+%qfelix(p, rate = 0.25)
+%rfelix(n, rate = 0.25)
 \arguments{
   \item{x}{vector of quantiles.}
 % \item{p}{vector of probabilities.}
 % \item{n}{number of observations.
 %   Must be a positive integer of length 1.}
-  \item{a}{ See \code{\link{felix}}.
+  \item{rate}{ See \code{\link{felix}}.
     }
   \item{log}{
   Logical.
@@ -58,7 +58,7 @@ dfelix(x, a = 0.25, log = FALSE)
 
 }
 \section{Warning }{
-  The default value of \code{a} is subjective.
+  The default value of \code{rate} is subjective.
 
 
 }
@@ -69,9 +69,9 @@ dfelix(x, a = 0.25, log = FALSE)
 }
 \examples{
 \dontrun{
-a <- 0.25; x <- 1:15
-plot(x, dfelix(x, a), type = "h", las = 1, col = "blue",
-     ylab = paste("dfelix(a=", a, ")"),
+rate <- 0.25; x <- 1:15
+plot(x, dfelix(x, rate), type = "h", las = 1, col = "blue",
+     ylab = paste("dfelix(rate=", rate, ")"),
      main = "Felix density function")
 }
 }
diff --git a/man/fff.Rd b/man/fff.Rd
index e453fb5..e89fa33 100644
--- a/man/fff.Rd
+++ b/man/fff.Rd
@@ -18,7 +18,7 @@ fff(link = "loge", idf1 = NULL, idf2 = NULL, nsimEIM = 100,
 
   }
   \item{idf1, idf2}{
-  Numeric and positive. 
+  Numeric and positive.
   Initial value for the parameters.
   The default is to choose each value internally.
 
@@ -47,10 +47,10 @@ fff(link = "loge", idf1 = NULL, idf2 = NULL, nsimEIM = 100,
 
 }
 \details{
-  The F distribution is named after Fisher and has a density function 
+  The F distribution is named after Fisher and has a density function
   that has two parameters, called \code{df1} and \code{df2} here.
   This function treats these degrees of freedom as \emph{positive reals}
-  rather than integers. 
+  rather than integers.
   The mean of the distribution is
   \eqn{df2/(df2-2)} provided \eqn{df2>2},
   and its variance is
@@ -88,7 +88,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 }
 
-%\note{ 
+%\note{
 %  This family function uses the BFGS quasi-Newton update formula for the
 %  working weight matrices.  Consequently the estimated variance-covariance
 %  matrix may be inaccurate or simply wrong! The standard errors must be
diff --git a/man/fill.Rd b/man/fill.Rd
index a0e75be..2c4e2fe 100644
--- a/man/fill.Rd
+++ b/man/fill.Rd
@@ -41,7 +41,7 @@ fill(x, values = 0, ncolx = ncol(x))
 
 
   }
-  \item{values}{ 
+  \item{values}{
     Numeric.
     The answer contains these values,
     which are recycled \emph{columnwise} if necessary, i.e.,
@@ -49,7 +49,7 @@ fill(x, values = 0, ncolx = ncol(x))
 
 
   }
-  \item{ncolx}{ 
+  \item{ncolx}{
     The number of columns of the returned matrix.
     The default is the number of columns of \code{x}.
 
@@ -86,7 +86,7 @@ fill(x, values = 0, ncolx = ncol(x))
 
 
 }
-%\references{ 
+%\references{
 %  More information can be found at
 %  \url{http://www.stat.auckland.ac.nz/~yee}.
 %
@@ -100,7 +100,7 @@ fill(x, values = 0, ncolx = ncol(x))
 %}
 
 \author{ T. W. Yee }
-\note{ 
+\note{
   The effect of the \code{xij} argument is after other arguments such as
   \code{exchangeable} and \code{zero}.
   Hence \code{xij} does not affect constraint matrices.
@@ -111,8 +111,8 @@ fill(x, values = 0, ncolx = ncol(x))
   if you need more then assign \code{fill4 = fill5 = fill1} etc.
   The reason for this is that if more than one \code{fill} function is
   needed then they must be unique.
-  For example, if \eqn{M=4} then 
-  \code{xij = op ~ lop + rop + fill(mop) + fill(mop)} would reduce to 
+  For example, if \eqn{M=4} then
+  \code{xij = op ~ lop + rop + fill(mop) + fill(mop)} would reduce to
   \code{xij = op ~ lop + rop + fill(mop)}, whereas
   \code{xij = op ~ lop + rop + fill1(mop) + fill2(mop)} would retain
   all \eqn{M} terms, which is needed.
@@ -132,7 +132,7 @@ fill(x, values = 0, ncolx = ncol(x))
 %
 % Examples 1 and 2 are deliberately misspecified.
 % The output from, e.g., \code{coef(fit, matrix=TRUE)}, looks wrong but
-% is correct because the coefficients are multiplied by the zeros 
+% is correct because the coefficients are multiplied by the zeros
 % produced from \code{fill}.
 
 
@@ -152,7 +152,7 @@ fill(x, values = 0, ncolx = ncol(x))
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{vglm.control}},
   \code{\link{vglm}},
   \code{\link{multinomial}},
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
index 73c9709..e0ff456 100644
--- a/man/fisherz.Rd
+++ b/man/fisherz.Rd
@@ -8,7 +8,7 @@
 
 }
 \usage{
-fisherz(theta, bminvalue = NULL, bmaxvalue = NULL, 
+fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
         inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -77,7 +77,7 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
 \author{ Thomas W. Yee }
 
 \note{
-  Numerical instability may occur when \code{theta} is close to \eqn{-1} or 
+  Numerical instability may occur when \code{theta} is close to \eqn{-1} or
   \eqn{1}.
   One way of overcoming this is to use, e.g., \code{bminvalue}.
 
@@ -89,7 +89,7 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{rhobit}},
   \code{\link{atanh}},
diff --git a/man/fisk.Rd b/man/fisk.Rd
index 2ca33f7..0dc5174 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -3,21 +3,21 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Fisk Distribution family function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   Fisk distribution.
 
 }
 \usage{
-fisk(lscale = "loge", lshape1.a = "loge", iscale = NULL, 
-    ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), 
+fisk(lscale = "loge", lshape1.a = "loge", iscale = NULL,
+    ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
     gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75),
     zero = "shape")
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
-  
-  
+
+
   }
 
   \item{lshape1.a, lscale}{
@@ -28,7 +28,7 @@ fisk(lscale = "loge", lshape1.a = "loge", iscale = NULL,
   }
   \item{iscale, ishape1.a, imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
-  For \code{imethod = 2} a good initial value for 
+  For \code{imethod = 2} a good initial value for
   \code{iscale} is needed to obtain a good estimate for
   the other parameter.
 
@@ -50,7 +50,7 @@ fisk(lscale = "loge", lshape1.a = "loge", iscale = NULL,
   The 2-parameter Fisk (aka log-logistic) distribution is the 4-parameter
   generalized beta II distribution with shape parameter \eqn{q=p=1}.
   It is also the 3-parameter Singh-Maddala distribution
-  with shape parameter \eqn{q=1}, as well as the 
+  with shape parameter \eqn{q=1}, as well as the
   Dagum distribution with \eqn{p=1}.
   More details can be found in Kleiber and Kotz (2003).
 
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index 9c720b3..4b8aa51 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -11,7 +11,8 @@
 
 }
 \usage{
-fittedvlm(object, drop = FALSE, type.fitted = NULL, ...)
+fittedvlm(object, drop = FALSE, type.fitted = NULL,
+          percentiles = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -40,6 +41,13 @@ fittedvlm(object, drop = FALSE, type.fitted = NULL, ...)
   It is recomputed from the model after convergence.
   Note: this is an experimental feature and not all
   \pkg{VGAM} family functions have this implemented yet.
+  See \code{\link{CommonVGAMffArguments}} for more details.
+
+
+
+  }
+  \item{percentiles}{
+  See \code{\link{CommonVGAMffArguments}} for details.
 
 
 
@@ -83,10 +91,10 @@ Chambers, J. M. and T. J. Hastie (eds) (1992)
 \note{
   This function is one of several extractor functions for
   the \pkg{VGAM} package. Others include \code{coef},
-  \code{deviance}, \code{weights} and \code{constraints} etc. 
+  \code{deviance}, \code{weights} and \code{constraints} etc.
   This function is equivalent to the methods function for the
   generic function \code{fitted.values}.
-  
+
 
   If \code{fit} is a VLM or VGLM then \code{fitted(fit)} and
   \code{predict(fit, type = "response")} should be equivalent
@@ -112,7 +120,7 @@ pneumo <- transform(pneumo, let = log(exposure.time))
 fitted(fit1)
 
 # LMS quantile regression example 2
-fit2 <- vgam(BMI ~ s(age, df = c(4, 2)), 
+fit2 <- vgam(BMI ~ s(age, df = c(4, 2)),
              lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
 head(predict(fit2, type = "response"))  # Equal to the the following two:
 head(fitted(fit2))
diff --git a/man/flourbeetle.Rd b/man/flourbeetle.Rd
index 7fa2b9a..067b433 100644
--- a/man/flourbeetle.Rd
+++ b/man/flourbeetle.Rd
@@ -34,7 +34,7 @@ and their mortality measured.
 }
 \source{
 
- Bliss, C.I., 1935. 
+ Bliss, C.I., 1935.
  The calculation of the dosage-mortality curve.
  \emph{Annals of Applied Biology}, \bold{22}, 134--167.
 
@@ -49,8 +49,8 @@ and their mortality measured.
 
 %\references{
 %
-%  
-%  
+%
+%
 %
 %
 %}
diff --git a/man/foldnormUC.Rd b/man/foldnormUC.Rd
index 1ceaa55..38b0d14 100644
--- a/man/foldnormUC.Rd
+++ b/man/foldnormUC.Rd
@@ -58,7 +58,7 @@ rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
 \author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{foldnormal}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   for the formula of the probability density function and other details.
 
 
diff --git a/man/foldnormal.Rd b/man/foldnormal.Rd
index b58791e..ed44940 100644
--- a/man/foldnormal.Rd
+++ b/man/foldnormal.Rd
@@ -101,7 +101,7 @@ foldnormal(lmean = "identitylink", lsd = "loge", imean = NULL, isd = NULL,
 
 
   Johnson, N. L. (1962)
-  The folded normal distribution: 
+  The folded normal distribution:
   accuracy of estimation by maximum likelihood.
   \emph{Technometrics},
   \bold{4}, 249--256.
@@ -137,7 +137,7 @@ foldnormal(lmean = "identitylink", lsd = "loge", imean = NULL, isd = NULL,
 
 
 }
-\seealso{ 
+\seealso{
     \code{\link{rfoldnorm}},
     \code{\link{uninormal}},
     \code{\link[stats:Normal]{dnorm}},
diff --git a/man/foldsqrt.Rd b/man/foldsqrt.Rd
index 60b4797..72c638c 100644
--- a/man/foldsqrt.Rd
+++ b/man/foldsqrt.Rd
@@ -74,7 +74,7 @@ foldsqrt(theta, min = 0, max = 1, mux = sqrt(2),
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}}.
 
 
diff --git a/man/frechet.Rd b/man/frechet.Rd
index ef2b8c9..dfb20d1 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -80,7 +80,7 @@ frechet(location = 0, lscale = "loge", lshape = logoff(offset = -2),
         f(y) = ((s*b) / (y-a)^2) * exp[-(b/(y-a))^s] * [b/(y-a)]^(s-1)}
   for \eqn{y > a} and scale parameter \eqn{b > 0}.
   The positive shape parameter is \eqn{s}.
-  The cumulative distribution function is 
+  The cumulative distribution function is
   \deqn{F(y) = \exp[-(b/(y-a))^s]. }{%
         F(y) =  exp[-(b/(y-a))^s].}
   The mean of \eqn{Y} is \eqn{a + b \Gamma(1-1/s)}{a + b*gamma(1-1/s)}
@@ -103,7 +103,7 @@ frechet(location = 0, lscale = "loge", lshape = logoff(offset = -2),
 % Estimating \eqn{a} well requires a lot of data and
 % a good choice of \code{ilocation} will help speed up convergence.
 % For \code{frechet3} the default linear/additive predictors are
-% \eqn{\log(D)}{log(D)}, 
+% \eqn{\log(D)}{log(D)},
 % It would be great if the first linear/additive predictor was a direct
 % function of the location parameter, but this can run the risk that
 % the estimate is out of range (i.e., greater than \code{min(y)}).
@@ -140,7 +140,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 
-%\note{ 
+%\note{
 
 % Family function \code{frechet3} uses
 % the BFGS quasi-Newton update formula for the
diff --git a/man/freund61.Rd b/man/freund61.Rd
index 9a7a22a..6a8bc78 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -65,17 +65,17 @@ freund61(la = "loge",  lap = "loge",  lb = "loge", lbp = "loge",
   changes the parameter of the exponential life distribution
   of the \eqn{B}  component from \eqn{\beta}{beta} to
   \eqn{\beta'}{beta'}.
-  
-  
+
+
   The joint probability density function is given by
-  \deqn{f(y_1,y_2) = \alpha \beta' \exp(-\beta' y_2 - 
+  \deqn{f(y_1,y_2) = \alpha \beta' \exp(-\beta' y_2 -
                       (\alpha+\beta-\beta')y_1) }{%
-        f(y1,y2) = alpha * beta' * exp(-beta' * y2 - 
+        f(y1,y2) = alpha * beta' * exp(-beta' * y2 -
                       (alpha+beta-beta') * y1) }
   for \eqn{0 < y_1 < y_2}{0 < y1 < y2}, and
-  \deqn{f(y_1,y_2) = \beta \alpha' \exp(-\alpha' y_1 - 
+  \deqn{f(y_1,y_2) = \beta \alpha' \exp(-\alpha' y_1 -
                       (\alpha+\beta-\alpha')y_2) }{%
-        f(y1,y2) = beta * alpha' * exp(-alpha' * y1 - 
+        f(y1,y2) = beta * alpha' * exp(-alpha' * y1 -
                       (alpha+beta-alpha') * y2) }
   for \eqn{0 < y_2 < y_1}{0 < y2 < y1}.
   Here, all four parameters are positive, as well as the responses
@@ -88,13 +88,13 @@ freund61(la = "loge",  lap = "loge",  lb = "loge", lbp = "loge",
   \eqn{\alpha+\beta}{alpha+beta}. Furthermore, the
   distribution of the time from first failure to failure
   of the other component is a mixture of
-  Exponential(\eqn{\alpha'}{alpha'}) and 
+  Exponential(\eqn{\alpha'}{alpha'}) and
   Exponential(\eqn{\beta'}{beta'}) with proportions
   \eqn{\beta/(\alpha+\beta)}{beta/(alpha+beta)}
   and \eqn{\alpha/(\alpha+\beta)}{alpha/(alpha+beta)}
   respectively.
 
-  
+
   The marginal distributions are, in general, not exponential.
   By default, the linear/additive predictors are
   \eqn{\eta_1=\log(\alpha)}{eta1=log(alpha)},
@@ -102,7 +102,7 @@ freund61(la = "loge",  lap = "loge",  lb = "loge", lbp = "loge",
   \eqn{\eta_3=\log(\beta)}{eta3=log(beta)},
   \eqn{\eta_4=\log(\beta')}{eta4=log(beta')}.
 
-  
+
   A special case is when \eqn{\alpha=\alpha'}{alpha=alpha'}
   and \eqn{\beta=\beta'}{beta'=beta'}, which means that
   \eqn{y_1}{y1} and \eqn{y_2}{y2} are independent, and
@@ -110,7 +110,7 @@ freund61(la = "loge",  lap = "loge",  lb = "loge", lbp = "loge",
   \eqn{1 / \alpha}{1/alpha} and \eqn{1 / \beta}{1/beta}
   respectively.
 
-  
+
   Fisher scoring is used,
   and the initial values correspond to the MLEs of an intercept model.
   Consequently, convergence may take only one iteration.
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
index 0f7e887..71bc2c7 100644
--- a/man/gamma1.Rd
+++ b/man/gamma1.Rd
@@ -32,7 +32,7 @@ gamma1(link = "loge", zero = NULL)
   Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
   function, as in \code{\link[base:Special]{gamma}}.
   The mean of \eqn{Y} (returned as the fitted values)
-  is \eqn{\mu=shape}{mu=shape}, and the variance is 
+  is \eqn{\mu=shape}{mu=shape}, and the variance is
   \eqn{\sigma^2 = shape}{sigma^2 = shape}.
 
 
@@ -48,7 +48,7 @@ gamma1(link = "loge", zero = NULL)
   Most standard texts on statistical distributions describe
   the 1-parameter gamma distribution, e.g.,
 
-  
+
 Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
 Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
@@ -57,7 +57,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 \author{ T. W. Yee }
 \note{
-  This \pkg{VGAM} family function can handle a multiple 
+  This \pkg{VGAM} family function can handle a multiple
   responses, which is inputted as a matrix.
 
 
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index b0b74dc..6b395db 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -23,7 +23,7 @@ gamma2(lmu = "loge", lshape = "loge",
 
   }
   \item{ishape}{
-  Optional initial value for \emph{shape}. 
+  Optional initial value for \emph{shape}.
   A \code{NULL} means a value is computed internally.
   If a failure to converge occurs, try using this argument.
   This argument is ignored if used within \code{\link{cqo}}; see the
@@ -87,7 +87,7 @@ gamma2(lmu = "loge", lshape = "loge",
                (a y / \mu)^{a-1}
                \times a}{
                \mu \times \Gamma(a)}}{%
-   f(y;mu,shape) = exp(-shape * y / mu) y^(shape-1) shape^(shape) / 
+   f(y;mu,shape) = exp(-shape * y / mu) y^(shape-1) shape^(shape) /
           [mu^(shape) * gamma(shape)]}
   for
   \eqn{\mu > 0}{mu > 0},
diff --git a/man/gammaR.Rd b/man/gammaR.Rd
index 69cdd52..515b539 100644
--- a/man/gammaR.Rd
+++ b/man/gammaR.Rd
@@ -63,9 +63,9 @@ gammaR(lrate = "loge", lshape = "loge", irate = NULL,
   for \eqn{shape > 0}, \eqn{rate > 0} and \eqn{y > 0}.
   Here, \eqn{\Gamma(shape)}{gamma(shape)} is the gamma
   function, as in \code{\link[base:Special]{gamma}}.
-  The mean of \emph{Y} is \eqn{\mu = shape/rate}{mu = shape/rate} 
+  The mean of \emph{Y} is \eqn{\mu = shape/rate}{mu = shape/rate}
   (returned as the fitted values) with variance
-  \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 = 
+  \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 =
        mu^2 /shape = shape/rate^2}.
   By default, the two linear/additive predictors are
   \eqn{\eta_1 = \log(shape)}{eta1 = log(shape)} and
@@ -95,7 +95,7 @@ gammaR(lrate = "loge", lshape = "loge", irate = NULL,
 \references{
   Most standard texts on statistical distributions describe
   the 2-parameter gamma distribution, e.g.,
-  
+
 
 Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
diff --git a/man/gammahyperbola.Rd b/man/gammahyperbola.Rd
index b5f77b2..7437080 100644
--- a/man/gammahyperbola.Rd
+++ b/man/gammahyperbola.Rd
@@ -67,7 +67,7 @@ gammahyperbola(ltheta = "loge", itheta = NULL, expected = FALSE)
 \references{
 
 Reid, N. (2003)
-Asymptotics and the theory of inference. 
+Asymptotics and the theory of inference.
 \emph{Annals of Statistics},
 \bold{31}, 1695--1731.
 
diff --git a/man/garma.Rd b/man/garma.Rd
index fe6f8ca..a501a77 100644
--- a/man/garma.Rd
+++ b/man/garma.Rd
@@ -98,7 +98,7 @@ garma(link = "identitylink", p.ar.lag = 1, q.ma.lag = 0,
   Here,
   \eqn{D_t=\{x_t,\ldots,x_1,y_{t-1},\ldots,y_1,\mu_{t-1},\ldots,\mu_1\}}{
        D_t={x_t,\ldots,x_1,y_(t-1),\ldots,y_1,mu_(t-1),\ldots,mu_1}}
-  is the previous information set. 
+  is the previous information set.
   Secondly, the GARMA(\eqn{p, q}) model is defined by
   \deqn{g(\mu_t) = \eta_t = x_t^T \beta +
     \sum_{k=1}^p \phi_k (g(y_{t-k}) - x_{t-k}^T \beta) +
@@ -129,7 +129,7 @@ garma(link = "identitylink", p.ar.lag = 1, q.ma.lag = 0,
   \emph{Journal of the American Statistical Association},
   \bold{98}: 214--223.
 
-  
+
   Zeger, S. L. and Qaqish, B. (1988)
   Markov regression models for time series: a quasi-likelihood approach.
   \emph{Biometrics},
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index 2c6f6ae..6bea708 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -49,7 +49,7 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
 }
 \details{
   This function is usually used in conjunction with \code{\link{vglm}}, else
-  \code{vlm} is recommended instead.  
+  \code{vlm} is recommended instead.
   The notation \eqn{M} is used to denote the number of
   linear/additive predictors.
   This function can handle any finite \eqn{M}, and the default is to
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index c322ce6..e03bfbf 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Generalized Beta Distribution of the Second Kind }
 \description{
-  Maximum likelihood estimation of the 4-parameter 
+  Maximum likelihood estimation of the 4-parameter
   generalized beta II distribution.
 
 }
@@ -85,10 +85,10 @@ The 4-parameter generalized beta II distribution has density
   \deqn{f(y) = a y^{ap-1} / [b^{ap} B(p,q) \{1 + (y/b)^a\}^{p+q}]}{%
         f(y) = a y^(ap-1) / [b^(ap) B(p,q) (1 + (y/b)^a)^(p+q)]}
   for \eqn{a > 0}, \eqn{b > 0}, \eqn{p > 0}, \eqn{q > 0}, \eqn{y \geq 0}{y >= 0}.
-Here \eqn{B} is the beta function, and 
+Here \eqn{B} is the beta function, and
 \eqn{b} is the scale parameter \code{scale},
 while the others are shape parameters.
-The mean is 
+The mean is
   \deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(q - 1/a) / (\Gamma(p) \, \Gamma(q))}{%
         E(Y) = b  gamma(p + 1/a)  gamma(q - 1/a) / ( gamma(p)  gamma(q))}
 provided \eqn{-ap < 1 < aq}; these are returned as the fitted values.
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index 5e0556e..56bc32c 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -117,12 +117,12 @@ Rayleigh          \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The notation used here differs from Stacy (1962) and Prentice (1974).
-  Poor initial values may result in failure to converge so 
+  Poor initial values may result in failure to converge so
   if there are covariates and there are convergence problems,
   try using the \code{zero} argument (e.g., \code{zero = 2:3})
-  or the \code{ik} argument. 
+  or the \code{ik} argument.
 
 
 }
diff --git a/man/genpoisUC.Rd b/man/genpoisUC.Rd
index 0e12daf..1e0002e 100644
--- a/man/genpoisUC.Rd
+++ b/man/genpoisUC.Rd
@@ -34,7 +34,7 @@ dgenpois(x, lambda = 0, theta, log = FALSE)
   the parameter restrictions, e.g., if \eqn{\lambda > 1}{lambda > 1}.
 
 
-  
+
 % \code{pgenpois} gives the distribution function, and
 % \code{qgenpois} gives the quantile function, and
 % \code{rgenpois} generates random deviates.
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index e61db0a..f2f06aa 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -48,7 +48,7 @@ genpoisson(llambda = "rhobit", ltheta = "loge",
   \item{ishrinkage, zero}{
     See \code{\link{CommonVGAMffArguments}} for information.
 
-    
+
   }
 % \item{zero}{
 % An integer vector, containing the value 1 or 2.
@@ -89,7 +89,7 @@ probability when \eqn{\lambda} is negative.
 
 An ordinary Poisson distribution corresponds
 to \eqn{\lambda = 0}{lambda = 0}.
-The mean (returned as the fitted values) is 
+The mean (returned as the fitted values) is
 \eqn{E(Y) = \theta / (1 - \lambda)}
 and the variance is \eqn{\theta / (1 - \lambda)^3}.
 
@@ -132,7 +132,7 @@ New York, USA: Marcel Dekker.
   matches more closely with \code{lambda} of
   \code{\link[stats:Poisson]{dpois}}.
 
-  
+
 }
 
 \author{ T. W. Yee }
diff --git a/man/geometric.Rd b/man/geometric.Rd
index 6b75090..d5bee7a 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -25,8 +25,8 @@ truncgeometric(upper.limit = Inf,
 
 
   }
-  \item{expected}{ 
-  Logical. 
+  \item{expected}{
+  Logical.
   Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson.
 
 
@@ -59,9 +59,9 @@ truncgeometric(upper.limit = Inf,
   Thus the response \eqn{Y} should be a non-negative integer.
   The mean of \eqn{Y} is \eqn{E(Y) = (1-p)/p}{E(Y) = (1-prob)/prob}
   and its variance is \eqn{Var(Y) = (1-p)/p^2}{Var(Y) = (1-prob)/prob^2}.
-  The geometric distribution is a special case of the 
+  The geometric distribution is a special case of the
   negative binomial distribution (see \code{\link{negbinomial}}).
-  The geometric distribution is also a special case of the 
+  The geometric distribution is also a special case of the
   Borel distribution, which is a Lagrangian distribution.
   If \eqn{Y} has a geometric distribution with parameter \eqn{p}{prob} then
   \eqn{Y+1} has a positive-geometric distribution with the same parameter.
@@ -107,7 +107,7 @@ Help from Viet Hoang Quoc is gratefully acknowledged.
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{negbinomial}},
   \code{\link[stats]{Geometric}},
   \code{\link{betageometric}},
diff --git a/man/gev.Rd b/man/gev.Rd
index cbc6292..ae4ae97 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -41,7 +41,7 @@ gevff(llocation = "identitylink", lscale = "loge",
   For technical reasons (see \bold{Details}) it is a good idea
   for \eqn{A = 0.5}.
 
- 
+
   }
 
 
@@ -151,7 +151,7 @@ gevff(llocation = "identitylink", lscale = "loge",
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   A specifying which
   linear/additive predictors are modelled as intercepts only.
   The values can be from the set \{1,2,3\} corresponding
@@ -174,7 +174,7 @@ gevff(llocation = "identitylink", lscale = "loge",
   Here, \eqn{x_+ = \max(x,0)}{x_+ = max(x,0)}.
   The \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi} are known as the
   \emph{location}, \emph{scale} and \emph{shape} parameters respectively.
-  The cases 
+  The cases
   \eqn{\xi>0}{xi>0},
   \eqn{\xi<0}{xi<0},
   \eqn{\xi = 0}{xi = 0}
@@ -235,7 +235,7 @@ gevff(llocation = "identitylink", lscale = "loge",
 
 
 }
-\references{ 
+\references{
   Yee, T. W. and Stephenson, A. G. (2007)
   Vector generalized linear and additive extreme value models.
   \emph{Extremes}, \bold{10}, 1--19.
@@ -254,13 +254,13 @@ gevff(llocation = "identitylink", lscale = "loge",
 
   Smith, R. L. (1985)
   Maximum likelihood estimation in a class of nonregular cases.
-  \emph{Biometrika}, \bold{72}, 67--90. 
+  \emph{Biometrika}, \bold{72}, 67--90.
 
 
 }
 \author{ T. W. Yee }
 
-\note{ 
+\note{
   The \pkg{VGAM} family function \code{gev} can handle a multivariate
   (matrix) response, cf. multiple responses.
   If so, each row of the matrix is sorted into
@@ -268,7 +268,7 @@ gevff(llocation = "identitylink", lscale = "loge",
   With a vector or one-column matrix response using
   \code{gevff} will give the same result but be faster and it handles
   the \eqn{\xi = 0}{xi = 0} case.
-  The function \code{gev} implements Tawn (1988) while 
+  The function \code{gev} implements Tawn (1988) while
   \code{gevff} implements Prescott and Walden (1980).
 
 
@@ -289,20 +289,20 @@ gevff(llocation = "identitylink", lscale = "loge",
   accurately unless there is a lot of data.
   Convergence is slow when \eqn{\xi}{xi} is near \eqn{-0.5}.
   Given many explanatory variables, it is often a good idea
-  to make sure \code{zero = 3}. 
+  to make sure \code{zero = 3}.
   The range restrictions of the parameter \eqn{\xi}{xi} are not
   enforced; thus it is possible for a violation to occur.
 
 
   Successful convergence often depends on having a reasonably good initial
   value for \eqn{\xi}{xi}. If failure occurs try various values for the
-  argument \code{ishape}, and if there are covariates, 
+  argument \code{ishape}, and if there are covariates,
   having \code{zero = 3} is advised.
 
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{rgev}},
   \code{\link{gumbel}},
   \code{\link{gumbelff}},
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
index be7a64c..4b42eff 100644
--- a/man/gevUC.Rd
+++ b/man/gevUC.Rd
@@ -121,7 +121,7 @@ plot(x, dgev(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0,1),
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
 abline(h = 0, col = "blue", lty = 2)
-lines(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), 
+lines(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi),
       dgev(qgev(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi),
       col = "purple", lty = 3, type = "h")
 lines(x, pgev(x, loc, sigma, xi), type = "l", col = "orange")
diff --git a/man/gew.Rd b/man/gew.Rd
index 092fea7..6cf879c 100644
--- a/man/gew.Rd
+++ b/man/gew.Rd
@@ -51,7 +51,7 @@ dollars deflated by \eqn{P_2}.
   Here,
   \eqn{P_1 =} Implicit price deflator of producers durable
   equipment (base 1947),
-  \eqn{P_2 =} Implicit price deflator of G.N.P. 
+  \eqn{P_2 =} Implicit price deflator of G.N.P.
   (base 1947),
   \eqn{P_3 =} Depreciation expense deflator = ten years
   moving average of wholesale price index of metals and metal
diff --git a/man/golf.Rd b/man/golf.Rd
index caabb70..6d64027 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -45,7 +45,7 @@ golf(theta, lambda = 1, cutpoint = NULL,
 
 }
 \details{
-  The gamma-ordinal link function (GOLF) can be applied to a 
+  The gamma-ordinal link function (GOLF) can be applied to a
   parameter lying in the unit interval.
   Its purpose is to link cumulative probabilities associated with
   an ordinal response coming from an underlying 2-parameter gamma
@@ -65,7 +65,7 @@ golf(theta, lambda = 1, cutpoint = NULL,
 }
 \references{
   Yee, T. W. (2012)
-  \emph{Ordinal ordination with normalizing link functions for count data}, 
+  \emph{Ordinal ordination with normalizing link functions for count data},
   (in preparation).
 
 
@@ -89,12 +89,12 @@ golf(theta, lambda = 1, cutpoint = NULL,
 
 }
 \section{Warning }{
-  Prediction may not work on \code{\link{vglm}} or 
+  Prediction may not work on \code{\link{vglm}} or
   \code{\link{vgam}} etc. objects if this link function is used.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{gamma2}},
   \code{\link{polf}},
diff --git a/man/gompertz.Rd b/man/gompertz.Rd
index 15628d6..8f86add 100644
--- a/man/gompertz.Rd
+++ b/man/gompertz.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Gompertz Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   Gompertz distribution.
 
 }
@@ -14,7 +14,7 @@ gompertz(lscale = "loge", lshape = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{nowarning}{ Logical. Suppress a warning? 
+  \item{nowarning}{ Logical. Suppress a warning?
   Ignored for \pkg{VGAM} 0.9-7 and higher.
 
 
diff --git a/man/gpd.Rd b/man/gpd.Rd
index e2d3ee8..34bf41c 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -92,7 +92,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
 
 
   }
-% \item{rshape}{ 
+% \item{rshape}{
 % Numeric, of length 2.
 % Range of \eqn{\xi}{xi} if \code{lshape = "extlogit"} is chosen.
 % The default values ensures the algorithm works (\eqn{\xi > -0.5}{xi > -0.5})
@@ -212,7 +212,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   helps handle the problem of straying outside the parameter space.
 
 
-}  
+}
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
   The object is used by modelling functions such as \code{\link{vglm}}
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index a454914..ea136c2 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -118,7 +118,7 @@ plot(x, dgpd(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0, 1),
      main = "Blue is density, red is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
 abline(h = 0, col = "blue", lty = 2)
-lines(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), 
+lines(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi),
       dgpd(qgpd(seq(0.05, 0.95, by = 0.05), loc, sigma, xi), loc, sigma, xi),
       col = "purple", lty = 3, type = "h")
 lines(x, pgpd(x, loc, sigma, xi), type = "l", col = "red")
diff --git a/man/grain.us.Rd b/man/grain.us.Rd
index 44688f8..2d9c78e 100644
--- a/man/grain.us.Rd
+++ b/man/grain.us.Rd
@@ -34,7 +34,7 @@ Ahn and Reinsel (1988).
 
 Ahn, S. K  and Reinsel, G. C. (1988)
 Nested reduced-rank autoregressive models for multiple time series.
-\emph{Journal of the American Statistical Association}, 
+\emph{Journal of the American Statistical Association},
 \bold{83}, 849--856.
 
 }
diff --git a/man/grc.Rd b/man/grc.Rd
index e41b178..4b7ff08 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -10,7 +10,7 @@
   and more generally, row-column interaction models (RCIMs).
   RCIMs allow for unconstrained quadratic ordination (UQO).
 
-  
+
 
 }
 \usage{
@@ -22,7 +22,7 @@ rcim(y, family = poissonff, Rank = 0, M1 = NULL,
      rprefix = "Row.", cprefix = "Col.", iprefix = "X2.",
      offset = 0, str0 = if (Rank) 1 else NULL,
      summary.arg = FALSE, h.step = 0.0001,
-     rbaseline = 1, cbaseline = 1, 
+     rbaseline = 1, cbaseline = 1,
      has.intercept = TRUE, M = NULL,
      rindex = 2:nrow(y), cindex = 2:ncol(y), iindex = 2:nrow(y), ...)
 }
@@ -69,7 +69,7 @@ rcim(y, family = poissonff, Rank = 0, M1 = NULL,
 
   }
   \item{weights}{
-  Prior weights. Fed into 
+  Prior weights. Fed into
   \code{\link{rrvglm}}
   or
   \code{\link{vglm}}.
@@ -92,19 +92,19 @@ rcim(y, family = poissonff, Rank = 0, M1 = NULL,
 
 
   }
-  \item{rprefix, cprefix, iprefix}{ 
+  \item{rprefix, cprefix, iprefix}{
   Character, for rows and columns and interactions respectively.
   For labelling the indicator variables.
 
 
   }
-  \item{offset}{ 
+  \item{offset}{
   Numeric. Either a matrix of the right dimension, else
   a single numeric expanded into such a matrix.
 
 
   }
-  \item{str0}{ 
+  \item{str0}{
   Ignored if \code{Rank = 0}, else
   an integer from the set \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
   specifying the row that is used as the structural zero.
@@ -189,12 +189,12 @@ rcim(y, family = poissonff, Rank = 0, M1 = NULL,
   the product of two `thin' matrices.
   Indeed, \code{A} and \code{C} have \code{Rank} columns.
 By default, the first column and row of the interaction matrix
-\code{A \%*\% t(C)} is chosen 
+\code{A \%*\% t(C)} is chosen
 to be structural zeros, because \code{str0 = 1}.
-This means the first row of \code{A} are all zeros. 
+This means the first row of \code{A} are all zeros.
 
 
-This function uses \code{options()$contrasts} to set up the row and 
+This function uses \code{options()$contrasts} to set up the row and
 column indicator variables.
 In particular, Equation (4.5) of Yee and Hastie (2003) is used.
 These are called \code{Row.} and \code{Col.} (by default) followed
@@ -208,7 +208,7 @@ row and column has a dummy variable associated with it.
 The first row and first column are baseline.
 The power of \code{rcim()} is that many \pkg{VGAM} family functions
 can be assigned to its \code{family} argument.
-For example, 
+For example,
 \code{\link{uninormal}} fits something in between a 2-way
 ANOVA with and without interactions,
 \code{\link{alaplace2}} with \code{Rank = 0} is something like
@@ -254,7 +254,7 @@ and tolerance matrix.
   a rank-0 \code{rcim()} object is of class \code{\link{rcim0-class}},
   else of class \code{"rcim"} (this may change in the future).
 
-  
+
 % Currently,
 % a rank-0 \code{rcim()} object is of class \code{\link{vglm-class}},
 % but it may become of class \code{"rcim"} one day.
@@ -282,12 +282,12 @@ of cross-classifications having ordered categories.
 \bold{76}, 320--334.
 
 
-%Documentation accompanying the \pkg{VGAM} package at 
+%Documentation accompanying the \pkg{VGAM} package at
 %\url{http://www.stat.auckland.ac.nz/~yee}
 %contains further information about the setting up of the
 %indicator variables.
 
- 
+
 }
 \author{
 Thomas W. Yee, with
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index 90e9999..80e49ad 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -18,28 +18,28 @@ gumbelff(llocation = "identitylink", lscale = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{llocation, lscale}{ 
+  \item{llocation, lscale}{
   Parameter link functions for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{iscale}{ 
-  Numeric and positive. 
+  \item{iscale}{
+  Numeric and positive.
   Optional initial value for \eqn{\sigma}{sigma}.
   Recycled to the appropriate length.
   In general, a larger value is better than a smaller value.
-  A \code{NULL} means an initial value is computed internally. 
+  A \code{NULL} means an initial value is computed internally.
 
 
   }
 
-  \item{R}{ 
+  \item{R}{
   Numeric. Maximum number of values possible.
-  See \bold{Details} for more details. 
+  See \bold{Details} for more details.
 
   }
-  \item{percentiles}{ 
+  \item{percentiles}{
   Numeric vector of percentiles used
   for the fitted values. Values should be between 0 and 100.
   This argument uses the argument \code{R} if assigned.
@@ -49,23 +49,23 @@ gumbelff(llocation = "identitylink", lscale = "loge",
 % This argument is ignored if \code{mean = TRUE}.
 
   }
-  \item{mpv}{ 
+  \item{mpv}{
   Logical. If \code{mpv = TRUE} then the \emph{median predicted value} (MPV)
   is computed and returned as the (last) column of the fitted values.
   This argument is ignored if \code{percentiles = NULL}.
-  See \bold{Details} for more details. 
+  See \bold{Details} for more details.
 
 % This argument is ignored if \code{mean = TRUE}.
 
   }
-% \item{mean}{ 
+% \item{mean}{
 % Logical. If \code{TRUE}, the mean is computed and returned
 % as the fitted values. This argument overrides the
 % \code{percentiles} and \code{mpv} arguments.
-% See \bold{Details} for more details. 
+% See \bold{Details} for more details.
 
 % }
-  \item{zero}{ 
+  \item{zero}{
   A vector specifying which linear/additive predictors
   are modelled as intercepts only.  The value (possibly values) can
   be from the set \{1, 2\} corresponding respectively to \eqn{\mu}{mu}
@@ -73,12 +73,12 @@ gumbelff(llocation = "identitylink", lscale = "loge",
   are modelled as a linear combination of the explanatory variables.
   See \code{\link{CommonVGAMffArguments}} for more details.
 
-  
+
 
   }
 }
 \details{
-  The Gumbel distribution is a generalized extreme value (GEV) 
+  The Gumbel distribution is a generalized extreme value (GEV)
   distribution with \emph{shape} parameter \eqn{\xi = 0}{xi = 0}.
   Consequently it is more easily estimated than the GEV.
   See \code{\link{gev}} for more details.
@@ -132,7 +132,7 @@ gumbelff(llocation = "identitylink", lscale = "loge",
 
   Rosen, O. and Cohen, A. (1996)
   Extreme percentile regression.
-  In: Haerdle, W. and Schimek, M. G. (eds.), 
+  In: Haerdle, W. and Schimek, M. G. (eds.),
   \emph{Statistical Theory and Computational Aspects of Smoothing:
   Proceedings of the COMPSTAT '94 Satellite Meeting held in
   Semmering, Austria, 27--28 August 1994}, pp.200--214,
@@ -156,10 +156,10 @@ gumbelff(llocation = "identitylink", lscale = "loge",
   however, the data are the highest sea level measurements recorded each
   year (it therefore equates to the median predicted value or MPV).
 
-  
+
 }
 
-\note{ 
+\note{
   Like many other usual \pkg{VGAM} family functions,
   \code{gumbelff()} handles (independent) multiple responses.
 
@@ -176,10 +176,10 @@ gumbelff(llocation = "identitylink", lscale = "loge",
   \code{na.action = na.pass}. The response matrix needs to be
   padded with any missing values. With a multivariate response
   one has a matrix \code{y}, say, where
-  \code{y[, 2]} contains the second order statistics, etc. 
+  \code{y[, 2]} contains the second order statistics, etc.
+
 
 
-  
 % If a random variable \eqn{Y} has a \emph{reverse}
 % \eqn{Gumbel(\mu,\sigma)}{Gumbel(mu,sigma)} distribution then \eqn{-Y}
 % has a \eqn{Gumbel(-\mu,\sigma)}{Gumbel(-mu,sigma)} distribution.
@@ -199,7 +199,7 @@ gumbelff(llocation = "identitylink", lscale = "loge",
   \code{\link{gevff}},
   \code{\link{venice}}.
 
-  
+
 % \code{\link{ogev}},
 
 
@@ -224,7 +224,7 @@ sqrt(diag(vcov(summary(fit2))))   # Standard errors
 # Example 3: Try a nonparametric fit ---------------------
 # Use the entire data set, including missing values
 # Same as as.matrix(venice[, paste0("r", 1:10)]):
-Y <- Select(venice, "r", sort = FALSE) 
+Y <- Select(venice, "r", sort = FALSE)
 fit3 <- vgam(Y ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
              data = venice, trace = TRUE, na.action = na.pass)
 depvar(fit3)[4:5, ]  # NAs used to pad the matrix
@@ -245,7 +245,7 @@ matpoints(year, Y, pch = "*", col = "blue")
 lines(year, fitted(fit3)[, "99\%"], lwd = 2, col = "orange")
 
 # Check the 99 percentiles with a smoothing spline.
-# Nb. (1-0.99) * 365 = 3.65 is approx. 4, meaning the 4th order 
+# Nb. (1-0.99) * 365 = 3.65 is approx. 4, meaning the 4th order
 # statistic is approximately the 99 percentile.
 plot(year, Y[, 4], ylab = "Sea level (cm)", type = "n",
      main = "Orange is 99 percentile, Green is a smoothing spline")
diff --git a/man/gumbelII.Rd b/man/gumbelII.Rd
index 3bff665..16f1552 100644
--- a/man/gumbelII.Rd
+++ b/man/gumbelII.Rd
@@ -22,7 +22,7 @@ gumbelII(lscale = "loge", lshape = "loge", iscale = NULL, ishape = NULL,
 
 
   \item{lshape, lscale}{
-  Parameter link functions applied to the 
+  Parameter link functions applied to the
   (positive) shape parameter (called \eqn{s} below) and
   (positive) scale parameter (called \eqn{b} below).
   See \code{\link{Links}} for more choices.
@@ -36,7 +36,7 @@ gumbelII(lscale = "loge", lshape = "loge", iscale = NULL, ishape = NULL,
 % See \code{earg} in \code{\link{Links}} for general information.
 % }
 
-  Parameter link functions applied to the 
+  Parameter link functions applied to the
   \item{ishape, iscale}{
   Optional initial values for the shape and scale parameters.
 
@@ -57,14 +57,14 @@ gumbelII(lscale = "loge", lshape = "loge", iscale = NULL, ishape = NULL,
   argument to be the percentiles of these, e.g., 50 for median.
 
   }
-  
+
 }
 \details{
-  The Gumbel-II density for a response \eqn{Y} is 
+  The Gumbel-II density for a response \eqn{Y} is
   \deqn{f(y;b,s) = s y^{s-1} \exp[-(y/b)^s] / (b^s)}{%
         f(y;b,s) = s y^(s-1) * exp(-(y/b)^s) / [b^s]}
   for \eqn{b > 0}, \eqn{s > 0}, \eqn{y > 0}.
-  The cumulative distribution function is 
+  The cumulative distribution function is
   \deqn{F(y;b,s) = \exp[-(y/b)^{-s}].}{%
         F(y;b,s) = exp(-(y/b)^(-s)).}
   The mean of \eqn{Y} is \eqn{b \, \Gamma(1 - 1/s)}{b * gamma(1 - 1/s)}
@@ -113,7 +113,7 @@ U.S. Department of Commerce, National Bureau of Standards, USA.
 %  This function is under development to handle other censoring situations.
 %  The version of this function which will handle censored data will be
 %  called \code{cengumbelII()}. It is currently being written and will use
-%  \code{\link{SurvS4}} as input. 
+%  \code{\link{SurvS4}} as input.
 %  It should be released in later versions of \pkg{VGAM}.
 %
 %
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index 4533d78..23f2939 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -56,7 +56,7 @@ rgumbel(n, location = 0, scale = 1)
   \emph{generalized extreme value} (GEV) distribution where
   the shape parameter \eqn{\xi}{xi} = 0.
   The latter has 3 parameters, so the Gumbel distribution has two.
-  The Gumbel distribution function is 
+  The Gumbel distribution function is
   \deqn{G(y) = \exp \left( - \exp \left[ - \frac{y-\mu}{\sigma} \right]
                     \right) }{%
         G(y) =  exp( -exp[ - (y-mu)/sigma ] ) }
diff --git a/man/guplot.Rd b/man/guplot.Rd
index c8ae6ca..e367f9c 100644
--- a/man/guplot.Rd
+++ b/man/guplot.Rd
@@ -49,12 +49,12 @@ guplot.vlm(object, ...)
   distribution, respectively. Outliers may also be detected
   using this plot.
 
-  
+
   The function \code{guplot} is generic, and
   \code{guplot.default} and \code{guplot.vlm} are some
   methods functions for Gumbel plots.
 
-  
+
 }
 \value{
   A list is returned invisibly with the following components.
@@ -74,14 +74,14 @@ guplot.vlm(object, ...)
   \emph{Statistics of Extremes}.
   New York, USA: Columbia University Press.
 
-  
+
 }
 \author{ T. W. Yee }
 \note{
   The Gumbel distribution is a special case of the
   GEV distribution with shape parameter equal to zero.
 
-  
+
 }
 \seealso{
   \code{\link{gumbel}},
@@ -89,7 +89,7 @@ guplot.vlm(object, ...)
   \code{\link{gev}},
   \code{\link{venice}}.
 
-  
+
 }
 \examples{\dontrun{guplot(rnorm(500), las = 1) -> ii
 names(ii)
diff --git a/man/has.intercept.Rd b/man/has.intercept.Rd
index 279e614..76da7db 100644
--- a/man/has.intercept.Rd
+++ b/man/has.intercept.Rd
@@ -26,7 +26,7 @@ has.interceptvlm(object, form.number = 1, \dots)
 
 
   }
- \item{\dots}{Arguments that are might be passed from 
+ \item{\dots}{Arguments that are might be passed from
   one function to another.
 
  }
diff --git a/man/hormone.Rd b/man/hormone.Rd
index b912d0f..9c67289 100644
--- a/man/hormone.Rd
+++ b/man/hormone.Rd
@@ -14,7 +14,7 @@
 \usage{data(hormone)}
 \format{
   A data frame with 85 observations on the following 2 variables.
-  
+
   \describe{
     \item{\code{X}}{a numeric vector, suitable as the x-axis in
     a scatter plot.
@@ -59,7 +59,7 @@ Thus calibration might be of interest for the data.
 
 \references{
 
-  Carroll, R. J. and Ruppert, D. (1988) 
+  Carroll, R. J. and Ruppert, D. (1988)
   \emph{Transformation and Weighting in Regression}.
   New York, USA: Chapman & Hall.
 
diff --git a/man/hspider.Rd b/man/hspider.Rd
index 1eca4aa..69cfcb6 100644
--- a/man/hspider.Rd
+++ b/man/hspider.Rd
@@ -3,7 +3,7 @@
 \docType{data}
 \title{ Hunting Spider Data }
 \description{
-  Abundance of hunting spiders in a Dutch dune area. 
+  Abundance of hunting spiders in a Dutch dune area.
 }
 \usage{data(hspider)}
 \format{
@@ -89,7 +89,7 @@ coef(ahsb1)
 %# vvv          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
 %# vvv          fam = poissonff, data = hspider, Crow1posit=FALSE)
 %# vvv nos = ncol(p1 at y)
-%# vvv lvplot(p1, y=TRUE, lcol=1:nos, pch=1:nos, pcol=1:nos) 
+%# vvv lvplot(p1, y=TRUE, lcol=1:nos, pch=1:nos, pcol=1:nos)
 %# vvv Coef(p1)
 %# vvv summary(p1)
 
diff --git a/man/huber.Rd b/man/huber.Rd
index 5ebef79..11585a8 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -22,13 +22,13 @@ huber2(llocation = "identitylink", lscale = "loge",
 
 
   }
-  \item{k}{ 
+  \item{k}{
   Tuning constant.
   See \code{\link{rhuber}} for more information.
 
 
   }
-  \item{imethod, zero}{ 
+  \item{imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
   The default value of \code{zero} means the scale parameter is
   modelled as intercept-only.
@@ -44,7 +44,7 @@ huber2(llocation = "identitylink", lscale = "loge",
 
 
   By default, the mean is the first linear/additive predictor (returned
-  as the fitted values; this is the location parameter), and 
+  as the fitted values; this is the location parameter), and
   the log of the scale parameter is the second linear/additive predictor.
   The Fisher information matrix is diagonal; Fisher scoring is implemented.
 
@@ -100,7 +100,7 @@ hdata <- data.frame(x2 = sort(runif(NN)))
 hdata <- transform(hdata, y  = rhuber(NN, mu = coef1 + coef2 * x2))
 
 hdata$x2[1] <- 0.0  # Add an outlier
-hdata$y[1] <- 10  
+hdata$y[1] <- 10
 
 fit.huber2 <- vglm(y ~ x2, huber2(imethod = 3), data = hdata, trace = TRUE)
 fit.huber1 <- vglm(y ~ x2, huber1(imethod = 3), data = hdata, trace = TRUE)
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
index 92d0024..9665ef7 100644
--- a/man/huberUC.Rd
+++ b/man/huberUC.Rd
@@ -114,7 +114,7 @@ lines(Q, dhuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
 lines(Q, phuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
 phuber(Q, mu = mu) - probs  # Should be all 0s
-} 
-} 
+}
+}
 \keyword{distribution}
 
diff --git a/man/hunua.Rd b/man/hunua.Rd
index e875df2..e292d24 100644
--- a/man/hunua.Rd
+++ b/man/hunua.Rd
@@ -4,7 +4,7 @@
 \title{Hunua Ranges Data}
 \description{
   The \code{hunua} data frame has 392 rows and 18 columns.
-  Altitude is explanatory, and there are binary responses 
+  Altitude is explanatory, and there are binary responses
   (presence/absence = 1/0 respectively) for 17 plant species.
 
 
@@ -36,18 +36,18 @@
 \details{
   These were collected from the Hunua Ranges, a small forest in southern
   Auckland, New Zealand. At 392 sites in the forest, the presence/absence
-  of 17 plant species was recorded, as well as the altitude. 
+  of 17 plant species was recorded, as well as the altitude.
   Each site was of area size 200\eqn{m^2}{m^2}.
 
 
 }
 \source{
-  Dr Neil Mitchell, University of Auckland. 
+  Dr Neil Mitchell, University of Auckland.
 
 
 }
 %\references{
-%  None. 
+%  None.
 %}
 \seealso{
   \code{\link{waitakere}}.
@@ -65,6 +65,6 @@ head(predict(fit.h, hunua, type = "response"))
 fit.w <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = waitakere)
 \dontrun{
 plot(fit.w, se = TRUE, lcol = "blue", scol = "blue", add = TRUE) }
-head(predict(fit.w, hunua, type = "response"))   # Same as above? 
+head(predict(fit.w, hunua, type = "response"))   # Same as above?
 }
 \keyword{datasets}
diff --git a/man/hyperg.Rd b/man/hyperg.Rd
index a8dbce4..3439a4b 100644
--- a/man/hyperg.Rd
+++ b/man/hyperg.Rd
@@ -15,7 +15,7 @@ hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{N}{ 
+  \item{N}{
     Total number of white and black balls in the urn.
     Must be a vector with positive values, and is recycled, if necessary,
     to the same length as the response.
@@ -23,7 +23,7 @@ hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
 
 
   }
-  \item{D}{ 
+  \item{D}{
     Number of white balls in the urn.
     Must be a vector with positive values, and is recycled, if necessary,
     to the same length as the response.
@@ -32,13 +32,13 @@ hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
 
   }
 
-  \item{lprob}{ 
+  \item{lprob}{
   Link function for the probabilities.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{iprob}{ 
+  \item{iprob}{
   Optional initial value for the probabilities.
   The default is to choose initial values internally.
 
@@ -53,7 +53,7 @@ hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
   are black. A simple random sample (i.e., \emph{without} replacement) of
   \eqn{k} balls is taken.
   The response here is the sample \emph{proportion} of white balls.
-  In this document, 
+  In this document,
   \code{N} is \eqn{N=m+n},
   \code{D} is \eqn{m} (for the number of ``defectives'', in quality
   control terminology, or equivalently, the number of marked individuals).
diff --git a/man/hypersecant.Rd b/man/hypersecant.Rd
index 83a8cd4..aff2216 100644
--- a/man/hypersecant.Rd
+++ b/man/hypersecant.Rd
@@ -35,7 +35,7 @@ hypersecant01(link.theta = extlogit(min = -pi/2, max = pi/2), init.theta = NULL)
   is given by
   \deqn{f(y;\theta) = \exp(\theta y + \log(\cos(\theta ))) / (2 \cosh(\pi y/2)),}{%
         f(y; theta) = exp(theta*y + log(cos(theta))) / (2*cosh(pi*y/2)),}
-  for parameter \eqn{-\pi/2 < \theta < \pi/2}{pi/2 < theta < pi/2}
+  for parameter \eqn{-\pi/2 < \theta < \pi/2}{-pi/2 < theta < pi/2}
   and all real \eqn{y}.
   The mean of \eqn{Y} is \eqn{\tan(\theta)}{tan(theta)}
   (returned as the fitted values).
@@ -53,16 +53,18 @@ hypersecant01(link.theta = extlogit(min = -pi/2, max = pi/2), init.theta = NULL)
                          u^{-0.5+\theta/\pi} \times
                      (1-u)^{-0.5-\theta/\pi},}{%
         f(u;theta) = (cos(theta)/pi) * u^(-0.5+theta/pi) * (1-u)^(-0.5-theta/pi),}
-  for parameter \eqn{-\pi/2 < \theta < \pi/2}{pi/2 < theta < pi/2}
+  for parameter \eqn{-\pi/2 < \theta < \pi/2}{-pi/2 < theta < pi/2}
   and \eqn{0 < u < 1}.
   Then the mean of \eqn{U} is \eqn{0.5 + \theta/\pi}{0.5 + theta/pi}
   (returned as the fitted values) and the variance is
   \eqn{(\pi^2 - 4 \theta^2) / (8\pi^2)}{(pi^2 - 4*theta^2) / (8*pi^2)}.
 
 
+
   For both parameterizations Newton-Raphson is same as Fisher scoring.
 
 
+
 }
 
 \value{
@@ -90,7 +92,7 @@ Natural exponential families with quadratic variance functions.
 }
 
 \author{ T. W. Yee }
-%\note{ 
+%\note{
 
 %}
 \seealso{
diff --git a/man/hzeta.Rd b/man/hzeta.Rd
index 1d9a320..2d8e8c8 100644
--- a/man/hzeta.Rd
+++ b/man/hzeta.Rd
@@ -8,26 +8,20 @@
 
 }
 \usage{
-hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
+hzeta(lshape = "loglog", ishape = NULL, nsimEIM = 100)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{
-  Parameter link function for the parameter.
+  \item{lshape}{
+  Parameter link function for the parameter,
+  called \eqn{\alpha}{alpha} below.
   See \code{\link{Links}} for more choices.
   Here, a log-log link keeps the parameter greater than one, meaning
   the mean is finite.
 
 
   }
-  \item{ialpha}{
-  Optional initial value for the (positive) parameter. 
-  The default is to obtain an initial value internally. Use this argument
-  if the default fails.
-
-
-  }
-  \item{nsimEIM}{
+  \item{ishape,nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
@@ -57,7 +51,7 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
 
 
 }
-\references{ 
+\references{
 
     Pages 533--4 of
     Johnson N. L., Kemp, A. W. and Kotz S. (2005)
@@ -67,7 +61,7 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
 
 }
 \author{ T. W. Yee }
-%\note{ 
+%\note{
 %}
 
 
@@ -81,11 +75,11 @@ hzeta(link = "loglog", ialpha = NULL, nsimEIM = 100)
 
 }
 \examples{
-alpha <- exp(exp(-0.1))  # The parameter
-hdata <- data.frame(y = rhzeta(n = 1000, alpha))
+shape <- exp(exp(-0.1))  # The parameter
+hdata <- data.frame(y = rhzeta(n = 1000, shape))
 fit <- vglm(y ~ 1, hzeta, data = hdata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
-Coef(fit)  # Useful for intercept-only models; should be same as alpha
+Coef(fit)  # Useful for intercept-only models; should be same as shape
 c(with(hdata, mean(y)), head(fitted(fit), 1))
 summary(fit)
 }
@@ -93,18 +87,20 @@ summary(fit)
 \keyword{regression}
 
 
+
+
 %# Generate some hzeta random variates
 %set.seed(123)
-%nn = 400
-%x = 1:20
-%alpha = 1.1  # The parameter
-%probs = dhzeta(x, alpha)
+%nn <- 400
+%x <- 1:20
+%shape <- 1.1  # The parameter
+%probs <- dhzeta(x, shape)
 %\dontrun{
 %plot(x, probs, type="h", log="y")}
-%cs = cumsum(probs)
-%tab = table(cut(runif(nn), brea = c(0,cs,1)))
-%index = (1:length(tab))[tab>0]
-%y = rep(index, times=tab[index]) 
+%cs <- cumsum(probs)
+%tab <- table(cut(runif(nn), brea = c(0,cs,1)))
+%index <- (1:length(tab))[tab>0]
+%y <- rep(index, times=tab[index])
 
 
 
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index 333ad5a..f7d4e53 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -7,43 +7,30 @@
 \title{ Haight's Zeta Distribution  }
 \description{
   Density, distribution function, quantile function and random generation
-  for Haight's zeta distribution with parameter \code{alpha}.
+  for Haight's zeta distribution with parameter \code{shape}.
 
 
 }
 \usage{
-dhzeta(x, alpha, log = FALSE)
-phzeta(q, alpha, log.p = FALSE)
-qhzeta(p, alpha)
-rhzeta(n, alpha)
+dhzeta(x, shape, log = FALSE)
+phzeta(q, shape, log.p = FALSE)
+qhzeta(p, shape)
+rhzeta(n, shape)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{x, q}{
-   Vector of quantiles. For the density, it should be a vector with
-   positive integer values in order for the probabilities to be positive.
+  \item{x, q, p, n}{
+  Same meaning as in \code{\link[stats]{runif}}.
 
 
   }
-  \item{p}{vector of probabilities.}
-  \item{n}{number of observations.
-  Same as \code{\link[stats]{runif}}.
+  \item{shape}{
+   The positive shape parameter.
+   Called \eqn{\alpha}{alpha} below.
 
 
   }
-  \item{alpha}{ 
-   The parameter value. Must contain positive values and is recycled to
-   the length of \code{x} or \code{p} or \code{q} if necessary.
-
-
-  }
-  \item{log}{
-  Logical.
-  If \code{log = TRUE} then the logarithm of the density is returned.
-
-
-  }
-  \item{log.p}{
+  \item{log,log.p}{
   Same meaning as in \code{\link[stats:Normal]{pnorm}}
   or \code{\link[stats:Normal]{qnorm}}.
 
@@ -66,7 +53,7 @@ rhzeta(n, alpha)
 
 
 }
-%\references{ 
+%\references{
 %
 %    Pages 533--4 of
 %    Johnson N. L., Kemp, A. W. and Kotz S. (2005)
@@ -77,9 +64,9 @@ rhzeta(n, alpha)
 %
 %}
 \author{ T. W. Yee and Kai Huang }
-\note{ 
+\note{
   Given some response data, the \pkg{VGAM} family function
-  \code{\link{hzeta}} estimates the parameter \code{alpha}.
+  \code{\link{hzeta}} estimates the parameter \code{shape}.
 
 
 }
@@ -99,11 +86,11 @@ rhzeta(20, 2.1)
 round(1000 * dhzeta(1:8, 2))
 table(rhzeta(1000, 2))
 
-\dontrun{ alpha <- 1.1; x <- 1:10
-plot(x, dhzeta(x, alpha = alpha), type = "h", ylim = 0:1, lwd = 2,
-     sub = paste("alpha =", alpha), las = 1, col = "blue", ylab = "Probability",
+\dontrun{ shape <- 1.1; x <- 1:10
+plot(x, dhzeta(x, shape = shape), type = "h", ylim = 0:1, lwd = 2,
+     sub = paste("shape =", shape), las = 1, col = "blue", ylab = "Probability",
      main = "Haight's zeta: blue = density; orange = distribution function")
-lines(x+0.1, phzeta(x, alpha = alpha), col = "orange", lty = 3, lwd = 2,
+lines(x+0.1, phzeta(x, shape = shape), col = "orange", lty = 3, lwd = 2,
       type = "h")
 }
 }
diff --git a/man/iam.Rd b/man/iam.Rd
index 8a84d88..c30088b 100644
--- a/man/iam.Rd
+++ b/man/iam.Rd
@@ -4,7 +4,7 @@
 \title{ Index from Array to Matrix }
 \description{
   Maps the elements of an array containing symmetric positive-definite
-  matrices to a matrix with sufficient columns to hold them 
+  matrices to a matrix with sufficient columns to hold them
   (called matrix-band format.)
 
 }
@@ -14,12 +14,12 @@ iam(j, k, M, both = FALSE, diag = TRUE)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{j}{
-  An integer from the set \{\code{1:M}\} giving the row number 
+  An integer from the set \{\code{1:M}\} giving the row number
   of an element.
 
   }
   \item{k}{
-  An integer from the set \{\code{1:M}\} giving the column number 
+  An integer from the set \{\code{1:M}\} giving the column number
   of an element.
 
   }
@@ -29,32 +29,32 @@ iam(j, k, M, both = FALSE, diag = TRUE)
 
   }
   \item{both}{
-  Logical. Return both the row and column indices? 
+  Logical. Return both the row and column indices?
   See below for more details.
 
   }
   \item{diag}{
-  Logical. Return the indices for the diagonal elements? 
-  If \code{FALSE} then only the strictly upper triangular part of the matrix 
+  Logical. Return the indices for the diagonal elements?
+  If \code{FALSE} then only the strictly upper triangular part of the matrix
   elements are used.
 
   }
 }
 \details{
   Suppose we have \eqn{n} symmetric positive-definite square matrices,
-  each \eqn{M} by \eqn{M}, and 
+  each \eqn{M} by \eqn{M}, and
   these are stored in an \code{array} of dimension \code{c(n,M,M)}.
-  Then these can be more compactly represented by a 
+  Then these can be more compactly represented by a
   \code{matrix} of dimension \code{c(n,K)} where \code{K} is an integer
-  between \code{M} and \code{M*(M+1)/2} inclusive. 
+  between \code{M} and \code{M*(M+1)/2} inclusive.
   The mapping between these two representations is given by this function.
   It firstly enumerates by the diagonal elements, followed by the band
-  immediately above the diagonal, then the band above that one, etc. 
+  immediately above the diagonal, then the band above that one, etc.
   The last element is \code{(1,M)}.
   This function performs the mapping from elements \code{(j,k)}
   of symmetric positive-definite square matrices to the columns of
   another matrix representing such.
-  This is called the \emph{matrix-band} format and is used by 
+  This is called the \emph{matrix-band} format and is used by
   the \pkg{VGAM} package.
 
 
@@ -77,12 +77,12 @@ iam(j, k, M, both = FALSE, diag = TRUE)
   \item{col.index}{
   The column indices of the upper triangular part of the
   matrix (This may or may not include the diagonal elements, depending
-  on the argument \code{diagonal}). 
+  on the argument \code{diagonal}).
 
 
   }
 }
-%\references{ 
+%\references{
 %  The website \url{http://www.stat.auckland.ac.nz/~yee} contains
 %  some additional information.
 %
@@ -98,7 +98,7 @@ iam(j, k, M, both = FALSE, diag = TRUE)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{vglmff-class}}.
 
 
diff --git a/man/identitylink.Rd b/man/identitylink.Rd
index 4def63e..475ec68 100644
--- a/man/identitylink.Rd
+++ b/man/identitylink.Rd
@@ -33,7 +33,7 @@ identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
   should be available to every parameter
   estimated by the \pkg{VGAM} library. However, it usually results in
   numerical problems because the estimates lie outside the permitted
-  range. Consequently, the result may contain 
+  range. Consequently, the result may contain
   \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
 
 
@@ -70,7 +70,7 @@ identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 }
 \author{ Thomas W. Yee }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{loge}},
   \code{\link{logit}},
@@ -80,10 +80,10 @@ identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 
 }
 \examples{
-identitylink((-5):5) 
+identitylink((-5):5)
 identitylink((-5):5, deriv = 1)
 identitylink((-5):5, deriv = 2)
-negidentity((-5):5) 
+negidentity((-5):5)
 negidentity((-5):5, deriv = 1)
 negidentity((-5):5, deriv = 2)
 }
diff --git a/man/inv.binomial.Rd b/man/inv.binomial.Rd
index 69d7c19..7340aef 100644
--- a/man/inv.binomial.Rd
+++ b/man/inv.binomial.Rd
@@ -36,7 +36,7 @@ inv.binomial(lrho = extlogit(min = 0.5, max = 1),
   \deqn{f(y;\rho,\lambda) =
   \frac{ \lambda  \,\Gamma(2y+\lambda) }{\Gamma(y+1) \, \Gamma(y+\lambda+1) }
   \{ \rho(1-\rho) \}^y  \rho^{\lambda}}{%
-  f(y;rho,lambda) = 
+  f(y;rho,lambda) =
   (lambda * Gamma(2y+lambda)) * [rho*(1-rho)]^y *
   rho^lambda / (Gamma(y+1) * Gamma(y+lambda+1))}
   where \eqn{y=0,1,2,\ldots}{y=0,1,2,...} and
@@ -100,7 +100,7 @@ results in an EIM that is diagonal.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{negbinomial}},
   \code{\link{poissonff}}.
 
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index da9efcc..d43d863 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -23,14 +23,14 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
 
 
   }
-  \item{ilambda, parallel}{ 
+  \item{ilambda, parallel}{
   See \code{\link{CommonVGAMffArguments}} for more information.
   If \code{parallel = TRUE} then the constraint is not applied to the
   intercept.
 
 
   }
-  \item{imethod, ishrinkage, zero}{ 
+  \item{imethod, ishrinkage, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
 
@@ -67,7 +67,7 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
 
 
 }
-\references{ 
+\references{
 
 Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
 \emph{Continuous Univariate Distributions},
@@ -90,7 +90,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Inv.gaussian}},
   \code{\link{waldff}},
   \code{\link{bisa}}.
diff --git a/man/inv.lomax.Rd b/man/inv.lomax.Rd
index eb3277d..76d06f3 100644
--- a/man/inv.lomax.Rd
+++ b/man/inv.lomax.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Inverse Lomax Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   inverse Lomax distribution.
 }
 \usage{
@@ -22,7 +22,7 @@ inv.lomax(lscale = "loge", lshape2.p = "loge", iscale = NULL,
   }
   \item{iscale, ishape2.p, imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
-  For \code{imethod = 2} a good initial value for 
+  For \code{imethod = 2} a good initial value for
   \code{ishape2.p} is needed to obtain a good estimate for
   the other parameter.
 
diff --git a/man/inv.paralogistic.Rd b/man/inv.paralogistic.Rd
index 27673a7..8638ef8 100644
--- a/man/inv.paralogistic.Rd
+++ b/man/inv.paralogistic.Rd
@@ -3,20 +3,20 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Inverse Paralogistic Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   inverse paralogistic distribution.
 }
 \usage{
-inv.paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL, 
-    ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), 
+inv.paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
+    ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
     gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75),
     zero = "shape")
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
-  
-  
+
+
   }
 
  \item{lshape1.a, lscale}{
@@ -27,7 +27,7 @@ inv.paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
   }
   \item{iscale, ishape1.a, imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
-  For \code{imethod = 2} a good initial value for 
+  For \code{imethod = 2} a good initial value for
   \code{ishape1.a} is needed to obtain a good estimate for
   the other parameter.
 
diff --git a/man/is.buggy.Rd b/man/is.buggy.Rd
index 7927b4d..83da6e1 100644
--- a/man/is.buggy.Rd
+++ b/man/is.buggy.Rd
@@ -45,6 +45,14 @@ is.buggy.vlm(object, each.term = FALSE, ...)
   if necessary).
 
 
+
+
+  Second-generation VGAMs based on \code{\link{sm.ps}} are a
+  modern alternative to using \code{\link{s}}. It does not
+  suffer from this bug. However, G2-VGAMs require a reasonably
+  large sample size in order to work more reliably.
+
+
 }
 \value{
   The default is a single logical (\code{TRUE} if any term is
@@ -53,7 +61,7 @@ is.buggy.vlm(object, each.term = FALSE, ...)
   If the value is \code{TRUE} then I suggest replacing the VGAM
   by a similar model fitted by \code{\link{vglm}} and using
   regression splines, e.g., \code{\link[splines]{bs}},
-  \code{\link[splines]{ns}}. 
+  \code{\link[splines]{ns}}.
 
 
 
@@ -64,17 +72,20 @@ is.buggy.vlm(object, each.term = FALSE, ...)
 %}
 
 \author{ T. W. Yee }
-\note{ 
+\note{
   When the bug is fixed this function may be withdrawn, otherwise
   always return \code{FALSE}s!
 
 
+
 }
 \seealso{
   \code{\link{vgam}}.
   \code{\link{vglm}},
+  \code{\link[VGAM]{s}},
+  \code{\link[VGAM]{sm.ps}},
   \code{\link[splines]{bs}},
-  \code{\link[splines]{ns}}. 
+  \code{\link[splines]{ns}}.
 
 
 }
diff --git a/man/kendall.tau.Rd b/man/kendall.tau.Rd
index 309162b..9a2fad3 100644
--- a/man/kendall.tau.Rd
+++ b/man/kendall.tau.Rd
@@ -25,7 +25,7 @@ kendall.tau(x, y, exact = FALSE, max.n = 3000)
 }
   \item{exact}{
     Logical. If \code{TRUE} then the exact value is computed.
-    
+
 
 }
 \item{max.n}{
@@ -41,8 +41,8 @@ kendall.tau(x, y, exact = FALSE, max.n = 3000)
   Loosely, two random variables are \emph{concordant} if large values
   of one random variable are associated with large values of the
   other random variable.
-  Similarly, two random variables are \emph{disconcordant} if large values 
-  of one random variable are associated with small values of the 
+  Similarly, two random variables are \emph{disconcordant} if large values
+  of one random variable are associated with small values of the
   other random variable.
   More formally, if \code{(x[i] - x[j])*(y[i] - y[j]) > 0} then
   that comparison is concordant \eqn{(i \neq j)}.
@@ -55,7 +55,7 @@ kendall.tau(x, y, exact = FALSE, max.n = 3000)
   If there are ties then half the ties are deemed concordant and
   half disconcordant so that \eqn{(c-d)/(c+d+t)} is used.
 
-  
+
 }
 \value{
   Kendall's tau, which lies between \eqn{-1} and \eqn{1}.
diff --git a/man/kumar.Rd b/man/kumar.Rd
index 517885c..422cf58 100644
--- a/man/kumar.Rd
+++ b/man/kumar.Rd
@@ -9,7 +9,7 @@
 }
 \usage{
 kumar(lshape1 = "loge", lshape2 = "loge",
-      ishape1 = NULL,   ishape2 = NULL, grid.shape1 = c(0.4, 6.0),
+      ishape1 = NULL,   ishape2 = NULL, gshape1 = exp(2*ppoints(5) - 1),
       tol12 = 1.0e-4, zero = NULL)
 
 }
@@ -39,8 +39,14 @@ kumar(lshape1 = "loge", lshape2 = "loge",
   If so then the working weights need to handle these singularities.
 
   }
-  \item{grid.shape1}{
-  Lower and upper limits for a grid search for the first shape parameter.
+  \item{gshape1}{
+  Values for a grid search for the first shape parameter.
+  See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+% Lower and upper limits for a grid search for the first shape parameter.
+
+
 
   }
   \item{zero}{
@@ -50,7 +56,7 @@ kumar(lshape1 = "loge", lshape2 = "loge",
 }
 \details{
   The Kumaraswamy distribution has density function
-  \deqn{f(y;a = shape1,b = shape2)  = 
+  \deqn{f(y;a = shape1,b = shape2)  =
   a b y^{a-1} (1-y^{a})^{b-1}}{%
   a*b*y^(a-1)*(1-y^a)^(b-1)}
   where \eqn{0 < y < 1} and the two shape parameters,
@@ -95,7 +101,7 @@ kumar(lshape1 = "loge", lshape2 = "loge",
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{dkumar}},
   \code{\link{betaff}},
   \code{\link{simulate.vlm}}.
diff --git a/man/kumarUC.Rd b/man/kumarUC.Rd
index 2db29c4..859d465 100644
--- a/man/kumarUC.Rd
+++ b/man/kumarUC.Rd
@@ -46,7 +46,7 @@ rkumar(n, shape1, shape2)
 \author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{kumar}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   for the formula of the probability density function and other details.
 
 
diff --git a/man/lakeO.Rd b/man/lakeO.Rd
index 23963e7..bca9d96 100644
--- a/man/lakeO.Rd
+++ b/man/lakeO.Rd
@@ -19,7 +19,7 @@
   A data frame with 15 observations on the following 5 variables.
   \describe{
     \item{\code{year}}{a numeric vector,
-  the season began on 1 October of the year and ended 12 months later. 
+  the season began on 1 October of the year and ended 12 months later.
 % Hence the fishing ended around October 1989.
 
 
diff --git a/man/laplace.Rd b/man/laplace.Rd
index 9ed8de4..abaceea 100644
--- a/man/laplace.Rd
+++ b/man/laplace.Rd
@@ -92,7 +92,7 @@ Boston: Birkhauser.
 
 
 }
-\note{ 
+\note{
   This family function uses Fisher scoring.
   Convergence may be slow for non-intercept-only models;
   half-stepping is frequently required.
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
index e4e62f4..c6ebc7b 100644
--- a/man/laplaceUC.Rd
+++ b/man/laplaceUC.Rd
@@ -54,14 +54,14 @@ rlaplace(n, location = 0, scale = 1)
   The Laplace distribution is often known as the double-exponential
   distribution and, for modelling, has heavier tail
   than the normal distribution.
-  The Laplace density function is 
+  The Laplace density function is
   \deqn{f(y) = \frac{1}{2b} \exp \left( - \frac{|y-a|}{b}
                     \right) }{%
         f(y) =  (1/(2b)) exp( -|y-a|/b ) }
   where \eqn{-\infty<y<\infty}{-Inf<y<Inf},
   \eqn{-\infty<a<\infty}{-Inf<a<Inf} and
   \eqn{b>0}.
-  The mean is \eqn{a}{a} and the variance is \eqn{2b^2}. 
+  The mean is \eqn{a}{a} and the variance is \eqn{2b^2}.
 
 
   See \code{\link{laplace}}, the \pkg{VGAM} family function
diff --git a/man/latvar.Rd b/man/latvar.Rd
index 441b254..82f4fb0 100644
--- a/man/latvar.Rd
+++ b/man/latvar.Rd
@@ -30,7 +30,7 @@ latvar(object, ...)
 }
 \details{
   Latent variables occur in reduced-rank regression models,
-  as well as in quadratic and additive ordination models. 
+  as well as in quadratic and additive ordination models.
   For the latter two,
   latent variable values are often called \emph{site scores}
   by ecologists.
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
index 7c2fe02..fdb52e0 100644
--- a/man/leipnik.Rd
+++ b/man/leipnik.Rd
@@ -9,7 +9,8 @@
 
 }
 \usage{
-leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
+leipnik(lmu = "logit", llambda = logoff(offset = 1),
+        imu = NULL, ilambda = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -32,8 +33,8 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
   \mbox{Beta}( \frac{\lambda+1}{2}, \frac12 )}
   \left[ 1 +  \frac{(y-\mu)^2 }{y(1-y)}
   \right]^{ -\frac{\lambda}{2}}}{%
-  f(y;mu,lambda) = 
-  (y(1-y))^(-1/2) * (1 + (y-mu)^2 / (y*(1-y)))^(-lambda/2) / 
+  f(y;mu,lambda) =
+  (y(1-y))^(-1/2) * (1 + (y-mu)^2 / (y*(1-y)))^(-lambda/2) /
   Beta((lambda+1)/2, 1/2)}
   where \eqn{0 < y < 1} and \eqn{\lambda > -1}{lambda > -1}.
   The mean is \eqn{\mu}{mu} (returned as the fitted values)
@@ -85,25 +86,23 @@ leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
 
 }
 
-\section{Warning }{
-  If \code{llambda="identitylink"} then it is possible that the
-  \code{lambda} estimate becomes less than \eqn{-1}, i.e., out of
-  bounds. One way to stop this is to choose \code{llambda = "loge"},
-  however, \code{lambda} is then constrained to be positive.
+%\section{Warning }{
+% If \code{llambda="identitylink"} then it is possible that the
+% \code{lambda} estimate becomes less than \eqn{-1}, i.e., out of
+% bounds. One way to stop this is to choose \code{llambda = "loge"},
+% however, \code{lambda} is then constrained to be positive.
 
 
-}
+%}
 
-\seealso{ 
+\seealso{
   \code{\link{mccullagh89}}.
 
 
 }
 \examples{
 ldata <- data.frame(y = rnorm(n = 2000, mean = 0.5, sd = 0.1))  # Not proper data
-fit <- vglm(y ~ 1, leipnik(ilambda = 1), data = ldata, trace = TRUE, checkwz = FALSE)
-fit <- vglm(y ~ 1, leipnik(ilambda = 1, llambda = logoff(offset = 1)),
-            data = ldata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, leipnik(ilambda = 1), data = ldata, trace = TRUE)
 head(fitted(fit))
 with(ldata, mean(y))
 summary(fit)
@@ -119,3 +118,11 @@ sum(weights(fit, type = "work"))  # Sum of the working weights
 %fit <- vglm(y ~ 1, leipnik(ilambda = 1), tr = TRUE, cri = "c", checkwz = FALSE)
 
 
+% leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
+
+
+%fit <- vglm(y ~ 1, leipnik(ilambda = 1, llambda = logoff(offset = 1)),
+%            data = ldata, trace = TRUE, crit = "coef")
+% fit <- vglm(y ~ 1, leipnik(ilambda = 1), data = ldata, trace = TRUE, checkwz = FALSE)
+
+
diff --git a/man/lerch.Rd b/man/lerch.Rd
index f649aec..1e4cf7d 100644
--- a/man/lerch.Rd
+++ b/man/lerch.Rd
@@ -30,7 +30,7 @@ lerch(x, s, v, tolerance = 1.0e-10, iter = 100)
   }
 }
 \details{
-  The Lerch transcendental function is defined by 
+  The Lerch transcendental function is defined by
   \deqn{\Phi(x,s,v) = \sum_{n=0}^{\infty}  \frac{x^n}{(n+v)^s}}{%
     Phi(x,s,v) = sum_{n=0}^{infty} x^n / (n+v)^s}
   where \eqn{|x|<1} and
@@ -44,7 +44,7 @@ lerch(x, s, v, tolerance = 1.0e-10, iter = 100)
   See the URL below for more information.
   This function is a wrapper function for the C code described below.
 
-  
+
 }
 \value{
   Returns the value of the function evaluated at the values of
@@ -97,7 +97,7 @@ lerch(x, s, v, tolerance = 1.0e-10, iter = 100)
 
 \seealso{
     \code{\link{zeta}}.
-  
+
 
 }
 \examples{
diff --git a/man/levy.Rd b/man/levy.Rd
index e18f422..759d05d 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -36,7 +36,7 @@ levy(location = 0, lscale = "loge", iscale = NULL)
 }
 \details{
   The Levy distribution is one of three stable distributions
-  whose density function has a tractable form. 
+  whose density function has a tractable form.
   The formula for the density is
  \deqn{f(y;b) = \sqrt{\frac{b}{2\pi}}
        \exp \left( \frac{-b}{2(y - a)}
@@ -44,7 +44,7 @@ levy(location = 0, lscale = "loge", iscale = NULL)
   f(y;b) = sqrt(b / (2 pi))
        exp( -b / (2(y - a))) / (y - a)^{3/2} }
   where \eqn{a<y<\infty}{a<y<Inf} and \eqn{b>0}.
-  Note that if \eqn{a} is very close to \code{min(y)} 
+  Note that if \eqn{a} is very close to \code{min(y)}
   (where \code{y} is the response), then numerical problem will occur.
   The mean does not exist.
   The median is returned as the fitted values.
@@ -58,7 +58,7 @@ levy(location = 0, lscale = "loge", iscale = NULL)
 
 
 }
-\references{ 
+\references{
   Nolan, J. P. (2005)
   \emph{Stable Distributions: Models for Heavy Tailed Data}.
 
@@ -80,7 +80,7 @@ levy(location = 0, lscale = "loge", iscale = NULL)
 %}
 
 
-\seealso{ 
+\seealso{
   The Nolan article was at
   \code{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}.
 
@@ -116,7 +116,7 @@ c(median = with(ldata, median(y2)), fitted.median = head(fitted(fit2), 1))
 
 
 %%\eqn{\delta + \gamma \Gamma(-0.5) / (2\sqrt{\pi})}{delta +
-%%               gamma * gamma(-0.5) / (2*sqrt(pi))} 
+%%               gamma * gamma(-0.5) / (2*sqrt(pi))}
 %%where \code{gamma} is a parameter but \code{gamma()} is the gamma function.
 
 %%mygamma = exp(1)    # log link ==> 1 is the answer
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index 5d0dd73..a33a143 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -25,15 +25,15 @@ lgamma3(llocation = "identitylink", lscale = "loge", lshape = "loge",
 
   }
   \item{lshape}{
-  Parameter link function applied to 
-  the positive shape parameter \eqn{k}. 
+  Parameter link function applied to
+  the positive shape parameter \eqn{k}.
   See \code{\link{Links}} for more choices.
 
 
   }
   \item{ishape}{
   Initial value for \eqn{k}.
-  If given, it must be positive. 
+  If given, it must be positive.
   If failure to converge occurs, try some other value.
   The default means an initial value is determined internally.
 
@@ -67,7 +67,7 @@ lgamma3(llocation = "identitylink", lscale = "loge", lshape = "loge",
   For the non-standard log-gamma distribution, one replaces \eqn{y}
   by \eqn{(y-a)/b}, where \eqn{a} is the location parameter
   and \eqn{b} is the positive scale parameter.
-  Then the density function is 
+  Then the density function is
     \deqn{f(y)=\exp[k(y-a)/b - \exp((y-a)/b)] / (b \, \Gamma(k)).}{%
           f(y) = exp[k(y-a)/b - exp((y-a)/b)]/(b*gamma(k)).}
   The mean and variance of \eqn{Y} are \code{a + b*digamma(k)} (returned as
@@ -100,7 +100,7 @@ New York: Wiley.
 }
 
 \author{ T. W. Yee }
-\note{ 
+\note{
   The standard log-gamma distribution can be viewed as a
   generalization of the standard type 1 extreme value density:
   when \eqn{k = 1} the distribution of \eqn{-Y} is the standard
diff --git a/man/lindUC.Rd b/man/lindUC.Rd
index dbd2f7e..571aaf9 100644
--- a/man/lindUC.Rd
+++ b/man/lindUC.Rd
@@ -15,6 +15,11 @@
 
 
 }
+
+
+% yettodo: 20170103; use csam-23-517.pdf to write plind() and/or qlind().
+
+
 \usage{
 dlind(x, theta, log = FALSE)
 plind(q, theta, lower.tail = TRUE, log.p = FALSE)
@@ -24,7 +29,7 @@ rlind(n, theta)
 \arguments{
   \item{x, q}{vector of quantiles.}
 %  \item{p}{vector of probabilities.}
-  \item{n}{number of observations. 
+  \item{n}{number of observations.
   Same as in \code{\link[stats]{runif}}.
 
 
diff --git a/man/lindley.Rd b/man/lindley.Rd
index 7654312..6a4d1c7 100644
--- a/man/lindley.Rd
+++ b/man/lindley.Rd
@@ -38,10 +38,10 @@ lindley(link = "loge", itheta = NULL, zero = NULL)
   The density function is given by
   \deqn{f(y; \theta) = \theta^2 (1 + y) \exp(-\theta y) / (1 + \theta)}{%
         f(y; theta) = theta^2 * (1 + y) * exp(-theta * y) / (1 + theta)}
-  for \eqn{theta > 0} and \eqn{y > 0}.
+  for \eqn{\theta > 0}{theta > 0} and \eqn{y > 0}.
   The mean of \eqn{Y} (returned as the fitted values)
   is \eqn{\mu = (\theta + 2) / (\theta (\theta + 1))}{mu = (theta + 2) / (theta * (theta + 1))}.
-  The variance 
+  The variance
   is \eqn{(\theta^2 + 4 \theta + 2) / (\theta (\theta + 1))^2}{(theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2}.
 
 
@@ -70,7 +70,7 @@ Lindley distribution and its application.
 }
 \author{ T. W. Yee }
 \note{
-  This \pkg{VGAM} family function can handle multiple 
+  This \pkg{VGAM} family function can handle multiple
   responses (inputted as a matrix).
   Fisher scoring is implemented.
 
diff --git a/man/linkfun.Rd b/man/linkfun.Rd
index 07d74ff..72838dd 100644
--- a/man/linkfun.Rd
+++ b/man/linkfun.Rd
@@ -14,7 +14,7 @@ linkfun(object, ...)
 \arguments{
   \item{object}{ An object which has parameter link functions.
 
-    
+
   }
   \item{\dots}{ Other arguments fed into the specific
     methods function of the model.
diff --git a/man/linkfun.vglm.Rd b/man/linkfun.vglm.Rd
index b05150a..0e874b1 100644
--- a/man/linkfun.vglm.Rd
+++ b/man/linkfun.vglm.Rd
@@ -17,7 +17,7 @@ linkfun.vglm(object, earg = FALSE, ...)
     Return the extra arguments associated with each
     link function? If \code{TRUE}  then a list is returned.
 
-    
+
   }
   \item{\dots}{ Arguments that might be used
     in the future.
@@ -54,7 +54,7 @@ linkfun.vglm(object, earg = FALSE, ...)
 
 \author{ Thomas W. Yee }
 
-\note{ 
+\note{
   Presently,  the multinomial logit model has only
   one link function, \code{\link{multilogit}}, so a warning
   is not issued for that link.
@@ -68,7 +68,7 @@ linkfun.vglm(object, earg = FALSE, ...)
   \code{\link{linkfun}},
   \code{\link{multilogit}},
   \code{\link{vglm}}.
-  
+
 
 }
 
diff --git a/man/lino.Rd b/man/lino.Rd
index d0cc96e..3e4857d 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Generalized Beta Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 3-parameter 
+  Maximum likelihood estimation of the 3-parameter
   generalized beta distribution as proposed by Libby and Novick (1982).
 
 }
@@ -20,7 +20,7 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
 
   }
   \item{llambda}{
-  Parameter link function applied to the 
+  Parameter link function applied to the
   parameter \eqn{\lambda}{lambda}.
   See \code{\link{Links}} for more choices.
 
diff --git a/man/linoUC.Rd b/man/linoUC.Rd
index cf778c1..64e0855 100644
--- a/man/linoUC.Rd
+++ b/man/linoUC.Rd
@@ -63,7 +63,7 @@ rlino(n, shape1, shape2, lambda = 1)
 \author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{lino}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   for the formula of the probability density function and other details.
 
 
diff --git a/man/lirat.Rd b/man/lirat.Rd
index 555a676..684dfdb 100644
--- a/man/lirat.Rd
+++ b/man/lirat.Rd
@@ -14,9 +14,9 @@
     \item{\code{N}}{Litter size.}
     \item{\code{R}}{Number of dead fetuses.}
     \item{\code{hb}}{Hemoglobin level.}
-    \item{\code{grp}}{Group number. 
+    \item{\code{grp}}{Group number.
           Group 1 is the untreated (low-iron) group,
-          group 2 received injections on day 7 or day 10 only, 
+          group 2 received injections on day 7 or day 10 only,
           group 3 received injections on days 0 and 7, and
           group 4 received injections weekly.}
   }
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index 6f68878..5903751 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -32,7 +32,7 @@ lms.bcg(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
 
   \item{idf.mu, idf.sigma}{
   See \code{\link{lms.bcn}}.
-  
+
   }
   \item{ilambda, isigma}{
   See \code{\link{lms.bcn}}.
@@ -42,7 +42,7 @@ lms.bcg(percentiles = c(25, 50, 75), zero = c("lambda", "sigma"),
 }
 \details{
  Given a value of the covariate, this function applies a Box-Cox
- transformation to the response to best obtain a gamma distribution. 
+ transformation to the response to best obtain a gamma distribution.
  The parameters
  chosen to do this are estimated by maximum likelihood or penalized
  maximum likelihood.
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index 3f52f7b..7ca5ed8 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -164,7 +164,7 @@ viz. \code{zero = c(1, 3)}.
 Cole, T. J. and Green, P. J. (1992)
 Smoothing Reference Centile Curves: The LMS Method and
 Penalized Likelihood.
-\emph{Statistics in Medicine}, 
+\emph{Statistics in Medicine},
 \bold{11}, 1305--1319.
 
 
@@ -223,7 +223,7 @@ Quantile regression via vector generalized additive models.
   Also, the estimate may diverge quickly near the solution,
   in which case try prematurely
   stopping the iterations by assigning \code{maxits} to be the iteration
-  number corresponding to the highest likelihood value. 
+  number corresponding to the highest likelihood value.
 
 
   One trick is to fit a simple model and use it to provide
@@ -231,7 +231,7 @@ Quantile regression via vector generalized additive models.
   examples below.
 
 
-} 
+}
 \seealso{
   \code{\link{lms.bcg}},
   \code{\link{lms.yjn}},
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index 7f504a9..7a9f898 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -4,7 +4,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ LMS Quantile Regression with a Yeo-Johnson Transformation to Normality }
 \description{
-  LMS quantile regression with the Yeo-Johnson transformation 
+  LMS quantile regression with the Yeo-Johnson transformation
   to normality.
 }
 \usage{
@@ -22,14 +22,14 @@ lms.yjn2(percentiles=c(25,50,75), zero = c("lambda", "sigma"),
 \arguments{
 
   \item{percentiles}{
-  A numerical vector containing values between 0 and 100, 
+  A numerical vector containing values between 0 and 100,
   which are the quantiles. They will be returned as `fitted values'.
 
   }
   \item{zero}{
   See \code{\link{lms.bcn}}.
 
-  } 
+  }
   \item{llambda, lmu, lsigma}{
   See \code{\link{lms.bcn}}.
 
@@ -48,7 +48,7 @@ lms.yjn2(percentiles=c(25,50,75), zero = c("lambda", "sigma"),
   Number of abscissae used in the Gaussian integration
   scheme to work out elements of the weight matrices.
   The values given are the possible choices, with the first value
-  being the default. 
+  being the default.
   The larger the value, the more accurate the approximation is likely
   to be but involving more computational expense.
 
@@ -57,8 +57,8 @@ lms.yjn2(percentiles=c(25,50,75), zero = c("lambda", "sigma"),
   A value to be added to the response y, for the purpose
   of centering the response before fitting the model to the data.
   The default value, \code{NULL}, means \code{-median(y)} is used, so that
-  the response actually used has median zero. The \code{yoffset} is 
-  saved on the object and used during prediction. 
+  the response actually used has median zero. The \code{yoffset} is
+  saved on the object and used during prediction.
 
   }
   \item{diagW}{
@@ -67,7 +67,7 @@ lms.yjn2(percentiles=c(25,50,75), zero = c("lambda", "sigma"),
   be positive-definite. Using the diagonal elements of this matrix results
   in a higher chance of it being positive-definite, however convergence will
   be very slow.
-  If \code{TRUE}, then the first \code{iters.diagW} iterations will 
+  If \code{TRUE}, then the first \code{iters.diagW} iterations will
   use the diagonal of the expected information matrix.
   The default is \code{FALSE}, meaning faster convergence.
 
@@ -102,7 +102,7 @@ lms.yjn2(percentiles=c(25,50,75), zero = c("lambda", "sigma"),
 
 
 }
-\references{ 
+\references{
 Yeo, I.-K. and Johnson, R. A. (2000)
 A new family of power transformations to improve normality or symmetry.
 \emph{Biometrika},
@@ -127,7 +127,7 @@ Heidelberg: Physica-Verlag.
 
 }
 \author{ Thomas W. Yee }
-\note{ 
+\note{
 The response may contain both positive and negative values.
 In contrast, the LMS-Box-Cox-normal and LMS-Box-Cox-gamma
 methods only handle a positive response because the
@@ -145,12 +145,12 @@ In that case, try different starting values.
 
 
 The generic function \code{predict}, when applied to a
-\code{lms.yjn} fit, does not add back the \code{yoffset} value. 
+\code{lms.yjn} fit, does not add back the \code{yoffset} value.
 
 
-} 
-\seealso{ 
-  \code{\link{lms.bcn}}, 
+}
+\seealso{
+  \code{\link{lms.bcn}},
   \code{\link{lms.bcg}},
   \code{\link{qtplot.lmscreg}},
   \code{\link{deplot.lmscreg}},
diff --git a/man/logF.Rd b/man/logF.Rd
index aa5357a..305c4e9 100644
--- a/man/logF.Rd
+++ b/man/logF.Rd
@@ -80,7 +80,7 @@
 %
 %}
 
-%\note{ 
+%\note{
 %}
 
 \seealso{
diff --git a/man/logF.UC.Rd b/man/logF.UC.Rd
index 946f30d..6d15900 100644
--- a/man/logF.UC.Rd
+++ b/man/logF.UC.Rd
@@ -51,14 +51,14 @@ dlogF(x, shape1, shape2, log = FALSE)
 
 }
 
-%\references{ 
+%\references{
 %
 %
 %
 %}
 
 \author{ T. W. Yee }
-%\note{ 
+%\note{
 %
 %}
 
diff --git a/man/logUC.Rd b/man/logUC.Rd
index b868b42..64750b8 100644
--- a/man/logUC.Rd
+++ b/man/logUC.Rd
@@ -2,41 +2,39 @@
 \alias{Log}
 \alias{dlog}
 \alias{plog}
-% \alias{qlog}
+\alias{qlog}
 \alias{rlog}
 \title{ Logarithmic Distribution }
 \description{
   Density, distribution function,
+  quantile function,
   and random generation
   for the logarithmic distribution.
 
-% quantile function
 
 
 }
 \usage{
-dlog(x, prob, log = FALSE)
-plog(q, prob, log.p = FALSE)
-rlog(n, prob, Smallno = 1.0e-6)
+dlog(x, shape, log = FALSE)
+plog(q, shape, log.p = FALSE)
+qlog(p, shape)
+rlog(n, shape)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{x, q}{
-   Vector of quantiles. For the density, it should be a vector with
-   positive integer values in order for the probabilities to be positive.
+  \item{x, q, p, n}{
+  Same interpretation as in \code{\link[stats]{runif}}.
 
 
   }
-% \item{p}{vector of probabilities.}
-  \item{n}{number of observations.
-  Same as in \code{\link[stats]{runif}}.
+  \item{shape}{
+   The shape parameter value \eqn{c} described in \code{\link{logff}}.
+%  Here it is called \code{shape} because \eqn{0<c<1} is the range.
 
 
-  }
-  \item{prob}{ 
-   The parameter value \eqn{c} described in in \code{\link{logff}}.
-   Here it is called \code{prob} because \eqn{0<c<1} is the range.
-   For \code{rlog()} this parameter must be of length 1.
+
+%  For \code{rlog()} this parameter must be of length 1.
+
 
 
   }
@@ -47,13 +45,6 @@ rlog(n, prob, Smallno = 1.0e-6)
 
 
   }
-  \item{Smallno}{
-  Numeric, a small value used by the rejection method for determining
-  the upper limit of the distribution.
-  That is, \code{plog(U, prob) > 1-Smallno} where \code{U} is the upper limit.
-
-
-  }
 }
 \details{
    The details are given in \code{\link{logff}}.
@@ -62,15 +53,15 @@ rlog(n, prob, Smallno = 1.0e-6)
 }
 \value{
   \code{dlog} gives the density,
-  \code{plog} gives the distribution function, and
+  \code{plog} gives the distribution function,
+  \code{qlog} gives the quantile function, and
   \code{rlog} generates random deviates.
 
 
-% \code{qlog} gives the quantile function, and
 
 
 }
-\references{ 
+\references{
 
 Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
@@ -79,9 +70,9 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   Given some response data, the \pkg{VGAM} family function
-  \code{\link{logff}} estimates the parameter \code{prob}.
+  \code{\link{logff}} estimates the parameter \code{shape}.
   For \code{plog()}, if argument \code{q} contains large values
   and/or \code{q} is long in length
   then the memory requirements may be very high.
@@ -92,7 +83,9 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 
 \seealso{
-  \code{\link{logff}}.
+  \code{\link{logff}},
+  \code{\link{Oilog}},
+  \code{\link{Otlog}}.
 
 
 }
@@ -100,10 +93,13 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 dlog(1:20, 0.5)
 rlog(20, 0.5)
 
-\dontrun{ prob <- 0.8; x <- 1:10
-plot(x, dlog(x, prob = prob), type = "h", ylim = 0:1,
-     sub = "prob=0.8", las = 1, col = "blue", ylab = "Probability",
+\dontrun{ shape <- 0.8; x <- 1:10
+plot(x, dlog(x, shape = shape), type = "h", ylim = 0:1,
+     sub = "shape=0.8", las = 1, col = "blue", ylab = "shape",
      main = "Logarithmic distribution: blue=density; orange=distribution function")
-lines(x + 0.1, plog(x, prob = prob), col = "orange", lty = 3, type = "h") }
+lines(x + 0.1, plog(x, shape = shape), col = "orange", lty = 3, type = "h") }
 }
 \keyword{distribution}
+
+
+
diff --git a/man/logc.Rd b/man/logc.Rd
index 21b720a..3dddce8 100644
--- a/man/logc.Rd
+++ b/man/logc.Rd
@@ -74,7 +74,7 @@ logc(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{loge}},
     \code{\link{cloglog}},
diff --git a/man/loge.Rd b/man/loge.Rd
index 0a63cba..7b2b19b 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -91,7 +91,7 @@ logneg(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{explink}},
     \code{\link{logit}},
diff --git a/man/logff.Rd b/man/logff.Rd
index 10e00e2..81aa654 100644
--- a/man/logff.Rd
+++ b/man/logff.Rd
@@ -7,25 +7,19 @@
 
 }
 \usage{
-logff(link = "logit", init.c = NULL, zero = NULL)
+logff(lshape = "logit", gshape = ppoints(8), zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{
+  \item{lshape}{
   Parameter link function for the parameter \eqn{c},
   which lies between 0 and 1.
   See \code{\link{Links}} for more choices and information.
+  Soon \code{logfflink()} will hopefully be available for event-rate data.
 
 
   }
-  \item{init.c}{
-  Optional initial value for the \eqn{c} parameter.
-  If given, it often pays to start with a larger value, e.g., 0.95.
-  The default is to choose an initial value internally.
-
-
-  }
-  \item{zero}{
+  \item{gshape, zero}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
 
@@ -39,7 +33,8 @@ logff(link = "logit", init.c = NULL, zero = NULL)
   Its probability function is
   \eqn{f(y) = a c^y / y}{f(y) = a * c^y / y}, for
   \eqn{y=1,2,3,\ldots}{y=1,2,3,...},
-  where \eqn{0 < c < 1}, and \eqn{a = -1 / \log(1-c)}{a = -1 / log(1-c)}.
+  where \eqn{0 < c < 1} (called \code{shape}),
+  and \eqn{a = -1 / \log(1-c)}{a = -1 / log(1-c)}.
   The mean is \eqn{a c/(1-c)}{a*c/(1-c)} (returned as the fitted values)
   and variance is \eqn{a c (1-ac) /(1-c)^2}{a*c*(1-a*c)/(1-c)^2}.
 
@@ -52,7 +47,7 @@ logff(link = "logit", init.c = NULL, zero = NULL)
 
 
 }
-\references{ 
+\references{
 
 
 Chapter 7 of
@@ -85,8 +80,11 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{rlog}},
+  \code{\link{oalog}},
+  \code{\link{oilog}},
+  \code{\link{otlog}},
   \code{\link[base:Log]{log}},
   \code{\link{loge}},
   \code{\link{logoff}},
@@ -96,7 +94,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 }
 \examples{
-ldata <- data.frame(y = rlog(n = 1000, prob = logit(0.2, inverse = TRUE)))
+ldata <- data.frame(y = rlog(n = 1000, shape = logit(0.2, inverse = TRUE)))
 fit <- vglm(y ~ 1, logff, data = ldata, trace = TRUE, crit = "c")
 coef(fit, matrix = TRUE)
 Coef(fit)
@@ -113,8 +111,8 @@ corbet <- data.frame(nindiv = 1:24,
                                14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3))
 fit <- vglm(nindiv ~ 1, logff, data = corbet, weights = ofreq)
 coef(fit, matrix = TRUE)
-chat <- Coef(fit)["c"]
-pdf2 <- dlog(x = with(corbet, nindiv), prob = chat)
+shapehat <- Coef(fit)["shape"]
+pdf2 <- dlog(x = with(corbet, nindiv), shape = shapehat)
 print(with(corbet, cbind(nindiv, ofreq, fitted = pdf2 * sum(ofreq))), digits = 1)
 }
 \keyword{models}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index 7d37c5b..22ba923 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -81,7 +81,7 @@ logistic(llocation = "identitylink", lscale = "loge",
 
 
 }
-\references{ 
+\references{
 
 Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
 \emph{Continuous Univariate Distributions},
diff --git a/man/logit.Rd b/man/logit.Rd
index 16ea190..3d343e8 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -39,8 +39,8 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 
   \item{min, max}{
   For \code{extlogit},
-  \code{min} gives \eqn{A}, 
-  \code{max} gives \eqn{B}, and for out of range values, 
+  \code{min} gives \eqn{A},
+  \code{max} gives \eqn{B}, and for out of range values,
   \code{bminvalue} and \code{bmaxvalue}.
 
   }
@@ -54,7 +54,7 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 }
 \details{
   The logit link function is very commonly used for parameters that
-  lie in the unit interval. 
+  lie in the unit interval.
   Numerical values of \code{theta} close to 0 or 1 or out of range
   result in
   \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
@@ -69,7 +69,7 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
   logit function.
   Numerical values of \code{theta} close to \eqn{A} or \eqn{B} or out of range
   result in
-  \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. 
+  \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
   However these can be replaced by values \eqn{bminvalue} and
   \eqn{bmaxvalue} first before computing the link function.
 
@@ -115,7 +115,7 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{logitoffsetlink}},
     \code{\link{probit}},
diff --git a/man/logitoffsetlink.Rd b/man/logitoffsetlink.Rd
index 394b6e2..8290213 100644
--- a/man/logitoffsetlink.Rd
+++ b/man/logitoffsetlink.Rd
@@ -83,7 +83,7 @@ logitoffsetlink(theta, offset = 0, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{logit}}.
 
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index 0d79bac..bd0996f 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -38,9 +38,9 @@ logitlaplace1(tau = NULL, llocation = "logit",
   However, this argument should be left unchanged with
   count data because it restricts the quantiles to be positive.
   With proportions data  \code{llocation} can be assigned a link such as
-  \code{\link{logit}}, 
-  \code{\link{probit}}, 
-  \code{\link{cloglog}}, 
+  \code{\link{logit}},
+  \code{\link{probit}},
+  \code{\link{cloglog}},
   etc.
 
 
@@ -99,7 +99,7 @@ logitlaplace1(tau = NULL, llocation = "logit",
   The minimum and maximum values possible in the quantiles.
   These argument are effectively ignored by default since
   \code{\link{loge}} keeps all quantiles positive.
-  However, if 
+  However, if
   \code{llocation = logoff(offset = 1)}
   then it is possible that the fitted quantiles have value 0
   because \code{minquantile = 0}.
@@ -178,7 +178,7 @@ Log-Laplace distributions.
 
 
 }
-\note{ 
+\note{
   The form of input for \code{\link{logitlaplace1}} as response
   is a vector of proportions (values in \eqn{[0,1]}) and the
   number of trials is entered into the \code{weights} argument of
@@ -206,7 +206,7 @@ mytau <- c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
 # halfstepping is usual:
 fitp <- vglm(y ~ sm.bs(x2, df = mydof), data = adata, trace = TRUE,
             loglaplace1(tau = mytau, parallel.locat = TRUE))
- 
+
 \dontrun{
 par(las = 1)  # Plot on a log1p() scale
 mylwd <- 1.5
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
index 5483a6e..4033714 100644
--- a/man/loglinb2.Rd
+++ b/man/loglinb2.Rd
@@ -30,14 +30,14 @@ loglinb2(exchangeable = FALSE, zero = "u12")
   The model is
   \deqn{P(Y_1=y_1,Y_2=y_2) = \exp(u_0+u_1 y_1+u_2 y_2+u_{12} y_1 y_2)}{%
     P(Y1=y1,Y2=y2) = exp(u0 + u1*y1 + u2*y2 + u12*y1*y2)}
-  where \eqn{y_1}{y1} and \eqn{y_2}{y2} are 0 or 1, and 
+  where \eqn{y_1}{y1} and \eqn{y_2}{y2} are 0 or 1, and
   the parameters are \eqn{u_1}{u1}, \eqn{u_2}{u2},
   \eqn{u_{12}}{u12}.
   The normalizing parameter \eqn{u_0}{u0} can be expressed as a function
   of the other parameters, viz.,
   \deqn{u_0 = -\log[1 + \exp(u_1) + \exp(u_2) + \exp(u_1 + u_2 + u_{12})].}{%
         u0 = -log[1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)].}
-  The linear/additive predictors are 
+  The linear/additive predictors are
   \eqn{(\eta_1,\eta_2,\eta_3)^T = (u_1,u_2,u_{12})^T}{(eta1,eta2,eta3) =
        (u1,u2,u12)}.
 
@@ -93,7 +93,7 @@ McCullagh, P. and Nelder, J. A. (1989)
 \examples{
 coalminers <- transform(coalminers, Age = (age - 42) / 5)
 
-# Get the n x 4 matrix of counts 
+# Get the n x 4 matrix of counts
 fit0 <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, data = coalminers)
 counts <- round(c(weights(fit0, type = "prior")) * depvar(fit0))
 
@@ -109,11 +109,11 @@ newminers <- newminers[with(newminers, wt) > 0,]
 
 fit <- vglm(cbind(bln,wheeze) ~ Age, loglinb2(zero = NULL),
             weight = wt, data = newminers)
-coef(fit, matrix = TRUE)  # Same! (at least for the log odds-ratio) 
+coef(fit, matrix = TRUE)  # Same! (at least for the log odds-ratio)
 summary(fit)
 
-# Try reconcile this with McCullagh and Nelder (1989), p.234 
-(0.166-0.131) / 0.027458   # 1.275 is approximately 1.25 
+# Try reconcile this with McCullagh and Nelder (1989), p.234
+(0.166-0.131) / 0.027458   # 1.275 is approximately 1.25
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/loglog.Rd b/man/loglog.Rd
index 1a9e91c..990b3f3 100644
--- a/man/loglog.Rd
+++ b/man/loglog.Rd
@@ -73,7 +73,7 @@ loglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{loge}},
     \code{\link{logoff}}.
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index bb3c72a..af1d77b 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -16,7 +16,7 @@ lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = "sdlog")
   \item{lmeanlog, lsdlog}{
   Parameter link functions applied to the mean and (positive)
   \eqn{\sigma}{sigma} (standard deviation) parameter.
-  Both of these are on the log scale. 
+  Both of these are on the log scale.
   See \code{\link{Links}} for more choices.
 
 
@@ -69,7 +69,7 @@ lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = "sdlog")
   A random variable \eqn{Y} has a 2-parameter lognormal distribution
   if \eqn{\log(Y)}{log(Y)}
   is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}.
-  The expected value of \eqn{Y}, which is 
+  The expected value of \eqn{Y}, which is
   \deqn{E(Y) = \exp(\mu + 0.5 \sigma^2)}{%
         E(Y) = exp(mu + 0.5 sigma^2)}
   and not \eqn{\mu}{mu}, make up the fitted values.
diff --git a/man/logoff.Rd b/man/logoff.Rd
index 1bdeb29..2b29091 100644
--- a/man/logoff.Rd
+++ b/man/logoff.Rd
@@ -38,7 +38,7 @@ logoff(theta, offset = 0, inverse = FALSE, deriv = 0,
   \code{offset} is the offset value. For example,
   if \code{offset = 0.5} then the value of \code{theta} is restricted
   to be greater than \eqn{-0.5}.
-  
+
 
   Numerical values of \code{theta} close to \code{-offset} or out of range
   result in
@@ -82,7 +82,7 @@ logoff(theta, offset = 0, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{loge}}.
 
diff --git a/man/lomax.Rd b/man/lomax.Rd
index 071e3fd..7e2ee42 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -3,12 +3,12 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Lomax Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   Lomax distribution.
 
 }
 \usage{
-lomax(lscale = "loge", lshape3.q = "loge", iscale = NULL, 
+lomax(lscale = "loge", lshape3.q = "loge", iscale = NULL,
       ishape3.q = NULL, imethod = 1, gscale = exp(-5:5),
       gshape3.q = seq(0.75, 4, by = 0.25),
       probs.y = c(0.25, 0.5, 0.75), zero = "shape")
diff --git a/man/lqnorm.Rd b/man/lqnorm.Rd
index 248b40e..db4d18b 100644
--- a/man/lqnorm.Rd
+++ b/man/lqnorm.Rd
@@ -119,8 +119,8 @@ ldata <- transform(ldata, y = c(4*y[1], y[-1]), x = c(-1, x[-1]))
 fit <- vglm(y ~ x, lqnorm(qpower = 1.2), data = ldata)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
-fit at misc$qpower 
-fit at misc$objectiveFunction 
+fit at misc$qpower
+fit at misc$objectiveFunction
 
 \dontrun{
 # Graphical check
diff --git a/man/lrtest.Rd b/man/lrtest.Rd
index b4afec7..9367c95 100644
--- a/man/lrtest.Rd
+++ b/man/lrtest.Rd
@@ -15,10 +15,10 @@
 \usage{
  lrtest(object, \dots)
 
- lrtest_vglm(object, \dots, name = NULL)
+ lrtest_vglm(object, \dots, no.warning = FALSE, name = NULL)
 
 }
-%\method{lrtest}{default}(object, \dots, name = NULL) 
+%\method{lrtest}{default}(object, \dots, name = NULL)
 
 %\method{lrtest}{formula}(object, \dots, data = list())
 
@@ -37,6 +37,14 @@
 
 
   }
+  \item{no.warning}{
+  logical; if \code{TRUE} then no warning is issued.
+  For example, setting \code{TRUE} might be a good idea when testing
+  for linearity of a variable for a \code{"pvgam"} object.
+
+
+
+  }
   \item{name}{
   a function for extracting a suitable name/description from
   a fitted model object.
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
index 2cef1f7..5d2e306 100644
--- a/man/lvplot.qrrvglm.Rd
+++ b/man/lvplot.qrrvglm.Rd
@@ -11,21 +11,21 @@ y-axis are the first and second ordination axes respectively.
 }
 \usage{
 lvplot.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL,
-    add = FALSE, show.plot = TRUE, 
-    rug = TRUE, y = FALSE, type = c("fitted.values", "predictors"), 
-    xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""), 
-    ylab = if (Rank == 1) switch(type, predictors = "Predictors", 
-    fitted.values = "Fitted values") else "Latent Variable 2", 
-    pcex = par()$cex, pcol = par()$col, pch = par()$pch, 
-    llty = par()$lty, lcol = par()$col, llwd = par()$lwd, 
-    label.arg = FALSE, adj.arg = -0.1, 
-    ellipse = 0.95, Absolute = FALSE, 
-    elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200, 
-    chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd, 
+    add = FALSE, show.plot = TRUE,
+    rug = TRUE, y = FALSE, type = c("fitted.values", "predictors"),
+    xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
+    ylab = if (Rank == 1) switch(type, predictors = "Predictors",
+    fitted.values = "Fitted values") else "Latent Variable 2",
+    pcex = par()$cex, pcol = par()$col, pch = par()$pch,
+    llty = par()$lty, lcol = par()$col, llwd = par()$lwd,
+    label.arg = FALSE, adj.arg = -0.1,
+    ellipse = 0.95, Absolute = FALSE,
+    elty = par()$lty, ecol = par()$col, elwd = par()$lwd, egrid = 200,
+    chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd,
     cpch = "   ",
     C = FALSE, OriginC = c("origin", "mean"),
-    Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd, 
-    Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1, 
+    Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd,
+    Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1,
     sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
     sfont = par()$font, check.ok = TRUE, ...)
 }
@@ -40,7 +40,7 @@ lvplot.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL,
 
   }
   \item{varI.latvar}{
-  Logical that is fed into \code{\link{Coef.qrrvglm}}. 
+  Logical that is fed into \code{\link{Coef.qrrvglm}}.
 
 
   }
@@ -84,12 +84,12 @@ lvplot.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL,
 
  }
   \item{pcex}{ Character expansion of the points.
-Here, for rank-1 models, points are the response \emph{y} data. 
+Here, for rank-1 models, points are the response \emph{y} data.
 For rank-2 models, points are the optimums.
   See the \code{cex} argument in \code{\link[graphics]{par}}.
 
  }
-  \item{pcol}{ Color of the points. 
+  \item{pcol}{ Color of the points.
   See the \code{col} argument in \code{\link[graphics]{par}}.
 
  }
@@ -116,17 +116,17 @@ For rank-2 models, points are the optimums.
   See the \code{lwd} argument of \code{\link[graphics]{par}}.
 
  }
-  \item{label.arg}{ Logical. Label the optimums and \bold{C}? 
+  \item{label.arg}{ Logical. Label the optimums and \bold{C}?
   (applies only to rank-2 models only).
 
  }
   \item{adj.arg}{ Justification of text strings for labelling the optimums
-  (applies only to rank-2 models only). 
+  (applies to rank-2 models only).
   See the \code{adj} argument of \code{\link[graphics]{par}}.
 
  }
 
-  \item{ellipse}{ 
+  \item{ellipse}{
       Numerical, of length 0 or 1 (applies only to rank-2 models only).
       If \code{Absolute} is \code{TRUE} then \code{ellipse} should be
       assigned a value that is used for the elliptical contouring.
@@ -145,12 +145,12 @@ For rank-2 models, points are the optimums.
 
   }
   \item{Absolute}{ Logical.
-  If \code{TRUE}, the contours corresponding to \code{ellipse} 
+  If \code{TRUE}, the contours corresponding to \code{ellipse}
   are on an absolute scale.
-  If \code{FALSE}, the contours corresponding to \code{ellipse} 
-  are on a relative scale. 
+  If \code{FALSE}, the contours corresponding to \code{ellipse}
+  are on a relative scale.
   }
-  \item{elty}{ Line type of the ellipses. 
+  \item{elty}{ Line type of the ellipses.
   See the \code{lty} argument of \code{\link[graphics]{par}}. }
   \item{ecol}{ Line color of the ellipses.
   See the \code{col} argument of \code{\link[graphics]{par}}. }
@@ -158,34 +158,34 @@ For rank-2 models, points are the optimums.
   See the \code{lwd} argument of \code{\link[graphics]{par}}. }
   \item{egrid}{ Numerical. Line resolution of the ellipses.
   Choosing a larger value will result in smoother ellipses.
-  Useful when ellipses are large. 
+  Useful when ellipses are large.
   }
 
   \item{chull.arg}{ Logical. Add a convex hull around the
    site scores? }
-  \item{clty}{ Line type of the convex hull. 
+  \item{clty}{ Line type of the convex hull.
   See the \code{lty} argument of \code{\link[graphics]{par}}. }
   \item{ccol}{ Line color of the convex hull.
   See the \code{col} argument of \code{\link[graphics]{par}}. }
   \item{clwd}{ Line width of the convex hull.
   See the \code{lwd} argument of \code{\link[graphics]{par}}. }
-  \item{cpch}{ Character to be plotted at the intersection points of 
+  \item{cpch}{ Character to be plotted at the intersection points of
                the convex hull. Having white spaces means that site
-               labels are not obscured there. 
+               labels are not obscured there.
   See the \code{pch} argument of \code{\link[graphics]{par}}. }
 
   \item{C}{ Logical. Add \bold{C} (represented by arrows emanating
   from \code{OriginC}) to the plot? }
   \item{OriginC}{ Character or numeric.
-    Where the arrows representing \bold{C} emanate from. 
+    Where the arrows representing \bold{C} emanate from.
     If character, it must be one of the choices given. By default the
     first is chosen.
     The value \code{"origin"} means \code{c(0,0)}.
-    The value \code{"mean"} means 
-    the sample mean of the latent variables (centroid). 
+    The value \code{"mean"} means
+    the sample mean of the latent variables (centroid).
     Alternatively, the user may specify a numerical vector of length 2.
   }
-  \item{Clty}{ Line type of the arrows representing \bold{C}. 
+  \item{Clty}{ Line type of the arrows representing \bold{C}.
   See the \code{lty} argument of \code{\link[graphics]{par}}. }
   \item{Ccol}{ Line color of the arrows representing \bold{C}.
   See the \code{col} argument of \code{\link[graphics]{par}}. }
@@ -208,7 +208,7 @@ For rank-2 models, points are the optimums.
   data frame are used. They often are the site numbers.
   See the \code{pch} argument of \code{\link[graphics]{par}}.
   }
-  \item{scol}{ Color of the site scores. 
+  \item{scol}{ Color of the site scores.
   See the \code{col} argument of \code{\link[graphics]{par}}.
   }
   \item{scex}{ Character expansion of the site scores.
@@ -223,22 +223,22 @@ For rank-2 models, points are the optimums.
 % A value from the set \{1,2,\ldots,\eqn{M}\} indicating
 % which species (quadratic predictor) is to be chosen so that
 % its major and semi-minor axes are parallel to the latent variable
-% axes, i.e., that species' Tolerance matrix will be diagonal. 
+% axes, i.e., that species' Tolerance matrix will be diagonal.
 % If \code{Rotate} is \code{TRUE}, the first species is selected for rotation.
 
 % By default a rotation is performed only if the tolerance matrices are equal,
 % and \code{Rotation} only applies when the rank is greater than one.
 
-% See \code{\link{Coef.qrrvglm}} for details. 
+% See \code{\link{Coef.qrrvglm}} for details.
 % }
 
 % \item{I.tolerances}{
 % Logical.
 % If \code{TRUE}, the tolerances matrices are transformed so that they are
-% the order-\code{Rank} identity matrix. This means that a rank-2 
+% the order-\code{Rank} identity matrix. This means that a rank-2
 % latent variable plot
 % can be interpreted naturally in terms of distances and directions.
-% See \code{\link{Coef.qrrvglm}} for details. 
+% See \code{\link{Coef.qrrvglm}} for details.
 % }
 
   \item{check.ok}{ Logical. Whether a check is performed to see
@@ -276,7 +276,7 @@ For rank-2 models, points are the optimums.
 
 
 }
-\references{ 
+\references{
 
 Yee, T. W. (2004)
 A new technique for maximum-likelihood
diff --git a/man/lvplot.rrvglm.Rd b/man/lvplot.rrvglm.Rd
index 83efc0f..b3fd4e2 100644
--- a/man/lvplot.rrvglm.Rd
+++ b/man/lvplot.rrvglm.Rd
@@ -11,20 +11,20 @@
 
 }
 \usage{
-lvplot.rrvglm(object, 
+lvplot.rrvglm(object,
               A = TRUE, C = TRUE, scores = FALSE, show.plot = TRUE,
-              groups = rep(1, n), gapC = sqrt(sum(par()$cxy^2)), 
-              scaleA = 1, 
-              xlab = "Latent Variable 1", ylab = "Latent Variable 2", 
-              Alabels = if (length(object at misc$predictors.names)) 
-              object at misc$predictors.names else paste("LP", 1:M, sep = ""), 
-              Aadj = par()$adj, Acex = par()$cex, Acol = par()$col, 
-              Apch = NULL, 
-              Clabels = rownames(Cmat), Cadj = par()$adj, 
-              Ccex = par()$cex, Ccol = par()$col, Clty = par()$lty, 
-              Clwd = par()$lwd, 
-              chull.arg = FALSE, ccex = par()$cex, ccol = par()$col, 
-              clty = par()$lty, clwd = par()$lwd, 
+              groups = rep(1, n), gapC = sqrt(sum(par()$cxy^2)),
+              scaleA = 1,
+              xlab = "Latent Variable 1", ylab = "Latent Variable 2",
+              Alabels = if (length(object at misc$predictors.names))
+              object at misc$predictors.names else paste("LP", 1:M, sep = ""),
+              Aadj = par()$adj, Acex = par()$cex, Acol = par()$col,
+              Apch = NULL,
+              Clabels = rownames(Cmat), Cadj = par()$adj,
+              Ccex = par()$cex, Ccol = par()$col, Clty = par()$lty,
+              Clwd = par()$lwd,
+              chull.arg = FALSE, ccex = par()$cex, ccol = par()$col,
+              clty = par()$lty, clwd = par()$lwd,
               spch = NULL, scex = par()$cex, scol = par()$col,
               slabels = rownames(x2mat), ...)
 }
@@ -34,7 +34,7 @@ lvplot.rrvglm(object,
   \item{A}{ Logical. Allow the plotting of \bold{A}? }
   \item{C}{ Logical. Allow the plotting of \bold{C}? If \code{TRUE} then
             \bold{C} is represented by arrows emenating from the origin. }
-  \item{scores}{ Logical. Allow the plotting of the \eqn{n} scores? 
+  \item{scores}{ Logical. Allow the plotting of the \eqn{n} scores?
                  The scores are the values of the latent variables for each
                  observation. }
   \item{show.plot}{ Logical. Plot it? If \code{FALSE}, no plot is produced
@@ -42,8 +42,8 @@ lvplot.rrvglm(object,
   If \code{TRUE}, the rank of \code{object} need not be 2.
   }
   \item{groups}{ A vector whose distinct values indicate which group the
-  observation belongs to. By default, all the observations belong to a 
-  single group. Useful for the multinomial logit model (see 
+  observation belongs to. By default, all the observations belong to a
+  single group. Useful for the multinomial logit model (see
   \code{\link{multinomial}}.}
   \item{gapC}{ The gap between the end of the arrow and the text labelling
   of \bold{C}, in latent variable units.}
@@ -93,7 +93,7 @@ lvplot.rrvglm(object,
   See the \code{cex} argument of \code{\link[graphics]{par}}. }
   \item{scol}{ Line color of the arrows representing \bold{C}.
   See the \code{col} argument of \code{\link[graphics]{par}}. }
-  \item{slabels}{ Character vector to label the scores. 
+  \item{slabels}{ Character vector to label the scores.
         Must be of length \eqn{n}. }
   \item{\dots}{ Arguments passed into the \code{plot} function
   when setting up the entire plot. Useful arguments here include
diff --git a/man/machinists.Rd b/man/machinists.Rd
index d28af90..0558e1f 100644
--- a/man/machinists.Rd
+++ b/man/machinists.Rd
@@ -16,9 +16,9 @@ data(machinists)
   A data frame with the following variables.
 
   \describe{
-   
+
     \item{accidents}{
-      The number of accidents 
+      The number of accidents
 
     }
     \item{ofreq}{
diff --git a/man/makeham.Rd b/man/makeham.Rd
index 7a44c09..d838fca 100644
--- a/man/makeham.Rd
+++ b/man/makeham.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Makeham Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 3-parameter 
+  Maximum likelihood estimation of the 3-parameter
   Makeham distribution.
 
 }
@@ -69,32 +69,32 @@ makeham(lscale = "loge", lshape = "loge", lepsilon = "loge",
 The Makeham distribution, which adds another parameter
 to the Gompertz distribution,
 has cumulative distribution function
-\deqn{F(x; \alpha, \beta, \varepsilon) =
+\deqn{F(y; \alpha, \beta, \varepsilon) =
 1 - \exp
 \left\{
 -y \varepsilon + \frac {\alpha}{\beta}
 \left[ 1 - e^{\beta y} \right]
 \right\}
 }{%
-F(x; alpha, beta, epsilon) = 1 - exp(-y * epsilon + (alpha / beta) * [1 - e^(beta * y)])
+F(y; alpha, beta, epsilon) = 1 - exp(-y * epsilon + (alpha / beta) * [1 - e^(beta * y)])
 }
 which leads to a probability density function
-\deqn{f(x; \alpha, \beta, \varepsilon) =
+\deqn{f(y; \alpha, \beta, \varepsilon) =
 \left[
-\varepsilon + \alpha e^{\beta x} \right]
+\varepsilon + \alpha e^{\beta y} \right]
 \;
 \exp
 \left\{
--x \varepsilon + \frac {\alpha}{\beta}
-\left[ 1 - e^{\beta x} \right]
+-y \varepsilon + \frac {\alpha}{\beta}
+\left[ 1 - e^{\beta y} \right]
 \right\},
 }{%
-f(x; alpha, beta, epsilon) = (epsilon + alpha * e^(beta x) ) * exp(-x * epsilon + (alpha / beta) * [1 - e^(beta * x)])
+f(y; alpha, beta, epsilon) = (epsilon + alpha * e^(beta y) ) * exp(-y * epsilon + (alpha / beta) * [1 - e^(beta * y)])
 }
 for \eqn{\alpha > 0}{alpha > 0},
 \eqn{\beta > 0}{beta > 0},
 \eqn{\varepsilon \geq 0}{epsilon >= 0},
-\eqn{x > 0}.
+\eqn{y > 0}.
 Here, \eqn{\beta}{beta} is called the scale parameter \code{scale},
 and \eqn{\alpha}{alpha} is called a shape parameter.
 The moments for this distribution do
diff --git a/man/makehamUC.Rd b/man/makehamUC.Rd
index 9ec6789..337cf24 100644
--- a/man/makehamUC.Rd
+++ b/man/makehamUC.Rd
@@ -63,7 +63,7 @@ rmakeham(n, scale = 1, shape, epsilon = 0)
 }
 \references{
 
-Jodra, P. (2009) 
+Jodra, P. (2009)
 A closed-form expression for the quantile function of the
 Gompertz-Makeham distribution.
 \emph{Mathematics and Computers in Simulation},
diff --git a/man/margeff.Rd b/man/margeff.Rd
index f3b16d1..3951720 100644
--- a/man/margeff.Rd
+++ b/man/margeff.Rd
@@ -77,7 +77,7 @@ margeff(object, subset = NULL, ...)
   \code{is.numeric(subset)}
   and
   \code{length(subset) == 1} then a
-  \eqn{p} by \eqn{M+1} matrix is returned. 
+  \eqn{p} by \eqn{M+1} matrix is returned.
 
 
 }
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
index 1a7fb5d..08c80b3 100644
--- a/man/maxwell.Rd
+++ b/man/maxwell.Rd
@@ -28,7 +28,7 @@ maxwell(link = "loge", zero = NULL)
   has a probability density function that can be written
   \deqn{f(y;a) = \sqrt{2/\pi} a^{3/2} y^2 \exp(-0.5 a y^2)}{%
     f(y;a) = sqrt(2/pi) * a^(3/2) * y^2 * exp(-0.5*a*y^2)}
-  for \eqn{y>0} and \eqn{a>0}. 
+  for \eqn{y>0} and \eqn{a>0}.
   The mean of \eqn{Y} is
   \eqn{\sqrt{8 / (a \pi)}}{sqrt(8 / (a * pi))}
   (returned as the fitted values), and its variance is
@@ -44,7 +44,7 @@ maxwell(link = "loge", zero = NULL)
 
 
 }
-\references{ 
+\references{
 
   von Seggern, D. H. (1993)
   \emph{CRC Standard Curves and Surfaces},
@@ -63,7 +63,7 @@ maxwell(link = "loge", zero = NULL)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Maxwell}},
   \code{\link{rayleigh}},
   \code{\link{poisson.points}}.
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index b0e88ff..57422d7 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -48,7 +48,7 @@ rmaxwell(n, rate)
 }
 \references{
 
-  Balakrishnan, N. and Nevzorov, V. B. (2003) 
+  Balakrishnan, N. and Nevzorov, V. B. (2003)
   \emph{A Primer on Statistical Distributions}.
   Hoboken, New Jersey: Wiley.
 
@@ -76,9 +76,9 @@ rmaxwell(n, rate)
 }
 \examples{
 \dontrun{ rate <- 3; x <- seq(-0.5, 3, length = 100)
-plot(x, dmaxwell(x, rate = rate), type = "l", col = "blue", las = 1, ylab = "",
+plot(x, dmaxwell(x, rate = rate), type = "l", col = "blue", las = 1,
      main = "Blue is density, orange is cumulative distribution function",
-     sub = "Purple lines are the 10,20,...,90 percentiles")
+     sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "")
 abline(h = 0, col = "blue", lty = 2)
 lines(x, pmaxwell(x, rate = rate), type = "l", col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index ae2e0d1..9da9e8f 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -33,10 +33,10 @@ mccullagh89(ltheta = "rhobit", lnu = logoff(offset = 0.5),
 }
 \details{
 The McCullagh (1989) distribution has density function
- \deqn{f(y;\theta,\nu) = 
+ \deqn{f(y;\theta,\nu) =
 \frac{ \{ 1-y^2 \}^{\nu-\frac12}}
 { (1-2\theta y + \theta^2)^{\nu} \mbox{Beta}(\nu+\frac12, \frac12)}}{%
-f(y;theta,nu) = 
+f(y;theta,nu) =
 (1-y^2)^(nu-0.5) / [ (1 - 2*theta*y+theta^2)^nu * Beta(nu+0.5, 0.5)]}
 where \eqn{-1 < y < 1} and \eqn{-1 < \theta < 1}{-1 < theta < 1}.
 This distribution is equation (1) in that paper.
diff --git a/man/micmen.Rd b/man/micmen.Rd
index 914d301..b7a9dab 100644
--- a/man/micmen.Rd
+++ b/man/micmen.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Michaelis-Menten Model }
 \description{
-  Fits a Michaelis-Menten nonlinear regression model. 
+  Fits a Michaelis-Menten nonlinear regression model.
 
 }
 \usage{
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index 30abe0c..3fb5082 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -47,7 +47,7 @@ mix2exp(lphi = "logit", llambda = "loge", iphi = 0.5, il1 = NULL,
   }
 }
 \details{
-  The probability density function can be loosely written as 
+  The probability density function can be loosely written as
   \deqn{f(y) = \phi\,Exponential(\lambda_1) +
               (1-\phi)\,Exponential(\lambda_2)}{%
         f(y) = phi * Exponential(lambda1) +
diff --git a/man/mix2normal.Rd b/man/mix2normal.Rd
index 0e95155..8fd3ca4 100644
--- a/man/mix2normal.Rd
+++ b/man/mix2normal.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Mixture of Two Univariate Normal Distributions }
 \description{
-  Estimates the five parameters of a mixture of two univariate 
+  Estimates the five parameters of a mixture of two univariate
   normal distributions by maximum likelihood estimation.
 
 }
@@ -46,7 +46,7 @@ mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
     Optional initial value for \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2}.
     The default is to compute initial values internally based on
     the argument \code{qmu}.
-    Currently these are not great, therefore using these arguments 
+    Currently these are not great, therefore using these arguments
     where practical is a good idea.
 
 
@@ -61,7 +61,7 @@ mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
 
   }
   \item{eq.sd}{
-    Logical indicating whether the two standard deviations should be 
+    Logical indicating whether the two standard deviations should be
     constrained to be equal. If \code{TRUE} then the appropriate
     constraint matrices will be used.
 
@@ -76,7 +76,7 @@ mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
   May be an integer vector
   specifying which linear/additive predictors are modelled as
   intercept-only.  If given, the value or values can be from the
-  set \eqn{\{1,2,\ldots,5\}}{1,2,...,5}. 
+  set \eqn{\{1,2,\ldots,5\}}{1,2,...,5}.
   The default is the first one only, meaning \eqn{\phi}{phi}
   is a single parameter even when there are explanatory variables.
   Set \code{zero = NULL} to model all linear/additive predictors as
@@ -87,12 +87,12 @@ mix2normal(lphi = "logit", lmu = "identitylink", lsd = "loge",
 }
 }
 \details{
-  The probability density function can be loosely written as 
+  The probability density function can be loosely written as
   \deqn{f(y) = \phi \, N(\mu_1,\sigma_1) + (1-\phi) \, N(\mu_2, \sigma_2)}{%
         f(y) = phi * N(mu1, sd1) + (1-phi) * N(mu2, sd2)}
   where \eqn{\phi}{phi} is the probability an observation belongs
   to the first group.
-  The parameters \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} are the means, and 
+  The parameters \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} are the means, and
   \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2} are the standard deviations.
   The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
   The mean of \eqn{Y} is
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
index 4a56ea6..218653f 100644
--- a/man/mix2poisson.Rd
+++ b/man/mix2poisson.Rd
@@ -57,7 +57,7 @@ mix2poisson(lphi = "logit", llambda = "loge",
   }
 }
 \details{
-  The probability function can be loosely written as 
+  The probability function can be loosely written as
   \deqn{P(Y=y) = \phi \, Poisson(\lambda_1) + (1-\phi) \, Poisson(\lambda_2)}{%
         P(Y=y) = phi * Poisson(lambda1) + (1-phi) * Poisson(lambda2)}
   where \eqn{\phi}{phi} is the probability an observation belongs
diff --git a/man/model.matrixvlm.Rd b/man/model.matrixvlm.Rd
index 053a278..54c2f33 100644
--- a/man/model.matrixvlm.Rd
+++ b/man/model.matrixvlm.Rd
@@ -10,7 +10,7 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
     \emph{vector linear model} (VLM).
 
   }
-  \item{type}{Type of design matrix returned. The first is the default. 
+  \item{type}{Type of design matrix returned. The first is the default.
   The value \code{"vlm"} is the VLM model matrix corresponding
   to the \code{formula} argument.
   The value \code{"lm"} is the LM model matrix corresponding
@@ -31,7 +31,7 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
     this is a LM-type matrix.
 
 
-  }    
+  }
   \item{\dots}{further arguments passed to or from other methods.
     These include \code{data} (which
     is a data frame created with \code{\link{model.framevlm}}),
@@ -39,7 +39,7 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
     See \code{\link[stats]{model.matrix}} for more information.
 
 
-  }    
+  }
 }
 
 \description{
@@ -55,7 +55,7 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
   This can be a small LM object or a big VLM object (default).
   The latter is constructed from the former and the constraint
   matrices.
-  
+
 
   This code implements \emph{smart prediction}
   (see \code{\link{smartpred}}).
@@ -64,7 +64,7 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
 }
 \value{
   The design matrix for a regression model with the specified formula
-  and data. 
+  and data.
   If \code{type = "bothlmlm2"} then a list is returned with components
   \code{"X"} and \code{"Xm2"}.
 
diff --git a/man/moffset.Rd b/man/moffset.Rd
index 86abcfb..a09d56e 100644
--- a/man/moffset.Rd
+++ b/man/moffset.Rd
@@ -31,7 +31,7 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "",
   the row or column names.
   For example, for the \code{\link{alcoff}},
   put \code{roffset = "6"} means that we make an effective day's
-  dataset start from 6:00 am, and this wraps around to 
+  dataset start from 6:00 am, and this wraps around to
   include midnight to 05.59 am on the next day.
 
 
@@ -74,7 +74,7 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "",
   \code{\link{Rcim}} only reorders the level of the rows and columns
   so that the data is shifted but not moved.
   That is, a value in one row stays in that row,
-  and ditto for column. 
+  and ditto for column.
   But in \code{\link{moffset}}
   values in one column can be moved to a previous column.
   See the examples below.
@@ -107,8 +107,8 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "",
 
 }
 \seealso{
-  \code{\link{Rcim}}, 
-  \code{\link{rcim}}, 
+  \code{\link{Rcim}},
+  \code{\link{rcim}},
   \code{\link{plotrcim0}},
   \code{\link{alcoff}},
   \code{\link{crashi}}.
@@ -124,7 +124,7 @@ moffset(alcoff, 3, 2, "*") - Rcim(alcoff, 3+1, 2+1)  # Note the differences
 # An 'effective day' data set:
 alcoff.e <- moffset(alcoff, roffset = "6", postfix = "*")
 fit.o <- rcim(alcoff)    # default baselines are first row and col
-fit.e <- rcim(alcoff.e)  # default baselines are first row and col 
+fit.e <- rcim(alcoff.e)  # default baselines are first row and col
 
 \dontrun{ par(mfrow = c(2, 2), mar = c(9, 4, 2, 1))
 plot(fit.o, rsub = "Not very interpretable", csub = "Not very interpretable")
diff --git a/man/multilogit.Rd b/man/multilogit.Rd
index e80081b..933c0d4 100644
--- a/man/multilogit.Rd
+++ b/man/multilogit.Rd
@@ -84,7 +84,7 @@ multilogit(theta, refLevel = "(Last)", M = NULL, whitespace = FALSE,
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Links}},
     \code{\link{multinomial}},
     \code{\link{logit}},
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index c49d9da..e4d50bc 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -115,7 +115,7 @@ The \pkg{VGAM} package for categorical data analysis.
 
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
-\emph{Statistical Modelling}, 
+\emph{Statistical Modelling},
 \bold{3}, 15--41.
 
 
@@ -142,7 +142,7 @@ New York, USA: Springer-Verlag.
 
 
 Anderson, J. A. (1984)
-Regression and ordered categorical variables. 
+Regression and ordered categorical variables.
 \emph{Journal of the Royal Statistical Society, Series B, Methodological},
 \bold{46}, 1--30.
 
@@ -254,7 +254,7 @@ data(iris)
 \dontrun{ fit <- vglm(Species ~ ., multinomial, iris)
 coef(fit, matrix = TRUE) }
 
-# Example 2a: a simple example 
+# Example 2a: a simple example
 ycounts <- t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8)))  # Counts
 fit <- vglm(ycounts ~ 1, multinomial)
 head(fitted(fit))   # Proportions
@@ -263,7 +263,7 @@ weights(fit, type = "prior", matrix = FALSE)  # The better method
 depvar(fit)         # Sample proportions; same as fit at y
 constraints(fit)    # Constraint matrices
 
-# Example 2b: Different reference level used as the baseline 
+# Example 2b: Different reference level used as the baseline
 fit2 <- vglm(ycounts ~ 1, multinomial(refLevel = 2))
 coef(fit2, matrix = TRUE)
 coef(fit , matrix = TRUE)  # Easy to reconcile this output with fit2
@@ -279,15 +279,15 @@ coef(fit3a, matrix = TRUE)  # "Treatment1" is the reference level
 coef(fit3b, matrix = TRUE)  # "Treatment1" is the reference level
 margeff(fit3b)
 
-# Example 4: Fit a rank-1 stereotype model 
+# Example 4: Fit a rank-1 stereotype model
 fit4 <- rrvglm(Country ~ Width + Height + HP, multinomial, data = car.all)
 coef(fit4)  # Contains the C matrix
-constraints(fit4)$HP       # The A matrix 
+constraints(fit4)$HP       # The A matrix
 coef(fit4, matrix = TRUE)  # The B matrix
-Coef(fit4)@C               # The C matrix 
+Coef(fit4)@C               # The C matrix
 concoef(fit4)              # Better to get the C matrix this way
-Coef(fit4)@A               # The A matrix 
-svd(coef(fit4, matrix = TRUE)[-1, ])$d  # This has rank 1; = C %*% t(A) 
+Coef(fit4)@A               # The A matrix
+svd(coef(fit4, matrix = TRUE)[-1, ])$d  # This has rank 1; = C %*% t(A)
 # Classification (but watch out for NAs in some of the variables):
 apply(fitted(fit4), 1, which.max)  # Classification
 colnames(fitted(fit4))[apply(fitted(fit4), 1, which.max)]  # Classification
@@ -301,7 +301,7 @@ M <- 3  # There are M+1 models of transport to go to work
 ycounts <- matrix(0, nn, M+1)
 ycounts[cbind(1:nn, sample(x = M+1, size = nn, replace = TRUE))] = 1
 dimnames(ycounts) <- list(NULL, c("bus","train","car","walk"))
-gotowork <- data.frame(cost.bus  = runif(nn), time.bus  = runif(nn), 
+gotowork <- data.frame(cost.bus  = runif(nn), time.bus  = runif(nn),
                        cost.train= runif(nn), time.train= runif(nn),
                        cost.car  = runif(nn), time.car  = runif(nn),
                        cost.walk = runif(nn), time.walk = runif(nn))
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
index fe0b00e..cbcc374 100644
--- a/man/nakagami.Rd
+++ b/man/nakagami.Rd
@@ -29,9 +29,10 @@ nakagami(lscale = "loge", lshape = "loge", iscale = 1, ishape = NULL,
   Optional initial values for the shape and scale parameters.
   For \code{ishape}, a \code{NULL} value means it is obtained in the
   \code{initialize} slot based on the value of \code{iscale}.
-  For \code{iscale}, assigning a \code{NULL} means a value is obtained in the
-  \code{initialize} slot, however, setting another numerical
-  value is recommended if convergence fails or is too slow.
+  For \code{iscale}, assigning a \code{NULL} means a value
+  is obtained in the \code{initialize} slot, however, setting
+  another numerical value is recommended if convergence fails or
+  is too slow.
 
 
   }
@@ -47,8 +48,8 @@ nakagami(lscale = "loge", lshape = "loge", iscale = 1, ishape = NULL,
   for \eqn{y > 0}, \eqn{shape > 0}, \eqn{scale > 0}.
   The mean of \eqn{Y} is
   \eqn{\sqrt{scale/shape}  \times  \Gamma(shape+0.5) /
-       \Gamma(shape)}{sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)} and
-  these are returned as the fitted values.
+       \Gamma(shape)}{sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)}
+  and these are returned as the fitted values.
   By default, the linear/additive predictors are
   \eqn{\eta_1=\log(scale)}{eta1=log(scale)} and
   \eqn{\eta_2=\log(shape)}{eta2=log(shape)}.
@@ -83,9 +84,10 @@ nakagami(lscale = "loge", lshape = "loge", iscale = 1, ishape = NULL,
   The second moment is \eqn{E(Y^2)=m}.
 
 
-  If \eqn{Y} has a Nakagami distribution with parameters \emph{shape}
-  and \emph{scale} then \eqn{Y^2} has a gamma distribution with shape
-  parameter \emph{shape} and scale parameter \emph{scale/shape}.
+  If \eqn{Y} has a Nakagami distribution with parameters
+  \emph{shape} and \emph{scale} then \eqn{Y^2} has a gamma
+  distribution with shape parameter \emph{shape} and scale
+  parameter \emph{scale/shape}.
 
 
 }
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
index 3e4f41e..dc88a18 100644
--- a/man/nakagamiUC.Rd
+++ b/man/nakagamiUC.Rd
@@ -7,7 +7,7 @@
 \title{Nakagami Distribution }
 \description{
   Density, cumulative distribution function, quantile function and
-  random generation for 
+  random generation for
   the Nakagami distribution.
 
 }
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index bf9fbae..c15f9c4 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -124,7 +124,7 @@ nbcanlink(theta, size = NULL, wrt.param = NULL, bvalue = NULL,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{negbinomial}},
   \code{\link{negbinomial.size}}.
 
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index b17f39e..2eb18cb 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -20,7 +20,7 @@ nbolf(theta, cutpoint = NULL, k = NULL,
   }
   \item{cutpoint, k}{
   Here, \code{k} is the \eqn{k} parameter associated
-  with the negative binomial distribution; see 
+  with the negative binomial distribution; see
   \code{\link{negbinomial}}.
   The cutpoints should be non-negative integers.
   If \code{nbolf()} is used as the link function in
@@ -57,7 +57,7 @@ nbolf(theta, cutpoint = NULL, k = NULL,
 }
 \references{
   Yee, T. W. (2012)
-  \emph{Ordinal ordination with normalizing link functions for count data}, 
+  \emph{Ordinal ordination with normalizing link functions for count data},
   (in preparation).
 
 
@@ -87,7 +87,7 @@ nbolf(theta, cutpoint = NULL, k = NULL,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{negbinomial}},
   \code{\link{polf}},
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index d6f7973..e754772 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -11,23 +11,25 @@
 }
 \usage{
 negbinomial(zero = "size", parallel = FALSE, deviance.arg = FALSE,
+            type.fitted = c("mean", "quantiles"),
+            percentiles = c(25, 50, 75),
             mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
             eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
             lmu = "loge", lsize = "loge",
             imethod = 1, imu = NULL, iprobs.y = NULL,
-            gprobs.y = (0:9)/10, isize = NULL,
+            gprobs.y = ppoints(6), isize = NULL,
             gsize.mux = exp(c(-30, -20, -15, -10, -6:3)))
 polya(zero = "size", type.fitted = c("mean", "prob"),
       mds.min = 1e-3, nsimEIM = 500, cutoff.prob = 0.999,
       eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
       lprob = "logit", lsize = "loge", imethod = 1, iprob = NULL,
-      iprobs.y = NULL, gprobs.y = (0:9)/10, isize = NULL,
+      iprobs.y = NULL, gprobs.y = ppoints(6), isize = NULL,
       gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imunb = NULL)
 polyaR(zero = "size", type.fitted = c("mean", "prob"),
        mds.min = 1e-3, nsimEIM = 500,  cutoff.prob = 0.999,
        eps.trig = 1e-7, max.support = 4000, max.chunk.MB = 30,
        lsize = "loge", lprob = "logit", imethod = 1, iprob = NULL,
-       iprobs.y = NULL, gprobs.y = (0:9)/10, isize = NULL,
+       iprobs.y = NULL, gprobs.y = ppoints(6), isize = NULL,
        gsize.mux = exp(c(-30, -20, -15, -10, -6:3)), imunb = NULL)
 }
 
@@ -41,7 +43,7 @@ polyaR(zero = "size", type.fitted = c("mean", "prob"),
   See \code{\link{Links}} for more choices.
   Note that the \eqn{\mu}{mu}, \eqn{k}
   and \eqn{p}  parameters are the \code{mu},
-  \code{size} and \code{prob} arguments of 
+  \code{size} and \code{prob} arguments of
   \code{\link[stats:NegBinomial]{rnbinom}} respectively.
   Common alternatives for \code{lsize} are
   \code{\link{negloge}} and
@@ -89,7 +91,7 @@ polyaR(zero = "size", type.fitted = c("mean", "prob"),
   in order to obtain a lower limit for the approximate
   support of the distribution, called \code{Qmin}, say.
   Hence the approximate support is \code{Qmin:Qmax}.
-  This argument should be 
+  This argument should be
   a numeric and close to 1 but never exactly 1.
   Used to specify how many terms of the infinite series
   for computing the second diagonal element of the
@@ -109,7 +111,7 @@ polyaR(zero = "size", type.fitted = c("mean", "prob"),
 
   }
   \item{max.chunk.MB, max.support}{
-    \code{max.support} is used to describe the eligibility of 
+    \code{max.support} is used to describe the eligibility of
     individual observations
     to have their EIM computed by the \emph{exact method}.
     Here, we are concerned about
@@ -169,7 +171,7 @@ A small positive value used in the computation of the EIMs.
 It focusses on the denominator of the terms of a series.
 Each term in the series (that is used to approximate an infinite series)
 has a value greater than \code{size / sqrt(eps.trig)},
-thus very small terms are ignored. 
+thus very small terms are ignored.
 It's a good idea to set a smaller value that will result in more accuracy,
 but it will require a greater computing time (when \eqn{k} is close to 0).
 And adjustment to \code{max.support} may be needed.
@@ -204,6 +206,19 @@ respect to the \code{size} parameter.
 
 
 % }
+
+
+
+
+
+  \item{type.fitted, percentiles}{
+  See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+  }
+
+
+
   \item{deviance.arg}{
   Logical.
   If \code{TRUE}, the deviance is computed \emph{after} convergence.
@@ -295,11 +310,6 @@ respect to the \code{size} parameter.
 
 
   }
-  \item{type.fitted}{
-    See \code{\link{CommonVGAMffArguments}} for details.
-
-
-  }
 }
 \details{
   The negative binomial distribution can be motivated in several ways,
@@ -310,7 +320,7 @@ respect to the \code{size} parameter.
   The one used by \code{negbinomial()} uses the
   mean \eqn{\mu}{mu} and an \emph{index} parameter
   \eqn{k}, both which are positive.
-  Specifically, the density of a random variable \eqn{Y} is 
+  Specifically, the density of a random variable \eqn{Y} is
   \deqn{f(y;\mu,k) = {y + k - 1 \choose y} \,
     \left( \frac{\mu}{\mu+k} \right)^y\,
     \left( \frac{k}{k+\mu} \right)^k }{%
@@ -318,7 +328,7 @@ respect to the \code{size} parameter.
     [mu/(mu+k)]^y [k/(k+mu)]^k}
   where \eqn{y=0,1,2,\ldots},
   and \eqn{\mu > 0}{mu > 0} and \eqn{k > 0}.
-  Note that the \emph{dispersion} parameter is 
+  Note that the \emph{dispersion} parameter is
   \eqn{1/k}, so that as \eqn{k} approaches infinity the negative
   binomial distribution approaches a Poisson distribution.
   The response has variance \eqn{Var(Y)=\mu+\mu^2/k}{Var(Y)=mu*(1+mu/k)}.
@@ -339,7 +349,7 @@ respect to the \code{size} parameter.
     f(y;p,k) = C_{y}^{y + k - 1}
     [1 - p]^y p^k}
   where \eqn{y=0,1,2,\ldots},
-  and \eqn{k > 0} and \eqn{0 < p < 1}{0 < p < 1}. 
+  and \eqn{k > 0} and \eqn{0 < p < 1}{0 < p < 1}.
 
 
   Family function \code{polyaR()} is the same as \code{polya()} except
@@ -382,13 +392,13 @@ respect to the \code{size} parameter.
   intercept only.
 
 
-  
+
 }
 \section{Warning}{
   Poisson regression corresponds to \eqn{k} equalling
   infinity.  If the data is Poisson or close to Poisson,
   numerical problems may occur.
-  Some corrective measures are taken, e.g., 
+  Some corrective measures are taken, e.g.,
   \eqn{k} is effectively capped (relative to the mean) during estimation
   to some large value and a warning is issued.
   And setting \code{stepsize = 0.5} for half stepping is
@@ -410,16 +420,25 @@ respect to the \code{size} parameter.
 
 % Possibly choosing a log-log link may help in such cases,
 % otherwise try \code{\link{poissonff}} or
-% \code{\link{quasipoissonff}}.  It is possible to fit a NBD
+% \code{\link{quasipoissonff}}. It is possible to fit a NBD
 % that has a similar variance function as a quasi-Poisson; see
 % the NB-1 example below.
 
 
 
+The negative binomial distribution (NBD) is a strictly unimodal
+distribution. Any data set that does not exhibit a mode (somewhere
+in the middle) makes the estimation problem difficult.
+Set \code{trace = TRUE} to monitor convergence.
+
+
+
+
+
   These functions are fragile; the maximum likelihood
   estimate of the index parameter is fraught (see Lawless,
   1987). In general, the \code{\link{quasipoissonff}} is
-  more robust.  Other alternatives to \code{negbinomial} are
+  more robust. Other alternatives to \code{negbinomial} are
   to fit a NB-1 or RR-NB (aka NB-P) model; see Yee (2014).
   Also available are the NB-C, NB-H and NB-G.
   Assigning values to the \code{isize} argument may lead
@@ -441,7 +460,7 @@ respect to the \code{size} parameter.
   If SFS is used at all, then the \code{@weights} slot of the
   fitted object will be a matrix;
   otherwise that slot will be a \code{0 x 0} matrix.
-  
+
 
 
   Yet to do: write a family function which uses the methods
@@ -516,7 +535,7 @@ Fitting the negative binomial distribution to biological data.
   SFS should be better if \code{max(ymat)} is large,
   e.g., \code{max(ymat) > 1000},
   or if there are any outliers in \code{ymat}.
-  The default algorithm involves a finite series approximation 
+  The default algorithm involves a finite series approximation
   to the support \code{0:Inf};
   the arguments
   \code{max.memory},
@@ -570,7 +589,7 @@ Fitting the negative binomial distribution to biological data.
 
   There are two special uses of \code{negbinomial} for handling count data.
   Firstly,
-  when used by \code{\link{rrvglm}}  this 
+  when used by \code{\link{rrvglm}}  this
   results in a continuum of models in between and
   inclusive of quasi-Poisson and negative binomial regression.
   This is known as a reduced-rank negative binomial model \emph{(RR-NB)}.
@@ -601,7 +620,7 @@ Fitting the negative binomial distribution to biological data.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{quasipoissonff}},
   \code{\link{poissonff}},
   \code{\link{zinegbinomial}},
@@ -616,6 +635,7 @@ Fitting the negative binomial distribution to biological data.
   \code{\link{cqo}},
   \code{\link{CommonVGAMffArguments}},
   \code{\link{simulate.vlm}},
+  \code{\link[stats:ppoints]{ppoints}},
   \code{\link[stats:NegBinomial]{qnbinom}}.
 
 
@@ -702,7 +722,7 @@ summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper  # cf. moment estimator
 %polya(lprob = "logit", lsize = "loge",
 %      iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
 %      imethod = 1, ishrinkage = 0.95, zero = "size")
-%polyaR(lsize = "loge", lprob = "logit", 
+%polyaR(lsize = "loge", lprob = "logit",
 %       isize = NULL, iprob = NULL, probs.y = 0.75, nsimEIM = 100,
 %       imethod = 1, ishrinkage = 0.95, zero = "size")
 
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index e5c549d..ee16d7c 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -96,7 +96,7 @@ Cambridge: Cambridge University Press.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{negbinomial}},
   \code{\link{nbcanlink}} (NB-C model),
   \code{\link{quasipoissonff}},
diff --git a/man/normal.vcm.Rd b/man/normal.vcm.Rd
index 18ace78..ccc252e 100644
--- a/man/normal.vcm.Rd
+++ b/man/normal.vcm.Rd
@@ -104,7 +104,7 @@ of such models have been named \emph{varying-coefficient models} (VCMs).
   baseline/reference group, and therefore excluded from
   the estimation.
 
-  
+
   By default,
   the log of the standard deviation is the last
   linear/additive predictor. It is recommended that this
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index e920ca4..a92801a 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -3,15 +3,47 @@
 %
 %
 %
+% 201612
+\alias{label.cols.y}
+\alias{valid.vknotl2}
+% 201611
+\alias{profilevglm}
+\alias{vpairs.profile}
+\alias{vplot.profile}
+% 201609
+\alias{prob.munb.size.VGAM}
+\alias{negbinomial.initialize.yj}
+% 201607, 201608
+\alias{mroot2}
+\alias{psint}
+\alias{psintpvgam}
+\alias{df.residual_pvgam}
+\alias{startstoppvgam}
+\alias{summary.pvgam-class}
+%%%%% \alias{summarypvgam}
+%%%%    % \alias{show.summary.pvgam}
+\alias{endf}
+\alias{endfpvgam}
+\alias{vcov.pvgam}
+\alias{vcov.pvgam-class}
+\alias{vlabel}
+\alias{show.pvgam}
+\alias{model.matrixpvgam}
+% 201606
+\alias{gharmonic}
+\alias{gharmonic2}
+\alias{bisection.basic}
+\alias{Zeta.aux}
+\alias{deflat.limit.oizeta}
 % 201605
 \alias{deflat.limit.oipospois}
 % 20160418 (keyword: mgcvvgam)
 % \alias{ps}
-\alias{Pen.psv}
+\alias{get.X.VLM.aug}
 \alias{psv2magic}
 % \alias{psvglm.fit}
 % \alias{psvlm.wfit}
-\alias{psvgam-class}
+\alias{pvgam-class}
 % \alias{PS}
 %
 \alias{checkwz}
@@ -158,7 +190,7 @@
 %
 %
 %
-% 20120514, 20120528, 
+% 20120514, 20120528,
 \alias{w.wz.merge}
 \alias{w.y.check}
 \alias{vweighted.mean.default}
@@ -213,7 +245,7 @@
 % \alias{show.summary.rc.exponential}
 \alias{show.summary.rrvglm}
 %\alias{show.summary.uqo}
-\alias{show.summary.vgam}
+% \alias{show.summary.vgam}
 % \alias{show.summary.vglm} % 20150831
 \alias{show.summary.vlm}
 %\alias{show.uqo}
@@ -636,7 +668,7 @@
 \alias{summaryrcim}
 \alias{summary.rrvglm}
 %\alias{summary.uqo}
-\alias{summaryvgam}
+% \alias{summaryvgam}
 %\alias{summaryvglm}  % 20150831
 \alias{summaryvlm}
 % \alias{tapplymat1}
diff --git a/man/oalog.Rd b/man/oalog.Rd
new file mode 100644
index 0000000..d0d9cdd
--- /dev/null
+++ b/man/oalog.Rd
@@ -0,0 +1,138 @@
+\name{oalog}
+\alias{oalog}
+%\alias{oalogff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Altered Logarithmic Distribution }
+\description{
+  Fits a one-altered logarithmic distribution based on
+  a conditional model involving a Bernoulli distribution and a
+  1-truncated logarithmic distribution.
+
+}
+\usage{
+oalog(lpobs1 = "logit", lshape = "logit",
+      type.fitted = c("mean", "shape", "pobs1", "onempobs1"),
+      ipobs1 = NULL, gshape = ppoints(8), zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lpobs1}{
+    Link function for the parameter \eqn{p_1}{pobs1} or \eqn{\phi}{phi},
+    called \code{pobs1} or \code{phi} here.
+    See \code{\link{Links}} for more choices.
+
+  }
+  \item{lshape}{
+    See \code{\link{logff}} for details.
+
+  }
+
+  \item{gshape, type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
+
+% \item{epobs1, eshape}{
+% List. Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+%           epobs1 = list(), eshape = list(),
+% }
+
+  \item{ipobs1, zero}{
+  See \code{\link{CommonVGAMffArguments}}
+  for information.
+
+  }
+
+
+}
+
+\details{
+  The response \eqn{Y} is one with probability \eqn{p_1}{pobs1},
+  or \eqn{Y} has a 1-truncated logarithmic distribution with
+  probability \eqn{1-p_1}{1-pobs1}.  Thus \eqn{0 < p_1 < 1}{0 < pobs1 < 1},
+  which is modelled as a function of the covariates.  The one-altered
+  logarithmic distribution differs from the one-inflated
+  logarithmic distribution in that the former has ones coming from one
+  source, whereas the latter has ones coming from the logarithmic
+  distribution too. The one-inflated logarithmic distribution
+  is implemented in the \pkg{VGAM} package.  Some people
+  call the one-altered logarithmic a \emph{hurdle} model.
+
+
+  The input can be a matrix (multiple responses).
+  By default, the two linear/additive predictors
+  of \code{oalog}
+  are \eqn{(logit(\phi), logit(s))^T}{(logit(phi), logit(shape))^T}.
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+
+  The \code{fitted.values} slot of the fitted object,
+  which should be extracted by the generic function \code{fitted}, returns
+  the mean \eqn{\mu}{mu} (default) which is given by
+  \deqn{\mu = \phi + (1-\phi) A}{%
+         mu = phi + (1- phi) A}
+  where \eqn{A} is the mean of the one-truncated
+  logarithmic distribution.
+  If \code{type.fitted = "pobs1"} then \eqn{p_1}{pobs1} is returned.
+
+
+
+}
+%\references{
+%
+%
+%}
+%\section{Warning }{
+%}
+
+\author{ T. W. Yee }
+\note{
+
+  This family function effectively combines
+  \code{\link{binomialff}} and
+  \code{\link{otlog}} into
+  one family function.
+
+
+}
+
+\seealso{
+  \code{\link{Oalog}},
+  \code{\link{logff}},
+  \code{\link{oilog}},
+  \code{\link{CommonVGAMffArguments}},
+  \code{\link{simulate.vlm}}.
+
+
+}
+% \code{\link{poslogarithmic}},
+
+
+\examples{
+odata <- data.frame(x2 = runif(nn <- 1000))
+odata <- transform(odata, pobs1 = logit(-1 + 2*x2, inverse = TRUE),
+                          shape  = logit(-2 + 3*x2, inverse = TRUE))
+odata <- transform(odata, y1 = roalog(nn, shape = shape, pobs1 = pobs1),
+                          y2 = roalog(nn, shape = shape, pobs1 = pobs1))
+with(odata, table(y1))
+
+ofit <- vglm(cbind(y1, y2) ~ x2, oalog, data = odata, trace = TRUE)
+coef(ofit, matrix = TRUE)
+head(fitted(ofit))
+head(predict(ofit))
+summary(ofit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/oalogUC.Rd b/man/oalogUC.Rd
new file mode 100644
index 0000000..5bc8892
--- /dev/null
+++ b/man/oalogUC.Rd
@@ -0,0 +1,77 @@
+\name{Oalog}
+\alias{Oalog}
+\alias{doalog}
+\alias{poalog}
+\alias{qoalog}
+\alias{roalog}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Altered Logarithmic Distribution }
+\description{
+  Density, distribution function, quantile function and random generation
+  for the one-altered logarithmic distribution with parameter \code{pobs1}.
+
+}
+\usage{
+doalog(x, shape, pobs1 = 0, log = FALSE)
+poalog(q, shape, pobs1 = 0)
+qoalog(p, shape, pobs1 = 0)
+roalog(n, shape, pobs1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, n, p}{ Same as \code{\link[stats:Uniform]{Unif}}.}
+  \item{shape, log}{
+  Same as \code{\link{Otlog}}.
+
+  }
+  \item{pobs1}{
+  Probability of (an observed) one, called \eqn{pobs1}.
+  The default value of \code{pobs1 = 0} corresponds
+  to the response having a 1-truncated logarithmic distribution.
+
+  }
+}
+\details{
+  The probability function of \eqn{Y} is 1 with probability \code{pobs1},
+  else a 1-truncated
+  logarithmic(shape)
+  distribution.
+
+}
+\value{
+  \code{doalog} gives the density and
+  \code{poalog} gives the distribution function,
+  \code{qoalog} gives the quantile function, and
+  \code{roalog} generates random deviates.
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+    The argument \code{pobs1} is recycled to the required length, and
+    must have values which lie in the interval \eqn{[0,1]}.
+
+}
+
+\seealso{
+    \code{\link{oalog}},
+    \code{\link{oilog}},
+    \code{\link{Otlog}}.
+
+
+}
+\examples{
+shape <- 0.75; pobs1 <- 0.10; x <- (-1):7
+doalog(x, shape = shape, pobs1 = pobs1)
+table(roalog(100, shape = shape, pobs1 = pobs1))
+
+\dontrun{ x <- 0:10
+barplot(rbind(doalog(x, shape = shape, pobs1 = pobs1),
+                dlog(x, shape = shape)),
+        beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
+        ylab = "Probability", names.arg = as.character(x),
+        main = paste("OAL(shape = ", shape, ", pobs1 = ", pobs1,
+                   ") [blue] vs",  " Logarithmic(shape = ", shape,
+                   ") [orange] densities", sep = "")) }
+}
+\keyword{distribution}
diff --git a/man/oapospoisUC.Rd b/man/oapospoisUC.Rd
new file mode 100644
index 0000000..d56a797
--- /dev/null
+++ b/man/oapospoisUC.Rd
@@ -0,0 +1,77 @@
+\name{Oapospois}
+\alias{Oapospois}
+\alias{doapospois}
+\alias{poapospois}
+\alias{qoapospois}
+\alias{roapospois}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Altered Positive-Poisson Distribution }
+\description{
+  Density, distribution function, quantile function and random generation
+  for the one-altered positive-Poisson distribution with parameter \code{pobs1}.
+
+}
+\usage{
+doapospois(x, lambda, pobs1 = 0, log = FALSE)
+poapospois(q, lambda, pobs1 = 0)
+qoapospois(p, lambda, pobs1 = 0)
+roapospois(n, lambda, pobs1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, n, p}{ Same as \code{\link[stats:Uniform]{Unif}}.}
+  \item{lambda, log}{
+  Same as \code{\link{Otpospois}}.
+
+  }
+  \item{pobs1}{
+  Probability of (an observed) one, called \eqn{pobs1}.
+  The default value of \code{pobs1 = 0} corresponds
+  to the response having a 1-truncated positive-Poisson distribution.
+
+  }
+}
+\details{
+  The probability function of \eqn{Y} is 1 with probability \code{pobs1},
+  else a 1-truncated
+  positive-Poisson(lambda)
+  distribution.
+
+}
+\value{
+  \code{doapospois} gives the density and
+  \code{poapospois} gives the distribution function,
+  \code{qoapospois} gives the quantile function, and
+  \code{roapospois} generates random deviates.
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+    The argument \code{pobs1} is recycled to the required length, and
+    must have values which lie in the interval \eqn{[0,1]}.
+
+}
+
+\seealso{
+    \code{\link{oapospoisson}},
+    \code{\link{Oipospois}},
+    \code{\link{Otpospois}}.
+
+
+}
+\examples{
+lambda <- 3; pobs1 <- 0.30; x <- (-1):7
+doapospois(x, lambda = lambda, pobs1 = pobs1)
+table(roapospois(100, lambda = lambda, pobs1 = pobs1))
+
+\dontrun{ x <- 0:10
+barplot(rbind(doapospois(x, lambda = lambda, pobs1 = pobs1),
+                dpospois(x, lambda = lambda)),
+        beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
+        ylab = "Probability", names.arg = as.character(x),
+        main = paste("OAPP(lambda = ", lambda, ", pobs1 = ", pobs1,
+                   ") [blue] vs",  " PosPoisson(lambda = ", lambda,
+                   ") [orange] densities", sep = "")) }
+}
+\keyword{distribution}
diff --git a/man/oapospoisson.Rd b/man/oapospoisson.Rd
new file mode 100644
index 0000000..f40cc59
--- /dev/null
+++ b/man/oapospoisson.Rd
@@ -0,0 +1,131 @@
+\name{oapospoisson}
+\alias{oapospoisson}
+%\alias{oapospoisff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Altered Positive-Poisson Distribution }
+\description{
+  Fits a one-altered positive-Poisson distribution based on
+  a conditional model involving a Bernoulli distribution and a
+  1-truncated positive-Poisson distribution.
+
+}
+\usage{
+oapospoisson(lpobs1 = "logit", llambda = "loge",
+             type.fitted = c("mean", "lambda", "pobs1", "onempobs1"),
+             ipobs1 = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lpobs1}{
+    Link function for the parameter \eqn{p_1}{pobs1} or \eqn{\phi}{phi},
+    called \code{pobs1} or \code{phi} here.
+    See \code{\link{Links}} for more choices.
+
+  }
+  \item{llambda}{
+    See \code{\link{pospoisson}} for details.
+
+  }
+
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
+
+  \item{ipobs1, zero}{
+  See \code{\link{CommonVGAMffArguments}}
+  for information.
+
+  }
+
+
+}
+
+\details{
+  The response \eqn{Y} is one with probability \eqn{p_1}{pobs1},
+  or \eqn{Y} has a 1-truncated positive-Poisson distribution with
+  probability \eqn{1-p_1}{1-pobs1}.  Thus \eqn{0 < p_1 < 1}{0 < pobs1 < 1},
+  which is modelled as a function of the covariates.  The one-altered
+  positive-Poisson distribution differs from the one-inflated
+  positive-Poisson distribution in that the former has ones coming from one
+  source, whereas the latter has ones coming from the positive-Poisson
+  distribution too. The one-inflated positive-Poisson distribution
+  is implemented in the \pkg{VGAM} package.  Some people
+  call the one-altered positive-Poisson a \emph{hurdle} model.
+
+
+  The input can be a matrix (multiple responses).
+  By default, the two linear/additive predictors
+  of \code{oapospoisson}
+  are \eqn{(logit(\phi), log(\lambda))^T}{(logit(phi), log(lambda))^T}.
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+
+  The \code{fitted.values} slot of the fitted object,
+  which should be extracted by the generic function \code{fitted}, returns
+  the mean \eqn{\mu}{mu} (default) which is given by
+  \deqn{\mu = \phi + (1-\phi) A}{%
+         mu = phi + (1- phi) A}
+  where \eqn{A} is the mean of the one-truncated
+  positive-Poisson distribution.
+  If \code{type.fitted = "pobs1"} then \eqn{p_1}{pobs1} is returned.
+
+
+
+}
+%\references{
+%
+%
+%}
+%\section{Warning }{
+%}
+
+\author{ T. W. Yee }
+\note{
+
+  This family function effectively combines
+  \code{\link{binomialff}} and
+  \code{\link{otpospoisson}} into
+  one family function.
+
+
+}
+
+\seealso{
+  \code{\link{Oapospois}},
+  \code{\link{pospoisson}},
+  \code{\link{oipospoisson}},
+  \code{\link{CommonVGAMffArguments}},
+  \code{\link{simulate.vlm}}.
+
+
+}
+
+
+\examples{
+odata <- data.frame(x2 = runif(nn <- 1000))
+odata <- transform(odata, pobs1  = logit(-1 + 2*x2, inverse = TRUE),
+                          lambda =  loge( 1 + 1*x2, inverse = TRUE))
+odata <- transform(odata, y1 = roapospois(nn, lambda = lambda, pobs1 = pobs1),
+                          y2 = roapospois(nn, lambda = lambda, pobs1 = pobs1))
+with(odata, table(y1))
+
+ofit <- vglm(cbind(y1, y2) ~ x2, oapospoisson, data = odata, trace = TRUE)
+coef(ofit, matrix = TRUE)
+head(fitted(ofit))
+head(predict(ofit))
+summary(ofit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/oazeta.Rd b/man/oazeta.Rd
new file mode 100644
index 0000000..6e1ad96
--- /dev/null
+++ b/man/oazeta.Rd
@@ -0,0 +1,132 @@
+\name{oazeta}
+\alias{oazeta}
+%\alias{oazetaff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Altered Zeta Distribution }
+\description{
+  Fits a one-altered zeta distribution based on
+  a conditional model involving a Bernoulli distribution and a
+  1-truncated zeta distribution.
+
+}
+\usage{
+oazeta(lpobs1 = "logit", lshape = "loge",
+       type.fitted = c("mean", "shape", "pobs1", "onempobs1"),
+       gshape = exp((-4:3)/4), ishape = NULL, ipobs1 = NULL, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lpobs1}{
+    Link function for the parameter \eqn{p_1}{pobs1} or \eqn{\phi}{phi},
+    called \code{pobs1} or \code{phi} here.
+    See \code{\link{Links}} for more choices.
+
+  }
+  \item{lshape}{
+    See \code{\link{zeta}} for details.
+
+  }
+
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}}
+  and \code{\link{fittedvlm}} for information.
+
+
+  }
+
+
+  \item{gshape, ishape, ipobs1, zero}{
+  See \code{\link{CommonVGAMffArguments}}
+  for information.
+
+  }
+
+
+}
+
+\details{
+  The response \eqn{Y} is one with probability \eqn{p_1}{pobs1},
+  or \eqn{Y} has a 1-truncated zeta distribution with
+  probability \eqn{1-p_1}{1-pobs1}.  Thus \eqn{0 < p_1 < 1}{0 < pobs1 < 1},
+  which is modelled as a function of the covariates.  The one-altered
+  zeta distribution differs from the one-inflated
+  zeta distribution in that the former has ones coming from one
+  source, whereas the latter has ones coming from the zeta
+  distribution too. The one-inflated zeta distribution
+  is implemented in the \pkg{VGAM} package.  Some people
+  call the one-altered zeta a \emph{hurdle} model.
+
+
+  The input can be a matrix (multiple responses).
+  By default, the two linear/additive predictors
+  of \code{oazeta}
+  are \eqn{(logit(\phi), log(shape))^T}{(logit(phi), log(shape))^T}.
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+
+  The \code{fitted.values} slot of the fitted object,
+  which should be extracted by the generic function \code{fitted}, returns
+  the mean \eqn{\mu}{mu} (default) which is given by
+  \deqn{\mu = \phi + (1-\phi) A}{%
+         mu = phi + (1- phi) A}
+  where \eqn{A} is the mean of the one-truncated
+  zeta distribution.
+  If \code{type.fitted = "pobs1"} then \eqn{p_1}{pobs1} is returned.
+
+
+
+}
+%\references{
+%
+%
+%}
+%\section{Warning }{
+%}
+
+\author{ T. W. Yee }
+\note{
+
+  This family function effectively combines
+  \code{\link{binomialff}} and
+  \code{\link{otzeta}} into
+  one family function.
+
+
+}
+
+\seealso{
+  \code{\link{Oazeta}},
+  \code{\link{zetaff}},
+  \code{\link{oizeta}},
+  \code{\link{otzeta}},
+  \code{\link{CommonVGAMffArguments}},
+  \code{\link{simulate.vlm}}.
+
+
+}
+
+
+\examples{
+odata <- data.frame(x2 = runif(nn <- 1000))
+odata <- transform(odata, pobs1 = logit(-1 + 2*x2, inverse = TRUE),
+                          shape =  loge( 1 + 1*x2, inverse = TRUE))
+odata <- transform(odata, y1 = roazeta(nn, shape = shape, pobs1 = pobs1),
+                          y2 = roazeta(nn, shape = shape, pobs1 = pobs1))
+with(odata, table(y1))
+
+ofit <- vglm(cbind(y1, y2) ~ x2, oazeta, data = odata, trace = TRUE)
+coef(ofit, matrix = TRUE)
+head(fitted(ofit))
+head(predict(ofit))
+summary(ofit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/oazetaUC.Rd b/man/oazetaUC.Rd
new file mode 100644
index 0000000..41187d6
--- /dev/null
+++ b/man/oazetaUC.Rd
@@ -0,0 +1,80 @@
+\name{Oazeta}
+\alias{Oazeta}
+\alias{doazeta}
+\alias{poazeta}
+\alias{qoazeta}
+\alias{roazeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Altered Zeta Distribution }
+\description{
+  Density, distribution function, quantile function and random generation
+  for the one-altered zeta distribution with parameter \code{pobs1}.
+
+}
+\usage{
+doazeta(x, shape, pobs1 = 0, log = FALSE)
+poazeta(q, shape, pobs1 = 0)
+qoazeta(p, shape, pobs1 = 0)
+roazeta(n, shape, pobs1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, n, p}{ Same as \code{\link[stats:Uniform]{Unif}}.}
+  \item{shape, log}{
+  Same as \code{\link{Otzeta}}.
+
+  }
+  \item{pobs1}{
+  Probability of (an observed) one, called \eqn{pobs1}.
+  The default value of \code{pobs1 = 0} corresponds
+  to the response having a 1-truncated zeta distribution.
+
+  }
+}
+\details{
+  The probability function of \eqn{Y} is 1 with probability \code{pobs1},
+  else a 1-truncated
+  zeta
+  distribution.
+
+
+}
+\value{
+  \code{doazeta} gives the density and
+  \code{poazeta} gives the distribution function,
+  \code{qoazeta} gives the quantile function, and
+  \code{roazeta} generates random deviates.
+
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+    The argument \code{pobs1} is recycled to the required length, and
+    must have values which lie in the interval \eqn{[0,1]}.
+
+}
+
+\seealso{
+    \code{\link{oazeta}},
+    \code{\link{Oizeta}},
+    \code{\link{Otzeta}},
+    \code{\link{zeta}}.
+
+
+}
+\examples{
+shape <- 1.1; pobs1 <- 0.10; x <- (-1):7
+doazeta(x, shape = shape, pobs1 = pobs1)
+table(roazeta(100, shape = shape, pobs1 = pobs1))
+
+\dontrun{ x <- 0:10
+barplot(rbind(doazeta(x, shape = shape, pobs1 = pobs1),
+                dzeta(x, shape = shape)),
+        beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
+        ylab = "Probability", names.arg = as.character(x),
+        main = paste("OAZ(shape = ", shape, ", pobs1 = ", pobs1,
+                   ") [blue] vs",  " zeta(shape = ", shape,
+                   ") [orange] densities", sep = "")) }
+}
+\keyword{distribution}
diff --git a/man/oilog.Rd b/man/oilog.Rd
new file mode 100644
index 0000000..198a7c3
--- /dev/null
+++ b/man/oilog.Rd
@@ -0,0 +1,93 @@
+\name{oilog}
+\alias{oilog}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-inflated Logarithmic Distribution Family Function }
+\description{
+  Fits a 1-inflated logarithmic distribution.
+
+
+}
+\usage{
+oilog(lpstr1 = "logit", lshape = "logit",
+      type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"),
+      ishape = NULL, gpstr1 = ppoints(8), gshape = ppoints(8), zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lpstr1, lshape}{
+    Link functions.
+    For \code{lpstr1}: the same idea as \code{\link{zipoisson}}
+    except it applies to a structural 1.
+
+
+  }
+  \item{gpstr1, gshape, ishape}{
+  For initial values.
+  See \code{\link{CommonVGAMffArguments}} for information.
+
+
+  }
+  \item{type.fitted, zero}{
+  See \code{\link{CommonVGAMffArguments}} for information.
+
+  }
+
+
+}
+\details{
+  The 1-inflated logarithmic distribution is a mixture
+  distribution of the
+  logarithmic
+  distribution with some probability of obtaining a (structural) 1.
+  Thus there are two sources for obtaining the value 1.
+  This distribution is written here
+  in a way that retains a similar notation to the
+  one-inflated positive-Poisson, i.e., the
+  probability \eqn{P[Y=1]} involves another parameter \eqn{\phi}{phi}.
+  See \code{\link{oipospoisson}}.
+
+
+  This family function can handle multiple responses.
+
+
+}
+%\section{Warning }{
+%    Under- or over-flow may occur if the data is ill-conditioned.
+%    Lots of data is needed to estimate the parameters accurately.
+%    Usually, probably the \code{shape} parameter is best modelled as
+%    intercept-only.
+
+%}
+
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  \code{\link{rrvglm}} and \code{\link{vgam}}.
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+%\note{
+%}
+\seealso{
+  \code{\link{Oilog}},
+  \code{\link{logff}},
+  \code{\link{Oizeta}}.
+
+
+
+}
+\examples{
+\dontrun{ odata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
+odata <- transform(odata, pstr1 = logit(-1 + x2, inverse = TRUE), shape = 0.5)
+odata <- transform(odata, y1 = roilog(nn, shape, pstr1 = pstr1))
+with(odata, table(y1))
+fit1 <- vglm(y1 ~ x2, oilog(zero = "shape"), data = odata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+}
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/oilogUC.Rd b/man/oilogUC.Rd
new file mode 100644
index 0000000..85c79eb
--- /dev/null
+++ b/man/oilogUC.Rd
@@ -0,0 +1,126 @@
+\name{Oilog}
+\alias{Oilog}
+\alias{doilog}
+\alias{poilog}
+\alias{qoilog}
+\alias{roilog}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Inflated Logarithmic Distribution }
+\description{
+  Density, distribution function, quantile function and random generation
+  for the one-inflated
+  logarithmic distribution with parameter \code{pstr1}.
+
+
+}
+\usage{
+doilog(x, shape, pstr1 = 0, log = FALSE)
+poilog(q, shape, pstr1 = 0)
+qoilog(p, shape, pstr1 = 0)
+roilog(n, shape, pstr1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, p, n}{Same as \code{\link[stats]{Uniform}}.}
+  \item{shape}{ Vector of parameters that lie in \eqn{(0,1)}. }
+  \item{pstr1}{
+  Probability of a structural one
+  (i.e., ignoring the logarithmic distribution),
+  called \eqn{\phi}{phi}.
+  The default value of \eqn{\phi = 0}{phi = 0} corresponds
+  to the response having an ordinary logarithmic distribution.
+
+
+  }
+  \item{log}{Same as \code{\link[stats]{Uniform}}.}
+}
+\details{
+  The probability function of \eqn{Y} is 1 with probability
+  \eqn{\phi}{phi}, and \eqn{Logarithmic(shape)} with
+  probability \eqn{1-\phi}{1-phi}. Thus
+  \deqn{P(Y=1) =\phi + (1-\phi)  P(W=1)}{%
+        P(Y=1) = phi + (1-phi) * P(W=1)}
+      where \eqn{W} is distributed as a
+      \eqn{Logarithmic(shape)} random variable.
+  The \pkg{VGAM} family function \code{\link{oilog}} estimates
+  \eqn{\phi}{phi} by MLE.
+
+
+
+}
+\value{
+  \code{doilog} gives the density,
+  \code{poilog} gives the distribution function,
+  \code{qoilog} gives the quantile function, and
+  \code{roilog} generates random deviates.
+
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+  The argument \code{pstr1} is recycled to the required length, and
+  usually has values which lie in the interval \eqn{[0,1]}.
+
+
+
+  These functions actually allow for the \emph{zero-deflated
+  logarithmic} distribution. Here, \code{pstr1} is also permitted
+  to lie in the interval \code{[-dlog(1, shape) / (1 - dlog(1, shape)), 0]}.
+  The resulting probability of a unit count is \emph{less than}
+  the nominal logarithmic value, and the use of \code{pstr1} to
+  stand for the probability of a structural 1 loses its
+  meaning.
+%
+%
+%
+   When \code{pstr1} equals \code{-dlog(1, shape) / (1 - dlog(1, shape))}
+   this corresponds to the 1-truncated logarithmic distribution.
+
+
+}
+
+\seealso{
+    \code{\link{oilog}},
+    \code{\link{rlog}},
+    \code{\link{logff}},
+    \code{\link{Otlog}}.
+%   \code{\link{zipf}}.
+
+
+
+}
+\examples{
+shape <- 0.5; pstr1 <- 0.3; x <- (-1):7
+(ii <- doilog(x, shape, pstr1 = pstr1))
+max(abs(poilog(1:200, shape) -
+        cumsum(shape^(1:200) / (-(1:200) * log1p(-shape)))))  # Should be 0
+
+\dontrun{ x <- 0:10
+par(mfrow = c(2, 1))  # One-Inflated logarithmic
+barplot(rbind(doilog(x, shape, pstr1 = pstr1), dlog(x, shape)),
+        beside = TRUE, col = c("blue", "orange"),
+        main = paste("OILogff(", shape, ", pstr1 = ", pstr1, ") (blue) vs",
+                     " Logff(", shape, ") (orange)", sep = ""),
+        names.arg = as.character(x))
+
+deflat.limit <- -dlog(1, shape) / plog(1, shape, lower.tail = FALSE)
+newpstr1 <- round(deflat.limit, 3) + 0.001  # Inside but near the boundary
+barplot(rbind(doilog(x, shape, pstr1 = newpstr1),
+                dlog(x, shape)),
+        beside = TRUE, col = c("blue","orange"),
+        main = paste("ODLogff(", shape, ", pstr1 = ", newpstr1, ") (blue) vs",
+                     " Logff(", shape, ") (orange)", sep = ""),
+        names.arg = as.character(x)) }
+}
+\keyword{distribution}
+
+
+
+%qoilog(p, shape, pstr1 = 0)
+%roilog(n, shape, pstr1 = 0)
+
+
+
+% table(roilog(100, shape, pstr1 = pstr1))
+% round(doilog(1:10, shape, pstr1 = pstr1) * 100)  # Should be similar
diff --git a/man/oiposbinomUC.Rd b/man/oiposbinomUC.Rd
index 361d858..c6402ac 100644
--- a/man/oiposbinomUC.Rd
+++ b/man/oiposbinomUC.Rd
@@ -24,12 +24,15 @@ roiposbinom(n, size, prob, pstr1 = 0)
 \arguments{
   \item{x, p, q, n}{Same as \code{\link{Posbinom}}. }
   \item{size, prob}{Same as \code{\link{Posbinom}}. }
-  \item{pstr1}{ 
+  \item{pstr1}{
   Probability of a structural one
   (i.e., ignoring the positive binomial distribution),
   called \eqn{\phi}{phi}.
   The default value of \eqn{\phi = 0}{phi = 0} corresponds
   to the response having a positive binomial distribution.
+  However, \code{pstr1} can also be negative, in which case it
+  ceases its interpretation as a probability, and this is known
+  as \emph{one-deflation}.
 
 
   }
@@ -37,12 +40,13 @@ roiposbinom(n, size, prob, pstr1 = 0)
 }
 \details{
   The probability function of \eqn{Y} is 1 with probability
-  \eqn{\phi}{phi}, and \eqn{PosBinomial(size, prob)}{PosBinomial(size, prob)} with
-  probability \eqn{1-\phi}{1-phi}. Thus 
+  \eqn{\phi}{phi}, and \eqn{PosBinomial(size, prob)}{PosBinomial(size, prob)}
+  with probability \eqn{1-\phi}{1-phi}. Thus
   \deqn{P(Y=1) =\phi + (1-\phi)  P(W=1)}{%
         P(Y=1) = phi + (1-phi) * P(W=1)}
       where \eqn{W} is distributed as a
-      positive \eqn{binomial(size, prob)}{binomial(size, prob)} random variate.
+      positive \eqn{binomial(size, prob)}{binomial(size, prob)}
+      random variable.
 
 
 }
@@ -56,7 +60,7 @@ roiposbinom(n, size, prob, pstr1 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The argument \code{pstr1} is recycled to the required length, and
   usually has values which lie in the interval \eqn{[0,1]}.
 %
@@ -79,7 +83,7 @@ roiposbinom(n, size, prob, pstr1 = 0)
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{posbinomial}},
     \code{\link[stats:binomial]{dbinom}},
     \code{\link{binomialff}}.
diff --git a/man/oiposbinomial.Rd b/man/oiposbinomial.Rd
new file mode 100644
index 0000000..371e0c8
--- /dev/null
+++ b/man/oiposbinomial.Rd
@@ -0,0 +1,207 @@
+\name{oiposbinomial}
+\alias{oiposbinomial}
+%\alias{oiposbinomialff}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Inflated Positive Binomial Distribution Family Function }
+\description{
+  Fits a one-inflated positive binomial distribution by maximum likelihood
+  estimation.
+
+}
+\usage{
+oiposbinomial(lpstr1 = "logit", lprob = "logit",
+              type.fitted = c("mean", "prob", "pobs1", "pstr1", "onempstr1"),
+              iprob = NULL, gpstr1 = ppoints(9), gprob  = ppoints(9),
+              multiple.responses = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lpstr1, lprob}{
+  Link functions for the parameter \eqn{\phi}{phi}
+  and the positive binomial probability \eqn{\mu}{prob} parameter.
+  See \code{\link{Links}} for more choices.
+  See \code{\link{CommonVGAMffArguments}} also.
+  For the one-\emph{deflated} model see below.
+
+  }
+
+% \item{epstr1, eprob}{
+%          epstr1 = list(),  eprob = list(),
+% List. Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}} and \code{\link{fittedvlm}}.
+
+
+  }
+
+  \item{iprob, gpstr1, gprob}{
+  For initial values;
+  see \code{\link{CommonVGAMffArguments}}.
+
+  }
+
+% \item{lonempstr1, ionempstr1}{
+% Corresponding arguments for the other parameterization.
+% See details below.
+
+
+% }
+
+
+% \item{zero}{
+% An integer specifying which linear/additive predictor is modelled
+% as intercepts only.  If given, the value must be either 1 or 2,
+% and the default is the first. Setting \code{zero = NULL} enables both
+% \eqn{\phi}{phi} and \eqn{\mu}{prob} to be modelled as a function of
+% the explanatory variables.
+% See \code{\link{CommonVGAMffArguments}} for more information.
+
+% }
+  \item{multiple.responses}{
+  Logical.
+  See \code{\link{binomialff}}
+  and \code{\link{posbinomial}}.
+
+
+
+% Currently it must be \code{FALSE} to mean the
+% function does not handle multiple responses. This
+% is to remain compatible with the same argument in
+% \code{\link{binomialff}}.
+
+
+  }
+  \item{zero}{
+  See \code{\link{CommonVGAMffArguments}} for information.
+
+
+
+  }
+}
+\details{
+  These functions are based on
+  \deqn{P(Y=y) =  \phi + (1-\phi) N \mu (1-\mu)^N / (1-(1-\mu)^N),}{%
+        P(Y=y) =   phi + (1- phi) * N * prob * (1-prob)^N / (1-(1-prob)^N),}
+  for \eqn{y=1/N}, and
+  \deqn{P(Y=y) = (1-\phi) {N \choose Ny} \mu^{Ny} (1-\mu)^{N(1-y)} / (1-(1-\mu)^N).}{%
+        P(Y=y) = (1-phi) * choose(N,Ny) * prob^(N*y) * (1-prob)^(N*(1-y)) / (1-(1-prob)^N).}
+  for \eqn{y=2/N,\ldots,1}. That is, the response is a sample
+  proportion out of \eqn{N} trials, and the argument \code{size} in
+  \code{\link{roiposbinom}} is \eqn{N} here.
+  Ideally \eqn{N > 2} is needed.
+  The parameter \eqn{\phi}{phi} is the probability of a structural one,
+  and it satisfies \eqn{0 < \phi < 1}{0 < phi < 1} (usually).
+  The mean of \eqn{Y} is
+  \eqn{E(Y)=\phi + (1-\phi) \mu / (1-(1-\mu)^N)}{E(Y) = phi + (1-phi) * prob / (1-(1-prob)^N)}
+  and these are returned as the default fitted values.
+  By default, the two linear/additive predictors
+  for \code{oiposbinomial()}
+  are \eqn{(logit(\phi), logit(\mu))^T}{(logit(phi), logit(prob))^T}.
+
+
+
+% The \pkg{VGAM} family function \code{oiposbinomialff()} has a few
+% changes compared to \code{oiposbinomial()}.
+% These are:
+% (i)   the order of the linear/additive predictors is switched so the
+%       binomial probability comes first;
+% (ii)  argument \code{onempstr1} is now 1 minus
+%       the probability of a structural zero, i.e.,
+%       the probability of the parent (binomial) component,
+%       i.e., \code{onempstr1} is \code{1-pstr1};
+% (iii) argument \code{zero} has a new default so that the \code{onempstr1}
+%       is intercept-only by default.
+% Now \code{oiposbinomialff()} is generally recommended over
+% \code{oiposbinomial()}.
+% Both functions implement Fisher scoring.
+
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}}
+  and \code{\link{vgam}}.
+
+
+}
+%\references{
+%
+%
+%}
+
+\author{ T. W. Yee }
+\note{
+  The response variable should have one of the formats described by
+  \code{\link{binomialff}}, e.g., a factor or two column matrix or a
+  vector of sample proportions with the \code{weights} argument
+  specifying the values of \eqn{N}.
+
+
+
+  To work well, one ideally needs large values of \eqn{N}
+  and \eqn{\mu}{prob} much greater than 0, i.e.,
+  the larger \eqn{N} and \eqn{\mu}{prob} are, the better.
+  If \eqn{N = 1} then the model is unidentifiable since
+  the number of parameters is excessive.
+
+
+
+% 20130316; adding this:
+  Estimated probabilities of a structural one and an
+  observed one are returned, as in \code{\link{zipoisson}}.
+
+
+
+  The one-\emph{deflated} positive binomial distribution might
+  be fitted by setting \code{lpstr1 = "identitylink"}, albeit,
+  not entirely reliably. See \code{\link{zipoisson}}
+  for information that can be applied here.
+% Else try the one-altered positive binomial distribution (see
+% \code{\link{oabinomial}}).
+
+
+}
+
+
+\seealso{
+  \code{\link{roiposbinom}},
+  \code{\link{posbinomial}},
+  \code{\link{binomialff}},
+  \code{\link[stats:Binomial]{rbinom}}.
+
+
+}
+\examples{
+size <- 10  # Number of trials; N in the notation above
+nn <- 200
+odata <- data.frame(pstr1  = logit( 0, inverse = TRUE),  # 0.50
+                    mubin1 = logit(-1, inverse = TRUE),  # Mean of usual binomial
+                    svec   = rep(size, length = nn),
+                    x2     = runif(nn))
+odata <- transform(odata,
+                   mubin2 = logit(-1 + x2, inverse = TRUE))
+odata <- transform(odata,
+                   y1 = roiposbinom(nn, svec, pr = mubin1, pstr1 = pstr1),
+                   y2 = roiposbinom(nn, svec, pr = mubin2, pstr1 = pstr1))
+with(odata, table(y1))
+fit1 <- vglm(y1 / svec ~  1, oiposbinomial, data = odata,
+             weights = svec, trace = TRUE, crit = "coef")
+fit2 <- vglm(y2 / svec ~ x2, oiposbinomial, data = odata,
+             weights = svec, trace = TRUE)
+
+coef(fit1, matrix = TRUE)
+Coef(fit1)  # Useful for intercept-only models
+head(fitted(fit1, type = "pobs1"))  # Estimate of P(Y = 1)
+head(fitted(fit1))
+with(odata, mean(y1))  # Compare this with fitted(fit1)
+summary(fit1)
+}
+\keyword{models}
+\keyword{regression}
+
+
+% fit at misc$pobs0  # Estimate of P(Y = 0)
diff --git a/man/oipospoisUC.Rd b/man/oipospoisUC.Rd
index f3f2d7b..9163898 100644
--- a/man/oipospoisUC.Rd
+++ b/man/oipospoisUC.Rd
@@ -24,7 +24,7 @@ roipospois(n, lambda, pstr1 = 0)
 \arguments{
   \item{x, p, q, n}{Same as \code{\link{Pospois}}.}
   \item{lambda}{ Vector of positive means. }
-  \item{pstr1}{ 
+  \item{pstr1}{
   Probability of a structural one
   (i.e., ignoring the positive Poisson distribution),
   called \eqn{\phi}{phi}.
@@ -38,7 +38,7 @@ roipospois(n, lambda, pstr1 = 0)
 \details{
   The probability function of \eqn{Y} is 1 with probability
   \eqn{\phi}{phi}, and \eqn{PosPoisson(\lambda)}{PosPoisson(lambda)} with
-  probability \eqn{1-\phi}{1-phi}. Thus 
+  probability \eqn{1-\phi}{1-phi}. Thus
   \deqn{P(Y=1) =\phi + (1-\phi)  P(W=1)}{%
         P(Y=1) = phi + (1-phi) * P(W=1)}
       where \eqn{W} is distributed as a
@@ -56,7 +56,7 @@ roipospois(n, lambda, pstr1 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The argument \code{pstr1} is recycled to the required length, and
   usually has values which lie in the interval \eqn{[0,1]}.
 
@@ -78,9 +78,11 @@ roipospois(n, lambda, pstr1 = 0)
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{Pospois}},
+    \code{\link{oapospoisson}},
     \code{\link{oipospoisson}},
+    \code{\link{otpospoisson}},
     \code{\link{pospoisson}},
     \code{\link[stats:Poisson]{dpois}},
     \code{\link{poissonff}}.
diff --git a/man/oipospoisson.Rd b/man/oipospoisson.Rd
index 32387e3..a6ed57f 100644
--- a/man/oipospoisson.Rd
+++ b/man/oipospoisson.Rd
@@ -8,7 +8,7 @@
 \usage{
 oipospoisson(lpstr1 = "logit", llambda = "loge",
    type.fitted = c("mean", "lambda", "pobs1", "pstr1", "onempstr1"),
-   ilambda = NULL, gpstr1 = (1:19)/20, gprobs.y = (1:19)/20, 
+   ilambda = NULL, gpstr1 = (1:19)/20, gprobs.y = (1:19)/20,
    imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -69,9 +69,11 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 \author{ Thomas W. Yee }
 %\note{
 %}
-\seealso{ 
-  \code{\link{roipospois}},
+\seealso{
+  \code{\link{Oipospois}},
   \code{\link{pospoisson}},
+  \code{\link{oapospoisson}},
+  \code{\link{otpospoisson}},
   \code{\link{zipoisson}},
   \code{\link{poissonff}},
   \code{\link{simulate.vlm}}.
diff --git a/man/oipospoisson.Rd b/man/oizeta.Rd
similarity index 53%
copy from man/oipospoisson.Rd
copy to man/oizeta.Rd
index 32387e3..990e81a 100644
--- a/man/oipospoisson.Rd
+++ b/man/oizeta.Rd
@@ -1,25 +1,26 @@
-\name{oipospoisson}
-\alias{oipospoisson}
+\name{oizeta}
+\alias{oizeta}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ One-inflated Positive Poisson Distribution Family Function }
+\title{ One-inflated Zeta Distribution Family Function }
 \description{
-  Fits a 1-inflated positive Poisson distribution.
+  Fits a 1-inflated zeta distribution.
+
+
 }
 \usage{
-oipospoisson(lpstr1 = "logit", llambda = "loge",
-   type.fitted = c("mean", "lambda", "pobs1", "pstr1", "onempstr1"),
-   ilambda = NULL, gpstr1 = (1:19)/20, gprobs.y = (1:19)/20, 
-   imethod = 1, zero = NULL)
+oizeta(lpstr1 = "logit", lshape = "loge",
+  type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"),
+  ishape = NULL, gpstr1 = ppoints(8), gshape = exp((-3:3) / 4), zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpstr1, llambda}{
+  \item{lpstr1, lshape}{
     For \code{lpstr1}: the same idea as \code{\link{zipoisson}}
     except it applies to a structural 1.
 
 
   }
-  \item{ilambda, gpstr1, gprobs.y, imethod}{
+  \item{gpstr1, gshape, ishape}{
   For initial values.
   See \code{\link{CommonVGAMffArguments}} for information.
 
@@ -33,14 +34,11 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 
 }
 \details{
-  The 1-inflated positive Poisson distribution is a mixture
+  The 1-inflated zeta distribution is a mixture
   distribution of the
-  positive (0-truncated) Poisson
+  zeta
   distribution with some probability of obtaining a (structural) 1.
   Thus there are two sources for obtaining the value 1.
-  It is similar to a zero-inflated Poisson model, except
-  the Poisson is replaced by a positive Poisson and the 0 is replaced
-  by 1.
   This distribution is written here
   in a way that retains a similar notation to the
   zero-inflated Poisson, i.e., the
@@ -54,6 +52,9 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 }
 \section{Warning }{
     Under- or over-flow may occur if the data is ill-conditioned.
+    Lots of data is needed to estimate the parameters accurately.
+    Usually, probably the shape parameter is best modelled as
+    intercept-only.
 
 }
 
@@ -69,23 +70,27 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 \author{ Thomas W. Yee }
 %\note{
 %}
-\seealso{ 
-  \code{\link{roipospois}},
-  \code{\link{pospoisson}},
-  \code{\link{zipoisson}},
-  \code{\link{poissonff}},
-  \code{\link{simulate.vlm}}.
+\seealso{
+  \code{\link{Oizeta}},
+  \code{\link{zetaff}},
+  \code{\link{oazeta}},
+  \code{\link{otzeta}},
+  \code{\link{diffzeta}},
+  \code{\link{zeta}},
+  \code{\link{Oizipf}}.
+
 
 
 }
-\examples{ set.seed(1)
-pdata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
-pdata <- transform(pdata, pstr1 = 0.5, lambda = exp(3 - x2))
-pdata <- transform(pdata, y1 = roipospois(nn, lambda, pstr1 = pstr1))
-with(pdata, table(y1))
-fit1 <- vglm(y1 ~ x2, oipospoisson, data = pdata, trace = TRUE)
+\examples{
+\dontrun{ odata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
+odata <- transform(odata, pstr1 = logit(-1 + x2, inverse = TRUE), shape = exp(-0.5))
+odata <- transform(odata, y1 = roizeta(nn, shape, pstr1 = pstr1))
+with(odata, table(y1))
+fit1 <- vglm(y1 ~ x2, oizeta(zero = "shape"), data = odata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 }
+}
 \keyword{models}
 \keyword{regression}
 
diff --git a/man/oizetaUC.Rd b/man/oizetaUC.Rd
new file mode 100644
index 0000000..feec1cf
--- /dev/null
+++ b/man/oizetaUC.Rd
@@ -0,0 +1,122 @@
+\name{Oizeta}
+\alias{Oizeta}
+\alias{doizeta}
+\alias{poizeta}
+\alias{qoizeta}
+\alias{roizeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Inflated Zeta Distribution }
+\description{
+  Density, distribution function, quantile function and random generation
+  for the one-inflated
+  zeta distribution with parameter \code{pstr1}.
+
+
+}
+\usage{
+doizeta(x, shape, pstr1 = 0, log = FALSE)
+poizeta(q, shape, pstr1 = 0)
+qoizeta(p, shape, pstr1 = 0)
+roizeta(n, shape, pstr1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, p, n}{Same as \code{\link[stats]{Uniform}}.}
+  \item{shape}{ Vector of positive shape parameters. }
+  \item{pstr1}{
+  Probability of a structural one
+  (i.e., ignoring the zeta distribution),
+  called \eqn{\phi}{phi}.
+  The default value of \eqn{\phi = 0}{phi = 0} corresponds
+  to the response having an ordinary zeta distribution.
+
+
+  }
+  \item{log}{Same as \code{\link[stats]{Uniform}}.}
+}
+\details{
+  The probability function of \eqn{Y} is 1 with probability
+  \eqn{\phi}{phi}, and \eqn{Zeta(shape)} with
+  probability \eqn{1-\phi}{1-phi}. Thus
+  \deqn{P(Y=1) =\phi + (1-\phi)  P(W=1)}{%
+        P(Y=1) = phi + (1-phi) * P(W=1)}
+      where \eqn{W} is distributed as a
+      \eqn{zeta(shape)} random variable.
+
+
+}
+\value{
+  \code{doizeta} gives the density,
+  \code{poizeta} gives the distribution function,
+  \code{qoizeta} gives the quantile function, and
+  \code{roizeta} generates random deviates.
+
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+  The argument \code{pstr1} is recycled to the required length, and
+  usually has values which lie in the interval \eqn{[0,1]}.
+
+
+
+  These functions actually allow for the \emph{one-deflated
+  zeta} distribution. Here, \code{pstr1} is also permitted
+  to lie in the interval \code{[-dzeta(1, shape) / (1 - dzeta(1, shape)), 0]}.
+  The resulting probability of a unit count is \emph{less than}
+  the nominal zeta value, and the use of \code{pstr1} to
+  stand for the probability of a structural 1 loses its
+  meaning.
+%
+%
+%
+   When \code{pstr1} equals \code{-dzeta(1, shape) / (1 - dzeta(1, shape))}
+   this corresponds to the 1-truncated zeta distribution.
+
+
+}
+
+\seealso{
+    \code{\link{Zeta}},
+    \code{\link{zetaff}},
+    \code{\link{Otzeta}}.
+%   \code{\link{zipf}}.
+
+
+
+}
+\examples{
+shape <- 1.5; pstr1 <- 0.3; x <- (-1):7
+(ii <- doizeta(x, shape, pstr1 = pstr1))
+max(abs(poizeta(1:200, shape) -
+        cumsum(1/(1:200)^(1+shape)) / zeta(shape+1)))  # Should be 0
+
+\dontrun{ x <- 0:10
+par(mfrow = c(2, 1))  # One-Inflated zeta
+barplot(rbind(doizeta(x, shape, pstr1 = pstr1), dzeta(x, shape)),
+        beside = TRUE, col = c("blue", "orange"),
+        main = paste("OIZeta(", shape, ", pstr1 = ", pstr1, ") (blue) vs",
+                     " Zeta(", shape, ") (orange)", sep = ""),
+        names.arg = as.character(x))
+
+deflat.limit <- -dzeta(1, shape) / pzeta(1, shape, lower.tail = FALSE)
+newpstr1 <- round(deflat.limit, 3) + 0.001  # Inside but near the boundary
+barplot(rbind(doizeta(x, shape, pstr1 = newpstr1),
+                dzeta(x, shape)),
+        beside = TRUE, col = c("blue","orange"),
+        main = paste("ODZeta(", shape, ", pstr1 = ", newpstr1, ") (blue) vs",
+                     " Zeta(", shape, ") (orange)", sep = ""),
+        names.arg = as.character(x)) }
+}
+\keyword{distribution}
+
+
+
+%qoizeta(p, shape, pstr1 = 0)
+%roizeta(n, shape, pstr1 = 0)
+
+
+
+% table(roizeta(100, shape, pstr1 = pstr1))
+% round(doizeta(1:10, shape, pstr1 = pstr1) * 100)  # Should be similar
diff --git a/man/oipospoisson.Rd b/man/oizipf.Rd
similarity index 53%
copy from man/oipospoisson.Rd
copy to man/oizipf.Rd
index 32387e3..22be494 100644
--- a/man/oipospoisson.Rd
+++ b/man/oizipf.Rd
@@ -1,25 +1,31 @@
-\name{oipospoisson}
-\alias{oipospoisson}
+\name{oizipf}
+\alias{oizipf}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ One-inflated Positive Poisson Distribution Family Function }
+\title{ One-inflated Zipf Distribution Family Function }
 \description{
-  Fits a 1-inflated positive Poisson distribution.
+  Fits a 1-inflated Zipf distribution.
+
+
 }
 \usage{
-oipospoisson(lpstr1 = "logit", llambda = "loge",
-   type.fitted = c("mean", "lambda", "pobs1", "pstr1", "onempstr1"),
-   ilambda = NULL, gpstr1 = (1:19)/20, gprobs.y = (1:19)/20, 
-   imethod = 1, zero = NULL)
+oizipf(N = NULL, lpstr1 = "logit", lshape = "loge",
+  type.fitted = c("mean", "shape", "pobs1", "pstr1", "onempstr1"),
+  ishape = NULL, gpstr1 = ppoints(8), gshape = exp((-3:3) / 4), zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpstr1, llambda}{
+  \item{N}{
+  Same as \code{\link{zipf}}.
+
+
+  }
+  \item{lpstr1, lshape}{
     For \code{lpstr1}: the same idea as \code{\link{zipoisson}}
     except it applies to a structural 1.
 
 
   }
-  \item{ilambda, gpstr1, gprobs.y, imethod}{
+  \item{gpstr1, gshape, ishape}{
   For initial values.
   See \code{\link{CommonVGAMffArguments}} for information.
 
@@ -33,14 +39,11 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 
 }
 \details{
-  The 1-inflated positive Poisson distribution is a mixture
+  The 1-inflated Zipf distribution is a mixture
   distribution of the
-  positive (0-truncated) Poisson
+  Zipf
   distribution with some probability of obtaining a (structural) 1.
   Thus there are two sources for obtaining the value 1.
-  It is similar to a zero-inflated Poisson model, except
-  the Poisson is replaced by a positive Poisson and the 0 is replaced
-  by 1.
   This distribution is written here
   in a way that retains a similar notation to the
   zero-inflated Poisson, i.e., the
@@ -54,6 +57,9 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 }
 \section{Warning }{
     Under- or over-flow may occur if the data is ill-conditioned.
+    Lots of data is needed to estimate the parameters accurately.
+    Usually, probably the shape parameter is best modelled as
+    intercept-only.
 
 }
 
@@ -69,23 +75,25 @@ oipospoisson(lpstr1 = "logit", llambda = "loge",
 \author{ Thomas W. Yee }
 %\note{
 %}
-\seealso{ 
-  \code{\link{roipospois}},
-  \code{\link{pospoisson}},
-  \code{\link{zipoisson}},
-  \code{\link{poissonff}},
-  \code{\link{simulate.vlm}}.
+\seealso{
+  \code{\link{Oizipf}},
+  \code{\link{zipf}},
+  \code{\link{Oizeta}}.
+
 
 
 }
-\examples{ set.seed(1)
-pdata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
-pdata <- transform(pdata, pstr1 = 0.5, lambda = exp(3 - x2))
-pdata <- transform(pdata, y1 = roipospois(nn, lambda, pstr1 = pstr1))
-with(pdata, table(y1))
-fit1 <- vglm(y1 ~ x2, oipospoisson, data = pdata, trace = TRUE)
+\examples{
+\dontrun{ odata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
+odata <- transform(odata, pstr1 = logit(-1 + x2, inverse = TRUE),
+                          myN   = 10,
+                          shape = exp(-0.5))
+odata <- transform(odata, y1 = roizipf(nn, N = myN, s = shape, pstr1 = pstr1))
+with(odata, table(y1))
+fit1 <- vglm(y1 ~ x2, oizipf(zero = "shape"), data = odata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 }
+}
 \keyword{models}
 \keyword{regression}
 
diff --git a/man/oizipfUC.Rd b/man/oizipfUC.Rd
new file mode 100644
index 0000000..c544ad4
--- /dev/null
+++ b/man/oizipfUC.Rd
@@ -0,0 +1,122 @@
+\name{Oizipf}
+\alias{Oizipf}
+\alias{doizipf}
+\alias{poizipf}
+\alias{qoizipf}
+\alias{roizipf}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-Inflated Zipf Distribution }
+\description{
+  Density, distribution function, quantile function and random generation
+  for the one-inflated
+  Zipf distribution with parameter \code{pstr1}.
+
+
+}
+\usage{
+doizipf(x, N, shape, pstr1 = 0, log = FALSE)
+poizipf(q, N, shape, pstr1 = 0)
+qoizipf(p, N, shape, pstr1 = 0)
+roizipf(n, N, shape, pstr1 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, p, n}{Same as \code{\link[stats]{Uniform}}.}
+  \item{N, shape}{ See \code{\link{Zipf}}. }
+  \item{pstr1}{
+  Probability of a structural one
+  (i.e., ignoring the Zipf distribution),
+  called \eqn{\phi}{phi}.
+  The default value of \eqn{\phi = 0}{phi = 0} corresponds
+  to the response having an ordinary Zipf distribution.
+
+
+  }
+  \item{log}{Same as \code{\link[stats]{Uniform}}.}
+}
+\details{
+  The probability function of \eqn{Y} is 1 with probability
+  \eqn{\phi}{phi}, and \eqn{Zipf(N, s)} with
+  probability \eqn{1-\phi}{1-phi}. Thus
+  \deqn{P(Y=1) =\phi + (1-\phi)  P(W=1)}{%
+        P(Y=1) = phi + (1-phi) * P(W=1)}
+      where \eqn{W} is distributed as a
+      \eqn{Zipf(N, s)} random variable.
+  The \pkg{VGAM} family function \code{\link{oizipf}} estimates
+  the two parameters of this model by Fisher scoring.
+
+
+}
+\value{
+  \code{doizipf} gives the density,
+  \code{poizipf} gives the distribution function,
+  \code{qoizipf} gives the quantile function, and
+  \code{roizipf} generates random deviates.
+
+
+}
+%\references{ }
+\author{ T. W. Yee }
+\note{
+  The argument \code{pstr1} is recycled to the required length, and
+  usually has values which lie in the interval \eqn{[0,1]}.
+
+
+
+  These functions actually allow for the \emph{one-deflated
+  Zipf} distribution. Here, \code{pstr1} is also permitted
+  to lie in the interval \code{[-dzipf(1, N, s) / (1 - dzipf(1, N, s)), 0]}.
+  The resulting probability of a unit count is \emph{less than}
+  the nominal zipf value, and the use of \code{pstr1} to
+  stand for the probability of a structural 1 loses its
+  meaning.
+%
+%
+%
+   When \code{pstr1} equals \code{-dzipf(1, N, s) / (1 - dzipf(1, N, s))}
+   this corresponds to the 1-truncated zipf distribution.
+
+
+}
+
+\seealso{
+    \code{\link{oizeta}},
+    \code{\link{Zipf}},
+    \code{\link{zipf}},
+    \code{\link{Oizeta}}.
+
+
+
+}
+\examples{
+N <- 10; shape <- 1.5; pstr1 <- 0.3; x <- (-1):N
+(ii <- doizipf(x, N, shape, pstr1 = pstr1))
+
+\dontrun{ x <- 0:10
+par(mfrow = c(2, 1))  # One-Inflated zipf
+barplot(rbind(doizipf(x, N, shape, pstr1 = pstr1), dzipf(x, N, shape)),
+        beside = TRUE, col = c("blue", "orange"),
+        main = paste("OIZipf(", N, ", ", shape, ", pstr1 = ", pstr1, ") (blue) vs",
+                     " Zipf(", N, ", ", shape, ") (orange)", sep = ""),
+        names.arg = as.character(x))
+
+deflat.limit <- -dzipf(1, N, shape) / (1 - dzipf(1, N, shape))
+newpstr1 <- round(deflat.limit, 3) + 0.001  # Inside but near the boundary
+barplot(rbind(doizipf(x, N, shape, pstr1 = newpstr1),
+                dzipf(x, N, shape)),
+        beside = TRUE, col = c("blue", "orange"),
+        main = paste("ODZipf(", N, ", ", shape, ", pstr1 = ", newpstr1, ") (blue) vs",
+                     " Zipf(", N, ", ", shape, ") (orange)", sep = ""),
+        names.arg = as.character(x)) }
+}
+\keyword{distribution}
+
+
+
+%qoizipf(p, shape, pstr1 = 0)
+%roizipf(n, shape, pstr1 = 0)
+
+
+
+% table(roizipf(100, shape, pstr1 = pstr1))
+% round(doizipf(1:10, shape, pstr1 = pstr1) * 100)  # Should be similar
diff --git a/man/ordpoisson.Rd b/man/ordpoisson.Rd
index 2ece5e0..8785b46 100644
--- a/man/ordpoisson.Rd
+++ b/man/ordpoisson.Rd
@@ -98,7 +98,7 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
 
 
   Yee, T. W. (2012)
-  \emph{Ordinal ordination with normalizing link functions for count data}, 
+  \emph{Ordinal ordination with normalizing link functions for count data},
   (in preparation).
 
 
@@ -116,11 +116,11 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
   The input requires care as little to no checking is done.
   If \code{fit} is the fitted object, have a look at \code{fit at extra} and
   \code{depvar(fit)} to check.
-  
+
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{poissonff}},
   \code{\link{polf}},
   \code{\link[base:factor]{ordered}}.
diff --git a/man/otlog.Rd b/man/otlog.Rd
new file mode 100644
index 0000000..628df55
--- /dev/null
+++ b/man/otlog.Rd
@@ -0,0 +1,65 @@
+\name{otlog}
+\alias{otlog}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-truncated Logarithmic Distribution  }
+\description{
+  Estimating the (single) parameter of the 1-truncated
+  logarithmic distribution.
+
+}
+\usage{
+otlog(lshape = "logit", gshape = ppoints(8), zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lshape, gshape, zero}{
+  Same as
+  \code{\link{logff}}.
+
+
+  }
+}
+\details{
+  The 1-truncated logarithmic distribution is a logarithmic distribution but with
+  the probability of a one being zero. The other probabilities are scaled
+  to add to unity.
+  Some more details can be found at \code{\link{logff}}.
+  Multiple responses are permitted.
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+}
+%\references{
+%}
+\author{ T. W. Yee }
+%\note{
+%}
+
+\seealso{
+  \code{\link{Otlog}},
+  \code{\link{logff}},
+  \code{\link{oalog}},
+  \code{\link{oilog}},
+  \code{\link{simulate.vlm}}.
+
+
+}
+\examples{
+odata <- data.frame(y1 = rotlog(n = 1000, shape = logit(1/3, inverse = TRUE)))
+ofit <- vglm(y1 ~ 1, otlog, data = odata, trace = TRUE, crit = "c")
+coef(ofit, matrix = TRUE)
+Coef(ofit)
+\dontrun{with(odata,
+    hist(y1, prob = TRUE, breaks = seq(0.5, max(y1) + 0.5, by = 1),
+         border = "blue"))
+x <- seq(1, with(odata, max(y1)), by = 1)
+with(odata, lines(x, dotlog(x, Coef(ofit)[1]), col = "orange", type = "h", lwd = 2)) }
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/otlogUC.Rd b/man/otlogUC.Rd
new file mode 100644
index 0000000..4c943a2
--- /dev/null
+++ b/man/otlogUC.Rd
@@ -0,0 +1,105 @@
+\name{Otlog}
+\alias{Otlog}
+\alias{dotlog}
+\alias{potlog}
+\alias{qotlog}
+\alias{rotlog}
+\title{ One-truncated Logarithmic Distribution }
+\description{
+  Density, distribution function,
+  quantile function,
+  and random generation
+  for the one-truncated logarithmic distribution.
+
+
+
+}
+\usage{
+dotlog(x, shape, log = FALSE)
+potlog(q, shape, log.p = FALSE)
+qotlog(p, shape)
+rotlog(n, shape)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q}{
+   Vector of quantiles. For the density, it should be a vector with
+   integer values \eqn{> 1} in order for the probabilities to be positive.
+
+
+  }
+  \item{p}{vector of probabilities.}
+  \item{n}{number of observations.
+  Same as in \code{\link[stats]{runif}}.
+
+
+  }
+  \item{shape}{
+   The parameter value \eqn{c} described in \code{\link{logff}}.
+   Here it is called \code{shape} because \eqn{0<c<1} is the range.
+
+
+%   For \code{rotlog()} this pa%arameter must be of length 1.
+
+
+  }
+  \item{log, log.p}{
+  Logical.
+  If \code{log.p = TRUE} then all probabilities \code{p} are
+  given as \code{log(p)}.
+
+
+  }
+}
+\details{
+  The one-truncated logarithmic distribution is a logarithmic distribution but with
+  the probability of a one being zero. The other probabilities are scaled
+  to add to unity.
+  Some more details are given in \code{\link{logff}}.
+
+
+}
+\value{
+  \code{dotlog} gives the density,
+  \code{potlog} gives the distribution function,
+  \code{qotlog} gives the quantile function, and
+  \code{rotlog} generates random deviates.
+
+
+
+
+}
+%\references{
+%}
+\author{ T. W. Yee }
+\note{
+  Given some response data, the \pkg{VGAM} family function
+  \code{\link{otlog}} estimates the parameter \code{shape}.
+  Function \code{potlog()} suffers from the problems that
+  \code{\link{plog}} sometimes has.
+
+
+
+}
+
+\seealso{
+  \code{\link{otlog}},
+  \code{\link{rlog}},
+  \code{\link{Oilog}}.
+
+
+}
+\examples{
+dotlog(1:20, 0.5)
+rotlog(20, 0.5)
+
+\dontrun{ shape <- 0.8; x <- 1:10
+plot(x, dotlog(x, shape = shape), type = "h", ylim = 0:1,
+     sub = "shape=0.8", las = 1, col = "blue", ylab = "Probability",
+     main = "1-truncated logarithmic distribution: blue=PMF; orange=CDF")
+lines(x + 0.1, potlog(x, shape = shape), col = "orange", lty = 3, type = "h") }
+}
+\keyword{distribution}
+
+
+
diff --git a/man/otpospoisUC.Rd b/man/otpospoisUC.Rd
new file mode 100644
index 0000000..2504b73
--- /dev/null
+++ b/man/otpospoisUC.Rd
@@ -0,0 +1,87 @@
+\name{Otpospois}
+\alias{Otpospois}
+\alias{dotpospois}
+\alias{potpospois}
+\alias{qotpospois}
+\alias{rotpospois}
+\title{ One-truncated Positive-Poisson Distribution }
+\description{
+  Density, distribution function,
+  quantile function,
+  and random generation
+  for the one-truncated positive-Poisson distribution.
+
+
+
+}
+\usage{
+dotpospois(x, lambda, log = FALSE)
+potpospois(q, lambda, log.p = FALSE)
+qotpospois(p, lambda)
+rotpospois(n, lambda)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, p, n}{
+   Same as \code{\link{Pospois}}.
+
+
+  }
+  \item{lambda, log, log.p}{
+   Same as \code{\link{Pospois}}.
+
+
+
+  }
+}
+\details{
+  The one-truncated positive-Poisson is a Poisson distribution but with
+  the probability of a one and a zero being 0.
+  That is, its support is 2, 3, \ldots.
+  The other probabilities are scaled to add to unity.
+  Some more details are given in \code{\link{pospoisson}}.
+
+
+}
+\value{
+  \code{dotpospois} gives the density,
+  \code{potpospois} gives the distribution function,
+  \code{qotpospois} gives the quantile function, and
+  \code{rotpospois} generates random deviates.
+
+
+
+
+}
+%\references{
+%}
+\author{ T. W. Yee }
+\note{
+  Given some response data, the \pkg{VGAM} family function
+  \code{\link{otpospoisson}} estimates the parameter \code{lambda}.
+
+
+
+}
+
+\seealso{
+  \code{\link{otpospoisson}},
+  \code{\link{Pospois}},
+  \code{\link{Oipospois}}.
+
+
+}
+\examples{
+dotpospois(1:20, 0.5)
+rotpospois(20, 0.5)
+
+\dontrun{ lambda <- 4; x <- 1:10
+plot(x, dotpospois(x, lambda = lambda), type = "h", ylim = 0:1,
+     sub = "lambda=4", las = 1, col = "blue", ylab = "Probability",
+     main = "1-truncated positive-Poisson distribution: blue=PMF; orange=CDF")
+lines(x + 0.1, potpospois(x, lambda = lambda), col = "orange", lty = 3, type = "h") }
+}
+\keyword{distribution}
+
+
+
diff --git a/man/otpospoisson.Rd b/man/otpospoisson.Rd
new file mode 100644
index 0000000..4007586
--- /dev/null
+++ b/man/otpospoisson.Rd
@@ -0,0 +1,71 @@
+\name{otpospoisson}
+\alias{otpospoisson}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-truncated Poisson Distribution  }
+\description{
+  Estimating the (single) parameter of the 1-truncated
+  positive Poisson distribution.
+
+}
+\usage{
+ otpospoisson(llambda = "loge",
+    type.fitted = c("mean", "lambda", "prob0", "prob1"),
+    ilambda = NULL, imethod = 1, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{llambda, type.fitted, ilambda}{
+  Same as \code{\link{pospoisson}}.
+
+
+  }
+  \item{imethod, zero}{
+  Same as \code{\link{pospoisson}}.
+
+
+  }
+}
+\details{
+  The 1-truncated positive Poisson distribution has support on 2, 3,
+  \ldots.
+  It is a Poisson distribution but with
+  the probability of a one or zero being 0. The other probabilities are scaled
+  to add to unity.
+  Some more details can be found at \code{\link{pospoisson}}.
+  Multiple responses are permitted.
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+}
+%\references{
+%}
+\author{ T. W. Yee }
+%\note{
+%}
+
+\seealso{
+  \code{\link{Otpospois}},
+  \code{\link{oipospoisson}},
+  \code{\link{simulate.vlm}}.
+
+
+}
+\examples{
+odata <- data.frame(y1 = rotpospois(n = 1000, lambda = loge(1, inverse = TRUE)))
+ofit <- vglm(y1 ~ 1, otpospoisson, data = odata, trace = TRUE, crit = "c")
+coef(ofit, matrix = TRUE)
+Coef(ofit)
+\dontrun{with(odata,
+    hist(y1, prob = TRUE, breaks = seq(0.5, max(y1) + 0.5, by = 1),
+         border = "blue"))
+x <- seq(1, with(odata, max(y1)), by = 1)
+with(odata, lines(x, dotpospois(x, Coef(ofit)[1]), col = "orange", type = "h", lwd = 2)) }
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/otzeta.Rd b/man/otzeta.Rd
new file mode 100644
index 0000000..9a42ab2
--- /dev/null
+++ b/man/otzeta.Rd
@@ -0,0 +1,73 @@
+\name{otzeta}
+\alias{otzeta}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ One-truncated Zeta Distribution Family Function }
+\description{
+  Estimates the parameter of the 1-truncated zeta distribution.
+
+}
+\usage{
+otzeta(lshape = "loge", ishape = NULL, gshape = exp((-4:3)/4), zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lshape, ishape, gshape, zero}{
+  Same as \code{\link{zetaff}}.
+
+
+  }
+}
+\details{
+  The 1-truncated zeta distribution is the ordinary zeta
+  distribution but with the probability of one being 0.  Thus the
+  other probabilities are scaled up (i.e., divided by \eqn{1-P[Y=1]}).
+  The mean is returned by default as the fitted values.
+  More details can be found at \code{\link{zetaff}}.
+  Multiple responses are handled.
+
+
+
+}
+
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+
+}
+
+%\references{
+%}
+\author{ T. W. Yee }
+%\note{
+%  The \code{\link{zeta}} function may be used to compute values
+%  of the zeta function.
+%
+%
+%}
+
+\seealso{
+  \code{\link{Otzeta}},
+  \code{\link{zetaff}},
+  \code{\link{oizeta}},
+  \code{\link{diffzeta}},
+  \code{\link{zeta}},
+  \code{\link{dzeta}},
+  \code{\link{hzeta}},
+  \code{\link{zipf}}.
+
+
+}
+\examples{
+odata <- data.frame(x2 = runif(nn <- 1000))  # Artificial data
+odata <- transform(odata, shape = loge(-0.25 + x2, inverse = TRUE))
+odata <- transform(odata, y1 = rotzeta(nn, shape))
+with(odata, table(y1))
+ofit <- vglm(y1 ~ x2, otzeta, data = odata, trace = TRUE, crit = "coef")
+coef(ofit, matrix = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+%
diff --git a/man/otzetaUC.Rd b/man/otzetaUC.Rd
new file mode 100644
index 0000000..1f019c3
--- /dev/null
+++ b/man/otzetaUC.Rd
@@ -0,0 +1,98 @@
+\name{Otzeta}
+\alias{Otzeta}
+\alias{dotzeta}
+\alias{potzeta}
+\alias{qotzeta}
+\alias{rotzeta}
+\title{ One-truncated Zeta Distribution }
+\description{
+  Density, distribution function,
+  quantile function,
+  and random generation
+  for the one-truncated zeta distribution.
+
+
+
+}
+\usage{
+dotzeta(x, shape, log = FALSE)
+potzeta(q, shape, log.p = FALSE)
+qotzeta(p, shape)
+rotzeta(n, shape)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, q, p, n}{
+  Same as in \code{\link[stats]{runif}}.
+
+
+  }
+  \item{shape}{
+   The positive shape parameter described in \code{\link{zetaff}}.
+   Here it is called \code{shape} because it is positive.
+
+
+%   For \code{rotzeta()} this pa%arameter must be of length 1.
+
+
+  }
+  \item{log, log.p}{
+  Same as in \code{\link[stats]{runif}}.
+
+
+  }
+}
+\details{
+  The one-truncated zeta distribution is a zeta distribution but with
+  the probability of a one being zero. The other probabilities are scaled
+  to add to unity.
+  Some more details are given in \code{\link{zetaff}}.
+
+
+}
+\value{
+  \code{dotzeta} gives the density,
+  \code{potzeta} gives the distribution function,
+  \code{qotzeta} gives the quantile function, and
+  \code{rotzeta} generates random deviates.
+
+
+
+
+}
+%\references{
+%}
+\author{ T. W. Yee }
+\note{
+  Given some response data, the \pkg{VGAM} family function
+  \code{\link{otzeta}} estimates the parameter \code{shape}.
+
+
+% Function \code{potzeta()} suffers from the problems that
+% \code{\link{plog}} sometimes has.
+
+
+
+}
+
+\seealso{
+  \code{\link{Otzeta}},
+  \code{\link{zetaff}},
+  \code{\link{Oizeta}}.
+
+
+}
+\examples{
+dotzeta(1:20, 0.5)
+rotzeta(20, 0.5)
+
+\dontrun{ shape <- 0.8; x <- 1:10
+plot(x, dotzeta(x, shape = shape), type = "h", ylim = 0:1,
+     sub = "shape=0.8", las = 1, col = "blue", ylab = "Probability",
+     main = "1-truncated zeta distribution: blue=PMF; orange=CDF")
+lines(x + 0.1, potzeta(x, shape = shape), col = "orange", lty = 3, type = "h") }
+}
+\keyword{distribution}
+
+
+
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index ebf9a1d..e50d0aa 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -3,14 +3,14 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Paralogistic Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   paralogistic distribution.
 
 
 }
 \usage{
-paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL, 
-    ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5), 
+paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
+    ishape1.a = NULL, imethod = 1, lss = TRUE, gscale = exp(-5:5),
     gshape1.a = seq(0.75, 4, by = 0.25), probs.y = c(0.25, 0.5, 0.75),
     zero = "shape")
 }
@@ -18,8 +18,8 @@ paralogistic(lscale = "loge", lshape1.a = "loge", iscale = NULL,
 %   zero = ifelse(lss, -2, -1)
 \arguments{
   \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
-  
-  
+
+
   }
   \item{lshape1.a, lscale}{
   Parameter link functions applied to the
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
index d9a5959..3fcefc6 100644
--- a/man/paretoIV.Rd
+++ b/man/paretoIV.Rd
@@ -30,7 +30,7 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   \item{lscale, linequality, lshape}{
   Parameter link functions for the
   scale parameter (called \eqn{b} below),
-  inequality parameter (called \eqn{g} below), and 
+  inequality parameter (called \eqn{g} below), and
   shape parameter (called \eqn{s} below).
   See \code{\link{Links}} for more choices.
   A log link is the default for all because all these parameters are
@@ -63,7 +63,7 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   for \eqn{y > a}, \eqn{b>0}, \eqn{g>0} and \eqn{s>0}.
   The \eqn{a}{a} is called the \emph{location} parameter,
   \eqn{b} the \emph{scale} parameter,
-  \eqn{g} the \emph{inequality} parameter, and 
+  \eqn{g} the \emph{inequality} parameter, and
   \eqn{s} the \emph{shape} parameter.
 
 
@@ -100,17 +100,17 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
 
 
 % The maximum likelihood estimator for the location parameter is
-% \code{min(y)}, i.e., the smallest response value. 
+% \code{min(y)}, i.e., the smallest response value.
 
 
   There are a number of special cases of the Pareto(IV) distribution.
   These include the Pareto(I), Pareto(II), Pareto(III), and Burr family
   of distributions.
-  Denoting \eqn{PIV(a,b,g,s)} as the Pareto(IV) distribution, 
+  Denoting \eqn{PIV(a,b,g,s)} as the Pareto(IV) distribution,
   the Burr distribution \eqn{Burr(b,g,s)} is \eqn{PIV(a=0,b,1/g,s)},
   the Pareto(III) distribution \eqn{PIII(a,b,g)} is \eqn{PIV(a,b,g,s=1)},
   the Pareto(II) distribution \eqn{PII(a,b,s)} is \eqn{PIV(a,b,g=1,s)},
-  and 
+  and
   the Pareto(I) distribution \eqn{PI(b,s)} is \eqn{PIV(b,b,g=1,s)}.
   Thus the Burr distribution can be fitted using the
   \code{\link{negloge}} link
@@ -128,7 +128,7 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
 
 
 }
-\references{ 
+\references{
 
 
 Johnson N. L., Kotz S., and Balakrishnan N. (1994)
@@ -151,7 +151,7 @@ Fairland, Maryland: International Cooperative Publishing House.
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The \code{extra} slot of the fitted object has a component called
   \code{"location"} which stores the location parameter value(s).
 
@@ -171,7 +171,7 @@ Fairland, Maryland: International Cooperative Publishing House.
 
 
 
- 
+
  }
 \seealso{
   \code{\link{ParetoIV}},
diff --git a/man/paretoIVUC.Rd b/man/paretoIVUC.Rd
index 7ba2b28..983512c 100644
--- a/man/paretoIVUC.Rd
+++ b/man/paretoIVUC.Rd
@@ -104,7 +104,7 @@ Fairland, Maryland: International Cooperative Publishing House.
 }
 \author{ T. W. Yee and Kai Huang }
 \details{
-  For the formulas and other details 
+  For the formulas and other details
   see \code{\link{paretoIV}}.
 
 
diff --git a/man/paretoff.Rd b/man/paretoff.Rd
index a88f6bf..91e0bb0 100644
--- a/man/paretoff.Rd
+++ b/man/paretoff.Rd
@@ -87,7 +87,7 @@ truncpareto(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
         f(y) = k * alpha^k / [y^(k+1) (1-(\alpha/U)^k)]}
   for \eqn{0 < \alpha < y < U < \infty}{0< alpha < y < U < Inf}
   and \eqn{k>0}.
-  Possibly, better names for \eqn{k} are 
+  Possibly, better names for \eqn{k} are
   the \emph{index} and \emph{tail} parameters.
   Here, \eqn{\alpha}{alpha} and \eqn{U} are known.
   The mean of \eqn{Y} is
@@ -104,7 +104,7 @@ truncpareto(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
 
 }
-\references{ 
+\references{
 Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
 Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
@@ -133,13 +133,13 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
   and \eqn{b=\alpha}{b = alpha}.
 
 
-  In some applications the Pareto law is truncated by a 
+  In some applications the Pareto law is truncated by a
   natural upper bound on the probability tail.
   The upper truncated Pareto distribution has three parameters (called
   \eqn{\alpha}{alpha}, \eqn{U} and \eqn{k} here) but the family function
   \code{truncpareto()} estimates only \eqn{k}.
   With known lower and upper limits, the ML estimator of \eqn{k} has
-  the usual properties of MLEs. 
+  the usual properties of MLEs.
   Aban (2006) discusses other inferential details.
 
 
diff --git a/man/perks.Rd b/man/perks.Rd
index e8b7205..109c8a1 100644
--- a/man/perks.Rd
+++ b/man/perks.Rd
@@ -3,14 +3,14 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Perks Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 2-parameter 
+  Maximum likelihood estimation of the 2-parameter
   Perks distribution.
 
 }
 \usage{
 perks(lscale = "loge", lshape = "loge",
       iscale = NULL,   ishape = NULL,
-      gscale = exp(-5:5), gshape = exp(-5:5), 
+      gscale = exp(-5:5), gshape = exp(-5:5),
       nsimEIM = 500, oim.mean = FALSE, zero = NULL, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -61,33 +61,35 @@ perks(lscale = "loge", lshape = "loge",
 \details{
 The Perks distribution
 has cumulative distribution function
-\deqn{F(x; \alpha, \beta) =
+\deqn{F(y; \alpha, \beta) =
 1 -
 \left\{
 \frac{1 + \alpha}{1 + \alpha e^{\beta y}}
 \right\}^{1 / \beta}
 }{%
-F(x; alpha, beta) = 1 - ((1 + \alpha)/(1 + alpha * e^(beta * y)))^(1 / beta)
+F(y; alpha, beta) = 1 - ((1 + \alpha)/(1 + alpha * e^(beta * y)))^(1 / beta)
 }
 which leads to a probability density function
-\deqn{f(x; \alpha, \beta) =
+\deqn{f(y; \alpha, \beta) =
 \left[ 1 + \alpha \right]^{1 / \beta}
 \alpha  e^{\beta y} / (1 + \alpha e^{\beta y})^{1 + 1 / \beta}
 }{%
-f(x; alpha, beta) = [ 1 + alpha]^(1 / \beta) * alpha * exp(beta * y) / (1 + alpha * exp(beta * y))^(1 + 1 / beta)
+f(y; alpha, beta) = [ 1 + alpha]^(1 / \beta) * alpha * exp(beta * y) / (1 + alpha * exp(beta * y))^(1 + 1 / beta)
 }
 for \eqn{\alpha > 0}{alpha > 0},
 \eqn{\beta > 0}{beta > 0},
-\eqn{x > 0}.
+\eqn{y > 0}.
 Here, \eqn{\beta}{beta} is called the scale parameter \code{scale},
 and \eqn{\alpha}{alpha} is called a shape parameter.
 The moments for this distribution do
 not appear to be available in closed form.
 
 
+
 Simulated Fisher scoring is used and multiple responses are handled.
 
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -98,6 +100,7 @@ Simulated Fisher scoring is used and multiple responses are handled.
 }
 \references{
 
+
 Perks, W. (1932)
 On some experiments in the graduation of mortality statistics.
 \emph{Journal of the Institute of Actuaries},
diff --git a/man/perksUC.Rd b/man/perksUC.Rd
index 81b3fa8..e085104 100644
--- a/man/perksUC.Rd
+++ b/man/perksUC.Rd
@@ -22,7 +22,7 @@ rperks(n, scale = 1, shape)
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. 
+  \item{n}{number of observations.
            Same as in \code{\link[stats]{runif}}.
 
   }
@@ -34,7 +34,7 @@ rperks(n, scale = 1, shape)
   \item{lower.tail, log.p}{
   Same meaning as in \code{\link[stats:Normal]{pnorm}}
   or \code{\link[stats:Normal]{qnorm}}.
-  
+
 
   }
 
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index 5b3fecc..11f1a19 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -8,7 +8,7 @@ applicable for rank-1 or rank-2 models with argument \code{noRRR = ~ 1}.
 
 }
 \usage{
-perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE, 
+perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE,
              xlim = NULL, ylim = NULL, zlim = NULL,
              gridlength = if (Rank == 1) 301 else c(51,51),
              which.species = NULL,
@@ -23,7 +23,7 @@ perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE,
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{x}{
-  Object of class \code{"qrrvglm"}, i.e., a 
+  Object of class \code{"qrrvglm"}, i.e., a
   constrained quadratic ordination (CQO) object.
 
   }
@@ -47,7 +47,7 @@ perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE,
   See \code{\link[graphics]{par}}.
 
   }
-  \item{gridlength}{ 
+  \item{gridlength}{
   Numeric. The fitted values are evaluated on a grid, and this
   argument regulates the fineness of the grid.  If \code{Rank = 2}
   then the argument is recycled to length 2, and the two numbers
@@ -70,7 +70,7 @@ perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE,
   \item{zlab}{Character caption for the z-axis.
   Used only if \code{Rank = 2}.
   By default, a suitable caption is found.
-  See the \code{xlab} argument in \code{\link[graphics]{plot}} 
+  See the \code{xlab} argument in \code{\link[graphics]{plot}}
   or \code{\link[graphics]{title}}.
 
   }
@@ -87,7 +87,7 @@ perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE,
   }
   \item{main}{
   Character, giving the title of the plot.
-  See the \code{main} argument in \code{\link[graphics]{plot}} 
+  See the \code{main} argument in \code{\link[graphics]{plot}}
   or \code{\link[graphics]{title}}.
 
   }
@@ -95,8 +95,8 @@ perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE,
   See \code{\link[graphics]{persp}} for more information.
 
   }
-  \item{col}{ Color. 
-  See \code{\link[graphics]{persp}} for more information. 
+  \item{col}{ Color.
+  See \code{\link[graphics]{persp}} for more information.
 
   }
   \item{llty}{ Line type.
@@ -224,7 +224,7 @@ if (deviance(r2) > 857) stop("suboptimal fit obtained")
 
 persp(r1, xlim = c(-6, 5), col = 1:4, label = TRUE)
 
-# Involves all species 
+# Involves all species
 persp(r2, xlim = c(-6, 5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220))
 # Omit the two dominant species to see what is behind them
 persp(r2, xlim = c(-6, 5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220),
diff --git a/man/pgamma.deriv.Rd b/man/pgamma.deriv.Rd
index 4bdd885..90a9a1b 100644
--- a/man/pgamma.deriv.Rd
+++ b/man/pgamma.deriv.Rd
@@ -34,7 +34,7 @@ pgamma.deriv(q, shape, tmax = 100)
   The first and second derivatives with respect to \eqn{q} and \eqn{a}
   are returned. This function is similar in spirit to
   \code{\link[stats]{pgamma}};
-  define 
+  define
   \deqn{P(a,x) = \frac{1}{\Gamma(a)} \int_0^x t^{a-1} e^{-t} dt}{P(a,x) =
     1/Gamma(a) integral_0^x t^(a-1) exp(-t) dt}
   so that
@@ -42,7 +42,7 @@ pgamma.deriv(q, shape, tmax = 100)
   Currently a 6-column matrix is returned (in the future this
   may change and an argument may be supplied so that only what
   is required by the user is computed.)
-  
+
 
   The computations use a series expansion
   for \eqn{a \leq x \leq 1}{a <= x <= 1} or
@@ -57,9 +57,9 @@ pgamma.deriv(q, shape, tmax = 100)
 \value{
   The first 5 columns, running from left to right, are the derivatives
   with respect to:
-  \eqn{x}, 
+  \eqn{x},
   \eqn{x^2},
-  \eqn{a}, 
+  \eqn{a},
   \eqn{a^2},
   \eqn{xa}.
   The 6th column is \eqn{P(a, x)} (but it is not as accurate
@@ -84,7 +84,7 @@ pgamma.deriv(q, shape, tmax = 100)
   The original code came from \code{http://lib.stat.cmu.edu/apstat/187}.
   but this website has since become stale.
 
-  
+
 }
 \note{
   If convergence does not occur then try increasing the value of
diff --git a/man/pgamma.deriv.unscaled.Rd b/man/pgamma.deriv.unscaled.Rd
index 66df99f..659e470 100644
--- a/man/pgamma.deriv.unscaled.Rd
+++ b/man/pgamma.deriv.unscaled.Rd
@@ -55,7 +55,7 @@ pgamma.deriv.unscaled(q, shape)
 \author{
   T. W. Yee.
 
-  
+
 }
 
 
diff --git a/man/plotdeplot.lmscreg.Rd b/man/plotdeplot.lmscreg.Rd
index aaa406c..c350646 100644
--- a/man/plotdeplot.lmscreg.Rd
+++ b/man/plotdeplot.lmscreg.Rd
@@ -7,17 +7,17 @@
   associated with a LMS quantile regression.
 }
 \usage{
-plotdeplot.lmscreg(answer, y.arg, add.arg = FALSE, 
-    xlab = "", ylab = "density", xlim = NULL, ylim = NULL, 
-    llty.arg = par()$lty, col.arg = par()$col, 
+plotdeplot.lmscreg(answer, y.arg, add.arg = FALSE,
+    xlab = "", ylab = "density", xlim = NULL, ylim = NULL,
+    llty.arg = par()$lty, col.arg = par()$col,
     llwd.arg = par()$lwd, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{answer}{
   Output from functions of the form
-  \code{deplot.???} where \code{???} is the name of the 
-  \pkg{VGAM} LMS family function, e.g., \code{lms.yjn}. 
+  \code{deplot.???} where \code{???} is the name of the
+  \pkg{VGAM} LMS family function, e.g., \code{lms.yjn}.
   See below for details.
 
   }
@@ -42,7 +42,7 @@ plotdeplot.lmscreg(answer, y.arg, add.arg = FALSE,
 
   }
   \item{llty.arg}{
-  Line type. 
+  Line type.
   See the \code{lty} argument of \code{\link[graphics]{par}}. }
   \item{col.arg}{
   Line color.
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
index 308753c..8982ad1 100644
--- a/man/plotqrrvglm.Rd
+++ b/man/plotqrrvglm.Rd
@@ -7,9 +7,9 @@
 }
 \usage{
 plotqrrvglm(object, rtype = c("response", "pearson", "deviance", "working"),
-            ask = FALSE, 
-            main = paste(Rtype, "residuals vs latent variable(s)"), 
-            xlab = "Latent Variable", 
+            ask = FALSE,
+            main = paste(Rtype, "residuals vs latent variable(s)"),
+            xlab = "Latent Variable",
             I.tolerances = object at control$eq.tolerances, ...)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -28,12 +28,12 @@ plotqrrvglm(object, rtype = c("response", "pearson", "deviance", "working"),
 }
 \details{
   Plotting the residuals can be potentially very useful for checking
-  that the model fit is adequate.  
+  that the model fit is adequate.
 
 
 }
 \value{
-  The original object. 
+  The original object.
 
 
 }
@@ -52,7 +52,7 @@ canonical Gaussian ordination.
 
 \note{
   An ordination plot of a QRR-VGLM can be obtained
-  by \code{\link{lvplot.qrrvglm}}. 
+  by \code{\link{lvplot.qrrvglm}}.
 
 }
 
@@ -73,7 +73,7 @@ p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                 Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           quasipoissonff, data = hspider, Crow1positive = FALSE)
-par(mfrow = c(3, 4)) 
+par(mfrow = c(3, 4))
 plot(p1, rtype = "response", col = "blue", pch = 4, las = 1, main = "")
 }
 }
diff --git a/man/plotqtplot.lmscreg.Rd b/man/plotqtplot.lmscreg.Rd
index 1bb5cea..0ddec96 100644
--- a/man/plotqtplot.lmscreg.Rd
+++ b/man/plotqtplot.lmscreg.Rd
@@ -7,10 +7,10 @@
   associated with a LMS quantile regression.
 }
 \usage{
-plotqtplot.lmscreg(fitted.values, object, newdata = NULL, 
-    percentiles = object at misc$percentiles, lp = NULL, 
-    add.arg = FALSE, y = if (length(newdata)) FALSE else TRUE, 
-    spline.fit = FALSE, label = TRUE, size.label = 0.06, 
+plotqtplot.lmscreg(fitted.values, object, newdata = NULL,
+    percentiles = object at misc$percentiles, lp = NULL,
+    add.arg = FALSE, y = if (length(newdata)) FALSE else TRUE,
+    spline.fit = FALSE, label = TRUE, size.label = 0.06,
     xlab = NULL, ylab = "",
     pch = par()$pch, pcex = par()$cex, pcol.arg = par()$col,
     xlim = NULL, ylim = NULL,
@@ -45,20 +45,20 @@ plotqtplot.lmscreg(fitted.values, object, newdata = NULL,
   \item{pch}{ Plotting character. See \code{\link[graphics]{par}}. }
   \item{pcex}{ Character expansion of the points.
     See \code{\link[graphics]{par}}. }
-  \item{pcol.arg}{ Color of the points. 
+  \item{pcol.arg}{ Color of the points.
     See the \code{col} argument of \code{\link[graphics]{par}}.  }
   \item{xlim}{ Limits of the x-axis. See \code{\link[graphics]{par}}. }
   \item{ylim}{ Limits of the y-axis. See \code{\link[graphics]{par}}. }
   \item{llty.arg}{ Line type. Line type.
   See the \code{lty} argument of \code{\link[graphics]{par}}. }
-  \item{lcol.arg}{ Color of the lines. 
+  \item{lcol.arg}{ Color of the lines.
   See the \code{col} argument of \code{\link[graphics]{par}}.
   }
   \item{llwd.arg}{ Line width.
   See the \code{lwd} argument of \code{\link[graphics]{par}}.
   }
-  \item{tcol.arg}{ Color of the text 
-  (if \code{label} is \code{TRUE}). 
+  \item{tcol.arg}{ Color of the text
+  (if \code{label} is \code{TRUE}).
   See the \code{col} argument of \code{\link[graphics]{par}}.
   }
   \item{tadj}{ Text justification.
diff --git a/man/plotrcim0.Rd b/man/plotrcim0.Rd
index 583dd48..cf817dd 100644
--- a/man/plotrcim0.Rd
+++ b/man/plotrcim0.Rd
@@ -1,7 +1,7 @@
 \name{plotrcim0}
 \alias{plotrcim0}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Main Effects Plot for a Row-Column Interaction Model (RCIM) 
+\title{ Main Effects Plot for a Row-Column Interaction Model (RCIM)
 
 }
 \description{
@@ -25,21 +25,21 @@
 
 }
 \arguments{
-\item{object}{   
+\item{object}{
   An \code{\link{rcim}} object.
   This should be of rank-0, i.e., main effects only and no
   interactions.
 
 
 }
-\item{which.plots}{  
+\item{which.plots}{
   Numeric, describing which plots are to be plotted.
   The row effects plot is 1 and the column effects plot is 2.
   Set the value \code{0}, say, for no plots at all.
 
 
 }
-\item{centered}{  
+\item{centered}{
   Logical.
   If \code{TRUE} then the row and column effects are centered
   (but not scaled) by \code{\link[base]{scale}}.
@@ -48,7 +48,7 @@
 
 
 }
-\item{hline0, hlty, hcol, hlwd}{  
+\item{hline0, hlty, hcol, hlwd}{
   \code{hline0} is logical. If \code{TRUE} then a horizontal line is
   plotted at 0 and the other arguments describe this line.
   Probably having \code{hline0 = TRUE} only makes sense when
@@ -56,20 +56,20 @@
 
 
 }
-\item{rfirst, cfirst}{  
+\item{rfirst, cfirst}{
   \code{rfirst} is the level of row that is placed first in the
   row effects plot, etc.
 
 
-} 
-\item{rmain, cmain}{  
+}
+\item{rmain, cmain}{
   Character.
   \code{rmain} is the main label in the row effects plot, etc.
 
 
 }
 \item{rtype, ctype, rsub, csub}{
-  See the \code{type} and \code{sub} arguments of 
+  See the \code{type} and \code{sub} arguments of
   \code{\link[graphics:plot]{plot}}.
 
 
@@ -78,7 +78,7 @@
 % rlabels = FALSE, clabels = FALSE,
 %  Currently not functioning properly.
 %  zz.
-%  See \code{labels} argument of 
+%  See \code{labels} argument of
 %  \code{\link[graphics:plot]{plot}}.
 %
 %}
@@ -92,7 +92,7 @@
 
 
 }
-\item{rcex.lab, ccex.lab}{  
+\item{rcex.lab, ccex.lab}{
   Numeric.
   \code{rcex.lab} is \code{cex} for the row effects plot label,
   etc.
@@ -170,7 +170,7 @@
 
 \note{
   This function should be only used to plot the object of rank-0 RCIM.
-  If the rank is positive then it will issue a warning. 
+  If the rank is positive then it will issue a warning.
 
 
   Using an argument \code{ylim} will mean the row and column
@@ -196,8 +196,8 @@
 
 
 \seealso{
-  \code{\link{moffset}} 
-  \code{\link{Rcim}}, 
+  \code{\link{moffset}}
+  \code{\link{Rcim}},
   \code{\link{rcim}}.
 
 
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index bd58952..2121d21 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -76,7 +76,7 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
   }
   \item{deriv.arg}{
   Numerical. The order of the derivative.
-  Should be assigned an small 
+  Should be assigned an small
   integer such as 0, 1, 2. Only applying to \code{s()} terms,
   it plots the derivative.
 
@@ -151,7 +151,7 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
   constraint matrix of interest.
 
 
-  Many of \code{plotvgam()}'s options can be found in  
+  Many of \code{plotvgam()}'s options can be found in
   \code{\link{plotvgam.control}}, e.g., line types, line widths,
   colors.
 
@@ -183,18 +183,18 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
 
 \note{
   While \code{plot(fit)} will work if \code{class(fit)}
-  is \code{"vgam"}, it is necessary to use \code{plotvgam(fit)} 
+  is \code{"vgam"}, it is necessary to use \code{plotvgam(fit)}
   explicitly otherwise.
-  
-  
+
+
   \code{plotvgam()} is quite buggy at the moment.
 
-  
+
 % \code{plotvgam()} works in a similar
 % manner to S-PLUS's \code{plot.gam()}, however, there is no
-% options for interactive construction of the plots yet. 
+% options for interactive construction of the plots yet.
+
 
-  
 }
 
 \seealso{
@@ -203,8 +203,8 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
   \code{predict.vgam},
   \code{\link{plotvglm}},
   \code{\link{vglm}}.
-  
-  
+
+
 }
 \examples{
 coalminers <- transform(coalminers, Age = (age - 42) / 5)
diff --git a/man/plotvgam.control.Rd b/man/plotvgam.control.Rd
index 1773e34..9784d42 100644
--- a/man/plotvgam.control.Rd
+++ b/man/plotvgam.control.Rd
@@ -14,7 +14,8 @@ plotvgam.control(which.cf = NULL,
                  lcol = par()$col, rcol = par()$col,
                  scol = par()$col, llwd = par()$lwd, slwd = par()$lwd,
                  add.arg = FALSE, one.at.a.time = FALSE,
-                 .include.dots = TRUE, noxmean = FALSE, ...)
+                 .include.dots = TRUE, noxmean = FALSE,
+                 shade = FALSE, shcol = "gray80", ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -63,12 +64,25 @@ plotvgam.control(which.cf = NULL,
   One might use this argument if \code{ylab} is specified.
 
   }
+
+
+
+  \item{shade, shcol}{
+  \code{shade} is logical; if \code{TRUE} then
+  the pointwise SE band is shaded gray by default.
+  The colour can be adjusted by setting \code{shcol}.
+  These arguments are ignored unless
+  \code{se = TRUE} and \code{overlay = FALSE};
+  If \code{shade = TRUE} then \code{scol} is ignored.
+
+
+  }
   \item{\dots}{ Other arguments that may be fed into \code{par()}. }
   In the above, \eqn{M} is the number of linear/additive predictors.
 }
 \details{
   The most obvious features of \code{\link{plotvgam}} can be
-  controlled by the above arguments. 
+  controlled by the above arguments.
 
 
 }
diff --git a/man/plotvglm.Rd b/man/plotvglm.Rd
index 5a17ac2..f3b7142 100644
--- a/man/plotvglm.Rd
+++ b/man/plotvglm.Rd
@@ -18,8 +18,8 @@ plotvglm(x, which = "(All)", ...)
   \item{x}{
     An object of class \code{"vglm"} (see \code{\link{vglm-class}})
     or inherits from that class.
-  
-  
+
+
 % Same as \code{\link{plotvgam}}.
 
 
@@ -66,20 +66,20 @@ plotvglm(x, which = "(All)", ...)
 %\note{
 % \code{plotvglm()} is quite buggy at the moment.
 
-  
+
 % \code{plotvglm()} works in a similar
 % manner to S-PLUS's \code{plot.gam()}, however, there is no
-% options for interactive construction of the plots yet. 
+% options for interactive construction of the plots yet.
+
 
-  
 %}
 
 \seealso{
   \code{\link{plotvgam}},
   \code{\link{plotvgam.control}},
   \code{\link{vglm}}.
-  
-  
+
+
 }
 \examples{
 \dontrun{
diff --git a/man/pneumo.Rd b/man/pneumo.Rd
index 802201f..c15c47d 100644
--- a/man/pneumo.Rd
+++ b/man/pneumo.Rd
@@ -19,7 +19,7 @@ Exposure time is explanatory, and there are 3 ordinal response variables.
 }
 \details{
 These were collected from coalface workers. In the original
-data set, the two most severe categories were combined. 
+data set, the two most severe categories were combined.
 
 
 }
@@ -45,7 +45,7 @@ data set, the two most severe categories were combined.
 
 }
 \examples{
-# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989) 
+# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
 pneumo <- transform(pneumo, let = log(exposure.time))
 vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
 }
diff --git a/man/poisson.points.Rd b/man/poisson.points.Rd
index 11e69d6..6e30bc1 100644
--- a/man/poisson.points.Rd
+++ b/man/poisson.points.Rd
@@ -98,7 +98,7 @@ poisson.points(ostatistic, dimension = 2, link = "loge",
 
 
 }
-%\references{ 
+%\references{
 %}
 \author{ T. W. Yee }
 %\note{
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index fcc9bab..91b6999 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -12,7 +12,8 @@
 \usage{
 poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
           imethod = 1, parallel = FALSE, zero = NULL, bred = FALSE,
-          earg.link = FALSE)
+          earg.link = FALSE, type.fitted = c("mean", "quantiles"),
+                       percentiles = c(25, 50, 75))
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -71,6 +72,10 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
 
 
   }
+  \item{type.fitted, percentiles}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+
+  }
 }
 \details{
   \eqn{M} defined above is the number of linear/additive predictors.
@@ -148,6 +153,9 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
     \code{\link{quasipoissonff}},
     \code{\link{genpoisson}},
     \code{\link{zipoisson}},
+    \code{\link{pospoisson}},
+    \code{\link{oipospoisson}},
+    \code{\link{otpospoisson}},
     \code{\link{skellam}},
     \code{\link{mix2poisson}},
     \code{\link{cens.poisson}},
diff --git a/man/polf.Rd b/man/polf.Rd
index ef0c1b0..700e302 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -32,10 +32,10 @@ polf(theta, cutpoint = NULL,
   }
 }
 \details{
-  The Poisson-ordinal link function (POLF) can be applied to a 
+  The Poisson-ordinal link function (POLF) can be applied to a
   parameter lying in the unit interval.
   Its purpose is to link cumulative probabilities associated with
-  an ordinal response coming from an underlying Poisson distribution. 
+  an ordinal response coming from an underlying Poisson distribution.
   If the cutpoint is zero then a complementary log-log link is used.
 
 
@@ -53,7 +53,7 @@ polf(theta, cutpoint = NULL,
 }
 \references{
   Yee, T. W. (2012)
-  \emph{Ordinal ordination with normalizing link functions for count data}, 
+  \emph{Ordinal ordination with normalizing link functions for count data},
   (in preparation).
 
 
@@ -83,7 +83,7 @@ polf(theta, cutpoint = NULL,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{ordpoisson}},
   \code{\link{poissonff}},
diff --git a/man/polonoUC.Rd b/man/polonoUC.Rd
index b4d57f3..6bddd48 100644
--- a/man/polonoUC.Rd
+++ b/man/polonoUC.Rd
@@ -27,7 +27,7 @@ rpolono(n, meanlog = 0, sdlog = 1)
   \item{meanlog, sdlog }{
   the mean and standard deviation of the normal distribution
   (on the log scale).
-  They match the arguments in 
+  They match the arguments in
   \code{\link[stats:Lognormal]{Lognormal}}.
 
 
@@ -42,7 +42,7 @@ rpolono(n, meanlog = 0, sdlog = 1)
   For \code{bigx = 10},
   he showed that the approximation has a relative error less than
   0.001 for values of \code{meanlog} and
-  \code{sdlog} ``likely to be encountered in practice''. 
+  \code{sdlog} ``likely to be encountered in practice''.
   The argument can be assigned \code{Inf} in which case
   the approximation is not used.
 
@@ -55,7 +55,7 @@ rpolono(n, meanlog = 0, sdlog = 1)
 
   }
   \item{...}{
-  Arguments passed into 
+  Arguments passed into
   \code{\link[stats]{integrate}}.
 
 
@@ -96,7 +96,7 @@ rpolono(n, meanlog = 0, sdlog = 1)
 
 
 % See zz code{link{polonozz}}, the \pkg{VGAM} family function
-% for estimating the parameters, 
+% for estimating the parameters,
 % for the formula of the probability density function and other details.
 
 
diff --git a/man/posbernUC.Rd b/man/posbernUC.Rd
index c409518..246dc06 100644
--- a/man/posbernUC.Rd
+++ b/man/posbernUC.Rd
@@ -58,7 +58,7 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 
   }
   \item{cap.effect}{
-  Numeric, the capture effect. 
+  Numeric, the capture effect.
   Added to the linear predictor if captured previously.
   A positive or negative value corresponds to
   a trap-happy and trap-shy effect respectively.
@@ -95,18 +95,18 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 
   }
 
-  \item{link, earg.link}{ 
+  \item{link, earg.link}{
   The former is used to generate the probabilities for capture
   at each occasion.
   Other details at \code{\link{CommonVGAMffArguments}}.
 
 
   }
-  \item{prob, prob0}{ 
+  \item{prob, prob0}{
     Matrix of probabilities for the numerator and denominators
     respectively.
     The default does \emph{not} correspond to the
-    \eqn{M_b} model since the \eqn{M_b} model has a denominator 
+    \eqn{M_b} model since the \eqn{M_b} model has a denominator
     which involves the capture history.
 
 
@@ -153,7 +153,7 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 }
 %\references{ }
 \author{ Thomas W. Yee. }
-\note{ 
+\note{
   The \code{r}-type function is experimental only and does not follow the
   usual conventions of \code{r}-type R functions.
   It may change a lot in the future.
@@ -163,7 +163,7 @@ dposbern(x, prob, prob0 = prob, log = FALSE)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{posbernoulli.tb}},
   \code{\link{posbernoulli.b}},
   \code{\link{posbernoulli.t}}.
@@ -181,8 +181,8 @@ M.bh <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + x3, posbernoulli.b(I2 = FALSE),
 constraints(M.bh)
 summary(M.bh)
 }
-\keyword{distribution} 
-\keyword{datagen} 
+\keyword{distribution}
+\keyword{datagen}
 
 
 %double.ch = FALSE,
diff --git a/man/posbernoulli.b.Rd b/man/posbernoulli.b.Rd
index ddd1437..d594dee 100644
--- a/man/posbernoulli.b.Rd
+++ b/man/posbernoulli.b.Rd
@@ -160,7 +160,7 @@ posbernoulli.b(link = "logit", drop.b = FALSE ~ 1,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{posbernoulli.t}} and
   \code{\link{posbernoulli.tb}} (including estimating \eqn{N}),
   \code{\link{deermice}},
diff --git a/man/posbernoulli.t.Rd b/man/posbernoulli.t.Rd
index 4724c5f..b023af6 100644
--- a/man/posbernoulli.t.Rd
+++ b/man/posbernoulli.t.Rd
@@ -95,7 +95,7 @@ posbernoulli.t(link = "logit", parallel.t = FALSE ~ 1, iprob = NULL,
   It is well-known that some species of animals are affected
   by capture, e.g., trap-shy or trap-happy. This \pkg{VGAM}
   family function does \emph{not} allow any behavioral effect to be
-  modelled (\code{\link{posbernoulli.b}} 
+  modelled (\code{\link{posbernoulli.b}}
   and \code{\link{posbernoulli.tb}} do) because the
   denominator of the likelihood function must be free of
   behavioral effects.
@@ -205,7 +205,7 @@ conditional likelihood.
   \code{\link{posbernoulli.tb}}
   models with a
   \code{\link{posbinomial}}
-  model requires \code{posbinomial(omit.constant = TRUE)} 
+  model requires \code{posbinomial(omit.constant = TRUE)}
   because one needs to remove the normalizing constant from the
   log-likelihood function.
   See \code{\link{posbinomial}} for an example.
@@ -232,7 +232,7 @@ conditional likelihood.
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{posbernoulli.b}},
   \code{\link{posbernoulli.tb}},
   \code{\link{Select}},
diff --git a/man/posbernoulli.tb.Rd b/man/posbernoulli.tb.Rd
index 56fcca2..39d1b6c 100644
--- a/man/posbernoulli.tb.Rd
+++ b/man/posbernoulli.tb.Rd
@@ -198,7 +198,7 @@ posbernoulli.tb(link = "logit", parallel.t = FALSE ~ 1,
 }
 
 
-\seealso{ 
+\seealso{
   \code{\link{posbernoulli.b}} (including \code{N.hat}),
   \code{\link{posbernoulli.t}},
   \code{\link{posbinomial}},
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 9189e7c..bc3b0d5 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -33,7 +33,7 @@ rposbinom(n, size, prob)
 
 
   }
-  \item{prob}{probability of success on each trial. 
+  \item{prob}{probability of success on each trial.
   Should be in \eqn{(0,1)}.
 
 
@@ -79,7 +79,7 @@ rposbinom(n, size, prob)
 
 
 }
-%\references{ 
+%\references{
 %None.
 %}
 
@@ -102,7 +102,7 @@ rposbinom(n, size, prob)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{posbinomial}},
   \code{\link{dposbern}},
   \code{\link{zabinomial}},
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index 34f98bd..3b235d2 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -125,7 +125,7 @@ Drapers Company Research Memoirs.
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{posbernoulli.b}},
   \code{\link{posbernoulli.t}},
   \code{\link{posbernoulli.tb}},
diff --git a/man/posgeomUC.Rd b/man/posgeomUC.Rd
index e9031e1..683d567 100644
--- a/man/posgeomUC.Rd
+++ b/man/posgeomUC.Rd
@@ -21,7 +21,7 @@ rposgeom(n, prob)
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. 
+  \item{n}{number of observations.
   Fed into \code{\link[stats]{runif}}.
 
   }
@@ -56,7 +56,7 @@ rposgeom(n, prob)
 
 
 }
-%\references{ 
+%\references{
 %None.
 %}
 
@@ -76,7 +76,7 @@ rposgeom(n, prob)
 
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{zageometric}},
   \code{\link{zigeometric}},
   \code{\link[stats:Geometric]{rgeom}}.
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 13eed4f..d630219 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -13,7 +13,8 @@
 }
 \usage{
 dposnegbin(x, size, prob = NULL, munb = NULL, log = FALSE)
-pposnegbin(q, size, prob = NULL, munb = NULL)
+pposnegbin(q, size, prob = NULL, munb = NULL,
+           lower.tail = TRUE, log.p = FALSE)
 qposnegbin(p, size, prob = NULL, munb = NULL)
 rposnegbin(n, size, prob = NULL, munb = NULL)
 }
@@ -49,6 +50,12 @@ rposnegbin(n, size, prob = NULL, munb = NULL)
 
 
   }
+  \item{log.p, lower.tail}{
+  Same arguments as that of an ordinary negative binomial distribution
+  (see \code{\link[stats:NegBinomial]{pnbinom}}).
+
+
+  }
 }
 \details{
   The positive-negative binomial distribution is a negative binomial
@@ -80,7 +87,7 @@ rposnegbin(n, size, prob = NULL, munb = NULL)
 
 
 }
-\references{ 
+\references{
 
 Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
 D. B. (1996)
@@ -102,7 +109,7 @@ for counts with extra zeros.
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{posnegbinomial}},
   \code{\link{zanegbinomial}},
   \code{\link{zinegbinomial}},
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index c6e02d7..8d4dcd0 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -13,38 +13,38 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
                eps.trig = 1e-07, max.support = 4000, max.chunk.MB = 30,
                lmunb = "loge", lsize = "loge", imethod = 1,
                imunb = NULL, iprobs.y = NULL,
-               gprobs.y = (0:9)/10, isize = NULL,
+               gprobs.y = ppoints(8), isize = NULL,
                gsize.mux = exp(c(-30, -20, -15, -10, -6:3)))
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lmunb}{ 
+  \item{lmunb}{
   Link function applied to the \code{munb} parameter, which is the mean
   \eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{lsize}{ 
+  \item{lsize}{
   Parameter link function applied to the dispersion parameter,
   called \code{k}.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{isize}{ 
+  \item{isize}{
   Optional initial value for \code{k}, an index parameter.
   The value \code{1/k} is known as a dispersion parameter.
   If failure to converge occurs try different values (and/or use
   \code{imethod}).
   If necessary this vector is recycled to length equal to the number
-  of responses. 
+  of responses.
   A value \code{NULL} means an initial value for each response is
   computed internally using a range of values.
 
 
   }
-  \item{nsimEIM, zero, eps.trig}{ 
+  \item{nsimEIM, zero, eps.trig}{
   See \code{\link{CommonVGAMffArguments}}.
 
 
@@ -107,7 +107,7 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
 \section{Warning}{
   This family function is fragile;
   at least two cases will lead to numerical problems.
-  Firstly, 
+  Firstly,
   the positive-Poisson model corresponds to \code{k} equalling infinity.
   If the data is positive-Poisson or close to positive-Poisson,
   then the estimated \code{k} will diverge to \code{Inf} or some
@@ -119,7 +119,25 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
   In the situation when both cases hold, the result returned
   (which will be untrustworthy) will depend on the initial values.
 
-  
+
+
+The negative binomial distribution (NBD) is a strictly unimodal distribution.
+Any data set that does not exhibit a mode (in the middle) makes the
+estimation problem difficult.
+The positive NBD inherits this feature.
+Set \code{trace = TRUE} to monitor convergence.
+
+
+
+  See the example below of a data set where \code{posbinomial()}
+  fails; the so-called solution is \emph{extremely} poor.
+  This is partly due to a lack of a
+  unimodal shape because the number of counts decreases only.
+  This long tail makes it very difficult to estimate the mean
+  parameter with any certainty. The result too is that the
+  \code{size} parameter is numerically fraught.
+
+
 %  Then trying a \code{\link{loglog}} link might help
 %  handle this problem.
 
@@ -141,7 +159,7 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
 }
 \references{
   Barry, S. C. and Welsh, A. H. (2002)
-  Generalized additive modelling and zero inflated count data. 
+  Generalized additive modelling and zero inflated count data.
   \emph{Ecological Modelling},
   \bold{157},
   179--188.
@@ -167,12 +185,12 @@ posnegbinomial(zero = "size", type.fitted = c("mean", "munb", "prob0"),
   \code{max.support} so that the EIMs are positive-definite,
   e.g.,
   \code{eps.trig = 1e-8} and \code{max.support = Inf}.
-  
+
 
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{rposnegbin}},
   \code{\link{pospoisson}},
   \code{\link{negbinomial}},
@@ -224,6 +242,18 @@ matplot(ofreq, cbind(species, fitted = pdf2*sum(species)), las = 1,
         type = "b", ylab = "Number of species", col = c("blue", "orange"),
         main = "blue 1s = observe; orange 2s = fitted"))
 }
+
+\dontrun{
+# This data (courtesy of Maxim Gerashchenko) causes posbinomial() to fail
+pnbd.fail <- data.frame(
+ y1 = c(1:16, 18:21, 23:28, 33:38, 42, 44, 49:51, 55, 56, 58,
+ 59, 61:63, 66, 73, 76, 94, 107, 112, 124, 190, 191, 244),
+ ofreq = c(130, 80, 38, 23, 22, 11, 21, 14, 6, 7, 9, 9, 9, 4, 4, 5, 1,
+           4, 6, 1, 3, 2, 4, 3, 4, 5, 3, 1, 2, 1, 1, 4, 1, 2, 2, 1, 3,
+           1, 1, 2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1))
+fit.fail <- vglm(y1 ~ 1, weights = ofreq, posnegbinomial,
+               trace = TRUE, data = pnbd.fail)
+}
 }
 \keyword{models}
 \keyword{regression}
@@ -236,7 +266,7 @@ matplot(ofreq, cbind(species, fitted = pdf2*sum(species)), las = 1,
 %posnegbinomial(lmunb = "loge", lsize = "loge", imunb = NULL,
 %               isize = NULL, zero = "size", nsimEIM = 250,
 %               probs.y = 0.75, cutoff.prob = 0.999,
-%               max.support = 2000, max.chunk.MB = 30, 
+%               max.support = 2000, max.chunk.MB = 30,
 %               gsize = exp((-4):4), ishrinkage = 0.95, imethod = 1)
 
 
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
index e856dba..76895ea 100644
--- a/man/posnormUC.Rd
+++ b/man/posnormUC.Rd
@@ -41,7 +41,7 @@ rposnorm(n, mean = 0, sd = 1)
 \author{ T. W. Yee }
 \details{
   See \code{\link{posnormal}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   for the formula of the probability density function and other details.
 
 
diff --git a/man/posnormal.Rd b/man/posnormal.Rd
index 32ad616..cde5ccb 100644
--- a/man/posnormal.Rd
+++ b/man/posnormal.Rd
@@ -95,7 +95,7 @@ posnormal(lmean = "identitylink", lsd = "loge",
   \deqn{f(y) = \frac{1}{\sigma} \frac{\phi((y-\mu) / \sigma)}{
                1-\Phi(-\mu/ \sigma)}.}{%
         f(y) = (1/sigma) * dnorm((y-mu)/sigma) / [1-pnorm(-mu/ sigma)].}
-  where \eqn{\phi()}{dnorm()} is the probability density function of a 
+  where \eqn{\phi()}{dnorm()} is the probability density function of a
   standard normal distribution (\code{\link[stats:Normal]{dnorm}}).
 
 
@@ -127,7 +127,7 @@ posnormal(lmean = "identitylink", lsd = "loge",
   The response variable for this family function is the same as
   \code{\link{uninormal}} except positive values are required.
   Reasonably good initial values are needed.
-  
+
 
 
   The distribution of the reciprocal of a positive normal random variable
@@ -138,14 +138,14 @@ posnormal(lmean = "identitylink", lsd = "loge",
 
 \section{Warning }{
   It is recommended that \code{trace = TRUE} be used to monitor convergence;
-  sometimes the estimated mean is \code{-Inf} and the 
+  sometimes the estimated mean is \code{-Inf} and the
   estimated mean standard deviation is \code{Inf}, especially
   when the sample size is small.
   Under- or over-flow may occur if the data is ill-conditioned.
 
 
 }
-\seealso{ 
+\seealso{
     \code{\link{uninormal}},
     \code{\link{tobit}}.
 
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
index cb0d5c3..6b54e1d 100644
--- a/man/pospoisUC.Rd
+++ b/man/pospoisUC.Rd
@@ -63,7 +63,7 @@ rpospois(n, lambda)
 
 
 }
-%\references{ 
+%\references{
 %None.
 %}
 
@@ -82,7 +82,7 @@ rpospois(n, lambda)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{pospoisson}},
   \code{\link{zapoisson}},
   \code{\link{zipoisson}},
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index 6eceb7f..958424a 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -83,10 +83,12 @@ The equilibrium size distribution of freely-forming groups.
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{Pospois}},
   \code{\link{posnegbinomial}},
   \code{\link{poissonff}},
+  \code{\link{otpospoisson}},
+  \code{\link{zapoisson}},
   \code{\link{zipoisson}},
   \code{\link{simulate.vlm}}.
 
diff --git a/man/powerlink.Rd b/man/powerlink.Rd
index 2b5319a..faa1c1f 100644
--- a/man/powerlink.Rd
+++ b/man/powerlink.Rd
@@ -70,7 +70,7 @@ powerlink(theta, power = 1, inverse = FALSE, deriv = 0,
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{loge}}.
 
diff --git a/man/prats.Rd b/man/prats.Rd
index 0c4cdf3..3ee0d7d 100644
--- a/man/prats.Rd
+++ b/man/prats.Rd
@@ -20,7 +20,7 @@ data(prats)
   A data frame with the following variables.
 
   \describe{
-   
+
     \item{treatment}{
     A \code{0} means control;
     a \code{1} means the chemical treatment.
@@ -51,7 +51,7 @@ data(prats)
 
 }
 \source{
- 
+
   Weil, C. S. (1970)
   Selection of the valid number of sampling units and a consideration
   of their combination in toxicological studies involving
@@ -64,7 +64,7 @@ data(prats)
 %Fd. Cosmet. Toxicol.
 
 
- 
+
 }
 \references{
 
diff --git a/man/predictqrrvglm.Rd b/man/predictqrrvglm.Rd
index 8fcb02d..72d396b 100644
--- a/man/predictqrrvglm.Rd
+++ b/man/predictqrrvglm.Rd
@@ -52,7 +52,7 @@ predictqrrvglm(object, newdata=NULL,
 
 
 }
-\references{ 
+\references{
 Yee, T. W. (2004)
 A new technique for maximum-likelihood
 canonical Gaussian ordination.
@@ -69,7 +69,7 @@ canonical Gaussian ordination.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{cqo}}.
 
 
diff --git a/man/predictvglm.Rd b/man/predictvglm.Rd
index 1a02d62..9944d8b 100644
--- a/man/predictvglm.Rd
+++ b/man/predictvglm.Rd
@@ -11,7 +11,8 @@
 predictvglm(object, newdata = NULL,
             type = c("link", "response", "terms"),
             se.fit = FALSE, deriv = 0, dispersion = NULL,
-            untransform = FALSE, extra = object at extra, ...)
+            untransform = FALSE,
+            type.fitted = NULL, percentiles = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -59,20 +60,38 @@ predictvglm(object, newdata = NULL,
   }
   \item{deriv}{
   Non-negative integer. Currently this must be zero.
-  Later, this may be implemented for general values. 
+  Later, this may be implemented for general values.
 
 
   }
   \item{dispersion}{
-  Dispersion parameter. 
+  Dispersion parameter.
   This may be inputted at this stage, but the default is to use
   the dispersion parameter of the fitted model.
 
 
   }
-  \item{extra}{
-  A list containing extra information.
-  This argument should be ignored.
+% \item{extra}{
+% A list containing extra information.
+% This argument should be ignored.
+
+
+% }
+  \item{type.fitted}{
+    Some \pkg{VGAM} family functions have an argument by
+    the same name. If so,  then one can obtain fitted values
+    by setting \code{type = "response"} and
+    choosing a value of \code{type.fitted} from what's
+    available.
+    If \code{type.fitted = "quantiles"} is available then
+    the \code{percentiles} argument can be used to specify
+    what quantile values are requested.
+
+
+  }
+  \item{percentiles}{
+    Used only if \code{type.fitted = "quantiles"} is
+    available and is selected.
 
 
   }
@@ -117,7 +136,7 @@ predictvglm(object, newdata = NULL,
 
 
 }
-\references{ 
+\references{
 
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
@@ -133,6 +152,13 @@ Reduced-rank vector generalized linear models.
   will generate an error.
 
 
+
+  The arguments \code{type.fitted}  and \code{percentiles}
+  are provided in this function to give more convenience than
+  modifying the \code{extra} slot directly.
+
+
+
 }
 
 \section{Warning }{
@@ -141,7 +167,7 @@ Reduced-rank vector generalized linear models.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link[stats]{predict}},
   \code{\link{vglm}},
   \code{predictvlm},
@@ -179,3 +205,4 @@ predict(fit, type = "terms", se = TRUE)
 \keyword{models}
 \keyword{regression}
 
+%           untransform = FALSE, extra = object at extra,
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index 77ab681..6f0ee5d 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -60,7 +60,7 @@ The mean of \eqn{Y} is \eqn{a} (returned as the fitted values).
 This is a different parameterization compared to \code{\link{lgamma3}}.
 
 
-Special cases: 
+Special cases:
 \eqn{q = 0} is the normal distribution with standard deviation \eqn{b},
 \eqn{q = -1} is the extreme value distribution for maximums,
 \eqn{q = 1} is the extreme value distribution for minima (Weibull).
@@ -97,11 +97,11 @@ else \eqn{q < 0} is right skew.
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The notation used here differs from Prentice (1974):
   \eqn{\alpha = a}{alpha = a},
   \eqn{\sigma = b}{sigma = b}.
-  Fisher scoring is used. 
+  Fisher scoring is used.
 
 
 }
diff --git a/man/probit.Rd b/man/probit.Rd
index a9c985c..d377b84 100644
--- a/man/probit.Rd
+++ b/man/probit.Rd
@@ -31,7 +31,7 @@ probit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 }
 \details{
   The probit link function is commonly used for parameters that
-  lie in the unit interval. 
+  lie in the unit interval.
   Numerical values of \code{theta} close to 0 or 1 or out of range
   result in
   \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
@@ -71,7 +71,7 @@ probit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{Links}},
   \code{\link{logit}},
   \code{\link{cloglog}},
@@ -95,7 +95,7 @@ lines(p,  probit(p), col = "purple")
 lines(p, cloglog(p), col = "chocolate")
 lines(p, cauchit(p), col = "tan")
 abline(v = 0.5, h = 0, lty = "dashed")
-legend(0.1, 4.0, c("logit", "probit", "cloglog", "cauchit"), 
+legend(0.1, 4.0, c("logit", "probit", "cloglog", "cauchit"),
        col = c("limegreen", "purple", "chocolate", "tan"), lwd = mylwd)
 par(lwd = 1) }
 }
diff --git a/man/propodds.Rd b/man/propodds.Rd
index 245b9d2..dbba0b9 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -3,7 +3,7 @@
 \title{ Proportional Odds Model for Ordinal Regression }
 \description{
   Fits the proportional odds model
-  to a (preferably ordered) factor response. 
+  to a (preferably ordered) factor response.
 
 }
 \usage{
diff --git a/man/ps.Rd b/man/ps.Rd
deleted file mode 100644
index fa74a2b..0000000
--- a/man/ps.Rd
+++ /dev/null
@@ -1,267 +0,0 @@
-\name{ps}
-\alias{ps}
-%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Defining Penalized Spline Smooths in VGAM Formulas }
-\description{
-  \code{ps} is used in the definition of P-spline smooth terms within
-  \code{vgam} formulas.
-
-
-}
-\usage{
-ps(x, ..., ps.intervals = NULL, lambda = 0, degree = 2,
-   order = 2, ridge.adj = 1e-5, ridge.inv = 0.0001,
-   spillover = 0.01, maxlambda = 1e4)
-}
-%- maybe also `usage' for other objects documented here.
-\arguments{
-
-  \item{x}{
-    covariate (abscissae) to be smoothed.
-    Also called the regressor.
-    If the \code{xij} facility is used then more
-    covariates are needed in the \code{\dots} argument.
-
-
-
-%   Currently at least 7 unique \code{x} values are needed.
-
-
-  }
-  \item{\dots}{
-    Used to accommodate the other \eqn{M-1} covariates
-    when the \code{xij} facility is used.
-    See Section 3.4.4 of Yee (2015) for something very similar.
-    This argument, found in the second argument, means that
-    the other argument names must be fully specified if used,
-    e.g., \code{ps.intervals} and not \code{ps.int}.
-    See the example below.
-    In the example below,
-    the term in the main formula is 
-    \code{ps(gcost.air, gcost.trn, gcost.bus)}
-    and one might be tempted to use something like
-    \code{ps(gcost)} to represent that \code{xij} term.
-    However, this is not recommended because
-    \code{ps(gcost)} might not have the same number of columns
-    as \code{ps(gcost.air, gcost.trn, gcost.bus)} etc.
-    That is, it is best to select one of the diagonal elements
-    of the block matrix to represent that term.
-
-
-
-  }
-
-
-  \item{ps.intervals}{
-    the number of equally-spaced B-spline intervals.
-    Note that the number of knots is equal to
-    \code{ps.intervals + 2*degree + 1}.
-    The default, signified by \code{NULL}, means that
-    \code{ceiling(1.5 * log(length(unique(x.index))))}
-    is used, where \code{x.index} is the combined data.
-    This is not guaranteed to work on every data set, and
-    it might change in the future.
-
-
-
-  }
-  \item{lambda, maxlambda}{
-    \code{maxlambda} are the
-    non-negative regularization parameters for difference penalty,
-    whose values should be less than \code{maxlambda}.
-    Can be a vector.   %  zz.
-    
-
-  }
-  \item{degree}{
-    degree of B-spline basis. Usually this will be 2 or 3; and
-    the values 1 or 4 might possibly be used.
-
-
-  }
-  \item{order}{
-    order of difference penalty (0 is the ridge penalty).
-
-
-  }
-  \item{ridge.adj, ridge.inv}{
-    small positive numbers to stabilize
-    linear dependencies among B-spline bases.
-
-
-  }
-  \item{spillover}{
-    small positive proportion of the range used on
-    the outside of the boundary values.
-
-
-  }
-}
-\details{
-  This function is currently used by \code{\link{vgam}} to
-  allow automatic smoothing parameter selection based on
-  P-splines and minimizing an UBRE quantity.
-  It is recommended above \code{\link{s}} also because backfitting
-  is not required.
-
-
-
-% Also, if \eqn{n} is the number of \emph{distinct} abscissae, then 
-% \code{ps} will fail if \eqn{n < 7}.
-
-
-
-  Unlike \code{s}, which is symbolic and does not perform any smoothing itself,
-  this function does compute the penalized spline when
-  used by \code{\link{vgam}}.
-  When this function is used within \code{\link{vgam}}, automatic
-  smoothing parameter selection is implemented by calling
-  \code{\link[mgcv]{magic}} after the necessary link-ups are done.
-
-
-
-  This function is smart; it can be used for smart prediction
-  (Section 18.6 of Yee (2015)).
-
-
-
-}
-\value{
-  A matrix with attributes that are (only) used by \code{\link{vgam}}.
-  The number of rows of the matrix is \code{length(x)} and
-  the number of columns is \code{ps.intervals + degree - 1}.
-
-
-}
-\references{
-
-
-Eilers, P. H. C. and Marx, B. D. (2002).
-Generalized Linear Additive Smooth Structures.
-\emph{Journal of Computational and Graphical Statistics},
-\bold{11}(4): 758--783.
-
-
-
-Marx, B. D. and Eilers, P. H. C. (1998).
-Direct generalized linear modeling
-with penalized likelihood.
-\emph{CSDA}, \bold{28}(2): 193--209.
-
-
-
-Eilers, P. H. C. and Marx, B. D. (1996).
-Flexible smoothing with B-splines
-and penalties (with comments and rejoinder).
-\emph{Statistical Science}, \bold{11}(2): 89--121.
-
-
-
-Wood, S. N. (2004).
-Stable and efficient multiple smoothing parameter estimation
-for generalized additive models.
-\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686.
-
-
-
-}
-\author{
-  B. D. Marx wrote the original function.
-  Subsequent edits were made by T. W. Yee and C. Somchit.
-
-
-}
-\note{
-  This function is currently under development and
-  may change in the future.
-  In particular, the default for \code{ps.intervals} is
-  subject to change.
-
-
-}
-
-% ~Make other sections like WARNING with \section{WARNING }{....} ~
-
-\seealso{
-  \code{\link{vgam}},
-  \code{\link{s}},
-  \code{\link{smartpred}},
-  \code{\link{is.smart}},
-  \code{\link[splines]{splineDesign}},
-  \code{\link[splines]{bs}},
-  \code{\link[mgcv]{magic}}.
-
-
-
-}
-
-\examples{
-ps(runif(10))
-ps(runif(10), ps.intervals = 5)
-
-\dontrun{
-data("TravelMode", package = "AER")  # Need to install "AER" first
-air.df <- subset(TravelMode, mode == "air")  # Form 4 smaller data frames
-bus.df <- subset(TravelMode, mode == "bus")
-trn.df <- subset(TravelMode, mode == "train")
-car.df <- subset(TravelMode, mode == "car")
-TravelMode2 <- data.frame(income     = air.df$income,
-                          wait.air   = air.df$wait  - car.df$wait,
-                          wait.trn   = trn.df$wait  - car.df$wait,
-                          wait.bus   = bus.df$wait  - car.df$wait,
-                          gcost.air  = air.df$gcost - car.df$gcost,
-                          gcost.trn  = trn.df$gcost - car.df$gcost,
-                          gcost.bus  = bus.df$gcost - car.df$gcost,
-                          wait       = air.df$wait)  # Value is unimportant
-TravelMode2$mode <- subset(TravelMode, choice == "yes")$mode  # The response
-TravelMode2 <- transform(TravelMode2, incom.air = income, incom.trn = 0,
-                                      incom.bus = 0)
-set.seed(1)
-TravelMode2 <- transform(TravelMode2,
-                         junkx2 = runif(nrow(TravelMode2)))
-
-tfit2 <-
-  vgam(mode ~ ps(gcost.air, gcost.trn, gcost.bus) + ns(junkx2, 4) +
-              ps(incom.air, incom.trn, incom.bus) + wait ,
-       crit = "coef",
-       multinomial(parallel = FALSE ~ 1), data = TravelMode2,
-       xij = list(ps(gcost.air, gcost.trn, gcost.bus) ~
-                  ps(gcost.air, gcost.trn, gcost.bus) +
-                  ps(gcost.trn, gcost.bus, gcost.air) +
-                  ps(gcost.bus, gcost.air, gcost.trn),
-                  ps(incom.air, incom.trn, incom.bus) ~
-                  ps(incom.air, incom.trn, incom.bus) +
-                  ps(incom.trn, incom.bus, incom.air) +
-                  ps(incom.bus, incom.air, incom.trn),
-                  wait   ~  wait.air +  wait.trn +  wait.bus),
-       form2 = ~  ps(gcost.air, gcost.trn, gcost.bus) +
-                  ps(gcost.trn, gcost.bus, gcost.air) +
-                  ps(gcost.bus, gcost.air, gcost.trn) +
-                  wait +
-                  ps(incom.air, incom.trn, incom.bus) +
-                  ps(incom.trn, incom.bus, incom.air) +
-                  ps(incom.bus, incom.air, incom.trn) +
-                  junkx2 + ns(junkx2, 4) +
-                  incom.air + incom.trn + incom.bus +
-                  gcost.air + gcost.trn + gcost.bus +
-                  wait.air +  wait.trn +  wait.bus)
-par(mfrow = c(2, 2))
-plot(tfit2, se = TRUE, lcol = "orange", scol = "blue", ylim = c(-4, 4))
-}
-}
-\keyword{models}
-\keyword{regression}
-\keyword{smooth}
-
-%            binom2.or(exchangeable = TRUE ~ s(x2, 3))
-
-
-
-
-
-
-
-
-
-
-
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index b10f17a..bd81965 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -35,9 +35,9 @@ qrrvglm.control(Rank = 1,
                 optim.maxit = 20,
                 Parscale = if (I.tolerances) 0.001 else 1.0,
                 sd.Cinit = 0.02,
-                SmallNo = 5.0e-13, 
+                SmallNo = 5.0e-13,
                 trace = TRUE,
-                Use.Init.Poisson.QO = TRUE, 
+                Use.Init.Poisson.QO = TRUE,
                 wzepsilon = .Machine$double.eps^0.75, ...)
 }
 %- maybe also `usage' for other objects documented here.
@@ -77,7 +77,7 @@ qrrvglm.control(Rank = 1,
     initial values.
 
   }
-  \item{Crow1positive}{ 
+  \item{Crow1positive}{
       Logical vector of length \code{Rank} (recycled if necessary): are
       the elements of the first row of \eqn{C} positive? For example,
       if \code{Rank} is 4, then specifying \code{Crow1positive = c(FALSE,
@@ -141,7 +141,7 @@ qrrvglm.control(Rank = 1,
 
   }
 
-  \item{FastAlgorithm}{ 
+  \item{FastAlgorithm}{
    Logical. Whether a new fast algorithm is to be used. The fast
    algorithm results in a large speed increases compared to Yee
    (2004).
@@ -149,19 +149,19 @@ qrrvglm.control(Rank = 1,
    Setting \code{FastAlgorithm = FALSE} will give an error.
 
   }
-  \item{GradientFunction}{ 
+  \item{GradientFunction}{
    Logical. Whether \code{\link[stats]{optim}}'s argument \code{gr}
    is used or not, i.e., to compute gradient values. Used only if
    \code{FastAlgorithm} is \code{TRUE}. The default value is usually
    faster on most problems.
 
-  } 
-  \item{Hstep}{ 
+  }
+  \item{Hstep}{
    Positive value. Used as the step size in the finite difference
    approximation to the derivatives by \code{\link[stats]{optim}}.
 
 %  Used only if \code{FastAlgorithm} is \code{TRUE}.
-  } 
+  }
   \item{isd.latvar}{
    Initial standard deviations for the latent variables (site scores).
    Numeric, positive and of length \eqn{R} (recycled if necessary).
@@ -176,7 +176,7 @@ qrrvglm.control(Rank = 1,
    the most spread of site scores, followed by the second ordination
    axis, etc.
 
-  } 
+  }
   \item{iKvector, iShape}{
    Numeric, recycled to length \eqn{S} if necessary.
    Initial values used for estimating the positive \eqn{k} and
@@ -186,7 +186,7 @@ qrrvglm.control(Rank = 1,
    These arguments override the \code{ik} and \code{ishape}
    arguments in \code{\link{negbinomial}} and \code{\link{gamma2}}.
 
-  } 
+  }
 
   \item{I.tolerances}{
    Logical. If \code{TRUE} then the (common) tolerance matrix is the
@@ -194,7 +194,7 @@ qrrvglm.control(Rank = 1,
    \code{I.tolerances = TRUE} implies \code{eq.tolerances = TRUE}, but
    not vice versa. Internally, the quadratic terms will be treated as
    offsets (in GLM jargon) and so the models can potentially be fitted
-   very efficiently. \emph{However, it is a very good idea to center 
+   very efficiently. \emph{However, it is a very good idea to center
    and scale all numerical variables in the \eqn{x_2} vector}.
    See \bold{Details} for more details.
    The success of \code{I.tolerances = TRUE} often
@@ -217,14 +217,14 @@ qrrvglm.control(Rank = 1,
     Maximum number of times the optimizer is called or restarted.
     Most users should ignore this argument.
 
-  } 
+  }
   \item{imethod}{
     Method of initialization. A positive integer 1 or 2 or 3 etc.
     depending on the \pkg{VGAM} family function.
-    Currently it is used for \code{\link{negbinomial}} and 
+    Currently it is used for \code{\link{negbinomial}} and
     \code{\link{gamma2}} only, and used within the C.
 
-  } 
+  }
   \item{Maxit.optim}{
     Positive integer. Number of iterations given to the function
     \code{\link[stats]{optim}} at each of the \code{optim.maxit}
@@ -232,7 +232,7 @@ qrrvglm.control(Rank = 1,
 
   }
 
-  \item{MUXfactor}{ 
+  \item{MUXfactor}{
    Multiplication factor for detecting large offset values. Numeric,
    positive and of length \eqn{R} (recycled if necessary). This argument
    is used only if \code{I.tolerances = TRUE}. Offsets are \eqn{-0.5}
@@ -245,7 +245,7 @@ qrrvglm.control(Rank = 1,
    \eqn{x_2} is recommended). A value about 3 or 4 is recommended.
    If failure to converge occurs, try a slightly lower value.
 
-} 
+}
   \item{optim.maxit}{
     Positive integer. Number of times \code{\link[stats]{optim}}
     is invoked. At iteration \code{i}, the \code{i}th value of
@@ -253,7 +253,7 @@ qrrvglm.control(Rank = 1,
 
   }
 
-  \item{noRRR}{ 
+  \item{noRRR}{
     Formula giving terms that are \emph{not} to be included in the
     reduced-rank regression (or formation of the latent variables),
     i.e., those belong to \eqn{x_1}.
@@ -262,7 +262,7 @@ qrrvglm.control(Rank = 1,
     The default is to omit the intercept term from the latent variables.
 
 
-  } 
+  }
   \item{Norrr}{
   Defunct. Please use \code{noRRR}.
   Use of \code{Norrr} will become an error soon.
@@ -278,11 +278,11 @@ qrrvglm.control(Rank = 1,
    the elements of \eqn{C} become \eqn{C} / \code{Parscale}.
    Setting \code{I.tolerances = TRUE} results in line searches that
    are very large, therefore \eqn{C} has to be scaled accordingly
-   to avoid large step sizes. 
+   to avoid large step sizes.
    See \bold{Details} for more information.
    It's probably best to leave this argument alone.
-  } 
-  \item{sd.Cinit}{ 
+  }
+  \item{sd.Cinit}{
       Standard deviation of the initial values for the elements
       of \eqn{C}.
       These are normally distributed with mean zero.
@@ -301,7 +301,7 @@ qrrvglm.control(Rank = 1,
 
 %  \item{Kinit}{ Initial values for the index parameters \code{k} in the
 %   negative binomial distribution (one per species).
-%   In general, a smaller number is preferred over a larger number. 
+%   In general, a smaller number is preferred over a larger number.
 %   The vector is recycled to the number of responses (species).
 %   The argument is currently not used.
 %   }
@@ -313,8 +313,8 @@ qrrvglm.control(Rank = 1,
 %      Used only if \code{Quadratic = TRUE} and \code{FastAlgorithm = FALSE}.
 %  }
 \item{SmallNo}{
-  Positive numeric between \code{.Machine$double.eps} and \code{0.0001}. 
-  Used to avoid under- or over-flow in the IRLS algorithm. 
+  Positive numeric between \code{.Machine$double.eps} and \code{0.0001}.
+  Used to avoid under- or over-flow in the IRLS algorithm.
   Used only if \code{FastAlgorithm} is \code{TRUE}.
   }
   \item{Use.Init.Poisson.QO}{
@@ -516,10 +516,10 @@ p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
 sort(deviance(p1, history = TRUE))  # A history of all the iterations
 
 (isd.latvar <- apply(latvar(p1), 2, sd))  # Should be approx isd.latvar
- 
+
 # Refit the model with better initial values
 set.seed(111)  # This leads to the global solution
-p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, 
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           I.tolerances = TRUE, quasipoissonff, data = hspider,
@@ -536,7 +536,7 @@ sort(deviance(p1, history = TRUE))  # A history of all the iterations
 %# 20120221; withdrawn for a while coz it creates a lot of error messages.
 %# Negative binomial CQO; smallest deviance is about 275.389
 %set.seed(1234)  # This leads to a reasonable (but not the global) solution?
-%nb1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi, 
+%nb1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
 %                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
 %          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
 %          I.tol = FALSE, eq.tol = TRUE,  # A good idea for negbinomial
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
index 16524bb..1b15871 100644
--- a/man/qtplot.lmscreg.Rd
+++ b/man/qtplot.lmscreg.Rd
@@ -6,8 +6,8 @@
   Plots quantiles associated with a LMS quantile regression.
 }
 \usage{
-qtplot.lmscreg(object, newdata = NULL, 
-               percentiles = object at misc$percentiles, 
+qtplot.lmscreg(object, newdata = NULL,
+               percentiles = object at misc$percentiles,
                show.plot = TRUE, ...)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -21,8 +21,8 @@ qtplot.lmscreg(object, newdata = NULL,
   If missing, the original data is used.
   }
   \item{percentiles}{ Numerical vector with values between 0 and 100
-  that specify the percentiles (quantiles). 
-  The default are the percentiles used when the model was fitted. 
+  that specify the percentiles (quantiles).
+  The default are the percentiles used when the model was fitted.
   }
   \item{show.plot}{ Logical. Plot it? If \code{FALSE} no plot will
   be done. }
@@ -36,7 +36,7 @@ the regression or smoothing is performed. For example, in medical
 studies, it is often the age.
 In \pkg{VGAM}, it is possible to handle more than one covariate, however,
 the primary variable must be the first term after the intercept.
-  
+
 }
 \value{
    A list with the following components.
@@ -57,12 +57,12 @@ Quantile regression via vector generalized additive models.
 }
 \author{ Thomas W. Yee }
 \note{
-  \code{\link{plotqtplot.lmscreg}} does the actual plotting. 
+  \code{\link{plotqtplot.lmscreg}} does the actual plotting.
 
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{plotqtplot.lmscreg}},
   \code{\link{deplot.lmscreg}},
   \code{\link{lms.bcn}},
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
index 2ccdfb3..92964e2 100644
--- a/man/quasibinomialff.Rd
+++ b/man/quasibinomialff.Rd
@@ -17,7 +17,7 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
   \item{link}{ Link function. See \code{\link{Links}} for more choices.
 
   }
-  \item{multiple.responses}{ 
+  \item{multiple.responses}{
    Multiple responses? If \code{TRUE}, then the response is interpreted
    as \eqn{M} binary responses, where \eqn{M} is the number of columns
    of the response matrix. In this case, the response matrix should have
@@ -30,7 +30,7 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
 
 
   }
-  \item{onedpar}{ 
+  \item{onedpar}{
   One dispersion parameter? If \code{multiple.responses}, then
   a separate dispersion
   parameter will be computed for each response (column), by default.
@@ -39,7 +39,7 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
 
 
   }
-  \item{parallel}{ 
+  \item{parallel}{
   A logical or formula. Used only if \code{multiple.responses}
   is \code{TRUE}.  This
   argument allows for the parallelism assumption whereby the regression
@@ -48,7 +48,7 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   Can be an integer-valued vector specifying which linear/additive predictors
   are modelled as intercepts only.  The values must be from the set
   \{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of
@@ -89,7 +89,7 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
 
 }
 \references{
-  McCullagh, P. and Nelder, J. A. (1989) 
+  McCullagh, P. and Nelder, J. A. (1989)
   \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
 
 
@@ -124,7 +124,7 @@ quasibinomialff(link = "logit", multiple.responses = FALSE,
 
 
 % With the introduction of name spaces for the \pkg{VGAM} package,
-% \code{"ff"} can be dropped for this family function. 
+% \code{"ff"} can be dropped for this family function.
 
 
 }
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index 3d26149..38cfdb5 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -18,7 +18,7 @@ quasipoissonff(link = "loge", onedpar = FALSE,
 
   }
   \item{onedpar}{
-  One dispersion parameter? If the response is a matrix, 
+  One dispersion parameter? If the response is a matrix,
   then a separate
   dispersion parameter will be computed for each response (column),
   by default.
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index b851c19..64e586f 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -43,7 +43,7 @@ cens.rayleigh(lscale = "loge", oim = TRUE)
   \item{oim}{
   Logical.
   For censored data only,
-  \code{TRUE} means the Newton-Raphson algorithm, and 
+  \code{TRUE} means the Newton-Raphson algorithm, and
   \code{FALSE} means Fisher scoring.
 
 
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index 3685b6a..ebce5c7 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -24,25 +24,25 @@ rcqo(n, p, S, Rank = 1,
 \arguments{
   \item{n}{
     Number of sites. It is denoted by \eqn{n} below.
-    
+
   }
   \item{p}{
-    Number of environmental variables, including an intercept term. 
+    Number of environmental variables, including an intercept term.
     It is denoted by \eqn{p} below.
     Must be no less than \eqn{1+R} in value.
-    
+
   }
   \item{S}{
     Number of species.
     It is denoted by \eqn{S} below.
-    
+
   }
   \item{Rank}{
     The rank or the number of latent variables or true dimension
     of the data on the reduced space.
     This must be either 1, 2, 3 or 4.
     It is denoted by \eqn{R}.
-    
+
   }
   \item{family}{
     What type of species data is to be returned.
@@ -60,7 +60,7 @@ rcqo(n, p, S, Rank = 1,
 %    and the resulting values are
 %    1,2,\ldots,\code{breaks} if \code{breaks} is a single integer zz
 %    else zz.
-    
+
   }
   \item{eq.maximums}{
     Logical. Does each species have the same maximum?
@@ -86,8 +86,8 @@ rcqo(n, p, S, Rank = 1,
     latent variable space
     in order to place the species' optimums.
     Also see the argument \code{sd.tolerances}.
-    
-    
+
+
   }
   \item{lo.abundance, hi.abundance}{
     Numeric. These are recycled to a vector of length \eqn{S}.
@@ -100,7 +100,7 @@ rcqo(n, p, S, Rank = 1,
     If \code{eq.maximums} is \code{FALSE} then the
     logarithm of the maximums are uniformly distributed between
     \code{log(lo.abundance)} and \code{log(hi.abundance)}.
-    
+
   }
   \item{sd.latvar}{
     Numeric, of length \eqn{R}
@@ -110,7 +110,7 @@ rcqo(n, p, S, Rank = 1,
     ordination axis contains the greatest spread of the species'
     site scores, followed by the second axis, followed by the third
     axis, etc.
-        
+
   }
   \item{sd.optimums}{
     Numeric, of length \eqn{R} (recycled if necessary).
@@ -122,7 +122,7 @@ rcqo(n, p, S, Rank = 1,
     are equally spaced about 0 along every latent variable axis.
     Regardless of the value of \code{es.optimums}, the optimums
     are then scaled to give standard deviation \code{sd.optimums[r]}.
-    
+
   }
   \item{sd.tolerances}{
     Logical. If \code{eq.tolerances = FALSE} then, for the
@@ -183,13 +183,13 @@ rcqo(n, p, S, Rank = 1,
     with mean 0. The standard deviation of each environmental variable
     is chosen so that the site scores have the determined standard
     deviation, as given by argument \code{sd.latvar}.
-    
+
   }
   \item{breaks}{
     If \code{family} is assigned an ordinal value then this argument
     is used to define the cutpoints. It is fed into the
     \code{breaks} argument of \code{\link[base]{cut}}.
-    
+
   }
   \item{seed}{
     If given, it is passed into \code{\link[base:Random]{set.seed}}.
@@ -208,19 +208,19 @@ rcqo(n, p, S, Rank = 1,
   }
   \item{Crow1positive}{
     See \code{\link{qrrvglm.control}} for details.
-    
+
   }
   \item{xmat}{
    The
    \eqn{n} by  \eqn{p-1}
    environmental matrix can be inputted.
-    
+
   }
   \item{scale.latvar}{
    Logical. If \code{FALSE} the argument
    \code{sd.latvar} is ignored and no scaling of the latent variable
-   values is performed. 
-    
+   values is performed.
+
   }
 }
 \details{
@@ -248,7 +248,7 @@ rcqo(n, p, S, Rank = 1,
   variables in \eqn{X_2}.
   This function has \eqn{X_1} only being a matrix of ones,
   i.e., containing an intercept only.
-  
+
 }
 \value{
   A \eqn{n} by \eqn{p-1+S} data frame with
@@ -261,33 +261,33 @@ rcqo(n, p, S, Rank = 1,
     Note that \code{x1} is not present; it is effectively a vector
     of ones since it corresponds to an intercept term when
     \code{\link{cqo}} is applied to the data.
-    
+
   }
   \item{y1, y2, x3, \ldots, yS}{
     The species data. This makes up the
     \eqn{n} by \eqn{S} matrix \eqn{Y}.
     This will be of the form described by the argument
     \code{family}.
-    
+
   }
   \item{"concoefficients"}{
     The \eqn{p-1} by \eqn{R} matrix of
     constrained coefficients
     (or canonical coefficients).
     These are also known as weights or loadings.
-    
+
   }
   \item{"formula"}{
     The formula involving the species and environmental variable names.
     This can be used directly in the \code{formula} argument of
     \code{\link{cqo}}.
-    
+
   }
   \item{"log.maximums"}{
     The \eqn{S}-vector of species' maximums, on a log scale.
     These are uniformly distributed between
     \code{log(lo.abundance)} and \code{log(hi.abundance)}.
-    
+
 
   }
   \item{"latvar"}{
@@ -295,11 +295,11 @@ rcqo(n, p, S, Rank = 1,
     Each successive column (latent variable) has
     sample standard deviation
     equal to successive values of \code{sd.latvar}.
-    
+
   }
   \item{"eta"}{
     The linear/additive predictor value.
-    
+
   }
   \item{"optimums"}{
     The \eqn{S} by \eqn{R} matrix of species' optimums.
@@ -311,7 +311,7 @@ rcqo(n, p, S, Rank = 1,
     square root of the diagonal elements of the tolerance matrices
     (recall that all tolerance matrices are restricted to being
     diagonal in this function).
-    
+
   }
   Other attributes are \code{"break"},
   \code{"family"}, \code{"Rank"},
@@ -371,7 +371,7 @@ n <- 100; p <- 5; S <- 5
 mydata <- rcqo(n, p, S, es.opt = TRUE, eq.max = TRUE)
 names(mydata)
 (myform <- attr(mydata, "formula"))
-fit <- cqo(myform, poissonff, mydata, Bestof = 3)  # eq.tol = TRUE 
+fit <- cqo(myform, poissonff, mydata, Bestof = 3)  # eq.tol = TRUE
 matplot(attr(mydata, "latvar"), mydata[,-(1:(p-1))], col = 1:S)
 persp(fit, col = 1:S, add = TRUE)
 lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S)  # The same plot as above
diff --git a/man/rdiric.Rd b/man/rdiric.Rd
index 5d782ac..a1b602a 100644
--- a/man/rdiric.Rd
+++ b/man/rdiric.Rd
@@ -25,7 +25,7 @@ rdiric(n, shape, dimension = NULL, is.matrix.shape = FALSE)
 
   }
   \item{dimension}{
-  the dimension of the distribution. 
+  the dimension of the distribution.
   If \code{dimension} is not numeric then it is taken to be
   \code{length(shape)}
   (or \code{ncol(shape)} if \code{is.matrix.shape == TRUE}).
@@ -47,7 +47,7 @@ rdiric(n, shape, dimension = NULL, is.matrix.shape = FALSE)
 \details{
   This function is based on a relationship between the gamma and
   Dirichlet distribution. Random gamma variates are generated, and
-  then Dirichlet random variates are formed from these. 
+  then Dirichlet random variates are formed from these.
 
 
 }
@@ -58,9 +58,9 @@ rdiric(n, shape, dimension = NULL, is.matrix.shape = FALSE)
   of the answer.
 
 
-} 
- 
-\references{ 
+}
+
+\references{
 
 Lange, K. (2002)
 \emph{Mathematical and Statistical Methods for Genetic Analysis},
@@ -70,12 +70,12 @@ New York: Springer-Verlag.
 
 }
 \author{ Thomas W. Yee }
-\seealso{ 
+\seealso{
   \code{\link{dirichlet}} is a \pkg{VGAM} family function for
   fitting a Dirichlet distribution to data.
 
 
-} 
+}
 
 \examples{
 ddata <- data.frame(rdiric(n = 1000, shape = c(y1 = 3, y2 = 1, y3 = 4)))
diff --git a/man/rec.exp1.Rd b/man/rec.exp1.Rd
index 5e21a5f..d488160 100644
--- a/man/rec.exp1.Rd
+++ b/man/rec.exp1.Rd
@@ -4,7 +4,7 @@
 \title{ Upper Record Values from a 1-parameter Exponential Distribution }
 \description{
   Maximum likelihood estimation of the rate parameter of a
-  1-parameter exponential distribution when the observations are upper 
+  1-parameter exponential distribution when the observations are upper
   record values.
 
 }
diff --git a/man/rec.normal.Rd b/man/rec.normal.Rd
index 05eaeb1..cad4189 100644
--- a/man/rec.normal.Rd
+++ b/man/rec.normal.Rd
@@ -4,7 +4,7 @@
 \title{ Upper Record Values from a Univariate Normal Distribution }
 \description{
   Maximum likelihood estimation of the two parameters of a
-  univariate normal distribution when the observations are upper 
+  univariate normal distribution when the observations are upper
   record values.
 
 }
@@ -69,7 +69,7 @@ rec.normal(lmean = "identitylink", lsd = "loge",
 \note{
   This family function tries to solve a difficult problem, and the
   larger the data set the better.
-  Convergence failure can commonly occur, and 
+  Convergence failure can commonly occur, and
   convergence may be very slow, so set \code{maxit = 200, trace = TRUE}, say.
   Inputting good initial values are advised.
 
diff --git a/man/reciprocal.Rd b/man/reciprocal.Rd
index 03bff63..0e2afdb 100644
--- a/man/reciprocal.Rd
+++ b/man/reciprocal.Rd
@@ -76,7 +76,7 @@ close to 0.
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{identity}},
     \code{\link{powerlink}}.
 
diff --git a/man/rhobit.Rd b/man/rhobit.Rd
index b3d39ad..ea198c6 100644
--- a/man/rhobit.Rd
+++ b/man/rhobit.Rd
@@ -17,7 +17,7 @@ rhobit(theta, bminvalue = NULL, bmaxvalue = NULL,
   See below for further details.
 
   }
-  \item{bminvalue, bmaxvalue}{ 
+  \item{bminvalue, bmaxvalue}{
   Optional boundary values, e.g.,
   values of \code{theta} which are less than or equal to -1 can be
   replaced by \code{bminvalue}
diff --git a/man/riceff.Rd b/man/riceff.Rd
index 3791916..42a6871 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -46,7 +46,7 @@ riceff(lsigma = "loge", lvee = "loge", isigma = NULL,
   \deqn{f(y;v,\sigma) =
   \frac{ y }{\sigma^2} \, \exp(-(y^2+v^2) / (2\sigma^2)) \, I_0(y v / \sigma^2)
   }{%
-  f(y;v,sigma) = 
+  f(y;v,sigma) =
   (y/sigma^2) * exp(-(y^2+v^2) / (2*sigma^2)) * I_0(y*v/sigma^2)}
   where \eqn{y > 0},
   \eqn{v > 0},
@@ -91,7 +91,7 @@ Mathematical Analysis of Random Noise.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{drice}},
   \code{\link{rayleigh}},
   \code{\link[base:Bessel]{besselI}},
diff --git a/man/rigff.Rd b/man/rigff.Rd
index 0c10d83..cfa08f3 100644
--- a/man/rigff.Rd
+++ b/man/rigff.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Reciprocal Inverse Gaussian distribution }
 \description{
-  Estimation of the parameters of a 
+  Estimation of the parameters of a
   reciprocal inverse Gaussian distribution.
 
 }
@@ -46,7 +46,7 @@ London: Chapman & Hall
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   This distribution is potentially useful for dispersion modelling.
 
 
diff --git a/man/rlplot.gevff.Rd b/man/rlplot.gevff.Rd
index eb32a08..8a28f0a 100644
--- a/man/rlplot.gevff.Rd
+++ b/man/rlplot.gevff.Rd
@@ -77,7 +77,7 @@ rlplot.gevff(object, show.plot = TRUE,
     when setting up the entire plot. Useful arguments here include
     \code{sub} and \code{las}.
 
-    
+
   }
 }
 \details{
@@ -100,8 +100,8 @@ rlplot.gevff(object, show.plot = TRUE,
 
 
   The points in the plot are the actual data.
-  
-  
+
+
 }
 \value{
   In the \code{post} slot of the object is a list called
diff --git a/man/rrar.Rd b/man/rrar.Rd
index 83ae723..347a29c 100644
--- a/man/rrar.Rd
+++ b/man/rrar.Rd
@@ -4,7 +4,7 @@
 \title{ Nested reduced-rank autoregressive models for multiple
 time series }
 \description{
-  Estimates the parameters of a 
+  Estimates the parameters of a
   nested reduced-rank autoregressive model for multiple
   time series.
 
@@ -17,14 +17,14 @@ rrar(Ranks = 1, coefstart = NULL)
   \item{Ranks}{
   Vector of integers: the ranks of the model.
   Each value must be at least one and no more than \code{M},
-  where \code{M} is the number of response variables in the time series. 
+  where \code{M} is the number of response variables in the time series.
   The length of \code{Ranks} is the \emph{lag}, which is often denoted by
   the symbol \emph{L} in the literature.
 
 
   }
   \item{coefstart}{
-  Optional numerical vector of initial values for the coefficients. 
+  Optional numerical vector of initial values for the coefficients.
   By default, the family function chooses these automatically.
 
 
@@ -33,8 +33,8 @@ rrar(Ranks = 1, coefstart = NULL)
 \details{
    Full details are given in Ahn and Reinsel (1988).
    Convergence may be very slow, so setting \code{maxits = 50}, say, may help.
-   If convergence is not obtained, you might like to try inputting different 
-   initial values. 
+   If convergence is not obtained, you might like to try inputting different
+   initial values.
 
 
    Setting \code{trace = TRUE} in \code{\link{vglm}} is useful for monitoring
@@ -60,7 +60,7 @@ time series.
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   This family function should be used within \code{\link{vglm}}
   and not with \code{\link{rrvglm}} because it does not fit into
   the RR-VGLM framework exactly. Instead, the reduced-rank model
@@ -68,7 +68,7 @@ time series.
 
 
   A methods function \code{Coef.rrar}, say, has yet to be written.
-  It would return the quantities 
+  It would return the quantities
   \code{Ak1},
   \code{C},
   \code{D},
diff --git a/man/rrvglm-class.Rd b/man/rrvglm-class.Rd
index 299837a..587449a 100644
--- a/man/rrvglm-class.Rd
+++ b/man/rrvglm-class.Rd
@@ -2,7 +2,7 @@
 \docType{class}
 \alias{rrvglm-class}
 \title{Class ``rrvglm'' }
-\description{ 
+\description{
 Reduced-rank vector generalized linear models.
 }
 \section{Objects from the Class}{
@@ -24,7 +24,7 @@ Objects can be created by calls to \code{\link{rrvglm}}.
   The number of IRLS iterations used.
   }
   \item{\code{predictors}:}{
-  Object of class \code{"matrix"} 
+  Object of class \code{"matrix"}
   with \eqn{M} columns which holds the \eqn{M} linear predictors.
   }
   \item{\code{assign}:}{
@@ -88,7 +88,7 @@ Objects can be created by calls to \code{\link{rrvglm}}.
   Object of class
   \code{"matrix"}, from class \code{ "vlm"}.
   The fitted values. This is usually the mean but may be
-  quantiles, or the location parameter, e.g., in the Cauchy model. 
+  quantiles, or the location parameter, e.g., in the Cauchy model.
 
   }
   \item{\code{misc}:}{
@@ -124,18 +124,18 @@ Objects can be created by calls to \code{\link{rrvglm}}.
   }
   \item{\code{prior.weights}:}{
   Object of class
-  \code{"matrix"}, from class \code{ "vlm"} 
+  \code{"matrix"}, from class \code{ "vlm"}
   holding the initially supplied weights.
   }
   \item{\code{qr}:}{
   Object of class \code{"list"},
   from class \code{ "vlm"}.
-  QR decomposition at the final iteration. 
+  QR decomposition at the final iteration.
   }
   \item{\code{R}:}{
   Object of class \code{"matrix"},
   from class \code{ "vlm"}.
-  The \bold{R} matrix in the QR decomposition used in the fitting. 
+  The \bold{R} matrix in the QR decomposition used in the fitting.
   }
   \item{\code{rank}:}{
   Object of class \code{"integer"},
@@ -208,7 +208,7 @@ Class \code{"vlm"}, by class "vglm".
   \describe{
     \item{biplot}{\code{signature(x = "rrvglm")}: biplot. }
     \item{Coef}{\code{signature(object = "rrvglm")}: more detailed
-       coefficients giving \bold{A}, 
+       coefficients giving \bold{A},
        \eqn{\bold{B}_1}{\bold{B}1}, \bold{C}, etc.
     }
   \item{biplot}{\code{signature(object = "rrvglm")}:
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 87c2f19..03f002d 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -39,7 +39,7 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
 
   }
   \item{control}{
-  a list of parameters for controlling the fitting process. 
+  a list of parameters for controlling the fitting process.
   See \code{\link{rrvglm.control}} for details.
 
   }
@@ -59,7 +59,7 @@ rrvglm(formula, family, data = list(), weights = NULL, subset = NULL,
   process should be assigned in the \code{x} and \code{y} slots.
   Note the model matrix is the LM model matrix; to get the VGLM
   model matrix type \code{model.matrix(vglmfit)} where
-  \code{vglmfit} is a \code{vglm} object. 
+  \code{vglmfit} is a \code{vglm} object.
 
   }
   \item{constraints}{
@@ -270,7 +270,7 @@ Confint.rrnb(rrnb2)  # Quick way to get it
 
 # Plot the abundances and fitted values against the latent variable
 plot(y2 ~ latvar(rrnb2), data = mydata, col = "blue",
-     xlab = "Latent variable", las = 1) 
+     xlab = "Latent variable", las = 1)
 ooo <- order(latvar(rrnb2))
 lines(fitted(rrnb2)[ooo] ~ latvar(rrnb2)[ooo], col = "orange")
 
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index 6623039..7b71345 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -11,14 +11,14 @@
 rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
     Corner = TRUE, Uncorrelated.latvar = FALSE,
     Wmat = NULL, Svd.arg = FALSE,
-    Index.corner = if (length(str0)) 
+    Index.corner = if (length(str0))
     head((1:1000)[-str0], Rank) else 1:Rank,
     Ainit = NULL, Alpha = 0.5, Bestof = 1, Cinit = NULL,
     Etamat.colmax = 10,
     sd.Ainit = 0.02, sd.Cinit = 0.02, str0 = NULL,
     noRRR = ~1, Norrr = NA,
     noWarning = FALSE,
-    trace = FALSE, Use.Init.Poisson.QO = FALSE, 
+    trace = FALSE, Use.Init.Poisson.QO = FALSE,
     checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
     wzepsilon = .Machine$double.eps^0.75, ...)
 }
@@ -26,7 +26,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
 \arguments{
   \item{Rank}{
     The numerical rank \eqn{R} of the model.
-    Must be an element from the set \{1,2,\ldots,min(\eqn{M},\emph{p2})\}. 
+    Must be an element from the set \{1,2,\ldots,min(\eqn{M},\emph{p2})\}.
     Here, the vector of explanatory variables \bold{x} is partitioned into
     (\bold{x1},\bold{x2}), which is of dimension \emph{p1}+\emph{p2}.
     The variables making up \bold{x1} are given by the terms in
@@ -77,11 +77,11 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
     The exponent in the singular value decomposition that is used in
     the first part: if the SVD is
     \eqn{U D V^T}{ U \%*\% D \%*\% t(V) } then the
-    first and second parts are 
-    \eqn{U D^{\alpha}}{ U \%*\% D^Alpha} 
+    first and second parts are
+    \eqn{U D^{\alpha}}{ U \%*\% D^Alpha}
     and
     \eqn{D^{1-\alpha}  V^T}{D^(1-Alpha) \%*\% t(V)} respectively.
-    A value of 0.5 is `symmetrical'. 
+    A value of 0.5 is `symmetrical'.
     This argument is used only when \code{Svd.arg=TRUE}.
 
 
@@ -131,7 +131,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
   \item{sd.Ainit, sd.Cinit}{
       Standard deviation of the initial values for the elements
       of \bold{A} and \bold{C}.
-      These are normally distributed with mean zero.  
+      These are normally distributed with mean zero.
       This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
 
 
@@ -190,7 +190,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
     Same as \code{\link{vglm.control}}.
     Ignored for \pkg{VGAM} 0.9-7 and higher.
 
-  
+
 
   }
   \item{wzepsilon}{
@@ -209,9 +209,9 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
 
 
   }
-  In the above, \eqn{R} is the \code{Rank} and 
+  In the above, \eqn{R} is the \code{Rank} and
   \eqn{M} is the number of linear predictors.
-  
+
 }
 \details{
 %   QRR-VGLMs are an extension of RR-VGLMs and are useful for constrained
diff --git a/man/rrvglm.optim.control.Rd b/man/rrvglm.optim.control.Rd
index 93f9078..9c1b33f 100644
--- a/man/rrvglm.optim.control.Rd
+++ b/man/rrvglm.optim.control.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Control Function for rrvglm() Calling optim() }
 \description{
-  Algorithmic constants and parameters for running \code{optim} within 
+  Algorithmic constants and parameters for running \code{optim} within
   \code{rrvglm} are set using this function.
 
 
@@ -20,8 +20,8 @@ rrvglm.optim.control(Fnscale = 1, Maxit = 100,
   \item{Switch.optimizer}{ Iteration number when the "Nelder-Mead" method
   of \code{optim} is switched to the quasi-Newton "BFGS" method.
   Assigning \code{Switch.optimizer} a negative number
-  means always BFGS, while assigning \code{Switch.optimizer} a value 
-  greater than \code{maxits} means always use Nelder-Mead. 
+  means always BFGS, while assigning \code{Switch.optimizer} a value
+  greater than \code{maxits} means always use Nelder-Mead.
   }
   \item{Abstol}{ Passed into \code{optim} as \code{abstol}. }
   \item{Reltol}{ Passed into \code{optim} as \code{reltol}. }
@@ -29,7 +29,7 @@ rrvglm.optim.control(Fnscale = 1, Maxit = 100,
 
 }
 \details{
-  See \code{\link[stats]{optim}} for more details. 
+  See \code{\link[stats]{optim}} for more details.
 
 
 }
@@ -45,7 +45,7 @@ rrvglm.optim.control(Fnscale = 1, Maxit = 100,
   unstable, so users may have to vary the value of
   \code{Switch.optimizer}.
 
- 
+
   Practical experience with \code{Switch.optimizer} shows
   that setting it to too large a value may lead to a local
   solution, whereas setting it to a low value will obtain
diff --git a/man/s.Rd b/man/s.Rd
index 3ce5594..884b881 100644
--- a/man/s.Rd
+++ b/man/s.Rd
@@ -5,6 +5,9 @@
 \description{
   \code{s} is used in the definition of (vector) smooth terms within
   \code{vgam} formulas.
+  This corresponds to 1st-generation VGAMs that use backfitting
+  for their estimation.
+  The effective degrees of freedom is prespecified.
 
 
 }
@@ -18,7 +21,7 @@ s(x, df = 4, spar = 0, ...)
   covariate (abscissae) to be smoothed.
   Note that \code{x} must be a \emph{single} variable
   and not a function of a variable.
-  For example, \code{s(x)} is fine but \code{s(log(x))} will fail. 
+  For example, \code{s(x)} is fine but \code{s(log(x))} will fail.
   In this case, let \code{logx <- log(x)} (in the data frame),
   say, and then use \code{s(logx)}.
   At this stage bivariate smoothers (\code{x} would be a two-column matrix)
@@ -29,21 +32,25 @@ s(x, df = 4, spar = 0, ...)
   \item{df}{
   numerical vector of length \eqn{r}.
   Effective degrees of freedom: must lie between 1 (linear fit)
-  and \eqn{n} (interpolation). 
+  and \eqn{n} (interpolation).
   Thus one could say that \code{df-1} is the
-  \emph{nonlinear degrees of freedom} of the smooth.
+  \emph{effective nonlinear degrees of freedom} (ENDF) of the smooth.
   Recycling of values will be used if \code{df} is not of length \eqn{r}.
   If \code{spar} is positive then this argument is ignored.
+  Thus \code{s()} means that the effective degrees of freedom is prespecified.
+  If it is known that the component function(s) are more wiggly
+  than usual then try increasing the value of this argument.
+
 
 
   }
-  \item{spar}{ numerical vector of length \eqn{r}. 
+  \item{spar}{ numerical vector of length \eqn{r}.
   Positive smoothing parameters (after scaling) .
   Larger values mean more smoothing so that the solution approaches
   a linear fit for that component function.
-  A zero value means that \code{df} is used. 
-  Recycling of values will be used if \code{spar} is not of length 
-  \eqn{r}.  
+  A zero value means that \code{df} is used.
+  Recycling of values will be used if \code{spar} is not of length
+  \eqn{r}.
 
 
   }
@@ -58,30 +65,34 @@ s(x, df = 4, spar = 0, ...)
   and \eqn{r} is the number of component functions to be
   estimated (so that \eqn{r} is an element from the set
   \{1,2,\ldots,\eqn{M}\}).
-  Also, if \eqn{n} is the number of \emph{distinct} abscissae, then 
+  Also, if \eqn{n} is the number of \emph{distinct} abscissae, then
   \code{s} will fail if \eqn{n < 7}.
 
 
+
   \code{s}, which is symbolic and does not perform any smoothing itself,
   only handles a single covariate.
   Note that \code{s} works in \code{\link{vgam}} only.
   It has no effect in \code{\link{vglm}}
   (actually, it is similar to the identity function \code{\link[base:AsIs]{I}}
   so that \code{s(x2)} is the same as \code{x2} in the LM model matrix).
-  It differs from the \code{s} of the \pkg{gam} and \pkg{mgcv} packages;
-  they should not be mixed together. 
+  It differs from the \code{s()} of the \pkg{gam} package and
+  the \code{\link[mgcv]{s}} of the \pkg{mgcv} package;
+  they should not be mixed together.
   Also, terms involving \code{s} should be simple additive terms, and not
   involving interactions and nesting etc.
   For example, \code{myfactor:s(x2)} is not a good idea.
 
 
+
 % It also differs from the S-PLUS \code{s} which allows
 % \code{spar} to be negative; \pkg{VGAM} does not allow this.
 
 
+
 }
 \value{
-  A vector with attributes that are (only) used by \code{vgam}. 
+  A vector with attributes that are (only) used by \code{vgam}.
 
 
 }
@@ -113,7 +124,20 @@ Vector generalized additive models.
 
 
 
-  An alternative to using
+  A more modern alternative to using
+  \code{s} with \code{\link{vgam}} is to use
+  \code{\link{sm.os}} or
+  \code{\link{sm.ps}}.
+  This does not require backfitting
+  and allows automatic smoothing parameter selection.
+  However, this alternative should only be used when the
+  sample size is reasonably large (\eqn{> 500}, say).
+  These are called Generation-2 VGAMs.
+
+
+
+
+  Another alternative to using
   \code{s} with \code{\link{vgam}} is
   \code{\link[splines]{bs}}
   and/or \code{\link[splines]{ns}}
@@ -127,11 +151,13 @@ Vector generalized additive models.
 
 % ~Make other sections like WARNING with \section{WARNING }{....} ~
 
+
 \seealso{
   \code{\link{vgam}},
   \code{\link{is.buggy}},
+  \code{\link{sm.os}},
+  \code{\link{sm.ps}},
   \code{\link{vsmooth.spline}}.
-% \code{\link{ps}}.
 
 
 }
@@ -144,7 +170,7 @@ fit1 <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
 # Bivariate logistic model with artificial data
 nn <- 300
 bdata <- data.frame(x1 = runif(nn), x2 = runif(nn))
-bdata <- transform(bdata, 
+bdata <- transform(bdata,
     y1 = rbinom(nn, size = 1, prob = logit(sin(2 * x2), inverse = TRUE)),
     y2 = rbinom(nn, size = 1, prob = logit(sin(2 * x2), inverse = TRUE)))
 fit2 <- vgam(cbind(y1, y2) ~ x1 + s(x2, 3), trace = TRUE,
diff --git a/man/sc.studentt2.Rd b/man/sc.studentt2.Rd
index 991a212..2bea609 100644
--- a/man/sc.studentt2.Rd
+++ b/man/sc.studentt2.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Scaled Student t Distribution with 2 df Family Function }
 \description{
-  Estimates the location and scale parameters of 
+  Estimates the location and scale parameters of
   a scaled Student t distribution with 2 degrees of freedom,
   by maximum likelihood estimation.
 
@@ -28,7 +28,7 @@ sc.studentt2(percentile = 50, llocation = "identitylink", lscale = "loge",
 
 
   }
-  \item{ilocation, iscale, imethod, zero}{ 
+  \item{ilocation, iscale, imethod, zero}{
   See \code{\link{CommonVGAMffArguments}} for details.
 
 
@@ -82,7 +82,7 @@ sc.studentt2(percentile = 50, llocation = "identitylink", lscale = "loge",
 
 
 }
-\references{ 
+\references{
 
 Koenker, R. (1993)
 When are expectiles percentiles? (solution)
@@ -96,7 +96,7 @@ When are expectiles percentiles? (solution)
 %
 %}
 
-\seealso{ 
+\seealso{
   \code{\link{dsc.t2}},
   \code{\link{studentt2}}.
 
diff --git a/man/sc.t2UC.Rd b/man/sc.t2UC.Rd
index f186c94..0da32fc 100644
--- a/man/sc.t2UC.Rd
+++ b/man/sc.t2UC.Rd
@@ -67,7 +67,7 @@ rsc.t2(n, location = 0, scale = 1)
 }
 \author{ T. W. Yee and Kai Huang }
 
-%\note{ 
+%\note{
 %}
 
 \seealso{
diff --git a/man/seq2binomial.Rd b/man/seq2binomial.Rd
index 4f15b4b..2e7be06 100644
--- a/man/seq2binomial.Rd
+++ b/man/seq2binomial.Rd
@@ -15,14 +15,14 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit",
 %- maybe also 'usage' for other objects documented here.
 %  apply.parint = TRUE,
 \arguments{
-  \item{lprob1, lprob2}{ 
+  \item{lprob1, lprob2}{
   Parameter link functions applied to the two probabilities,
   called \eqn{p} and \eqn{q} below.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{iprob1, iprob2}{ 
+  \item{iprob1, iprob2}{
   Optional initial value for the first and second probabilities respectively.
   A \code{NULL} means a value is obtained in the \code{initialize} slot.
 
@@ -69,7 +69,7 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit",
 
 
 }
-\references{ 
+\references{
 
   Crowder, M. and Sweeting, T. (1989).
   Bayesian inference for a bivariate binomial distribution.
@@ -95,7 +95,7 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit",
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{binomialff}},
   \code{\link{cfibrosis}}.
 
@@ -120,7 +120,7 @@ head(weights(fit, type = "prior"))  # Same as with(sdata, mvector)
 # Number of first successes:
 head(depvar(fit)[, 1] * c(weights(fit, type = "prior")))
 # Number of second successes:
-head(depvar(fit)[, 2] * c(weights(fit, type = "prior")) * 
+head(depvar(fit)[, 2] * c(weights(fit, type = "prior")) *
                           depvar(fit)[, 1])
 }
 \keyword{models}
diff --git a/man/simplex.Rd b/man/simplex.Rd
index ebcb7d8..191238a 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -16,7 +16,7 @@ simplex(lmu = "logit", lsigma = "loge", imu = NULL, isigma = NULL,
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lmu, lsigma}{
-  Link function for \code{mu} and \code{sigma}. 
+  Link function for \code{mu} and \code{sigma}.
   See \code{\link{Links}} for more choices.
 
 
@@ -87,14 +87,14 @@ simplex(lmu = "logit", lsigma = "loge", imu = NULL, isigma = NULL,
 
 }
 \author{ T. W. Yee }
-\note{ 
+\note{
   This distribution is potentially useful for dispersion modelling.
   Numerical problems may occur when \code{mu} is very close to 0 or 1.
 
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{dsimplex}},
   \code{\link{dirichlet}},
   \code{\link{rig}},
diff --git a/man/simplexUC.Rd b/man/simplexUC.Rd
index 4557206..2ac59b1 100644
--- a/man/simplexUC.Rd
+++ b/man/simplexUC.Rd
@@ -49,7 +49,7 @@ rsimplex(n, mu = 0.5, dispersion = 1)
 
 
 }
-% \references{ 
+% \references{
 %
 %}
 \author{ T. W. Yee }
diff --git a/man/simulate.vlm.Rd b/man/simulate.vlm.Rd
index b541bbe..32790aa 100644
--- a/man/simulate.vlm.Rd
+++ b/man/simulate.vlm.Rd
@@ -1,7 +1,7 @@
 % 20131230; adapted from simulate.Rd from R 3.0.2
 
 
-\newcommand{\CRANpkg}{\href{http://CRAN.R-project.org/package=#1}{\pkg{#1}}}
+% \newcommand{\CRANpkg}{\href{http://CRAN.R-project.org/package=#1}{\pkg{#1}}}
 
 
 \name{simulate.vlm}
@@ -49,7 +49,7 @@
 }
 \value{
   Similar to \code{\link[stats]{simulate}}.
-  Note that many \pkg{VGAM} family functions can handle multiple responses. 
+  Note that many \pkg{VGAM} family functions can handle multiple responses.
   This can result in a longer data frame with more rows
   (\code{nsim} multiplied by \code{n} rather than the
    ordinary \code{n}).
@@ -183,15 +183,15 @@
 \section{Warning}{
   With multiple response and/or multivariate responses,
   the order of the elements may differ.
-  For some \pkg{VGAM} families, the order is 
+  For some \pkg{VGAM} families, the order is
   \eqn{n \times N \times F}{n x N x F},
   where \eqn{n} is the sample size,
   \eqn{N} is \code{nsim} and
   \eqn{F} is \code{ncol(fitted(vglmObject))}.
-  For other \pkg{VGAM} families, the order is 
+  For other \pkg{VGAM} families, the order is
   \eqn{n \times F \times N}{n x F x N}.
   An example of each is given below.
-  
+
 
 }
 
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 31cd360..398a0f5 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -3,12 +3,12 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Singh-Maddala Distribution Family Function }
 \description{
-  Maximum likelihood estimation of the 3-parameter 
+  Maximum likelihood estimation of the 3-parameter
   Singh-Maddala distribution.
 }
 \usage{
-sinmad(lscale = "loge", lshape1.a = "loge", lshape3.q = "loge", 
-       iscale = NULL, ishape1.a = NULL, ishape3.q = NULL, imethod = 1, 
+sinmad(lscale = "loge", lshape1.a = "loge", lshape3.q = "loge",
+       iscale = NULL, ishape1.a = NULL, ishape3.q = NULL, imethod = 1,
        lss = TRUE, gscale = exp(-5:5), gshape1.a = exp(-5:5),
        gshape3.q = exp(-5:5), probs.y = c(0.25, 0.5, 0.75),
        zero = "shape")
diff --git a/man/skellam.Rd b/man/skellam.Rd
index 0a9e1d7..d8df6cd 100644
--- a/man/skellam.Rd
+++ b/man/skellam.Rd
@@ -46,7 +46,7 @@ skellam(lmu1 = "loge", lmu2 = "loge", imu1 = NULL, imu2 = NULL,
   }{%
 f(y;mu1,mu2) =
   ( \mu1 / mu_2 )^(y/2) *
-  exp(-mu1-mu2 ) * I_(|y|)( 2 * sqrt(mu1*mu2)) 
+  exp(-mu1-mu2 ) * I_(|y|)( 2 * sqrt(mu1*mu2))
   }
   where \eqn{y} is an integer,
   \eqn{\mu_1 > 0}{mu1 > 0},
@@ -80,7 +80,7 @@ f(y;mu1,mu2) =
 \references{
 
 Skellam, J. G. (1946)
-The frequency distribution of the difference between 
+The frequency distribution of the difference between
 two Poisson variates belonging to different populations.
 \emph{Journal of the Royal Statistical Society, Series A},
 \bold{109}, 296.
@@ -95,7 +95,7 @@ two Poisson variates belonging to different populations.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{dskellam}},
   \code{\link[stats:Poisson]{dpois}},
   \code{\link{poissonff}}.
diff --git a/man/skewnormUC.Rd b/man/skewnormUC.Rd
index e68eb34..7ea7bee 100644
--- a/man/skewnormUC.Rd
+++ b/man/skewnormUC.Rd
@@ -7,7 +7,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Skew-Normal Distribution }
 \description{
-  Density and 
+  Density and
   random generation
   for the univariate skew-normal distribution.
 
@@ -33,7 +33,7 @@ rskewnorm(n, location = 0, scale = 1, shape = 0)
   }
 
   \item{location}{
-  The location parameter \eqn{\xi}{xi}. A vector. 
+  The location parameter \eqn{\xi}{xi}. A vector.
 
 
   }
@@ -77,7 +77,7 @@ rskewnorm(n, location = 0, scale = 1, shape = 0)
 
 
 }
-\references{ 
+\references{
     \url{http://tango.stat.unipd.it/SN}.
 
 
@@ -91,7 +91,7 @@ rskewnorm(n, location = 0, scale = 1, shape = 0)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{skewnormal}}.
 
 
diff --git a/man/skewnormal.Rd b/man/skewnormal.Rd
index 0b61d1b..fd26f77 100644
--- a/man/skewnormal.Rd
+++ b/man/skewnormal.Rd
@@ -91,7 +91,7 @@ skewnormal(lshape = "identitylink", ishape = NULL, nsimEIM = NULL)
   and thus produces influential problems.
 
 
-}  
+}
 \seealso{
   \code{\link{skewnorm}},
   \code{\link{uninormal}},
diff --git a/man/slash.Rd b/man/slash.Rd
index 59f61fa..f9629a6 100644
--- a/man/slash.Rd
+++ b/man/slash.Rd
@@ -8,7 +8,7 @@
 }
 \usage{
 slash(lmu = "identitylink", lsigma = "loge",
-      imu = NULL, isigma = NULL, iprobs  =  c(0.1, 0.9), nsimEIM = 250,
+      imu = NULL, isigma = NULL, gprobs.y = ppoints(8), nsimEIM = 250,
       zero = NULL, smallno = .Machine$double.eps*1000)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -32,12 +32,13 @@ slash(lmu = "identitylink", lsigma = "loge",
   See \code{\link{CommonVGAMffArguments}} for more information.
 
   }
-  \item{iprobs}{
+  \item{gprobs.y}{
   Used to compute the initial values for \code{mu}.
   This argument is fed into the \code{probs} argument of
-  \code{\link[stats]{quantile}}, and then a grid between these two points
-  is used to evaluate the log-likelihood.
-  This argument must be of length two and have values between 0 and 1.
+  \code{\link[stats]{quantile}} to construct a grid,
+  which is used to evaluate the log-likelihood.
+  This must have values between 0 and 1.
+
 
   }
   \item{nsimEIM, zero}{
@@ -51,14 +52,14 @@ slash(lmu = "identitylink", lsigma = "loge",
   }
 }
 \details{
-  The standard slash distribution is the distribution of the ratio of 
-  a standard normal variable to an independent standard uniform(0,1) variable. 
+  The standard slash distribution is the distribution of the ratio of
+  a standard normal variable to an independent standard uniform(0,1) variable.
   It is mainly of use in simulation studies.
   One of its properties is that it has heavy tails, similar to those of
   the Cauchy.
-  
-  The general slash distribution can be obtained by replacing 
-  the univariate normal variable by a general normal 
+
+  The general slash distribution can be obtained by replacing
+  the univariate normal variable by a general normal
   \eqn{N(\mu,\sigma)}{N(mu,sigma)} random variable.
   It has a density that can be written as
   \deqn{f(y) = \left\{
@@ -68,10 +69,10 @@ slash(lmu = "identitylink", lsigma = "loge",
 \end{array} \right . }{%
 f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
      = 1-exp(-(((x-mu)/sigma)^2)/2))/(sqrt(2*pi)*sigma*((x-mu)/sigma)^2) if y!=mu}
-  where \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are 
-  the mean and standard deviation of 
+  where \eqn{\mu}{mu} and \eqn{\sigma}{sigma} are
+  the mean and standard deviation of
   the univariate normal distribution respectively.
-   
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -81,12 +82,12 @@ f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
 
 
 }
-\references{ 
-  Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994) 
+\references{
+  Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
   \emph{Continuous Univariate Distributions},
   2nd edition, Volume 1, New York: Wiley.
-  
-  
+
+
   Kafadar, K. (1982)
   A Biweight Approach to the One-Sample Problem
   \emph{Journal of the American Statistical Association},
@@ -114,7 +115,7 @@ f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
 \examples{
 \dontrun{
 sdata <- data.frame(y = rslash(n = 1000, mu = 4, sigma = exp(2)))
-fit <- vglm(y ~ 1, slash, data = sdata, trace = TRUE) 
+fit <- vglm(y ~ 1, slash, data = sdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/slashUC.Rd b/man/slashUC.Rd
index 1e8b8b7..b5742ce 100644
--- a/man/slashUC.Rd
+++ b/man/slashUC.Rd
@@ -27,10 +27,10 @@ rslash(n, mu = 0, sigma = 1)
 
 
   }
-  \item{mu, sigma}{the mean and standard deviation of 
+  \item{mu, sigma}{the mean and standard deviation of
   the univariate normal distribution.
 
-  
+
   }
   \item{log}{
   Logical.
@@ -92,7 +92,7 @@ rslash(n, mu = 0, sigma = 1)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{slash}}.
 
 
diff --git a/man/sm.os.Rd b/man/sm.os.Rd
new file mode 100644
index 0000000..9b08abb
--- /dev/null
+++ b/man/sm.os.Rd
@@ -0,0 +1,472 @@
+\name{sm.os}
+\alias{sm.os}
+%
+%
+% 20161028; 20161213
+%
+%
+%
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Defining O'Sullivan Spline Smooths in VGAM Formulas }
+\description{
+  This function represents an O-spline smooth term
+  in a \code{vgam} formula
+  and confers automatic smoothing parameter selection.
+
+
+}
+\usage{
+sm.os(x, ..., niknots = 6, spar = -1, o.order = 2,
+      alg.niknots = c("s", ".nknots.smspl")[1], all.knots = FALSE,
+      ridge.adj = 1e-5, spillover = 0.01, maxspar = 1e12,
+      outer.ok = FALSE, fixspar = FALSE)
+}
+%     degree = 3,
+%- maybe also `usage' for other objects documented here.
+\arguments{
+
+  \item{x}{
+    covariate (abscissae) to be smoothed.
+    Also called the regressor.
+    If the \code{xij} facility is used then these
+    covariates are inputted via the \code{\dots} argument.
+
+
+
+%   Currently at least 7 unique \code{x} values are needed.
+
+
+  }
+  \item{\dots}{
+    Used to accommodate the other \eqn{M-1} covariates
+    when the \code{xij} facility is used.
+    See Section 3.4.4 of Yee (2015) for something very similar.
+    This argument, found in the second argument, means that
+    the other argument names must be fully specified if used,
+    e.g., \code{outer.ok} and not \code{outer}.
+    See the example below.
+    In the example below,
+    the term in the main formula is
+    \code{sm.os(gcost.air, gcost.trn, gcost.bus)}
+    and one might be tempted to use something like
+    \code{sm.os(gcost)} to represent that \code{xij} term.
+    However, this is not recommended because
+    \code{sm.os(gcost)} might not have the same number of columns
+    as \code{sm.os(gcost.air, gcost.trn, gcost.bus)} etc.
+    That is, it is best to select one of the diagonal elements
+    of the block matrix to represent that term.
+
+
+
+  }
+
+  \item{niknots}{
+    numeric,
+    the number of \emph{interior} knots,
+    called \eqn{K} below.
+    The default is to use this value.
+    If you want \code{alg.niknots} to operate then
+    assign \code{NULL} to this argument.
+
+
+  }
+
+  \item{alg.niknots}{
+    character.
+    The algorithm used to determine the number of interior knots.
+    Only used when \code{all.knots = FALSE} and \code{niknots = NULL}.
+    Note that \code{".nknots.smspl"} corresponds to the default of
+    \code{\link[stats]{smooth.spline}}.
+    The value \code{"s"} corresponds to the same algorithm
+    as \code{\link[VGAM]{s}}.
+
+
+%   the other algorithms tend to give fewer knots than this choice
+%   because when the model's \eqn{M} is large  then the number
+%   of parameters to be estimated and the amount of memory
+%   used quickly grows.
+
+
+  }
+  \item{all.knots}{
+    logical.
+    If \code{TRUE} then all distinct points in \code{x} are used as
+    the interior knots.
+    If \code{FALSE} (default) then
+    a subset of \code{x[]} is used, specifically
+    \code{x[j]} where the \code{niknots} indices are
+    quantiles that are evenly spaced with respect to the
+    argument \code{probs}---see \code{\link[stats]{quantile}}.
+    If \code{all.knots = FALSE} and
+    \code{niknots = NULL} then the argument
+    \code{alg.niknots} is used to compute \code{niknots}.
+
+
+
+  }
+  \item{spar, maxspar}{
+    \code{spar} is a vector of smoothing parameters.
+    Negative values mean that \code{\link[mgcv]{magic}} will
+    choose initial values in order to do the optimization at
+    each P-IRLS iteration.
+    Positive values mean that they are used as initial values
+    for \code{\link[mgcv]{magic}}.
+    If \code{fixspar = TRUE} then \code{spar} should be assigned
+    a vector of positive values (but having values
+    less than \code{maxspar});
+    then the smoothing parameters will
+    be fixed and \code{\link[mgcv]{magic}} will not be used.
+
+
+
+%    non-negative regularization parameters for difference penalty,
+%    whose values should be less than \code{maxspar}.
+%    Can be a vector.   %  zz.
+
+
+  }
+% \item{degree}{
+%   degree of B-spline basis.
+%   Currently only the value 3 is implemented.
+%   In the future one should usually assign 2 or 3; and
+%   the values 1 or 4 might possibly be recommended.
+
+
+%    zz--this argument may be unneeded.
+
+
+% }
+  \item{o.order}{
+    The order of the O'Sullivan penalized spline.
+    Any one value from \code{1:4} is acceptable.
+    The degree of the spline is \code{2 * o.order - 1},
+    so that cubic splines are the default.
+    Setting \code{o.order = 1} results in a linear
+    spline which is a piecewise linear function.
+
+
+%   (p.191 ANZJS).
+
+
+
+  }
+  \item{ridge.adj}{
+    small positive number to stabilize
+    linear dependencies among B-spline bases.
+
+
+  }
+  \item{spillover}{
+    small and positive proportion of the range used on
+    the outside of the boundary values.
+    This defines the endpoints \eqn{a} and \eqn{b} that
+    cover the data \eqn{x_i}, i.e., we are interested
+    in the interval \eqn{[a,b]} which contains all the
+    abscissae. The interior knots are strictly
+    inside \eqn{(a,b)}.
+
+
+% Untrue, see ANZJS.
+%   Set \code{spillover = 0} to obtain the natural boundary conditions
+%   (NBCs), hence a fit based on natural splines.
+
+
+  }
+  \item{outer.ok}{
+    Fed into the argument (by the same name)
+    of \code{\link[splines]{splineDesign}}.
+
+
+  }
+  \item{fixspar}{
+    logical.
+    If \code{TRUE} then \code{spar} should be a vector
+    with positive values and
+    the smoothing parameters are fixed at those values.
+    If \code{FALSE} then \code{spar} contains the initial
+    values for the smoothing parameters, and
+    \code{\link[mgcv]{magic}} is called to determine (hopefully)
+    some good values for
+    the smoothing parameters.
+
+
+  }
+}
+\details{
+  This function is currently used by \code{\link{vgam}} to
+  allow automatic smoothing parameter selection based on
+  O-splines to minimize a UBRE quantity.
+  In contrast, \code{\link{s}} operates by having a
+  prespecified amount of smoothing, e.g., its \code{df} argument.
+  When the sample size is reasonably large
+  this function
+  is recommended over \code{\link{s}} also because backfitting
+  is not required.
+  This function therefore allows 2nd-generation VGAMs to be
+  fitted (called G2-VGAMs, or Penalized-VGAMs).
+
+
+
+%  A similar function is \code{\link{s}} which has a prespecified
+%  amount of smoothing.
+
+
+
+  This function should only be used with \code{\link{vgam}}.
+  This function uses \code{\link[stats]{quantile}} to
+  choose the knots, whereas \code{\link{sm.ps}}
+  chooses equally-spaced knots.
+  As Wand and Ormerod (2008) write,
+  in most situations the differences will be minor,
+  but it is possible for problems to arise
+  for either strategy by
+  constructing certain regression functions and
+  predictor variable distributions.
+  Any differences between O-splines and P-splines tend
+  to be at the boundaries. O-splines have
+  \emph{natural boundary constraints} so that the solution is
+  linear beyond the boundary knots.
+
+
+
+  Some arguments in decreasing order of precedence are:
+  \code{all.knots},
+  \code{niknots},
+  \code{alg.niknots}.
+
+
+
+  Unlike \code{\link[VGAM]{s}}, which is symbolic and does not perform
+  any smoothing itself, this function does compute the penalized spline
+  when used by \code{\link{vgam}}---it creates the appropriate columns
+  of the model matrix.  When this function is used within
+  \code{\link{vgam}}, automatic smoothing parameter selection is
+  implemented by calling \code{\link[mgcv]{magic}} after the necessary
+  link-ups are done.
+
+
+
+  By default this function centres the component function.
+  This function is also \emph{smart}; it can be used for
+  smart prediction (Section 18.6 of Yee (2015)).
+  Automatic smoothing parameter selection is performed using
+  \emph{performance-oriented iteration} whereby an optimization
+  problem is solved at each IRLS iteration.
+% Occasionally there are convergence problems for this.
+
+
+
+
+%  Eventually, in most cases, both model parameter estimates and
+%     smoothing parameter estimates converge.
+
+
+
+  This function works better when the sample size is large,
+  e.g., when in the hundreds, say.
+
+
+
+
+
+
+% Also, if \eqn{n} is the number of \emph{distinct} abscissae, then
+% \code{sm.os} will fail if \eqn{n < 7}.
+
+
+
+% Unlike \code{\link[VGAM]{s}}, which is symbolic and does not perform
+% any smoothing itself, this function does compute the penalized spline
+% when used by \code{\link{vgam}}---it creates the appropriate columns
+% of the model matrix.  When this function is used within
+% \code{\link{vgam}}, automatic smoothing parameter selection is
+% implemented by calling \code{\link[mgcv]{magic}} after the necessary
+% link-ups are done.
+
+
+
+% By default this function centres every component function.
+% This function is also \emph{smart}; it can be used for smart prediction
+% (Section 18.6 of Yee (2015)).
+% Automatic smoothing parameter selection is performed using
+% \emph{performance-oriented iteration} whereby an optimization
+% problem is solved at each IRLS iteration.
+% Occasionally there are convergence problems for this.
+
+
+
+
+%  Eventually, in most cases, both model parameter estimates and
+%     smoothing parameter estimates converge.
+
+
+
+
+}
+\value{
+  A matrix with attributes that are (only) used by \code{\link{vgam}}.
+  The number of rows of the matrix is \code{length(x)}.
+  The number of columns is a function of the number
+  of interior knots \eqn{K} and
+  the order of the O-spline \eqn{m}:
+  \eqn{K+2m-1}.
+  In code, this is
+  \code{niknots + 2 * o.order - 1}.
+
+
+% The \eqn{-1} is because of the centring.
+
+
+}
+\references{
+
+
+Wand, M. P. and Ormerod, J. T. (2008).
+On semiparametric regression with O'Sullivan penalized splines.
+\emph{Australian and New Zealand Journal of Statistics},
+\bold{50}(2): 179--198.
+
+
+
+%Wood, S. N. (2004).
+%Stable and efficient multiple smoothing parameter estimation
+%for generalized additive models.
+%\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686.
+
+
+
+%Yee, T. W. (2016).
+%Comments on ``Smoothing parameter and model selection for
+%general smooth models''
+%by Wood, S. N. and Pya, N. and Safken, N.,
+%\emph{J. Amer. Statist. Assoc.}, \bold{110}(516).
+
+
+
+
+}
+\author{
+  T. W. Yee,
+  with some of the essential R code coming
+  from the appendix of Wand and Ormerod (2008).
+
+
+
+}
+\note{
+  This function is currently under development and
+  may change in the future.
+
+
+
+  One might try using this function with \code{\link{vglm}}
+  so as to fit a regression spline,
+  however, the default value of \code{niknots} will probably
+  be too high for most data sets.
+
+
+
+%  In particular, the default for \code{ps.int} is
+%  subject to change.
+
+
+}
+
+% ~Make other sections like WARNING with \section{WARNING }{....} ~
+\section{Warning }{
+  Being introduced into \pkg{VGAM} for the first time,
+  this function (and those associated with it) should
+  be used cautiously. Not all options are fully
+  working or have been tested yet,
+  and there are bound to be some bugs
+  lurking around.
+
+
+
+}
+\seealso{
+  \code{\link{vgam}},
+  \code{\link{sm.ps}},
+  \code{\link{s}},
+  \code{\link{smartpred}},
+  \code{\link{is.smart}},
+  \code{\link{summarypvgam}},
+  \code{\link[stats]{smooth.spline}},
+  \code{\link[splines]{splineDesign}},
+  \code{\link[splines]{bs}},
+  \code{\link[mgcv]{magic}}.
+
+
+
+}
+
+\examples{
+sm.os(runif(20))
+
+\dontrun{
+data("TravelMode", package = "AER")  # Need to install "AER" first
+air.df <- subset(TravelMode, mode == "air")  # Form 4 smaller data frames
+bus.df <- subset(TravelMode, mode == "bus")
+trn.df <- subset(TravelMode, mode == "train")
+car.df <- subset(TravelMode, mode == "car")
+TravelMode2 <- data.frame(income     = air.df$income,
+                          wait.air   = air.df$wait  - car.df$wait,
+                          wait.trn   = trn.df$wait  - car.df$wait,
+                          wait.bus   = bus.df$wait  - car.df$wait,
+                          gcost.air  = air.df$gcost - car.df$gcost,
+                          gcost.trn  = trn.df$gcost - car.df$gcost,
+                          gcost.bus  = bus.df$gcost - car.df$gcost,
+                          wait       = air.df$wait)  # Value is unimportant
+TravelMode2$mode <- subset(TravelMode, choice == "yes")$mode  # The response
+TravelMode2 <- transform(TravelMode2, incom.air = income, incom.trn = 0,
+                                      incom.bus = 0)
+set.seed(1)
+TravelMode2 <- transform(TravelMode2,
+                         junkx2 = runif(nrow(TravelMode2)))
+
+tfit2 <-
+  vgam(mode ~ sm.os(gcost.air, gcost.trn, gcost.bus) + ns(junkx2, 4) +
+              sm.os(incom.air, incom.trn, incom.bus) + wait ,
+       crit = "coef",
+       multinomial(parallel = FALSE ~ 1), data = TravelMode2,
+       xij = list(sm.os(gcost.air, gcost.trn, gcost.bus) ~
+                  sm.os(gcost.air, gcost.trn, gcost.bus) +
+                  sm.os(gcost.trn, gcost.bus, gcost.air) +
+                  sm.os(gcost.bus, gcost.air, gcost.trn),
+                  sm.os(incom.air, incom.trn, incom.bus) ~
+                  sm.os(incom.air, incom.trn, incom.bus) +
+                  sm.os(incom.trn, incom.bus, incom.air) +
+                  sm.os(incom.bus, incom.air, incom.trn),
+                  wait   ~  wait.air +  wait.trn +  wait.bus),
+       form2 = ~  sm.os(gcost.air, gcost.trn, gcost.bus) +
+                  sm.os(gcost.trn, gcost.bus, gcost.air) +
+                  sm.os(gcost.bus, gcost.air, gcost.trn) +
+                  wait +
+                  sm.os(incom.air, incom.trn, incom.bus) +
+                  sm.os(incom.trn, incom.bus, incom.air) +
+                  sm.os(incom.bus, incom.air, incom.trn) +
+                  junkx2 + ns(junkx2, 4) +
+                  incom.air + incom.trn + incom.bus +
+                  gcost.air + gcost.trn + gcost.bus +
+                  wait.air +  wait.trn +  wait.bus)
+par(mfrow = c(2, 2))
+plot(tfit2, se = TRUE, lcol = "orange", scol = "blue", ylim = c(-4, 4))
+summary(tfit2)
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
+
+%            binom2.or(exchangeable = TRUE ~ s(x2, 3))
+
+
+
+
+
+
+
+
+
+
+
diff --git a/man/sm.ps.Rd b/man/sm.ps.Rd
new file mode 100644
index 0000000..a315e46
--- /dev/null
+++ b/man/sm.ps.Rd
@@ -0,0 +1,269 @@
+\name{sm.ps}
+\alias{sm.ps}
+%- Also NEED an `\alias' for EACH other topic documented here.
+\title{ Defining Penalized Spline Smooths in VGAM Formulas }
+\description{
+  This function represents a P-spline smooth term
+  in a \code{vgam} formula
+  and confers automatic smoothing parameter selection.
+
+
+
+}
+\usage{
+sm.ps(x, ..., ps.int = NULL, spar = -1, degree = 3, p.order = 2,
+      ridge.adj = 1e-5, spillover = 0.01, maxspar = 1e12,
+      outer.ok = FALSE, mux = NULL, fixspar = FALSE)
+}
+%- maybe also `usage' for other objects documented here.
+\arguments{
+
+  \item{x, \dots}{
+    See \code{\link{sm.os}}.
+
+
+%   Currently at least 7 unique \code{x} values are needed.
+
+
+  }
+
+
+  \item{ps.int}{
+    the number of equally-spaced B-spline intervals.
+    Note that the number of knots is equal to
+    \code{ps.int + 2*degree + 1}.
+    The default, signified by \code{NULL}, means that the
+    maximum of the value 7 and \code{degree} is chosen.
+    This usually means 6 interior knots for big data sets.
+    However, if this is too high compared to the
+    length of \code{x}, then some adjustment is made.
+    In the case where \code{mux} is assigned a numerical
+    value (suggestions: some value between 1 and 2)
+    then
+    \code{ceiling(mux * log(length(unique(x.index))))}
+    is used, where \code{x.index} is the combined data.
+    No matter what, the above
+    is not guaranteed to work on every data set.
+    This argument may change in the future.
+    See also argument \code{mux}.
+
+
+
+
+% 20160805; correct:    Note that the number of knots is equal to
+% \code{ps.int + 2*degree + 1}.  Its called Aknots.
+
+
+
+% 20160801:
+%   \code{ceiling(2.5 * log1p(length(unique(x.index)))) + 3}
+
+
+% Prior to 20160801:
+%   The default, signified by \code{NULL}, means that
+%   \code{ceiling(1.5 * log(length(unique(x.index))))}
+
+
+
+  }
+  \item{spar, maxspar}{
+    See \code{\link{sm.os}}.
+
+
+  }
+  \item{mux}{
+    numeric. If given, then this argument multiplies
+    \code{log(length(unique(x)))}
+    to obtain \code{ps.int}.
+    If \code{ps.int} is given then this argument is ignored.
+
+
+  }
+  \item{degree}{
+    degree of B-spline basis. Usually this will be 2 or 3; and
+    the values 1 or 4 might possibly be used.
+
+
+  }
+  \item{p.order}{
+    order of difference penalty (0 is the ridge penalty).
+
+
+  }
+  \item{ridge.adj, spillover}{
+    See \code{\link{sm.os}}.
+
+
+%   however, setting this argument equal to 0 does not result in
+%   the natural boundary conditions (NBCs).
+
+
+
+  }
+  \item{outer.ok, fixspar}{
+    See \code{\link{sm.os}}.
+
+
+
+  }
+
+}
+\details{
+  This function can be used by \code{\link{vgam}} to
+  allow automatic smoothing parameter selection based on
+  P-splines and minimizing a UBRE quantity.
+
+
+
+%  For large sample sizes (\eqn{> 500}, say)
+
+
+% Also, if \eqn{n} is the number of \emph{distinct} abscissae, then
+% \code{sm.ps} will fail if \eqn{n < 7}.
+
+
+
+  This function should only be used with \code{\link{vgam}}
+  and is an alternative to \code{\link{sm.os}};
+  see that function for some details that also apply here.
+
+
+
+
+
+}
+\value{
+  A matrix with attributes that are (only) used by \code{\link{vgam}}.
+  The number of rows of the matrix is \code{length(x)} and
+  the number of columns is \code{ps.int + degree - 1}.
+  The latter is because the function is centred.
+
+
+}
+\references{
+
+
+%Eilers, P. H. C. and Marx, B. D. (2002).
+%Generalized Linear Additive Smooth Structures.
+%\emph{Journal of Computational and Graphical Statistics},
+%\bold{11}(4): 758--783.
+
+
+
+%Marx, B. D. and Eilers, P. H. C. (1998).
+%Direct generalized linear modeling
+%with penalized likelihood.
+%\emph{CSDA}, \bold{28}(2): 193--209.
+
+
+
+Eilers, P. H. C. and Marx, B. D. (1996).
+Flexible smoothing with B-splines
+and penalties (with comments and rejoinder).
+\emph{Statistical Science}, \bold{11}(2): 89--121.
+
+
+
+}
+\author{
+  B. D. Marx wrote the original function.
+  Subsequent edits were made by T. W. Yee and C. Somchit.
+
+
+
+}
+\note{
+  This function is currently under development and
+  may change in the future.
+  In particular, the default for \code{ps.int} is
+  subject to change.
+
+
+}
+
+% ~Make other sections like WARNING with \section{WARNING }{....} ~
+\section{Warning }{
+  See \code{\link{sm.os}}.
+
+
+}
+
+
+\seealso{
+  \code{\link{sm.os}},
+  \code{\link{s}},
+  \code{\link{vgam}},
+  \code{\link{smartpred}},
+  \code{\link{is.smart}},
+  \code{\link{summarypvgam}},
+  \code{\link[splines]{splineDesign}},
+  \code{\link[splines]{bs}},
+  \code{\link[mgcv]{magic}}.
+
+
+
+}
+
+\examples{
+sm.ps(runif(20))
+sm.ps(runif(20), ps.int = 5)
+
+\dontrun{
+data("TravelMode", package = "AER")  # Need to install "AER" first
+air.df <- subset(TravelMode, mode == "air")  # Form 4 smaller data frames
+bus.df <- subset(TravelMode, mode == "bus")
+trn.df <- subset(TravelMode, mode == "train")
+car.df <- subset(TravelMode, mode == "car")
+TravelMode2 <- data.frame(income     = air.df$income,
+                          wait.air   = air.df$wait  - car.df$wait,
+                          wait.trn   = trn.df$wait  - car.df$wait,
+                          wait.bus   = bus.df$wait  - car.df$wait,
+                          gcost.air  = air.df$gcost - car.df$gcost,
+                          gcost.trn  = trn.df$gcost - car.df$gcost,
+                          gcost.bus  = bus.df$gcost - car.df$gcost,
+                          wait       = air.df$wait)  # Value is unimportant
+TravelMode2$mode <- subset(TravelMode, choice == "yes")$mode  # The response
+TravelMode2 <- transform(TravelMode2, incom.air = income, incom.trn = 0,
+                                      incom.bus = 0)
+set.seed(1)
+TravelMode2 <- transform(TravelMode2,
+                         junkx2 = runif(nrow(TravelMode2)))
+
+tfit2 <-
+  vgam(mode ~ sm.ps(gcost.air, gcost.trn, gcost.bus) + ns(junkx2, 4) +
+              sm.ps(incom.air, incom.trn, incom.bus) + wait ,
+       crit = "coef",
+       multinomial(parallel = FALSE ~ 1), data = TravelMode2,
+       xij = list(sm.ps(gcost.air, gcost.trn, gcost.bus) ~
+                  sm.ps(gcost.air, gcost.trn, gcost.bus) +
+                  sm.ps(gcost.trn, gcost.bus, gcost.air) +
+                  sm.ps(gcost.bus, gcost.air, gcost.trn),
+                  sm.ps(incom.air, incom.trn, incom.bus) ~
+                  sm.ps(incom.air, incom.trn, incom.bus) +
+                  sm.ps(incom.trn, incom.bus, incom.air) +
+                  sm.ps(incom.bus, incom.air, incom.trn),
+                  wait   ~  wait.air +  wait.trn +  wait.bus),
+       form2 = ~  sm.ps(gcost.air, gcost.trn, gcost.bus) +
+                  sm.ps(gcost.trn, gcost.bus, gcost.air) +
+                  sm.ps(gcost.bus, gcost.air, gcost.trn) +
+                  wait +
+                  sm.ps(incom.air, incom.trn, incom.bus) +
+                  sm.ps(incom.trn, incom.bus, incom.air) +
+                  sm.ps(incom.bus, incom.air, incom.trn) +
+                  junkx2 + ns(junkx2, 4) +
+                  incom.air + incom.trn + incom.bus +
+                  gcost.air + gcost.trn + gcost.bus +
+                  wait.air +  wait.trn +  wait.bus)
+par(mfrow = c(2, 2))
+plot(tfit2, se = TRUE, lcol = "orange", scol = "blue", ylim = c(-4, 4))
+summary(tfit2)
+}
+}
+\keyword{models}
+\keyword{regression}
+\keyword{smooth}
+
+%            binom2.or(exchangeable = TRUE ~ s(x2, 3))
+
+
+
diff --git a/man/smartpred.Rd b/man/smartpred.Rd
index 7b66e2a..74886e0 100644
--- a/man/smartpred.Rd
+++ b/man/smartpred.Rd
@@ -180,6 +180,8 @@ sm.scale(x, center = TRUE, scale = TRUE)
   \code{\link{smart.mode.is}},
   \code{\link{setup.smart}},
   \code{\link{wrapup.smart}}.
+  For \code{\link[VGAM]{vgam}} in \pkg{VGAM},
+  \code{\link[VGAM]{sm.ps}} is important.
   Commonly used data-dependent functions include
   \code{\link[base]{scale}}, 
   \code{\link[stats]{poly}}, 
diff --git a/man/sratio.Rd b/man/sratio.Rd
index b666eaf..d52af23 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -15,7 +15,7 @@ sratio(link = "logit", parallel = FALSE, reverse = FALSE,
 
   \item{link}{
     Link function applied to the \eqn{M}
-    stopping ratio probabilities. 
+    stopping ratio probabilities.
     See \code{\link{Links}} for more choices.
 
   }
@@ -57,7 +57,7 @@ sratio(link = "logit", parallel = FALSE, reverse = FALSE,
   There are a number of definitions for the \emph{continuation ratio}
   in the literature. To make life easier, in the \pkg{VGAM} package,
   we use \emph{continuation} ratios (see \code{\link{cratio}})
-  and \emph{stopping} ratios. 
+  and \emph{stopping} ratios.
   Continuation ratios deal with quantities such as
   \code{logit(P[Y>j|Y>=j])}.
 
diff --git a/man/studentt.Rd b/man/studentt.Rd
index 55db46a..55d012a 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -20,7 +20,7 @@ studentt3(llocation = "identitylink", lscale = "loge", ldf = "loglog",
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{llocation, lscale, ldf}{
-  Parameter link functions for each parameter, 
+  Parameter link functions for each parameter,
   e.g., for degrees of freedom \eqn{\nu}{nu}.
   See \code{\link{Links}} for more choices.
   The defaults ensures the parameters are in range.
@@ -68,11 +68,11 @@ studentt3(llocation = "identitylink", lscale = "loge", ldf = "loglog",
   Then \eqn{E(Y)=0} if \eqn{\nu>1}{nu>1} (returned as the fitted values),
   and \eqn{Var(Y)= \nu/(\nu-2)}{Var(Y)= nu/(nu-2)}
   for \eqn{\nu > 2}{nu > 2}.
-  When \eqn{\nu=1}{nu=1} then the Student \eqn{t}-distribution 
+  When \eqn{\nu=1}{nu=1} then the Student \eqn{t}-distribution
   corresponds to the standard Cauchy distribution,
   \code{\link{cauchy1}}.
   When \eqn{\nu=2}{nu=2} with a scale parameter of \code{sqrt(2)} then
-  the Student \eqn{t}-distribution 
+  the Student \eqn{t}-distribution
   corresponds to the standard (Koenker) distribution,
   \code{\link{sc.studentt2}}.
   The degrees of freedom can be treated as a parameter to be estimated,
diff --git a/man/summarypvgam.Rd b/man/summarypvgam.Rd
new file mode 100644
index 0000000..848d7f2
--- /dev/null
+++ b/man/summarypvgam.Rd
@@ -0,0 +1,108 @@
+% 20160804; Adapted from summary.vgam.Rd
+
+
+
+\name{summarypvgam}
+\alias{summarypvgam}
+\alias{show.summary.pvgam}
+\title{Summarizing Penalized Vector Generalized Additive Model Fits}
+\usage{
+summarypvgam(object, dispersion = NULL, digits = options()$digits - 2,
+            presid = TRUE)
+\method{show}{summary.pvgam}(x, quote = TRUE, prefix = "", digits = options()$digits -
+    2, signif.stars = getOption("show.signif.stars"))
+}
+\arguments{
+  \item{object}{an object of class \code{"pvgam"},
+    which is the result of a
+    call to \code{\link{vgam}} with at least one
+    \code{\link{sm.os}} or
+    \code{\link{sm.ps}} term.
+
+
+  }
+  \item{x}{an object of class \code{"summary.pvgam"},
+    which is the result of a call to \code{summarypvgam()}.
+
+
+  }
+  \item{dispersion, digits, presid}{
+    See \code{\link{summaryvglm}}.
+
+
+  }
+  \item{quote, prefix, signif.stars}{
+    See \code{\link{summaryvglm}}.
+
+  }
+}
+\description{
+  These functions are all \code{\link{methods}} for class \code{"pvgam"} or
+  \code{summary.pvgam} objects.
+
+
+
+}
+\details{
+  This methods function reports a summary more similar to
+  \code{\link[mgcv]{summary.gam}} from \pkg{mgcv} than
+  \code{summary.gam()} from \pkg{gam}.
+  It applies to G2-VGAMs using
+  \code{\link{sm.os}} and O-splines, else
+  \code{\link{sm.ps}} and P-splines.
+  In particular, the hypothesis test for whether each
+  \code{\link{sm.os}} or
+  \code{\link{sm.ps}}
+  term can be deleted follows quite closely to
+  \code{\link[mgcv]{summary.gam}}.
+  The p-values from this type of test tend to be biased downwards (too
+  small)
+  and corresponds to \code{p.type = 5}.
+  It is hoped in the short future that improved p-values be implemented,
+  somewhat like the default of
+  \code{\link[mgcv]{summary.gam}}.
+  This methods function was adapted from
+  \code{\link[mgcv]{summary.gam}}.
+
+
+}
+\value{
+  \code{summarypvgam} returns an object of class \code{"summary.pvgam"};
+  see \code{\link{summary.pvgam-class}}.
+
+
+}
+\section{Warning }{
+  See \code{\link{sm.os}}.
+
+
+}
+\seealso{
+  \code{\link{vgam}},
+  \code{\link{summaryvgam}},
+  \code{\link{summary.pvgam-class}},
+  \code{\link{sm.os}},
+  \code{\link{sm.ps}},
+  \code{\link[stats]{summary.glm}},
+  \code{\link[stats]{summary.lm}},
+  \code{\link[mgcv]{summary.gam}} from \pkg{mgcv}, % A core R package
+  \code{\link{summaryvgam}} for G1-VGAMs.
+% \code{\link[gam]{summary.gam}}.  % May not be installed.
+
+
+
+}
+\examples{
+hfit2 <- vgam(agaaus ~ sm.os(altitude), binomialff, data = hunua)
+coef(hfit2, matrix = TRUE)
+summary(hfit2)
+}
+\keyword{models}
+\keyword{regression}
+
+
+
+% summary(hfit2)@post$s.table  # For sm.ps() terms.
+
+
+
diff --git a/man/summaryvgam.Rd b/man/summaryvgam.Rd
new file mode 100644
index 0000000..83bf18e
--- /dev/null
+++ b/man/summaryvgam.Rd
@@ -0,0 +1,86 @@
+% 20160804; Adapted from summary.vglm.Rd
+
+
+
+\name{summaryvgam}
+\alias{summaryvgam}
+\alias{show.summary.vgam}
+\title{Summarizing Vector Generalized Additive Model Fits}
+\usage{
+summaryvgam(object, dispersion = NULL, digits = options()$digits - 2,
+            presid = TRUE, nopredictors = FALSE)
+\method{show}{summary.vgam}(x, quote = TRUE, prefix = "",
+                            digits = options()$digits-2, nopredictors = NULL)
+}
+\arguments{
+  \item{object}{an object of class \code{"vgam"},
+    which is the result of a
+    call to \code{\link{vgam}} with at least one \code{\link[VGAM]{s}} term.
+
+
+  }
+  \item{x}{an object of class \code{"summary.vgam"},
+    which is the result of a call to \code{summaryvgam()}.
+
+
+  }
+  \item{dispersion, digits, presid}{
+    See \code{\link{summaryvglm}}.
+
+
+  }
+  \item{quote, prefix, nopredictors}{
+    See \code{\link{summaryvglm}}.
+
+  }
+}
+\description{
+  These functions are all \code{\link{methods}} for class \code{vgam} or
+  \code{summary.vgam} objects.
+
+
+}
+\details{
+  This methods function reports a summary more similar to
+  \code{summary.gam()} from \pkg{gam} than
+  \code{\link[mgcv]{summary.gam}} from \pkg{mgcv}.
+  It applies to G1-VGAMs using \code{\link{s}} and vector backfitting.
+  In particular, an approximate score test for \emph{linearity} is conducted
+  for each \code{\link{s}} term---see Section 4.3.4 of Yee (2015) for details.
+  The p-values from this type of test tend to be biased upwards (too large).
+
+
+
+
+}
+\value{
+  \code{summaryvgam} returns an object of class \code{"summary.vgam"};
+  see \code{\link{summary.vgam-class}}.
+
+
+}
+\seealso{
+  \code{\link{vgam}},
+  \code{\link[stats]{summary.glm}},
+  \code{\link[stats]{summary.lm}},
+  \code{\link[mgcv]{summary.gam}} from \pkg{mgcv},  % A core R package
+  \code{\link{summarypvgam}} for P-VGAMs.
+% \code{\link[gam]{summary.gam}}.  % May not be installed.
+
+
+
+}
+\examples{
+hfit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
+summary(hfit)
+summary(hfit)@anova  # Table for (approximate) testing of linearity
+}
+\keyword{models}
+\keyword{regression}
+
+
+
+% summary(hfit)@post$s.table  # For sm.ps() terms.
+
+
+
diff --git a/man/summaryvglm.Rd b/man/summaryvglm.Rd
index a9444e4..ceff464 100644
--- a/man/summaryvglm.Rd
+++ b/man/summaryvglm.Rd
@@ -15,7 +15,8 @@ summaryvglm(object, correlation = FALSE,
             nopredictors = FALSE, ...)
 \method{show}{summary.vglm}(x, digits = max(3L, getOption("digits") - 3L),
            quote = TRUE, prefix = "", presid = TRUE,
-           signif.stars = NULL, nopredictors = NULL, ...)
+           signif.stars = NULL, nopredictors = NULL,
+           top.half.only = FALSE, ...)
 }
 \arguments{
   \item{object}{an object of class \code{"vglm"}, usually, a result of a
@@ -37,9 +38,18 @@ summaryvglm(object, correlation = FALSE,
                 of these?  }
   \item{quote}{ Fed into \code{print()}. }
   \item{nopredictors}{ logical;
-                       if \code{TRUE} the names of the linear predictors
-                       are not printed out.
-                       The default is that they are. }
+    if \code{TRUE} the names of the linear predictors
+    are not printed out.
+    The default is that they are.
+
+
+  }
+  \item{top.half.only}{
+  logical; if \code{TRUE} then only print out the top half of the usual output.
+  Used for P-VGAMs.
+
+
+  }
   \item{prefix}{ Not used. }
   \item{\ldots}{ Not used. }
 
@@ -57,12 +67,12 @@ summaryvglm(object, correlation = FALSE,
   \sQuote{significance stars} if \code{signif.stars} is \code{TRUE}.
   The \code{coefficients} component of the result gives the estimated
   coefficients and their estimated standard errors, together with their
-  ratio. 
+  ratio.
   This third column is labelled \code{z value} regardless of
   whether the
   dispersion is estimated or known
   (or fixed by the family).  A fourth column gives the two-tailed
-  p-value corresponding to the z ratio based on a 
+  p-value corresponding to the z ratio based on a
   Normal reference distribution.
 % (It is possible that the dispersion is
 % not known and there are no residual degrees of freedom from which to
@@ -77,7 +87,7 @@ distribution is used.
 % Aliased coefficients are omitted in the returned object but restored
 % by the \code{print} method.
 
-  
+
   Correlations are printed to two decimal places (or symbolically): to
   see the actual correlations print \code{summary(object)@correlation}
   directly.
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 6280cf3..b558560 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -117,7 +117,7 @@ tikuv(d, lmean = "identitylink", lsigma = "loge", isigma = NULL,
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{dtikuv}},
   \code{\link{uninormal}}.
 
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
index ad1308e..10c2d13 100644
--- a/man/tikuvUC.Rd
+++ b/man/tikuvUC.Rd
@@ -7,7 +7,7 @@
 \title{A Short-tailed Symmetric Distribution }
 \description{
   Density, cumulative distribution function, quantile function and
-  random generation for 
+  random generation for
   the short-tailed symmetric distribution of Tiku and Vaughan (1999).
 
 
diff --git a/man/tobit.Rd b/man/tobit.Rd
index 679ee69..a91e41a 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -8,7 +8,7 @@
 }
 \usage{
 tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
-      imu = NULL, isd = NULL, 
+      imu = NULL, isd = NULL,
       type.fitted = c("uncensored", "censored", "mean.obs"),
       byrow.arg = FALSE, imethod = 1, zero = "sd")
 }
@@ -25,7 +25,7 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
 \arguments{
   \item{Lower}{
   Numeric. It is the value \eqn{L} described below.
-  Any value of the linear model 
+  Any value of the linear model
   \eqn{x_i^T \beta}{x_i^T beta} that
   is less than this lowerbound is assigned this value.
   Hence this should be the smallest possible value in the response
@@ -35,8 +35,8 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
 
   }
   \item{Upper}{
-  Numeric. It is the value \eqn{U} described below. 
-  Any value of the linear model 
+  Numeric. It is the value \eqn{U} described below.
+  Any value of the linear model
   \eqn{x_i^T \beta}{x_i^T beta} that
   is greater than this upperbound is assigned this value.
   Hence this should be the largest possible value in the response
@@ -48,7 +48,7 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   \item{lmu, lsd}{
   Parameter link functions for the mean and standard deviation parameters.
   See \code{\link{Links}} for more choices.
-  The standard deviation is a positive quantity, therefore a log link 
+  The standard deviation is a positive quantity, therefore a log link
   is its default.
 
 
@@ -100,7 +100,7 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   However, we measure \eqn{y_i = y_i^*} only if \eqn{y_i^* > L} and
   \eqn{y_i^* < U} for some
   cutpoints \eqn{L} and \eqn{U}. Otherwise we let \eqn{y_i=L} or
-  \eqn{y_i=U}, whatever is closer. 
+  \eqn{y_i=U}, whatever is closer.
   The Tobit model is thus a multiple linear regression but with censored
   responses if it is below or above certain cutpoints.
 
@@ -130,7 +130,7 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   Currently, if any \code{y < Lower} or \code{y > Upper} then
   a warning is issued.
 
- 
+
 }
 
 \value{
@@ -212,7 +212,7 @@ meanfun2 <- function(x) 2 + 2*x
 meanfun3 <- function(x) 2 + 2*x
 meanfun4 <- function(x) 3 + 2*x
 tdata <- transform(tdata,
-  y1 = rtobit(nn, mean = meanfun1(x2)),  # Standard Tobit model 
+  y1 = rtobit(nn, mean = meanfun1(x2)),  # Standard Tobit model
   y2 = rtobit(nn, mean = meanfun2(x2), Lower = Lower, Upper = Upper),
   y3 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec),
   y4 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec))
diff --git a/man/tobitUC.Rd b/man/tobitUC.Rd
index ea2e6a8..40f5869 100644
--- a/man/tobitUC.Rd
+++ b/man/tobitUC.Rd
@@ -26,7 +26,7 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
 
   }
   \item{Lower, Upper}{vector of lower and upper
-  thresholds. 
+  thresholds.
 
   }
   \item{mean, sd, lower.tail, log, log.p}{
@@ -46,7 +46,7 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
 \author{ T. W. Yee }
 \details{
   See \code{\link{tobit}}, the \pkg{VGAM} family function
-  for estimating the parameters, 
+  for estimating the parameters,
   for details.
   Note that the density at \code{Lower} and \code{Upper} is the
   the area to the left and right of those points.
@@ -54,7 +54,7 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
   Consequently, \code{dtobit(Lower) + dtobit(Upper) + } the area
   in between equals unity.
 
-  
+
 
 
 % 20141223; this is old:
@@ -83,7 +83,7 @@ integrate(dtobit, lower = Lower, upper = Upper,
 dtobit(Lower, mean = mu, Lower = Lower, Upper = Upper) +
 dtobit(Upper, mean = mu, Lower = Lower, Upper = Upper)  # Adds to unity
 
-\dontrun{ 
+\dontrun{
 plot(x, ptobit(x, m = mu, Lower = Lower, Upper = Upper),
      type = "l", ylim = 0:1, las = 1, col = "orange",
      ylab = paste("ptobit(m = ", mu, ", sd = 1, Lower =", Lower,
diff --git a/man/topple.Rd b/man/topple.Rd
new file mode 100644
index 0000000..28c4a97
--- /dev/null
+++ b/man/topple.Rd
@@ -0,0 +1,73 @@
+\name{topple}
+\alias{topple}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Topp-Leone Distribution Family Function }
+\description{
+ Estimating the parameter of the Topp-Leone distribution by
+ maximum likelihood estimation.
+
+}
+\usage{
+topple(lshape = "logit", zero = NULL, gshape = ppoints(8))
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lshape, zero, gshape}{
+  More information is at \code{\link{CommonVGAMffArguments}}.
+
+
+  }
+}
+\details{
+  The Topple distribution
+  has a probability density function that can be written
+  \deqn{f(y;s) = 2 s (1 - y) [y (2-y)]^{s-1}}{%
+        f(y;s) = 2 * s * (1 - y) * (y * (2-y))^(s-1)}
+  for \eqn{0<y<1} and shape parameter \eqn{0<s<1}.
+  The mean of \eqn{Y} is
+  \eqn{1 - 4^s [\Gamma(1+s)]^2 / \Gamma(2 + 2s)}{1 - 4^s [Gamma(1+s)]^2 / Gamma(2 + 2s)}
+  (returned as the fitted values).
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+}
+\references{
+  Topp, C. W. and F. C. Leone (1955)
+  A family of J-shaped frequency functions.
+  \emph{Journal of the American Statistical Association},
+  \bold{50}, 209--219.
+
+
+
+}
+\author{ T. W. Yee }
+\note{
+  Fisher-scoring and Newton-Raphson are the same here.
+  A related distribution is the triangle distribution.
+  This \pkg{VGAM} family function handles multiple responses.
+
+
+
+}
+
+\seealso{
+  \code{\link{Topple}},
+  \code{\link{Triangle}}.
+
+
+
+}
+\examples{
+tdata <- data.frame(y = rtopple(1000, shape = logit(1, inverse = TRUE)))
+tfit <- vglm(y ~ 1, topple, data = tdata, trace = TRUE, crit = "coef")
+coef(tfit, matrix = TRUE)
+Coef(tfit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/toppleUC.Rd b/man/toppleUC.Rd
new file mode 100644
index 0000000..0f3d472
--- /dev/null
+++ b/man/toppleUC.Rd
@@ -0,0 +1,96 @@
+\name{Topple}
+\alias{Topple}
+\alias{dtopple}
+\alias{ptopple}
+\alias{qtopple}
+\alias{rtopple}
+\title{The Topp-Leone Distribution}
+\description{
+  Density,
+  distribution function,
+  quantile function and random generation
+  for the Topp-Leone distribution.
+
+}
+\usage{
+dtopple(x, shape, log = FALSE)
+ptopple(q, shape, lower.tail = TRUE, log.p = FALSE)
+qtopple(p, shape)
+rtopple(n, shape)
+}
+\arguments{
+  \item{x, q, p, n}{
+  Same as \code{\link[stats:Uniform]{Uniform}}.
+
+
+  }
+  \item{shape}{the (shape) parameter, which lies in \eqn{(0, 1)}.}
+  \item{log}{
+  Logical.
+  If \code{log = TRUE} then the logarithm of the density is returned.
+
+  }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
+
+}
+\value{
+  \code{dtopple} gives the density,
+  \code{ptopple} gives the distribution function,
+  \code{qtopple} gives the quantile function, and
+  \code{rtopple} generates random deviates.
+
+
+}
+\references{
+
+  Topp, C. W. and F. C. Leone (1955)
+  A family of J-shaped frequency functions.
+  \emph{Journal of the American Statistical Association},
+  \bold{50}, 209--219.
+
+
+
+}
+\author{ T. W. Yee }
+\details{
+  See \code{\link{topple}}, the \pkg{VGAM} family function for
+  estimating the (shape) parameter \eqn{s} by maximum likelihood
+  estimation, for the formula of the probability density function.
+
+
+}
+\note{
+  The Topp-Leone distribution is related to the triangle distribution.
+
+
+}
+\seealso{
+  \code{\link{topple}},
+  \code{\link{Triangle}}.
+
+
+
+}
+\examples{
+\dontrun{ shape <- 0.7; x <- seq(0.02, 0.999, length = 300)
+plot(x, dtopple(x, shape = shape), type = "l", col = "blue", las = 1,
+     main = "Blue is density, orange is cumulative distribution function",
+     sub = "Purple lines are the 10,20,...,90 percentiles", ylab = "")
+abline(h = 0, col = "blue", lty = 2)
+lines(x, ptopple(x, shape = shape), type = "l", col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qtopple(probs, shape = shape)
+lines(Q, dtopple(Q, shape), col = "purple", lty = 3, type = "h")
+lines(Q, ptopple(Q, shape), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+max(abs(ptopple(Q, shape) - probs))  # Should be zero
+}
+}
+\keyword{distribution}
+
+
diff --git a/man/triangle.Rd b/man/triangle.Rd
index 64272b5..fa0a02a 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -60,7 +60,7 @@ triangle(lower = 0, upper = 1,
 
 
 }
-\references{ 
+\references{
 
 
 Kotz, S. and van Dorp, J. R. (2004)
@@ -70,6 +70,13 @@ Chapter 1.
 World Scientific: Singapore.
 
 
+Nguyen, H. D. and McLachlan, G. J. (2016)
+Maximum likelihood estimation of triangular and polygon distributions.
+\emph{Computational Statistics and Data Analysis},
+\bold{102}, 23--36.
+
+
+
 
 }
 \author{ T. W. Yee }
@@ -105,6 +112,7 @@ World Scientific: Singapore.
 }
 \seealso{
   \code{\link{Triangle}},
+  \code{\link{Topple}},
   \code{\link{simulate.vlm}}.
 
 
diff --git a/man/triangleUC.Rd b/man/triangleUC.Rd
index 65475ca..3043be7 100644
--- a/man/triangleUC.Rd
+++ b/man/triangleUC.Rd
@@ -66,7 +66,8 @@ rtriangle(n, theta, lower = 0, upper = 1)
 %
 %}
 \seealso{
-  \code{\link{triangle}}.
+  \code{\link{triangle}},
+  \code{\link{topple}}.
 
 
 }
diff --git a/man/trplot.Rd b/man/trplot.Rd
index 41b988e..463057b 100644
--- a/man/trplot.Rd
+++ b/man/trplot.Rd
@@ -23,7 +23,7 @@ trplot(object, ...)
 \details{
   Trajectory plots can be defined in different ways for different
   models.
-  Many models have no such notion or definition. 
+  Many models have no such notion or definition.
 
 
   For quadratic and additive ordination models they plot the
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index a8c4ace..3a847a6 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -12,12 +12,12 @@ It is only applicable for rank-1 models with argument
 }
 \usage{
 trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
-               label.sites = FALSE, sitenames = rownames(object@y), 
-               axes.equal = TRUE, cex = par()$cex, 
-               col = 1:(nos * (nos - 1)/2), log = "", 
-               lty = rep_len(par()$lty, nos * (nos - 1)/2), 
-               lwd = rep_len(par()$lwd, nos * (nos - 1)/2), 
-               tcol = rep_len(par()$col, nos * (nos - 1)/2), 
+               label.sites = FALSE, sitenames = rownames(object@y),
+               axes.equal = TRUE, cex = par()$cex,
+               col = 1:(nos * (nos - 1)/2), log = "",
+               lty = rep_len(par()$lty, nos * (nos - 1)/2),
+               lwd = rep_len(par()$lwd, nos * (nos - 1)/2),
+               tcol = rep_len(par()$col, nos * (nos - 1)/2),
                xlab = NULL, ylab = NULL,
                main = "", type = "b", check.ok = TRUE, ...)
 }
@@ -27,7 +27,7 @@ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
   \item{which.species}{ Integer or character vector specifying the
   species to be plotted. If integer, these are the columns of the
   response matrix. If character, these must match exactly with the
-  species' names. 
+  species' names.
   The default is to use all species.
 
 
@@ -39,12 +39,12 @@ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
   curves/trajectories are labelled with the \code{sitenames}. }
   \item{sitenames}{ Character vector. The names of the sites. }
   \item{axes.equal}{ Logical. If \code{TRUE}, the x- and y-axes
-  will be on the same scale. 
+  will be on the same scale.
 
 
   }
   \item{cex}{ Character expansion of the labelling of the site names.
-  Used only if \code{label.sites} is \code{TRUE}.   
+  Used only if \code{label.sites} is \code{TRUE}.
   See the \code{cex} argument in \code{\link[graphics]{par}}.
 
 
@@ -73,26 +73,26 @@ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
   }
   \item{tcol}{Color of the text for the site names.
   See the \code{col} argument in \code{\link[graphics]{par}}.
-  Used only if \code{label.sites} is \code{TRUE}.  
+  Used only if \code{label.sites} is \code{TRUE}.
 
 
   }
   \item{xlab}{Character caption for the x-axis.
   By default, a suitable caption is found.
-  See the \code{xlab} argument in \code{\link[graphics]{plot}} 
+  See the \code{xlab} argument in \code{\link[graphics]{plot}}
   or \code{\link[graphics]{title}}.
 
 
   }
   \item{ylab}{Character caption for the y-axis.
   By default, a suitable caption is found.
-  See the \code{xlab} argument in \code{\link[graphics]{plot}} 
+  See the \code{xlab} argument in \code{\link[graphics]{plot}}
   or \code{\link[graphics]{title}}.
 
 
   }
   \item{main}{ Character, giving the title of the plot.
-  See the \code{main} argument in \code{\link[graphics]{plot}} 
+  See the \code{main} argument in \code{\link[graphics]{plot}}
   or \code{\link[graphics]{title}}.
 
 
@@ -104,7 +104,7 @@ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
 
   }
   \item{check.ok}{ Logical. Whether a check is performed to see
-  that \code{noRRR = ~ 1} was used. 
+  that \code{noRRR = ~ 1} was used.
   It doesn't make sense to have a trace plot unless this is so.
 
 
@@ -120,9 +120,9 @@ trplot.qrrvglm(object, which.species = NULL, add = FALSE, show.plot = TRUE,
  A trajectory plot plots the fitted values of a `second' species
  against a `first' species. The argument \code{which.species} must
  therefore contain at least two species. By default, all of the
- species that were fitted in \code{object} are plotted. 
+ species that were fitted in \code{object} are plotted.
  With more than a few species
- the resulting plot will be very congested, and so it is recommended 
+ the resulting plot will be very congested, and so it is recommended
  that only a few species be selected for plotting.
 
 
@@ -163,7 +163,7 @@ quadratic ordination.
 
 \author{ Thomas W. Yee }
 
-\note{ 
+\note{
    Plotting the axes on a log scale is often a good idea.
    The use of \code{xlim} and \code{ylim} to control the axis limits
    is also a good idea, so as to limit the extent of the curves at low
diff --git a/man/truncparetoUC.Rd b/man/truncparetoUC.Rd
index cc94efd..37feacb 100644
--- a/man/truncparetoUC.Rd
+++ b/man/truncparetoUC.Rd
@@ -70,7 +70,7 @@ rtruncpareto(n, lower, upper, shape)
 
 }
 %%\note{
-%%  The truncated Pareto distribution is 
+%%  The truncated Pareto distribution is
 %%}
 \seealso{
   \code{\link{truncpareto}}.
diff --git a/man/truncweibull.Rd b/man/truncweibull.Rd
index 2811f69..ccf8781 100644
--- a/man/truncweibull.Rd
+++ b/man/truncweibull.Rd
@@ -28,9 +28,9 @@ truncweibull(lower.limit = 1e-5,
 
 
   }
-  
+
   \item{lAlpha, lBetaa}{
-  Parameter link functions applied to the 
+  Parameter link functions applied to the
   (positive) parameters \code{Alpha}
   (called \eqn{\alpha} below) and
   (positive) \code{Betaa} (called \eqn{\beta} below).
@@ -67,7 +67,7 @@ truncweibull(lower.limit = 1e-5,
 
 % More details about the Weibull density are \code{\link{weibullR}}.
 
-  
+
   Upon fitting the \code{extra} slot has a component called
   \code{lower.limit} which is of the same dimension as the
   response.
@@ -75,7 +75,7 @@ truncweibull(lower.limit = 1e-5,
   using \code{\link{pgamma.deriv}}
   and \code{\link{pgamma.deriv.unscaled}}.
 
-  
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -90,7 +90,7 @@ truncweibull(lower.limit = 1e-5,
   \emph{Statistical Papers},
   \bold{30}(1), 39--48.
 
-  
+
 }
 
 \author{ T. W. Yee }
diff --git a/man/ucberk.Rd b/man/ucberk.Rd
index 2ff893b..b00756e 100644
--- a/man/ucberk.Rd
+++ b/man/ucberk.Rd
@@ -44,7 +44,7 @@
 
 }
 %\source{
-%  
+%
 %
 %}
 \references{
@@ -65,5 +65,5 @@ summary(ucberk)
 }
 \keyword{datasets}
 % 7 February 1975
-% Bickel, et al., 187 (4175): 398-404 
+% Bickel, et al., 187 (4175): 398-404
 
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 74c6c9b..5ff861c 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -4,8 +4,20 @@
 %\alias{ccoef-method}
 %
 %
+% 201607, 201608:
+\alias{psint,pvgam-method}
+\alias{summary,pvgam-method}
+\alias{show,summary.pvgam-method}
+\alias{df.residual,pvgam-method}
+\alias{endf,ANY-method}
+\alias{endf,pvgam-method}
+\alias{endf,summary.pvgam-method}
+\alias{vcov,pvgam-method}
+\alias{show,pvgam,ANY-method}
+\alias{show,pvgam-method}
+\alias{model.matrix,pvgam-method}
 % 201604:
-\alias{plot,psvgam,ANY-method}
+\alias{plot,pvgam,ANY-method}
 % 201602:
 \alias{predictvglmS4VGAM,ANY,binom2.or-method}
 % 201601:
diff --git a/man/vcovvlm.Rd b/man/vcovvlm.Rd
index ac028fd..96b0cc8 100644
--- a/man/vcovvlm.Rd
+++ b/man/vcovvlm.Rd
@@ -62,7 +62,7 @@ vcovvlm(object, dispersion = NULL, untransform = FALSE)
   is not entirely reliable because the elements of the
   \bold{A}--\bold{C} part of the matrix sometimes cannot be
   computed very accurately, so that the entire matrix is
-  not positive-definite. 
+  not positive-definite.
 
 
 }
diff --git a/man/venice.Rd b/man/venice.Rd
index 23be5e5..c026e23 100644
--- a/man/venice.Rd
+++ b/man/venice.Rd
@@ -36,17 +36,17 @@ data(venice90)
    number of hours since the midnight of 31 Dec 1939 and 1 Jan 1940.
 
   }
- 
+
   \item{Year}{numeric vector;
   approximate year as a real number.
   The formula is \code{start.year + ohour / (365.26 * 24)}
   where \code{start.year} is 1940.
   One can treat \code{Year} as continuous whereas
   \code{year} can be treated as both continuous and discrete.
-  
+
   }
-    
-  
+
+
   }
 
 }
@@ -62,7 +62,7 @@ data(venice90)
   (now) a very low and unusual measurement.
 
 
-  For \code{venice}, in 1935 only the top six values were recorded.   
+  For \code{venice}, in 1935 only the top six values were recorded.
 
 
   For \code{venice90}, this is a subset of a data set provided by
@@ -101,8 +101,8 @@ data(venice90)
   Extreme value theory based on the \emph{r} largest annual events.
   \emph{Journal of Hydrology},
   \bold{86}, 27--43.
-  
-  
+
+
 Battistin, D. and Canestrelli, P. (2006).
 \emph{La serie storica delle maree a Venezia, 1872--2004} (in Italian),
 Comune di Venezia.
diff --git a/man/vgam-class.Rd b/man/vgam-class.Rd
index 1aeaa0b..64f5f81 100644
--- a/man/vgam-class.Rd
+++ b/man/vgam-class.Rd
@@ -5,15 +5,15 @@
 \description{ Vector generalized additive models. }
 \section{Objects from the Class}{
 Objects can be created by calls of the form \code{vgam(...)}.
-%   ~~ describe objects here ~~ 
+%   ~~ describe objects here ~~
 }
 \section{Slots}{
   \describe{
-    \item{\code{nl.chisq}:}{Object of class \code{"numeric"}. 
+    \item{\code{nl.chisq}:}{Object of class \code{"numeric"}.
 Nonlinear chi-squared values. }
-    \item{\code{nl.df}:}{Object of class \code{"numeric"}. 
+    \item{\code{nl.df}:}{Object of class \code{"numeric"}.
 Nonlinear chi-squared degrees of freedom values. }
-    \item{\code{spar}:}{Object of class \code{"numeric"} 
+    \item{\code{spar}:}{Object of class \code{"numeric"}
 containing the (scaled) smoothing parameters. }
     \item{\code{s.xargument}:}{Object of class \code{"character"}
 holding the variable name of any \code{s()} terms. }
@@ -31,7 +31,7 @@ contains any extra information that might be needed
 The family function.  }
     \item{\code{iter}:}{Object of class \code{"numeric"}.
 The number of IRLS iterations used. }
-    \item{\code{predictors}:}{Object of class \code{"matrix"} 
+    \item{\code{predictors}:}{Object of class \code{"matrix"}
 with \eqn{M} columns which holds the \eqn{M} linear predictors. }
     \item{\code{assign}:}{Object of class \code{"list"},
   from class \code{ "vlm"}.
@@ -113,16 +113,16 @@ used by \code{\link{plotvgam}}; the plotting parameters
         may be put here.
  }
     \item{\code{prior.weights}:}{Object of class
-\code{"matrix"}, from class \code{ "vlm"} 
+\code{"matrix"}, from class \code{ "vlm"}
 holding the initially supplied weights.
  }
     \item{\code{qr}:}{Object of class \code{"list"},
  from class \code{ "vlm"}.
-QR decomposition at the final iteration. 
+QR decomposition at the final iteration.
  }
     \item{\code{R}:}{Object of class \code{"matrix"},
  from class \code{ "vlm"}.
-The \bold{R} matrix in the QR decomposition used in the fitting. 
+The \bold{R} matrix in the QR decomposition used in the fitting.
  }
     \item{\code{rank}:}{Object of class \code{"integer"},
  from class \code{ "vlm"}.
@@ -233,7 +233,7 @@ Vector generalized additive models.
 \note{
   VGAMs have all the slots that \code{\link{vglm}} objects
   have (\code{\link{vglm-class}}), plus the first few slots
-  described in the section above. 
+  described in the section above.
 
 
 }
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 1184a53..3b3845d 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -2,19 +2,24 @@
 \alias{vgam}
 %\alias{vgam.fit}
 \title{ Fitting Vector Generalized Additive Models }
-% 15/2/03; based a lot from vglm.Rd 
+% 20030215; This file is based a lot from vglm.Rd
 \description{
-  Fit a vector generalized additive model (VGAM).  This is a large class
+  Fit a vector generalized additive model (VGAM).
+  Both 1st-generation VGAMs (based on backfitting) and
+  2nd-generation VGAMs (based on P-splines, with automatic
+  smoothing parameter selection) are implemented.
+  This is a large class
   of models that includes generalized additive models (GAMs) and vector
   generalized linear models (VGLMs) as special cases.
 
+
 }
 \usage{
-vgam(formula, family, data = list(), weights = NULL, subset = NULL, 
-     na.action = na.fail, etastart = NULL, mustart = NULL, 
-     coefstart = NULL, control = vgam.control(...), offset = NULL, 
-     method = "vgam.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, 
-     contrasts = NULL, constraints = NULL, 
+vgam(formula, family, data = list(), weights = NULL, subset = NULL,
+     na.action = na.fail, etastart = NULL, mustart = NULL,
+     coefstart = NULL, control = vgam.control(...), offset = NULL,
+     method = "vgam.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+     contrasts = NULL, constraints = NULL,
      extra = list(), form2 = NULL, qr.arg = FALSE, smart = TRUE, ...)
 }
 %- maybe also `usage' for other objects documented here.
@@ -24,7 +29,11 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
   \item{formula}{
   a symbolic description of the model to be fit.
   The RHS of the formula is applied to each linear/additive predictor,
-  and usually includes at least one \code{\link[VGAM]{s}} term.
+  and should include at least one
+     \code{\link[VGAM]{sm.os}} term
+  or \code{\link[VGAM]{sm.ps}} term
+  or \code{\link[VGAM]{s}} term.
+  Mixing both together is not allowed.
   Different variables in each linear/additive predictor
   can be chosen by specifying constraint matrices.
 
@@ -107,16 +116,34 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
   (almost always \eqn{x_1=1} for the intercept term),
   and
   \eqn{f_{(j)k}} are smooth functions of \eqn{x_k} that are estimated
-  by smoothers. The first term in the summation is just the intercept.
-  Currently only one type of smoother is
-  implemented and this is called a \emph{vector (cubic smoothing spline)
-  smoother}.
-  Here, \eqn{j=1,\ldots,M} where \eqn{M} is finite.
+  by smoothers.
+  The first term in the summation is just the intercept.
+  Currently
+  two types of smoothers are
+  implemented:
+  \code{\link[VGAM]{s}} represents
+  the older and more traditional one, called a
+  \emph{vector (cubic smoothing spline) smoother} and is
+  based on Yee and Wild (1996);
+  it is more similar to the \R{} package \pkg{gam}.
+  The newer one is represented by
+  \code{\link[VGAM]{sm.os}} and
+  \code{\link[VGAM]{sm.ps}}, and these are
+  based on O-splines and P-splines---they allow automatic
+  smoothing parameter selection; it is more similar
+  to the \R{} package \pkg{mgcv}.
+
+
+
+
+  In the above, \eqn{j=1,\ldots,M} where \eqn{M} is finite.
   If all the functions are constrained to be linear then
   the resulting model is a vector generalized linear model
   (VGLM).  VGLMs are best fitted with \code{\link{vglm}}.
 
 
+
+
   Vector (cubic smoothing spline) smoothers are represented
   by \code{s()} (see \code{\link[VGAM]{s}}). Local
   regression via \code{lo()} is \emph{not} supported. The
@@ -127,15 +154,61 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
   the number of additive predictors \eqn{M} is large.
 
 
-  The underlying algorithm of VGAMs is iteratively
-  reweighted least squares (IRLS) and modified vector backfitting
-  using vector splines. B-splines are used as the basis functions
-  for the vector (smoothing) splines. 
-  \code{vgam.fit()} is the function that actually does the
+
+  Second-generation VGAMs are based on the
+  O-splines and P-splines.
+  The latter is due to Eilers and Marx (1996).
+  Backfitting is not required, and estimation is performed using IRLS.
+  The function \code{\link{sm.os}} represents a \emph{smart}
+  implementation of O-splines.
+  The function \code{\link{sm.ps}} represents a \emph{smart}
+  implementation of P-splines.
+  Written as G2-VGAMs or P-VGAMs, this methodology should not be used
+  unless the sample size is reasonably large.
+  Usually an UBRE predictive criterion is optimized
+  (at each IRLS iteration)
+  because the
+  scale parameter for VGAMs is usually assumed to be known.
+  This search for optimal smoothing parameters does not always converge,
+  and neither is it totally reliable.
+  G2-VGAMs implicitly set \code{criterion = "coefficients"} so that
+  convergence occurs when the change in the regression coefficients
+  between 2 IRLS iterations is sufficiently small.
+  Otherwise the search for the optimal smoothing parameters might
+  cause the log-likelihood to decrease between 2 IRLS iterations.
+  Currently \emph{outer iteration} is implemented,
+  by default,
+  rather than \emph{performance iteration} because the latter
+  is more likely to converge to a local solution; see
+  Wood (2004) for details.
+  One can use \emph{performance iteration}
+  by setting \code{Maxit.outer = 1} in
+  \code{\link{vgam.control}}.
+
+
+% outeriter
+
+
+
+%  A suggested rule-of-thumb is at least 500 observations.
+
+
+
+  The underlying algorithm of VGAMs is IRLS.
+  First-generation VGAMs (called G1-VGAMs)
+  are estimated by modified vector backfitting
+  using vector splines. O-splines are used as the basis functions
+  for the vector (smoothing) splines, which are a lower dimensional
+  version of natural B-splines.
+  The function \code{vgam.fit()} actually does the
   work.  The smoothing code is based on F. O'Sullivan's
   BART code.
 
 
+
+
+
+
 %  If more than one of \code{etastart}, \code{start} and \code{mustart}
 %  is specified, the first in the list will be used.
 
@@ -150,30 +223,68 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
 
 }
 \value{
-  An object of class \code{"vgam"}
-  (see \code{\link{vgam-class}} for further information). 
+  For G1-VGAMs and G2-VGAMs, an object of class
+  \code{"vgam"} or
+  \code{"pvgam"}
+  respectively
+  (see \code{\link{vgam-class}}
+   and \code{\link{pvgam-class}}
+  for further information).
 
 
 }
-\references{ 
+\references{
+
+
+
+Wood, S. N. (2004).
+Stable and efficient multiple smoothing parameter estimation
+for generalized additive models.
+\emph{J. Amer. Statist. Assoc.}, \bold{99}(467): 673--686.
+
+
+
 Yee, T. W. and Wild, C. J. (1996)
 Vector generalized additive models.
 \emph{Journal of the Royal Statistical Society, Series B, Methodological},
 \bold{58}, 481--493.
 
 
+
 Yee, T. W. (2008)
 The \code{VGAM} Package.
 \emph{R News}, \bold{8}, 28--39.
 
 
+
+Yee, T. W. (2015)
+\emph{Vector Generalized Linear and Additive Models:
+With an Implementation in R}.
+New York, USA: Springer.
+
+
+
+Yee, T. W. (2016).
+Comments on ``Smoothing parameter and model selection for
+general smooth models''
+by Wood, S. N. and Pya, N. and Saefken, B.,
+\emph{J. Amer. Statist. Assoc.}, \bold{111}(516).
+
+
+
+%Yee, T. W. and Somchit, C. and Wild, C. J. (2016)
+%Generation-2
+%vector generalized additive models.
+%Manuscript in preparation.
+
+
+
 %Documentation accompanying the \pkg{VGAM} package at
 %\url{http://www.stat.auckland.ac.nz/~yee}
 %contains further information and examples.
 
 
 
-
 %Wood, S. N. (2004).
 %Stable and efficient multiple smoothing parameter estimation
 %for generalized additive models.
@@ -181,7 +292,6 @@ The \code{VGAM} Package.
 
 
 
-
 }
 
 \author{ Thomas W. Yee }
@@ -198,6 +308,7 @@ The \code{VGAM} Package.
   \code{coefstart} and \code{mustart}.
 
 
+
   Some \pkg{VGAM} family functions end in \code{"ff"}
   to avoid interference with other functions, e.g.,
   \code{\link{binomialff}}, \code{\link{poissonff}},
@@ -223,10 +334,19 @@ The \code{VGAM} Package.
 
 %~Make other sections like WARNING with \section{WARNING }{....} ~
 \section{WARNING}{
-  Currently \code{vgam} can only handle constraint matrices \code{cmat},
+  For G1-VGAMs,
+  currently \code{vgam} can only handle constraint matrices \code{cmat},
   say, such that \code{crossprod(cmat)} is diagonal.
-  This is a bug that I will try to fix up soon;
-  see \code{\link{is.buggy}}.
+  It can be detected by \code{\link{is.buggy}}.
+  VGAMs with constraint matrices that have non-orthogonal columns should
+  be fitted with
+  \code{\link{sm.os}} or
+  \code{\link{sm.ps}} terms
+  instead of \code{\link{s}}.
+
+
+%  This is a bug that I will try to fix up soon;
+
 
 
   See warnings in \code{\link{vglm.control}}.
@@ -240,61 +360,71 @@ The \code{VGAM} Package.
   \code{\link{vgam-class}},
   \code{\link{vglmff-class}},
   \code{\link{plotvgam}},
-  \code{\link{vglm}},
+  \code{\link{summaryvgam}},
+  \code{\link{summarypvgam}},
+  \code{\link{sm.os}},
+  \code{\link{sm.ps}},
   \code{\link[VGAM]{s}},
+  \code{\link[mgcv]{magic}},
+  \code{\link{vglm}},
   \code{\link{vsmooth.spline}},
   \code{\link{cao}}.
 
 
-
-% \code{\link{ps}},
-% \code{\link[mgcv]{magic}}.
-
-
-
 }
 
-\examples{ # Nonparametric proportional odds model 
+\examples{# Nonparametric proportional odds model
 pneumo <- transform(pneumo, let = log(exposure.time))
 vgam(cbind(normal, mild, severe) ~ s(let),
      cumulative(parallel = TRUE), data = pneumo, trace = TRUE)
 
-# Nonparametric logistic regression 
-fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
-\dontrun{ plot(fit, se = TRUE) }
-pfit <- predict(fit, type = "terms", raw = TRUE, se = TRUE)
-names(pfit)
-head(pfit$fitted)
-head(pfit$se.fit)
-pfit$df
-pfit$sigma
-
-# Fit two species simultaneously 
-fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
-             binomialff(multiple.responses = TRUE), data = hunua)
-coef(fit2, matrix = TRUE)  # Not really interpretable 
-\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4)
-
+# Nonparametric logistic regression
+hfit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, data = hunua)
+\dontrun{ plot(hfit, se = TRUE) }
+phfit <- predict(hfit, type = "terms", raw = TRUE, se = TRUE)
+names(phfit)
+head(phfit$fitted)
+head(phfit$se.fit)
+phfit$df
+phfit$sigma
+
+# Fit two species simultaneously
+hfit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
+              binomialff(multiple.responses = TRUE), data = hunua)
+coef(hfit2, matrix = TRUE)  # Not really interpretable
+\dontrun{
+plot(hfit2, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4)
 ooo <- with(hunua, order(altitude))
-with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, 0.8),
+with(hunua, matplot(altitude[ooo], fitted(hfit2)[ooo,], ylim = c(0, 0.8),
      xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
      main = "Two plant species' response curves", type = "l", lwd = 2))
-with(hunua, rug(altitude)) }
+with(hunua, rug(altitude))
+}
 
-# The subset argument does not work here. Use subset() instead.
+# The 'subset' argument does not work here. Use subset() instead.
 set.seed(1)
-zdata <- data.frame(x2 = runif(nn <- 100))
+zdata <- data.frame(x2 = runif(nn <- 500))
 zdata <- transform(zdata, y = rbinom(nn, 1, 0.5))
 zdata <- transform(zdata, subS = runif(nn) < 0.7)
 sub.zdata <- subset(zdata, subS)  # Use this instead
 if (FALSE)
   fit4a <- vgam(cbind(y, y) ~ s(x2, df = 2),
-                binomialff(multiple.responses = TRUE), 
+                binomialff(multiple.responses = TRUE),
                 data = zdata, subset = subS)  # This fails!!!
 fit4b <- vgam(cbind(y, y) ~ s(x2, df = 2),
-              binomialff(multiple.responses = TRUE), 
+              binomialff(multiple.responses = TRUE),
+              data = sub.zdata)  # This succeeds!!!
+fit4c <- vgam(cbind(y, y) ~ sm.os(x2),
+              binomialff(multiple.responses = TRUE),
               data = sub.zdata)  # This succeeds!!!
+\dontrun{par(mfrow = c(2, 2))
+plot(fit4b, se = TRUE, shade = TRUE, shcol = "pink")
+plot(fit4c, se = TRUE, shade = TRUE, shcol = "pink")
+}
 }
 \keyword{models}
 \keyword{regression}
 \keyword{smooth}
+
+
+
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index 8813c32..d1ef9cc 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -4,24 +4,27 @@
 \title{ Control Function for vgam() }
 \description{
  Algorithmic constants and parameters for running \code{\link{vgam}}
- are set using this function. 
+ are set using this function.
 
 }
 \usage{
-vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30, 
-             checkwz=TRUE, criterion = names(.min.criterion.VGAM),
-             epsilon = 1e-07, maxit = 30, na.action = na.fail,
+vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
+             checkwz=TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
+             criterion = names(.min.criterion.VGAM),
+             epsilon = 1e-07, maxit = 30, Maxit.outer = 20,
+             noWarning = FALSE,
+             na.action = na.fail,
              nk = NULL, save.weights = FALSE, se.fit = TRUE,
              trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
-             xij = NULL, ...)
+             xij = NULL, gamma.arg = 1, ...)
 }
 %- maybe also `usage' for other objects documented here.
 \arguments{
-% zz na.action differs from vglm 
+% zz na.action differs from vglm
 
 
   \item{all.knots}{
-  logical indicating if all distinct points of 
+  logical indicating if all distinct points of
   the smoothing variables are to be used as knots.
   By default, \code{all.knots=TRUE} for
   \eqn{n \leq 40}{n <= 40}, and
@@ -52,9 +55,15 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
   any values less than \code{wzepsilon} are replaced with this value.
 
   }
+  \item{Check.rank, Check.cm.rank}{
+  See \code{\link{vglm.control}}.
+
+
+  }
+
   \item{criterion}{
   character variable describing what criterion is to
-  be used to test for convergence. 
+  be used to test for convergence.
   The possibilities are listed in \code{.min.criterion.VGAM}, but
   most family functions only implement a few of these.
 
@@ -72,6 +81,22 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
   Newton-Raphson/Fisher-scoring/local-scoring iterations allowed.
 
   }
+  \item{Maxit.outer}{
+  maximum number of
+  outer iterations allowed when there are
+  \code{\link{sm.os}} or
+  \code{\link{sm.ps}} terms.
+  See \code{\link{vgam}} for a little information about
+  the default \emph{outer iteration}.
+  Note that one can use \emph{performance iteration}
+  by setting \code{Maxit.outer = 1}; then the
+  smoothing parameters will be automatically chosen at each
+  IRLS iteration (some specific programming
+  allows this).
+
+
+
+  }
   \item{na.action}{
   how to handle missing values.
   Unlike the SPLUS \code{gam} function, \code{\link{vgam}} cannot handle
@@ -81,14 +106,15 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
   \item{nk}{
  vector of length \eqn{d} containing positive integers,
  where \eqn{d} is the number of \code{\link{s}} terms
-  in the formula. 
-  Recycling is used if necessary. 
+  in the formula.
+  Recycling is used if necessary.
   The \eqn{i}th value is the number of B-spline coefficients to be
-  estimated for each component function of the \eqn{i}th 
+  estimated for each component function of the \eqn{i}th
   \code{s()} term.
   \code{nk} differs from the number of knots by some constant.
   If specified, \code{nk} overrides the automatic knot selection procedure.
 
+
   }
   \item{save.weights}{
   logical indicating whether the \code{weights} slot
@@ -99,7 +125,9 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
   \item{se.fit}{
   logical indicating whether approximate
   pointwise standard errors are to be saved on the object.
-  If \code{TRUE}, then these can be plotted with \code{plot(..., se = TRUE)}.
+  If \code{TRUE}, then these can be plotted
+  with \code{plot(..., se = TRUE)}.
+
 
   }
   \item{trace}{
@@ -107,8 +135,9 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
 
   }
   \item{wzepsilon}{
-  Small positive number used to test whether the diagonals of the working
-  weight matrices are sufficiently positive.
+  Small positive number used to test whether the diagonals
+  of the working weight matrices are sufficiently positive.
+
 
   }
 
@@ -117,12 +146,29 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
 %
 % }
 
+  \item{noWarning}{
+  Same as \code{\link{vglm.control}}.
+
+
+
+  }
   \item{xij}{
   Same as \code{\link{vglm.control}}.
 
 
 
   }
+  \item{gamma.arg}{
+    Numeric; same as \code{gamma} in \code{\link[mgcv]{magic}}.
+    Inflation factor for optimizing the UBRE/GCV criterion.
+    If given, a suggested value is 1.4 to help avoid overfitting,
+    based on the work of Gu and co-workers
+    (values between 1.2 and 1.4 appeared reasonable, based on simulations).
+    A warning may be given if the value is deemed out-of-range.
+
+
+
+  }
   \item{\dots}{
   other parameters that may be picked up from control
   functions that are specific to the \pkg{VGAM} family function.
@@ -157,7 +203,7 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
 
 
 }
-\references{ 
+\references{
 Yee, T. W. and Wild, C. J. (1996)
 Vector generalized additive models.
 \emph{Journal of the Royal Statistical Society, Series B, Methodological},
@@ -167,6 +213,18 @@ Vector generalized additive models.
 % \url{http://www.stat.auckland.ac.nz/~yee}
 
 
+
+% For gamma=1.4:
+% Kim, Y.-J. and Gu, C. 2004,
+% Smoothing spline Gaussian regression:
+% more scalable computation via efficient approximation.
+%\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+%\bold{66}, 337--356.
+%\bold{66}(2), 337--356.
+
+
+
+
 }
 \author{ Thomas W. Yee}
 
@@ -192,7 +250,7 @@ Vector generalized additive models.
   \code{\link{vgam}},
   \code{\link{vglm.control}},
   \code{\link{vsmooth.spline}},
-  \code{\link{vglm}}. 
+  \code{\link{vglm}}.
 
 
 }
diff --git a/man/vglm-class.Rd b/man/vglm-class.Rd
index 14c364e..d27dfc0 100644
--- a/man/vglm-class.Rd
+++ b/man/vglm-class.Rd
@@ -5,7 +5,7 @@
 \description{  Vector generalized linear models. }
 \section{Objects from the Class}{
 Objects can be created by calls of the form \code{vglm(...)}.
-%   ~~ describe objects here ~~ 
+%   ~~ describe objects here ~~
 }
 \section{Slots}{
 
@@ -20,7 +20,7 @@ contains any extra information that might be needed
 The family function.  }
     \item{\code{iter}:}{Object of class \code{"numeric"}.
 The number of IRLS iterations used. }
-    \item{\code{predictors}:}{Object of class \code{"matrix"} 
+    \item{\code{predictors}:}{Object of class \code{"matrix"}
 with \eqn{M} columns which holds the \eqn{M} linear predictors. }
     \item{\code{assign}:}{Object of class \code{"list"},
   from class \code{ "vlm"}.
@@ -73,7 +73,7 @@ The effects.
 \code{"matrix"}, from class \code{ "vlm"}.
 The fitted values.
 %This may be missing or consist entirely
-%of \code{NA}s, e.g., the Cauchy model. 
+%of \code{NA}s, e.g., the Cauchy model.
  }
     \item{\code{misc}:}{Object of class \code{"list"},
  from class \code{ "vlm"}.
@@ -101,16 +101,16 @@ used by \code{\link{plotvgam}}; the plotting parameters
         may be put here.
  }
     \item{\code{prior.weights}:}{Object of class
-\code{"matrix"}, from class \code{ "vlm"} 
+\code{"matrix"}, from class \code{ "vlm"}
 holding the initially supplied weights.
  }
     \item{\code{qr}:}{Object of class \code{"list"},
  from class \code{ "vlm"}.
-QR decomposition at the final iteration. 
+QR decomposition at the final iteration.
  }
     \item{\code{R}:}{Object of class \code{"matrix"},
  from class \code{ "vlm"}.
-The \bold{R} matrix in the QR decomposition used in the fitting. 
+The \bold{R} matrix in the QR decomposition used in the fitting.
  }
     \item{\code{rank}:}{Object of class \code{"integer"},
  from class \code{ "vlm"}.
@@ -171,32 +171,32 @@ Class \code{"vlm"}, directly.
 \section{Methods}{
   \describe{
     \item{cdf}{\code{signature(object = "vglm")}:
-cumulative distribution function. 
+cumulative distribution function.
 Applicable to, e.g., quantile regression and extreme value data models.}
-    \item{deplot}{\code{signature(object = "vglm")}: 
+    \item{deplot}{\code{signature(object = "vglm")}:
 Applicable to, e.g., quantile regression.}
-    \item{deviance}{\code{signature(object = "vglm")}: 
+    \item{deviance}{\code{signature(object = "vglm")}:
 deviance of the model (where applicable). }
-    \item{plot}{\code{signature(x = "vglm")}: 
+    \item{plot}{\code{signature(x = "vglm")}:
 diagnostic plots. }
-    \item{predict}{\code{signature(object = "vglm")}: 
-extract the linear predictors or 
+    \item{predict}{\code{signature(object = "vglm")}:
+extract the linear predictors or
 predict the linear predictors at a new data frame.}
-    \item{print}{\code{signature(x = "vglm")}: 
+    \item{print}{\code{signature(x = "vglm")}:
 short summary of the object. }
-    \item{qtplot}{\code{signature(object = "vglm")}: 
+    \item{qtplot}{\code{signature(object = "vglm")}:
 quantile plot (only applicable to some models). }
-    \item{resid}{\code{signature(object = "vglm")}: 
+    \item{resid}{\code{signature(object = "vglm")}:
 residuals. There are various types of these. }
-    \item{residuals}{\code{signature(object = "vglm")}: 
+    \item{residuals}{\code{signature(object = "vglm")}:
 residuals. Shorthand for \code{resid}. }
-    \item{rlplot}{\code{signature(object = "vglm")}: return level plot. 
+    \item{rlplot}{\code{signature(object = "vglm")}: return level plot.
 Useful for extreme value data models.}
-    \item{summary}{\code{signature(object = "vglm")}: 
+    \item{summary}{\code{signature(object = "vglm")}:
 a more detailed summary of the object. }
   }
 }
-\references{ 
+\references{
 
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
@@ -229,7 +229,7 @@ Vector generalized additive models.
 }
 
 \examples{
-# Multinomial logit model 
+# Multinomial logit model
 pneumo <- transform(pneumo, let = log(exposure.time))
 vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
 }
diff --git a/man/vglm.Rd b/man/vglm.Rd
index d1375f8..20f5a5a 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -10,11 +10,11 @@
 
 }
 \usage{
-vglm(formula, family, data = list(), weights = NULL, subset = NULL, 
-     na.action = na.fail, etastart = NULL, mustart = NULL, 
-     coefstart = NULL, control = vglm.control(...), offset = NULL, 
-     method = "vglm.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE, 
-     contrasts = NULL, constraints = NULL, extra = list(), 
+vglm(formula, family, data = list(), weights = NULL, subset = NULL,
+     na.action = na.fail, etastart = NULL, mustart = NULL,
+     coefstart = NULL, control = vglm.control(...), offset = NULL,
+     method = "vglm.fit", model = FALSE, x.arg = TRUE, y.arg = TRUE,
+     contrasts = NULL, constraints = NULL, extra = list(),
      form2 = NULL, qr.arg = TRUE, smart = TRUE, ...)
 }
 %- maybe also `usage' for other objects documented here.
@@ -57,7 +57,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
   \item{weights}{
   an optional vector or matrix of (prior fixed and known) weights
   to be used in the fitting process.
-  If the \pkg{VGAM} family function handles multiple responses 
+  If the \pkg{VGAM} family function handles multiple responses
   (\eqn{Q > 1} of them, say) then
   \code{weights} can be a matrix with \eqn{Q} columns.
   Each column matches the respective response.
@@ -95,7 +95,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
   }
   \item{subset}{
   an optional logical vector specifying a subset of
-  observations to 
+  observations to
   be used in the fitting process.
 
 
@@ -103,7 +103,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
   }
   \item{na.action}{
   a function which indicates what should happen when
-  the data contain \code{NA}s. 
+  the data contain \code{NA}s.
   The default is set by the \code{na.action} setting
   of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
   The ``factory-fresh'' default is \code{na.omit}.
@@ -137,7 +137,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
 
   }
   \item{control}{
-  a list of parameters for controlling the fitting process. 
+  a list of parameters for controlling the fitting process.
   See \code{\link{vglm.control}} for details.
 
 
@@ -169,7 +169,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
   process should be assigned in the \code{x} and \code{y} slots.
   Note the model matrix is the LM model matrix; to get the VGLM
   model matrix type \code{model.matrix(vglmfit)} where
-  \code{vglmfit} is a \code{vglm} object. 
+  \code{vglmfit} is a \code{vglm} object.
 
 
   }
@@ -333,7 +333,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
 
 
 }
-\references{ 
+\references{
 
 
 Yee, T. W. (2015)
@@ -433,7 +433,7 @@ The \code{VGAM} Package.
 
 
 
-\seealso{ 
+\seealso{
   \code{\link{vglm.control}},
   \code{\link{vglm-class}},
   \code{\link{vglmff-class}},
@@ -442,7 +442,7 @@ The \code{VGAM} Package.
   \code{\link{fill}},
   \code{\link{rrvglm}},
   \code{\link{vgam}}.
-  Methods functions include 
+  Methods functions include
   \code{\link{coefvlm}},
   \code{\link{constraints.vlm}},
   \code{\link{hatvaluesvlm}},
@@ -474,13 +474,13 @@ vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
 
 # Example 3. Proportional odds model
 fit3 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
-coef(fit3, matrix = TRUE) 
+coef(fit3, matrix = TRUE)
 constraints(fit3)
 model.matrix(fit3, type = "lm")  # LM model matrix
 model.matrix(fit3)               # Larger VGLM (or VLM) model matrix
 
 
-# Example 4. Bivariate logistic model 
+# Example 4. Bivariate logistic model
 fit4 <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
 coef(fit4, matrix = TRUE)
 depvar(fit4)  # Response are proportions
@@ -507,7 +507,7 @@ fit5 <- vglm(cbind(leye, reye) ~ op,
 coef(fit5)
 coef(fit5, matrix = TRUE)
 constraints(fit5)
-} 
+}
 \keyword{models}
 \keyword{regression}
 
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index d4d1ad3..80ef329 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -13,7 +13,7 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
              epsilon = 1e-07, half.stepsizing = TRUE,
              maxit = 30, noWarning = FALSE,
              stepsize = 1, save.weights = FALSE,
-             trace = FALSE, wzepsilon = .Machine$double.eps^0.75, 
+             trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
              xij = NULL, ...)
 }
 %- maybe also `usage' for other objects documented here.
@@ -76,8 +76,8 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
 
   }
   \item{maxit}{
-  maximum number of (usually Fisher-scoring) iterations allowed.
-  Sometimes Newton-Raphson is used.
+  maximum number of (usually Fisher-scoring) iterations
+  allowed.  Sometimes Newton-Raphson is used.
 
 
   }
@@ -137,10 +137,10 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
   e.g., the ocular pressure of each eye.
   The \eqn{M} terms must be unique;
   use \code{\link{fill1}}, \code{fill2}, \code{fill3}, etc. if necessary.
-  Each formula should have a response which is taken as the name of
-  that variable, and the \eqn{M} terms are enumerated in sequential order.
-  Each of the \eqn{M} terms multiply each successive row of the constraint
-  matrix.
+  Each formula should have a response which is taken as the
+  name of that variable, and the \eqn{M} terms are enumerated
+  in sequential order.  Each of the \eqn{M} terms multiply
+  each successive row of the constraint matrix.
   When \code{xij} is used, the use of \code{form2} is also required
   to give \emph{every} term used by the model.
 
@@ -162,7 +162,8 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
 % This argument is made use of by \code{plotvgam()}.
 % Each formula has a RHS giving \eqn{r_k} unique terms,
 % one for each column of the constraint matrix.
-% Each formula should have a response that matches the \code{formula} argument.
+% Each formula should have a response that matches the
+% \code{formula} argument.
 % The argument \code{jix} is a reversal of \code{xij} to emphasize
 % the same framework for handling terms involving covariates that have
 % different values for each linear/additive predictor.
@@ -182,11 +183,12 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
   understand the full details.
 
 
-  Setting \code{save.weights = FALSE} is useful for some models because
-  the \code{weights} slot of the object is the largest and so less
-  memory is used to store the object. However, for some \pkg{VGAM}
-  family function, it is necessary to set \code{save.weights = TRUE}
-  because the \code{weights} slot cannot be reconstructed later.
+  Setting \code{save.weights = FALSE} is useful for some
+  models because the \code{weights} slot of the object
+  is the largest and so less memory is used to store the
+  object. However, for some \pkg{VGAM} family function,
+  it is necessary to set \code{save.weights = TRUE} because
+  the \code{weights} slot cannot be reconstructed later.
 
 
   }
@@ -208,14 +210,14 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
 
 }
 \author{ Thomas W. Yee}
-\note{ 
+\note{
   Reiterating from above,
   setting \code{trace = TRUE} is recommended in general.
 
 
-  In Example 2 below there are two covariates that have linear/additive
-  predictor specific values.
-  These are handled using the \code{xij} argument.
+  In Example 2 below there are two covariates that have
+  linear/additive predictor specific values.  These are
+  handled using the \code{xij} argument.
 
 
 }
@@ -254,8 +256,10 @@ vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo,
 
 # Example 2. The use of the xij argument (simple case).
 ymat <- rdiric(n <- 1000, shape = rep(exp(2), len = 4))
-mydat <- data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n), x4 = runif(n),
-                    z1 = runif(n), z2 = runif(n), z3 = runif(n), z4 = runif(n))
+mydat <- data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n),
+                    x4 = runif(n),
+                    z1 = runif(n), z2 = runif(n), z3 = runif(n),
+                    z4 = runif(n))
 mydat <- transform(mydat, X = x1, Z = z1)
 mydat <- round(mydat, digits = 2)
 fit2 <- vglm(ymat ~ X + Z,
@@ -286,8 +290,10 @@ coalminers <- transform(coalminers,
                         dum2 = round(runif(nrow(coalminers)), digits = 2),
                         dum3 = round(runif(nrow(coalminers)), digits = 2),
                         dumm = round(runif(nrow(coalminers)), digits = 2))
-BS <- function(x, ..., df = 3) sm.bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
-NS <- function(x, ..., df = 3) sm.ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
+BS <- function(x, ..., df = 3)
+  sm.bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
+NS <- function(x, ..., df = 3)
+  sm.ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
 
 # Equivalently...
 BS <- function(x, ..., df = 3)
@@ -307,7 +313,9 @@ head(model.matrix(fit3, type = "lm"))   # LM model matrix
 head(model.matrix(fit3, type = "vlm"))  # Big VLM model matrix
 coef(fit3)
 coef(fit3, matrix = TRUE)
-\dontrun{ plotvgam(fit3, se = TRUE, lcol = "red", scol = "blue", xlab = "dum1") }
+\dontrun{
+plotvgam(fit3, se = TRUE, lcol = "red", scol = "blue", xlab = "dum1")
+}
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
index 75c1d6f..9bd73b9 100644
--- a/man/vglmff-class.Rd
+++ b/man/vglmff-class.Rd
@@ -5,12 +5,12 @@
 \description{  Family functions for the \pkg{VGAM} package }
 \section{Objects from the Class}{
 Objects can be created by calls of the form \code{new("vglmff", ...)}.
-  
+
 }
 \section{Slots}{
   In the following, \eqn{M} is the number of linear/additive
   predictors.
-  
+
 \describe{
   \item{\code{blurb}:}{
   Object of class \code{"character"} giving
@@ -63,7 +63,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
   and obtain starting values for the model.
   In general, \code{etastart} or
   \code{mustart} are assigned values based on the variables \code{y},
-  \code{x} and \code{w}. 
+  \code{x} and \code{w}.
 
   }
   \item{\code{linkinv}:}{
@@ -168,21 +168,21 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
   \eqn{\bold{W}_i}{\bold{W}i} matrix. Thus each row of the
   matrix returned by this slot is such a matrix.
   Like the \code{weights} slot of \code{vglm}/\code{vgam}, it is
-  stored in 
-  \emph{matrix-band} form, whereby the first \eqn{M} 
+  stored in
+  \emph{matrix-band} form, whereby the first \eqn{M}
   columns of the matrix are the
   diagonals, followed by the upper-diagonal band, followed by the
   band above that, etc. In this case, there can be up to \eqn{M(M+1)}
   columns, with the last column corresponding to the (1,\eqn{M}) elements
   of the weight matrices.
-   
+
   }
 
   \item{\code{validfitted, validparams}:}{
   Functions that test that the fitted values and
   all parameters are within range.
   These functions can issue a warning if violations are detected.
-   
+
 
   }
 
@@ -193,7 +193,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
 \section{Methods}{
   \describe{
     \item{print}{\code{signature(x = "vglmff")}:
-      short summary of the family function. 
+      short summary of the family function.
     }
   }
 }
@@ -225,14 +225,14 @@ Vector generalized additive models.
 % A unified method of handling arguments is to use
 % \code{match.arg}. This allows, for example,
 % \code{vglm(..., family = cratio(link = logit))}
-% and 
+% and
 % \code{vglm(..., family = cratio(link = "logi"))}
 % to be equivalent (Nb. there is a \code{logit} function).
 
 
   The \code{extra} argument in
   \code{linkinv}, \code{linkfun}, \code{deviance},
-  \code{loglikelihood}, etc. 
+  \code{loglikelihood}, etc.
   matches with the argument \code{extra}
   in \code{\link{vglm}}, \code{\link{vgam}} and \code{\link{rrvglm}}.
   This allows input to be fed into all slots of a \pkg{VGAM}
@@ -260,7 +260,7 @@ Vector generalized additive models.
   expected information are positive-definite over a larger
   parameter space.
 
-  
+
   }
 
 \section{Warning }{
@@ -269,7 +269,7 @@ Vector generalized additive models.
   \pkg{gam} or \pkg{mgcv} packages).
 
 
-} 
+}
 
 \seealso{
   \code{\link{vglm}},
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index 72b94ad..8c8d031 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -36,7 +36,7 @@ vonmises(llocation = extlogit(min = 0, max = 2 * pi), lscale = "loge",
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
   specifies the initialization method. If failure to converge occurs
-  try the other value, or else specify a value for 
+  try the other value, or else specify a value for
   \code{ilocation} and \code{iscale}.
 
   }
@@ -67,7 +67,7 @@ vonmises(llocation = extlogit(min = 0, max = 2 * pi), lscale = "loge",
         f(y;a,k) = exp[k*cos(y-a)] / (2*pi*I0(k))}
  where \eqn{0 \leq y < 2\pi}{0 <= y < 2*pi},
  \eqn{k>0} is the scale parameter,
- \eqn{a} is the location parameter, and 
+ \eqn{a} is the location parameter, and
  \eqn{I_0(k)}{I0(k)} is the modified Bessel
  function of order 0 evaluated at \eqn{k}.
  The mean of \eqn{Y} (which is the fitted value) is \eqn{a}
@@ -90,7 +90,7 @@ vonmises(llocation = extlogit(min = 0, max = 2 * pi), lscale = "loge",
 
 
 }
-\references{ 
+\references{
 
 Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 \emph{Statistical Distributions},
@@ -100,7 +100,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 \author{ T. W. Yee }
 \note{
- The response and the fitted values are scaled so that 
+ The response and the fitted values are scaled so that
  \eqn{0\leq y< 2\pi}{0<=y<2*pi}.
  The linear/additive predictors are left alone.
  Fisher scoring is used.
@@ -122,7 +122,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
   \pkg{CircStats} and \pkg{circular} currently have a lot more
-  R functions for circular data than the \pkg{VGAM} package. 
+  R functions for circular data than the \pkg{VGAM} package.
 
 
 }
diff --git a/man/vsmooth.spline.Rd b/man/vsmooth.spline.Rd
index 906e3c1..f004019 100644
--- a/man/vsmooth.spline.Rd
+++ b/man/vsmooth.spline.Rd
@@ -8,9 +8,9 @@
 \usage{
 vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
                i.constraint = diag(M),
-               x.constraint = diag(M), 
+               x.constraint = diag(M),
                constraints = list("(Intercepts)" = i.constraint,
-                                  x = x.constraint), 
+                                  x = x.constraint),
                all.knots = FALSE, var.arg = FALSE, scale.w = TRUE,
                nk = NULL, control.spar = list())
 }
@@ -22,7 +22,7 @@ vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
   If a matrix, the first column is used.
   \code{x} may also be a complex vector, in which case
   the real part is used, and the imaginary part is used for the response.
-  In this help file, \code{n} is the number of unique values of \code{x}. 
+  In this help file, \code{n} is the number of unique values of \code{x}.
 
 
 }
@@ -31,11 +31,11 @@ vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
   If a list, the  \code{y} component is used.
   If a matrix, all but the first column is used.
   In this help file, \code{M} is the number of columns of \code{y} if
-  there are no constraints on the functions. 
+  there are no constraints on the functions.
 
 
 }
-  \item{w}{ 
+  \item{w}{
   The weight matrices or the number of observations.
   If the  weight matrices, then this must be a \code{n}-row matrix
   with the elements in matrix-band form (see \code{iam}).
@@ -47,7 +47,7 @@ vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
 }
   \item{df}{
 Numerical vector containing the degrees of
-freedom for each component function (smooth). 
+freedom for each component function (smooth).
 If necessary, the vector is recycled to have length equal
 to the number of component functions to be estimated
 (\code{M} if there are no constraints), which
@@ -59,15 +59,15 @@ smooths.
 
 
 }
-  \item{spar}{ 
+  \item{spar}{
 Numerical vector containing the non-negative smoothing
-parameters for each component function (smooth). 
+parameters for each component function (smooth).
 If necessary, the vector is recycled to have length equal
 to the number of component functions to be estimated
 (\code{M} if there are no constraints), which
 equals the number of columns of the \code{x}-constraint matrix.
 A value of zero means the smooth goes through the data and hence
-is wiggly. 
+is wiggly.
 A value of \code{Inf} may be assigned, meaning the smooth will
 be linear.
 By default, the \code{NULL} value of \code{spar} means
@@ -79,7 +79,7 @@ parameters.
   \item{all.knots}{ Logical. If \code{TRUE} then each distinct
 value of \code{x} will be a knot. By default, only a subset of
 the unique values  of \code{x} are used; typically, the number
-of knots is \code{O(n^0.25)} for \code{n} large, 
+of knots is \code{O(n^0.25)} for \code{n} large,
 but if \code{n <= 40} then all the unique values of \code{x} are used.
 
 
@@ -91,14 +91,14 @@ By default, the constraint matrix for the intercepts is the
 
 
 }
-  \item{x.constraint}{ A \code{M}-row constraint matrix for \code{x}. 
+  \item{x.constraint}{ A \code{M}-row constraint matrix for \code{x}.
 It must be of full column rank.
 By default, the constraint matrix for the intercepts is the
 \code{M} by \code{M} identity matrix, meaning no constraints.
 
 
 }
-  \item{constraints}{ 
+  \item{constraints}{
 An alternative to specifying \code{i.constraint} and \code{x.constraint},
 this is a list with two components corresponding to the
 intercept and \code{x} respectively. They must both be a
@@ -106,14 +106,14 @@ intercept and \code{x} respectively. They must both be a
 
 
 }
-  \item{var.arg}{ Logical: return the pointwise variances 
+  \item{var.arg}{ Logical: return the pointwise variances
 of the fit?
 Currently, this corresponds only to the nonlinear part of the
 fit, and may be wrong.
 
 
 }
-  \item{scale.w}{ 
+  \item{scale.w}{
 Logical.
 By default, the weights \code{w} are scaled so that the
 diagonal elements have mean 1.
@@ -133,7 +133,7 @@ See \code{\link[stats]{smooth.spline}}.
 }
 }
 \details{
-  The algorithm implemented is detailed in Yee (2000). 
+  The algorithm implemented is detailed in Yee (2000).
   It involves decomposing the component functions into a linear and
   nonlinear part, and using B-splines.
   The cost of the computation is \code{O(n M^3)}.
@@ -189,12 +189,13 @@ Heidelberg: Physica-Verlag.
 
 
 \seealso{
-\code{vsmooth.spline-class},
-\code{plot.vsmooth.spline},
-\code{predict.vsmooth.spline},
-\code{iam},
-\code{\link[VGAM]{s}},
-\code{\link[stats]{smooth.spline}}.
+  \code{vsmooth.spline-class},
+  \code{plot.vsmooth.spline},
+  \code{predict.vsmooth.spline},
+  \code{iam},
+  \code{\link{sm.os}},
+  \code{\link[VGAM]{s}},
+  \code{\link[stats]{smooth.spline}}.
 
 
 }
diff --git a/man/waitakere.Rd b/man/waitakere.Rd
index e99fe19..0ba6052 100644
--- a/man/waitakere.Rd
+++ b/man/waitakere.Rd
@@ -4,7 +4,7 @@
 \title{Waitakere Ranges Data}
 \description{
   The \code{waitakere} data frame has 579 rows and 18 columns.
-  Altitude is explanatory, and there are binary responses 
+  Altitude is explanatory, and there are binary responses
   (presence/absence = 1/0 respectively) for 17 plant species.
 
 }
@@ -35,17 +35,17 @@
 \details{
   These were collected from the Waitakere Ranges, a small forest in northern
   Auckland, New Zealand. At 579 sites in the forest, the presence/absence
-  of 17 plant species was recorded, as well as the altitude. 
+  of 17 plant species was recorded, as well as the altitude.
   Each site was of area size 200\eqn{m^2}{m^2}.
 
 
 }
 \source{
-  Dr Neil Mitchell, University of Auckland. 
+  Dr Neil Mitchell, University of Auckland.
 
 }
 %\references{
-%None. 
+%None.
 %}
 \seealso{
     \code{\link{hunua}}.
diff --git a/man/waldff.Rd b/man/waldff.Rd
index cc9aa1e..b1556a8 100644
--- a/man/waldff.Rd
+++ b/man/waldff.Rd
@@ -9,19 +9,12 @@ by maximum likelihood estimation.
 
 }
 \usage{
-waldff(link.lambda = "loge", init.lambda = NULL)
+waldff(llambda = "loge", ilambda = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link.lambda}{
-  Parameter link function for the \eqn{\lambda}{lambda} parameter. 
-  See \code{\link{Links}} for more choices and general information.
-
-
-  }
-  \item{init.lambda}{
-  Initial value for the \eqn{\lambda}{lambda} parameter.
-  The default means an initial value is chosen internally.
+  \item{llambda,ilambda}{
+  See \code{\link{CommonVGAMffArguments}} for information.
 
 
   }
@@ -50,7 +43,7 @@ waldff(link.lambda = "loge", init.lambda = NULL)
 
 
 }
-\references{ 
+\references{
 
 Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
 \emph{Continuous Univariate Distributions},
@@ -69,7 +62,7 @@ New York: Wiley.
 }
 
 
-\seealso{ 
+\seealso{
   \code{\link{inv.gaussianff}},
   \code{\link{rinv.gaussian}}.
 
@@ -77,10 +70,10 @@ New York: Wiley.
 }
 \examples{
 wdata <- data.frame(y = rinv.gaussian(n = 1000, mu =  1, lambda = exp(1)))
-fit <- vglm(y ~ 1, waldff(init = 0.2), data = wdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-Coef(fit)
-summary(fit)
+wfit <- vglm(y ~ 1, waldff(ilambda = 0.2), data = wdata, trace = TRUE)
+coef(wfit, matrix = TRUE)
+Coef(wfit)
+summary(wfit)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/weibull.mean.Rd b/man/weibull.mean.Rd
index 95a33e5..ee4b407 100644
--- a/man/weibull.mean.Rd
+++ b/man/weibull.mean.Rd
@@ -19,7 +19,7 @@ weibull.mean(lmean = "loge", lshape = "loge", imean = NULL,
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lmean, lshape}{
-  Parameter link functions applied to the 
+  Parameter link functions applied to the
   (positive) mean parameter (called \eqn{mu} below) and
   (positive) shape parameter (called \eqn{a} below).
   See \code{\link{Links}} for more choices.
@@ -47,7 +47,7 @@ weibull.mean(lmean = "loge", lshape = "loge", imean = NULL,
   also having a default \code{\link{loge}} link.
 
 
-  This \pkg{VGAM} family function currently does not handle 
+  This \pkg{VGAM} family function currently does not handle
   censored data.
   Fisher scoring is used to estimate the two parameters.
   Although the expected information matrices used here are valid
diff --git a/man/weibullR.Rd b/man/weibullR.Rd
index eee48b6..0202a9d 100644
--- a/man/weibullR.Rd
+++ b/man/weibullR.Rd
@@ -18,7 +18,7 @@ weibullR(lscale = "loge", lshape = "loge",
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{lshape, lscale}{
-  Parameter link functions applied to the 
+  Parameter link functions applied to the
   (positive) shape parameter (called \eqn{a} below) and
   (positive) scale parameter (called \eqn{b} below).
   See \code{\link{Links}} for more choices.
@@ -53,11 +53,11 @@ weibullR(lscale = "loge", lshape = "loge",
   }
 }
 \details{
-  The Weibull density for a response \eqn{Y} is 
+  The Weibull density for a response \eqn{Y} is
   \deqn{f(y;a,b) = a y^{a-1} \exp[-(y/b)^a] / (b^a)}{%
         f(y;a,b) = a y^(a-1) * exp(-(y/b)^a) / [b^a]}
   for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
-  The cumulative distribution function is 
+  The cumulative distribution function is
   \deqn{F(y;a,b) = 1 - \exp[-(y/b)^a].}{%
         F(y;a,b) = 1 - exp(-(y/b)^a).}
   The mean of \eqn{Y} is \eqn{b \, \Gamma(1+ 1/a)}{b * gamma(1+ 1/a)}
@@ -70,7 +70,7 @@ weibullR(lscale = "loge", lshape = "loge",
   The hazard function is \eqn{a t^{a-1} / b^a}{a * t^(a-1) / b^a}.
 
 
-  This \pkg{VGAM} family function currently does not handle 
+  This \pkg{VGAM} family function currently does not handle
   censored data.
   Fisher scoring is used to estimate the two parameters.
   Although the expected information matrices used here are valid
@@ -198,7 +198,7 @@ Concerns about Maximum Likelihood Estimation for
   This function is under development to handle other censoring situations.
   The version of this function which will handle censored data will be
   called \code{cenweibull()}. It is currently being written and will use
-  \code{\link{SurvS4}} as input. 
+  \code{\link{SurvS4}} as input.
   It should be released in later versions of \pkg{VGAM}.
 
 
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index a81f378..927bc8a 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -121,7 +121,7 @@ weightsvglm(object, type = c("prior", "working"),
 pneumo <- transform(pneumo, let = log(exposure.time))
 (fit <- vglm(cbind(normal, mild, severe) ~ let,
              cumulative(parallel = TRUE, reverse = TRUE), data = pneumo))
-depvar(fit)  # These are sample proportions 
+depvar(fit)  # These are sample proportions
 weights(fit, type = "prior", matrix = FALSE)  # Number of observations
 
 # Look at the working residuals
@@ -131,7 +131,7 @@ M <- ncol(predict(fit))
 wwt <- weights(fit, type = "working", deriv = TRUE)  # In matrix-band format
 wz <- m2a(wwt$weights, M = M)  # In array format
 wzinv <- array(apply(wz, 3, solve), c(M, M, nn))
-wresid <- matrix(NA, nn, M)  # Working residuals 
+wresid <- matrix(NA, nn, M)  # Working residuals
 for (ii in 1:nn)
   wresid[ii, ] <- wzinv[, , ii, drop = TRUE] \%*\% wwt$deriv[ii, ]
 max(abs(c(resid(fit, type = "work")) - c(wresid)))  # Should be 0
diff --git a/man/wine.Rd b/man/wine.Rd
index 65af8f7..e19f863 100644
--- a/man/wine.Rd
+++ b/man/wine.Rd
@@ -66,14 +66,14 @@ Christensen, R. H. B. (2013)
 Analysis of ordinal data with cumulative link models---estimation
 with the R-package \pkg{ordinal}.
 R Package Version 2013.9-30.
-\url{http://cran.r-project.org/package=ordinal}.
-%\url{http://www.r-project.org/package=ordinal}.
-%\url{http://www.cran.r-project.org/package=ordinal}.  % Prior to 20150728
+\url{https://CRAN.R-project.org/package=ordinal}.
+%\url{https://www.R-project.org/package=ordinal}.
+%\url{https://www.CRAN.R-project.org/package=ordinal}.  % Prior to 20150728
 
 
 
 
-  Randall, J. H. (1989) 
+  Randall, J. H. (1989)
   The analysis of sensory data by generalized linear model.
   \emph{Biometrical Journal} \bold{31}(7), 781--793.
 
diff --git a/man/yeo.johnson.Rd b/man/yeo.johnson.Rd
index 879319e..f1952a2 100644
--- a/man/yeo.johnson.Rd
+++ b/man/yeo.johnson.Rd
@@ -4,16 +4,16 @@
 \title{Yeo-Johnson Transformation}
 \description{
   Computes the Yeo-Johnson transformation, which is a
-  normalizing transformation. 
+  normalizing transformation.
 }
 \usage{
-yeo.johnson(y, lambda, derivative = 0, 
+yeo.johnson(y, lambda, derivative = 0,
             epsilon = sqrt(.Machine$double.eps), inverse = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{y}{Numeric, a vector or matrix. }
-  \item{lambda}{Numeric. It is recycled to the same length as 
+  \item{lambda}{Numeric. It is recycled to the same length as
    \code{y} if necessary. }
   \item{derivative}{Non-negative integer. The default is
    the ordinary function evaluation, otherwise the derivative
@@ -27,7 +27,7 @@ yeo.johnson(y, lambda, derivative = 0,
 }
 \details{
   The Yeo-Johnson transformation can be thought of as an extension
-  of the Box-Cox transformation. It handles both positive and 
+  of the Box-Cox transformation. It handles both positive and
   negative values, whereas the Box-Cox transformation only handles
   positive values. Both can be used to transform the data so
   as to improve normality. They can be used to perform LMS
diff --git a/man/yip88.Rd b/man/yip88.Rd
index 4935062..f80da8c 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -3,7 +3,7 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Zero-Inflated Poisson Distribution (Yip (1988) algorithm) }
 \description{
-  Fits a zero-inflated Poisson distribution based on Yip (1988). 
+  Fits a zero-inflated Poisson distribution based on Yip (1988).
 
 }
 \usage{
@@ -16,13 +16,13 @@ yip88(link = "loge", n.arg = NULL, imethod = 1)
   See \code{\link{Links}} for more choices.
 
   }
-  \item{n.arg}{ 
+  \item{n.arg}{
    The total number of observations in the data set. Needed when the
    response variable has all the zeros deleted from it, so that the
    number of zeros can be determined.
 
   }
-  \item{imethod}{ 
+  \item{imethod}{
    Details at \code{\link{CommonVGAMffArguments}}.
 
 
@@ -37,7 +37,7 @@ yip88(link = "loge", n.arg = NULL, imethod = 1)
 
   The probability function of \eqn{Y} is 0 with probability
   \eqn{\phi}{phi}, and
-  Poisson(\eqn{\lambda}{lambda}) with probability \eqn{1-\phi}{1-phi}. 
+  Poisson(\eqn{\lambda}{lambda}) with probability \eqn{1-\phi}{1-phi}.
   Thus
   \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
         P(Y=0) = phi + (1-phi) * P(W=0)}
@@ -59,7 +59,7 @@ yip88(link = "loge", n.arg = NULL, imethod = 1)
 
 
 }
-\references{ 
+\references{
 
 
 Yip, P. (1988)
@@ -108,7 +108,7 @@ model.
 
 
 }
-\seealso{ 
+\seealso{
   \code{\link{zipoisson}},
   \code{\link{Zipois}},
   \code{\link{zapoisson}},
@@ -128,16 +128,16 @@ table(y)
 fit1 <- vglm(y ~ 1, yip88(n = length(y)), subset = y > 0)
 fit2 <- vglm(y ~ 1, yip88, trace = TRUE, crit = "coef")
 (true.mean <- (1-phi) * lambda)
-mean(y) 
+mean(y)
 head(fitted(fit1))
 fit1 at misc$pstr0  # The estimate of phi
 
-# Compare the ZIP with the positive Poisson distribution 
+# Compare the ZIP with the positive Poisson distribution
 pp <- vglm(y ~ 1, pospoisson, subset = y > 0, crit = "c")
 coef(pp)
 Coef(pp)
-coef(fit1) - coef(pp)            # Same 
-head(fitted(fit1) - fitted(pp))  # Different 
+coef(fit1) - coef(pp)            # Same
+head(fitted(fit1) - fitted(pp))  # Different
 
 # Another example (Angers and Biswas, 2003) ---------------------
 abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
diff --git a/man/yulesimon.Rd b/man/yulesimon.Rd
index d6703ee..d2fe878 100644
--- a/man/yulesimon.Rd
+++ b/man/yulesimon.Rd
@@ -3,22 +3,24 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Yule-Simon Family Function }
 \description{
-  Estimating the parameter of the Yule-Simon distribution.
+  Estimating the shape parameter of the Yule-Simon distribution.
 
 }
 \usage{
-yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
+yulesimon(lshape = "loge", ishape = NULL, nsimEIM = 200, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{
-  Link function for the \eqn{\rho}{rho} parameter.
+  \item{lshape}{
+  Link function for the shape parameter,
+  called
+  \eqn{\rho}{rho} below.
   See \code{\link{Links}} for more choices and for general information.
 
 
   }
-  \item{irho}{
-  Optional initial value for the (positive) parameter. 
+  \item{ishape}{
+  Optional initial value for the (positive) parameter.
   See \code{\link{CommonVGAMffArguments}} for more information.
   The default is to obtain an initial value internally. Use this argument
   if the default fails.
@@ -33,7 +35,7 @@ yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
 }
 \details{
     The probability function is
-    \deqn{f(y;\rho) = rho*beta(y,rho+1),}{%
+    \deqn{f(y;\rho) = \rho*beta(y,\rho+1),}{%
           f(y;rho) = rho*beta(y,rho+1),}
     where the parameter \eqn{\rho>0}{rho>0},
     \eqn{beta} is the \code{\link[base]{beta}} function,
@@ -62,7 +64,7 @@ yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
 
 
 }
-\references{ 
+\references{
 
     Simon, H. A. (1955)
     On a class of skew distribution functions.
@@ -73,7 +75,7 @@ yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
 
 }
 \author{ T. W. Yee }
-%\note{ 
+%\note{
 %}
 
 \seealso{
@@ -85,7 +87,7 @@ yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
 }
 \examples{
 ydata <- data.frame(x2 = runif(nn <- 1000))
-ydata <- transform(ydata, y = ryules(nn, rho = exp(1.5 - x2)))
+ydata <- transform(ydata, y = ryules(nn, shape = exp(1.5 - x2)))
 with(ydata, table(y))
 fit <- vglm(y ~ x2, yulesimon, data = ydata, trace = TRUE)
 coef(fit, matrix = TRUE)
@@ -106,7 +108,7 @@ summary(fit)
 %cs = cumsum(probs)
 %tab = table(cut(runif(nn), brea = c(0,cs,1)))
 %index = (1:length(tab))[tab>0]
-%y = rep(index, times=tab[index]) 
+%y = rep(index, times=tab[index])
 
 
 
diff --git a/man/yulesimonUC.Rd b/man/yulesimonUC.Rd
index 755897c..a46084c 100644
--- a/man/yulesimonUC.Rd
+++ b/man/yulesimonUC.Rd
@@ -2,7 +2,7 @@
 \alias{Yules}
 \alias{dyules}
 \alias{pyules}
-%\alias{qyules}
+\alias{qyules}
 \alias{ryules}
 \title{ Yule-Simon Distribution }
 \description{
@@ -11,29 +11,30 @@
 
 }
 \usage{
-dyules(x, rho, log = FALSE)
-pyules(q, rho, log.p = FALSE)
-ryules(n, rho)
+dyules(x, shape, log = FALSE)
+pyules(q, shape, lower.tail = TRUE, log.p = FALSE)
+qyules(p, shape)
+ryules(n, shape)
 }
-%qyules(p, rho)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{x, q}{
-   Vector of quantiles. For the density, it should be a vector
-   with positive integer values in order for the probabilities
-   to be positive.
+  \item{x, q, p, n}{
+  Same meaning as in \code{\link[stats]{Normal}}.
+%  Vector of quantiles. For the density, it should be a vector
+%  with positive integer values in order for the probabilities
+%  to be positive.
 
 
   }
 % \item{p}{vector of probabilities.}
-  \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. }
-  \item{rho}{ 
+% \item{n}{number of observations. Same as in \code{\link[stats]{runif}}. }
+  \item{shape}{
    See \code{\link{yulesimon}}.
 
 
   }
-  \item{log}{logical; if TRUE, the logarithm is returned. }
-  \item{log.p}{
+% \item{log}{logical; if TRUE, the logarithm is returned. }
+  \item{log, lower.tail, log.p}{
   Same meaning as in \code{\link[stats:Normal]{pnorm}}
   or \code{\link[stats:Normal]{qnorm}}.
 
@@ -49,24 +50,30 @@ ryules(n, rho)
 }
 \value{
   \code{dyules} gives the density,
-  \code{pyules} gives the distribution function, and
+  \code{pyules} gives the distribution function,
+  \code{qyules} gives the quantile function, and
   \code{ryules} generates random deviates.
 
 
-% \code{qyules} gives the quantile function, and
 
 
 }
-%\references{ 
+%\references{
 %
 %}
 \author{ T. W. Yee }
-%\note{ 
-%}
+\note{
+  Numerical problems may occur with
+  \code{qyules()} when \code{p} is very close to 1.
+
+
+
+}
 
 \seealso{
   \code{\link{yulesimon}}.
 
+
 }
 \examples{
 dyules(1:20, 2.1)
@@ -76,6 +83,6 @@ round(1000 * dyules(1:8, 2))
 table(ryules(1000, 2))
 
 \dontrun{ x <- 0:6
-plot(x, dyules(x, rho = 2.2), type = "h", las = 1, col = "blue") }
+plot(x, dyules(x, shape = 2.2), type = "h", las = 1, col = "blue") }
 }
 \keyword{distribution}
diff --git a/man/zabinomUC.Rd b/man/zabinomUC.Rd
index abbee13..b619360 100644
--- a/man/zabinomUC.Rd
+++ b/man/zabinomUC.Rd
@@ -21,7 +21,7 @@ rzabinom(n, size, prob, pobs0 = 0)
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. 
+  \item{n}{number of observations.
   If \code{length(n) > 1} then the length is taken to be the number required.
   }
   \item{size, prob, log}{
@@ -54,14 +54,14 @@ rzabinom(n, size, prob, pobs0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
 
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zibinomial}},
     \code{\link{rposbinom}}.
 
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
index 1cddeb0..cf0d4ed 100644
--- a/man/zabinomial.Rd
+++ b/man/zabinomial.Rd
@@ -19,13 +19,13 @@ zabinomialff(lprob = "logit", lonempobs0 = "logit",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lprob}{ 
+  \item{lprob}{
     Parameter link function applied to the probability parameter
     of the binomial distribution.
     See \code{\link{Links}} for more choices.
 
   }
-  \item{lpobs0}{ 
+  \item{lpobs0}{
     Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here.
     See \code{\link{Links}} for more choices.
 
@@ -38,7 +38,7 @@ zabinomialff(lprob = "logit", lonempobs0 = "logit",
 
   }
 
-  \item{iprob, ipobs0}{ 
+  \item{iprob, ipobs0}{
   See
   \code{\link{CommonVGAMffArguments}}.
 
@@ -70,7 +70,7 @@ zabinomialff(lprob = "logit", lonempobs0 = "logit",
   has zeros coming from one source, whereas the latter
   has zeros coming from the binomial distribution too. The
   zero-inflated binomial distribution is implemented in
-  \code{\link{zibinomial}}. 
+  \code{\link{zibinomial}}.
   Some people call the zero-altered binomial a \emph{hurdle} model.
 
 
@@ -107,7 +107,7 @@ zabinomialff(lprob = "logit", lonempobs0 = "logit",
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted}, returns
-  the mean \eqn{\mu}{mu} (default) which is given by 
+  the mean \eqn{\mu}{mu} (default) which is given by
   \deqn{\mu = (1-p_0) \mu_{b} / [1 - (1 - \mu_{b})^N]}{%
          mu = (1-pobs0) * mub / [1 - (1 - mub)^N]}
   where \eqn{\mu_{b}}{mub} is the usual binomial mean.
diff --git a/man/zageomUC.Rd b/man/zageomUC.Rd
index e21e100..9e3e832 100644
--- a/man/zageomUC.Rd
+++ b/man/zageomUC.Rd
@@ -21,7 +21,7 @@ rzageom(n, prob, pobs0 = 0)
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. 
+  \item{n}{number of observations.
   If \code{length(n) > 1} then the length is taken to be the number required.
   }
   \item{prob, log}{
@@ -52,13 +52,13 @@ rzageom(n, prob, pobs0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zageometric}},
     \code{\link{zigeometric}},
     \code{\link{rposgeom}}.
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
index f43ded2..c185907 100644
--- a/man/zageometric.Rd
+++ b/man/zageometric.Rd
@@ -20,13 +20,13 @@ zageometricff(lprob = "logit", lonempobs0 = "logit",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpobs0}{ 
+  \item{lpobs0}{
     Link function for the parameter \eqn{p_0}{pobs0} or \eqn{\phi}{phi},
     called \code{pobs0} or \code{phi} here.
     See \code{\link{Links}} for more choices.
 
   }
-  \item{lprob}{ 
+  \item{lprob}{
     Parameter link function applied to the probability of success,
     called \code{prob}
     or \eqn{p}.
@@ -48,7 +48,7 @@ zageometricff(lprob = "logit", lonempobs0 = "logit",
 %           epobs0 = list(), eprob = list(),
 % }
 
-  \item{ipobs0, iprob}{ 
+  \item{ipobs0, iprob}{
     Optional initial values for the parameters.
     If given, they must be in range.
     For multi-column responses, these are recycled sideways.
@@ -85,7 +85,7 @@ zageometricff(lprob = "logit", lonempobs0 = "logit",
 
   The input can be a matrix (multiple responses).
   By default, the two linear/additive predictors
-  of \code{zageometric} 
+  of \code{zageometric}
   are \eqn{(logit(\phi), logit(p))^T}{(logit(phi), logit(prob))^T}.
 
 
@@ -115,7 +115,7 @@ zageometricff(lprob = "logit", lonempobs0 = "logit",
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted}, returns
-  the mean \eqn{\mu}{mu} (default) which is given by 
+  the mean \eqn{\mu}{mu} (default) which is given by
   \deqn{\mu = (1-\phi) / p.}{%
          mu = (1- phi) / p.}
   If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index 8ce6b38..cbd5a3c 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -21,7 +21,7 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
-  \item{n}{number of observations. 
+  \item{n}{number of observations.
   If \code{length(n) > 1} then the length is taken to be the number required. }
   \item{size, prob, munb, log}{
   Parameters from the ordinary negative binomial distribution
@@ -54,14 +54,14 @@ rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
 
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zanegbinomial}},
     \code{\link{rposnegbin}}.
 
@@ -79,7 +79,7 @@ barplot(rbind(dzanegbin(x, munb = munb, size = size, pobs0 = pobs0),
         beside = TRUE, col = c("blue", "green"), cex.main = 0.7, las = 1,
         ylab = "Probability", names.arg = as.character(x),
         main = paste("ZANB(munb = ", munb, ", size = ", size,",
-                     pobs0 = ", pobs0, 
+                     pobs0 = ", pobs0,
                    ") [blue] vs",  " NB(mu = ", munb, ", size = ", size,
                    ") [green] densities", sep = "")) }
 }
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 0ef7f43..fc4a037 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -28,18 +28,18 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpobs0}{ 
+  \item{lpobs0}{
     Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here.
     See \code{\link{Links}} for more choices.
 
   }
-  \item{lmunb}{ 
+  \item{lmunb}{
     Link function applied to the \code{munb} parameter, which is the mean
     \eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution.
     See \code{\link{Links}} for more choices.
 
   }
-  \item{lsize}{ 
+  \item{lsize}{
     Parameter link function applied to the reciprocal of the dispersion
     parameter, called \code{k}. That is, as \code{k} increases, the
     variance of the response decreases.
@@ -71,7 +71,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
 %             epobs0 = list(),  emunb = list(), esize = list(),
 % }
 
-  \item{ipobs0, imunb, isize}{ 
+  \item{ipobs0, imunb, isize}{
     Optional initial values for \eqn{p_0}{pobs0} and \code{munb}
     and \code{k}.
     If given then it is okay to give one value
@@ -80,7 +80,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
 %   Integer valued vector, may be assigned, e.g., \eqn{-3} or \eqn{3} if
 %   the probability of an observed value is to be modelled with the
 %   covariates.
@@ -180,7 +180,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted}, returns
-  the mean \eqn{\mu}{mu} (default) which is given by 
+  the mean \eqn{\mu}{mu} (default) which is given by
   \deqn{\mu = (1-p_0) \mu_{nb} / [1 - (k/(k+\mu_{nb}))^k].}{%
          mu = (1-pobs0) * munb / [1 - (k/(k+munb))^k].}
   If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
diff --git a/man/zapoisUC.Rd b/man/zapoisUC.Rd
index b128098..b3c531f 100644
--- a/man/zapoisUC.Rd
+++ b/man/zapoisUC.Rd
@@ -49,13 +49,13 @@ rzapois(n, lambda, pobs0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
     The argument \code{pobs0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zapoisson}},
     \code{\link{dzipois}}.
 
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index caee709..cfea497 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -122,7 +122,7 @@ zapoissonff(llambda = "loge", lonempobs0 = "logit", type.fitted =
 
   The \code{fitted.values} slot of the fitted object,
   which should be extracted by the generic function \code{fitted},
-  returns the mean \eqn{\mu}{mu} (default) which is given by 
+  returns the mean \eqn{\mu}{mu} (default) which is given by
   \deqn{\mu = (1-p_0)  \lambda / [1 - \exp(-\lambda)].}{%
          mu = (1-pobs0) * lambda / [1 - exp(-lambda)].}
   If \code{type.fitted = "pobs0"} then \eqn{p_0}{pobs0} is returned.
diff --git a/man/zero.Rd b/man/zero.Rd
index 78412f1..77238da 100644
--- a/man/zero.Rd
+++ b/man/zero.Rd
@@ -14,7 +14,7 @@
 \value{
   Nothing is returned.
   It is simply a convenient argument for constraining
-  certain linear/additive predictors to be an intercept only. 
+  certain linear/additive predictors to be an intercept only.
 
 
 
@@ -27,7 +27,7 @@
   In general users
   should not assume any particular order of precedence when
   there is potential conflict of definition.
-  Currently no checking for consistency is made. 
+  Currently no checking for consistency is made.
 
 
   The argument \code{zero} may be renamed in the future to
diff --git a/man/zeta.Rd b/man/zeta.Rd
index b9a2f58..423ca04 100644
--- a/man/zeta.Rd
+++ b/man/zeta.Rd
@@ -4,27 +4,36 @@
 \title{ Riemann's Zeta Function }
 \description{
   Computes Riemann's zeta function and its first two derivatives.
+  Also can compute Hurwitz's zeta function.
 
 }
 \usage{
-zeta(x, deriv = 0)
-
+zeta(x, deriv = 0, shift = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{x}{ 
+  \item{x}{
    A complex-valued vector/matrix whose real values must be
    \eqn{\geq 1}{>= 1}. Otherwise, if \code{x} may be real.
    If \code{deriv} is 1 or 2 then \code{x} must be real and positive.
 
   }
-  \item{deriv}{ 
+  \item{deriv}{
   An integer equalling 0 or 1 or 2, which is the order of the derivative.
   The default means it is computed ordinarily.
 
   }
+  \item{shift}{
+  Positive and numeric, called \eqn{A} below.
+  Allows for the Hurwitz zeta to be returned.
+  The default corresponds to the Riemann formula.
+
+  }
 }
 \details{
+  The (Riemann) formula for real \eqn{s} is
+  \deqn{\sum_{n=1}^{\infty} 1 / n^s.}{%
+         sum_{n=1}^Inf      1 / n^s.}
   While the usual definition involves an infinite series that
   converges when the real part of the argument is \eqn{> 1},
   more efficient methods have been devised to compute the
@@ -33,13 +42,24 @@ zeta(x, deriv = 0)
   over the whole complex plane because of analytic continuation.
 
 
-  The formula used here for analytic continuation is
+
+  The (Riemann) formula used here for analytic continuation is
   \deqn{\zeta(s) = 2^s \pi^{s-1} \sin(\pi s/2) \Gamma(1-s) \zeta(1-s).}{%
         zeta(s)  = 2^s * pi^(s-1) * sin(pi*s/2) * gamma(1-s) *  zeta(1-s).}
   This is actually one of several formulas, but this one was discovered
   by Riemann himself and is called the \emph{functional equation}.
 
 
+
+  The Hurwitz zeta function for real \eqn{0 < s} is
+  \deqn{\sum_{n=0}^{\infty} 1 / (A + n)^s.}{%
+         sum_{n=0}^Inf      1 / (A + n)^s.}
+  where \eqn{0 < A} is known here as the \code{shift}.
+  Since \eqn{A=1} by default, this function will therefore return
+  Riemann's zeta function by default.
+  Currently derivatives are unavailable.
+
+
 }
 
 \section{Warning}{
@@ -50,17 +70,22 @@ zeta(x, deriv = 0)
   arguments.
 
 
+
 }
 
 \value{
-  A vector/matrix of computed values.
+  The default is a vector/matrix of computed values of Riemann's zeta
+  function.
+  If \code{shift} contains values not equal to 1, then this is
+  Hurwitz's zeta function.
+
 
 
 %  The derivative is attached as an attribute zz.
 
 
 }
-\references{ 
+\references{
 
 
 Riemann, B. (1859)
@@ -70,7 +95,7 @@ Ueber die Anzahl der Primzahlen unter einer gegebenen Grosse.
 
 Edwards, H. M. (1974)
 \emph{Riemann's Zeta Function}.
-Academic Press: New York. 
+Academic Press: New York.
 
 
 Markman, B. (1965)
@@ -88,19 +113,24 @@ New York: Dover Publications Inc.
 
 }
 \author{ T. W. Yee, with the help of Garry J. Tee. }
-\note{ 
+\note{
   Estimation of the parameter of the zeta distribution can
   be achieved with \code{\link{zetaff}}.
 
 
+
 }
 
-\seealso{ 
+\seealso{
   \code{\link{zetaff}},
+  \code{\link{oazeta}},
+  \code{\link{oizeta}},
+  \code{\link{otzeta}},
   \code{\link{lerch}},
   \code{\link[base:Special]{gamma}}.
 
 
+
 }
 \examples{
 zeta(2:10)
@@ -111,9 +141,8 @@ curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange",
 curve(zeta, 1.2,  12, add = TRUE, col = "orange")
 abline(v = 0, h = c(0, 1), lty = "dashed", col = "gray")
 
-# Close up plot:
 curve(zeta, -14, -0.4, col = "orange", main = expression({zeta}(x)))
-abline(v = 0, h = 0, lty = "dashed", col = "gray")
+abline(v = 0, h = 0, lty = "dashed", col = "gray")  # Close up plot
 
 x <- seq(0.04, 0.8, len = 100)  # Plot of the first derivative
 plot(x, zeta(x, deriv = 1), type = "l", las = 1, col = "blue",
@@ -122,8 +151,8 @@ x <- seq(1.2, 3, len = 100)
 lines(x, zeta(x, deriv = 1), col = "blue")
 abline(v = 0, h = 0, lty = "dashed", col = "gray") }
 
-zeta(2) - pi^2 / 6     # Should be zero
-zeta(4) - pi^4 / 90    # Should be zero
+zeta(2) - pi^2 / 6     # Should be 0
+zeta(4) - pi^4 / 90    # Should be 0
 zeta(6) - pi^6 / 945   # Should be 0
 zeta(8) - pi^8 / 9450  # Should be 0
 # zeta(0, deriv = 1) + 0.5 * log(2*pi)  # Should be 0
diff --git a/man/zetaUC.Rd b/man/zetaUC.Rd
index 25d694e..922c7e0 100644
--- a/man/zetaUC.Rd
+++ b/man/zetaUC.Rd
@@ -1,37 +1,48 @@
 \name{Zeta}
 \alias{Zeta}
 \alias{dzeta}
+\alias{pzeta}
+\alias{qzeta}
+\alias{rzeta}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{The Zeta Distribution }
 \description{
-  Density for the zeta distribution.
+  Density, distribution function, quantile function and random generation
+  for the zeta distribution.
 
 }
-% zz p is not a good argument name, esp. with qzeta(p, p)
 \usage{
-dzeta(x, p, log = FALSE)
+dzeta(x, shape, log = FALSE)
+pzeta(q, shape, lower.tail = TRUE)
+qzeta(p, shape)
+rzeta(n, shape)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{x}{ Numerical vector/matrix to evaluate the density. }
-  \item{p}{ The parameter \eqn{p}. This must be positive. }
-  \item{log}{
-  Logical.
-  If \code{log = TRUE} then the logarithm of the density is returned.
+  \item{x, q, p, n}{Same as \code{\link[stats]{Poisson}}. }
+  \item{shape}{ The positive shape parameter \eqn{p}. }
+  \item{lower.tail, log}{
+  Same meaning as in \code{\link[stats]{Normal}}.
 
-  }
 
+  }
 }
 \details{
   The density function of the zeta distribution is given by
-  \deqn{y^{-p-1} / \zeta(p+1)}{%
-        y^(-p-1) /  zeta(p+1)}
-  where \eqn{p>0}, \eqn{y=1,2,\ldots}, and \eqn{\zeta}{zeta} is
+  \deqn{y^{-s-1} / \zeta(s+1)}{%
+        y^(-s-1) /  zeta(s+1)}
+  where \eqn{s>0}, \eqn{y=1,2,\ldots}, and \eqn{\zeta}{zeta} is
   Riemann's zeta function.
 
+
+
 }
 \value{
-  Returns the density evaluated at \code{x}.
+  \code{dzeta} gives the density,
+  \code{pzeta} gives the distribution function,
+  \code{qzeta} gives the quantile function, and
+  \code{rzeta} generates random deviates.
+
 
 }
 \references{
@@ -43,6 +54,7 @@ Johnson N. L., Kotz S., and Balakrishnan N. (1993)
 New York: Wiley.
 
 
+
 % Lindsey, J. K. (2002zz)
 % \emph{Applied Statistical Modelling}, 2nd ed.
 % London: Chapman & Hall.zz
@@ -55,30 +67,42 @@ New York: Wiley.
 }
 \author{ T. W. Yee }
 \note{
+    \code{qzeta()} runs slower and slower as \code{shape} approaches 0
+    and \code{p} approaches 1.
     The \pkg{VGAM} family function \code{\link{zetaff}} estimates the
-    parameter \eqn{p}.
+    shape parameter \eqn{s}.
+
 
 
 }
 
-\section{Warning}{
-    This function has not been fully tested.
+%\section{Warning}{
+%    These functions have not been fully tested.
 
 
-}
+
+%}
 
 \seealso{
     \code{\link{zeta}},
-    \code{\link{zetaff}}.
+    \code{\link{zetaff}},
+    \code{\link{Oazeta}},
+    \code{\link{Oizeta}},
+    \code{\link{Otzeta}}.
 
 
 }
 
 \examples{
-dzeta(1:20, p = 2)
-\dontrun{ plot(1:6, dzeta(1:6, p = 4), type = "h", las = 1, ylab = "Probability",
-     main = "zeta probability function; black: p = 4; blue: p = 2")
-points(0.10 + 1:6, dzeta(1:6, p = 2), type = "h", col = "blue") }
+dzeta(1:20, shape = 2)
+myshape <- 0.5
+max(abs(pzeta(1:200, myshape) -
+    cumsum(1/(1:200)^(1+myshape)) / zeta(myshape+1)))  # Should be 0
+
+\dontrun{ plot(1:6, dzeta(1:6, 2), type = "h", las = 1,
+               col = "orange", ylab = "Probability",
+     main = "zeta probability function; orange: shape = 2; blue: shape = 1")
+points(0.10 + 1:6, dzeta(1:6, 1), type = "h", col = "blue") }
 }
 \keyword{distribution}
 
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
index 92cb7d8..b36b7be 100644
--- a/man/zetaff.Rd
+++ b/man/zetaff.Rd
@@ -4,13 +4,14 @@
 \title{ Zeta Distribution Family Function }
 \description{
   Estimates the parameter of the zeta distribution.
+
 }
 \usage{
-zetaff(link = "loge", ishape = NULL, gshape = exp(-3:4)/4, zero = NULL)
+zetaff(lshape = "loge", ishape = NULL, gshape = exp(-3:4)/4, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link, ishape, zero}{
+  \item{lshape, ishape, zero}{
   These arguments apply to the (positive) parameter \eqn{p}.
   See \code{\link{Links}} for more choices.
   Choosing \code{\link{loglog}} constrains \eqn{p>1}, but
@@ -26,7 +27,7 @@ zetaff(link = "loge", ishape = NULL, gshape = exp(-3:4)/4, zero = NULL)
   }
 }
 \details{
-In this long tailed distribution 
+In this long tailed distribution
 the response must be a positive integer.
 The probability function for a response \eqn{Y} is
 \deqn{P(Y=y) = 1/[y^{p+1} \zeta(p+1)],\ \ \  p>0,\ \ \  y=1,2,...}{%
@@ -42,14 +43,17 @@ The variance of \eqn{Y} is
 provided \eqn{p>2}.
 
 
+
 It appears that good initial values are needed for successful
 convergence. If convergence is not obtained, try several values
 ranging from values near 0 to values about 10 or more.
 
 
+
 Multiple responses are handled.
 
 
+
 }
 
 \value{
@@ -58,13 +62,14 @@ Multiple responses are handled.
   and \code{\link{vgam}}.
 
 
+
 }
 
 %Lindsey, J. K. (1995)
 %\emph{Modelling Frequency and Count Data}.
-%Oxford: Clarendon Press. 
+%Oxford: Clarendon Press.
 
-\references{ 
+\references{
 
 
 pp.527-- of Chapter 11 of
@@ -75,10 +80,11 @@ pp.527-- of Chapter 11 of
 
 
 Knight, K. (2000)
-\emph{Mathematical Statistics}. 
+\emph{Mathematical Statistics}.
 Boca Raton: Chapman & Hall/CRC Press.
 
 
+
 }
 \author{ T. W. Yee }
 \note{
@@ -88,8 +94,12 @@ Boca Raton: Chapman & Hall/CRC Press.
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{zeta}},
+  \code{\link{oazeta}},
+  \code{\link{oizeta}},
+  \code{\link{otzeta}},
+  \code{\link{diffzeta}},
   \code{\link{dzeta}},
   \code{\link{hzeta}},
   \code{\link{zipf}}.
@@ -98,7 +108,7 @@ Boca Raton: Chapman & Hall/CRC Press.
 }
 \examples{
 zdata <- data.frame(y = 1:5, w =  c(63, 14, 5, 1, 2))  # Knight, p.304
-fit <- vglm(y ~ 1, zetaff, data = zdata, trace = TRUE, weight = w, crit = "coef")
+fit <- vglm(y ~ 1, zetaff, data = zdata, trace = TRUE, weight = w, crit = "c")
 (phat <- Coef(fit))  # 1.682557
 with(zdata, cbind(round(dzeta(y, phat) * sum(w), 1), w))
 
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
index 1333aa4..7a801db 100644
--- a/man/zibinomUC.Rd
+++ b/man/zibinomUC.Rd
@@ -38,7 +38,7 @@ rzibinom(n, size, prob, pstr0 = 0)
 % \item{log, log.p, lower.tail}{
   \item{log}{
       Same as \code{\link[stats:Binomial]{pbinom}}.}
-  \item{pstr0}{ 
+  \item{pstr0}{
   Probability of a structural zero (i.e., ignoring the binomial distribution),
   called \eqn{\phi}{phi}.
   The default value of \eqn{\phi=0}{phi=0} corresponds to
@@ -49,7 +49,7 @@ rzibinom(n, size, prob, pstr0 = 0)
 \details{
   The probability function of \eqn{Y} is 0 with probability
   \eqn{\phi}{phi}, and \eqn{Binomial(size, prob)}{Binomial(size, prob)} with
-  probability \eqn{1-\phi}{1-phi}. Thus 
+  probability \eqn{1-\phi}{1-phi}. Thus
   \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
         P(Y=0) = phi + (1-phi) * P(W=0)}
   where \eqn{W} is
@@ -68,7 +68,7 @@ rzibinom(n, size, prob, pstr0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The argument \code{pstr0} is recycled to the required length,
   and must have values which lie in the interval \eqn{[0,1]}.
 
@@ -82,7 +82,7 @@ rzibinom(n, size, prob, pstr0 = 0)
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zibinomial}},
     \code{\link[stats:Binomial]{dbinom}}.
 
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 0a1da1f..2431c98 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -55,7 +55,7 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
   }
 
 
-% \item{zero}{ 
+% \item{zero}{
 % An integer specifying which linear/additive predictor is modelled
 % as intercepts only.  If given, the value must be either 1 or 2,
 % and the default is the first. Setting \code{zero = NULL} enables both
@@ -64,7 +64,7 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
 % See \code{\link{CommonVGAMffArguments}} for more information.
 
 % }
-  \item{multiple.responses}{ 
+  \item{multiple.responses}{
   Logical. Currently it must be \code{FALSE} to mean the
   function does not handle multiple responses. This
   is to remain compatible with the same argument in
@@ -72,7 +72,7 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
 
 
   }
-  \item{zero, imethod}{ 
+  \item{zero, imethod}{
   See \code{\link{CommonVGAMffArguments}} for information.
   Argument \code{zero} changed its default value for version 0.9-2.
 
@@ -83,7 +83,7 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
   These functions are based on
   \deqn{P(Y=0) =  \phi + (1-\phi) (1-\mu)^N,}{%
         P(Y=0) =   phi + (1- phi) * (1-prob)^N,}
-  for \eqn{y=0}, and 
+  for \eqn{y=0}, and
   \deqn{P(Y=y) = (1-\phi) {N \choose Ny} \mu^{Ny} (1-\mu)^{N(1-y)}.}{%
         P(Y=y) = (1-phi) * choose(N,Ny) * prob^(N*y) * (1-prob)^(N*(1-y)).}
   for \eqn{y=1/N,2/N,\ldots,1}. That is, the response is a sample
@@ -141,7 +141,7 @@ Fitting and interpreting occupancy models.
 \note{
   The response variable must have one of the formats described by
   \code{\link{binomialff}}, e.g., a factor or two column matrix or a
-  vector of sample proportions with the \code{weights} argument 
+  vector of sample proportions with the \code{weights} argument
   specifying the values of \eqn{N}.
 
 
@@ -165,7 +165,7 @@ Fitting and interpreting occupancy models.
 % response (only \code{multiple.responses = FALSE} can be handled).
 
 % 20130316; adding this:
-  Estimated probabilities of a structural zero and an 
+  Estimated probabilities of a structural zero and an
   observed zero are returned, as in \code{\link{zipoisson}}.
 
 
@@ -189,7 +189,7 @@ Fitting and interpreting occupancy models.
   or \code{imethod}.
 
 
-} 
+}
 
 \seealso{
   \code{\link{rzibinom}},
@@ -205,7 +205,7 @@ nn <- 200
 zdata <- data.frame(pstr0 = logit( 0, inverse = TRUE),  # 0.50
                     mubin = logit(-1, inverse = TRUE),  # Mean of usual binomial
                     sv    = rep(size, length = nn))
-zdata <- transform(zdata, 
+zdata <- transform(zdata,
                    y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
 with(zdata, table(y))
 fit <- vglm(cbind(y, sv - y) ~ 1, zibinomialff, data = zdata, trace = TRUE)
diff --git a/man/zigeomUC.Rd b/man/zigeomUC.Rd
index abfbe58..59e1bfb 100644
--- a/man/zigeomUC.Rd
+++ b/man/zigeomUC.Rd
@@ -24,7 +24,7 @@ rzigeom(n, prob, pstr0 = 0)
   \item{p}{vector of probabilities.}
   \item{prob}{see \code{\link[stats]{dgeom}}.}
   \item{n}{ Same as in \code{\link[stats]{runif}}.  }
-  \item{pstr0}{ 
+  \item{pstr0}{
   Probability of structural zero (ignoring the geometric distribution),
   called \eqn{\phi}{phi}. The default value corresponds
   to the response having an ordinary geometric distribution.
@@ -35,7 +35,7 @@ rzigeom(n, prob, pstr0 = 0)
 \details{
   The probability function of \eqn{Y} is 0 with probability
   \eqn{\phi}{phi}, and \eqn{geometric(prob)} with
-  probability \eqn{1-\phi}{1-phi}. Thus 
+  probability \eqn{1-\phi}{1-phi}. Thus
   \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
         P(Y=0) = phi + (1-phi) * P(W=0)}
   where \eqn{W} is distributed \eqn{geometric(prob)}.
@@ -52,20 +52,20 @@ rzigeom(n, prob, pstr0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
     The argument \code{pstr0} is recycled to the required length, and
     must have values which lie in the interval \eqn{[0,1]}.
 
   These functions actually allow for \emph{zero-deflation}.
   That is, the resulting probability of a zero count
   is \emph{less than} the nominal value of the parent
-  distribution. 
+  distribution.
   See \code{\link{Zipois}} for more information.
 
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zigeometric}},
     \code{\link[stats]{dgeom}}.
 
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index 842c685..dcc43ba 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -20,7 +20,7 @@ zigeometricff(lprob = "logit", lonempstr0 = "logit",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpstr0, lprob}{ 
+  \item{lpstr0, lprob}{
   Link functions for the parameters
   \eqn{\phi}{phi}
   and
@@ -62,7 +62,7 @@ zigeometricff(lprob = "logit", lonempstr0 = "logit",
 
 
   }
-  \item{zero, imethod}{ 
+  \item{zero, imethod}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
 
@@ -72,7 +72,7 @@ zigeometricff(lprob = "logit", lonempstr0 = "logit",
   Function \code{zigeometric()} is based on
   \deqn{P(Y=0) =  \phi + (1-\phi) p,}{%
         P(Y=0) =  phi + (1-phi) * prob,}
-  for \eqn{y=0}, and 
+  for \eqn{y=0}, and
   \deqn{P(Y=y) = (1-\phi) p (1 - p)^{y}.}{%
         P(Y=y) = (1-phi) * prob * (1 - prob)^y.}
   for \eqn{y=1,2,\ldots}.
@@ -140,7 +140,7 @@ zigeometricff(lprob = "logit", lonempstr0 = "logit",
 %  Half-stepping is not uncommon.
 %  If failure to converge occurs, make use of the argument \code{ipstr0}.
 %
-%} 
+%}
 
 \seealso{
   \code{\link{rzigeom}},
diff --git a/man/zinegbinUC.Rd b/man/zinegbinUC.Rd
index 5f94c6d..24590d9 100644
--- a/man/zinegbinUC.Rd
+++ b/man/zinegbinUC.Rd
@@ -32,7 +32,7 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
   \emph{component}.
 
   }
-  \item{pstr0}{ 
+  \item{pstr0}{
   Probability of structural zero
   (i.e., ignoring the negative binomial distribution),
   called \eqn{\phi}{phi}.
@@ -42,7 +42,7 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
 \details{
   The probability function of \eqn{Y} is 0 with probability
   \eqn{\phi}{phi}, and a negative binomial distribution with
-  probability \eqn{1-\phi}{1-phi}. Thus 
+  probability \eqn{1-\phi}{1-phi}. Thus
   \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
         P(Y=0) = phi + (1-phi) * P(W=0)}
   where \eqn{W} is distributed as a negative binomial distribution
@@ -64,7 +64,7 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The argument \code{pstr0} is recycled to the required
   length, and must have values which lie in the interval
   \eqn{[0,1]}.
@@ -73,13 +73,13 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
   These functions actually allow for \emph{zero-deflation}.
   That is, the resulting probability of a zero count
   is \emph{less than} the nominal value of the parent
-  distribution. 
+  distribution.
   See \code{\link{Zipois}} for more information.
 
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zinegbinomial}},
     \code{\link[stats:NegBinomial]{rnbinom}},
     \code{\link{rzipois}}.
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 9905dcd..e2c2842 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -30,7 +30,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lpstr0, lmunb, lsize}{ 
+  \item{lpstr0, lmunb, lsize}{
   Link functions for the parameters \eqn{\phi}{pstr0},
   the mean and \eqn{k}; see \code{\link{negbinomial}} for details,
   and \code{\link{Links}} for more choices.
@@ -46,7 +46,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 % }
 
   \item{type.fitted}{
-  See \code{\link{CommonVGAMffArguments}} 
+  See \code{\link{CommonVGAMffArguments}}
   and \code{\link{fittedvlm}} for more information.
 
 
@@ -79,7 +79,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   Specifies which linear/additive predictors are to be modelled
   as intercept-only.  They can be such that their absolute values are
   either 1 or 2 or 3.
@@ -90,23 +90,23 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 
 
   }
-  \item{nsimEIM}{ 
+  \item{nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
 
   }
-  \item{iprobs.y, cutoff.prob, max.support, max.chunk.MB }{ 
+  \item{iprobs.y, cutoff.prob, max.support, max.chunk.MB }{
     See \code{\link{negbinomial}}
     and/or \code{\link{posnegbinomial}} for details.
 
 
   }
-  \item{mds.min, eps.trig}{ 
+  \item{mds.min, eps.trig}{
     See \code{\link{negbinomial}} for details.
 
 
   }
-  \item{gprobs.y, gsize.mux}{ 
+  \item{gprobs.y, gsize.mux}{
     These arguments relate to grid searching in the initialization process.
     See \code{\link{negbinomial}}
     and/or \code{\link{posnegbinomial}} for details.
@@ -176,7 +176,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 
 
 % 20130316: adding this:
-  Estimated probabilities of a structural zero and an 
+  Estimated probabilities of a structural zero and an
   observed zero can be returned, as in \code{\link{zipoisson}};
   see \code{\link{fittedvlm}} for more information.
 
@@ -246,7 +246,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 
 
 
-} 
+}
 
 \seealso{
   \code{\link{Zinegbin}},
@@ -257,6 +257,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
 
 }
 \examples{
+\dontrun{
 # Example 1
 ndata <- data.frame(x2 = runif(nn <- 1000))
 ndata <- transform(ndata, pstr0 = logit(-0.5 + 1 * x2, inverse = TRUE),
@@ -265,15 +266,14 @@ ndata <- transform(ndata, pstr0 = logit(-0.5 + 1 * x2, inverse = TRUE),
 ndata <- transform(ndata,
                    y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0))
 with(ndata, table(y1)["0"] / sum(table(y1)))
-fit <- vglm(y1 ~ x2, zinegbinomial(zero = NULL), data = ndata)
-coef(fit, matrix = TRUE)
-summary(fit)
-head(cbind(fitted(fit), with(ndata, (1 - pstr0) * munb)))
-round(vcov(fit), 3)
+nfit <- vglm(y1 ~ x2, zinegbinomial(zero = NULL), data = ndata)
+coef(nfit, matrix = TRUE)
+summary(nfit)
+head(cbind(fitted(nfit), with(ndata, (1 - pstr0) * munb)))
+round(vcov(nfit), 3)
 
 
 # Example 2: RR-ZINB could also be called a COZIVGLM-ZINB-2
-\dontrun{
 ndata <- data.frame(x2 = runif(nn <- 2000))
 ndata <- transform(ndata, x3 = runif(nn))
 ndata <- transform(ndata, eta1 =          3   + 1   * x2 + 2 * x3)
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index afb4e71..0955ccd 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -15,7 +15,7 @@
 }
 \usage{
 zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
-         imu12 = NULL, iphi12 = NULL, ioratio = NULL, 
+         imu12 = NULL, iphi12 = NULL, ioratio = NULL,
          zero = c("phi12", "oratio"), tol = 0.001, addRidge = 0.001)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -78,7 +78,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
   }
   \item{addRidge}{
   Some small positive numerical value.
-  The first two diagonal elements of the working weight matrices are 
+  The first two diagonal elements of the working weight matrices are
   multiplied by \code{1+addRidge} to make it diagonally dominant,
   therefore positive-definite.
 
@@ -149,7 +149,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
 
   The default models \eqn{\eta_2}{eta2} and \eqn{\eta_3}{eta3} as
   single parameters only, but this
-  can be circumvented by setting \code{zero=NULL} in order to model the 
+  can be circumvented by setting \code{zero=NULL} in order to model the
   \eqn{\phi}{phi} and odds ratio as a function of all the explanatory
   variables.
 
@@ -168,7 +168,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
   generic function.
 
 
-} 
+}
 \section{Warning }{
   The fact that the EIM is not of full rank may mean the model is
   naturally ill-conditioned.
@@ -178,7 +178,7 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
   responses.
 
 
-} 
+}
 \references{
 
   Yee, T. W. and Dirnbock, T. (2009)
diff --git a/man/zipf.Rd b/man/zipf.Rd
index 698b5af..1c7d55e 100644
--- a/man/zipf.Rd
+++ b/man/zipf.Rd
@@ -7,7 +7,7 @@
 
 }
 \usage{
-zipf(N = NULL, link = "loge", init.s = NULL)
+zipf(N = NULL, lshape = "loge", ishape = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,13 +20,13 @@ zipf(N = NULL, link = "loge", init.s = NULL)
 
 
   }
-  \item{link}{
-  Parameter link function applied to the (positive) parameter \eqn{s}.
+  \item{lshape}{
+  Parameter link function applied to the (positive) shape parameter \eqn{s}.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{init.s}{
+  \item{ishape}{
   Optional initial value for the parameter \eqn{s}.
   The default is to choose an initial value internally.
   If converge failure occurs use this argument to input a value.
@@ -45,6 +45,7 @@ zipf(N = NULL, link = "loge", init.s = NULL)
   is the \eqn{n}th generalized harmonic number.
 
 
+
   Zipf's law is an experimental law which is often applied
   to the study of the frequency of words in a corpus of
   natural language utterances. It states that the frequency
@@ -55,6 +56,7 @@ zipf(N = NULL, link = "loge", init.s = NULL)
   Many other natural phenomena conform to Zipf's law.
 
 
+
 }
 
 \value{
@@ -65,7 +67,7 @@ zipf(N = NULL, link = "loge", init.s = NULL)
 
 }
 
-\references{ 
+\references{
 
   pp.526-- of Chapter 11 of
   Johnson N. L., Kemp, A. W. and Kotz S. (2005)
@@ -82,7 +84,7 @@ zipf(N = NULL, link = "loge", init.s = NULL)
 
 }
 
-\seealso{ 
+\seealso{
   \code{\link{dzipf}},
   \code{\link{zetaff}},
   \code{\link{simulate.vlm}}.
@@ -91,13 +93,13 @@ zipf(N = NULL, link = "loge", init.s = NULL)
 }
 \examples{
 zdata <- data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
-fit <- vglm(y ~ 1, zipf, data = zdata, trace = TRUE, weight = ofreq, crit = "coef")
-fit <- vglm(y ~ 1, zipf(link = identitylink, init = 3.4), data = zdata,
-            trace = TRUE, weight = ofreq)
-fit at misc$N
-(shat <- Coef(fit))
+zfit <- vglm(y ~ 1, zipf, data = zdata, trace = TRUE, weight = ofreq)
+zfit <- vglm(y ~ 1, zipf(lshape = "identitylink", ishape = 3.4), data = zdata,
+            trace = TRUE, weight = ofreq, crit = "coef")
+zfit at misc$N
+(shape.hat <- Coef(zfit))
 with(zdata, weighted.mean(y, ofreq))
-fitted(fit, matrix = FALSE)
+fitted(zfit, matrix = FALSE)
 }
 \keyword{models}
 \keyword{regression}
@@ -109,3 +111,7 @@ fitted(fit, matrix = FALSE)
 %2nd ed.
 %New York: Wiley.
 
+%http://www.math.uah.edu/stat/special/Zeta.html calls s 'shape'
+
+
+
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
index 0b0c45a..7216e06 100644
--- a/man/zipfUC.Rd
+++ b/man/zipfUC.Rd
@@ -2,56 +2,48 @@
 \alias{Zipf}
 \alias{dzipf}
 \alias{pzipf}
-%\alias{qzipf}
-%\alias{rzipf}
+\alias{qzipf}
+\alias{rzipf}
 \title{The Zipf Distribution}
 \description{
-  Density, and cumulative distribution function 
+  Density, distribution function, quantile function and random generation
   for the Zipf distribution.
 
 }
 \usage{
-dzipf(x, N, s, log = FALSE)
-pzipf(q, N, s, log.p = FALSE)
+dzipf(x, N, shape, log = FALSE)
+pzipf(q, N, shape, log.p = FALSE)
+qzipf(p, N, shape)
+rzipf(n, N, shape)
 }
 \arguments{
-  \item{x, q}{vector of quantiles.}
-% \item{p}{vector of probabilities.}
-% \item{n}{number of observations.
-%   Must be a positive integer of length 1.}
-  \item{N, s }{
+  \item{x, q, p, n}{Same as \code{\link[stats]{Poisson}}. }
+  \item{N, shape}{
   the number of elements, and the exponent characterizing the
   distribution.
   See \code{\link{zipf}} for more details.
 
   }
-  \item{log}{
-  Logical.
-  If \code{log = TRUE} then the logarithm of the density is returned.
-
-
-  }
-  \item{log.p}{
-  Same meaning as in \code{\link[stats:Normal]{pnorm}}
-  or \code{\link[stats:Normal]{qnorm}}.
+  \item{log, log.p}{
+  Same meaning as in \code{\link[stats]{Normal}}.
 
 
   }
 }
 \value{
-  \code{dzipf} gives the density, and
-  \code{pzipf} gives the cumulative distribution function.
-
-
-% \code{qzipf} gives the quantile function, and
-% \code{rzipf} generates random deviates.
+  \code{dzipf} gives the density,
+  \code{pzipf} gives the cumulative distribution function,
+  \code{qzipf} gives the quantile function, and
+  \code{rzipf} generates random deviates.
 
 
 }
 \author{ T. W. Yee }
 \details{
   This is a finite version of the zeta distribution.
-  See \code{\link{zipf}} for more details.
+  See \code{\link{zetaff}} for more details.
+  In general, these functions run slower and slower as \code{N} increases.
+
 
 
 }
@@ -65,13 +57,13 @@ pzipf(q, N, s, log.p = FALSE)
 
 }
 \examples{
-N <- 10; s <- 0.5; y <- 1:N
-proby <- dzipf(y, N = N, s = s)
+N <- 10; shape <- 0.5; y <- 1:N
+proby <- dzipf(y, N = N, shape = shape)
 \dontrun{ plot(proby ~ y, type = "h", col = "blue", ylab = "Probability",
-     ylim = c(0, 0.2), main = paste("Zipf(N = ",N,", s = ",s,")", sep = ""),
+     ylim = c(0, 0.2), main = paste("Zipf(N = ",N,", shape = ",shape,")", sep = ""),
      lwd = 2, las = 1) }
 sum(proby)  # Should be 1
-max(abs(cumsum(proby) - pzipf(y, N = N, s = s)))  # Should be 0
+max(abs(cumsum(proby) - pzipf(y, N = N, shape = shape)))  # Should be 0
 }
 \keyword{distribution}
 
diff --git a/man/zipoisUC.Rd b/man/zipoisUC.Rd
index 174b694..51bb3a4 100644
--- a/man/zipoisUC.Rd
+++ b/man/zipoisUC.Rd
@@ -24,7 +24,7 @@ rzipois(n, lambda, pstr0 = 0)
   \item{p}{vector of probabilities. }
   \item{n}{number of observations. Must be a single positive integer. }
   \item{lambda}{ Vector of positive means. }
-  \item{pstr0}{ 
+  \item{pstr0}{
   Probability of a structural zero
   (i.e., ignoring the Poisson distribution),
   called \eqn{\phi}{phi}.
@@ -39,7 +39,7 @@ rzipois(n, lambda, pstr0 = 0)
 \details{
   The probability function of \eqn{Y} is 0 with probability
   \eqn{\phi}{phi}, and \eqn{Poisson(\lambda)}{Poisson(lambda)} with
-  probability \eqn{1-\phi}{1-phi}. Thus 
+  probability \eqn{1-\phi}{1-phi}. Thus
   \deqn{P(Y=0) =\phi + (1-\phi)  P(W=0)}{%
         P(Y=0) = phi + (1-phi) * P(W=0)}
   where \eqn{W} is distributed \eqn{Poisson(\lambda)}{Poisson(lambda)}.
@@ -56,7 +56,7 @@ rzipois(n, lambda, pstr0 = 0)
 }
 %\references{ }
 \author{ T. W. Yee }
-\note{ 
+\note{
   The argument \code{pstr0} is recycled to the required length, and
   must have values which lie in the interval \eqn{[0,1]}.
 
@@ -74,7 +74,7 @@ rzipois(n, lambda, pstr0 = 0)
 
 }
 
-\seealso{ 
+\seealso{
     \code{\link{zipoisson}},
     \code{\link[stats:Poisson]{dpois}},
     \code{\link{rzinegbin}}.
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index a536759..f7ed0ff 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -54,7 +54,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit", type.fitted =
   The estimated probability of an observed 0 is an alternative, else
   the estimated probability of a  structural 0,
   or one minus the estimated probability of a  structural 0.
-  See \code{\link{CommonVGAMffArguments}} 
+  See \code{\link{CommonVGAMffArguments}}
   and \code{\link{fittedvlm}} for more information.
 
 
@@ -72,7 +72,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit", type.fitted =
   }
   \item{ishrinkage}{
   How much shrinkage is used when initializing \eqn{\lambda}{lambda}.
-  The value must be between 0 and 1 inclusive, and 
+  The value must be between 0 and 1 inclusive, and
   a value of 0 means the individual response values are used,
   and a value of 1 means the median or mean is used.
   This argument is used in conjunction with \code{imethod}.
@@ -80,7 +80,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit", type.fitted =
 
 
   }
-  \item{zero}{ 
+  \item{zero}{
   Specifies which linear/additive predictors are to be modelled as
   intercept-only.  If given, the value can be either 1 or 2, and the
   default is none of them. Setting \code{zero = 1} makes \eqn{\phi}{phi}
@@ -232,7 +232,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit", type.fitted =
   The default for \code{zipoissonff()} is to model the
   structural zero probability as an intercept-only.
 
-} 
+}
 
 \seealso{
   \code{\link{zapoisson}},
diff --git a/man/zoabetaR.Rd b/man/zoabetaR.Rd
index 4651aa7..9fa4bc1 100644
--- a/man/zoabetaR.Rd
+++ b/man/zoabetaR.Rd
@@ -15,18 +15,18 @@ zoabetaR(lshape1 = "loge", lshape2 = "loge", lpobs0 = "logit",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape1, lshape2, lpobs0, lpobs1}{ 
+  \item{lshape1, lshape2, lpobs0, lpobs1}{
   Details at \code{\link{CommonVGAMffArguments}}.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{ishape1, ishape2}{ 
+  \item{ishape1, ishape2}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
 
   }
-  \item{trim, zero}{ 
+  \item{trim, zero}{
   Same as \code{\link{betaR}}.
 
 
@@ -65,7 +65,7 @@ zoabetaR(lshape1 = "loge", lshape2 = "loge", lpobs0 = "logit",
 
 
 }
-%\references{ 
+%\references{
 
 
 %}
@@ -74,7 +74,7 @@ zoabetaR(lshape1 = "loge", lshape2 = "loge", lpobs0 = "logit",
 %}
 
 
-\seealso{ 
+\seealso{
   \code{\link{Zoabeta}},
   \code{\link{betaR}},
   \code{\link{betaff}},
diff --git a/man/zoabetaUC.Rd b/man/zoabetaUC.Rd
index 97141f9..3285f63 100644
--- a/man/zoabetaUC.Rd
+++ b/man/zoabetaUC.Rd
@@ -22,18 +22,18 @@ rzoabeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
          tol = .Machine$double.eps)
 }
 
-                                                                                                
-\arguments{                                                                                     
-  \item{x, q, p, n}{Same as \code{\link[stats]{Beta}}. }                                                          
+
+\arguments{
+  \item{x, q, p, n}{Same as \code{\link[stats]{Beta}}. }
   \item{pobs0, pobs1}{
     vector of probabilities that 0 and 1 are observed
     (\eqn{\omega_0}{omega_0}
     and
     \eqn{\omega_1}{omega_1}).
   }
-  
+
   \item{shape1, shape2}{
-  Same as \code{\link[stats]{Beta}}.   
+  Same as \code{\link[stats]{Beta}}.
   They are called \code{a} and \code{b} in
   \code{\link[base:Special]{beta}} respectively.
 
@@ -53,8 +53,8 @@ rzoabeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
 
 }
 \value{
-  \code{dzoabeta} gives the density, 
-  \code{pzoabeta} gives the distribution function, 
+  \code{dzoabeta} gives the density,
+  \code{pzoabeta} gives the distribution function,
   \code{qzoabeta} gives the quantile, and
   \code{rzoabeta} generates random deviates.
 
@@ -73,7 +73,7 @@ rzoabeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
         omega_1 * I[1 <= y]}
   where \eqn{B(y)} is the cumulative distribution function
   of the beta distribution with the same shape parameters
-  (\code{\link[stats]{pbeta}}), 
+  (\code{\link[stats]{pbeta}}),
   \eqn{\omega_0}{omega_0} is the inflated probability at 0
   and \eqn{\omega_1}{omega_1} is the inflated probability at 1.
   The default values of \eqn{\omega_j}{omega_j} mean that these
@@ -81,7 +81,7 @@ rzoabeta(n, shape1, shape2, pobs0 = 0, pobs1 = 0,
   when only the essential arguments are inputted.
 
 
-  
+
 }
 %\note{
 %
diff --git a/src/fgam.f b/src/fgam.f
index 43d4779..91edbe1 100644
--- a/src/fgam.f
+++ b/src/fgam.f
@@ -64,7 +64,8 @@ c     higher order on top of it.
          jp1mid = 1
          do 11 j=ideriv,k
             dbiatx(j,ideriv) = dbiatx(jp1mid,1)
-   11       jp1mid = jp1mid + 1
+            jp1mid = jp1mid + 1
+   11    continue
          ideriv = ideriv - 1
          call bsplvb(t,kp1-ideriv,2,x,left,dbiatx)
    15    continue
@@ -78,13 +79,19 @@ c
       jlow = 1
       do 20 i=1,k
          do 19 j=jlow,k
-   19       a(j,i) = 0d0
+            a(j,i) = 0d0
+   19    continue
          jlow = i
-   20    a(i,i) = 1d0
+         a(i,i) = 1d0
+   20 continue
 c     at this point, a(.,j) contains the b-coeffs for the j-th of the
 c     k  b-splines of interest here.
 c
-      do 40 m=2,mhigh
+c
+c
+c 20161111: was originally
+c     do 40 m=2,mhigh
+      do 400 m=2,mhigh
          kp1mm = kp1 - m
          fkp1mm = dble(kp1mm)
          il = left
@@ -99,9 +106,11 @@ c        i .lt. j  is used.sed.
 c           the assumption that t(left).lt.t(left+1) makes denominator
 c           in  factor  nonzero.
             do 24 j=1,i
-   24          a(i,j) = (a(i,j) - a(i-1,j))*factor
+               a(i,j) = (a(i,j) - a(i-1,j))*factor
+   24       continue
             il = il - 1
-   25       i = i - 1
+            i = i - 1
+   25    continue
 c
 c        for i=1,...,k, combine b-coeffs a(.,i) with b-spline values
 c        stored in dbiatx(.,m) to get value of  (m-1)st  derivative of
@@ -119,8 +128,12 @@ c  30    do 40 i=1,k
             sum = 0.
             jlow = max0(i,m)
             do 35 j=jlow,k
-   35          sum = a(j,i)*dbiatx(j,m) + sum
-   40       dbiatx(i,m) = sum
+               sum = a(j,i)*dbiatx(j,m) + sum
+   35       continue
+            dbiatx(i,m) = sum
+   40    continue
+c 20161111: twyee added this line (expanded 40  to two lines).
+  400 continue
    99                                   return
       end
       subroutine bsplvb ( t, jhigh, index, x, left, biatx )
@@ -195,7 +208,20 @@ c  superfluous additional arguments.
       data j/1/
 c     save j,deltal,deltar (valid in fortran 77)
 c
-                                        go to (10,20), index
+c
+c
+c 20161111; originally: 
+c                                       go to (10,20), index
+c See https://www.obliquity.com/computer/fortran/control.html
+      if (index .eq. 1) then
+        go to 10
+      else if (index .eq. 2) then
+        go to 20
+      end if
+c
+c
+c
+c
    10 j = 1
       biatx(1) = 1d0
       if (j .ge. jhigh)                 go to 99
@@ -207,7 +233,8 @@ c
          do 26 i=1,j
             term = biatx(i)/(deltar(i) + deltal(jp1-i))
             biatx(i) = saved + deltar(i)*term
-   26       saved = deltal(jp1-i)*term
+            saved = deltal(jp1-i)*term
+   26    continue
          biatx(jp1) = saved
          j = jp1
          if (j .lt. jhigh)              go to 20
@@ -312,50 +339,65 @@ c     to t(n+k) appropriately.
       if (imk .ge. 0)                   go to 8
       jcmin = 1 - imk
       do 5 j=1,i
-    5    dm(j) = x - t(i+1-j)
+         dm(j) = x - t(i+1-j)
+    5 continue
       do 6 j=i,km1
          aj(k-j) = 0.
-    6    dm(j) = dm(i)
+         dm(j) = dm(i)
+    6 continue
                                         go to 10
     8 do 9 j=1,km1
-    9    dm(j) = x - t(i+1-j)
+         dm(j) = x - t(i+1-j)
+    9 continue
 c
    10 jcmax = k
       nmi = n - i
       if (nmi .ge. 0)                   go to 18
       jcmax = k + nmi
       do 15 j=1,jcmax
-   15    dp(j) = t(i+j) - x
+         dp(j) = t(i+j) - x
+   15 continue
       do 16 j=jcmax,km1
          aj(j+1) = 0.
-   16    dp(j) = dp(jcmax)
+         dp(j) = dp(jcmax)
+   16 continue
                                         go to 20
    18 do 19 j=1,km1
-   19    dp(j) = t(i+j) - x
+         dp(j) = t(i+j) - x
+   19 continue
 c
    20 do 21 jc=jcmin,jcmax
-   21    aj(jc) = bcoef(imk + jc)
+         aj(jc) = bcoef(imk + jc)
+   21 continue
 c
 c               *** difference the coefficients  jderiv  times.
       if (jderiv .eq. 0)                go to 30
-      do 23 j=1,jderiv
+c 20161111; was:
+c     do 23 j=1,jderiv
+      do 233 j=1,jderiv
          kmj = k-j
          fkmj = dble(kmj)
          ilo = kmj
          do 23 jj=1,kmj
             aj(jj) = ((aj(jj+1) - aj(jj))/(dm(ilo) + dp(jj)))*fkmj
-   23       ilo = ilo - 1
+            ilo = ilo - 1
+   23    continue
+  233 continue
 c
 c  *** compute value at  x  in (t(i),t(i+1)) of jderiv-th derivative,
 c     given its relevant b-spline coeffs in aj(1),...,aj(k-jderiv).
    30 if (jderiv .eq. km1)              go to 39
       jdrvp1 = jderiv + 1
-      do 33 j=jdrvp1,km1
+c 20161111: was:
+c     do 33 j=jdrvp1,km1
+      do 34 j=jdrvp1,km1
          kmj = k-j
          ilo = kmj
          do 33 jj=1,kmj
             aj(jj) = (aj(jj+1)*dm(ilo) + aj(jj)*dp(jj))/(dm(ilo)+dp(jj))
-   33       ilo = ilo - 1
+            ilo = ilo - 1
+   33    continue
+   34 continue
    39 bvalue = aj(1)
 c
    99                                   return
diff --git a/src/tyeepolygamma3.c b/src/tyeepolygamma3.c
index 0c2c11a..c2858de 100644
--- a/src/tyeepolygamma3.c
+++ b/src/tyeepolygamma3.c
@@ -22,7 +22,7 @@ void eimpnbinomspecialp(int *interceptonly, double *nrows,
 void tyee_C_vdgam1(double *xval, double *lfu2qhid, int *dvhw1ulq) {
 
 
-  double wval, series, obr6tcex, tmp1;
+  double wval, series, obr6tcex = 0.0, tmp1;
 
   *dvhw1ulq = 1;
   if (*xval <= 0.0e0) {
@@ -55,7 +55,7 @@ void tyee_C_vdgam1(double *xval, double *lfu2qhid, int *dvhw1ulq) {
 void tyee_C_vtgam1(double *xval, double *lfu2qhid, int *dvhw1ulq) {
 
 
-  double wval, series, obr6tcex, tmp1;
+  double wval, series, obr6tcex = 0.0, tmp1;
   *dvhw1ulq = 1;
 
   if (*xval <= 0.0e0) {
diff --git a/src/vdigami.f b/src/vdigami.f
index 78eb241..2243b70 100644
--- a/src/vdigami.f
+++ b/src/vdigami.f
@@ -104,8 +104,8 @@ C
       PN(4) = X * B
       S0 = PN(3) / PN(4)
       DO 31 I = 1, 4
-	DP(I) = ZERO
-	DPP(I) = ZERO
+        DP(I) = ZERO
+        DPP(I) = ZERO
    31 CONTINUE
       DP(4) = -X
 C
@@ -137,9 +137,9 @@ C
       IF (TERM .GT. TMAX) GO TO 1001
       IF (DABS(PN(5)) .LT. OFLO) GO TO 32
       DO 41 I = 1, 4
-	DP(I) = DP(I) / OFLO
-	DPP(I) = DPP(I) / OFLO
-	PN(I) = PN(I) / OFLO
+        DP(I) = DP(I) / OFLO
+        DPP(I) = DPP(I) / OFLO
+        PN(I) = PN(I) / OFLO
    41 CONTINUE
       GO TO 32
 C
diff --git a/src/veigen.f b/src/veigen.f
index 99666b3..207438e 100644
--- a/src/veigen.f
+++ b/src/veigen.f
@@ -167,7 +167,8 @@ C
       IF (N .EQ. 1) GO TO 1001
 C
       DO 100 I = 2, N
-  100 E(I-1) = E(I)
+        E(I-1) = E(I)
+  100 CONTINUE
 C
       F = 0.0D0
       TST1 = 0.0D0
@@ -201,7 +202,8 @@ C     .......... FORM SHIFT ..........
          IF (L2 .GT. N) GO TO 145
 C
          DO 140 I = L2, N
-  140    D(I) = D(I) - H
+           D(I) = D(I) - H
+  140    CONTINUE
 C
   145    F = F + H
 C     .......... QL TRANSFORMATION ..........
@@ -326,7 +328,8 @@ C
       IF (N .EQ. 1) GO TO 1001
 C
       DO 100 I = 2, N
-  100 E2(I-1) = E2(I)
+        E2(I-1) = E2(I)
+  100 CONTINUE
 C
       F = 0.0D0
       T = 0.0D0
@@ -359,7 +362,8 @@ C     .......... FORM SHIFT ..........
          H = G - D(L)
 C
          DO 140 I = L1, N
-  140    D(I) = D(I) - H
+           D(I) = D(I) - H
+  140 CONTINUE
 C
          F = F + H
 C     .......... RATIONAL QL TRANSFORMATION ..........
@@ -470,7 +474,8 @@ C     .......... FOR I=N STEP -1 UNTIL 1 DO -- ..........
          IF (L .LT. 1) GO TO 130
 C     .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) ..........
          DO 120 K = 1, L
-  120    SCALE = SCALE + DABS(D(K))
+           SCALE = SCALE + DABS(D(K))
+  120    CONTINUE
 C
          IF (SCALE .NE. 0.0D0) GO TO 140
 C
@@ -498,7 +503,8 @@ C
          IF (L .EQ. 1) GO TO 285
 C     .......... FORM A*U ..........
          DO 170 J = 1, L
-  170    E(J) = 0.0D0
+           E(J) = 0.0D0
+  170    CONTINUE
 C
          DO 240 J = 1, L
             F = D(J)
@@ -524,14 +530,16 @@ C
          H = F / (H + H)
 C     .......... FORM Q ..........
          DO 250 J = 1, L
-  250    E(J) = E(J) - H * D(J)
+           E(J) = E(J) - H * D(J)
+  250    CONTINUE
 C     .......... FORM REDUCED A ..........
          DO 280 J = 1, L
             F = D(J)
             G = E(J)
 C
             DO 260 K = J, L
-  260       A(K,J) = A(K,J) - F * E(K) - G * D(K)
+              A(K,J) = A(K,J) - F * E(K) - G * D(K)
+  260       CONTINUE
 C
   280    CONTINUE
 C
@@ -596,7 +604,8 @@ C
       DO 100 I = 1, N
 C
          DO 80 J = I, N
-   80    Z(J,I) = A(J,I)
+           Z(J,I) = A(J,I)
+   80    CONTINUE
 C
          D(I) = A(N,I)
   100 CONTINUE
@@ -611,7 +620,8 @@ C     .......... FOR I=N STEP -1 UNTIL 2 DO -- ..........
          IF (L .LT. 2) GO TO 130
 C     .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) ..........
          DO 120 K = 1, L
-  120    SCALE = SCALE + DABS(D(K))
+           SCALE = SCALE + DABS(D(K))
+  120    CONTINUE
 C
          IF (SCALE .NE. 0.0D0) GO TO 140
   130    E(I) = D(L)
@@ -636,7 +646,8 @@ C
          D(L) = F - G
 C     .......... FORM A*U ..........
          DO 170 J = 1, L
-  170    E(J) = 0.0D0
+           E(J) = 0.0D0
+  170    CONTINUE
 C
          DO 240 J = 1, L
             F = D(J)
@@ -663,14 +674,16 @@ C
          HH = F / (H + H)
 C     .......... FORM Q ..........
          DO 250 J = 1, L
-  250    E(J) = E(J) - HH * D(J)
+           E(J) = E(J) - HH * D(J)
+  250    CONTINUE
 C     .......... FORM REDUCED A ..........
          DO 280 J = 1, L
             F = D(J)
             G = E(J)
 C
             DO 260 K = J, L
-  260       Z(K,J) = Z(K,J) - F * E(K) - G * D(K)
+              Z(K,J) = Z(K,J) - F * E(K) - G * D(K)
+  260       CONTINUE
 C
             D(J) = Z(L,J)
             Z(I,J) = 0.0D0
@@ -687,20 +700,26 @@ C     .......... ACCUMULATION OF TRANSFORMATION MATRICES ..........
          IF (H .EQ. 0.0D0) GO TO 380
 C
          DO 330 K = 1, L
-  330    D(K) = Z(K,I) / H
+           D(K) = Z(K,I) / H
+  330    CONTINUE
 C
-         DO 360 J = 1, L
+         DO 3600 J = 1, L
+c 20161111; originally was:
+c        DO 360 J = 1, L
             G = 0.0D0
 C
             DO 340 K = 1, L
-  340       G = G + Z(K,I) * Z(K,J)
+              G = G + Z(K,I) * Z(K,J)
+  340       CONTINUE
 C
             DO 360 K = 1, L
                Z(K,J) = Z(K,J) - G * D(K)
-  360    CONTINUE
+  360       CONTINUE
+ 3600    CONTINUE
 C
   380    DO 400 K = 1, L
-  400    Z(K,I) = 0.0D0
+           Z(K,I) = 0.0D0
+  400    CONTINUE
 C
   500 CONTINUE
 C
diff --git a/src/vlinpack2.f b/src/vlinpack2.f
index 9dbf0ca..2566b81 100644
--- a/src/vlinpack2.f
+++ b/src/vlinpack2.f
@@ -131,8 +131,9 @@ c
 c                   phase 3.  sum is mid-range.  no scaling.
 c
       do 95 j =i,nn,incx
-      if(dabs(dx(j)) .ge. hitest) go to 100
-   95    sum = sum + dx(j)**2
+        if(dabs(dx(j)) .ge. hitest) go to 100
+        sum = sum + dx(j)**2
+   95 continue
       vdnrm2 = dsqrt( sum )
       go to 300
 c
diff --git a/src/vlinpack3.f b/src/vlinpack3.f
index 91261ba..d773e9a 100644
--- a/src/vlinpack3.f
+++ b/src/vlinpack3.f
@@ -308,8 +308,9 @@ c
 c                   phase 3.  sum is mid-range.  no scaling.
 c
       do 95 j =i,nn,incx
-      if(dabs(dx(j)) .ge. hitest) go to 100
-   95    sum = sum + dx(j)**2
+        if(dabs(dx(j)) .ge. hitest) go to 100
+        sum = sum + dx(j)**2
+   95 continue
       dnrm28 = dsqrt( sum )
       go to 300
 c
diff --git a/vignettes/categoricalVGAM.Rnw b/vignettes/categoricalVGAM.Rnw
index 8394144..79c156c 100644
--- a/vignettes/categoricalVGAM.Rnw
+++ b/vignettes/categoricalVGAM.Rnw
@@ -124,7 +124,7 @@ model and \texttt{multinom()}
 \citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
 logit model. However, both of these can be considered `one-off'
 modeling functions rather than providing a unified offering for CDA.
-The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
+The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2016}
 has greater functionality: it can fit the proportional odds model
 (and the forward continuation ratio model upon preprocessing). Neither
 \texttt{polr()} or \texttt{lrm()} appear able to fit the nonproportional
@@ -248,6 +248,9 @@ background to this article include
 \cite{agre:2010},
 \cite{agre:2013},
 \cite{fahr:tutz:2001},
+\cite{full:xu:2016},
+\cite{harr:2015},
+\cite{hens:rose:gree:2015},
 \cite{leon:2000},
 \cite{lloy:1999},
 \cite{long:1997},
@@ -1003,13 +1006,18 @@ Any $g$ from Table \ref{tab:jsscat.links} appropriate for
 a parameter $\theta \in (0,1)$ will do.
 
 
+
 A toy example where $p=p_A$ and $q=p_B$ is
-<<>>=
+
+
+<<eval=F>>=
 abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
 fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
 coef(fit, matrix = TRUE)
 Coef(fit)  # Estimated pA and pB
 @
+
+
 The function \texttt{Coef()}, which applies only to intercept-only models,
 applies to $g_{j}(\theta_{j})=\eta_{j}$
 the inverse link function $g_{j}^{-1}$ to $\widehat{\eta}_{j}$
@@ -1605,7 +1613,8 @@ backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale
 displays the six ordered categories.
 Now a rank-1 stereotype model can be fitted with
 <<>>=
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
+bp.rrmlm1 <- rrvglm(factor(pain, ordered = FALSE) ~ sx1 + sx2 + sx3,
+                    multinomial, data = backPain)
 @
 Then
 <<>>=
@@ -1642,7 +1651,8 @@ set.seed(123)
 @
 A rank-2 model fitted \textit{with a different normalization}
 <<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
+bp.rrmlm2 <- rrvglm(factor(pain, ordered = FALSE) ~ sx1 + sx2 + sx3,
+                   multinomial, data = backPain, Rank = 2,
                    Corner = FALSE, Uncor = TRUE)
 @
 produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
diff --git a/vignettes/categoricalVGAMbib.bib b/vignettes/categoricalVGAMbib.bib
index 7367aff..cc9ea9c 100644
--- a/vignettes/categoricalVGAMbib.bib
+++ b/vignettes/categoricalVGAMbib.bib
@@ -511,11 +511,11 @@ article{yee:wild:1996,
    url          = {http://CRAN.R-project.org/package=VGAM}
 }
 
- at Manual{Harrell:2009,
+ at Manual{Harrell:2016,
    title        = {\pkg{rms}: Regression Modeling Strategies},
    author       = {Frank E. {Harrell, Jr.}},
-   year         = {2009},
-   note         = {\proglang{R}~package version~2.1-0},
+   year         = {2016},
+   note         = {\proglang{R}~package version~4.5-0},
    url          = {http://CRAN.R-project.org/package=rms}
 }
 
@@ -651,3 +651,46 @@ article{yee:wild:1996,
    Address = {London},
 }
 
+
+ at book{harr:2015,
+    Author = {Harrell, F. E.},
+    Title = {Regression Modeling Strategies: With Applications to Linear Models,
+             Logistic and Ordinal Regression, and Survival Analysis},
+    Year = 2015,
+    Edition = {Second},
+    Publisher = {Springer},
+    ADDRESS = {New York, USA},
+    Pages = {582},
+}
+
+
+
+
+ at book{hens:rose:gree:2015,
+    author = {Hensher, D. A. and Rose, J. M. and Greene, W. H.},
+    title = {Applied Choice Analysis},
+    year = {2015},
+   EDITION = {Second},
+ PUBLISHER = {Cambridge University Press},
+    ADDRESS = {Cambridge, U.K.},
+     pages = {1188},
+}
+
+
+
+ at book{full:xu:2016,
+    AUTHOR = {Fullerton, A. S. and Xu, J.},
+   TITLE   = {Ordered Regression Models: Parallel, Partial,
+              and Non-Parallel Alternatives},
+      YEAR = {2016},
+ PUBLISHER = {Chapman \& Hall/CRC},
+   ADDRESS = {Boca Raton, FL, USA},
+}
+
+
+
+
+
+
+
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git



More information about the debian-science-commits mailing list