[r-cran-vgam] 39/63: Import Upstream version 0.9-5

Andreas Tille <tille at debian.org>
Tue Jan 24 13:54:36 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-vgam.

commit e969e27c47eab662c2836b826d3a234dc7acfb17
Author: Andreas Tille <tille at debian.org>
Date:   Tue Jan 24 14:17:00 2017 +0100

    Import Upstream version 0.9-5
---
 DESCRIPTION                                        |   15 +-
 MD5                                                |  600 ++---
 NAMESPACE                                          |  155 +-
 NEWS                                               |  163 +-
 R/aamethods.q                                      |   45 +-
 R/bAIC.q                                           |   10 +-
 R/calibrate.q                                      |   12 +-
 R/cao.R                                            |    2 +-
 R/cao.fit.q                                        |  157 +-
 R/cqo.fit.q                                        |    8 +-
 R/deviance.vlm.q                                   |   32 +
 R/family.actuary.R                                 |  516 ++---
 R/family.aunivariate.R                             |  354 ++-
 R/family.basics.R                                  |  178 +-
 R/family.binomial.R                                |  182 +-
 R/family.bivariate.R                               |  661 +++---
 R/family.categorical.R                             |  134 +-
 R/family.censored.R                                |  143 +-
 R/family.circular.R                                |   10 +-
 R/family.exp.R                                     |   22 +-
 R/family.extremes.R                                |  528 +++--
 R/family.functions.R                               |   32 +-
 R/family.genetic.R                                 |  457 ++--
 R/family.glmgam.R                                  |  116 +-
 R/family.loglin.R                                  |    8 +-
 R/family.math.R                                    |   33 +-
 R/family.mixture.R                                 |   19 +-
 R/family.nonlinear.R                               |    4 +-
 R/family.normal.R                                  |  274 +--
 R/family.others.R                                  |  374 ++--
 R/family.positive.R                                |   44 +-
 R/family.qreg.R                                    |  709 +++---
 R/family.quantal.R                                 |    8 +-
 R/family.rcim.R                                    |   75 +-
 R/family.rcqo.R                                    |  110 +-
 R/family.robust.R                                  |   32 +-
 R/family.rrr.R                                     |  289 +--
 R/family.sur.R                                     |   18 +-
 R/family.survival.R                                |   76 +-
 R/family.ts.R                                      |    2 +-
 R/family.univariate.R                              | 2166 ++++++++++++------
 R/family.zeroinf.R                                 |  181 +-
 R/fittedvlm.R                                      |   16 +-
 R/links.q                                          |   27 +-
 R/logLik.vlm.q                                     |   19 +-
 R/model.matrix.vglm.q                              |   74 +-
 R/nobs.R                                           |    8 +-
 R/plot.vglm.q                                      |   77 +-
 R/predict.vglm.q                                   |   82 +-
 R/qrrvglm.control.q                                |    2 +-
 R/qtplot.q                                         |   35 +-
 R/rrvglm.control.q                                 |   10 +-
 R/rrvglm.fit.q                                     |    2 +-
 R/smart.R                                          |  628 +++---
 R/summary.vglm.q                                   |   65 +-
 R/summary.vlm.q                                    |   20 +-
 R/vgam.control.q                                   |    4 +-
 R/vglm.control.q                                   |   20 +-
 R/vglm.fit.q                                       |    3 +-
 R/vsmooth.spline.q                                 |    2 +-
 build/vignette.rds                                 |  Bin 0 -> 381 bytes
 data/Huggins89.t1.rda                              |  Bin 443 -> 443 bytes
 data/Huggins89table1.rda                           |  Bin 444 -> 445 bytes
 data/alclevels.rda                                 |  Bin 549 -> 550 bytes
 data/alcoff.rda                                    |  Bin 546 -> 547 bytes
 data/auuc.rda                                      |  Bin 245 -> 246 bytes
 data/backPain.rda                                  |  Bin 474 -> 488 bytes
 data/beggs.rda                                     |  Bin 196 -> 198 bytes
 data/car.all.rda                                   |  Bin 6969 -> 6972 bytes
 data/cfibrosis.rda                                 |  Bin 264 -> 265 bytes
 data/corbet.rda                                    |  Bin 237 -> 244 bytes
 data/crashbc.rda                                   |  Bin 374 -> 374 bytes
 data/crashf.rda                                    |  Bin 340 -> 340 bytes
 data/crashi.rda                                    |  Bin 490 -> 491 bytes
 data/crashmc.rda                                   |  Bin 339 -> 385 bytes
 data/crashp.rda                                    |  Bin 375 -> 376 bytes
 data/crashtr.rda                                   |  Bin 361 -> 361 bytes
 data/deermice.rda                                  |  Bin 395 -> 394 bytes
 data/finney44.rda                                  |  Bin 209 -> 210 bytes
 data/flourbeetle.rda                               |  Bin 0 -> 344 bytes
 data/hspider.rda                                   |  Bin 1344 -> 1345 bytes
 data/lakeO.rda                                     |  Bin 335 -> 336 bytes
 data/leukemia.rda                                  |  Bin 329 -> 329 bytes
 data/marital.nz.rda                                |  Bin 10440 -> 10452 bytes
 data/melbmaxtemp.rda                               |  Bin 0 -> 4265 bytes
 data/mmt.rda                                       |  Bin 4238 -> 0 bytes
 data/pneumo.rda                                    |  Bin 267 -> 267 bytes
 data/prinia.rda                                    |  Bin 1230 -> 1229 bytes
 data/ruge.rda                                      |  Bin 257 -> 258 bytes
 data/toxop.rda                                     |  Bin 473 -> 474 bytes
 data/venice.rda                                    |  Bin 978 -> 976 bytes
 data/venice90.rda                                  |  Bin 8004 -> 8000 bytes
 data/wine.rda                                      |  Bin 269 -> 270 bytes
 inst/doc/categoricalVGAM.R                         |  278 +++
 inst/doc/categoricalVGAM.Rnw                       | 2325 ++++++++++++++++++++
 inst/doc/categoricalVGAM.pdf                       |  Bin 0 -> 735446 bytes
 man/{G1G2G3.Rd => A1A2A3.Rd}                       |   50 +-
 man/AA.Aa.aa.Rd                                    |   69 +-
 man/AB.Ab.aB.ab.Rd                                 |    9 +-
 man/AB.Ab.aB.ab2.Rd                                |   76 -
 man/ABO.Rd                                         |    7 +-
 man/AICvlm.Rd                                      |    4 +-
 man/Coef.Rd                                        |    4 +-
 man/Coef.qrrvglm-class.Rd                          |    4 +-
 man/Coef.qrrvglm.Rd                                |   19 +-
 man/CommonVGAMffArguments.Rd                       |   29 +-
 man/Links.Rd                                       |    6 +-
 man/MNSs.Rd                                        |    6 +-
 man/Max.Rd                                         |    8 +-
 man/Opt.Rd                                         |   10 +-
 man/{Pareto.Rd => ParetoUC.Rd}                     |   26 +-
 man/QvarUC.Rd                                      |   41 +-
 man/{SUR.Rd => SURff.Rd}                           |   17 +-
 man/Select.Rd                                      |   12 +-
 man/SurvS4.Rd                                      |    4 +-
 man/VGAM-package.Rd                                |    2 +-
 man/alaplace3.Rd                                   |  170 +-
 man/amlnormal.Rd                                   |   24 +-
 man/benini.Rd                                      |   34 +-
 man/beniniUC.Rd                                    |   20 +-
 man/betaII.Rd                                      |    4 +-
 man/{beta.ab.Rd => betaR.Rd}                       |   18 +-
 man/betabinomUC.Rd                                 |    4 +-
 man/betabinomial.Rd                                |   21 +-
 man/{betabinomial.ab.Rd => betabinomialff.Rd}      |   19 +-
 man/betaff.Rd                                      |    6 +-
 man/betageometric.Rd                               |    4 +-
 man/{amh.Rd => biamhcop.Rd}                        |   25 +-
 man/{amhUC.Rd => biamhcopUC.Rd}                    |   36 +-
 man/biclaytoncop.Rd                                |   15 +-
 man/biclaytoncopUC.Rd                              |   14 +-
 man/{fgm.Rd => bifgmcop.Rd}                        |   14 +-
 man/{fgmUC.Rd => bifgmcopUC.Rd}                    |   36 +-
 man/{morgenstern.Rd => bifgmexp.Rd}                |   23 +-
 man/bifrankcop.Rd                                  |    4 +-
 man/bifrankcopUC.Rd                                |   20 +-
 man/{bivgamma.mckay.Rd => bigamma.mckay.Rd}        |    4 +-
 man/{bigumbelI.Rd => bigumbelIexp.Rd}              |   29 +-
 man/{bilogis4UC.Rd => bilogisUC.Rd}                |   48 +-
 man/{bilogistic4.Rd => bilogistic.Rd}              |   16 +-
 man/binom2.or.Rd                                   |    7 +-
 man/binomialff.Rd                                  |   16 +-
 man/binormal.Rd                                    |    4 +-
 man/binormalUC.Rd                                  |    4 +-
 man/{plackett.Rd => biplackettcop.Rd}              |   17 +-
 man/{plackUC.Rd => biplackettcopUC.Rd}             |   36 +-
 man/bisa.Rd                                        |   18 +-
 man/bisaUC.Rd                                      |   14 +-
 man/bistudentt.Rd                                  |   18 +-
 man/borel.tanner.Rd                                |    6 +-
 man/bortUC.Rd                                      |    2 +-
 man/calibrate.qrrvglm.control.Rd                   |    2 +-
 man/cao.Rd                                         |    4 +-
 man/cao.control.Rd                                 |    2 +-
 man/cardioid.Rd                                    |    2 +-
 man/cauchit.Rd                                     |    6 +
 man/cauchy.Rd                                      |   10 +-
 man/{cgumbel.Rd => cens.gumbel.Rd}                 |   12 +-
 man/{cennormal.Rd => cens.normal.Rd}               |   17 +-
 man/{cenpoisson.Rd => cens.poisson.Rd}             |   16 +-
 man/concoef-methods.Rd                             |   14 +-
 man/concoef.Rd                                     |   11 +-
 man/cqo.Rd                                         |   26 +-
 man/dagum.Rd                                       |    4 +-
 man/dirichlet.Rd                                   |    7 +
 man/dirmul.old.Rd                                  |    2 +-
 man/dirmultinomial.Rd                              |   11 +-
 man/{double.cennormal.Rd => double.cens.normal.Rd} |   14 +-
 man/double.expbinomial.Rd                          |   12 +-
 man/eexpUC.Rd                                      |   14 +-
 man/enormUC.Rd                                     |   14 +-
 man/erf.Rd                                         |   15 +-
 man/erlang.Rd                                      |    9 +-
 man/eunifUC.Rd                                     |   16 +-
 man/{expexp.Rd => expexpff.Rd}                     |   62 +-
 man/{expexp1.Rd => expexpff1.Rd}                   |   44 +-
 man/expgeometric.Rd                                |    4 +-
 man/explogff.Rd                                    |    6 +-
 man/exponential.Rd                                 |   31 +-
 man/exppoisson.Rd                                  |   35 +-
 man/exppoissonUC.Rd                                |   31 +-
 man/fisk.Rd                                        |    4 +-
 man/fittedvlm.Rd                                   |   15 +-
 man/flourbeetle.Rd                                 |   63 +
 man/foldnormal.Rd                                  |    3 +-
 man/frechet.Rd                                     |   14 +-
 man/frechetUC.Rd                                   |    4 +-
 man/gamma1.Rd                                      |    6 +-
 man/gamma2.Rd                                      |   41 +-
 man/{gamma2.ab.Rd => gammaR.Rd}                    |   77 +-
 man/{gammahyp.Rd => gammahyperbola.Rd}             |   15 +-
 man/gaussianff.Rd                                  |    2 +-
 man/genbetaII.Rd                                   |    4 +-
 man/gengamma.Rd                                    |   37 +-
 man/gengammaUC.Rd                                  |   38 +-
 man/genpoisson.Rd                                  |    4 +-
 man/genrayleigh.Rd                                 |   31 +-
 man/genrayleighUC.Rd                               |   24 +-
 man/get.smart.Rd                                   |   12 +-
 man/gev.Rd                                         |   34 +-
 man/gew.Rd                                         |    5 +-
 man/gompertz.Rd                                    |   13 +-
 man/gompertzUC.Rd                                  |   27 +-
 man/gpd.Rd                                         |   61 +-
 man/gpdUC.Rd                                       |    3 +-
 man/grc.Rd                                         |   12 +-
 man/gumbel.Rd                                      |    2 +-
 man/gumbelII.Rd                                    |   52 +-
 man/gumbelIIUC.Rd                                  |   24 +-
 man/hormone.Rd                                     |    8 +-
 man/huberUC.Rd                                     |    4 +-
 man/hypersecant.Rd                                 |   20 +-
 man/{invbinomial.Rd => inv.binomial.Rd}            |   14 +-
 man/inv.gaussianff.Rd                              |    4 +-
 man/{invlomax.Rd => inv.lomax.Rd}                  |   24 +-
 man/{invlomaxUC.Rd => inv.lomaxUC.Rd}              |   36 +-
 man/{invparalogistic.Rd => inv.paralogistic.Rd}    |   18 +-
 ...{invparalogisticUC.Rd => inv.paralogisticUC.Rd} |   36 +-
 man/is.parallel.Rd                                 |    4 +-
 man/is.smart.Rd                                    |   18 +-
 man/levy.Rd                                        |   98 +-
 man/lgammaUC.Rd                                    |   30 +-
 man/lgammaff.Rd                                    |   34 +-
 man/lindley.Rd                                     |    2 +-
 man/lino.Rd                                        |   21 +-
 man/lms.bcg.Rd                                     |    4 +-
 man/lms.bcn.Rd                                     |   24 +-
 man/lms.yjn.Rd                                     |    6 +-
 man/logF.Rd                                        |    2 +-
 man/logistic.Rd                                    |   18 +-
 man/logit.Rd                                       |    2 +-
 man/loglaplace.Rd                                  |   14 +-
 man/lognormal.Rd                                   |  110 +-
 man/lomax.Rd                                       |    4 +-
 man/lqnorm.Rd                                      |    4 +-
 man/lvplot.qrrvglm.Rd                              |   47 +-
 man/makeham.Rd                                     |   12 +-
 man/makehamUC.Rd                                   |   24 +-
 man/matched.binomial.Rd                            |  180 --
 man/maxwell.Rd                                     |    5 +-
 man/maxwellUC.Rd                                   |   30 +-
 man/{mmt.Rd => melbmaxtemp.Rd}                     |   14 +-
 man/mix2exp.Rd                                     |    8 +-
 man/mix2poisson.Rd                                 |   18 +-
 man/{mlogit.Rd => multilogit.Rd}                   |   32 +-
 man/multinomial.Rd                                 |    2 +-
 man/nakagami.Rd                                    |   26 +-
 man/nakagamiUC.Rd                                  |   14 +-
 man/negbinomial.Rd                                 |   39 +-
 man/negbinomial.size.Rd                            |    4 +-
 man/normal.vcm.Rd                                  |   26 +-
 man/notdocumentedyet.Rd                            |   98 +-
 man/paralogistic.Rd                                |    4 +-
 man/paretoIV.Rd                                    |   48 +-
 man/paretoff.Rd                                    |   27 +-
 man/perks.Rd                                       |   21 +-
 man/perksUC.Rd                                     |    8 +-
 man/persp.qrrvglm.Rd                               |   14 +-
 man/plotrcim0.Rd                                   |   16 +-
 man/plotvgam.Rd                                    |    1 +
 man/poissonff.Rd                                   |    4 +-
 man/posbernoulli.t.Rd                              |   14 +
 man/posbernoulli.tb.Rd                             |   15 +-
 man/posnegbinomial.Rd                              |    4 +-
 man/powerlink.Rd                                   |    4 +-
 man/prats.Rd                                       |    2 +-
 man/predictqrrvglm.Rd                              |    6 +-
 man/predictvglm.Rd                                 |    6 +
 man/prentice74.Rd                                  |   10 +-
 man/put.smart.Rd                                   |   12 +-
 man/qrrvglm.control.Rd                             |    6 +-
 man/quasipoissonff.Rd                              |    2 +-
 man/qvar.Rd                                        |   10 +-
 man/rayleigh.Rd                                    |   20 +-
 man/rcqo.Rd                                        |   62 +-
 man/{recexp1.Rd => rec.exp1.Rd}                    |   12 +-
 man/{recnormal.Rd => rec.normal.Rd}                |   14 +-
 man/riceUC.Rd                                      |   70 +-
 man/riceff.Rd                                      |   11 +-
 man/rlplot.egev.Rd                                 |    9 +-
 man/rrvglm.Rd                                      |   19 +-
 man/rrvglm.control.Rd                              |   16 +-
 man/rrvglm.optim.control.Rd                        |   25 +-
 man/{koenker.Rd => sc.studentt2.Rd}                |   38 +-
 man/{koenkerUC.Rd => sc.t2UC.Rd}                   |   67 +-
 man/simplex.Rd                                     |    4 +-
 man/simulate.vlm.Rd                                |   94 +-
 man/sinmad.Rd                                      |    4 +-
 man/skewnormal.Rd                                  |    3 +
 man/smart.expression.Rd                            |   11 +-
 man/smart.mode.is.Rd                               |   15 +-
 man/studentt.Rd                                    |   13 +-
 man/tikuv.Rd                                       |    3 +-
 man/tobit.Rd                                       |    6 +-
 man/truncweibull.Rd                                |   14 +-
 man/undocumented-methods.Rd                        |   84 +-
 man/uninormal.Rd                                   |    6 +-
 man/vgam.control.Rd                                |    8 +-
 man/vglm.control.Rd                                |   23 +-
 man/vonmises.Rd                                    |    2 +-
 man/{weibull.Rd => weibullR.Rd}                    |   22 +-
 man/weightsvglm.Rd                                 |    6 +-
 man/zanegbinomial.Rd                               |    6 +-
 man/zinegbinomial.Rd                               |   10 +-
 man/zipoisson.Rd                                   |   10 +-
 vignettes/categoricalVGAM.Rnw                      | 2325 ++++++++++++++++++++
 vignettes/categoricalVGAMbib.bib                   |  653 ++++++
 307 files changed, 13754 insertions(+), 6187 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 7a5f7f3..bd634e0 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,10 +1,10 @@
 Package: VGAM
-Version: 0.9-4
-Date: 2014-05-28
+Version: 0.9-5
+Date: 2014-11-06
 Title: Vector Generalized Linear and Additive Models
 Author: Thomas W. Yee <t.yee at auckland.ac.nz>
 Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
-Depends: R (>= 3.0.0), methods, stats, splines, stats4
+Depends: R (>= 3.0.0), methods, stats, stats4, splines
 Suggests: VGAMdata, MASS
 Description: This package fits many (150+) models and
     distributions by maximum likelihood estimation (MLE)
@@ -16,12 +16,15 @@ Description: This package fits many (150+) models and
     QRR-VGLMs, RR-VGAMs and RCIMs. These include constrained
     and unconstrained quadratic ordination (CQO/UQO) models
     in ecology as well as constrained additive ordination
-    (CAO).
+    (CAO). Note that these functions are subject to change,
+    especially before version 1.0.0 is released; see the NEWS
+    file for latest changes.
 License: GPL-2
 URL: http://www.stat.auckland.ac.nz/~yee/VGAM
 NeedsCompilation: yes
+BuildVignettes: yes
 LazyLoad: yes
 LazyData: yes
-Packaged: 2014-05-28 09:38:42 UTC; tyee001
+Packaged: 2014-11-05 19:32:04 UTC; tyee001
 Repository: CRAN
-Date/Publication: 2014-05-28 18:09:36
+Date/Publication: 2014-11-06 01:01:45
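
As context for the Description field changed above (VGAM fits 150+ models,
including VGLMs and VGAMs, by maximum likelihood), a minimal R sketch of a
proportional-odds VGLM fit; it uses the pneumo data set and propodds family
listed elsewhere in this commit, and is illustrative only, not part of the
diff:

    library(VGAM)
    # Proportional-odds VGLM for ordinal pneumoconiosis severity,
    # estimated by maximum likelihood as the DESCRIPTION notes.
    fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    coef(fit, matrix = TRUE)  # coefficients laid out per linear predictor
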
diff --git a/MD5 b/MD5
index e54155d..5dd4448 100644
--- a/MD5
+++ b/MD5
@@ -1,141 +1,143 @@
 66414b6ed296192426033f4ac29a6af2 *BUGS
-65dbed4d4709192005caace0ace41093 *DESCRIPTION
-40ee4f24a8b7a7a083f93a031907066d *NAMESPACE
-c079de6fe31ca109067e66373e433ee7 *NEWS
+433bd389fd4d3ea7c7a8edb43164404a *DESCRIPTION
+80e495256dd8946f3468ec738fc98bc6 *NAMESPACE
+bb948f5823a8d0fc9bc8ac61663d207a *NEWS
 21f682d56c4fc9327d48e2179788422e *R/Links.R
-1de714ae1b807fc880805d2aee94d7a1 *R/aamethods.q
+c99f9c8830068a399945b4dbcd19db8e *R/aamethods.q
 a0675022e96fd5fb7fddac8f0a57fd30 *R/add1.vglm.q
 b2b2be3f5bab46f3400b3e0448dd1e37 *R/attrassign.R
-a4768fa044164f345867ae8f77c4b5d9 *R/bAIC.q
+c6e91684ada0ba6cadd66223b65ffb88 *R/bAIC.q
 749604f5f400577a87ba72d3776660c4 *R/build.terms.vlm.q
-7f5741c3d74a10d20d2af5a9b726ded1 *R/calibrate.q
-e6585b1cef707a15cc6f07be91c46925 *R/cao.R
-71175038e1350d91ea14061aadaaac12 *R/cao.fit.q
+62254e8579894244f5fe24914f534de1 *R/calibrate.q
+1a1baf9981c434c5e24795e241bc719b *R/cao.R
+25144d1bbfed30056f51b9c1583e7876 *R/cao.fit.q
 4dc53e3eae827b9729fb799d6d8b3d7a *R/coef.vlm.q
 c8c22372a0a69c23aeb94fddab38428e *R/cqo.R
-8d234b9f98d2e5413a9a2e177ee083bc *R/cqo.fit.q
-5e023504bbd292f65c3cb661bdd1a103 *R/deviance.vlm.q
+fbdb96d0b2e55debaf13fd2772ea6e78 *R/cqo.fit.q
+4efd72fef4a0ae5b14bd8a05377b14f7 *R/deviance.vlm.q
 3d38c5950f4f9e52085e9ba782feeb90 *R/effects.vglm.q
-dc5a9908d2aef316c0ec049cf1ec731c *R/family.actuary.R
-eaf63a235b8d77e71ef81a24ad73d501 *R/family.aunivariate.R
-6f9837f66c09de69d5e2d5e436321647 *R/family.basics.R
-2e69a109b9d2eedbc7557c6bf64d2e4e *R/family.binomial.R
-923e995bd9bb91e9a174d980b7f6aa3e *R/family.bivariate.R
-8591e4e3bc3a58624bbe2fc7535628a3 *R/family.categorical.R
-63962030a0d145780c8cee5c6b01182a *R/family.censored.R
-c3026d87fe298fc355d6246bcef9592a *R/family.circular.R
-92fd84e0156ec22a2d97ab7a64b4ae5e *R/family.exp.R
-e79c06e47264498e2efea95c3e059dcb *R/family.extremes.R
-b7be2dc5c08287d4de03813371580486 *R/family.functions.R
-1a5a57fef114fa8a723a349f9569ecc6 *R/family.genetic.R
-f6bc5e914f9cdce4cb77c73ff5ef3261 *R/family.glmgam.R
-3e9ceb976c148ea8c84a075faf4f17b1 *R/family.loglin.R
-530540365fe3dcc08df646434e4f89cf *R/family.math.R
-5857ce228880accd72948eefa5473c79 *R/family.mixture.R
-d07db31f1d78500fa771541c96ac9b56 *R/family.nonlinear.R
-5d4cf6ddae66090b22d809bcce630d2e *R/family.normal.R
-3a1a58857f0dbb061554ddbc5de6a83e *R/family.others.R
-5dca7e937d0283c3de6ac527a5f1e886 *R/family.positive.R
-0366e052ff3c037b7c34888ebb1bfcfb *R/family.qreg.R
-1872cae3106eeebab56fb5b77686c55e *R/family.quantal.R
-4138f00529378d68b3e7f132406d1238 *R/family.rcim.R
-ebcafc714199b26cbb8a3ba56ccb28bb *R/family.rcqo.R
-298a298e0df22e88b2ebc89e552771c8 *R/family.robust.R
-aad7d2bedce4ad11f05e2a16bd3fe3d4 *R/family.rrr.R
-38e15e4f9090bbf918f3fa44ea3b2284 *R/family.sur.R
-dba8d81981e93b994f26de19d1a1b6f8 *R/family.survival.R
-36448d9640c9dcea8568c4c1ed4c1ea9 *R/family.ts.R
-5c134fb9f96a959466c1ae44eddccf07 *R/family.univariate.R
+f7c7f03bc4324676171f90c5635b2ad1 *R/family.actuary.R
+f7ae3a664396d372c36c9291d1349238 *R/family.aunivariate.R
+968e7fe2052ece7ad00a01b2b13270bf *R/family.basics.R
+d83ff1a3505017d355b06bf1ffd14d16 *R/family.binomial.R
+15c543df3e3f471cd6a944827b9977d6 *R/family.bivariate.R
+f41eb715c5b57c1fb662416db358808a *R/family.categorical.R
+380d5e67774cb7fe9751119c27e6510b *R/family.censored.R
+1f475904b76686075b7022bb972a9464 *R/family.circular.R
+d46827c37e9fb9ba5d17afb1b4bb8359 *R/family.exp.R
+c85261079319b92e74b49c310d73c9aa *R/family.extremes.R
+a18a6ff13edf752a0c48498c559a16be *R/family.functions.R
+86505922fc0f9dfba086820523c6aef1 *R/family.genetic.R
+70345321a3bf8553aed029b758fef4e9 *R/family.glmgam.R
+cf4a5403b58f1f1f8ff459488f5b0843 *R/family.loglin.R
+f10cb3d2544ed342015c9a7ed74191b2 *R/family.math.R
+0de0891b2e1603fd5eed723f3984bc50 *R/family.mixture.R
+7a559228e6eddad64acd3f662a579b74 *R/family.nonlinear.R
+94d97559eb73c68676ef5cc6cda108d8 *R/family.normal.R
+b7e285f97e1c7aa401b71485a639ec4b *R/family.others.R
+e765d07abcd78f51028896740e4cb33b *R/family.positive.R
+fc390b75cf9c8f6c882b905968d2d645 *R/family.qreg.R
+c05609977f1289db83e83a11f19302d4 *R/family.quantal.R
+3d2ce925abe6b8dcd367759b115d7486 *R/family.rcim.R
+b333e00eb110446c92c2edad114072fd *R/family.rcqo.R
+80d6bdca76c8a853483497dc403e971a *R/family.robust.R
+d0167179094f7ec535370d2d88d01adf *R/family.rrr.R
+6aaa42bed6ddb4f4429fac5d4ffbf7d9 *R/family.sur.R
+76b17657c66fa8405996ccdeeb20d82c *R/family.survival.R
+b32cbe9457bc213c65a6e937e2892b01 *R/family.ts.R
+d6c62ada6a6960782bb7da092c1d5188 *R/family.univariate.R
 23e25d09aed81048919b15e35d2a3fdf *R/family.vglm.R
-8fb138a98bd398259a8ea78f96b4fede *R/family.zeroinf.R
-10d7ae4e806309cb6c8b5d541e227585 *R/fittedvlm.R
+46ceb5189af72b4644c08d872eb5bfeb *R/family.zeroinf.R
+e77bfa0f3a2e6a802a308611f84b75c1 *R/fittedvlm.R
 e0f39e9a543e616c179f53c153ada17b *R/formula.vlm.q
 66dceb0aa1906a6358ddf5840e5c3a10 *R/generic.q
-3532a47fb5dc1f75a48281dd64bdf0f4 *R/links.q
-fa5db320fd8b18c7810e228a13730571 *R/logLik.vlm.q
+bdc951d12740944bbb70b27d98511f25 *R/links.q
+a111fc4dd1dbd7280c07009277eec304 *R/logLik.vlm.q
 9b3ae4a5273f991e6ac2feac29320343 *R/lrwaldtest.R
-b4b3fdae6ee99caee6d9185213148740 *R/model.matrix.vglm.q
+8a1d472a05bbe4a0fbcbea6969ef9ae1 *R/model.matrix.vglm.q
 6aa9138e0343b812d000949720fa3036 *R/mux.q
-b294455eb2982638f52c28176249218a *R/nobs.R
-3538974f12ad64b4fd1b439b4dd0a24c *R/plot.vglm.q
+26d9b0e5861ac9ecb838253d7fa89aa7 *R/nobs.R
+f3eeccb2f0f1740637f48308484ddf80 *R/plot.vglm.q
 d3050d098e945c23a2617410e4698b9a *R/predict.vgam.q
-040d0f0864c562edb80d01199011caed *R/predict.vglm.q
+b503ba5f6eb50d39b773c79d7e57a2d8 *R/predict.vglm.q
 1685c6757a7ddf887d842bfdcf66bff9 *R/predict.vlm.q
 6b6c07d978349b4c2dd82d1e3768f763 *R/print.vglm.q
 2a6435e29721cdb571796c0f2b2ba2f4 *R/print.vlm.q
-a70ae668b6cfda0ecbbab74d4e0f8e17 *R/qrrvglm.control.q
-4081d6213a8454f5d847ee2981f75ce1 *R/qtplot.q
+6b18d42adf25ab762686f45111fc9908 *R/qrrvglm.control.q
+13edf9b27deeec4720ce3c63805c0826 *R/qtplot.q
 cd95e96c163efedcf7dc452b5b9b80aa *R/residuals.vlm.q
 26fdc28282fb9f20f338f878e2078edb *R/rrvglm.R
-63aafad9e90d5ec0e7b35efb3eea4d2a *R/rrvglm.control.q
-ce0b32d9486058bca0cdff470484978e *R/rrvglm.fit.q
+ee1d3fe8e9731d47aab09577d16f296d *R/rrvglm.control.q
+7c9d06ff0cd0f5c02ae0f2d7e96f9ed6 *R/rrvglm.fit.q
 4d6331d1b7f7e9f49511ac68ed7ab830 *R/s.q
 8818a393944e9aacf3ca58907a9e0b8a *R/s.vam.q
 400c72e71f4820b58f292523029c6245 *R/simulate.vglm.R
-e3733f10e243f1a9f433d1a3bebfac14 *R/smart.R
+277ba59aa1a252dbfb97c6ca24e95b66 *R/smart.R
 40b65c32c98ed7fe00459f089182209f *R/step.vglm.q
 df48678099b0d4b643d22d0a25adc5f1 *R/summary.vgam.q
-daef9a10964e0ee69648fcd61e134c67 *R/summary.vglm.q
-35d715ccd0ceac184efe1e85c949af0e *R/summary.vlm.q
+02d08e22bbacafdecfb36cf581a04ccb *R/summary.vglm.q
+da2b6c528168d9b72212223ffa9d151a *R/summary.vlm.q
 7053fc5a348fa10962cf86faa0cd6be5 *R/vgam.R
-43f5ac8e4ce129c2cfd060f2a720e89d *R/vgam.control.q
+aee3a2ac9b9b36985e8f8c3d15709590 *R/vgam.control.q
 2aa25abd6c64976065b527635b5ce52a *R/vgam.fit.q
 58bb89dc29480d468782ac4e410716de *R/vgam.match.q
 fd0eeed4746bd415316290c34d907f2d *R/vglm.R
-8cb747a68c70f0f98bc2117a64bd6047 *R/vglm.control.q
-9adecdd26db8de0856aba98cdbb305cc *R/vglm.fit.q
+fa14b37834baabbffd171057e27a607c *R/vglm.control.q
+046141a6bfaa4a1787a3a22f9faab14f *R/vglm.fit.q
 0d279e4ac54a18c3b86931b97b9cb686 *R/vlm.R
 19455ed547e314ec5996c798587d2442 *R/vlm.wfit.q
-5369622d6ffaaf8209d41d5df861284e *R/vsmooth.spline.q
-23322d92942d7d395dbe248845f7ff27 *data/Huggins89.t1.rda
-6a596d2bb591e0593405ed1a6b8bb4bf *data/Huggins89table1.rda
+128ebb1abdd41656f5662b00a16600cc *R/vsmooth.spline.q
+fccfbabb1be99d6b15eb5e5449d1b66e *build/vignette.rds
+2cdabbff91d4f47a58705b2fff199298 *data/Huggins89.t1.rda
+3faa9073b7ae52defc01fde39527c39a *data/Huggins89table1.rda
 d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
-bab76494dc8067695f3b634016765a65 *data/alclevels.rda
-c02f13a9cda10a0a0ff58a74ba7b7a84 *data/alcoff.rda
-12ded1bc1c4eb2c470c1667b520f032d *data/auuc.rda
-e762d480945696788738c174b84147c1 *data/backPain.rda
+941fce2d969b7deed268fe512bc1bf64 *data/alclevels.rda
+5d2d8a0d2e992a2772e31175a9647b64 *data/alcoff.rda
+1a1c61f5e5286fb5dd35e317d1bef268 *data/auuc.rda
+912079f646e2b98ef4055a5c043e04c3 *data/backPain.rda
 4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
-3b8b6009d5fbce815d622c05678c496f *data/beggs.rda
+027577fedb0d18b412e1148516500a53 *data/beggs.rda
 e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-52d2f9cab55848f2dbc0f469b9c0ef94 *data/car.all.rda
-23f6d39f731945a533c86c4b77f4660e *data/cfibrosis.rda
+c10d000ab6edd6d7b8fffde46371a8af *data/car.all.rda
+e521415c5ee7141dbf97fd5101c20047 *data/cfibrosis.rda
 b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
 4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
 3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-da6d3150cb16a66be3063b773a476e3e *data/corbet.rda
-d906323b58926c6b77d1ec90bf94b29e *data/crashbc.rda
-52ee69e2dad45519e4c4b093b91faefb *data/crashf.rda
-c7d8935806f8efa80b49beb66f57c777 *data/crashi.rda
-a2acb5f23d3791ddcc2c47dc80c686cf *data/crashmc.rda
-327758f943701d91882b2f99f6214174 *data/crashp.rda
-f9ac084246f904d2522c66bf3bf74f9a *data/crashtr.rda
-592253084c8e27fd39293e71235ab6c7 *data/deermice.rda
+eed6cd50d7aaef10522b1085fec41c11 *data/corbet.rda
+890f464c7e30620440ac65c265ac9b40 *data/crashbc.rda
+7cf569633b8dafb9a4f86b134bd7947a *data/crashf.rda
+145640d0af2b70d71607392c2767c53e *data/crashi.rda
+4774a3d5c2f62eb0beb038462196c53a *data/crashmc.rda
+1d3c4a6ebff20d079a6a3ed3c6fbdc74 *data/crashp.rda
+c6df5decc6ce502fecc236c65248eede *data/crashtr.rda
+2360553382387ee92888f6ada418d819 *data/deermice.rda
 08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-b9969e0c972a3af06128eb2566b35c3e *data/finney44.rda
+67e2d5489a51805dcb70a8ed17113be1 *data/finney44.rda
+3f07cf57e178c098bb51d3bd9d8d00d5 *data/flourbeetle.rda
 3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
 bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
 9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-dfc26b76841c27a6c6fca69fb137f555 *data/hspider.rda
+9b109ac6270bf7de6eca8ae8108a3fde *data/hspider.rda
 dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-1f3caa03946758feb9a0cd344e9e7d89 *data/lakeO.rda
-a988992fe21c5ef19588440bc2e65fd5 *data/leukemia.rda
+84961eee0908ed3ae95bccadf04134e8 *data/lakeO.rda
+4072c9dc17de0e500971b56398f26429 *data/leukemia.rda
 aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
 7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
-951cc829bda3b03ba29a3dca4a55e51d *data/marital.nz.rda
-9b957fa754a4289a4f46dd182289ac58 *data/mmt.rda
+9b042f23117458dcb00cfe37fc77c232 *data/marital.nz.rda
+3253d715eb3ed463af03fb54f365ef13 *data/melbmaxtemp.rda
 56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
 fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
 3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-e1a792d5a43fba44f13bd72fc3252c25 *data/pneumo.rda
+b34cc9fc94e2f742156da5bd52dfd14d *data/pneumo.rda
 0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
-e7742492a1d4b0ea36ef08c475a96332 *data/prinia.rda
-1189583668fac01318e26539ecdc52e2 *data/ruge.rda
-d4e79e6a83e94ce43ea81c00a5475427 *data/toxop.rda
+ece7a33214e3d99d751a7bc0b320e79e *data/prinia.rda
+691569018c9e20b0ec911f9306f5b407 *data/ruge.rda
+e244f038a340e4613875602c82aac0f8 *data/toxop.rda
 1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-8fbebe25dcb4bd9ff9fe14e3604fef31 *data/venice.rda
-2210f364ad19eff32bba9423b4a593d2 *data/venice90.rda
+1ce9f1e51fb745d0e13b98f6cb74e121 *data/venice.rda
+755a35ccd4c237fac6ef2ae5c0a0e914 *data/venice90.rda
 e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-1fa8460cb7624658da0488af0f43a273 *data/wine.rda
+7b2f5d6ccd5cf362280cdb3b9998f91a *data/wine.rda
 81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
 9327dcfa4015cf47172717bac166f353 *demo/binom2.or.R
 b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
@@ -144,106 +146,113 @@ b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
 ab8081763fe2144558be25f3a154327b *demo/vgam.R
 65570d10948785994d70d817f574bd96 *demo/zipoisson.R
 60616e1e78fe61c1fd4acdf0d3129747 *inst/CITATION
-387dc3b872f48144d7dc5fdabb9b15c2 *man/AA.Aa.aa.Rd
-48010689a7ea309ce7068a78cb826bcc *man/AB.Ab.aB.ab.Rd
-a84233bee5f8105875949d2f47887852 *man/AB.Ab.aB.ab2.Rd
-d81a6dce7eb60dd61bcbfa4a9bffa05c *man/ABO.Rd
-61aea504f08590115f6a1c76298dec26 *man/AICvlm.Rd
+4ff0e35d38b3c5bb38f1f7232b9af863 *inst/doc/categoricalVGAM.R
+bfa11dbdbff271fb20342560f2bacd53 *inst/doc/categoricalVGAM.Rnw
+832bec013a1fc295ca49f4d927f35d21 *inst/doc/categoricalVGAM.pdf
+5ecb530e834d36b923e5167e587e5301 *man/A1A2A3.Rd
+c0d1e33c2b490cfa5d2bfcf15d8df7b4 *man/AA.Aa.aa.Rd
+26a120083d1d9d77ac0a5193d0c186b9 *man/AB.Ab.aB.ab.Rd
+e1d0ae13a5a827f23b54e5ba209ddb40 *man/ABO.Rd
+38647708600610216a454c61450810ff *man/AICvlm.Rd
 0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
-2dda55df0947c86b4614e2d722efb713 *man/Coef.Rd
-a07c068d1608d195ee2ba4c2ce44377d *man/Coef.qrrvglm-class.Rd
-713e4545f026c38ada6a3aafc709cf6b *man/Coef.qrrvglm.Rd
+32daae0afb71eae3cdeefc042f4241c6 *man/Coef.Rd
+7b7ad4188c687ac8361fa1176697ce88 *man/Coef.qrrvglm-class.Rd
+77ac83a0f65139c47a0427516552193d *man/Coef.qrrvglm.Rd
 a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
 4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
 9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-9b251bc5d8dcddc426408cfca23bc493 *man/CommonVGAMffArguments.Rd
-392487b64b2b3b65bc466df62b150270 *man/G1G2G3.Rd
+d174c63ffcca8c67e21f3d0726a71eda *man/CommonVGAMffArguments.Rd
 098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
 ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
-3025bd52b3bc055c7f5939bc21b28d3b *man/Links.Rd
-30328145767a8cd2fff97506d5a2e9f7 *man/MNSs.Rd
-a730679155e139e134b186e7852c1ef9 *man/Max.Rd
-76bbf26207744bec8c21ae1d71701071 *man/Opt.Rd
-624e0666b195bc9596e0869aa35823cc *man/Pareto.Rd
-ee7e9a7ef0ce310b8fd8286a1ffd56d7 *man/QvarUC.Rd
+744e8c69d6102c5fca0dba602ce4dde2 *man/Links.Rd
+e53a7b5f977320e9a2b3cfba16e097ee *man/MNSs.Rd
+5ddd860d2b28b025dbf94b80062e3fc6 *man/Max.Rd
+00dce9ac476270fc8ce02ea1e75de191 *man/Opt.Rd
+63969bd45c758aaf8d5547861f0908e7 *man/ParetoUC.Rd
+9012ad8444a0b750e3155cd43d8965bc *man/QvarUC.Rd
 bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
-becc3fe17f46d3c92c2b81da5499da83 *man/SUR.Rd
-021e7a60b6ba9058acf6f7e996a49596 *man/Select.Rd
+24c765969bd96b0a0b4f301c2c2dfdb5 *man/SURff.Rd
+685985b08b4668ae66206e9d72170b45 *man/Select.Rd
 20a760cb2a7468d974d2de5c88d870e3 *man/SurvS4-class.Rd
-47d9bbe45fe9a53bdd10db39feee0bb0 *man/SurvS4.Rd
+6ed5239b716d4aaef069b66f248503f0 *man/SurvS4.Rd
 21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
 6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
-4e83b546f545fa216bc9c4a29e8f4495 *man/VGAM-package.Rd
+af55f5a996cd8f44d8ec443481a0ea5e *man/VGAM-package.Rd
 f27b784569a22f080ff1ded6d9bbd17a *man/acat.Rd
-c8865faa06424eb7ead0f3cf4efbbc57 *man/alaplace3.Rd
+b346a61c9c3965d8ca97f3c98d9cacc0 *man/alaplace3.Rd
 573cdf092fc48b9b1c1f10e9af6b0fe5 *man/alaplaceUC.Rd
-5386dd7d0ed806b21fe9626c92cfd068 *man/amh.Rd
-f10fff7b5d459f0325e70423488dde18 *man/amhUC.Rd
 8e181f4f03b718c6c9825ea3b6c4b8d6 *man/amlbinomial.Rd
 f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
-a9b52ea0ee41c27fdb259aa21a621582 *man/amlnormal.Rd
+cf9c3d4f8799980be2f9e965eb809b42 *man/amlnormal.Rd
 ec213548ebb41e47b727541566160dfb *man/amlpoisson.Rd
 9f1ddcb0af49daaec702a1284341d778 *man/auuc.Rd
 c8efe93df8799ff106b6784e1bf50597 *man/auxposbernoulli.t.Rd
 bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
 6ac5a3f07851ac3f7e19eaa977365e0f *man/beggs.Rd
 80c65642cf41be59e4b49be5d05d93f2 *man/benfUC.Rd
-3ff1c71b6f613cdb990ef0183db42c1b *man/benini.Rd
-d970a382e22015a5542a5d2bbe289688 *man/beniniUC.Rd
-ef17305a57f7c7208e749af0acab93b0 *man/beta.ab.Rd
-a9e33b0592552305e3501095b322ee9a *man/betaII.Rd
-39e052506d2f0fe3b733ae4340ae38eb *man/betabinomUC.Rd
-ab6c7d6b7483845ec535b9e764ddb218 *man/betabinomial.Rd
-5b51a37e2d0db141cdf62c4effbed59a *man/betabinomial.ab.Rd
-4fbf0280f00212d01fff554e0d86c4b5 *man/betaff.Rd
+afa1ccbe6dd6e769dc1bbbc5702148dd *man/benini.Rd
+c36237b73998bac0f19a3983cdb1df85 *man/beniniUC.Rd
+f4cabec88ec30505db5785b1aaf1eb48 *man/betaII.Rd
+d27525262d9c6975b15a77219afeb362 *man/betaR.Rd
+6d202361c5c1981d29c597fd716050f0 *man/betabinomUC.Rd
+bbb0ddef9113d1b8d1e036ac66f9bb87 *man/betabinomial.Rd
+481a382185943fa003bfe9c09ec4459c *man/betabinomialff.Rd
+581c39d3abaefd4d1a67e2e92ae1d925 *man/betaff.Rd
 4b590ee6208b2f3025109b82c1f6d67c *man/betageomUC.Rd
-8a730685525c85f22045f7de14896c4b *man/betageometric.Rd
+725a8c9d8b4a9facb0c3cb815d75266b *man/betageometric.Rd
 151cdf70cb16f8095369b88093ba48c7 *man/betanormUC.Rd
 5a0a047bcd18649d5076999057bd1d49 *man/betaprime.Rd
-0ab04f3892c3b98eb2c914bf8043afb2 *man/biclaytoncop.Rd
-94e05525dff5548fadbcd6efad58b086 *man/biclaytoncopUC.Rd
-729cbe9de5f560c300006b548f164d1f *man/bifrankcop.Rd
-f96df0cd8d773d5152f39cf2fb12608c *man/bifrankcopUC.Rd
-44aa896474dda679aee6f833c9fb8062 *man/bigumbelI.Rd
-adddf7bb27d9517288660180b4240058 *man/bilogis4UC.Rd
-f5eddde2e045ba31b3d37ad7785fdff6 *man/bilogistic4.Rd
-72ce4d6755354bf6082b82891a16fa5d *man/binom2.or.Rd
+f41bc1b37620bca37ba4d2f16fdae05d *man/biamhcop.Rd
+495e32601db2c4f22462811e27436c9d *man/biamhcopUC.Rd
+003ba5eb60e8e27f6c9a022ae1e336d1 *man/biclaytoncop.Rd
+f1afe1e3f5c95a262b998521408ede24 *man/biclaytoncopUC.Rd
+b25a2fadd7cdb9601aa3022d25265b30 *man/bifgmcop.Rd
+595e8849f5a49dbc321b446260206302 *man/bifgmcopUC.Rd
+57536bc44454e58eb293b928919c92ca *man/bifgmexp.Rd
+5e0bc6b73af5b7a56805a2f7600a439d *man/bifrankcop.Rd
+74f7aef8cea308cceec97a3f1e54d68a *man/bifrankcopUC.Rd
+3996c974a214c0d706d20d820a9a1fa0 *man/bigamma.mckay.Rd
+7a1c045834b0bd9de92a4aa97f52ab3c *man/bigumbelIexp.Rd
+ffcbfc72f334094f6dfd4842ab522e96 *man/bilogisUC.Rd
+cd241d3985e2b0dcf817f19417406596 *man/bilogistic.Rd
+cebfba7c59c17329f50eb34c40c0b810 *man/binom2.or.Rd
 dff1f2e8b34e8ebdfa7d090436dd5dbd *man/binom2.orUC.Rd
 a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
 a784926c9e5eb31b3ba4f40c1202fca3 *man/binom2.rhoUC.Rd
-83d059c09800ddf81edaf22f3557a039 *man/binomialff.Rd
-7bddfc42ae50a6b5a86e40e7d3f78cf0 *man/binormal.Rd
-4fdf8e186c66f5627ae9b8681cb72ae4 *man/binormalUC.Rd
+3a1ba0a046fd6c1147c675f0b87e4ddb *man/binomialff.Rd
+53f8bc3da41aabe202d80304f2f84b63 *man/binormal.Rd
+3e2bebdf7d5db7a0c7960d6b6f1597b5 *man/binormalUC.Rd
 ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
 9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
+1d943aad478481e7bf4c4b1a9540706c *man/biplackettcop.Rd
+5e390ad2827ba53d631c5ef84ba64fda *man/biplackettcopUC.Rd
 bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-00a210fc4a1bf5bf21f6da4f63dad66d *man/bisa.Rd
-8104993144f45c1fbe49da814cb05a41 *man/bisaUC.Rd
-18ab34ad46a2437bf0bcc89957164418 *man/bistudentt.Rd
+4b35070bbd74b15afd585110514a55f7 *man/bisa.Rd
+9901ef6bbaed14ee55eda08dc810867e *man/bisaUC.Rd
+f0816002d3fb698dbc17a6e55d91c18f *man/bistudentt.Rd
 0489e2ceeed7b2aaf9cbcf6cfcabae81 *man/bistudenttUC.Rd
-2a8c550e3daf4b2874de434bf2bf6004 *man/bivgamma.mckay.Rd
 81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
-2458f06d80b410c2c41b2ea691e1a668 *man/borel.tanner.Rd
-a7c85af5f86dd3cb74a1cb87bdbad789 *man/bortUC.Rd
+214e2f5b25156e937a5af65d1e6e1b58 *man/borel.tanner.Rd
+a25a019943aa0d82d35d6c46ec726c67 *man/bortUC.Rd
 b727c9787c7fcfe1e3dc19f92f6a4cb1 *man/brat.Rd
 4b158e93b6c981f016ed121e987c50b7 *man/bratUC.Rd
 5ee1485749d235a2d1aa1be8849accc7 *man/bratt.Rd
 f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
 b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
 22f73cce0070ea9bb785567af837e14f *man/calibrate.qrrvglm.Rd
-abeec828fcb66694c75bcefed43a70c1 *man/calibrate.qrrvglm.control.Rd
-d5906e548cdac74caa0e9b8ffacb2f59 *man/cao.Rd
-43460f676389d6a89d57d646ef83f314 *man/cao.control.Rd
+22e9a881f2f077f7e01e1dde9043dc7d *man/calibrate.qrrvglm.control.Rd
+8a71703f9846bdda282e59f67832e941 *man/cao.Rd
+4005c8bdb2b1a2e7d0ff5f1a800f4224 *man/cao.control.Rd
 af70e01bb01bebbc1d06e309d8ec6ba5 *man/cardUC.Rd
-3abb66a3286726574ccc6ac96fa5d3cb *man/cardioid.Rd
-bfe6f5beb1de5e92cbf788afff8c4022 *man/cauchit.Rd
-6bea5a8d5ead4312b2f878fc9e6e8e84 *man/cauchy.Rd
+7d96d29fad17cf0d10564c04f00c3ecb *man/cardioid.Rd
+f4674b1787a58c87fbabdb369dc8a1ca *man/cauchit.Rd
+d361f0253fb328f70a716c09fd597fdc *man/cauchy.Rd
 9035d92ae411d748c08d35086d5d3be1 *man/cdf.lmscreg.Rd
-656cad836c7b8f0257f2e87422779d87 *man/cennormal.Rd
-affb84486f73c830ef44682c99919bb7 *man/cenpoisson.Rd
+6c41f48884c2e92fa7842266d02a5a6d *man/cens.gumbel.Rd
+f96d45016bcca1b72249a3548520a2cf *man/cens.normal.Rd
+d5293110487b396f767fbd2224774b58 *man/cens.poisson.Rd
 94e6c5ea5488d93e0400ce9675e4d692 *man/cfibrosis.Rd
 a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
-64eef4f31abc80ff9f0daf23e6866d1d *man/cgumbel.Rd
 1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
 922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
 9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
@@ -251,113 +260,110 @@ aff05a422130d8ced689190eec1b09dd *man/clo.Rd
 66677ed162d3e368ad0f330c49467a25 *man/cloglog.Rd
 b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
 e492f5f148514df05cc4bf101b7505e2 *man/coefvlm.Rd
-8a8b05c233949dd6095d4d11ff31326a *man/concoef-methods.Rd
-364060a0f3177a3d5144cdd677e0f55a *man/concoef.Rd
+1409b01c52bad85c87e9740fb003699a *man/concoef-methods.Rd
+e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
 e9cef803313f5a964f99b76995dd235f *man/constraints.Rd
 523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
-7ac196f506705ff2ed574b37fffbab9e *man/cqo.Rd
+d90f189cfb8abe5e452f220b59c8ab3d *man/cqo.Rd
 8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
 ca3db2c26abb8120651e1d179ac6fbb3 *man/cratio.Rd
 d7fe2dd88f14e6c9a3bc2fc1f7f2211a *man/cumulative.Rd
-9b4269d1f89f9cd1181012d53d1d3631 *man/dagum.Rd
+36ffbf6456fae216d7c2eb26ee4c81bf *man/dagum.Rd
 97868e30408a4a35750f9692f5e87b68 *man/dagumUC.Rd
 8fa6a29bde444a45be31b3d8979afc00 *man/deermice.Rd
 dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
 0e0f2e7368fa906e837d8432bb3cfb36 *man/depvar.Rd
 bffbb780b54bd3c8c76cf546ec87e4a0 *man/df.residual.Rd
-63207ab9225a1e84c8a6efadc8fb4302 *man/dirichlet.Rd
-07eb43ee6da403b89b19ff55406ab130 *man/dirmul.old.Rd
-342522859f8dcb3c621738ee45bd3f7b *man/dirmultinomial.Rd
-534db08b8488159ac057e6c4cde5cff4 *man/double.cennormal.Rd
-0f57c4635e0faf9485cf7e208098ce66 *man/double.expbinomial.Rd
-f8f3e5bb715d519d3c99cc94c81bae93 *man/eexpUC.Rd
-8271f348d0cfbd2765ae2e86c745ba2a *man/enormUC.Rd
+2e4c60a120c5a942ba0c0efe8037ae5b *man/dirichlet.Rd
+6ea8579fe8a75bec917b2c26019c9e0a *man/dirmul.old.Rd
+a9d177e01da25b52fe69fb04950436b3 *man/dirmultinomial.Rd
+ed927db10e5cf69502d5485f300a9aa7 *man/double.cens.normal.Rd
+8a470177087d891a5c58e512acc0133f *man/double.expbinomial.Rd
+9c2ddeb18b92c9c5db1c7126f8abb21a *man/eexpUC.Rd
+64ff48f7c2f32c485bd3c523f0263421 *man/enormUC.Rd
 ca3e766bd344902d3b8bf05c65d6c12b *man/enzyme.Rd
-b733cc1da9bd902ea8903b9a53cf9bba *man/erf.Rd
-5f6cdc1e6862241215ae5b340dde4825 *man/erlang.Rd
-a41abeaa6bc1cb826199b1bfdeed8427 *man/eunifUC.Rd
-fddd62bd1da33ed1d01a8f67b5160efd *man/expexp.Rd
-996fe6f72ef5c7097c4677153ddfce4e *man/expexp1.Rd
-779c6a5aff218b1b3daf8bd86bcd671e *man/expgeometric.Rd
+980efa41e75a65ef1c0a8ccf943f6398 *man/erf.Rd
+bce699d9d485230ad940142978689709 *man/erlang.Rd
+537ee9a86645761b5e71629458fa9edb *man/eunifUC.Rd
+cb83f77886603d8f133964c227915d08 *man/expexpff.Rd
+772ca8da2a38dbc5a2ffcb2138f91368 *man/expexpff1.Rd
+eccfa33017118bc7314ef168695a595e *man/expgeometric.Rd
 f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
 93cc460d2fd8c787aa6feaf5347f1685 *man/expint.Rd
 59e10a79028eef76da5bdc868e6bb38e *man/explink.Rd
 89ce96662b931aa17182192618085ed0 *man/explogUC.Rd
-f2c881a921ae32035e8d41699faa7969 *man/explogff.Rd
-55891c7c57998f785aa4a277338aafc2 *man/exponential.Rd
-8ba1a5f581e370f49e5b91e12f90e42e *man/exppoisson.Rd
-2bfab14d29e3df39995627cfed355e85 *man/exppoissonUC.Rd
+e51211ad603eeecbe72cd7f6db0e76e0 *man/explogff.Rd
+4e490ef9e08ab74a3af274a720a988d3 *man/exponential.Rd
+f3cca02f31b091259c7a8cf690f93148 *man/exppoisson.Rd
+51ab1a41f49477bb283fe56d97cdbcf6 *man/exppoissonUC.Rd
 9a0ac8c5f8e7cc3d5fe05e1f937944ed *man/felix.Rd
 c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
 09fc6553edb037bc708396a30fe3c8f2 *man/fff.Rd
-539720cd34a0ad024848602974a5fc63 *man/fgm.Rd
-194983ad64cdcf165c37e8f48fed1db8 *man/fgmUC.Rd
 741f6474d688a5bc6ed61042d9a12eb6 *man/fill.Rd
 b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
 5fd279ebc2d6ec3df74557cdca6940c0 *man/fisherz.Rd
-3581b41c402484cad5bf602a04c0c497 *man/fisk.Rd
+1dd130b6f110b2d9ab9bbe8f7439ac08 *man/fisk.Rd
 8215ca60f756bf8f9f2e3b404741fbd7 *man/fiskUC.Rd
-5cb189f9d314ea1057b801c5eb2bfe71 *man/fittedvlm.Rd
+c75d3ae0a8669fed4a71f54b8be64266 *man/fittedvlm.Rd
+742b72298fd6b2ca944812681ad625a6 *man/flourbeetle.Rd
 cd73efab4c3e718d1a77a603eb5e341c *man/foldnormUC.Rd
-055879697fc53566c542526623fcc08f *man/foldnormal.Rd
-ccf7154b56d6f4fc317fa1e4007eb2a2 *man/frechet.Rd
-9a20f21cc479ec153a62b3cbba264fed *man/frechetUC.Rd
+3909f1a56c381d71501b6fde8d6647fe *man/foldnormal.Rd
+502ed9669d36b41697fc44a30165e4e2 *man/frechet.Rd
+537fb4f91167bddf5e76d732b9c4ad38 *man/frechetUC.Rd
 cad07bc11ec21b13ecdbc3b93ec8efc0 *man/freund61.Rd
 17c995a0692e2f600397ade32fcd6399 *man/fsqrt.Rd
-e894daa1763db143c10b7eb052ce19b0 *man/gamma1.Rd
-90368535c67e0169e92b84e4700f51cc *man/gamma2.Rd
-7c16404e8a5091a8f08869d8e39a22e2 *man/gamma2.ab.Rd
-1bec36703078a92dcc710f799742e253 *man/gammahyp.Rd
+c4aea59df1932e36cd6fb2ec38110e6d *man/gamma1.Rd
+6b32b9c30d5243afb42c0e403e70f842 *man/gamma2.Rd
+c173815d95bd553fa952911bd2ca71aa *man/gammaR.Rd
+3558584dfba54663dc4de34e21cc9aa9 *man/gammahyperbola.Rd
 edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
-2907a13f1f68692ce6e621131fa0d35e *man/gaussianff.Rd
-dc2223631aac6c39212ee87fbac5a3c6 *man/genbetaII.Rd
-a51b1b6a73ff92fcec1b212e2ad71de2 *man/gengamma.Rd
-795f7e16b356cea3db6294b6ed430b91 *man/gengammaUC.Rd
-b44fdd5d441068c7443b4022d6158ec7 *man/genpoisson.Rd
-ca65498360cbe30840cfa4c9d931fb3b *man/genrayleigh.Rd
-5193d3fe8ab3e3a790247fd93a2c513c *man/genrayleighUC.Rd
+e0fdd50e95e43075ac79c911f05c0b61 *man/gaussianff.Rd
+a3a18ab32413faddd08a064dc1a07d9b *man/genbetaII.Rd
+59fb27b205e8ff10daca7d8d37a5d3f1 *man/gengamma.Rd
+d0abe9afa3720e0e5460565474b70544 *man/gengammaUC.Rd
+efe7d101e0303a53133b5b2dfcc21c94 *man/genpoisson.Rd
+15429ac99e67921a77cb78e47210d7fc *man/genrayleigh.Rd
+65c6a7b53c50b4e20c9c9b2acfec6d0a *man/genrayleighUC.Rd
 94c6189883bf1848735e23156e25cdc0 *man/geometric.Rd
-ee09405e381f088c31edde7c524c7f61 *man/get.smart.Rd
+ea16a72ebd8739cd2133e91fd9c92662 *man/get.smart.Rd
 d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
-c89eadbed89ac2c589d03fe3bb3964bc *man/gev.Rd
+333a904359456c8b2e0d8054aa6ae3a7 *man/gev.Rd
 838c81d8d6c94f4f3ae49df0b25d1cfa *man/gevUC.Rd
-f87241a6011f5f5a49921a1842a177ed *man/gew.Rd
+fd070015282f2cca2b0a4b8200822551 *man/gew.Rd
 711704243b30d0270d3ac2a51e2768a8 *man/golf.Rd
-a0ce2419f0f16af5872c1b9b0eccedb3 *man/gompertz.Rd
-a521f6b84e19a2acd6080cdd01a538a3 *man/gompertzUC.Rd
-1533f3b411ceb3b9e7d55af00fab8e93 *man/gpd.Rd
-54b49cf2e3ba865dc7c9297948036d9a *man/gpdUC.Rd
+5e388a2ffa43825bb7ff93d9344385e2 *man/gompertz.Rd
+0aacf742d8e7601645fa2911b5ab006e *man/gompertzUC.Rd
+4d6b5b18dc48d7884f978c040d2ac4cd *man/gpd.Rd
+abb05712cc0126954637a4aeacc603e2 *man/gpdUC.Rd
 7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
-0f4c8edd387b2f1334b9cccf09c209e9 *man/grc.Rd
-98ee8f7cc9da2e8288ae3545b2cb745c *man/gumbel.Rd
-72a533d779f90f1d43c6eb61e8f7f8e2 *man/gumbelII.Rd
-09d6b3c902029eeda151ea7408436746 *man/gumbelIIUC.Rd
+87ec862c14d795b891259f1e4af22946 *man/grc.Rd
+00bd52370e6b9e28b1ec106c6ecb2b09 *man/gumbel.Rd
+bd6be76e82363793b9186e55d0e35bd0 *man/gumbelII.Rd
+f2d0c51e632d7e98d9be50a4a4fac4f2 *man/gumbelIIUC.Rd
 6e8fe2f3bce2e1f173f97fcd5f25d38d *man/gumbelUC.Rd
 fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
 d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
-7d01681e24795448b9d0639c5f1b05c5 *man/hormone.Rd
+1fcc98c5f0e2cc306ef01b7367f3acf8 *man/hormone.Rd
 8ef9d44522eaef45b284b7f98794d48b *man/hspider.Rd
 f4fc4645d2d190ef9b82cce1ee8b29d2 *man/huber.Rd
-ea67b113e21bbe6197fff2358cb47179 *man/huberUC.Rd
+8dce67314ab9d642694d267ea911c6f4 *man/huberUC.Rd
 d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
 592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
-aa2659c9ddd6b4323d23e4a8c8536026 *man/hypersecant.Rd
+77a4c2eb25f5db0e5fe8fb885de5bf38 *man/hypersecant.Rd
 2bf15af91bb331e94b94dd69050589c0 *man/hzeta.Rd
 db89dbd9462622d0e70f1648fd4ccfcd *man/hzetaUC.Rd
 c4b8cf96eae282e0746bf8126231a7f5 *man/iam.Rd
 c2796439b1c32144c3a1ffcbd7f6da72 *man/identitylink.Rd
-4fdddd75421773a580c9b319001a7c33 *man/inv.gaussianff.Rd
-225d88d9e072069bc74598a5fa35c2d6 *man/invbinomial.Rd
-80cb134d920e53979d9126c98013772a *man/invlomax.Rd
-f8b2a040f409d90fcc51c6b673f21d16 *man/invlomaxUC.Rd
-19f7f87e98e991b8bce976c92ece2a13 *man/invparalogistic.Rd
-a0cccd34cb51df34d82db073c574ec97 *man/invparalogisticUC.Rd
-9479a4710c3d24d98e2f7aacf460c0f2 *man/is.parallel.Rd
-2527fdc9fb684c48b4003ea6546f6029 *man/is.smart.Rd
+5a54823ff9d06736a8616aa6642a3b50 *man/inv.binomial.Rd
+745b6c5557776c23bed67b268f03f432 *man/inv.gaussianff.Rd
+ef005dcdf1e63aa98280b927adcb7820 *man/inv.lomax.Rd
+16fab00f1fdf4a2ec604edc74245d10d *man/inv.lomaxUC.Rd
+cdcbd3ab8696b74085b082ec0296377a *man/inv.paralogistic.Rd
+a72ba9b4b12830c8ea31a64a33949d20 *man/inv.paralogisticUC.Rd
+a501c3d3de4a744a0e0cdbc0673b543d *man/is.parallel.Rd
+e68a1f19e55cd95da21eec0b119c0ad8 *man/is.smart.Rd
 1b33dcd08e9f444146fb7fe03a425add *man/is.zero.Rd
 5cf973ee22fcfd1442e61458a9d91ce9 *man/kendall.tau.Rd
-586e77c2987689b1983f03b269901296 *man/koenker.Rd
-47bca557052f9620a8bfb73e48801b95 *man/koenkerUC.Rd
 149c759079151bd06084810c29f6c72c *man/kumar.Rd
 2e07c2e87f84e59aac2c1d4d6d7a3789 *man/kumarUC.Rd
 1bcedd3ac3a0c7467e5dee8ba1de9ace *man/lakeO.Rd
@@ -368,167 +374,165 @@ e80a85ec4d067a1549cc8249666f75c2 *man/laplace.Rd
 2cd5151baff29f9d8dd996dc48293301 *man/leipnik.Rd
 2e88465ad75446bbbccf208661193a8c *man/lerch.Rd
 8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
-1fc675bb94504679ecd167636c4daf71 *man/levy.Rd
-0c6b5e56369b01507cef3729eac6290c *man/lgammaUC.Rd
-2ddc0f62bcc1db94e566aacd3e932427 *man/lgammaff.Rd
+632c83ea2a7b229a64a4679f9fa6b52f *man/levy.Rd
+18ae3dfb75762026bf20b93849bd3b89 *man/lgammaUC.Rd
+745ab1fea005b7572910ae5919111054 *man/lgammaff.Rd
 22cc8bb5e5ce47158dc867012db7c9c5 *man/lindUC.Rd
-7cfa64df25fe6f4b732b5a1ed0178be6 *man/lindley.Rd
-8d88640bdf9d18e6b356e491bf1e94d7 *man/lino.Rd
+271536a592dedaff73d9cde20c844d76 *man/lindley.Rd
+20873e71a07de6b42d07fc6e0008ea05 *man/lino.Rd
 8a4a3a1cc12bdb111c6de98ec1c45e9f *man/linoUC.Rd
 b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
-5a9126c71990d5fec145c2d50ad5a2df *man/lms.bcg.Rd
-61db2b9d962515f51747d89186b0a261 *man/lms.bcn.Rd
-614541de8e2c01e8600536c85090c00c *man/lms.yjn.Rd
+1ecc473854215d5c5209ea54ad206370 *man/lms.bcg.Rd
+194627e9dc632ec82df59b116971582a *man/lms.bcn.Rd
+eea220ccf6de89caf996cf8edf346064 *man/lms.yjn.Rd
 20824c03fc9d40f749ca42d60805124d *man/log1pexp.Rd
-edd3e0869b059c33a01e3a2860e6feb8 *man/logF.Rd
+34cbd6bc583c55d2acd79a46a66e064e *man/logF.Rd
 06a1ce6e6f01fca7e7037eabc6cf3dad *man/logF.UC.Rd
 9f80bd504e1c75b0c7b29b3449cf7362 *man/logLikvlm.Rd
 f840f8e85c2092093673d6805cd21dc8 *man/logUC.Rd
 e956c4aae749e9034b7cf7fdf8661a64 *man/logc.Rd
 8c871e5697ed43662cd313fc777c2bcd *man/loge.Rd
 20cc0c73ee555790179879533cb526f7 *man/logff.Rd
-ca29f90ff7b7c4fed8b19781f7fc745b *man/logistic.Rd
-c1c9415c6f05f8e8d3e6aee71f7ea967 *man/logit.Rd
+12d3a7e35301ecb632191ccf31a63296 *man/logistic.Rd
+bb38e0972a038145ee81a2b28dea5d75 *man/logit.Rd
 15e03c1d93d5eef749c03ecb446945b3 *man/loglapUC.Rd
-b3b9edd1fc27bcebf7c4756db41454da *man/loglaplace.Rd
+0f6dd1a9c0fc77dd6521af733693f52e *man/loglaplace.Rd
 49d5183ac04d29b5427b9159fa101dc3 *man/loglinb2.Rd
 a569b31d918209e8b54a62e8594a3268 *man/loglinb3.Rd
 f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
-31652b3efe7d67b788e6995cc8642aea *man/lognormal.Rd
+a56f1a0e81c3dfdc8620c4cef1b87450 *man/lognormal.Rd
 e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
-22b7830eacf728a157992ee6a974adb2 *man/lomax.Rd
+ad3e8f3b35bfbd792e8a8cb6105a2121 *man/lomax.Rd
 1c4a4a2ce7661905273c47b4d8b6f898 *man/lomaxUC.Rd
-356e56edee88c1a1e235fe40471df54e *man/lqnorm.Rd
+ac49f1d5575295a237328c2de3cbab10 *man/lqnorm.Rd
 fc9ca61a4c495cf650cba5a458b0dae1 *man/lrtest.Rd
 f0a38f0b82c1525dcd51687a2f2768c1 *man/lvplot.Rd
-f478dcd30289d69e7dc8468325b1c23f *man/lvplot.qrrvglm.Rd
+7dcf0051720ee4587304e819ecc8de71 *man/lvplot.qrrvglm.Rd
 16b238586876d84bad0a1420402b5718 *man/lvplot.rrvglm.Rd
 c5760c3960748f906230ded119478271 *man/machinists.Rd
-0984609f96be7b543c69a4767b734b24 *man/makeham.Rd
-c01957cac49ff8e3444d143901efab18 *man/makehamUC.Rd
+eb7e6bf84eead25f006dc2fb6bfa55f7 *man/makeham.Rd
+a31274ff2b0c56cdb095a4cb93a31506 *man/makehamUC.Rd
 583f3f406844c550079d2592ecba0c25 *man/margeff.Rd
 b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
-ce0b52f5d9275e79be867d5e472155bf *man/matched.binomial.Rd
-aba16890d9923de9ddd31b18886beabe *man/maxwell.Rd
-0d3df98163de7b80cc3c600a791792c7 *man/maxwellUC.Rd
+b2f1aa9cecaec318a14cc5d4fbb20d67 *man/maxwell.Rd
+5eee0079954bf17587bc495e45cc4c7f *man/maxwellUC.Rd
 bd8250aaa1bc17c017c0b201642882dd *man/mccullagh89.Rd
+c007d94fac5c46a26baae899a04aaf9d *man/melbmaxtemp.Rd
 4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
 3fe36bd9f77590dc17a9a2e9380dc0bd *man/micmen.Rd
-fb797d07f6906c113862ea3aff57eee2 *man/mix2exp.Rd
+5eed4788f6366c1814ea5c9a250424e8 *man/mix2exp.Rd
 232e7ac50df002b7c0a1d7ba70fd0bbf *man/mix2normal.Rd
-805b04c7a832073cf90af42c891720da *man/mix2poisson.Rd
-e3f93b50736c1a14398c6677f1efba97 *man/mlogit.Rd
-a8e171aca3ff63d12fdfd97587a81734 *man/mmt.Rd
+364791d9a909112b530deda4135f30f7 *man/mix2poisson.Rd
 131aaa836a137554786e8bda01d8e334 *man/model.framevlm.Rd
 3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
 199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
-e052249885fb0375b1a364bb8fe50f15 *man/morgenstern.Rd
-261ead0cc3b72f59bba126853a37c21c *man/multinomial.Rd
-efbd8f76e06e80c92c826cd15cb5d411 *man/nakagami.Rd
-dab44218c0733703c125d8741f07bb80 *man/nakagamiUC.Rd
+764cafd682a3364a495cdf243e3a528e *man/multilogit.Rd
+d2ecbe308776f1e5065b0399959e2d99 *man/multinomial.Rd
+c3248f9d509aecb0726bd0e6e36a13d4 *man/nakagami.Rd
+54346d08bf5b7e822c1b166365850222 *man/nakagamiUC.Rd
 892ee6d069216d6568be506a7460c1c4 *man/nbcanlink.Rd
 798f2e547a94356359c3d50a57ccef17 *man/nbolf.Rd
-192eb0236ed35e7b1f20c81271cc7781 *man/negbinomial.Rd
-f6048338d4d698e967110e1840ed79a1 *man/negbinomial.size.Rd
-0dd90278ee057c748e4ad673838b08d0 *man/normal.vcm.Rd
-120be8e546976970aa92bb293e019b8e *man/notdocumentedyet.Rd
+e707b37436b27c43ce07b77492e4fde2 *man/negbinomial.Rd
+01e4d3c6a45020bef55cbadbad8388d3 *man/negbinomial.size.Rd
+14c4a7db111d0d9f41e5a810a3afdea2 *man/normal.vcm.Rd
+e03710346340cd0b2b8bb818110f8c62 *man/notdocumentedyet.Rd
 d361e050435d7a4e64474487ecfd782c *man/olym.Rd
 858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
 025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
-ec7adc2f811b041ed01628cbb019d617 *man/paralogistic.Rd
+687d43f8b77241bea9e7cbee86333fdb *man/paralogistic.Rd
 73228cd851fcf468b1fe1ff209ef5eca *man/paralogisticUC.Rd
-725a5efd172a9dda442a25b138ee2486 *man/paretoIV.Rd
+b8a1bd0580460ec6155b7c7bb2dae503 *man/paretoIV.Rd
 d0228dcb5ba3bd2a99272100a401c989 *man/paretoIVUC.Rd
-8f1a34eab62c4e9f14809c42746b46ad *man/paretoff.Rd
-873783fc64c0e3ae5e9e1ff1f7ef2788 *man/perks.Rd
-a3658af3f9766a5ce0dfc20aebdf3186 *man/perksUC.Rd
-a704e5245d54a9e8094b52925c237385 *man/persp.qrrvglm.Rd
+c0c60830c70e697aeab8bc6d11472b78 *man/paretoff.Rd
+97cf8349af611f4a6acf10e445e6587e *man/perks.Rd
+22126a9f4b6e01d96fb88f43e85d9d6a *man/perksUC.Rd
+60fac0e03c8dce88e04e2c3f6def20b9 *man/persp.qrrvglm.Rd
 a38168dd57b4be503cf47732714e441b *man/pgamma.deriv.Rd
 8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
-8ca9de18625c08de9d4acfa8001c7ca3 *man/plackUC.Rd
-bcda813e9efa01ebeff0c6db8fec5b2b *man/plackett.Rd
 791d04a5c3a3bc514bf0ed1fc639f8ab *man/plotdeplot.lmscreg.Rd
 cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
 29857fd00ca50614d9564247b07a2bf3 *man/plotqtplot.lmscreg.Rd
-d875d55c83b8ec53e2f46b3206f434f8 *man/plotrcim0.Rd
-783d0e876b16eed32b4ab9be314cda14 *man/plotvgam.Rd
+3e689a8ffae086e45cbe82fcd5255042 *man/plotrcim0.Rd
+8c391f9ad83a6afeab6446044f22b16d *man/plotvgam.Rd
 72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
 6196fac00cd0044ba818ec0a794a031a *man/plotvglm.Rd
 40f1661d2f26cb11f54c9140c767c61b *man/pneumo.Rd
 606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
 8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
-2ddaa395bbdea2574df3e7c387186db1 *man/poissonff.Rd
+8d1096d9bfeee36841be53ebe7300e49 *man/poissonff.Rd
 c0578de27756a8b6912b7940f2de96e5 *man/polf.Rd
 696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
 2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
 a746161f043ec5c5517df4b9cf71501e *man/posbernoulli.b.Rd
-f995f3aeff44e63929519b7752bc240a *man/posbernoulli.t.Rd
-ea0f67b9b92d46c30c3a6a4ef6979d87 *man/posbernoulli.tb.Rd
+de03a99d1f36509f75b4a428eb36c76b *man/posbernoulli.t.Rd
+936b86f4b44e438536136d1aec313be4 *man/posbernoulli.tb.Rd
 ca1949d75cb146d17b16d46009f55b9a *man/posbinomUC.Rd
 a0ff19c3e87fa3697f2d3a48a4230473 *man/posbinomial.Rd
 dc19e3d023a2a46c670e431a2cc853e0 *man/posgeomUC.Rd
 2963a956fa63f0bd9452b10b432d4fc8 *man/posnegbinUC.Rd
-a9f3ff5f799d60588fe55623ba98a0ed *man/posnegbinomial.Rd
+d1594d0598d420affef6f14a1c263685 *man/posnegbinomial.Rd
 7176035d384054db426d3f3322429372 *man/posnormUC.Rd
 e130fade4adc7216d9d825d73cf83dd6 *man/posnormal.Rd
 137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
 02066c793ac6cc88cdcb14ceb9b67fcb *man/pospoisson.Rd
-f35d86d08cb2181e69403304101af4e7 *man/powerlink.Rd
-af139c6afa9ed0d34045609975dca53f *man/prats.Rd
-4ff051b77b97c2f0cd68f56063620bf5 *man/predictqrrvglm.Rd
-6a2efe9d46b7c686e320469698f9c1c7 *man/predictvglm.Rd
-95b3d2a018fb1afc48dba46e1170e566 *man/prentice74.Rd
+cc06ad7f82789c3703e4977cc39828ed *man/powerlink.Rd
+66bad6a1a2012e256b483e1727aca7e9 *man/prats.Rd
+ee31e58dfd33c2c3b0d51eac95b553ad *man/predictqrrvglm.Rd
+cb6a8c644c31d6ec5e8977ea7b1198df *man/predictvglm.Rd
+4b6da0d45912d1b7fbd9d833f20ec3e9 *man/prentice74.Rd
 5f4fbb060b2d8386d8d2bfde926d9d5d *man/prinia.Rd
 d1b88140c378a21755511fb4a6ae6bce *man/probit.Rd
 0718999b2644fa5d30ffcd81722350e5 *man/propodds.Rd
 241402d089ef4159f01fb4cd2c72b9a3 *man/prplot.Rd
-236c21982c4d7b440f8e5768c6806ecb *man/put.smart.Rd
-398396f20c5d46304cf9ec527505d541 *man/qrrvglm.control.Rd
+ab1399d5d5f71707fd46960dc3efad04 *man/put.smart.Rd
+8f4e6ebea74037334377e346c5b476f6 *man/qrrvglm.control.Rd
 0b4cf628cd3e15b0668ae4ddae4d3ee6 *man/qtplot.gumbel.Rd
 19419758045a8282b21c6c7a8412a725 *man/qtplot.lmscreg.Rd
 2d496ded26329ff563f7d838c1f6a2cd *man/quasibinomialff.Rd
-65cf15223e019a1e223afc9c15ca183e *man/quasipoissonff.Rd
-6691fe12d23149a7c28a75a62230b2d2 *man/qvar.Rd
-d288908bb349df629f1f057b63ec7fb3 *man/rayleigh.Rd
+1dbf7bc4c97a7aafebcd736cf1baddbb *man/quasipoissonff.Rd
+bbde69d1bad346cd4ad04763c96d6ffe *man/qvar.Rd
+2ff61f599fb26b31315233d793fdded4 *man/rayleigh.Rd
 45b293604a0e71f14b2dad2976d7b845 *man/rayleighUC.Rd
-42759865ba0f19ea889d982bc7abc121 *man/rcqo.Rd
+6c45f58f39a63abc2ce8a0923c75cecc *man/rcqo.Rd
 97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
-5eda556cce0510064a330d66565a00c8 *man/recexp1.Rd
+585af0deb3deb7b61388d6d4557994d8 *man/rec.exp1.Rd
+64ea5646e75515a8b40fbd136fa6065e *man/rec.normal.Rd
 49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
-38ff5f23f427985a349ba222b390e615 *man/recnormal.Rd
 a56ddce8598af2320fdadb94c42a9b24 *man/rhobit.Rd
-8320763391f5f25b47fe931a0cd1fa2a *man/riceUC.Rd
-47ea579d67d72e713f55cadf6d0b7fad *man/riceff.Rd
+70cd63e2118605590e782f086bf47b41 *man/riceUC.Rd
+728fcd45a64fbe92638143f6b1800038 *man/riceff.Rd
 9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
-94d70d96afa235759b7d4a6b6775fe94 *man/rlplot.egev.Rd
+0e12c48578228c300e8c04ab3b08c04a *man/rlplot.egev.Rd
 3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
 c1638b6d6833abcd2eb5814a328a6777 *man/rrvglm-class.Rd
-b814d064706da7d367758f186a9d4bb1 *man/rrvglm.Rd
-3df0ab81d836bcbe3c7f34e7c698ae2a *man/rrvglm.control.Rd
-59d8b84425a1ce32215e150773386617 *man/rrvglm.optim.control.Rd
+b95a04698f6a2a7163a03717d72f7dc0 *man/rrvglm.Rd
+cf46faf7bd3cb7bbe65811130f78084f *man/rrvglm.control.Rd
+eb0e4a0a8b0c63cd0c17120e9ca8df53 *man/rrvglm.optim.control.Rd
 ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
 850477e7023b0617c4dd9bf177881736 *man/s.Rd
+3ebe2abf58080c4588a912c695adae77 *man/sc.studentt2.Rd
+e5c019ffe15b61578ec4c5ed894d70ea *man/sc.t2UC.Rd
 c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
 9985ea15444cc317e3e8fc2aad7200da *man/setup.smart.Rd
-ad56969af369fe4120838caeb6ac60b6 *man/simplex.Rd
+451a726465c8e82555ba50a857e86ce0 *man/simplex.Rd
 f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
-82f4e35552730791bb18e789c01ff861 *man/simulate.vlm.Rd
-158ce60e4d3abe5d370a43327e361ffd *man/sinmad.Rd
+41af17badd0ef1b17cee591a35d46a12 *man/simulate.vlm.Rd
+8b660b0d990b62c07cb5222e5966a1a9 *man/sinmad.Rd
 702d8c7998205774dde5a93d2e5a49fe *man/sinmadUC.Rd
 5327f9644795a6ed4e1909159156b656 *man/skellam.Rd
 2424940e3cff6d5a3ddd0ee99565ea39 *man/skellamUC.Rd
 b62da6a60b01916a10d691e980253bc0 *man/skewnormUC.Rd
-ba2cd271d5f1ed6eed17629588e7c4ec *man/skewnormal.Rd
+3797084c4e552d460e8b3942a661260a *man/skewnormal.Rd
 9f34bfb220e6d0400971a1efa5db28c6 *man/slash.Rd
 213b0f18e657b3c80f1af5f2bc1f4c6b *man/slashUC.Rd
-0b280d5a21d5f9f48d14caf7d366a06e *man/smart.expression.Rd
-41adcb0db6c8b560af197a65b7226477 *man/smart.mode.is.Rd
+21bada3a13aca65ba49fb28127575144 *man/smart.expression.Rd
+5726ef8bb900532df62b24bd4b7b8fe4 *man/smart.mode.is.Rd
 3d5d3a55f66ef8048b446da063e36ceb *man/smartpred.Rd
 098bc8b943b6ae2e0de9a4da57fcfd22 *man/sratio.Rd
-c8a04bcb150fa0dd37c4fc6f1e8efc1b *man/studentt.Rd
-1ba10a9db17520656ec1b0ca1c32d2b9 *man/tikuv.Rd
+0c48da9ab33eb24273c6348320a64f64 *man/studentt.Rd
+0258a94ee53da230fb2aea74fd90192a *man/tikuv.Rd
 dc0ae67e1d293040bf2d088e9bd4945b *man/tikuvUC.Rd
-821969db6bd5c548c51b3cbd82b6352a *man/tobit.Rd
+5fbf542c18e27e990c98bacedd614a39 *man/tobit.Rd
 2b4e875a4415043bf0cd019e71e955cd *man/tobitUC.Rd
 b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
 5a424c4e215899bc18b87099fcaf98e1 *man/triangle.Rd
@@ -536,24 +540,24 @@ b35739c390fd5566b8851cd070b09492 *man/triangleUC.Rd
 1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
 c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
 d77a2419400b9ae1059949803b8a1dd2 *man/truncparetoUC.Rd
-d23efcf6ab32a3bd3e02555bc177e5c3 *man/truncweibull.Rd
+1d47c3a8f732ea01782c7e0b9929a921 *man/truncweibull.Rd
 50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-9987a953e8b42a47a8c3c515249dbe22 *man/undocumented-methods.Rd
-c5d717e96565afac189ec5e103fd30af *man/uninormal.Rd
+db1902b011f19b59642d53797848dcc8 *man/undocumented-methods.Rd
+2fd783dbf5c2dbcb81727fe479729163 *man/uninormal.Rd
 f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
 ecf0058b783f675c77a3ca1e5ab1a90a *man/vgam-class.Rd
 6db59f46bb2fbdbd6329f07498eca6d5 *man/vgam.Rd
-c059eb2c3a2c325bd3b9498abe0a5d46 *man/vgam.control.Rd
+ea3fe248b860921783367037c8302c49 *man/vgam.control.Rd
 1efef5d732a8585b81478fd03e103e5f *man/vglm-class.Rd
 cecde8d7fd2706132b92762bfed8055a *man/vglm.Rd
-84d3293dabcbc437cead24a6a39ede91 *man/vglm.control.Rd
+c21cd55efce9d242cbe555cb65aea5e3 *man/vglm.control.Rd
 8d9fa0cc290e49e459947c38c292df4c *man/vglmff-class.Rd
-4b0a3f2794103d8d6014a58041855f7f *man/vonmises.Rd
+d1e31ea42a122762891de9a8589e2a4e *man/vonmises.Rd
 7787a423c41dec21ed7c4440288ef9b7 *man/vsmooth.spline.Rd
 c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
 9b9bdfbbf8060eb284c84e8ed9273154 *man/waldff.Rd
-c223012cb1da31f7e6cbd864de218cd2 *man/weibull.Rd
-31edfdbcd09aec27897f7a5167a57b40 *man/weightsvglm.Rd
+46cc302f6a200187ec753320ff6381a3 *man/weibullR.Rd
+e41e54f8623a002d20e55df65c5b6a87 *man/weightsvglm.Rd
 3557b17f6054a1699cb653b36f6d1a37 *man/wine.Rd
 f5a3078b689d511325cb1dc0fd4e21f3 *man/wrapup.smart.Rd
 622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
@@ -565,7 +569,7 @@ ae671324c0f93f66adc72f053ef9ebd9 *man/zabinomUC.Rd
 7d5df5fee6f78c5cf37faaf71adbbb91 *man/zageomUC.Rd
 925e2c8e227ffb6a26192aeeb1fd4f28 *man/zageometric.Rd
 78eef8b541d039b00e9990ff758e53e9 *man/zanegbinUC.Rd
-8e35cb598399b4051aee185a72911f5c *man/zanegbinomial.Rd
+2c7cf46a95acba72a8d4315e057a4de0 *man/zanegbinomial.Rd
 b4bcb3a52a6e60efbdaa5d3cfed6fbf4 *man/zapoisUC.Rd
 9fddb7dcd81ef0e4d6777a4ae2a56bff *man/zapoisson.Rd
 41b375aed0074b0d0e87b2913685cda9 *man/zero.Rd
@@ -577,12 +581,12 @@ e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
 cf47526db95bc439da054ac97d2da36f *man/zigeomUC.Rd
 8de969235239ce10332c2b91304931f5 *man/zigeometric.Rd
 b4d704d064746b54f31f7d3d5c7e71c8 *man/zinegbinUC.Rd
-d720aa8eac5ca7628305e3a71585bf52 *man/zinegbinomial.Rd
+87def1c11bb8e7e5f4857a8c7eeda491 *man/zinegbinomial.Rd
 a9b1d67033daa03a9880227187041ae5 *man/zipebcom.Rd
 abfe2e5adf8a4fcd610adccf060e4f45 *man/zipf.Rd
 24ccbcefd8c1d93f609a39a1d29e4c17 *man/zipfUC.Rd
 0b8c923247c77bffa3dc24440e5d8bae *man/zipoisUC.Rd
-1ca7235ece422fbb566c94e46f0de6b2 *man/zipoisson.Rd
+93b8b3cb5ce61536968440f227416f03 *man/zipoisson.Rd
 f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
 ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
 8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
@@ -607,3 +611,5 @@ e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
 753359563526a9cd5ebac104dab2d754 *src/vmux.f
 9083b462bcc275ee6dda47e97f1ebf94 *src/vmux3.c
 b19585d2495c46800b0c95f347fe89f9 *src/zeta3.c
+bfa11dbdbff271fb20342560f2bacd53 *vignettes/categoricalVGAM.Rnw
+d7beca978b587625654f981f7dc433d0 *vignettes/categoricalVGAMbib.bib
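
The MD5 stanza above is the package checksum manifest that R verifies
at install time.  A minimal sketch, assuming the working directory is
the top of an unpacked VGAM source tree, of recomputing the same sums:

    # recompute and inspect the checksums of the help pages listed above
    sums <- tools::md5sum(list.files("man", full.names = TRUE))
    head(sums)
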
diff --git a/NAMESPACE b/NAMESPACE
index 54ed662..802cfcd 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -7,6 +7,10 @@
 useDynLib(VGAM)
 
 
+importMethodsFrom("splines")
+importFrom("splines", splineDesign, bs, ns)
+
+
 export(sm.bs, sm.ns, sm.scale.default, sm.poly, sm.scale)
 exportMethods(coefficients, coef)
 importFrom("stats", coefficients, coef)
@@ -22,8 +26,11 @@ export(case.names,
        weights)
 
 
+export(expected.betabin.ab, grid.search)
 
-export(Select, subsetc)
+exportMethods(QR.Q, QR.R)
+export(QR.Q, QR.R)
+export(Select, subsetcol)
 
 
 export(simulate.vlm)
@@ -70,7 +77,7 @@ export(arwz2wz)
 
 
 export(link2list)
-export(mlogit)
+export(multilogit)
 
 
 export(perks, dperks, pperks, qperks, rperks)
@@ -124,7 +131,7 @@ export(lrtest, lrtest_vglm)
 export(update_default, update_formula)
 
 
-export(nvar, nvar.vlm, nvar.vgam, nvar.rrvglm, nvar.qrrvglm, nvar.cao, nvar.rcim)
+export(nvar, nvar.vlm, nvar.vgam, nvar.rrvglm, nvar.qrrvglm, nvar.rrvgam, nvar.rcim)
 export(      nobs.vlm)
 
 
@@ -152,8 +159,10 @@ export(is.empty.list)
 
 export(
 Build.terms.vlm,
+interleave.VGAM,
 procVec,
 ResSS.vgam,
+valt.control,
 vcontrol.expression, 
 vplot, vplot.default, vplot.factor, vplot.list,
 vplot.matrix, vplot.numeric, vvplot.factor)
@@ -176,7 +185,7 @@ explogff, dexplog, pexplog, qexplog, rexplog)
 export(Rcim, plotrcim0,
 rcim, summaryrcim)
 export(moffset)
-export(plotqvar, Qvar, qvar)
+export(plotqvar, qvplot, Qvar, qvar)
 export(depvar, depvar.vlm)
 
 
@@ -190,14 +199,15 @@ export(put.caption)
 
 
 export(
-d2theta.deta2, Deviance.categorical.data.vgam, 
+cm.VGAM, cm.nointercept.VGAM, cm.zero.VGAM,
+Deviance.categorical.data.vgam, 
 lm2qrrvlm.model.matrix,
-m2avglm, 
 dimm)
 
 
 
-export(is.smart, smart.mode.is, wrapup.smart, setup.smart, my1, my2)
+export(is.smart, smart.mode.is, wrapup.smart, setup.smart,
+sm.min1, sm.min2)
 export(
 smart.expression,
 get.smart, get.smart.prediction,
@@ -219,17 +229,17 @@ export(pnorm2, dnorm2)
 export(iam,
 fill, fill1, fill2, fill3,
 abbott,
-amh, damh, pamh, ramh, 
+biamhcop, dbiamhcop, pbiamhcop, rbiamhcop, 
 bigamma.mckay,
 freund61,
-frechet2, dfrechet, pfrechet, qfrechet, rfrechet,
+frechet, dfrechet, pfrechet, qfrechet, rfrechet,
 bifrankcop, dbifrankcop, pbifrankcop, rbifrankcop, 
-plackett, dplack, pplack, rplack, 
-benini, dbenini, pbenini, qbenini, rbenini, 
+biplackettcop, dbiplackcop, pbiplackcop, rbiplackcop,
+benini1, dbenini, pbenini, qbenini, rbenini, 
 maxwell, dmaxwell, pmaxwell, qmaxwell, rmaxwell,
-morgenstern,
-fgm, dfgm, pfgm, rfgm,
-bigumbelI,
+bifgmexp,
+bifgmcop, dbifgmcop, pbifgmcop, rbifgmcop,
+bigumbelIexp,
 erf, erfc, lerch, lambertW, log1pexp,
 truncpareto, dtruncpareto, qtruncpareto, rtruncpareto, ptruncpareto,
 paretoff, dpareto, qpareto, rpareto, ppareto,
@@ -237,27 +247,28 @@ paretoIV, dparetoIV, qparetoIV, rparetoIV, pparetoIV,
 paretoIII, dparetoIII, qparetoIII, rparetoIII, pparetoIII,
 paretoII, dparetoII, qparetoII, rparetoII, pparetoII,
 dparetoI, qparetoI, rparetoI, pparetoI,
-cgumbel, egumbel, gumbel,
+cens.gumbel, egumbel, gumbel,
 dgumbel, pgumbel, qgumbel, rgumbel, 
 foldnormal, dfoldnorm, pfoldnorm, qfoldnorm, rfoldnorm,
-cennormal1,
-cennormal, double.cennormal,
-recnormal, recnormal.control, recexp1,
-cenrayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh, 
-drice, rrice, riceff,
+cennormal,
+cens.normal, double.cens.normal,
+rec.normal, rec.normal.control,
+rec.exp1,   rec.exp1.control,
+cens.rayleigh, rayleigh, drayleigh, prayleigh, qrayleigh, rrayleigh, 
+drice, price, qrice, rrice, riceff, marcumQ,
 dskellam, rskellam, skellam,
 inv.gaussianff, dinv.gaussian, pinv.gaussian, rinv.gaussian, waldff,
-expexp1, expexp)
+expexpff1, expexpff)
 
 
 
 
-export(A1A2A3, a2m, AAaa.nohw,
+export(a2m,
 AICvlm, AICvgam, AICrrvglm, AICqrrvglm, # AICvglm, 
 anova.vgam,
 anova.vglm, 
 bisa, dbisa, pbisa, qbisa, rbisa,
-betabinomial.ab, betabinomial,
+betabinomialff, betabinomial,
 double.expbinomial,
 dbetabinom,    pbetabinom,    rbetabinom,
 dbetabinom.ab, pbetabinom.ab, rbetabinom.ab,
@@ -265,20 +276,20 @@ biplot.qrrvglm,
 dbort, rbort, borel.tanner,
 care.exp,
 cauchy, cauchy1,
-concoef.cao, concoef.Coef.cao, concoef.Coef.qrrvglm, concoef.qrrvglm,
+concoef.rrvgam, concoef.Coef.rrvgam, concoef.Coef.qrrvglm, concoef.qrrvglm,
 cdf, cdf.lms.bcg, cdf.lms.bcn,
 cdf.lms.yjn, cdf.vglm, 
-Coef.cao, Coefficients,
+Coef.rrvgam, Coefficients,
 coefqrrvglm, 
 coefvlm,
 coefvsmooth.spline, coefvsmooth.spline.fit,
 constraints, constraints.vlm, 
 deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
 deplot.lms.yjn, deplot.lms.yjn2, deplot.vglm, 
-deviance.vlm,
+deviance.vlm, deviance.qrrvglm,
 df.residual_vlm,
 dirmultinomial, dirmul.old,
-dtheta.deta)
+dtheta.deta, d2theta.deta2)
 
 
 export(cloglog,cauchit,elogit,explink,fisherz,logc,loge,logneg,logit,
@@ -291,54 +302,55 @@ export(poisson.points, dpois.points)
 
 
 
-export(m2adefault, 
+export(m2a, 
 erlang,
 dfelix, felix,
 fittedvlm, fittedvsmooth.spline, fsqrt,
 formulavlm, formulaNA.VGAM,
 garma, gaussianff,
-hypersecant, hypersecant.1, 
+hypersecant, hypersecant01, 
 hyperg,
-invbinomial, InverseBrat, inverse.gaussianff,
+inv.binomial, InverseBrat, inverse.gaussianff,
 is.Numeric,
-mccullagh89, leipnik, levy,
+mccullagh89, leipnik,
+dlevy, plevy, qlevy, rlevy, levy,
 lms.bcg.control, lms.bcn.control, lmscreg.control,
 lms.yjn.control, 
 lms.bcg, lms.bcn, lms.yjn, lms.yjn2,
 lqnorm,
-dbilogis4, pbilogis4, rbilogis4, bilogistic4,
-logistic1, logistic2,
+dbilogis, pbilogis, rbilogis, bilogistic,
+logistic1, logistic,
 logLik.vlm,
-latvar.cao, latvar.Coef.qrrvglm, latvar.rrvglm, latvar.qrrvglm,
-lvplot.cao,
-Rank, Rank.rrvglm, Rank.qrrvglm, Rank.cao,
+latvar.rrvgam, latvar.Coef.qrrvglm, latvar.rrvglm, latvar.qrrvglm,
+lvplot.rrvgam,
+Rank, Rank.rrvglm, Rank.qrrvglm, Rank.rrvgam,
 Max.Coef.qrrvglm, Max.qrrvglm,
-is.bell.vlm, is.bell.rrvglm, is.bell.qrrvglm, is.bell.cao, is.bell,
+is.bell.vlm, is.bell.rrvglm, is.bell.qrrvglm, is.bell.rrvgam, is.bell,
 model.matrix.qrrvglm,
 model.matrixvlm,
 model.framevlm,
 nakagami, dnaka, pnaka, qnaka, rnaka,
 namesof,
 nlminbcontrol, negloge,
-Opt.Coef.qrrvglm, Opt.qrrvglm, persp.cao)
+Opt.Coef.qrrvglm, Opt.qrrvglm, persp.rrvgam)
 
 
 export( micmen )
 
 
-export( plot.cao,
+export( plot.rrvgam,
 plotpreplotvgam,
 plotvglm, plotvlm,
 plotvsmooth.spline, powerlink,
-predict.cao, predictcao,
+predict.rrvgam, predictrrvgam,
 predictors, predictors.vglm,
 predictqrrvglm, predict.rrvglm,
 predict.vgam,
 predictvglm, predict.vlm, predictvsmooth.spline,
 predictvsmooth.spline.fit,
-  show.Coef.cao,
+  show.Coef.rrvgam,
   show.Coef.qrrvglm, show.Coef.rrvglm, show.rrvglm,
-  show.summary.cao, show.summary.qrrvglm,
+  show.summary.rrvgam, show.summary.qrrvglm,
   show.summary.rrvglm,
   show.summary.vgam,
   show.summary.vglm,
@@ -347,8 +359,8 @@ predictvsmooth.spline.fit,
 show.vgam, show.vglm, show.vlm,
  show.vglmff,
 show.vsmooth.spline,
-process.binomial2.data.vgam, process.categorical.data.vgam,
-negzero.expression,
+process.binomial2.data.VGAM, process.categorical.data.VGAM,
+negzero.expression.VGAM,
 qtplot,
 qtplot.default, qtplot.gumbel, qtplot.lms.bcg,
 qtplot.lms.bcn, qtplot.lms.yjn, qtplot.lms.yjn2, qtplot.vextremes, qtplot.vglm,
@@ -362,16 +374,16 @@ rlplot, rlplot.vglm, rrar.control)
 export(
 SurvS4, is.SurvS4, as.character.SurvS4,
 show.SurvS4,
-simple.exponential, simple.poisson,
-matched.binomial,
+simple.exponential, better.exponential,
+simple.poisson,
 seq2binomial, size.binomial,
-stdze1, stdze2,
-summary.cao, summary.grc,
+sm.scale1, sm.scale2,
+summary.rrvgam, summary.grc,
   summary.qrrvglm,
 summary.rrvglm,
 summaryvgam, summaryvglm, summaryvlm,
 s.vam, terms.vlm, 
-theta2eta, Tol.Coef.qrrvglm, Tol.qrrvglm,
+Tol.Coef.qrrvglm, Tol.qrrvglm,
 triangle, dtriangle, ptriangle, qtriangle, rtriangle, 
   vcovvlm,
 vglm.fit, vgam.fit,
@@ -386,7 +398,6 @@ dzeta)
 
 
 
-
 export(lm2vlm.model.matrix) 
 export(vlm2lm.model.matrix) 
 
@@ -445,11 +456,11 @@ export(vlm2lm.model.matrix)
 export(ddagum, rdagum, qdagum, pdagum, dagum)
 export(dfisk, pfisk, qfisk, rfisk, fisk)
 export(dlomax, plomax, qlomax, rlomax, lomax)
-export(dinvlomax, pinvlomax, qinvlomax, rinvlomax, invlomax)
+export(dinv.lomax, pinv.lomax, qinv.lomax, rinv.lomax, inv.lomax)
 export(dparalogistic, pparalogistic, qparalogistic, rparalogistic,
        paralogistic)
-export(dinvparalogistic, pinvparalogistic, qinvparalogistic, rinvparalogistic,
-       invparalogistic)
+export(dinv.paralogistic, pinv.paralogistic, qinv.paralogistic, rinv.paralogistic,
+       inv.paralogistic)
 export(dsinmad, psinmad, qsinmad, rsinmad, sinmad)
 export(lognormal)
 export(dpolono, ppolono, rpolono)
@@ -474,8 +485,8 @@ export(fff, fff.control,
 
 
 export(
-AA.Aa.aa, AB.Ab.aB.ab2, AB.Ab.aB.ab, ABO, acat,
-beta.ab, betaff,
+AA.Aa.aa, AB.Ab.aB.ab, ABO, acat,
+betaR, betaff,
 dbetageom, pbetageom, rbetageom, betageometric, 
 dbetanorm, pbetanorm, qbetanorm, rbetanorm, # betanorm,
 betaprime,
@@ -488,22 +499,21 @@ bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
 calibrate, cao.control,
 cao,
 cdf.lmscreg, cgo, chisq, clo, 
-ccoef, concoef,
+concoef,
 Coef, Coef.qrrvglm, Coef.rrvglm, Coef.vlm,
 predictqrrvglm,
 cratio, cumulative, propodds, prplot, prplot.control)
 export(
 deplot.lmscreg, dirichlet,
-exponential, G1G2G3)
-
+exponential, A1A2A3)
 
 export(
-lgammaff, lgamma3ff)
+lgamma1, lgamma3)
 export(
-gammahyp,
-gengamma, gamma1, gamma2, gamma2.ab, gammaff)
+gammahyperbola,
+gengamma.stacy, gamma1, gamma2, gammaR, gammaff)
 export(dlgamma, plgamma, qlgamma, rlgamma)
-export(dgengamma, pgengamma, qgengamma, rgengamma)
+export(dgengamma.stacy, pgengamma.stacy, qgengamma.stacy, rgengamma.stacy)
 
 
 export(
@@ -521,7 +531,7 @@ dyules, pyules, ryules, yulesimon,
 logff, dlog, plog, rlog,
 logF, dlogF,
 loglinb2, loglinb3,
-loglog, lognormal3,
+loglog,
 lvplot.qrrvglm, lvplot.rrvglm,
 Max, MNSs,
 dmultinomial, multinomial, margeff)
@@ -538,21 +548,22 @@ slash, dslash, pslash, rslash)
 export(
 deunif, peunif, qeunif, reunif,
 denorm, penorm, qenorm, renorm,
-koenker, dkoenker, pkoenker, qkoenker, rkoenker,
+sc.studentt2, dsc.t2, psc.t2, qsc.t2, rsc.t2,
 deexp, peexp, qeexp, reexp)
 
 
 export(
 meplot, meplot.default, meplot.vlm,
 guplot, guplot.default, guplot.vlm,
-negbinomial, negbinomial.size, polya,
-uninormal, SUR, normal.vcm,
+negbinomial, negbinomial.size, polya, polyaR,
+uninormal, SURff, normal.vcm,
 nbcanlink,
 tobit, dtobit, ptobit, qtobit, rtobit,
 Opt, 
 perspqrrvglm, plotdeplot.lmscreg, plotqrrvglm, plotqtplot.lmscreg,
 plotvgam.control, plotvgam, 
-cenpoisson,
+plot.vgam, 
+cens.poisson,
 poissonff,
 dposbinom, pposbinom, qposbinom, rposbinom, posbinomial,
 dposgeom, pposgeom, qposgeom, rposgeom, # posgeometric,
@@ -564,7 +575,7 @@ rrar, rrvglm.control,
 rrvglm.optim.control)
 
 
-export(eta2theta, 
+export(eta2theta, theta2eta,
 rrvglm,
 simplex, dsimplex, rsimplex, 
 sratio, s,
@@ -576,7 +587,7 @@ cqo,
 qrrvglm.control,
 vgam.control, vgam, vglm.control, vglm,
 vsmooth.spline,
-weibull, yip88,
+weibullR, yip88,
 dzabinom, pzabinom, qzabinom, rzabinom, zabinomial, zabinomialff,
 dzageom, pzageom, qzageom, rzageom, zageometric, zageometricff,
 dzanegbin, pzanegbin, qzanegbin, rzanegbin, zanegbinomial, zanegbinomialff,
@@ -601,11 +612,11 @@ tikuv, dtikuv, ptikuv, qtikuv, rtikuv)
 
 exportClasses(vglmff, vlm, vglm, vgam,
 rrvglm, qrrvglm, grc,  rcim, 
-vlmsmall, cao,
+vlmsmall, rrvgam,
 summary.vgam, summary.vglm, summary.vlm,
 summary.qrrvglm,
-summary.cao, summary.rrvglm, 
-Coef.rrvglm, Coef.qrrvglm, Coef.cao,
+summary.rrvgam, summary.rrvglm, 
+Coef.rrvglm, Coef.qrrvglm, Coef.rrvgam,
 vcov.qrrvglm,
 vsmooth.spline.fit, vsmooth.spline)
 
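
The NAMESPACE hunks above rename many user-visible exports (mlogit()
to multilogit(), weibull() to weibullR(), beta.ab() to betaR(), and so
on).  A minimal sketch, assuming only that this version of VGAM is
installed, for confirming the new names before porting downstream code:

    library(VGAM)
    # a few of the renamed exports taken from the export() calls above
    new.names <- c("multilogit", "weibullR", "betaR", "concoef",
                   "QR.Q", "QR.R", "grid.search")
    sapply(new.names, exists,
           where = asNamespace("VGAM"), inherits = FALSE)
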
diff --git a/NEWS b/NEWS
index effee19..e6d6516 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,165 @@
 
 
 
+                CHANGES IN VGAM VERSION 0.9-5
+
+NEW FEATURES
+
+    o   Tested okay on R 3.1.2.
+    o   New argument 'lss' appears on some family functions.
+        This is important because it changes the order of the
+        parameters.
+    o   New functions: QR.Q(), QR.R(), [pq]rice() (thanks
+        to Benjamin Hall for pointing out that these are based on the
+        Marcum-Q function).
+    o   exponential() has a new loglikelihood slot. Thanks to
+        Neyko Neykov for picking up this omission.
+    o   Constraint matrices in process.constraints() are now checked
+        for full column rank.
+    o   New family functions: better.exponential(), polyaR().
+    o   New functions: qvplot() (now preferred over plotqvar())
+        and [dpqr]levy().
+    o   summary() applied to a "vglm" object now prints out
+        the table of estimates, SEs, test statistics
+        and p-values very similarly to glm() objects. In
+        particular, two-tailed p-values in the 4th column
+        are new; these correspond to the z ratio based on
+        a normal reference distribution.
+    o   gev(), egev(), and gpd() have a 'type.fitted' argument, which
+        should be set to "mean" if the mean is desired as
+        the fitted values.
+        gpd() now stops with an error if the data are negative.
+    o   AA.Aa.aa() and A1A2A3() have an 'inbreeding' argument.
+        If 'inbreeding = TRUE' then an extra parameter is estimated.
+        If 'inbreeding = FALSE' then the inbreeding coefficient is 0
+        by definition, and not estimated.
+        G1G2G3() is now renamed to A1A2A3().
+    o   Decommissioned VGAM family functions:
+        AAaa.nohw(), matched.binomial().
+    o   deviance() applied to a "qrrvglm" or "rrvgam" object
+        now has a 'history' argument.
+    o   binomialff(mv = TRUE) is no longer restricted to responses
+        having 0 and 1 values.
+    o   New data sets: flourbeetle.
+    o   The 'constraints' argument accepts a list whose components
+        are functions that compute the constraint matrices.
+
+
+BUG FIXES and CHANGES
+
+    o   Reordered the arguments and linear predictors (now,
+        'location'-type precedes 'scale'-type, and 'scale'-type precedes
+        'shape'-type parameters):
+          benini1(dpqr)
+          bisa(dpqr)
+          gumbelII(dpqr)
+          makeham(dpqr)
+          nakagami(dpqr)
+          perks(dpqr)
+          riceff(dpqr)
+          genrayleigh(dpqr)
+          expexpff1(), expexpff()
+          exppoisson(dpqr)
+          gammaR()
+    o   Renamed parameter names: poissonff() has "lambda", not "mu",
+        binomialff() has "prob", not "mu".
+    o   Renamed functions:
+        plot.vgam(), not plotvgam(), now plots "vgam" objects.
+        Use plot(as(vglmObject, "vgam")) to plot vglm() objects as
+        if they were vgam() objects.
+        plot.vgam(): the user has full control over 'xlim' and 'ylim'
+        when they are specified.
+    o   Renamed functions: cm.zero.vgam() has become cm.zero.VGAM(),
+        cm.nointercept.vgam() has become cm.nointercept.VGAM(),
+        cm.vgam() has become cm.VGAM(),
+        process.categorical.data.vgam has become
+        process.categorical.data.VGAM, and
+        process.binomial2.data.vgam has become
+        process.binomial2.data.VGAM.
+    o   The loge() link now returns "loge" as its tag, not "log".
+    o   Class "cao" changed to "rrvgam".
+    o   dbilogis4() was faulty.
+    o   Renamed arguments: 'location' is now 'scale' in [dpqr]pareto()
+        and paretoff().
+    o   gev() and egev() handle working weights better when sigma is
+        close to 0.
+    o   The defaults gev(zero = 3) and egev(zero = 3) have changed to
+        gev(zero = 2:3) and egev(zero = 2:3), respectively.
+        That is, by default only the location parameter is now
+        modelled as a function of covariates;
+        the scale and shape parameters are intercept-only.
+    o   bigamma.mckay(zero = 1) has changed to bigamma.mckay(zero = 2:3),
+        by default.
+    o   rlplot() works for gev() model fits now.
+    o   Renamed functions: subsetc() has become subsetcol(),
+        my1() has become sm.min1(),
+        my2() has become sm.min2(),
+        stdze1() has become sm.scale1(),
+        stdze2() has become sm.scale2(),
+        mlogit() has become multilogit().
+    o   Decommissioned VGAM family functions:
+        AB.Ab.aB.ab2()
+    o   Renamed VGAM family functions:
+        OLD NAME:            NEW NAME:
+        amh()                biamhcop()
+        bigumbelI()          bigumbelIexp()
+        fgm()                bifgmcop()
+        gammahyp()           gammahyperbola()
+        morgenstern()        bifgmexp()
+        plackett()           biplackettcop()
+        benini()             benini1()
+        cgumbel()            cens.gumbel()
+        cenpoisson()         cens.poisson()
+        cennormal()          cens.normal()
+        double.cennormal()   double.cens.normal()
+        recnormal()          rec.normal()
+        recexp1()            rec.exp1()
+        invbinomial()        inv.binomial()
+        invlomax()           inv.lomax()
+        invparalogistic()    inv.paralogistic()
+        koenker()            sc.studentt2()
+        frechet2()           frechet()
+        hypersecant.1()      hypersecant01()
+        gengamma()           gengamma.stacy()
+        beta.ab()            betaR()
+        betabinomial.ab()    betabinomialff()
+        gamma2.ab()          gammaR() [see note about reordered arguments]
+        logistic2()          logistic()
+        lgammaff()           lgamma1()
+        lgamma3ff()          lgamma3()
+        SUR()                SURff()
+        expexp()             expexpff()
+        expexp1()            expexpff1()
+        weibull()            weibullR(lss = FALSE). Also 'zero' has changed.
+    o   Data sets renamed:
+        mmt has become melbmaxtemp.
+    o   lms.bcn(): changes in the arguments.
+    o   [log]alaplace[123](): changes in the arguments, e.g.,
+        'parallelLocation' changed to 'parallel.locat'.
+    o   Argument 'reference' has been changed to 'refResponse' for
+        CQO objects.
+    o   Argument 'shrinkage.init' has been changed to 'ishrinkage'.
+    o   Argument 'matrix.arg = TRUE' has been changed to
+        'drop = FALSE' in fittedvlm().
+    o   Fixed a bug in dbort(). Thanks to Benjamin Kjellson for
+        picking this up.
+    o   vglm.control()$save.weight changed to vglm.control()$save.weights.
+        vgam.control()$save.weight changed to vgam.control()$save.weights.
+    o   "ccoef" has been replaced by "concoef".
+    o   Some documentation regarding qvar(se = TRUE) was wrong.
+    o   Argument "alpha" in several bivariate distributions has been
+        replaced by "apar", for "association parameter".
+    o   Arguments "optima" replaced by "optimums",
+        "maxima" replaced by "maximums",
+        "logmaxima" replaced by "log.maximums".
+    o   Function getMaxMin() renamed to grid.search().
+    o   lognormal3() withdrawn.
+    o   dfbeta() returns the difference between the coefficients
+        (the full fit minus each leave-one-out fit).
+    o   negbinomial(deviance = TRUE) works when fitting the NB-2,
+        provided criterion = "coef" or half.stepsizing = FALSE.
+    o   Argument "a" replaced by "rate" in maxwell(dpqr).
+    o   Arguments "x1" and "x2" replaced by "q1" and "q2" in pbinorm().
+
+
+
                 CHANGES IN VGAM VERSION 0.9-4
 
 NEW FEATURES
@@ -633,9 +792,9 @@ NEW FEATURES
     o    vchol() takes drastic action to avoid infinite looping: it sets
         the working weights to be proportional to the order-M diagonal
         matrix.
-    o    lognormal() and lognormal3() now have zero = 2 as the default
+    o   lognormal() and lognormal3() now have zero = 2 as the default
         (was zero = NULL).
-    o    Some variable names within grc() changed, e.g., Row. and not Row.
+    o   Some variable names within grc() changed, e.g., Row. and not Row.
     o   The smartpred package within VGAM has updated poly(), ns(),
         bs() and scale() for R version 2.12.0 (2010-10-15).
         Calls to smartpredenv are now VGAM:::smartpredenv.
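
Two NEWS items above lend themselves to a short, hedged illustration:
the new 'lss' argument (which controls whether scale precedes shape in
the linear predictors) and the glm()-style summary() output.  A sketch
on simulated data:

    library(VGAM)
    set.seed(1)
    wdata <- data.frame(y = rweibull(100, shape = 2, scale = 3))
    fit1 <- vglm(y ~ 1, weibullR(lss = TRUE),  data = wdata)
    fit2 <- vglm(y ~ 1, weibullR(lss = FALSE), data = wdata)
    coef(fit1, matrix = TRUE)  # scale first, then shape (new ordering)
    coef(fit2, matrix = TRUE)  # shape first, as in the old weibull()
    summary(fit1)  # now prints z ratios and two-tailed p-values
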
diff --git a/R/aamethods.q b/R/aamethods.q
index b055aed..09134d5 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -392,7 +392,7 @@ setClass("uqo", representation(
     contains = "vlmsmall")
 
 
-setClass(Class = "cao",
+setClass(Class = "rrvgam",
          contains = "vgam")
 
 
@@ -402,12 +402,16 @@ setGeneric("lvplot", function(object, ...) standardGeneric("lvplot"),
 
 
 
+ if (FALSE) {
  if (!isGeneric("ccoef"))
     setGeneric("ccoef", function(object, ...) {
     .Deprecated("concoef")
 
     standardGeneric("ccoef")
     })
+}
+
+
 
  if (!isGeneric("concoef"))
     setGeneric("concoef", function(object, ...) {
@@ -499,6 +503,45 @@ if (!isGeneric("summary"))
 
 
 
+if (!isGeneric("QR.R"))
+  setGeneric("QR.R", function(object, ...)
+             standardGeneric("QR.R"),
+             package = "VGAM")
+
+
+setMethod("QR.R", "vglm",
+          function(object, ...) {
+  if (length(object@R)) object@R else {
+    warning("empty 'R' slot on object. Returning a NULL")
+    NULL
+  }
+})
+
+
+
+if (!isGeneric("QR.Q"))
+  setGeneric("QR.Q", function(object, ...)
+             standardGeneric("QR.Q"),
+             package = "VGAM")
+
+
+setMethod("QR.Q", "vglm",
+          function(object, ...) {
+  qr.list <- object@qr
+  if (length(qr.list)) {
+    class(qr.list) <- "qr"
+    qr.Q(qr.list)
+  } else {
+    warning("empty 'qr' slot on object. Returning a NULL")
+    NULL
+  }
+})
+
+
+
+
+
+
 
 
 
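
The two setMethod() blocks above add the QR.Q() and QR.R() accessors
for "vglm" fits.  A brief usage sketch (as coded above, either accessor
warns and returns NULL when the corresponding slot is empty):

    library(VGAM)
    fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    R <- QR.R(fit)  # the 'R' slot stored on the fit, or NULL + warning
    Q <- QR.Q(fit)  # rebuilt from the 'qr' slot via qr.Q()
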
diff --git a/R/bAIC.q b/R/bAIC.q
index fc4cdd2..26d5294 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -176,7 +176,7 @@ AICqrrvglm <- function(object, ...,
 
 
  
- AICcao    <- function(object, ...,
+ AICrrvgam <- function(object, ...,
                        k = 2) {
 
 
@@ -251,9 +251,9 @@ setMethod("AIC", "qrrvglm",
           AICqrrvglm(object, ..., k = k))
 
 
-setMethod("AIC", "cao",
+setMethod("AIC", "rrvgam",
           function(object, ..., k = 2)
-            AICcao(object, ..., k = k))
+            AICrrvgam(object, ..., k = k))
 
 
 
@@ -321,9 +321,9 @@ setMethod("BIC", "qrrvglm",
           AICqrrvglm(object, ..., k = k))
 
 
-setMethod("BIC", "cao",
+setMethod("BIC", "rrvgam",
           function(object, ..., k = log(nobs(object)))
-            AICcao(object, ..., k = k))
+            AICrrvgam(object, ..., k = k))
 
 
 
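
The BIC methods above simply reuse the AIC code with the penalty
k = log(nobs(object)).  A hedged illustration on an ordinary vglm() fit:

    library(VGAM)
    fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    AIC(fit)                      # k = 2
    BIC(fit)                      # k = log(nobs(fit))
    AIC(fit, k = log(nobs(fit)))  # agrees with BIC(fit)
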
diff --git a/R/calibrate.q b/R/calibrate.q
index 5e23488..c5e43d0 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -75,7 +75,7 @@ calibrate.qrrvglm <-
 
   if (!Quadratic && type == "vcov")
     stop("cannot have 'type=\"vcov\"' when object is ",
-         "a \"cao\" object")
+         "a \"rrvgam\" object")
 
   if (is.vector(newdata))
     newdata <- rbind(newdata)
@@ -143,7 +143,7 @@ calibrate.qrrvglm <-
               everything = FALSE,
              mu.function = slot(object@family, "linkinv")) else
         optim(par = initial.vals[ii, ],
-              fn = .my.calib.objfunction.cao,
+              fn = .my.calib.objfunction.rrvgam,
               method = optim.control$Method.optim,  # "BFGS" or "CG" or...
               control = c(fnscale = ifelse(minimize.obfunct, 1, -1),
                           use.optim.control),
@@ -217,7 +217,7 @@ calibrate.qrrvglm <-
                    misc.list = object@misc,
                     everything = TRUE,
                    mu.function = slot(object@family, "linkinv")) else
-            .my.calib.objfunction.cao(BestOFpar[i1, ],
+            .my.calib.objfunction.rrvgam(BestOFpar[i1, ],
                     y = newdata[i1, ],
                    extra = object@extra,
                     objfun = obfunct,
@@ -229,7 +229,7 @@ calibrate.qrrvglm <-
       muValues <- rbind(muValues, matrix(ans$mu, nrow = 1))
       etaValues <- rbind(etaValues, matrix(ans$eta, nrow = 1))
       if (Quadratic)
-        vcValues[,,i1] <- ans$vcmat  # Can be NULL for "cao" objects
+        vcValues[,,i1] <- ans$vcmat  # Can be NULL for "rrvgam" objects
     }
     if (type == "response") {
        dimnames(muValues) <- dimnames(newdata)
@@ -313,7 +313,7 @@ calibrate.qrrvglm <-
 
  
 
-.my.calib.objfunction.cao <-
+.my.calib.objfunction.rrvgam <-
   function(bnu, y, extra = NULL,
            objfun, object, Coefs,
            misc.list,
@@ -323,7 +323,7 @@ calibrate.qrrvglm <-
     NOS <- Coefs@NOS
     eta <- matrix(as.numeric(NA), 1, NOS)
     for (jlocal in 1:NOS) {
-      eta[1, jlocal] <- predictcao(object, grid = bnu, sppno = jlocal,
+      eta[1, jlocal] <- predictrrvgam(object, grid = bnu, sppno = jlocal,
                                    Rank = Rank, deriv = 0)$yvals
     }
     mu <- rbind(mu.function(eta, extra))  # Make sure it has one row 
diff --git a/R/cao.R b/R/cao.R
index 12a6bde..b54cfde 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -117,7 +117,7 @@ cao  <- function(formula,
   }
 
   answer <-
-  new("cao",
+  new("rrvgam",
     "assign"       = attr(x, "assign"),
     "Bspline"      = fit$Bspline,
     "call"         = ocall,
diff --git a/R/cao.fit.q b/R/cao.fit.q
index c08f857..1955557 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -95,7 +95,7 @@ cao.fit <-
 
 
   special.matrix <- matrix(-34956.125, M, M)  # An unlikely-to-occur matrix
-  just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR, constraints)
+  just.testing <- cm.VGAM(special.matrix, x, rrcontrol$noRRR, constraints)
   findex <- trivial.constraints(just.testing, special.matrix)
   tc1 <- trivial.constraints(constraints)
 
@@ -433,6 +433,9 @@ cao.control <- function(Rank = 1,
     warning("'df1.nl' values between 0 and 0.05 converted to 0.05")
     df1.nl[df1.nl < 0.05] <- 0.05
   }
+  if (any(df1.nl > 3.5)) {
+    warning("'df1.nl' values > 3.5 are excessive")
+  }
   if (!is.Numeric(df2.nl) || any(df2.nl < 0))
     stop("Bad input for argument 'df2.nl'")
   if (any(df2.nl >= 0 & df2.nl < 0.05)) {
@@ -1074,7 +1077,7 @@ warning("20100405; this is new:")
 
 
 
-setClass(Class = "Coef.cao", representation(
+setClass(Class = "Coef.rrvgam", representation(
       "Bspline"      = "list",
       "C"            = "matrix",
       "Constrained"  = "logical",
@@ -1099,7 +1102,7 @@ setClass(Class = "Coef.cao", representation(
 
 
 
-Coef.cao <- function(object,
+Coef.rrvgam <- function(object,
     epsOptimum = 0.00001,  # Determines how accurately Optimum is estimated
     gridlen = 40,      # Number of points on the grid (one level at a time)
     maxgriditer = 10,  # Maximum number of iters allowed for grid search
@@ -1164,10 +1167,11 @@ Coef.cao <- function(object,
 
     which.species <- 1:NOS  # Do it for all species
     if (Rank == 1) {
-      gridd <- cbind(seq(extents[1,1], extents[2,1], len = gridlen))
+      gridd <- cbind(seq(extents[1, 1], extents[2, 1], len = gridlen))
     } else {
-      gridd <- expand.grid(seq(extents[1,1], extents[2,1], len = gridlen[1]),
-                           seq(extents[1,2], extents[2,2], len = gridlen[2]))
+      gridd <-
+        expand.grid(seq(extents[1, 1], extents[2, 1], len = gridlen[1]),
+                    seq(extents[1, 2], extents[2, 2], len = gridlen[2]))
       eta2matrix <- matrix(0, NOS, 1)
     }
     gridd.orig <- gridd
@@ -1188,7 +1192,7 @@ Coef.cao <- function(object,
              ((griditer <= maxgriditer) &&
              ((gridres1 > epsOptimum) ||
               (gridres2 > epsOptimum)))) {
-        temp <- predictcao(object, grid = gridd, sppno = thisSpecies,
+        temp <- predictrrvgam(object, grid = gridd, sppno = thisSpecies,
                            Rank = Rank, deriv = 0, MSratio = MSratio)
         yvals <- temp$yvals  # gridlen-vector
         xvals <- temp$xvals  # gridlen x Rank; gridd
@@ -1211,26 +1215,26 @@ Coef.cao <- function(object,
           break
         }
         if (index == 1 || index == nnn) {
-          maximum[sppno] <- optimum[1,sppno] <- NA
+          maximum[sppno] <- optimum[1, sppno] <- NA
           gridres1 <- epsOptimum + 1  # equivalent to a break
           break  # just in case
         } else {
-          maximum[sppno] <- yvals[index]  # on the eta scale
-          optimum[1,sppno] <- xvals[index,1]
-          gridd[,1] <- seq(
-                  max(extents[1, 1], optimum[1,sppno] - gridres1),
-                  min(extents[2, 1], optimum[1,sppno] + gridres1),
+          maximum[sppno] <- yvals[index]  # On the eta scale
+          optimum[1, sppno] <- xvals[index, 1]
+          gridd[, 1] <- seq(
+                  max(extents[1, 1], optimum[1, sppno] - gridres1),
+                  min(extents[2, 1], optimum[1, sppno] + gridres1),
                   len = gridlen)
           gridres1 <- gridd[2, 1] - gridd[1, 1]
           griditer <- griditer + 1
         }
-      } # of while 
+      }  # of while 
 
       if (Rank == 2) {
         myfun <- function(x, object, sppno, Rank = 1,
                           deriv = 0, MSratio = 1) {
           x <- matrix(x, 1, length(x))
-          temp <- predictcao(object, grid = x, sppno = sppno,
+          temp <- predictrrvgam(object, grid = x, sppno = sppno,
                              Rank = Rank, deriv = deriv, MSratio = MSratio)
           temp$yval
         }
@@ -1254,7 +1258,7 @@ Coef.cao <- function(object,
     maximum <- c(maximum)  # Convert from matrix to vector 
     names(maximum) <- ynames
 
-    ans <- new(Class = "Coef.cao",
+    ans <- new(Class = "Coef.rrvgam",
                Bspline = object@Bspline,
                Constrained = ConstrainedO,
                df1.nl = object@extra$df1.nl,
@@ -1308,7 +1312,7 @@ Coef.cao <- function(object,
 
 
 
-show.Coef.cao <- function(object,
+show.Coef.rrvgam <- function(object,
                           digits = max(2, options()$digits-2), ...) {
   Rank <- object@Rank
   NOS <- object@NOS
@@ -1327,7 +1331,7 @@ show.Coef.cao <- function(object,
     cat("\nC matrix (constrained/canonical coefficients)\n")
     print(object@C, digits = digits, ...)
   }
-  cat("\nOptima and maxima\n")
+  cat("\nOptimums and maximums\n")
   print(cbind(Optimum = optmat,
               Maximum), digits = max(1, digits-1))
   cat("\nNonlinear degrees of freedom\n")
@@ -1344,22 +1348,22 @@ show.Coef.cao <- function(object,
 
 
 
-setMethod("show", "Coef.cao", function(object)
-  show.Coef.cao(object))
+setMethod("show", "Coef.rrvgam", function(object)
+  show.Coef.rrvgam(object))
 
 
 
 
 
-setMethod("coef", "cao", function(object, ...) Coef.cao(object, ...))
-setMethod("coefficients", "cao", function(object, ...)
-    Coef.cao(object, ...))
-setMethod("Coef", "cao", function(object, ...) Coef.cao(object, ...))
+setMethod("coef", "rrvgam", function(object, ...) Coef.rrvgam(object, ...))
+setMethod("coefficients", "rrvgam", function(object, ...)
+    Coef.rrvgam(object, ...))
+setMethod("Coef", "rrvgam", function(object, ...) Coef.rrvgam(object, ...))
 
 
 
 
-lvplot.cao <- function(object,
+lvplot.rrvgam <- function(object,
           add = FALSE, show.plot = TRUE, rugplot = TRUE, y = FALSE, 
           type = c("fitted.values", "predictors"),
           xlab = paste("Latent Variable",
@@ -1488,13 +1492,13 @@ lvplot.cao <- function(object,
 }
 
 
-setMethod("lvplot", "cao",
+setMethod("lvplot", "rrvgam",
            function(object, ...) {
-           invisible(lvplot.cao(object, ...))})
+           invisible(lvplot.rrvgam(object, ...))})
 
 
 
-predict.cao <- function (object, newdata = NULL,
+predict.rrvgam <- function (object, newdata = NULL,
                          type = c("link", "response", "terms"), 
                          deriv = 0, ...) {
   type <- match.arg(type, c("link", "response", "terms"))[1]
@@ -1582,7 +1586,7 @@ predict.cao <- function (object, newdata = NULL,
         stop("mismatch found in 'which.species'")
 
      temp345 <-
-       predictcao(object, grid = latvarmat, sppno = thisSpecies,
+       predictrrvgam(object, grid = latvarmat, sppno = thisSpecies,
                   Rank = Rank, deriv = deriv, MSratio = MSratio,
                   type = ifelse(type == "response", "link", type))
      if (MSratio == 2) {
@@ -1626,11 +1630,11 @@ predict.cao <- function (object, newdata = NULL,
 
 
 
-setMethod("predict", "cao", function(object, ...)
-           predict.cao(object, ...))
+setMethod("predict", "rrvgam", function(object, ...)
+           predict.rrvgam(object, ...))
 
 
-predictcao <- function(object, grid, sppno, Rank = 1,
+predictrrvgam <- function(object, grid, sppno, Rank = 1,
                        deriv = 0, MSratio = 1, type = "link") {
   if (type != "link" && type != "terms")
     stop("'type' must be \"link\" or \"terms\"")
@@ -1693,7 +1697,8 @@ predictcao <- function(object, grid, sppno, Rank = 1,
 
 
 
-plot.cao <- function(x,
+
+plot.rrvgam <- function(x,
                      xlab = if (Rank == 1) "Latent Variable" else 
                             paste("Latent Variable", 1:Rank),
                      ylab = NULL, residuals.arg = FALSE,
@@ -1745,10 +1750,11 @@ plot.cao <- function(x,
     indexSpecies <- if (is.character(which.species))
         match(which.species[sppno], sppnames) else which.species[sppno]
     if (is.na(indexSpecies))
-        stop("mismatch found in 'which.species'")
-    terms.mat <- predictcao(object = x, grid = latvarmat, type = "terms",
-                            sppno = indexSpecies, Rank = Rank,
-                            deriv = deriv, MSratio = MSratio)
+      stop("mismatch found in 'which.species'")
+    terms.mat <- predictrrvgam(object = x, grid = latvarmat,
+                               type = "terms",
+                               sppno = indexSpecies, Rank = Rank,
+                               deriv = deriv, MSratio = MSratio)
     for (rindex in WhichRank) {
       xvals <- latvarmat[, rindex]
       yvals <- terms.mat[, rindex]
@@ -1786,14 +1792,14 @@ plot.cao <- function(x,
 
 
 
-setMethod("plot", "cao",
+setMethod("plot", "rrvgam",
            function(x, y, ...) {
            if (!missing(y)) stop("cannot process the 'y' argument")
-           invisible(plot.cao(x, ...))})
+           invisible(plot.rrvgam(x, ...))})
 
 
 
-persp.cao <-
+persp.rrvgam <-
   function(x,
            show.plot = TRUE,
            xlim = NULL, ylim = NULL, zlim = NULL,  # zlim ignored if Rank == 1
@@ -1855,7 +1861,7 @@ persp.cao <-
 
   LP <- matrix(as.numeric(NA), nrow(latvarmat), NOS)
   for (sppno in 1:NOS) {
-    temp <- predictcao(object = object, grid = latvarmat, sppno = sppno,
+    temp <- predictrrvgam(object = object, grid = latvarmat, sppno = sppno,
                        Rank = Rank, deriv = 0, MSratio = MSratio)
     LP[, sppno] <- temp$yval
   }
@@ -1921,11 +1927,11 @@ persp.cao <-
 
 if (!isGeneric("persp"))
   setGeneric("persp", function(x, ...) standardGeneric("persp"))
-setMethod("persp", "cao", function(x, ...) persp.cao(x = x, ...))
+setMethod("persp", "rrvgam", function(x, ...) persp.rrvgam(x = x, ...))
 
 
 
-latvar.cao <- function(object, ...) {
+latvar.rrvgam <- function(object, ...) {
   Coef(object, ...)@latvar
 }
 
@@ -1941,8 +1947,8 @@ if (!isGeneric("lv"))
              },
              package = "VGAM")
 
- setMethod("lv", "cao",
-           function(object, ...) latvar.cao(object, ...))
+ setMethod("lv", "rrvgam",
+           function(object, ...) latvar.rrvgam(object, ...))
 
 
 
@@ -1950,8 +1956,8 @@ if (!isGeneric("lv"))
     setGeneric("latvar",
   function(object, ...) standardGeneric("latvar"))
 
-setMethod("latvar", "cao",
-  function(object, ...) latvar.cao(object, ...))
+setMethod("latvar", "rrvgam",
+  function(object, ...) latvar.rrvgam(object, ...))
 
 
 
@@ -1964,20 +1970,20 @@ setMethod("latvar", "cao",
 
 
 
-setClass(Class = "summary.cao",
+setClass(Class = "summary.rrvgam",
          representation("misc" = "list",
                         "call" = "call"),
-         contains = "Coef.cao")
+         contains = "Coef.rrvgam")
 
 
 
 
 
-summary.cao <- function(object, ...) {
+summary.rrvgam <- function(object, ...) {
   answer <- Coef(object, ...)
 
 
-  answer <- as(answer, "summary.cao")
+  answer <- as(answer, "summary.rrvgam")
 
 
   answer@misc <- object@misc
@@ -1986,17 +1992,17 @@ summary.cao <- function(object, ...) {
 }
 
 
-setMethod("summary", "cao", function(object, ...)
-  summary.cao(object, ...))
+setMethod("summary", "rrvgam", function(object, ...)
+  summary.rrvgam(object, ...))
 
 
 
 
-show.summary.cao <- function(x, ...) {
+show.summary.rrvgam <- function(x, ...) {
   cat("\nCall:\n")
   dput(x@call)
 
-  show.Coef.cao(x, ...)
+  show.Coef.rrvgam(x, ...)
 
   cat("\nNumber of species: ", x@NOS, "\n")
 
@@ -2013,25 +2019,26 @@ show.summary.cao <- function(x, ...) {
 
 
 
-setMethod("show", "summary.cao",
+setMethod("show", "summary.rrvgam",
           function(object)
-          show.summary.cao(object))
+          show.summary.rrvgam(object))
 
 
 
 
-concoef.cao <- function(object, ...) {
+concoef.rrvgam <- function(object, ...) {
   Coef(object, ...)@C
 }
 
 
-concoef.Coef.cao <- function(object, ...) {
+concoef.Coef.rrvgam <- function(object, ...) {
   if (length(list(...)))
     warning("Too late! Ignoring the extra arguments")
   object@C
 }
 
 
+if (FALSE) {
  if (!isGeneric("ccoef"))
      setGeneric("ccoef", function(object, ...) {
     .Deprecated("concoef")
@@ -2039,15 +2046,19 @@ concoef.Coef.cao <- function(object, ...) {
     standardGeneric("ccoef")
     })
 
-setMethod("ccoef", "cao", function(object, ...)
-    concoef.cao(object, ...))
-setMethod("ccoef", "Coef.cao", function(object, ...)
-    concoef.Coef.cao(object, ...))
 
-setMethod("concoef", "cao", function(object, ...)
-    concoef.cao(object, ...))
-setMethod("concoef", "Coef.cao", function(object, ...)
-    concoef.Coef.cao(object, ...))
+setMethod("ccoef", "rrvgam", function(object, ...)
+    concoef.rrvgam(object, ...))
+setMethod("ccoef", "Coef.rrvgam", function(object, ...)
+    concoef.Coef.rrvgam(object, ...))
+}
+
+
+
+setMethod("concoef", "rrvgam", function(object, ...)
+    concoef.rrvgam(object, ...))
+setMethod("concoef", "Coef.rrvgam", function(object, ...)
+    concoef.Coef.rrvgam(object, ...))
 
 
 
@@ -2063,7 +2074,7 @@ if (!isGeneric("calibrate"))
   standardGeneric("calibrate"))
 
 
-setMethod("calibrate", "cao", function(object, ...)
+setMethod("calibrate", "rrvgam", function(object, ...)
           calibrate.qrrvglm(object, ...))
 
     
@@ -2071,14 +2082,14 @@ setMethod("calibrate", "qrrvglm", function(object, ...)
           calibrate.qrrvglm(object, ...))
 
 
-Tol.cao <- function(object, ...) {
-  stop("The tolerance for a 'cao' object is undefined")
+Tol.rrvgam <- function(object, ...) {
+  stop("The tolerance for a 'rrvgam' object is undefined")
 }
 
 if (!isGeneric("Tol"))
   setGeneric("Tol", function(object, ...) standardGeneric("Tol"))
-setMethod("Tol", "cao", function(object, ...)
-          Tol.cao(object, ...))
+setMethod("Tol", "rrvgam", function(object, ...)
+          Tol.rrvgam(object, ...))
 
 
 
@@ -2087,7 +2098,7 @@ setMethod("Tol", "cao", function(object, ...)
 
 
 
-setMethod("show",  "cao", function(object) show.vgam(object))
+setMethod("show",  "rrvgam", function(object) show.vgam(object))
 
 
 
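
Since the hunks above rename the S4 class "cao" to "rrvgam" wholesale
(Coef, lvplot, predict, persp, summary, show, Tol, ...), a quick hedged
check that the new plumbing is registered once the package is loaded:

    library(VGAM)
    extends("rrvgam", "vgam")         # TRUE, per the setClass() call
    existsMethod("Coef",   "rrvgam")  # TRUE
    existsMethod("lvplot", "rrvgam")  # TRUE
    isClass("cao")                    # FALSE: the old class name is gone
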
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index 1f5f9a5..a527459 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -173,7 +173,7 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
       if (sdnumat[lookat] > control$MUXfactor[lookat] *
                             control$isd.latvar[lookat]) {
           muxer <- control$isd.latvar[lookat] *
-                  control$MUXfactor[lookat] / sdnumat[lookat]
+                   control$MUXfactor[lookat] / sdnumat[lookat]
           cmatrix[, lookat] <- cmatrix[, lookat] * muxer
           if (control$trace) {
             cat(paste("Taking evasive action for latent variable ",
@@ -187,8 +187,8 @@ calldcqo <- function(cmatrix, etamat, xmat, ymat, wvec,
     numat <- xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
     evnu <- eigen(var(numat))
     temp7 <- if (Rank > 1)
-                   evnu$vector %*% diag(evnu$value^(-0.5)) else
-                   evnu$vector %*% evnu$value^(-0.5)
+               evnu$vector %*% diag(evnu$value^(-0.5)) else
+               evnu$vector %*% evnu$value^(-0.5)
         cmatrix <- cmatrix %*% temp7
         cmatrix <- crow1C(cmatrix, control$Crow1positive)
         numat <- xmat[, control$colx2.index, drop = FALSE] %*% cmatrix
@@ -381,7 +381,7 @@ ny <- names(y)
 
 
     special.matrix <- matrix(-34956.125, M, M)  # An unlikely-to-occur matrix
-    just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR,
+    just.testing <- cm.VGAM(special.matrix, x, rrcontrol$noRRR,
                             constraints)
     findex <- trivial.constraints(just.testing, special.matrix)
     tc1 <- trivial.constraints(constraints)
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index a42fa1e..fd9a613 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -65,6 +65,38 @@ setMethod("deviance", "vglm", function(object, ...)
 
 
 
+
+
+
+
+
+deviance.qrrvglm <- function(object,
+                             summation = TRUE,
+                             history = FALSE,
+                             ...) {
+  if (history) {
+    if (summation) {
+      return(object at misc$deviance.Bestof)
+    } else {
+      stop("cannot handle 'history = TRUE' when 'summation = FALSE'")
+    }
+  }
+
+  deviance.vlm(object, summation = summation, ...)
+}
+
+
+setMethod("deviance", "qrrvglm", function(object, ...)
+           deviance.qrrvglm(object, ...))
+
+setMethod("deviance", "rrvgam",  function(object, ...)
+           deviance.qrrvglm(object, ...))
+
+
+
+
+
+
 df.residual_vlm <- function(object, type = c("vlm", "lm"), ...) {
   type <- type[1]
 
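
deviance.qrrvglm() above gains a 'history' argument which returns the
stored misc$deviance.Bestof vector instead of recomputing the deviance.
A hedged sketch with a small CQO fit (CQO estimation is stochastic,
hence the seed; the hspider data ships with VGAM):

    library(VGAM)
    hsdata <- hspider
    hsdata[, 1:6] <- scale(hsdata[, 1:6])  # standardize the environment
    set.seed(111)
    fit <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr) ~
                 WaterCon + BareSand + FallTwig,
               family = poissonff, data = hsdata, Bestof = 2)
    deviance(fit)                  # the usual summed deviance
    deviance(fit, history = TRUE)  # deviances of the 'Bestof' refits
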
diff --git a/R/family.actuary.R b/R/family.actuary.R
index 8d39058..fb502df 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -14,7 +14,7 @@
 
 
 
-dgumbelII <- function(x, shape, scale = 1, log = FALSE) {
+dgumbelII <- function(x, scale = 1, shape, log = FALSE) {
 
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
@@ -47,7 +47,7 @@ dgumbelII <- function(x, shape, scale = 1, log = FALSE) {
 }
 
 
-pgumbelII <- function(q, shape, scale = 1) {
+pgumbelII <- function(q, scale = 1, shape) {
 
   LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -64,7 +64,7 @@ pgumbelII <- function(q, shape, scale = 1) {
 
 
 
-qgumbelII <- function(p, shape, scale = 1) {
+qgumbelII <- function(p, scale = 1, shape) {
 
   LLL <- max(length(p), length(shape), length(scale))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
@@ -82,7 +82,7 @@ qgumbelII <- function(p, shape, scale = 1) {
 }
 
 
-rgumbelII <- function(n, shape, scale = 1) {
+rgumbelII <- function(n, scale = 1, shape) {
   qgumbelII(runif(n), shape = shape, scale = scale)
 }
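
Note the argument reordering in the [dpqr]gumbelII() wrappers above:
'scale' now precedes 'shape'.  A hedged two-line illustration of why
positional calls need auditing after upgrading:

    library(VGAM)
    dgumbelII(2, scale = 3, shape = 1.5)  # named arguments: unambiguous
    dgumbelII(2, 3, 1.5)  # positionally scale = 3, shape = 1.5 now
                          # (under 0.9-4 this meant shape = 3, scale = 1.5)
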
 
@@ -95,11 +95,17 @@ rgumbelII <- function(n, shape, scale = 1) {
 
 
  gumbelII <-
-  function(lshape = "loge", lscale = "loge",
-           ishape = NULL,   iscale = NULL,
+  function(lscale = "loge", lshape = "loge",
+           iscale = NULL,   ishape = NULL,
            probs.y = c(0.2, 0.5, 0.8),
            perc.out = NULL,  # 50,
-           imethod = 1, zero = -2) {
+           imethod = 1, zero = -1, nowarning = FALSE) {
+
+
+
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
 
 
   lshape <- as.list(substitute(lshape))
@@ -139,15 +145,15 @@ rgumbelII <- function(n, shape, scale = 1) {
   new("vglmff",
   blurb = c("Gumbel Type II distribution\n\n",
             "Links:    ",
-            namesof("shape", lshape, eshape), ", ",
-            namesof("scale", lscale, escale), "\n",
+            namesof("scale", lscale, escale), ", ",
+            namesof("shape", lshape, eshape), "\n",
             "Mean:     scale^(1/shape) * gamma(1 - 1 / shape)\n",
             "Variance: scale^(2/shape) * (gamma(1 - 2/shape) - ",
                       "gamma(1 + 1/shape)^2)"),
  constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -179,13 +185,13 @@ rgumbelII <- function(n, shape, scale = 1) {
     M <- M1 * ncoly
 
 
-    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
 
 
     predictors.names <-
-        c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
-          namesof(mynames2, .lscale , .escale , tag = FALSE))[
+        c(namesof(mynames1, .lscale , .escale , tag = FALSE),
+          namesof(mynames2, .lshape , .eshape , tag = FALSE))[
           interleave.VGAM(M, M = M1)]
 
 
@@ -216,8 +222,8 @@ rgumbelII <- function(n, shape, scale = 1) {
         }  # ilocal
 
         etastart <-
-          cbind(theta2eta(Shape.init, .lshape , .eshape ),
-                theta2eta(Scale.init, .lscale , .escale ))[,
+          cbind(theta2eta(Scale.init, .lscale , .escale ),
+                theta2eta(Shape.init, .lshape , .eshape ))[,
                 interleave.VGAM(M, M = M1)]
       }
     }
@@ -228,8 +234,8 @@ rgumbelII <- function(n, shape, scale = 1) {
             .probs.y = probs.y,
             .imethod = imethod ) )),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     Shape <- as.matrix(Shape)
 
     if (length( .perc.out ) > 1 && ncol(Shape) > 1)
@@ -258,16 +264,16 @@ rgumbelII <- function(n, shape, scale = 1) {
 
     M1 <- extra$M1
     misc$link <-
-      c(rep( .lshape , length = ncoly),
-        rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+      c(rep( .lscale , length = ncoly),
+        rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
     temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
     names(misc$link) <- temp.names
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:ncoly) {
-      misc$earg[[M1*ii-1]] <- .eshape
-      misc$earg[[M1*ii  ]] <- .escale
+      misc$earg[[M1*ii-1]] <- .escale
+      misc$earg[[M1*ii  ]] <- .eshape
     }
 
     misc$M1 <- M1
@@ -287,8 +293,8 @@ rgumbelII <- function(n, shape, scale = 1) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -315,8 +321,8 @@ rgumbelII <- function(n, shape, scale = 1) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     rgumbelII(nsim * length(Scale), shape = Shape, scale = Scale)
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape
@@ -326,19 +332,19 @@ rgumbelII <- function(n, shape, scale = 1) {
 
   deriv = eval(substitute(expression({
     M1 <- 2
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
 
     dl.dshape <- 1 / Shape + log(Scale / y) -
                  log(Scale / y) * (Scale / y)^Shape
     dl.dscale <- Shape / Scale - (Shape / y) * (Scale / y)^(Shape - 1)
 
 
-    dshape.deta <- dtheta.deta(Shape, .lshape , .eshape )
     dscale.deta <- dtheta.deta(Scale, .lscale , .escale )
+    dshape.deta <- dtheta.deta(Shape, .lshape , .eshape )
 
-    myderiv <- c(w) * cbind(dl.dshape, dl.dscale) *
-                      cbind(dshape.deta, dscale.deta)
+    myderiv <- c(w) * cbind(dl.dscale, dl.dshape) *
+                      cbind(dscale.deta, dshape.deta)
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape
@@ -352,8 +358,8 @@ rgumbelII <- function(n, shape, scale = 1) {
     ned2l.dshapescale <- digamma(2) / Scale
 
 
-    wz <- array(c(c(w) * ned2l.dshape2 * dshape.deta^2,
-                  c(w) * ned2l.dscale2 * dscale.deta^2,
+    wz <- array(c(c(w) * ned2l.dscale2 * dscale.deta^2,
+                  c(w) * ned2l.dshape2 * dshape.deta^2,
                   c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
                 dim = c(n, M / M1, 3))
     wz <- arwz2wz(wz, M = M, M1 = M1)
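
The 3-slice array above carries the two diagonal working weights plus the
single off-diagonal term, and arwz2wz() repacks them into VGAM's
matrix-band format, in which iam() maps a (j, k) entry of the EIM to its
band column.  A quick illustration of that indexing, assuming iam() is the
exported VGAM helper used in this file:

    library(VGAM)
    iam(1, 1, M = 2)  # 1: diagonal term for predictor 1 (scale)
    iam(2, 2, M = 2)  # 2: diagonal term for predictor 2 (shape)
    iam(1, 2, M = 2)  # 3: the single off-diagonal term
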
@@ -431,7 +437,7 @@ pmbeard <- function(q, shape, scale = 1, rho, epsilon) {
 
 
 
-dmperks <- function(x, shape, scale = 1, epsilon, log = FALSE) {
+dmperks <- function(x, scale = 1, shape, epsilon, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -466,7 +472,7 @@ dmperks <- function(x, shape, scale = 1, epsilon, log = FALSE) {
 
 
 
-pmperks <- function(q, shape, scale = 1, epsilon) {
+pmperks <- function(q, scale = 1, shape, epsilon) {
 
   LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -569,7 +575,7 @@ qbeard <- function(x, u = 0.5, alpha = 1, beta = 1,rho = 1) {
 
 
 
-dperks <- function(x, shape, scale = 1, log = FALSE) {
+dperks <- function(x, scale = 1, shape, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -599,7 +605,7 @@ dperks <- function(x, shape, scale = 1, log = FALSE) {
 
 
 
-pperks <- function(q, shape, scale = 1) {
+pperks <- function(q, scale = 1, shape) {
 
   LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -617,7 +623,7 @@ pperks <- function(q, shape, scale = 1) {
 }
 
 
-qperks <- function(p, shape, scale = 1) {
+qperks <- function(p, scale = 1, shape) {
 
   LLL <- max(length(p), length(shape), length(scale))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
@@ -636,8 +642,8 @@ qperks <- function(p, shape, scale = 1) {
 }
 
 
-rperks <- function(n, shape, scale = 1) {
-  qperks(runif(n), shape = shape, scale = scale)
+rperks <- function(n, scale = 1, shape) {
+  qperks(runif(n), scale = scale, shape = shape)
 }
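
Since the first two positional arguments of dperks()/pperks()/qperks()/
rperks() swap here, positional calls silently change meaning across 0.9-5;
named arguments are unaffected.  A quick inverse check with arbitrary
parameter values:

    library(VGAM)
    pp <- pperks(qperks(0.3, scale = 2, shape = 1), scale = 2, shape = 1)
    all.equal(pp, 0.3)  # TRUE: pperks() inverts qperks()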
 
 
@@ -650,12 +656,17 @@ perks.control <- function(save.weight = TRUE, ...) {
 
 
  perks <-
-  function(lshape = "loge",    lscale = "loge",
-           ishape = NULL,      iscale = NULL,
-           gshape = exp(-5:5), gscale = exp(-5:5),
+  function(lscale = "loge",    lshape = "loge",
+           iscale = NULL,      ishape = NULL,
+           gscale = exp(-5:5), gshape = exp(-5:5),
            nsimEIM = 500,
            oim.mean = FALSE,
-           zero = NULL) {
+           zero = NULL, nowarning = FALSE) {
+
+
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -692,14 +703,14 @@ perks.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Perks' distribution\n\n",
             "Links:    ",
-            namesof("shape", lshape, eshape), ", ",
-            namesof("scale", lscale, escale), "\n",
-            "Median:     qperks(p = 0.5, shape, scale)"),
+            namesof("scale", lscale, escale), ", ",
+            namesof("shape", lshape, eshape), "\n",
+            "Median:   qperks(p = 0.5, scale = scale, shape = shape)"),
 
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -731,11 +742,11 @@ perks.control <- function(save.weight = TRUE, ...) {
     M <- M1 * ncoly
 
 
-    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
     predictors.names <-
-        c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
-          namesof(mynames2, .lscale , .escale , tag = FALSE))[
+        c(namesof(mynames1, .lscale , .escale , tag = FALSE),
+          namesof(mynames2, .lshape , .eshape , tag = FALSE))[
           interleave.VGAM(M, M = M1)]
 
 
@@ -761,14 +772,15 @@ perks.control <- function(save.weight = TRUE, ...) {
           ans
         }
 
+
         mymat <- matrix(-1, length(shape.grid), 2)
         for (jlocal in 1:length(shape.grid)) {
           mymat[jlocal, ] <-
-            getMaxMin(scale.grid,
-                      objfun = perks.Loglikfun,
-                      y = yvec, x = x, w = wvec,
-                      ret.objfun = TRUE,
-                      extraargs = list(Shape = shape.grid[jlocal]))
+            grid.search(scale.grid,
+                        objfun = perks.Loglikfun,
+                        y = yvec, x = x, w = wvec,
+                        ret.objfun = TRUE,
+                        extraargs = list(Shape = shape.grid[jlocal]))
         }
         index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]
 
@@ -779,8 +791,8 @@ perks.control <- function(save.weight = TRUE, ...) {
       }  # spp.
 
       etastart <-
-          cbind(theta2eta(matH, .lshape , .eshape ),
-                theta2eta(matC, .lscale , .escale ))[,
+          cbind(theta2eta(matC, .lscale , .escale ),
+                theta2eta(matH, .lshape , .eshape ))[,
                 interleave.VGAM(M, M = M1)]
     }  # End of !length(etastart)
   }), list( .lscale = lscale, .lshape = lshape,
@@ -790,8 +802,8 @@ perks.control <- function(save.weight = TRUE, ...) {
             ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
 
     qperks(p = 0.5, shape = Shape, scale = Scale)
   }, list( .lscale = lscale, .lshape = lshape,
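
getMaxMin() calls become grid.search() throughout this file.  A minimal
self-contained sketch of the calling pattern used above, assuming
grid.search() evaluates objfun at each grid value and, with
ret.objfun = TRUE, returns the maximiser together with the attained
log-likelihood:

    library(VGAM)
    set.seed(2)
    yy <- rperks(200, scale = 1, shape = 1)
    ww <- rep(1, length(yy)); xx <- cbind(rep(1, length(yy)))
    profile.ll <- function(scaleval, y, x, w, extraargs)
      sum(c(w) * dperks(y, scale = scaleval, shape = extraargs$Shape,
                        log = TRUE))
    grid.search(exp(-5:5), objfun = profile.ll, y = yy, x = xx, w = ww,
                ret.objfun = TRUE, extraargs = list(Shape = 1))
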
@@ -799,16 +811,16 @@ perks.control <- function(save.weight = TRUE, ...) {
   last = eval(substitute(expression({
 
     misc$link <-
-      c(rep( .lshape , length = ncoly),
-        rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+      c(rep( .lscale , length = ncoly),
+        rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
     temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
     names(misc$link) <- temp.names
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:ncoly) {
-      misc$earg[[M1*ii-1]] <- .eshape
-      misc$earg[[M1*ii  ]] <- .escale
+      misc$earg[[M1*ii-1]] <- .escale
+      misc$earg[[M1*ii  ]] <- .eshape
     }
 
 
@@ -823,8 +835,8 @@ perks.control <- function(save.weight = TRUE, ...) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -851,8 +863,8 @@ perks.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     rperks(nsim * length(Scale), shape = Shape, scale = Scale)
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape ))),
@@ -865,10 +877,10 @@ perks.control <- function(save.weight = TRUE, ...) {
  
   deriv = eval(substitute(expression({
     M1 <- 2
-    shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
-                       .lshape , .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+    scale <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
                        .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
+                       .lshape , .eshape )
 
 
     temp2 <- exp(y * scale)
@@ -882,8 +894,8 @@ perks.control <- function(save.weight = TRUE, ...) {
     dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
     dscale.deta <- dtheta.deta(scale, .lscale , .escale )
 
-    dthetas.detas <- cbind(dshape.deta, dscale.deta)
-    myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
+    dthetas.detas <- cbind(dscale.deta, dshape.deta)
+    myderiv <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape ))),
@@ -901,8 +913,8 @@ perks.control <- function(save.weight = TRUE, ...) {
 
     for (spp. in 1:NOS) {
       run.varcov <- 0
-      Shape <- shape[, spp.]
       Scale <- scale[, spp.]
+      Shape <- shape[, spp.]
 
 
 
@@ -939,7 +951,7 @@ if (ii < 3) {
                      (1 + 1 / Scale) * Shape * ysim * temp2 / temp3
 
 
-        temp7 <- cbind(dl.dshape, dl.dscale)
+        temp7 <- cbind(dl.dscale, dl.dshape)
 if (ii < 3) {
 }
         run.varcov <- run.varcov +
@@ -985,7 +997,7 @@ if (ii < 3) {
 
 
 
-dmakeham <- function(x, shape, scale = 1, epsilon = 0, log = FALSE) {
+dmakeham <- function(x, scale = 1, shape, epsilon = 0, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -1015,7 +1027,7 @@ dmakeham <- function(x, shape, scale = 1, epsilon = 0, log = FALSE) {
 
 
 
-pmakeham <- function(q, shape, scale = 1, epsilon = 0) {
+pmakeham <- function(q, scale = 1, shape, epsilon = 0) {
 
   LLL <- max(length(q), length(shape), length(scale), length(epsilon))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -1033,7 +1045,7 @@ pmakeham <- function(q, shape, scale = 1, epsilon = 0) {
 
 
 
-qmakeham <- function(p, shape, scale = 1, epsilon = 0) {
+qmakeham <- function(p, scale = 1, shape, epsilon = 0) {
 
   LLL <- max(length(p), length(shape), length(scale), length(epsilon))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
@@ -1058,8 +1070,8 @@ qmakeham <- function(p, shape, scale = 1, epsilon = 0) {
 }
 
 
-rmakeham <- function(n, shape, scale = 1, epsilon = 0) {
-  qmakeham(runif(n), shape = shape, scale = scale, epsilon = epsilon)
+rmakeham <- function(n, scale = 1, shape, epsilon = 0) {
+  qmakeham(runif(n), scale = scale, shape = shape, epsilon = epsilon)
 }
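
The same scale-first reordering applies to the Makeham d/p/q/r quartet;
again, named arguments keep existing code working.  Quick inverse check:

    library(VGAM)
    qq <- qmakeham(0.5, scale = 2, shape = 0.5, epsilon = 0.1)
    pmakeham(qq, scale = 2, shape = 0.5, epsilon = 0.1)  # 0.5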
 
 
@@ -1071,19 +1083,24 @@ makeham.control <- function(save.weight = TRUE, ...) {
 
 
  makeham <-
-  function(lshape = "loge", lscale = "loge", lepsilon = "loge",
-           ishape = NULL,   iscale = NULL,   iepsilon = NULL,  # 0.3,
-           gshape = exp(-5:5),
+  function(lscale = "loge", lshape = "loge", lepsilon = "loge",
+           iscale = NULL,   ishape = NULL,   iepsilon = NULL,  # 0.3,
            gscale = exp(-5:5),
+           gshape = exp(-5:5),
            gepsilon = exp(-4:1),
            nsimEIM = 500,
            oim.mean = TRUE,
-           zero = NULL) {
+           zero = NULL, nowarning = FALSE) {
 
 
 
 
 
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
+
+
   lepsil <- lepsilon
   iepsil <- iepsilon
 
@@ -1131,15 +1148,15 @@ makeham.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Makeham distribution\n\n",
             "Links:    ",
-            namesof("shape",   lshape, eshape), ", ",
             namesof("scale",   lscale, escale), ", ",
+            namesof("shape",   lshape, eshape), ", ",
             namesof("epsilon", lepsil, eepsil), "\n",
-            "Median:   qmakeham(p = 0.5, shape, scale, epsilon)"),
+            "Median:   qmakeham(p = 0.5, scale, shape, epsilon)"),
 
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 3
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1172,12 +1189,12 @@ makeham.control <- function(save.weight = TRUE, ...) {
     M <- M1 * ncoly
 
 
-    mynames1 <- paste("shape",   if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("scale",   if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1 <- paste("scale",   if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("shape",   if (ncoly > 1) 1:ncoly else "", sep = "")
     mynames3 <- paste("epsilon", if (ncoly > 1) 1:ncoly else "", sep = "")
     predictors.names <-
-        c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
-          namesof(mynames2, .lscale , .escale , tag = FALSE),
+        c(namesof(mynames1, .lscale , .escale , tag = FALSE),
+          namesof(mynames2, .lshape , .eshape , tag = FALSE),
           namesof(mynames3, .lepsil , .eepsil , tag = FALSE))[
           interleave.VGAM(M, M = M1)]
 
@@ -1213,12 +1230,12 @@ makeham.control <- function(save.weight = TRUE, ...) {
         mymat <- matrix(-1, length(shape.grid), 2)
         for (jlocal in 1:length(shape.grid)) {
           mymat[jlocal, ] <-
-            getMaxMin(scale.grid,
-                      objfun = makeham.Loglikfun,
-                      y = yvec, x = x, w = wvec,
-                      ret.objfun = TRUE,
-                      extraargs = list(Shape = shape.grid[jlocal],
-                                       Epsil = matE[1, spp.]))
+            grid.search(scale.grid,
+                        objfun = makeham.Loglikfun,
+                        y = yvec, x = x, w = wvec,
+                        ret.objfun = TRUE,
+                        extraargs = list(Shape = shape.grid[jlocal],
+                                         Epsil = matE[1, spp.]))
         }
         index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]
 
@@ -1245,18 +1262,18 @@ makeham.control <- function(save.weight = TRUE, ...) {
           ans
         }
         Init.epsil <-
-            getMaxMin(epsil.grid,
-                      objfun = makeham.Loglikfun2,
-                      y = yvec, x = x, w = wvec,
-                      extraargs = list(Shape = matH[1, spp.],
-                                       Scale = matC[1, spp.]))
+            grid.search(epsil.grid,
+                        objfun = makeham.Loglikfun2,
+                        y = yvec, x = x, w = wvec,
+                        extraargs = list(Shape = matH[1, spp.],
+                                         Scale = matC[1, spp.]))
 
         matE[, spp.] <- Init.epsil
       }  # spp.
 
 
-      etastart <- cbind(theta2eta(matH, .lshape , .eshape ),
-                        theta2eta(matC, .lscale , .escale ),
+      etastart <- cbind(theta2eta(matC, .lscale , .escale ),
+                        theta2eta(matH, .lshape , .eshape ),
                         theta2eta(matE, .lepsil , .eepsil ))[,
                         interleave.VGAM(M, M = M1)]
     }  # End of !length(etastart)
@@ -1268,10 +1285,10 @@ makeham.control <- function(save.weight = TRUE, ...) {
           ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lshape , .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
+    scale <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lshape , .eshape )
     epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lepsil , .eepsil )
-    qmakeham(p = 0.5, shape = shape, scale = scale, epsil = epsil)
+    qmakeham(p = 0.5, scale = scale, shape = shape, epsil = epsil)
   }, list(
             .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
             .eshape = eshape, .escale = escale, .eepsil = eepsil
@@ -1279,8 +1296,8 @@ makeham.control <- function(save.weight = TRUE, ...) {
   last = eval(substitute(expression({
     M1 <- extra$M1
     misc$link <-
-      c(rep( .lshape , length = ncoly),
-        rep( .lscale , length = ncoly),
+      c(rep( .lscale , length = ncoly),
+        rep( .lshape , length = ncoly),
         rep( .lepsil , length = ncoly))[interleave.VGAM(M, M = M1)]
     temp.names <- c(mynames1, mynames2, mynames3)[
                     interleave.VGAM(M, M = M1)]
@@ -1289,8 +1306,8 @@ makeham.control <- function(save.weight = TRUE, ...) {
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:ncoly) {
-      misc$earg[[M1*ii-2]] <- .eshape
-      misc$earg[[M1*ii-1]] <- .escale
+      misc$earg[[M1*ii-2]] <- .escale
+      misc$earg[[M1*ii-1]] <- .eshape
       misc$earg[[M1*ii  ]] <- .eepsil
     }
 
@@ -1306,13 +1323,13 @@ makeham.control <- function(save.weight = TRUE, ...) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lshape , .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lscale , .escale )
+    scale <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .lshape , .eshape )
     epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .lepsil , .eepsil )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dmakeham(x = y, shape = shape, scale = scale,
+      ll.elts <- c(w) * dmakeham(x = y, scale = scale, shape = shape,
                                  epsil = epsil, log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -1335,14 +1352,14 @@ makeham.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
-                       .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
+    Scale <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
                        .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
+                       .lshape , .eshape )
     epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE), drop = FALSE],
                        .lepsil , .eepsil )
     rmakeham(nsim * length(Scale),
-             shape = c(shape), scale = c(Scale), epsilon = c(epsil))
+             scale = c(Scale), shape = c(shape), epsilon = c(epsil))
   }, list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
            .eshape = eshape, .escale = escale, .eepsil = eepsil ))),
 
@@ -1350,48 +1367,39 @@ makeham.control <- function(save.weight = TRUE, ...) {
 
 
   deriv = eval(substitute(expression({
-    M1 <- 3
-    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
-                       .lshape , .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
+    scale <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
                        .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
+                       .lshape , .eshape )
     epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE), drop = FALSE],
                        .lepsil , .eepsil )
 
-
     temp2 <- exp(y * scale)
     temp3 <- epsil + shape * temp2
     dl.dshape <- temp2 / temp3 - expm1(y * scale) / scale
     dl.dscale <- shape * y * temp2 / temp3 +
                  shape * expm1(y * scale) / scale^2 -
                  shape * y * temp2 / scale
-
     dl.depsil <- 1 / temp3 - y
 
     dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
     dscale.deta <- dtheta.deta(scale, .lscale , .escale )
     depsil.deta <- dtheta.deta(epsil, .lepsil , .eepsil )
 
-    dthetas.detas <- cbind(dshape.deta, dscale.deta, depsil.deta)
-    myderiv <- c(w) * cbind(dl.dshape,
-                            dl.dscale,
+    dthetas.detas <- cbind(dscale.deta, dshape.deta, depsil.deta)
+    myderiv <- c(w) * cbind(dl.dscale,
+                            dl.dshape,
                             dl.depsil) * dthetas.detas
     myderiv[, interleave.VGAM(M, M = M1)]
-  }), list(
-            .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
-            .eshape = eshape, .escale = escale, .eepsil = eepsil
-          ))),
-
+  }), list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+            .eshape = eshape, .escale = escale, .eepsil = eepsil ))),
 
   weight = eval(substitute(expression({
-
     NOS <- M / M1
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+    wz <- matrix(0.0, n, M + M - 1 + M - 2)  # wz has half-bandwidth 3
 
-    wz <- matrix(0.0, n, M + M - 1 + M - 2)  # wz has half-bw 3
-
-    ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
-
+    ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)  # Use simulated EIM
 
     for (spp. in 1:NOS) {
       run.varcov <- 0
@@ -1399,91 +1407,38 @@ makeham.control <- function(save.weight = TRUE, ...) {
       Scale <- scale[, spp.]
       Epsil <- epsil[, spp.]
 
-
-
-
-      if (FALSE && intercept.only && .oim.mean ) {
-
-      temp8 <- (1 + Shape * exp(Scale * y[, spp.]))^2
-      nd2l.dadb <- 2 * y[, spp.] * exp(Scale * y[, spp.]) / temp8
-
-      nd2l.dada <- 1 / Shape^2 + 1 / (1 + Shape)^2 -
-        2 * exp(2 * Scale * y[, spp.]) / temp8
-
-      nd2l.dbdb <- 2 * Shape * y[, spp.]^2 * exp(Scale * y[, spp.]) / temp8
-
-
-      ave.oim11 <- weighted.mean(nd2l.dada, w[, spp.])
-      ave.oim12 <- weighted.mean(nd2l.dadb, w[, spp.])
-      ave.oim22 <- weighted.mean(nd2l.dbdb, w[, spp.])
-      run.varcov <- cbind(ave.oim11, ave.oim22, ave.oim12)
-    } else {
-
       for (ii in 1:( .nsimEIM )) {
-        ysim <- rmakeham(n = n, shape = Shape, scale = Scale,
-                         epsil = Epsil)
-if (ii < 3) {
-}
-
+        ysim <- rmakeham(n = n, scale = Scale, shape = Shape, epsil = Epsil)
         temp2 <- exp(ysim * Scale)
         temp3 <- Epsil + Shape * temp2
- if (!is.Numeric(temp2))
-  stop("temp2 is not Numeric")
- if (!is.Numeric(temp3))
-  stop("temp3 is not Numeric")
         dl.dshape <- temp2 / temp3 - expm1(ysim * Scale) / Scale
         dl.dscale <- Shape * ysim * temp2 / temp3 +
                      Shape * expm1(ysim * Scale) / Scale^2 -
                      Shape * ysim * temp2 / Scale
         dl.depsil <- 1 / temp3 - ysim
 
-
-
-        temp7 <- cbind(dl.dshape, dl.dscale, dl.depsil)
-if (ii < 3) {
-}
-        run.varcov <- run.varcov +
-                      temp7[, ind1$row.index] *
-                      temp7[, ind1$col.index]
+        temp7 <- cbind(dl.dscale, dl.dshape, dl.depsil)
+        run.varcov <- run.varcov + temp7[, ind1$row.index] *
+                                   temp7[, ind1$col.index]
       }
       run.varcov <- cbind(run.varcov / .nsimEIM )
 
-    }
-
-
-
-      for (ilocal in 1:ncol(run.varcov)) {
-        indexInf <- is.finite(run.varcov[, ilocal])
-        run.varcov[!indexInf, ilocal] <-
-          mean(run.varcov[indexInf, ilocal])
-      }
-
-
 
       wz1 <- if (intercept.only)
-          matrix(colMeans(run.varcov, na.rm = TRUE),
-                 nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
-          run.varcov
-
+        matrix(colMeans(run.varcov, na.rm = TRUE),
+               nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else run.varcov
 
       wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
                    dThetas.detas[, M1 * (spp. - 1) + ind1$col]
-
-
       for (jay in 1:M1)
-        for (kay in jay:M1) {
+        for (kay in jay:M1) {  # Now copy wz1 into wz
           cptr <- iam((spp. - 1) * M1 + jay,
-                      (spp. - 1) * M1 + kay,
-                      M = M)
+                      (spp. - 1) * M1 + kay, M = M)
           wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
         }
     }  # End of for (spp.) loop
-
-
-
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
-  }), list(
-            .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
+  }), list( .lshape = lshape, .lscale = lscale, .lepsil = lepsil,
             .eshape = eshape, .escale = escale, .eepsil = eepsil,
             .nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
 }  # makeham()
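
The rewritten weight slot drops the dead oim.mean branch (it was guarded by
FALSE) and the ii < 3 debugging stubs, keeping only the simulated EIM:
nsimEIM score vectors are drawn and their outer products averaged.  A toy,
self-contained sketch of that estimator for a single observation, reusing
the score expressions from this hunk:

    library(VGAM)
    set.seed(3)
    nsimEIM <- 500; Scale <- 1; Shape <- 0.5; Epsil <- 0.1
    run.varcov <- matrix(0, 3, 3)
    for (ii in 1:nsimEIM) {
      ysim <- rmakeham(1, scale = Scale, shape = Shape, epsilon = Epsil)
      temp2 <- exp(ysim * Scale)
      temp3 <- Epsil + Shape * temp2
      score <- c(Shape * ysim * temp2 / temp3 +               # dl/dscale
                 Shape * expm1(ysim * Scale) / Scale^2 -
                 Shape * ysim * temp2 / Scale,
                 temp2 / temp3 - expm1(ysim * Scale) / Scale, # dl/dshape
                 1 / temp3 - ysim)                            # dl/depsilon
      run.varcov <- run.varcov + tcrossprod(score)
    }
    run.varcov / nsimEIM  # approximate 3 x 3 EIM for one observation
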
@@ -1495,7 +1450,7 @@ if (ii < 3) {
 
 
 
-dgompertz <- function(x, shape, scale = 1, log = FALSE) {
+dgompertz <- function(x, scale = 1, shape, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -1527,7 +1482,7 @@ dgompertz <- function(x, shape, scale = 1, log = FALSE) {
 
 
 
-pgompertz <- function(q, shape, scale = 1) {
+pgompertz <- function(q, scale = 1, shape) {
 
   LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -1542,7 +1497,7 @@ pgompertz <- function(q, shape, scale = 1) {
 }
 
 
-qgompertz <- function(p, shape, scale = 1) {
+qgompertz <- function(p, scale = 1, shape) {
 
   LLL <- max(length(p), length(shape), length(scale))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
@@ -1559,8 +1514,8 @@ qgompertz <- function(p, shape, scale = 1) {
 }
 
 
-rgompertz <- function(n, shape, scale = 1) {
-  qgompertz(runif(n), shape = shape, scale = scale)
+rgompertz <- function(n, scale = 1, shape) {
+  qgompertz(runif(n), scale = scale, shape = shape)
 }
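
gompertz()'s linkinv slot (below) returns the median in closed form; a
quick numeric check against the reordered qgompertz():

    library(VGAM)
    Scale <- 2; Shape <- 0.5
    qgompertz(0.5, scale = Scale, shape = Shape)
    log1p((Scale / Shape) * log(2)) / Scale  # same value: the median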
 
 
@@ -1575,13 +1530,18 @@ gompertz.control <- function(save.weight = TRUE, ...) {
 
 
  gompertz <-
-  function(lshape = "loge", lscale = "loge",
-           ishape = NULL,   iscale = NULL,
+  function(lscale = "loge", lshape = "loge",
+           iscale = NULL,   ishape = NULL,
            nsimEIM = 500,
-           zero = NULL) {
+           zero = NULL, nowarning = FALSE) {
+
 
 
 
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
+
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
   lshape <- attr(eshape, "function.name")
@@ -1614,14 +1574,14 @@ gompertz.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Gompertz distribution\n\n",
             "Links:    ",
-            namesof("shape", lshape, eshape ), ", ",
-            namesof("scale", lscale, escale ), "\n",
+            namesof("scale", lscale, escale ), ", ",
+            namesof("shape", lshape, eshape ), "\n",
             "Median:     scale * log(2 - 1 / shape)"),
 
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1653,11 +1613,11 @@ gompertz.control <- function(save.weight = TRUE, ...) {
     M <- M1 * ncoly
 
 
-    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
     predictors.names <-
-        c(namesof(mynames1, .lshape , .eshape , tag = FALSE),
-          namesof(mynames2, .lscale , .escale , tag = FALSE))[
+        c(namesof(mynames1, .lscale , .escale , tag = FALSE),
+          namesof(mynames2, .lshape , .eshape , tag = FALSE))[
           interleave.VGAM(M, M = M1)]
 
 
@@ -1689,11 +1649,11 @@ gompertz.control <- function(save.weight = TRUE, ...) {
         mymat <- matrix(-1, length(shape.grid), 2)
         for (jlocal in 1:length(shape.grid)) {
           mymat[jlocal, ] <-
-            getMaxMin(scale.grid,
-                      objfun = gompertz.Loglikfun,
-                      y = yvec, x = x, w = wvec,
-                      ret.objfun = TRUE,
-                      extraargs = list(Shape = shape.grid[jlocal]))
+            grid.search(scale.grid,
+                        objfun = gompertz.Loglikfun,
+                        y = yvec, x = x, w = wvec,
+                        ret.objfun = TRUE,
+                        extraargs = list(Shape = shape.grid[jlocal]))
         }
         index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]
 
@@ -1703,8 +1663,8 @@ gompertz.control <- function(save.weight = TRUE, ...) {
           matC[, spp.] <- mymat[index.shape, 1]
       }  # spp.
 
-      etastart <- cbind(theta2eta(matH, .lshape , .eshape ),
-                        theta2eta(matC, .lscale , .escale ))[,
+      etastart <- cbind(theta2eta(matC, .lscale , .escale ),
+                        theta2eta(matH, .lshape , .eshape ))[,
                         interleave.VGAM(M, M = M1)]
     }  # End of !length(etastart)
   }), list( .lshape = lshape, .lscale = lscale,
@@ -1713,24 +1673,24 @@ gompertz.control <- function(save.weight = TRUE, ...) {
           ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     log1p((scale / shape) * log(2)) / scale
   }, list( .lshape = lshape, .lscale = lscale,
            .eshape = eshape, .escale = escale ))),
   last = eval(substitute(expression({
     M1 <- extra$M1
     misc$link <-
-      c(rep( .lshape , length = ncoly),
-        rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+      c(rep( .lscale , length = ncoly),
+        rep( .lshape , length = ncoly))[interleave.VGAM(M, M = M1)]
     temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
     names(misc$link) <- temp.names
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:ncoly) {
-      misc$earg[[M1*ii-1]] <- .eshape
-      misc$earg[[M1*ii  ]] <- .escale
+      misc$earg[[M1*ii-1]] <- .escale
+      misc$earg[[M1*ii  ]] <- .eshape
     }
 
     misc$M1 <- M1
@@ -1744,13 +1704,13 @@ gompertz.control <- function(save.weight = TRUE, ...) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dgompertz(x = y, shape = shape,
-                                  scale = scale, log = TRUE)
+      ll.elts <- c(w) * dgompertz(x = y, scale = scale,
+                                  shape = shape, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -1772,8 +1732,8 @@ gompertz.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , .escale )
+    Scale <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , .escale )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , .eshape )
     rgompertz(nsim * length(Scale),
               shape = c(Shape), scale = c(Scale))
     }, list( .lshape = lshape, .lscale = lscale,
@@ -1783,10 +1743,10 @@ gompertz.control <- function(save.weight = TRUE, ...) {
 
   deriv = eval(substitute(expression({
     M1 <- 2
-    shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lshape ,
-                       .eshape )
-    scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lscale ,
+    scale <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lscale ,
                        .escale )
+    shape <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .lshape ,
+                       .eshape )
 
 
     temp2 <- exp(y * scale)
@@ -1795,11 +1755,11 @@ gompertz.control <- function(save.weight = TRUE, ...) {
     dl.dscale <- y * (1 - shape * temp2 / scale) -
                  shape * temp4 / scale^2
 
-    dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
     dscale.deta <- dtheta.deta(scale, .lscale , .escale )
+    dshape.deta <- dtheta.deta(shape, .lshape , .eshape )
 
-    dthetas.detas <- cbind(dshape.deta, dscale.deta)
-    myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
+    dthetas.detas <- cbind(dscale.deta, dshape.deta)
+    myderiv <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lshape = lshape, .lscale = lscale,
             .eshape = eshape, .escale = escale ))),
@@ -1832,7 +1792,7 @@ if (ii < 3) {
                      shape * temp4 / scale^2
 
 
-        temp7 <- cbind(dl.dshape, dl.dscale)
+        temp7 <- cbind(dl.dscale, dl.dshape)
         run.varcov <- run.varcov +
                       temp7[, ind1$row.index] *
                       temp7[, ind1$col.index]
@@ -1985,7 +1945,7 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -2046,10 +2006,10 @@ exponential.mo.control <- function(save.weight = TRUE, ...) {
         }
         Alpha.init <- .ialpha0
         lambda.grid <- seq(0.1, 10.0, len = 21)
-        Lambda.init <- getMaxMin(lambda.grid,
-                                 objfun = moexpon.Loglikfun,
-                                 y = y, x = x, w = w,
-                                 extraargs = list(alpha = Alpha.init))
+        Lambda.init <- grid.search(lambda.grid,
+                                   objfun = moexpon.Loglikfun,
+                                   y = y, x = x, w = w,
+                                   extraargs = list(alpha = Alpha.init))
 
         if (length(mustart)) {
           Lambda.init <- Lambda.init / (1 - Phimat.init)
@@ -2258,7 +2218,7 @@ if (ii < 3) {
                     "gamma(shape3.q - 1/shape1.a) / ",
                     "(gamma(shape2.p) * gamma(shape3.q))"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2457,11 +2417,11 @@ rdagum <- function(n, shape1.a, scale = 1, shape2.p)
          shape2.p = shape2.p)
 
 
-rinvlomax <- function(n, scale = 1, shape2.p)
+rinv.lomax <- function(n, scale = 1, shape2.p)
   rdagum(n, shape1.a = 1, scale = scale, shape2.p)
 
 
-rinvparalogistic <- function(n, shape1.a, scale = 1)
+rinv.paralogistic <- function(n, shape1.a, scale = 1)
   rdagum(n, shape1.a, scale = scale, shape1.a)
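
Here the inverse Lomax and inverse paralogistic d/p/q/r functions gain a
dot (inv.lomax, inv.paralogistic); the old spellings are removed from this
file.  The wrappers remain thin reparameterisations of the Dagum functions,
e.g.:

    library(VGAM)
    dinv.lomax(1.5, scale = 2, shape2.p = 3)
    ddagum(1.5, shape1.a = 1, scale = 2, shape2.p = 3)  # identical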
 
 
@@ -2531,11 +2491,11 @@ qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
 
 
 
-qinvlomax <- function(p, scale = 1, shape2.p)
+qinv.lomax <- function(p, scale = 1, shape2.p)
   qdagum(p, shape1.a = 1, scale = scale, shape2.p)
 
 
-qinvparalogistic <- function(p, shape1.a, scale = 1)
+qinv.paralogistic <- function(p, shape1.a, scale = 1)
   qdagum(p, shape1.a, scale = scale, shape1.a)
 
 
@@ -2625,11 +2585,11 @@ pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
 
 
 
-pinvlomax <- function(q, scale = 1, shape2.p)
+pinv.lomax <- function(q, scale = 1, shape2.p)
   pdagum(q, shape1.a = 1, scale = scale, shape2.p)
 
 
-pinvparalogistic <- function(q, shape1.a, scale = 1)
+pinv.paralogistic <- function(q, shape1.a, scale = 1)
   pdagum(q, shape1.a, scale = scale, shape1.a)
 
 
@@ -2714,11 +2674,11 @@ ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
 }
 
 
-dinvlomax <- function(x, scale = 1, shape2.p, log = FALSE)
+dinv.lomax <- function(x, scale = 1, shape2.p, log = FALSE)
   ddagum(x, shape1.a = 1, scale = scale, shape2.p, log = log)
 
 
-dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
+dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
   ddagum(x, shape1.a, scale = scale, shape1.a, log = log)
 
 
@@ -2761,7 +2721,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
                     "gamma(shape3.q - 1/shape1.a) / ",
                     "gamma(shape3.q)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2982,7 +2942,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
                   "gamma(1 - 1/shape1.a) / ",
                   "gamma(shape2.p)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3194,7 +3154,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
                   "gamma(shape3.q - 1) / ",
                   "(gamma(shape2.p) * gamma(shape3.q))"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3383,7 +3343,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
           namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n", 
           "Mean:     scale / (shape3.q - 1)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (ncol(cbind(y)) != 1)
@@ -3420,10 +3380,10 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     yvec <- y
     wvec <- w
     Init.shape3.q <-
-        getMaxMin(shape3.q.grid,
-                  objfun = lomax.Loglikfun,
-                  y = yvec, x = x, w = wvec,
-                  extraargs = NULL)
+        grid.search(shape3.q.grid,
+                    objfun = lomax.Loglikfun,
+                    y = yvec, x = x, w = wvec,
+                    extraargs = NULL)
 
 
 
@@ -3586,7 +3546,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
             "Mean:     scale * gamma(1 + 1/shape1.a) * ",
                       "gamma(1 - 1/shape1.a)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3732,7 +3692,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 }
 
 
- invlomax <- function(lscale = "loge",
+ inv.lomax <- function(lscale = "loge",
                       lshape2.p = "loge",
                       iscale = NULL,
                       ishape2.p = 1.0, 
@@ -3761,7 +3721,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
           namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n", 
           "Mean:     does not exist"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3803,7 +3763,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     Scale  <- eta2theta(eta[, 1], .lscale ,   earg = .escale )
     parg   <- eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p )
 
-    NA * Scale
+
+
+    qinv.lomax(p = 0.5, scale = Scale, shape2.p = parg)
     }, list( .lscale = lscale,
              .escale = escale, 
              .eshape2.p = eshape2.p,
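
The linkinv slot used to return NA * Scale because the inverse Lomax mean
does not exist (per the blurb above); fitted values are now the median
instead:

    library(VGAM)
    med <- qinv.lomax(p = 0.5, scale = 2, shape2.p = 3)
    pinv.lomax(med, scale = 2, shape2.p = 3)  # 0.5 by construction
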
@@ -3830,7 +3792,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dinvlomax(x = y, scale = Scale,
+      ll.elts <- c(w) * dinv.lomax(x = y, scale = Scale,
                                   shape2.p = parg, log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -3840,7 +3802,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     }
   }, list( .lscale = lscale, .lshape2.p = lshape2.p,
            .escale = escale, .eshape2.p = eshape2.p ))),
-  vfamily = c("invlomax"),
+  vfamily = c("inv.lomax"),
 
 
   simslot = eval(substitute(
@@ -3853,7 +3815,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     eta <- predict(object)
     Scale <- eta2theta(eta[, 1], .lscale    , earg = .escale    )
     parg  <- eta2theta(eta[, 2], .lshape2.p , earg = .eshape2.p )
-    rinvlomax(nsim * length(Scale), scale = Scale, shape2.p = parg)
+    rinv.lomax(nsim * length(Scale), scale = Scale, shape2.p = parg)
   }, list( .lscale = lscale, .lshape2.p = lshape2.p,
            .escale = escale, .eshape2.p = eshape2.p ))),
 
@@ -3922,7 +3884,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
           "Mean:     scale * gamma(1 + 1/shape1.a) * ",
                     "gamma(shape1.a - 1/shape1.a) / gamma(shape1.a)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -4078,7 +4040,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 }
 
 
- invparalogistic <- function(lshape1.a = "loge", lscale = "loge",
+ inv.paralogistic <- function(lshape1.a = "loge", lscale = "loge",
                              ishape1.a = 2,      iscale = NULL,
                              zero = NULL) {
 
@@ -4104,7 +4066,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
             "Mean:     scale * gamma(shape1.a + 1/shape1.a) * ",
                       "gamma(1 - 1/shape1.a)/gamma(shape1.a)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -4186,7 +4148,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dinvparalogistic(x = y, shape1.a = aa,
+      ll.elts <- c(w) * dinv.paralogistic(x = y, shape1.a = aa,
                                          scale = Scale, log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -4196,7 +4158,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     }
   }, list( .lshape1.a = lshape1.a, .lscale = lscale,
            .eshape1.a = eshape1.a, .escale = escale))),
-  vfamily = c("invparalogistic"),
+  vfamily = c("inv.paralogistic"),
 
 
 
@@ -4210,7 +4172,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
     eta <- predict(object)
     aa    <- eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
     Scale <- eta2theta(eta[, 2], .lscale  ,   earg = .escale )
-    rinvparalogistic(nsim * length(Scale), shape1.a = aa, scale = Scale)
+    rinv.paralogistic(nsim * length(Scale), shape1.a = aa, scale = Scale)
   }, list( .lshape1.a = lshape1.a, .lscale = lscale,
            .eshape1.a = eshape1.a, .escale = escale))),
 
@@ -4299,7 +4261,7 @@ warning("20040402; does not work, possibly because first derivs are ",
           namesof("sigma", link.sigma, earg = esigma, tag = TRUE), ", ",
           namesof("r",     link.r,     earg = er,     tag = TRUE)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (ncol(cbind(y)) != 1)
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index a61482e..31f4528 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -67,23 +67,22 @@ pkumar <- function(q, shape1, shape2) {
 
 
 
- kumar <- function(lshape1 = "loge", lshape2 = "loge",
-                   ishape1 = NULL,   ishape2 = NULL,
-                   grid.shape1 = c(0.4, 6.0),
-                   tol12 = 1.0e-4, zero = NULL) {
 
 
+
+
+
+ kumar <-
+  function(lshape1 = "loge", lshape2 = "loge",
+           ishape1 = NULL,   ishape2 = NULL,
+           grid.shape1 = c(0.4, 6.0), tol12 = 1.0e-4, zero = NULL) {
   lshape1 <- as.list(substitute(lshape1))
   eshape1 <- link2list(lshape1)
   lshape1 <- attr(eshape1, "function.name")
-
-
   lshape2 <- as.list(substitute(lshape2))
   eshape2 <- link2list(lshape2)
   lshape2 <- attr(eshape2, "function.name")
 
-
-
   if (length(ishape1) &&
      (!is.Numeric(ishape1, length.arg = 1, positive = TRUE)))
     stop("bad input for argument 'ishape1'")
@@ -95,61 +94,36 @@ pkumar <- function(q, shape1, shape2) {
   if (!is.Numeric(grid.shape1, length.arg = 2, positive = TRUE))
     stop("bad input for argument 'grid.shape1'")
 
-
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
 
-
-
-
   new("vglmff",
   blurb = c("Kumaraswamy distribution\n\n",
-            "Links:    ",
-            namesof("shape1", lshape1, eshape1, tag = FALSE), ", ",
-            namesof("shape2", lshape2, eshape2, tag = FALSE), "\n",
-            "Mean:     ",
-            "shape2 * beta(1+1/shape1, shape2)"),
+            "Links:    ", namesof("shape1", lshape1, eshape1, tag = FALSE), ", ",
+                          namesof("shape2", lshape2, eshape2, tag = FALSE), "\n",
+            "Mean:     shape2 * beta(1 + 1 / shape1, shape2)"),
  constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
-
   infos = eval(substitute(function(...) {
-    list(M1 = 2,
-         Q1 = 1,
-         lshape1 = .lshape1 ,
-         zero = .zero )
-  }, list( .zero = zero,
-           .lshape1 = lshape1
-         ))),
-
-
+    list(M1 = 2, Q1 = 1, expected = TRUE, multipleResponses = TRUE,
+         lshape1 = .lshape1 , lshape2 = .lshape2 , zero = .zero )
+  }, list( .zero = zero, .lshape1 = lshape1, .lshape2 = lshape2 ))),
   initialize = eval(substitute(expression({
-
-    temp5 <-
-    w.y.check(w = w, y = y,
-              Is.positive.y = TRUE,
-              ncol.w.max = Inf,
-              ncol.y.max = Inf,
-              out.wy = TRUE,
-              colsyperw = 1,
-              maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
-
+    checklist <- w.y.check(w = w, y = y, Is.positive.y = TRUE,
+                           ncol.w.max = Inf, ncol.y.max = Inf,
+                           out.wy = TRUE, colsyperw = 1, maximize = TRUE)
+    w <- checklist$w
+    y <- checklist$y  # Now 'w' and 'y' have the same dimension.
     if (any((y <= 0) | (y >= 1)))
       stop("the response must be in (0, 1)")
 
-
-    ncoly <- ncol(y)
-    M1 <- 2
-    extra$ncoly <- ncoly
-    extra$M1 <- M1
+    extra$ncoly <- ncoly <- ncol(y)
+    extra$M1 <- M1 <- 2
     M <- M1 * ncoly
-
-
     mynames1 <- paste("shape1", if (ncoly > 1) 1:ncoly else "", sep = "")
     mynames2 <- paste("shape2", if (ncoly > 1) 1:ncoly else "", sep = "")
     predictors.names <-
@@ -157,43 +131,27 @@ pkumar <- function(q, shape1, shape2) {
           namesof(mynames2, .lshape2 , earg = .eshape2 , tag = FALSE))[
           interleave.VGAM(M, M = M1)]
 
-
     if (!length(etastart)) {
-
-
-
       kumar.Loglikfun <- function(shape1, y, x, w, extraargs) {
-
-
-
-
-           mediany <- colSums(y * w) / colSums(w)  # weighted.mean(y, w)
-
-          shape2 <- log(0.5) / log1p(-(mediany^shape1))
-          sum(c(w) * dkumar(x = y, shape1 = shape1, shape2 = shape2,
-                            log = TRUE))
+        mediany <- colSums(y * w) / colSums(w)
+        shape2 <- log(0.5) / log1p(-(mediany^shape1))
+        sum(c(w) * dkumar(y, shape1 = shape1, shape2 = shape2, log = TRUE))
       }
 
-
       shape1.grid <- seq( .grid.shape1[1], .grid.shape1[2], len = 19)
       shape1.init <- if (length( .ishape1 )) .ishape1 else
-        getMaxMin(shape1.grid, objfun = kumar.Loglikfun,
-                  y = y,  x = x, w = w)
+        grid.search(shape1.grid, objfun = kumar.Loglikfun,
+                    y = y,  x = x, w = w)
       shape1.init <- matrix(shape1.init, n, ncoly, byrow = TRUE)
 
-
-
-       mediany <- colSums(y * w) / colSums(w)  # weighted.mean(y, w)
-
-
+      mediany <- colSums(y * w) / colSums(w)
       shape2.init <- if (length( .ishape2 )) .ishape2 else
         log(0.5) / log1p(-(mediany^shape1.init))
       shape2.init <- matrix(shape2.init, n, ncoly, byrow = TRUE)
 
-      etastart <- cbind(
-            theta2eta(shape1.init, .lshape1 , earg = .eshape1 ),
-            theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))[,
-            interleave.VGAM(M, M = M1)]
+      etastart <- cbind(theta2eta(shape1.init, .lshape1 , earg = .eshape1 ),
+                        theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))[,
+                  interleave.VGAM(M, M = M1)]
     }
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .ishape1 = ishape1, .ishape2 = ishape2,
@@ -206,10 +164,8 @@ pkumar <- function(q, shape1, shape2) {
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   last = eval(substitute(expression({
-    M1 <- extra$M1
-    misc$link <-
-      c(rep( .lshape1 , length = ncoly),
-        rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M = M1)]
+    misc$link <- c(rep( .lshape1 , length = ncoly),
+                   rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M = M1)]
     temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
     names(misc$link) <- temp.names
 
@@ -219,107 +175,71 @@ pkumar <- function(q, shape1, shape2) {
       misc$earg[[M1*ii-1]] <- .eshape1
       misc$earg[[M1*ii  ]] <- .eshape2
     }
-
-    misc$M1 <- M1
-    misc$expected <- TRUE
-    misc$multipleResponses <- TRUE
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
-             summation = TRUE) {
+  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE) {
     shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
     shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dkumar(x = y, shape1 = shape1,
-                               shape2 = shape2, log = TRUE)
-      if (summation) {
-        sum(ll.elts)
-      } else {
-        ll.elts
-      }
+      ll.elts <- c(w) * dkumar(x = y, shape1, shape2, log = TRUE)
+      if (summation) sum(ll.elts) else ll.elts
     }
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
   vfamily = c("kumar"),
-
-
-
   simslot = eval(substitute(
   function(object, nsim) {
-
-    pwts <- if (length(pwts <- object@prior.weights) > 0)

-              pwts else weights(object, type = "prior")
-    if (any(pwts != 1)) 
-      warning("ignoring prior weights")
     eta <- predict(object)
     shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
     shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
-    rkumar(nsim * length(shape1),
-           shape1 = shape1, shape2 = shape2)
+    rkumar(nsim * length(shape1), shape1 = shape1, shape2 = shape2)
   }, list( .lshape1 = lshape1, .lshape2 = lshape2,
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
-
-
-
-
-
   deriv = eval(substitute(expression({
     shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
     shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
-
     dshape1.deta <- dtheta.deta(shape1, link = .lshape1 , earg = .eshape1 )
     dshape2.deta <- dtheta.deta(shape2, link = .lshape2 , earg = .eshape2 )
-
     dl.dshape1 <- 1 / shape1 + log(y) - (shape2 - 1) * log(y) *
                   (y^shape1) / (1 - y^shape1)
     dl.dshape2 <- 1 / shape2 + log1p(-y^shape1)
-
-    myderiv <- c(w) * cbind(dl.dshape1 * dshape1.deta,
+    dl.deta <- c(w) * cbind(dl.dshape1 * dshape1.deta,
                             dl.dshape2 * dshape2.deta)
-    myderiv[, interleave.VGAM(M, M = M1)]
+    dl.deta[, interleave.VGAM(M, M = M1)]
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   weight = eval(substitute(expression({
     ned2l.dshape11 <- (1 + (shape2 / (shape2 - 2)) *
-           ((digamma(shape2) -  digamma(2))^2 -
-           (trigamma(shape2) - trigamma(2)))) / shape1^2
-    ned2l.dshape22 <- 1.0 / shape2^2
+      ((digamma(shape2) -  digamma(2))^2 -
+      (trigamma(shape2) - trigamma(2)))) / shape1^2
+    ned2l.dshape22 <- 1 / shape2^2
     ned2l.dshape12 <-
-       -((digamma(1 + shape2) - digamma(2)) / (shape2 - 1.0)) / shape1
-
-    index1 <- (abs(shape2 - 1.0) < .tol12)
-    if (any(index1))
-      ned2l.dshape12[index1] <- -trigamma(2) / shape1[index1]
-
-    index2 <- (abs(shape2 - 2.0) < .tol12 )
-    if (any(index2))
-      ned2l.dshape11[index2] <-
-          (1.0 - 2.0 * psigamma(2.0, deriv = 2)) / shape1[index2]^2
-
+       (digamma(2) - digamma(1 + shape2)) / ((shape2 - 1) * shape1)
 
+    index1 <- (abs(shape2 - 1) < .tol12 )  # Fix up singular point at shape2 == 1
+    ned2l.dshape12[index1] <- -trigamma(2) / shape1[index1]
+    index2 <- (abs(shape2 - 2) < .tol12 )  # Fix up singular point at shape2 == 2
+    ned2l.dshape11[index2] <- (1 - 2 * psigamma(2, deriv = 2)) / shape1[index2]^2
 
     wz <- array(c(c(w) * ned2l.dshape11 * dshape1.deta^2,
                   c(w) * ned2l.dshape22 * dshape2.deta^2,
                   c(w) * ned2l.dshape12 * dshape1.deta * dshape2.deta),
                 dim = c(n, M / M1, 3))
     wz <- arwz2wz(wz, M = M, M1 = M1)
-
-
     wz
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
-            .eshape1 = eshape1, .eshape2 = eshape2,
-            .tol12 = tol12 ))))
+            .eshape1 = eshape1, .eshape2 = eshape2, .tol12 = tol12 ))))
 }
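
A minimal usage sketch of the kumar() family above, on simulated data
(object names are hypothetical), assuming the default 'loge' links for
both shape parameters:

kdata <- data.frame(y = rkumar(n = 500, shape1 = 2, shape2 = 3))
kfit  <- vglm(y ~ 1, kumar, data = kdata, trace = TRUE)
Coef(kfit)  # Back-transformed estimates of shape1 and shape2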
 
 
 
 
 
-drice <- function(x, vee, sigma, log = FALSE) {
+
+drice <- function(x, sigma, vee, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -345,7 +265,7 @@ drice <- function(x, vee, sigma, log = FALSE) {
 }
 
 
-rrice <- function(n, vee, sigma) {
+rrice <- function(n, sigma, vee) {
   if (!is.Numeric(n, integer.valued = TRUE, length.arg = 1))
     stop("bad input for argument 'n'")
   theta <- 1  # any number
@@ -356,14 +276,43 @@ rrice <- function(n, vee, sigma) {
 
 
 
+
+marcumQ <- function(a, b, m = 1, lower.tail = TRUE, ... ) {
+  pchisq(b^2, df = 2*m, ncp = a^2, lower.tail = lower.tail, ... )
+}
+
+
+price <- function(q, sigma, vee, lower.tail = TRUE, ...) {
+  marcumQ(vee/sigma, q/sigma, m = 1, lower.tail = lower.tail, ... )
+}
+
+
+qrice <- function(p, sigma, vee, ... ) {
+  sqrt(qchisq(p, df = 2, ncp = (vee/sigma)^2, ... )) * sigma
+}
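
As written, marcumQ() returns the Marcum Q function proper only when
lower.tail = FALSE; under the default lower.tail = TRUE it returns the
complement 1 - Q_m(a, b), which is exactly the Rice distribution
function that price() needs, because (Y/sigma)^2 is noncentral
chi-square with 2 df and noncentrality (vee/sigma)^2 when Y is
Rice(sigma, vee). Hence price() and qrice() invert each other; a quick
round-trip check with hypothetical values:

pp <- price(qrice(p = 0.7, sigma = 2, vee = 1), sigma = 2, vee = 1)
all.equal(pp, 0.7)  # TRUE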
+
+
+
+
+
+
+
+
+
 riceff.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
 
- riceff <- function(lvee = "loge", lsigma = "loge",
-                    ivee = NULL, isigma = NULL,
-                    nsimEIM = 100, zero = NULL) {
+ riceff <- function(lsigma = "loge", lvee = "loge",
+                    isigma = NULL, ivee = NULL,
+                    nsimEIM = 100, zero = NULL, nowarning = FALSE) {
+
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
+
+
   lvee     <- as.list(substitute(lvee))
   evee     <- link2list(lvee)
   lvee     <- attr(evee, "function.name")
@@ -389,14 +338,14 @@ riceff.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Rice distribution\n\n",
             "Links:    ",
-            namesof("vee",   lvee,   earg = evee,   tag = FALSE), ", ", 
-            namesof("sigma", lsigma, earg = esigma, tag = FALSE), "\n",
+            namesof("sigma", lsigma, earg = esigma, tag = FALSE), ", ",
+            namesof("vee",   lvee,   earg = evee,   tag = FALSE), "\n",
             "Mean:     ",
             "sigma*sqrt(pi/2)*exp(z/2)*((1-z)*",
             "besselI(-z/2, nu = 0) - z * besselI(-z/2, nu = 1)) ",
             "where z=-vee^2/(2*sigma^2)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -413,8 +362,9 @@ riceff.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <-
-      c(namesof("vee",   .lvee,   earg = .evee,   tag = FALSE),
-        namesof("sigma", .lsigma , earg = .esigma, tag = FALSE))
+      c(namesof("sigma", .lsigma , earg = .esigma , tag = FALSE),
+        namesof("vee",   .lvee   , earg = .evee   , tag = FALSE))
 
 
 
@@ -429,21 +379,21 @@ riceff.control <- function(save.weight = TRUE, ...) {
       seq(quantile(rep(y, w), probs = seq(0, 1, 0.2))["20%"],
           quantile(rep(y, w), probs = seq(0, 1, 0.2))["80%"], len = 11)
     vee.init <- if (length( .ivee )) .ivee else
-      getMaxMin(vee.grid, objfun = riceff.Loglikfun,
-                y = y,  x = x, w = w)
+      grid.search(vee.grid, objfun = riceff.Loglikfun, y = y,  x = x, w = w)
       vee.init <- rep(vee.init, length = length(y))
       sigma.init <- if (length( .isigma )) .isigma else
           sqrt(max((weighted.mean(y^2, w) - vee.init^2)/2, 0.001))
       sigma.init <- rep(sigma.init, length = length(y))
+
       etastart <-
-        cbind(theta2eta(vee.init,   .lvee,   earg = .evee),
-              theta2eta(sigma.init, .lsigma , earg = .esigma ))
+        cbind(theta2eta(sigma.init, .lsigma , earg = .esigma ),
+              theta2eta(vee.init,   .lvee ,   earg = .evee ))
     }
   }), list( .lvee = lvee, .lsigma = lsigma,
             .ivee = ivee, .isigma = isigma,
             .evee = evee, .esigma = esigma ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    vee   <- eta2theta(eta[, 1], link = .lvee,   earg = .evee)
-    sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+    sigma <- eta2theta(eta[, 1], link = .lsigma , earg = .esigma )
+    vee   <- eta2theta(eta[, 2], link = .lvee   , earg = .evee )
     temp9 <- -vee^2 / (2*sigma^2)
 
@@ -454,9 +404,9 @@ riceff.control <- function(save.weight = TRUE, ...) {
   }, list( .lvee = lvee, .lsigma = lsigma,
            .evee = evee, .esigma = esigma ))),
   last = eval(substitute(expression({
-    misc$link <-    c("vee" = .lvee, "sigma" = .lsigma)
+    misc$link <-    c("sigma" = .lsigma , "vee" = .lvee )
 
-    misc$earg <- list("vee" = .evee, "sigma" = .esigma )
+    misc$earg <- list("sigma" = .esigma , "vee" = .evee )
 
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
@@ -467,12 +417,12 @@ riceff.control <- function(save.weight = TRUE, ...) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    vee   <- eta2theta(eta[, 1], link = .lvee   , earg = .evee )
-    sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+    sigma <- eta2theta(eta[, 1], link = .lsigma , earg = .esigma )
+    vee   <- eta2theta(eta[, 2], link = .lvee   , earg = .evee )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * drice(x = y, vee = vee, sigma = sigma, log = TRUE)
+      ll.elts <- c(w) * drice(x = y, sigma = sigma, vee = vee, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -492,8 +442,8 @@ riceff.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    vee   <- eta2theta(eta[, 1], link = .lvee   , earg = .evee )
-    sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+    sigma <- eta2theta(eta[, 1], link = .lsigma , earg = .esigma )
+    vee   <- eta2theta(eta[, 2], link = .lvee   , earg = .evee )
     rrice(nsim * length(vee),
           vee = vee, sigma = sigma)
   }, list( .lvee = lvee, .lsigma = lsigma,
@@ -502,10 +452,10 @@ riceff.control <- function(save.weight = TRUE, ...) {
 
 
   deriv = eval(substitute(expression({
-    vee   <- eta2theta(eta[, 1], link = .lvee, earg = .evee)
-    sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
+    sigma <- eta2theta(eta[, 1], link = .lsigma , earg = .esigma )
+    vee   <- eta2theta(eta[, 2], link = .lvee   , earg = .evee )
 
-    dvee.deta <- dtheta.deta(vee, link = .lvee, earg = .evee)
+    dvee.deta <- dtheta.deta(vee, link = .lvee , earg = .evee )
     dsigma.deta <- dtheta.deta(sigma, link = .lsigma , earg = .esigma )
 
     temp8 <- y * vee / sigma^2
@@ -515,8 +465,9 @@ riceff.control <- function(save.weight = TRUE, ...) {
                  (2 * temp8 / sigma) *
                  besselI(temp8, nu = 1) / besselI(temp8, nu = 0)
 
-    c(w) * cbind(dl.dvee * dvee.deta,
-                 dl.dsigma * dsigma.deta)
+    c(w) * cbind(dl.dsigma * dsigma.deta,
+                 dl.dvee   * dvee.deta)
+
   }), list( .lvee = lvee, .lsigma = lsigma,
             .evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
   weight = eval(substitute(expression({
@@ -531,7 +482,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
                    besselI(temp8, nu = 1) / besselI(temp8, nu = 0)
 
       rm(ysim)
-      temp3 <- cbind(dl.dvee, dl.dsigma)
+      temp3 <- cbind(dl.dsigma, dl.dvee)
       run.var <- ((ii-1) * run.var + temp3^2) / ii
       run.cov <- ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
     }
@@ -539,7 +490,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
         matrix(colMeans(cbind(run.var, run.cov)),
                n, dimm(M), byrow = TRUE) else cbind(run.var, run.cov)
 
-    dtheta.detas <- cbind(dvee.deta, dsigma.deta)
+    dtheta.detas <- cbind(dsigma.deta, dvee.deta)
     index0 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
     wz <- wz * dtheta.detas[, index0$row] *
                dtheta.detas[, index0$col]
@@ -646,11 +597,11 @@ skellam.control <- function(save.weight = TRUE, ...) {
          "Mean:     mu1-mu2", "\n",
          "Variance: mu1+mu2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -888,7 +839,7 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1112,7 +1063,7 @@ rlind <- function(n, theta) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1316,7 +1267,7 @@ if (FALSE)
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1534,6 +1485,7 @@ slash.control <- function(save.weight = TRUE, ...) {
 }
 
 
+
  slash <- function(lmu = "identitylink", lsigma = "loge",
                    imu = NULL, isigma = NULL,
                    iprobs = c(0.1, 0.9),
@@ -1584,7 +1536,7 @@ slash.control <- function(save.weight = TRUE, ...) {
          "\n1/(2*sigma*sqrt(2*pi))",
          "\t\t\t\t\t\t\ty=mu\n")),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1619,8 +1571,8 @@ slash.control <- function(save.weight = TRUE, ...) {
       mu.grid <- quantile(rep(y, w), probs=iprobs)
       mu.grid <- seq(mu.grid[1], mu.grid[2], length=100)
       mu.init <- if (length( .imu )) .imu else
-                getMaxMin(mu.grid, objfun = slash.Loglikfun,
-                          y = y,  x = x, w = w)
+                 grid.search(mu.grid, objfun = slash.Loglikfun,
+                             y = y,  x = x, w = w)
       sigma.init <- if (is.Numeric(.isigma)) .isigma else
         max(0.01,
            ((quantile(rep(y, w), prob = 0.75)/2) -
@@ -1690,59 +1642,53 @@ slash.control <- function(save.weight = TRUE, ...) {
 
 
   deriv = eval(substitute(expression({
-    mu    <- eta2theta(eta[, 1], link = .lmu , earg = .emu )
+    mu    <- eta2theta(eta[, 1], link = .lmu    , earg = .emu    )
     sigma <- eta2theta(eta[, 2], link = .lsigma , earg = .esigma )
 
-    dmu.deta    <- dtheta.deta(mu,    link = .lmu , earg = .emu )
+    dmu.deta    <- dtheta.deta(mu,    link = .lmu    , earg = .emu    )
     dsigma.deta <- dtheta.deta(sigma, link = .lsigma , earg = .esigma )
 
-    zedd <- (y-mu)/sigma
-    d3 <- deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
-                log(sqrt(2*pi)*sigma*((y-mu)/sigma)^2),
-                c("mu", "sigma"))
+    zedd <- (y - mu) / sigma
+    d3 <- deriv3(~ w * log(1 - exp(-(((y - mu) / sigma)^2) / 2)) -
+                 log(sqrt(2 * pi) * sigma * ((y - mu) / sigma)^2),
+                 c("mu", "sigma"))
     eval.d3 <- eval(d3)
     dl.dthetas <-  attr(eval.d3, "gradient")
-    dl.dmu <- dl.dthetas[, 1]
+    dl.dmu    <- dl.dthetas[, 1]
     dl.dsigma <- dl.dthetas[, 2]
     ind0 <- (abs(zedd) < .smallno)
     dl.dmu[ind0] <- 0
-    dl.dsigma[ind0] <- -1/sigma[ind0]
-    ans <-  c(w) * cbind(dl.dmu    * dmu.deta,
-                         dl.dsigma * dsigma.deta)
-    ans
+    dl.dsigma[ind0] <- -1 / sigma[ind0]
+    c(w) * cbind(dl.dmu * dmu.deta, dl.dsigma * dsigma.deta)
   }), list( .lmu = lmu, .lsigma = lsigma,
             .emu = emu, .esigma = esigma, .smallno = smallno ))),
-  weight=eval(substitute(expression({
+  weight = eval(substitute(expression({
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
-    sd3 <- deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
-                  log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
+    sd3 <- deriv3(~ w * log(1 - exp(-(((ysim - mu) / sigma)^2) / 2)) -
+                  log(sqrt(2 * pi) * sigma * ((ysim - mu) / sigma)^2),
                   c("mu", "sigma"))
     for (ii in 1:( .nsimEIM )) {
-        ysim <- rslash(n, mu = mu, sigma = sigma)
-        seval.d3 <- eval(sd3)
-
-        dl.dthetas <-  attr(seval.d3, "gradient")
-        dl.dmu <- dl.dthetas[, 1]
-        dl.dsigma <- dl.dthetas[, 2]
-
-
-
-            temp3 <- cbind(dl.dmu, dl.dsigma)
-            run.varcov <- ((ii-1) * run.varcov +
-                          temp3[, ind1$row.index] *
-                          temp3[, ind1$col.index]) / ii
-        }
-        wz <- if (intercept.only)
-            matrix(colMeans(run.varcov, na.rm = FALSE),
-                   n, ncol(run.varcov), byrow = TRUE) else run.varcov
-        dthetas.detas <- cbind(dmu.deta, dsigma.deta)
-        wz <- wz * dthetas.detas[, ind1$row] *
-                   dthetas.detas[, ind1$col]
-        c(w) * wz
-    }), list( .lmu = lmu, .lsigma = lsigma,
-              .emu = emu, .esigma = esigma,
-              .nsimEIM = nsimEIM, .smallno = smallno ))))
+      ysim <- rslash(n, mu = mu, sigma = sigma)
+      seval.d3 <- eval(sd3)
+
+      dl.dthetas <-  attr(seval.d3, "gradient")
+      dl.dmu    <- dl.dthetas[, 1]
+      dl.dsigma <- dl.dthetas[, 2]
+
+      temp3 <- cbind(dl.dmu, dl.dsigma)
+      run.varcov <- run.varcov + temp3[, ind1$row] * temp3[, ind1$col]
+    }
+    run.varcov <- run.varcov / .nsimEIM
+    wz <- if (intercept.only)
+        matrix(colMeans(run.varcov, na.rm = FALSE),
+               n, ncol(run.varcov), byrow = TRUE) else run.varcov
+    dthetas.detas <- cbind(dmu.deta, dsigma.deta)
+    wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+    c(w) * wz
+  }), list( .lmu = lmu, .lsigma = lsigma,
+            .emu = emu, .esigma = esigma,
+            .nsimEIM = nsimEIM, .smallno = smallno ))))
 }
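
The rewritten weight slot accumulates the simulated outer products and
divides by nsimEIM once at the end, which is the same running average
as before in exact arithmetic. As a side check, the density limit at
y = mu quoted in the blurb can be verified numerically, assuming the
accompanying dslash() density uses the same (mu, sigma)
parameterization and handles the y = mu limit:

dslash(x = 0, mu = 0, sigma = 2)  # Limit 1 / (2 * 2 * sqrt(2 * pi))
1 / (4 * sqrt(2 * pi))            # Approximately 0.0997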
 
 
@@ -2206,7 +2152,7 @@ qbenf <- function(p, ndigits = 1) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
diff --git a/R/family.basics.R b/R/family.basics.R
index 6cedd07..0412571 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -12,7 +12,7 @@
 
 
 
-subsetc <-
+subsetcol <-
 Select <-
   function(
            data = list(),
@@ -177,6 +177,29 @@ subsetc <-
 
 
 
+ grid.search <- function(vov, objfun, y, x, w, extraargs = NULL,
+                         maximize = TRUE, abs.arg = FALSE,
+                         ret.objfun = FALSE, ...) {
+  if (!is.vector(vov))
+    stop("argument 'vov' must be a vector")
+  objvals <- vov
+  for (ii in 1:length(vov))
+    objvals[ii] <- objfun(vov[ii], y = y, x = x, w = w,
+                          extraargs = extraargs, ...)
+  try.this <- if (abs.arg) {
+               if (maximize) vov[abs(objvals) == max(abs(objvals))] else
+               vov[abs(objvals) == min(abs(objvals))]
+             } else {
+               if (maximize) vov[objvals == max(objvals)] else
+               vov[objvals == min(objvals)]
+             }
+  if (!length(try.this))
+    stop("something has gone wrong!")
+  ans <- if (length(try.this) == 1)
+    try.this else sample(try.this, size = 1)
+  if (ret.objfun) c(ans, objvals[ans == vov]) else ans
+}
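
grid.search() is the drop-in replacement for the old getMaxMin(): it
evaluates objfun at every point of vov, passing y, x, w and extraargs
straight through, and returns the grid point with the largest (or, with
maximize = FALSE, smallest) objective, breaking exact ties at random.
A self-contained sketch with a toy objective (all names hypothetical):

toy.objfun <- function(theta, y, x, w, extraargs)
  -sum(w * (y - theta)^2)  # Maximized at the weighted mean of y
grid.search(seq(0, 10, by = 0.05), objfun = toy.objfun,
            y = c(2, 3, 4), x = NULL, w = c(1, 1, 2))  # Returns 3.25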
+
 
 
 
@@ -218,11 +241,13 @@ subsetc <-
 
 
 
- cm.vgam <- function(cm, x, bool, constraints,
-                     apply.int = FALSE, overwrite = FALSE,
-                     cm.default = diag(nrow(cm)),  # 20121226
-                     cm.intercept.default = diag(nrow(cm))  # 20121226
-                    ) {
+
+ cm.VGAM <-
+  function(cm, x, bool, constraints,
+           apply.int = FALSE, 
+           cm.default = diag(nrow(cm)),  # 20121226
+           cm.intercept.default = diag(nrow(cm))  # 20121226
+          ) {
 
 
 
@@ -280,26 +305,26 @@ subsetc <-
       return(constraints)
     }
   } else {
-      tbool <- terms(bool)
-      if (attr(tbool, "response")) {
-        ii <- attr(tbool, "factors")
-        default <- dimnames(ii)[[1]]
-        default <- default[1]
-        default <- if (is.null(default[1])) {
-          t.or.f <- attr(tbool, "variables")
-
-          t.or.f <- as.character( t.or.f )
-          if (t.or.f[1] == "list" && length(t.or.f) == 2 &&
-             (t.or.f[2] == "TRUE" || t.or.f[2] == "FALSE")) {
-            t.or.f <- as.character( t.or.f[2] )
-            parse(text = t.or.f)[[1]]
-          } else {
-            stop("something gone awry")
-          }
+    tbool <- terms(bool)
+    if (attr(tbool, "response")) {
+      ii <- attr(tbool, "factors")
+      default <- dimnames(ii)[[1]]
+      default <- default[1]
+      default <- if (is.null(default[1])) {
+        t.or.f <- attr(tbool, "variables")
+
+        t.or.f <- as.character( t.or.f )
+        if (t.or.f[1] == "list" && length(t.or.f) == 2 &&
+           (t.or.f[2] == "TRUE" || t.or.f[2] == "FALSE")) {
+          t.or.f <- as.character( t.or.f[2] )
+          parse(text = t.or.f)[[1]]
         } else {
-          parse(text = default[1])[[1]]  # Original
+          stop("something gone awry")
         }
-        default <- as.logical(eval(default))
+      } else {
+        parse(text = default[1])[[1]]  # Original
+      }
+      default <- as.logical(eval(default))
     } else {
       default <- TRUE
     }
@@ -321,7 +346,7 @@ subsetc <-
 
 
 
-cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
+cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
   asgn <- attr(x, "assign")
   nasgn <- names(asgn)
@@ -361,7 +386,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
 
 
- cm.zero.vgam <- function(constraints, x, zero, M) {
+ cm.zero.VGAM <- function(constraints, x, zero, M) {
 
   asgn <- attr(x, "assign")
   nasgn <- names(asgn)
@@ -392,7 +417,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
       Hmatk[zero, ] <- 0
       index <- NULL
       for (kk in 1:ncol(Hmatk))
-        if (all(Hmatk[,kk] == 0)) index <- c(index, kk)
+        if (all(Hmatk[, kk] == 0)) index <- c(index, kk)
       if (length(index) == ncol(Hmatk)) 
         stop("constraint matrix has no columns!")
       if (!is.null(index))
@@ -404,8 +429,14 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
 
 
- process.constraints <- function(constraints, x, M,
-                                 by.col = TRUE, specialCM = NULL) {
+ process.constraints <-
+  function(constraints, x, M,
+           by.col = TRUE, specialCM = NULL,
+           Check.cm.rank = TRUE  # 20140626
+          ) {
+
+
+
 
 
 
@@ -429,31 +460,34 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
   lenconstraints <- length(constraints)
   if (lenconstraints > 0)
     for (ii in 1:lenconstraints) {
-      constraints[[ii]] <- eval(constraints[[ii]])
+      list.elt <- constraints[[ii]]
+
+      if (is.function(list.elt)) {
+        list.elt <- list.elt()
+      }
+
+      constraints[[ii]] <- eval(list.elt)
       if (!is.null  (constraints[[ii]]) &&
           !is.matrix(constraints[[ii]]))
-          stop("'constraints[[", ii, "]]' is not a matrix")
+        stop("'constraints[[", ii, "]]' is not a matrix")
     }
 
   if (is.null(names(constraints))) 
     names(constraints) <- rep(nasgn, length.out = lenconstraints) 
 
-  temp <- if (!is.R()) list() else {
-    junk <- vector("list", length(nasgn))
-    names(junk) <- nasgn
-    junk
-  }
+  temp <- vector("list", length(nasgn))
+  names(temp) <- nasgn
   for (ii in 1:length(nasgn))
     temp[[nasgn[ii]]] <-
       if (is.null(constraints[[nasgn[ii]]])) diag(M) else
              eval(constraints[[nasgn[ii]]])
 
   for (ii in 1:length(asgn)) {
-      if (!is.matrix(temp[[ii]])) {
-        stop("not a constraint matrix")
-      }
-      if (ncol(temp[[ii]]) > M)
-        stop("constraint matrix has too many columns")
+    if (!is.matrix(temp[[ii]])) {
+      stop("not a constraint matrix")
+    }
+    if (ncol(temp[[ii]]) > M)
+      stop("constraint matrix has too many columns")
   }
 
   if (!by.col)
@@ -477,6 +511,24 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
     }
   }
   names(Hlist) <- dimnames(x)[[2]]
+
+
+
+  if (Check.cm.rank) {
+    all.svd.d <- function(x) svd(x)$d
+    mylist <- lapply(Hlist, all.svd.d)
+
+    if (max(unlist(lapply(mylist, length))) > M)
+      stop("some constraint matrices have more than ", M,
+           "columns")
+
+    MyVector <- unlist(mylist)
+    if (min(MyVector) < 1.0e-10)
+      stop("some constraint matrices are not of ",
+           "full column-rank: ",
+           paste(names(MyVector)[MyVector < 1.0e-10], collapse = ", "))
+  }
+
   Hlist
 }
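
The new Check.cm.rank block rejects constraint matrices that are not of
full column rank by inspecting their singular values. A sketch of the
kind of input it now catches (hypothetical matrix):

H <- cbind(c(1, 1), c(2, 2))  # Second column is a multiple of the first
min(svd(H)$d)                 # Essentially 0, so the check would stop()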
 
@@ -629,13 +681,9 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
 
 
- m2avglm <- function(object, upper = FALSE, allow.vector = FALSE)  {
-  m2adefault(wweights(object), M = object@misc$M,
-             upper = upper, allow.vector = allow.vector)
-}
+ m2a <- function(m, M, upper = FALSE, allow.vector = FALSE) {
 
 
- m2adefault <- function(m, M, upper = FALSE, allow.vector = FALSE) {
   if (!is.numeric(m))
       stop("argument 'm' is not numeric")
 
@@ -682,8 +730,7 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
   fred <- .C("a2m",
              as.double(a), m = double(dimm.value * n),
       as.integer(dimm.value),
-      as.integer(index$row-1),  
-      as.integer(index$col-1),  
+      as.integer(index$row-1),  as.integer(index$col-1),  
       as.integer(n),  as.integer(M), NAOK = TRUE)
   dim(fred$m) <- c(dimm.value,n)
   fred$m <- t(fred$m)
@@ -724,18 +771,11 @@ cm.nointercept.vgam <- function(constraints, x, nointercept, M) {
 
 
 
-if (!exists("is.R"))
-  is.R <- function()
-    exists("version") &&
-    !is.null(version$language) &&
-    version$language == "R"
-
-
+ wweights <- function(object, matrix.arg = TRUE, deriv.arg = FALSE,
+                      ignore.slot = FALSE, checkwz = TRUE) {
 
 
 
- wweights <- function(object, matrix.arg = TRUE, deriv.arg = FALSE,
-                      ignore.slot = FALSE, checkwz = TRUE) {
 
 
 
@@ -787,9 +827,20 @@ if (!exists("is.R"))
 
 
 
+
+  if (any(slotNames(object) == "family")) {
+    infos.list <- object@family@infos()
+    if (length(infos.list))
+      for (ii in names(infos.list)) {
+        assign(ii, infos.list[[ii]])
+      }
+  }
+
+
+
   if (any(slotNames(object) == "control"))
    for (ii in names(object@control)) {
-      assign(ii, object@control[[ii]]) 
+      assign(ii, object@control[[ii]])
     } 
 
  if (length(object@misc))
@@ -877,13 +928,7 @@ procVec <- function(vec, yn, Default) {
 
 if (FALSE) {
 
-if (!isGeneric("m2a"))
-    setGeneric("m2a",
-  function(object, ...) standardGeneric("m2a"))
 
-setMethod("m2a", "vglm",
-         function(object, ...)
-         m2avglm(object, ...))
 }
 
 
@@ -985,6 +1030,7 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
 
 
 
+
 mbesselI0 <- function(x, deriv.arg = 0) {
   if (!is.Numeric(deriv.arg, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) &&
@@ -1111,7 +1157,7 @@ lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
 
 
 
-negzero.expression <- expression({
+negzero.expression.VGAM <- expression({
 
 
 
@@ -1141,7 +1187,7 @@ negzero.expression <- expression({
   z.Index <- if (!length(dotzero)) NULL else
                    unique(sort(c(zneg.index, zpos.index)))
 
-  constraints <- cm.zero.vgam(constraints, x, z.Index, M)
+  constraints <- cm.zero.VGAM(constraints, x, z.Index, M)
 })
 
 
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 1d5beee..4cd04c2 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -13,7 +13,7 @@
 
 
 
-process.binomial2.data.vgam <- expression({
+process.binomial2.data.VGAM <- expression({
 
 
 
@@ -76,7 +76,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
  betabinomial <- function(lmu = "logit",
                           lrho = "logit",
                           irho = NULL,
-                          imethod = 1, shrinkage.init = 0.95,
+                          imethod = 1, ishrinkage = 0.95,
                           nsimEIM = NULL, zero = 2) {
   lmu <- as.list(substitute(lmu))
   emu <- link2list(lmu)
@@ -93,10 +93,10 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
      imethod > 4)
     stop("argument 'imethod' must be 1, 2, 3 or 4")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-      shrinkage.init < 0 ||
-      shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 ||
+      ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
   if (!is.null(nsimEIM)) {
     if (!is.Numeric(nsimEIM, length.arg = 1,
                     integer.valued = TRUE))
@@ -113,7 +113,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
             "Mean:       mu", "\n",
             "Variance:   mu*(1-mu)*(1+(w-1)*rho)/w"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (!all(w == 1))
@@ -154,7 +154,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
       } else if ( .imethod == 1) {
         rep(weighted.mean(y, w), len = n)
       } else if ( .imethod == 2) {
-        .sinit * weighted.mean(y, w) + (1 - .sinit) * y
+        .ishrinkage * weighted.mean(y, w) + (1 - .ishrinkage ) * y
       } else if ( .imethod == 3) {
         y.matrix <- cbind(y)
         mat.temp <- matrix(colMeans(y.matrix), nrow(y.matrix),
@@ -163,13 +163,13 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
       } else {
         mustart
       }
-      try.this <- getMaxMin(rho.grid, objfun = betabinomial.Loglikfun,
-                            y = y,  x = x, w = w,
-                            extraargs = list(
-                            ycounts = ycounts,
-                            nvec = if (is.numeric(extra$orig.w))
-                                   round(w / extra$orig.w) else round(w),
-                            mustart = mustart.use))
+      try.this <- grid.search(rho.grid, objfun = betabinomial.Loglikfun,
+                              y = y,  x = x, w = w,
+                              extraargs = list(
+                              ycounts = ycounts,
+                              nvec = if (is.numeric(extra$orig.w))
+                                     round(w / extra$orig.w) else round(w),
+                              mustart = mustart.use))
       init.rho <- if (is.Numeric( .irho ))
                     rep( .irho , length = n) else
                     rep(try.this, length = n)
@@ -180,7 +180,7 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
     }
   }), list( .lmu = lmu, .lrho = lrho,
             .emu = emu, .erho = erho,
-            .imethod = imethod, .sinit = shrinkage.init,
+            .imethod = imethod, .ishrinkage = ishrinkage,
             .nsimEIM = nsimEIM, .irho = irho ))),
   linkinv = eval(substitute(function(eta, extra = NULL)
     eta2theta(eta[, 1], .lmu , earg = .emu ), 
@@ -512,18 +512,18 @@ rbinom2.or <-
             namesof("oratio", loratio, earg = eoratio)),
   constraints = eval(substitute(expression({
     cm.intercept.default <- diag(3)
-    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
+    constraints <- cm.VGAM(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
                            bool = .exchangeable ,
                            constraints = constraints,
                            apply.int = TRUE,
                            cm.default           = cm.intercept.default,
                            cm.intercept.default = cm.intercept.default)
-      constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
-    eval(process.binomial2.data.vgam)
+    eval(process.binomial2.data.VGAM)
     if (length(mustart.orig))
       mustart <- mustart.orig  # Retain it if inputted
 
@@ -832,11 +832,11 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
             namesof("mu2", lmu12, earg = emu12), ", ",
             namesof("rho", lrho,  earg = erho)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
+    constraints <- cm.VGAM(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
                            bool = .exchangeable ,
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -848,7 +848,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
 
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
-    eval(process.binomial2.data.vgam)
+    eval(process.binomial2.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig  # Retain it if inputted
@@ -923,14 +923,13 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
                        log = TRUE, dochecking = FALSE))
         }
         rho.grid <- .grho # seq(-0.95, 0.95, len = 31)
-        try.this <- getMaxMin(rho.grid, objfun = binom2.rho.Loglikfun,
-                             y = y, x = x, w = w, extraargs = list(
-                             orig.w = extra$orig.w,
-                             ycounts = ycounts,
-                             initmu1 = mu1.init,
-                             initmu2 = mu2.init,
-                             nvec = nvec 
-                             ))
+        try.this <- grid.search(rho.grid, objfun = binom2.rho.Loglikfun,
+                                y = y, x = x, w = w, extraargs = list(
+                                orig.w = extra$orig.w,
+                                ycounts = ycounts,
+                                initmu1 = mu1.init,
+                                initmu2 = mu2.init,
+                                nvec = nvec ))
 
 
       rho.init <- if (is.Numeric( .irho ))
@@ -1137,10 +1136,10 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
 
 
  pbinorm <-
- function(x1, x2,
-                    mean1 = 0, mean2 = 0,
-                    var1 = 1, var2 = 1,
-                    cov12 = 0) {
+ function(q1, q2,
+          mean1 = 0, mean2 = 0,
+          var1 = 1, var2 = 1,
+          cov12 = 0) {
 
 
 
@@ -1148,10 +1147,10 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
   sd2 <- sqrt(var2)
   rho <- cov12 / (sd1 * sd2)
 
-  if (any(is.na(x1)    | is.na(x2)    |
+  if (any(is.na(q1)    | is.na(q2)    |
           is.na(sd1)   | is.na(sd2)   |
           is.na(mean1) | is.na(mean2) | is.na(rho)))
-    stop("no NAs allowed in arguments or variables 'x1', 'x2', 'mean1', ",
+    stop("no NAs allowed in arguments or variables 'q1', 'q2', 'mean1', ",
          "'mean2', 'sd1', 'sd2', 'cov12'")
   if (min(rho) < -1 || max(rho) > +1)
     stop("correlation 'rho' is out of range")
@@ -1162,29 +1161,44 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
     warning("the call to pnorm2() seems based on the old version ",
             "of the arguments")
 
-  LLL <- max(length(x1), length(x2),
+  LLL <- max(length(q1), length(q2),
              length(mean1), length(mean2),
              length(sd1), length(sd2),
              length(rho))
-  if (length(x1)    != LLL) x1    <- rep(x1,     len = LLL)
-  if (length(x2)    != LLL) x2    <- rep(x2,     len = LLL)
+  if (length(q1)    != LLL) q1    <- rep(q1,     len = LLL)
+  if (length(q2)    != LLL) q2    <- rep(q2,     len = LLL)
   if (length(mean1) != LLL) mean1 <- rep(mean1,  len = LLL)
   if (length(mean2) != LLL) mean2 <- rep(mean2,  len = LLL)
   if (length(sd1)   != LLL) sd1   <- rep(sd1,    len = LLL)
   if (length(sd2)   != LLL) sd2   <- rep(sd2,    len = LLL)
   if (length(rho)   != LLL) rho   <- rep(rho,    len = LLL)
 
-  Z1 <- (x1 - mean1) / sd1
-  Z2 <- (x2 - mean2) / sd2
+  Zedd1 <- Z1 <- (q1 - mean1) / sd1
+  Zedd2 <- Z2 <- (q2 - mean2) / sd2
 
-  ans <- Z1
+  is.inf1.neg <- is.infinite(Z1) & Z1 < 0  # -Inf
+  is.inf1.pos <- is.infinite(Z1) & Z1 > 0  # +Inf
+  is.inf2.neg <- is.infinite(Z2) & Z2 < 0  # -Inf
+  is.inf2.pos <- is.infinite(Z2) & Z2 > 0  # +Inf
+  Zedd1[is.inf1.neg] <- 0
+  Zedd1[is.inf1.pos] <- 0
+  Zedd2[is.inf2.neg] <- 0
+  Zedd2[is.inf2.pos] <- 0
+
+  ans <- Zedd1
   singler <- ifelse(length(rho) == 1, 1, 0)
   answer <- .C("pnorm2",
-       ah = as.double(-Z1), ak = as.double(-Z2), r = as.double(rho),
+       ah = as.double(-Zedd1), ak = as.double(-Zedd2), r = as.double(rho),
        size = as.integer(LLL), singler = as.integer(singler),
        ans = as.double(ans))$ans
   if (any(answer < 0.0))
     warning("some negative values returned")
+
+  answer[is.inf1.neg] <- 0
+  answer[is.inf1.pos] <- pnorm(Z2[is.inf1.pos])
+  answer[is.inf2.neg] <- 0
+  answer[is.inf2.pos] <- pnorm(Z1[is.inf2.pos])
+
   answer
 }
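
The Zedd bookkeeping above keeps infinite arguments away from the C
routine pnorm2: infinite quantiles are zeroed before the call and the
affected entries of the answer are patched afterwards from the marginal
normal CDF. Two quick checks (hypothetical values):

pbinorm(q1 = Inf, q2 = 1.5, cov12 = 0.5)  # Marginal case: pnorm(1.5)
pbinorm(q1 = 0, q2 = 0, cov12 = 0)        # Independence: 0.25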
 
@@ -1198,7 +1212,7 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
 
 
   warning("decommissioning pnorm2() soon; use ",
-          "dbinorm() instead")
+          "pbinorm() instead")
 
 
   sd1 <- sqrt(var1)
@@ -1561,17 +1575,20 @@ my.dbinom <- function(x,
 
 
 
-betabinomial.ab.control <- function(save.weight = TRUE, ...) {
+betabinomialff.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
 
 
 
- betabinomial.ab <- function(lshape12 = "loge",
-                             i1 = 1, i2 = NULL, imethod = 1,
-                             shrinkage.init = 0.95, nsimEIM = NULL,
-                             zero = NULL) {
+ betabinomialff <-
+  function(lshape12 = "loge",
+           i1 = 1, i2 = NULL, imethod = 1,
+           ishrinkage = 0.95, nsimEIM = NULL,
+           zero = NULL) {
+
+
 
 
   lshape12 <- as.list(substitute(lshape12))
@@ -1609,7 +1626,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
             "Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
                        "where rho = 1 / (shape1+shape2+1)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (!all(w == 1))
@@ -1640,8 +1657,8 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
                 } else if ( .imethod == 1) {
                   shape1 * (1 / weighted.mean(y, w)  - 1)
                 } else if ( .imethod == 2) {
-                  temp777 <- .sinit * weighted.mean(y, w) +
-                            (1 - .sinit) * y
+                  temp777 <- .ishrinkage * weighted.mean(y, w) +
+                            (1 - .ishrinkage ) * y
                   shape1 * (1 / temp777 - 1)
                 } else {
                   shape1 * (1 / weighted.mean(mustart.use, w) - 1)
@@ -1658,7 +1675,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
     }
   }), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
             .nsimEIM = nsimEIM,
-            .imethod = imethod, .sinit = shrinkage.init ))),
+            .imethod = imethod, .ishrinkage = ishrinkage ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
     shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
@@ -1707,7 +1724,7 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
       }
     }
   }, list( .lshape12 = lshape12, .earg = earg ))),
-  vfamily = c("betabinomial.ab"),
+  vfamily = c("betabinomialff"),
 
 
 
@@ -1834,21 +1851,20 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
 
 
 
-
   new("vglmff",
   blurb = c("Beta-geometric distribution\n",
             "Links:    ",
             namesof("prob",  lprob,  earg = eprob), ", ",
             namesof("shape", lshape, earg = eshape)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     eval(geometric()@initialize)
 
     predictors.names <-
-         c(namesof("prob",  .lprob ,  earg = .eprob,  tag = FALSE),
-           namesof("shape", .lshape , earg = .eshape, short = FALSE))
+         c(namesof("prob",  .lprob  , earg = .eprob  , tag = FALSE),
+           namesof("shape", .lshape , earg = .eshape , tag = FALSE))
 
     if (length( .iprob ))
       prob.init <- rep( .iprob , len = n)
@@ -1943,8 +1959,9 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
   deriv = eval(substitute(expression({
     prob  <- eta2theta(eta[, 1], .lprob ,  earg = .eprob )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-    shape1 <- prob / shape; shape2 <- (1 - prob) / shape;
-    dprob.deta  <- dtheta.deta(prob,  .lprob ,  earg = .eprob )
+    shape1 <-      prob  / shape
+    shape2 <- (1 - prob) / shape
+    dprob.deta  <- dtheta.deta(prob , .lprob  , earg = .eprob  )
     dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
     dl.dprob <- 1 / prob
     dl.dshape <- 0 * y
@@ -1998,10 +2015,10 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
 
 
 
-seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
-                         iprob1 = NULL,    iprob2 = NULL,
-                         parallel = FALSE,  # apply.parint = TRUE,
-                         zero = NULL) {
+ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
+                          iprob1 = NULL,    iprob2 = NULL,
+                          parallel = FALSE,  # apply.parint = TRUE,
+                          zero = NULL) {
   apply.parint <- TRUE
 
   lprob1 <- as.list(substitute(lprob1))
@@ -2030,11 +2047,11 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
             namesof("prob1", lprob1, earg = eprob1), ", ",
             namesof("prob2", lprob2, earg = eprob2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints,
                            apply.int = .apply.parint )
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel,
             .apply.parint = apply.parint,
             .zero = zero ))),
@@ -2209,15 +2226,15 @@ seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
             namesof("phi12",  lphi12,  earg = ephi12), ", ",
             namesof("oratio", loratio, earg = eoratio)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
     }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
-    eval(process.binomial2.data.vgam)
+    eval(process.binomial2.data.VGAM)
 
     predictors.names <- c(
-             namesof("mu12",   .lmu12 ,   earg = .emu12 ,   short = TRUE), 
-             namesof("phi12",  .lphi12,  earg = .ephi12,  short = TRUE),
-             namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
+             namesof("mu12",   .lmu12   , earg = .emu12   , tag = FALSE),
+             namesof("phi12",  .lphi12  , earg = .ephi12  , tag = FALSE),
+             namesof("oratio", .loratio , earg = .eoratio , tag = FALSE))
 
     propY1.eq.0 <- weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
     propY2.eq.0 <- weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
@@ -2390,7 +2407,7 @@ if (FALSE)
             namesof("rhopos", lrhopos, earg = erhopos), ", ",
             namesof("rhoneg", lrhoneg, earg = erhoneg)),
     initialize = eval(substitute(expression({
-        eval(process.binomial2.data.vgam)
+        eval(process.binomial2.data.VGAM)
 
 
 
@@ -2563,14 +2580,14 @@ if (FALSE)
             namesof("mu1", lmu12, earg = emu12), ", ",
             namesof("mu2", lmu12, earg = emu12)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1), 2, 1), x = x,
+    constraints <- cm.VGAM(matrix(c(1, 1), 2, 1), x = x,
                            bool = .exchangeable ,
                            constraints = constraints,
                            apply.int = TRUE)
   }), list( .exchangeable = exchangeable ))),
   deviance = Deviance.categorical.data.vgam,
   initialize = eval(substitute(expression({
-    eval(process.binomial2.data.vgam)
+    eval(process.binomial2.data.VGAM)
     predictors.names <- c(
                   namesof("mu1", .lmu12 , earg = .emu12 , short = TRUE),
                   namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE))
@@ -2756,11 +2773,11 @@ if (FALSE)
             namesof("mu2", lmu12, earg = emu12), ", ",
             namesof("rho", l.rho, earg = e.rho)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
+    constraints <- cm.VGAM(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x = x,
                            bool = .exchangeable ,
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x = x, .zero , M = M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -2880,13 +2897,12 @@ if (FALSE)
           retval
         }
         rho.grid <- .grho  # seq(-0.95, 0.95, len = 31)
-        try.this <- getMaxMin(rho.grid, objfun = binom2.rho.ss.Loglikfun,
-                              y = y, x = x, w = w, extraargs = list(
-                              ymat2col = extra$ymat2col,
-                              initmu1  = mu1.init,
-                              initmu2  = mu2.init,
-                              nvec = nvec 
-                              ))
+        try.this <- grid.search(rho.grid, objfun = binom2.rho.ss.Loglikfun,
+                                y = y, x = x, w = w, extraargs = list(
+                                ymat2col = extra$ymat2col,
+                                initmu1  = mu1.init,
+                                initmu2  = mu2.init,
+                                nvec = nvec ))
 
       rho.init <- if (is.Numeric( .irho ))
                    rep( .irho , len = n) else {
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 8045c8e..b246b17 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -13,26 +13,26 @@
 
 
 
-dbiclaytoncop <- function(x1, x2, alpha = 0, log = FALSE){
+dbiclaytoncop <- function(x1, x2, apar = 0, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
-  A <- x1^(-alpha) + x2^(-alpha) - 1
-  logdensity <- log1p(alpha) -
-                (1 + alpha) * (log(x1) + log(x2)) - 
-                (2 + 1 / alpha) * log(abs(A))  # Avoid warning
+  A <- x1^(-apar) + x2^(-apar) - 1
+  logdensity <- log1p(apar) -
+                (1 + apar) * (log(x1) + log(x2)) - 
+                (2 + 1 / apar) * log(abs(A))  # Avoid warning
 
   out.square <- (x1 < 0) | (x1 > 1) | (x2 < 0) | (x2 > 1)
   logdensity[out.square] <- log(0.0)
 
 
-  index0 <- (rep(alpha, length = length(A)) < sqrt(.Machine$double.eps))
+  index0 <- (rep(apar, length = length(A)) < sqrt(.Machine$double.eps))
   if (any(index0))
     logdensity[index0] <- log(1.0)
 
 
-  index1 <- (rep(alpha, length = length(A)) < 0.0) | (A < 0.0)
+  index1 <- (rep(apar, length = length(A)) < 0.0) | (A < 0.0)
   if (any(index1))
     logdensity[index1] <- NaN
 
@@ -41,25 +41,23 @@ dbiclaytoncop <- function(x1, x2, alpha = 0, log = FALSE){
 
 
 
-
-
   if (log.arg) logdensity else exp(logdensity)
 }
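
As apar decreases to 0 the Clayton copula degenerates to independence,
which the index0 branch above implements by forcing the log-density to
0 on the unit square. A quick check (hypothetical values):

dbiclaytoncop(x1 = 0.3, x2 = 0.7, apar = 0)  # 1: independence copula
dbiclaytoncop(x1 = 0.3, x2 = 0.7, apar = 2)  # About 0.63, off the diagonal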
 
 
 
-rbiclaytoncop <- function(n, alpha = 0) {
-  if (any(alpha < 0))
-    stop("argument 'alpha' must be greater or equal to 0")
+rbiclaytoncop <- function(n, apar = 0) {
+  if (any(apar < 0))
+    stop("argument 'apar' must be greater or equal to 0")
 
   u1 <- runif(n = n)
   v2 <- runif(n = n)
 
-  u2 <- (u1^(-alpha) *
-        (v2^(-alpha / (1 + alpha)) - 1) + 1)^(-1 / alpha)
+  u2 <- (u1^(-apar) *
+        (v2^(-apar / (1 + apar)) - 1) + 1)^(-1 / apar)
 
 
-  index0 <- (rep(alpha, length = length(u1)) < sqrt(.Machine$double.eps))
+  index0 <- (rep(apar, length = length(u1)) < sqrt(.Machine$double.eps))
   if (any(index0))
     u2[index0] <- runif(sum(index0))
 
@@ -68,8 +66,8 @@ rbiclaytoncop <- function(n, alpha = 0) {
 
 
 
- biclaytoncop <- function(lalpha    = "loge",
-                          ialpha    = NULL,
+ biclaytoncop <- function(lapar    = "loge",
+                          iapar    = NULL,
                           imethod   = 1,
                           parallel  = FALSE,
                           zero = NULL) {
@@ -77,13 +75,13 @@ rbiclaytoncop <- function(n, alpha = 0) {
   apply.parint <- TRUE
 
 
-  lalpha <- as.list(substitute(lalpha))
-  ealpha <- link2list(lalpha)
-  lalpha <- attr(ealpha, "function.name")
+  lapar <- as.list(substitute(lapar))
+  eapar <- link2list(lapar)
+  lapar <- attr(eapar, "function.name")
 
 
-  if (length(ialpha) && any(ialpha <= 0))
-    stop("argument 'ialpha' must have values in (0, Inf)")
+  if (length(iapar) && any(iapar <= 0))
+    stop("argument 'iapar' must have values in (0, Inf)")
 
 
 
@@ -93,18 +91,18 @@ rbiclaytoncop <- function(n, alpha = 0) {
 
   new("vglmff",
   blurb = c(" bivariate clayton copula distribution)\n","Links:    ",
-                namesof("alpha", lalpha, earg = ealpha)),
+                namesof("apar", lapar, earg = eapar)),
 
   constraints = eval(substitute(expression({
-        constraints <- cm.vgam(matrix(1, M, 1), x = x,
-                               bool = .parallel , 
-                               constraints = constraints,
-                               apply.int = .apply.parint )
-
-        dotzero <- .zero
-        M1 <- 1
-        Yusual <- 2
-        eval(negzero.expression)
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel , 
+                           constraints = constraints,
+                           apply.int = .apply.parint )
+
+    dotzero <- .zero
+    M1 <- 1
+    Yusual <- 2
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero,
             .apply.parint = apply.parint,
             .parallel = parallel ))),
@@ -143,10 +141,10 @@ rbiclaytoncop <- function(n, alpha = 0) {
     extra$M1 <- M1
     extra$Yusual <- Yusual
     M <- M1 * (ncoly / Yusual)
-    mynames1 <- paste("alpha", if (M / M1 > 1) 1:(M / M1) else "",
+    mynames1 <- paste("apar", if (M / M1 > 1) 1:(M / M1) else "",
                       sep = "")
-    predictors.names <- c(
-      namesof(mynames1, .lalpha , earg = .ealpha , short = TRUE))
+    predictors.names <-
+      namesof(mynames1, .lapar , earg = .eapar , short = TRUE)
 
 
     extra$dimnamesy1 <- dimnames(y)[[1]]
@@ -155,16 +153,15 @@ rbiclaytoncop <- function(n, alpha = 0) {
     
     if (!length(etastart)) {
       
-      alpha.init <- matrix(if (length( .ialpha )) .ialpha else 0 + NA,
-                           n, M / M1, byrow = TRUE)
+      apar.init <- matrix(if (length( .iapar )) .iapar else 0 + NA,
+                          n, M / M1, byrow = TRUE)
 
-      if (!length( .ialpha ))
+      if (!length( .iapar ))
         for (spp. in 1:(M / M1)) {
           ymatj <- y[, (Yusual * spp. - 1):(Yusual * spp.)]
 
-              
-              
-          alpha.init0 <- if ( .imethod == 1) {
+
+          apar.init0 <- if ( .imethod == 1) {
             k.tau <- kendall.tau(ymatj[, 1], ymatj[, 2], exact = FALSE,
                                  max.n = 500)
 
@@ -178,44 +175,38 @@ rbiclaytoncop <- function(n, alpha = 0) {
             rhobit(pearson.rho)
           }
 
-
-
-
-          if (any(is.na(alpha.init[, spp.])))
-            alpha.init[, spp.] <- alpha.init0
+          if (any(is.na(apar.init[, spp.])))
+            apar.init[, spp.] <- apar.init0
         }
           
-      etastart <- theta2eta(alpha.init, .lalpha , earg = .ealpha )
+      etastart <- theta2eta(apar.init, .lapar , earg = .eapar )
     }
   }), list( .imethod = imethod,
-                .lalpha = lalpha,
-                .ealpha = ealpha,
-                .ialpha = ialpha ))),
+            .lapar = lapar,
+            .eapar = eapar,
+            .iapar = iapar ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-
     eta <- as.matrix(eta)
     fv.matrix <- matrix(0.5, nrow(eta), extra$ncoly)
-        
-        
+
     if (length(extra$dimnamesy2))
       dimnames(fv.matrix) <- list(extra$dimnamesy1,
                                   extra$dimnamesy2)
     fv.matrix
-  }  , list( .lalpha = lalpha,
-             .ealpha = ealpha ))),
+  }  , list( .lapar = lapar,
+             .eapar = eapar ))),
 
   last = eval(substitute(expression({
-        
     M1 <- extra$M1
     Yusual <- extra$Yusual
-    misc$link <- rep( .lalpha , length = M)
+    misc$link <- rep( .lapar , length = M)
     temp.names <- mynames1
     names(misc$link) <- temp.names
     
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:M) {
-      misc$earg[[ii]] <- .ealpha
+      misc$earg[[ii]] <- .eapar
     }
 
     misc$M1 <- M1
@@ -225,16 +216,15 @@ rbiclaytoncop <- function(n, alpha = 0) {
     misc$parallel  <- .parallel
     misc$apply.parint <- .apply.parint
     misc$multipleResponses <- TRUE
-
   }) , list( .imethod = imethod,
              .parallel = parallel, .apply.parint = apply.parint,
-             .lalpha = lalpha,
-             .ealpha = ealpha ))),
+             .lapar = lapar,
+             .eapar = eapar ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+    Alpha <- eta2theta(eta, .lapar , earg = .eapar )
 
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
@@ -243,39 +233,34 @@ rbiclaytoncop <- function(n, alpha = 0) {
       ll.elts <-
         c(w) * dbiclaytoncop(x1  = c(y[, c(TRUE, FALSE)]),
                              x2  = c(y[, c(FALSE, TRUE)]),
-                             alpha = c(Alpha), log = TRUE)
+                             apar = c(Alpha), log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  } , list( .lalpha = lalpha,
-            .ealpha = ealpha,
+  } , list( .lapar = lapar,
+            .eapar = eapar,
             .imethod = imethod ))),
   vfamily = c("biclaytoncop"),
 
-
-
   simslot = eval(substitute(
   function(object, nsim) {
-
    pwts <- if (length(pwts <- object@prior.weights) > 0)
               pwts else weights(object, type = "prior")
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+    Alpha <- eta2theta(eta, .lapar , earg = .eapar )
     rbiclaytoncop(nsim * length(Alpha),
-                  alpha = c(Alpha))
-  }  , list( .lalpha = lalpha,
-             .ealpha = ealpha ))),
-
-
+                  apar = c(Alpha))
+  }  , list( .lapar = lapar,
+             .eapar = eapar ))),
 
 
   deriv = eval(substitute(expression({
-    Alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
+    Alpha <- eta2theta(eta, .lapar , earg = .eapar )
     Yindex1 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual)) - 1
     Yindex2 <- extra$Yusual * (1:(extra$ncoly/extra$Yusual))
 
@@ -284,42 +269,39 @@ rbiclaytoncop <- function(n, alpha = 0) {
 
     
     AA <- y[, Yindex1]^(-Alpha) + y[, Yindex2]^(-Alpha) - 1
-    dAA.dalpha <- -y[, Yindex1]^(-Alpha) * log(y[, Yindex1]) -
+    dAA.dapar <- -y[, Yindex1]^(-Alpha) * log(y[, Yindex1]) -
                    y[, Yindex2]^(-Alpha) * log(y[, Yindex2])
-    dl.dalpha <- 1 / (1 + Alpha) - log(y[, Yindex1] * y[, Yindex2]) -
-                 dAA.dalpha / AA * (2 + 1 / Alpha ) + log(AA) / Alpha^2
+    dl.dapar <- 1 / (1 + Alpha) - log(y[, Yindex1] * y[, Yindex2]) -
+                 dAA.dapar / AA * (2 + 1 / Alpha ) + log(AA) / Alpha^2
    
 
 
-    dalpha.deta <- dtheta.deta(Alpha, .lalpha , earg = .ealpha )
+    dapar.deta <- dtheta.deta(Alpha, .lapar , earg = .eapar )
 
-    dl.deta <- c(w) * cbind(dl.dalpha) * dalpha.deta
+    dl.deta <- c(w) * cbind(dl.dapar) * dapar.deta
     dl.deta
-  }), list( .lalpha = lalpha,
-            .ealpha = ealpha,
+  }), list( .lapar = lapar,
+            .eapar = eapar,
             .imethod = imethod ))),
 
   weight = eval(substitute(expression({
-
-
-    par <- Alpha +1 #20130808
+    par <- Alpha + 1  # 20130808
     denom1 <- (3 * par -2) * (2 * par - 1)
     denom2 <- 2 * (par - 1)
-    v1 <- trigamma(1 / (denom2))
-    v2 <- trigamma(par / (denom2))
-    v3 <- trigamma((2 * par - 1) / (denom2))
-    Rho. <- 1 / denom1 * (1 + par / (denom2) * (v1 - v2) +
-            1 / (denom2) * (v2 - v3))
+    v1 <- trigamma(1 / denom2)
+    v2 <- trigamma(par / denom2)
+    v3 <- trigamma((2 * par - 1) / denom2)
+    Rho. <- (1 + par  * (v1 - v2) / denom2 +
+                        (v2 - v3) / denom2) / denom1
     
     out <- 1 / par^2 + 2 / (par * (par - 1) * (2 * par - 1)) +
            4 * par / (3 * par - 2) - 2 * (2 * par - 1) * Rho. / (par - 1)
-    ned2l.dalpha  <- out
-
+    ned2l.dapar  <- out
 
-    wz <- ned2l.dalpha * dalpha.deta^2
- c(w) * wz
-  }), list( .lalpha = lalpha,
-            .ealpha = ealpha,
+    wz <- ned2l.dapar * dapar.deta^2
+    c(w) * wz
+  }), list( .lapar = lapar,
+            .eapar = eapar,
             .imethod = imethod ))))
 }
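
With imethod = 1 the family initializes apar from Kendall's tau; for
the Clayton copula tau = apar / (apar + 2), so the sample tau inverts
to a starting value along these lines (a sketch; y1 and y2 are
hypothetical uniform margins):

tau.hat   <- kendall.tau(y1, y2, exact = FALSE, max.n = 500)
apar.init <- 2 * tau.hat / (1 - tau.hat)  # Inverts tau = apar / (apar + 2)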
 
@@ -430,7 +412,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
             namesof("rho", lrho, earg = erho)),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
@@ -438,7 +420,7 @@ bistudent.deriv.dof <-  function(u, v, nu, rho) {
     dotzero <- .zero
     M1 <- 2
     Yusual <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero,
             .apply.parint = apply.parint,
             .parallel = parallel ))),
@@ -736,17 +718,21 @@ pbinormcop <- function(q1, q2, rho = 0) {
       any(abs(rho) >= 1))
     stop("bad input for argument 'rho'")
 
-  pnorm2(x1 = qnorm(q1),
-         x2 = qnorm(q2),
-         cov12 = rho)
+  pbinorm(q1 = qnorm(q1), q2 = qnorm(q2), cov12 = rho)
 }
 
 
-rbinormcop <- function(n, rho = 0) {
+rbinormcop <- function(n, rho = 0  #, inverse = FALSE
+                      ) {
 
+  inverse <- FALSE
   ymat <- rbinorm(n = n, cov12 = rho)
-  cbind(y1 = pnorm(ymat[, 1]),
-        y2 = pnorm(ymat[, 2]))
+  if (inverse) {
+    ymat
+  } else {
+    cbind(y1 = pnorm(ymat[, 1]),
+          y2 = pnorm(ymat[, 2]))
+  }
 }
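
# A Monte Carlo sanity check (a sketch; assumes this VGAM build is
# attached): samples from rbinormcop() should reproduce the
# pbinormcop() CDF.
set.seed(123)
uv <- rbinormcop(n = 5000, rho = 0.6)
mean(uv[, 1] <= 0.5 & uv[, 2] <= 0.5)  # empirical C(0.5, 0.5)
pbinormcop(0.5, 0.5, rho = 0.6)        # exact value, about 0.35
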
 
 
@@ -786,7 +772,7 @@ rbinormcop <- function(n, rho = 0) {
             namesof("rho", lrho, earg = erho)),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
@@ -794,7 +780,7 @@ rbinormcop <- function(n, rho = 0) {
     dotzero <- .zero
     M1 <- 1
     Yusual <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero,
             .apply.parint = apply.parint,
             .parallel = parallel ))),
@@ -997,12 +983,12 @@ rbinormcop <- function(n, rho = 0) {
 
 
 
-bilogistic4.control <- function(save.weight = TRUE, ...) {
+bilogistic.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
 
 
- bilogistic4 <- function(llocation = "identitylink",
+ bilogistic  <- function(llocation = "identitylink",
                          lscale = "loge",
                          iloc1 = NULL, iscale1 = NULL,
                          iloc2 = NULL, iscale2 = NULL,
@@ -1033,7 +1019,7 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
             "\n", "\n",
             "Means:     location1, location2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -1139,7 +1125,7 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
     }
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
-  vfamily = c("bilogistic4"),
+  vfamily = c("bilogistic"),
 
 
   simslot = eval(substitute(
@@ -1154,9 +1140,9 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
     Scale1 <- eta2theta(eta[, 2], .lscale , .escale )
     locat2 <- eta2theta(eta[, 3], .llocat , .elocat )
     Scale2 <- eta2theta(eta[, 4], .lscale , .escale )
-    rbilogis4(nsim * length(locat1),
-              loc1 = locat1, scale1 = Scale1,
-              loc2 = locat2, scale2 = Scale2)
+    rbilogis(nsim * length(locat1),
+             loc1 = locat1, scale1 = Scale1,
+             loc2 = locat2, scale2 = Scale2)
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
 
@@ -1186,11 +1172,11 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
     dscale2.deta <- dtheta.deta(Scale2, .lscale , .escale )
 
     if (iter == 1) {
-        etanew <- eta
+      etanew <- eta
     } else {
-        derivold <- derivnew
-        etaold <- etanew
-        etanew <- eta
+      derivold <- derivnew
+      etaold <- etanew
+      etanew <- eta
     }
     derivnew <- c(w) * cbind(dl.dlocat1 * dlocat1.deta,
                              dl.dscale1 * dscale1.deta,
@@ -1219,8 +1205,8 @@ bilogistic4.control <- function(save.weight = TRUE, ...) {
 
 
 
-dbilogis4 <- function(x1, x2, loc1 = 0, scale1 = 1,
-                      loc2 = 0, scale2 = 1, log = FALSE) {
+dbilogis <- function(x1, x2, loc1 = 0, scale1 = 1,
+                     loc2 = 0, scale2 = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -1228,25 +1214,31 @@ dbilogis4 <- function(x1, x2, loc1 = 0, scale1 = 1,
 
 
 
-    L <- max(length(x1), length(x2),
-             length(loc1), length(loc2),
-             length(scale1), length(scale2))
-    if (length(x1    ) != L) x1     <- rep(x1,     length.out = L)
-    if (length(x2    ) != L) x2     <- rep(x2,     length.out = L)
-    if (length(loc1  ) != L) loc1   <- rep(loc1,   length.out = L)
-    if (length(loc2  ) != L) loc2   <- rep(loc2,   length.out = L)
-    if (length(scale1) != L) scale1 <- rep(scale1, length.out = L)
-    if (length(scale2) != L) scale2 <- rep(scale2, length.out = L)
-    zedd1 <- (-(x1 - loc1) / scale1)
-    zedd2 <- (-(x2 - loc2) / scale2)
-    logdensity <- log(2) + log(zedd1) + log(zedd2) - log(scale1) - 
-                  log(scale1) - 3 * log1p(exp(zedd1) + exp(zedd2))
-    if (log.arg) logdensity else exp(logdensity)
+  L <- max(length(x1), length(x2),
+           length(loc1), length(loc2),
+           length(scale1), length(scale2))
+  if (length(x1    ) != L) x1     <- rep(x1,     length.out = L)
+  if (length(x2    ) != L) x2     <- rep(x2,     length.out = L)
+  if (length(loc1  ) != L) loc1   <- rep(loc1,   length.out = L)
+  if (length(loc2  ) != L) loc2   <- rep(loc2,   length.out = L)
+  if (length(scale1) != L) scale1 <- rep(scale1, length.out = L)
+  if (length(scale2) != L) scale2 <- rep(scale2, length.out = L)
+  zedd1 <- (x1 - loc1) / scale1
+  zedd2 <- (x2 - loc2) / scale2
+
+
+
+
+  logdensity <- log(2) - zedd1 - zedd2 - log(scale1) -
+                log(scale2) - 3 * log1p(exp(-zedd1) + exp(-zedd2))
+
+
+  if (log.arg) logdensity else exp(logdensity)
 }
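
# Quick numeric check of the corrected density (a sketch using base R
# only): dbilogis() should integrate to about 1 over a wide grid; with
# the duplicated log(scale1) term the total would instead come out near
# scale2/scale1.
gx <- seq(-30, 30, by = 0.1)
gy <- seq(-45, 45, by = 0.1)
sum(outer(gx, gy, dbilogis, scale1 = 2, scale2 = 3)) * 0.1 * 0.1  # ~ 1
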
 
 
 
-pbilogis4 <-
+pbilogis <-
   function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
   ans <- 1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
@@ -1257,7 +1249,7 @@ pbilogis4 <-
 
 
 
-rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
+rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
 
   y1 <- rlogis(n = n, location = loc1, scale = scale1)
@@ -1305,11 +1297,11 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
             namesof("b",  lb,  earg = eb ), ", ",
             namesof("bp", lbp, earg = ebp)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(c(1, 1,0,0, 0,0, 1, 1), M, 2), x = x,
+    constraints <- cm.VGAM(matrix(c(1, 1,0,0, 0,0, 1, 1), M, 2), x = x,
                            bool = .independent ,
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list(.independent = independent, .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -1471,7 +1463,7 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
                            ishape1 = NULL,
                            ishape2 = NULL,
                            imethod = 1,
-                           zero = 1) {
+                           zero = 2:3) {
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
   lscale <- attr(escale, "function.name")
@@ -1505,11 +1497,11 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
   new("vglmff",
   blurb = c("Bivariate gamma: McKay's distribution\n",
             "Links:    ",
-            namesof("scale",  lscale), ", ",
-            namesof("shape1", lshape1), ", ",
-            namesof("shape2", lshape2)),
+            namesof("scale",  lscale,  earg = escale ), ", ",
+            namesof("shape1", lshape1, earg = eshape1), ", ",
+            namesof("shape2", lshape2, earg = eshape2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1557,9 +1549,9 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
       a.grid <- if (length( .iscale )) c( .iscale ) else
          c(0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100)
       extraargs <- list(momentsY = momentsY)
-      ainit <- getMaxMin(a.grid, objfun = mcg2.loglik,
-                         y = y, x = x, w = w, maximize = TRUE,
-                         extraargs = extraargs)
+      ainit <- grid.search(a.grid, objfun = mcg2.loglik,
+                           y = y, x = x, w = w, maximize = TRUE,
+                           extraargs = extraargs)
       ainit <- rep(if (is.Numeric( .iscale )) .iscale else ainit,
                    length.out = n)
       pinit <- (1/ainit) * abs(momentsY[1]) + 0.01
@@ -1677,54 +1669,55 @@ rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
 
 
 
-rbifrankcop <- function(n, alpha) {
+rbifrankcop <- function(n, apar) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
                            length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
-  if (!is.Numeric(alpha, positive = TRUE))
-    stop("bad input for argument 'alpha'")
-  if (length(alpha) != use.n) alpha <- rep(alpha, length.out = use.n)
+  if (!is.Numeric(apar, positive = TRUE))
+    stop("bad input for argument 'apar'")
+  if (length(apar) != use.n)
+    apar <- rep(apar, length.out = use.n)
   U <- runif(use.n)
   V <- runif(use.n)
 
-  T <- alpha^U + (alpha - alpha^U) * V
+  T <- apar^U + (apar - apar^U) * V
   X <- U
-  index <- (abs(alpha - 1) < .Machine$double.eps)
+  index <- (abs(apar - 1) < .Machine$double.eps)
   Y <- U
   if (any(!index))
     Y[!index] <- logb(T[!index] / (T[!index] +
-                      (1 - alpha[!index]) * V[!index]),
-                      base = alpha[!index])
+                      (1 - apar[!index]) * V[!index]),
+                      base = apar[!index])
   ans <- matrix(c(X, Y), nrow = use.n, ncol = 2)
   if (any(index)) {
-    ans[index, 1] <- runif(sum(index))  # Uniform density for alpha == 1
+    ans[index, 1] <- runif(sum(index))  # Uniform density for apar == 1
     ans[index, 2] <- runif(sum(index))
   }
   ans
 }
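
# Sketch: samples from the renamed rbifrankcop() should have roughly
# Uniform(0, 1) margins for any positive 'apar'.
set.seed(1)
uv <- rbifrankcop(n = 2000, apar = 5)
round(colMeans(uv), 2)  # both entries near 0.50
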
 
 
-pbifrankcop <- function(q1, q2, alpha) {
+pbifrankcop <- function(q1, q2, apar) {
   if (!is.Numeric(q1))                     stop("bad input for 'q1'")
   if (!is.Numeric(q2))                     stop("bad input for 'q2'")
-  if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
+  if (!is.Numeric(apar, positive = TRUE)) stop("bad input for 'apar'")
 
-  L <- max(length(q1), length(q2), length(alpha))
-  if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+  L <- max(length(q1), length(q2), length(apar))
+  if (length(apar) != L) apar <- rep(apar, length.out = L)
   if (length(q1   ) != L) q1    <- rep(q1,    length.out = L)
   if (length(q2   ) != L) q2    <- rep(q2,    length.out = L)
 
   x <- q1; y <- q2
   index <- (x >= 1 & y <  1) | (y >= 1 & x <  1) |
            (x <= 0 | y <= 0) | (x >= 1 & y >= 1) |
-           (abs(alpha - 1) < .Machine$double.eps)
+           (abs(apar - 1) < .Machine$double.eps)
   ans <- as.numeric(index)
   if (any(!index))
-  ans[!index] <- logb(1 + ((alpha[!index])^(x[!index]) - 1)*
-                 ((alpha[!index])^(y[!index]) - 1)/(alpha[!index] - 1), 
-                 base = alpha[!index])
-  ind2 <- (abs(alpha - 1) < .Machine$double.eps)
+  ans[!index] <- logb(1 + ((apar[!index])^(x[!index]) - 1)*
+                 ((apar[!index])^(y[!index]) - 1)/(apar[!index] - 1), 
+                 base = apar[!index])
+  ind2 <- (abs(apar - 1) < .Machine$double.eps)
   ans[ind2] <- x[ind2] * y[ind2]
   ans[x >= 1 & y <  1] <- y[x >= 1 & y < 1]  # P(Y2 < q2) = q2
   ans[y >= 1 & x <  1] <- x[y >= 1 & x < 1]  # P(Y1 < q1) = q1
@@ -1734,7 +1727,24 @@ pbifrankcop <- function(q1, q2, alpha) {
 }
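
# Sketch of the boundary handling above: on the edges the copula falls
# back to its uniform margins, and apar == 1 is the independence case.
stopifnot(all.equal(pbifrankcop(1.0, 0.3, apar = 2), 0.3),
          all.equal(pbifrankcop(0.4, 0.7, apar = 1), 0.4 * 0.7))
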
 
 
-dbifrankcop <- function(x1, x2, alpha, log = FALSE) {
+
+
+
+if (FALSE)
+dbifrank <- function(x1, x2, apar, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+  logdens <- (x1+x2)*log(apar) + log(apar-1) + log(log(apar)) -
+             2 * log(apar - 1 + (apar^x1 - 1) * (apar^x2 - 1))
+
+  if (log.arg) logdens else exp(logdens)
+}
+
+
+
+
+dbifrankcop <- function(x1, x2, apar, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -1742,25 +1752,25 @@ dbifrankcop <- function(x1, x2, alpha, log = FALSE) {
 
   if (!is.Numeric(x1))                     stop("bad input for 'x1'")
   if (!is.Numeric(x2))                     stop("bad input for 'x2'")
-  if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
+  if (!is.Numeric(apar, positive = TRUE)) stop("bad input for 'apar'")
 
-  L <- max(length(x1), length(x2), length(alpha))
-  if (length(alpha) != L) alpha <- rep(alpha, length.out = L)
+  L <- max(length(x1), length(x2), length(apar))
+  if (length(apar) != L) apar <- rep(apar, length.out = L)
   if (length(x1   ) != L) x1    <- rep(x1,    length.out = L)
   if (length(x2   ) != L) x2    <- rep(x2,    length.out = L)
 
   if (log.arg) {
-    denom <- alpha-1 + (alpha^x1  - 1) * (alpha^x2  - 1)
+    denom <- apar-1 + (apar^x1  - 1) * (apar^x2  - 1)
     denom <- abs(denom)
-    log((alpha - 1) * log(alpha)) + (x1+x2)*log(alpha) - 2 * log(denom)
+    log((apar - 1) * log(apar)) + (x1+x2)*log(apar) - 2 * log(denom)
   } else {
-    temp <- (alpha - 1) + (alpha^x1 - 1) * (alpha^x2 - 1)
-    index <- (abs(alpha - 1) < .Machine$double.eps)
+    temp <- (apar - 1) + (apar^x1 - 1) * (apar^x2 - 1)
+    index <- (abs(apar - 1) < .Machine$double.eps)
     ans <- x1
     if (any(!index))
-      ans[!index] <- (alpha[!index] - 1) * log(alpha[!index]) *
-                     (alpha[!index])^(x1[!index] +
-                                      x2[!index]) / (temp[!index])^2
+      ans[!index] <- (apar[!index] - 1) * log(apar[!index]) *
+                     (apar[!index])^(x1[!index] +
+                                     x2[!index]) / (temp[!index])^2
     ans[x1 <= 0 | x2 <= 0 | x1 >= 1 | x2 >= 1] <- 0
     ans[index] <- 1
     ans
@@ -1855,7 +1865,7 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <- c(w) * dbifrankcop(x1 = y[, 1], x2 = y[, 2],
-                                    alpha = apar, log = TRUE)
+                                    apar = apar, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -1876,7 +1886,7 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
       warning("ignoring prior weights")
     eta <- predict(object)
     apar <- eta2theta(eta, .lapar , earg = .eapar )
-    rbifrankcop(nsim * length(apar), alpha = c(apar))
+    rbifrankcop(nsim * length(apar), apar = c(apar))
   }, list( .lapar = lapar, .eapar = eapar ))),
 
 
@@ -1907,7 +1917,7 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
 
     run.mean <- 0
     for (ii in 1:( .nsimEIM )) {
-      ysim <- rbifrankcop(n, alpha = apar)
+      ysim <- rbifrankcop(n, apar = apar)
         y1 <- ysim[, 1]; y2 <- ysim[, 2];
         eval.de3 <- eval(de3)
         d2l.dthetas2 <-  attr(eval.de3, "hessian")
@@ -1950,7 +1960,8 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
 
 
 
- gammahyp <- function(ltheta = "loge", itheta = NULL, expected = FALSE) {
+ gammahyperbola <-
+  function(ltheta = "loge", itheta = NULL, expected = FALSE) {
 
   ltheta <- as.list(substitute(ltheta))
   etheta <- link2list(ltheta)
@@ -2024,7 +2035,7 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
       }
     }
   }, list( .ltheta = ltheta, .etheta = etheta ))),
-  vfamily = c("gammahyp"),
+  vfamily = c("gammahyperbola"),
   deriv = eval(substitute(expression({
     theta <- eta2theta(eta, .ltheta , .etheta )
     Dl.dtheta <- exp(-theta) * y[, 1] * (1+theta) / theta^2 - y[, 2]
@@ -2049,9 +2060,10 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
 
 
 
- morgenstern <- function(lapar = "rhobit",
-                         iapar = NULL, tola0 = 0.01,
-                         imethod = 1) {
+ bifgmexp <-
+  function(lapar = "rhobit",
+           iapar = NULL, tola0 = 0.01,
+           imethod = 1) {
   lapar <- as.list(substitute(lapar))
   earg  <- link2list(lapar)
   lapar <- attr(earg, "function.name")
@@ -2073,7 +2085,8 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
 
 
   new("vglmff",
-  blurb = c("Morgenstern's bivariate exponential distribution\n",
+  blurb = c("Bivariate Farlie-Gumbel-Morgenstern ",
+            "exponential distribution\n",  # Morgenstern's 
             "Links:    ",
             namesof("apar", lapar, earg = earg )),
   initialize = eval(substitute(expression({
@@ -2146,7 +2159,7 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
       }
     }
   }, list( .lapar = lapar, .earg = earg, .tola0 = tola0 ))),
-  vfamily = c("morgenstern"),
+  vfamily = c("bifgmexp"),  # morgenstern
   deriv = eval(substitute(expression({
     alpha  <- eta2theta(eta, .lapar , earg = .earg )
     alpha[abs(alpha) < .tola0 ] <- .tola0
@@ -2159,19 +2172,19 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
     dalpha.deta <- dtheta.deta(alpha,  .lapar , earg = .earg )
 
     c(w) * cbind(dl.dalpha * dalpha.deta)
-  }), list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
+  }), list( .lapar = lapar, .earg = earg, .tola0 = tola0 ))),
   weight = eval(substitute(expression({
     d2l.dalpha2 <- dl.dalpha^2
     d2alpha.deta2 <- d2theta.deta2(alpha,  .lapar , earg = .earg )
     wz <- c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
-    if (TRUE &&
-       intercept.only) {
-        wz <- cbind(wz)
+    if (TRUE  &&
+        intercept.only) {
+      wz <- cbind(wz)
       sumw <- sum(w)
       for (iii in 1:ncol(wz))
-        wz[,iii] <- sum(wz[, iii]) / sumw
+        wz[, iii] <- sum(wz[, iii]) / sumw
       pooled.weight <- TRUE
-      wz <- c(w) * wz   # Put back the weights
+      wz <- c(w) * wz  # Put back the weights
     } else {
       pooled.weight <- FALSE
     }
@@ -2182,71 +2195,71 @@ bifrankcop.control <- function(save.weight = TRUE, ...) {
 
 
 
-rfgm <- function(n, alpha) {
+rbifgmcop <- function(n, apar) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
                            length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
-  if (!is.Numeric(alpha))
-    stop("bad input for argument 'alpha'")
-  if (any(abs(alpha) > 1))
-    stop("argument 'alpha' has values out of range")
+  if (!is.Numeric(apar))
+    stop("bad input for argument 'apar'")
+  if (any(abs(apar) > 1))
+    stop("argument 'apar' has values out of range")
 
   y1 <- V1 <- runif(use.n)
   V2 <- runif(use.n)
   temp <- 2*y1 - 1
-  A <- alpha * temp - 1
-  B <- sqrt(1 - 2 * alpha * temp + (alpha*temp)^2 + 4 * alpha * V2 * temp)
+  A <- apar * temp - 1
+  B <- sqrt(1 - 2 * apar * temp + (apar*temp)^2 + 4 * apar * V2 * temp)
   y2 <- 2 * V2 / (B - A)
   matrix(c(y1, y2), nrow = use.n, ncol = 2)
 }
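
# Sketch: the FGM copula has grade (Spearman) correlation apar/3, so a
# large sample from the renamed rbifgmcop() should show correlation
# near that value.
set.seed(4)
uv <- rbifgmcop(n = 5000, apar = 0.9)
round(cor(uv[, 1], uv[, 2]), 2)  # roughly 0.3
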
 
 
 
-dfgm <- function(x1, x2, alpha, log = FALSE) {
+dbifgmcop <- function(x1, x2, apar, log = FALSE) {
   if (!is.logical(log.arg <- log) ||
       length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
-  if (!is.Numeric(alpha))
-    stop("bad input for 'alpha'")
-  if (any(abs(alpha) > 1))
-    stop("'alpha' values out of range")
+  if (!is.Numeric(apar))
+    stop("bad input for 'apar'")
+  if (any(abs(apar) > 1))
+    stop("'apar' values out of range")
   if ( !is.logical( log.arg ) ||
        length( log.arg ) != 1 )
     stop("bad input for argument 'log'")
 
-  L <- max(length(x1), length(x2), length(alpha))
-  if (length(x1)    != L)  x1    <- rep(x1,    length.out = L)
-  if (length(x2)    != L)  x2    <- rep(x2,    length.out = L)
-  if (length(alpha) != L)  alpha <- rep(alpha, length.out = L)
+  L <- max(length(x1), length(x2), length(apar))
+  if (length(x1)    != L)  x1   <- rep(x1,   length.out = L)
+  if (length(x2)    != L)  x2   <- rep(x2,   length.out = L)
+  if (length(apar)  != L)  apar <- rep(apar, length.out = L)
   ans <- 0 * x1
   xnok <- (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
   if ( log.arg ) {
-    ans[!xnok] <- log1p(alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok]))
+    ans[!xnok] <- log1p(apar[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok]))
     ans[xnok] <- log(0)
   } else {
-    ans[!xnok] <-   1 + alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
+    ans[!xnok] <-   1 + apar[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
     ans[xnok] <- 0
     if (any(ans < 0))
-      stop("negative values in the density (alpha out of range)")
+      stop("negative values in the density (apar out of range)")
   }
   ans
 }
 
 
-pfgm <- function(q1, q2, alpha) {
+pbifgmcop <- function(q1, q2, apar) {
   if (!is.Numeric(q1))     stop("bad input for 'q1'")
   if (!is.Numeric(q2))     stop("bad input for 'q2'")
-  if (!is.Numeric(alpha))  stop("bad input for 'alpha'")
-  if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
+  if (!is.Numeric(apar))  stop("bad input for 'apar'")
+  if (any(abs(apar) > 1)) stop("'apar' values out of range")
 
-  L <- max(length(q1), length(q2), length(alpha))
+  L <- max(length(q1), length(q2), length(apar))
   if (length(q1)    != L)     q1 <- rep(q1,    length.out = L)
   if (length(q2)    != L)     q2 <- rep(q2,    length.out = L)
-  if (length(alpha) != L)  alpha <- rep(alpha, length.out = L)
+  if (length(apar)  != L)   apar <- rep(apar,  length.out = L)
 
   x <- q1
   y <- q2
@@ -2256,7 +2269,7 @@ pfgm <- function(q1, q2, alpha) {
            (x >= 1 & y >= 1)
   ans <- as.numeric(index)
   if (any(!index)) {
-    ans[!index] <-    q1[!index] *   q2[!index] * (1 + alpha[!index] *
+    ans[!index] <-    q1[!index] *   q2[!index] * (1 + apar[!index] *
                    (1-q1[!index])*(1-q2[!index]))
   }
   ans[x >= 1 & y<1] <- y[x >= 1 & y<1]  # P(Y2 < q2) = q2
@@ -2271,8 +2284,8 @@ pfgm <- function(q1, q2, alpha) {
 
 
 
- fgm <- function(lapar = "rhobit", iapar = NULL,
-                 imethod = 1) {
+ bifgmcop <- function(lapar = "rhobit", iapar = NULL,
+                      imethod = 1) {
 
   lapar <- as.list(substitute(lapar))
   earg  <- link2list(lapar)
@@ -2290,7 +2303,7 @@ pfgm <- function(q1, q2, alpha) {
 
 
   new("vglmff",
-  blurb = c("Farlie-Gumbel-Morgenstern distribution\n",
+  blurb = c("Farlie-Gumbel-Morgenstern copula \n",  # distribution
             "Links:    ",
             namesof("apar", lapar, earg = earg )),
   initialize = eval(substitute(expression({
@@ -2350,7 +2363,6 @@ pfgm <- function(q1, q2, alpha) {
   }, list( .lapar = lapar, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c("apar" = .lapar )
-
     misc$earg <- list("apar" = .earg  )
 
     misc$expected <- FALSE
@@ -2364,8 +2376,8 @@ pfgm <- function(q1, q2, alpha) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dfgm(x1 = y[, 1],
-                             x2 = y[, 2], alpha = alpha, log = TRUE)
+      ll.elts <- c(w) * dbifgmcop(x1 = y[, 1],
+                                  x2 = y[, 2], apar = alpha, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -2373,9 +2385,7 @@ pfgm <- function(q1, q2, alpha) {
       }
     }
   }, list( .lapar = lapar, .earg = earg ))),
-  vfamily = c("fgm"),
-
-
+  vfamily = c("bifgmcop"),
 
 
   simslot = eval(substitute(
@@ -2387,13 +2397,11 @@ pfgm <- function(q1, q2, alpha) {
       warning("ignoring prior weights")
     eta <- predict(object)
     alpha <- eta2theta(eta, .lapar , earg = .earg )
-    rfgm(nsim * length(alpha), alpha = c(alpha))
+    rbifgmcop(nsim * length(alpha), apar = c(alpha))
   }, list( .lapar = lapar, .earg = earg ))),
 
 
 
-
-
   deriv = eval(substitute(expression({
     alpha  <- eta2theta(eta, .lapar , earg = .earg )
 
@@ -2423,7 +2431,9 @@ pfgm <- function(q1, q2, alpha) {
 
 
 
- bigumbelI <- function(lapar = "identitylink", iapar = NULL, imethod = 1) {
+
+ bigumbelIexp <-
+  function(lapar = "identitylink", iapar = NULL, imethod = 1) {
 
   lapar <- as.list(substitute(lapar))
   earg  <- link2list(lapar)
@@ -2440,7 +2450,7 @@ pfgm <- function(q1, q2, alpha) {
 
 
   new("vglmff",
-  blurb = c("Gumbel's Type I bivariate distribution\n",
+  blurb = c("Gumbel's Type I bivariate exponential distribution\n",
             "Links:    ",
             namesof("apar", lapar, earg = earg )),
   initialize = eval(substitute(expression({
@@ -2516,7 +2526,7 @@ pfgm <- function(q1, q2, alpha) {
       }
     }
   }, list( .lapar = lapar, .earg = earg ))),
-  vfamily = c("bigumbelI"),
+  vfamily = c("bigumbelIexp"),
   deriv = eval(substitute(expression({
     alpha  <- eta2theta(eta, .lapar , earg = .earg )
     numerator <- (alpha * y[, 1] - 1) * y[, 2] +
@@ -2555,7 +2565,7 @@ pfgm <- function(q1, q2, alpha) {
 
 
 
-pplack <- function(q1, q2, oratio) {
+pbiplackcop <- function(q1, q2, oratio) {
   if (!is.Numeric(q1)) stop("bad input for 'q1'")
   if (!is.Numeric(q2)) stop("bad input for 'q2'")
   if (!is.Numeric(oratio, positive = TRUE)) stop("bad input for 'oratio'")
@@ -2588,7 +2598,7 @@ pplack <- function(q1, q2, oratio) {
 
 
 
-rplack <- function(n, oratio) {
+rbiplackcop <- function(n, oratio) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
                            length.arg = 1, positive = TRUE))
@@ -2607,7 +2617,7 @@ rplack <- function(n, oratio) {
 
 
 
-dplack <- function(x1, x2, oratio, log = FALSE) {
+dbiplackcop <- function(x1, x2, oratio, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -2626,13 +2636,13 @@ dplack <- function(x1, x2, oratio, log = FALSE) {
 
 
 
-plackett.control <- function(save.weight = TRUE, ...) {
+biplackettcop.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
 
 
 
- plackett <- function(link = "loge", ioratio = NULL,
-                      imethod = 1, nsimEIM = 200) {
+ biplackettcop <- function(link = "loge", ioratio = NULL,
+                           imethod = 1, nsimEIM = 200) {
 
   link <- as.list(substitute(link))
@@ -2650,7 +2660,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
 
 
   new("vglmff",
-  blurb = c("Plackett distribution\n",
+  blurb = c("Plackett distribution (bivariate copula)\n",
             "Links:    ",
             namesof("oratio", link, earg = earg )),
   initialize = eval(substitute(expression({
@@ -2724,7 +2734,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dplack(x1 = y[, 1], x2 = y[, 2],
-                               oratio = oratio, log = TRUE)
+      ll.elts <- c(w) * dbiplackcop(x1 = y[, 1], x2 = y[, 2],
+                                    oratio = oratio, log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -2733,7 +2743,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
       }
     }
   }, list( .link = link, .earg = earg ))),
-  vfamily = c("plackett"),
+  vfamily = c("biplackettcop"),
 
 
   simslot = eval(substitute(
@@ -2745,7 +2755,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
       warning("ignoring prior weights")
     eta <- predict(object)
     oratio <- eta2theta(eta, .link , earg = .earg )
-    rplack(nsim * length(oratio), oratio = c(oratio))
+    rbiplackcop(nsim * length(oratio), oratio = c(oratio))
   }, list(  .link = link, .earg = earg ))),
 
 
@@ -2774,7 +2784,7 @@ plackett.control <- function(save.weight = TRUE, ...) {
                     name = "oratio", hessian = FALSE)
     run.var <- 0
     for (ii in 1:( .nsimEIM )) {
-      ysim <- rplack(n, oratio = oratio)
+      ysim <- rbiplackcop(n, oratio = oratio)
       y1sim <- ysim[, 1]
       y2sim <- ysim[, 2]
         eval.sd3 <- eval(sd3)
@@ -2795,92 +2805,99 @@ plackett.control <- function(save.weight = TRUE, ...) {
 
 
 
-damh <- function(x1, x2, alpha, log = FALSE) {
+
+dbiamhcop <- function(x1, x2, apar, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
 
-  L <- max(length(x1), length(x2), length(alpha))
-  alpha <- rep(alpha,  length.out = L)
+  L <- max(length(x1), length(x2), length(apar))
+  apar <- rep(apar,  length.out = L)
   x1    <- rep(x1,     length.out = L)
   x2    <- rep(x2,     length.out = L)
-  temp <- 1 - alpha*(1-x1)*(1-x2)
+  temp <- 1 - apar*(1-x1)*(1-x2)
 
   if (log.arg) {
-    ans <- log1p(-alpha+2*alpha*x1*x2/temp) - 2*log(temp)
+    ans <- log1p(-apar+2*apar*x1*x2/temp) - 2*log(temp)
     ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] <- log(0)
   } else {
-    ans <- (1-alpha+2*alpha*x1*x2/temp) / (temp^2)
+    ans <- (1-apar+2*apar*x1*x2/temp) / (temp^2)
     ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] <- 0
   }
-  ans[abs(alpha) > 1] <- NA
+  ans[abs(apar) > 1] <- NA
   ans
 }
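
# Sketch of the range handling above: 'apar' outside [-1, 1] gives NA,
# points off the unit square have zero density, and apar == 0 recovers
# the independence copula.
dbiamhcop(0.5, 0.5, apar = 1.5)  # NA
dbiamhcop(1.2, 0.5, apar = 0.5)  # 0
dbiamhcop(0.5, 0.5, apar = 0)    # 1
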
 
 
-pamh <- function(q1, q2, alpha) {
+pbiamhcop <- function(q1, q2, apar) {
   if (!is.Numeric(q1)) stop("bad input for 'q1'")
   if (!is.Numeric(q2)) stop("bad input for 'q2'")
-  if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
+  if (!is.Numeric(apar)) stop("bad input for 'apar'")
 
-  L <- max(length(q1), length(q2), length(alpha))
-  if (length(q1) != L)  q1 <- rep(q1, length.out = L)
-  if (length(q2) != L)  q2 <- rep(q2, length.out = L)
-  if (length(alpha) != L)  alpha <- rep(alpha, length.out = L)
+  L <- max(length(q1), length(q2), length(apar))
+  if (length(q1)    != L)  q1    <- rep(q1,    length.out = L)
+  if (length(q2)    != L)  q2    <- rep(q2,    length.out = L)
+  if (length(apar)  != L)  apar  <- rep(apar,  length.out = L)
 
-  x <- q1; y <- q2
+  x <- q1
+  y <- q2
   index <- (x >= 1 & y < 1) | (y >= 1 & x <  1) |
            (x <= 0 | y <= 0) | (x >= 1 & y >= 1)
   ans <- as.numeric(index)
   if (any(!index)) {
-      ans[!index] <- (q1[!index]*q2[!index]) / (1 -
-                     alpha[!index]*(1-q1[!index])*(1-q2[!index]))
+    ans[!index] <- (q1[!index] * q2[!index]) / (1 -
+                   apar[!index] * (1-q1[!index]) * (1-q2[!index]))
   }
   ans[x >= 1 & y <  1] <- y[x >= 1 & y < 1]  # P(Y2 < q2) = q2
   ans[y >= 1 & x <  1] <- x[y >= 1 & x < 1]  # P(Y1 < q1) = q1
   ans[x <= 0 | y <= 0] <- 0
   ans[x >= 1 & y >= 1] <- 1
-  ans[abs(alpha) > 1] <- NA
+  ans[abs(apar) > 1] <- NA
   ans
 }
 
 
-ramh <- function(n, alpha) {
+rbiamhcop <- function(n, apar) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
                            length.arg = 1, positive = TRUE))
               stop("bad input for argument 'n'") else n
 
-  if (any(abs(alpha) > 1))
-    stop("'alpha' values out of range")
+
+
+
+
+
+  if (any(abs(apar) > 1))
+    stop("'apar' values out of range")
 
   U1 <- V1 <- runif(use.n)
   V2 <- runif(use.n)
   b <- 1-V1
-  A <- -alpha*(2*b*V2+1)+2*alpha^2*b^2*V2+1
-  B <- alpha^2*(4*b^2*V2-4*b*V2+1)+alpha*(4*V2-4*b*V2-2)+1
-  U2 <- (2*V2*(alpha*b - 1)^2)/(A+sqrt(B))
+  A <- -apar*(2*b*V2+1)+2*apar^2*b^2*V2+1
+  B <- apar^2*(4*b^2*V2-4*b*V2+1)+apar*(4*V2-4*b*V2-2)+1
+  U2 <- (2*V2*(apar*b - 1)^2)/(A+sqrt(B))
   matrix(c(U1, U2), nrow = use.n, ncol = 2)
 }
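
# Sketch: empirical probabilities from rbiamhcop() should track the
# closed-form pbiamhcop() CDF.
set.seed(7)
uv <- rbiamhcop(n = 5000, apar = 0.7)
mean(uv[, 1] <= 0.4 & uv[, 2] <= 0.6)  # empirical C(0.4, 0.6)
pbiamhcop(0.4, 0.6, apar = 0.7)        # about 0.29
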
 
 
-amh.control <- function(save.weight = TRUE, ...) {
+biamhcop.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
 
 
- amh <- function(lalpha = "rhobit", ialpha = NULL,
-                 imethod = 1, nsimEIM = 250) {
+ biamhcop <- function(lapar = "rhobit", iapar = NULL,
+                      imethod = 1, nsimEIM = 250) {
-  lalpha <- as.list(substitute(lalpha))
-  ealpha <- link2list(lalpha)
-  lalpha <- attr(ealpha, "function.name")
+  lapar <- as.list(substitute(lapar))
+  eapar <- link2list(lapar)
+  lapar <- attr(eapar, "function.name")
 
 
 
-  if (length(ialpha) && (abs(ialpha) > 1))
-    stop("'ialpha' should be less than or equal to 1 in absolute value")
+  if (length(iapar) && (abs(iapar) > 1))
+    stop("'iapar' should be less than or equal to 1 in absolute value")
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
     imethod > 2)
@@ -2896,7 +2913,7 @@ amh.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Ali-Mikhail-Haq distribution\n",
             "Links:    ",
-            namesof("alpha", lalpha, earg = ealpha )),
+            namesof("apar", lapar, earg = eapar )),
   initialize = eval(substitute(expression({
     if (any(y < 0) || any(y > 1))
         stop("the response must have values in the unit square")
@@ -2915,13 +2932,13 @@ amh.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <-
-      c(namesof("alpha", .lalpha, earg = .ealpha, short = TRUE))
+      c(namesof("apar", .lapar, earg = .eapar, short = TRUE))
 
     if (length(dimnames(y)))
       extra$dimnamesy2 <- dimnames(y)[[2]]
 
     if (!length(etastart)) {
-      ainit  <- if (length( .ialpha ))  .ialpha else {
+      ainit  <- if (length( .iapar ))  .iapar else {
           mean1 <- if ( .imethod == 1) weighted.mean(y[, 1], w) else
                    median(y[, 1])
           mean2 <- if ( .imethod == 1) weighted.mean(y[, 2], w) else
@@ -2931,45 +2948,45 @@ amh.control <- function(save.weight = TRUE, ...) {
       }
       ainit <- min(0.95, max(ainit, -0.95))
       etastart <-
-        theta2eta(rep(ainit, length.out = n), .lalpha, earg = .ealpha )
+        theta2eta(rep(ainit, length.out = n), .lapar , earg = .eapar )
     }
-  }), list( .lalpha = lalpha, .ealpha = ealpha, .ialpha = ialpha,
+  }), list( .lapar = lapar, .eapar = eapar, .iapar = iapar,
             .imethod = imethod))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
-    fv.matrix <- matrix(0.5, length(alpha), 2)
+    apar <- eta2theta(eta, .lapar, earg = .eapar )
+    fv.matrix <- matrix(0.5, length(apar), 2)
     if (length(extra$dimnamesy2))
         dimnames(fv.matrix) <- list(names(eta), extra$dimnamesy2)
     fv.matrix
-  }, list( .lalpha = lalpha, .ealpha = ealpha ))),
+  }, list( .lapar = lapar, .eapar = eapar ))),
   last = eval(substitute(expression({
-    misc$link <-    c("alpha" = .lalpha )
+    misc$link <-    c("apar" = .lapar )
 
-    misc$earg <- list("alpha" = .ealpha )
+    misc$earg <- list("apar" = .eapar )
 
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
     misc$multipleResponses <- FALSE
-  }), list( .lalpha = lalpha,
-            .ealpha = ealpha, .nsimEIM = nsimEIM ))),
+  }), list( .lapar = lapar,
+            .eapar = eapar, .nsimEIM = nsimEIM ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
+    apar <- eta2theta(eta, .lapar, earg = .eapar )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * damh(x1 = y[, 1], x2 = y[, 2],
-                             alpha = alpha, log = TRUE)
+      ll.elts <- c(w) * dbiamhcop(x1 = y[, 1], x2 = y[, 2],
+                                  apar = apar, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lalpha = lalpha, .ealpha = ealpha ))),
-  vfamily = c("amh"),
+  }, list( .lapar = lapar, .eapar = eapar ))),
+  vfamily = c("biamhcop"),
 
 
 
@@ -2981,44 +2998,44 @@ amh.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    alpha <- eta2theta(eta, .lalpha , earg = .ealpha )
-    ramh(nsim * length(alpha), alpha = c(alpha))
-  }, list( .lalpha = lalpha, .ealpha = ealpha ))),
+    apar <- eta2theta(eta, .lapar , earg = .eapar )
+    rbiamhcop(nsim * length(apar), apar = c(apar))
+  }, list( .lapar = lapar, .eapar = eapar ))),
 
 
 
   deriv = eval(substitute(expression({
-    alpha <- eta2theta(eta, .lalpha, earg = .ealpha )
+    apar <- eta2theta(eta, .lapar, earg = .eapar )
 
-    dalpha.deta <- dtheta.deta(alpha, .lalpha, earg = .ealpha )
+    dapar.deta <- dtheta.deta(apar, .lapar, earg = .eapar )
 
     y1 <- y[, 1]
     y2 <- y[, 2]
-    de3 <- deriv3(~ (log(1 - alpha+
-                        (2 * alpha*y1*y2/(1-alpha*(1-y1)*(1-y2)))) -
-                    2 * log(1 - alpha*(1-y1)*(1-y2))) ,
-                    name = "alpha", hessian = FALSE)
+    de3 <- deriv3(~ (log(1 - apar+
+                        (2 * apar*y1*y2/(1-apar*(1-y1)*(1-y2)))) -
+                    2 * log(1 - apar*(1-y1)*(1-y2))) ,
+                    name = "apar", hessian = FALSE)
     eval.de3 <- eval(de3)
 
-    dl.dalpha <-  attr(eval.de3, "gradient")
+    dl.dapar <-  attr(eval.de3, "gradient")
 
-    c(w) * dl.dalpha * dalpha.deta
-  }), list( .lalpha = lalpha, .ealpha = ealpha ))),
+    c(w) * dl.dapar * dapar.deta
+  }), list( .lapar = lapar, .eapar = eapar ))),
   weight = eval(substitute(expression({
-    sd3 <- deriv3(~ (log(1 - alpha +
-                        (2 * alpha * y1sim * y2sim / (1 - alpha *
+    sd3 <- deriv3(~ (log(1 - apar +
+                        (2 * apar * y1sim * y2sim / (1 - apar *
                          (1 - y1sim) * (1-y2sim)))) -
-                     2 * log(1-alpha*(1-y1sim)*(1-y2sim))),
-                     name = "alpha", hessian = FALSE)
+                     2 * log(1-apar*(1-y1sim)*(1-y2sim))),
+                     name = "apar", hessian = FALSE)
     run.var <- 0
     for (ii in 1:( .nsimEIM )) {
-      ysim <- ramh(n, alpha = alpha)
+      ysim <- rbiamhcop(n, apar = apar)
       y1sim <- ysim[, 1]
       y2sim <- ysim[, 2]
       eval.sd3 <- eval(sd3)
-      dl.alpha <-  attr(eval.sd3, "gradient")
+      dl.dapar <-  attr(eval.sd3, "gradient")
       rm(ysim, y1sim, y2sim)
-      temp3 <- dl.dalpha
+      temp3 <- dl.dapar
       run.var <- ((ii - 1) * run.var + temp3^2) / ii
     }
 
@@ -3026,11 +3043,11 @@ amh.control <- function(save.weight = TRUE, ...) {
         matrix(colMeans(cbind(run.var)),
                n, dimm(M), byrow = TRUE) else cbind(run.var)
 
-    wz <- wz * dalpha.deta^2
+    wz <- wz * dapar.deta^2
 
     c(w) * wz
-  }), list( .lalpha = lalpha,
-            .ealpha = ealpha, .nsimEIM = nsimEIM ))))
+  }), list( .lapar = lapar,
+            .eapar = eapar, .nsimEIM = nsimEIM ))))
 }
 
 
@@ -3146,16 +3163,26 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
     temp8.m[2, 1] <- 1
     temp8.s <- diag(5)[, -4]
     temp8.s[4, 3] <- 1
-    constraints <- cm.vgam(temp8.m, x = x,
+    constraints <- cm.VGAM(temp8.m, x = x,
                            bool = .eq.mean ,
                            constraints = constraints, apply.int = TRUE)
-    constraints <- cm.vgam(temp8.s, x = x,
+    constraints <- cm.VGAM(temp8.s, x = x,
                            bool = .eq.sd ,
                            constraints = constraints, apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero,
             .eq.sd   = eq.sd,
             .eq.mean = eq.mean ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 5,
+         Q1 = 2,
+         eq.mean = .eq.mean ,
+         eq.sd   = .eq.sd   )
+    }, list( .zero    = zero,
+             .eq.mean = eq.mean,
+             .eq.sd   = eq.sd    ))),
+
   initialize = eval(substitute(expression({
 
     temp5 <-
@@ -3285,11 +3312,11 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    mean1 <- eta2theta(eta[, 1], .lmean1, earg = .emean1)
-    mean2 <- eta2theta(eta[, 2], .lmean2, earg = .emean2)
-    sd1   <- eta2theta(eta[, 3], .lsd1  , earg = .esd1  )
-    sd2   <- eta2theta(eta[, 4], .lsd2  , earg = .esd2  )
-    Rho   <- eta2theta(eta[, 5], .lrho  , earg = .erho  )
+    mean1 <- eta2theta(eta[, 1], .lmean1 , earg = .emean1 )
+    mean2 <- eta2theta(eta[, 2], .lmean2 , earg = .emean2 )
+    sd1   <- eta2theta(eta[, 3], .lsd1   , earg = .esd1   )
+    sd2   <- eta2theta(eta[, 4], .lsd2   , earg = .esd2   )
+    Rho   <- eta2theta(eta[, 5], .lrho   , earg = .erho   )
     rbinorm(nsim * length(sd1),
             mean1 = mean1, mean2 = mean2,
             var1 = sd1^2, var2 = sd2^2, cov12 = Rho * sd1 * sd2)
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 2032185..d2efbc7 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -15,7 +15,7 @@
 
 
 
-process.categorical.data.vgam <- expression({
+process.categorical.data.VGAM <- expression({
 
 
 
@@ -216,10 +216,10 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                    "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
                    "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
 
@@ -231,7 +231,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
     delete.zero.colns <- TRUE 
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
     extra$wy.prod <- TRUE
     M <- ncol(y) - 1 
 
@@ -395,10 +395,10 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                    "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
 
@@ -410,7 +410,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
     delete.zero.colns <- TRUE 
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
     M <- ncol(y) - 1 
 
     mynames <- if ( .reverse )
@@ -663,12 +663,12 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
 
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
-                           bool = .parallel, 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel ,
                            apply.int = TRUE,
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
-    constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.nointercept.VGAM(constraints, x, .nointercept , M)
   }), list( .parallel = parallel, .zero = zero,
             .nointercept = nointercept,
             .refLevel = refLevel ))),
@@ -694,7 +694,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
     delete.zero.colns <- TRUE 
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     M <- ncol(y)-1
     use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
@@ -718,7 +718,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     if (any(is.na(eta)))
       warning("there are NAs in eta in slot inverse")
 
-    ans <- mlogit(eta, refLevel = .refLevel , inverse = TRUE)
+    ans <- multilogit(eta, refLevel = .refLevel , inverse = TRUE)
     if (any(is.na(ans)))
       warning("there are NAs here in slot linkinv")
     if (min(ans) == 0 || max(ans) == 1)
@@ -729,9 +729,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
   last = eval(substitute(expression({
     misc$refLevel <- if ( .refLevel < 0) M+1 else .refLevel
-    misc$link <- "mlogit"
+    misc$link <- "multilogit"
 
-    misc$earg <- list(mlogit = list(
+    misc$earg <- list(multilogit = list(
       M = M,
       refLevel = use.refLevel
     ))
@@ -753,7 +753,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
           ))),
 
   linkfun = eval(substitute( function(mu, extra = NULL) {
-    mlogit(mu, refLevel = .refLevel )
+    multilogit(mu, refLevel = .refLevel )
   }), list( .refLevel = refLevel )),
 
   loglikelihood =
@@ -875,13 +875,13 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
           Llevels <- extra$Llevels
           NOS <- extra$NOS
           Hk.matrix <- kronecker(diag(NOS), matrix(1,Llevels-1,1))
-          constraints <- cm.vgam(Hk.matrix, x = x,
+          constraints <- cm.VGAM(Hk.matrix, x = x,
                                  bool = .parallel ,
                                  apply.int = .apply.parint ,
                                  constraints = constraints)
       }
     } else {
-      constraints <- cm.vgam(matrix(1, M, 1), x = x,
+      constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                              bool = .parallel ,
                              apply.int = .apply.parint ,
                              constraints = constraints)
@@ -940,7 +940,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       use.y <- use.mustart <- NULL
       for (iii in 1:NOS) {
         y <- as.factor(orig.y[,iii])
-        eval(process.categorical.data.vgam)
+        eval(process.categorical.data.VGAM)
         use.y <- cbind(use.y, y)
         use.mustart <- cbind(use.mustart, mustart)
       }
@@ -966,7 +966,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
       delete.zero.colns <- TRUE
 
-      eval(process.categorical.data.vgam)
+      eval(process.categorical.data.VGAM)
       M <- ncol(y) - 1
       mynames <- if ( .reverse )
         paste("P[Y", .fillerChar , ">=", .fillerChar,
@@ -1240,10 +1240,10 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
             "mu[,j] * (1 - mu[,j]); -mu[,j] * mu[,k]",
             "mu[,j]*(1-mu[,j]); -mu[,j]*mu[,k]")),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
 
   deviance = Deviance.categorical.data.vgam,
@@ -1256,7 +1256,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
 
     delete.zero.colns <- TRUE 
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
     M <- ncol(y) - 1
     mynames <- if ( .reverse )
       paste("P[Y", .fillerChar , "=",
@@ -1457,25 +1457,25 @@ acat.deriv <- function(zeta, reverse, M, n) {
     predictors.names <-
       namesof(paste("alpha", uindex, sep = ""), "loge", short = TRUE)
 
-  }), list( .refgp = refgp, .init.alpha=init.alpha ))),
+  }), list( .refgp = refgp, .init.alpha = init.alpha ))),
 
   linkinv = eval(substitute( function(eta, extra = NULL) {
     probs <- NULL
     eta <- as.matrix(eta)  # in case M = 1
     for (ii in 1:nrow(eta)) {
-        alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
-                                       earg = list(theta = NULL)),
-                             .refvalue , .refgp )
-        alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
-        alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
-        probs <- rbind(probs, alpha1 / (alpha1 + alpha2))
+      alpha <- .brat.alpha(eta2theta(eta[ii, ], "loge",
+                                     earg = list(theta = NULL)),
+                           .refvalue , .refgp )
+      alpha1 <- alpha[extra$ybrat.indices[, "rindex"]]
+      alpha2 <- alpha[extra$ybrat.indices[, "cindex"]]
+      probs <- rbind(probs, alpha1 / (alpha1 + alpha2))
     }
     dimnames(probs) <- dimnames(eta)
     probs
   }, list( .refgp = refgp, .refvalue = refvalue) )),
 
   last = eval(substitute(expression({
-    misc$link <- rep( "loge", length = M)
+    misc$link <- rep("loge", length = M)
     names(misc$link) <- paste("alpha", uindex, sep = "")
 
     misc$earg <- vector("list", M)
@@ -1494,7 +1494,7 @@ acat.deriv <- function(zeta, reverse, M, n) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
-                 y * w # Convert proportions to counts
+                 y * w  # Convert proportions to counts
       nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
               round(w)
 
@@ -1518,16 +1518,16 @@ acat.deriv <- function(zeta, reverse, M, n) {
   deriv = eval(substitute(expression({
     ans <- NULL
     uindex <- if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
-    eta <- as.matrix(eta)   # in case M = 1
+    eta <- as.matrix(eta)  # in case M = 1
     for (ii in 1:nrow(eta)) {
-      alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
+      alpha <- .brat.alpha(eta2theta(eta[ii, ], "loge",
                                      earg = list(theta = NULL)),
-                          .refvalue, .refgp)
-      ymat <- InverseBrat(y[ii,], NCo = M+1, diag = 0)
+                           .refvalue, .refgp )
+      ymat <- InverseBrat(y[ii, ], NCo = M+1, diag = 0)
       answer <- rep(0, len = M)
       for (aa in 1:(M+1)) {
         answer <- answer + (1 - (aa == uindex)) *
-                  (ymat[uindex,aa] * alpha[aa] - ymat[aa,uindex] *
+                  (ymat[uindex, aa] * alpha[aa] - ymat[aa, uindex] *
                   alpha[uindex]) / (alpha[aa] + alpha[uindex])
       }
       ans <- rbind(ans, w[ii] * answer)
@@ -1538,21 +1538,21 @@ acat.deriv <- function(zeta, reverse, M, n) {
   weight = eval(substitute(expression({
     wz <- matrix(0, n, dimm(M))
     for (ii in 1:nrow(eta)) {
-      alpha <- .brat.alpha(eta2theta(eta[ii,], "loge",
+      alpha <- .brat.alpha(eta2theta(eta[ii, ], "loge",
                                      earg = list(theta = NULL)),
                           .refvalue, .refgp)
-      ymat <- InverseBrat(y[ii,], NCo = M+1, diag = 0)
+      ymat <- InverseBrat(y[ii, ], NCo = M+1, diag = 0)
       for (aa in 1:(M+1)) {
         wz[ii, 1:M] <- wz[ii, 1:M] + (1 - (aa == uindex)) *
-                       (ymat[aa,uindex] + ymat[uindex,aa]) * alpha[aa] *
+                       (ymat[aa, uindex] + ymat[uindex, aa]) * alpha[aa] *
                        alpha[uindex] / (alpha[aa] + alpha[uindex])^2
       }
       if (M > 1) {
         ind5 <- iam(1, 1, M, both = TRUE, diag = FALSE)
-        wz[ii,(M+1):ncol(wz)] =
-          -(ymat[cbind(uindex[ind5$row],uindex[ind5$col])] +
-            ymat[cbind(uindex[ind5$col],uindex[ind5$row])]) *
-            alpha[uindex[ind5$col]] * alpha[uindex[ind5$row]] /
+        wz[ii, (M+1):ncol(wz)] <-
+          -(ymat[cbind(uindex[ind5$row], uindex[ind5$col])] +
+            ymat[cbind(uindex[ind5$col], uindex[ind5$row])]) *
+             alpha[uindex[ind5$col]] * alpha[uindex[ind5$row]] /
             (alpha[uindex[ind5$row]] + alpha[uindex[ind5$col]])^2
       }
     }
@@ -1564,6 +1564,8 @@ acat.deriv <- function(zeta, reverse, M, n) {
 
 
 
+
+
  bratt <- function(refgp = "last",
                    refvalue = 1,
                    init.alpha = 1,
@@ -1886,36 +1888,6 @@ InverseBrat <-
 
 
 
-tapplymat1 <- function(mat,
-                      function.arg = c("cumsum", "diff", "cumprod")) {
-
-
-  if (!missing(function.arg))
-    function.arg <- as.character(substitute(function.arg))
-  function.arg <- match.arg(function.arg,
-                            c("cumsum", "diff", "cumprod"))[1]
-
-  type <- switch(function.arg, cumsum = 1, diff = 2, cumprod = 3,
-                 stop("function.arg not matched"))
-
-  if (!is.matrix(mat))
-    mat <- as.matrix(mat)
-  NR <- nrow(mat)
-  NC <- ncol(mat)
-  fred <- .C("tapplymat1",
-               mat = as.double(mat),
-               as.integer(NR), as.integer(NC), as.integer(type), PACKAGE = "VGAM")
-
-  dim(fred$mat) <- c(NR, NC)
-  dimnames(fred$mat) <- dimnames(mat)
-  switch(function.arg,
-         cumsum = fred$mat,
-         diff   = fred$mat[, -1, drop = FALSE],
-         cumprod= fred$mat)
-}
-
-
-
 
  ordpoisson <- function(cutpoints,
                        countdata = FALSE, NOS = NULL, Levels = NULL,
@@ -1954,11 +1926,11 @@ tapplymat1 <- function(mat,
   blurb = c(paste("Ordinal Poisson model\n\n"), 
             "Link:     ", namesof("mu", link, earg = earg)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            apply.int = TRUE,
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     orig.y <- cbind(y)  # Convert y into a matrix if necessary
@@ -1991,7 +1963,7 @@ tapplymat1 <- function(mat,
     for (iii in 1:NOS) {
         y <- factor(orig.y[,iii], levels=(1:Levels[iii]))
         if ( !( .countdata )) {
-            eval(process.categorical.data.vgam)  # Creates mustart and y
+            eval(process.categorical.data.VGAM)  # Creates mustart and y
             use.y[,cptr:(cptr+Levels[iii]-1)] <- y
         }
         use.etastart[,iii] <- if (is.Numeric(initmu))
@@ -2181,14 +2153,14 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
               namesof("scale_j", lscale, escale)),
     constraints = eval(substitute(expression({
         J <- M / 2
-        constraints <- cm.vgam(matrix(1, J, 1), x = x,
+        constraints <- cm.VGAM(matrix(1, J, 1), x = x,
                                bool = .parallel ,
                                apply.int = FALSE,
                                constraints = constraints)
         constraints[["(Intercept)"]] = rbind(constraints[["(Intercept)"]],
             matrix(0, J, ncol(constraints[["(Intercept)"]])))
 
-        cm2 <- cm.vgam(matrix(1, J, 1), x = x,
+        cm2 <- cm.VGAM(matrix(1, J, 1), x = x,
                            bool = .sparallel ,
                            apply.int = FALSE,
                            constraints = NULL)
@@ -2224,7 +2196,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
 
 
     delete.zero.colns = TRUE # Cannot have FALSE since then prob(Y=jay)=0
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
 
     M <- 2*(ncol(y)-1)
@@ -2565,7 +2537,7 @@ prplot <- function(object,
     control <- prplot.control(...)
 
 
-  object <- plotvgam(object, plot.arg = FALSE, raw = FALSE)  # , ...
+  object <- plot.vgam(object, plot.arg = FALSE, raw = FALSE)  # , ...
 
   if (length(names(object at preplot)) != 1)
       stop("object needs to have only one term")
diff --git a/R/family.censored.R b/R/family.censored.R
index 7284e0d..4339f6f 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -15,7 +15,8 @@
 
 
 
- cenpoisson <- function(link = "loge", imu = NULL) {
+
+ cens.poisson <- function(link = "loge", imu = NULL) {
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
@@ -71,7 +72,7 @@
        init.mu[extra$leftcensored] <- pmax(y[extra$leftcensored, 1], 1/8)
     } else
     if (centype == "counting") {
-      stop("type == 'counting' not compatible with cenpoisson()")
+      stop("type == 'counting' not compatible with cens.poisson()")
       init.mu <- pmax(y[, 1], 1/8)
       stop("currently not working")
     } else
@@ -117,7 +118,7 @@
                         ppois(y[cenI, 1], mu[cenI])))
     }
   },
-  vfamily = "cenpoisson",
+  vfamily = "cens.poisson",
   deriv = eval(substitute(expression({
     cen0 <- extra$uncensored
     cenL <- extra$leftcensored
@@ -333,9 +334,10 @@ if (FALSE)
 
 
 
- cennormal1 <-
- cennormal <- function(lmu = "identitylink", lsd = "loge",
-                       imethod = 1, zero = 2) {
+ cennormal <-
+ cens.normal <- function(lmu = "identitylink", lsd = "loge",
+                         imethod = 1, zero = 2) {
+
 
 
   lmu <- as.list(substitute(lmu))
@@ -360,7 +362,7 @@ if (FALSE)
                           namesof("sd", lsd, tag = TRUE), "\n",
             "Conditional variance: sd^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -429,7 +431,7 @@ if (FALSE)
     sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
   }, list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd ))),
-  vfamily = c("cennormal"),
+  vfamily = c("cens.normal"),
   deriv = eval(substitute(expression({
     cenL <- extra$leftcensored
     cenU <- extra$rightcensored
@@ -520,8 +522,8 @@ if (FALSE)
 
 
 
- cenrayleigh <- function(lscale = "loge",
-                         oim  = TRUE) {
+ cens.rayleigh <- function(lscale = "loge",
+                           oim  = TRUE) {
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
@@ -582,7 +584,7 @@ if (FALSE)
       sum(w[cenU] * (y[cenU]/Scale[cenU])^2) * 0.5
   }, list( .lscale = lscale,
            .escale = escale ))),
-  vfamily = c("cenrayleigh"),
+  vfamily = c("cens.rayleigh"),
   deriv = eval(substitute(expression({
     cen0 <- !extra$rightcensored   # uncensored obsns
     cenU <- extra$rightcensored
@@ -623,12 +625,14 @@ if (FALSE)
 
 
 
- weibull <-
-  function(lshape = "loge", lscale = "loge",
-           ishape = NULL,   iscale = NULL,
+
+ weibullR <-
+  function(lscale = "loge", lshape = "loge",
+           iscale = NULL,   ishape = NULL,
+           lss = TRUE,
            nrfs = 1,
            probs.y = c(0.2, 0.5, 0.8),
-           imethod = 1, zero = -2) {
+           imethod = 1, zero = ifelse(lss, -2, -1)) {
 
 
 
@@ -670,26 +674,34 @@ if (FALSE)
     if (!is.Numeric(iscale, positive = TRUE))
       stop("argument 'iscale' values must be positive")
 
+  scale.TF <- if (lss) c(TRUE, FALSE) else c(FALSE, TRUE)
+  scale.12 <- if (lss) 1:2 else 2:1
+  blurb.vec <- c(namesof("scale", lscale, earg = escale),
+                 namesof("shape", lshape, earg = eshape))
+  blurb.vec <- blurb.vec[scale.12]
 
   new("vglmff",
   blurb = c("Weibull distribution\n\n",
             "Links:    ",
-            namesof("shape", lshape, earg = eshape), ", ", 
-            namesof("scale", lscale, earg = escale), "\n", 
+            blurb.vec[1], ", ",
+            blurb.vec[2], "\n",
             "Mean:     scale * gamma(1 + 1/shape)\n",
             "Variance: scale^2 * (gamma(1 + 2/shape) - ",
                       "gamma(1 + 1/shape)^2)"),
  constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
-  }), list( .zero = zero ))),
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero,
+            .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ))),
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
          zero = .zero )
-  }, list( .zero = zero
+  }, list( .zero = zero, .scale.12 = scale.12, .scale.TF = scale.TF
          ))),
 
   initialize = eval(substitute(expression({
@@ -717,11 +729,21 @@ if (FALSE)
            "don't use SurvS4()")
 
 
-    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
-    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
-    predictors.names <-
-        c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
-          namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
+    if ( .lss ) {
+      mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+      mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+      predictors.names <-
+          c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
+            namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
+
+    } else {
+      mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+      mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+      predictors.names <-
+          c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
+            namesof(mynames2, .lscale , earg = .escale , tag = FALSE))
+    }
+    predictors.names <- predictors.names[
           interleave.VGAM(M, M = M1)]
 
 
@@ -750,7 +772,10 @@ if (FALSE)
             Scale.init[, ilocal] <- exp(fit0$coef["Intercept"])
         }  # ilocal
 
-        etastart <-
+        etastart <- if ( .lss )
+          cbind(theta2eta(Scale.init, .lscale , earg = .escale ),
+                theta2eta(Shape.init, .lshape , earg = .eshape ))[,
+                interleave.VGAM(M, M = M1)] else
           cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
                 theta2eta(Scale.init, .lscale , earg = .escale ))[,
                 interleave.VGAM(M, M = M1)]
@@ -760,13 +785,15 @@ if (FALSE)
             .escale = escale, .eshape = eshape,
             .iscale = iscale, .ishape = ishape,
             .probs.y = probs.y,
+            .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss,
             .imethod = imethod ) )),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+    Scale <- eta2theta(eta[,    .scale.TF  ], .lscale , earg = .escale )
+    Shape <- eta2theta(eta[, !( .scale.TF )], .lshape , earg = .eshape )
     Scale * gamma(1 + 1 / Shape)
   }, list( .lscale = lscale, .lshape = lshape,
-           .escale = escale, .eshape = eshape ) )),
+           .escale = escale, .eshape = eshape,
+           .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ) )),
   last = eval(substitute(expression({
     regnotok <- any(Shape <= 2)
     if (any(Shape <= 1)) {
@@ -788,17 +815,19 @@ if (FALSE)
 
 
     M1 <- extra$M1
-    misc$link <-
-      c(rep( .lshape , length = ncoly),
-        rep( .lscale , length = ncoly))[interleave.VGAM(M, M = M1)]
+    avector <- if ( .lss ) c(rep( .lscale , length = ncoly),
+                             rep( .lshape , length = ncoly)) else
+                           c(rep( .lshape , length = ncoly),
+                             rep( .lscale , length = ncoly))
+    misc$link <- avector[interleave.VGAM(M, M = M1)]
     temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
     names(misc$link) <- temp.names
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- temp.names
     for (ii in 1:ncoly) {
-      misc$earg[[M1*ii-1]] <- .eshape
-      misc$earg[[M1*ii  ]] <- .escale
+      misc$earg[[M1*ii-1]] <- if ( .lss ) .escale else .eshape
+      misc$earg[[M1*ii  ]] <- if ( .lss ) .eshape else .escale
     }
 
     misc$M1 <- M1
@@ -812,23 +841,25 @@ if (FALSE)
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape,
             .imethod = imethod,
+            .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss,
             .nrfs = nrfs ) )),
   loglikelihood = eval(substitute(
           function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+    Scale <- eta2theta(eta[,    .scale.TF  ], .lscale , earg = .escale )
+    Shape <- eta2theta(eta[, !( .scale.TF )], .lshape , earg = .eshape )
 
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
       sum(c(w) * dweibull(x = y, shape = Shape, scale = Scale, log = TRUE))
     }
   }, list( .lscale = lscale, .lshape = lshape,
-           .escale = escale, .eshape = eshape ) )),
-  vfamily = c("weibull"),
+           .escale = escale, .eshape = eshape,
+           .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ) )),
+  vfamily = c("weibullR"),
   deriv = eval(substitute(expression({
     M1 <- 2
-    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+    Scale <- eta2theta(eta[,    .scale.TF  ], .lscale , earg = .escale )
+    Shape <- eta2theta(eta[, !( .scale.TF )], .lshape , earg = .eshape )
 
     dl.dshape <- 1 / Shape + log(y / Scale) -
                  log(y / Scale) * (y / Scale)^Shape
@@ -837,11 +868,15 @@ if (FALSE)
     dshape.deta <- dtheta.deta(Shape, .lshape, earg = .eshape )
     dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
 
-    myderiv <- c(w) * cbind(dl.dshape * dshape.deta,
-                            dl.dscale * dscale.deta)
+    myderiv <- if ( .lss )
+                 c(w) * cbind(dl.dscale * dscale.deta,
+                              dl.dshape * dshape.deta) else
+                 c(w) * cbind(dl.dshape * dshape.deta,
+                              dl.dscale * dscale.deta)
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lscale = lscale, .lshape = lshape,
-            .escale = escale, .eshape = eshape ) )),
+            .escale = escale, .eshape = eshape,
+            .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ) )),
   weight = eval(substitute(expression({
     EulerM <- -digamma(1.0)
 
@@ -850,15 +885,21 @@ if (FALSE)
     ned2l.dscale <- (Shape / Scale)^2
     ned2l.dshapescale <- (EulerM-1) / Scale
 
-    wz <- array(c(c(w) * ned2l.dshape * dshape.deta^2,
-                  c(w) * ned2l.dscale * dscale.deta^2,
-                  c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
-                dim = c(n, M / M1, 3))
+    wz <- if ( .lss )
+            array(c(c(w) * ned2l.dscale * dscale.deta^2,
+                    c(w) * ned2l.dshape * dshape.deta^2,
+                    c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
+                  dim = c(n, M / M1, 3)) else
+            array(c(c(w) * ned2l.dshape * dshape.deta^2,
+                    c(w) * ned2l.dscale * dscale.deta^2,
+                    c(w) * ned2l.dshapescale * dscale.deta * dshape.deta),
+                  dim = c(n, M / M1, 3))
     wz <- arwz2wz(wz, M = M, M1 = M1)
 
 
     wz
-  }), list( .eshape = eshape, .nrfs = nrfs ))))
+  }), list( .eshape = eshape, .nrfs = nrfs,
+            .scale.12 = scale.12, .scale.TF = scale.TF, .lss = lss ))))
 }
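
Besides the rename from weibull() to weibullR(), the new 'lss' argument
controls the order of the linear predictors: with lss = TRUE (the
default) eta1 models the scale and eta2 the shape, while lss = FALSE
keeps the old (shape, scale) ordering; the default for 'zero' adjusts
accordingly.  A sketch under those assumptions (simulated data, names
illustrative):

    set.seed(2)
    wdata <- data.frame(x2 = runif(100))
    wdata <- transform(wdata,
                       y = rweibull(100, shape = 2, scale = exp(1 + x2)))
    fit <- vglm(y ~ x2, weibullR(lss = TRUE), data = wdata, trace = TRUE)
    coef(fit, matrix = TRUE)   # scale column first, then shape
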
 
 
@@ -965,8 +1006,8 @@ is.SurvS4 <- function(x) inherits(x, "SurvS4")
 
 
 
-setIs(class1 = "SurvS4",
-      class2 = "matrix")  # Forces vglm()@y to be a matrix
+ setIs(class1 = "SurvS4",
+       class2 = "matrix")  # Forces vglm()@y to be a matrix
 
 
 
@@ -1140,7 +1181,7 @@ pgamma.deriv.unscaled <- function(q, shape) {
  constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
diff --git a/R/family.circular.R b/R/family.circular.R
index b735fe6..7a9d360 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -138,7 +138,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
             "pi + (rho/pi) *",
             "((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -169,9 +169,9 @@ cardioid.control <- function(save.weight = TRUE, ...) {
       }
       mu.grid <- seq(0.1, 6.0, len=19)
       mu.init <- if (length( .imu )) .imu else
-          getMaxMin(mu.grid, objfun = cardioid.Loglikfun,
-                    y = y,  x = x, w = w,
-                    extraargs = list(irho = rho.init))
+          grid.search(mu.grid, objfun = cardioid.Loglikfun,
+                      y = y,  x = x, w = w,
+                      extraargs = list(irho = rho.init))
       mu.init <- rep(mu.init, length=length(y))
       etastart <-
         cbind(theta2eta( mu.init, .lmu,  earg = .emu),
@@ -304,7 +304,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
             "\n", "\n",
             "Mean:     location"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
diff --git a/R/family.exp.R b/R/family.exp.R
index 4962f00..aa2ceae 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -282,7 +282,7 @@ reexp <- function(n, rate = 1) {
 
 
 
-dkoenker <- function(x, location = 0, scale = 1, log = FALSE) {
+dsc.t2 <- function(x, location = 0, scale = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -299,7 +299,7 @@ dkoenker <- function(x, location = 0, scale = 1, log = FALSE) {
 
 
 
-pkoenker <- function(q, location = 0, scale = 1, log = FALSE) {
+psc.t2 <- function(q, location = 0, scale = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -317,7 +317,7 @@ pkoenker <- function(q, location = 0, scale = 1, log = FALSE) {
 
 
 
-qkoenker <- function(p, location = 0, scale = 1) {
+qsc.t2 <- function(p, location = 0, scale = 1) {
 
   answer <- -2 * (1 - 2*p) / sqrt(1 - (1 - 2*p)^2)
   answer[p  < 0] <- NaN
@@ -333,8 +333,8 @@ qkoenker <- function(p, location = 0, scale = 1) {
 
 
 
-rkoenker <- function(n, location = 0, scale = 1) {
-  answer <- qkoenker(runif(n)) * scale + location
+rsc.t2 <- function(n, location = 0, scale = 1) {
+  answer <- qsc.t2(runif(n)) * scale + location
   answer[scale <= 0] <- NaN
   answer
 }
@@ -343,7 +343,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
 
 
 
- koenker <- function(percentile = 50,
+ sc.studentt2 <- function(percentile = 50,
                      llocation = "identitylink", lscale = "loge",
                      ilocation = NULL,   iscale = NULL,
                      imethod = 1,
@@ -379,14 +379,14 @@ rkoenker <- function(n, location = 0, scale = 1) {
 
 
   new("vglmff",
-  blurb = c("Koenker distribution\n\n",
+  blurb = c("Scaled Student t distribution with 2 degrees of freedom\n\n",
             "Links:    ",
             namesof("location", llocat, earg = elocat, tag = FALSE), ", ",
             namesof("scale",    lscale, earg = escale, tag = FALSE), "\n\n",
             "Mean:     location\n",
             "Variance: infinite"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -428,7 +428,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
     Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
     answer <- matrix(locat, nrow(eta), length(Perce))
     for (ii in 1:length(Perce))
-      answer[, ii] <- qkoenker(Perce[ii] / 100, loc = locat, sc = Scale)
+      answer[, ii] <- qsc.t2(Perce[ii] / 100, loc = locat, sc = Scale)
     dimnames(answer) <- list(dimnames(eta)[[1]],
                              paste(as.character(Perce), "%", sep = ""))
     answer
@@ -463,7 +463,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dkoenker(x = y, location = locat, scale = Scale,
+      ll.elts <- c(w) * dsc.t2(x = y, location = locat, scale = Scale,
                                  log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -473,7 +473,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
     }
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
-  vfamily = c("koenker"),
+  vfamily = c("sc.studentt2"),
   deriv = eval(substitute(expression({
     locat <- eta2theta(eta[, 1], link = .llocat, earg = .elocat)
     Scale <- eta2theta(eta[, 2], link = .lscale, earg = .escale)
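
The Koenker distribution is now labelled by what it is, a scaled Student
t with 2 degrees of freedom: dkoenker()/pkoenker()/qkoenker()/rkoenker()
become dsc.t2()/psc.t2()/qsc.t2()/rsc.t2(), and the family function
koenker() becomes sc.studentt2().  Assuming the signatures are otherwise
unchanged, the quantile function still inverts the distribution
function:

    p <- c(0.1, 0.5, 0.9)
    all.equal(psc.t2(qsc.t2(p, location = 1, scale = 2),
                     location = 1, scale = 2), p)   # should be TRUE
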
diff --git a/R/family.extremes.R b/R/family.extremes.R
index f66867c..303fc44 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -184,11 +184,16 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
           percentiles = c(95, 99),
           iscale = NULL, ishape = NULL,
           imethod = 1, gshape = c(-0.45, 0.45),
-          tolshape0 = 0.001, giveWarning = TRUE,
-          zero = 3) {
+          tolshape0 = 0.001,
+          type.fitted = c("percentiles", "mean"),
+          giveWarning = TRUE,
+          zero = 2:3) {
 
 
 
+  type.fitted <- match.arg(type.fitted,
+                           c("percentiles", "mean"))[1]
+
   llocat <- as.list(substitute(llocation))
   elocat <- link2list(llocat)
   llocat <- attr(elocat, "function.name")
@@ -208,14 +213,13 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
   if (!is.logical(giveWarning) || length(giveWarning) != 1)
     stop("bad input for argument 'giveWarning'")
 
-  mean <- FALSE
   if (length(iscale) &&
       !is.Numeric(iscale, positive = TRUE))
     stop("bad input for argument 'iscale'")
 
 
 
-  if (!mean &&  length(percentiles) &&
+  if (length(percentiles) &&
      (!is.Numeric(percentiles, positive = TRUE) ||
       max(percentiles) >= 100))
     stop("bad input for argument 'percentiles'")
@@ -245,19 +249,22 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
             namesof("scale",    lscale, escale), ", ",
             namesof("shape",    lshape, eshape)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 3,
          multipleResponses = FALSE,
+         type.fitted = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero ))),
+  }, list( .zero = zero,
+           .type.fitted = type.fitted ))),
 
 
   initialize = eval(substitute(expression({
     M1 <- extra$M1 <- 3
     ncoly <- ncol(y)
     extra$ncoly <- ncoly
+    extra$type.fitted <- .type.fitted
     extra$M1 <- M1
 
 
@@ -367,11 +374,11 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
               theta2eta(init.xi,  .lshape , .eshape ))
     }
   }), list( 
-            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+            .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .ishape = ishape, .iscale = iscale,
 
-            .gshape = gshape,
+            .gshape = gshape, .type.fitted = type.fitted,
             .percentiles = percentiles,
             .tolshape0 = tolshape0,
             .imethod = imethod, .giveWarning = giveWarning ))),
@@ -381,13 +388,27 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     sigma <- eta2theta(eta[, 2], .lscale , .escale )
     shape <- eta2theta(eta[, 3], .lshape , .eshape )
 
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning 'percentiles'.")
+                     "percentiles"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("percentiles", "mean"))[1]
+
+
+
     is.zero <- (abs(shape) < .tolshape0 )
+
     cent <- extra$percentiles
     LP <- length(cent)
-    fv <- matrix(as.numeric(NA), nrow(eta), LP)
-    if (LP) {
+    if (type.fitted == "percentiles" &&  # Upward compatibility:
+        LP > 0) {
+      fv <- matrix(as.numeric(NA), nrow(eta), LP)
       for (ii in 1:LP) {
-        yp <- -log(cent[ii]/100)
+        yp <- -log(cent[ii] / 100)
         fv[!is.zero, ii] <- Locat[!is.zero] - sigma[!is.zero] *
                             (1 - yp^(-shape[!is.zero])) / shape[!is.zero]
         fv[ is.zero, ii] <- Locat[ is.zero] - sigma[ is.zero] *
@@ -397,17 +418,17 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
                            paste(as.character(cent), "%", sep = ""))
     } else {
       EulerM <- -digamma(1)
-      fv <- Locat + sigma * EulerM # When shape = 0, is Gumbel
+      fv <- Locat + sigma * EulerM  # When shape = 0, is Gumbel
       fv[!is.zero] <- Locat[!is.zero] + sigma[!is.zero] *
-                     (gamma(1-shape[!is.zero])-1) / shape[!is.zero]
-      fv[shape >= 1] <- NA # Mean exists only if shape < 1.
+                      (gamma(1-shape[!is.zero])-1) / shape[!is.zero]
+      fv[shape >= 1] <- NA  # Mean exists only if shape < 1.
     }
     fv
   }, list(
-            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+            .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
-           .tolshape0 = tolshape0 ))),
+            .tolshape0 = tolshape0 ))),
   last = eval(substitute(expression({
     misc$earg <- vector("list", M)
     names(misc$earg) <- c(mynames1, mynames2, mynames3)
@@ -426,8 +447,6 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
 
 
 
-
-
     misc$true.mu <- !length( .percentiles)  # @fitted is not a true mu
     misc$percentiles <- .percentiles
     misc$expected <- TRUE
@@ -437,18 +456,18 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     if (any(shape < -0.5))
       warning("some values of the shape parameter are less than -0.5")
   }), list(
-            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+            .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
             .tolshape0 = tolshape0, .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
-  function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
+  function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     Locat <- eta2theta(eta[, 1], .llocat , .elocat )
     sigma <- eta2theta(eta[, 2], .lscale , .escale )
     shape <- eta2theta(eta[, 3], .lshape , .eshape )
 
 
-    is.zero <- (abs(shape) < .tolshape0)
+    is.zero <- (abs(shape) < .tolshape0 )
     zedd <- (y-Locat) / sigma
     r.vec <- rowSums(cbind(!is.na(y)))
     A <- 1 + shape * (y-Locat)/sigma
@@ -456,31 +475,46 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     A1 <- A[cbind(ii, r.vec)]
     mytolerance <- 0  # .Machine$double.eps
     if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
+      if ( .giveWarning )
+        warning("There are", sum(bad),
+                "range violations in @loglikelihood")
+
       cat("There are", sum(bad),
           "range violations in @loglikelihood\n")
       flush.console()
     }
-      igev <- !is.zero &  !bad
-      igum <-  is.zero &  !bad
-      pow <- 1 + 1/shape[igev]
-      if (residuals) stop("loglikelihood residuals not ",
-                          "implemented yet") else {
-
-        old.answer <-
-          sum(bad) * (-1.0e10) +
-          sum(w[igum] * (-r.vec[igum]*log(sigma[igum]) -
-                         exp(-zedd[igum,r.vec]) -
-                         rowSums(cbind(zedd, na.rm = TRUE)))) +
-          sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
-                         pow*rowSums(cbind(log(A[igev])), na.rm = TRUE) -
-                         A1[igev]^(-1/shape[igev])))
-            old.answer
-      }
-  }, list( 
-            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
-            .elocat = elocat, .escale = escale, .eshape = eshape,
+    igev <- !is.zero &  !bad
+    igum <-  is.zero &  !bad
+    pow <- 1 + 1/shape[igev]
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+
+
+
 
 
+      old.answer <-
+        sum(bad) * (-1.0e10) +
+        sum(w[igum] * (-r.vec[igum] * log(sigma[igum]) -
+                  exp(-zedd[igum, r.vec[igum]]) -
+                  rowSums(cbind(zedd)[igum, , drop = FALSE], na.rm = TRUE))) +
+        sum(w[igev] * (-r.vec[igev] * log(sigma[igev]) -
+                       pow * rowSums(cbind(log(A[igev])), na.rm = TRUE) -
+                       A1[igev]^(-1/shape[igev])))
+
+      new.answer <-
+        sum(w * dgev(x = y, location = Locat, scale = sigma, shape = shape,
+                     tolshape0 = .tolshape0 ,
+                     giveWarning = .giveWarning,
+                     log = TRUE, oobounds.log = -1.0e04))
+      new.answer  # returned; 'old.answer' above is kept only as a cross-check
+    }
+  }, list( 
+            .llocat = llocat, .lscale = lscale, .lshape = lshape,
+            .elocat = elocat, .escale = escale, .eshape = eshape,
 
            .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
   vfamily = c("gev", "vextremes"),
@@ -492,13 +526,10 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     sigma <- eta2theta(eta[, 2], .lscale , .escale )
     shape <- eta2theta(eta[, 3], .lshape , .eshape )
 
-
-
     dmu.deta <- dtheta.deta(Locat, .llocat , .elocat )
     dsi.deta <- dtheta.deta(sigma, .lscale , .escale )
     dxi.deta <- dtheta.deta(shape, .lshape , .eshape )
 
-
     is.zero <- (abs(shape) < .tolshape0)
     ii <- 1:nrow(eta)
     zedd <- (y-Locat) / sigma
@@ -527,15 +558,15 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
       zorro <- zorro[is.zero]
       ezedd <- exp(-zorro)
       dl.dmu[is.zero] <- (1-ezedd) / sigma[is.zero]
-      dl.dsi[is.zero] <- (zorro * (1-ezedd) - 1) / sigma[is.zero]
-      dl.dxi[is.zero] <- zorro * ((1 - ezedd) * zorro / 2 - 1)
+      dl.dsi[is.zero] <- (zorro *  (1 - ezedd) - 1) / sigma[is.zero]
+      dl.dxi[is.zero] <-  zorro * ((1 - ezedd) * zorro / 2 - 1)
     }
 
     c(w) * cbind(dl.dmu * dmu.deta,
                  dl.dsi * dsi.deta,
                  dl.dxi * dxi.deta)
   }), list(
-            .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+            .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
 
             .tolshape0 = tolshape0 ))),
@@ -548,13 +579,13 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
     temp33 <- 1 - 2 * kay * ddd +
               kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
     temp23 <- -kay * dd + (1+(1-kay)^2) / (1-kay)
-    GR.gev <- function(j, ri, kay) gamma(ri - j*kay + 1) /  gamma(ri)
+    GR.gev <- function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
     tmp2 <- (1-kay)^2 * GR.gev(2, r.vec, kay)  # Latter is GR2
     tmp1 <- (1-2*kay) * GR.gev(1, r.vec, kay)  # Latter is GR1
     k0 <- (1-2*kay)
     k1 <- k0 * kay
     k2 <- k1 * kay
-    k3 <- k2 * kay # kay^3 * (1-2*kay)
+    k3 <- k2 * kay  # kay^3 * (1-2*kay)
 
     wz <- matrix(as.numeric(NA), n, 6)
     wz[, iam(1, 1, M)] <- tmp2 / (sigma^2 * k0)
@@ -570,33 +601,32 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
       if (ncol(y) > 1)
         stop("cannot handle shape == 0 with a multivariate response")
 
-    EulerM <- -digamma(1)
-    wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma^2
-    wz[is.zero, iam(3, 3, M)] <- 2.4236
-    wz[is.zero, iam(1, 2, M)] <- (digamma(2) +
-                                 2*(EulerM-1)) / sigma^2
-    wz[is.zero, iam(1, 3, M)]= -(trigamma(1)/2 + digamma(1)*
-                            (digamma(1)/2+1))/sigma
-    wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
-                            2*dgammadx(1, 2) +
-                            2*dgammadx(1, 3)/3) / sigma
-
-    if (FALSE ) {
+      EulerM <- -digamma(1)
+      wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma[is.zero]^2
+      wz[is.zero, iam(3, 3, M)] <- 2.4236
+      wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma[is.zero]^2
+      wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
+                                     (digamma(1)/2+1)) / sigma[is.zero]
+      wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
+                                   2*dgammadx(1, 2) +
+                                   2*dgammadx(1, 3)/3) / sigma[is.zero]
+
+      if (FALSE) {
         wz[, iam(1, 2, M)] <- 2 * r.vec / sigma^2
         wz[, iam(2, 2, M)] <- -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
-    (4 * dgammadx(r.vec+1, deriv.arg = 1) - 
-     3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec)  # Not checked
-        }
+                              (4 * dgammadx(r.vec+1, deriv.arg = 1) - 
+          3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec)  # Not checked
+      }
     }
 
     wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
     wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
     wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dxi.deta^2
-    wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
+    wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta *   dsi.deta
     wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
     wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
     c(w) * wz
-  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape ))))
 }
 
@@ -628,20 +658,27 @@ dgammadx <- function(x, deriv.arg = 1) {
 
 
 
+
+
  egev <- function(llocation = "identitylink",
                   lscale = "loge",
                   lshape = logoff(offset = 0.5),
                   percentiles = c(95, 99),
                   iscale = NULL, ishape = NULL,
                   imethod = 1, gshape = c(-0.45, 0.45),
-                  tolshape0 = 0.001, giveWarning = TRUE,
-                  zero = 3) {
+                  tolshape0 = 0.001,
+                  type.fitted = c("percentiles", "mean"),
+                  giveWarning = TRUE,
+                  zero = 2:3) {
   if (!is.logical(giveWarning) || length(giveWarning) != 1)
     stop("bad input for argument 'giveWarning'")
   if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("bad input for argument 'iscale'")
 
 
+  type.fitted <- match.arg(type.fitted,
+                           c("percentiles", "mean"))[1]
+
   llocat <- as.list(substitute(llocation))
   elocat <- link2list(llocat)
   llocat <- attr(elocat, "function.name")
@@ -682,13 +719,27 @@ dgammadx <- function(x, deriv.arg = 1) {
   new("vglmff",
   blurb = c("Generalized extreme value distribution\n",
           "Links:    ",
-          namesof("location", link = llocat,  earg = elocat), ", ", 
+          namesof("location", link = llocat, earg = elocat), ", ", 
           namesof("scale",    link = lscale, earg = escale), ", ",
           namesof("shape",    link = lshape, earg = eshape)),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero, M)
+      constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 3,
+         multipleResponses = FALSE,
+         type.fitted = .type.fitted ,
+         zero = .zero )
+  }, list( .zero = zero,
+           .type.fitted = type.fitted ))),
+
   initialize = eval(substitute(expression({
+    M1 <- extra$M1 <- 3
+    ncoly <- ncol(y)
+    extra$ncoly <- ncoly
+    extra$type.fitted <- .type.fitted
+    extra$M1 <- M1
+
     predictors.names <-
      c(namesof("location", .llocat , earg = .elocat , short = TRUE),
        namesof("scale",    .lscale , earg = .escale , short = TRUE),
@@ -709,32 +760,31 @@ dgammadx <- function(x, deriv.arg = 1) {
       eshape <- .eshape
       if ( .lshape == "elogit" && length(init.xi) && 
          (any(init.xi <= eshape$min | init.xi >= eshape$max)))
-          stop("bad input for argument 'eshape'")
+        stop("bad input for argument 'eshape'")
       if ( .imethod == 1) {
-          nvector <- 4:10   # Arbitrary; could be made an argument
-          ynvector <- quantile(y, probs = 1-1/nvector)
-          objecFunction <- -Inf   # Actually the log-likelihood
-          est.sigma <- !length(init.sig)
-          gshape <- .gshape
-          temp234 <- if (length(init.xi)) init.xi[1] else
-                     seq(gshape[1], gshape[2], length.out = 12)
-          for (xi.try in temp234) {
-            xvec <- if (abs(xi.try) < .tolshape0) log(nvector) else
-                    (nvector^xi.try - 1) / xi.try
-            fit0 <- lsfit(x = xvec, y=ynvector, intercept = TRUE)
-            if (est.sigma) {
-              sigmaTry <- rep(fit0$coef["X"], length.out = length(y))
-            } else { 
-              sigmaTry <- init.sig
-            }
-            muTry <- rep(fit0$coef["Intercept"],
-                         length.out = length(y))
-            llTry <- egev(giveWarning = FALSE)@loglikelihood(mu = NULL,
-                                                 y = y, w = w,
-                                                 residuals = FALSE,
-            eta = cbind(theta2eta(muTry,    .llocat , earg = .elocat ),
-                        theta2eta(sigmaTry, .lscale , earg = .escale ), 
-                        theta2eta(xi.try,   .lshape , earg = .eshape )))
+        nvector <- 4:10   # Arbitrary; could be made an argument
+        ynvector <- quantile(y, probs = 1-1/nvector)
+        objecFunction <- -Inf   # Actually the log-likelihood
+        est.sigma <- !length(init.sig)
+        gshape <- .gshape
+        temp234 <- if (length(init.xi)) init.xi[1] else
+                   seq(gshape[1], gshape[2], length.out = 12)
+        for (xi.try in temp234) {
+          xvec <- if (abs(xi.try) < .tolshape0 ) log(nvector) else
+                  (nvector^xi.try - 1) / xi.try
+          fit0 <- lsfit(x = xvec, y = ynvector, intercept = TRUE)
+          if (est.sigma) {
+            sigmaTry <- rep(fit0$coef["X"], length.out = length(y))
+          } else { 
+            sigmaTry <- init.sig
+          }
+          muTry <- rep(fit0$coef["Intercept"], length.out = length(y))
+          llTry <- egev(giveWarning = FALSE)@loglikelihood(mu = NULL,
+                                             y = y, w = w,
+                                             residuals = FALSE,
+          eta = cbind(theta2eta(muTry,    .llocat , earg = .elocat ),
+                      theta2eta(sigmaTry, .lscale , earg = .escale ),
+                      theta2eta(xi.try,   .lshape , earg = .eshape )))
           if (llTry >= objecFunction) {
             if (est.sigma)
               init.sig <- sigmaTry
@@ -746,21 +796,20 @@ dgammadx <- function(x, deriv.arg = 1) {
         if (!length(init.xi))
           init.xi <- rep(bestxi, length.out = length(y))
 
-            } else {
-            init.xi = rep(if (length(init.xi)) init.xi else 0.05,
-                          length.out = length(y))
-            if (!length(init.sig))
-                init.sig <- rep(sqrt(6*var(y))/pi,
-                               length.out = length(y))
-            EulerM <- -digamma(1)
-            init.mu <- rep(median(y) - EulerM * init.sig,
-                          length.out = length(y))
+        } else {
+          init.xi <- rep(if (length(init.xi)) init.xi else 0.05,
+                         length.out = length(y))
+          if (!length(init.sig))
+            init.sig <- rep(sqrt(6 * var(y)) / pi, length.out = length(y))
+          EulerM <- -digamma(1)
+          init.mu <- rep(median(y) - EulerM * init.sig,
+                         length.out = length(y))
         }
-        bad <- (1 + init.xi*(y-init.mu)/init.sig <= 0)
+        bad <- (1 + init.xi * (y - init.mu) / init.sig <= 0)
         if (fred <- sum(bad, na.rm = TRUE)) {
           warning(paste(fred, "observations violating boundary",
           "constraints while initializing. Taking corrective action."))
-          init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
+          init.xi[bad] <- ifelse(y[bad] > init.mu[bad], 0.01, -0.01)
         }
 
       extra$percentiles <- .percentiles
@@ -770,29 +819,41 @@ dgammadx <- function(x, deriv.arg = 1) {
               theta2eta(init.sig, .lscale ,    earg = .escale ), 
               theta2eta(init.xi,  .lshape ,    earg = .eshape ))
     }
-  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .percentiles = percentiles, .tolshape0 = tolshape0,
-            .imethod = imethod,
+            .imethod = imethod, .type.fitted = type.fitted,
             .giveWarning= giveWarning,
             .iscale = iscale, .ishape = ishape, .gshape = gshape ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     loc   <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
     xi    <- eta2theta(eta[, 3], .lshape , earg = .eshape )
-    is.zero <- (abs(xi) < .tolshape0)
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning 'percentiles'.")
+                     "percentiles"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("percentiles", "mean"))[1]
+
+
+
+    is.zero <- (abs(xi) < .tolshape0 )
     cent <- extra$percentiles
     LP <- length(cent)
-    fv <- matrix(as.numeric(NA), nrow(eta), LP)
-    if (LP) {
+    if (type.fitted == "percentiles" &&  # Upward compatibility:
+        LP > 0) {
+      fv <- matrix(as.numeric(NA), nrow(eta), LP)
       for (ii in 1:LP) {
-        yp <- -log(cent[ii]/100)
-        fv[!is.zero,ii] <- loc[!is.zero] - sigma[!is.zero] *
-                        (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
-        fv[is.zero,ii] <- loc[is.zero] - sigma[is.zero] * log(yp)
+        yp <- -log(cent[ii] / 100)
+        fv[!is.zero, ii] <- loc[!is.zero] - sigma[!is.zero] *
+                            (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
+        fv[is.zero, ii] <- loc[is.zero] - sigma[is.zero] * log(yp)
       }
       dimnames(fv) <- list(dimnames(eta)[[1]],
-                          paste(as.character(cent), "%", sep = ""))
+                           paste(as.character(cent), "%", sep = ""))
     } else {
       EulerM <- -digamma(1)
       fv <- loc + sigma * EulerM  # When xi = 0, is Gumbel
@@ -801,9 +862,9 @@ dgammadx <- function(x, deriv.arg = 1) {
       fv[xi >= 1] <- NA  # Mean exists only if xi < 1.
     }
     fv
-  }, list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape,
-           .tolshape0 = tolshape0 ))),
+           .type.fitted = type.fitted, .tolshape0 = tolshape0 ))),
   last = eval(substitute(expression({
     misc$links <-   c(location = .llocat, 
                       scale    = .lscale ,
@@ -820,7 +881,7 @@ dgammadx <- function(x, deriv.arg = 1) {
     misc$expected <- TRUE 
     if (any(xi < -0.5))
       warning("some values of the shape parameter are less than -0.5")
-  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .tolshape0 = tolshape0,  .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
@@ -834,25 +895,25 @@ dgammadx <- function(x, deriv.arg = 1) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <- c(w) * dgev(x = y, location = mmu, scale = sigma,
-                             shape = xi, tolshape0 = .tolshape0,
+                             shape = xi, tolshape0 = .tolshape0 ,
                              log = TRUE, oobounds.log = -1.0e04,
-                             giveWarning = .giveWarning)
+                             giveWarning = .giveWarning )
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+  }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape,
-           .giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
+           .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
   vfamily = c("egev", "vextremes"),
   deriv = eval(substitute(expression({
     Locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
-    xi <- eta2theta(eta[, 3], .lshape , earg = .eshape)
+    xi    <- eta2theta(eta[, 3], .lshape , earg = .eshape)
     is.zero <- (abs(xi) < .tolshape0)
-    zedd <- (y-Locat) / sigma
+    zedd <- (y - Locat) / sigma
     A <- 1 + xi * zedd
     dA.dxi <- zedd
     dA.dmu <- -xi / sigma
@@ -880,7 +941,7 @@ dgammadx <- function(x, deriv.arg = 1) {
     c(w) * cbind(dl.dmu * dmu.deta,
                  dl.dsi * dsi.deta,
                  dl.dxi * dxi.deta)
-  }), list( .llocat = llocat,  .lscale = lscale, .lshape = lshape,
+  }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape,
             .tolshape0 = tolshape0 ))),
   weight = eval(substitute(expression({
@@ -895,23 +956,23 @@ dgammadx <- function(x, deriv.arg = 1) {
     qq <- temp100 * (digamma(1-kay) - (1-kay)/kay)
     wz <- matrix(as.numeric(NA), n, 6)
     wz[, iam(1, 1, M)] <- pp / sigma^2
-    wz[, iam(2, 2, M)] <- (1-2*temp100 + pp) / (sigma * kay)^2
+    wz[, iam(2, 2, M)] <- (1 - 2*temp100 + pp) / (sigma * kay)^2
     EulerM <- -digamma(1)
     wz[, iam(3, 3, M)] <- (pi^2 / 6 + (1-EulerM-1/kay)^2 +
-                       (2*qq + pp/kay)/kay) / kay^2 
+                          (2*qq + pp/kay)/kay) / kay^2 
     wz[, iam(1, 2, M)] <- (pp - temp100) / (sigma^2 * kay)
     wz[, iam(1, 3, M)] <- -(qq + pp/kay) / (sigma * kay)
     wz[, iam(2, 3, M)] <- (1-EulerM - (1-temp100)/kay - qq -
                         pp/kay) / (sigma * kay^2)
     if (any(is.zero)) {
-      wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma^2
+      wz[is.zero, iam(2, 2, M)] <- (pi^2/6 + (1-EulerM)^2) / sigma[is.zero]^2
       wz[is.zero, iam(3, 3, M)] <- 2.4236
-      wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma^2
+      wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma[is.zero]^2
       wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
-                                 (digamma(1)/2+1))/sigma
+                                     (digamma(1)/2+1))/sigma[is.zero]
       wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
-                              2*dgammadx(1, 2) +
-                              2*dgammadx(1, 3)/3)/sigma
+                                   2*dgammadx(1, 2) +
+                                   2*dgammadx(1, 3) / 3) / sigma[is.zero]
     }
     wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
     wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
@@ -968,6 +1029,8 @@ pgumbel <- function(q, location = 0, scale = 1) {
 }
 
 
+
+
  gumbel <- function(llocation = "identitylink",
                     lscale = "loge",
                     iscale = NULL,
@@ -1006,7 +1069,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
             namesof("location", llocat,  earg = elocat ), ", ",
             namesof("scale",    lscale, earg = escale )),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1052,7 +1115,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
               theta2eta( sc.init, .lscale , earg = .escale ))
     }
 
-  }), list( .llocat = llocat,  .lscale = lscale,
+  }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale,
                               .iscale = iscale,
             .R = R, .mpv = mpv, .percentiles = percentiles ))),
@@ -1069,12 +1132,14 @@ pgumbel <- function(q, location = 0, scale = 1) {
       Rvec <- extra$R
       for (ii in 1:LP) {
         ci <- if (is.Numeric(Rvec))
-               Rvec * (1 - Percentiles[ii] / 100) else
-               -log(Percentiles[ii] / 100)
-        mu[,ii] <- loc - sigma * log(ci)
+              Rvec * (1 - Percentiles[ii] / 100) else
+              -log(Percentiles[ii] / 100)
+        mu[, ii] <- loc - sigma * log(ci)
       }
       if (mpv) 
-        mu[,ncol(mu)] <- loc - sigma * log(log(2))
+        mu[, ncol(mu)] <- loc - sigma * log(log(2))
+
+
     dmn2 <- paste(as.character(Percentiles), "%", sep = "")
     if (mpv) 
       dmn2 <- c(dmn2, "MPV")
@@ -1084,7 +1149,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
     mu <- loc + sigma * EulerM
   }
   mu
-  }, list( .llocat = llocat,  .lscale = lscale,
+  }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
 
   last = eval(substitute(expression({
@@ -1094,9 +1159,9 @@ pgumbel <- function(q, location = 0, scale = 1) {
 
     misc$R <- .R
     misc$mpv <- .mpv
-    misc$true.mu <- !length( .percentiles)  # @fitted is not a true mu
+    misc$true.mu <- !length( .percentiles )  # @fitted is not a true mu
     misc$percentiles <- .percentiles
-  }), list( .llocat = llocat,  .lscale = lscale,
+  }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale,
             .percentiles = percentiles,
             .mpv = mpv, .R = R ))),
@@ -1108,12 +1173,12 @@ pgumbel <- function(q, location = 0, scale = 1) {
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
 
     r.vec <- rowSums(cbind(!is.na(y)))
-    yiri <- y[cbind(1:nrow(y),r.vec)]
+    yiri <- y[cbind(1:nrow(y), r.vec)]
     ans <- -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
     max.r.vec <- max(r.vec)
     for (jay in 1:max.r.vec) {
       index <- (jay <= r.vec)
-      ans[index] <- ans[index] - (y[index,jay]-loc[index]) / sigma[index]
+      ans[index] <- ans[index] - (y[index,jay] - loc[index]) / sigma[index]
     }
 
 
@@ -1127,28 +1192,28 @@ pgumbel <- function(q, location = 0, scale = 1) {
         ll.elts
       }
     }
-  }, list( .llocat = llocat,  .lscale = lscale,
+  }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   deriv = eval(substitute(expression({
-    loc   <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+    locat <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
 
     r.vec <- rowSums(cbind(!is.na(y)))
-    yiri <- y[cbind(1:nrow(y),r.vec)]
+    yiri <- y[cbind(1:nrow(y), r.vec)]
     yi.bar <- rowMeans(y, na.rm = TRUE)
-    temp2 <- (yiri - loc) / sigma
+    temp2 <- (yiri - locat) / sigma
     term2 <- exp(-temp2)
 
-    dloc.deta <- dtheta.deta(loc, .llocat,  earg = .elocat)
+    dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
     dsigma.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
 
-    dl.dloc <- (r.vec - term2) / sigma
-    dl.dsigma <- (rowSums((y - loc) / sigma, na.rm = TRUE) -
+    dl.dlocat <- (r.vec - term2) / sigma
+    dl.dsigma <- (rowSums((y - locat) / sigma, na.rm = TRUE) -
                  r.vec - temp2 * term2) / sigma
 
-    c(w) * cbind(dl.dloc   * dloc.deta,
+    c(w) * cbind(dl.dlocat * dlocat.deta,
                  dl.dsigma * dsigma.deta)
-  }), list( .llocat = llocat,  .lscale = lscale,
+  }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale ))),
   weight = eval(substitute(expression({
     temp6 <- digamma(r.vec)  # , integer = T
@@ -1163,8 +1228,8 @@ pgumbel <- function(q, location = 0, scale = 1) {
     wz[, iam(2, 2, M)] <- (2*(r.vec+1)*temp6 + r.vec*(trigamma(r.vec) +
                           temp6^2) + 2 - r.vec - 2*temp5) / sigma^2
 
-    wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dloc.deta^2
-    wz[, iam(2, 1, M)] <- wz[, iam(2, 1, M)] * dsigma.deta * dloc.deta
+    wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dlocat.deta^2
+    wz[, iam(2, 1, M)] <- wz[, iam(2, 1, M)] * dsigma.deta * dlocat.deta
     wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsigma.deta^2
 
     c(w) * wz
@@ -1173,6 +1238,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
 
 
 
+
 rgpd <- function(n, location = 0, scale = 1, shape = 0) {
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
@@ -1225,7 +1291,7 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
   if (length(shape)    != L)
     shape    <- rep(shape,        length.out = L)
   if (length(location) != L)
-    location <- rep(location,     length.out = L); 
+    location <- rep(location,     length.out = L)
   if (length(scale)    != L)
     scale    <- rep(scale,        length.out = L)
   if (length(x)        != L)
@@ -1287,7 +1353,7 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0) {
   if (length(shape)    != use.n)
     shape    <- rep(shape,        length.out = use.n)
   if (length(location) != use.n)
-    location <- rep(location,     length.out = use.n); 
+    location <- rep(location,     length.out = use.n)
   if (length(scale)    != use.n)
     scale    <- rep(scale,        length.out = use.n)
   if (length(q)        != use.n)
@@ -1354,23 +1420,23 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
 
 
 
+
+
+
  gpd <- function(threshold = 0,
           lscale = "loge",
           lshape = logoff(offset = 0.5),
           percentiles = c(90, 95),
           iscale = NULL,
           ishape = NULL, 
-          tolshape0 = 0.001, giveWarning = TRUE,
+          tolshape0 = 0.001,
+          type.fitted = c("percentiles", "mean"),
+          giveWarning = TRUE,
           imethod = 1,
           zero = -2) {
-  if (!is.logical(giveWarning) || length(giveWarning) != 1)
-    stop("bad input for argument 'giveWarning'")
-  if (!is.Numeric(threshold)) 
-    stop("bad input for argument 'threshold'")
-  if (!is.Numeric(imethod, length.arg = 1,
-                  positive = TRUE, integer.valued = TRUE) ||
-     imethod > 2.5)
-    stop("argument 'imethod' must be 1 or 2")
+
+  type.fitted <- match.arg(type.fitted,
+                           c("percentiles", "mean"))[1]
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
@@ -1381,6 +1447,15 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
   lshape <- attr(eshape, "function.name")
 
 
+  if (!is.logical(giveWarning) || length(giveWarning) != 1)
+    stop("bad input for argument 'giveWarning'")
+  if (!is.Numeric(threshold)) 
+    stop("bad input for argument 'threshold'")
+  if (!is.Numeric(imethod, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE) ||
+     imethod > 2.5)
+    stop("argument 'imethod' must be 1 or 2")
+
   if (length(percentiles) && 
     (!is.Numeric(percentiles, positive = TRUE) ||
      max(percentiles) >= 100))
@@ -1402,14 +1477,15 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
  constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          Q1 = 1,
+         type.fitted = .type.fitted ,
          zero = .zero )
-  }, list( .zero = zero
+  }, list( .zero = zero, .type.fitted = type.fitted
          ))),
 
 
@@ -1436,6 +1512,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
     if (length(y.names) != ncoly)
       y.names <- paste("Y", 1:ncoly, sep = "")
     extra$y.names <- y.names
+    extra$type.fitted <- .type.fitted
+    extra$percentiles <- .percentiles
 
 
 
@@ -1445,6 +1523,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
       orig.y <- y
     }
     ystar <- as.matrix(y - Threshold)  # Operate on ystar
+    if (min(ystar, na.rm = TRUE) < 0)
+      stop("some response values, after subtracting ",
+           "argument 'threshold', are negative. ",
+           "Maybe argument 'subset' should be used. ",
+           "A threshold value no more than ", min(orig.y, na.rm = TRUE),
+           " is needed.")
     extra$threshold <- Threshold
 
 
@@ -1496,7 +1580,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
             .iscale = iscale, .ishape = ishape,
             .escale = escale, .eshape = eshape,
             .percentiles = percentiles,
-            .threshold = threshold,
+            .threshold = threshold, .type.fitted = type.fitted,
             .imethod = imethod ))),
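
gpd() gains the same 'type.fitted' argument, and its initialize slot now
stops early, with an informative message, if any response fails to
exceed 'threshold'.  A sketch of both changes (simulated exceedances;
names illustrative):

    set.seed(4)
    gdata <- data.frame(y = 1 + rgpd(100, scale = 2, shape = 0.2))
    fit <- vglm(y ~ 1, gpd(threshold = 1, type.fitted = "mean"),
                data = gdata)
    head(fitted(fit), 3)  # threshold + sigma/(1 - shape); Inf if shape >= 1
    ## vglm(y ~ 1, gpd(threshold = 5), data = gdata)  # would now stop()
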
 
 
@@ -1510,8 +1594,20 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
       shape <- as.matrix(shape)
 
 
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning 'percentiles'.")
+                     "percentiles"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("percentiles", "mean"))[1]
+
+
     M1 <- 2
-    pcent <- .percentiles
+    pcent <- extra$percentiles  # Post-20140912
+
+
     LP <- length(pcent)  # NULL means LP == 0 and the mean is returned
     ncoly <- ncol(eta) / M1
     if (!length(y.names <- extra$y.names))
@@ -1521,7 +1617,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
 
 
 
-    if (LP) {
+    if (type.fitted == "percentiles" &&  # Upward compatibility:
+        LP > 0) {
 
 
 
@@ -1575,7 +1672,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
       colnames(fv) <- colnames.cumsum.fv
     } else {
       fv <- Threshold + sigma / (1 - shape)
-      fv[shape >= 1] <- NA # Mean exists only if shape < 1.
+      fv[shape >= 1] <- Inf  # Mean exists only if shape < 1.
       dimnames(fv) <- list(dimnames(eta)[[1]], y.names)
     }
 
@@ -1583,8 +1680,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape,
            .threshold = threshold,
-           .tolshape0 = tolshape0,
-           .percentiles = percentiles ))),
+           .tolshape0 = tolshape0 ))),
 
 
 
@@ -1767,6 +1863,7 @@ setMethod("meplot", "vlm",
 
 
 
+
 guplot.default <-
   function(y, main = "Gumbel Plot",
            xlab = "Reduced data",
@@ -1852,7 +1949,7 @@ setMethod("guplot", "vlm",
             "Mean:     location + scale*0.5772..\n",
             "Variance: pi^2 * scale^2 / 6"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- cbind(y)
@@ -1885,7 +1982,7 @@ setMethod("guplot", "vlm",
         cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
               theta2eta(sca.init, .lscale , earg = .escale ))
     }
-  }), list( .llocat = llocat,  .lscale = lscale,
+  }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale,
                               .iscale = iscale, 
             .R = R, .mpv = mpv, .percentiles = percentiles ))),
@@ -1913,7 +2010,7 @@ setMethod("guplot", "vlm",
       dmn2 <- c(dmn2, "MPV")
     dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
     mu
-  }, list( .llocat = llocat,  .lscale = lscale,
+  }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   last = eval(substitute(expression({
     misc$link <-    c(location = .llocat,  scale = .lscale) 
@@ -1922,7 +2019,7 @@ setMethod("guplot", "vlm",
     misc$R <- .R
     misc$mpv <- .mpv
     misc$percentiles = .percentiles
-  }), list( .llocat = llocat,  .lscale = lscale, .mpv = mpv,
+  }), list( .llocat = llocat, .lscale = lscale, .mpv = mpv,
             .elocat = elocat, .escale = escale,
             .R = R, .percentiles = percentiles ))),
   loglikelihood = eval(substitute(
@@ -1941,7 +2038,7 @@ setMethod("guplot", "vlm",
         ll.elts
       }
     }
-  }, list( .llocat = llocat,  .lscale = lscale,
+  }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale ))),
   vfamily = "egumbel",
   deriv = eval(substitute(expression({
@@ -1955,7 +2052,7 @@ setMethod("guplot", "vlm",
     dsca.deta <- dtheta.deta(sca, .lscale , earg = .escale )
     c(w) * cbind(dl.dloc * dloc.deta,
                  dl.dsca * dsca.deta)
-  }), list( .llocat = llocat,  .lscale = lscale,
+  }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale ))),
   weight=expression({
     digamma1 <- digamma(1)
@@ -1975,10 +2072,10 @@ setMethod("guplot", "vlm",
 
 
 
- cgumbel <- function(llocation = "identitylink",
-                     lscale = "loge",
-                     iscale = NULL,
-                     mean = TRUE, percentiles = NULL, zero = 2) {
+ cens.gumbel <- function(llocation = "identitylink",
+                         lscale = "loge",
+                         iscale = NULL,
+                         mean = TRUE, percentiles = NULL, zero = 2) {
   llocat <- as.list(substitute(llocation))
   elocat <- link2list(llocat)
   llocat <- attr(elocat, "function.name")
@@ -2009,7 +2106,7 @@ setMethod("guplot", "vlm",
             "Mean:     location + scale*0.5772..\n",
             "Variance: pi^2 * scale^2 / 6"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- cbind(y)
@@ -2034,15 +2131,15 @@ setMethod("guplot", "vlm",
       namesof("scale",    .lscale ,    earg = .escale   , tag = FALSE))
 
     if (!length(etastart)) {
-      sc.init <-  if (is.Numeric( .iscale, positive = TRUE)) 
-                     .iscale else 1.1 * sqrt(var(y) * 6 ) / pi
-      sc.init <- rep(sc.init, length.out = n)
+      sca.init <-  if (is.Numeric( .iscale, positive = TRUE)) 
+                      .iscale else 1.1 * sqrt(var(y) * 6 ) / pi
+      sca.init <- rep(sca.init, length.out = n)
       EulerM <- -digamma(1)
-      loc.init <- (y - sc.init * EulerM)
+      loc.init <- (y - sca.init * EulerM)
       loc.init[loc.init <= 0] = min(y)
       etastart <-
         cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
-              theta2eta(sc.init,  .lscale    , earg = .escale ))
+              theta2eta(sca.init, .lscale , earg = .escale ))
     }
   }), list( .lscale = lscale, .iscale = iscale,
             .llocat = llocat, 
@@ -2095,7 +2192,7 @@ setMethod("guplot", "vlm",
   }, list( .lscale = lscale,
            .llocat = llocat, 
            .elocat = elocat, .escale = escale ))),
-  vfamily = "cgumbel",
+  vfamily = "cens.gumbel",
   deriv = eval(substitute(expression({
     cenL <- extra$leftcensored
     cenU <- extra$rightcensored
@@ -2162,6 +2259,7 @@ setMethod("guplot", "vlm",
 
 
 
+
 dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -2224,18 +2322,18 @@ rfrechet <- function(n, location = 0, scale = 1, shape) {
 
 
 
-frechet2.control <- function(save.weight = TRUE, ...) {
+frechet.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
 
 
- frechet2 <- function(location = 0,
-                      lscale = "loge",
-                      lshape = logoff(offset = -2),
-                      iscale = NULL, ishape = NULL,
-                      nsimEIM = 250,
-                      zero = NULL) {
+ frechet <- function(location = 0,
+                     lscale = "loge",
+                     lshape = logoff(offset = -2),
+                     iscale = NULL, ishape = NULL,
+                     nsimEIM = 250,
+                     zero = NULL) {
 
   if (!is.Numeric(location))
     stop("bad input for argument 'location'")
@@ -2259,7 +2357,7 @@ frechet2.control <- function(save.weight = TRUE, ...) {
             namesof("scale", link = lscale, earg = escale ), ", ",
             namesof("shape", link = lshape, earg = eshape )),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2300,9 +2398,9 @@ frechet2.control <- function(save.weight = TRUE, ...) {
 
       shape.grid <- c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
       shape.grid <- c(1 / shape.grid, 1, shape.grid)
-      try.this <- getMaxMin(shape.grid, objfun = frech.aux,
-                           y = y,  x = x, w = w, maximize = FALSE,
-                           abs.arg = TRUE)
+      try.this <- grid.search(shape.grid, objfun = frech.aux,
+                              y = y,  x = x, w = w, maximize = FALSE,
+                              abs.arg = TRUE)
 
       shape.init <- if (length( .ishape ))
         rep( .ishape , length.out = n) else {
@@ -2372,7 +2470,7 @@ frechet2.control <- function(save.weight = TRUE, ...) {
     }
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape ))),
-  vfamily = c("frechet2", "vextremes"),
+  vfamily = c("frechet", "vextremes"),
   deriv = eval(substitute(expression({
     loctn <- extra$location
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
@@ -2399,9 +2497,9 @@ frechet2.control <- function(save.weight = TRUE, ...) {
 
     if (length( .nsimEIM )) {
       for (ii in 1:( .nsimEIM )) {
-          ysim <- rfrechet(n, location = loctn, scale = Scale, shape = shape)
+        ysim <- rfrechet(n, location = loctn, scale = Scale, shape = shape)
 
-          rzedd <- Scale / (ysim - loctn)   # reciprocal of zedd
+          rzedd <- Scale / (ysim - loctn)  # reciprocal of zedd
           dl.dloctn <- (shape + 1) / (ysim - loctn) -
                       (shape / (ysim - loctn)) * (rzedd)^shape
           dl.dScale <- shape * (1 - rzedd^shape) / Scale
@@ -2473,7 +2571,7 @@ if (FALSE)
             namesof("scale",      link = lscale, earg = escale), ", ",
             namesof("shape",      link = lshape, earg = eshape)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (ncol(cbind(y)) != 1)
@@ -2503,9 +2601,9 @@ if (FALSE)
 
       shape.grid <- c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
       shape.grid <- c(1 / shape.grid, 1, shape.grid)
-      try.this <- getMaxMin(shape.grid, objfun = frech.aux,
-                           y = y,  x = x, w = w, maximize = FALSE,
-                           abs.arg = TRUE)
+      try.this <- grid.search(shape.grid, objfun = frech.aux,
+                              y = y,  x = x, w = w, maximize = FALSE,
+                              abs.arg = TRUE)
 
       shape.init <-
         if (length( .ishape ))
@@ -2681,12 +2779,12 @@ if (FALSE)
 }
 
 
-recnormal.control <- function(save.weight = TRUE, ...) {
+rec.normal.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
 
- recnormal <- function(lmean = "identitylink", lsd = "loge",
+ rec.normal <- function(lmean = "identitylink", lsd = "loge",
                        imean = NULL, isd = NULL, imethod = 1,
                        zero = NULL) {
   lmean <- as.list(substitute(lmean))
@@ -2715,7 +2813,7 @@ recnormal.control <- function(save.weight = TRUE, ...) {
             "\n",
             "Variance: sd^2"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero, M)
+      constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2779,7 +2877,7 @@ recnormal.control <- function(save.weight = TRUE, ...) {
       }
     }
   }, list( .lsdev = lsdev, .esdev = esdev ))),
-  vfamily = c("recnormal"),
+  vfamily = c("rec.normal"),
   deriv = eval(substitute(expression({
     NN <- nrow(eta)
     mymu <- eta2theta(eta[, 1], .lmean)
@@ -2822,12 +2920,12 @@ recnormal.control <- function(save.weight = TRUE, ...) {
 
 
 
-recexp1.control <- function(save.weight = TRUE, ...) {
+rec.exp1.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
 
 
- recexp1 <- function(lrate = "loge", irate = NULL, imethod = 1) {
+ rec.exp1 <- function(lrate = "loge", irate = NULL, imethod = 1) {
   lrate <- as.list(substitute(lrate))
   erate <- link2list(lrate)
   lrate <- attr(erate, "function.name")
@@ -2900,7 +2998,7 @@ recexp1.control <- function(save.weight = TRUE, ...) {
       }
     }
   }, list( .lrate = lrate, .erate = erate ))),
-  vfamily = c("recexp1"),
+  vfamily = c("rec.exp1"),
   deriv = eval(substitute(expression({
     NN <- length(eta)
     rate <- c(eta2theta(eta, .lrate , .erate ))
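
As context for the getMaxMin() -> grid.search() change above: both
helpers evaluate an objective function over a coarse grid of candidate
shape values and keep the best one.  A minimal base-R sketch of the
idea (negloglik is a made-up objective; the family functions above
minimize an internal criterion, frech.aux, instead):

    library(VGAM)  # for dfrechet() / rfrechet(), defined in this file
    negloglik <- function(shape, y) {
      scale.try <- mean(y)  # crude plug-in scale, for illustration only
      -sum(dfrechet(y, location = 0, scale = scale.try,
                    shape = shape, log = TRUE))
    }
    shape.grid <- c(100, 70, 40, 20, 12, 8, 4, 2, 1.5)
    shape.grid <- c(1 / shape.grid, 1, shape.grid)
    set.seed(1)
    y <- rfrechet(100, location = 0, scale = 1, shape = 3)
    vals <- sapply(shape.grid, negloglik, y = y)
    shape.grid[which.min(vals)]  # grid point nearest the true shape
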
diff --git a/R/family.functions.R b/R/family.functions.R
index 9e0a873..4ad19ba 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -93,9 +93,8 @@ dneg.binomial <- function(x, k, prob) {
 }
 
 
-tapplymat1 <-
-  function(mat,
-           function.arg = c("cumsum", "diff", "cumprod")) {
+
+tapplymat1 <- function(mat, function.arg = c("cumsum", "diff", "cumprod")) {
 
 
   if (!missing(function.arg))
@@ -103,26 +102,21 @@ tapplymat1 <-
   function.arg <- match.arg(function.arg,
                             c("cumsum", "diff", "cumprod"))[1]
 
-  type <-
-    switch(function.arg,
-           cumsum = 1,
-           diff = 2,
-           cumprod = 3,
-           stop("function.arg not matched"))
+  type <- switch(function.arg, cumsum = 1, diff = 2, cumprod = 3,
+                 stop("argument 'function.arg' not matched"))
 
   if (!is.matrix(mat))
     mat <- as.matrix(mat)
-  nr <- nrow(mat)
-  nc <- ncol(mat)
-  fred <- .C("tapply_mat1", mat = as.double(mat),
-             as.integer(nr), as.integer(nc), as.integer(type))
-
-  dim(fred$mat) <- c(nr, nc)
+  NR <- nrow(mat)
+  NC <- ncol(mat)
+  fred <- .C("tapply_mat1", mat = as.double(mat), as.integer(NR),
+             as.integer(NC), as.integer(type), PACKAGE = "VGAM")
+  dim(fred$mat) <- c(NR, NC)
   dimnames(fred$mat) <- dimnames(mat)
   switch(function.arg,
-      cumsum = fred$mat,
-      diff = fred$mat[, -1, drop = FALSE],
-      cumprod = fred$mat)
+         cumsum  = fred$mat,
+         diff    = fred$mat[, -1, drop = FALSE],
+         cumprod = fred$mat)
 }
 
 
@@ -203,7 +197,7 @@ wweighted.mean <- function(y, w = NULL, matrix.arg = TRUE) {
       denom <- t(w) %*% rep(1, n)
       denom <- matrix(denom, 1, length(denom))
       if (matrix.arg)
-        denom <- m2adefault(denom, M = M)[,,1]
+        denom <- m2a(denom, M = M)[, , 1]
       c(solve(denom, numer))
     }
   }
diff --git a/R/family.genetic.R b/R/family.genetic.R
index dbd7927..f3eb6a1 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -13,106 +13,153 @@
 
 
 
- G1G2G3 <- function(link = "logit",
+ A1A2A3 <- function(link = "logit", inbreeding = TRUE,
                     ip1 = NULL, ip2 = NULL, iF = NULL) {
 
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
+  if (!is.logical(inbreeding) || length(inbreeding) > 1)
+    stop("argument 'inbreeding' must be a single logical")
+
 
   new("vglmff",
-  blurb = c("G1-G2-G3 phenotype\n\n",
+  blurb = c("G1-G2-G3 phenotype (",
+            ifelse(inbreeding, "with", "without"),
+            " the Hardy-Weinberg equilibrium assumption)\n\n",
             "Links:    ",
-            namesof("p1", link, earg = earg), ", ", 
-            namesof("p2", link, earg = earg), ", ", 
-            namesof("f",  link, earg = earg, tag = FALSE)),
+            namesof("p1", link, earg = earg, tag = FALSE), ", ", 
+            namesof("p2", link, earg = earg, tag = FALSE),
+            if (!inbreeding) paste(",",
+            namesof("f",  link, earg = earg, tag = FALSE)) else
+            ""),
   deviance = Deviance.categorical.data.vgam,
+  infos = eval(substitute(function(...) {
+    list(Q1 = 6,
+         M1 = ifelse( .inbreeding , 2, 3),
+         expected = TRUE,
+         multipleResponses = FALSE,
+         link = if ( .inbreeding ) c("p1" = .link , "p2" = .link ) else
+                            c("p1" = .link , "p2" = .link , "f" = .link ))
+  }, list( .link = link, .inbreeding = inbreeding ))),
+
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
 
-    ok.col.ny <- c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
+    ok.col.ny <- c("A1A1", "A1A2", "A1A3", "A2A2", "A2A3", "A3A3")
     if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
        setequal(ok.col.ny, col.ny)) {
         if (!all(ok.col.ny == col.ny))
             stop("the columns of the response matrix should have ",
                  "names (output of colnames()) ordered as ",
-                 "c('G1G1','G1G2','G1G3','G2G2','G2G3','G3G3')")
+                 "c('A1A1','A1A2','A1A3','A2A2','A2A3','A3A3')")
     }
 
     predictors.names <-
      c(namesof("p1", .link , earg = .earg , tag = FALSE),
        namesof("p2", .link , earg = .earg , tag = FALSE),
+       if ( .inbreeding ) NULL else
        namesof("f",  .link , earg = .earg , tag = FALSE))
+    mustart <- (y + mustart) / 2
+
 
     if (is.null(etastart)) {
 
 
 
-      mydeterminant <- mustart[, 2] * mustart[, 3] +
+      mydeterminant <- weighted.mean(
+                       mustart[, 2] * mustart[, 3] +
                        mustart[, 2] * mustart[, 5] +
-                       mustart[, 3] * mustart[, 5]
+                       mustart[, 3] * mustart[, 5], w)
       p1 <- if (is.numeric( .ip1 )) rep( .ip1 , len = n) else
-            mustart[, 2] * mustart[, 3] / mydeterminant
+            weighted.mean(mustart[, 2] * mustart[, 3], w) / mydeterminant
       p2 <- if (is.numeric( .ip2 )) rep( .ip2 , len = n) else
-            mustart[, 2] * mustart[, 5] / mydeterminant
+            weighted.mean(mustart[, 2] * mustart[, 5], w) / mydeterminant
       ff <- if (is.numeric( .iF  )) rep( .iF  , len = n) else
-            abs(1 - mustart[, 2] / (2 * p1 * p2))
-
-      if (any(p1 <= 0) || any(p1 >= 1))
-        stop("bad initial value for 'p1'")
-      if (any(p2 <= 0) || any(p2 >= 1))
-        stop("bad initial value for 'p2'")
+            weighted.mean(abs(1 - mustart[, 2] / (2 * p1 * p2)), w)
+      p1 <- rep(p1, len = n)
+      p2 <- rep(p2, len = n)
+      ff <- rep(ff, len = n)
+      p1[p1 < 0.05] <- 0.05
+      p1[p1 > 0.99] <- 0.99
+      p2[p2 < 0.05] <- 0.05
+      p2[p2 > 0.99] <- 0.99
+      ff[ff < 0.05] <- 0.05
+      ff[ff > 0.99] <- 0.99
 
       etastart <-
         cbind(theta2eta(p1, .link , earg = .earg ),
               theta2eta(p2, .link , earg = .earg ),
+              if ( .inbreeding ) NULL else
               theta2eta(ff, .link , earg = .earg ))
       mustart <- NULL  # Since etastart has been computed.
 
     }
   }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .iF = iF,
+            .inbreeding = inbreeding,
             .earg = earg))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
     p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
-    f  <- eta2theta(eta[, 3], link = .link , earg = .earg )
+    f  <- if ( .inbreeding ) 0 else
+          eta2theta(eta[, 3], link = .link , earg = .earg )
     p3 <- abs(1 - p1 - p2)
-      cbind("G1G1" = f*p1+(1-f)*p1^2,
-            "G1G2" = 2*p1*p2*(1-f),
-            "G1G3" = 2*p1*p3*(1-f),
-            "G2G2" = f*p2+(1-f)*p2^2,
-            "G2G3" = 2*p2*p3*(1-f),
-            "G3G3" = f*p3+(1-f)*p3^2)
-  }, list( .link = link, .earg = earg))),
+      cbind("A1A1" = f*p1+(1-f)*p1^2,
+            "A1A2" = 2*p1*p2*(1-f),
+            "A1A3" = 2*p1*p3*(1-f),
+            "A2A2" = f*p2+(1-f)*p2^2,
+            "A2A3" = 2*p2*p3*(1-f),
+            "A3A3" = f*p3+(1-f)*p3^2)
+  }, list( .link = link, .earg = earg, .inbreeding = inbreeding))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(p1 = .link , p2 = .link , f = .link )
-
-    misc$earg <- list(p1 = .earg , p2 = .earg , f = .earg )
+    if ( .inbreeding ) {
+      misc$link <-    c(p1 = .link , p2 = .link )
+      misc$earg <- list(p1 = .earg , p2 = .earg )
+    } else {
+      misc$link <-    c(p1 = .link , p2 = .link , f = .link )
+      misc$earg <- list(p1 = .earg , p2 = .earg , f = .earg )
+    }
 
     misc$expected <- TRUE
-  }), list( .link = link, .earg = earg))),
+  }), list( .link = link, .earg = earg, .inbreeding = inbreeding ))),
 
-  loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+  loglikelihood = function(mu, y, w, residuals = FALSE,
+                           eta, extra = NULL)
     if (residuals)
       stop("loglikelihood residuals not implemented yet") else {
-          sum(dmultinomial(x = w * y, size = w, prob = mu,
-                           log = TRUE, dochecking = FALSE))
+      sum(dmultinomial(x = w * y, size = w, prob = mu,
+                       log = TRUE, dochecking = FALSE))
       },
-  vfamily = c("G1G2G3", "vgenetic"),
+  vfamily = c("A1A2A3", "vgenetic"),
   deriv = eval(substitute(expression({
     p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
     p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
     p3 <- 1-p1-p2
-    f  <- eta2theta(eta[, 3], link = .link , earg = .earg )
+    f  <- if ( .inbreeding ) 0 else
+          eta2theta(eta[, 3], link = .link , earg = .earg )
+    if ( .inbreeding ) {
+    dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 -
+              (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+    dl.dp2 <- (2*y[, 3]+y[, 2]+y[,5])/p2 -
+              (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+
+    dp1.deta <- dtheta.deta(p1, link = .link , earg = .earg )
+    dp2.deta <- dtheta.deta(p2, link = .link , earg = .earg )
+
+    c(w) * cbind(dl.dp1 * dp1.deta,
+                 dl.dp2 * dp2.deta)
+    } else {
     dP1 <- cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
                 0, -2*(1-f)*p2, -f - 2*p3*(1-f))
     dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
@@ -127,8 +174,20 @@
     c(w) * cbind(dPP.deta[, 1] * dl1,
                  dPP.deta[, 2] * dl2, 
                  dPP.deta[, 3] * dl3)
-  }), list( .link = link, .earg = earg))),
+  }
+  }), list( .link = link, .earg = earg, .inbreeding = inbreeding ))),
   weight = eval(substitute(expression({
+    if ( .inbreeding ) {
+    qq <- 1-p1-p2
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
+    ned2l.dp12  <-  2 * (1/p1 + 1/qq)
+    ned2l.dp22  <-  2 * (1/p2 + 1/qq)
+    ned2l.dp1dp2 <-  2 / qq
+    wz[, iam(1, 1, M)] <- ned2l.dp12 * dp1.deta^2
+    wz[, iam(2, 2, M)] <- ned2l.dp22 * dp2.deta^2
+    wz[, iam(1, 2, M)] <- ned2l.dp1dp2 * dp1.deta * dp2.deta
+    c(w) * wz
+    } else {
     dPP <- array(c(dP1, dP2, dP3), c(n, 6, 3))
 
     wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==6 because M==3
@@ -140,13 +199,24 @@
                               dPP.deta[, i1] * dPP.deta[, i2]
     }
     c(w) * wz
-  }), list( .link = link, .earg = earg))))
+  }
+  }), list( .link = link, .earg = earg, .inbreeding = inbreeding ))))
 }
 
 
 
+
+
+
+
+
+if (FALSE)
  AAaa.nohw <- function(link = "logit", ipA = NULL, iF = NULL) {
 
+
+
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
@@ -162,7 +232,7 @@
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
@@ -252,8 +322,14 @@
 
 
 
+
+ if (FALSE)
  AB.Ab.aB.ab2 <- function(link = "logit", init.p = NULL) {
 
+
+
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
@@ -268,26 +344,31 @@
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
     predictors.names <- namesof("p", .link , earg = .earg , tag = FALSE)
 
-    if (length(mustart.orig))
+    if (length(mustart.orig)) {
       mustart <- mustart.orig
+    }
 
-        ok.col.ny <- c("AB","Ab","aB","ab")
-        if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+    ok.col.ny <- c("AB","Ab","aB","ab")
+    if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
            setequal(ok.col.ny, col.ny)) {
             if (!all(ok.col.ny == col.ny))
                 stop("the columns of the response matrix should have names ",
                      "(output of colnames()) ordered as c('AB','Ab','aB','ab')")
-        }
+    }
 
-        if (is.null(etastart)) {
-            p.init <- if (is.numeric(.init.p)) rep(.init.p, n) else
-                     c(1 - 2 * sqrt(mustart[, 4]))
-            etastart <- theta2eta(p.init, .link , earg = .earg )
-            mustart <- NULL  # Since etastart has been computed.
-        }
+    mustart <- (mustart + y) / 2
+
+    if (is.null(etastart)) {
+      p.init <- if (is.numeric( .init.p )) rep( .init.p , n) else
+                rep(c(1 - 2 * sqrt(weighted.mean(mustart[, 4], w))), n)
+      p.init <- ifelse(p.init < 0.01, 0.01, p.init)
+      p.init <- ifelse(p.init > 0.99, 0.99, p.init)
+      etastart <- theta2eta(p.init, .link , earg = .earg )
+      mustart <- NULL  # Since etastart has been computed.
+    }
     }), list( .link = link, .init.p=init.p, .earg = earg))),
     linkinv = eval(substitute(function(eta,extra = NULL) {
         p <- eta2theta(eta, link = .link , earg = .earg )
@@ -331,49 +412,68 @@
 
 
 
- A1A2A3 <- function(link = "logit", ip1 = NULL, ip2 = NULL) {
+
+
+
+
+
+
+ if (FALSE)
+ A1A2A3.orig <- function(link = "logit", ip1 = NULL, ip2 = NULL) {
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
+
+
   new("vglmff",
   blurb = c("A1A2A3 Allele System ",
             "(A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
             "Links:    ",
-            namesof("p1", link, earg = earg), ", ", 
-            namesof("p2", link, earg = earg, tag = FALSE)),
+            namesof("pA", link, earg = earg), ", ", 
+            namesof("pB", link, earg = earg, tag = FALSE)),
   deviance = Deviance.categorical.data.vgam,
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 6,
+         multipleResponses = FALSE,
+         expected = TRUE,
+         link = c("pA" = .link , "pB" = .link ))
+  }, list( .link = link ))),
+
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
 
-        ok.col.ny <- c("A1A1","A1A2","A2A2","A1A3","A2A3","A3A3")
+        ok.col.ny <- c("A1A1", "A1A2", "A2A2", "A1A3", "A2A3", "A3A3")
         if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
            setequal(ok.col.ny, col.ny)) {
             if (!all(ok.col.ny == col.ny))
                 stop("the columns of the response matrix should have names ",
                      "(output of colnames()) ordered as ",
-                     "c('A1A1','A1A2','A2A2','A1A3','A2A3','A3A3')")
+                     "c('A1A1', 'A1A2', 'A2A2', 'A1A3', 'A2A3', 'A3A3')")
         }
+    
+     predictors.names <-
+          c(namesof("pA", .link , earg = .earg , tag = FALSE),
+            namesof("pB", .link , earg = .earg , tag = FALSE))
+    mustart <- (y + mustart) / 2
 
-        predictors.names <-
-            c(namesof("pA", .link , earg = .earg , tag = FALSE),
-              namesof("pB", .link , earg = .earg , tag = FALSE))
-
-        if (is.null(etastart)) {
-            p1 <- if (is.numeric(.ip1)) rep(.ip1, n) else
-                       c(sqrt(mustart[, 1]))
-            p2 <- if (is.numeric(.ip2)) rep(.ip2, n) else
-                       c(sqrt(mustart[, 3]))
-            etastart <- cbind(theta2eta(p1, .link , earg = .earg ),
-                              theta2eta(p2, .link , earg = .earg ))
-            mustart <- NULL  # Since etastart has been computed.
+
+    if (is.null(etastart)) {
+        p1 <- if (is.numeric(.ip1 )) rep( .ip1 , len = n) else
+               rep(c(sqrt( weighted.mean(mustart[, 1], w) )), len = n)
+        p2 <- if (is.numeric(.ip2 )) rep( .ip2 , len = n) else
+               rep(c(sqrt( weighted.mean(mustart[, 3], w) )), len = n)
+        etastart <- cbind(theta2eta(p1, .link , earg = .earg ),
+                          theta2eta(p2, .link , earg = .earg ))
+        mustart <- NULL  # Since etastart has been computed.
     }
   }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -397,7 +497,8 @@
   }), list( .link = link, .earg = earg))),
 
 
-  loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+  loglikelihood = function(mu, y, w, residuals = FALSE,
+                           eta, extra = NULL)
     if (residuals)
       stop("loglikelihood residuals not implemented yet") else {
       sum(dmultinomial(x = w * y, size = w, prob = mu,
@@ -408,8 +509,10 @@
     p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
     p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
 
-    dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
-    dl.dp2 <- (2*y[, 3]+y[, 2]+y[,5])/p2 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+    dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 -
+              (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+    dl.dp2 <- (2*y[, 3]+y[, 2]+y[,5])/p2 -
+              (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
 
     dp1.deta <- dtheta.deta(p1, link = .link , earg = .earg )
     dp2.deta <- dtheta.deta(p2, link = .link , earg = .earg )
@@ -452,7 +555,7 @@
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
@@ -564,7 +667,7 @@
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
@@ -581,15 +684,16 @@
     predictors.names <-
       c(namesof("pA", .link , earg = .earg , tag = FALSE),
         namesof("pB", .link , earg = .earg , tag = FALSE))
+    mustart <- (y + mustart) / 2
 
     if (!length(etastart)) {
       pO <- if (is.Numeric( .ipO )) rep( .ipO , len = n) else
-           c(sqrt(mustart[, 4]))
+        rep(c(sqrt( weighted.mean(mustart[, 4], w)) ), len = n)
       pA <- if (is.Numeric( .ipA )) rep( .ipA , len = n) else
-          c(1 - sqrt(mustart[, 2] + mustart[, 4]))
+        rep(c(1 - sqrt(weighted.mean(mustart[, 2] + mustart[, 4], w))), len = n)
       pB <- abs(1 - pA - pO)
       etastart <- cbind(theta2eta(pA, .link , earg = .earg ),
-                       theta2eta(pB, .link , earg = .earg ))
+                        theta2eta(pB, .link , earg = .earg ))
       mustart <- NULL  # Since etastart has been computed.
     }
   }), list( .link = link, .ipO = ipO, .ipA = ipA, .earg = earg))),
@@ -677,10 +781,11 @@
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
-    if (length(mustart.orig))
+    if (length(mustart.orig)) {
       mustart <- mustart.orig
+    }
 
     ok.col.ny <- c("AB","Ab","aB","ab")
     if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
@@ -692,14 +797,17 @@
     }
 
     predictors.names <- namesof("p", .link , earg = .earg , tag = FALSE)
+    mustart <- (y + mustart) / 2
 
     if (is.null(etastart)) {
-      p <- if (is.numeric( .init.p )) rep(.init.p, len = n) else
-          c(sqrt(4 * mustart[, 4]))
-      etastart <- cbind(theta2eta(p, .link , earg = .earg ))
+      p.init <- if (is.numeric( .init.p )) rep( .init.p , len = n) else
+        rep(c(sqrt(4 * weighted.mean(mustart[, 4], w))), len = n)
+
+      etastart <- cbind(theta2eta(p.init, .link , earg = .earg ))
+      etastart <- jitter(etastart)
       mustart <- NULL  # Since etastart has been computed.
     }
-  }), list( .link = link, .init.p=init.p, .earg = earg))),
+  }), list( .link = link, .init.p = init.p, .earg = earg))),
   linkinv = eval(substitute(function(eta,extra = NULL) {
     p <- eta2theta(eta, link = .link , earg = .earg )
     pp4 <- p * p / 4
@@ -749,58 +857,121 @@
 
 
 
- AA.Aa.aa <- function(link = "logit", init.pA = NULL) {
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
 
 
+
+
+
+
+
+
+ AA.Aa.aa <-
+  function(linkp = "logit",
+           linkf = "logit",
+           inbreeding = TRUE,
+           ipA = NULL,
+           ifp = NULL,
+           zero = NULL) {
+    
+  linkp <- as.list(substitute(linkp))
+  eargp <- link2list(linkp)
+  linkp <- attr(eargp, "function.name")
+
+  linkf <- as.list(substitute(linkf))
+  eargf <- link2list(linkf)
+  linkf <- attr(eargf, "function.name")
+
+  if (!is.logical(inbreeding) || length(inbreeding) > 1)
+    stop("argument 'inbreeding' must be a single logical")
+
+  
+
   new("vglmff",
-  blurb = c("AA-Aa-aa phenotype\n\n",
-            "Links:    ", namesof("pA", link, earg = earg)),
+  blurb = c("AA-Aa-aa phenotype (",
+            ifelse(inbreeding, "with", "without"),
+            " the Hardy-Weinberg equilibrium assumption)\n\n",
+            "Links:    ",
+            namesof("pA", linkp, earg = eargp, tag = FALSE),
+            if (!inbreeding) paste(",",
+            namesof("f",  linkf, earg = eargf, tag = FALSE)) else
+            ""),
   deviance = Deviance.categorical.data.vgam,
+  infos = eval(substitute(function(...) {
+    list(M1 = ifelse( .inbreeding , 1, 2),
+         Q1 = 3,
+         multipleResponses = FALSE,
+         expected = TRUE,
+         zero = .zero ,
+         link = if ( .inbreeding ) c("pA" = .linkp ) else
+                            c("pA" = .linkp , "f" = .linkf ))
+  }, list( .linkp = linkp,
+           .linkf = linkf, .inbreeding = inbreeding,
+           .zero = zero ))),
+
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
 
     delete.zero.colns <- FALSE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
 
-    ok.col.ny <- c("AA","Aa","aa")
+    ok.col.ny <- c("AA", "Aa", "aa")
     if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
-       setequal(ok.col.ny, col.ny)) {
-        if (!all(ok.col.ny == col.ny))
-          stop("the columns of the response matrix ",
-               "should have names ",
-               "(output of colnames()) ordered as c('AA','Aa','aa')")
+      setequal(ok.col.ny, col.ny)) {
+      if (!all(ok.col.ny == col.ny))
+        stop("the columns of the response matrix ",
+             "should have names ",
+             "(output of colnames()) ordered as c('AA','Aa','aa')")
     }
 
-    predictors.names <- namesof("pA", .link , earg = .earg , tag = FALSE)
+    predictors.names <-
+      c(namesof("pA", .linkp , earg = .eargp , tag = FALSE),
+        if ( .inbreeding ) NULL else
+        namesof("f",  .linkf , earg = .eargf , tag = FALSE))
+    mustart <- (y + mustart) / 2
+        
 
     if (is.null(etastart)) {
-      pA <- if (is.numeric(.init.pA)) rep(.init.pA, n) else
-                c(sqrt(mustart[, 1]))
-      etastart <- cbind(theta2eta(pA, .link , earg = .earg ))
+      pA <- if (is.numeric( .ipA )) rep( .ipA , len = n) else
+              rep(c(sqrt( weighted.mean(mustart[, 1], w))), len = n)
+      fp <- if (is.numeric( .ifp )) rep( .ifp , len = n) else
+              runif(n)  # 1- mustart[, 2]/(2*pA*(1-pA))
+      etastart <- cbind(theta2eta(pA, .linkp , earg = .eargp ),
+                        if ( .inbreeding ) NULL else
+                        theta2eta(fp, .linkf , earg = .eargf ) )
       mustart <- NULL  # Since etastart has been computed.
     }
-  }), list( .link = link, .init.pA=init.pA, .earg = earg))),
-  linkinv = eval(substitute(function(eta,extra = NULL) {
-    pA <- eta2theta(eta, link = .link , earg = .earg )
-    pp <- pA*pA
-    cbind(AA = pp,
-          Aa = 2*pA*(1-pA),
-          aa = (1-pA)^2) 
-  }, list( .link = link, .earg = earg))),
+  }), list( .linkp = linkp, .linkf = linkf,
+            .ipA = ipA, .ifp = ifp, .inbreeding = inbreeding,
+            .eargp = eargp, .eargf = eargf ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    eta <- as.matrix(eta)
+    pA <- eta2theta(eta[, 1], link = .linkp , earg = .eargp )
+    fp <- if ( .inbreeding ) 0 else
+          eta2theta(eta[, 2], link = .linkf , earg = .eargf )
+
+    cbind(AA = pA^2 + pA * (1-pA) * fp,
+          Aa = 2 * pA * (1-pA) * (1 - fp),
+          aa = (1-pA)^2 + pA * (1-pA) * fp)
+  }, list( .linkp = linkp, .linkf = linkf,
+           .eargp = eargp, .eargf = eargf,
+           .inbreeding = inbreeding ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c("pA" = .link )
-
-    misc$earg <- list("pA" = .earg )
+    if ( .inbreeding ) {
+      misc$link <-    c("pA" = .linkp )
+      misc$earg <- list("pA" = .eargp )
+    } else {
+      misc$link <-    c("pA" = .linkp, "f" = .linkf )
+      misc$earg <- list("pA" = .eargp, "f" = .eargf )
+    }
+    misc$expected <- TRUE
+  }), list( .linkp = linkp, .linkf = linkf,
+            .eargp = eargp, .eargf = eargf,
+            .inbreeding = inbreeding ))),
 
-    misc$expected = TRUE
-  }), list( .link = link, .earg = earg))),
 
 
   loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
@@ -811,19 +982,59 @@
     },
   vfamily = c("AA.Aa.aa", "vgenetic"),
   deriv = eval(substitute(expression({
-    pA  <- eta2theta(eta, link = .link , earg = .earg )
-    nAA <- w * y[, 1]
-    nAa <- w * y[, 2]
-    naa <- w * y[, 3]
-    dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
-    dpA.deta <- dtheta.deta(pA, link = .link , earg = .earg )
-    dl.dpA * dpA.deta
-  }), list( .link = link, .earg = earg))),
+    eta <- as.matrix(eta)
+    pA <- eta2theta(eta[, 1], link = .linkp , earg = .eargp )
+    fp <- if ( .inbreeding ) 0 else
+          eta2theta(eta[, 2], link = .linkf , earg = .eargf )
+
+    if ( .inbreeding ) {
+      nAA <- w * y[, 1]
+      nAa <- w * y[, 2]
+      naa <- w * y[, 3]
+      dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
+      dpA.deta <- dtheta.deta(pA, link = .linkp , earg = .eargp )
+      dl.dpA * dpA.deta
+    } else {
+      dP1 <- cbind(fp + 2*pA*(1-fp),
+                    2*(1-fp)*(1-2*pA),
+                   -2*(1-pA) + fp*(1-2*pA))
+      dP2 <- cbind(pA*(1-pA),
+                   -2*pA*(1-pA),
+                   pA*(1-pA))
+      dl1 <- rowSums(y * dP1 / mu)
+      dl2 <- rowSums(y * dP2 / mu)
+
+      dPP.deta <- dtheta.deta(pA, link = .linkp , earg = .eargp )
+      dfp.deta <- dtheta.deta(fp, link = .linkf , earg = .eargf )
+
+      c(w) * cbind(dPP.deta * dl1,
+                   dfp.deta * dl2)      
+    }  
+  }), list( .linkp = linkp, .linkf = linkf,
+            .eargp = eargp, .eargf = eargf,
+            .inbreeding = inbreeding ))),
   weight = eval(substitute(expression({
-    ned2l.dp2 <- (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
-    wz <- cbind((dpA.deta^2) * ned2l.dp2)
-    wz
-  }), list( .link = link, .earg = earg))))
+    if ( .inbreeding ) {
+      ned2l.dp2 <- (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
+      wz <- cbind((dpA.deta^2) * ned2l.dp2)
+      wz
+    } else {
+      dPP <- array(c(dP1, dP2), c(n, 3, 2))
+      dPP.deta <- cbind(dtheta.deta(pA, link = .linkp , earg = .eargp ),
+                        dtheta.deta(fp, link = .linkf , earg = .eargf ))
+      wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
+      for (i1 in 1:M)
+        for (i2 in i1:M) {
+          index <- iam(i1, i2, M)
+          wz[, index] <- rowSums(dPP[,, i1, drop = TRUE] *
+                                 dPP[,, i2, drop = TRUE] / mu) *
+                                 dPP.deta[, i1] * dPP.deta[, i2]
+        }
+      c(w) * wz
+    }
+  }), list( .linkp = linkp, .linkf = linkf,
+            .eargp = eargp, .eargf = eargf,
+            .inbreeding = inbreeding ))))
 }
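
The linkinv slot of AA.Aa.aa() above encodes the usual genotype
frequencies under an inbreeding coefficient f; setting f = 0 recovers
the Hardy-Weinberg proportions.  A quick numerical check with
illustrative values pA = 0.3 and f = 0.1:

    # Genotype probabilities with inbreeding coefficient f, matching
    # the linkinv slot above; they sum to 1 for any valid pA and f.
    pA <- 0.3
    f  <- 0.1
    probs <- c(AA = pA^2 + pA * (1 - pA) * f,
               Aa = 2 * pA * (1 - pA) * (1 - f),
               aa = (1 - pA)^2 + pA * (1 - pA) * f)
    probs
    sum(probs)  # exactly 1
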
 
 
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index c3f96aa..762c923 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -48,15 +48,15 @@
          "Link:     ", namesof("mu[,j]", link, earg = earg), "\n",
          "Variance: mu[,j]*(1-mu[,j])") else
          c("Binomial model\n\n", 
-         "Link:     ", namesof("mu", link, earg = earg), "\n",
+         "Link:     ", namesof("prob", link, earg = earg), "\n",
          "Variance: mu * (1 - mu)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
 
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
   infos = eval(substitute(function(...) {
@@ -77,6 +77,10 @@
 
 
 
+    old.name <- "mu"
+    new.name <- "prob"
+
+
 
     if ( .mv ) {
       temp5 <-
@@ -91,7 +95,16 @@
       y <- temp5$y
 
 
+      y.counts <- y
+      y <- y / w
+
+
+
+
+
       M <- ncol(y)
+
+  if (FALSE)
       if (!all(y == 0 | y == 1))
         stop("response must contain 0s and 1s only")
 
@@ -101,18 +114,19 @@
       dn2 <- if (length(dn2)) {
         paste("E[", dn2, "]", sep = "") 
       } else {
-        paste("mu", 1:M, sep = "") 
+        paste(new.name, 1:M, sep = "") 
       }
       predictors.names <-
-          namesof(if (M > 1) dn2 else
-                  "mu", .link , earg = .earg , short = TRUE)
+          namesof(if (M > 1) dn2 else new.name,
+                  .link , earg = .earg , short = TRUE)
 
       if (!length(mustart) && !length(etastart))
-        mustart <- matrix(colMeans(y), nrow = nrow(y), ncol = ncol(y),
+        mustart <- matrix(colMeans(y.counts), nrow = nrow(y), ncol = ncol(y),
+                         byrow = TRUE) /
+                   matrix(colMeans(w), nrow = nrow(w), ncol = ncol(w),
                          byrow = TRUE)
 
-      if (!all(w == 1))
-        extra$orig.w <- w
+
 
       extra$mv <- TRUE
 
@@ -159,7 +173,7 @@
                "successes and col 2 is the no. of failures")
         }
         predictors.names <-
-          namesof("mu", .link , earg = .earg , short = TRUE)
+          namesof(new.name, .link , earg = .earg , short = TRUE)
     }
 
 
@@ -194,7 +208,7 @@
       temp87 <- (y-mu)^2 * wz / (
                 dtheta.deta(mu, link = .link ,
                             earg = .earg )^2)  # w cancel
-      if (.mv && ! .onedpar) {
+      if (.mv && ! .onedpar ) {
         dpar <- rep(as.numeric(NA), len = M)
         temp87 <- cbind(temp87)
         nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
@@ -215,7 +229,7 @@
     misc$expected <- TRUE
 
     misc$link <- rep( .link , length = M)
-    names(misc$link) <- if (M > 1) dn2 else "mu"
+    names(misc$link) <- if (M > 1) dn2 else new.name  # Was old.name=="mu"
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
@@ -527,7 +541,7 @@
   deviance =
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
-    pow <- 3 # Use Quasi()$deviance with pow==3
+    pow <- 3  # Use Quasi()$deviance with pow==3
     devy  <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
     devmu <- y * mu^(1-pow) / (1-pow) - mu^(2-pow) / (2-pow)
     devi <- 2 * (devy - devmu)
@@ -700,7 +714,7 @@ rinv.gaussian <- function(n, mu, lambda) {
  inv.gaussianff <- function(lmu = "loge", llambda = "loge",
                             imethod = 1,  ilambda = NULL,
                             parallel = FALSE,
-                            shrinkage.init = 0.99,
+                            ishrinkage = 0.99,
                             zero = NULL) {
 
 
@@ -720,10 +734,10 @@ rinv.gaussian <- function(n, mu, lambda) {
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -745,12 +759,12 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Mean:     ", "mu\n",
             "Variance: mu^3 / lambda"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
 
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
   infos = eval(substitute(function(...) {
@@ -797,7 +811,7 @@ rinv.gaussian <- function(n, mu, lambda) {
           matrix(1.1 * mediany + 1/8, n, ncoly, byrow = TRUE)
         } else if ( .imethod == 3) {
           use.this <- colSums(y * w) / colSums(w)  # weighted.mean(y, w)
-          (1 - .sinit) * y  + .sinit * use.this
+          (1 - .ishrinkage ) * y  + .ishrinkage * use.this
         } else {
           matrix(colSums(y * w) / colSums(w) + 1/8,
                  n, ncoly, byrow = TRUE)
@@ -815,7 +829,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     }
   }), list( .lmu = lmu, .llambda = llambda,
             .emu = emu, .elambda = elambda,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod, .ilambda = ilambda ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -839,7 +853,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     misc$M1 <- M1
     misc$imethod <- .imethod
-    misc$shrinkage.init <- .sinit 
+    misc$ishrinkage <- .ishrinkage 
     misc$expected <- TRUE
     misc$multipleResponses <- FALSE
     misc$parallel <- .parallel
@@ -847,7 +861,7 @@ rinv.gaussian <- function(n, mu, lambda) {
   }), list( .lmu = lmu, .llambda = llambda,
             .emu = emu, .elambda = elambda,
             .parallel = parallel, .apply.parint = apply.parint,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
 
   loglikelihood = eval(substitute(
@@ -946,13 +960,13 @@ rinv.gaussian <- function(n, mu, lambda) {
 
   new("vglmff",
   blurb = c("Poisson distribution\n\n",
-            "Link:     ", namesof("mu", link, earg = earg), "\n",
-            "Variance: mu"),
+            "Link:     ", namesof("lambda", link, earg = earg), "\n",
+            "Variance: lambda"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
                            bool = .parallel , 
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
 
   deviance =
@@ -999,14 +1013,19 @@ rinv.gaussian <- function(n, mu, lambda) {
 
     assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAMenv)
 
+
+
+    old.name <- "mu"
+    new.name <- "lambda"
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
     dn2 <- if (length(dn2)) {
       paste("E[", dn2, "]", sep = "") 
     } else {
-      paste("mu", 1:M, sep = "") 
+      paste(new.name, 1:M, sep = "") 
     }
     predictors.names <-
-      namesof(if (M > 1) dn2 else "mu", .link ,
+      namesof(if (M > 1) dn2 else new.name, # was "mu" == old.name
+              .link ,
               earg = .earg , short = TRUE)
 
 
@@ -1069,7 +1088,7 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
     misc$link <- rep( .link , length = M)
-    names(misc$link) <- if (M > 1) dn2 else "mu"
+    names(misc$link) <- if (M > 1) dn2 else new.name  # Was old.name=="mu"
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
@@ -1164,10 +1183,11 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
- quasibinomialff <- function(
-                             link = "logit",
-                             mv = FALSE, onedpar = !mv,
-                             parallel = FALSE, zero = NULL) {
+ quasibinomialff <-
+  function(
+           link = "logit",
+           mv = FALSE, onedpar = !mv,
+           parallel = FALSE, zero = NULL) {
 
 
   link <- as.list(substitute(link))
@@ -1180,11 +1200,20 @@ rinv.gaussian <- function(n, mu, lambda) {
                     mv = mv, onedpar = onedpar,
                     parallel = parallel, zero = zero)
   ans at vfamily <- "quasibinomialff"
+  ans at infos <- eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         zero = .zero )
+  }, list( .zero = zero )))
+
   ans
 }
 
 
 
+
+
+
  quasipoissonff <- function(link = "loge", onedpar = FALSE,
                             parallel = FALSE, zero = NULL) {
 
@@ -1199,12 +1228,19 @@ rinv.gaussian <- function(n, mu, lambda) {
                    dispersion = dispersion, onedpar = onedpar,
                    parallel = parallel, zero = zero)
   ans at vfamily <- "quasipoissonff"
+  ans at infos <- eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         zero = .zero )
+  }, list( .zero = zero )))
+
   ans
 }
 
 
 
 
+
  double.exppoisson <- function(lmean = "loge",
                          ldispersion = "logit",
                          idispersion = 0.8,
@@ -1233,7 +1269,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Mean:     ", "mean\n",
             "Variance: mean / dispersion"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1359,7 +1395,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             namesof("dispersion", ldisp, earg = edisp), "\n",
             "Mean:     ", "mean\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (!all(w == 1))
@@ -1499,6 +1535,8 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
+
+if (FALSE)
  matched.binomial <- function(mvar = NULL, link = "logit",
                               parallel = TRUE,
                               smallno = .Machine$double.eps^(3/4)) {
@@ -1525,7 +1563,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     blurb = c("Matched binomial model (intercepts fitted)\n\n", 
               "Link:     ", namesof("mu[,j]", link, earg = earg)),
     constraints = eval(substitute(expression({
-        constraints <- cm.vgam(matrix(1, M, 1), x = x,
+        constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                                bool = .parallel ,
                                constraints = constraints,
                                apply.int = TRUE)
@@ -1708,7 +1746,7 @@ mypool <- function(x, index) {
     blurb = c("Matched binomial model (intercepts not fitted)\n\n", 
               "Link:     ", namesof("mu[,j]", link, earg = earg)),
     constraints = eval(substitute(expression({
-        constraints <- cm.vgam(matrix(1, M, 1), x = x,
+        constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                                apply.int = FALSE)
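
On the shrinkage.init -> ishrinkage rename above: the argument sets
the weight given to the (weighted) sample mean in the convex
combination used for initial fitted values when imethod = 3.
Schematically, with made-up data:

    # Shrinkage initialization: pull each observation most of the way
    # toward the weighted mean, keeping the starting values non-degenerate.
    y <- c(0.5, 1.2, 3.0, 0.8)
    w <- rep(1, length(y))
    ishrinkage <- 0.99
    mu.init <- (1 - ishrinkage) * y + ishrinkage * weighted.mean(y, w)
    mu.init  # nearly constant, close to mean(y)
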
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 582e8cf..9b23924 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -22,13 +22,13 @@
   constraints = eval(substitute(expression({
     cm.intercept.default <- diag(3)
 
-    constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x = x,
+    constraints <- cm.VGAM(matrix(c(1,1,0, 0,0,1), 3, 2), x = x,
                            bool = .exchangeable ,
                            constraints = constraints,
                            apply.int = TRUE,
                            cm.default           = cm.intercept.default,
                            cm.intercept.default = cm.intercept.default)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   initialize = expression({
 
@@ -155,13 +155,13 @@
   constraints = eval(substitute(expression({
     cm.intercept.default <- diag(6)
 
-    constraints <- cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x = x,
+    constraints <- cm.VGAM(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x = x,
                            bool = .exchangeable ,
                            constraints = constraints,
                            apply.int = TRUE,
                            cm.default           = cm.intercept.default,
                            cm.intercept.default = cm.intercept.default)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   initialize = expression({
     predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
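
The 6 x 2 matrix passed to cm.VGAM() above implements exchangeability
for this loglinear model: the three main effects (u1,u2,u3) share one
regression coefficient and the three two-way interactions
(u12,u13,u23) share another.  The constraint matrix in isolation:

    # Exchangeable constraint matrix: eta = H %*% beta, so only two
    # free regression coefficients remain instead of six.
    H <- matrix(c(1,1,1,0,0,0,
                  0,0,0,1,1,1), nrow = 6, ncol = 2)
    dimnames(H) <- list(c("u1","u2","u3","u12","u13","u23"),
                        c("main", "interaction"))
    H
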
diff --git a/R/family.math.R b/R/family.math.R
index 762b67a..0559f29 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -26,11 +26,36 @@ log1pexp <- function(x) {
 
 
 
-erf <- function(x)
-  2 * pnorm(x * sqrt(2)) - 1
+erf <- function(x, inverse = FALSE) {
+  if (inverse) {
+    ans <- qnorm((x+1)/2) / sqrt(2)
+    ans[x <  -1] <- NA
+    ans[x >  +1] <- NA
+    ans[x == -1] <- -Inf
+    ans[x == +1] <-  Inf
+    ans
+  } else {
+    2 * pnorm(x * sqrt(2)) - 1
+  }
+}
+
+
+
+erfc <- function(x, inverse = FALSE) {
+  if (inverse) {
+    ans <- qnorm(x/2, lower.tail = FALSE) / sqrt(2)
+    ans[x <  0] <- NA
+    ans[x >  2] <- NA
+    ans[x == 0] <-  Inf
+    ans[x == 2] <- -Inf
+    ans
+  } else {
+    2 * pnorm(x * sqrt(2), lower.tail = FALSE)
+  }
+}
+
+
 
-erfc <- function(x)
-  2 * pnorm(x * sqrt(2), lower.tail = FALSE)
 
 
 
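A quick round-trip check of the new inverse = TRUE branches of erf()
and erfc() (assuming the VGAM package is attached):

    # erf(x) = 2*pnorm(x*sqrt(2)) - 1, so applying the qnorm()-based
    # inverse should recover x; the boundaries map to +/- Inf.
    x <- seq(-2, 2, by = 0.5)
    all.equal(erf(erf(x),   inverse = TRUE), x)  # TRUE
    all.equal(erfc(erfc(x), inverse = TRUE), x)  # TRUE
    erf( 1, inverse = TRUE)  # Inf
    erfc(0, inverse = TRUE)  # Inf
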
diff --git a/R/family.mixture.R b/R/family.mixture.R
index c77253b..a6ea3cf 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -87,11 +87,11 @@ mix2normal.control <- function(trace = TRUE, ...) {
             "Variance: phi*sd1^2 + (1 - phi)*sd2^2 + ",
                       "phi*(1 - phi)*(mu1-mu2)^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(rbind(diag(4), c(0, 0, 1,0)), x = x,
+    constraints <- cm.VGAM(rbind(diag(4), c(0, 0, 1, 0)), x = x,
                            bool = .eq.sd ,
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero, .eq.sd = eq.sd ))),
   initialize = eval(substitute(expression({
 
@@ -234,10 +234,9 @@ mix2normal.control <- function(trace = TRUE, ...) {
            .nsimEIM = nsimEIM ))),
   weight = eval(substitute(expression({
 
-    d3 <- deriv3(~ log(
-        phi * dnorm((ysim-mu1)/sd1) / sd1 +
-        (1 - phi) * dnorm((ysim-mu2)/sd2) / sd2),
-        c("phi","mu1","sd1","mu2","sd2"), hessian= TRUE)
+    d3 <- deriv3(~ log(     phi  * dnorm((ysim-mu1)/sd1) / sd1 +
+                       (1 - phi) * dnorm((ysim-mu2)/sd2) / sd2),
+        c("phi","mu1","sd1","mu2","sd2"), hessian = TRUE)
     run.mean <- 0
     for (ii in 1:( .nsimEIM )) {
       ysim <- ifelse(runif(n) < phi, rnorm(n, mu1, sd1),
@@ -321,7 +320,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
             namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
             "Mean:     phi*lambda1 + (1 - phi)*lambda2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -354,8 +353,8 @@ mix2poisson.control <- function(trace = TRUE, ...) {
 
       if (!length(etastart))  
         etastart <- cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
-                         theta2eta(init.lambda1, .llambda , earg = .el1 ),
-                         theta2eta(init.lambda2, .llambda , earg = .el2 ))
+                          theta2eta(init.lambda1, .llambda , earg = .el1 ),
+                          theta2eta(init.lambda2, .llambda , earg = .el2 ))
     }
   }), list(.lphi = lphi, .llambda = llambda,
            .ephi = ephi, .el1 = el1, .el2 = el2,
@@ -536,7 +535,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
             "Mean:     phi / lambda1 + (1 - phi) / lambda2\n"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
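
The reformatted deriv3() call above builds gradient and Hessian code
for the two-component normal mixture log-density symbolically;
stats::deriv3() is base R, so the construction can be tried in
isolation:

    d3 <- deriv3(~ log(     phi  * dnorm((ysim - mu1)/sd1) / sd1 +
                       (1 - phi) * dnorm((ysim - mu2)/sd2) / sd2),
                 c("phi", "mu1", "sd1", "mu2", "sd2"), hessian = TRUE)
    ysim <- 0.3; phi <- 0.5; mu1 <- -1; sd1 <- 1; mu2 <- 2; sd2 <- 0.5
    ev <- eval(d3)
    attr(ev, "gradient")  # 1 x 5 matrix of first derivatives
    attr(ev, "hessian")   # 1 x 5 x 5 array of second derivatives
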
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index 32f70c0..f79cfb3 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -131,7 +131,7 @@ micmen.control <- function(save.weight = TRUE, ...) {
             "Variance: constant"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M = 2)
   }), list( .zero = zero))),
 
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -409,7 +409,7 @@ skira.control <- function(save.weight = TRUE, ...) {
             namesof("theta1", link1, earg = earg1), ", ",
             namesof("theta2", link2, earg = earg2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M = 2)
   }), list( .zero = zero ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     M <- if (is.matrix(y))
diff --git a/R/family.normal.R b/R/family.normal.R
index fc6a7ce..3d95daf 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -50,10 +50,10 @@ VGAM.weights.function <- function(w, M, n) {
   blurb = c("Vector linear/additive model\n",
             "Links:    identitylink for Y1,...,YM"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     M <- if (is.matrix(y)) ncol(y) else 1
@@ -168,7 +168,7 @@ VGAM.weights.function <- function(w, M, n) {
       } else {
         if (all(wz[1, ] == apply(wz, 2, min)) &&
             all(wz[1, ] == apply(wz, 2, max))) {
-          onewz <- m2adefault(wz[1, , drop = FALSE], M = M)
+          onewz <- m2a(wz[1, , drop = FALSE], M = M)
           onewz <- onewz[,, 1]  # M x M
 
 
@@ -182,7 +182,7 @@ VGAM.weights.function <- function(w, M, n) {
       } else {
         logretval <- -0.5 * temp1 - n * (M / 2) * log(2*pi)
         for (ii in 1:n) {
-          onewz <- m2adefault(wz[ii, , drop = FALSE], M = M)
+          onewz <- m2a(wz[ii, , drop = FALSE], M = M)
           onewz <- onewz[,, 1]  # M x M
           logdet <- determinant(onewz)$modulus
             logretval <- logretval + 0.5 * logdet
@@ -306,7 +306,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
           namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
           namesof("sd",   lsd,   earg = esd,   tag = TRUE)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
@@ -686,7 +686,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
           "\n", "\n",
           "Mean:     mean"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -995,7 +995,7 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
             .a1 = a1, .a2 = a2, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
-    mysd <- eta2theta(eta[, 2], .lsd , earg = .esd )
+    mysd <- eta2theta(eta[, 2], .lsd   , earg = .esd   )
     mytheta <- mymu / mysd
     mysd * (( .a1 + .a2 ) * (mytheta * pnorm(mytheta) +
         dnorm(mytheta)) - .a2 * mytheta)
@@ -1023,7 +1023,7 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
              extra = NULL,
              summation = TRUE) {
     mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
-    mysd <- eta2theta(eta[, 2], .lsd ,   earg = .esd )
+    mysd <- eta2theta(eta[, 2], .lsd   , earg = .esd   )
     a1vec <- .a1
     a2vec <- .a2
     if (residuals) {
@@ -1110,7 +1110,7 @@ lqnorm.control <- function(trace = TRUE, ...) {
 
 lqnorm <- function(qpower = 2,
                    link = "identitylink",
-                   imethod = 1, imu = NULL, shrinkage.init = 0.95) {
+                   imethod = 1, imu = NULL, ishrinkage = 0.95) {
 
 
   link <- as.list(substitute(link))
@@ -1126,10 +1126,10 @@ lqnorm <- function(qpower = 2,
       imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-      shrinkage.init < 0 ||
-      shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 ||
+      ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
 
@@ -1154,26 +1154,26 @@ lqnorm <- function(qpower = 2,
 
 
     predictors.names <- if (!is.null(dy[[2]])) dy[[2]] else
-                       paste("mu", 1:M, sep = "")
+                        paste("mu", 1:M, sep = "")
     predictors.names <- namesof(predictors.names, link = .link,
-                               earg = .earg, short = TRUE)
+                                earg = .earg, short = TRUE)
 
 
     if (!length(etastart))  {
-        meany <- weighted.mean(y, w)
-        mean.init <- rep(if (length( .i.mu )) .i.mu else {
-          if ( .imethod == 2) median(y) else 
-          if ( .imethod == 1) meany else
-            .sinit * meany + (1 - .sinit) * y
-        }, len = n)
-        etastart <- theta2eta(mean.init, link = .link, earg = .earg)
+      meany <- weighted.mean(y, w)
+      mean.init <- rep(if (length( .i.mu )) .i.mu else {
+        if ( .imethod == 2) median(y) else 
+        if ( .imethod == 1) meany else
+          .ishrinkage * meany + (1 - .ishrinkage ) * y
+      }, len = n)
+      etastart <- theta2eta(mean.init, link = .link, earg = .earg)
     }
   }), list( .imethod = imethod, .i.mu = imu,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .link = link, .earg = earg ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-      mu <- eta2theta(eta, link = .link, earg = .earg)
-      mu
+    mu <- eta2theta(eta, link = .link , earg = .earg )
+    mu
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
     dy <- dimnames(y)
@@ -1381,9 +1381,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
     imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
   if ( # length(Lower) != 1 || length(Upper) != 1 ||
-    !is.numeric(Lower) ||
-    !is.numeric(Upper) ||
-    any(Lower >= Upper))
+      !is.numeric(Lower) ||
+      !is.numeric(Upper) ||
+      any(Lower >= Upper))
     stop("Lower and Upper must ",
          "be numeric with Lower < Upper")
 
@@ -1396,7 +1396,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
     stop("argument 'nsimEIM' should be an integer greater than 10")
 
   if (mode(type.fitted) != "character" && mode(type.fitted) != "name")
-        type.fitted <- as.character(substitute(type.fitted))
+    type.fitted <- as.character(substitute(type.fitted))
   type.fitted <- match.arg(type.fitted,
                            c("uncensored", "censored", "mean.obs"))[1]
 
@@ -1417,15 +1417,17 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
 
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
+         type.fitted = .type.fitted ,
          zero = .zero ,
          nsimEIM = .nsimEIM )
-  }, list( .zero = zero, .nsimEIM = nsimEIM ))),
+  }, list( .zero = zero, .nsimEIM = nsimEIM,
+           .type.fitted = type.fitted ))),
 
   initialize = eval(substitute(expression({
     M1 <- 2
@@ -1449,6 +1451,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
     Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
     Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
 
+    extra$type.fitted <- .type.fitted
     extra$censoredL <- (y <= Lowmat)
     extra$censoredU <- (y >= Uppmat)
     if (any(y < Lowmat)) {
@@ -1467,75 +1470,90 @@ tobit.control <- function(save.weight = TRUE, ...) {
     temp2.names <-
       if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = "")
     predictors.names <-
-        c(namesof(temp1.names, .lmu, earg = .emu, tag = FALSE),
-          namesof(temp2.names, .lsd , earg = .esd, tag = FALSE))
+      c(namesof(temp1.names, .lmu , earg = .emu , tag = FALSE),
+        namesof(temp2.names, .lsd , earg = .esd , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
 
     if (!length(etastart)) {
       anyc <- cbind(extra$censoredL | extra$censoredU)
-      i11 <- if ( .imethod == 1) anyc else FALSE # can be all data
+      i11 <- if ( .imethod == 1) anyc else FALSE  # can be all data
 
       mu.init <-
       sd.init <- matrix(0.0, n, ncoly)
       for (ii in 1:ncol(y)) {
         use.i11 <- i11[, ii]
         mylm <- lm.wfit(x = cbind(x[!use.i11, ]),
-                       y = y[!use.i11, ii], w = w[!use.i11, ii])
+                        y = y[!use.i11, ii], w = w[!use.i11, ii])
         sd.init[, ii] <- sqrt( sum(w[!use.i11, ii] * mylm$resid^2)
                               / mylm$df.residual ) * 1.5
         mu.init[!use.i11, ii] <- mylm$fitted.values
         if (any(anyc[, ii]))
           mu.init[anyc[, ii], ii] <- x[anyc[, ii],, drop = FALSE] %*%
-                                    mylm$coeff
+                                     mylm$coeff
       }
 
-      if (length( .i.mu ))
-        mu.init <- matrix( .i.mu , n, ncoly, byrow = TRUE)
+      if (length( .Imu ))
+        mu.init <- matrix( .Imu , n, ncoly, byrow = TRUE)
       if (length( .isd ))
         sd.init <- matrix( .isd , n, ncoly, byrow = TRUE)
 
-      etastart <- cbind(theta2eta(mu.init, .lmu, earg = .emu ),
-                       theta2eta(sd.init, .lsd , earg = .esd ))
+      etastart <- cbind(theta2eta(mu.init, .lmu , earg = .emu ),
+                        theta2eta(sd.init, .lsd , earg = .esd ))
 
       etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
     }
  }), list( .Lower = Lower, .Upper = Upper,
            .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd,
-           .i.mu = imu, .isd = isd,
+           .Imu = imu, .isd = isd,
+           .type.fitted = type.fitted,
            .imethod = imethod ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
     M1 <- 2
     ncoly <- ncol(eta) / M1
-    mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop=FALSE], .lmu, earg = .emu )
-    if ( .type.fitted == "uncensored")
+    mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
+                     .lmu , earg = .emu )
+
+
+
+    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
+                     warning("cannot find 'type.fitted'. ",
+                             "Returning 'uncensored'.")
+                     "uncensored"
+                   }
+
+    type.fitted <- match.arg(type.fitted,
+                             c("uncensored", "censored", "mean.obs"))[1]
+
+    if ( type.fitted == "uncensored")
       return(mum)
 
-    Lowmat <- matrix( .Lower, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
-    Uppmat <- matrix( .Upper, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
-    if ( .type.fitted == "censored") {
+    Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+    Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+    if ( type.fitted == "censored") {
       mum[mum < Lowmat] <- Lowmat[mum < Lowmat]
       mum[mum > Uppmat] <- Uppmat[mum > Uppmat]
       mum
     } else {
 
+
       sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
-                      .lsd , earg = .esd )
+                       .lsd , earg = .esd )
       zeddL <- (Lowmat - mum) / sdm
       zeddU <- (Uppmat - mum) / sdm
       Phi.L <- pnorm(zeddL)
       phi.L <- dnorm(zeddL)
       Phi.U <- pnorm(zeddU)
       phi.U <- dnorm(zeddU)
+
       mum * (Phi.U - Phi.L) +
       sdm * (phi.L - phi.U) +
-      Lowmat *      Phi.L +
-      Uppmat * (1 - Phi.U)
+      ifelse(is.infinite(Lowmat), 0, Lowmat *      Phi.L ) +
+      ifelse(is.infinite(Uppmat), 0, Uppmat * (1 - Phi.U))
     }
   }, list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd,
-           .Lower = Lower, .Upper = Upper,
-           .type.fitted = type.fitted ))),
+           .Lower = Lower, .Upper = Upper ))),
   last = eval(substitute(expression({
 
     temp0303 <- c(rep( .lmu, length = ncoly),
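
tobit() now records type.fitted in extra (and reports it via infos()) and
resolves it at prediction time, warning and falling back to "uncensored"
if it is absent, instead of hard-coding it into the linkinv closure. For
type.fitted = "mean.obs" the fitted value is
E(Y) = mu*(Phi.U - Phi.L) + sd*(phi.L - phi.U) + Lower*Phi.L +
Upper*(1 - Phi.U), with Phi.L = pnorm((Lower - mu)/sd) etc.; the new
ifelse() guards zero the boundary terms when a bound is infinite, since
the default Upper = Inf would otherwise give Inf * 0 = NaN. A hypothetical
left-censored fit:

    library(VGAM)
    set.seed(3)
    tdata <- data.frame(x2 = runif(200))
    tdata <- transform(tdata, ystar = 1 + 2 * x2 + rnorm(200))
    tdata <- transform(tdata, y = pmax(ystar, 0))  # left-censored at 0
    fit <- vglm(y ~ x2, tobit(Lower = 0, type.fitted = "mean.obs"),
                data = tdata)
    head(fitted(fit))  # E(Y) of the censored response, never below 0
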
@@ -1585,7 +1603,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     cenL <- extra$censoredL
     cenU <- extra$censoredU
-    cen0 <- !cenL & !cenU   # uncensored obsns
+    cen0 <- !cenL & !cenU  # uncensored obsns
     Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
     Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
 
@@ -1595,7 +1613,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
     sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
                      .lsd , earg = .esd )
 
-    ell0 <- dnorm(  y[cen0], mean = mum[cen0], sd = sdm[cen0],
+    ell0 <- dnorm(     y[cen0], mean = mum[cen0], sd = sdm[cen0],
                   log = TRUE)
     ellL <- pnorm(Lowmat[cenL], mean = mum[cenL], sd = sdm[cenL],
                   log.p = TRUE, lower.tail = TRUE)
@@ -1639,10 +1657,10 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     cenL <- extra$censoredL
     cenU <- extra$censoredU
-    cen0 <- !cenL & !cenU   # uncensored obsns
+    cen0 <- !cenL & !cenU  # uncensored obsns
 
     mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
-                     .lmu, earg = .emu )
+                     .lmu , earg = .emu )
     sdm <- eta2theta(eta[, M1*(1:ncoly)-0, drop = FALSE],
                      .lsd , earg = .esd )
 
@@ -1650,7 +1668,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
     dl.dmu <- zedd / sdm
     dl.dsd <- (zedd^2 - 1) / sdm
 
-    dmu.deta <- dtheta.deta(mum, .lmu, earg = .emu )
+    dmu.deta <- dtheta.deta(mum, .lmu , earg = .emu )
     dsd.deta <- dtheta.deta(sdm, .lsd , earg = .esd )
 
     if (any(cenL)) {
@@ -1668,7 +1686,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
       PhiU <- pnorm(temp21U, lower.tail = FALSE)
       phiU <- dnorm(temp21U)
       fred21 <- -phiU / PhiU
-      dl.dmu[cenU] <- -fred21 / sdm[cenU]   # Negated
+      dl.dmu[cenU] <- -fred21 / sdm[cenU]  # Negated
       dl.dsd[cenU] <-  fred21 * (-mumU[cenU] / sdm[cenU]^2)
     }
 
@@ -1688,7 +1706,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
     if (is.numeric( .nsimEIM ) &&
-        ! .stdTobit ) {
+      ! .stdTobit ) {
 
 
     run.varcov <- 0
@@ -1723,7 +1741,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
         PhiU <- pnorm(temp21U, lower.tail = FALSE)
         phiU <- dnorm(temp21U)
         fred21 <- -phiU / PhiU
-        dl.dmu[cenU] <- -fred21 / sdvec[cenU]   # Negated
+        dl.dmu[cenU] <- -fred21 / sdvec[cenU]  # Negated
         dl.dsd[cenU] <-  fred21 * (-mumU[cenU] / sdvec[cenU]^2)
       }
 
@@ -1779,11 +1797,11 @@ tobit.control <- function(save.weight = TRUE, ...) {
                    dThetas.detas[, M1 * (spp. - 1) + ind1$col]
 
       for (jay in 1:M1)
-          for (kay in jay:M1) {
-              cptr <- iam((spp. - 1) * M1 + jay,
-                          (spp. - 1) * M1 + kay,
-                          M = M)
-              wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
+        for (kay in jay:M1) {
+          cptr <- iam((spp. - 1) * M1 + jay,
+                      (spp. - 1) * M1 + kay,
+                      M = M)
+          wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
       }
       }  # End of for (spp.) loop
 
@@ -1883,14 +1901,14 @@ tobit.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
 
     constraints <-
-      cm.vgam(matrix(1, M, 1), x = x,
+      cm.VGAM(matrix(1, M, 1), x = x,
               bool = .parallel ,
               constraints = constraints,
               apply.int = .apply.parint )
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
 
@@ -1898,6 +1916,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
          zero = .zero)
   }, list( .zero = zero ))),
 
@@ -1972,8 +1992,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
           if ( .imethod == 1) median(y[, jay]) else
           if ( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
           if ( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
-                             0.5 + y[, jay] * 0.5 else
-                                 mean(jfit$fitted)
+                              0.5 + y[, jay] * 0.5 else
+                              mean(jfit$fitted)
 
         sdev.init[, jay] <-
           if ( .imethod == 1) {
@@ -2188,9 +2208,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
             .smallno = smallno,
             .var.arg = var.arg ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, M)  # diag matrix; y is 1-column too
-
-
+    wz <- matrix(as.numeric(NA), n, M)  # Diagonal matrix
 
 
     ned2l.dmu2 <- 1 / sdev^2
@@ -2284,7 +2302,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
       dotzero <- M
 
     M1 <- NA
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero 
           ))),
 
@@ -2360,12 +2378,13 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     
 
-  if (any(is.mlogit <- (unlist(link.list.ordered) == "mlogit"))) {
-    if (sum(is.mlogit) < 2)
-      stop("at least two 'mlogit' links need to be specified, else none")
-    col.index.is.mlogit <- (1:length(is.mlogit))[is.mlogit]
-    extra$col.index.is.mlogit <- col.index.is.mlogit
-    extra$is.mlogit <- is.mlogit
+  if (any(is.multilogit <- (unlist(link.list.ordered) == "multilogit"))) {
+    if (sum(is.multilogit) < 2)
+      stop("at least two 'multilogit' links need to be specified, ",
+           "else none")
+    col.index.is.multilogit <- (1:length(is.multilogit))[is.multilogit]
+    extra$col.index.is.multilogit <- col.index.is.multilogit
+    extra$is.multilogit <- is.multilogit
   }
 
     
@@ -2385,7 +2404,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     extra$ncoly <- ncoly <- ncol(y)
     extra$M <- M <- ncol(Xm2) + 1 -
-                    (length(extra$is.mlogit) > 0)
+                    (length(extra$is.multilogit) > 0)
     M1 <- NA  # Since this cannot be determined apriori.
 
     extra$M1 <- M1
@@ -2406,8 +2425,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
   }
   extra$all.mynames1 <- all.mynames1 <- mynames1
 
-  if (LLL <- length(extra$is.mlogit)) {
-    mynames1 <- mynames1[-max(extra$col.index.is.mlogit)]
+  if (LLL <- length(extra$is.multilogit)) {
+    mynames1 <- mynames1[-max(extra$col.index.is.multilogit)]
   }
 
     mynames2 <- paste(if ( .var.arg ) "var" else "sd",
@@ -2463,10 +2482,10 @@ tobit.control <- function(save.weight = TRUE, ...) {
       }
 
       if (!icoefficients.given)
-      if (LLL <- length(extra$is.mlogit)) {
-        raw.coeffs <- jfit.coeff[extra$col.index.is.mlogit]
+      if (LLL <- length(extra$is.multilogit)) {
+        raw.coeffs <- jfit.coeff[extra$col.index.is.multilogit]
         possum1 <- (0.01 + abs(raw.coeffs)) / sum(0.01 + abs(raw.coeffs))
-        jfit.coeff[extra$is.mlogit] <- possum1
+        jfit.coeff[extra$is.multilogit] <- possum1
       }
 
 
@@ -2480,17 +2499,17 @@ tobit.control <- function(save.weight = TRUE, ...) {
           extra$earg.list[[jlocal]]
         }
 
-        if (length(extra$is.mlogit) && !extra$is.mlogit[jlocal])
+        if (length(extra$is.multilogit) && !extra$is.multilogit[jlocal])
           etamat.init[, jlocal] <-
             theta2eta(thetamat.init[, jlocal],
                       link = extra$link.list[[jlocal]],
                       earg = earg.use)
       }
 
-      if (LLL <- length(extra$col.index.is.mlogit)) {
-        etamat.init[, extra$col.index.is.mlogit[-LLL]] <-
-          mlogit(thetamat.init[, extra$col.index.is.mlogit])
-        etamat.init <- etamat.init[, -max(extra$col.index.is.mlogit)]
+      if (LLL <- length(extra$col.index.is.multilogit)) {
+        etamat.init[, extra$col.index.is.multilogit[-LLL]] <-
+          multilogit(thetamat.init[, extra$col.index.is.multilogit])
+        etamat.init <- etamat.init[, -max(extra$col.index.is.multilogit)]
       }
       
 
@@ -2539,10 +2558,10 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
   coffs <- eta[, -M, drop = FALSE]
 
-  if (LLL <- length(extra$col.index.is.mlogit)) {
-    last.one <- extra$col.index.is.mlogit[LLL]
+  if (LLL <- length(extra$col.index.is.multilogit)) {
+    last.one <- extra$col.index.is.multilogit[LLL]
     coffs <- cbind(coffs[, 1:(last.one-1)],
-                   probs.last.mlogit = 0,  # \eta_j\equiv0 for last "mlogit"
+                   probs.last.multilogit = 0,  # \eta_j\equiv0 for last "multilogit"
                    if (last.one == M) NULL else
                    coffs[, last.one:ncol(coffs)])
     colnames(coffs) <- extra$all.mynames1 
@@ -2556,8 +2575,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
       extra$earg.list[[jlocal]]
     }
 
-    if (length(extra$is.mlogit) && !extra$is.mlogit[jlocal]) {
-      iskip <- (jlocal > max(extra$col.index.is.mlogit))
+    if (length(extra$is.multilogit) && !extra$is.multilogit[jlocal]) {
+      iskip <- (jlocal > max(extra$col.index.is.multilogit))
       coffs[, jlocal] <- eta2theta(eta[, jlocal - iskip],
                                    link = extra$link.list[[jlocal]],
                                    earg = earg.use)
@@ -2565,9 +2584,9 @@ tobit.control <- function(save.weight = TRUE, ...) {
   }
 
 
-    if (LLL <- length(extra$col.index.is.mlogit)) {
-      coffs[, extra$col.index.is.mlogit] <-
-        mlogit(eta[, extra$col.index.is.mlogit[-LLL], drop = FALSE],
+    if (LLL <- length(extra$col.index.is.multilogit)) {
+      coffs[, extra$col.index.is.multilogit] <-
+        multilogit(eta[, extra$col.index.is.multilogit[-LLL], drop = FALSE],
                inverse = TRUE)
     }
 
@@ -2651,16 +2670,16 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     coffs <- eta[, -M, drop = FALSE]  # Exclude log(sdev) or log(var)
 
-    if (LLL <- length(extra$is.mlogit)) {
-      last.one <- max(extra$col.index.is.mlogit)
+    if (LLL <- length(extra$is.multilogit)) {
+      last.one <- max(extra$col.index.is.multilogit)
       coffs <- cbind(coffs[, 1:(last.one-1)],
-                     probsLastmlogit = 0,  # \eta_j\equiv0 for last "mlogit"
+                     probsLastmultilogit = 0,  # \eta_j\equiv0 for last "multilogit"
                      if (last.one == M) NULL else
                      coffs[, last.one:ncol(coffs)])
       colnames(coffs) <- extra$all.mynames1
     }
 
-    dcoffs.deta <- coffs  # Includes any last "mlogit"
+    dcoffs.deta <- coffs  # Includes any last "multilogit"
 
     for (jlocal in 1:ncol(coffs)) {
       earg.use <- if (!length(extra$earg.list[[jlocal]])) {
@@ -2669,26 +2688,26 @@ tobit.control <- function(save.weight = TRUE, ...) {
         extra$earg.list[[jlocal]]
       }
 
-      if (!length(extra$is.mlogit) ||
-          !extra$is.mlogit[jlocal]) {
-        iskip <- length(extra$is.mlogit) &&
-                 (jlocal  > max(extra$col.index.is.mlogit))
+      if (!length(extra$is.multilogit) ||
+          !extra$is.multilogit[jlocal]) {
+        iskip <- length(extra$is.multilogit) &&
+                 (jlocal  > max(extra$col.index.is.multilogit))
         coffs[, jlocal] <- eta2theta(eta[, jlocal - iskip],
                                      link = extra$link.list[[jlocal]],
                                      earg = earg.use)
       }
     }
 
-    if (LLL <- length(extra$col.index.is.mlogit)) {
-      coffs[, extra$col.index.is.mlogit] <-
-        mlogit(eta[, extra$col.index.is.mlogit[-LLL], drop = FALSE],
+    if (LLL <- length(extra$col.index.is.multilogit)) {
+      coffs[, extra$col.index.is.multilogit] <-
+        multilogit(eta[, extra$col.index.is.multilogit[-LLL], drop = FALSE],
                inverse = TRUE)
     }
 
 
   for (jlocal in 1:ncol(coffs)) {
-    if (!length(extra$is.mlogit) ||
-        !extra$is.mlogit[jlocal]) {
+    if (!length(extra$is.multilogit) ||
+        !extra$is.multilogit[jlocal]) {
       earg.use <- if (!length(extra$earg.list[[jlocal]])) {
         list(theta = NULL)
       } else {
@@ -2717,13 +2736,13 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
     
     dMu.deta <- dmu.dcoffs * dcoffs.deta  # n x pLM, but may change below
-    if (LLL <- length(extra$col.index.is.mlogit)) {
-      dMu.deta[, extra$col.index.is.mlogit[-LLL]] <-
-         coffs[, extra$col.index.is.mlogit[-LLL]] *
-        (dmu.dcoffs[, extra$col.index.is.mlogit[-LLL]] -
-         rowSums(dmu.dcoffs[, extra$col.index.is.mlogit]  *
-                      coffs[, extra$col.index.is.mlogit]))
-      dMu.deta <- dMu.deta[, -extra$col.index.is.mlogit[LLL]]
+    if (LLL <- length(extra$col.index.is.multilogit)) {
+      dMu.deta[, extra$col.index.is.multilogit[-LLL]] <-
+         coffs[, extra$col.index.is.multilogit[-LLL]] *
+        (dmu.dcoffs[, extra$col.index.is.multilogit[-LLL]] -
+         rowSums(dmu.dcoffs[, extra$col.index.is.multilogit]  *
+                      coffs[, extra$col.index.is.multilogit]))
+      dMu.deta <- dMu.deta[, -extra$col.index.is.multilogit[LLL]]
     }
     
 
@@ -2756,8 +2775,8 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
-    if (length(extra$col.index.is.mlogit)) {
-      LLL <- max(extra$col.index.is.mlogit)
+    if (length(extra$col.index.is.multilogit)) {
+      LLL <- max(extra$col.index.is.multilogit)
       dmu.dcoffs <- dmu.dcoffs[, -LLL]
       dcoffs.deta <- dcoffs.deta[, -LLL]
     }
@@ -2772,10 +2791,10 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
  
-    if ((LLL <- length(extra$col.index.is.mlogit))) {
-       dmu.dcoffs[, extra$col.index.is.mlogit[-LLL]] <-
-         dMu.deta[, extra$col.index.is.mlogit[-LLL]]
-      dcoffs.deta[, extra$col.index.is.mlogit[-LLL]] <- 1
+    if ((LLL <- length(extra$col.index.is.multilogit))) {
+       dmu.dcoffs[, extra$col.index.is.multilogit[-LLL]] <-
+         dMu.deta[, extra$col.index.is.multilogit[-LLL]]
+      dcoffs.deta[, extra$col.index.is.multilogit[-LLL]] <- 1
      }
   
     twz  <- crossprod(dmu.dcoffs * sqrt(c(w))) / sum(w)
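
All the mlogit bookkeeping in this family function (is.mlogit,
col.index.is.mlogit, and friends) follows the link's rename to
multilogit. The link itself is user-visible; a minimal round trip,
assuming the default reference level (the last column, whose eta is
identically 0):

    library(VGAM)
    probs <- matrix(c(0.2, 0.3, 0.5), nrow = 1)
    etas <- multilogit(probs)         # log(p_j/p_last); one fewer column
    multilogit(etas, inverse = TRUE)  # recovers 0.2 0.3 0.5
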
@@ -2838,7 +2857,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
           namesof("meanlog", lmulog, earg = emulog, tag = TRUE), ", ",
           namesof("sdlog",   lsdlog, earg = esdlog, tag = TRUE)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2948,6 +2967,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
 
 
 
+if (FALSE)
  lognormal3 <- function(lmeanlog = "identitylink", lsdlog = "loge",
                         powers.try = (-3):3,
                         delta = NULL, zero = 2) {
@@ -2986,7 +3006,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
           namesof("sdlog",   lsdlog, earg = esdlog, tag = TRUE), "; ",
           namesof("lambda", "identitylink", earg = list(), tag = TRUE)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3402,14 +3422,14 @@ if (FALSE)
 
   constraints = eval(substitute(expression({
 
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
 
diff --git a/R/family.others.R b/R/family.others.R
index 1355deb..d3ded89 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -17,34 +17,38 @@
 
 
 
-dexppois <- function(x, lambda, betave = 1, log = FALSE) {
+
+
+
+
+dexppois <- function(x, rate = 1, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-  N <- max(length(x), length(lambda), length(betave))
-  x <- rep(x, len = N); lambda = rep(lambda, len = N);
-  betave <- rep(betave, len = N)
+  N <- max(length(x), length(shape), length(rate))
+  x     <- rep(x,     len = N)
+  shape <- rep(shape, len = N)
+  rate  <- rep(rate,  len = N)
 
   logdensity <- rep(log(0), len = N)
   xok <- (0 < x)
  
-  logdensity[xok] <- log(lambda[xok]) + log(betave[xok]) -
-                     log1p(-exp(-lambda[xok])) - lambda[xok] - 
-                     betave[xok] * x[xok] + lambda[xok] * 
-                     exp(-betave[xok] * x[xok])
+  logdensity[xok] <- log(shape[xok]) + log(rate[xok]) -
+                     log1p(-exp(-shape[xok])) - shape[xok] - 
+                     rate[xok] * x[xok] + shape[xok] * 
+                     exp(-rate[xok] * x[xok])
    
-  logdensity[lambda <= 0] <- NaN
-  logdensity[betave <= 0] <- NaN
+  logdensity[shape <= 0] <- NaN
+  logdensity[rate <= 0] <- NaN
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-qexppois<- function(p, lambda, betave = 1) {
-  ans <- -log(log(p * -(expm1(lambda)) +
-         exp(lambda)) / lambda) / betave
-  ans[(lambda <= 0) | (betave <= 0)] = NaN
+qexppois <- function(p, rate = 1, shape) {
+  ans <- -log(log(p * -(expm1(shape)) + exp(shape)) / shape) / rate
+  ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans[p < 0] <- NaN
   ans[p > 1] <- NaN
   ans
@@ -52,20 +56,20 @@ qexppois<- function(p, lambda, betave = 1) {
 
 
 
-pexppois<- function(q, lambda, betave = 1) {
-  ans <-(exp(lambda * exp(-betave * q)) -
-         exp(lambda)) / -expm1(lambda)  
+pexppois <- function(q, rate = 1, shape) {
+  ans <- (exp(shape * exp(-rate * q)) -
+          exp(shape)) / -expm1(shape)
   ans[q <= 0] <- 0
-  ans[(lambda <= 0) | (betave <= 0)] <- NaN
+  ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans
 }
 
 
 
-rexppois <- function(n, lambda, betave = 1) {
-  ans <- -log(log(runif(n) * -(expm1(lambda)) +
-         exp(lambda)) / lambda) / betave
-  ans[(lambda <= 0) | (betave <= 0)] <- NaN
+rexppois <- function(n, rate = 1, shape) {
+  ans <- -log(log(runif(n) * -(expm1(shape)) +
+         exp(shape)) / shape) / rate
+  ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans
 }
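
The exponential-Poisson d/p/q/r functions above swap the lambda/betave
parameterisation for shape/rate, with rate = 1 the default (mirroring
stats::dexp). A minimal consistency check under assumed values rate = 2,
shape = 1.5:

    library(VGAM)
    med <- qexppois(0.5, rate = 2, shape = 1.5)
    pexppois(med, rate = 2, shape = 1.5)                # 0.5
    integrate(dexppois, 0, Inf, rate = 2, shape = 1.5)  # ~ 1
    set.seed(4)
    mean(rexppois(1e5, rate = 2, shape = 1.5) <= med)   # ~ 0.5
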
 
@@ -75,19 +79,22 @@ rexppois <- function(n, lambda, betave = 1) {
 
 
 
- exppoisson <- function(llambda = "loge", lbetave = "loge",
-                        ilambda = 1.1,   ibetave = 2.0,
+
+
+ exppoisson <- function(lrate = "loge", lshape = "loge",
+                        irate = 2.0, ishape = 1.1,   
                         zero = NULL) {
 
-  llambda <- as.list(substitute(llambda))
-  elambda <- link2list(llambda)
-  llambda <- attr(elambda, "function.name")
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
 
-  lbetave <- as.list(substitute(lbetave))
-  ebetave <- link2list(lbetave)
-  lbetave <- attr(ebetave, "function.name")
+  lratee <- as.list(substitute(lrate))
+  eratee <- link2list(lratee)
+  lratee <- attr(eratee, "function.name")
 
 
+  iratee <- irate
 
 
 
@@ -95,28 +102,28 @@ rexppois <- function(n, lambda, betave = 1) {
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (length(ilambda) &&
-      !is.Numeric(ilambda, positive = TRUE))
-    stop("bad input for argument 'ilambda'")
-  if (length(ibetave) &&
-      !is.Numeric(ibetave, positive = TRUE))
-    stop("bad input for argument 'ibetave'")
+  if (length(ishape) &&
+      !is.Numeric(ishape, positive = TRUE))
+    stop("bad input for argument 'ishape'")
+  if (length(iratee) &&
+      !is.Numeric(iratee, positive = TRUE))
+    stop("bad input for argument 'irate'")
 
-  ilambda[abs(ilambda - 1) < 0.01] = 1.1
+  ishape[abs(ishape - 1) < 0.01] <- 1.1
 
 
   new("vglmff",
   blurb = c("Exponential Poisson distribution \n \n",
             "Links:    ",
-            namesof("lambda", llambda, earg = elambda), ", ",
-            namesof("betave", lbetave, earg = ebetave), "\n",
-            "Mean:     lambda/(expm1(lambda) * betave)) * ",
-                      "genhypergeo(c(1, 1),c(2, 2),lambda)"),
+            namesof("rate",  lratee, earg = eratee), ", ",
+            namesof("shape", lshape, earg = eshape), "\n",
+            "Mean:     shape/(expm1(shape) * rate)) * ",
+                      "genhypergeo(c(1, 1), c(2, 2), shape)"),
 
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
-    }), list( .zero = zero))),
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .zero = zero))),
 
   initialize = eval(substitute(expression({
 
@@ -129,60 +136,63 @@ rexppois <- function(n, lambda, betave = 1) {
 
 
     predictors.names <- c(
-      namesof("lambda", .llambda, earg = .elambda, short = TRUE),
-      namesof("betave", .lbetave, earg = .ebetave, short = TRUE))
+      namesof("rate",  .lratee, earg = .eratee, short = TRUE),
+      namesof("shape", .lshape, earg = .eshape, short = TRUE))
 
     if (!length(etastart)) {
-      betave.init <- if (length( .ibetave ))
-              rep( .ibetave , len = n) else
-              stop("Need to input a value into argument 'ibetave'")
-      lambda.init <- if (length( .ilambda ))
-                      rep( .ilambda , len = n) else
-                      (1/betave.init - mean(y)) / ((y * 
-                      exp(-betave.init * y))/n)
+      ratee.init <- if (length( .iratee ))
+              rep( .iratee , len = n) else
+              stop("Need to input a value into argument 'iratee'")
+      shape.init <- if (length( .ishape ))
+                      rep( .ishape , len = n) else
+                      (1/ratee.init - mean(y)) / ((y * 
+                      exp(-ratee.init * y))/n)
 
 
-      betave.init <- rep(weighted.mean(betave.init, w = w), len = n)
+      ratee.init <- rep(weighted.mean(ratee.init, w = w), len = n)
       
       etastart <-
-        cbind(theta2eta(lambda.init, .llambda ,earg = .elambda ),
-              theta2eta(betave.init, .lbetave ,earg = .ebetave ))
+        cbind(theta2eta(ratee.init, .lratee , earg = .eratee ),
+              theta2eta(shape.init, .lshape , earg = .eshape ))
+              
 
     }
-  }), list( .llambda = llambda, .lbetave = lbetave, 
-            .ilambda = ilambda, .ibetave = ibetave, 
-            .elambda = elambda, .ebetave = ebetave))), 
+  }), list( .lshape = lshape, .lratee = lratee, 
+            .ishape = ishape, .iratee = iratee, 
+            .eshape = eshape, .eratee = eratee))), 
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
-    betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+
 
 
-    -lambda * genhypergeo(c(1, 1), c(2, 2), lambda) / (expm1(-lambda) *
-    betave)
-  }, list( .llambda = llambda, .lbetave = lbetave, 
-           .elambda = elambda, .ebetave = ebetave))), 
 
-  last = eval(substitute(expression({
-    misc$link <-    c(lambda = .llambda , betave = .lbetave )
 
-    misc$earg <- list(lambda = .elambda , betave = .ebetave )
+    qexppois(p = 0.5, rate = ratee, shape = shape)
+  }, list( .lshape = lshape, .lratee = lratee, 
+           .eshape = eshape, .eratee = eratee))), 
+
+  last = eval(substitute(expression({
+    misc$link <-    c( rate = .lratee , shape = .lshape )
+    misc$earg <- list( rate = .eratee , shape = .eshape )
 
     misc$expected <- TRUE
     misc$multipleResponses <- FALSE
-  }), list( .llambda = llambda, .lbetave = lbetave,
-            .elambda = elambda, .ebetave = ebetave))), 
+  }), list( .lshape = lshape, .lratee = lratee,
+            .eshape = eshape, .eratee = eratee))), 
 
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
-    betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dexppois(x = y, lambda = lambda, betave = betave,
+      ll.elts <- c(w) * dexppois(x = y, shape = shape, rate = ratee,
                                  log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -190,42 +200,43 @@ rexppois <- function(n, lambda, betave = 1) {
         ll.elts
       }
     }
-  }, list( .lbetave = lbetave , .llambda = llambda , 
-           .elambda = elambda , .ebetave = ebetave ))), 
+  }, list( .lratee = lratee , .lshape = lshape , 
+           .eshape = eshape , .eratee = eratee ))), 
 
   vfamily = c("exppoisson"),
 
   deriv = eval(substitute(expression({
-    lambda <- eta2theta(eta[, 1], .llambda , earg = .elambda )
-    betave <- eta2theta(eta[, 2], .lbetave , earg = .ebetave )
-    dl.dbetave <- 1/betave - y - y * lambda * exp(-betave * y)
-    dl.dlambda <- 1/lambda - 1/expm1(lambda) - 1 + exp(-betave * y)
-    dbetave.deta <- dtheta.deta(betave, .lbetave , earg = .ebetave )
-    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
-    c(w) * cbind(dl.dlambda * dlambda.deta,
-                 dl.dbetave * dbetave.deta)
-  }), list( .llambda = llambda, .lbetave = lbetave,
-            .elambda = elambda, .ebetave = ebetave ))), 
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+
+    dl.dratee <- 1/ratee - y - y * shape * exp(-ratee * y)
+    dl.dshape <- 1/shape - 1/expm1(shape) - 1 + exp(-ratee * y)
+    dratee.deta <- dtheta.deta(ratee, .lratee , earg = .eratee )
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    c(w) * cbind(dl.dratee * dratee.deta,
+                 dl.dshape * dshape.deta)
+  }), list( .lshape = lshape, .lratee = lratee,
+            .eshape = eshape, .eratee = eratee ))), 
 
   weight = eval(substitute(expression({
     
-    temp1 <- -expm1(-lambda)
+    temp1 <- -expm1(-shape)
     
-    ned2l.dlambda2 <- (1 + exp(2 * lambda) - lambda^2 * exp(lambda) - 2 *
-                    exp(lambda)) / (lambda * temp1)^2
+    ned2l.dshape2 <- (1 + exp(2 * shape) - shape^2 * exp(shape) - 2 *
+                      exp(shape)) / (shape * temp1)^2
 
 
-    ned2l.dbetave2 <- 1 / betave^2 - (lambda^2 * exp(-lambda) / (4 * 
-                    betave^2 * temp1)) * 
-                    genhypergeo(c(2, 2, 2),c(3, 3, 3),lambda) 
+    ned2l.dratee2 <- 1 / ratee^2 - (shape^2 * exp(-shape) / (4 * 
+                    ratee^2 * temp1)) * 
+                    genhypergeo(c(2, 2, 2), c(3, 3, 3), shape) 
 
-    ned2l.dbetavelambda <- (lambda * exp(-lambda) / (4 * betave * temp1)) *
-                         genhypergeo(c(2, 2),c(3, 3),lambda)   
+    ned2l.drateeshape <- (shape * exp(-shape) / (4 * ratee * temp1)) *
+                           genhypergeo(c(2, 2), c(3, 3), shape)   
 
     wz <- matrix(0, n, dimm(M))
-    wz[, iam(1, 1, M)] <- dlambda.deta^2 * ned2l.dlambda2
-    wz[, iam(2, 2, M)] <- dbetave.deta^2 * ned2l.dbetave2
-    wz[, iam(1, 2, M)] <- dbetave.deta * dlambda.deta * ned2l.dbetavelambda
+    wz[, iam(1, 1, M)] <- dratee.deta^2 * ned2l.dratee2
+    wz[, iam(1, 2, M)] <- dratee.deta * dshape.deta * ned2l.drateeshape
+    wz[, iam(2, 2, M)] <- dshape.deta^2 * ned2l.dshape2
     c(w) * wz
   }), list( .zero = zero ))))
 }
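
exppoisson() follows the same renaming (llambda/lbetave become
lshape/lrate), puts the rate on the first linear predictor, and replaces
the genhypergeo() mean expression in linkinv with the median,
qexppois(0.5, ...), which needs no hypergeometric evaluation. A
hypothetical intercept-only fit on simulated data:

    library(VGAM)
    set.seed(5)
    edata <- data.frame(y = rexppois(500, rate = 2, shape = 1.5))
    fit <- vglm(y ~ 1, exppoisson, data = edata)
    Coef(fit)       # back-transformed rate and shape
    fitted(fit)[1]  # the median of the fitted distribution
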
@@ -239,7 +250,7 @@ rexppois <- function(n, lambda, betave = 1) {
 
 
 
-dgenray <- function(x, shape, scale = 1, log = FALSE) {
+dgenray <- function(x, scale = 1, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -267,7 +278,7 @@ dgenray <- function(x, shape, scale = 1, log = FALSE) {
 }
 
 
-pgenray <- function(q, shape, scale = 1) {
+pgenray <- function(q, scale = 1, shape) {
   ans <- (-expm1(-(q/scale)^2))^shape
   ans[q <= 0] <- 0
   ans[(shape <= 0) | (scale <= 0)] <- NaN
@@ -275,7 +286,7 @@ pgenray <- function(q, shape, scale = 1) {
 }
 
 
-qgenray <- function(p, shape, scale = 1) {
+qgenray <- function(p, scale = 1, shape) {
   ans <- scale * sqrt(-log1p(-(p^(1/shape))))
   ans[(shape <= 0) | (scale <= 0)] <- NaN
   ans[p < 0] <- NaN
@@ -288,7 +299,7 @@ qgenray <- function(p, shape, scale = 1) {
 
 
 
-rgenray <- function(n, shape, scale = 1) {
+rgenray <- function(n, scale = 1, shape) {
   ans <- qgenray(runif(n), shape = shape, scale = scale)
   ans[(shape <= 0) | (scale <= 0)] <- NaN
   ans
@@ -302,10 +313,11 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
 }
 
 
- genrayleigh <- function(lshape = "loge", lscale = "loge",
-                         ishape = NULL,   iscale = NULL,
-                         tol12 = 1.0e-05, 
-                         nsimEIM = 300, zero = 1) {
+ genrayleigh <-
+  function(lscale = "loge", lshape = "loge",
+           iscale = NULL,   ishape = NULL,
+           tol12 = 1.0e-05, 
+           nsimEIM = 300, zero = 2) {
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -335,10 +347,10 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Generalized Rayleigh distribution\n",
             "Links:    ",
-            namesof("shape", lshape, earg = eshape), ", ",
-            namesof("scale", lscale, earg = escale), "\n"),
+            namesof("scale", lscale, earg = escale), ", ",
+            namesof("shape", lshape, earg = eshape), "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -355,8 +367,8 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <- c(
-      namesof("shape", .lshape , earg = .eshape , short = TRUE),
-      namesof("scale", .lscale , earg = .escale , short = TRUE))
+      namesof("scale", .lscale , earg = .escale , short = TRUE),
+      namesof("shape", .lshape , earg = .eshape , short = TRUE))
 
     if (!length(etastart)) {
       genrayleigh.Loglikfun <- function(scale, y, x, w, extraargs) {
@@ -371,8 +383,8 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
       scale.grid <- seq(0.2 * stats::sd(c(y)),
                         5.0 * stats::sd(c(y)), len = 29)
       scale.init <- if (length( .iscale )) .iscale else
-                    getMaxMin(scale.grid, objfun = genrayleigh.Loglikfun,
-                               y = y, x = x, w = w)
+                    grid.search(scale.grid, objfun = genrayleigh.Loglikfun,
+                                y = y, x = x, w = w)
       scale.init <- rep(scale.init, length = length(y))
  
       shape.init <- if (length( .ishape )) .ishape else
@@ -380,24 +392,25 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
                      w = w)
       shape.init <- rep(shape.init, length = length(y))
 
-      etastart <- cbind(theta2eta(shape.init, .lshape, earg = .eshape),
-                        theta2eta(scale.init, .lscale, earg = .escale))
+      etastart <- cbind(theta2eta(scale.init, .lscale , earg = .escale ),
+                        theta2eta(shape.init, .lshape , earg = .eshape ))
+                        
         }
     }), list( .lscale = lscale, .lshape = lshape,
               .iscale = iscale, .ishape = ishape,
               .escale = escale, .eshape = eshape))), 
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     qgenray(p = 0.5, shape = shape, scale = Scale)
   }, list( .lshape = lshape, .lscale = lscale, 
            .eshape = eshape, .escale = escale ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(shape = .lshape , scale = .lscale )
+    misc$link <-    c(scale = .lscale , shape = .lshape )
 
-    misc$earg <- list(shape = .eshape , scale = .escale )
+    misc$earg <- list(scale = .escale , shape = .eshape )
 
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
@@ -410,8 +423,8 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
 
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
 
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
@@ -430,11 +443,11 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
   vfamily = c("genrayleigh"),
 
   deriv = eval(substitute(expression({
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
-    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
-    dthetas.detas <- cbind(dshape.deta, dscale.deta)
+    dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
+    dthetas.detas <- cbind(dscale.deta, dshape.deta)
 
     temp1 <- y / Scale
     temp2 <- exp(-temp1^2)
@@ -447,7 +460,7 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
     dl.dshape[!is.finite(dl.dshape)] =
       max(dl.dshape[is.finite(dl.dshape)])
 
-    answer <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
+    answer <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
     answer
   }), list( .lshape = lshape , .lscale = lscale,
             .eshape = eshape,  .escale = escale ))),
@@ -458,22 +471,22 @@ genrayleigh.control <- function(save.weight = TRUE, ...) {
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
     for (ii in 1:( .nsimEIM )) {
-        ysim <- rgenray(n = n, shape = shape, scale = Scale)
-
-        temp1 <- ysim / Scale
-        temp2 <- exp(-temp1^2)  # May be 1 if ysim is very close to 0.
-        temp3 <- temp1^2 / Scale
-        AAA   <- 2 * temp1^2 / Scale  # 2 * y^2 / Scale^3
-        BBB   <- -expm1(-temp1^2)     # denominator
-        dl.dshape <- 1/shape + log1p(-temp2)
-        dl.dscale <- -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
-
-        dl.dshape[!is.finite(dl.dshape)] <- max(
-        dl.dshape[is.finite(dl.dshape)])
-
-        temp3 <- cbind(dl.dshape, dl.dscale)
-        run.varcov <- run.varcov + temp3[, ind1$row.index] *
-                                   temp3[, ind1$col.index]
+      ysim <- rgenray(n = n, shape = shape, scale = Scale)
+
+      temp1 <- ysim / Scale
+      temp2 <- exp(-temp1^2)  # May be 1 if ysim is very close to 0.
+      temp3 <- temp1^2 / Scale
+      AAA   <- 2 * temp1^2 / Scale  # 2 * y^2 / Scale^3
+      BBB   <- -expm1(-temp1^2)     # denominator
+      dl.dshape <- 1/shape + log1p(-temp2)
+      dl.dscale <- -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
+
+      dl.dshape[!is.finite(dl.dshape)] <-
+        max(dl.dshape[is.finite(dl.dshape)])
+
+      temp3 <- cbind(dl.dscale, dl.dshape)
+      run.varcov <- run.varcov + temp3[, ind1$row.index] *
+                                 temp3[, ind1$col.index]
     }
     run.varcov <- run.varcov / .nsimEIM
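
genrayleigh() now lists scale before shape: the linear predictors become
(scale, shape) and the default zero changes from 1 to 2, so shape remains
the intercept-only parameter. The grid search for the initial scale also
moves from getMaxMin() to grid.search(). A hypothetical fit:

    library(VGAM)
    set.seed(6)
    gdata <- data.frame(y = rgenray(200, scale = 2, shape = 1.5))
    fit <- vglm(y ~ 1, genrayleigh, data = gdata)
    Coef(fit)  # back-transformed scale and shape
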
 
@@ -553,6 +566,7 @@ rexpgeom <- function(n, scale = 1, shape) {
 
 
 
+
 expgeometric.control <- function(save.weight = TRUE, ...) {
   list(save.weight = save.weight)
 }
@@ -597,13 +611,13 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Exponential geometric distribution\n\n",
             "Links:    ",
-            namesof("Scale", lscale, earg = escale), ", ",
+            namesof("scale", lscale, earg = escale), ", ",
             namesof("shape", lshape, earg = eshape), "\n",
             "Mean:     ", "(shape - 1) * log(1 - ",
-            "shape) / (shape / Scale)"), 
+            "shape) / (shape / scale)"), 
                            
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
  
 
@@ -618,12 +632,8 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
 
 
 
-
-
-
-
     predictors.names <- c(
-      namesof("Scale", .lscale , earg = .escale , short = TRUE),
+      namesof("scale", .lscale , earg = .escale , short = TRUE),
       namesof("shape", .lshape , earg = .eshape , short = TRUE))
 
     if (!length(etastart)) {
@@ -662,9 +672,9 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
            .escale = escale, .eshape = eshape ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(Scale = .lscale , shape = .lshape )
+    misc$link <-    c(scale = .lscale , shape = .lshape )
 
-    misc$earg <- list(Scale = .escale , shape = .eshape )
+    misc$earg <- list(scale = .escale , shape = .eshape )
 
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
@@ -725,35 +735,35 @@ expgeometric.control <- function(save.weight = TRUE, ...) {
 
 
 
-        run.varcov <- 0
-        ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+      run.varcov <- 0
+      ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
 
-        if (length( .nsimEIM )) {
-            for (ii in 1:( .nsimEIM )) {
-                ysim <- rexpgeom(n, scale=Scale, shape=shape)
+      if (length( .nsimEIM )) {
+        for (ii in 1:( .nsimEIM )) {
+          ysim <- rexpgeom(n, scale = Scale, shape = shape)
 
-                temp2 <- exp(-ysim / Scale)
-                temp3 <- shape * temp2
-                temp4 <- ysim / Scale^2
-                dl.dscale <-  -1 / Scale + temp4 + 
-                             2 * temp4 * temp3 / (1 - temp3)
-                dl.dshape <- -1 / (1 - shape) + 
-                             2 * temp2 / (1 - temp3)
+          temp2 <- exp(-ysim / Scale)
+          temp3 <- shape * temp2
+          temp4 <- ysim / Scale^2
+          dl.dscale <-  -1 / Scale + temp4 + 
+                       2 * temp4 * temp3 / (1 - temp3)
+          dl.dshape <- -1 / (1 - shape) + 
+                       2 * temp2 / (1 - temp3)
 
-                temp6 <- cbind(dl.dscale, dl.dshape)
-                run.varcov <- run.varcov +
-                    temp6[,ind1$row.index] * temp6[,ind1$col.index]
-            }
+          temp6 <- cbind(dl.dscale, dl.dshape)
+          run.varcov <- run.varcov +
+              temp6[, ind1$row.index] * temp6[, ind1$col.index]
+        }

-            run.varcov <- run.varcov / .nsimEIM
+        run.varcov <- run.varcov / .nsimEIM

-            wz <- if (intercept.only)
-                matrix(colMeans(run.varcov),
-                       n, ncol(run.varcov), byrow = TRUE) else run.varcov
+        wz <- if (intercept.only)
+                matrix(colMeans(run.varcov),
+                       n, ncol(run.varcov), byrow = TRUE) else run.varcov

-            wz <- wz * dthetas.detas[, ind1$row] *
-                      dthetas.detas[, ind1$col]
+        wz <- wz * dthetas.detas[, ind1$row] *
+                   dthetas.detas[, ind1$col]
+      }
+    }
 
     c(w) * wz      
   }), list( .nsimEIM = nsimEIM ))))
@@ -879,12 +889,12 @@ explogff.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Exponential logarithmic distribution\n\n",
             "Links:    ",
-            namesof("Scale", lscale, earg = escale), ", ",
+            namesof("scale", lscale, earg = escale), ", ",
             namesof("shape", lshape, earg = eshape), "\n",
-            "Mean:     ", "(-polylog(2, 1 - p) * Scale) / log(shape)"),
+            "Mean:     ", "(-polylog(2, 1 - p) * scale) / log(shape)"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -899,7 +909,7 @@ explogff.control <- function(save.weight = TRUE, ...) {
 
 
     predictors.names <- c(
-      namesof("Scale", .lscale , earg = .escale , short = TRUE),
+      namesof("scale", .lscale , earg = .escale , short = TRUE),
       namesof("shape", .lshape , earg = .eshape , short = TRUE))
 
     if (!length(etastart)) {
@@ -929,20 +939,20 @@ explogff.control <- function(save.weight = TRUE, ...) {
              .escale = escale, .eshape = eshape))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
 
 
 
-    qexplog(p = 0.5, shape = shape, scale = Scale)  
+    qexplog(p = 0.5, shape = shape, scale = scale)  
 
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(Scale = .lscale , shape = .lshape )
+    misc$link <-    c(scale = .lscale , shape = .lshape )
 
-    misc$earg <- list(Scale = .escale , shape = .eshape )
+    misc$earg <- list(scale = .escale , shape = .eshape )
 
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
@@ -1220,7 +1230,7 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
             namesof("scale",     lscale,  earg = escale), "\n\n",
             "Mean: "),
   constraints = eval(substitute(expression({
-          constraints <- cm.zero.vgam(constraints, x, .zero, M)
+          constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1393,7 +1403,7 @@ tpnff3 <- function(llocation = "identitylink",
             namesof("skewpar",  lscale, earg = eskewp),  "\n\n",
             "Mean: "),
   constraints = eval(substitute(expression({
-          constraints <- cm.zero.vgam(constraints, x, .zero, M)
+          constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.positive.R b/R/family.positive.R
index ea83b84..da7f7f9 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -426,7 +426,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
  posnegbinomial <- function(lmunb = "loge", lsize = "loge",
                             isize = NULL, zero = -2,
                             nsimEIM = 250,
-                            shrinkage.init = 0.95, imethod = 1) {
+                            ishrinkage = 0.95, imethod = 1) {
 
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
@@ -434,10 +434,10 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
     stop("argument 'imethod' must be 1 or 2")
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
       stop("bad input for argument 'isize'")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
   lmunb <- as.list(substitute(lmunb))
@@ -466,7 +466,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
@@ -477,7 +477,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
          esize = .esize )
   }, list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
             .emunb = emunb, .esize = esize,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
 
   initialize = eval(substitute(expression({
@@ -524,7 +524,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
         } else {
           median(y[,iii])
         }
-        mu.init[, iii] <- (1 - .sinit) * y[, iii] + .sinit * use.this
+        mu.init[, iii] <- (1 - .ishrinkage ) * y[, iii] + .ishrinkage * use.this
       }
 
       if ( is.Numeric( .isize )) {
@@ -539,10 +539,11 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
             k.grid <- 2^((-6):6)
             kmat0 <- matrix(0, nrow = n, ncol = NOS)
             for (spp. in 1:NOS) {
-              kmat0[, spp.] <- getMaxMin(k.grid,
-                                objfun = posnegbinomial.Loglikfun,
-                                y = y[, spp.], x = x, w = w[, spp.],
-                                extraargs = mu.init[, spp.])
+              kmat0[, spp.] <-
+                grid.search(k.grid,
+                            objfun = posnegbinomial.Loglikfun,
+                            y = y[, spp.], x = x, w = w[, spp.],
+                            extraargs = mu.init[, spp.])
             }
       }
       p00 <- (kmat0 / (kmat0 + mu.init))^kmat0
@@ -554,7 +555,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...) {
     }
   }), list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
             .emunb = emunb, .esize = esize,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 2
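
posnegbinomial() gets the same two treatments seen earlier in this
commit: shrinkage.init becomes ishrinkage, and the grid search for the
initial size (k) value moves from getMaxMin() to grid.search(). A
hypothetical intercept-only fit on simulated positive negative binomial
counts:

    library(VGAM)
    set.seed(7)
    pdata <- data.frame(y = rposnegbin(500, munb = 5, size = 2))
    fit <- vglm(y ~ 1, posnegbinomial(ishrinkage = 0.9), data = pdata)
    Coef(fit)  # back-transformed munb and size
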
@@ -892,7 +893,7 @@ rposnegbin <- function(n, size, prob = NULL, munb = NULL) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1145,13 +1146,13 @@ dposbinom <- function(x, size, prob, log = FALSE) {
             namesof("prob", link, earg = earg, tag = FALSE),
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
                            bool = .parallel , 
                            constraints = constraints)
 
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 1,
@@ -1457,7 +1458,7 @@ if (length(extra$tau)) {
             namesof("probM", link, earg = earg, tag = FALSE),
             "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x, 
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
                            bool = .parallel.t , 
                            constraints = constraints,
                            apply.int = .apply.parint ,  #  TRUE,
@@ -1719,7 +1720,7 @@ if (length(extra$tau)) {
 
     cm.intercept.default <- if ( .I2 ) diag(2) else cbind(0:1, 1)
 
-    constraints <- cm.vgam(matrix(1, 2, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, 2, 1), x = x,
                            bool = .drop.b ,
                            constraints = constraints,
                            apply.int = .apply.parint.b ,  # TRUE, 
@@ -2100,7 +2101,7 @@ if (length(extra$tau)) {
     constraints.orig <- constraints
     cm1.d <-
     cmk.d <- matrix(0, M, 1)  # All 0s inside
-    con.d <- cm.vgam(matrix(1, M, 1), x = x,
+    con.d <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .drop.b ,
                            constraints = constraints.orig,
                            apply.int = .apply.parint.d ,  # FALSE,  
@@ -2111,7 +2112,7 @@ if (length(extra$tau)) {
 
     cm1.t <-
     cmk.t <- rbind(diag(tau), diag(tau)[-1, ])  # More readable
-    con.t <- cm.vgam(matrix(1, M, 1), x = x,
+    con.t <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel.t ,  # Same as .parallel.b
                            constraints = constraints.orig,
                            apply.int = .apply.parint.t ,  # FALSE,  
@@ -2122,7 +2123,7 @@ if (length(extra$tau)) {
 
     cm1.b <-
     cmk.b <- rbind(matrix(0, tau, tau-1), diag(tau-1))
-    con.b <- cm.vgam(matrix(c(rep(0, len = tau  ),
+    con.b <- cm.VGAM(matrix(c(rep(0, len = tau  ),
                               rep(1, len = tau-1)), M, 1), x = x,
+                           bool = .parallel.b ,
                            constraints = constraints.orig,
@@ -2131,7 +2132,6 @@ if (length(extra$tau)) {
                            cm.intercept.default = cm1.b)
    
     con.use <- con.b
-    con.names <- names(con.use)
     for (klocal in 1:length(con.b)) {
       con.use[[klocal]] <-
         cbind(if (any(con.d[[klocal]] == 1)) NULL else con.b[[klocal]],
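
The loop above column-binds the per-term constraint matrices of the 'd', 't' and 'b' blocks into a single list. The generic idea, sketched:

    # Constraints are per-term lists of matrices; cbind-ing two such
    # lists merges the corresponding coefficient blocks term by term.
    combine.constraints <- function(con.a, con.b)
      mapply(cbind, con.a, con.b, SIMPLIFY = FALSE)
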
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 7ec0bcc..6d74454 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -19,6 +19,12 @@
 
 
 
+
+
+
+
+
+
 lms.bcn.control <-
 lms.bcg.control <-
 lms.yjn.control <- function(trace = TRUE, ...)
@@ -26,22 +32,21 @@ lms.yjn.control <- function(trace = TRUE, ...)
 
 
 
-
-
  lms.bcn <- function(percentiles = c(25, 50, 75),
                       zero = c(1, 3),
                       llambda = "identitylink",
                       lmu = "identitylink",
                       lsigma = "loge",
-                      dfmu.init = 4,
-                      dfsigma.init = 2,
+                      idf.mu = 4,
+                      idf.sigma = 2,
                       ilambda = 1,
                       isigma = NULL,
-                      tol0 = 0.001, expectiles = FALSE) {
+                      tol0 = 0.001) {
   llambda <- as.list(substitute(llambda))
   elambda <- link2list(llambda)
   llambda <- attr(elambda, "function.name")
 
+
   lmu <- as.list(substitute(lmu))
   emu <- link2list(lmu)
   lmu <- attr(emu, "function.name")
@@ -59,20 +64,19 @@ lms.yjn.control <- function(trace = TRUE, ...)
   if (length(isigma) &&
       !is.Numeric(isigma, positive = TRUE))
     stop("bad input for argument 'isigma'")
-  if (length(expectiles) != 1 || !is.logical(expectiles))
-    stop("bad input for argument 'expectiles'")
 
 
 
   new("vglmff",
-  blurb = c("LMS ", if (expectiles) "Expectile" else "Quantile",
-            " Regression (Box-Cox transformation to normality)\n",
+  blurb = c("LMS ",
+            "quantile",
+            " regression (Box-Cox transformation to normality)\n",
             "Links:    ",
             namesof("lambda", link = llambda, earg = elambda), ", ",
             namesof("mu",     link = lmu,     earg = emu), ", ",
             namesof("sigma",  link = lsigma,  earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -89,16 +93,16 @@ lms.yjn.control <- function(trace = TRUE, ...)
     if (!length(etastart)) {
 
         Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
-                               y = y, w = w, df = .dfmu.init)
+                               y = y, w = w, df = .idf.mu )
         fv.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
 
         lambda.init <- if (is.Numeric( .ilambda )) .ilambda else 1.0
         sigma.init <- if (is.null(.isigma)) {
           myratio <- ((y/fv.init)^lambda.init - 1) / lambda.init
-          if (is.Numeric( .dfsigma.init )) {
+          if (is.Numeric( .idf.sigma )) {
             fit600 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
                                        y = myratio^2,
-                                       w = w, df = .dfsigma.init)
+                                       w = w, df = .idf.sigma)
             sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
           } else {
             sqrt(var(myratio))
@@ -114,38 +118,34 @@ lms.yjn.control <- function(trace = TRUE, ...)
     }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
             .elambda = elambda, .emu = emu, .esigma = esigma, 
-            .dfmu.init = dfmu.init,
-            .dfsigma.init = dfsigma.init,
+            .idf.mu = idf.mu,
+            .idf.sigma = idf.sigma,
             .ilambda = ilambda, .isigma = isigma ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
       eta[, 1] <- eta2theta(eta[, 1], .llambda, earg = .elambda)
       eta[, 2] <- eta2theta(eta[, 2], .lmu,     earg = .emu)
       eta[, 3] <- eta2theta(eta[, 3], .lsigma,  earg = .esigma)
-      if ( .expectiles ) {
-        explot.lms.bcn(percentiles = .percentiles, eta = eta)
-      } else {
-        qtplot.lms.bcn(percentiles = .percentiles, eta = eta)
-      }
+      qtplot.lms.bcn(percentiles = .percentiles, eta = eta)
   }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
            .elambda = elambda, .emu = emu, .esigma = esigma, 
-           .percentiles = percentiles, .expectiles = expectiles ))),
+           .percentiles = percentiles ))),
   last = eval(substitute(expression({
     misc$links <-    c(lambda = .llambda, mu = .lmu, sigma = .lsigma )
 
     misc$earg  <- list(lambda = .elambda, mu = .emu, sigma = .esigma )
 
     misc$tol0 <- .tol0
-    misc$percentiles <- .percentiles
-    misc$true.mu <- FALSE # @fitted is not a true mu
-    misc$expectiles <- .expectiles
+    misc$percentiles  <- .percentiles  # These are argument values
+    misc$true.mu <- FALSE  # @fitted is not a true mu
     if (control$cdf) {
-      post$cdf <- cdf.lms.bcn(y,
-                  eta0 = matrix(c(lambda, mymu, sigma), ncol = 3,
-                                dimnames = list(dimnames(x)[[1]], NULL)))
+      post$cdf <-
+        cdf.lms.bcn(y,
+                    eta0 = matrix(c(lambda, mymu, sigma), ncol = 3,
+                                  dimnames = list(dimnames(x)[[1]], NULL)))
     }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
             .elambda = elambda, .emu = emu, .esigma = esigma, 
-            .percentiles = percentiles, .expectiles = expectiles,
+            .percentiles = percentiles,
             .tol0 = tol0 ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
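
For context, the percentiles reported by lms.bcn() come from the standard LMS (Cole-Green) back-transformation, which is presumably what qtplot.lms.bcn() evaluates for each requested percentile:

    lms.bcn.q <- function(p, lambda, mu, sigma) {
      # The standard normal quantile z is mapped back through
      # y = mu * (1 + lambda*sigma*z)^(1/lambda).
      mu * (1 + lambda * sigma * qnorm(p))^(1 / lambda)
    }
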
@@ -192,13 +192,13 @@ lms.yjn.control <- function(trace = TRUE, ...)
     dl.dmu <- zedd / (mymu * sigma) + z2m1 * lambda / mymu
     dl.dsigma <- z2m1 / sigma
 
-    dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda)
-    dmu.deta     <- dtheta.deta(mymu,   .lmu, earg = .emu)
-    dsigma.deta  <- dtheta.deta(sigma,  .lsigma, earg = .esigma)
+    dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
+    dmu.deta     <- dtheta.deta(mymu,   .lmu     , earg = .emu )
+    dsigma.deta  <- dtheta.deta(sigma,  .lsigma  , earg = .esigma )
 
     c(w) * cbind(dl.dlambda  * dlambda.deta,
-                 dl.dmu    * dmu.deta,
-                 dl.dsigma * dsigma.deta)
+                 dl.dmu      * dmu.deta,
+                 dl.dsigma   * dsigma.deta)
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
             .elambda = elambda, .emu = emu, .esigma = esigma ))),
   weight = eval(substitute(expression({
@@ -214,7 +214,10 @@ lms.yjn.control <- function(trace = TRUE, ...)
     c(w) * wz
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
             .elambda = elambda, .emu = emu, .esigma = esigma ))))
-}
+}  # End of lms.bcn
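
A hypothetical call showing the renamed initialization arguments (idf.mu and idf.sigma replace dfmu.init and dfsigma.init, and the 'expectiles' argument is gone); 'mydata', 'y' and 'age' are placeholders:

    fit <- vgam(y ~ s(age), lms.bcn(percentiles = c(5, 50, 95),
                                    idf.mu = 4, idf.sigma = 2),
                data = mydata, trace = TRUE)
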
+
+
+
 
 
 
@@ -223,8 +226,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
                      llambda = "identitylink",
                      lmu = "identitylink",
                      lsigma = "loge",
-                     dfmu.init=4,
-                     dfsigma.init = 2,
+                     idf.mu = 4,
+                     idf.sigma = 2,
                      ilambda = 1,
                      isigma = NULL) {
   llambda <- as.list(substitute(llambda))
@@ -253,7 +256,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
             namesof("mu", link = lmu, earg = emu), ", ",
             namesof("sigma", link = lsigma, earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list(.zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -269,18 +272,18 @@ lms.yjn.control <- function(trace = TRUE, ...)
         if (!length(etastart)) {
 
           Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
-                                 y = y, w = w, df = .dfmu.init)
+                                 y = y, w = w, df = .idf.mu )
           fv.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
 
           lambda.init <- if (is.Numeric( .ilambda )) .ilambda else 1.0
 
           sigma.init <- if (is.null( .isigma )) {
             myratio <- ((y/fv.init)^lambda.init-1) / lambda.init
-            if (is.numeric( .dfsigma.init ) &&
-                is.finite( .dfsigma.init )) {
+            if (is.numeric( .idf.sigma ) &&
+                is.finite( .idf.sigma )) {
               fit600 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
                                        y = (myratio)^2,
-                                       w = w, df = .dfsigma.init )
+                                       w = w, df = .idf.sigma )
               sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
             } else {
               sqrt(var(myratio))
@@ -294,8 +297,8 @@ lms.yjn.control <- function(trace = TRUE, ...)
         }
   }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
             .elambda = elambda, .emu = emu, .esigma = esigma, 
-            .dfmu.init = dfmu.init,
-            .dfsigma.init = dfsigma.init,
+            .idf.mu = idf.mu,
+            .idf.sigma = idf.sigma,
             .ilambda = ilambda, .isigma = isigma ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     eta[, 1] <- eta2theta(eta[, 1], .llambda , earg = .elambda )
@@ -310,7 +313,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
 
     misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma )
 
-    misc$percentiles <- .percentiles
+    misc$percentiles  <- .percentiles  # These are argument values
     misc$true.mu <- FALSE    # $fitted is not a true mu
     if (control$cdf) {
       post$cdf <- cdf.lms.bcg(y, eta0 = matrix(c(lambda, mymu, sigma),
@@ -656,9 +659,9 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
                       llambda = "identitylink",
                       lmu = "identitylink",
                       lsigma = "loge",
-                      dfmu.init=4,
-                      dfsigma.init = 2,
-                      ilambda=1.0,
+                      idf.mu = 4,
+                      idf.sigma = 2,
+                      ilambda = 1.0,
                       isigma = NULL,
                       yoffset = NULL,
                       nsimEIM = 250) {
@@ -693,7 +696,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
             ", ",
             namesof("sigma", link = lsigma, earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list(.zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -719,17 +722,17 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
         if (smoothok <-
          (length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
           fit700 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
-                                   y = y.tx, w = w, df = .dfmu.init)
+                                   y = y.tx, w = w, df = .idf.mu )
           c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
         } else {
           rep(weighted.mean(y, w), length.out = n)
         }
 
         sigma.init <- if (!is.Numeric(.isigma)) {
-                     if (is.Numeric( .dfsigma.init) && smoothok) {
+                     if (is.Numeric( .idf.sigma) && smoothok) {
                      fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
                                       y = (y.tx - fv.init)^2,
-                                      w = w, df = .dfsigma.init)
+                                      w = w, df = .idf.sigma)
                           sqrt(c(abs(predict(fit710,
                                x = x[, min(ncol(x), 2)])$y)))
                    } else {
@@ -746,8 +749,8 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
       }
   }), list(.llambda = llambda, .lmu = lmu, .lsigma = lsigma,
            .elambda = elambda, .emu = emu, .esigma = esigma, 
-           .dfmu.init = dfmu.init,
-           .dfsigma.init = dfsigma.init,
+           .idf.mu = idf.mu,
+           .idf.sigma = idf.sigma,
            .ilambda = ilambda,
            .yoffset=yoffset,
            .isigma = isigma))),
@@ -766,7 +769,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
 
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
-    misc$percentiles <- .percentiles
+    misc$percentiles  <- .percentiles  # These are argument values
 
     misc$true.mu <- FALSE # $fitted is not a true mu
     misc[["yoffset"]] <- extra$yoffset
@@ -862,8 +865,8 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
                     zero = c(1, 3),
                     llambda = "identitylink",
                     lsigma = "loge",
-                    dfmu.init = 4,
-                    dfsigma.init = 2,
+                    idf.mu = 4,
+                    idf.sigma = 2,
                     ilambda = 1.0,
                     isigma = NULL,
                     rule = c(10, 5),
@@ -895,7 +898,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
             ", mu, ",
             namesof("sigma", link = lsigma, earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list(.zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -922,18 +925,18 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
             if (smoothok <-
                (length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
                 fit700 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
-                                        y = y.tx, w = w, df = .dfmu.init)
+                                        y = y.tx, w = w, df = .idf.mu )
                 fv.init <- c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
             } else {
                 fv.init <- rep(weighted.mean(y, w), length.out = n)
             }
 
             sigma.init <- if (!is.Numeric( .isigma )) {
-                           if (is.Numeric( .dfsigma.init) &&
+                           if (is.Numeric( .idf.sigma) &&
                                smoothok) {
                            fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
                                       y = (y.tx - fv.init)^2,
-                                      w = w, df = .dfsigma.init)
+                                      w = w, df = .idf.sigma)
                              sqrt(c(abs(predict(fit710,
                                         x = x[, min(ncol(x), 2)])$y)))
                            } else {
@@ -951,8 +954,8 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
   }), list(.lsigma = lsigma,
            .llambda = llambda,
            .esigma = esigma, .elambda = elambda,
-           .dfmu.init = dfmu.init,
-           .dfsigma.init = dfsigma.init,
+           .idf.mu = idf.mu,
+           .idf.sigma = idf.sigma,
            .ilambda = ilambda,
            .yoffset=yoffset,
            .isigma = isigma))),
@@ -968,12 +971,12 @@ lms.yjn2.control <- function(save.weight = TRUE, ...) {
           .lsigma = lsigma))),
   last = eval(substitute(expression({
     misc$link <-    c(lambda = .llambda, mu = "identitylink",
-                     sigma = .lsigma)
+                      sigma = .lsigma)
 
     misc$earg <- list(lambda = .elambda, mu = list(theta = NULL),
-                     sigma = .esigma)
+                      sigma = .esigma)
 
-    misc$percentiles <- .percentiles
+    misc$percentiles  <- .percentiles  # These are argument values
     misc$true.mu <- FALSE    # $fitted is not a true mu
     misc[["yoffset"]] <- extra$yoff
 
@@ -1264,7 +1267,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
             "Links:    ",
             namesof("expectile", link = lexpectile, earg = eexpectile)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
   }), list( .parallel = parallel ))),
@@ -1328,7 +1331,7 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
     ans
   }, list( .lexpectile = lexpectile, .eexpectile = eexpectile ))),
   last = eval(substitute(expression({
-    misc$link <- rep(.lexpectile, length = M)
+    misc$link <- rep(.lexpectile , length = M)
     names(misc$link) <- extra$y.names
 
     misc$earg <- vector("list", M)
@@ -1338,14 +1341,14 @@ amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
 
     misc$parallel <- .parallel
     misc$expected <- TRUE
-    extra$percentile <- numeric(M)
+    extra$percentile <- numeric(M)  # These are estimates (empirical)
     misc$multipleResponses <- TRUE
 
 
     for (ii in 1:M) {
-        use.w <- if (M > 1 && ncol(cbind(w)) == M) w[, ii] else w
-        extra$percentile[ii] <- 100 *
-          weighted.mean(myresid[, ii] <= 0, use.w)
+      use.w <- if (M > 1 && ncol(cbind(w)) == M) w[, ii] else w
+      extra$percentile[ii] <- 100 *
+        weighted.mean(myresid[, ii] <= 0, use.w)
     }
     names(extra$percentile) <- names(misc$link)
 
@@ -1427,7 +1430,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
             " asymmetric maximum likelihood estimation\n\n",
             "Link:     ", namesof("expectile", link, earg = earg)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
   }), list( .parallel = parallel ))),
@@ -1491,7 +1494,7 @@ amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
       misc$earg[[ilocal]] <- list(theta = NULL)
     names(misc$earg) <- names(misc$link)
 
-    extra$percentile <- numeric(M)
+    extra$percentile <- numeric(M)  # These are estimates (empirical)
     for (ii in 1:M)
       extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
     names(extra$percentile) <- names(misc$link)
@@ -1583,7 +1586,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
             "asymmetric maximum likelihood estimation\n\n",
             "Link:     ", namesof("expectile", link, earg = earg)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
   }), list( .parallel = parallel ))),
@@ -1660,7 +1663,7 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
     misc$parallel <- .parallel
     misc$expected <- TRUE
 
-    extra$percentile <- numeric(M)
+    extra$percentile <- numeric(M)  # These are estimates (empirical)
     for (ii in 1:M)
       extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
     names(extra$percentile) <- names(misc$link)
@@ -1752,7 +1755,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
             " asymmetric maximum likelihood estimation\n\n",
             "Link:     ", predictors.names),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
   }), list( .parallel = parallel ))),
@@ -1820,7 +1823,7 @@ amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
     names(misc$earg) <- names(misc$link)
 
 
-    extra$percentile <- numeric(M)
+    extra$percentile <- numeric(M)  # These are estimates (empirical)
     for (ii in 1:M)
       extra$percentile[ii] <- 100 * weighted.mean(myresid[, ii] <= 0, w)
     names(extra$percentile) <- names(misc$link)
@@ -2371,22 +2374,34 @@ pclogloglap <- function(q, location.ald = 0, scale.ald = 1,
 
 
 
+
+
 alaplace2.control <- function(maxit = 100, ...) {
   list(maxit = maxit)
 }
 
 
- alaplace2 <- function(tau = NULL,
-              llocation = "identitylink", lscale = "loge",
-              ilocation = NULL,       iscale = NULL,
-              kappa = sqrt(tau / (1-tau)),
-              shrinkage.init = 0.95,
-              parallelLocation = FALSE, digt = 4,
-              eq.scale = TRUE,
-              dfmu.init = 3,
-              intparloc = FALSE,
-              imethod = 1,
-              zero = -2) {
+ alaplace2 <-
+  function(tau = NULL,
+           llocation = "identitylink", lscale = "loge",
+           ilocation = NULL,           iscale = NULL,
+           kappa = sqrt(tau / (1-tau)),
+           ishrinkage = 0.95,
+
+           parallel.locat = TRUE  ~ 0,
+           parallel.scale = FALSE ~ 0,
+
+           digt = 4,
+           idf.mu = 3,
+           imethod = 1,
+           zero = -2) {
+
+
+
+  apply.parint.locat <- FALSE
+  apply.parint.scale <- TRUE
+
+
 
 
   llocat <- as.list(substitute(llocation))
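
Both parameterizations of the asymmetric Laplace families appear in this diff: kappa = sqrt(tau/(1 - tau)) in the signatures, and the inverse tau = kappa^2/(1 + kappa^2) used later for extra$tau. A quick consistency check:

    tau   <- c(0.25, 0.50, 0.75)
    kappa <- sqrt(tau / (1 - tau))
    kappa^2 / (1 + kappa^2)   # recovers 0.25 0.50 0.75
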
@@ -2410,10 +2425,10 @@ alaplace2.control <- function(maxit = 100, ...) {
   if (length(iscale) &&
       !is.Numeric(iscale, positive = TRUE))
     stop("bad input for argument 'iscale'")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-    shrinkage.init < 0 ||
-    shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+    ishrinkage < 0 ||
+    ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
   if (length(zero) &&
      !(is.Numeric(zero, integer.valued = TRUE) ||
        is.character(zero )))
@@ -2425,82 +2440,86 @@ alaplace2.control <- function(maxit = 100, ...) {
 
 
 
-  if (!is.logical(intparloc) || length(intparloc) != 1)
-    stop("argument 'intparloc' must be a single logical")
-  if (!is.logical(eq.scale) || length(eq.scale) != 1)
-    stop("argument 'eq.scale' must be a single logical")
-  if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
-    stop("argument 'parallelLocation' must be a single logical")
+
+
   fittedMean <- FALSE
   if (!is.logical(fittedMean) || length(fittedMean) != 1)
     stop("bad input for argument 'fittedMean'")
 
+
+
+
+
   new("vglmff",
   blurb = c("Two-parameter asymmetric Laplace distribution\n\n",
             "Links:      ",
-            namesof("location", llocat, earg = elocat), ", ",
-            namesof("scale",    lscale, earg = escale), "\n\n",
+            namesof("location1", llocat, earg = elocat), ", ",
+            namesof("scale1",    lscale, earg = escale), ", ",
+            namesof("location2", llocat, earg = elocat), ", ",
+            namesof("scale2",    lscale, earg = escale),
+            ", ..., ",
+            "\n\n",
             "Mean:       ",
             "location + scale * (1/kappa - kappa) / sqrt(2)", "\n",
             "Quantiles:  location", "\n",
             "Variance:   scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
-  constraints = eval(substitute(expression({
-
-
-
-    orig.constraints <- constraints
 
 
 
-    .PARALLEL <- .parallelLocation
 
+  constraints = eval(substitute(expression({
+ 
 
     onemat <- matrix(1, Mdiv2, 1)
-    locatHmat1 <- kronecker(if ( .intparloc ) onemat else
-                           diag(Mdiv2), rbind(1, 0))
-    scaleHmat1 <- kronecker(if ( .eq.scale ) onemat else
-                           diag(Mdiv2), rbind(0, 1))
-
-    locatHmatk <- kronecker(if ( .PARALLEL ) onemat else
-                           diag(Mdiv2), rbind(1, 0))
-    scaleHmatk <- scaleHmat1
-
-
-    constraints <- cm.vgam(cbind(locatHmatk, scaleHmatk),
-                           x = x,
-                           bool = .PARALLEL , 
-                           constraints = constraints,
-                           apply.int = FALSE)
-
-      if (names(constraints)[1] == "(Intercept)") {
-        constraints[["(Intercept)"]] = cbind(locatHmat1, scaleHmat1)
-      }
+    constraints.orig <- constraints
+
+
+    cm1.locat <- kronecker(diag(Mdiv2), rbind(1, 0))
+    cmk.locat <- kronecker(onemat,      rbind(1, 0))
+    con.locat <- cm.VGAM(cmk.locat,
+                         x = x, bool = .parallel.locat ,
+                         constraints = constraints.orig,
+                         apply.int = .apply.parint.locat ,
+                         cm.default           = cm1.locat,
+                         cm.intercept.default = cm1.locat)
+   
+    
 
+    cm1.scale <- kronecker(diag(Mdiv2), rbind(0, 1))
+    cmk.scale <- kronecker(onemat,      rbind(0, 1))
+    con.scale <- cm.VGAM(cmk.scale,
+                         x = x, bool = .parallel.scale ,
+                         constraints = constraints.orig,
+                         apply.int = .apply.parint.scale ,
+                         cm.default           = cm1.scale,
+                         cm.intercept.default = cm1.scale)
+   
+    con.use <- con.scale
+    for (klocal in 1:length(con.scale)) {
+      con.use[[klocal]] <- cbind(con.locat[[klocal]],
+                                 con.scale[[klocal]])
+    }
 
-      dotzero <- .zero
-      M1 <- 2
-      eval(negzero.expression)
-      constraints <- cm.zero.vgam(constraints, x, z.Index, M)
+    
+    constraints <- con.use
 
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+    constraints <- cm.zero.VGAM(constraints, x, z.Index, M)
+  }), list( .parallel.locat = parallel.locat,
+            .parallel.scale = parallel.scale,
+            .zero = zero,
+            .apply.parint.scale = apply.parint.scale,
+            .apply.parint.locat = apply.parint.locat ))),
 
 
 
-  if (length(orig.constraints)) {
-    if (!identical(orig.constraints, constraints)) {
-      warning("the inputted 'constraints' argument does not match with ",
-              "the 'zero', 'parallel', 'eq.scale' arguments. ",
-              "Using the inputted 'constraints'.")
-      constraints <- orig.constraints
-    }
-  }
 
-  }), list( .eq.scale = eq.scale,
-            .parallelLocation = parallelLocation,
-            .intparloc = intparloc,
-            .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
-         zero = .zero)
+         summary.pvalues = FALSE,
+         zero = .zero )
   }, list( .zero = zero ))),
   initialize = eval(substitute(expression({
     extra$M1 <- M1 <- 2
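
The rewritten constraints block builds the location and scale blocks separately and then interleaves them: the kronecker() calls place location coefficients on odd rows and scale coefficients on even rows of each constraint matrix. For example:

    Mdiv2 <- 2
    cm.locat <- kronecker(diag(Mdiv2), rbind(1, 0))
    cm.scale <- kronecker(diag(Mdiv2), rbind(0, 1))
    cbind(cm.locat, cm.scale)
    # rows are eta = (locat1, scale1, locat2, scale2), interleaved
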
@@ -2508,9 +2527,10 @@ alaplace2.control <- function(maxit = 100, ...) {
 
     temp5 <-
     w.y.check(w = w, y = y,
-              ncol.w.max = 1,
+              ncol.w.max = if (length( .kappa ) > 1) 1 else Inf,
               ncol.y.max = if (length( .kappa ) > 1) 1 else Inf,
               out.wy = TRUE,
+              colsyperw = 1,  # Uncommented 20140621
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
@@ -2558,27 +2578,28 @@ alaplace2.control <- function(maxit = 100, ...) {
     if (!length(etastart)) {
       for (jay in 1:Mdiv2) {
         y.use <- if (ncoly > 1) y[, jay] else y
+        Jay   <- if (ncoly > 1) jay else 1
         if ( .imethod == 1) {
-          locat.init[, jay] <- weighted.mean(y.use, w[, jay])
+          locat.init[, jay] <- weighted.mean(y.use, w[, Jay])
           scale.init[, jay] <- sqrt(var(y.use) / 2)
         } else if ( .imethod == 2) {
           locat.init[, jay] <- median(y.use)
-          scale.init[, jay] <- sqrt(sum(c(w[, jay]) *
-             abs(y - median(y.use))) / (sum(w[, jay]) * 2))
+          scale.init[, jay] <- sqrt(sum(c(w[, Jay]) *
+             abs(y - median(y.use))) / (sum(w[, Jay]) * 2))
         } else if ( .imethod == 3) {
           Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
-                                y = y.use, w = w[, jay],
-                                df = .dfmu.init )
+                                 y = y.use, w = w[, Jay],
+                                 df = .idf.mu )
           locat.init[, jay] <- predict(Fit5, x = x[, min(ncol(x), 2)])$y
-          scale.init[, jay] <-
-            sqrt(sum(c(w[, jay]) *
-            abs(y.use - median(y.use))) / (sum(w[, jay]) * 2))
+          scale.init[, jay] <- sqrt(sum(c(w[, Jay]) *
+                                    abs(y.use - median(y.use))) / (
+                                        sum(w[, Jay]) * 2))
         } else {
-          use.this <- weighted.mean(y.use, w[, jay])
-          locat.init[, jay] <- (1 - .sinit) * y.use + .sinit * use.this
-          scale.init[, jay] =
-            sqrt(sum(c(w[, jay]) *
-            abs(y.use - median(y.use ))) / (sum(w[, jay]) * 2))
+          use.this <- weighted.mean(y.use, w[, Jay])
+          locat.init[, jay] <- (1 - .ishrinkage ) * y.use + .ishrinkage * use.this
+          scale.init[, jay] <-
+            sqrt(sum(c(w[, Jay]) *
+            abs(y.use - median(y.use ))) / (sum(w[, Jay]) * 2))
         }
       }
 
@@ -2597,8 +2618,8 @@ alaplace2.control <- function(maxit = 100, ...) {
       etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
     }
   }), list( .imethod = imethod,
-            .dfmu.init = dfmu.init,
-            .sinit = shrinkage.init, .digt = digt,
+            .idf.mu = idf.mu,
+            .ishrinkage = ishrinkage, .digt = digt,
             .elocat = elocat, .escale = escale,
             .llocat = llocat, .lscale = lscale, .kappa = kappa,
             .ilocat = ilocat, .iscale = iscale ))),
@@ -2609,9 +2630,9 @@ alaplace2.control <- function(maxit = 100, ...) {
     dimnames(locat) <- list(dimnames(eta)[[1]], extra$y.names)
     myans <- if ( .fittedMean ) {
       kappamat <- matrix(extra$kappa, extra$n, extra$Mdiv2,
-                        byrow = TRUE)
+                         byrow = TRUE)
       Scale <- eta2theta(eta[, 2 * (1:Mdiv2)    , drop = FALSE],
-                        .lscale , earg = .escale )
+                         .lscale , earg = .escale )
       locat + Scale * (1/kappamat - kappamat)
     } else {
       locat
@@ -2626,10 +2647,10 @@ alaplace2.control <- function(maxit = 100, ...) {
     M1 <- extra$M1
 
     tmp34 <- c(rep( .llocat , length = Mdiv2),
-              rep( .lscale , length = Mdiv2))
+               rep( .lscale , length = Mdiv2))
     names(tmp34) <- c(mynames1, mynames2) 
     tmp34 <- tmp34[interleave.VGAM(M, M = M1)]
-    misc$link <- tmp34 # Already named
+    misc$link <- tmp34  # Already named
 
     misc$earg <- vector("list", M)
     misc$M1 <- M1
@@ -2644,22 +2665,20 @@ alaplace2.control <- function(maxit = 100, ...) {
     misc$expected <- TRUE
     extra$kappa <- misc$kappa <- .kappa
     extra$tau <- misc$tau <- misc$kappa^2 / (1 + misc$kappa^2)
-    misc$true.mu <- .fittedMean # @fitted is not a true mu?
-    misc$intparloc <- .intparloc
+    misc$true.mu <- .fittedMean  # @fitted is not a true mu?
 
     extra$percentile <- numeric(Mdiv2)  # length(misc$kappa)
     locat <- as.matrix(locat)
     for (ii in 1:Mdiv2) {
       y.use <- if (ncoly > 1) y[, ii] else y
+      Jay   <- if (ncoly > 1) ii else 1
       extra$percentile[ii] <- 100 * weighted.mean(y.use <= locat[, ii],
-                                                 w[, ii])
+                                                  w[, Jay])
     }
-    # if (ncoly > 1) names(misc$link) else zz:
     names(extra$percentile) <- y.names
   }), list( .elocat = elocat, .llocat = llocat,
             .escale = escale, .lscale = lscale,
             .fittedMean = fittedMean,
-            .intparloc = intparloc,
             .kappa = kappa ))),
 
   loglikelihood = eval(substitute(
@@ -2732,9 +2751,9 @@ alaplace2.control <- function(maxit = 100, ...) {
     kappamat <- matrix(extra$kappa, n, Mdiv2, byrow = TRUE)
     zedd <- abs(ymat - locat) / Scale
     dl.dlocat <- sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
-                sign(ymat - locat) / Scale
+                 sign(ymat - locat) / Scale
     dl.dscale <- sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
-                zedd / Scale - 1 / Scale
+                 zedd / Scale - 1 / Scale
     dlocat.deta <- dtheta.deta(locat, .llocat , earg = .elocat )
     dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
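
The score terms above follow from the asymmetric Laplace log-density; a sketch consistent with dl.dlocat and dl.dscale (VGAM's own dalap() is the authoritative version):

    dalap.sketch <- function(x, location = 0, scale = 1, kappa = 1) {
      # f(x) = sqrt(2)*kappa/(scale*(1+kappa^2)) *
      #        exp(-sqrt(2)*|x - location|/scale * kappa^(+/-1))
      zedd <- abs(x - location) / scale
      sqrt(2) * kappa / (scale * (1 + kappa^2)) *
        exp(-sqrt(2) * zedd * ifelse(x >= location, kappa, 1 / kappa))
    }
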
 
@@ -2757,8 +2776,7 @@ alaplace2.control <- function(maxit = 100, ...) {
     c(w) * wz
   }), list( .escale = escale, .lscale = lscale,
             .elocat = elocat, .llocat = llocat ))))
-}
-
+}  # End of alaplace2().
 
 
 
@@ -2777,19 +2795,31 @@ alaplace1.control <- function(maxit = 100, ...) {
 
 
 
- alaplace1 <- function(tau = NULL,
-                      llocation = "identitylink",
-                      ilocation = NULL,
-                      kappa = sqrt(tau/(1-tau)),
-                      Scale.arg = 1,
-                      shrinkage.init = 0.95,
-                      parallelLocation = FALSE, digt = 4,
-                      dfmu.init = 3,
-                      intparloc = FALSE,
-                      imethod = 1) {
 
 
 
+
+
+ alaplace1 <-
+  function(tau = NULL,
+           llocation = "identitylink",
+           ilocation = NULL,
+           kappa = sqrt(tau/(1-tau)),
+           Scale.arg = 1,
+           ishrinkage = 0.95,
+           parallel.locat = TRUE  ~ 0,  # FALSE,
+           digt = 4,
+           idf.mu = 3,
+           zero = NULL,
+           imethod = 1) {
+
+
+
+  apply.parint.locat <- FALSE
+
+
+
+  
   if (!is.Numeric(kappa, positive = TRUE))
     stop("bad input for argument 'kappa'")
   if (length(tau) &&
@@ -2810,17 +2840,17 @@ alaplace1.control <- function(maxit = 100, ...) {
   ilocat <- ilocation
 
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
   if (!is.Numeric(Scale.arg, positive = TRUE))
     stop("bad input for argument 'Scale.arg'")
 
-
-  if (!is.logical(parallelLocation) ||
-      length(parallelLocation) != 1)
-    stop("bad input for argument 'parallelLocation'")
+  if (length(zero) &&
+     !(is.Numeric(zero, integer.valued = TRUE) ||
+       is.character(zero )))
+      stop("bad input for argument 'zero'")
 
 
 
@@ -2841,42 +2871,38 @@ alaplace1.control <- function(maxit = 100, ...) {
                          "sqrt(2)", "\n",
             "Quantiles:  location", "\n",
             "Variance:   scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
-  constraints = eval(substitute(expression({
 
-    orig.constraints <- constraints
 
-    
 
 
-    onemat <- matrix(1, M, 1)
-    locatHmat1 <- if ( .intparloc ) onemat else diag(M)
-    locatHmatk <- if ( .parallelLocation ) onemat else diag(M)
-
-      constraints <- cm.vgam(locatHmatk, x = x,
-                             bool = .parallelLocation, 
-                             constraints = constraints,
-                             apply.int = FALSE)
-
-      if (names(constraints)[1] == "(Intercept)") {
-          constraints[["(Intercept)"]] = locatHmat1
-      }
+  constraints = eval(substitute(expression({
 
+    onemat <- matrix(1, M, 1)
+    constraints.orig <- constraints
+
+
+    cm1.locat <- diag(M)
+    cmk.locat <- onemat
+    con.locat <- cm.VGAM(cmk.locat,
+                         x = x, bool = .parallel.locat ,
+                         constraints = constraints.orig,
+                         apply.int = .apply.parint.locat ,
+                         cm.default           = cm1.locat,
+                         cm.intercept.default = cm1.locat)
+   
+    
+    constraints <- con.locat
 
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .parallel.locat = parallel.locat,
+            .zero = zero,
+            .apply.parint.locat = apply.parint.locat ))),
 
 
-  if (length(orig.constraints)) {
-    if (!identical(orig.constraints, constraints)) {
-      warning("the inputted 'constraints' argument does not match with ",
-              "the 'parallel', 'eq.scale' arguments. ",
-              "Using the inputted 'constraints'.")
-      constraints <- orig.constraints
-    }
-  }
 
-  }), list( .parallelLocation = parallelLocation,
-            .intparloc = intparloc ))),
   infos = eval(substitute(function(...) {
     list(M1 = 1,
+         summary.pvalues = FALSE,
          tau   = .tau,
          kappa = .kappa)
   }, list( .kappa = kappa,
@@ -2887,7 +2913,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
     temp5 <-
     w.y.check(w = w, y = y,
-              ncol.w.max = 1,
+              ncol.w.max = if (length( .kappa ) > 1) 1 else Inf,
               ncol.y.max = if (length( .kappa ) > 1) 1 else Inf,
               out.wy = TRUE,
               maximize = TRUE)
@@ -2906,13 +2932,13 @@ alaplace1.control <- function(maxit = 100, ...) {
     extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
 
 
-        extra$M <- M <- max(length( .Scale.arg ),
-                          ncoly,
-                          length( .kappa ))  # Recycle
-        extra$Scale <- rep( .Scale.arg, length = M)
-        extra$kappa <- rep( .kappa, length = M)
-        extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
-        extra$n <- n
+    extra$M <- M <- max(length( .Scale.arg ),
+                        ncoly,
+                        length( .kappa ))  # Recycle
+    extra$Scale <- rep( .Scale.arg , length = M)
+    extra$kappa <- rep( .kappa , length = M)
+    extra$tau <- extra$kappa^2 / (1 + extra$kappa^2)
+    extra$n <- n
 
 
 
@@ -2929,7 +2955,7 @@ alaplace1.control <- function(maxit = 100, ...) {
 
     mynames1 <- paste("location", if (M > 1) 1:M else "", sep = "")
     predictors.names <-
-        c(namesof(mynames1, .llocat , earg = .elocat, tag = FALSE))
+        c(namesof(mynames1, .llocat , earg = .elocat , tag = FALSE))
 
 
     locat.init <- matrix(0, n, M)
@@ -2938,16 +2964,16 @@ alaplace1.control <- function(maxit = 100, ...) {
       for (jay in 1:M) {
         y.use <- if (ncoly > 1) y[, jay] else y
         if ( .imethod == 1) {
-          locat.init[, jay] <- weighted.mean(y.use, w)
+          locat.init[, jay] <- weighted.mean(y.use, w[, min(jay, ncol(w))])
         } else if ( .imethod == 2) {
           locat.init[, jay] <- median(y.use)
         } else if ( .imethod == 3) {
-          Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
-                                y = y.use, w = w, df = .dfmu.init)
+          Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+                                 y = y.use, w = w, df = .idf.mu )
           locat.init[, jay] <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
         } else {
-          use.this <- weighted.mean(y.use, w)
-          locat.init[, jay] <- (1- .sinit) * y.use + .sinit * use.this
+          use.this <- weighted.mean(y.use, w[, min(jay, ncol(w))])
+          locat.init[, jay] <- (1- .ishrinkage ) * y.use + .ishrinkage * use.this
         }
 
 
@@ -2961,8 +2987,8 @@ alaplace1.control <- function(maxit = 100, ...) {
       }
     }
     }), list( .imethod = imethod,
-              .dfmu.init = dfmu.init,
-              .sinit = shrinkage.init, .digt = digt,
+              .idf.mu = idf.mu,
+              .ishrinkage = ishrinkage, .digt = digt,
               .elocat = elocat, .Scale.arg = Scale.arg,
               .llocat = llocat, .kappa = kappa,
               .ilocat = ilocat ))),
@@ -3006,8 +3032,8 @@ alaplace1.control <- function(maxit = 100, ...) {
     locat <- as.matrix(locat)
     for (ii in 1:M) {
       y.use <- if (ncoly > 1) y[, ii] else y
-      extra$percentile[ii] =
-        100 * weighted.mean(y.use <= locat[, ii], w)
+      extra$percentile[ii] <-
+        100 * weighted.mean(y.use <= locat[, ii], w[, min(ii, ncol(w))])
     }
     names(extra$percentile) <- y.names
 
@@ -3105,10 +3131,10 @@ alaplace3.control <- function(maxit = 100, ...) {
 
 
 
- alaplace3 <- function(
-          llocation = "identitylink", lscale = "loge", lkappa = "loge",
-          ilocation = NULL,       iscale = NULL,   ikappa = 1.0,
-          imethod = 1, zero = 2:3) {
+ alaplace3 <-
+  function(llocation = "identitylink", lscale = "loge", lkappa = "loge",
+           ilocation = NULL,           iscale = NULL,   ikappa = 1.0,
+           imethod = 1, zero = 2:3) {
 
   llocat <- as.list(substitute(llocation))
   elocat <- link2list(llocat)
@@ -3147,8 +3173,14 @@ alaplace3.control <- function(maxit = 100, ...) {
             "\n",
             "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 3,
+         summary.pvalues = FALSE,
+         zero = .zero )
+  }, list( .zero = zero ))),
+
   initialize = eval(substitute(expression({
 
     w.y.check(w = w, y = y,
@@ -3339,6 +3371,9 @@ rlaplace <- function(n, location = 0, scale = 1) {
 }
 
 
+
+
+  
  laplace <- function(llocation = "identitylink", lscale = "loge",
                      ilocation = NULL, iscale = NULL,
                      imethod = 1, zero = 2) {
@@ -3377,7 +3412,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
             "Mean:     location", "\n",
             "Variance: 2*scale^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3522,7 +3557,7 @@ fff.control <- function(save.weight = TRUE, ...) {
             "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
             "provided df2>4 and ncp = 0"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3800,7 +3835,8 @@ fff.control <- function(save.weight = TRUE, ...) {
 
 
 
-dbenini <- function(x, shape, y0, log = FALSE) {
+
+dbenini <- function(x, y0, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
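
Note that the d/p/q/rbenini() signatures now put y0 before shape, so positional calls written against the old order silently change meaning; naming the arguments sidesteps this:

    dbenini(x = 2, y0 = 1, shape = 2)   # safe under either argument order
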
@@ -3821,7 +3857,7 @@ dbenini <- function(x, shape, y0, log = FALSE) {
 }
 
 
-pbenini <- function(q, shape, y0) {
+pbenini <- function(q, y0, shape) {
   if (!is.Numeric(q))
     stop("bad input for argument 'q'")
   if (!is.Numeric(shape, positive = TRUE))
@@ -3840,7 +3876,7 @@ pbenini <- function(q, shape, y0) {
 }
 
 
-qbenini <- function(p, shape, y0) {
+qbenini <- function(p, y0, shape) {
   if (!is.Numeric(p, positive = TRUE) ||
       any(p >= 1)) 
     stop("bad input for argument 'p'")
@@ -3852,16 +3888,17 @@ qbenini <- function(p, shape, y0) {
 }
 
 
-rbenini <- function(n, shape, y0) {
+rbenini <- function(n, y0, shape) {
   y0 * exp(sqrt(-log(runif(n)) / shape))
 }
 
 
 
 
- benini <- function(y0 = stop("argument 'y0' must be specified"),
-                   lshape = "loge",
-                   ishape = NULL, imethod = 1, zero = NULL) {
+
+ benini1 <- function(y0 = stop("argument 'y0' must be specified"),
+                     lshape = "loge",
+                     ishape = NULL, imethod = 1, zero = NULL) {
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -3886,11 +3923,11 @@ rbenini <- function(n, shape, y0) {
             "Link:    ",
             namesof("shape", lshape, earg = eshape),
             "\n", "\n",
-            "Median:     qbenini(p = 0.5, shape, y0)"),
+            "Median:     qbenini(p = 0.5, y0, shape)"),
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
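
rbenini() above is one-line inverse-CDF sampling: with the standard Benini cdf F(y) = 1 - exp(-shape * log(y/y0)^2) for y >= y0, solving 1 - F(y) = u for uniform u gives exactly y0 * exp(sqrt(-log(u)/shape)). A quick check:

    set.seed(1)
    u <- runif(3); y0 <- 1; shape <- 2
    y0 * exp(sqrt(-log(u) / shape))   # three Benini(y0, shape) draws
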
@@ -3950,7 +3987,7 @@ rbenini <- function(n, shape, y0) {
     shape <- eta2theta(eta, .lshape , earg = .eshape )
 
 
-    qbenini(p = 0.5, shape, y0 = extra$y0)
+    qbenini(p = 0.5, y0 = extra$y0, shape)
   }, list( .lshape = lshape, .eshape = eshape ))),
   last = eval(substitute(expression({
     M1 <- extra$M1
@@ -3980,7 +4017,7 @@ rbenini <- function(n, shape, y0) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dbenini(x = y, shape = shape, y0 = y0, log = TRUE)
+      ll.elts <- c(w) * dbenini(x = y, y0 = y0, shape = shape, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -3988,7 +4025,7 @@ rbenini <- function(n, shape, y0) {
       }
     }
   }, list( .lshape = lshape, .eshape = eshape ))),
-  vfamily = c("benini"),
+  vfamily = c("benini1"),
 
 
 
@@ -4005,7 +4042,7 @@ rbenini <- function(n, shape, y0) {
     extra <- object@extra
     shape <- eta2theta(eta, .lshape , earg = .eshape )
     y0 <- extra$y0
-    rbenini(nsim * length(shape), shape = shape, y0 = y0)
+    rbenini(nsim * length(shape), y0 = y0, shape = shape)
   }, list( .lshape = lshape, .eshape = eshape ))),
 
 
@@ -4408,9 +4445,9 @@ loglaplace1.control <- function(maxit = 300, ...) {
                      ilocation = NULL,
                      kappa = sqrt(tau/(1-tau)),
                      Scale.arg = 1,
-                     shrinkage.init = 0.95,
-                     parallelLocation = FALSE, digt = 4,
-                     dfmu.init = 3,
+                     ishrinkage = 0.95,
+                     parallel.locat = FALSE, digt = 4,
+                     idf.mu = 3,
                      rep0 = 0.5,  # 0.0001,
                      minquantile = 0, maxquantile = Inf,
                      imethod = 1, zero = NULL) {
@@ -4448,10 +4485,10 @@ loglaplace1.control <- function(maxit = 300, ...) {
     stop("argument 'imethod' must be 1, 2 or ... 4")
 
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
   if (length(zero) &&
      !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
@@ -4459,9 +4496,9 @@ loglaplace1.control <- function(maxit = 300, ...) {
     stop("bad input for argument 'zero'")
   if (!is.Numeric(Scale.arg, positive = TRUE))
     stop("bad input for argument 'Scale.arg'")
-  if (!is.logical(parallelLocation) ||
-      length(parallelLocation) != 1)
-    stop("bad input for argument 'parallelLocation'")
+  if (!is.logical(parallel.locat) ||
+      length(parallel.locat) != 1)
+    stop("bad input for argument 'parallel.locat'")
 
   fittedMean <- FALSE
   if (!is.logical(fittedMean) || length(fittedMean) != 1)
@@ -4485,11 +4522,11 @@ loglaplace1.control <- function(maxit = 300, ...) {
             "Links:      ", mystring0, "\n", "\n",
           "Quantiles:  ", mystring1),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
-                           bool = .parallelLocation ,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel.locat ,
                            constraints = constraints, apply.int = FALSE)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
-  }), list( .parallelLocation = parallelLocation,
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .parallel.locat = parallel.locat,
             .Scale.arg = Scale.arg, .zero = zero ))),
   initialize = eval(substitute(expression({
     extra$M <- M <- max(length( .Scale.arg ), length( .kappa ))  # Recycle
@@ -4536,18 +4573,19 @@ loglaplace1.control <- function(maxit = 300, ...) {
 
         if (!length(etastart)) {
             if ( .imethod == 1) {
-                locat.init <- quantile(rep(y, w), probs= extra$tau) + 1/16
+              locat.init <- quantile(rep(y, w), probs= extra$tau) + 1/16
             } else if ( .imethod == 2) {
-                locat.init <- weighted.mean(y, w)
+              locat.init <- weighted.mean(y, w)
             } else if ( .imethod == 3) {
-                locat.init <- median(y)
+              locat.init <- median(y)
             } else if ( .imethod == 4) {
-                Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
-                                        df = .dfmu.init)
-                locat.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+              Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+                                     y = y, w = w,
+                                     df = .idf.mu )
+              locat.init <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
             } else {
-                use.this <- weighted.mean(y, w)
-                locat.init <- (1- .sinit)*y + .sinit * use.this
+              use.this <- weighted.mean(y, w)
+              locat.init <- (1- .ishrinkage )*y + .ishrinkage * use.this
             }
             locat.init <- if (length( .ilocat))
                              rep( .ilocat, length.out = M) else
@@ -4559,8 +4597,8 @@ loglaplace1.control <- function(maxit = 300, ...) {
                 cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
         }
     }), list( .imethod = imethod,
-              .dfmu.init = dfmu.init, .rep0 = rep0,
-              .sinit = shrinkage.init, .digt = digt,
+              .idf.mu = idf.mu, .rep0 = rep0,
+              .ishrinkage = ishrinkage, .digt = digt,
               .elocat = elocat, .Scale.arg = Scale.arg,
               .llocat = llocat, .kappa = kappa,
               .ilocat = ilocat ))),
@@ -4686,10 +4724,10 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
                          llocation = "loge", lscale = "loge",
                          ilocation = NULL, iscale = NULL,
                          kappa = sqrt(tau/(1-tau)),
-                         shrinkage.init = 0.95,
-                         parallelLocation = FALSE, digt = 4,
+                         ishrinkage = 0.95,
+                         parallel.locat = FALSE, digt = 4,
                          eq.scale = TRUE,
-                         dfmu.init = 3,
+                         idf.mu = 3,
                          rep0 = 0.5, nsimEIM = NULL,
                          imethod = 1, zero = "(1 + M/2):M") {
  warning("it is best to use loglaplace1()")
@@ -4728,19 +4766,19 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
     stop("bad input for argument 'iscale'")
 
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
   if (length(zero) &&
      !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
        is.character(zero )))
     stop("bad input for argument 'zero'")
   if (!is.logical(eq.scale) || length(eq.scale) != 1)
     stop("bad input for argument 'eq.scale'")
-  if (!is.logical(parallelLocation) ||
-      length(parallelLocation) != 1)
-    stop("bad input for argument 'parallelLocation'")
+  if (!is.logical(parallel.locat) ||
+      length(parallel.locat) != 1)
+    stop("bad input for argument 'parallel.locat'")
   fittedMean <- FALSE
   if (!is.logical(fittedMean) || length(fittedMean) != 1)
     stop("bad input for argument 'fittedMean'")
@@ -4762,18 +4800,18 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
       .ZERO <- .zero
       if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
-      .PARALLEL <- .parallelLocation
+      .PARALLEL <- .parallel.locat
       parelHmat <- if (is.logical( .PARALLEL ) && .PARALLEL )
                   matrix(1, M/2, 1) else diag(M/2)
       scaleHmat <- if (is.logical( .eq.scale ) && .eq.scale )
                   matrix(1, M/2, 1) else diag(M/2)
       mycmatrix <- cbind(rbind(  parelHmat, 0*parelHmat),
                         rbind(0*scaleHmat,   scaleHmat))
-      constraints <- cm.vgam(mycmatrix, x = x,
+      constraints <- cm.VGAM(mycmatrix, x = x,
                              bool = .PARALLEL ,
                              constraints = constraints,
                              apply.int = FALSE)
-      constraints <- cm.zero.vgam(constraints, x, .ZERO, M)
+      constraints <- cm.zero.VGAM(constraints, x, .ZERO, M)
 
       if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
           parelHmat <- diag(M/2)
@@ -4787,7 +4825,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
         temp3 <- cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
         constraints[["(Intercept)"]] = temp3
       }
-    }), list( .eq.scale = eq.scale, .parallelLocation = parallelLocation,
+    }), list( .eq.scale = eq.scale, .parallel.locat = parallel.locat,
               .zero = zero ))),
   initialize = eval(substitute(expression({
     extra$kappa <- .kappa
@@ -4824,37 +4862,38 @@ loglaplace2.control <- function(save.weight = TRUE, ...) {
                "Choose larger values for 'tau'.")
 
         if (!length(etastart)) {
-            if ( .imethod == 1) {
-                locat.init.y <- weighted.mean(y, w)
-                scale.init <- sqrt(var(y) / 2)
-            } else if ( .imethod == 2) {
-                locat.init.y <- median(y)
-                scale.init <- sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
-            } else if ( .imethod == 3) {
-                Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
-                                        df = .dfmu.init)
-                locat.init.y <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
-                scale.init <- sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
-            } else {
-                use.this <- weighted.mean(y, w)
-                locat.init.y <- (1- .sinit)*y + .sinit * use.this
-                scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
-            }
-            locat.init.y <- if (length( .ilocat ))
-                             rep( .ilocat , length.out = n) else
-                             rep(locat.init.y, length.out = n)
-            locat.init.y <- matrix(locat.init.y, n, M/2)
-            scale.init <- if (length( .iscale))
-                             rep( .iscale, length.out = n) else
-                             rep(scale.init, length.out = n)
-            scale.init <- matrix(scale.init, n, M/2)
-            etastart <-
-                cbind(theta2eta(locat.init.y, .llocat , earg = .elocat ),
-                      theta2eta(scale.init, .lscale , earg = .escale ))
+          if ( .imethod == 1) {
+            locat.init.y <- weighted.mean(y, w)
+            scale.init <- sqrt(var(y) / 2)
+          } else if ( .imethod == 2) {
+            locat.init.y <- median(y)
+            scale.init <- sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
+          } else if ( .imethod == 3) {
+            Fit5 <- vsmooth.spline(x = x[, min(ncol(x), 2)],
+                                   y = y, w = w,
+                                   df = .idf.mu )
+            locat.init.y <- c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+            scale.init <- sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
+          } else {
+            use.this <- weighted.mean(y, w)
+            locat.init.y <- (1- .ishrinkage )*y + .ishrinkage * use.this
+            scale.init <- sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
+          }
+          locat.init.y <- if (length( .ilocat ))
+                           rep( .ilocat , length.out = n) else
+                           rep(locat.init.y, length.out = n)
+          locat.init.y <- matrix(locat.init.y, n, M/2)
+          scale.init <- if (length( .iscale))
+                           rep( .iscale, length.out = n) else
+                           rep(scale.init, length.out = n)
+          scale.init <- matrix(scale.init, n, M/2)
+          etastart <-
+            cbind(theta2eta(locat.init.y, .llocat , earg = .elocat ),
+                  theta2eta(scale.init, .lscale , earg = .escale ))
         }
     }), list( .imethod = imethod,
-              .dfmu.init = dfmu.init, .kappa = kappa,
-              .sinit = shrinkage.init, .digt = digt,
+              .idf.mu = idf.mu, .kappa = kappa,
+              .ishrinkage = ishrinkage, .digt = digt,
               .llocat = llocat, .lscale = lscale,
               .elocat = elocat, .escale = escale,
               .ilocat = ilocat, .iscale = iscale ))),
@@ -5018,8 +5057,8 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
         ilocation = NULL,
         kappa = sqrt(tau/(1-tau)),
         Scale.arg = 1,
-        shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
-        dfmu.init = 3,
+        ishrinkage = 0.95, parallel.locat = FALSE, digt = 4,
+        idf.mu = 3,
         rep01 = 0.5,
         imethod = 1, zero = NULL) {
 
@@ -5051,10 +5090,10 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
      imethod > 4)
     stop("argument 'imethod' must be 1, 2 or ... 4")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
   if (length(zero) &&
      !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
        is.character(zero )))
@@ -5062,9 +5101,9 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
 
   if (!is.Numeric(Scale.arg, positive = TRUE))
     stop("bad input for argument 'Scale.arg'")
-  if (!is.logical(parallelLocation) ||
-      length(parallelLocation) != 1)
-    stop("bad input for argument 'parallelLocation'")
+  if (!is.logical(parallel.locat) ||
+      length(parallel.locat) != 1)
+    stop("bad input for argument 'parallel.locat'")
   fittedMean <- FALSE
   if (!is.logical(fittedMean) ||
       length(fittedMean) != 1)
@@ -5085,11 +5124,11 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
             "Links:      ", mystring0, "\n", "\n",
           "Quantiles:  ", mystring1),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
-                           bool = .parallelLocation ,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
+                           bool = .parallel.locat ,
                            constraints = constraints, apply.int = FALSE)
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
-  }), list( .parallelLocation = parallelLocation,
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .parallel.locat = parallel.locat,
             .Scale.arg = Scale.arg, .zero = zero ))),
   initialize = eval(substitute(expression({
     extra$M <- M <- max(length( .Scale.arg ), length( .kappa ))  # Recycle
@@ -5141,7 +5180,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
           locat.init <- median(rep(y, w))
         } else if ( .imethod == 3) {
           use.this <- weighted.mean(y, w)
-          locat.init <- (1- .sinit)*y + use.this * .sinit
+          locat.init <- (1- .ishrinkage )*y + use.this * .ishrinkage
         } else {
           stop("this option not implemented")
         }
@@ -5156,8 +5195,8 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
           cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
     }
   }), list( .imethod = imethod,
-            .dfmu.init = dfmu.init,
-            .sinit = shrinkage.init, .digt = digt,
+            .idf.mu = idf.mu,
+            .ishrinkage = ishrinkage, .digt = digt,
             .elocat = elocat, .Scale.arg = Scale.arg,
             .llocat = llocat, .kappa = kappa,
             .ilocat = ilocat ))),
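
The renames above are user-visible: 'shrinkage.init' becomes 'ishrinkage',
'parallelLocation' becomes 'parallel.locat', and 'dfmu.init' becomes 'idf.mu'.
A minimal sketch of the new spellings, assuming logitlaplace1() otherwise
keeps the defaults shown above; the data are illustrative:

    library(VGAM)
    set.seed(1)
    ldata <- data.frame(x2 = sort(runif(200)))
    ldata <- transform(ldata,  # a response in (0, 1), as logitlaplace1() needs
                       y = rbeta(200, shape1 = exp(1 + x2), shape2 = 3))
    fit <- vglm(y ~ x2,
                logitlaplace1(tau = c(0.25, 0.5, 0.75),
                              parallel.locat = FALSE,  # was 'parallelLocation'
                              ishrinkage = 0.95,       # was 'shrinkage.init'
                              idf.mu = 3),             # was 'dfmu.init'
                data = ldata)
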
diff --git a/R/family.quantal.R b/R/family.quantal.R
index be36bbc..e46d5c4 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -56,7 +56,7 @@
             namesof("prob0", link0, earg = earg0), ",  ",
             namesof("prob1", link1, earg = earg1)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero # ,
            ))),
 
@@ -257,8 +257,8 @@ if (FALSE)
             namesof("prob0", lprob0, earg = eprob0), ",  ",
             namesof("prob1", lprob1, earg = eprob1)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
-    constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.nointercept.VGAM(constraints, x, .nointercept, M)
   }), list( .zero = zero,
             .nointercept = nointercept ))),
 
@@ -429,7 +429,7 @@ abbott.EM.control <- function(maxit = 1000, ...) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
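
The constraint-matrix helpers used by family functions gain a ".VGAM" suffix
throughout: cm.vgam becomes cm.VGAM, cm.zero.vgam becomes cm.zero.VGAM,
cm.nointercept.vgam becomes cm.nointercept.VGAM, and negzero.expression
becomes negzero.expression.VGAM. For authors of custom family functions, a
minimal constraints slot in the new spelling (signatures taken from the
calls above):

    constraints = eval(substitute(expression({
      constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
                             constraints = constraints, apply.int = FALSE)
      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
    }), list( .parallel = parallel, .zero = zero )))
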
diff --git a/R/family.rcim.R b/R/family.rcim.R
index c4b6542..4c63236 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -689,9 +689,9 @@ Confint.rrnb <- function(rrnb2, level = 0.95) {
   if (!all(rrnb2 at misc$link == "loge"))
     stop("argument 'rrnb2' does not have log links for both parameters")
 
-  a21.hat <- (Coef(rrnb2)@A)["log(size)", 1]
-  beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "log(mu)"]
-  beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "log(size)"]
+  a21.hat <- (Coef(rrnb2)@A)["loge(size)", 1]
+  beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "loge(mu)"]
+  beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "loge(size)"]
   delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat)
   delta2.hat <- 2 - a21.hat
 
@@ -740,8 +740,8 @@ Confint.nb1 <- function(nb1, level = 0.95) {
     stop("argument 'nb1' does not have log links for both parameters")
 
   cnb1 <- coefficients(as(nb1, "vglm"), matrix = TRUE)
-  mydiff <- (cnb1["(Intercept)", "log(size)"] -
-             cnb1["(Intercept)", "log(mu)"])
+  mydiff <- (cnb1["(Intercept)", "loge(size)"] -
+             cnb1["(Intercept)", "loge(mu)"])
   delta0.hat <- exp(mydiff)
   (phi0.hat <- 1 + 1 / delta0.hat)  # MLE of phi0
 
@@ -811,28 +811,28 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
   if (!alreadyComputed)
   for (ii in 1:nseq.a21) {
     if (trace.arg)
+      print(ii)
+    argslist <- vector("list", length(listcall) - 1)
+    for (kay in 2:(length(listcall)))
+      argslist[[kay - 1]] <- listcall[[kay]]
 
-       argslist <- vector("list", length(listcall) - 1)
-       for (kay in 2:(length(listcall)))
-         argslist[[kay - 1]] <- listcall[[kay]]
+    names(argslist) <- c(names(listcall)[-1])
 
-       names(argslist) <- c(names(listcall)[-1])
+    argslist$trace       <- trace.arg
+    argslist$etastart    <- prev.etastart
+    argslist$constraints <- Hlist.orig
 
-       argslist$trace       <- trace.arg
-       argslist$etastart    <- prev.etastart
-       argslist$constraints <- Hlist.orig
 
-
-       for (kay in 2:length(argslist[["constraints"]])) {
-         argslist[["constraints"]][[kay]] <- rbind(1, a21.matrix[ii, 1])
-       }
+    for (kay in 2:length(argslist[["constraints"]])) {
+       argslist[["constraints"]][[kay]] <- rbind(1, a21.matrix[ii, 1])
+    }
 
 
-       fitnew <- do.call(what = funname, args = argslist)
+    fitnew <- do.call(what = funname, args = argslist)
 
-       a21.matrix[ii, 2] <- logLik(fitnew)
+    a21.matrix[ii, 2] <- logLik(fitnew)
 
-       prev.etastart <- predict(fitnew)
+    prev.etastart <- predict(fitnew)
   }
 
 
@@ -1055,7 +1055,7 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
   attr(logAllvcov, "which.linpred") <- which.linpred
 
   logAllvcov
-}
+}  # End of Qvar()
 
 
 
@@ -1220,23 +1220,24 @@ qvar <- function(object, se = FALSE, ...) {
 
 
 
-plotqvar <- function(object,
-                     interval.width = 2,
-                     ylab = "Estimate",
-                     xlab = NULL,  # x$factorname,
-                     ylim = NULL,
-                     main = "",
-                     level.names = NULL,
-                     conf.level = 0.95,
-                     warn.ratio = 10,
-                     border = "transparent",  # None
-                     points.arg = TRUE,
-                     length.arrows = 0.25, angle = 30,
-                     lwd = par()$lwd,
-                     scol = par()$col,
-                     slwd = par()$lwd,
-                     slty = par()$lty,
-                     ...) {
+plotqvar <-
+qvplot   <-  function(object,
+                      interval.width = 2,
+                      ylab = "Estimate",
+                      xlab = NULL,  # x$factorname,
+                      ylim = NULL,
+                      main = "",
+                      level.names = NULL,
+                      conf.level = 0.95,
+                      warn.ratio = 10,
+                      border = "transparent",  # None
+                      points.arg = TRUE,
+                      length.arrows = 0.25, angle = 30,
+                      lwd = par()$lwd,
+                      scol = par()$col,
+                      slwd = par()$lwd,
+                      slty = par()$lty,
+                      ...) {
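
plotqvar() gains the shorter synonym qvplot(); the two names are bound to
the same function, so existing calls keep working. A sketch, with 'fit1'
standing in for a quasi-variance fit from rcim(Qvar(...)) (the object name
is illustrative):

    qvplot(fit1, interval.width = 2, conf.level = 0.95)  # new name
    plotqvar(fit1)                                       # old name, same result
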
 
 
 
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index 4fa41a8..d380f1d 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -12,23 +12,23 @@ rcqo <- function(n, p, S,
                  family = c("poisson", "negbinomial", "binomial-poisson",
                             "Binomial-negbinomial", "ordinal-poisson",
                             "Ordinal-negbinomial", "gamma2"),
-                 eq.maxima = FALSE,
+                 eq.maximums = FALSE,
                  eq.tolerances = TRUE,
-                 es.optima = FALSE,
-                 lo.abundance = if (eq.maxima) hi.abundance else 10,
+                 es.optimums = FALSE,
+                 lo.abundance = if (eq.maximums) hi.abundance else 10,
                  hi.abundance = 100,
                  sd.latvar = head(1.5/2^(0:3), Rank),
-                 sd.optima = ifelse(es.optima, 1.5/Rank, 1) *
+                 sd.optimums = ifelse(es.optimums, 1.5/Rank, 1) *
                             ifelse(scale.latvar, sd.latvar, 1),
                  sd.tolerances = 0.25,
                  Kvector = 1,
                  Shape = 1,
                  sqrt.arg = FALSE,
-                 Log = FALSE,
+                 log.arg = FALSE,
                  rhox = 0.5,
                  breaks = 4,  # ignored unless family = "ordinal"
                  seed = NULL,
-                 optima1.arg = NULL,
+                 optimums1.arg = NULL,
                  Crow1positive = TRUE,
                  xmat = NULL,  # Can be input
                  scale.latvar = TRUE) {
@@ -71,9 +71,9 @@ rcqo <- function(n, p, S,
     stop("bad input for argument 'lo.abundance'")
   if (!is.Numeric(sd.latvar, positive = TRUE))
     stop("bad input for argument 'sd.latvar'")
-  if (!is.Numeric(sd.optima, positive = TRUE))
-    stop("bad input for argument 'sd.optima'")
-  if (eq.maxima && lo.abundance != hi.abundance)
+  if (!is.Numeric(sd.optimums, positive = TRUE))
+    stop("bad input for argument 'sd.optimums'")
+  if (eq.maximums && lo.abundance != hi.abundance)
     stop("arguments 'lo.abundance' and 'hi.abundance' must ",
          "be equal when 'eq.tolerances = TRUE'")
   if (any(lo.abundance > hi.abundance))
@@ -85,9 +85,9 @@ rcqo <- function(n, p, S,
   }
   Shape <- rep(Shape, len = S)
   sd.latvar <- rep(sd.latvar, len = Rank)
-  sd.optima <- rep(sd.optima, len = Rank)
+  sd.optimums <- rep(sd.optimums, len = Rank)
   sd.tolerances <- rep(sd.tolerances, len = Rank)
-  AA <- sd.optima / 3^0.5
+  AA <- sd.optimums / 3^0.5
   if (Rank > 1 && any(diff(sd.latvar) > 0))
    stop("argument 'sd.latvar)' must be a vector with decreasing values")
 
@@ -123,18 +123,18 @@ rcqo <- function(n, p, S,
     dimnames(xmat) <- list(as.character(1:n), xnames)
   }
   eval(change.seed.expression)
-  ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
-  latvarmat <- cbind(xmat %*% ccoefs)
+  Ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
+  latvarmat <- cbind(xmat %*% Ccoefs)
   if (Rank > 1) {
     Rmat <- chol(var(latvarmat))
     iRmat <- solve(Rmat)
     latvarmat <- latvarmat %*% iRmat  # var(latvarmat) == diag(Rank)
-    ccoefs <- ccoefs %*% iRmat
+    Ccoefs <- Ccoefs %*% iRmat
   }
   for (r in 1:Rank)
-    if (( Crow1positive[r] && ccoefs[1, r] < 0) ||
-        (!Crow1positive[r] && ccoefs[1, r] > 0)) {
-      ccoefs[ , r] <- -ccoefs[ , r]
+    if (( Crow1positive[r] && Ccoefs[1, r] < 0) ||
+        (!Crow1positive[r] && Ccoefs[1, r] > 0)) {
+      Ccoefs[ , r] <- -Ccoefs[ , r]
       latvarmat[ , r] <- -latvarmat[ , r]
     }
 
@@ -142,7 +142,7 @@ rcqo <- function(n, p, S,
     for (r in 1:Rank) {
       sd.latvarr <- sd(latvarmat[, r])
       latvarmat[, r] <- latvarmat[, r] * sd.latvar[r] / sd.latvarr
-      ccoefs[, r]  <- ccoefs[, r] * sd.latvar[r] / sd.latvarr
+      Ccoefs[, r]  <- Ccoefs[, r] * sd.latvar[r] / sd.latvarr
     }
   } else {
     sd.latvarr <- NULL
@@ -150,44 +150,44 @@ rcqo <- function(n, p, S,
       sd.latvarr <- c(sd.latvarr, sd(latvarmat[, r]))
     }
   }
-  if (es.optima) {
+  if (es.optimums) {
     if (!is.Numeric(S^(1/Rank), integer.valued = TRUE) ||
         S^(1/Rank) < 2)
       stop("S^(1/Rank) must be an integer greater or equal to 2")
     if (Rank == 1) {
-      optima <- matrix(as.numeric(NA), S, Rank)
+      optimums <- matrix(as.numeric(NA), S, Rank)
       for (r in 1:Rank) {
-        optima[, r] <- seq(-AA, AA, len = S^(1/Rank))
+        optimums[, r] <- seq(-AA, AA, len = S^(1/Rank))
       }
     } else if (Rank == 2) {
-      optima <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
+      optimums <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
                             latvar2 = seq(-AA[2], AA[2], len = S^(1/Rank)))
     } else if (Rank == 3) {
-      optima <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
+      optimums <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
                             latvar2 = seq(-AA[2], AA[2], len = S^(1/Rank)),
                             latvar3 = seq(-AA[3], AA[3], len = S^(1/Rank)))
     } else {
-      optima <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
+      optimums <- expand.grid(latvar1 = seq(-AA[1], AA[1], len = S^(1/Rank)),
                             latvar2 = seq(-AA[2], AA[2], len = S^(1/Rank)),
                             latvar3 = seq(-AA[3], AA[3], len = S^(1/Rank)),
                             latvar4 = seq(-AA[4], AA[4], len = S^(1/Rank)))
     }
     if (Rank > 1)
-      optima <- matrix(unlist(optima), S, Rank)  # Make sure its a matrix
+      optimums <- matrix(unlist(optimums), S, Rank)  # Make sure it's a matrix
   } else {
-    optima <- matrix(1, S, Rank)
+    optimums <- matrix(1, S, Rank)
     eval(change.seed.expression)
     for (r in 1:Rank) {
-      optima[, r] <- rnorm(n = S, sd = sd.optima[r])
+      optimums[, r] <- rnorm(n = S, sd = sd.optimums[r])
     }
   }
   for (r in 1:Rank)
-    optima[, r] <- optima[, r] * sd.optima[r] / sd(optima[, r])
+    optimums[, r] <- optimums[, r] * sd.optimums[r] / sd(optimums[, r])
 
 
-  if (length(optima1.arg) && Rank == 1)
+  if (length(optimums1.arg) && Rank == 1)
   for (r in 1:Rank)
-    optima[, r] <- optima1.arg
+    optimums[, r] <- optimums1.arg
 
 
 
@@ -211,16 +211,16 @@ rcqo <- function(n, p, S,
   }
 
   dimnames(Tols)   <- list(ynames, latvarnames)
-  dimnames(ccoefs) <- list(xnames, latvarnames)
-  dimnames(optima) <- list(ynames, latvarnames)
+  dimnames(Ccoefs) <- list(xnames, latvarnames)
+  dimnames(optimums) <- list(ynames, latvarnames)
   loeta <- log(lo.abundance)   # May be a vector
   hieta <- log(hi.abundance)
   eval(change.seed.expression)
-  logmaxima <- runif(S, min = loeta, max = hieta)
-  names(logmaxima) <- ynames
-  etamat <- matrix(logmaxima, n, S, byrow = TRUE)
+  log.maximums <- runif(S, min = loeta, max = hieta)
+  names(log.maximums) <- ynames
+  etamat <- matrix(log.maximums, n, S, byrow = TRUE)
   for (jay in 1:S) {
-      optmat <- matrix(optima[jay, ], nrow = n, ncol = Rank, byrow = TRUE)
+      optmat <- matrix(optimums[jay, ], nrow = n, ncol = Rank, byrow = TRUE)
       tolmat <- matrix(  Tols[jay, ], nrow = n, ncol = Rank, byrow = TRUE)
       temp <- cbind((latvarmat - optmat) / tolmat)
       for (r in 1:Rank)
@@ -247,7 +247,7 @@ rcqo <- function(n, p, S,
     ymat <- matrix(rgamma(n * S, shape = Shape,
                                  scale = exp(etamat) / Shape),
                    n, S)
-    if (Log) ymat <- log(ymat)
+    if (log.arg) ymat <- log(ymat)
   } else {
     stop("argument 'rootdist' unmatched")
   }
@@ -268,28 +268,28 @@ rcqo <- function(n, p, S,
 
   dimnames(ymat) <- list(as.character(1:n), ynames)
   ans <- data.frame(xmat, ymat)
-  attr(ans, "concoefficients") <- ccoefs
+  attr(ans, "concoefficients") <- Ccoefs
   attr(ans, "Crow1positive") <- Crow1positive
   attr(ans, "family") <- family
   attr(ans, "formula") <- myform # Useful for running cqo() on the data
   attr(ans, "Rank") <- Rank
   attr(ans, "family") <- family
   attr(ans, "Kvector") <- Kvector
-  attr(ans, "logmaxima") <- logmaxima
+  attr(ans, "log.maximums") <- log.maximums
   attr(ans, "lo.abundance") <- lo.abundance
   attr(ans, "hi.abundance") <- hi.abundance
-  attr(ans, "optima") <- optima
-  attr(ans, "Log") <- Log
+  attr(ans, "optimums") <- optimums
+  attr(ans, "log.arg") <- log.arg
   attr(ans, "latvar") <- latvarmat
   attr(ans, "eta") <- etamat
   attr(ans, "eq.tolerances") <- eq.tolerances
-  attr(ans, "eq.maxima") <- eq.maxima ||
+  attr(ans, "eq.maximums") <- eq.maximums ||
                               all(lo.abundance == hi.abundance)
-  attr(ans, "es.optima") <- es.optima
+  attr(ans, "es.optimums") <- es.optimums
   attr(ans, "seed") <- seed # RNGstate
   attr(ans, "sd.tolerances") <- sd.tolerances
   attr(ans, "sd.latvar") <- if (scale.latvar) sd.latvar else sd.latvarr
-  attr(ans, "sd.optima") <- sd.optima
+  attr(ans, "sd.optimums") <- sd.optimums
   attr(ans, "Shape") <- Shape
   attr(ans, "sqrt") <- sqrt.arg
   attr(ans, "tolerances") <- Tols^0.5  # Like a standard deviation
@@ -306,12 +306,12 @@ dcqo <-
            family = c("poisson", "binomial", "negbinomial", "ordinal"),
            Rank = 1,
            eq.tolerances = TRUE,
-           eq.maxima = FALSE,
+           eq.maximums = FALSE,
            EquallySpacedOptima = FALSE,
-           lo.abundance = if (eq.maxima) 100 else 10,
+           lo.abundance = if (eq.maximums) 100 else 10,
            hi.abundance = 100,
            sd.tolerances = 1,
-           sd.optima = 1,
+           sd.optimums = 1,
            nlevels = 4,  # ignored unless family = "ordinal"
            seed = NULL) {
  warning("12/6/06; needs a lot of work based on rcqo()")
@@ -338,7 +338,7 @@ dcqo <-
     stop("bad input for argument 'seed'")
   if (!is.logical(eq.tolerances) || length(eq.tolerances)>1)
     stop("bad input for argument 'eq.tolerances)'")
-  if (eq.maxima && lo.abundance != hi.abundance)
+  if (eq.maximums && lo.abundance != hi.abundance)
     stop("'lo.abundance' and 'hi.abundance' must ",
          "be equal when 'eq.tolerances = TRUE'")
   if (length(seed)) set.seed(seed)
@@ -346,18 +346,18 @@ dcqo <-
   xmat <- matrix(rnorm(n*(p-1)), n, p-1,
                  dimnames = list(as.character(1:n),
                                  paste("x", 2:p, sep = "")))
-  ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
-  latvarmat <- xmat %*% ccoefs
-  optima <- matrix(rnorm(Rank*S, sd = sd.optima), S, Rank)
+  Ccoefs <- matrix(rnorm((p-1)*Rank), p-1, Rank)
+  latvarmat <- xmat %*% Ccoefs
+  optimums <- matrix(rnorm(Rank*S, sd = sd.optimums), S, Rank)
   Tols <- if (eq.tolerances) matrix(1, S, Rank) else
          matrix(rnorm(Rank*S, mean = 1, sd = 1), S, Rank)
   loeta <- log(lo.abundance)
   hieta <- log(hi.abundance)
-  logmaxima <- runif(S, min = loeta, max = hieta)
+  log.maximums <- runif(S, min = loeta, max = hieta)
 
-  etamat <- matrix(logmaxima, n, S, byrow = TRUE)
+  etamat <- matrix(log.maximums, n, S, byrow = TRUE)
   for (jay in 1:S) {
-    optmat <- matrix(optima[jay, ], n, Rank, byrow = TRUE)
+    optmat <- matrix(optimums[jay, ], n, Rank, byrow = TRUE)
     tolmat <- matrix(  Tols[jay, ], n, Rank, byrow = TRUE)
     temp <- cbind((latvarmat - optmat) * tolmat)
     for (r in 1:Rank)
@@ -378,7 +378,7 @@ dcqo <-
   dimnames(ymat) <- list(as.character(1:n),
                          paste("y", 1:S, sep = ""))
   ans <- data.frame(xmat, ymat)
-  attr(ans, "concoefficients") <- ccoefs
+  attr(ans, "concoefficients") <- Ccoefs
   attr(ans, "family") <- family
   ans
 }
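
In rcqo() and dcqo() the American-English spellings win out: 'eq.maxima'
becomes 'eq.maximums', 'es.optima' becomes 'es.optimums', 'sd.optima'
becomes 'sd.optimums', 'Log' becomes 'log.arg', and 'optima1.arg' becomes
'optimums1.arg'; the "optima" and "logmaxima" attributes of the returned
data frame are likewise now "optimums" and "log.maximums". A minimal sketch
in the new spelling (the values are illustrative):

    library(VGAM)
    mydata <- rcqo(n = 100, p = 5, S = 6, family = "poisson",
                   eq.maximums = FALSE, sd.optimums = 1, seed = 123)
    head(attr(mydata, "optimums"))   # was attr(mydata, "optima")
    attr(mydata, "log.maximums")     # was attr(mydata, "logmaxima")
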
diff --git a/R/family.robust.R b/R/family.robust.R
index 1d1c7e6..f4988fb 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -25,7 +25,7 @@ edhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) {
 
   zedd <- (x - mu) / sigma
   fk <- dnorm(k)
-   eps <- 1 - 1 / (pnorm(k) - pnorm(-k) + 2 * fk /k)
+   eps <- 1 - 1 / (pnorm(k) - pnorm(-k) + 2 * fk / k)
   ceps <-     1 / (pnorm(k) - pnorm(-k) + 2 * fk / k)
 
   if (log.arg) {
@@ -168,7 +168,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
             namesof("scale",     lscale,  earg = escale), "\n\n",
             "Mean: location"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -182,8 +182,8 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
 
 
     predictors.names <-
-       c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
-         namesof("scale",    .lscale, earg = .escale, tag = FALSE))
+       c(namesof("location", .llocat , earg = .elocat, tag = FALSE),
+         namesof("scale",    .lscale , earg = .escale, tag = FALSE))
 
     if (!length(etastart)) {
       junk <- lm.wfit(x = x, y = c(y), w = c(w))
@@ -200,14 +200,14 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
         }
       }
       etastart <- cbind(
-           theta2eta(location.init,  .llocat, earg = .elocat),
-           theta2eta(scale.y.est,    .lscale, earg = .escale))
+           theta2eta(location.init,  .llocat , earg = .elocat ),
+           theta2eta(scale.y.est,    .lscale , earg = .escale ))
     }
   }), list( .llocat = llocat, .lscale = lscale,
             .elocat = elocat, .escale = escale,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta[, 1], .llocat, earg = .elocat)
+    eta2theta(eta[, 1], .llocat , earg = .elocat )
   }, list( .llocat = llocat,
            .elocat = elocat, .escale = escale ))),
   last = eval(substitute(expression({
@@ -244,8 +244,8 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
            .k      = k ))),
   vfamily = c("huber2"),
   deriv = eval(substitute(expression({
-    mylocat <- eta2theta(eta[, 1], .llocat,  earg = .elocat)
-    myscale <- eta2theta(eta[, 2], .lscale,  earg = .escale)
+    mylocat <- eta2theta(eta[, 1], .llocat ,  earg = .elocat )
+    myscale <- eta2theta(eta[, 2], .lscale ,  earg = .escale )
     myk     <- .k
 
     zedd <- (y - mylocat) / myscale
@@ -263,8 +263,8 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
     dl.dscale[cond3] <- ( myk * zedd)[cond3]
     dl.dscale <- (-1 + dl.dscale) / myscale
 
-    dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
-    dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
+    dlocat.deta <- dtheta.deta(mylocat, .llocat , earg = .elocat )
+    dscale.deta <- dtheta.deta(myscale, .lscale , earg = .escale )
     ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
                         dl.dscale * dscale.deta)
     ans
@@ -332,7 +332,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
 
 
     predictors.names <-
-       c(namesof("location", .llocat, earg = .elocat, tag = FALSE))
+       c(namesof("location", .llocat , earg = .elocat, tag = FALSE))
 
 
     if (!length(etastart)) {
@@ -349,13 +349,13 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
         }
       }
       etastart <- cbind(
-           theta2eta(location.init,  .llocat, earg = .elocat))
+           theta2eta(location.init,  .llocat , earg = .elocat ))
     }
   }), list( .llocat = llocat,
             .elocat = elocat,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta, .llocat, earg = .elocat)
+    eta2theta(eta, .llocat , earg = .elocat )
   }, list( .llocat = llocat,
            .elocat = elocat ))),
   last = eval(substitute(expression({
@@ -392,7 +392,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
            .k      = k ))),
   vfamily = c("huber1"),
   deriv = eval(substitute(expression({
-    mylocat <- eta2theta(eta, .llocat,  earg = .elocat)
+    mylocat <- eta2theta(eta, .llocat ,  earg = .elocat )
     myk     <- .k
 
     zedd <- (y - mylocat)  # / myscale
@@ -412,7 +412,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
       dl.dscale <- (-1 + dl.dscale) / myscale
     }
 
-    dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
+    dlocat.deta <- dtheta.deta(mylocat, .llocat , earg = .elocat )
     ans <- c(w) * cbind(dl.dlocat * dlocat.deta)
     ans
   }), list( .llocat = llocat,
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 0affebb..d40d828 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -47,7 +47,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
   moff <- NULL
   ans <- if (Quadratic) {
            index <- iam(NA, NA, M = Rank, diag = TRUE, both = TRUE) 
-           temp1 <- cbind(numat[,index$row] * numat[,index$col])
+           temp1 <- cbind(numat[, index$row] * numat[, index$col])
            if (I.tolerances) {
              moff <- 0
              for (ii in 1:Rank)
@@ -132,7 +132,7 @@ qrrvglm.xprod <- function(numat, Aoffset, Quadratic, I.tolerances) {
 
   fit <- list(res.ss = 0)  # Only for initial old.crit below
 
-  C <- Cinit # This is input for the main iter loop
+  C <- Cinit  # This is input for the main iter loop
   old.crit <- switch(Criterion, coefficients = C, res.ss = fit$res.ss)
 
   recover <- 0  # Allow a few iterations between different line searches 
@@ -500,7 +500,7 @@ rrr.alternating.expression <- expression({
         elts <- Dmat[kay, , drop = FALSE]  # Manual recycling
         if (length(elts) < Rank)
           elts <- matrix(elts, 1, Rank)
-        Dk <- m2adefault(elts, M = Rank)[, , 1]
+        Dk <- m2a(elts, M = Rank)[, , 1]
         Dk <- matrix(Dk, Rank, Rank)
         Dk <- t(Mmat) %*% Dk  %*% Mmat  # 22/8/03; Not diagonal in general
         Dmat[kay, ] <- Dk[cbind(ind0$row.index[1:ncol(Dmat)],
@@ -898,19 +898,22 @@ nlminbcontrol <- function(Abs.tol = 10^(-6),
 
 
 
-Coef.qrrvglm <- function(object, varI.latvar = FALSE,
-                         reference = NULL, ...) {
+Coef.qrrvglm <-
+  function(object,
+           varI.latvar = FALSE,
+           refResponse = NULL, ...) {
+
 
 
   if (length(varI.latvar) != 1 || !is.logical(varI.latvar)) 
     stop("'varI.latvar' must be TRUE or FALSE")
-  if (length(reference) > 1)
-    stop("'reference' must be of length 0 or 1")
-  if (length(reference) &&
-      is.Numeric(reference))
-      if (!is.Numeric(reference, length.arg = 1,
+  if (length(refResponse) > 1)
+    stop("argument 'refResponse' must be of length 0 or 1")
+  if (length(refResponse) &&
+      is.Numeric(refResponse))
+      if (!is.Numeric(refResponse, length.arg = 1,
                       integer.valued = TRUE))
-        stop("bad input for argument 'reference'")
+        stop("bad input for argument 'refResponse'")
  if (!is.logical(ConstrainedQO <- object@control$ConstrainedQO))
     stop("cannot determine whether the model is constrained or not")
 
@@ -957,7 +960,7 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE,
   td.expression <- function(Dmat, Amat, M, Dzero, Rank, bellshaped) {
 
 
-    Tolerance <- Darray <- m2adefault(Dmat, M = Rank)
+    Tolerance <- Darray <- m2a(Dmat, M = Rank)
     for (ii in 1:M)
       if (length(Dzero) && any(Dzero == ii)) {
         Tolerance[, , ii] <- NA   # Darray[,,ii] == O 
@@ -987,13 +990,13 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE,
  B1   <- object@extra$B1    #
   bellshaped <- rep(FALSE, length = M)
 
-  if (is.character(reference)) {
-      reference <- (1:NOS)[reference == ynames]
-      if (length(reference) != 1)
-         stop("could not match argument 'reference' with any response")
+  if (is.character(refResponse)) {
+      refResponse <- (1:NOS)[refResponse == ynames]
+      if (length(refResponse) != 1)
+         stop("could not match argument 'refResponse' with any response")
   }
   ptr1 <- 1
-  candidates <- if (length(reference)) reference else {
+  candidates <- if (length(refResponse)) refResponse else {
       if (length(ocontrol$Dzero)) (1:M)[-ocontrol$Dzero] else (1:M)}
   repeat {
     if (ptr1 > 0) {
@@ -1002,7 +1005,7 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE,
   elts <- Dmat[this.spp,, drop = FALSE]
       if (length(elts) < Rank)
         elts <- matrix(elts, 1, Rank)
-      Dk <- m2adefault(elts, M = Rank)[,, 1]  # Hopefully negative-def 
+      Dk <- m2a(elts, M = Rank)[, , 1]  # Hopefully negative-def 
       temp400 <- eigen(Dk)
       ptr1 <- ptr1 + 1 
       if (all(temp400$value < 0))
@@ -1044,8 +1047,8 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE,
 
 
     } else {
-      if (length(reference) == 1) 
-        stop("tolerance matrix specified by 'reference' ",
+      if (length(refResponse) == 1) 
+        stop("tolerance matrix specified by 'refResponse' ",
              "is not positive-definite") else
         warning("could not find any positive-definite ",
                 "tolerance matrix")
@@ -1185,7 +1188,7 @@ Coef.qrrvglm <- function(object, varI.latvar = FALSE,
   dimnames(ans@Optimum) <- list(latvar.names, ynames)
   dimnames(ans@Tolerance) <- list(latvar.names, latvar.names, ynames)
   ans 
-}
+}  # End of Coef.qrrvglm
 
 
 setClass(Class = "Coef.rrvglm", representation(
@@ -1276,7 +1279,7 @@ show.Coef.qrrvglm <- function(x, ...) {
     cat("\nB1 and A matrices\n")
     print(cbind(t(object@B1),
                 A = object@A), ...)
-    cat("\nOptima and maxima\n")
+    cat("\nOptimums and maximums\n")
     print(cbind(Optimum = optmat,
                 Maximum), ...)
     if (Rank > 1) {  # !object at Diagonal && Rank > 1
@@ -1318,7 +1321,7 @@ predictqrrvglm <-
            deriv = 0,
            dispersion = NULL,
           extra = object@extra, 
-           varI.latvar = FALSE, reference = NULL, ...) {
+           varI.latvar = FALSE, refResponse = NULL, ...) {
   if (se.fit)
     stop("cannot handle se.fit == TRUE yet")
   if (deriv != 0)
@@ -1402,9 +1405,9 @@ predictqrrvglm <-
 
 
     if (length(newdata)) {
-      Coefs <- Coef(object, varI.latvar = varI.latvar, reference = reference)
-      X1mat <- X[,ocontrol$colx1.index, drop = FALSE]
-      X2mat <- X[,ocontrol$colx2.index, drop = FALSE]
+      Coefs <- Coef(object, varI.latvar = varI.latvar, refResponse = refResponse)
+      X1mat <- X[, ocontrol$colx1.index, drop = FALSE]
+      X2mat <- X[, ocontrol$colx2.index, drop = FALSE]
       latvarmat <- as.matrix(X2mat %*% Coefs at C)  # n x Rank
 
       etamat <- as.matrix(X1mat %*% Coefs at B1 + latvarmat %*% t(Coefs at A))
@@ -1412,8 +1415,8 @@ predictqrrvglm <-
       for (sppno in 1:length(which.species)) {
         thisSpecies <- which.species[sppno]
         Dmat <- matrix(Coefs at D[,,thisSpecies], Rank, Rank)
-        etamat[,thisSpecies] <- etamat[,thisSpecies] +
-                               mux34(latvarmat, Dmat, symmetric = TRUE)
+        etamat[, thisSpecies] <- etamat[, thisSpecies] +
+                                 mux34(latvarmat, Dmat, symmetric = TRUE)
       }
     } else {
      etamat <- object@predictors
@@ -1751,7 +1754,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
       bb <- NULL 
       for (ii in 1:length(ooo)) {
         if (any(ooo[[ii]][1] == colx1.index))
-            bb <- c(bb, names(ooo)[ii])
+          bb <- c(bb, names(ooo)[ii])
       }
 
       has.intercept <- any(bb == "(Intercept)")
@@ -1772,8 +1775,8 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
    if (fit@misc$dataname == "list") {
       dspec <- FALSE
     } else {
-      mytext1 <- "exists(x=fit at misc$dataname, envir = VGAMenv)"
-      myexp1 <- parse(text=mytext1)
+      mytext1 <- "exists(x = fit at misc$dataname, envir = VGAMenv)"
+      myexp1 <- parse(text = mytext1)
       is.there <- eval(myexp1)
       bbdata <- if (is.there)
                get(fit@misc$dataname, envir = VGAMenv) else
@@ -1784,14 +1787,14 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
     fit1122 <- if (dspec)
                vlm(bb,
-                  constraints = Hlist, criterion = "d", weights = wz,
-                  data = bbdata,
-                  save.weight = TRUE, smart = FALSE, trace = trace.arg,
-                  x.arg = TRUE) else
+                   constraints = Hlist, criterion = "d", weights = wz,
+                   data = bbdata,
+                   save.weight = TRUE, smart = FALSE, trace = trace.arg,
+                   x.arg = TRUE) else
                vlm(bb,
-                  constraints = Hlist, criterion = "d", weights = wz,
-                  save.weight = TRUE, smart = FALSE, trace = trace.arg,
-                  x.arg = TRUE)
+                   constraints = Hlist, criterion = "d", weights = wz,
+                   save.weight = TRUE, smart = FALSE, trace = trace.arg,
+                   x.arg = TRUE)
 
 
 
@@ -1809,47 +1812,46 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
 
     if (omit13) 
-        cov13 <- cov13 * 0   # zero it
+      cov13 <- cov13 * 0   # zero it
 
     if (kill.all) {
-        cov13 <- cov13 * 0   # zero it
-        if (fixA) {
-            cov12 <- cov12 * 0   # zero it
-        } else {
-            cov23 <- cov23 * 0   # zero it
-        }
+      cov13 <- cov13 * 0   # zero it
+      if (fixA) {
+        cov12 <- cov12 * 0   # zero it
+      } else {
+        cov23 <- cov23 * 0   # zero it
+      }
     }
 
- cov13 <- -cov13   # Richards (1961)
+   cov13 <- -cov13  # Richards (1961)
 
     if (fixA) {
-        cov.unscaled <- rbind(cbind(cov1122, rbind(cov13, cov23)),
-                              cbind(t(cov13), t(cov23), cov33))
+      cov.unscaled <- rbind(cbind(cov1122, rbind(cov13, cov23)),
+                            cbind(t(cov13), t(cov23), cov33))
     } else {
-        cov.unscaled <- rbind(cbind(cov11, cov12, cov13),
-                              cbind(rbind(t(cov12), t(cov13)), cov2233))
+      cov.unscaled <- rbind(cbind(cov11, cov12, cov13),
+                            cbind(rbind(t(cov12), t(cov13)), cov2233))
     }
 
     ans <- solve(cov.unscaled)
 
-    # Get all the coefficients 
    acoefs <- c(fit1122@coefficients[log.vec11], fit@coefficients)
     dimnames(ans) <- list(names(acoefs), names(acoefs))
     list(cov.unscaled = ans,
          coefficients = acoefs,
-         res.ss = sfit1122@res.ss)
+         res.ss       = sfit1122@res.ss)
 }
 
 
 
 get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
 
-    d8 <-  dimnames(cov.unscaled)[[1]]
-    ans <- matrix(coefficients, length(coefficients), 3) 
-    ans[, 2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
-    ans[, 3] <- ans[, 1] / ans[, 2]
-    dimnames(ans) <- list(d8, c("Estimate", "Std. Error", "z value"))
-    ans
+  d8 <-  dimnames(cov.unscaled)[[1]]
+  ans <- matrix(coefficients, length(coefficients), 3) 
+  ans[, 2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
+  ans[, 3] <- ans[, 1] / ans[, 2]
+  dimnames(ans) <- list(d8, c("Estimate", "Std. Error", "z value"))
+  ans
 }
 
 
@@ -1860,56 +1862,57 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
                           xij = NULL, str0 = NULL) {
 
 
-    nn <- nrow(x2mat)
-    if (nrow(Cimat) != p2 || ncol(Cimat) != r)
-        stop("'Cimat' wrong shape")
-
-    dct.da <- matrix(as.numeric(NA), (M-r-length(str0))*r, r*p2)
-
-    if ((length(Index.corner) + length(str0)) == M)
-        stop("cannot handle full rank models yet")
-    cbindex <- (1:M)[-c(Index.corner, str0)]
-
-    ptr <- 1
-    for (sss in 1:r)
-        for (tt in cbindex) {
-            small.Hlist <- vector("list", p2)
-            pAmat <- Aimat
-            pAmat[tt,sss] <- pAmat[tt,sss] + h.step   # Perturb it
-            for (ii in 1:p2)
-                small.Hlist[[ii]] <- pAmat
-
-            offset <- if (length(fit@offset)) fit@offset else 0
-            if (all(offset == 0)) offset <- 0
-            neweta <- x2mat %*% Cimat %*% t(pAmat)
-            if (is.numeric(x1mat))
-              neweta <- neweta + x1mat %*% B1mat
-            fit@predictors <- neweta
-
-
-            newmu <- fit@family@linkinv(neweta, fit@extra) 
-            fit@fitted.values <- as.matrix(newmu)  # 20100909
+  nn <- nrow(x2mat)
+  if (nrow(Cimat) != p2 || ncol(Cimat) != r)
+    stop("'Cimat' wrong shape")
 
-            fred <- weights(fit, type = "w", deriv = TRUE, ignore.slot = TRUE)
-            if (!length(fred))
-              stop("cannot get @weights and @deriv from object")
-            wz <- fred$weights
-            deriv.mu <- fred$deriv
+  dct.da <- matrix(as.numeric(NA), (M-r-length(str0))*r, r*p2)
 
-            U <- vchol(wz, M = M, n = nn, silent = TRUE)
-            tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = nn)
-            newzmat <- neweta + vbacksub(U, tvfor, M = M, n = nn) - offset
-            if (is.numeric(x1mat))
-              newzmat <- newzmat - x1mat %*% B1mat
+  if ((length(Index.corner) + length(str0)) == M)
+    stop("cannot handle full rank models yet")
+  cbindex <- (1:M)[-c(Index.corner, str0)]
 
-            newfit <- vlm.wfit(xmat = x2mat, zmat = newzmat,
+  ptr <- 1
+  for (sss in 1:r)
+    for (tt in cbindex) {
+      small.Hlist <- vector("list", p2)
+      pAmat <- Aimat
+      pAmat[tt,sss] <- pAmat[tt,sss] + h.step   # Perturb it
+      for (ii in 1:p2)
+        small.Hlist[[ii]] <- pAmat
+
+      offset <- if (length(fit@offset)) fit@offset else 0
+      if (all(offset == 0))
+        offset <- 0
+      neweta <- x2mat %*% Cimat %*% t(pAmat)
+      if (is.numeric(x1mat))
+        neweta <- neweta + x1mat %*% B1mat
+      fit@predictors <- neweta
+
+
+      newmu <- fit@family@linkinv(neweta, fit@extra) 
+      fit@fitted.values <- as.matrix(newmu)  # 20100909
+
+      fred <- weights(fit, type = "w", deriv = TRUE, ignore.slot = TRUE)
+      if (!length(fred))
+        stop("cannot get @weights and @deriv from object")
+      wz <- fred$weights
+      deriv.mu <- fred$deriv
+
+      U <- vchol(wz, M = M, n = nn, silent = TRUE)
+      tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = nn)
+      newzmat <- neweta + vbacksub(U, tvfor, M = M, n = nn) - offset
+      if (is.numeric(x1mat))
+        newzmat <- newzmat - x1mat %*% B1mat
+
+      newfit <- vlm.wfit(xmat = x2mat, zmat = newzmat,
                                Hlist = small.Hlist, U = U,
                                matrix.out = FALSE, is.vlmX = FALSE,
                                res.ss = TRUE, qr = FALSE, x.ret = FALSE,
                                offset = NULL, xij = xij)
-            dct.da[ptr,] <- (newfit$coef - t(Cimat)) / h.step
-            ptr <- ptr + 1
-        }
+      dct.da[ptr, ] <- (newfit$coef - t(Cimat)) / h.step
+      ptr <- ptr + 1
+    }
 
     dct.da
 }
@@ -1936,20 +1939,20 @@ dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
   barney <- kronecker(matrix(1, nn, 1), barney)
 
   temp <- array(t(barney*fred), c(p2*r, M, nn))
-  temp <- aperm(temp, c(2, 1, 3))     # M by p2*r by nn
+  temp <- aperm(temp, c(2, 1, 3))  # M by p2*r by nn
   temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
-  temp <- m2adefault(temp, M=p2*r)         # Note M != M here!
-  G <- solve(rowSums(temp, dims = 2))   # p2*r by p2*r 
+  temp <- m2a(temp, M = p2 * r)  # Note M != M here!
+  G <- solve(rowSums(temp, dims = 2))  # p2*r by p2*r 
 
   dc.da <- array(NA, c(p2, r, M, r))  # different from other functions
   if (length(Index.corner) == M)
       stop("cannot handle full rank models yet")
-  cbindex <- (1:M)[-Index.corner]    # complement of Index.corner 
+  cbindex <- (1:M)[-Index.corner]  # complement of Index.corner 
   resid2 <- if (length(x1mat))
-   mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
-         upper = FALSE, as.matrix = TRUE) else
-   mux22(t(wz), zmat                  , M = M,
-         upper = FALSE, as.matrix = TRUE)
+    mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
+          upper = FALSE, as.matrix = TRUE) else
+    mux22(t(wz), zmat                  , M = M,
+          upper = FALSE, as.matrix = TRUE)
 
   for (sss in 1:r)
     for (ttt in cbindex) {
@@ -2023,7 +2026,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
   temp <- array(t(barney*fred), c(r*pp,M,nn))
   temp <- aperm(temp, c(2, 1, 3))
   temp <- mux5(wz, temp, M = M, matrix.arg = TRUE)
-  temp <- m2adefault(temp, M = r*pp)     # Note M != M here!
+  temp <- m2a(temp, M = r * pp)     # Note M != M here!
   G <- solve(rowSums(temp, dims = 2))
 
   dc.da <- array(NA, c(pp,r,M,r))  # different from other functions
@@ -2071,14 +2074,14 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
   etastar <- (if (intercept) xmat[,-1, drop = FALSE] else xmat) %*% Cimat
   eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
 
-  sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[,, 1])
+  sumWinv <- solve((m2a(t(colSums(wz)), M = M))[, , 1])
 
   deta0.da <- array(0,c(M,M,r))
   AtWi <- kronecker(matrix(1, nn, 1), Aimat)
   AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE)  # matrix.arg= TRUE, 
   AtWi <- array(t(AtWi), c(r, M, nn))
   for (ss in 1:r) {
-    temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,, 1]  # MxM
+    temp90 <- (m2a(t(colSums(etastar[, ss]*wz)), M = M))[, , 1]  # MxM
     temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
     temp93 <- mux7(temp92, AtWi)
     temp91 <- rowSums(temp93, dims = 2)  # M x M
@@ -2163,7 +2166,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
   temp <- array(t(barney*fred), c(r*pp, M, nn))
   temp <- aperm(temp, c(2, 1, 3))
   temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
-  temp <- m2adefault(temp, M = r*pp)     # Note M != M here!
+  temp <- m2a(temp, M = r * pp)  # Note M != M here!
   G <- solve(rowSums(temp, dims = 2))
 
   dc.da <- array(NA,c(pp,r,r,M))
@@ -2204,7 +2207,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
   etastar <- (if (intercept) xmat[, -1, drop = FALSE] else xmat) %*% Cimat
   eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
 
-  sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[, , 1])
+  sumWinv <- solve((m2a(t(colSums(wz)), M = M))[, , 1])
 
   deta0.da <- array(0, c(M, M, r))
 
@@ -2213,7 +2216,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
   AtWi <- array(t(AtWi), c(r, M, nn))
 
   for (ss in 1:r) {
-    temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,, 1]
+    temp90 <- (m2a(t(colSums(etastar[, ss] * wz)), M = M))[, , 1]
     temp92 <- array(detastar.da[, , ss, ], c(M, r, nn))
     temp93 <- mux7(temp92,AtWi)
     temp91 <- apply(temp93, 1:2,sum)  # M x M
@@ -2265,7 +2268,7 @@ biplot.qrrvglm <- function(x, ...) {
 
 
  lvplot.qrrvglm <-
-  function(object, varI.latvar = FALSE, reference = NULL,
+  function(object, varI.latvar = FALSE, refResponse = NULL,
            add = FALSE, show.plot = TRUE, rug = TRUE, y = FALSE, 
            type = c("fitted.values", "predictors"),
            xlab = paste("Latent Variable",
@@ -2317,7 +2320,7 @@ biplot.qrrvglm <- function(x, ...) {
              "noRRR = ~ 1 models")
 
     Coef.list <- Coef(object, varI.latvar = varI.latvar,
-                      reference = reference)
+                      refResponse = refResponse)
    if ( C) Cmat <- Coef.list@C
    nustar <- Coef.list@latvar  # n x Rank 
 
@@ -2714,11 +2717,11 @@ setMethod("biplot",  "rrvglm", function(x, ...)
 
 summary.qrrvglm <-
   function(object,
-           varI.latvar = FALSE, reference = NULL, ...) {
+           varI.latvar = FALSE, refResponse = NULL, ...) {
     answer <- object
    answer@post$Coef <- Coef(object,
                              varI.latvar = varI.latvar,
-                             reference = reference, 
+                             refResponse = refResponse, 
                              ...)  # Store it here; non-elegant
 
  if (length((answer@post$Coef)@dispersion) &&
@@ -2993,7 +2996,7 @@ trplot.qrrvglm <-
 setMethod("trplot", "qrrvglm",
     function(object, ...) trplot.qrrvglm(object, ...))
 
-setMethod("trplot", "cao",
+setMethod("trplot", "rrvgam",
     function(object, ...) trplot.qrrvglm(object, ...))
 
 
@@ -3110,7 +3113,7 @@ setMethod("model.matrix",  "qrrvglm", function(object, ...)
 
 
 perspqrrvglm <-
-  function(x, varI.latvar = FALSE, reference = NULL,
+  function(x, varI.latvar = FALSE, refResponse = NULL,
       show.plot = TRUE,
       xlim = NULL, ylim = NULL,
       zlim = NULL,  # zlim ignored if Rank == 1
@@ -3132,7 +3135,7 @@ perspqrrvglm <-
   oylim <- ylim
   object <- x  # Do not like x as the primary argument 
   coef.obj <- Coef(object, varI.latvar = varI.latvar,
-                   reference = reference)
+                   refResponse = refResponse)
   if ((Rank <- coef.obj at Rank) > 2)
     stop("object must be a rank-1 or rank-2 model")
   fv <- fitted(object)
@@ -3269,7 +3272,7 @@ Rank.qrrvglm <- function(object, ...) {
 }
 
 
-Rank.cao <- function(object, ...) {
+Rank.rrvgam <- function(object, ...) {
   object at control$Rank
 }
 
@@ -3277,8 +3280,8 @@ Rank.cao <- function(object, ...) {
 
 
 concoef.qrrvglm <- function(object, varI.latvar = FALSE,
-                          reference = NULL, ...) {
-  Coef(object, varI.latvar = varI.latvar, reference = reference, ...)@C
+                          refResponse = NULL, ...) {
+  Coef(object, varI.latvar = varI.latvar, refResponse = refResponse, ...)@C
 }
 
 
@@ -3300,10 +3303,10 @@ latvar.rrvglm <- function(object, ...) {
 
 latvar.qrrvglm <- function(object,
                            varI.latvar = FALSE,
-                           reference = NULL, ...) {
+                           refResponse = NULL, ...) {
   Coef(object,
        varI.latvar = varI.latvar,
-       reference = reference, ...)@latvar
+       refResponse = refResponse, ...)@latvar
 }
 
 
@@ -3316,9 +3319,9 @@ latvar.Coef.qrrvglm <- function(object, ...) {
 
 Max.qrrvglm <-
   function(object, varI.latvar = FALSE,
-           reference = NULL, ...) {
+           refResponse = NULL, ...) {
   Coef(object, varI.latvar = varI.latvar,
-       reference = reference, ...)@Maximum
+       refResponse = refResponse, ...)@Maximum
 }
 
 
@@ -3332,23 +3335,23 @@ Max.Coef.qrrvglm <- function(object, ...) {
 
 
 Opt.qrrvglm <-
-  function(object, varI.latvar = FALSE, reference = NULL, ...) {
+  function(object, varI.latvar = FALSE, refResponse = NULL, ...) {
       Coef(object, varI.latvar = varI.latvar,
-           reference = reference, ...)@Optimum
+           refResponse = refResponse, ...)@Optimum
 }
 
 
 Opt.Coef.qrrvglm <- function(object, ...) {
   if (length(list(...)))
     warning("Too late! Ignoring the extra arguments")
-  Coef(object, ...)@Optimum
+  object@Optimum
 }
 
 
 Tol.qrrvglm <-
-  function(object, varI.latvar = FALSE, reference = NULL, ...) {
+  function(object, varI.latvar = FALSE, refResponse = NULL, ...) {
       Coef(object, varI.latvar = varI.latvar,
-           reference = reference, ...)@Tolerance
+           refResponse = refResponse, ...)@Tolerance
 }
 
 
@@ -3361,6 +3364,7 @@ Tol.Coef.qrrvglm <- function(object, ...) {
 
 
 
+ if (FALSE) {
  if (!isGeneric("ccoef"))
     setGeneric("ccoef", function(object, ...) {
     .Deprecated("concoef")
@@ -3376,6 +3380,9 @@ setMethod("ccoef",  "Coef.rrvglm",
   function(object, ...) concoef.Coef.qrrvglm(object, ...))
 setMethod("ccoef", "Coef.qrrvglm",
   function(object, ...) concoef.Coef.qrrvglm(object, ...))
+}
+
+
  if (!isGeneric("concoef"))
     setGeneric("concoef", function(object, ...)
     standardGeneric("concoef")) 
@@ -3456,7 +3463,7 @@ setMethod("Max", "Coef.qrrvglm",
 
 
 
-setMethod("Max", "cao",
+setMethod("Max", "rrvgam",
   function(object, ...) Coef(object, ...)@Maximum)
 
 
@@ -3471,7 +3478,7 @@ setMethod("Opt", "Coef.qrrvglm",
   function(object, ...) Opt.Coef.qrrvglm(object, ...))
 
 
-setMethod("Opt", "cao",
+setMethod("Opt", "rrvgam",
   function(object, ...) Coef(object, ...)@Optimum)
 
 
@@ -3516,7 +3523,7 @@ is.bell.qrrvglm <- function(object, ...) {
 }
 
 
-is.bell.cao <- function(object, ...) {
+is.bell.rrvgam <- function(object, ...) {
   NA * Max(object, ...)
 }
 
@@ -3530,8 +3537,8 @@ setMethod("is.bell","rrvglm",
   function(object, ...) is.bell.rrvglm(object, ...))
 setMethod("is.bell","vlm",
   function(object, ...) is.bell.vlm(object, ...))
-setMethod("is.bell","cao",
-  function(object, ...) is.bell.cao(object, ...))
+setMethod("is.bell","rrvgam",
+  function(object, ...) is.bell.rrvgam(object, ...))
 setMethod("is.bell","Coef.qrrvglm",
   function(object,...) is.bell.qrrvglm(object,...))
 
@@ -3545,8 +3552,8 @@ setMethod("Rank",  "rrvglm",
   function(object, ...) Rank.rrvglm(object, ...))
 setMethod("Rank", "qrrvglm",
   function(object, ...) Rank.qrrvglm(object, ...))
-setMethod("Rank", "cao",
-  function(object, ...) Rank.cao(object, ...))
+setMethod("Rank", "rrvgam",
+  function(object, ...) Rank.rrvgam(object, ...))
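
Across the methods above, the argument 'reference' becomes 'refResponse'
(in Coef(), predictqrrvglm(), lvplot(), persp(), concoef(), latvar(),
Max(), Opt() and Tol()), and methods formerly registered for class "cao"
now dispatch on class "rrvgam". A sketch against a fitted CQO object, here
called 'p1' (the object and response names are illustrative):

    Coef(p1, refResponse = "Pardmont")   # was: Coef(p1, reference = "Pardmont")
    Tol(p1, refResponse = 2)
    Max(p1)
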
 
 
 
diff --git a/R/family.sur.R b/R/family.sur.R
index b51c5a1..acad9d6 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -12,12 +12,12 @@
 
 
 
- SUR <- function(
-                 mle.normal = FALSE,
-                 divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
-                 parallel = FALSE, 
-                 Varcov = NULL,
-                 matrix.arg = FALSE) {
+ SURff <-
+  function(mle.normal = FALSE,
+           divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
+           parallel = FALSE, 
+           Varcov = NULL,
+           matrix.arg = FALSE) {
 
 
 
@@ -58,7 +58,7 @@
   new("vglmff",
   blurb = c("Seemingly unrelated regressions"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
@@ -177,7 +177,7 @@
             .divisor = divisor
           ))),
 
-  vfamily = "SUR",
+  vfamily = "SURff",
 
 
   deriv = eval(substitute(expression({
@@ -272,7 +272,7 @@
       temp1 <- ResSS.vgam(y-mu, wz = wz, M = M)
       onewz <- if (length(extra$invSigma.mat))
                  extra$invSigma.mat else
-                 (m2adefault(wz[1, , drop = FALSE], M = M))[,, 1]  # M x M
+                 (m2a(wz[1, , drop = FALSE], M = M))[,, 1]  # M x M
 
 
       logdet <- determinant(onewz)$modulus
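
The seemingly-unrelated-regressions family function SUR() is renamed
SURff(), with its arguments unchanged. A sketch of a two-equation fit in
the new spelling; the data frame and variable names are illustrative:

    fit.sur <- vglm(cbind(y1, y2) ~ x2 + x3,
                    SURff(mle.normal = TRUE),
                    data = mydat, trace = TRUE)
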
diff --git a/R/family.survival.R b/R/family.survival.R
index cfeda14..4ec7b0e 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -12,7 +12,7 @@
 
 
 
- double.cennormal <-
+ double.cens.normal <-
   function(r1 = 0, r2 = 0,
            lmu = "identitylink",
            lsd = "loge",
@@ -41,7 +41,7 @@
             "\n",
             "Variance: sd^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }) , list( .zero = zero))),
 
 
@@ -113,7 +113,7 @@
   } , list( .lmu = lmu, .lsd = lsd,
             .emu = emu, .esd = esd,
             .r1 = r1, .r2 = r2 ))),
-  vfamily = c("double.cennormal"),
+  vfamily = c("double.cens.normal"),
   deriv = eval(substitute(expression({
    sd <- eta2theta(eta[, 2], .lsd, earg = .esd)
 
@@ -165,7 +165,7 @@
 
 
 
-dbisa <- function(x, shape, scale = 1, log = FALSE) {
+dbisa <- function(x, scale = 1, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -193,7 +193,7 @@ dbisa <- function(x, shape, scale = 1, log = FALSE) {
 }
 
 
-pbisa <- function(q, shape, scale = 1) {
+pbisa <- function(q, scale = 1, shape) {
   if (!is.Numeric(q))
     stop("bad input for argument 'q'")
   if (!is.Numeric(shape, positive = TRUE))
@@ -207,7 +207,7 @@ pbisa <- function(q, shape, scale = 1) {
 }
 
 
-qbisa <- function(p, shape, scale = 1) {
+qbisa <- function(p, scale = 1, shape) {
   if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
       stop("argument 'p' must have values inside the interval (0,1)")
   if (!is.Numeric(shape, positive = TRUE))
@@ -222,7 +222,7 @@ qbisa <- function(p, shape, scale = 1) {
 }
 
 
-rbisa <- function(n, shape, scale = 1) {
+rbisa <- function(n, scale = 1, shape) {
 
   A <- rnorm(n)
   temp1 <- A * shape
@@ -245,9 +245,15 @@ rbisa <- function(n, shape, scale = 1) {
 
 
 
- bisa <- function(lshape = "loge", lscale = "loge",
-                  ishape = NULL,   iscale = 1,
-                  imethod = 1, zero = NULL) {
+ bisa <- function(lscale = "loge", lshape = "loge",
+                  iscale = 1,      ishape = NULL,
+                  imethod = 1, zero = NULL, nowarning = FALSE) {
+
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
+
+
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
   lshape <- attr(eshape, "function.name")
@@ -270,22 +276,22 @@ rbisa <- function(n, shape, scale = 1) {
   new("vglmff",
   blurb = c("Birnbaum-Saunders distribution\n\n",
             "Links:    ",
-            namesof("shape", lshape, earg = eshape, tag = TRUE), "; ",
-            namesof("scale", lscale, earg = escale, tag = TRUE)),
+            namesof("scale", lscale, earg = escale, tag = TRUE), "; ",
+            namesof("shape", lshape, earg = eshape, tag = TRUE)),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }) , list( .zero = zero))),
   initialize = eval(substitute(expression({
     if (ncol(y <- cbind(y)) != 1)
       stop("the response must be a vector or a one-column matrix")
 
     predictors.names <-
-      c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
-        namesof("scale", .lscale, earg = .escale, tag = FALSE))
+      c(namesof("scale", .lscale , earg = .escale, tag = FALSE),
+        namesof("shape", .lshape , earg = .eshape, tag = FALSE))
 
     if (!length(etastart)) {
-      scale.init <- rep( .iscale, len = n)
-      shape.init <- if (is.Numeric( .ishape)) rep( .ishape, len = n) else {
+      scale.init <- rep( .iscale , len = n)
+      shape.init <- if (is.Numeric( .ishape)) rep( .ishape , len = n) else {
       if ( .imethod == 1) {
         ybar <- rep(weighted.mean(y, w), len = n)
        ybarr <- rep(1 / weighted.mean(1/y, w), len = n)  # Requires y > 0
@@ -297,23 +303,23 @@ rbisa <- function(n, shape, scale = 1) {
         sqrt(2*(pmax(ybar, scale.init + 0.1) / scale.init - 1))
       }
     }
-      etastart <- cbind(theta2eta(shape.init, .lshape, earg = .eshape),
-                        theta2eta(scale.init, .lscale, earg = .escale))
+      etastart <- cbind(theta2eta(scale.init, .lscale , earg = .escale ),
+                        theta2eta(shape.init, .lshape , earg = .eshape ))
     }
   }) , list( .lshape = lshape, .lscale = lscale,
              .ishape = ishape, .iscale = iscale,
              .eshape = eshape, .escale = escale,
              .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    sh <- eta2theta(eta[, 1], .lshape, earg = .eshape)
-    sc <- eta2theta(eta[, 2], .lscale, earg = .escale)
+    sc <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    sh <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     sc * (1 + sh^2 / 2)
   }, list( .lshape = lshape, .lscale = lscale,
            .eshape = eshape, .escale = escale ))),
   last = eval(substitute(expression({
-    misc$link <-    c(shape = .lshape, scale = .lscale)
+    misc$link <-    c(scale = .lscale , shape = .lshape )
 
-    misc$earg <- list(shape = .eshape, scale = .escale)
+    misc$earg <- list(scale = .escale , shape = .eshape )
 
     misc$expected <- TRUE
     misc$multipleResponses <- FALSE
@@ -323,12 +329,12 @@ rbisa <- function(n, shape, scale = 1) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    sh <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    sc <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    sc <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    sh <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dbisa(x = y, shape = sh, scale = sc, log = TRUE)
+      ll.elts <- c(w) * dbisa(x = y, scale = sc, shape = sh, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -340,28 +346,28 @@ rbisa <- function(n, shape, scale = 1) {
 
   vfamily = c("bisa"),
   deriv = eval(substitute(expression({
-    sh <- eta2theta(eta[, 1], .lshape, earg = .eshape)
-    sc <- eta2theta(eta[, 2], .lscale, earg = .escale)
+    sc <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    sh <- eta2theta(eta[, 2], .lshape , earg = .eshape )
 
     dl.dsh <- ((y/sc - 2 + sc/y) / sh^2 - 1) / sh 
     dl.dsc <- -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
              (sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
 
-    dsh.deta <- dtheta.deta(sh, .lshape, earg = .eshape)
-    dsc.deta <- dtheta.deta(sc, .lscale, earg = .escale)
+    dsh.deta <- dtheta.deta(sh, .lshape , earg = .eshape )
+    dsc.deta <- dtheta.deta(sc, .lscale , earg = .escale )
 
-    c(w) * cbind(dl.dsh * dsh.deta,
-                 dl.dsc * dsc.deta)
+    c(w) * cbind(dl.dsc * dsc.deta,
+                 dl.dsh * dsh.deta)
   }) , list( .lshape = lshape, .lscale = lscale,
              .eshape = eshape, .escale = escale ))),
   weight = eval(substitute(expression({
     wz <- matrix(as.numeric(NA), n, M)  # Diagonal!!
-    wz[,iam(1,1,M)] <- 2 * dsh.deta^2 / sh^2
+    wz[, iam(2, 2, M)] <- 2 * dsh.deta^2 / sh^2
     hfunction <- function(alpha)
       alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
                            pnorm(2/alpha, lower.tail = FALSE)
-    wz[,iam(2,2,M)] <- dsc.deta^2 *
-                       (sh * hfunction(sh)  / sqrt(2*pi) + 1) / (sh*sc)^2
+    wz[, iam(1, 1, M)] <- dsc.deta^2 *
+                          (sh * hfunction(sh) / sqrt(2*pi) + 1) / (sh*sc)^2
     c(w) * wz
   }), list( .zero = zero ))))
 }
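
The hunks above swap bisa()'s two linear/additive predictors (scale now
precedes shape) and reorder the scale and shape arguments of pbisa(),
qbisa() and rbisa() to match.  A minimal sketch of the new calling
convention on simulated data (illustrative only, not part of the patch):

    library(VGAM)
    set.seed(1)
    bdata <- data.frame(y = rbisa(100, scale = 2, shape = 0.5))
    # scale is now eta_1 and shape eta_2; nowarning = TRUE suppresses
    # the one-off reordering warning introduced above.
    fit <- vglm(y ~ 1, bisa(nowarning = TRUE), data = bdata)
    coef(fit, matrix = TRUE)
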
diff --git a/R/family.ts.R b/R/family.ts.R
index 50040cf..56c7ae8 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -324,7 +324,7 @@ vglm.garma.control <- function(save.weight = TRUE, ...) {
     if ( .link == "logit"   || .link == "probit" ||
          .link == "cloglog" || .link == "cauchit") {
         delete.zero.colns <- TRUE
-        eval(process.categorical.data.vgam)
+        eval(process.categorical.data.VGAM)
         mustart <- mustart[tt.index, 2]
         y <- y[, 2]
     } else {
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 1de0cbb..84368f8 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -28,31 +28,6 @@
 
 
 
- getMaxMin <- function(vov, objfun, y, x, w, extraargs = NULL,
-                       maximize = TRUE, abs.arg = FALSE,
-                       ret.objfun = FALSE) {
-  if (!is.vector(vov))
-    stop("'vov' must be a vector")
-  objvals <- vov
-  for (ii in 1:length(vov))
-    objvals[ii] <- objfun(vov[ii], y = y, x = x, w = w,
-                          extraargs = extraargs)
-  try.this <- if (abs.arg) {
-               if (maximize) vov[abs(objvals) == max(abs(objvals))] else
-               vov[abs(objvals) == min(abs(objvals))]
-             } else {
-               if (maximize) vov[objvals == max(objvals)] else
-               vov[objvals == min(objvals)]
-             }
-  if (!length(try.this))
-    stop("something has gone wrong!")
-  ans <- if (length(try.this) == 1)
-    try.this else sample(try.this, size = 1)
-  if (ret.objfun) c(ans, objvals[ans == vov]) else ans
-}
-
-
-
 
  mccullagh89 <- function(ltheta = "rhobit",
                          lnu = logoff(offset = 0.5),
@@ -88,7 +63,7 @@
             "\n", "\n",
             "Mean:     nu*theta/(1+nu)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     w.y.check(w, y)
@@ -109,9 +84,9 @@
           mean((y - thetaval) *
                (thetaval^2 - 1) / (1 - 2*thetaval*y + thetaval^2))
         theta.grid <- seq(-0.9, 0.9, by = 0.05)
-        try.this <- getMaxMin(theta.grid, objfun = mccullagh89.aux,
-                              y = y,  x = x, w = w, maximize = FALSE,
-                              abs.arg = TRUE)
+        try.this <- grid.search(theta.grid, objfun = mccullagh89.aux,
+                                y = y,  x = x, w = w, maximize = FALSE,
+                                abs.arg = TRUE)
         try.this <- rep(try.this, length.out = n)
         try.this
       }
@@ -440,16 +415,16 @@ rhzeta <- function(n, alpha) {
     } else {
       mycmatrix <- if (M == 1) diag(1) else diag(M)
     }
-    constraints <- cm.vgam(mycmatrix, x = x,
+    constraints <- cm.VGAM(mycmatrix, x = x,
                            bool = .PARALLEL ,
                            constraints, apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .ZERO , M)
+    constraints <- cm.zero.VGAM(constraints, x, .ZERO , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
 
     delete.zero.colns <- TRUE
-    eval(process.categorical.data.vgam)
+    eval(process.categorical.data.VGAM)
 
     if (length(mustart.orig))
       mustart <- mustart.orig
@@ -756,10 +731,10 @@ dirmul.old <- function(link = "loge", init.alpha = 0.01,
             "Posterior mean:    (n_j + shape_j)/(2*sum(n_j) + ",
                                 "sum(shape_j))\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints, apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- as.matrix(y)
@@ -923,10 +898,10 @@ rdiric <- function(n, shape, dimension = NULL,
             namesof("shapej", link, earg = earg), "\n\n",
             "Mean:     shape_j/(1 + sum(shape_j)), j = 1,..,ncol(y)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints, apply.int = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- as.matrix(y)
@@ -1222,8 +1197,8 @@ dzeta <- function(x, p, log = FALSE) {
                        n, M, byrow = TRUE)
       if (!length( .init.p ))
       for (spp. in 1:ncoly) {
-        pp.init[, spp.] <- getMaxMin(p.grid, objfun = zetaff.Loglikfun,
-                                     y = y[, spp.], x = x, w = w[, spp.])
+        pp.init[, spp.] <- grid.search(p.grid, objfun = zetaff.Loglikfun,
+                                       y = y[, spp.], x = x, w = w[, spp.])
         if ( .link == "loglog")
           pp.init[pp.init <= 1, spp.] <- 1.2
       }
@@ -1547,12 +1522,12 @@ cauchy.control <- function(save.weight = TRUE, ...) {
             "Mean:     NA\n",
             "Variance: NA"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     predictors.names <- c(
       namesof("location", .llocat , earg = .elocat , tag = FALSE),
-      namesof("scale",    .lscale ,    earg = .escale ,    tag = FALSE))
+      namesof("scale",    .lscale , earg = .escale , tag = FALSE))
 
 
 
@@ -1576,8 +1551,8 @@ cauchy.control <- function(save.weight = TRUE, ...) {
                                     log = TRUE))
              }
              loc.grid <- c(quantile(y, probs = seq(0.1, 0.9, by = 0.05)))
-             try.this <- getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
-                                  y = y,  x = x, w = w)
+             try.this <- grid.search(loc.grid, objfun = cauchy2.Loglikfun,
+                                     y = y,  x = x, w = w)
                 try.this <- rep(c(try.this), length.out = n)
                 try.this
             }
@@ -1762,10 +1737,10 @@ cauchy.control <- function(save.weight = TRUE, ...) {
                }
                loc.grid <- quantile(y, probs = seq(0.1, 0.9,
                                                   by = 0.05))
-                 try.this <- getMaxMin(loc.grid,
-                                    objfun = cauchy1.Loglikfun,
-                                    y = y,  x = x, w = w,
-                                    extraargs = .scale.arg )
+                 try.this <- grid.search(loc.grid,
+                                         objfun = cauchy1.Loglikfun,
+                                         y = y,  x = x, w = w,
+                                         extraargs = .scale.arg )
               try.this <- rep(try.this, length.out = n)
               try.this
             }
@@ -1967,7 +1942,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
   function(shape.arg, link = "loge",
            imethod = 1, zero = NULL) {
 
-  if (!is.Numeric(shape.arg, length.arg = 1,
+  if (!is.Numeric(shape.arg,  # length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
       stop("'shape' must be a positive integer")
   if (!is.Numeric(imethod, length.arg = 1,
@@ -1995,7 +1970,7 @@ cauchy.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -2031,15 +2006,18 @@ cauchy.control <- function(save.weight = TRUE, ...) {
       namesof(mynames1, .link , earg = .earg , tag = FALSE)
 
 
+    shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
+                        byrow = TRUE)
+
     if (!length(etastart)) {
         if ( .imethod == 1) {
-          sc.init <- y / .shape.arg
+          sc.init <- y / shape.mat
         }
         if ( .imethod == 2) {
-          sc.init <- (colSums(y * w) / colSums(w))/ .shape.arg
+          sc.init <- (colSums(y * w) / colSums(w)) / shape.mat
         }
         if ( .imethod == 3) {
-          sc.init <- median(y) / .shape.arg
+          sc.init <- median(y) / shape.mat
         }
 
         if ( !is.matrix(sc.init))
@@ -2052,8 +2030,10 @@ cauchy.control <- function(save.weight = TRUE, ...) {
   }), list( .link = link, .earg = earg,
             .shape.arg = shape.arg, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    sc <- eta2theta(eta, .link , earg = .earg )
-    .shape.arg * sc 
+    eta <- as.matrix(eta)
+    SC <- eta2theta(eta, .link , earg = .earg )
+    shape.mat <- matrix( .shape.arg , nrow(eta), ncol(eta), byrow = TRUE)
+    shape.mat * SC
   }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
   last = eval(substitute(expression({
     M1 <- extra$M1
@@ -2077,12 +2057,14 @@ cauchy.control <- function(save.weight = TRUE, ...) {
              extra = NULL,
              summation = TRUE) {
     sc <- eta2theta(eta, .link , earg = .earg )
+    shape.mat <- matrix( .shape.arg , nrow(cbind(y)), ncol(cbind(y)),
+                        byrow = TRUE)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * (( .shape.arg - 1) * log(y) - y / sc -
-                  .shape.arg * log(sc) - lgamma( .shape.arg ))
+        c(w) * (( shape.mat - 1) * log(y) - y / sc -
+                  shape.mat * log(sc) - lgamma( shape.mat ))
       if (summation) {
         sum(ll.elts)
       } else {
@@ -2103,7 +2085,9 @@ cauchy.control <- function(save.weight = TRUE, ...) {
       warning("ignoring prior weights")
     eta <- predict(object)
     Scale <- eta2theta(eta, .link , earg = .earg )
-    rgamma(nsim * length(Scale), shape = .shape.arg , scale = Scale )
+    shape.mat <- matrix( .shape.arg , nrow(cbind(eta)), ncol(cbind(eta)),
+                        byrow = TRUE)
+    rgamma(nsim * length(Scale), shape = shape.mat , scale = Scale )
   }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
 
 
@@ -2112,12 +2096,14 @@ cauchy.control <- function(save.weight = TRUE, ...) {
 
   deriv = eval(substitute(expression({
     sc <- eta2theta(eta, .link , earg = .earg )
-    dl.dsc <- (y / sc - .shape.arg) / sc
+    shape.mat <- matrix( .shape.arg , nrow(cbind(eta)), ncol(cbind(eta)),
+                        byrow = TRUE)
+    dl.dsc <- (y / sc - shape.mat) / sc
     dsc.deta <- dtheta.deta(sc, .link , earg = .earg )
     c(w) * dl.dsc * dsc.deta
   }), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
   weight = eval(substitute(expression({
-    ned2l.dsc2 <- .shape.arg / sc^2
+    ned2l.dsc2 <- shape.mat / sc^2
     wz <- c(w) * dsc.deta^2 * ned2l.dsc2
     wz
   }), list( .earg = earg, .shape.arg = shape.arg ))))
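
These hunks appear to belong to erlang(): the positive-integer check on
'shape.arg' and the shape * scale mean match that family.  The change
recycles 'shape.arg' row-wise into a matrix rather than restricting it
to length one, so one known shape per response column becomes possible.
A hedged sketch under that assumption (the data and shapes below are
made up):

    library(VGAM)
    set.seed(3)
    edata <- data.frame(y1 = rgamma(100, shape = 2, rate = 1),
                        y2 = rgamma(100, shape = 3, rate = 1))
    # One known integer shape per response; only the scales are estimated.
    fit <- vglm(cbind(y1, y2) ~ 1, erlang(shape.arg = c(2, 3)),
                data = edata)
    coef(fit, matrix = TRUE)  # loge(scale) predictors
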
@@ -2140,13 +2126,13 @@ dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
   if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
     stop("bad input for argument 'a'")
   N <- max(length(x), length(Qsize), length(a))
-  x <- rep(x, length.out = N);
-  Qsize <- rep(Qsize, length.out = N);
-  a <- rep(a, length.out = N);
+  x <- rep(x, length.out = N)
+  Qsize <- rep(Qsize, length.out = N)
+  a <- rep(a, length.out = N)
 
   xok <- (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
   ans <- rep(if (log.arg) log(0) else 0, length.out = N)  # loglikelihood
-  ans[xok] <- lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
+  ans[xok] <- log(Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
              (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
              (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
   if (!log.arg) {
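
The replacement of lgamma(1 + Qsize) by log(Qsize) above fixes the
Borel-Tanner log-density: the pmf is
Pr(X = x) = (Qsize/x) exp(-a*x) (a*x)^(x - Qsize) / (x - Qsize)!,
whose leading term is log(Qsize), so the old code overstated the
density by a factor of (Qsize - 1)! whenever Qsize >= 3.  A quick
sanity check of the corrected dbort() (sketch; numbers illustrative):

    library(VGAM)
    # With the corrected term the pmf sums to 1 over its support
    # x = Qsize, Qsize + 1, ...; the old code gave about 2 here.
    sum(dbort(3:500, Qsize = 3, a = 0.5))  # approximately 1
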
@@ -2189,6 +2175,8 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
 
  borel.tanner <- function(Qsize = 1, link = "logit",
                           imethod = 1) {
+
+
   if (!is.Numeric(Qsize, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'Qsize'")
@@ -2210,9 +2198,19 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
   blurb = c("Borel-Tanner distribution\n\n",
             "Link:    ",
             namesof("a", link, earg = earg), "\n\n",
-            "Mean:     Qsize/(1-a)",
+            "Mean:     Qsize / (1-a)",
             "\n",
-            "Variance: Qsize*a / (1-a)^3"),
+            "Variance: Qsize * a / (1 - a)^3"),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 1,
+         Q1 = 1,
+         Qsize = .Qsize ,
+         link = .link ,
+         multipleResponses = FALSE )
+  }, list( .Qsize  = Qsize,
+           .link = link ))),
+
   initialize = eval(substitute(expression({
     if (any(y < .Qsize ))
       stop("all y values must be >= ", .Qsize )
@@ -2227,7 +2225,7 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
 
     if (!length(etastart)) {
       a.init <- switch(as.character( .imethod ),
-              "1" = 1 - .Qsize / (y+1/8),
+              "1" = 1 - .Qsize / (y + 1/8),
               "2" = rep(1 - .Qsize / weighted.mean(y, w), length.out = n),
               "3" = rep(1 - .Qsize / median(y), length.out = n),
               "4" = rep(0.5, length.out = n))
@@ -2285,7 +2283,7 @@ rbort <- function(n, Qsize = 1, a = 0.5) {
 
   deriv = eval(substitute(expression({
     aa <- eta2theta(eta, .link , earg = .earg )
-    dl.da <- (y - .Qsize) / aa - y 
+    dl.da <- (y - .Qsize ) / aa - y 
     da.deta <- dtheta.deta(aa, .link , earg = .earg )
     c(w) * dl.da * da.deta
   }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
@@ -2473,7 +2471,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             namesof("mu",  lmu,  earg = emu),  ", ",
             namesof("phi", lphi, earg = ephi)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (min(y) <= .A || max(y) >= .B)
@@ -2610,9 +2608,10 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
 
- beta.ab <- function(lshape1 = "loge", lshape2 = "loge",
-                     i1 = NULL, i2 = NULL, trim = 0.05,
-                     A = 0, B = 1, parallel = FALSE, zero = NULL) {
+ betaR <-
+  function(lshape1 = "loge", lshape2 = "loge",
+           i1 = NULL, i2 = NULL, trim = 0.05,
+           A = 0, B = 1, parallel = FALSE, zero = NULL) {
 
   lshape1 <- as.list(substitute(lshape1))
   eshape1 <- link2list(lshape1)
@@ -2654,11 +2653,20 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             namesof("shape1", lshape1, earg = eshape1),  ", ",
             namesof("shape2", lshape2, earg = eshape2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints, apply.int  = TRUE)
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         A  = .A,
+         B  = .B,
+         multipleResponses = FALSE,
+         zero = .zero )
+  }, list( .A = A, .B = B,
+           .zero = zero ))),
   initialize = eval(substitute(expression({
     if (min(y) <= .A || max(y) >= .B)
       stop("data not within (A, B)")
@@ -2667,7 +2675,6 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
       stop("response must be a vector or a one-column matrix")
 
 
-
     w.y.check(w = w, y = y)
 
 
@@ -2676,7 +2683,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
           namesof("shape2", .lshape2 , earg = .eshape2 , short = TRUE))
 
     if (!length(etastart)) {
-      mu1d <- mean(y, trim = .trim)
+      mu1d <- mean(y, trim = .trim )
       uu <- (mu1d - .A) / ( .B - .A) 
       DD <- ( .B - .A)^2 
       pinit <- max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
@@ -2685,10 +2692,10 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
       etastart[, 1] <- theta2eta( pinit, .lshape1 , earg = .eshape1 )
       etastart[, 2] <- theta2eta( qinit, .lshape2 , earg = .eshape2 )
     }
-      if (is.Numeric( .i1 ))
-        etastart[, 1] <- theta2eta( .i1, .lshape1 , earg = .eshape1 )
-      if (is.Numeric( .i2 ))
-        etastart[, 2] <- theta2eta( .i2, .lshape2 , earg = .eshape2 )
+    if (is.Numeric( .i1 ))
+      etastart[, 1] <- theta2eta( .i1 , .lshape1 , earg = .eshape1 )
+    if (is.Numeric( .i2 ))
+      etastart[, 2] <- theta2eta( .i2 , .lshape2 , earg = .eshape2 )
   }), list( .lshape1 = lshape1, .lshape2 = lshape2,
             .i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
@@ -2706,8 +2713,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             .A = A, .B = B,
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   loglikelihood = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta,
-             extra = NULL,
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
     shapes <- cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
                     eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
@@ -2727,7 +2733,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
     }
   }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
            .eshape1 = eshape1, .eshape2 = eshape2 ))),
-  vfamily = "beta.ab",
+  vfamily = "betaR",
 
 
 
@@ -2767,17 +2773,15 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B, 
             .eshape1 = eshape1, .eshape2 = eshape2 ))),
   weight = expression({
-    temp2 <- trigamma(shapes[, 1] + shapes[, 2])
-    ned2l.dshape12 <- trigamma(shapes[, 1]) - temp2 
-    ned2l.dshape22 <- trigamma(shapes[, 2]) - temp2 
-    ned2l.dshape1shape2 <- -temp2
-
-    wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
+    trig.sum <- trigamma(shapes[, 1] + shapes[, 2])
+    ned2l.dshape12 <- trigamma(shapes[, 1]) - trig.sum 
+    ned2l.dshape22 <- trigamma(shapes[, 2]) - trig.sum 
+    ned2l.dshape1shape2 <- -trig.sum
+    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M) == 3
     wz[, iam(1, 1, M)] <- ned2l.dshape12      * dshapes.deta[, 1]^2
     wz[, iam(2, 2, M)] <- ned2l.dshape22      * dshapes.deta[, 2]^2
     wz[, iam(1, 2, M)] <- ned2l.dshape1shape2 * dshapes.deta[, 1] *
                                                 dshapes.deta[, 2]
-
     c(w) * wz
   }))
 }
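
beta.ab() is renamed betaR() above, with an added infos slot; its
arguments are unchanged.  A minimal sketch on simulated (0, 1) data
(illustrative only):

    library(VGAM)
    set.seed(4)
    bdata <- data.frame(y = rbeta(200, shape1 = 2, shape2 = 5))
    fit <- vglm(y ~ 1, betaR, data = bdata)
    Coef(fit)  # back-transformed estimates of shape1 and shape2
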
@@ -2785,34 +2789,35 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
 
- simple.exponential <- function() {
+
+simple.exponential <- function() {
   new("vglmff",
   blurb = c("Simple exponential distribution\n",
             "Link:    log(rate)\n"),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
                       summation = TRUE) {
     devy <- -log(y) - 1
-    devmu <- -log(mu) - y/mu
+    devmu <- -log(mu) - y / mu
     devi <- 2 * (devy - devmu)
     if (residuals) {
-      sign(y - mu) * sqrt(abs(devi) * w)
+      sign(y - mu) * sqrt(abs(devi) * c(w))
     } else {
       dev.elts <- c(w) * devi
-      if (summation) {
-        sum(dev.elts)
-      } else {
-        dev.elts
-      }
+      if (summation) sum(dev.elts) else dev.elts
     }
   },
+  loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+                           summation = TRUE) {
+    if (residuals) return(NULL)
+    if (summation) sum(c(w) * dexp(y, rate = 1 / mu, log = TRUE)) else
+      c(w) * dexp(y, rate = 1 / mu, log = TRUE)
+  },
   initialize = expression({
-    predictors.names <- "log(rate)"
+    predictors.names <- "loge(rate)"
     mustart <- y + (y == 0) / 8
   }),
-  linkinv = function(eta, extra = NULL)
-    exp(-eta),
-  linkfun = function(mu, extra = NULL)
-    -log(mu),
+  linkinv = function(eta, extra = NULL) exp(-eta),
+  linkfun = function(mu,  extra = NULL) -log(mu),
   vfamily = "simple.exponential",
   deriv = expression({
     rate <- 1 / mu
@@ -2821,7 +2826,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
     c(w) * dl.drate * drate.deta
   }),
   weight = expression({
-    ned2l.drate2 <- 1 / rate^2
+    ned2l.drate2 <- 1 / rate^2  # EIM
     wz <- c(w) * drate.deta^2 * ned2l.drate2
     wz
   }))
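
simple.exponential() now carries a loglikelihood slot alongside its
deviance slot, so likelihood-based summaries become available on fits.
A minimal sketch on simulated data:

    library(VGAM)
    set.seed(5)
    edata <- data.frame(y = rexp(100, rate = 2))
    fit <- vglm(y ~ 1, simple.exponential, data = edata)
    logLik(fit)  # works now that the family supplies a loglikelihood
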
@@ -2833,11 +2838,124 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
 
+
+
+
+
+ better.exponential <-
+  function(link = "loge", location = 0, expected = TRUE,
+           ishrinkage = 0.95, parallel = FALSE, zero = NULL) {
+  link <- as.list(substitute(link))
+  earg <- link2list(link)
+  link <- attr(earg, "function.name")
+
+  new("vglmff",
+  blurb = c("Exponential distribution\n\n",
+            "Link:     ", namesof("rate", link, earg, tag = TRUE), "\n",
+            "Mean:     ", "mu = ", if (all(location == 0)) "1 / rate" else
+            if (length(unique(location)) == 1)
+            paste(location[1], "+ 1 / rate") else "location + 1 / rate"),
+  constraints = eval(substitute(expression({
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
+                           constraints = constraints, apply.int = TRUE)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .parallel = parallel, .zero = zero ))),
+  infos = eval(substitute(function(...) {
+    list(M1 = 1, Q1 = 1, multipleResponses = TRUE, zero = .zero )
+  }, list( .zero = zero ))),
+  deviance = function(mu, y, w, residuals = FALSE, eta,
+                      extra = NULL, summation = TRUE) {
+    location <- extra$location
+    devy <- -log(y - location) - 1
+    devmu <- -log(mu - location) - (y - location) / (mu - location)
+    devi <- 2 * (devy - devmu)
+    if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else {
+      dev.elts <- c(w) * devi
+      if (summation) sum(dev.elts) else dev.elts
+    }
+  },
+  initialize = eval(substitute(expression({
+    checklist <- w.y.check(w = w, y = y, ncol.w.max = Inf, ncol.y.max = Inf,
+                           out.wy = TRUE, colsyperw = 1, maximize = TRUE)
+    w <- checklist$w  # So ncol(w) == ncol(y)
+    y <- checklist$y
+
+    extra$ncoly <- ncoly <- ncol(y)
+    extra$M1 <- M1 <- 1
+    M <- M1 * ncoly
+
+    extra$location <- matrix( .location , n, ncoly, byrow = TRUE)  # By row!
+    if (any(y <= extra$location))
+      stop("all responses must be greater than argument 'location'")
+
+    mynames1 <- if (M == 1) "rate" else paste("rate", 1:M, sep = "")
+    predictors.names <-
+      namesof(mynames1, .link , earg = .earg , short = TRUE)
+
+    if (length(mustart) + length(etastart) == 0)
+      mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
+                 .ishrinkage + (1 - .ishrinkage ) * y + 1 / 8
+    if (!length(etastart))
+      etastart <- theta2eta(1 / (mustart - extra$location), .link , .earg )
+  }), list( .location = location, .link = link, .earg = earg,
+            .ishrinkage = ishrinkage ))),
+  linkinv = eval(substitute(function(eta, extra = NULL)
+    extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
+  list( .link = link, .earg = earg ))),
+  last = eval(substitute(expression({
+    misc$link <- rep( .link , length = M)
+    misc$earg <- vector("list", M)
+    names(misc$link) <- names(misc$earg) <- mynames1
+    for (ii in 1:M)
+      misc$earg[[ii]] <- .earg
+    misc$location <- .location
+    misc$expected <- .expected
+  }), list( .link = link, .earg = earg,
+            .expected = expected, .location = location ))),
+  linkfun = eval(substitute(function(mu, extra = NULL) 
+    theta2eta(1 / (mu - extra$location), .link , earg = .earg ),
+  list( .link = link, .earg = earg ))),
+  loglikelihood =
+  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE)
+    if (residuals) stop("loglikelihood residuals not implemented yet") else {
+      rate <- 1 / (mu - extra$location)
+      ll.elts <- c(w) * dexp(y - extra$location, rate = rate, log = TRUE)
+      if (summation) sum(ll.elts) else ll.elts
+    },
+  vfamily = c("better.exponential"),
+  simslot = eval(substitute(function(object, nsim) {
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1)) warning("ignoring prior weights")
+    mu <- fitted(object)
+    rate <- 1 / (mu - object at extra$location)
+    rexp(nsim * length(rate), rate = rate)
+  }, list( .link = link, .earg = earg ))),
+  deriv = eval(substitute(expression({
+    rate <- 1 / (mu - extra$location)
+    dl.drate <- mu - y
+    drate.deta <- dtheta.deta(rate, .link , earg = .earg )
+    c(w) * dl.drate * drate.deta
+  }), list( .link = link, .earg = earg ))),
+  weight = eval(substitute(expression({
+    ned2l.drate2 <- (mu - extra$location)^2
+    wz <- ned2l.drate2 * drate.deta^2  # EIM
+    if (! .expected ) {  # Use the OIM, not the EIM
+      d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
+      wz <- wz - dl.drate * d2rate.deta2
+    }
+    c(w) * wz
+  }), list( .link = link, .expected = expected, .earg = earg ))))
+}
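
better.exponential(), added above, parallels exponential() but is
written for multiple responses throughout (ncol.w.max = Inf in its
w.y.check() call) and takes 'parallel' and 'ishrinkage' arguments.  A
sketch, assuming the family is exported in this version:

    library(VGAM)
    set.seed(6)
    edata <- data.frame(y1 = rexp(100, rate = 2),
                        y2 = rexp(100, rate = 5))
    fit <- vglm(cbind(y1, y2) ~ 1, better.exponential, data = edata)
    coef(fit, matrix = TRUE)  # one loge(rate) predictor per response
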
+
+
+
+
+
+
  exponential <-
   function(link = "loge", location = 0, expected = TRUE,
-           shrinkage.init = 0.95, zero = NULL) {
-  if (!is.Numeric(location, length.arg = 1))
-    stop("bad input for argument 'location'")
+           ishrinkage = 0.95, parallel = FALSE, zero = NULL) {
   if (!is.logical(expected) || length(expected) != 1)
     stop("bad input for argument 'expected'")
 
@@ -2849,9 +2967,9 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-      shrinkage.init < 0 || shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 || ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
   new("vglmff",
@@ -2859,24 +2977,25 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             "Link:     ",
             namesof("rate", link, earg, tag = TRUE), "\n",
             "Mean:     ", "mu = ", 
-            if (location == 0) "1 / rate" else
-            paste(location, "+ 1 / rate"), "\n",
-            "Variance: ",
-            if (location == 0) "Exponential: mu^2" else
-            paste("(mu - ", location, ")^2", sep = "")),
+            if (all(location == 0)) "1 / rate" else
+            if (length(unique(location)) == 1)
+            paste(location[1], "+ 1 / rate") else
+            "location + 1 / rate"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
-  }), list( .zero = zero ))),
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
+                           constraints = constraints, apply.int = TRUE)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 1,
          Q1 = 1,
          zero = .zero )
   }, list( .zero = zero ))),
-  deviance = eval(substitute(
-    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-                      summation = TRUE) {
-    devy <- -log(y - .location) - 1
-    devmu <- -log(mu - .location) - (y - .location) / (mu - .location)
+  deviance = function(mu, y, w, residuals = FALSE, eta,
+                      extra = NULL, summation = TRUE) {
+    location <- extra$location
+    devy <- -log(y - location) - 1
+    devmu <- -log(mu - location) - (y - location) / (mu - location)
     devi <- 2 * (devy - devmu)
     if (residuals) {
       sign(y - mu) * sqrt(abs(devi) * w)
@@ -2888,17 +3007,17 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
         dev.elts
       }
     }
-  }, list( .location = location, .earg = earg ))),
+  },
   initialize = eval(substitute(expression({
-    temp5 <-
+    checklist <-
     w.y.check(w = w, y = y,
               ncol.w.max = Inf,
               ncol.y.max = Inf,
               out.wy = TRUE,
               colsyperw = 1,
               maximize = TRUE)
-    w <- temp5$w
-    y <- temp5$y
+    w <- checklist$w
+    y <- checklist$y
 
     ncoly <- ncol(y)
     M1 <- 1
@@ -2906,37 +3025,34 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
     extra$M1 <- M1
     M <- M1 * ncoly
 
-    extra$Loc <- matrix( .location , n, ncoly, byrow = TRUE)
+    extra$location <- matrix( .location , n, ncoly, byrow = TRUE)  # By row!
 
-    if (any(y <= extra$Loc))
-      stop("all responses must be greater than ", extra$Loc)
+    if (any(y <= extra$location))
+      stop("all responses must be greater than ", extra$location)
 
     mynames1 <- if (M == 1) "rate" else paste("rate", 1:M, sep = "")
     predictors.names <-
       namesof(mynames1, .link , earg = .earg , short = TRUE)
 
     if (length(mustart) + length(etastart) == 0)
-      mustart <- matrix(colSums(y * w) / colSums(w),
-                        n, M, byrow = TRUE) * .sinit +
-                 (1 - .sinit) * y + 1 / 8
+      mustart <- matrix(colSums(y * w) / colSums(w), n, M, byrow = TRUE) *
+                 .ishrinkage + (1 - .ishrinkage ) * y + 1 / 8
     if (!length(etastart))
-      etastart <- theta2eta(1 / (mustart - extra$Loc),
+      etastart <- theta2eta(1 / (mustart - extra$location),
                             .link , earg = .earg )
   }), list( .location = location,
             .link = link, .earg = earg,
-            .sinit = shrinkage.init ))),
+            .ishrinkage = ishrinkage ))),
   linkinv = eval(substitute(function(eta, extra = NULL)
-    extra$Loc + 1 / eta2theta(eta, .link , earg = .earg ),
+    extra$location + 1 / eta2theta(eta, .link , earg = .earg ),
   list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <- rep( .link , length = M)
     names(misc$link) <- mynames1
-
     misc$earg <- vector("list", M)
     names(misc$earg) <- mynames1
-    for (ii in 1:M) {
+    for (ii in 1:M)
       misc$earg[[ii]] <- .earg
-    }
     misc$location <- .location
     misc$expected <- .expected
     misc$multipleResponses <- TRUE
@@ -2944,8 +3060,17 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   }), list( .link = link, .earg = earg,
             .expected = expected, .location = location ))),
   linkfun = eval(substitute(function(mu, extra = NULL) 
-    theta2eta(1 / (mu - extra$Loc), .link , earg = .earg ),
+    theta2eta(1 / (mu - extra$location), .link , earg = .earg ),
   list( .link = link, .earg = earg ))),
+  loglikelihood =
+  function(mu, y, w, residuals = FALSE, eta, extra = NULL, summation = TRUE)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      rate <- 1 / (mu - extra$location)
+      ll.elts <- c(w) * dexp(x = y - extra$location, rate = rate, log = TRUE)
+      if (summation) sum(ll.elts) else ll.elts
+  },
   vfamily = c("exponential"),
   simslot = eval(substitute(
   function(object, nsim) {
@@ -2953,22 +3078,20 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
               pwts else weights(object, type = "prior")
     if (any(pwts != 1))
       warning("ignoring prior weights")
-    eta <- predict(object)
     mu <- fitted(object)
-    extra <- object at extra
-    rate <- 1 / (mu - extra$Loc)
+    rate <- 1 / (mu - object at extra$location)
     rexp(nsim * length(rate), rate = rate)
   }, list( .link = link, .earg = earg ))),
   deriv = eval(substitute(expression({
-    rate <- 1 / (mu - extra$Loc)
+    rate <- 1 / (mu - extra$location)
     dl.drate <- mu - y
     drate.deta <- dtheta.deta(rate, .link , earg = .earg )
     c(w) * dl.drate * drate.deta
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
-    ned2l.drate2 <- (mu - extra$Loc)^2
+    ned2l.drate2 <- (mu - extra$location)^2
     wz <- ned2l.drate2 * drate.deta^2
-    if (! .expected ) {
+    if (! .expected ) {  # Use the OIM, not the EIM
       d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
       wz <- wz - dl.drate * d2rate.deta2
     }
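
For exponential() the hunks above rename 'shrinkage.init' to
'ishrinkage', allow a vector 'location' (stored per response in
extra$location), add a 'parallel' constraint argument, and supply a
loglikelihood slot.  A minimal sketch of the renamed argument:

    library(VGAM)
    set.seed(7)
    edata <- data.frame(y = rexp(100, rate = 2))
    fit <- vglm(y ~ 1, exponential(ishrinkage = 0.9), data = edata)
    c(logLik = logLik(fit), AIC = AIC(fit))
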
@@ -2979,6 +3102,25 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
  gamma1 <- function(link = "loge", zero = NULL) {
 
 
@@ -3002,7 +3144,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -3106,100 +3248,201 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
 
- gamma2.ab <-
-  function(lrate = "loge", lshape = "loge",
-           irate = NULL, ishape = NULL, expected = TRUE, zero = 2) {
 
-  lrate <- as.list(substitute(lrate))
-  erate <- link2list(lrate)
-  lrate <- attr(erate, "function.name")
+
+
+
+
+
+ gammaR <-
+  function(lrate = "loge", lshape = "loge", 
+           irate = NULL,   ishape = NULL,
+           lss = TRUE,
+           zero = ifelse(lss, -2, -1)) {
+
+
+  expected <- TRUE  # FALSE does not work well
+
+  iratee <- irate
+
+  lratee <- as.list(substitute(lrate))
+  eratee <- link2list(lratee)
+  lratee <- attr(eratee, "function.name")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
   lshape <- attr(eshape, "function.name")
 
 
-  if (length( irate) && !is.Numeric(irate, positive = TRUE))
+  if (length( iratee) && !is.Numeric(iratee, positive = TRUE))
     stop("bad input for argument 'irate'")
   if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
     stop("bad input for argument 'ishape'")
 
   if (length(zero) &&
-      !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+      !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
 
   if (!is.logical(expected) || length(expected) != 1)
     stop("bad input for argument 'expected'")
 
 
+  ratee.TF <- if (lss) c(TRUE, FALSE) else c(FALSE, TRUE)
+  scale.12 <- if (lss) 1:2 else 2:1
+  blurb.vec <- c(namesof("rate",  lratee, earg = eratee),
+                 namesof("shape", lshape, earg = eshape))
+  blurb.vec <- blurb.vec[scale.12]
+
 
 
   new("vglmff",
   blurb = c("2-parameter Gamma distribution\n",
             "Links:    ",
-            namesof("rate",  lrate,  earg = erate), ", ", 
-            namesof("shape", lshape, earg = eshape), "\n",
+            blurb.vec[1], ", ",
+            blurb.vec[2], "\n",
             "Mean:     mu = shape/rate\n",
             "Variance: (mu^2)/shape = shape/rate^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = .expected ,
+         multipleResponses = TRUE,
+         zero = .zero )
+  }, list( .zero = zero, .scale.12 = scale.12, .ratee.TF = ratee.TF,
+           .expected = expected
+         ))),
+
   initialize = eval(substitute(expression({
 
+    temp5 <-
     w.y.check(w = w, y = y,
-              Is.positive.y = TRUE)
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+    ncoly <- ncol(y)
+    M1 <- 2
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
 
 
+    if ( .lss ) {
+      mynames1 <- paste("rate",  if (ncoly > 1) 1:ncoly else "", sep = "")
+      mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
       predictors.names <-
-        c(namesof("rate",  .lrate  , earg = .erate  , tag = FALSE),
-          namesof("shape", .lshape , earg = .eshape , tag = FALSE))
+          c(namesof(mynames1, .lratee , earg = .eratee , tag = FALSE),
+            namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
 
-      if (!length(etastart)) {
-        mymu = y + 0.167 * (y == 0)
-        junk <- lsfit(x, y, wt = w, intercept = FALSE)
-        var.y.est <- sum(c(w) * junk$resid^2) / (nrow(x) - length(junk$coef))
-        init.shape <-  if (length( .ishape )) .ishape else mymu^2 / var.y.est
-        init.rate <-  if (length( .irate)) .irate else init.shape / mymu
-        init.rate <- rep(init.rate, length.out = n)
-        init.shape <- rep(init.shape, length.out = n)
-        if ( .lshape == "loglog")
-          init.shape[init.shape <= 1] <- 3.1 # Hopefully value is big enough
-          etastart <-
-            cbind(theta2eta(init.rate,  .lrate  , earg = .erate ),
-                  theta2eta(init.shape, .lshape , earg = .eshape ))
+    } else {
+      mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+      mynames2 <- paste("rate",  if (ncoly > 1) 1:ncoly else "", sep = "")
+      predictors.names <-
+          c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
+            namesof(mynames2, .lratee , earg = .eratee , tag = FALSE))
+    }
+    predictors.names <- predictors.names[
+          interleave.VGAM(M, M = M1)]
+
+
+
+    Ratee.init <- matrix(if (length( .iratee )) .iratee else 0 + NA,
+                         n, ncoly, byrow = TRUE)
+    Shape.init <- matrix(if (length( .ishape )) .ishape else 0 + NA,
+                         n, ncoly, byrow = TRUE)
+
+
+    if (!length(etastart)) {
+      mymu <- y + 0.167 * (y == 0)
+
+
+      for (ilocal in 1:ncoly) {
+        junk <- lsfit(x, y[, ilocal], wt = w[, ilocal], intercept = FALSE)
+        var.y.est <- sum(c(w[, ilocal]) * junk$resid^2) / (nrow(x) -
+                     length(junk$coef))
+
+        if (!is.Numeric(Shape.init[, ilocal]))
+          Shape.init[, ilocal] <- (mymu[, ilocal])^2 / var.y.est
+
+        if (!is.Numeric(Ratee.init[, ilocal]))
+          Ratee.init[, ilocal] <- Shape.init[, ilocal] / mymu[, ilocal]
       }
-  }), list( .lrate = lrate, .lshape = lshape,
-            .irate = irate, .ishape = ishape,
-            .erate = erate, .eshape = eshape))),
+
+      if ( .lshape == "loglog")
+        Shape.init[Shape.init <= 1] <- 3.1  # Hopefully value is big enough
+      etastart <- if ( .lss )
+        cbind(theta2eta(Ratee.init, .lratee , earg = .eratee ),
+              theta2eta(Shape.init, .lshape , earg = .eshape ))[,
+              interleave.VGAM(M, M = M1)] else
+        cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
+              theta2eta(Ratee.init, .lratee , earg = .eratee ))[,
+              interleave.VGAM(M, M = M1)]
+    }
+  }), list( .lratee = lratee, .lshape = lshape,
+            .iratee = iratee, .ishape = ishape,
+            .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta[, 2], .lshape , earg = .eshape ) / (
-    eta2theta(eta[, 1], .lrate  , earg = .erate ))
-  }, list( .lrate = lrate, .lshape = lshape,
-           .erate = erate, .eshape = eshape))),
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
+    Shape / Ratee
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   last = eval(substitute(expression({
-    misc$link <-    c(rate = .lrate , shape = .lshape)
-    misc$earg <- list(rate = .erate, shape = .eshape )
-  }), list( .lrate = lrate, .lshape = lshape,
-            .erate = erate, .eshape = eshape))),
+    misc$multipleResponses <- TRUE
+
+    M1 <- extra$M1
+    avector <- if ( .lss ) c(rep( .lratee , length = ncoly),
+                             rep( .lshape , length = ncoly)) else
+                           c(rep( .lshape , length = ncoly),
+                             rep( .lratee , length = ncoly))
+    misc$link <- avector[interleave.VGAM(M, M = M1)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[M1*ii-1]] <- if ( .lss ) .eratee else .eshape
+      misc$earg[[M1*ii  ]] <- if ( .lss ) .eshape else .eratee
+    }
+
+    misc$M1 <- M1
+  }), list( .lratee = lratee, .lshape = lshape,
+            .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    rate  <- eta2theta(eta[, 1], .lrate  , earg = .erate )
-    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dgamma(x = y, shape = shape, rate = rate, log = TRUE)
+      ll.elts <- c(w) * dgamma(x = y, shape = Shape, rate = Ratee,
+                               log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lrate = lrate, .lshape = lshape,
-           .erate = erate, .eshape = eshape))),
-  vfamily = c("gamma2.ab"),
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eratee = eratee, .eshape = eshape,
+           .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
+  vfamily = c("gammaR"),
 
 
 
@@ -3212,44 +3455,61 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    rate  <- eta2theta(eta[, 1], .lrate  , earg = .erate )
-    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-    rgamma(nsim * length(shape), shape = shape, rate = rate)
-  }, list( .lrate = lrate, .lshape = lshape,
-           .erate = erate, .eshape = eshape))),
-
-
-
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
+    rgamma(nsim * length(Shape), shape = Shape, rate = Ratee)
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eratee = eratee, .eshape = eshape,
+           .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
 
 
   deriv = eval(substitute(expression({
-      rate <- eta2theta(eta[, 1], .lrate , earg = .erate )
-      shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-      dl.drate <- mu - y
-      dl.dshape <- log(y*rate) - digamma(shape)
-      dratedeta <- dtheta.deta(rate, .lrate , earg = .erate )
-      dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
-      c(w) * cbind(dl.drate * dratedeta,
-                   dl.dshape * dshape.deta)
-  }), list( .lrate = lrate, .lshape = lshape,
-            .erate = erate, .eshape = eshape))),
+    M1 <- 2
+    Ratee <- eta2theta(eta[,    .ratee.TF  ], .lratee , earg = .eratee )
+    Shape <- eta2theta(eta[, !( .ratee.TF )], .lshape , earg = .eshape )
+    dl.dratee <- mu - y
+    dl.dshape <- log(y * Ratee) - digamma(Shape)
+    dratee.deta <- dtheta.deta(Ratee, .lratee , earg = .eratee )
+    dshape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
+
+    myderiv <- if ( .lss )
+                 c(w) * cbind(dl.dratee * dratee.deta,
+                              dl.dshape * dshape.deta) else
+                 c(w) * cbind(dl.dshape * dshape.deta,
+                              dl.dratee * dratee.deta)
+    myderiv[, interleave.VGAM(M, M = M1)]
+  }), list( .lratee = lratee, .lshape = lshape,
+            .eratee = eratee, .eshape = eshape,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss ))),
   weight = eval(substitute(expression({
-    d2l.dshape2 <- -trigamma(shape)
-    d2l.drate2 <- -shape/(rate^2)
-    d2l.drateshape <- 1/rate
-    wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(M)
-    wz[, iam(1, 1, M)] <- -d2l.drate2 * dratedeta^2
-    wz[, iam(2, 2, M)] <- -d2l.dshape2 * dshape.deta^2
-    wz[, iam(1, 2, M)] <- -d2l.drateshape * dratedeta * dshape.deta
-    if (! .expected) {
-      d2ratedeta2 <- d2theta.deta2(rate, .lrate , earg = .erate )
-      d2shapedeta2 <- d2theta.deta2(shape, .lshape , earg = .eshape )
-      wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] - dl.drate * d2ratedeta2
-      wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] - dl.dshape * d2shapedeta2
-    }
-    c(w) * wz
-  }), list( .lrate = lrate, .lshape = lshape,
-            .erate = erate, .eshape = eshape, .expected = expected ))))
+    ned2l.dratee2 <- Shape / (Ratee^2)
+    ned2l.drateeshape <- -1/Ratee
+    ned2l.dshape2 <- trigamma(Shape)
+
+    if ( .expected ) {
+     ratee.adjustment <-  0
+     shape.adjustment <-  0
+    } else {
+      d2ratee.deta2 <- d2theta.deta2(Ratee, .lratee , earg = .eratee )
+      d2shape.deta2 <- d2theta.deta2(Shape, .lshape , earg = .eshape )
+      ratee.adjustment <- dl.dratee * d2ratee.deta2
+      shape.adjustment <- dl.dshape * d2shape.deta2
+    }
+
+    wz <- if ( .lss )
+            array(c(c(w) * (ned2l.dratee2 * dratee.deta^2 - ratee.adjustment),
+                    c(w) * (ned2l.dshape2 * dshape.deta^2 - shape.adjustment),
+                    c(w) * (ned2l.drateeshape * dratee.deta * dshape.deta)),
+                  dim = c(n, M / M1, 3)) else
+            array(c(c(w) * (ned2l.dshape2 * dshape.deta^2 - shape.adjustment),
+                    c(w) * (ned2l.dratee2 * dratee.deta^2 - ratee.adjustment),
+                    c(w) * (ned2l.drateeshape * dratee.deta * dshape.deta)),
+                  dim = c(n, M / M1, 3))
+    wz <- arwz2wz(wz, M = M, M1 = M1)
+    wz
+  }), list( .lratee = lratee, .lshape = lshape,
+            .eratee = eratee, .eshape = eshape, .expected = expected,
+            .scale.12 = scale.12, .ratee.TF = ratee.TF, .lss = lss  ))))
 }
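
gamma2.ab() becomes gammaR() above; the new 'lss' switch controls
whether rate or shape comes first in the predictors (the default
zero = ifelse(lss, -2, -1) keeps whichever comes second
intercept-only), and multiple responses are interleaved via
interleave.VGAM().  A sketch on simulated data:

    library(VGAM)
    set.seed(8)
    gdata <- data.frame(y = rgamma(200, shape = 3, rate = 2))
    fit <- vglm(y ~ 1, gammaR, data = gdata)  # lss = TRUE: rate, shape
    Coef(fit)  # back-transformed rate and shape
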
 
 
@@ -3311,15 +3571,15 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
               "Variance: (mu^2)/shape"),
     constraints = eval(substitute(expression({
 
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = .apply.parint )
 
         dotzero <- .zero
         M1 <- 2
-        eval(negzero.expression)
-        constraints <- cm.zero.vgam(constraints, x, z.Index, M)
+        eval(negzero.expression.VGAM)
+        constraints <- cm.zero.VGAM(constraints, x, z.Index, M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
 
@@ -3361,7 +3621,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
         if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = "")
       predictors.names <-
           c(namesof(temp1.names, .lmu ,    earg = .emu ,    tag = FALSE),
-            namesof(temp2.names, .lshape , earg = .eshape, tag = FALSE))
+            namesof(temp2.names, .lshape , earg = .eshape , tag = FALSE))
       predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
 
 
@@ -3410,7 +3670,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
         rm("CQO.FastAlgorithm", envir = VGAMenv)
 
     tmp34 <- c(rep( .lmu ,    length = NOS),
-              rep( .lshape , length = NOS))
+               rep( .lshape , length = NOS))
     names(tmp34) =
        c(if (NOS == 1) "mu"    else paste("mu",    1:NOS, sep = ""), 
          if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = ""))
@@ -3589,7 +3849,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -3817,7 +4077,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
            nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
            deviance.arg = FALSE, imethod = 1,
            parallel = FALSE,
-           shrinkage.init = 0.95, zero = -2) {
+           ishrinkage = 0.95, zero = -2) {
 
 
 
@@ -3865,10 +4125,10 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-    shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+    ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
   if (!is.null(nsimEIM)) {
     if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
@@ -3903,11 +4163,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
 
     if ( .parallel && ncol(cbind(y)) > 1)
       stop("univariate responses needed if 'parallel = TRUE'")
-    constraints <- cm.vgam(matrix(1, M, 1), x = x,
+    constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints)
   }), list( .parallel = parallel, .zero = zero ))),
@@ -3992,7 +4252,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
         } else {
           medabsres <- median(abs(y[, iii] - use.this)) + 1/32
           allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
-          mu.init[, iii] <- use.this + (1 - .sinit ) *
+          mu.init[, iii] <- use.this + (1 - .ishrinkage ) *
                            allowfun(y[, iii] - use.this, maxtol = medabsres)
 
           mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
@@ -4010,10 +4270,10 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
         k.grid <- 2^(seq(-8, 8, length = 40))
         kay.init <- matrix(0, nrow = n, ncol = NOS)
         for (spp. in 1:NOS) {
-          kay.init[, spp.] <- getMaxMin(k.grid,
-                                       objfun = negbinomial.Loglikfun,
-                                       y = y[, spp.], x = x, w = w[, spp.],
-                                       extraargs = mu.init[, spp.])
+          kay.init[, spp.] <- grid.search(k.grid,
+                                          objfun = negbinomial.Loglikfun,
+                                          y = y[, spp.], x = x, w = w[, spp.],
+                                          extraargs = mu.init[, spp.])
         }
       }
 
@@ -4038,7 +4298,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
             .mu.init = imu,
             .deviance.arg = deviance.arg,
             .k.init = isize, .probs.y = probs.y,
-            .sinit = shrinkage.init, .nsimEIM = nsimEIM,
+            .ishrinkage = ishrinkage, .nsimEIM = nsimEIM,
             .zero = zero, .imethod = imethod ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -4085,13 +4345,13 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     misc$imethod <- .imethod 
     misc$nsimEIM <- .nsimEIM
     misc$expected <- TRUE
-    misc$shrinkage.init <- .sinit
+    misc$ishrinkage <- .ishrinkage
     misc$multipleResponses <- TRUE
   }), list( .lmuuu = lmuuu, .lsize = lsize,
             .emuuu = emuuu, .esize = esize,
             .cutoff = cutoff,
             .nsimEIM = nsimEIM,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
 
   linkfun = eval(substitute(function(mu, extra = NULL) {
@@ -4189,7 +4449,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
   if ( iter == 1 && .deviance.arg ) {
-    if (control$criterion != "coefficients" ||
+    if (control$criterion != "coefficients" &&
         control$half.step)
       warning("Argument 'criterion' should be 'coefficients' ",
                "or 'half.step' should be 'FALSE' when ",
@@ -4367,56 +4627,444 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }), list( .cutoff = cutoff,
-            .Maxiter = Maxiter,
-            .lmuuu = lmuuu,
-            .nsimEIM = nsimEIM ))))
+  }), list( .cutoff = cutoff,
+            .Maxiter = Maxiter,
+            .lmuuu = lmuuu,
+            .nsimEIM = nsimEIM ))))
+
+
+
+  if (deviance.arg) {
+    ans at deviance <- eval(substitute(
+      function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+               summation = TRUE) {
+
+
+
+
+
+
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    eta.k <- eta[, M1 * (1:NOS) , drop = FALSE]
+    kmat <- eta2theta(eta.k, .lsize , earg = .esize )
+
+    if (residuals) {
+      stop("this part of the function has not been written yet.")
+    } else {
+      size <- kmat
+      dev.elts <- 2 * c(w) *
+                  (y * log(pmax(1, y) / mu) -
+                  (y + size) * log((y + size) / (mu + size)))
+      if (summation) {
+        sum(dev.elts)
+      } else {
+        dev.elts
+      }
+    }
+  }, list( .lsize = lsize, .esize = esize,
+           .lmuuu = lmuuu, .emuuu = emuuu )))
+
+
+
+
+
+  }
+
+
+
+
+
+  ans
+}  # End of negbinomial()
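[Annotation: the deviance slot attached above is the usual NB2 deviance, 2*w*(y*log(max(1,y)/mu) - (y+k)*log((y+k)/(mu+k))). A quick numerical check with made-up values that these elements equal twice the log-likelihood gap to the saturated fit (mu = y); the binomial coefficient inside dnbinom() is common to both terms and cancels.

    y <- c(1, 3, 7); mu <- c(0.8, 2.5, 6); k <- 2; w <- 1
    2 * w * (y * log(pmax(1, y) / mu) -
             (y + k) * log((y + k) / (mu + k)))
    2 * w * (dnbinom(y, mu = y,  size = k, log = TRUE) -
             dnbinom(y, mu = mu, size = k, log = TRUE))  # same values
]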
+
+
+
+
+
+polya.control <- function(save.weight = TRUE, ...) {
+    list(save.weight = save.weight)
+}
+
+
+
+ polya <-
+  function(lprob = "logit", lsize = "loge",
+           iprob = NULL,    isize = NULL,
+           probs.y = 0.75,
+           nsimEIM = 100,
+           imethod = 1,
+           ishrinkage = 0.95, zero = -2) {
+
+
+  deviance.arg <- FALSE  # 20131212; for now
+      
+
+
+  if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
+    stop("bad input for argument 'iprob'")
+  if (length(isize) && !is.Numeric(isize, positive = TRUE))
+    stop("bad input for argument 'isize'")
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 3)
+     stop("argument 'imethod' must be 1 or 2 or 3")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+     stop("bad input for argument 'ishrinkage'")
+
+  if (!is.Numeric(nsimEIM, length.arg = 1,
+                  integer.valued = TRUE))
+    stop("bad input for argument 'nsimEIM'")
+  if (nsimEIM <= 10)
+    warning("argument 'nsimEIM' should be an integer ",
+            "greater than 10, say")
+
+
+  lprob <- as.list(substitute(lprob))
+  eprob <- link2list(lprob)
+  lprob <- attr(eprob, "function.name")
+
+  lsize <- as.list(substitute(lsize))
+  esize <- link2list(lsize)
+  lsize <- attr(esize, "function.name")
+
+
+
+  ans <-
+  new("vglmff",
+  blurb = c("Polya (negative-binomial) distribution\n\n",
+            "Links:    ",
+            namesof("prob", lprob, earg = eprob), ", ",
+            namesof("size", lsize, earg = esize), "\n",
+            "Mean:     size * (1 - prob) / prob\n",
+            "Variance: mean / prob"),
+  constraints = eval(substitute(expression({
+
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         zero = .zero)
+  }, list( .zero = zero ))),
+
+  initialize = eval(substitute(expression({
+    M1 <- 2
+    if (any(function.name == c("cqo", "cao")))
+      stop("polya() does not work with cqo() or cao(). ",
+           "Try negbinomial()")
+
+
+
+    temp5 <- w.y.check(w = w, y = y,
+              Is.integer.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1, maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+    M <- M1 * ncol(y)
+    NOS <- ncoly <- ncol(y)  # Number of species
+
+    predictors.names <-
+      c(namesof(if (NOS == 1) "prob" else
+                paste("prob", 1:NOS, sep = ""),
+               .lprob , earg = .eprob , tag = FALSE),
+        namesof(if (NOS == 1) "size" else
+                paste("size", 1:NOS, sep = ""),
+               .lsize ,  earg = .esize ,  tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
+
+    if (is.null( .nsimEIM )) {
+       save.weight <- control$save.weight <- FALSE
+    }
+
+    
+    PROB.INIT <- if (is.numeric( .pinit )) {
+      matrix( .pinit, nrow(y), ncol(y), byrow = TRUE)
+    } else {
+      NULL
+    }
+
+    if (!length(etastart)) {
+      mu.init <- y
+      for (iii in 1:ncol(y)) {
+        use.this <- if ( .imethod == 1) {
+          weighted.mean(y[, iii], w[, iii]) + 1/16
+        } else if ( .imethod == 3) {
+          c(quantile(y[, iii], probs = .probs.y) + 1/16)
+        } else {
+          median(y[, iii]) + 1/16
+        }
+
+        if (FALSE) {  # unreachable; MU.INIT is not defined in polya()
+          mu.init[, iii] <- MU.INIT[, iii]
+        } else {
+          medabsres <- median(abs(y[, iii] - use.this)) + 1/32
+          allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
+          mu.init[, iii] <- use.this + (1 - .ishrinkage ) * allowfun(y[, iii] -
+                          use.this, maxtol = medabsres)
+
+          mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
+        }
+      }
+
+
+
+      if ( is.Numeric( .kinit )) {
+        kayy.init <- matrix( .kinit, nrow = n, ncol = NOS, byrow = TRUE)
+      } else {
+        negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
+            mu <- extraargs
+            sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
+        }
+        k.grid <- 2^((-7):7)
+        k.grid <- 2^(seq(-8, 8, length = 40))
+        kayy.init <- matrix(0, nrow = n, ncol = NOS)
+        for (spp. in 1:NOS) {
+          kayy.init[, spp.] <- grid.search(k.grid,
+                                           objfun = negbinomial.Loglikfun,
+                                           y = y[, spp.], x = x, w = w,
+                                           extraargs = mu.init[, spp.])
+        }
+      }
+
+      prob.init <- if (length(PROB.INIT)) PROB.INIT else
+                  kayy.init / (kayy.init + mu.init)
+
+
+      etastart <-
+        cbind(theta2eta(prob.init, .lprob , earg = .eprob),
+              theta2eta(kayy.init, .lsize , earg = .esize))
+      etastart <-
+        etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
+      }
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize,
+            .pinit = iprob, .kinit = isize,
+            .probs.y = probs.y,
+            .ishrinkage = ishrinkage, .nsimEIM = nsimEIM, .zero = zero,
+            .imethod = imethod ))),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+                     .lprob , earg = .eprob)
+    kmat <- eta2theta(eta[, M1*(1:NOS)-  0, drop = FALSE],
+                     .lsize , earg = .esize)
+    kmat / (kmat + pmat)
+  }, list( .lprob = lprob, .eprob = eprob,
+           .lsize = lsize, .esize = esize))),
+  last = eval(substitute(expression({
+    temp0303 <- c(rep( .lprob , length = NOS),
+                 rep( .lsize , length = NOS))
+    names(temp0303) <-
+      c(if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""),
+        if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
+    temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
+    misc$link <- temp0303 # Already named
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- names(misc$link)
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .eprob
+      misc$earg[[M1*ii  ]] <- .esize
+    }
+
+    misc$isize <- .isize  
+    misc$imethod <- .imethod 
+    misc$nsimEIM <- .nsimEIM
+    misc$expected <- TRUE
+    misc$ishrinkage <- .ishrinkage
+    misc$M1 <- 2
+    misc$multipleResponses <- TRUE
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize,
+            .isize = isize,
+            .nsimEIM = nsimEIM,
+            .ishrinkage = ishrinkage, .imethod = imethod ))),
+
+
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta,
+             extra = NULL,
+             summation = TRUE) {
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    pmat  <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+                       .lprob , earg = .eprob)
+    temp300 <-         eta[, M1*(1:NOS)    , drop = FALSE]
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp300 <- ifelse(temp300 >  bigval,  bigval, temp300)
+      temp300 <- ifelse(temp300 < -bigval, -bigval, temp300)
+    }
+    kmat <- eta2theta(temp300, .lsize , earg = .esize)
+    if (residuals) {
+      stop("loglikelihood residuals not implemented yet")
+    } else {
+      ll.elts <- c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE)
+      if (summation) {
+        sum(ll.elts)
+      } else {
+        ll.elts
+      }
+    }
+  }, list( .lsize = lsize, .lprob = lprob,
+           .esize = esize, .eprob = eprob ))),
+  vfamily = c("polya"),
+
+
+
+  simslot = eval(substitute(
+  function(object, nsim) {
+
+    pwts <- if (length(pwts <- object at prior.weights) > 0)
+              pwts else weights(object, type = "prior")
+    if (any(pwts != 1)) 
+      warning("ignoring prior weights")
+    eta <- predict(object)
+    pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
+    kmat <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
+    rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
+  }, list( .lprob = lprob, .lsize = lsize,
+           .eprob = eprob, .esize = esize ))),
+
+
+
+
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    M <- ncol(eta)
+
+    pmat  <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+                      .lprob , earg = .eprob)
+    temp3 <-           eta[, M1*(1:NOS)    , drop = FALSE]
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp3 <- ifelse(temp3 >  bigval,  bigval, temp3)
+      temp3 <- ifelse(temp3 < -bigval, -bigval, temp3)
+    }
+    kmat <- eta2theta(temp3, .lsize , earg = .esize)
+
+    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
+    dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
+
+    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
+    dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
+    dthetas.detas <- cbind(dprob.deta, dkayy.deta)
+    dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+    myderiv <- c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
+    myderiv[, interleave.VGAM(M, M = M1)]
+  }), list( .lprob = lprob, .lsize = lsize,
+            .eprob = eprob, .esize = esize))),
+  weight = eval(substitute(expression({
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
+
+    ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
+    mumat <- as.matrix(mu)
+
+
+    for (spp. in 1:NOS) {
+      run.varcov <- 0
+      kvec <- kmat[, spp.]
+      pvec <- pmat[, spp.]
+
+      for (ii in 1:( .nsimEIM )) {
+        ysim <- rnbinom(n = n, prob = pvec, size = kvec)
+
+        dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
+        dl.dkayy <- digamma(ysim + kvec) - digamma(kvec) + log(pvec)
+        temp3 <- cbind(dl.dprob, dl.dkayy)
+        run.varcov <- run.varcov +
+                     temp3[, ind1$row.index] *
+                     temp3[, ind1$col.index]
+      }
+      run.varcov <- cbind(run.varcov / .nsimEIM)
+
+      wz1 <- if (intercept.only)
+          matrix(colMeans(run.varcov),
+                 nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
+          run.varcov
+
+      wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+                  dThetas.detas[, M1 * (spp. - 1) + ind1$col]
+
+
+      for (jay in 1:M1)
+          for (kay in jay:M1) {
+              cptr <- iam((spp. - 1) * M1 + jay,
+                         (spp. - 1) * M1 + kay,
+                         M = M)
+              wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
+          }
+    }  # End of for (spp.) loop
+
+
+
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+  }), list( .nsimEIM = nsimEIM ))))
 
 
 
-  if (deviance.arg) {
-    ans at deviance <- eval(substitute(
-      function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-               summation = TRUE) {
 
+  if (deviance.arg)
+  ans at deviance <- eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+             summation = TRUE) {
+    M1 <- 2
+    NOS <- ncol(eta) / M1
+    temp300 <-  eta[, M1*(1:NOS), drop = FALSE]
 
 
 
 
+    if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
+      stop("cannot handle matrix 'w' yet")
 
-    M1 <- 2
-    NOS <- ncol(eta) / M1
-    eta.k <- eta[, M1 * (1:NOS) , drop = FALSE]
-    kmat <- eta2theta(eta.k, .lsize , earg = .esize )
 
+
+    if ( .lsize == "loge") {
+      bigval <- 68
+      temp300[temp300 >  bigval] <-  bigval
+      temp300[temp300 < -bigval] <- -bigval
+    } else {
+      stop("can only handle the 'loge' link")
+    }
+    kayy <-  eta2theta(temp300, .lsize , earg = .esize)
+    devi <- 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
+                (y + kayy) * log((mu + kayy) / (kayy + y)))
     if (residuals) {
-      stop("this part of the function has not been written yet.")
+      sign(y - mu) * sqrt(abs(devi) * w)
     } else {
-      size <- kmat
-      dev.elts <- 2 * c(w) *
-                  (y * log(pmax(1, y) / mu) -
-                  (y + size) * log((y + size) / (mu + size)))
+      dev.elts <- c(w) * devi  # elementwise, so summation = FALSE works
       if (summation) {
         sum(dev.elts)
       } else {
         dev.elts
       }
     }
-  }, list( .lsize = lsize, .esize = esize,
-           .lmuuu = lmuuu, .emuuu = emuuu )))
-
-
-
-
-
-  }
+  }, list( .lsize = lsize, .eprob = eprob,
+           .esize = esize)))
 
+  ans
+}  # End of polya()
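[Annotation: polya() parameterises the negative binomial by (prob, size) rather than negbinomial()'s (mu, size); the blurb's moments follow directly. A small sketch relating the two parameterisations, using the fact that dnbinom() accepts either:

    prob <- 0.4; size <- 3
    mu <- size * (1 - prob) / prob           # blurb: mean
    all.equal(dnbinom(0:10, prob = prob, size = size),
              dnbinom(0:10, mu = mu, size = size))  # TRUE
    c(mean = mu, variance = mu / prob)       # blurb: variance = mean / prob
]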
 
 
 
 
-  ans
-}
 
 
 
@@ -4424,23 +5072,24 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-polya.control <- function(save.weight = TRUE, ...) {
+polyaR.control <- function(save.weight = TRUE, ...) {
     list(save.weight = save.weight)
 }
 
 
 
- polya <-
-  function(lprob = "logit", lsize = "loge",
-           iprob = NULL,    isize = NULL,
+ polyaR <-
+  function(lsize = "loge", lprob = "logit", 
+           isize = NULL,   iprob = NULL,    
            probs.y = 0.75,
            nsimEIM = 100,
            imethod = 1,
-           shrinkage.init = 0.95, zero = -2) {
+           ishrinkage = 0.95, zero = -1) {
+
 
 
   deviance.arg <- FALSE  # 20131212; for now
-      
+
 
 
   if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
@@ -4452,10 +5101,10 @@ polya.control <- function(save.weight = TRUE, ...) {
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
      stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-     stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+     stop("bad input for argument 'ishrinkage'")
 
   if (!is.Numeric(nsimEIM, length.arg = 1,
                   integer.valued = TRUE))
@@ -4479,15 +5128,15 @@ polya.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Polya (negative-binomial) distribution\n\n",
             "Links:    ",
-            namesof("prob", lprob, earg = eprob), ", ",
-            namesof("size", lsize, earg = esize), "\n",
+            namesof("size", lsize, earg = esize), ", ",
+            namesof("prob", lprob, earg = eprob), "\n",
             "Mean:     size * (1 - prob) / prob\n",
             "Variance: mean / prob"),
   constraints = eval(substitute(expression({
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
 
   }), list( .zero = zero ))),
 
@@ -4500,7 +5149,7 @@ polya.control <- function(save.weight = TRUE, ...) {
   initialize = eval(substitute(expression({
     M1 <- 2
     if (any(function.name == c("cqo", "cao")))
-      stop("polya() does not work with cqo() or cao(). ",
+      stop("polyaR() does not work with cqo() or cao(). ",
            "Try negbinomial()")
 
 
@@ -4520,12 +5169,12 @@ polya.control <- function(save.weight = TRUE, ...) {
     NOS <- ncoly <- ncol(y)  # Number of species
 
     predictors.names <-
-      c(namesof(if (NOS == 1) "prob" else
-                paste("prob", 1:NOS, sep = ""),
-               .lprob , earg = .eprob , tag = FALSE),
-        namesof(if (NOS == 1) "size" else
+      c(namesof(if (NOS == 1) "size" else
                 paste("size", 1:NOS, sep = ""),
-               .lsize ,  earg = .esize ,  tag = FALSE))
+               .lsize ,  earg = .esize ,  tag = FALSE),
+        namesof(if (NOS == 1) "prob" else
+                paste("prob", 1:NOS, sep = ""),
+               .lprob , earg = .eprob , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
 
     if (is.null( .nsimEIM )) {
@@ -4555,7 +5204,7 @@ polya.control <- function(save.weight = TRUE, ...) {
         } else {
           medabsres <- median(abs(y[, iii] - use.this)) + 1/32
           allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
-          mu.init[, iii] <- use.this + (1 - .sinit) * allowfun(y[, iii] -
+          mu.init[, iii] <- use.this + (1 - .ishrinkage ) * allowfun(y[, iii] -
                           use.this, maxtol = medabsres)
 
           mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
@@ -4575,10 +5224,10 @@ polya.control <- function(save.weight = TRUE, ...) {
         k.grid <- 2^(seq(-8, 8, length = 40))
         kayy.init <- matrix(0, nrow = n, ncol = NOS)
         for (spp. in 1:NOS) {
-          kayy.init[, spp.] <- getMaxMin(k.grid,
-                             objfun = negbinomial.Loglikfun,
-                             y = y[, spp.], x = x, w = w,
-                             extraargs = mu.init[, spp.])
+          kayy.init[, spp.] <- grid.search(k.grid,
+                                           objfun = negbinomial.Loglikfun,
+                                           y = y[, spp.], x = x, w = w,
+                                           extraargs = mu.init[, spp.])
         }
       }
 
@@ -4587,8 +5236,9 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
       etastart <-
-        cbind(theta2eta(prob.init, .lprob , earg = .eprob),
-              theta2eta(kayy.init, .lsize , earg = .esize))
+        cbind(theta2eta(kayy.init, .lsize , earg = .esize),
+              theta2eta(prob.init, .lprob , earg = .eprob))
+              
       etastart <-
         etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
       }
@@ -4596,46 +5246,48 @@ polya.control <- function(save.weight = TRUE, ...) {
             .eprob = eprob, .esize = esize,
             .pinit = iprob, .kinit = isize,
             .probs.y = probs.y,
-            .sinit = shrinkage.init, .nsimEIM = nsimEIM, .zero = zero,
+            .ishrinkage = ishrinkage, .nsimEIM = nsimEIM, .zero = zero,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     M1 <- 2
     NOS <- ncol(eta) / M1
-    pmat <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
-                     .lprob , earg = .eprob)
-    kmat <- eta2theta(eta[, M1*(1:NOS)-  0, drop = FALSE],
+    kmat <- eta2theta(eta[, M1*(1:NOS)-  1, drop = FALSE],
                      .lsize , earg = .esize)
+    pmat <- eta2theta(eta[, M1*(1:NOS) - 0, drop = FALSE],
+                     .lprob , earg = .eprob)
     kmat / (kmat + pmat)
   }, list( .lprob = lprob, .eprob = eprob,
            .lsize = lsize, .esize = esize))),
   last = eval(substitute(expression({
-    temp0303 <- c(rep( .lprob , length = NOS),
-                 rep( .lsize , length = NOS))
-    names(temp0303) =
-      c(if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""),
-        if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
+    temp0303 <- c(rep( .lsize , length = NOS),
+                  rep( .lprob , length = NOS))
+                  
+    names(temp0303) <-
+      c(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
+        if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""))
+        
     temp0303 <- temp0303[interleave.VGAM(M, M = 2)]
     misc$link <- temp0303 # Already named
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
     for (ii in 1:NOS) {
-      misc$earg[[M1*ii-1]] <- .eprob
-      misc$earg[[M1*ii  ]] <- .esize
+      misc$earg[[M1*ii-1]] <- .esize
+      misc$earg[[M1*ii  ]] <- .eprob
     }
 
     misc$isize <- .isize  
     misc$imethod <- .imethod 
     misc$nsimEIM <- .nsimEIM
     misc$expected <- TRUE
-    misc$shrinkage.init <- .sinit
+    misc$ishrinkage <- .ishrinkage
     misc$M1 <- 2
     misc$multipleResponses <- TRUE
   }), list( .lprob = lprob, .lsize = lsize,
             .eprob = eprob, .esize = esize,
             .isize = isize,
             .nsimEIM = nsimEIM,
-            .sinit = shrinkage.init, .imethod = imethod ))),
+            .ishrinkage = ishrinkage, .imethod = imethod ))),
 
 
   loglikelihood = eval(substitute(
@@ -4644,9 +5296,9 @@ polya.control <- function(save.weight = TRUE, ...) {
              summation = TRUE) {
     M1 <- 2
     NOS <- ncol(eta) / M1
-    pmat  <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+    pmat  <- eta2theta(eta[, M1*(1:NOS) - 0, drop = FALSE],
                        .lprob , earg = .eprob)
-    temp300 <-         eta[, M1*(1:NOS)    , drop = FALSE]
+    temp300 <-         eta[, M1*(1:NOS) - 1, drop = FALSE]
     if ( .lsize == "loge") {
       bigval <- 68
       temp300 <- ifelse(temp300 >  bigval,  bigval, temp300)
@@ -4665,7 +5317,7 @@ polya.control <- function(save.weight = TRUE, ...) {
     }
   }, list( .lsize = lsize, .lprob = lprob,
            .esize = esize, .eprob = eprob ))),
-  vfamily = c("polya"),
+  vfamily = c("polyaR"),
 
 
 
@@ -4677,8 +5329,8 @@ polya.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    pmat <- eta2theta(eta[, c(TRUE, FALSE)], .lprob , .eprob )
-    kmat <- eta2theta(eta[, c(FALSE, TRUE)], .lsize , .esize )
+    kmat <- eta2theta(eta[, c(TRUE, FALSE)], .lsize , .esize )
+    pmat <- eta2theta(eta[, c(FALSE, TRUE)], .lprob , .eprob )
     rnbinom(nsim * length(pmat), prob = pmat, size = kmat)
   }, list( .lprob = lprob, .lsize = lsize,
            .eprob = eprob, .esize = esize ))),
@@ -4691,9 +5343,9 @@ polya.control <- function(save.weight = TRUE, ...) {
     NOS <- ncol(eta) / M1
     M <- ncol(eta)
 
-    pmat  <- eta2theta(eta[, M1*(1:NOS) - 1, drop = FALSE],
+    pmat  <- eta2theta(eta[, M1*(1:NOS) - 0, drop = FALSE],
                       .lprob , earg = .eprob)
-    temp3 <-           eta[, M1*(1:NOS)    , drop = FALSE]
+    temp3 <-           eta[, M1*(1:NOS) - 1, drop = FALSE]
     if ( .lsize == "loge") {
       bigval <- 68
       temp3 <- ifelse(temp3 >  bigval,  bigval, temp3)
@@ -4701,14 +5353,14 @@ polya.control <- function(save.weight = TRUE, ...) {
     }
     kmat <- eta2theta(temp3, .lsize , earg = .esize)
 
-    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
     dl.dkayy <- digamma(y + kmat) - digamma(kmat) + log(pmat)
+    dl.dprob <- kmat / pmat - y / (1.0 - pmat)
 
-    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
     dkayy.deta <- dtheta.deta(kmat, .lsize , earg = .esize)
-    dthetas.detas <- cbind(dprob.deta, dkayy.deta)
+    dprob.deta <- dtheta.deta(pmat, .lprob , earg = .eprob)
+    dthetas.detas <- cbind(dkayy.deta, dprob.deta)
     dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
-    myderiv <- c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
+    myderiv <- c(w) * cbind(dl.dkayy, dl.dprob) * dthetas.detas
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lprob = lprob, .lsize = lsize,
             .eprob = eprob, .esize = esize))),
@@ -4727,12 +5379,11 @@ polya.control <- function(save.weight = TRUE, ...) {
       for (ii in 1:( .nsimEIM )) {
         ysim <- rnbinom(n = n, prob = pvec, size = kvec)
 
-        dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
         dl.dkayy <- digamma(ysim + kvec) - digamma(kvec) + log(pvec)
-        temp3 <- cbind(dl.dprob, dl.dkayy)
-        run.varcov <- run.varcov +
-                     temp3[, ind1$row.index] *
-                     temp3[, ind1$col.index]
+        dl.dprob <- kvec / pvec - ysim / (1.0 - pvec)
+        temp3 <- cbind(dl.dkayy, dl.dprob)
+        run.varcov <- run.varcov + temp3[, ind1$row.index] *
+                                   temp3[, ind1$col.index]
       }
       run.varcov <- cbind(run.varcov / .nsimEIM)
 
@@ -4742,14 +5393,14 @@ polya.control <- function(save.weight = TRUE, ...) {
           run.varcov
 
       wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
-                  dThetas.detas[, M1 * (spp. - 1) + ind1$col]
+                   dThetas.detas[, M1 * (spp. - 1) + ind1$col]
 
 
       for (jay in 1:M1)
           for (kay in jay:M1) {
               cptr <- iam((spp. - 1) * M1 + jay,
-                         (spp. - 1) * M1 + kay,
-                         M = M)
+                          (spp. - 1) * M1 + kay,
+                          M = M)
               wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
           }
     }  # End of for (spp.) loop
@@ -4768,7 +5419,7 @@ polya.control <- function(save.weight = TRUE, ...) {
              summation = TRUE) {
     M1 <- 2
     NOS <- ncol(eta) / M1
-    temp300 <-  eta[, M1*(1:NOS), drop = FALSE]
+    temp300 <-  eta[, M1*(1:NOS) - 1, drop = FALSE]
 
 
 
@@ -4802,7 +5453,7 @@ polya.control <- function(save.weight = TRUE, ...) {
            .esize = esize)))
 
   ans
-}  # End of polya()
+}  # End of polyaR()
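[Annotation: polyaR() is polya() with the two linear predictors swapped (size first, prob second); the fitted model is the same, only the ordering of coefficients and of misc$link differs. A hedged usage sketch assuming the usual vglm() interface, left commented out since it needs VGAM attached:

    # library(VGAM); set.seed(2)
    # ydata <- data.frame(y = rnbinom(200, mu = 4, size = 1.5))
    # fit1 <- vglm(y ~ 1, polya(),  data = ydata)  # eta = (prob, size)
    # fit2 <- vglm(y ~ 1, polyaR(), data = ydata)  # eta = (size, prob)
    # coef(fit1, matrix = TRUE)
    # coef(fit2, matrix = TRUE)  # same estimates, columns swapped
]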
 
 
 
@@ -4836,7 +5487,7 @@ polya.control <- function(save.weight = TRUE, ...) {
     if (ncol(cbind(y)) != 1)
       stop("response must be a vector or a one-column matrix")
 
-    predictors.names <- "log(lambda)"
+    predictors.names <- "loge(lambda)"
 
     mu <- (weighted.mean(y, w) + y) / 2 + 1/8
 
@@ -5099,7 +5750,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 3
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 3,
@@ -5400,7 +6051,7 @@ polya.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
@@ -5612,7 +6263,7 @@ polya.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -5805,7 +6456,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
  simplex <- function(lmu = "logit", lsigma = "loge",
                      imu = NULL, isigma = NULL,
-                     imethod = 1, shrinkage.init = 0.95,
+                     imethod = 1, ishrinkage = 0.95,
                      zero = 2) {
 
 
@@ -5827,10 +6478,10 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
                   integer.valued = TRUE, positive = TRUE) ||
        imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-      shrinkage.init < 0 ||
-      shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 ||
+      ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -5849,7 +6500,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
             "Mean:              mu\n",
             "Variance function: V(mu) = mu^3 * (1 - mu)^3"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (any(y <= 0.0 | y >= 1.0))
@@ -5875,7 +6526,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
                               mean(y, trim = 0.1)
 
 
-        init.mu <- (1 - .sinit) * y + .sinit * use.this
+        init.mu <- (1 - .ishrinkage ) * y + .ishrinkage * use.this
         mu.init <- rep(if (length( .imu )) .imu else init.mu, length = n)
         sigma.init <- if (length( .isigma )) rep( .isigma, leng = n) else {
         use.this <- deeFun(y, mu=init.mu)
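[Annotation: the renamed 'ishrinkage' argument above is just the weight in a convex combination pulling each raw response toward a robust centre before iteration starts. A standalone sketch of that initialisation, using the trimmed mean seen in the context lines:

    # ishrinkage = 1 starts at the trimmed mean; 0 keeps the raw data.
    shrink.init <- function(y, ishrinkage = 0.95)
      (1 - ishrinkage) * y + ishrinkage * mean(y, trim = 0.1)

    y <- c(0.12, 0.50, 0.93, 0.47)   # simplex responses live in (0, 1)
    shrink.init(y)
    shrink.init(y, ishrinkage = 0.2)
]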
@@ -5891,7 +6542,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
   }), list( .lmu = lmu, .lsigma = lsigma,
             .emu = emu, .esigma = esigma,
             .imu = imu, .isigma = isigma,
-            .sinit = shrinkage.init, .imethod = imethod ))),
+            .ishrinkage = ishrinkage, .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     eta2theta(eta[, 1], .lmu , earg = .emu )
   }, list( .lmu = lmu, .emu = emu ))),
@@ -5903,11 +6554,11 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     misc$imu   <- .imu
     misc$isigma <- .isigma
     misc$imethod <- .imethod
-    misc$shrinkage.init <- .sinit
+    misc$ishrinkage <- .ishrinkage
   }), list( .lmu = lmu, .lsigma = lsigma,
             .imu = imu, .isigma = isigma,
             .emu = emu, .esigma = esigma,
-            .sinit = shrinkage.init, .imethod = imethod ))),
+            .ishrinkage = ishrinkage, .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
@@ -6190,7 +6841,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- hypersecant.1 <- function(link.theta = elogit(min = -pi/2, max = pi/2),
+ hypersecant01 <- function(link.theta = elogit(min = -pi/2, max = pi/2),
                            init.theta = NULL) {
 
 
@@ -6200,7 +6851,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
   new("vglmff",
-  blurb = c("Hyperbolic Secant distribution \n",
+  blurb = c("Hyperbolic secant distribution \n",
             "f(y) = (cos(theta)/pi) * y^(-0.5+theta/pi) * \n",
             "       (1-y)^(-0.5-theta/pi), ",
             "  0 < y < 1,\n",
@@ -6260,7 +6911,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
       }
     }
   }, list( .link.theta = link.theta , .earg = earg ))),
-  vfamily = c("hypersecant.1"),
+  vfamily = c("hypersecant01"),
   deriv = eval(substitute(expression({
     theta <- eta2theta(eta, .link.theta , earg = .earg )
     dl.dthetas <-  -tan(theta) + log(y/(1-y)) / pi 
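[Annotation: hypersecant.1() is renamed hypersecant01(), reflecting its (0, 1) support. The blurb's density integrates to one for any |theta| < pi/2, which is easy to verify numerically; a standalone check restating the density from the blurb:

    dhs01 <- function(y, theta)
      (cos(theta) / pi) * y^(-0.5 + theta/pi) * (1 - y)^(-0.5 - theta/pi)

    integrate(dhs01, 0, 1, theta = 0.7)$value   # ~ 1
]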
@@ -6423,11 +7074,11 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- invbinomial <- function(lrho = elogit(min = 0.5, max = 1),
-                         llambda = "loge",
-                         irho = NULL,
-                         ilambda = NULL,
-                         zero = NULL) {
+ inv.binomial <- function(lrho = elogit(min = 0.5, max = 1),
+                          llambda = "loge",
+                          irho = NULL,
+                          ilambda = NULL,
+                          zero = NULL) {
 
 
 
@@ -6454,7 +7105,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
             "Mean:     lambda*(1-rho)/(2*rho-1)\n",
             "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -6519,7 +7170,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
     }
   }, list( .llambda = llambda, .lrho = lrho,
            .elambda = elambda, .erho = erho ))),
-  vfamily = c("invbinomial"),
+  vfamily = c("inv.binomial"),
   deriv = eval(substitute(expression({
     rho    <- eta2theta(eta[, 1], .lrho    , earg = .erho )
     lambda <- eta2theta(eta[, 2], .llambda , earg = .elambda )
@@ -6605,7 +7256,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
             "Mean:     theta / (1-lambda)\n",
             "Variance: theta / (1-lambda)^3"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -6741,62 +7392,65 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
-
-dlgamma <- function(x, location = 0, scale = 1, k = 1, log = FALSE) {
+dlgamma <- function(x, location = 0, scale = 1, shape = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
   if (!is.Numeric(scale, positive = TRUE))
     stop("bad input for argument 'scale'")
-  if (!is.Numeric(k, positive = TRUE))
-    stop("bad input for argument 'k'")
+  if (!is.Numeric(shape, positive = TRUE))
+    stop("bad input for argument 'shape'")
   z <- (x-location) / scale
   if (log.arg) {
-    k * z - exp(z) - log(scale) - lgamma(k)
+    shape * z - exp(z) - log(scale) - lgamma(shape)
   } else {
-    exp(k * z - exp(z)) / (scale * gamma(k))
+    exp(shape * z - exp(z)) / (scale * gamma(shape))
   }
 }
 
 
-plgamma <- function(q, location = 0, scale = 1, k = 1) {
+plgamma <- function(q, location = 0, scale = 1, shape = 1) {
 
   zedd <- (q - location) / scale
-  ans <- pgamma(exp(zedd), k)
+  ans <- pgamma(exp(zedd), shape)
   ans[scale <  0] <- NaN
   ans
 }
 
 
-qlgamma <- function(p, location = 0, scale = 1, k = 1) {
+qlgamma <- function(p, location = 0, scale = 1, shape = 1) {
   if (!is.Numeric(scale, positive = TRUE))
     stop("bad input for argument 'scale'")
 
-  ans <- location + scale * log(qgamma(p, k))
+  ans <- location + scale * log(qgamma(p, shape))
   ans[scale <  0] <- NaN
   ans
 }
 
 
-rlgamma <- function(n, location = 0, scale = 1, k = 1) {
-  ans <- location + scale * log(rgamma(n, k))
+rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
+  ans <- location + scale * log(rgamma(n, shape))
   ans[scale < 0] <- NaN
   ans
 }
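[Annotation: the log-gamma dpqr functions now take 'shape' in place of 'k'. Since the distribution is just the log of a gamma variate, the p/q pair forms an exact round trip; a self-contained check restating the two definitions above:

    plg <- function(q, location = 0, scale = 1, shape = 1)
      pgamma(exp((q - location) / scale), shape)
    qlg <- function(p, location = 0, scale = 1, shape = 1)
      location + scale * log(qgamma(p, shape))

    p <- c(0.1, 0.5, 0.9)
    all.equal(plg(qlg(p, shape = 2.5), shape = 2.5), p)  # TRUE
]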
 
 
 
- lgammaff <- function(link = "loge", init.k = NULL) {
+ lgamma1 <- function(lshape = "loge", ishape = NULL) {
 
-  link <- as.list(substitute(link))
+
+  init.k <- ishape
+
+  link <- as.list(substitute(lshape))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
 
 
   new("vglmff",
   blurb = c("Log-gamma distribution ",
-            "f(y) = exp(ky - e^y)/gamma(k)), k>0\n\n",
+            "f(y) = exp(ky - e^y)/gamma(k)), k>0, ",
+            "shape=k>0\n\n",
             "Link:    ",
             namesof("k", link, earg = earg), "\n", "\n",
             "Mean:    digamma(k)", "\n"),
@@ -6808,7 +7462,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
 
 
     predictors.names <-
-      namesof("k", .link , earg = .earg , tag = FALSE) 
+      namesof("shape", .link , earg = .earg , tag = FALSE) 
 
     if (!length(etastart)) {
       k.init <- if (length( .init.k))
@@ -6824,8 +7478,8 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
     digamma(kay)
   }, list( .link = link, .earg = earg ))),
   last = eval(substitute(expression({
-    misc$link <- c(k = .link )
-    misc$earg <- list(k = .earg )
+    misc$link <-    c(shape = .link )
+    misc$earg <- list(shape = .earg )
     misc$expected <- TRUE
   }), list( .link = link, .earg = earg ))),
   loglikelihood = eval(substitute(
@@ -6838,7 +7492,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
     } else {
       ll.elts <-
         c(w) * dlgamma(x = y, location = 0, scale = 1,
-                       k = kay, log = TRUE)
+                       shape = kay, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -6846,7 +7500,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
       }
     }
   }, list( .link = link, .earg = earg ))),
-  vfamily = c("lgammaff"),
+  vfamily = c("lgamma1"),
 
 
 
@@ -6859,7 +7513,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
       warning("ignoring prior weights")
     eta <- predict(object)
     kay <- eta2theta(eta, .link , earg = .earg )
-    rlgamma(nsim * length(kay), location = 0, scale = 1, k = kay)
+    rlgamma(nsim * length(kay), location = 0, scale = 1, shape = kay)
   }, list( .link = link, .earg = earg ))),
 
 
@@ -6883,9 +7537,10 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
 
 
 
- lgamma3ff <-
+ lgamma3   <-
   function(llocation = "identitylink", lscale = "loge", lshape = "loge",
-           ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL) {
+           ilocation = NULL, iscale = NULL, ishape = 1, zero = 2:3) {
+
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -6917,11 +7572,11 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
             "location=a, scale=b>0, shape=k>0\n\n",
             "Links:    ",
             namesof("location", llocat, earg = elocat), ", ",
-            namesof("scale", lscale, earg = escale), ", ",
-            namesof("shape", lshape, earg = eshape), "\n\n",
-            "Mean:     a + b*digamma(k)", "\n"),
+            namesof("scale",    lscale, earg = escale), ", ",
+            namesof("shape",    lshape, earg = eshape), "\n\n",
+            "Mean:     a + b * digamma(k)", "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -6934,7 +7589,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
     predictors.names <-
       c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
         namesof("scale", .lscale , earg = .escale , tag = FALSE),
-        namesof("shape", .lshape , earg = .eshape, tag = FALSE))
+        namesof("shape", .lshape , earg = .eshape , tag = FALSE))
 
 
     if (!length(etastart)) {
@@ -6943,7 +7598,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
           rep(exp(median(y)), length.out = length(y))
       }
       scale.init <- if (length( .iscale ))
-          rep( .iscale, length.out = length(y)) else {
+          rep( .iscale , length.out = length(y)) else {
           rep(sqrt(var(y) / trigamma(k.init)), length.out = length(y))
       }
       loc.init <- if (length( .ilocat ))
@@ -6966,11 +7621,15 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
   }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape))),
   last = eval(substitute(expression({
-    misc$link <-    c(location = .llocat , scale = .lscale ,
-                     shape = .lshape)
+    misc$link <-    c(location = .llocat ,
+                      scale    = .lscale ,
+                      shape    = .lshape)
 
-    misc$earg <- list(location = .elocat , scale = .escale ,
-                     shape = .eshape )
+    misc$earg <- list(location = .elocat ,
+                      scale    = .escale ,
+                      shape    = .eshape )
+
+    misc$multipleResponses <- FALSE
   }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
             .elocat = elocat, .escale = escale, .eshape = eshape))),
   loglikelihood = eval(substitute(
@@ -6984,7 +7643,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * dlgamma(x = y, locat = aa, scale = bb, k = kk,
+        c(w) * dlgamma(x = y, locat = aa, scale = bb, shape = kk,
                        log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -6994,7 +7653,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
     }
   }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape))),
-  vfamily = c("lgamma3ff"),
+  vfamily = c("lgamma3"),
 
 
 
@@ -7011,7 +7670,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
     aa <- eta2theta(eta[, 1], .llocat , earg = .elocat )
     bb <- eta2theta(eta[, 2], .lscale , earg = .escale )
     kk <- eta2theta(eta[, 3], .lshape , earg = .eshape )
-    rlgamma(nsim * length(kk), location = aa, scale = bb, k = kk)
+    rlgamma(nsim * length(kk), location = aa, scale = bb, shape = kk)
   }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
            .elocat = elocat, .escale = escale, .eshape = eshape))),
 
@@ -7061,7 +7720,8 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
 
 
  prentice74 <-
-  function(llocation = "identitylink", lscale = "loge", lshape = "identitylink",
+  function(llocation = "identitylink", lscale = "loge",
+           lshape = "identitylink",
            ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3) {
 
   if (length(zero) &&
@@ -7098,7 +7758,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
             namesof("shape", lshape, earg = eshape), "\n", "\n",
             "Mean:     a", "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -7111,7 +7771,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
     predictors.names <-
     c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
       namesof("scale", .lscale , earg = .escale , tag = FALSE),
-      namesof("shape", .lshape , earg = .eshape, tag = FALSE))
+      namesof("shape", .lshape , earg = .eshape , tag = FALSE))
 
 
 
@@ -7123,7 +7783,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
             rep(-skewness, length.out = length(y))
         }
         scale.init <- if (length( .iscale ))
-            rep( .iscale, length.out = length(y)) else {
+            rep( .iscale , length.out = length(y)) else {
             rep(sdy, length.out = length(y))
         }
         loc.init <- if (length( .iloc ))
@@ -7221,7 +7881,7 @@ rlgamma <- function(n, location = 0, scale = 1, k = 1) {
 
 
 
-dgengamma <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
+dgengamma.stacy <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -7256,7 +7916,7 @@ dgengamma <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
 
 
 
-pgengamma <- function(q, scale = 1, d = 1, k = 1) {
+pgengamma.stacy <- function(q, scale = 1, d = 1, k = 1) {
   zedd <- (q / scale)^d
   ans <- pgamma(zedd, k)
   ans[scale <  0] <- NaN
@@ -7265,7 +7925,7 @@ pgengamma <- function(q, scale = 1, d = 1, k = 1) {
 }
 
 
-qgengamma <- function(p, scale = 1, d = 1, k = 1) {
+qgengamma.stacy <- function(p, scale = 1, d = 1, k = 1) {
   ans <- scale * qgamma(p, k)^(1/d)
   ans[scale <  0] <- NaN
   ans[d     <= 0] <- NaN
@@ -7273,7 +7933,7 @@ qgengamma <- function(p, scale = 1, d = 1, k = 1) {
 }
 
 
-rgengamma <- function(n, scale = 1, d = 1, k = 1) {
+rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
 
   ans <- scale * rgamma(n, k)^(1/d)
   ans[scale <  0] <- NaN
@@ -7282,8 +7942,9 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
 }
 
 
- gengamma <- function(lscale = "loge", ld = "loge", lk = "loge",
-                      iscale = NULL, id = NULL, ik = NULL, zero = NULL) {
+ gengamma.stacy <-
+  function(lscale = "loge", ld = "loge", lk = "loge",
+           iscale = NULL, id = NULL, ik = NULL, zero = NULL) {
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
@@ -7319,7 +7980,7 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
             namesof("k", lk, earg = ek), "\n", "\n",
             "Mean:     b * gamma(k+1/d) / gamma(k)", "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -7338,7 +7999,7 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
 
     if (!length(etastart)) {
       b.init <- if (length( .iscale ))
-          rep( .iscale, length.out = length(y)) else {
+          rep( .iscale , length.out = length(y)) else {
           rep(mean(y^2) / mean(y), length.out = length(y))
       }
       k.init <- if (length( .ik ))
@@ -7383,7 +8044,7 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * dgengamma(x = y, scale = b, d = d, k = k, log = TRUE)
+        c(w) * dgengamma.stacy(x = y, scale = b, d = d, k = k, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -7392,7 +8053,7 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
     }
   }, list( .lscale = lscale, .ld = ld, .lk = lk,
            .escale = escale, .ed = ed, .ek = ek ))),
-  vfamily = c("gengamma"),
+  vfamily = c("gengamma.stacy"),
 
 
 
@@ -7409,7 +8070,7 @@ rgengamma <- function(n, scale = 1, d = 1, k = 1) {
     bbb <- eta2theta(eta[, 1], .lscale , earg = .escale )
     ddd <- eta2theta(eta[, 2], .ld     , earg = .ed )
     kkk <- eta2theta(eta[, 3], .lk     , earg = .ek )
-    rgengamma(nsim * length(kkk), scale = bbb, d = ddd, k = kkk)
+    rgengamma.stacy(nsim * length(kkk), scale = bbb, d = ddd, k = kkk)
   }, list( .lscale = lscale, .ld = ld, .lk = lk,
            .escale = escale, .ed = ed, .ek = ek ))),
 
@@ -7608,7 +8269,7 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -7654,10 +8315,10 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
       if (!length( .init.c ))
         for (ilocal in 1:ncoly) {
           prob.grid <- seq(0.05, 0.95, by = 0.05)
-          Init.c[, ilocal] <- getMaxMin(prob.grid,
-                                        objfun = logff.Loglikfun,
-                                        y = y[, ilocal], x = x,
-                                        w = w[, ilocal])
+          Init.c[, ilocal] <- grid.search(prob.grid,
+                                          objfun = logff.Loglikfun,
+                                          y = y[, ilocal], x = x,
+                                          w = w[, ilocal])
 
         }
       etastart <- theta2eta(Init.c, .link , earg = .earg )
@@ -7739,28 +8400,67 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
 
 
 
- levy <- function(delta = NULL, link.gamma = "loge",
-                  idelta = NULL, igamma = NULL) {
+dlevy <- function(x, location = 0, scale = 1, log.arg = FALSE) {
+  logdensity <- 0.5 * log(scale / (2*pi)) - 1.5 * log(x - location) -
+                      0.5 * scale / (x - location)
+  if (log.arg) logdensity else exp(logdensity)
+}
+
+
+
+plevy <- function(q, location = 0, scale = 1) {
+
+  erfc(sqrt(scale * 0.5 / (q - location)))
+}
+
+
+
+
+qlevy <- function(p, location = 0, scale = 1) {
+
+  location + 0.5 * scale / (erfc(p, inverse = TRUE))^2
+}
+
+
+rlevy <- function(n, location = 0, scale = 1)
+  qlevy(runif(n), location = location, scale = scale)
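[Annotation: the new Levy dpqr functions lean on erfc(); since erfc(z) = 2*pnorm(-z*sqrt(2)), the same cdf/quantile pair can be cross-checked in base R. A standalone sketch mirroring plevy()/qlevy() above (these helper names are illustrative, not part of VGAM):

    plevy2 <- function(q, location = 0, scale = 1)
      2 * pnorm(-sqrt(scale / (q - location)))
    qlevy2 <- function(p, location = 0, scale = 1)
      location + scale / qnorm(p / 2)^2

    p <- c(0.1, 0.5, 0.9)
    all.equal(plevy2(qlevy2(p, scale = 2), scale = 2), p)  # TRUE
]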
+
+
+
+ levy <- function(location = 0, lscale = "loge",
+                  iscale = NULL) {
+
+
+
 
 
 
-  delta.known = is.Numeric(delta, length.arg = 1)
 
-  link.gamma <- as.list(substitute(link.gamma))
+
+
+
+
+  delta.known <- is.Numeric(location)  # , length.arg = 1
+
+  if (!delta.known)
+    stop("argument 'location' must be specified")
+  idelta <- NULL
+  delta <- location  # Lazy to change variable names below
+
+
+  link.gamma <- as.list(substitute(lscale))
   earg <- link2list(link.gamma)
   link.gamma <- attr(earg, "function.name")
 
 
 
   new("vglmff",
-  blurb = c("Levy distribution f(y) = sqrt(gamma/(2*pi)) * ",
-            "(y-delta)^(-3/2) * \n",
-            "          exp(-gamma / (2*(y-delta ))),\n",
-            "          delta < y, gamma > 0",
-            if (delta.known) paste(", delta = ", delta, ",", sep = ""),
-            "\n\n",
+  blurb = c("Levy distribution f(y) = sqrt(scale/(2*pi)) * ",
+            "(y-location)^(-3/2) * \n",
+            "          exp(-scale / (2*(y-location ))),\n",
+            "          location < y < Inf, scale > 0",
             if (delta.known) "Link:    " else "Links:   ",
-            namesof("gamma", link.gamma, earg = earg),
+            namesof("scale", link.gamma, earg = earg),
             if (! delta.known) 
                 c(", ", namesof("delta", "identitylink", earg = list())),
             "\n\n",
@@ -7776,49 +8476,49 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
 
 
     predictors.names <-
-      c(namesof("gamma", .link.gamma, earg = .earg , tag = FALSE),
+      c(namesof("scale", .link.gamma , earg = .earg , tag = FALSE),
         if ( .delta.known) NULL else 
         namesof("delta", "identitylink", earg = list(), tag = FALSE))
 
 
     if (!length(etastart)) {
       delta.init <- if ( .delta.known) {
-                     if (min(y,na.rm = TRUE) <= .delta)
-                         stop("delta must be < min(y)")
+                     if (min(y, na.rm = TRUE) <= .delta )
+                         stop("'location' must be < min(y)")
                      .delta 
                    } else {
-                     if (length( .idelta)) .idelta else
+                     if (length( .idelta )) .idelta else
                          min(y,na.rm = TRUE) - 1.0e-4 *
                          diff(range(y,na.rm = TRUE))
                    }
-      gamma.init <- if (length( .igamma)) .igamma else
-                   median(y - delta.init)  # = 1/median(1/(y-delta.init))
+      gamma.init <- if (length( .iscale )) .iscale else
+                    median(y - delta.init)  # = 1/median(1/(y-delta.init))
       gamma.init <- rep(gamma.init, length = length(y))
       etastart <-
         cbind(theta2eta(gamma.init, .link.gamma , earg = .earg ),
-                        if ( .delta.known) NULL else delta.init)
+                        if ( .delta.known ) NULL else delta.init)
                        
     }
   }), list( .link.gamma = link.gamma, .earg = earg,
             .delta.known = delta.known,
             .delta = delta,
             .idelta = idelta,
-            .igamma = igamma ))),
+            .iscale = iscale ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     eta <- as.matrix(eta)
-    mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
+    mygamma <- eta2theta(eta[, 1], .link.gamma , earg = .earg )
     delta <- if ( .delta.known) .delta else eta[, 2]
 
 
-    NA * mygamma
+    qlevy(p = 0.5, location = delta, scale = mygamma)
   }, list( .link.gamma = link.gamma, .earg = earg,
            .delta.known = delta.known,
            .delta = delta ))),
   last = eval(substitute(expression({
     misc$link <- if ( .delta.known) NULL else c(delta = "identitylink")
-    misc$link <- c(gamma = .link.gamma, misc$link)
-    misc$earg <- if ( .delta.known) list(gamma = .earg ) else
-                list(gamma = .earg , delta = list())
+    misc$link <- c(scale = .link.gamma , misc$link)
+    misc$earg <- if ( .delta.known ) list(scale = .earg ) else
+                list(scale = .earg , delta = list())
     if ( .delta.known)
       misc$delta <- .delta
   }), list( .link.gamma = link.gamma, .earg = earg,
@@ -7829,14 +8529,13 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
              extra = NULL,
              summation = TRUE) {
     eta <- as.matrix(eta)
-    mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
+    mygamma <- eta2theta(eta[, 1], .link.gamma , earg = .earg )
     delta <- if ( .delta.known) .delta else eta[, 2]
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * 0.5 * (log(mygamma) -3 * log(y - delta) -
-                      mygamma / (y - delta))
+        c(w) * dlevy(x = y, location = delta, scale = mygamma, log.arg = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -7849,21 +8548,21 @@ rlog <- function(n, prob, Smallno = 1.0e-6) {
   vfamily = c("levy"),
   deriv = eval(substitute(expression({
     eta <- as.matrix(eta)
-    mygamma <- eta2theta(eta[, 1], .link.gamma, earg = .earg )
-    delta <- if ( .delta.known) .delta else eta[, 2]
+    mygamma <- eta2theta(eta[, 1], .link.gamma , earg = .earg )
+    delta <- if ( .delta.known ) .delta else eta[, 2]
     if (! .delta.known)
       dl.ddelta  <- (3 - mygamma / (y-delta)) / (2 * (y-delta))
     dl.dgamma <- 0.5 * (1 / mygamma - 1 / (y-delta))
-    dgamma.deta <- dtheta.deta(mygamma, .link.gamma, earg = .earg )
+    dgamma.deta <- dtheta.deta(mygamma, .link.gamma , earg = .earg )
     c(w) * cbind(dl.dgamma * dgamma.deta, 
-                 if ( .delta.known) NULL else dl.ddelta)
+                 if ( .delta.known ) NULL else dl.ddelta)
   }), list( .link.gamma = link.gamma, .earg = earg,
             .delta.known = delta.known,
             .delta = delta ))),
   weight = eval(substitute(expression({
-    wz <- matrix(as.numeric(NA), n, dimm(M))  # M = if (delta is known) 1 else 2
-    wz[, iam(1, 1, M)] <- 1 * dgamma.deta^2 
-    if (! .delta.known) {
+    wz <- matrix(as.numeric(NA), n, dimm(M))
+    wz[, iam(1, 1, M)] <- 1 * dgamma.deta^2
+    if (! .delta.known ) {
       wz[, iam(1, 2, M)] <-  3 * dgamma.deta
       wz[, iam(2, 2, M)] <-  21
     }
@@ -7951,7 +8650,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
             namesof("lambda", llambda, earg = elambda), "\n", 
             "Mean:     something complicated"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (min(y) <= 0 || max(y) >= 1)
@@ -7999,7 +8698,9 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
     shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
     shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
     lambda <- eta2theta(eta[, 3], .llambda , earg = .elambda )
-    rep(as.numeric(NA), length = nrow(eta))
+
+
+    qlino(p = 0.5, shape1 = shape1, shape2 = shape2, lambda = lambda)
   }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
            .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
   last = eval(substitute(expression({
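[Annotation: the linkinv slot above now returns the median, qlino(p = 0.5, ...), instead of NA, so fitted() becomes usable for lino(). When lambda = 1 the Lino distribution reduces to the beta, so a quick plausibility check (assuming VGAM's qlino() with this signature) is:

    # library(VGAM)
    # qlino(p = 0.5, shape1 = 2, shape2 = 3, lambda = 1)
    # qbeta(0.5, 2, 3)   # should agree when lambda == 1
]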
@@ -8118,7 +8819,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
             namesof("shape2", link, earg = earg), "\n",
             "Mean:     shape1/(shape2-1) provided shape2>1"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -8207,46 +8908,46 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
 
 
 
-dmaxwell <- function(x, a, log = FALSE) {
+dmaxwell <- function(x, rate, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
-  L <- max(length(x), length(a))
-  x <- rep(x, length.out = L)
-  a <- rep(a, length.out = L)
+  L <- max(length(x), length(rate))
+  x    <- rep(x,    length.out = L)
+  rate <- rep(rate, length.out = L)
   logdensity <- rep(log(0), length.out = L)
   xok <- (x >= 0)
-  logdensity[xok] <- 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
-                     2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
-  logdensity[a <= 0] <- NaN
+  logdensity[xok] <- 0.5 * log(2/pi) + 1.5 * log(rate[xok]) +
+                     2 * log(x[xok]) - 0.5 * rate[xok] * x[xok]^2
+  logdensity[rate <= 0] <- NaN
   logdensity[x == Inf] <- log(0)
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
 
-pmaxwell <- function(q, a) {
-  L <- max(length(q), length(a))
-  q <- rep(q, length.out = L)
-  a <- rep(a, length.out = L) 
+pmaxwell <- function(q, rate) {
+  L <- max(length(q), length(rate))
+  q    <- rep(q,    length.out = L)
+  rate <- rep(rate, length.out = L) 
   ans <- ifelse(q > 0,
-                erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi),
+                erf(q*sqrt(rate/2)) - q*exp(-0.5*rate*q^2) * sqrt(2*rate/pi),
                 0)
-  ans[a <= 0] <- NaN
+  ans[rate <= 0] <- NaN
   ans
 }
 
 
-qmaxwell <- function(p, a) {
+qmaxwell <- function(p, rate) {
 
-  sqrt(2 * qgamma(p = p, 1.5) / a)
+  sqrt(2 * qgamma(p = p, 1.5) / rate)
 }
 
 
-rmaxwell <- function(n, a) {
+rmaxwell <- function(n, rate) {
 
-  sqrt(2 * rgamma(n = n, 1.5) / a)
+  sqrt(2 * rgamma(n = n, 1.5) / rate)
 }
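[Annotation: the Maxwell dpqr functions now use 'rate' in place of 'a'. Both the r and q functions reduce to a transformed gamma(3/2), so the blurb's mean sqrt(8/(rate*pi)) is easy to confirm by simulation; a standalone restatement of rmaxwell() above:

    rmax <- function(n, rate) sqrt(2 * rgamma(n, 1.5) / rate)

    set.seed(3); rate <- 4
    mean(rmax(1e5, rate))      # ~ 0.798
    sqrt(8 / (rate * pi))      # 0.7979..., the blurb's mean
]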
 
 
@@ -8268,14 +8969,14 @@ rmaxwell <- function(n, a) {
 
 
   new("vglmff",
-  blurb = c("Maxwell distribution f(y;a) = sqrt(2/pi) * a^(3/2) * y^2 *",
-            " exp(-0.5*a*y^2), y>0, a>0\n",
+  blurb = c("Maxwell distribution f(y;rate) = sqrt(2/pi) * rate^(3/2) * y^2 *",
+            " exp(-0.5*rate*y^2), y>0, rate>0\n",
             "Link:    ",
-            namesof("a", link, earg = earg),
+            namesof("rate", link, earg = earg),
             "\n", "\n",
-            "Mean:    sqrt(8 / (a * pi))"),
+            "Mean:    sqrt(8 / (rate * pi))"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -8306,7 +9007,7 @@ rmaxwell <- function(n, a) {
     M <- M1 * ncoly
 
 
-    mynames1  <- paste("a", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames1  <- paste("rate", if (ncoly > 1) 1:ncoly else "", sep = "")
     predictors.names <-
       namesof(mynames1, .link , earg = .earg )
 
@@ -8347,7 +9048,7 @@ rmaxwell <- function(n, a) {
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * dmaxwell(x = y, a = aa, log = TRUE)
+      ll.elts <- c(w) * dmaxwell(x = y, rate = aa, log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -8385,8 +9086,7 @@ rmaxwell <- function(n, a) {
     da.deta <- dtheta.deta(aa, .link , earg = .earg )
 
     c(w) * dl.da * da.deta
-  }), list( .link = link,
-            .earg = earg ))),
+  }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
     ned2l.da2 <- 1.5 / aa^2
     wz <- c(w) * ned2l.da2 * da.deta^2
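
The expected information 1.5/rate^2 in the weight slot follows from the
log-density l(rate) = 1.5*log(rate) - rate*y^2/2 + const, whose second
derivative -1.5/rate^2 is free of y.  An illustrative simulation check of
the score variance, using rmaxwell() from above:

    aa <- 2
    yy <- rmaxwell(1e5, rate = aa)
    var(1.5/aa - yy^2/2)   # should be close to 1.5/aa^2 = 0.375
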
@@ -8400,7 +9100,7 @@ rmaxwell <- function(n, a) {
 
 
 
-dnaka <- function(x, shape, scale = 1, log = FALSE) {
+dnaka <- function(x, scale = 1, shape, log = FALSE) {
     if (!is.logical(log.arg <- log) || length(log) != 1)
       stop("bad input for argument 'log'")
     rm(log)
@@ -8420,7 +9120,7 @@ dnaka <- function(x, shape, scale = 1, log = FALSE) {
 }
 
 
-pnaka <- function(q, shape, scale = 1) {
+pnaka <- function(q, scale = 1, shape) {
     if (!is.Numeric(q))
       stop("bad input for argument 'q'")
     if (!is.Numeric(shape, positive = TRUE))
@@ -8437,7 +9137,7 @@ pnaka <- function(q, shape, scale = 1) {
 }
 
 
-qnaka <- function(p, shape, scale = 1, ...) {
+qnaka <- function(p, scale = 1, shape, ...) {
   if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
     stop("bad input for argument 'p'")
   if (!is.Numeric(shape, positive = TRUE))
@@ -8468,7 +9168,7 @@ qnaka <- function(p, shape, scale = 1, ...) {
 }
 
 
-rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
+rnaka <- function(n, scale = 1, shape, Smallno = 1.0e-6) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
            if (!is.Numeric(n, integer.valued = TRUE,
@@ -8496,7 +9196,7 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
       Upper <- Upper + scale
     x <- runif(2*use.n, min = 0, max = Upper)
     index <- runif(2*use.n, max = ymax) < dnaka(x, shape = shape,
-                                                scale = scale)
+                                                   scale = scale)
     sindex <- sum(index)
     if (sindex) {
       ptr2 <- min(use.n, ptr1 + sindex - 1)
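
Note that dnaka(), pnaka(), qnaka() and rnaka() now take 'scale' before
'shape'.  Named calls are unaffected, but positional calls silently
change meaning:

    dnaka(1.2, shape = 2, scale = 3)   # same result as before the reorder
    dnaka(1.2, 2, 3)                   # now means scale = 2, shape = 3
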
@@ -8512,8 +9212,8 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
 
 
 
- nakagami <- function(lshape = "loge", lscale = "loge",
-                      ishape = NULL, iscale = 1) {
+ nakagami <- function(lscale = "loge", lshape = "loge",
+                      iscale = 1, ishape = NULL, nowarning = FALSE) {
 
   if (!is.null(iscale) && !is.Numeric(iscale, positive = TRUE))
     stop("argument 'iscale' must be a positive number or NULL")
@@ -8528,6 +9228,10 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
   lscale <- attr(escale, "function.name")
 
 
+  if (!nowarning)
+    warning("order of the linear/additive predictors has been changed",
+            " in VGAM version 0.9-5")
+
   new("vglmff",
   blurb = c("Nakagami distribution f(y) = 2 * (shape/scale)^shape *\n",
             "                             ",
@@ -8535,8 +9239,8 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
             "                             ",
             "y>0, shape>0, scale>0\n",
             "Links:    ",
-            namesof("shape", lshape, earg = eshape), ", ",
-            namesof("scale", lscale, earg = escale),
+            namesof("scale", lscale, earg = escale), ", ",
+            namesof("shape", lshape, earg = eshape),
             "\n",
             "\n",
             "Mean:    sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
@@ -8549,33 +9253,33 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
 
 
     predictors.names <-
-      c(namesof("shape", .lshape , earg = .eshape, tag = FALSE),
-        namesof("scale", .lscale , earg = .escale , tag = FALSE))
+      c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+        namesof("shape", .lshape , earg = .eshape , tag = FALSE))
 
 
     if (!length(etastart)) {
-      init2 <- if (is.Numeric( .iscale, positive = TRUE))
-                  rep( .iscale, length.out = n) else
+      init2 <- if (is.Numeric( .iscale , positive = TRUE))
+                  rep( .iscale , length.out = n) else
                   rep(1, length.out = n)
       init1 <- if (is.Numeric( .ishape, positive = TRUE))
                   rep( .ishape, length.out = n) else
               rep(init2 / (y+1/8)^2, length.out = n)
       etastart <-
-        cbind(theta2eta(init1, .lshape , earg = .eshape ),
-              theta2eta(init2, .lscale , earg = .escale ))
+        cbind(theta2eta(init2, .lscale , earg = .escale ),
+              theta2eta(init1, .lshape , earg = .eshape ))
     }
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape,
             .ishape = ishape, .iscale = iscale ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape))),
   last = eval(substitute(expression({
-    misc$link <-    c(shape = .lshape , scale = .lscale)
-    misc$earg <- list(shape = .eshape, scale = .escale )
+    misc$link <-    c(scale = .lscale , shape = .lshape )
+    misc$earg <- list(scale = .escale , shape = .eshape )
     misc$expected = TRUE
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape))),
@@ -8583,8 +9287,8 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
@@ -8607,23 +9311,24 @@ rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
 
 
   deriv = eval(substitute(expression({
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    Scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+
     dl.dshape <- 1 + log(shape/Scale) - digamma(shape) +
                 2 * log(y) - y^2 / Scale
     dl.dscale <- -shape/Scale + shape * (y/Scale)^2
     dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
     dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
-    c(w) * cbind(dl.dshape * dshape.deta,
-                 dl.dscale * dscale.deta)
+    c(w) * cbind(dl.dscale * dscale.deta,
+                 dl.dshape * dshape.deta)
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape))),
   weight = eval(substitute(expression({
     d2l.dshape2 <- trigamma(shape) - 1/shape
     d2l.dscale2 <- shape / Scale^2
     wz <- matrix(as.numeric(NA), n, M)  # diagonal
-    wz[, iam(1, 1, M)] <- d2l.dshape2 * dshape.deta^2
-    wz[, iam(2, 2, M)] <- d2l.dscale2 * dscale.deta^2
+    wz[, iam(1, 1, M)] <- d2l.dscale2 * dscale.deta^2
+    wz[, iam(2, 2, M)] <- d2l.dshape2 * dshape.deta^2
     c(w) * wz
   }), list( .lscale = lscale, .lshape = lshape,
             .escale = escale, .eshape = eshape))))
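
A minimal fitting sketch for the reordered family (assumes VGAM is
attached; nowarning = TRUE suppresses the predictor-order warning added
above):

    set.seed(1)
    ndata <- data.frame(y = rnaka(100, scale = 2, shape = 3))
    fit <- vglm(y ~ 1, nakagami(nowarning = TRUE), data = ndata)
    coef(fit, matrix = TRUE)   # columns now ordered loge(scale), loge(shape)
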
@@ -8649,7 +9354,7 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
 
 
 prayleigh <- function(q, scale = 1) {
-  if (any(scale <= 0))
+  if (any(scale <= 0, na.rm = TRUE))
     stop("argument 'scale' must be positive")
 
   L     <- max(length(q), length(scale)) 
@@ -8661,7 +9366,7 @@ prayleigh <- function(q, scale = 1) {
 
 
 qrayleigh <- function(p, scale = 1) {
-  if (any(p <= 0) || any(p >= 1))
+  if (any(p <= 0, na.rm = TRUE) || any(p >= 1, na.rm = TRUE))
     stop("argument 'p' must be between 0 and 1")
   ans <- scale * sqrt(-2 * log1p(-p))
   ans[scale <= 0] <- NaN
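
With na.rm = TRUE these checks now tolerate missing values instead of
aborting; NAs simply propagate through:

    qrayleigh(c(0.25, NA, 0.75), scale = 2)   # value, NA, value -- no error
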
@@ -8709,7 +9414,7 @@ rrayleigh <- function(n, scale = 1) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 1
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -9044,7 +9749,7 @@ rparetoI <- function(n, scale = 1, shape = 1)
      c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
        namesof("inequality", .linequ ,
                earg = .einequ , tag = FALSE),
-       namesof("shape", .lshape , earg = .eshape, tag = FALSE))
+       namesof("shape", .lshape , earg = .eshape , tag = FALSE))
 
 
 
@@ -9082,11 +9787,13 @@ rparetoI <- function(n, scale = 1, shape = 1)
       .escale = escale, .einequ = einequ, .eshape = eshape,
       .iscale = iscale, .iinequ = iinequ, .ishape = ishape ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-      location <- extra$location
-      Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
-      inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
-      shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
-      location + Scale * NA
+    location <- extra$location
+    Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+    inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+    shape <- eta2theta(eta[, 3], .lshape , earg = .eshape )
+
+    qparetoIV(p = 0.5, location = location, scale = Scale,
+              inequality = inequ, shape = shape)
   }, list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
            .escale = escale, .einequ = einequ, .eshape = eshape))),
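
As with lino() above, the fitted values change from NA to the median.
Under the Pareto(IV) survival function
S(y) = [1 + ((y - location)/scale)^(1/inequality)]^(-shape),
the median is location + scale * (2^(1/shape) - 1)^inequality, e.g.

    qparetoIV(0.5, location = 0, scale = 1, inequality = 1, shape = 1)   # exactly 1
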
   last = eval(substitute(expression({
@@ -9251,7 +9958,10 @@ rparetoI <- function(n, scale = 1, shape = 1)
     location <- extra$location
     Scale      <- eta2theta(eta[, 1], .lscale     , earg = .escale )
     inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
-    location + Scale * NA
+
+    qparetoIII(p = 0.5, location = location, scale = Scale,
+               inequality = inequ)
+
   }, list( .lscale = lscale, .linequ = linequ,
            .escale = escale, .einequ = einequ ))),
   last = eval(substitute(expression({
@@ -9283,32 +9993,32 @@ rparetoI <- function(n, scale = 1, shape = 1)
     }
     }, list( .lscale = lscale, .linequ = linequ,
              .escale = escale, .einequ = einequ ))),
-    vfamily = c("paretoIII"),
-    deriv = eval(substitute(expression({
-        location <- extra$location
-        Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
-        inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
-        shape <- 1
-        zedd <- (y - location) / Scale
-        temp100 <- 1 + zedd^(1/inequ)
-        dl.dscale <- (shape  - (1+shape) / temp100) / (inequ * Scale)
-        dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
-                         inequ - 1) / inequ
-        dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
-        dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
-        c(w) * cbind(dl.dscale * dscale.deta,
-                     dl.dinequ * dinequ.deta)
-    }), list( .lscale = lscale, .linequ = linequ,
-              .escale = escale, .einequ = einequ ))),
-    weight = eval(substitute(expression({
-        d2scale.deta2 <- 1 / ((inequ*Scale)^2 * 3)
-        d2inequ.deta2 <- (1 + 2* trigamma(1)) / (inequ^2 * 3)
-        wz <- matrix(0, n, M)  # It is diagonal
-        wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
-        wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
-        c(w) * wz
-    }), list( .lscale = lscale, .linequ = linequ,
-              .escale = escale, .einequ = einequ ))))
+  vfamily = c("paretoIII"),
+  deriv = eval(substitute(expression({
+      location <- extra$location
+      Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
+      inequ <- eta2theta(eta[, 2], .linequ, earg = .einequ)
+      shape <- 1
+      zedd <- (y - location) / Scale
+      temp100 <- 1 + zedd^(1/inequ)
+      dl.dscale <- (shape  - (1+shape) / temp100) / (inequ * Scale)
+      dl.dinequ <- ((log(zedd) * (shape - (1+shape)/temp100)) /
+                       inequ - 1) / inequ
+      dscale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
+      dinequ.deta <- dtheta.deta(inequ, .linequ, earg = .einequ)
+      c(w) * cbind(dl.dscale * dscale.deta,
+                   dl.dinequ * dinequ.deta)
+  }), list( .lscale = lscale, .linequ = linequ,
+            .escale = escale, .einequ = einequ ))),
+  weight = eval(substitute(expression({
+      d2scale.deta2 <- 1 / ((inequ*Scale)^2 * 3)
+      d2inequ.deta2 <- (1 + 2* trigamma(1)) / (inequ^2 * 3)
+      wz <- matrix(0, n, M)  # It is diagonal
+      wz[, iam(1, 1, M)] <- dscale.deta^2 * d2scale.deta2
+      wz[, iam(2, 2, M)] <- dinequ.deta^2 * d2inequ.deta2
+      c(w) * wz
+  }), list( .lscale = lscale, .linequ = linequ,
+            .escale = escale, .einequ = einequ ))))
 }
 
 
@@ -9391,7 +10101,9 @@ rparetoI <- function(n, scale = 1, shape = 1)
     location <- extra$location
     Scale <- eta2theta(eta[, 1], .lscale , earg = .escale )
     shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
-    location + Scale * NA
+
+
+    qparetoII(p = 0.5, scale = Scale, shape = shape)
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape))),
   last = eval(substitute(expression({
@@ -9456,61 +10168,63 @@ rparetoI <- function(n, scale = 1, shape = 1)
 
 
 
-dpareto <- function(x, location, shape, log = FALSE) {
+
+
+dpareto <- function(x, scale = 1, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
-  L <- max(length(x), length(location), length(shape)) 
+  L <- max(length(x), length(scale), length(shape)) 
   x <- rep(x, length.out = L);
-  location <- rep(location, length.out = L);
+  scale <- rep(scale, length.out = L);
   shape <- rep(shape, length.out = L)
 
   logdensity <- rep(log(0), length.out = L)
-  xok <- (x > location)
-  logdensity[xok] <- log(shape[xok]) + shape[xok] * log(location[xok]) -
+  xok <- (x > scale)
+  logdensity[xok] <- log(shape[xok]) + shape[xok] * log(scale[xok]) -
                       (shape[xok]+1) * log(x[xok])
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-ppareto <- function(q, location, shape) {
+ppareto <- function(q, scale = 1, shape) {
 
-  L <- max(length(q), length(location), length(shape))
+  L <- max(length(q), length(scale), length(shape))
   q <- rep(q, length.out = L);
-  location <- rep(location, length.out = L);
+  scale <- rep(scale, length.out = L);
   shape <- rep(shape, length.out = L)
 
-  ans <- ifelse(q > location, 1 - (location/q)^shape, 0)
-  ans[location <= 0] <- NaN
-  ans[shape    <= 0] <- NaN
+  ans <- ifelse(q > scale, 1 - (scale/q)^shape, 0)
+  ans[scale <= 0] <- NaN
+  ans[shape <= 0] <- NaN
   ans
 }
 
 
-qpareto <- function(p, location, shape) {
+qpareto <- function(p, scale = 1, shape) {
   if (any(p <= 0) || any(p >= 1))
     stop("argument 'p' must be between 0 and 1")
 
-  ans <- location / (1 - p)^(1/shape)
-  ans[location <= 0] <- NaN
-  ans[shape    <= 0] <- NaN
+  ans <- scale / (1 - p)^(1/shape)
+  ans[scale <= 0] <- NaN
+  ans[shape <= 0] <- NaN
   ans
 }
 
 
-rpareto <- function(n, location, shape) {
-  ans <- location / runif(n)^(1/shape)
-  ans[location <= 0] <- NaN
-  ans[shape    <= 0] <- NaN
+rpareto <- function(n, scale = 1, shape) {
+  ans <- scale / runif(n)^(1/shape)
+  ans[scale <= 0] <- NaN
+  ans[shape <= 0] <- NaN
   ans
 }
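
The Pareto(I) d/p/q/r functions rename 'location' to 'scale' (the lower
support bound), which now defaults to 1.  A round-trip check using only
the functions defined above:

    ppareto(qpareto(0.7, scale = 2, shape = 3), scale = 2, shape = 3)   # 0.7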
 
 
 
- paretoff <- function(lshape = "loge", location = NULL) {
-  if (is.Numeric(location) && location <= 0)
-    stop("argument 'location' must be positive")
+ paretoff <- function(scale = NULL, lshape = "loge") {
+  if (is.Numeric(scale) && scale <= 0)
+    stop("argument 'scale' must be positive")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -9522,11 +10236,11 @@ rpareto <- function(n, location, shape) {
 
   new("vglmff",
   blurb = c("Pareto distribution ",
-            "f(y) = shape * location^shape / y^(shape+1),",
-            " 0<location<y, shape>0\n",
+            "f(y) = shape * scale^shape / y^(shape+1),",
+            " 0<scale<y, shape>0\n",
             "Link:    ", namesof("shape", lshape, earg = earg),
             "\n", "\n",
-            "Mean:    location*shape/(shape-1) for shape>1"),
+            "Mean:    scale*shape/(shape-1) for shape>1"),
   initialize = eval(substitute(expression({
 
     w.y.check(w = w, y = y,
@@ -9538,49 +10252,49 @@ rpareto <- function(n, location, shape) {
       namesof("shape", .lshape , earg = .earg , tag = FALSE)
 
 
-    locationhat <- if (!length( .location )) {
-      locationEstimated <- TRUE
+    scalehat <- if (!length( .scale )) {
+      scaleEstimated <- TRUE
       min(y)  # - .smallno
     } else {
-      locationEstimated <- FALSE
-      .location
+      scaleEstimated <- FALSE
+      .scale
     }
-    if (any(y < locationhat))
-      stop("the value of location is too high ",
-           "(requires 0 < location < min(y))")
-    extra$location <- locationhat
-    extra$locationEstimated <- locationEstimated
+    if (any(y < scalehat))
+      stop("the value of 'scale' is too high ",
+           "(requires 0 < scale < min(y))")
+    extra$scale <- scalehat
+    extra$scaleEstimated <- scaleEstimated
 
     if (!length(etastart)) {
-      k.init <- (y + 1/8) / (y - locationhat + 1/8)
+      k.init <- (y + 1/8) / (y - scalehat + 1/8)
       etastart <- theta2eta(k.init, .lshape , earg = .earg )
     }
   }), list( .lshape = lshape, .earg = earg,
-            .location = location ))),
+            .scale = scale ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     k <- eta2theta(eta, .lshape , earg = .earg )
-    location <- extra$location
-    ifelse(k > 1, k * location / (k-1), NA)
+    scale <- extra$scale
+    ifelse(k > 1, k * scale / (k-1), NA)
   }, list( .lshape = lshape, .earg = earg ))),
   last = eval(substitute(expression({
     misc$link <-    c(k = .lshape)
 
     misc$earg <- list(k = .earg )
 
-    misc$location <- extra$location # Use this for prediction
+    misc$scale <- extra$scale # Use this for prediction
   }), list( .lshape = lshape, .earg = earg ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
     k <- eta2theta(eta, .lshape , earg = .earg )
-    location <- extra$location
+    scale <- extra$scale
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
 
 
-      ll.elts <- c(w) * (log(k) + k * log(location) - (k+1) * log(y))
+      ll.elts <- c(w) * (log(k) + k * log(scale) - (k+1) * log(y))
       if (summation) {
         sum(ll.elts)
       } else {
@@ -9590,9 +10304,9 @@ rpareto <- function(n, location, shape) {
   }, list( .lshape = lshape, .earg = earg ))),
   vfamily = c("paretoff"),
   deriv = eval(substitute(expression({
-    location <- extra$location
+    scale <- extra$scale
     k <- eta2theta(eta, .lshape , earg = .earg )
-    dl.dk <- 1/k + log(location/y)
+    dl.dk <- 1/k + log(scale/y)
     dk.deta <- dtheta.deta(k, .lshape , earg = .earg )
     c(w) * dl.dk * dk.deta
   }), list( .lshape = lshape, .earg = earg ))),
@@ -9762,8 +10476,8 @@ rtruncpareto <- function(n, lower, upper, shape) {
                      (shape + 1) * log(y) - log1p(-myratio^shape)))
         }
         shape.grid <- 2^((-4):4)
-        try.this <- getMaxMin(shape.grid, objfun = truncpareto.Loglikfun,
-                              y = y,  x = x, w = w)
+        try.this <- grid.search(shape.grid, objfun = truncpareto.Loglikfun,
+                                y = y,  x = x, w = w)
         try.this <- rep(try.this, length.out = n)
         try.this
       }
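
grid.search() replaces getMaxMin() here and in the families below.
Assuming it evaluates objfun at each grid value and returns the
maximiser, as getMaxMin() did, it behaves like this stand-in:

    grid.maximise <- function(grid, objfun, ...)
      grid[which.max(sapply(grid, objfun, ...))]
    ## e.g. grid.maximise(2^((-4):4), truncpareto.Loglikfun, y = y, x = x, w = w)
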
@@ -9916,10 +10630,13 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
 
- expexp <- function(lshape = "loge", lscale = "loge",
-                    ishape = 1.1, iscale = NULL,  # ishape cannot be 1
-                    tolerance = 1.0e-6,
-                    zero = NULL) {
+
+
+
+ expexpff <- function(lrate = "loge", lshape = "loge",
+                      irate = NULL, ishape = 1.1,  # ishape cannot be 1
+                      tolerance = 1.0e-6,
+                      zero = NULL) {
 
 
   if (length(zero) &&
@@ -9932,15 +10649,16 @@ rtruncpareto <- function(n, lower, upper, shape) {
   if (!is.Numeric(ishape, positive = TRUE))
       stop("bad input for argument 'ishape'")
 
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
-      stop("bad input for argument 'iscale'")
+  if (length(irate) && !is.Numeric(irate, positive = TRUE))
+      stop("bad input for argument 'irate'")
 
   ishape[ishape == 1] <- 1.1 # Fails in @deriv
+  iratee <- irate
 
 
-  lscale <- as.list(substitute(lscale))
-  escale <- link2list(lscale)
-  lscale <- attr(escale, "function.name")
+  lratee <- as.list(substitute(lrate))
+  eratee <- link2list(lratee)
+  lratee <- attr(eratee, "function.name")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -9951,11 +10669,11 @@ rtruncpareto <- function(n, lower, upper, shape) {
   new("vglmff",
   blurb = c("Exponentiated Exponential Distribution\n",
              "Links:    ",
-             namesof("shape", lshape, earg = eshape), ", ",
-             namesof("scale", lscale, earg = escale),"\n",
-             "Mean:     (digamma(shape+1)-digamma(1))/scale"),
+             namesof("rate",  lratee, earg = eratee), ", ",
+             namesof("shape", lshape, earg = eshape), "\n",
+             "Mean:     (digamma(shape+1)-digamma(1)) / rate"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -9966,128 +10684,130 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
       predictors.names <-
-      c(namesof("shape", .lshape , earg = .eshape, short = TRUE), 
-        namesof("scale", .lscale , earg = .escale , short = TRUE))
+        c(namesof("rate",  .lratee , earg = .eratee , short = TRUE),
+          namesof("shape", .lshape , earg = .eshape , short = TRUE)) 
 
 
       if (!length(etastart)) {
         shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
                stop("argument 'ishape' must be positive") else
                rep( .ishape, length.out = n)
-        scale.init <- if (length( .iscale ))
-                    rep( .iscale, length.out = n) else
+        ratee.init <- if (length( .iratee ))
+                    rep( .iratee , length.out = n) else
                     (digamma(shape.init+1) - digamma(1)) / (y+1/8)
-        scale.init <- rep(weighted.mean(scale.init, w = w),
+        ratee.init <- rep(weighted.mean(ratee.init, w = w),
                           length.out = n)
         etastart <-
-          cbind(theta2eta(shape.init, .lshape , earg = .eshape ),
-                theta2eta(scale.init, .lscale , earg = .escale ))
+          cbind(theta2eta(ratee.init, .lratee , earg = .eratee ),
+                theta2eta(shape.init, .lshape , earg = .eshape ))
+
     }
-  }), list( .lshape = lshape, .lscale = lscale,
-            .iscale = iscale, .ishape = ishape,
-            .eshape = eshape, .escale = escale))),
+  }), list( .lshape = lshape, .lratee = lratee,
+            .iratee = iratee, .ishape = ishape,
+            .eshape = eshape, .eratee = eratee))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-      shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-      scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
-      (digamma(shape+1) - digamma(1)) / scale
-  }, list( .lshape = lshape, .lscale = lscale,
-           .eshape = eshape, .escale = escale))),
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
+    (digamma(shape+1) - digamma(1)) / ratee
+  }, list( .lshape = lshape, .lratee = lratee,
+           .eshape = eshape, .eratee = eratee))),
   last = eval(substitute(expression({
-    misc$link <-    c("shape" = .lshape , "scale" = .lscale )
-    misc$earg <- list("shape" = .eshape , "scale" = .escale )
+    misc$link <-    c("rate" = .lratee , "shape" = .lshape )
+    misc$earg <- list("rate" = .eratee , "shape" = .eshape )
 
     misc$expected <- TRUE
-  }), list( .lshape = lshape, .lscale = lscale,
-            .eshape = eshape, .escale = escale))),
+  }), list( .lshape = lshape, .lratee = lratee,
+            .eshape = eshape, .eratee = eratee))),
   loglikelihood= eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * (log(shape) + log(scale) + 
-               (shape-1)*log1p(-exp(-scale*y)) - scale*y)
+        c(w) * (log(shape) + log(ratee) + 
+               (shape-1)*log1p(-exp(-ratee*y)) - ratee*y)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lscale = lscale, .lshape = lshape,
-           .eshape = eshape, .escale = escale))),
-  vfamily = c("expexp"),
+  }, list( .lratee = lratee, .lshape = lshape,
+           .eshape = eshape, .eratee = eratee))),
+  vfamily = c("expexpff"),
   deriv = eval(substitute(expression({
-    shape <- eta2theta(eta[, 1], .lshape , earg = .eshape )
-    scale <- eta2theta(eta[, 2], .lscale , earg = .escale )
+    ratee <- eta2theta(eta[, 1], .lratee , earg = .eratee )
+    shape <- eta2theta(eta[, 2], .lshape , earg = .eshape )
 
-    dl.dscale <- 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
-    dl.dshape <- 1/shape + log1p(-exp(-scale*y))
+    dl.dratee <- 1/ratee + (shape-1)*y*exp(-ratee*y) / (-expm1(-ratee*y)) - y
+    dl.dshape <- 1/shape + log1p(-exp(-ratee*y))
 
-    dscale.deta <- dtheta.deta(scale, .lscale , earg = .escale )
+    dratee.deta <- dtheta.deta(ratee, .lratee , earg = .eratee )
     dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
 
-    c(w) * cbind(dl.dshape * dshape.deta,
-                 dl.dscale * dscale.deta)
-  }), list( .lshape = lshape, .lscale = lscale,
-            .eshape = eshape, .escale = escale))),
+    c(w) * cbind(dl.dratee * dratee.deta,
+                 dl.dshape * dshape.deta)
+  }), list( .lshape = lshape, .lratee = lratee,
+            .eshape = eshape, .eratee = eratee))),
   weight = eval(substitute(expression({
-        d11 <- 1 / shape^2  # True for all shape
-        d22 <- d12 <- rep(as.numeric(NA), length.out = n)
-        index2 <- abs(shape - 2) > .tolerance  # index2 = shape != 1
-        largeno <- 10000
-        if (any(index2)) {
-            Shape <- shape[index2]
-            Shape[abs(Shape-1) < .tolerance] <- 1.001 # digamma(0) is undefined
-            Scale <- scale[index2]
-            tmp200 <- trigamma(1)-trigamma(Shape-1) +
-                  (digamma(Shape-1)-digamma(1))^2    # Fails when Shape == 1
-            tmp300 <- trigamma(1)-digamma(Shape)+(digamma(Shape)-digamma(1))^2
-            d22[index2] <- (1 + Shape*(Shape-1)*tmp200/(Shape-2)) / Scale^2 +
-                          Shape*tmp300 / Scale^2
-        }
-        if (any(!index2)) {
-            Scale <- scale[!index2]
-            d22[!index2] <- (1 + 4 * sum(1/(2 + (0:largeno))^3)) / Scale^2
-        }
-
-        index1 <- abs(shape - 1) > .tolerance  # index1 <- shape != 1
-        if (any(index1)) {
-            Shape <- shape[index1]
-            Scale <- scale[index1]
-            d12[index1] <- -(Shape*(digamma(Shape)-digamma(1))/(Shape-1) -
-                          digamma(Shape+1) + digamma(1)) / Scale
-        }
-        if (any(!index1)) {
-            Scale <- scale[!index1]
-            d12[!index1] <- -sum(1/(2 + (0:largeno))^2) / Scale
-        }
-        wz <- matrix(0, n, dimm(M))
-        wz[, iam(1, 1, M)] <- dshape.deta^2 * d11
-        wz[, iam(2, 2, M)] <- dscale.deta^2 * d22
-        wz[, iam(1, 2, M)] <- dscale.deta * dshape.deta * d12
-        c(w) * wz
-    }), list( .tolerance = tolerance ))))
+    d11 <- 1 / shape^2  # True for all shape
+    d22 <- d12 <- rep(as.numeric(NA), length.out = n)
+    index2 <- abs(shape - 2) > .tolerance  # index2 = shape != 2
+    largeno <- 10000
+    if (any(index2)) {
+      Shape <- shape[index2]
+      Shape[abs(Shape-1) < .tolerance] <- 1.001  # digamma(0) is undefined
+      Scale <- ratee[index2]
+      tmp200 <- trigamma(1)-trigamma(Shape-1) +
+               (digamma(Shape-1)-digamma(1))^2  # Fails when Shape == 1
+      tmp300 <- trigamma(1)-digamma(Shape)+(digamma(Shape)-digamma(1))^2
+      d22[index2] <- (1 + Shape*(Shape-1)*tmp200/(Shape-2)) / Scale^2 +
+                     Shape*tmp300 / Scale^2
+    }
+    if (any(!index2)) {
+      Scale <- ratee[!index2]
+      d22[!index2] <- (1 + 4 * sum(1/(2 + (0:largeno))^3)) / Scale^2
+    }
+
+    index1 <- abs(shape - 1) > .tolerance  # index1 <- shape != 1
+    if (any(index1)) {
+      Shape <- shape[index1]
+      Scale <- ratee[index1]
+      d12[index1] <- -(Shape*(digamma(Shape)-digamma(1))/(Shape-1) -
+                      digamma(Shape+1) + digamma(1)) / Scale
+    }
+    if (any(!index1)) {
+      Scale <- ratee[!index1]
+      d12[!index1] <- -sum(1/(2 + (0:largeno))^2) / Scale
+    }
+    wz <- matrix(0, n, dimm(M))
+    wz[, iam(1, 1, M)] <- dratee.deta^2 * d22
+    wz[, iam(1, 2, M)] <- dratee.deta * dshape.deta * d12
+    wz[, iam(2, 2, M)] <- dshape.deta^2 * d11
+    c(w) * wz
+  }), list( .tolerance = tolerance ))))
 }
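
expexp() is renamed expexpff() and its predictors are reordered so that
'rate' comes first; the weight-matrix indices above are swapped to match.
A minimal fitting sketch (assumes VGAM is attached; this family is known
to be sensitive to initial values):

    set.seed(123)
    edata <- data.frame(y = rexp(200, rate = 2))  # shape = 1 is the exponential
    fit <- vglm(y ~ 1, expexpff(), data = edata)
    coef(fit, matrix = TRUE)   # columns: loge(rate), loge(shape)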
 
 
 
 
 
- expexp1 <- function(lscale = "loge",
-                     iscale = NULL,
-                     ishape = 1) {
 
-  lscale <- as.list(substitute(lscale))
-  escale <- link2list(lscale)
-  lscale <- attr(escale, "function.name")
+ expexpff1 <- function(lrate = "loge",
+                       irate = NULL,
+                       ishape = 1) {
 
+  lrate <- as.list(substitute(lrate))
+  erate <- link2list(lrate)
+  lrate <- attr(erate, "function.name")
 
-  if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
-      stop("bad input for argument 'iscale'")
+
+  if (length(irate) && !is.Numeric(irate, positive = TRUE))
+      stop("bad input for argument 'irate'")
 
 
 
@@ -10095,8 +10815,8 @@ rtruncpareto <- function(n, lower, upper, shape) {
   blurb = c("Exponentiated Exponential Distribution",
             " (profile likelihood estimation)\n",
             "Links:    ",
-            namesof("scale", lscale, earg = escale), "\n",
-            "Mean:     (digamma(shape+1)-digamma(1))/scale"),
+            namesof("rate", lrate, earg = erate), "\n",
+            "Mean:     (digamma(shape+1)-digamma(1)) / rate"),
   initialize = eval(substitute(expression({
 
     w.y.check(w = w, y = y,
@@ -10107,7 +10827,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
     predictors.names <-
-      namesof("scale", .lscale , earg = .escale , short = TRUE)
+      namesof("rate", .lrate , earg = .erate , short = TRUE)
 
     if (length(w) != n ||
         !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
@@ -10124,69 +10844,69 @@ rtruncpareto <- function(n, lower, upper, shape) {
       shape.init <- if (!is.Numeric( .ishape, positive = TRUE))
              stop("argument 'ishape' must be positive") else
              rep( .ishape, length.out = n)
-      scaleinit <- if (length( .iscale ))
-                  rep( .iscale, length.out = n) else
+      rateinit <- if (length( .irate ))
+                  rep( .irate , length.out = n) else
                   (digamma(shape.init+1) - digamma(1)) / (y+1/8)  
       etastart <-
-        cbind(theta2eta(scaleinit, .lscale , earg = .escale ))
+        cbind(theta2eta(rateinit, .lrate , earg = .erate ))
     }
-  }), list( .lscale = lscale, .iscale = iscale, .ishape = ishape,
-            .escale = escale))),
+  }), list( .lrate = lrate, .irate = irate, .ishape = ishape,
+            .erate = erate))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    scale <- eta2theta(eta, .lscale , earg = .escale )
-    temp7 <-  -expm1(-scale*extra$yvector)
+    rate <- eta2theta(eta, .lrate , earg = .erate )
+    temp7 <-  -expm1(-rate*extra$yvector)
     shape <- -extra$sumw / sum(extra$w*log(temp7))  # \gamma(\theta)
-    (digamma(shape+1)-digamma(1)) / scale
-  }, list( .lscale = lscale,
-           .escale = escale))),
+    (digamma(shape+1)-digamma(1)) / rate
+  }, list( .lrate = lrate,
+           .erate = erate))),
   last = eval(substitute(expression({
-    misc$link <-    c("scale" = .lscale)
-    misc$earg <- list("scale" = .escale )
+    misc$link <-    c("rate" = .lrate)
+    misc$earg <- list("rate" = .erate )
 
-    temp7 <-  -expm1(-scale*y)
+    temp7 <-  -expm1(-rate*y)
     shape <- -extra$sumw / sum(w*log(temp7))  # \gamma(\theta)
     misc$shape <- shape   # Store the ML estimate here
     misc$pooled.weight <- pooled.weight
-  }), list( .lscale = lscale, .escale = escale))),
+  }), list( .lrate = lrate, .erate = erate))),
   loglikelihood= eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    scale <- eta2theta(eta, .lscale , earg = .escale )
-    temp7 <-  -expm1(-scale*y)
+    rate <- eta2theta(eta, .lrate , earg = .erate )
+    temp7 <-  -expm1(-rate*y)
     shape <- -extra$sumw / sum(w*log(temp7))  # \gamma(\theta)
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <-
-        c(w) * (log(shape) + log(scale) + 
-               (shape-1)*log1p(-exp(-scale*y)) - scale*y)
+        c(w) * (log(shape) + log(rate) + 
+               (shape-1)*log1p(-exp(-rate*y)) - rate*y)
       if (summation) {
         sum(ll.elts)
       } else {
         ll.elts
       }
     }
-  }, list( .lscale = lscale, .escale = escale))),
-  vfamily = c("expexp1"),
+  }, list( .lrate = lrate, .erate = erate))),
+  vfamily = c("expexpff1"),
   deriv = eval(substitute(expression({
-    scale <- eta2theta(eta, .lscale , earg = .escale )
+    rate <- eta2theta(eta, .lrate , earg = .erate )
 
-    temp6 <- exp(-scale*y)
+    temp6 <- exp(-rate*y)
     temp7 <- 1-temp6
     shape <- -extra$sumw / sum(w*log(temp7))  # \gamma(\theta)
-    d1 <- 1/scale + (shape-1)*y*temp6/temp7 - y
+    d1 <- 1/rate + (shape-1)*y*temp6/temp7 - y
 
-    c(w) * cbind(d1 * dtheta.deta(scale, .lscale , earg = .escale ))
-  }), list( .lscale = lscale, .escale = escale))),
+    c(w) * cbind(d1 * dtheta.deta(rate, .lrate , earg = .erate ))
+  }), list( .lrate = lrate, .erate = erate))),
   weight = eval(substitute(expression({
-    d11 <- 1/scale^2  + y*(temp6/temp7^2) * ((shape-1) *
-          (y*temp7+temp6) - y*temp6 / (log(temp7))^2)
+    d11 <- 1/rate^2  + y*(temp6/temp7^2) * ((shape-1) *
+           (y*temp7+temp6) - y*temp6 / (log(temp7))^2)
 
     wz <- matrix(0, n, dimm(M))
-    wz[, iam(1, 1, M)] =
-      dtheta.deta(scale, .lscale , earg = .escale )^2 * d11 -
-      d2theta.deta2(scale, .lscale , earg = .escale ) * d1
+    wz[, iam(1, 1, M)] <-
+      dtheta.deta(rate, .lrate , earg = .erate )^2 * d11 -
+      d2theta.deta2(rate, .lrate , earg = .erate ) * d1
 
     if (FALSE && intercept.only) {
       sumw <- sum(w)
@@ -10197,7 +10917,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
     } else
       pooled.weight <- FALSE
     c(w) * wz
-  }), list( .lscale = lscale, .escale = escale))))
+  }), list( .lrate = lrate, .erate = erate))))
 }
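
expexp1() likewise becomes expexpff1(), with 'scale' renamed to 'rate'.
The shape parameter is profiled out rather than modelled: given the rate,
its MLE has the closed form used in the deriv and last slots above,

    shape.mle <- function(y, rate, w = rep(1, length(y)))
      -sum(w) / sum(w * log(-expm1(-rate * y)))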
 
 
@@ -10209,7 +10929,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
 
 
- logistic2 <- function(llocation = "identitylink",
+ logistic  <- function(llocation = "identitylink",
                        lscale = "loge",
                        ilocation = NULL, iscale = NULL,
                        imethod = 1, zero = -2) {
@@ -10251,7 +10971,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -10374,7 +11094,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
     }
   }, list( .llocat = llocat, .lscale = lscale,
            .elocat = elocat, .escale = escale))),
-  vfamily = c("logistic2"),
+  vfamily = c("logistic"),
 
 
 
@@ -10439,7 +11159,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
                               imu = NULL,
                               probs.y = 0.75,
                               imethod = 1,
-                              shrinkage.init = 0.95, zero = NULL) {
+                              ishrinkage = 0.95, zero = NULL) {
 
 
 
@@ -10467,10 +11187,10 @@ rtruncpareto <- function(n, lower, upper, shape) {
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1 or 2 or 3")
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
 
@@ -10488,7 +11208,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
 
   }), list( .zero = zero ))),
 
@@ -10546,7 +11266,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
           medabsres <- median(abs(y[, iii] - use.this)) + 1/32
           allowfun <- function(z, maxtol = 1)
             sign(z)*pmin(abs(z), maxtol)
-          mu.init[, iii] <- use.this + (1 - .sinit) *
+          mu.init[, iii] <- use.this + (1 - .ishrinkage ) *
                            allowfun(y[, iii] - use.this,
                                     maxtol = medabsres)
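
shrinkage.init is renamed ishrinkage here and in the zero-inflated and
zero-altered families later in this patch, matching the usual i*
convention for initial-value arguments; its role is unchanged.
Schematically, for a response vector y it blends each observation toward
a robust centre, clamped as by allowfun() above:

    s <- 0.95                                # ishrinkage
    ctr <- median(y)
    tol <- median(abs(y - ctr)) + 1/32
    mu.init <- ctr + (1 - s) * sign(y - ctr) * pmin(abs(y - ctr), tol)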
 
@@ -10574,7 +11294,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
             .emu = emu,
             .mu.init = imu,
             .size = size, .probs.y = probs.y,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .zero = zero, .imethod = imethod ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -10613,11 +11333,11 @@ rtruncpareto <- function(n, lower, upper, shape) {
 
     misc$imethod <- .imethod 
     misc$expected <- TRUE
-    misc$shrinkage.init <- .sinit
+    misc$ishrinkage <- .ishrinkage
     misc$size <- kmat
   }), list( .lmu = lmu,
             .emu = emu,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
 
 
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index fd9fde4..7ff1127 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -508,7 +508,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -783,7 +783,7 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1037,7 +1037,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
            zero = -3,  # Prior to 20130917 the default was: c(-1, -3),
            imethod = 1,
            nsimEIM = 250,
-           shrinkage.init = 0.95) {
+           ishrinkage = 0.95) {
 
 
 
@@ -1063,10 +1063,10 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
   lpobs0 <- as.list(substitute(lpobs0))
@@ -1099,7 +1099,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 3
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
 
@@ -1157,8 +1157,8 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
         if ( .imethod == 1) {
           use.this <- weighted.mean(y[index.posy, iii],
                                     w[index.posy, iii])
-          mu.init[ index.posy, iii] <- (1 - .sinit ) * y[index.posy, iii] +
-                                            .sinit   * use.this
+          mu.init[ index.posy, iii] <- (1 - .ishrinkage ) * y[index.posy, iii] +
+                                            .ishrinkage   * use.this
           mu.init[!index.posy, iii] <- use.this
         } else {
           use.this <-
@@ -1196,11 +1196,10 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
           index.posy <- (y[, spp.] > 0)
           posy <- y[index.posy, spp.]
           kmat0[, spp.] <-
-            getMaxMin(k.grid,
-                      objfun = posnegbinomial.Loglikfun,
-                      y = posy, x = x[index.posy, ],
-                      w = w[index.posy, spp.],
-                      extraargs = mu.init[index.posy, spp.])
+            grid.search(k.grid, objfun = posnegbinomial.Loglikfun,
+                        y = posy, x = x[index.posy, ],
+                        w = w[index.posy, spp.],
+                        extraargs = mu.init[index.posy, spp.])
         }
       }
 
@@ -1214,7 +1213,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
   }), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
             .epobs0 = epobs0, .emunb = emunb, .esize = esize,
             .ipobs0 = ipobs0,                 .isize = isize,
-            .imethod = imethod, .sinit = shrinkage.init,
+            .imethod = imethod, .ishrinkage = ishrinkage,
             .type.fitted = type.fitted ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
@@ -1500,7 +1499,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
            zero = c(-2, -3),
            imethod = 1,
            nsimEIM = 250,
-           shrinkage.init = 0.95) {
+           ishrinkage = 0.95) {
 
 
 
@@ -1523,10 +1522,10 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
   lmunb <- as.list(substitute(lmunb))
   emunb <- link2list(lmunb)
@@ -1559,7 +1558,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 3
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
 
@@ -1619,8 +1618,8 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
         if ( .imethod == 1) {
           use.this <- weighted.mean(y[index.posy, iii],
                                     w[index.posy, iii])
-          mu.init[ index.posy, iii] <- (1 - .sinit ) * y[index.posy, iii] +
-                                            .sinit   * use.this
+          mu.init[ index.posy, iii] <- (1 - .ishrinkage ) * y[index.posy, iii] +
+                                            .ishrinkage   * use.this
           mu.init[!index.posy, iii] <- use.this
         } else {
           use.this <-
@@ -1658,11 +1657,10 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
           index.posy <- (y[, spp.] > 0)
           posy <- y[index.posy, spp.]
           kmat0[, spp.] <-
-            getMaxMin(k.grid,
-                      objfun = posnegbinomial.Loglikfun,
-                      y = posy, x = x[index.posy, ],
-                      w = w[index.posy, spp.],
-                      extraargs = mu.init[index.posy, spp.])
+            grid.search(k.grid, objfun = posnegbinomial.Loglikfun,
+                        y = posy, x = x[index.posy, ],
+                        w = w[index.posy, spp.],
+                        extraargs = mu.init[index.posy, spp.])
         }
       }
 
@@ -1677,7 +1675,7 @@ zanegbinomialff.control <- function(save.weight = TRUE, ...) {
   }), list( .lonempobs0 = lonempobs0, .lmunb = lmunb, .lsize = lsize,
             .eonempobs0 = eonempobs0, .emunb = emunb, .esize = esize,
             .ionempobs0 = ionempobs0,                 .isize = isize,
-            .imethod = imethod, .sinit = shrinkage.init,
+            .imethod = imethod, .ishrinkage = ishrinkage,
             .type.fitted = type.fitted ))), 
   linkinv = eval(substitute(function(eta, extra = NULL) {
    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
@@ -2000,7 +1998,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            ipstr0 = NULL,    ilambda = NULL,
            imethod = 1,
-           shrinkage.init = 0.8, zero = NULL) {
+           ishrinkage = 0.8, zero = NULL) {
   ipstr00 <- ipstr0
 
 
@@ -2032,10 +2030,10 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
      imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-     shrinkage.init < 0 ||
-     shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+     ishrinkage < 0 ||
+     ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
   new("vglmff",
@@ -2048,7 +2046,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -2117,10 +2115,10 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
         } else if ( .imethod == 2) {
           mymean <- weighted.mean(yvec[yvec > 0],
                                      w[yvec > 0]) + 1/16
-          Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * mymean
+          Lambda.init <- (1 - .ishrinkage ) * (yvec + 1/8) + .ishrinkage * mymean
         } else {
           use.this <- median(yvec[yvec > 0]) + 1 / 16
-          Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * use.this
+          Lambda.init <- (1 - .ishrinkage ) * (yvec + 1/8) + .ishrinkage * use.this
         }
 
         zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
@@ -2129,10 +2127,9 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
                           log = TRUE))
         }
         phi.grid <- seq(0.02, 0.98, len = 21)
-        Phimat.init <- getMaxMin(phi.grid,
-                                 objfun = zipois.Loglikfun,
-                                 y = y, x = x, w = w,
-                                 extraargs = list(lambda = Lambda.init))
+        Phimat.init <- grid.search(phi.grid, objfun = zipois.Loglikfun,
+                                   y = y, x = x, w = w,
+                                   extraargs = list(lambda = Lambda.init))
 
         if (length(mustart)) {
           Lambda.init <- Lambda.init / (1 - Phimat.init)
@@ -2154,7 +2151,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
             .ipstr00 = ipstr00, .ilambda = ilambda,
             .imethod = imethod,
             .type.fitted = type.fitted,
-            .sinit = shrinkage.init ))),
+            .ishrinkage = ishrinkage ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
                      warning("cannot find 'type.fitted'. ",
@@ -2367,7 +2364,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
             namesof("prob" , lprob , earg = eprob ), "\n",
             "Mean:     (1 - pstr0) * prob"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
 
@@ -2648,7 +2645,7 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
             namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
             "Mean:     onempstr0 * prob"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.vgam(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
 
@@ -3196,7 +3193,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            ipstr0 = NULL,                    isize = NULL,
            zero = -3,  # 20130917; used to be c(-1, -3)
-           imethod = 1, shrinkage.init = 0.95,
+           imethod = 1, ishrinkage = 0.95,
            nsimEIM = 250) {
 
 
@@ -3235,10 +3232,10 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
   if (nsimEIM <= 50)
     warning("argument 'nsimEIM' should be greater than 50, say")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-      shrinkage.init < 0 ||
-      shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 ||
+      ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
 
@@ -3254,7 +3251,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 3
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
 
@@ -3311,7 +3308,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
               weighted.mean(y[index, iii], w     = w[index, iii]) else
                  median(rep(y[index, iii], times = w[index, iii])) + 1/8
         }
-        (1 - .sinit) * (y + 1/16) + .sinit * mum.init
+        (1 - .ishrinkage ) * (y + 1/16) + .ishrinkage * mum.init
       }
 
 
@@ -3351,11 +3348,11 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
           k.grid <- 2^((-6):6)
           kay.init <- matrix(0, nrow = n, ncol = NOS)
           for (spp. in 1:NOS) {
-            kay.init[, spp.] <- getMaxMin(k.grid,
-                              objfun = zinegbin.Loglikfun,
-                              y = y[, spp.], x = x, w = w[, spp.],
-                              extraargs = list(pstr0 = pstr0.init[, spp.],
-                                               mu  = mum.init[, spp.]))
+            kay.init[, spp.] <-
+              grid.search(k.grid, objfun = zinegbin.Loglikfun,
+                          y = y[, spp.], x = x, w = w[, spp.],
+                          extraargs = list(pstr0 = pstr0.init[, spp.],
+                                           mu    = mum.init[, spp.]))
           }
           kay.init
         }
@@ -3371,7 +3368,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
             .epstr0 = epstr0, .emunb = emunb, .esize = esize,
             .ipstr0 = ipstr0,                 .isize = isize,
             .type.fitted = type.fitted,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
       
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -3684,7 +3681,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            isize = NULL, ionempstr0 = NULL,  
            zero = c(-2, -3),
-           imethod = 1, shrinkage.init = 0.95,
+           imethod = 1, ishrinkage = 0.95,
            nsimEIM = 250) {
 
 
@@ -3724,10 +3721,10 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
   if (nsimEIM <= 50)
     warning("argument 'nsimEIM' should be greater than 50, say")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-      shrinkage.init < 0 ||
-      shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+      ishrinkage < 0 ||
+      ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
 
@@ -3744,7 +3741,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
 
     dotzero <- .zero
     M1 <- 3
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
 
@@ -3800,7 +3797,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
               weighted.mean(y[index, iii], w     = w[index, iii]) else
                  median(rep(y[index, iii], times = w[index, iii])) + 1/8
         }
-        (1 - .sinit) * (y + 1/16) + .sinit * mum.init
+        (1 - .ishrinkage ) * (y + 1/16) + .ishrinkage * mum.init
       }
 
 
@@ -3840,11 +3837,11 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
           k.grid <- 2^((-6):6)
           kay.init <- matrix(0, nrow = n, ncol = NOS)
           for (spp. in 1:NOS) {
-            kay.init[, spp.] <- getMaxMin(k.grid,
-                  objfun = zinegbin.Loglikfun,
-                  y = y[, spp.], x = x, w = w[, spp.],
-                  extraargs = list(pstr0 = 1 - onempstr0.init[, spp.],
-                                   mu    = mum.init[, spp.]))
+            kay.init[, spp.] <-
+              grid.search(k.grid, objfun = zinegbin.Loglikfun,
+                          y = y[, spp.], x = x, w = w[, spp.],
+                         extraargs = list(pstr0 = 1 - onempstr0.init[, spp.],
+                          extraargs = list(pstr0 = 1 - onempstr0.init[, spp.],
+                                           mu    = mum.init[, spp.]))
           kay.init
         }
@@ -3861,7 +3858,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
             .eonempstr0 = eonempstr0, .emunb = emunb, .esize = esize,
             .ionempstr0 = ionempstr0,                 .isize = isize,
             .type.fitted = type.fitted,
-            .sinit = shrinkage.init,
+            .ishrinkage = ishrinkage,
             .imethod = imethod ))),
       
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -4169,7 +4166,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
   function(llambda = "loge", lonempstr0 = "logit",
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            ilambda = NULL,   ionempstr0 = NULL, imethod = 1,
-           shrinkage.init = 0.8, zero = -2) {
+           ishrinkage = 0.8, zero = -2) {
 
 
   type.fitted <- match.arg(type.fitted,
@@ -4201,10 +4198,10 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
     imethod > 2)
     stop("argument 'imethod' must be 1 or 2")
 
-  if (!is.Numeric(shrinkage.init, length.arg = 1) ||
-    shrinkage.init < 0 ||
-    shrinkage.init > 1)
-    stop("bad input for argument 'shrinkage.init'")
+  if (!is.Numeric(ishrinkage, length.arg = 1) ||
+    ishrinkage < 0 ||
+    ishrinkage > 1)
+    stop("bad input for argument 'ishrinkage'")
 
 
 
@@ -4217,7 +4214,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
   constraints = eval(substitute(expression({
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -4283,10 +4280,10 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
           } else if ( .imethod == 2) {
             mymean <- weighted.mean(yjay[yjay > 0],
                                        w[yjay > 0]) + 1/16
-            Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * mymean
+            Lambda.init <- (1 - .ishrinkage ) * (yjay + 1/8) + .ishrinkage * mymean
           } else {
             use.this <- median(yjay[yjay > 0]) + 1 / 16
-            Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * use.this
+            Lambda.init <- (1 - .ishrinkage ) * (yjay + 1/8) + .ishrinkage * use.this
           }
 
           zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
@@ -4295,10 +4292,10 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
                             log = TRUE))
           }
           phi0.grid <- seq(0.02, 0.98, len = 21)
-          Phi0mat.init <- getMaxMin(phi0.grid,
-                                   objfun = zipois.Loglikfun,
-                                   y = y, x = x, w = w,
-                                   extraargs = list(lambda = Lambda.init))
+          Phi0mat.init <- grid.search(phi0.grid,
+                                      objfun = zipois.Loglikfun,
+                                      y = y, x = x, w = w,
+                                      extraargs = list(lambda = Lambda.init))
           if (length(mustart)) {
             Lambda.init <- Lambda.init / (1 - Phi0mat.init)
           }
@@ -4320,7 +4317,7 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
             .eonempstr0 = eonempstr0, .elambda = elambda,
             .ionempstr0 = ionempstr0, .ilambda = ilambda,
             .type.fitted = type.fitted,
-            .imethod = imethod, .sinit = shrinkage.init ))),
+            .imethod = imethod, .ishrinkage = ishrinkage ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
     type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
@@ -4431,10 +4428,10 @@ zinegbinomialff.control <- function(save.weight = TRUE, ...) {
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
-                           earg = .eonempstr0 )
     lambda    <- eta2theta(eta[, c(TRUE, FALSE)], .llambda ,
                            earg = .elambda    )
+    onempstr0 <- eta2theta(eta[, c(FALSE, TRUE)], .lonempstr0 ,
+                           earg = .eonempstr0 )
     rzipois(nsim * length(lambda), lambda = lambda, pstr0 = 1 - onempstr0)
   }, list( .lonempstr0 = lonempstr0, .llambda = llambda,
            .eonempstr0 = eonempstr0, .elambda = elambda ))),
@@ -4689,7 +4686,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -5019,7 +5016,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -5536,7 +5533,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             namesof("prob" ,   lprob,  earg = eprob),  "\n",
             "Mean:     (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -5840,7 +5837,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             namesof("onempobs0", lonempobs0, earg = eonempobs0), "\n",
             "Mean:     onempobs0 * prob / (1 - (1 - prob)^size)"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.vgam(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -6146,7 +6143,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -6447,7 +6444,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
 
     dotzero <- .zero
     M1 <- 2
-    eval(negzero.expression)
+    eval(negzero.expression.VGAM)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
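
A minimal sketch of the renamed 'ishrinkage' argument (formerly
'shrinkage.init'), assuming the family whose signature is shown above is
zipoissonff() and that 'zdata' is a hypothetical data frame with a count
response y2; grid.search() likewise replaces getMaxMin() for the internal
grid searches over initial values:

    fit <- vglm(y2 ~ x2, zipoissonff(ishrinkage = 0.8), data = zdata)
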
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 50a10db..5e580e6 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -13,25 +13,25 @@
 
 
 
-fittedvlm <- function(object, matrix.arg = TRUE,
+fittedvlm <- function(object,
+                      drop = FALSE,
                       type.fitted = NULL,
                       ...) {
 
 
   if (is.null(type.fitted)) {
-    answer <- if (matrix.arg) {
-        object@fitted.values
-    } else {
+    answer <- if (drop) {
       if (!is.matrix(object@fitted.values) ||
           !length(object@fitted.values))
         stop("object@fitted.values is not a matrix or is empty")
-  
       if (ncol(object@fitted.values) == 1) {
         c(object@fitted.values)
       } else {
         warning("ncol(object@fitted.values) is not 1")
         c(object@fitted.values)
       }
+    } else {
+        object@fitted.values
     }
   } else {
    linkinv <- object@family@linkinv
@@ -39,10 +39,10 @@ fittedvlm <- function(object, matrix.arg = TRUE,
     new.extra$type.fitted <- type.fitted
     answer <- linkinv(eta = predict(object), extra = new.extra)
 
-    answer <- if (matrix.arg) {
-      as.matrix(answer)
-    } else {
+    answer <- if (drop) {
       c(answer)
+    } else {
+      as.matrix(answer)
     }
   }
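
A sketch of the new 'drop' argument of fittedvlm(), which replaces
'matrix.arg' with the opposite sense ('fit' is an assumed vglm() object):

    fittedvlm(fit)               # fitted values as a matrix, as before
    fittedvlm(fit, drop = TRUE)  # a plain vector; warns if ncol() is not 1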
 
diff --git a/R/links.q b/R/links.q
index 655e0b0..93a3250 100644
--- a/R/links.q
+++ b/R/links.q
@@ -23,8 +23,8 @@ ToString <- function(x)
            link.list = list("(Default)" = "identitylink",
                             x2          = "loge",
                             x3          = "logoff",
-                            x4          = "mlogit",
-                            x5          = "mlogit"),
+                            x4          = "multilogit",
+                            x5          = "multilogit"),
            earg.list = list("(Default)" = list(),
                             x2          = list(),
                             x3          = list(offset = -1),
@@ -32,13 +32,13 @@ ToString <- function(x)
                             x5          = list()),
            gsigma = exp(-5:5),
            parallel = TRUE,
-           shrinkage.init = 0.95,
+           ishrinkage = 0.95,
            nointercept = NULL, imethod = 1,
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            probs.x = c(0.15, 0.85),
            probs.y = c(0.25, 0.50, 0.75),
            mv = FALSE, earg.link = FALSE,
-           whitespace = FALSE, bred = FALSE,
+           whitespace = FALSE, bred = FALSE, lss = TRUE,
            oim = FALSE, nsimEIM = 100,
            zero = NULL) {
   NULL
@@ -80,8 +80,8 @@ care.exp <- function(x,
 
   if (is.character(theta)) {
     string <- if (short)
-        paste("log(",  theta, ")", sep = "") else
-        paste("log(",  theta, ")", sep = "")
+        paste("loge(",  theta, ")", sep = "") else
+        paste("loge(",  theta, ")", sep = "")
     if (tag)
       string <- paste("Log:", string)
     return(string)
@@ -268,12 +268,17 @@ care.exp <- function(x,
       1 / Recall(theta = theta, bvalue = bvalue,
                  inverse = FALSE, deriv = deriv)
     } else {
-        exp(theta - log1p(exp(theta)))
+        yy <- theta
+        Neg <- (theta <  0) & !is.na(theta)
+        yy[ Neg] <- exp(theta[Neg]) / (1 + exp(theta[Neg]))
+        Pos <- (theta >= 0) & !is.na(theta)
+        yy[Pos] <- 1 / (1 + exp(-theta[Pos]))
+        yy
       }
   } else {
     switch(deriv+1, {
        temp2 <- log(theta) - log1p(-theta)
-       if (any(near0.5 <- (abs(theta - 0.5) < 0.000125)))
+       if (any(near0.5 <- (abs(theta - 0.5) < 0.000125) & !is.na(theta)))
          temp2[near0.5] <- log(theta[near0.5] / (1 - theta[near0.5]))
        temp2
        },
@@ -685,7 +690,7 @@ care.exp <- function(x,
 
 
 
- mlogit <-
+ multilogit <-
   function(theta,
            refLevel = "last",
            M = NULL,  # stop("argument 'M' not specified"),
@@ -724,7 +729,7 @@ care.exp <- function(x,
   if (is.character(theta)) {
     is.M <- is.finite(M) && is.numeric(M)
     string <- if (short)
-        paste("mlogit(", theta, ")", sep = "") else {
+        paste("multilogit(", theta, ")", sep = "") else {
          if (refLevel < 0) {
            ifelse(whitespace,
              paste("log(", theta, "[,j] / ",
@@ -826,7 +831,7 @@ care.exp <- function(x,
       care.exp(log(theta) + log1p(-theta)),
       care.exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
   }
-}  # end of mlogit
+}  # end of multilogit
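
With a character 'theta' the renamed link simply returns the new label, as
the paste() calls above show:

    multilogit("prob")  # yields the string "multilogit(prob)"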
 
 
 
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index 460df6f..a15df7a 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -38,7 +38,9 @@ logLik.vlm <- function(object,
 
 
  
-logLik.qrrvglm <- function(object, ...) {
+logLik.qrrvglm <- function(object,
+                           summation = TRUE,
+                           ...) {
 
  ff.code <- object@family
  ll.ff.code <- ff.code@loglikelihood
@@ -54,7 +56,8 @@ logLik.qrrvglm <- function(object, ...) {
                w = prior.weights,
                residuals = FALSE,
                eta = predict(object),
-               extra = object@extra)
+               extra = object@extra,
+               summation = summation)
   if (!is.numeric(loglik.try))
     loglik.try <- NULL
 
@@ -75,26 +78,26 @@ if (!isGeneric("logLik"))
 
 
 setMethod("logLik",  "vlm", function(object, ...)
-    logLik.vlm(object, ...))
+  logLik.vlm(object, ...))
 
 
 setMethod("logLik",  "vglm", function(object, ...)
-    logLik.vlm(object, ...))
+  logLik.vlm(object, ...))
 
 
 setMethod("logLik",  "vgam", function(object, ...)
-    logLik.vlm(object, ...))
+  logLik.vlm(object, ...))
 
 
 
 
 
 setMethod("logLik",  "qrrvglm", function(object, ...)
-    logLik.qrrvglm(object, ...))
+  logLik.qrrvglm(object, ...))
 
 
-setMethod("logLik",  "cao", function(object, ...)
-    logLik.qrrvglm(object, ...))
+setMethod("logLik",  "rrvgam", function(object, ...)
+  logLik.qrrvglm(object, ...))
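
A sketch of the new 'summation' argument, assuming 'fit' is a qrrvglm
object (e.g. from cqo()) whose family's loglikelihood slot supports it:

    logLik(fit)                     # a single number, as before
    logLik(fit, summation = FALSE)  # per-observation contributions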
 
 
 
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index a6839c9..e9a300c 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -301,7 +301,7 @@ attrassignlm <- function(object, ...)
 
 
  M <- object@misc$M
-  Hlist <- object@constraints # == constraints(object, type = "lm")
+  Hlist <- object@constraints  # == constraints(object, type = "lm")
  X.vlm <- lm2vlm.model.matrix(x = x, Hlist = Hlist,
                               xij = object@control$xij, Xm2 = Xm2)
 
@@ -459,13 +459,18 @@ setMethod("model.frame",  "vlm", function(formula, ...)
 
 
 
-depvar.vlm <- function(object, type = c("lm", "lm2"), ...) {
+depvar.vlm <-
+  function(object,
+           type = c("lm", "lm2"),
+           drop = FALSE,
+           ...) {
   type <- match.arg(type, c("lm", "lm2"))[1]
-  if (type == "lm") {
+  ans <- if (type == "lm") {
     object@y
   } else {
     object@Ym2
   }
+  ans[, , drop = drop]
 }
 
 
@@ -483,7 +488,7 @@ setMethod("depvar",  "rrvglm", function(object, ...)
            depvar.vlm(object, ...))
 setMethod("depvar",  "qrrvglm", function(object, ...)
            depvar.vlm(object, ...))
-setMethod("depvar",  "cao", function(object, ...)
+setMethod("depvar",  "rrvgam", function(object, ...)
            depvar.vlm(object, ...))
 setMethod("depvar",  "rcim", function(object, ...)
            depvar.vlm(object, ...))
@@ -491,12 +496,45 @@ setMethod("depvar",  "rcim", function(object, ...)
 
 
 
-npred.vlm <- function(object, ...) {
-  if (length(object@misc$M))
-    object@misc$M else
-  if (ncol(as.matrix(predict(object))) > 0)
-    ncol(as.matrix(predict(object))) else
-  stop("cannot seem to obtain 'M'")
+
+npred.vlm <- function(object,
+                      type = c("total", "one.response"),
+                      ...) {
+  if (!missing(type))
+    type <- as.character(substitute(type))
+  type.arg <- match.arg(type, c("total", "one.response"))[1]
+
+
+  ans <- 
+    if (length(object@misc$M))
+      object@misc$M else
+    if (ncol(as.matrix(predict(object))) > 0)
+      ncol(as.matrix(predict(object))) else
+    stop("cannot seem to obtain 'M'")
+
+
+  ans <-
+  if (type.arg == "one.response") {
+    ans.infos <- ans.y <- NULL
+    infos.fun <- object@family@infos
+    Ans.infos <- infos.fun()
+    if (is.list(Ans.infos) && length(Ans.infos$M1))
+      ans.infos <- Ans.infos$M1
+
+    Q1 <- Ans.infos$Q1
+    if (is.numeric(Q1)) {
+      ans.y <- ncol(depvar(object)) / Q1
+      if (is.numeric(ans.infos) && ans.infos != ans.y)
+        warning("contradiction in values after computing it two ways")
+    }
+
+
+    if (is.numeric(ans.infos)) ans.infos else
+    if (is.numeric(ans.y    )) ans.y     else
+    ans
+  } else ans
+
+  ans
 }
 
 
@@ -511,7 +549,7 @@ setMethod("npred",  "rrvglm", function(object, ...)
            npred.vlm(object, ...))
 setMethod("npred",  "qrrvglm", function(object, ...)
            npred.vlm(object, ...))
-setMethod("npred",  "cao", function(object, ...)
+setMethod("npred",  "rrvgam", function(object, ...)
            npred.vlm(object, ...))
 setMethod("npred",  "rcim", function(object, ...)
            npred.vlm(object, ...))
@@ -604,7 +642,7 @@ setMethod("hatvalues",  "rrvglm", function(model, ...)
            hatvaluesvlm(model, ...))
 setMethod("hatvalues",  "qrrvglm", function(model, ...)
            hatvaluesvlm(model, ...))
-setMethod("hatvalues",  "cao", function(model, ...)
+setMethod("hatvalues",  "rrvgam", function(model, ...)
            hatvaluesvlm(model, ...))
 setMethod("hatvalues",  "rcim", function(model, ...)
            hatvaluesvlm(model, ...))
@@ -678,7 +716,7 @@ setMethod("hatplot",  "rrvglm", function(model, ...)
            hatplot.vlm(model, ...))
 setMethod("hatplot",  "qrrvglm", function(model, ...)
            hatplot.vlm(model, ...))
-setMethod("hatplot",  "cao", function(model, ...)
+setMethod("hatplot",  "rrvgam", function(model, ...)
            hatplot.vlm(model, ...))
 setMethod("hatplot",  "rcim", function(model, ...)
            hatplot.vlm(model, ...))
@@ -705,7 +743,6 @@ dfbetavlm <-
   X.vlm <- model.matrix(model, type = "vlm")
   p.vlm <- ncol(X.vlm)  # nvar(model, type = "vlm")
   M    <- npred(model)
-  wz <- weights(model, type = "work")  # zz unused!!!!!!!
   etastart <- predict(model)
  offset <- matrix(model@offset, n.lm, M)
  new.control <- model@control
@@ -714,6 +751,7 @@ dfbetavlm <-
              model@extra$orig.w else 1
  y.integer <- if (is.logical(model@extra$y.integer))
                 model@extra$y.integer else FALSE
+  coef.model <- coef(model)
 
 
   new.control$trace <- trace.new
@@ -736,7 +774,7 @@ dfbetavlm <-
     w.orig <- if (length(orig.w) != n.lm)
                 rep(orig.w, length.out = n.lm) else
                 orig.w
-    w.orig[ii] <- w.orig[ii] * smallno # Relative
+    w.orig[ii] <- w.orig[ii] * smallno  # Relative
 
     fit <- vglm.fit(x = X.lm,
                     X.vlm.arg = X.vlm,  # Should be more efficient
@@ -756,11 +794,11 @@ dfbetavlm <-
                     Terms = Terms.zz,
                     function.name = "vglm")
 
-    dfbeta[ii, ] <- fit$coeff
+    dfbeta[ii, ] <- coef.model - fit$coeff
   }
 
 
-  dimnames(dfbeta) <- list(rownames(X.lm), names(coef(model)))
+  dimnames(dfbeta) <- list(rownames(X.lm), names(coef.model))
   dfbeta
 }
 
@@ -783,7 +821,7 @@ setMethod("dfbeta",  "rrvglm", function(model, ...)
            dfbetavlm(model, ...))
 setMethod("dfbeta",  "qrrvglm", function(model, ...)
            dfbetavlm(model, ...))
-setMethod("dfbeta",  "cao", function(model, ...)
+setMethod("dfbeta",  "rrvgam", function(model, ...)
            dfbetavlm(model, ...))
 setMethod("dfbeta",  "rcim", function(model, ...)
            dfbetavlm(model, ...))
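
A sketch of the new 'type' argument of npred() above, assuming 'fit' is a
vglm() object whose family's infos() slot supplies M1:

    npred(fit)                         # M, the total number of linear predictors
    npred(fit, type = "one.response")  # M1, the number for one response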
diff --git a/R/nobs.R b/R/nobs.R
index 7bf74eb..0b7dca3 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -115,14 +115,14 @@ nvar.qrrvglm <- function(object, type = c("qrrvglm", "zz"), ...) {
 
 
 
-nvar.cao <- function(object, type = c("cao", "zz"), ...) {
+nvar.rrvgam <- function(object, type = c("cao", "zz"), ...) {
 
   if (mode(type) != "character" && mode(type) != "name")
     type <- as.character(substitute(type))
  type <- match.arg(type,
                    c("cao", "zz"))[1]
 
-  stop("function nvar.cao() has not been written yet")
+  stop("function nvar.rrvgam() has not been written yet")
 
   if (type == "cao") {
    object@misc$p
@@ -182,9 +182,9 @@ setMethod("nvar", "qrrvglm",
 
 
 
-setMethod("nvar", "cao",
+setMethod("nvar", "rrvgam",
          function(object, ...)
-         nvar.cao(object, ...))
+         nvar.rrvgam(object, ...))
 
 
 
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index 5085ebe..bfdd684 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -15,6 +15,7 @@
 
 
 plotvgam <-
+plot.vgam <-
   function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRUE,
            se = FALSE, scale = 0, 
            raw = TRUE, offset.arg = 0, deriv.arg = 0, overlay = FALSE,
@@ -225,10 +226,11 @@ getallresponses <- function(xij) {
 
 
 
-preplotvgam <- function(object, newdata = NULL,
-                terms = attr((object@terms)$terms, "term.labels"),
-                raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
-                varxij = 1) {
+preplotvgam <-
+  function(object, newdata = NULL,
+           terms = attr((object@terms)$terms, "term.labels"),
+           raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
+           varxij = 1) {
 
   result1 <- headpreplotvgam(object, newdata = newdata, terms = terms,
                              raw = raw, deriv.arg = deriv.arg, se = se,
@@ -276,7 +278,7 @@ preplotvgam <- function(object, newdata = NULL,
               paste("partial for", term) else term
 
     TT <- list(x = .VGAM.x,
-               y = fits[,(if (is.null(fred)) term else fred[[term]])],
+               y = fits[, (if (is.null(fred)) term else fred[[term]])],
                se.y = if (is.null(se.fit)) NULL else
                      se.fit[, (if (is.null(fred)) term else fred[[term]])],
                xlab = xnames[[term]],
@@ -325,16 +327,16 @@ plotvglm <-
 
 
   invisible(
-  plotvgam(x = x, newdata = newdata, y = y,
-           residuals = residuals, rugplot = rugplot,
-           se = se, scale = scale, 
-           raw = raw, offset.arg = offset.arg,
-           deriv.arg = deriv.arg, overlay = overlay,
-           type.residuals = type.residuals,
-           plot.arg = plot.arg,
-           which.term = which.term, which.cf = which.cf,
-           control = control,
-           varxij = varxij, ...)
+  plot.vgam(x = x, newdata = newdata, y = y,
+            residuals = residuals, rugplot = rugplot,
+            se = se, scale = scale, 
+            raw = raw, offset.arg = offset.arg,
+            deriv.arg = deriv.arg, overlay = overlay,
+            type.residuals = type.residuals,
+            plot.arg = plot.arg,
+            which.term = which.term, which.cf = which.cf,
+            control = control,
+            varxij = varxij, ...)
     )
 }
 
@@ -555,33 +557,44 @@ vplot.numeric <-
        (se && !is.null(se.y) && !noxmean &&
         all(substring(ylab, 1:nchar(ylab), 1:nchar(ylab)) != "("))) {
       x <- c(x, mean(x))
-      y <- rbind(y, 0 * y[1,])
-      se.y <- rbind(se.y, 0 * se.y[1,])
+      y <- rbind(y, 0 * y[1, ])
+      se.y <- rbind(se.y, 0 * se.y[1, ])
       if (!is.null(residuals))
-        residuals <- rbind(residuals, NA*residuals[1,])  # NAs not plotted
+        residuals <- rbind(residuals, NA*residuals[1, ])  # NAs not plotted
     }
 
     ux <- unique(sort(x))
     ooo <- match(ux, x)
     uy <- y[ooo, , drop = FALSE]
-    xlim <- range(xlim, ux)
-    ylim <- range(ylim, uy[, which.cf], na.rm = TRUE)
+
+
+    xlim.orig <- xlim
+    ylim.orig <- ylim
+    xlim <- range(if (length(xlim)) NULL else ux, xlim, na.rm = TRUE)
+    ylim <- range(if (length(ylim)) NULL else uy[, which.cf],
+                  ylim, na.rm = TRUE)
+
+
     if (rugplot) {
       usex <- if (xmeanAdded) x[-length(x)] else x
       jx <- jitter(usex[!is.na(usex)])
-      xlim <- range(c(xlim, jx))
+      xlim <- range(if (length(xlim.orig)) NULL else jx,
+                    xlim.orig, na.rm = TRUE)
     }
 
     if (se && !is.null(se.y)) {
       se.upper <- uy + 2 * se.y[ooo, , drop = FALSE]
       se.lower <- uy - 2 * se.y[ooo, , drop = FALSE]
-      ylim <- range(c(ylim, se.upper[, which.cf], se.lower[, which.cf]))
+
+      ylim <- if (length(ylim.orig)) range(ylim.orig) else
+              range(c(ylim, se.upper[, which.cf], se.lower[, which.cf]))
     }
 
     if (!is.null(residuals)) {
       if (length(residuals) == length(y)) {
         residuals <- as.matrix(y + residuals)
-        ylim <- range(c(ylim, residuals[, which.cf]), na.rm = TRUE)
+        ylim <- if (length(ylim.orig)) range(ylim.orig) else
+                range(c(ylim, residuals[, which.cf]), na.rm = TRUE)
       } else {
         residuals <- NULL
         warning("Residuals do not match 'x' in \"", ylab, 
@@ -595,25 +608,27 @@ vplot.numeric <-
   if (all.missingy)
     return()
 
-  ylim <- ylim.scale(ylim, scale)
+  if (!length(ylim.orig))
+    ylim <- ylim.scale(ylim, scale)
 
   if (overlay) {
     if (!length(which.cf))
       which.cf <- 1:ncol(uy)  # Added 20040807
     if (!add.arg) {
-      matplot(ux, uy[,which.cf], type = "n", 
+      matplot(ux, uy[, which.cf], type = "n", 
               xlim = xlim, ylim = ylim, 
               xlab = xlab, ylab = ylab, ...) 
     }
-    matlines(ux, uy[,which.cf],
+    matlines(ux, uy[, which.cf],
              lwd = llwd, col = lcol, lty = llty)
-    if (!is.null(residuals))
+    if (!is.null(residuals)) {
       if (ncol(y) == 1) {
-        points(x, residuals, pch = pch, col = pcol, cex = pcex) 
+        points(x, residuals, pch = pch, col = pcol, cex = pcex)
       } else {
-        matpoints(x, residuals[,which.cf],
+        matpoints(x, residuals[, which.cf],
                   pch = pch, col = pcol, cex = pcex)  # add.arg = TRUE,
       }
+    }
     if (rugplot)
       rug(jx, col = rcol)
     if (se && !is.null(se.y)) {
@@ -649,7 +664,7 @@ vplot.numeric <-
               ylim <- range(c(ylim, residuals[, ii]), na.rm = TRUE)
             ylim <- ylim.scale(ylim, scale)
           }
-          if (ncol(uy)>1 && length(separator))
+          if (ncol(uy) > 1 && length(separator))
             YLAB <- paste(ylab, separator, ii, sep = "")  
             if (!add.arg) {
               if (one.at.a.time) {
@@ -898,7 +913,7 @@ setMethod("plot", "vgam",
            function(x, y, ...) {
            if (!missing(y))
              stop("cannot process the 'y' argument")
-           invisible(plotvgam(x = x, y = y, ...))})
+           invisible(plot.vgam(x = x, y = y, ...))})
 
 
 
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 634aaa8..8a882c5 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -6,14 +6,15 @@
 
 
 
-predictvglm <- function(object,
-                        newdata = NULL,
-                        type = c("link", "response", "terms"),
-                        se.fit = FALSE,
-                        deriv = 0,
-                        dispersion = NULL,
-                        untransform = FALSE,
-                        extra = object@extra, ...) {
+predictvglm <-
+  function(object,
+           newdata = NULL,
+           type = c("link", "response", "terms"),  # "parameters",
+           se.fit = FALSE,
+           deriv = 0,
+           dispersion = NULL,
+           untransform = FALSE,
+           extra = object@extra, ...) {
  na.act <- object@na.action
  object@na.action <- list()
 
@@ -27,14 +28,16 @@ predictvglm <- function(object,
     type <- as.character(substitute(type))
   type <- match.arg(type, c("link", "response", "terms"))[1]
 
-  if (untransform && (type!="link" || se.fit || deriv != 0))
+  if (untransform &&
+     (type == "response" || type == "terms" || se.fit || deriv != 0))
     stop("argument 'untransform=TRUE' only if 'type=\"link\", ",
          "se.fit = FALSE, deriv=0'")
 
 
 
 
-  pred <- if (se.fit) {
+  pred <-
+    if (se.fit) {
       switch(type,
              response = {
                warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
@@ -45,8 +48,16 @@ predictvglm <- function(object,
                                         deriv = deriv, 
                                         dispersion = dispersion, ...) 
               fv <- object@family@linkinv(predictor, extra)
-               dimnames(fv) <- list(dimnames(fv)[[1]],
-                                    dimnames(object@fitted.values)[[2]])
+
+
+               fv <- as.matrix(fv)
+               dn1 <- dimnames(fv)[[1]]
+               dn2 <- dimnames(object@fitted.values)[[2]]
+               if (nrow(fv) == length(dn1) &&
+                   ncol(fv) == length(dn2))
+                 dimnames(fv) <- list(dn1, dn2)
+
+
                fv
              },
              link = {
@@ -89,22 +100,21 @@ predictvglm <- function(object,
 
               fv <- object@family@linkinv(predictor, extra)
               if (M > 1 && is.matrix(fv)) {
-                 dimnames(fv) <- list(dimnames(fv)[[1]],
-                                      dimnames(object@fitted.values)[[2]])
+
+               fv <- as.matrix(fv)
+               dn1 <- dimnames(fv)[[1]]
+               dn2 <- dimnames(object@fitted.values)[[2]]
+               if (nrow(fv) == length(dn1) &&
+                   ncol(fv) == length(dn2))
+                 dimnames(fv) <- list(dn1, dn2)
                } else {
                }
                  fv
                },
                link = {
-
-
-
                  predict.vlm(object, newdata = newdata,
                              type = "response", se.fit = se.fit,
                              deriv = deriv, dispersion = dispersion, ...)
-
-
-
                },
                terms = {
                  predict.vlm(object, newdata = newdata,
@@ -124,7 +134,9 @@ predictvglm <- function(object,
   }
   
   if (untransform) untransformVGAM(object, pred) else pred
-}
+} # predictvglm
+
+
 
 
 setMethod("predict", "vglm", function(object, ...) 
@@ -156,8 +168,16 @@ predict.rrvglm <- function(object,
                                         deriv = deriv, 
                                         dispersion = dispersion, ...) 
             fv <- object@family@linkinv(predictor, extra)
-             dimnames(fv) <- list(dimnames(fv)[[1]],
-                                  dimnames(object@fitted.values)[[2]])
+
+
+             fv <- as.matrix(fv)
+             dn1 <- dimnames(fv)[[1]]
+             dn2 <- dimnames(object@fitted.values)[[2]]
+             if (nrow(fv) == length(dn1) &&
+                 ncol(fv) == length(dn2))
+               dimnames(fv) <- list(dn1, dn2)
+
+
              fv
            },
            link = {
@@ -200,6 +220,8 @@ setMethod("predict", "rrvglm", function(object, ...)
 
 
 untransformVGAM <- function(object, pred) {
+ 
+
  M <- object@misc$M
  Links <- object@misc$link
   if (length(Links) != M && length(Links) != 1)
@@ -212,7 +234,6 @@ untransformVGAM <- function(object, pred) {
 
 
 
-
  LINK <- object@misc$link  # link.names # This should be a character vector.
  EARG <- object@misc$earg  # This could be NULL
   if (is.null(EARG))
@@ -255,16 +276,13 @@ untransformVGAM <- function(object, pred) {
     TTheta <- pred[, ii]  # Transformed theta
 
 
-    use.earg      <-
-      if (llink == 1) EARG[[1]] else EARG[[ii]]
-   function.name <-
-      if (llink == 1) LINK else LINK[ii]
-
+    use.earg <- if (llink == 1) EARG[[1]] else EARG[[ii]]
+    function.name <- if (llink == 1) LINK else LINK[ii]
 
-      use.earg[["inverse"]] <- TRUE  # New
-      use.earg[["theta"]] <- TTheta  # New
-      Theta <- do.call(function.name, use.earg)
 
+    use.earg[["inverse"]] <- TRUE  # New
+    use.earg[["theta"]] <- TTheta  # New
+    Theta <- do.call(function.name, use.earg)
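
A sketch of the tightened 'untransform' check, assuming 'fit' is a vglm()
object; untransform = TRUE is now accepted only with type = "link",
se.fit = FALSE and deriv = 0:

    predict(fit)                      # eta, on the link (transformed) scale
    predict(fit, untransform = TRUE)  # back on the original parameter scale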
 
 
 
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index f6cc5cc..9b9721c 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -178,7 +178,7 @@ qrrvglm.control <- function(Rank = 1,
            Parscale = Parscale,
            Quadratic = TRUE,
            Rank = Rank,
-           save.weight = FALSE,
+           save.weights = FALSE,
            sd.Cinit = sd.Cinit,
            SmallNo = SmallNo,
            str0 = NULL,
diff --git a/R/qtplot.q b/R/qtplot.q
index ed13441..2ffb996 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -319,7 +319,7 @@ qtplot.gumbel <-
 
 
   if (is.Numeric(R))
-    R <- rep(R, length=nrow(eta))
+    R <- rep(R, length = nrow(eta))
 
   if (!is.Numeric(percentiles))
     stop("the 'percentiles' argument needs to be assigned a value")
@@ -332,7 +332,7 @@ qtplot.gumbel <-
  fitted.values <- object@family@linkinv(eta = eta, extra = extra)
 
   answer <- list(fitted.values = fitted.values,
-                 percentiles = percentiles)
+                 percentiles   = percentiles)
 
   if (!show.plot)
     return(answer)
@@ -380,7 +380,8 @@ qtplot.gumbel <-
       if (label) {
         mylabel <- (dimnames(answer$fitted)[[2]])[ii]
         text(par()$usr[2], temp[nrow(temp), 2],
-             mylabel, adj = tadj, col = tcol.arg[ii], err = -1)
+             mylabel, adj = tadj, col = tcol.arg[ii], err = -1,
+             cex = par()$cex.axis, xpd = par()$xpd)
     }
   }
 
@@ -697,20 +698,18 @@ rlplot.gev <-
   function(object, show.plot = TRUE,
            probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
            add.arg = FALSE,
-           xlab = "Return Period",
+           xlab = if (log.arg) "Return Period (log-scale)" else "Return Period",
            ylab = "Return Level",
            main = "Return Level Plot",
            pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
            llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
            slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
            ylim = NULL,
-           log = TRUE,
+           log.arg = TRUE,
            CI = TRUE,
            epsilon = 1.0e-05,
            ...) {
 
-  log.arg <- log
-  rm(log)
   if (!is.Numeric(epsilon, length.arg = 1) ||
       abs(epsilon) > 0.10)
     stop("bad input for 'epsilon'")
@@ -740,7 +739,11 @@ rlplot.gev <-
       plot(log(1/yp), zp, log = "", type = "n",
            ylim = if (length(ylim)) ylim else
                 c(min(c(ydata, zp)), max(c(ydata, zp))),
-           xlab = xlab, ylab = ylab, main = main, ...)
+           xlab = xlab, ylab = ylab, main = main, 
+           cex.axis = par()$cex.axis, 
+           cex.main = par()$cex.main, 
+           cex.lab  = par()$cex.lab, 
+           ...)
     points(log(-1/log((1:n)/(n+1))), ydata, col = pcol.arg,
            pch = pch, cex = pcex)
     lines(log(1/yp), zp,
@@ -751,7 +754,11 @@ rlplot.gev <-
            ylim = if (length(ylim)) ylim else
                   c(min(c(ydata, zp)),
                     max(c(ydata, zp))),
-           xlab = xlab, ylab = ylab, main = main, ...)
+           xlab = xlab, ylab = ylab, main = main,
+           cex.axis = par()$cex.axis, 
+           cex.main = par()$cex.main, 
+           cex.lab  = par()$cex.lab, 
+           ...)
     points(-1/log((1:n)/(n+1)), ydata, col = pcol.arg,
            pch = pch, cex = pcex)
     lines(1/yp, zp, lwd = llwd.arg, col = lcol.arg, lty = llty.arg)
@@ -767,13 +774,19 @@ rlplot.gev <-
       TTheta <- eta[, ii]
       use.earg <- earg[[ii]]
       newcall <- paste(Links[ii],
-                "(theta = TTheta, earg = use.earg, inverse = TRUE)",
+                "(theta = TTheta, ",
+                "  inverse = TRUE)",
                  sep = "")
+
+
+
       newcall <- parse(text = newcall)[[1]]
       uteta <- eval(newcall)  # Theta, the untransformed parameter
       uteta <- uteta + epsilon  # Perturb it
       newcall <- paste(Links[ii],
-                       "(theta = uteta, earg = use.earg)", sep = "")
+                       "(theta = uteta",
+                       ")",
+                       sep = "")
       newcall <- parse(text = newcall)[[1]]
       teta <- eval(newcall)  # The transformed parameter
       peta <- eta
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index 81cfb34..cc187c4 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -14,10 +14,10 @@ rrvglm.control <-
            Uncorrelated.latvar = FALSE,
            Wmat = NULL,
            Svd.arg = FALSE,
-           Index.corner = if (length(str0)) 
+           Index.corner = if (length(str0))
            head((1:1000)[-str0], Rank) else 1:Rank,
            Ainit = NULL,
-           Alpha = 0.5, 
+           Alpha = 0.5,
            Bestof = 1,
            Cinit = NULL,
            Etamat.colmax = 10,
@@ -25,7 +25,7 @@ rrvglm.control <-
            sd.Cinit = 0.02,
            str0 = NULL,
 
-           noRRR = ~ 1, 
+           noRRR = ~ 1,
            Norrr = NA,
 
            noWarning = FALSE,
@@ -34,6 +34,7 @@ rrvglm.control <-
            Use.Init.Poisson.QO = FALSE,
            checkwz = TRUE,
            Check.rank = TRUE,
+           Check.cm.rank = TRUE,
            wzepsilon = .Machine$double.eps^0.75,
            ...) {
 
@@ -120,13 +121,14 @@ rrvglm.control <-
                  trace = trace,
                  checkwz = checkwz,
                  Check.rank = Check.rank,
+                 Check.cm.rank = Check.cm.rank,
                  wzepsilon = wzepsilon,
                  noWarning = noWarning,
                  ...),
 
     switch(Algorithm,
            "alternating" = valt.control(...),
-           "derivative" = rrvglm.optim.control(...)),
+           "derivative"  = rrvglm.optim.control(...)),
     list(Rank = Rank,
          Ainit = Ainit,
          Algorithm = Algorithm,
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index d225f3e..0ebb583 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -112,7 +112,7 @@ rrvglm.fit <-
 
 
    special.matrix <- matrix(-34956.125, M, M)  # A matrix unlikely to be used
-    just.testing <- cm.vgam(special.matrix, x, rrcontrol$noRRR,
+    just.testing <- cm.VGAM(special.matrix, x, rrcontrol$noRRR,
                             constraints)
 
     findex <- trivial.constraints(just.testing, special.matrix)
diff --git a/R/smart.R b/R/smart.R
index 5622e64..acda626 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -50,7 +50,7 @@ smart.mode.is <- function(mode.arg = NULL) {
 
 setup.smart <- function(mode.arg, smart.prediction = NULL,
                         max.smart = 30) {
-  actual <- if (mode.arg == "write") vector("list", max.smart) else 
+  actual <- if (mode.arg == "write") vector("list", max.smart) else
             if (mode.arg == "read") smart.prediction else
             stop("value of 'mode.arg' unrecognized")
 
@@ -67,6 +67,7 @@ setup.smart <- function(mode.arg, smart.prediction = NULL,
   }
 }
 
+
 wrapup.smart <- function() {
   if (exists(".smart.prediction", envir = smartpredenv))
     rm(".smart.prediction", envir = smartpredenv)
@@ -86,7 +87,6 @@ get.smart.prediction <- function() {
   max.smart <- get(".max.smart", envir = smartpredenv)
 
   if (smart.prediction.counter > 0) {
-    # Save this on the object for smart prediction later
     smart.prediction <- get(".smart.prediction", envir = smartpredenv)
     if (max.smart >= (smart.prediction.counter + 1))
       for(i in max.smart:(smart.prediction.counter + 1))
@@ -108,8 +108,7 @@ put.smart <- function(smart) {
   smart.prediction.counter <- smart.prediction.counter + 1
 
   if (smart.prediction.counter > max.smart) {
-    # if list is too small, make it larger
-    max.smart <- max.smart + (inc.smart <- 10) # can change inc.smart
+    max.smart <- max.smart + (inc.smart <- 10)  # can change inc.smart
     smart.prediction <- c(smart.prediction, vector("list", inc.smart))
     assign(".max.smart", max.smart, envir = smartpredenv)
   }
@@ -122,7 +121,6 @@ put.smart <- function(smart) {
 
 
 get.smart <- function() {
-  # Returns one list component of information
   smart.prediction <- get(".smart.prediction", envir = smartpredenv)
   smart.prediction.counter <- get(".smart.prediction.counter",
                                   envir = smartpredenv)
@@ -133,6 +131,7 @@ get.smart <- function() {
   smart
 }
 
+
 smart.expression <- expression({
 
 
@@ -150,6 +149,7 @@ smart.expression <- expression({
 
 
 
+
 is.smart <- function(object) {
   if (is.function(object)) {
     if (is.logical(a <- attr(object, "smart"))) a else FALSE
@@ -179,93 +179,87 @@ is.smart <- function(object) {
 
 
 
-
  sm.bs <-
-function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE, 
-    Boundary.knots = range(x)) {
-    x <- x  # Evaluate x
-    if (smart.mode.is("read")) {
-        return(eval(smart.expression))
-    }
+  function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE, 
+            Boundary.knots = range(x)) {
+  x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
+  if (smart.mode.is("read")) {
+    return(eval(smart.expression))
+  }
 
-    nx <- names(x)
-    x <- as.vector(x)
-    nax <- is.na(x)
-    if (nas <- any(nax)) 
-        x <- x[!nax]
-    if (!missing(Boundary.knots)) {
-        Boundary.knots <- sort(Boundary.knots)
-        outside <- (ol <- x < Boundary.knots[1]) | (or <- x > 
-            Boundary.knots[2L])
-    } else outside <- FALSE
-    ord <- 1 + (degree <- as.integer(degree))
-    if (ord <= 1) 
-        stop("'degree' must be integer >= 1")
-    if (!missing(df) && missing(knots)) {
-        nIknots <- df - ord + (1 - intercept)
-        if (nIknots < 0) {
-            nIknots <- 0
-            warning("'df' was too small; have used  ", ord - 
-                (1 - intercept))
-        }
-        knots <- if (nIknots > 0) {
-            knots <- seq(from = 0, to = 1, length = nIknots + 
-                2)[-c(1, nIknots + 2)]
-            stats::quantile(x[!outside], knots)
-        }
+  nx <- names(x)
+  x <- as.vector(x)
+  nax <- is.na(x)
+  if (nas <- any(nax)) 
+    x <- x[!nax]
+  if (!missing(Boundary.knots)) {
+    Boundary.knots <- sort(Boundary.knots)
+    outside <- (ol <- x < Boundary.knots[1]) | (or <- x > 
+        Boundary.knots[2L])
+  } else outside <- FALSE
+  ord <- 1 + (degree <- as.integer(degree))
+  if (ord <= 1) 
+    stop("'degree' must be integer >= 1")
+  if (!missing(df) && missing(knots)) {
+    nIknots <- df - ord + (1 - intercept)
+    if (nIknots < 0) {
+      nIknots <- 0
+      warning("'df' was too small; have used ", ord - (1 - intercept))
     }
-    Aknots <- sort(c(rep(Boundary.knots, ord), knots))
-    if (any(outside)) {
-        warning("some 'x' values beyond boundary knots may ",
-                "cause ill-conditioned bases")
-        derivs <- 0:degree
-        scalef <- gamma(1L:ord)
-        basis <- array(0, c(length(x), length(Aknots) - degree - 
-            1L))
-        if (any(ol)) {
-            k.pivot <- Boundary.knots[1L]
-            xl <- cbind(1, outer(x[ol] - k.pivot, 1L:degree, "^"))
-            tt <- spline.des(Aknots, rep(k.pivot, ord), ord, 
-                derivs)$design
-            basis[ol, ] <- xl %*% (tt/scalef)
-        }
-        if (any(or)) {
-            k.pivot <- Boundary.knots[2L]
-            xr <- cbind(1, outer(x[or] - k.pivot, 1L:degree, "^"))
-            tt <- spline.des(Aknots, rep(k.pivot, ord), ord, 
-                derivs)$design
-            basis[or, ] <- xr %*% (tt/scalef)
-        }
-        if (any(inside <- !outside)) 
-            basis[inside, ] <- spline.des(Aknots, x[inside], 
-                ord)$design
-    } else basis <- spline.des(Aknots, x, ord)$design
-    if (!intercept) 
-        basis <- basis[, -1L, drop = FALSE]
-    n.col <- ncol(basis)
-    if (nas) {
-        nmat <- matrix(NA, length(nax), n.col)
-        nmat[!nax, ] <- basis
-        basis <- nmat
+    knots <- if (nIknots > 0) {
+      knots <- seq(from = 0, to = 1, length = nIknots +
+          2)[-c(1, nIknots + 2)]
+      stats::quantile(x[!outside], knots)
     }
-    dimnames(basis) <- list(nx, 1L:n.col)
-    a <- list(degree = degree,
-        knots = if (is.null(knots)) numeric(0L) else knots, 
-        Boundary.knots = Boundary.knots,
-        intercept = intercept,
-        Aknots = Aknots)
-    attributes(basis) <- c(attributes(basis), a)
-    class(basis) <- c("bs", "basis", "matrix")
-
-    if (smart.mode.is("write"))
-        put.smart(list(df = df,
-                       knots = knots,
-                       degree = degree,
-                       intercept = intercept,
-                       Boundary.knots = Boundary.knots,
-                       match.call = match.call()))
-
-    basis
+  }
+  Aknots <- sort(c(rep(Boundary.knots, ord), knots))
+  if (any(outside)) {
+    warning("some 'x' values beyond boundary knots may ",
+            "cause ill-conditioned bases")
+    derivs <- 0:degree
+    scalef <- gamma(1L:ord)
+    basis <- array(0, c(length(x), length(Aknots) - degree - 1L))
+      if (any(ol)) {
+        k.pivot <- Boundary.knots[1L]
+        xl <- cbind(1, outer(x[ol] - k.pivot, 1L:degree, "^"))
+        tt <- splines::splineDesign(Aknots, rep(k.pivot, ord), ord, derivs)
+        basis[ol, ] <- xl %*% (tt/scalef)
+      }
+      if (any(or)) {
+        k.pivot <- Boundary.knots[2L]
+        xr <- cbind(1, outer(x[or] - k.pivot, 1L:degree, "^"))
+        tt <- splines::splineDesign(Aknots, rep(k.pivot, ord), ord, derivs)
+        basis[or, ] <- xr %*% (tt/scalef)
+      }
+      if (any(inside <- !outside)) 
+        basis[inside, ] <- splines::splineDesign(Aknots, x[inside], ord)
+  } else basis <- splines::splineDesign(Aknots, x, ord)
+  if (!intercept) 
+    basis <- basis[, -1L, drop = FALSE]
+  n.col <- ncol(basis)
+  if (nas) {
+    nmat <- matrix(NA, length(nax), n.col)
+    nmat[!nax, ] <- basis
+    basis <- nmat
+  }
+  dimnames(basis) <- list(nx, 1L:n.col)
+  a <- list(degree = degree,
+            knots = if (is.null(knots)) numeric(0L) else knots, 
+            Boundary.knots = Boundary.knots,
+            intercept = intercept,
+            Aknots = Aknots)
+  attributes(basis) <- c(attributes(basis), a)
+  class(basis) <- c("bs", "basis", "matrix")
+
+  if (smart.mode.is("write"))
+    put.smart(list(df = df,
+                   knots = knots,
+                   degree = degree,
+                   intercept = intercept,
+                   Boundary.knots = Boundary.knots,
+                   match.call = match.call()))
+
+  basis
 }
 attr( sm.bs, "smart") <- TRUE
 
@@ -277,84 +271,81 @@ attr( sm.bs, "smart") <- TRUE
  sm.ns <-
   function (x, df = NULL, knots = NULL, intercept = FALSE,
             Boundary.knots = range(x)) {
-    x <- x  # Evaluate x
-    if (smart.mode.is("read")) {
-        return(eval(smart.expression))
-    }
+  x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
+  if (smart.mode.is("read")) {
+    return(eval(smart.expression))
+  }
 
-    nx <- names(x)
-    x <- as.vector(x)
-    nax <- is.na(x)
-    if (nas <- any(nax)) 
-        x <- x[!nax]
-    if (!missing(Boundary.knots)) {
-        Boundary.knots <- sort(Boundary.knots)
-        outside <- (ol <- x < Boundary.knots[1L]) | (or <- x > 
-            Boundary.knots[2L])
-    } else outside <- FALSE
-    if (!missing(df) && missing(knots)) {
-        nIknots <- df - 1 - intercept
-        if (nIknots < 0) {
-            nIknots <- 0
-            warning("'df' was too small; have used ", 1 + intercept)
-        }
-        knots <- if (nIknots > 0) {
-            knots <- seq.int(0, 1, length.out = nIknots + 2L)[-c(1L, nIknots +
-                2L)]
-            stats::quantile(x[!outside], knots)
-        }
-    } else nIknots <- length(knots)
-    Aknots <- sort(c(rep(Boundary.knots, 4), knots))
-    if (any(outside)) {
-        basis <- array(0, c(length(x), nIknots + 4L))
-        if (any(ol)) {
-            k.pivot <- Boundary.knots[1L]
-            xl <- cbind(1, x[ol] - k.pivot)
-            tt <- spline.des(Aknots, rep(k.pivot, 2L), 4, c(0, 
-                1))$design
-            basis[ol, ] <- xl %*% tt
-        }
-        if (any(or)) {
-            k.pivot <- Boundary.knots[2L]
-            xr <- cbind(1, x[or] - k.pivot)
-            tt <- spline.des(Aknots, rep(k.pivot, 2L), 4, c(0, 
-                1))$design
-            basis[or, ] <- xr %*% tt
-        }
-        if (any(inside <- !outside)) 
-            basis[inside, ] <- spline.des(Aknots, x[inside], 4)$design
-    } else basis <- spline.des(Aknots, x, 4)$design
-    const <- spline.des(Aknots, Boundary.knots, 4, c(2, 2))$design
-    if (!intercept) {
-        const <- const[, -1, drop = FALSE]
-        basis <- basis[, -1, drop = FALSE]
+  nx <- names(x)
+  x <- as.vector(x)
+  nax <- is.na(x)
+  if (nas <- any(nax)) 
+    x <- x[!nax]
+  if (!missing(Boundary.knots)) {
+    Boundary.knots <- sort(Boundary.knots)
+    outside <- (ol <- x < Boundary.knots[1L]) | (or <- x > 
+        Boundary.knots[2L])
+  } else outside <- FALSE
+  if (!missing(df) && missing(knots)) {
+    nIknots <- df - 1 - intercept
+    if (nIknots < 0) {
+      nIknots <- 0
+      warning("'df' was too small; have used ", 1 + intercept)
+    }
+    knots <- if (nIknots > 0) {
+      knots <- seq.int(0, 1, length.out = nIknots + 2L)[-c(1L, nIknots + 2L)]
+      stats::quantile(x[!outside], knots)
     }
-    qr.const <- qr(t(const))
-    basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1L:2L),
-        drop = FALSE])
-    n.col <- ncol(basis)
-    if (nas) {
-        nmat <- matrix(NA, length(nax), n.col)
-        nmat[!nax, ] <- basis
-        basis <- nmat
+  } else nIknots <- length(knots)
+  Aknots <- sort(c(rep(Boundary.knots, 4), knots))
+  if (any(outside)) {
+    basis <- array(0, c(length(x), nIknots + 4L))
+    if (any(ol)) {
+      k.pivot <- Boundary.knots[1L]
+      xl <- cbind(1, x[ol] - k.pivot)
+      tt <- splines::splineDesign(Aknots, rep(k.pivot, 2L), 4, c(0, 1))
+      basis[ol, ] <- xl %*% tt
     }
-    dimnames(basis) <- list(nx, 1L:n.col)
-    a <- list(degree = 3,
-              knots = if (is.null(knots)) numeric(0) else knots, 
-              Boundary.knots = Boundary.knots,
-              intercept = intercept,
-              Aknots = Aknots)
-    attributes(basis) <- c(attributes(basis), a)
-    class(basis) <- c("ns", "basis", "matrix")
-
-    if (smart.mode.is("write"))
-        put.smart(list(df = df,
-                       knots = knots,
-                       intercept = intercept,
-                       Boundary.knots = Boundary.knots,
-                       match.call = match.call()))
-
-    basis
+    if (any(or)) {
+        k.pivot <- Boundary.knots[2L]
+        xr <- cbind(1, x[or] - k.pivot)
+        tt <- splines::splineDesign(Aknots, rep(k.pivot, 2L), 4, c(0, 1))
+        basis[or, ] <- xr %*% tt
+      }
+      if (any(inside <- !outside)) 
+        basis[inside, ] <- splines::splineDesign(Aknots, x[inside], 4)
+    } else basis <- splines::splineDesign(Aknots, x, 4)
+  const <- splines::splineDesign(Aknots, Boundary.knots, 4, c(2, 2))
+  if (!intercept) {
+    const <- const[, -1, drop = FALSE]
+    basis <- basis[, -1, drop = FALSE]
+  }
+  qr.const <- qr(t(const))
+  basis <- as.matrix((t(qr.qty(qr.const, t(basis))))[, -(1L:2L),
+      drop = FALSE])
+  n.col <- ncol(basis)
+  if (nas) {
+    nmat <- matrix(NA, length(nax), n.col)
+    nmat[!nax, ] <- basis
+    basis <- nmat
+  }
+  dimnames(basis) <- list(nx, 1L:n.col)
+  a <- list(degree = 3,
+            knots = if (is.null(knots)) numeric(0) else knots, 
+            Boundary.knots = Boundary.knots,
+            intercept = intercept,
+            Aknots = Aknots)
+  attributes(basis) <- c(attributes(basis), a)
+  class(basis) <- c("ns", "basis", "matrix")
+
+  if (smart.mode.is("write"))
+    put.smart(list(df = df,
+                   knots = knots,
+                   intercept = intercept,
+                   Boundary.knots = Boundary.knots,
+                   match.call = match.call()))
+
+  basis
 }
 attr( sm.ns, "smart") <- TRUE
 
@@ -367,30 +358,29 @@ attr( sm.ns, "smart") <- TRUE
 
  sm.poly <-
   function (x, ..., degree = 1, coefs = NULL, raw = FALSE) {
-    x <- x  # Evaluate x
+    x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
     if (!raw && smart.mode.is("read")) {
-        smart <- get.smart()
-        degree <- smart$degree
-        coefs  <- smart$coefs
-        raw  <- smart$raw
+      smart <- get.smart()
+      degree <- smart$degree
+      coefs  <- smart$coefs
+      raw  <- smart$raw
     }
 
     dots <- list(...)
     if (nd <- length(dots)) {
-        if (nd == 1 && length(dots[[1]]) == 1L)
-            degree <- dots[[1L]] else
-        return(polym(x, ..., degree = degree, raw = raw))
+      if (nd == 1 && length(dots[[1]]) == 1L)
+        degree <- dots[[1L]] else
+      return(polym(x, ..., degree = degree, raw = raw))
     }
     if (is.matrix(x)) {
-        m <- unclass(as.data.frame(cbind(x, ...)))
-        return(do.call("polym", c(m, degree = degree, raw = raw)))
+      m <- unclass(as.data.frame(cbind(x, ...)))
+      return(do.call("polym", c(m, degree = degree, raw = raw)))
     }
     if (degree < 1) 
-        stop("'degree' must be at least 1")
+      stop("'degree' must be at least 1")
 
 
 
-    # At prediction time x may be less than the degree
     if (smart.mode.is("write") || smart.mode.is("neutral"))
     if (degree >= length(x))
         stop("degree must be less than number of points")
@@ -399,64 +389,63 @@ attr( sm.ns, "smart") <- TRUE
 
 
     if (any(is.na(x))) 
-        stop("missing values are not allowed in 'poly'")
+      stop("missing values are not allowed in 'poly'")
     n <- degree + 1
     if (raw) {
-        if (degree >= length(unique(x)))
-            stop("'degree' must be less than number of unique points")
-        Z <- outer(x, 1L:degree, "^")
-        colnames(Z) <- 1L:degree
-        attr(Z, "degree") <- 1L:degree
-        class(Z) <- c("poly", "matrix")
-        return(Z)
+      if (degree >= length(unique(x)))
+        stop("'degree' must be less than number of unique points")
+      Z <- outer(x, 1L:degree, "^")
+      colnames(Z) <- 1L:degree
+      attr(Z, "degree") <- 1L:degree
+      class(Z) <- c("poly", "matrix")
+      return(Z)
     }
     if (is.null(coefs)) {
-        if (degree >= length(unique(x))) 
-            stop("'degree' must be less than number of unique points")
-        xbar <- mean(x)
-        x <- x - xbar
-        X <- outer(x, seq_len(n) - 1, "^")
-        QR <- qr(X)
-
-        if (QR$rank < degree)
-            stop("'degree' must be less than number of unique points")
-
-        z <- QR$qr
-        z <- z * (row(z) == col(z))
-        raw <- qr.qy(QR, z)
-        norm2 <- colSums(raw^2)
-        alpha <- (colSums(x * raw^2)/norm2 + xbar)[1L:degree]
-        Z <- raw/rep(sqrt(norm2), each = length(x))
-        colnames(Z) <- 1L:n - 1L
-        Z <- Z[, -1, drop = FALSE]
-        attr(Z, "degree") <- 1:degree
-        attr(Z, "coefs") <- list(alpha = alpha, norm2 = c(1, 
-            norm2))
-        class(Z) <- c("poly", "matrix")
+      if (degree >= length(unique(x))) 
+        stop("'degree' must be less than number of unique points")
+      xbar <- mean(x)
+      x <- x - xbar
+      X <- outer(x, seq_len(n) - 1, "^")
+      QR <- qr(X)
+
+      if (QR$rank < degree)
+        stop("'degree' must be less than number of unique points")
+
+      z <- QR$qr
+      z <- z * (row(z) == col(z))
+      raw <- qr.qy(QR, z)
+      norm2 <- colSums(raw^2)
+      alpha <- (colSums(x * raw^2)/norm2 + xbar)[1L:degree]
+      Z <- raw/rep(sqrt(norm2), each = length(x))
+      colnames(Z) <- 1L:n - 1L
+      Z <- Z[, -1, drop = FALSE]
+      attr(Z, "degree") <- 1:degree
+      attr(Z, "coefs") <- list(alpha = alpha, norm2 = c(1, norm2))
+      class(Z) <- c("poly", "matrix")
     } else {
-        alpha <- coefs$alpha
-        norm2 <- coefs$norm2
-        Z <- matrix(, length(x), n)
-        Z[, 1] <- 1
-        Z[, 2] <- x - alpha[1L]
-        if (degree > 1) 
-            for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) * 
-                Z[, i] - (norm2[i + 1]/norm2[i]) * Z[, i - 1]
-        Z <- Z/rep(sqrt(norm2[-1L]), each = length(x))
-        colnames(Z) <- 0:degree
-        Z <- Z[, -1, drop = FALSE]
-        attr(Z, "degree") <- 1L:degree
-        attr(Z, "coefs") <- list(alpha = alpha, norm2 = norm2)
-        class(Z) <- c("poly", "matrix")
+      alpha <- coefs$alpha
+      norm2 <- coefs$norm2
+      Z <- matrix(, length(x), n)
+      Z[, 1] <- 1
+      Z[, 2] <- x - alpha[1L]
+      if (degree > 1) 
+        for (i in 2:degree) Z[, i + 1] <- (x - alpha[i]) * 
+            Z[, i] - (norm2[i + 1]/norm2[i]) * Z[, i - 1]
+      Z <- Z/rep(sqrt(norm2[-1L]), each = length(x))
+      colnames(Z) <- 0:degree
+      Z <- Z[, -1, drop = FALSE]
+      attr(Z, "degree") <- 1L:degree
+      attr(Z, "coefs") <- list(alpha = alpha, norm2 = norm2)
+      class(Z) <- c("poly", "matrix")
     }
 
-    if (smart.mode.is("write"))
-        put.smart(list(degree = degree,
-                       coefs = attr(Z, "coefs"),
-                       raw = FALSE,  # raw is changed above
-                       match.call = match.call()))
+  if (smart.mode.is("write"))
+    put.smart(list(degree = degree,
+                   coefs = attr(Z, "coefs"),
+                   raw = FALSE,  # raw is changed above
+                   match.call = match.call()))
 
-    Z
+  Z
 }
 attr(sm.poly, "smart") <- TRUE
 
@@ -465,46 +454,45 @@ attr(sm.poly, "smart") <- TRUE
 
 
 
- sm.scale.default <-
-  function (x, center = TRUE, scale = TRUE) {
-    x <- as.matrix(x)
+ sm.scale.default <- function (x, center = TRUE, scale = TRUE) {
+  x <- as.matrix(x)
 
-    if (smart.mode.is("read")) {
-        return(eval(smart.expression))
-    }
+  if (smart.mode.is("read")) {
+    return(eval(smart.expression))
+  }
 
-    nc <- ncol(x)
-    if (is.logical(center)) {
-        if (center) {
-            center <- colMeans(x, na.rm = TRUE)
-            x <- sweep(x, 2L, center, check.margin = FALSE)
-        }
-    } else if (is.numeric(center) && (length(center) == nc)) 
-        x <- sweep(x, 2L, center, check.margin = FALSE) else
+  nc <- ncol(x)
+  if (is.logical(center)) {
+    if (center) {
+      center <- colMeans(x, na.rm = TRUE)
+      x <- sweep(x, 2L, center, check.margin = FALSE)
+    }
+  } else if (is.numeric(center) && (length(center) == nc)) 
+    x <- sweep(x, 2L, center, check.margin = FALSE) else
     stop("length of 'center' must equal the number of columns of 'x'")
-    if (is.logical(scale)) {
-        if (scale) {
-            f <- function(v) {
-                v <- v[!is.na(v)]
-                sqrt(sum(v^2)/max(1, length(v) - 1L))
-            }
-            scale <- apply(x, 2L, f)
-            x <- sweep(x, 2L, scale, "/", check.margin = FALSE)
-        }
-    } else if (is.numeric(scale) && length(scale) == nc) 
-        x <- sweep(x, 2L, scale, "/", check.margin = FALSE) else 
-    stop("length of 'scale' must equal the number of columns of 'x'")
-    if (is.numeric(center)) 
-        attr(x, "scaled:center") <- center
-    if (is.numeric(scale)) 
-        attr(x, "scaled:scale") <- scale
-
-    if (smart.mode.is("write")) {
-        put.smart(list(center = center, scale = scale,
-                       match.call = match.call()))
+  if (is.logical(scale)) {
+    if (scale) {
+      f <- function(v) {
+        v <- v[!is.na(v)]
+        sqrt(sum(v^2) / max(1, length(v) - 1L))
+      }
+      scale <- apply(x, 2L, f)
+      x <- sweep(x, 2L, scale, "/", check.margin = FALSE)
     }
+  } else if (is.numeric(scale) && length(scale) == nc) 
+    x <- sweep(x, 2L, scale, "/", check.margin = FALSE) else 
+    stop("length of 'scale' must equal the number of columns of 'x'")
+  if (is.numeric(center)) 
+    attr(x, "scaled:center") <- center
+  if (is.numeric(scale)) 
+    attr(x, "scaled:scale") <- scale
+
+  if (smart.mode.is("write")) {
+    put.smart(list(center = center, scale = scale,
+                   match.call = match.call()))
+  }
 
-    x
+  x
 }
 attr(sm.scale.default, "smart") <- TRUE
 
@@ -534,93 +522,79 @@ attr(sm.scale, "smart") <- TRUE
 
 
 
-"my1" <- function(x, minx = min(x)) {
-
-    x <- x   # Evaluate x
-
-    if (smart.mode.is("read")) {
-        smart  <- get.smart()
-        minx <- smart$minx          # Overwrite its value 
-    } else 
-    if (smart.mode.is("write"))
-        put.smart(list(minx=minx))
-
-    (x-minx)^2
+sm.min1 <- function(x) {
+  x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
+  minx <- min(x)
+  if (smart.mode.is("read")) {
+    smart  <- get.smart()
+    minx <- smart$minx  # Overwrite its value
+  } else if (smart.mode.is("write"))
+    put.smart(list(minx = minx))
+  minx
 }
-attr(my1, "smart") <- TRUE
-
+attr(sm.min1, "smart") <- TRUE
 
 
 
-"my2" <- function(x, minx = min(x)) {
 
-    x <- x   # Evaluate x
 
-    if (smart.mode.is("read")) {
-        return(eval(smart.expression))
-    } else 
-    if (smart.mode.is("write"))
-        put.smart(list(minx=minx, match.call=match.call()))
-
-    (x-minx)^2
+sm.min2 <- function(x, .minx = min(x)) {
+  x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
+  if (smart.mode.is("read")) {  # Use recursion
+    return(eval(smart.expression))
+  } else 
+  if (smart.mode.is("write"))
+    put.smart(list( .minx = .minx , match.call = match.call()))
+  .minx
 }
-
-attr(my2, "smart") <- TRUE
+attr(sm.min2, "smart") <- TRUE
 
 
 
 
-"stdze1" <- function(x, center = TRUE, scale = TRUE) {
 
-    x <- x  # Evaluate x
 
-    if (!is.vector(x))
-        stop("x must be a vector")
-
-    if (smart.mode.is("read")) {
-        smart  <- get.smart()
-        return((x-smart$center)/smart$scale)
-    }
 
-    if (is.logical(center))
-        center <- if (center) mean(x) else 0
-    if (is.logical(scale))
-        scale <- if (scale) sqrt(var(x)) else 1
 
-    if (smart.mode.is("write"))
-        put.smart(list(center=center,
-                       scale=scale))
-    # Normal use
-    (x-center)/scale
+sm.scale1 <- function(x, center = TRUE, scale = TRUE) {
+  x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
+  if (!is.vector(x))
+    stop("argument 'x' must be a vector")
+  if (smart.mode.is("read")) {
+    smart  <- get.smart()
+    return((x - smart$Center) / smart$Scale)
+  }
+  if (is.logical(center))
+    center <- if (center) mean(x) else 0
+  if (is.logical(scale))
+    scale <- if (scale) sqrt(var(x)) else 1
+  if (smart.mode.is("write"))
+    put.smart(list(Center = center,
+                   Scale  = scale))
+  (x - center) / scale
 }
-attr(stdze1, "smart") <- TRUE
-
-
+attr(sm.scale1, "smart") <- TRUE
 
-"stdze2" <- function(x, center = TRUE, scale = TRUE) {
 
-    x <- x  # Evaluate x
 
-    if (!is.vector(x))
-        stop("x must be a vector")
-
-    if (smart.mode.is("read")) {
-        return(eval(smart.expression))
-    }
-
-    if (is.logical(center))
-        center <- if (center) mean(x) else 0
-    if (is.logical(scale))
-        scale <- if (scale) sqrt(var(x)) else 1
-
-    if (smart.mode.is("write"))
-        put.smart(list(center=center,
-                       scale=scale,
-                       match.call=match.call()))
-
-    (x-center)/scale
+sm.scale2 <- function(x, center = TRUE, scale = TRUE) {
+  x <- x  # Evaluate x; needed for nested calls, e.g., sm.bs(sm.scale(x)).
+  if (!is.vector(x))
+    stop("argument 'x' must be a vector")
+  if (smart.mode.is("read")) {
+    return(eval(smart.expression))  # Recursion used
+  }
+  if (is.logical(center))
+    center <- if (center) mean(x) else 0
+  if (is.logical(scale))
+    scale <- if (scale) sqrt(var(x)) else 1
+  if (smart.mode.is("write"))
+    put.smart(list(center = center,
+                   scale  = scale,
+                   match.call = match.call()))
+  (x - center) / scale
 }
-attr(stdze2, "smart") <- TRUE
+attr(sm.scale2, "smart") <- TRUE
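+
+# Illustrative usage sketch (not part of the package source; the data
+# frame 'mydata' and the model below are hypothetical).  A smart function
+# such as sm.scale2() calls put.smart() at fit time to store its
+# data-dependent quantities, so that predict() on new data re-uses the
+# values computed from the training data instead of recomputing them:
+#   fit <- vglm(y ~ sm.scale2(x), uninormal, data = mydata)
+#   predict(fit, newdata = mydata[1:3, ])  # Uses the stored center/scale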
 
 
 
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index a3c4c5e..e4cfee2 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -20,9 +20,13 @@ yformat <- function(x, digits = options()$digits) {
 
 
 
-summaryvglm <- function(object, correlation = FALSE,
-                        dispersion = NULL, digits = NULL,
-                        presid = TRUE) {
+
+summaryvglm <-
+  function(object, correlation = FALSE,
+           dispersion = NULL, digits = NULL,
+           presid = TRUE,
+           signif.stars = getOption("show.signif.stars")
+          ) {
 
 
 
@@ -38,12 +42,33 @@ summaryvglm <- function(object, correlation = FALSE,
          "sum of squares) for computing the dispersion parameter")
   }
 
-  stuff <- summaryvlm(as(object, "vlm"),
+
+
+  stuff <- summaryvlm(
+                      object,
+
                       correlation = correlation,
                       dispersion = dispersion)
 
 
 
+
+
+  infos.fun <- object@family@infos
+  infos.list <- infos.fun()
+  summary.pvalues <- if (is.logical(infos.list$summary.pvalues))
+    infos.list$summary.pvalues else TRUE
+  if (!summary.pvalues && ncol(stuff@coef3) == 4)
+    stuff@coef3 <- stuff@coef3[, -4]  # Delete the pvalues column
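+  # (A family function may suppress the Wald p-values by returning
+  # summary.pvalues = FALSE from its infos() slot.)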
+
+
+
+
+
+
+
+
+
   answer <-
   new("summary.vglm",
       object,
@@ -61,6 +86,10 @@ summaryvglm <- function(object, correlation = FALSE,
 
   slot(answer, "misc") <- stuff at misc  # Replace
 
+
+  answer@misc$signif.stars <- signif.stars  # 20140728
+
+
   if (is.numeric(stuff@dispersion))
     slot(answer, "dispersion") <- stuff@dispersion
 
@@ -77,13 +106,17 @@ setMethod("logLik",  "summary.vglm", function(object, ...)
   logLik.vlm(object, ...))
 
 
-show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
-                              prefix = "",
-                              presid = TRUE,
-                              nopredictors = FALSE) {
+show.summary.vglm <-
+  function(x,
+           digits = max(3L, getOption("digits") - 3L),  # Same as glm()
+           quote = TRUE,
+           prefix = "",
+           presid = TRUE,
+           signif.stars = NULL,  # Use this if logical; 20140728
+           nopredictors = FALSE) {
 
   M <- x@misc$M
-  coef <- x@coef3   # Coefficients
+  coef <- x@coef3  # Coefficients
   correl <- x@correlation
 
   digits <- if (is.null(digits)) options()$digits - 2 else digits
@@ -111,8 +144,19 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
     }
   }
 
+
+  use.signif.stars <- if (is.logical(signif.stars))
+    signif.stars else x@misc$signif.stars  # 20140728
+  if (!is.logical(use.signif.stars))
+    use.signif.stars <- getOption("show.signif.stars")
+
+
   cat("\nCoefficients:\n")
-  print.default(coef, digits = digits)
+  printCoefmat(coef, digits = digits,
+               signif.stars = use.signif.stars,
+               na.print = "NA")
+
+
 
   cat("\nNumber of linear predictors: ", M, "\n")
 
@@ -199,6 +243,7 @@ setMethod("summary", "vglm",
 
 
 
+
 setMethod("show", "summary.vglm",
           function(object)
           show.summary.vglm(object))
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index ed24efd..bdad485 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -10,9 +10,12 @@
 
 
 
+
+
+
 summaryvlm <-
   function(object, correlation = FALSE, dispersion = NULL,
-           Colnames = c("Estimate", "Std. Error", "z value"),
+           Colnames = c("Estimate", "Std. Error", "z value", "Pr(>|z|)"),
            presid = TRUE) {
                          
 
@@ -64,7 +67,7 @@ summaryvlm <-
       warning("overriding the value of object at misc$dispersion")
     object at misc$estimated.dispersion <- FALSE
   }
-  sigma <- dispersion^0.5  # Can be a vector 
+  sigma <- sqrt(dispersion)  # Can be a vector 
 
   if (is.Numeric(ncol.X.vlm)) {
     R <- object@R
@@ -80,15 +83,23 @@ summaryvlm <-
 
     dimnames(covun) <- list(cnames, cnames)
   }
-  coef3 <- matrix(rep(Coefs, 3), ncol = 3)
+  coef3 <- matrix(rep(Coefs, 4), ncol = 4)
   dimnames(coef3) <- list(cnames, Colnames)
   SEs <- sqrt(diag(covun))
   if (length(sigma) == 1 && is.Numeric(ncol.X.vlm)) {
     coef3[, 2] <- SEs %o% sigma  # Fails here when sigma is a vector 
     coef3[, 3] <- coef3[, 1] / coef3[, 2]
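+    # Two-sided Wald test: z = Estimate / SE is referred to N(0, 1).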
+    pvalue <- 2 * pnorm(-abs(coef3[, 3]))
+    coef3[, 4] <- pvalue
+
+    if (is.logical(object@misc$estimated.dispersion) &&
+        object@misc$estimated.dispersion)
+      coef3 <- coef3[, -4]  # Delete the pvalues column
   } else {
-    coef3[, 1] <- coef3[, 2] <- coef3[, 3] <- NA
+    coef3[, 1] <- coef3[, 2] <- coef3[, 3] <- coef3[, 4] <- NA
+    coef3 <- coef3[, -4]  # Delete the pvalues column
   }
+
   if (correlation) {
     correl <- covun * outer(1 / SEs, 1 / SEs)
 
@@ -124,6 +135,7 @@ summaryvlm <-
 
 
 
+
 show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
                              prefix = "") {
 
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 522cf54..e0703f1 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -16,7 +16,7 @@ vgam.control <- function(all.knots = FALSE,
                          maxit = 30,
                          na.action=na.fail,
                          nk = NULL,
-                         save.weight = FALSE,
+                         save.weights = FALSE,
                          se.fit = TRUE,
                          trace = FALSE,
                          wzepsilon = .Machine$double.eps^0.75,
@@ -81,7 +81,7 @@ vgam.control <- function(all.knots = FALSE,
        maxit = maxit, 
        nk=nk,
        min.criterion = .min.criterion.VGAM,
-       save.weight = as.logical(save.weight)[1],
+       save.weights = as.logical(save.weights)[1],
        se.fit = as.logical(se.fit)[1],
        trace = as.logical(trace)[1],
        wzepsilon = wzepsilon)
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 202f154..24fdd77 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -7,17 +7,17 @@
 
 
 .min.criterion.VGAM <-
-  c("deviance" = TRUE,
+  c("deviance"      = TRUE,
     "loglikelihood" = FALSE,
-    "AIC" = TRUE, 
-    "Likelihood" = FALSE,
-    "res.ss" = TRUE,
-    "coefficients" = TRUE)
+    "AIC"           = TRUE, 
+    "Likelihood"    = FALSE,
+    "res.ss"        = TRUE,
+    "coefficients"  = TRUE)
 
 
  
 
-vlm.control <- function(save.weight = TRUE,
+vlm.control <- function(save.weights = TRUE,
                         tol = 1e-7,
                         method = "qr", 
                         checkwz = TRUE,
@@ -32,7 +32,7 @@ vlm.control <- function(save.weight = TRUE,
   if (!is.Numeric(wzepsilon, length.arg = 1, positive = TRUE))
     stop("bad input for argument 'wzepsilon'")
 
-  list(save.weight = save.weight,
+  list(save.weights = save.weights,
        tol = tol,
        method = method,
        checkwz = checkwz,
@@ -44,13 +44,14 @@ vlm.control <- function(save.weight = TRUE,
 
 vglm.control <- function(checkwz = TRUE,
                          Check.rank = TRUE,
+                         Check.cm.rank = TRUE,
                          criterion = names(.min.criterion.VGAM), 
                          epsilon = 1e-7,
                          half.stepsizing = TRUE,
                          maxit = 30, 
                          noWarning = FALSE,
                          stepsize = 1, 
-                         save.weight = FALSE,
+                         save.weights = FALSE,
                          trace = FALSE,
                          wzepsilon = .Machine$double.eps^0.75,
                          xij = NULL,
@@ -102,6 +103,7 @@ vglm.control <- function(checkwz = TRUE,
 
     list(checkwz = checkwz,
          Check.rank = Check.rank, 
+         Check.cm.rank = Check.cm.rank,
          convergence = convergence, 
          criterion = criterion,
          epsilon = epsilon,
@@ -109,7 +111,7 @@ vglm.control <- function(checkwz = TRUE,
          maxit = maxit,
          noWarning = as.logical(noWarning)[1],
          min.criterion = .min.criterion.VGAM,
-         save.weight = as.logical(save.weight)[1],
+         save.weights = as.logical(save.weights)[1],
          stepsize = stepsize,
          trace = as.logical(trace)[1],
          wzepsilon = wzepsilon,
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 12ca1b7..a5ec8fa 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -91,7 +91,8 @@ vglm.fit <-
 
 
   Hlist <- process.constraints(constraints, x, M,
-                               specialCM = specialCM)
+                               specialCM = specialCM,
+                               Check.cm.rank = control$Check.cm.rank)
 
 
   ncolHlist <- unlist(lapply(Hlist, ncol))
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index feab966..3da89fd 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -391,7 +391,7 @@ vsmooth.spline <-
   Wmat.c <- array(0, c(ncb, ncb, neff))
  if (FALSE)
   for (ii in 1:neff) {
-    Wi.indiv <- m2adefault(wzmat[ii, , drop = FALSE], M = ncb)
+    Wi.indiv <- m2a(wzmat[ii, , drop = FALSE], M = ncb)
     Wi.indiv <- Wi.indiv[,, 1]  # Drop the 3rd dimension
     Wmat.c[,, ii] <- t(conmat) %*% Wi.indiv %*% conmat
     one.Wmat.c <- matrix(Wmat.c[,, ii], ncb, ncb)
diff --git a/build/vignette.rds b/build/vignette.rds
new file mode 100644
index 0000000..e9fd57c
Binary files /dev/null and b/build/vignette.rds differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index 4202808..9518e99 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
index 40b9ea7..ee81ec0 100644
Binary files a/data/Huggins89table1.rda and b/data/Huggins89table1.rda differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index 507a0f2..b3c90f1 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index 1699e04..6b90a83 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index 4b199ad..b483f23 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index 7e0578c..8d3d08a 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/beggs.rda b/data/beggs.rda
index 9fd70c6..201e207 100644
Binary files a/data/beggs.rda and b/data/beggs.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 074a63c..dc42cb4 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/cfibrosis.rda b/data/cfibrosis.rda
index 5fe8e08..ceffc54 100644
Binary files a/data/cfibrosis.rda and b/data/cfibrosis.rda differ
diff --git a/data/corbet.rda b/data/corbet.rda
index b0f880f..735b925 100644
Binary files a/data/corbet.rda and b/data/corbet.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index 572f176..2561fe8 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 45f2359..033a40d 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index d8eba09..1b1491b 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 654fee5..43bd5c8 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 70a81a3..8e13f89 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 1f9f6ed..09e18c2 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/deermice.rda b/data/deermice.rda
index ff35c27..896593c 100644
Binary files a/data/deermice.rda and b/data/deermice.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index ce60866..229e54c 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/flourbeetle.rda b/data/flourbeetle.rda
new file mode 100644
index 0000000..81775d3
Binary files /dev/null and b/data/flourbeetle.rda differ
diff --git a/data/hspider.rda b/data/hspider.rda
index 0111a78..42bab6c 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/lakeO.rda b/data/lakeO.rda
index 3911833..d40ec1c 100644
Binary files a/data/lakeO.rda and b/data/lakeO.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index 8b0d11b..74667fa 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index c9f4568..852416b 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/melbmaxtemp.rda b/data/melbmaxtemp.rda
new file mode 100644
index 0000000..c55b82b
Binary files /dev/null and b/data/melbmaxtemp.rda differ
diff --git a/data/mmt.rda b/data/mmt.rda
deleted file mode 100644
index a9782a6..0000000
Binary files a/data/mmt.rda and /dev/null differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index 0a3cd82..6deb34e 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/prinia.rda b/data/prinia.rda
index 25ed41a..f36e640 100644
Binary files a/data/prinia.rda and b/data/prinia.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index 46cecfe..da3d073 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index f860477..48482bf 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index a5cbb07..e82fa48 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index 8cabb8a..15452b9 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wine.rda b/data/wine.rda
index 843901e..608584d 100644
Binary files a/data/wine.rda and b/data/wine.rda differ
diff --git a/inst/doc/categoricalVGAM.R b/inst/doc/categoricalVGAM.R
new file mode 100644
index 0000000..badcc3c
--- /dev/null
+++ b/inst/doc/categoricalVGAM.R
@@ -0,0 +1,278 @@
+### R code from vignette source 'categoricalVGAM.Rnw'
+
+###################################################
+### code chunk number 1: categoricalVGAM.Rnw:84-90
+###################################################
+library("VGAM")
+library("VGAMdata")
+ps.options(pointsize = 12)
+options(width = 72, digits = 4)
+options(SweaveHooks = list(fig = function() par(las = 1)))
+options(prompt = "R> ", continue = "+")
+
+
+###################################################
+### code chunk number 2: pneumocat
+###################################################
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+            cumulative(reverse = TRUE, parallel = TRUE), data = pneumo)
+
+
+###################################################
+### code chunk number 3: categoricalVGAM.Rnw:903-907
+###################################################
+journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
+squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
+                      498, 68, NA, 325,   221, 17, 142, NA), 4, 4)
+dimnames(squaremat) <- list(winner = journal, loser = journal)
+
+
+###################################################
+### code chunk number 4: categoricalVGAM.Rnw:1007-1011
+###################################################
+abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
+fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
+coef(fit, matrix = TRUE)
+Coef(fit)  # Estimated pA and pB
+
+
+###################################################
+### code chunk number 5: categoricalVGAM.Rnw:1289-1291
+###################################################
+head(marital.nz, 4)
+summary(marital.nz)
+
+
+###################################################
+### code chunk number 6: categoricalVGAM.Rnw:1294-1296
+###################################################
+fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
+               data = marital.nz)
+
+
+###################################################
+### code chunk number 7: categoricalVGAM.Rnw:1300-1302
+###################################################
+head(depvar(fit.ms), 4)
+colSums(depvar(fit.ms))
+
+
+###################################################
+### code chunk number 8: categoricalVGAM.Rnw:1311-1323
+###################################################
+# Plot output
+mycol <- c("red", "darkgreen", "blue")
+par(mfrow = c(2, 2))
+plot(fit.ms, se = TRUE, scale = 12,
+         lcol = mycol, scol = mycol)
+
+# Plot output overlayed
+#par(mfrow=c(1,1))
+plot(fit.ms, se = TRUE, scale = 12,
+         overlay = TRUE,
+         llwd = 2,
+         lcol = mycol, scol = mycol)
+
+
+###################################################
+### code chunk number 9: categoricalVGAM.Rnw:1366-1379
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+mycol <- c("red", "darkgreen", "blue")
+ par(mfrow = c(2, 2))
+ par(mar = c(4.2, 4.0, 1.2, 2.2) + 0.1)
+plot(fit.ms, se = TRUE, scale = 12,
+         lcol = mycol, scol = mycol)
+
+# Plot output overlaid
+#par(mfrow = c(1, 1))
+plot(fit.ms, se = TRUE, scale = 12,
+         overlay = TRUE,
+         llwd = 2,
+         lcol = mycol, scol = mycol)
+
+
+###################################################
+### code chunk number 10: categoricalVGAM.Rnw:1399-1400
+###################################################
+plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
+
+
+###################################################
+### code chunk number 11: categoricalVGAM.Rnw:1409-1413
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+ par(mfrow = c(1, 3))
+ par(mar = c(4.5, 4.0, 0.2, 2.2) + 0.1)
+plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
+
+
+###################################################
+### code chunk number 12: categoricalVGAM.Rnw:1436-1448
+###################################################
+foo <- function(x, elbow = 50)
+  poly(pmin(x, elbow), 2)
+
+clist <- list("(Intercept)" = diag(3),
+             "poly(age, 2)" = rbind(1, 0, 0),
+             "foo(age)"     = rbind(0, 1, 0),
+             "age"          = rbind(0, 0, 1))
+fit2.ms <-
+    vglm(mstatus ~ poly(age, 2) + foo(age) + age,
+         family = multinomial(refLevel = 2),
+         constraints = clist,
+         data = marital.nz)
+
+
+###################################################
+### code chunk number 13: categoricalVGAM.Rnw:1451-1452
+###################################################
+coef(fit2.ms, matrix = TRUE)
+
+
+###################################################
+### code chunk number 14: categoricalVGAM.Rnw:1456-1463
+###################################################
+par(mfrow = c(2, 2))
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[3], scol = mycol[3], which.term = 3)
+
+
+###################################################
+### code chunk number 15: categoricalVGAM.Rnw:1474-1483
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+par(mfrow=c(2,2))
+ par(mar=c(4.5,4.0,1.2,2.2)+0.1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[3], scol = mycol[3], which.term = 3)
+
+
+###################################################
+### code chunk number 16: categoricalVGAM.Rnw:1501-1502
+###################################################
+deviance(fit.ms) - deviance(fit2.ms)
+
+
+###################################################
+### code chunk number 17: categoricalVGAM.Rnw:1508-1509
+###################################################
+(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
+
+
+###################################################
+### code chunk number 18: categoricalVGAM.Rnw:1512-1513
+###################################################
+pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
+
+
+###################################################
+### code chunk number 19: categoricalVGAM.Rnw:1526-1537
+###################################################
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
+     type = "l", las = 1, lwd = 2, ylim = 0:1,
+     ylab = "Fitted probabilities",
+     xlab = "Age",  # main="Marital status amongst NZ Male Europeans",
+     col = c(mycol[1], "black", mycol[-1])))
+legend(x = 52.5, y = 0.62,  # x="topright",
+       col = c(mycol[1], "black", mycol[-1]),
+       lty = 1:4,
+       legend = colnames(fit.ms@y), lwd = 2)
+abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
+
+
+###################################################
+### code chunk number 20: categoricalVGAM.Rnw:1552-1565
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+ par(mfrow = c(1,1))
+ par(mar = c(4.5,4.0,0.2,0.2)+0.1)
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
+     type = "l", las = 1, lwd = 2, ylim = 0:1,
+     ylab = "Fitted probabilities",
+     xlab = "Age",
+     col = c(mycol[1], "black", mycol[-1])))
+legend(x = 52.5, y = 0.62,
+       col = c(mycol[1], "black", mycol[-1]),
+       lty = 1:4,
+       legend = colnames(fit.ms@y), lwd = 2.1)
+abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
+
+
+###################################################
+### code chunk number 21: categoricalVGAM.Rnw:1599-1603
+###################################################
+# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
+head(backPain, 4)
+summary(backPain)
+backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
+
+
+###################################################
+### code chunk number 22: categoricalVGAM.Rnw:1607-1608
+###################################################
+bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
+
+
+###################################################
+### code chunk number 23: categoricalVGAM.Rnw:1611-1612
+###################################################
+Coef(bp.rrmlm1)
+
+
+###################################################
+### code chunk number 24: categoricalVGAM.Rnw:1640-1641
+###################################################
+set.seed(123)
+
+
+###################################################
+### code chunk number 25: categoricalVGAM.Rnw:1644-1646
+###################################################
+bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
+                   Corner = FALSE, Uncor = TRUE)
+
+
+###################################################
+### code chunk number 26: categoricalVGAM.Rnw:1654-1658
+###################################################
+biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
+#      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
+       xlim = c(-4.5, 2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
+       chull = TRUE, clty = 2, ccol = "blue")
+
+
+###################################################
+### code chunk number 27: categoricalVGAM.Rnw:1690-1698
+###################################################
+getOption("SweaveHooks")[["fig"]]()
+# Plot output
+ par(mfrow=c(1,1))
+ par(mar=c(4.5,4.0,0.2,2.2)+0.1)
+
+biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
+#      xlim = c(-1,6), ylim = c(-1.2,4),  # Use this if not scaled
+       xlim = c(-4.5,2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
+       chull = TRUE, clty = 2, ccol = "blue")
+
+
+###################################################
+### code chunk number 28: categoricalVGAM.Rnw:1812-1813
+###################################################
+iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
+
+
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
new file mode 100644
index 0000000..8394144
--- /dev/null
+++ b/inst/doc/categoricalVGAM.Rnw
@@ -0,0 +1,2325 @@
+\documentclass[article,shortnames,nojss]{jss}
+\usepackage{thumbpdf}
+%% need no \usepackage{Sweave.sty}
+
+\SweaveOpts{engine=R,eps=FALSE}
+%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
+%\VignetteDepends{VGAM}
+%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
+%\VignettePackage{VGAM}
+
+%% new commands
+\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
+\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
+\newcommand{\pr}{\mbox{$P$}}
+\newcommand{\logit}{\mbox{\rm logit}}
+\newcommand{\bzero}{{\bf 0}}
+\newcommand{\bone}{{\bf 1}}
+\newcommand{\bid}{\mbox{\boldmath $d$}}
+\newcommand{\bie}{\mbox{\boldmath $e$}}
+\newcommand{\bif}{\mbox{\boldmath $f$}}
+\newcommand{\bix}{\mbox{\boldmath $x$}}
+\newcommand{\biy}{\mbox{\boldmath $y$}}
+\newcommand{\biz}{\mbox{\boldmath $z$}}
+\newcommand{\biY}{\mbox{\boldmath $Y$}}
+\newcommand{\bA}{\mbox{\rm \bf A}}
+\newcommand{\bB}{\mbox{\rm \bf B}}
+\newcommand{\bC}{\mbox{\rm \bf C}}
+\newcommand{\bH}{\mbox{\rm \bf H}}
+\newcommand{\bI}{\mbox{\rm \bf I}}
+\newcommand{\bX}{\mbox{\rm \bf X}}
+\newcommand{\bW}{\mbox{\rm \bf W}}
+\newcommand{\bY}{\mbox{\rm \bf Y}}
+\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
+\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
+\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
+\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
+\newcommand{\diag}{ \mbox{\rm diag} }
+\newcommand{\Var}{ \mbox{\rm Var} }
+\newcommand{\R}{{\textsf{R}}}
+\newcommand{\VGAM}{\pkg{VGAM}}
+
+
+\author{Thomas W. Yee\\University of Auckland}
+\Plainauthor{Thomas W. Yee}
+
+\title{The \pkg{VGAM} Package for Categorical Data Analysis}
+\Plaintitle{The VGAM Package for Categorical Data Analysis}
+
+\Abstract{
+  Classical categorical regression models such as the multinomial logit and
+  proportional odds models are shown to be readily handled by the  vector
+  generalized linear and additive model (VGLM/VGAM) framework. Additionally,
+  there are natural extensions, such as reduced-rank VGLMs for
+  dimension reduction, and allowing covariates that have values
+  specific to each linear/additive predictor,
+  e.g., for consumer choice modeling. This article describes some of the
+  framework behind the \pkg{VGAM} \R{} package, its usage and implementation
+  details.
+}
+\Keywords{categorical data analysis, Fisher scoring,
+  iteratively reweighted least squares,
+  multinomial distribution, nominal and ordinal polytomous responses,
+  smoothing, vector generalized linear and additive models,
+  \VGAM{} \R{} package}
+\Plainkeywords{categorical data analysis, Fisher scoring,
+  iteratively reweighted least squares, multinomial distribution,
+  nominal and ordinal polytomous responses, smoothing,
+  vector generalized linear and additive models, VGAM R package}
+
+\Address{
+  Thomas W. Yee \\
+  Department of Statistics \\
+  University of Auckland, Private Bag 92019 \\
+  Auckland Mail Centre \\
+  Auckland 1142, New Zealand \\
+  E-mail: \email{t.yee@auckland.ac.nz}\\
+  URL: \url{http://www.stat.auckland.ac.nz/~yee/}
+}
+
+
+\begin{document}
+
+
+<<echo=FALSE, results=hide>>=
+library("VGAM")
+library("VGAMdata")
+ps.options(pointsize = 12)
+options(width = 72, digits = 4)
+options(SweaveHooks = list(fig = function() par(las = 1)))
+options(prompt = "R> ", continue = "+")
+@
+
+
+% ----------------------------------------------------------------------
+\section{Introduction}
+\label{sec:jsscat.intoduction}
+
+
+This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
+based on \cite{Yee:2010}.
+Any subsequent features (especially non-backward compatible ones)
+will appear here.
+
+The subject of CDA is concerned with
+analyses where the response is categorical regardless of whether
+the explanatory variables are continuous or categorical. It is a
+very frequent form of data. Over the years several CDA regression
+models for polytomous responses have become popular, e.g., those
+in Table \ref{tab:cat.quantities}. Not surprisingly, the models
+are interrelated: their foundation is the multinomial distribution
+and consequently they share similar and overlapping properties which
+modellers should know and exploit. Unfortunately, software has been
+slow to reflect their commonality and this makes analyses unnecessarily
+difficult for the practitioner on several fronts, e.g., using different
+functions/procedures to fit different models which does not aid the
+understanding of their connections.
+
+
+This historical misfortune can be seen by considering \R{} functions
+for CDA. From the Comprehensive \proglang{R} Archive Network
+(CRAN, \url{http://CRAN.R-project.org/}) there is \texttt{polr()}
+\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
+model and \texttt{multinom()}
+\citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
+logit model. However, both of these can be considered `one-off'
+modeling functions rather than providing a unified offering for CDA.
+The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
+has greater functionality: it can fit the proportional odds model
+(and the forward continuation ratio model upon preprocessing). Neither
+\texttt{polr()} or \texttt{lrm()} appear able to fit the nonproportional
+odds model. There are non-CRAN packages too, such as the modeling
+function \texttt{nordr()} \citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit
+the proportional odds, continuation ratio and adjacent categories models;
+however it calls \texttt{nlm()} and the user must supply starting values.
+In general these \R{} \citep{R} modeling functions are not modular
+and often require preprocessing and sometimes are not self-starting.
+The implementations can be perceived as scattered and piecemeal
+in nature. Consequently, if the practitioner wishes to fit the models
+of Table \ref{tab:cat.quantities} then there is a need to master several
+modeling functions from several packages each having different syntaxes
+etc. This is a hindrance to efficient CDA.
+
+
+ 
+\begin{table}[tt]
+\centering
+\begin{tabular}{|c|c|l|}
+\hline
+Quantity & Notation &
+%Range of $j$ &
+\VGAM{} family function \\
+\hline
+%
+$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
+%$1,\ldots,M$ &
+\texttt{acat()} \\
+%
+$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
+%$2,\ldots,M+1$ &
+\texttt{acat(reverse = TRUE)} \\
+%
+$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
+%$1,\ldots,M$ & 
+\texttt{cratio()} \\
+%
+$\pr(Y<j|Y \leq j)$ &$\delta_{j}^{*R}$ &
+%$2,\ldots,M+1$ &
+\texttt{cratio(reverse = TRUE)} \\
+%
+$\pr(Y\leq j)$ &$\gamma_{j}$ &
+%$1,\ldots,M$ &
+\texttt{cumulative()} \\
+%
+$\pr(Y\geq j)$ &$\gamma_{j}^R$&
+%$2,\ldots,M+1$ &
+\texttt{cumulative(reverse = TRUE)} \\
+%
+$\log\{\pr(Y=j)/\pr(Y=M+1)\}$ & &
+%$1,\ldots,M$ &
+\texttt{multinomial()} \\
+%
+$\pr(Y=j|Y \geq j)$ &$\delta_{j}$ &
+%$1,\ldots,M$ &
+\texttt{sratio()} \\
+%
+$\pr(Y=j|Y \leq j)$ &$\delta_{j}^R$ &
+%$2,\ldots,M+1$ &
+\texttt{sratio(reverse = TRUE)} \\
+%
+\hline
+\end{tabular}
+\caption{
+Quantities defined in \VGAM{} for a
+categorical response $Y$ taking values $1,\ldots,M+1$.
+Covariates \bix{} have been omitted for clarity.
+The LHS quantities are $\eta_{j}$
+or $\eta_{j-1}$ for $j=1,\ldots,M$ (not reversed)
+and $j=2,\ldots,M+1$ (if reversed), respectively.
+All models are estimated by minimizing the deviance.
+All except for \texttt{multinomial()} are suited to ordinal $Y$.
+\label{tab:cat.quantities}
+}
+\end{table}
+ 
+
+
+
+\proglang{SAS} \citep{SAS} does not fare much better than \R. Indeed,
+it could be considered as having an \textit{excess} of options which
+bewilders the non-expert user; there is little coherent overriding
+structure. Its \code{proc logistic} handles the multinomial logit
+and proportional odds models, as well as exact logistic regression
+\citep[see][which is for Version 8 of \proglang{SAS}]{stok:davi:koch:2000}.
+The fact that the proportional odds model may be fitted by \code{proc
+logistic}, \code{proc genmod} and \code{proc probit} arguably leads
+to possible confusion rather than the making of connections, e.g.,
+\code{genmod} is primarily for GLMs and the proportional odds model is not
+a GLM in the classical \cite{neld:wedd:1972} sense. Also, \code{proc
+phreg} fits the multinomial logit model, and \code{proc catmod} with
+its WLS implementation adds to further potential confusion.
+
+
+This article attempts to show how these deficiencies can be addressed
+by considering the vector generalized linear and additive model
+(VGLM/VGAM) framework, as implemented by the author's \pkg{VGAM}
+package for \R{}. The main purpose of this paper is to demonstrate
+how the framework is very well suited to many `classical' regression
+models for categorical responses, and to describe the implementation and
+usage of \pkg{VGAM} for such. To this end an outline of this article
+is as follows. Section \ref{sec:jsscat.VGLMVGAMoverview} summarizes
+the basic VGLM/VGAM framework. Section \ref{sec:jsscat.vgamff}
+centers on functions for CDA in \VGAM. Given an adequate framework,
+some natural extensions of Section \ref{sec:jsscat.VGLMVGAMoverview} are
+described in Section \ref{sec:jsscat.othermodels}. Users of \pkg{VGAM}
+can benefit from Section \ref{sec:jsscat.userTopics} which shows how
+the software reflects their common theory. Some examples are given in
+Section \ref{sec:jsscat.eg}. Section \ref{sec:jsscat.implementDetails}
+contains selected topics in statistical computing that are
+more relevant to programmers interested in the underlying code.
+Section \ref{sec:jsscat.extnUtil} discusses several utilities and
+extensions needed for advanced CDA modeling, and the article concludes
+with a discussion. This document was run using \pkg{VGAM} 0.7-10
+\citep{yee:VGAM:2010} under \R 2.10.0.
+
+
+Some general references for categorical data providing
+background to this article include
+\cite{agre:2010},
+\cite{agre:2013},
+\cite{fahr:tutz:2001},
+\cite{leon:2000},
+\cite{lloy:1999},
+\cite{long:1997},
+\cite{mccu:neld:1989},
+\cite{simo:2003},
+\citet{smit:merk:2013} and
+\cite{tutz:2012}.
+An overview of models for ordinal responses is \cite{liu:agre:2005},
+and a manual for fitting common models found in \cite{agre:2002}
+to polytomous responses with various software is \cite{thom:2009}.
+A package for visualizing categorical data in \R{} is \pkg{vcd}
+\citep{Meyer+Zeileis+Hornik:2006,Meyer+Zeileis+Hornik:2009}.
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{VGLM/VGAM overview}
+\label{sec:jsscat.VGLMVGAMoverview}
+
+
+This section summarizes the VGLM/VGAM framework with a particular emphasis
+on categorical models, since these classes encapsulate many multivariate
+response models in, e.g., survival analysis, extreme value analysis,
+quantile and expectile regression, time series, bioassay data, nonlinear
+least-squares models, and scores of standard and nonstandard univariate
+and continuous distributions. The framework is partially summarized by
+Table \ref{tab:rrvglam.jss.subset}. More general details about VGLMs
+and VGAMs can be found in \cite{yee:hast:2003} and \cite{yee:wild:1996}
+respectively. An informal and practical article connecting the general
+framework with the software is \cite{Rnews:Yee:2008}.
+
+
+
+\subsection{VGLMs}
+\label{sec:wffc.appendixa.vglms}
+
+Suppose the observed response \biy{} is a $q$-dimensional vector.
+VGLMs are defined as a model for which the conditional distribution
+of $\biY$ given explanatory $\bix$ is of the form
+\begin{eqnarray}
+f(\biy | \bix ; \bB, \phi)  =  h(\biy, \eta_1,\ldots, \eta_M, \phi)
+\label{gammod}
+\end{eqnarray}
+for some known function $h(\cdot)$, where $\bB = (\bbeta_1 \,
+\bbeta_2 \, \cdots \, \bbeta_M)$ is a $p \times M$ matrix of
+unknown regression coefficients,
+and the $j$th linear predictor is
+\begin{equation}
+\eta_j  =  \eta_j(\bix)  =  \bbeta_j^{\top} \bix  = 
+\sum_{k=1}^p \beta_{(j)k} \, x_k ,  \qquad j=1,\ldots,M.
+\label{gammod2}
+\end{equation}
+Here $\bix=(x_1,\ldots,x_p)^{\top}$ with $x_1 = 1$ if there is an intercept.
+Note that (\ref{gammod2}) means that \textit{all} the parameters may be
+potentially modelled as functions of \bix. It can be seen that VGLMs are
+like GLMs but allow for multiple linear predictors, and they encompass
+models outside the small confines of the exponential family.
+In (\ref{gammod}) the quantity $\phi$ is an optional scaling parameter
+which is included for backward compatibility with common adjustments
+to overdispersion, e.g., with respect to GLMs.
+
+
+In general there is no relationship between $q$ and $M$: it
+depends specifically on the model or distribution to be fitted.
+However, for the `classical' categorical regression models of
+Table \ref{tab:cat.quantities} we have $M=q-1$ since $q$ is the number
+of levels the multi-category response $Y$ has.
+
+
+
+
+
+The $\eta_j$ of VGLMs may be applied directly to parameters of a
+distribution rather than just to a mean for GLMs. A simple example is
+a univariate distribution with a location parameter $\xi$ and a scale
+parameter $\sigma > 0$, where we may take $\eta_1 = \xi$ and $\eta_2 =
+\log\,\sigma$. In general, $\eta_{j}=g_{j}(\theta_{j})$ for some parameter
+link function $g_{j}$ and parameter $\theta_{j}$.
+For example, the adjacent categories models in
+Table \ref{tab:cat.quantities} are ratios of two probabilities, therefore
+a log link of $\zeta_{j}^{R}$ or $\zeta_{j}$ is the default.
+In \VGAM{}, there are currently over a dozen links to choose from, of
+which any can be assigned to any parameter, ensuring maximum flexibility.
+Table \ref{tab:jsscat.links} lists some of them.
+
+
+
+\begin{table}[tt]
+\centering
+%\  ~~~ \par
+\begin{tabular}{|l|l|l|l|}
+\hline
+\qquad \qquad $\boldeta$ & 
+Model & Modeling & Reference \\
+ & & function & \\
+%-------------------------------------------------------------
+\hline
+\hline
+%-------------------------------------------------------------
+ &&&\\[-1.1ex]
+$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
+VGLM & \texttt{vglm()}
+&
+\cite{yee:hast:2003} \\[1.6ex]
+%Yee \& Hastie (2003) \\[1.6ex]
+%-------------------------------------------------------------
+\hline
+ &&&\\[-1.1ex]
+$\bB_1^{\top} \bix_{1} +
+ \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
+%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
+VGAM & \texttt{vgam()}
+&
+\cite{yee:wild:1996} \\[2.2ex]
+%Yee \& Wild (1996) \\[2.2ex]
+%-------------------------------------------------------------
+\hline
+ &&&\\[-1.1ex]
+$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
+RR-VGLM & \texttt{rrvglm()}
+&
+\cite{yee:hast:2003} \\[1.8ex]
+%Yee \& Hastie (2003) \\[1.8ex]
+%-------------------------------------------------------------
+\hline
+ &&&\\[-1.1ex]
+See \cite{yee:hast:2003} &
+Goodman's RC & \texttt{grc()}
+&
+%\cite{yee:hast:2003} \\[1.8ex]
+\cite{good:1981} \\[1.8ex]
+%-------------------------------------------------------------
+\hline
+\end{tabular}
+\caption{
+A summary of some of the models
+that the package \VGAM{} and
+its framework can handle.
+The vector of latent variables $\bnu = \bC^{\top} \bix_2$
+where
+$\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
+\label{tab:rrvglam.jss.subset}
+}
+%\medskip
+\end{table}
+
+
+
+
+
+
+VGLMs are estimated using iteratively reweighted least squares (IRLS) 
+which is particularly suitable for categorical models
+\citep{gree:1984}.
+All models in this article have a log-likelihood
+\begin{equation}
+\ell  =  \sum_{i=1}^n \, w_i \, \ell_i
+\label{eq:log-likelihood.VGAM}
+\end{equation}
+where the $w_i$ are known positive prior weights.
+Let $\bix_i$ denote the explanatory vector for the $i$th observation,
+for $i=1,\dots,n$.
+Then one can write
+\begin{eqnarray}
+\boldeta_i &=& \boldeta(\bix_i)  = 
+\left(
+\begin{array}{c}
+\eta_1(\bix_i) \\
+\vdots \\
+\eta_M(\bix_i)
+\end{array} \right)  = 
+\bB^{\top} \bix_i  =  
+\left(
+\begin{array}{c}
+\bbeta_1^{\top} \bix_i \\
+\vdots \\
+\bbeta_M^{\top} \bix_i
+\end{array} \right)
+\nonumber
+\\
+&=& 
+\left(
+\begin{array}{cccc}
+\beta_{(1)1} & \cdots & \beta_{(1)p} \\
+\vdots \\
+\beta_{(M)1} & \cdots & \beta_{(M)p} \\
+\end{array} \right)
+\bix_i  = 
+\left(
+\bbeta_{(1)} \; \cdots \; \bbeta_{(p)}
+\right)
+\bix_i .
+\label{eq:lin.pred}
+\end{eqnarray}
+In IRLS,
+an adjusted dependent vector $\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$
+is regressed upon a large (VLM) model matrix, with
+$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
+The working weights $\bW_i$ here are 
+$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
+(which, under regularity conditions, is equal to
+$-w_i \, E[ \partial^2 \ell_i / (\partial \boldeta_i \,
+\partial \boldeta_i^{\top})]$),
+giving rise to the Fisher scoring algorithm.
+
+
+Let $\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$
+(LM) model matrix
+obtained from the \texttt{formula} argument of \texttt{vglm()}.
+Given $\biz_i$, $\bW_i$ and $\bX{}$ at the current IRLS iteration,
+a weighted multivariate regression is performed.
+To do this, a \textit{vector linear model} (VLM) model matrix 
+$\bX_{\sVLM}$ is formed from $\bX{}$ and $\bH_k$
+(see Section \ref{sec:wffc.appendixa.vgams}).
+This has $nM$ rows, and if there are no constraints then $Mp$ columns.
+Then $\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
+upon $\bX_{\sVLM}$
+with variance-covariance matrix $\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$.
+This system of linear equations is converted to one large
+WLS fit by premultiplication of the output of
+a Cholesky decomposition of the $\bW_i$.
+
+
+Fisher scoring usually has good numerical stability
+because the $\bW_i$ are positive-definite over a larger
+region of parameter space than Newton-Raphson. 
+For the categorical models in this article the expected
+information matrices are simpler than the observed
+information matrices, and are easily derived,
+therefore all the families in Table \ref{tab:cat.quantities}
+implement Fisher scoring.
+
+
+
+\subsection{VGAMs and constraint matrices}
+\label{sec:wffc.appendixa.vgams}
+
+
+VGAMs provide additive-model extensions to VGLMs, that is,
+(\ref{gammod2}) is generalized to
+\begin{equation}
+\eta_j(\bix)  =  \beta_{(j)1} +
+\sum_{k=2}^p \; f_{(j)k}(x_k), \qquad j = 1,\ldots, M,
+\label{addmod}
+\end{equation}
+a sum of smooth functions of the individual covariates, just as
+with ordinary GAMs \citep{hast:tibs:1990}. The $\bif_k =
+(f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered for uniqueness,
+and are estimated simultaneously using \textit{vector smoothers}.
+VGAMs are thus a visual data-driven method that is well suited to
+exploring data, and they retain the simplicity of interpretation that
+GAMs possess.
+
+
+
+An important concept, especially for CDA, is the idea of
+`constraints-on-the-functions'.
+In practice we often wish to constrain the effect of a covariate to
+be the same for some of the $\eta_j$ and to have no effect for others.
+We shall see below that this constraints idea is important
+for several categorical models because of a popular parallelism assumption.
+As a specific example, for VGAMs we may wish to take
+\begin{eqnarray*}
+\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
+\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
+\end{eqnarray*}
+so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$.
+For VGAMs, we can represent these models using
+\begin{eqnarray}
+\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
+\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
+\label{eqn:constraints.VGAM}
+\end{eqnarray}
+where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
+\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
+possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
+vector of unknown intercepts. With no constraints at all, $\bH_1 =
+\bH_2 = \cdots = \bH_p = \bI_M$ and $\bbeta_{(1)}^* = \bbeta_{(1)}$.
+Like the $\bif_k$, the $\bif_k^*$ are centered for uniqueness.
+For VGLMs, the $\bif_k$ are linear so that
+\begin{eqnarray}
+{\bB}^{\top} &=&
+\left(
+\bH_1 \bbeta_{(1)}^*
+ \;
+\Bigg|
+ \;
+\bH_2 \bbeta_{(2)}^*
+ \;
+\Bigg|
+ \;
+\cdots
+ \;
+\Bigg|
+ \;
+\bH_p \bbeta_{(p)}^*
+\right) 
+\label{eqn:lin.coefs4}
+\end{eqnarray}
+for some vectors
+$\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
+
+
+The
+$\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$ using
+Kronecker product operations.
+For example, with trivial constraints,
+$\bX_{\sVLM} = \bX \otimes \bI_M$.
+More generally,
+\begin{eqnarray}
+\bX_{\sVLM} &=& 
+\left(
+\left( \bX \, \bie_{1} \right) \otimes \bH_1
+ \;
+\Bigg|
+ \;
+\left( \bX \, \bie_{2} \right) \otimes \bH_2
+ \;
+\Bigg|
+ \;
+\cdots
+ \;
+\Bigg|
+ \;
+\left( \bX \, \bie_{p} \right) \otimes \bH_p
+\right)
+\label{eqn:X_vlm_Hk}
+\end{eqnarray}
+($\bie_{k}$ is a vector of zeros except for a one in the $k$th position)
+so that 
+$\bX_{\sVLM}$ is $(nM) \times p^*$ where
+$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
+of columns of all the constraint matrices.
+Note that $\bX_{\sVLM}$ and \bX{} can be obtained by
+\texttt{model.matrix(vglmObject, type = "vlm")}
+and
+\texttt{model.matrix(vglmObject, type = "lm")}
+respectively.
+Equation \ref{eqn:lin.coefs4} focusses on the rows of \bB{} whereas
+(\ref{eq:lin.pred}) focusses on the columns.
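+For instance (a small illustrative snippet, assuming a fitted
+\texttt{vglm()} object called \texttt{vglmObject}):
+<<label = xvlm-dims, eval = FALSE>>=
+dim(model.matrix(vglmObject, type = "lm"))   # n x p
+dim(model.matrix(vglmObject, type = "vlm"))  # (nM) x p*
+@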
+
+
+VGAMs are estimated by applying a modified vector backfitting algorithm
+\citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.
+
+
+
+\subsection{Vector splines and penalized likelihood}
+\label{sec:ex.vspline}
+
+If (\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
+natural extension of the cubic smoothing spline to vector responses)
+then it can be shown that the resulting solution maximizes a penalized
+likelihood; some details are sketched in \cite{yee:step:2007}. In fact,
+knot selection for vector splines follows the same idea as O-splines
+\citep[see][]{wand:orme:2008} in order to lower the computational cost.
+
+
+The usage of \texttt{vgam()} with smoothing is very similar
+to \texttt{gam()} \citep{gam:pack:2009}, e.g.,
+to fit a nonparametric proportional odds model
+\citep[cf. p.179 of][]{mccu:neld:1989}
+to the pneumoconiosis data one could try
+<<label = pneumocat, eval=T>>=
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+            cumulative(reverse = TRUE, parallel = TRUE), data = pneumo)
+@
+Here, setting \texttt{df = 1} means a linear fit so that
+\texttt{df = 2} affords a little nonlinearity.
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section[VGAM family functions]{\pkg{VGAM} family functions}
+\label{sec:jsscat.vgamff}
+
+
+
+This section summarizes and comments on the \VGAM{} family functions
+of Table \ref{tab:cat.quantities} for a categorical response variable
+taking values $Y=1,2,\ldots,M+1$. In its most basic invocation, the usage
+entails a trivial change compared to \texttt{glm()}: use \texttt{vglm()}
+instead and assign the \texttt{family} argument a \VGAM{} family function.
+The use of a \VGAM{} family function to fit a specific model is far
+simpler than having a different modeling function for each model.
+Options specific to that model appear as arguments of that \VGAM{} family
+function.
+
+
+
+
+
+While writing \texttt{cratio()} it was found that various authors defined
+the quantity ``continuation ratio'' differently; it therefore became
+necessary to define a ``stopping ratio''. Table \ref{tab:cat.quantities}
+defines these quantities for \VGAM{}.
+
+
+
+
+The multinomial logit model is usually described by choosing the first or
+last level of the factor to be baseline. \VGAM{} chooses the last level
+(Table \ref{tab:cat.quantities}) by default, however that can be changed
+to any other level by use of the \texttt{refLevel} argument.
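+For example, the following illustrative call (not evaluated here)
+makes the first column, \texttt{normal}, the baseline level:
+<<label = reflevel-eg, eval = FALSE>>=
+vglm(cbind(normal, mild, severe) ~ let, multinomial(refLevel = 1),
+     data = pneumo)
+@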
+
+
+
+
+If the proportional odds assumption is inadequate then one strategy is
+to try a different link function (see Section \ref{sec:jsscat.links}
+for a selection). Another alternative is to add extra terms such as
+interaction terms into the linear predictor
+\citep[available in the \proglang{S} language;][]{cham:hast:1993}.
+Another is to fit the so-called \textit{partial}
+proportional odds model \citep{pete:harr:1990}
+which \VGAM{} can fit via constraint matrices.
+
+
+
+In the terminology of \cite{agre:2002},
+\texttt{cumulative()} fits the class of \textit{cumulative link models},
+e.g.,
+\texttt{cumulative(link = probit)} is a cumulative probit model.
+For \texttt{cumulative()}
+it was difficult to decide whether
+\texttt{parallel = TRUE}
+or
+\texttt{parallel = FALSE}
+should be the default.
+In fact, the latter is (for now?).
+Users need to set
+\texttt{cumulative(parallel = TRUE)} explicitly to
+fit a proportional odds model---hopefully this will alert
+them to the fact that they are making
+the proportional odds assumption and
+check its validity (\cite{pete:1990}; e.g., through a deviance or
+likelihood ratio test). However, the default means numerical problems
+can occur with far greater likelihood.
+Thus there is tension between the two options.
+As a compromise there is now a \VGAM{} family function
+called \texttt{propodds(reverse = TRUE)} which is equivalent to
+\texttt{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}.
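+As a small (illustrative) check, the two calls below should produce
+essentially the same fit to the pneumoconiosis data:
+<<label = propodds-eg, eval = FALSE>>=
+fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
+fit2 <- vglm(cbind(normal, mild, severe) ~ let,
+             cumulative(parallel = TRUE, reverse = TRUE, link = "logit"),
+             data = pneumo)
+max(abs(coef(fit1) - coef(fit2)))  # Effectively zero
+@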
+
+
+
+By the way, note that arguments such as 
+\texttt{parallel}
+can handle a slightly more complex syntax.
+A call such as
+\code{parallel = TRUE ~ x2 + x5 - 1} means the parallelism assumption
+is only applied to $X_2$ and $X_5$.
+This might be equivalent to something like
+\code{parallel = FALSE ~ x3 + x4}, i.e., to the remaining
+explanatory variables.
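+A hedged sketch of such a call (the response and covariates here are
+hypothetical):
+<<label = partial-parallel-eg, eval = FALSE>>=
+vglm(ymatrix ~ x2 + x3 + x4 + x5,
+     cumulative(parallel = TRUE ~ x2 + x5 - 1, reverse = TRUE),
+     data = mydata)
+@
+This is a partial proportional odds model: parallelism is imposed for
+\texttt{x2} and \texttt{x5} only.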
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Other models}
+\label{sec:jsscat.othermodels}
+
+
+Given the VGLM/VGAM framework of Section \ref{sec:jsscat.VGLMVGAMoverview}
+it is found that natural extensions are readily proposed in several
+directions. This section describes some such extensions.
+
+
+
+
+\subsection{Reduced-rank VGLMs}
+\label{sec:jsscat.RRVGLMs}
+
+
+Consider a multinomial logit model where $p$ and $M$ are both large.
+A (not-too-convincing) example might be the data frame \texttt{vowel.test}
+in the package \pkg{ElemStatLearn} \citep[see][]{hast:tibs:buja:1994}.
+The vowel recognition data set involves $q=11$ symbols produced from
+8 speakers with 6 replications of each. The training data comprises
+$10$ input features (not including the intercept) based on digitized
+utterances. A multinomial logit model fitted to these data would
+have $\widehat{\bB}$ comprising $p \times (q-1) = 110$ regression
+coefficients for $n=8\times 6\times 11 = 528$ observations. The ratio
+of $n$ to the number of parameters is small, and it would be good to
+introduce some parsimony into the model.
+
+
+
+A simple and elegant solution is to represent $\widehat{\bB}$ by
+its reduced-rank approximation. To do this, partition $\bix$ into
+$(\bix_1^{\top}, \bix_2^{\top})^{\top}$ and $\bB = (\bB_1^{\top} \;
+\bB_2^{\top})^{\top}$ so that the reduced-rank regression is applied
+to $\bix_2$. In general, \bB{} is a dense matrix of full rank, i.e., rank
+$=\min(M,p)$, and since there are $M \times p$ regression coefficients
+to estimate this is `too' large for some models and/or data sets.
+If we approximate $\bB_2$ by a reduced-rank regression
+\begin{equation}
+\label{eq:rrr.BAC}
+\bB_2  =  \bC{} \, \bA^{\top}
+\end{equation}
+and if the rank $R$ is kept low then this can cut down the number of
+regression
+coefficients dramatically. If $R=2$ then the results may be biplotted
+(\texttt{biplot()} in \VGAM{}). Here, \bC{} and \bA{} are $p_2 \times R$
+and $M \times R$ respectively, and usually they are `thin'.
+
+
+More generally, the class of \textit{reduced-rank VGLMs} (RR-VGLMs)
+is simply a VGLM where $\bB_2$ is expressed as a product of two thin
+estimated matrices (Table \ref{tab:rrvglam.jss.subset}). Indeed,
+\cite{yee:hast:2003} show that RR-VGLMs are VGLMs with constraint
+matrices that are unknown and estimated. Computationally, this is
+done using an alternating method: in (\ref{eq:rrr.BAC}) estimate \bA{}
+given the current estimate of \bC{}, and then estimate \bC{} given the
+current estimate of \bA{}. This alternating algorithm is repeated until
+convergence within each IRLS iteration.
+
+
+Incidentally, special cases of RR-VGLMs have appeared in the
+literature. For example, an RR-multinomial logit model is known as the
+\textit{stereotype} model \citep{ande:1984}. Another is \cite{good:1981}'s
+RC model (see Section \ref{sec:jsscat.rrr.goodman}), which is a reduced-rank
+multivariate Poisson model. Note that the parallelism assumption of the
+proportional odds model \citep{mccu:neld:1989} can be thought of as a
+type of reduced-rank regression where the constraint matrices are thin
+($\bone_M$, actually) and known.
+
+
+
+The modeling function \texttt{rrvglm()} should work with any \VGAM{}
+family function compatible with \texttt{vglm()}. Of course, its
+applicability should be restricted to models where a reduced-rank
+regression of $\bB_2$ makes sense.
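+For instance, a rank-2 RR-multinomial logit model might be fitted to the
+vowel data mentioned above with something like the following sketch
+(assuming the response column is \texttt{y} and the inputs are
+\texttt{x.1}--\texttt{x.10}):
+\begin{Code}
+library("ElemStatLearn")
+fit.rr <- rrvglm(factor(y) ~ x.1 + x.2 + x.3 + x.4 + x.5 +
+                             x.6 + x.7 + x.8 + x.9 + x.10,
+                 multinomial, data = vowel.test, Rank = 2)
+biplot(fit.rr)  # Possible because R = 2
+\end{Code}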
+
+
+
+
+
+
+
+
+
+\subsection[Goodman's R x C association model]{Goodman's $R \times C$ association model}
+\label{sec:jsscat.rrr.goodman}
+
+
+
+
+
+Let $\bY = [(y_{ij})]$ be an $n \times M$ matrix of counts.
+Section 4.2 of \cite{yee:hast:2003} shows that Goodman's RC$(R)$ association
+model \citep{good:1981} fits within the VGLM framework by setting up
+the appropriate indicator variables, structural zeros and constraint
+matrices. Goodman's model fits a reduced-rank type model to \bY{}
+by firstly assuming that $Y_{ij}$ has a Poisson distribution, and that
+\begin{eqnarray}
+\log \, \mu_{ij} &=& \mu + \alpha_{i} + \gamma_{j} + 
+\sum_{k=1}^R a_{ik} \, c_{jk} , 
+\ \ \ i=1,\ldots,n;\ \ j=1,\ldots,M,
+\label{eqn:goodmanrc}
+\end{eqnarray}
+where $\mu_{ij} = E(Y_{ij})$ is the mean of the $i$-$j$ cell, and the
+rank $R$ satisfies $R < \min(n,M)$.
+
+
+The modeling function \texttt{grc()} should work on any two-way
+table \bY{} of counts generated by (\ref{eqn:goodmanrc}) provided
+the number of 0's is not too large. Its usage is quite simple, e.g.,
+\texttt{grc(Ymatrix, Rank = 2)} fits a rank-2 model to a matrix of counts.
+By default a rank-1 model (\texttt{Rank = 1}) is fitted.
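+As a self-contained sketch on artificial counts (the simulated
+\texttt{Ymatrix} below is purely illustrative),
+\begin{Code}
+set.seed(123)
+Ymatrix <- matrix(rpois(24, lambda = 10), nrow = 6, ncol = 4)
+grc1 <- grc(Ymatrix)            # Rank-1 fit (the default)
+grc2 <- grc(Ymatrix, Rank = 2)  # Rank-2 fit
+\end{Code}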
+
+
+
+
+\subsection{Bradley-Terry models}
+\label{sec:jsscat.brat}
+
+Consider
+an experiment consisting of $n_{ij}$ judges who compare
+pairs of items $T_i$, $i=1,\ldots,M+1$.
+They express their preferences between $T_i$ and $T_j$. 
+Let $N=\sum \sum_{i<j} n_{ij}$ be the total number of pairwise
+comparisons, and assume independence for ratings of the same pair
+by different judges and for ratings of different pairs by the same judge.
+Let $\pi_i$ be the \textit{worth} of item $T_i$,
+\[
+\pr(T_i > T_j)  =  p_{i/ij}  =  \frac{\pi_i}{\pi_i + \pi_j},
+\  \qquad i \neq {j},
+\]
+where ``$T_i>T_j$'' means $i$ is preferred over $j$.
+Suppose that $\pi_i > 0$.
+Let $Y_{ij}$ be the number of times that $T_i$ is preferred
+over $T_j$ in the $n_{ij}$ comparisons of the pairs.
+Then $Y_{ij} \sim {\rm Bin}(n_{ij},p_{i/ij})$.
+This is a Bradley-Terry model (without ties),
+and the \VGAM{} family function is \texttt{brat()}.
+
+
+Maximum likelihood estimation of the parameters $\pi_1,\ldots,\pi_{M+1}$
+involves maximizing
+\[
+\prod_{i<j}^{M+1}
+\left(
+\begin{array}{c}
+n_{ij} \\
+y_{ij}
+\end{array} \right)
+\left(
+\frac{\pi_i}{\pi_i + \pi_j}
+\right)^{y_{ij}}
+\left(
+\frac{\pi_j}{\pi_i + \pi_j}
+\right)^{n_{ij}-y_{ij}} .
+\]
+By default, $\pi_{M+1} \equiv 1$ is used for identifiability;
+however, this can be changed very easily.
+Note that one can define 
+linear predictors $\eta_{ij}$ of the form
+\begin{equation}
+\label{eq:bradter.logit}
+\logit 
+\left(
+\frac{\pi_i}{\pi_i + \pi_j}
+\right)  =  \log 
+\left(
+\frac{\pi_i}{\pi_j}
+\right)  =  \lambda_i - \lambda_j .
+\end{equation}
+The \VGAM{} framework can handle the Bradley-Terry model only for
+intercept-only models; it has
+\begin{equation}
+\label{eq:bradter}
+\lambda_j  =  \eta_j  =  \log\, \pi_j = \beta_{(1)j},
+\ \ \ \ j=1,\ldots,M.
+\end{equation}
+
+
+As well as having many applications in the field of preferences,
+the Bradley-Terry model has many uses in modeling `contests' between
+teams $i$ and $j$, where only one of the teams can win in each
+contest (ties are not allowed under the classical model).
+The packaging function \texttt{Brat()} can be used to
+convert a square matrix into one that has more columns, to
+serve as input to \texttt{vglm()}.
+An example is journal citation data, where a citation of article B
+by article A is a win for article B and a loss for article A.
+On a specific data set,
+<<>>=
+journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
+squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
+                      498, 68, NA, 325,   221, 17, 142, NA), 4, 4)
+dimnames(squaremat) <- list(winner = journal, loser = journal)
+@
+then \texttt{Brat(squaremat)} returns a $1 \times 12$ matrix.
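+A Bradley-Terry model can then be fitted and the estimated worths
+extracted with, e.g. (a minimal sketch),
+\begin{Code}
+bfit <- vglm(Brat(squaremat) ~ 1, brat, trace = TRUE)
+Coef(bfit)  # Estimated worths; the reference item has worth 1
+\end{Code}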
+
+
+
+
+
+
+
+\subsubsection{Bradley-Terry model with ties}
+\label{sec:cat.bratt}
+
+
+The \VGAM{} family function \texttt{bratt()} implements
+a Bradley-Terry model with ties (no preference), e.g.,
+where both $T_i$ and $T_j$ are equally good or bad.
+Here we assume
+\begin{eqnarray*}
+ \pr(T_i > T_j) &=& \frac{\pi_i}{\pi_i + \pi_j + \pi_0},
+\ \qquad
+ \pr(T_i = T_j)  =  \frac{\pi_0}{\pi_i + \pi_j + \pi_0},
+\end{eqnarray*}
+with $\pi_0 > 0$ as an extra parameter.
+It has 
+\[
+\boldeta=(\log \pi_1,\ldots, \log \pi_{M-1}, \log \pi_{0})^{\top}
+\]
+by default, where there are $M$ competitors and $\pi_M \equiv 1$.
+As with \texttt{brat()}, one can choose a different reference group
+and reference value.
+
+
+Other \R{} packages for the Bradley-Terry model
+include \pkg{BradleyTerry2}
+by H. Turner and D. Firth
+\citep[with and without ties;][]{firth:2005,firth:2008}
+and \pkg{prefmod} \citep{Hatzinger:2009}.
+
+
+
+
+\begin{table}[tt]
+\centering
+\begin{tabular}{|l|c|}
+\hline
+\pkg{VGAM} family function & Independent parameters \\
+\hline
+\texttt{ABO()} & $p, q$ \\
+\texttt{MNSs()} & $m_S, m_s, n_S$ \\
+\texttt{AB.Ab.aB.ab()} & $p$ \\
+\texttt{AB.Ab.aB.ab2()} & $p$ \\
+\texttt{AA.Aa.aa()} & $p_A$ \\
+\texttt{G1G2G3()} & $p_1, p_2, f$ \\
+\hline
+\end{tabular}
+\caption{Some genetic models currently implemented
+and their unique parameters.
+\label{tab:gen.all}
+}
+\end{table}
+
+
+
+
+
+\subsection{Genetic models}
+\label{sec:jsscat.genetic}
+
+
+There are quite a number of population genetic models based on the
+multinomial distribution,
+e.g., \cite{weir:1996}, \cite{lang:2002}.
+Table \ref{tab:gen.all} lists some \pkg{VGAM} family functions for such models.
+
+
+
+
+For example, the ABO blood group system
+has two independent parameters $p$ and $q$, say.
+Here,
+the blood groups A, B and O form six possible combinations (genotypes)
+consisting of AA, AO, BB, BO, AB, OO
+(see Table \ref{tab:ABO}). A and B are dominant over
+blood type O. Let $p$, $q$ and $r$ be the probabilities
+for A, B and O respectively (so that
+$p+q+r=1$) for a given population. 
+The log-likelihood function is 
+\[
+\ell(p,q) \;=\; n_A\, \log(p^2 + 2pr) + n_B\, \log(q^2 + 2qr) + n_{AB}\,
+\log(2pq) + 2 n_O\, \log(1-p-q),
+\]
+where $r = 1 - p - q$, $p \in (\,0,1\,)$,
+$q \in (\,0,1\,)$, $p+q<1$.
+We let $\boldeta = (g(p), g(r))^{\top}$ where $g$ is the link function.
+Any $g$ from Table \ref{tab:jsscat.links} appropriate for
+a parameter $\theta \in (0,1)$ will do.
+
+
+A toy example where $p=p_A$ and $q=p_B$ is
+<<>>=
+abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
+fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
+coef(fit, matrix = TRUE)
+Coef(fit)  # Estimated pA and pB
+@
+The function \texttt{Coef()}, which applies only to intercept-only models,
+inverts $g_{j}(\theta_{j})=\eta_{j}$, i.e., it applies
+the inverse link function $g_{j}^{-1}$ to $\widehat{\eta}_{j}$
+to give $\widehat{\theta}_{j}$.
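+A quick sketch of this relationship for the fit above
+(which uses the default \texttt{logit} links) is
+\begin{Code}
+logit(coef(fit), inverse = TRUE)  # Should agree with Coef(fit)
+\end{Code}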
+
+
+
+
+
+
+
+\begin{table}[tt]
+% Same as Table 14.1 of E-J, and Table 2.6 of Weir 1996
+\begin{center}
+\begin{tabular}{|l|cc|cc|c|c|}
+\hline
+Genotype   & AA  & AO  & BB  &  BO  & AB  &  OO  \\
+Probability&$p^2$&$2pr$&$q^2$&$ 2qr$&$2pq$& $r^2$\\
+Blood group&  A  &  A  &  B  &  B   &  AB &  O \\
+\hline
+\end{tabular}
+\end{center}
+\caption{Probability table for the ABO blood group system.
+Note that $p$ and $q$ are the parameters and $r=1-p-q$.
+\label{tab:ABO}
+}
+\end{table}
+
+
+
+
+
+\subsection{Three main distributions}
+\label{sec:jsscat.3maindist}
+
+\cite{agre:2002} discusses three main distributions for categorical
+variables: binomial, multinomial, and Poisson
+\citep{thom:2009}.
+All these are well-represented in the \VGAM{} package,
+accompanied by variant forms.
+For example,
+there is a \VGAM{} family function named \texttt{mbinomial()}
+which implements a matched binomial
+(suitable for matched case-control studies);
+other variants include
+Poisson ordination (useful in ecology for multi-species environmental data),
+negative binomial families,
+positive, zero-altered and zero-inflated variants,
+and the bivariate odds ratio model
+\citep[\texttt{binom2.or()}; see Section 6.5.6 of][]{mccu:neld:1989}.
+The latter has an \texttt{exchangeable} argument to allow for an
+exchangeable error structure:
+\begin{eqnarray}
+\bH_1  = 
+\left( \begin{array}{cc}
+1 & 0 \\
+1 & 0 \\
+0 & 1 \\
+\end{array} \right), \qquad
+\bH_k  = 
+\left( \begin{array}{c}
+1 \\
+1 \\
+0 \\
+\end{array} \right), \quad k=2,\ldots,p,
+\label{eqn:blom.exchangeable}
+\end{eqnarray}
+since, for data $(Y_1,Y_2,\bix)$,
+$\logit \, P\!\left( Y_{j} = 1 \Big| \bix \right)  = 
+\eta_{j}$ for ${j}=1,2$, and
+$\log \, \psi = \eta_{3}$
+where $\psi$ is the odds ratio,
+and so $\eta_{1}=\eta_{2}$.
+Here, \texttt{binom2.or()} has \texttt{zero = 3} by default, meaning $\psi$ is
+modelled as intercept-only
+(in general, \texttt{zero} may be assigned an integer vector
+such that the value $j$ means $\eta_{j} = \beta_{(j)1}$,
+i.e., the $j$th linear/additive predictor is intercept-only).
+See the online help for all of these models.
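+As a hypothetical sketch (the data frame \texttt{bdat} and its
+binary responses \texttt{y1} and \texttt{y2} are illustrative
+names only), an exchangeable fit is
+\begin{Code}
+fit.or <- vglm(cbind(y1, y2) ~ x2 + x3,
+               binom2.or(exchangeable = TRUE, zero = 3), data = bdat)
+constraints(fit.or)  # Should return the H_k displayed above
+\end{Code}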
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Some user-oriented topics}
+\label{sec:jsscat.userTopics}
+
+
+Making the most of \VGAM{} requires an understanding of the general
+VGLM/VGAM framework described in Section \ref{sec:jsscat.VGLMVGAMoverview}.
+In this section we connect elements of that framework with the software.
+Before doing so it is noted that
+a fitted \VGAM{} categorical model has access to the usual
+generic functions, e.g.,
+\texttt{coef()} for
+$\left(\widehat{\bbeta}_{(1)}^{*T},\ldots,\widehat{\bbeta}_{(p)}^{*T}\right)^{\top}$
+(see Equation \ref{eqn:lin.coefs4}),
+\texttt{constraints()} for $\bH_k$,
+\texttt{deviance()} for $2\left(\ell_{\mathrm{max}} - \ell\right)$,
+\texttt{fitted()} for $\widehat{\bmu}_i$,
+\texttt{logLik()} for $\ell$,
+\texttt{predict()} for $\widehat{\boldeta}_i$,
+\texttt{print()},
+\texttt{residuals(..., type = "response")} for $\biy_i - \widehat{\bmu}_i$ etc.,
+\texttt{summary()},
+\texttt{vcov()} for $\widehat{\Var}(\widehat{\bbeta})$,
+etc.
+The methods function for the extractor function
+\texttt{coef()} has an argument \texttt{matrix}
+which, when set to \texttt{TRUE}, returns $\widehat{\bB}$
+(see Equation \ref{gammod}) as a $p \times M$ matrix,
+and this is particularly useful for confirming that a fit
+has made a parallelism assumption.
+
+
+
+
+
+
+
+\subsection{Common arguments}
+\label{sec:jsscat.commonArgs}
+
+
+The structure of the unified framework given in
+Section \ref{sec:jsscat.VGLMVGAMoverview}
+appears clearly through
+the pool of common arguments
+shared by the
+\VGAM{} family functions in Table \ref{tab:cat.quantities}.
+In particular,
+\texttt{reverse} and
+\texttt{parallel}
+are prominent with CDA.
+These are merely convenient shortcuts for the argument \texttt{constraints},
+which accepts a named list of constraint matrices $\bH_k$.
+For example, setting
+\texttt{cumulative(parallel = TRUE)} would constrain the coefficients $\beta_{(j)k}$
+in (\ref{gammod2}) to be equal for all $j=1,\ldots,M$,
+each separately for $k=2,\ldots,p$.
+That is, $\bH_k = \bone_M$.
+The argument \texttt{reverse} determines the `direction' of
+the parameter or quantity.
+
+Another argument not so much used with CDA is \texttt{zero};
+this accepts a vector specifying which $\eta_j$ are to be modelled as
+intercept-only; assigning a \texttt{NULL} means none are.
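+To illustrate the equivalence, here is a hypothetical sketch with
+$M = 3$ (a four-category ordinal response matrix \texttt{ymat} and a
+data frame \texttt{mydat} are assumed) in which the two calls should
+yield identical fits:
+\begin{Code}
+clist <- list("(Intercept)" = diag(3), "x2" = rbind(1, 1, 1))
+fit.a <- vglm(ymat ~ x2, cumulative(parallel = TRUE), data = mydat)
+fit.b <- vglm(ymat ~ x2, cumulative, constraints = clist, data = mydat)
+\end{Code}
+Here \texttt{rbind(1, 1, 1)} is just $\bone_3$.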
+
+
+
+
+
+
+
+
+\subsection{Link functions}
+\label{sec:jsscat.links}
+
+Almost all \VGAM{} family functions
+(one notable exception is \texttt{multinomial()})
+allow, in theory, for any link function to be assigned to each $\eta_j$.
+This provides maximum capability.
+If a link function has an associated known parameter then there is an
+extra argument to pass it in.
+For example, \texttt{link = "logoff", earg = list(offset = 1)}
+signifies a log link with a unit offset:
+$\eta_{j} = \log(\theta_{j} + 1)$ for some parameter $\theta_{j}\ (> -1)$.
+The name \texttt{earg} stands for ``extra argument''.
+Table \ref{tab:jsscat.links} lists some links relevant to categorical data.
+While the default gives a reasonable first choice,
+users are encouraged to try different links.
+For example, fitting a binary regression model
+(\texttt{binomialff()}) to the coal miners data set \texttt{coalminers} with
+respect to the response wheeze gives a
+nonsignificant regression coefficient for $\beta_{(1)3}$ with probit analysis
+but not with a logit link when
+$\eta = \beta_{(1)1} + \beta_{(1)2} \, \mathrm{age} + \beta_{(1)3} \, \mathrm{age}^2$.
+Developers and serious users are encouraged to write and use
+new link functions compatible with \VGAM.
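+Such a comparison might be coded as follows
+(a sketch; it assumes \texttt{coalminers} holds the counts \texttt{BW},
+\texttt{Bw}, \texttt{bW}, \texttt{bw} of the four breathlessness--wheeze
+combinations, capital letters denoting presence):
+\begin{Code}
+fit.pro <- vglm(cbind(BW + bW, Bw + bw) ~ age + I(age^2),
+                binomialff(link = "probit"), data = coalminers)
+summary(fit.pro)  # Inspect the coefficient of I(age^2)
+\end{Code}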
+
+
+
+
+
+
+\begin{table*}[tt]
+\centering
+\medskip
+\begin{tabular}{|l|c|c|}
+\hline
+Link function & $g(\theta)$ & Range of $\theta$ \\
+\hline
+\texttt{cauchit()} & $\tan(\pi(\theta-\frac12))$ & $(0,1)$ \\
+\texttt{cloglog()} & $\log_e\{-\log_e(1 - \theta)\}$ & $(0,1)$ \\
+\texttt{fisherz()} & 
+$\frac12\,\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
+\texttt{identity()} & $\theta$ & $(-\infty,\infty)$ \\
+\texttt{logc()} & $\log_e(1 - \theta)$ & $(-\infty,1)$ \\
+\texttt{loge()} & $\log_e(\theta)$ & $(0,\infty)$ \\
+\texttt{logit()} & $\log_e(\theta/(1 - \theta))$ & $(0,1)$ \\
+\texttt{logoff()} & $\log_e(\theta + A)$ & $(-A,\infty)$ \\
+\texttt{probit()} & $\Phi^{-1}(\theta)$ & $(0,1)$ \\
+\texttt{rhobit()} & $\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
+\hline
+\end{tabular}
+\caption{
+Some \VGAM{} link functions pertinent to this article.
+\label{tab:jsscat.links}
+}
+\end{table*}
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Examples}
+\label{sec:jsscat.eg}
+
+This section illustrates CDA modeling on two
+data sets in order to give a flavour of what is available in the package.
+
+
+
+
+%20130919
+%Note: 
+%\subsection{2008 World Fly Fishing Championships}
+%\label{sec:jsscat.eg.WFFC}
+%are deleted since there are problems with accessing the \texttt{wffc.nc}
+%data etc. since they are now in \pkg{VGAMdata}.
+
+
+
+
+
+
+
+\subsection{Marital status data}
+\label{sec:jsscat.eg.mstatus}
+
+We fit a nonparametric multinomial logit model to data from
+a self-administered questionnaire collected in a large New Zealand
+workforce observational study conducted during 1992--3.
+The data were augmented by a second study consisting of retirees.
+For homogeneity, this analysis is restricted
+to a subset of 6053 European males with no missing values.
+The ages ranged between 16 and 88 years.
+The data can be considered a reasonable representation of the white
+male New Zealand population in the early 1990s, and
+are detailed in \cite{macm:etal:1995} and \cite{yee:wild:1996}.
+We are interested in exploring how $Y=$ marital status varies as a function
+of $x_2=$ age. The nominal response $Y$ has four levels;
+in sorted order, they are divorced or separated, married or partnered,
+single and widower.
+We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
+and will choose the married/partnered (second level) as the reference group
+because the other levels emanate directly from it.
+
+Suppose the data are in a data frame called \texttt{marital.nz}
+and looks like
+<<>>=
+head(marital.nz, 4)
+summary(marital.nz)
+@
+We fit the VGAM
+<<>>=
+fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
+               data = marital.nz)
+@
+
+Once again, let us first check the input.
+<<>>=
+head(depvar(fit.ms), 4)
+colSums(depvar(fit.ms))
+@
+This seems okay.
+
+
+
+
+Now the estimated component functions $\widehat{f}_{(s)2}(x_2)$
+may be plotted with
+<<fig=F>>=
+# Plot output
+mycol <- c("red", "darkgreen", "blue")
+par(mfrow = c(2, 2))
+plot(fit.ms, se = TRUE, scale = 12,
+         lcol = mycol, scol = mycol)
+
+# Plot output overlayed
+#par(mfrow=c(1,1))
+plot(fit.ms, se = TRUE, scale = 12,
+         overlay = TRUE,
+         llwd = 2,
+         lcol = mycol, scol = mycol)
+@
+to produce Figure \ref{fig:jsscat.eg.mstatus}.
+The \texttt{scale} argument is used here to ensure that the $y$-axes have
+a common scale---this makes comparisons between the component functions
+less susceptible to misinterpretation.
+The first three plots are the (centered) $\widehat{f}_{(s)2}(x_2)$ for
+$\eta_1$,
+$\eta_2$,
+$\eta_3$,
+where
+\begin{eqnarray}
+\label{eq:jsscat.eg.nzms.cf}
+\eta_{s}  = 
+\log(\pr(Y={t}) / \pr(Y={2}))  = 
+\beta_{(s)1} + f_{(s)2}(x_2),
+\end{eqnarray}
+$(s,t) = (1,1), (2,3), (3,4)$,
+and $x_2$ is \texttt{age}.
+The last plot shows the smooths overlaid to aid comparison.
+
+
+It may be seen that the $\pm 2$ standard error bands
+about the \texttt{Widowed} group are particularly wide at
+young ages because of a paucity of data, and
+likewise at old ages amongst the \texttt{Single}s.
+The $\widehat{f}_{(s)2}(x_2)$ appear as one would expect.
+The log relative risk of
+being single relative to being married/partnered drops sharply from
+ages 16 to 40.
+The fitted function for the \texttt{Widowed} group increases
+with \texttt{age} and looks reasonably linear.
+The $\widehat{f}_{(1)2}(x_2)$
+suggests a possible maximum around 50 years old---this
+could indicate the greatest marital conflict occurs during
+the mid-life crisis years!
+
+
+
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
+# Plot output
+mycol <- c("red", "darkgreen", "blue")
+ par(mfrow = c(2, 2))
+ par(mar = c(4.2, 4.0, 1.2, 2.2) + 0.1)
+plot(fit.ms, se = TRUE, scale = 12,
+         lcol = mycol, scol = mycol)
+
+# Plot output overlaid
+#par(mfrow = c(1, 1))
+plot(fit.ms, se = TRUE, scale = 12,
+         overlay = TRUE,
+         llwd = 2,
+         lcol = mycol, scol = mycol)
+@
+\caption{
+Fitted (and centered) component functions
+$\widehat{f}_{(s)2}(x_2)$
+from the NZ marital status data
+(see Equation \ref{eq:jsscat.eg.nzms.cf}).
+The bottom RHS plot shows the smooths overlaid.
+\label{fig:jsscat.eg.mstatus}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+The methods function for \texttt{plot()} can also plot the
+derivatives of the smooths.
+The call
+<<fig=F>>=
+plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
+@
+results in Figure \ref{fig:jsscat.eg.mstatus.cf.deriv}.
+Once again the $y$-axis scales are commensurate.
+
+\setkeys{Gin}{width=\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=7.2,height=2.4,echo=FALSE>>=
+# Plot output
+ par(mfrow = c(1, 3))
+ par(mar = c(4.5, 4.0, 0.2, 2.2) + 0.1)
+plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
+@
+\caption{
+Estimated first derivatives of the component functions,
+$\widehat{f'}_{(s)2}(x_2)$,
+from the NZ marital status data
+(see Equation \ref{eq:jsscat.eg.nzms.cf}).
+\label{fig:jsscat.eg.mstatus.cf.deriv}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+The derivative for the \texttt{Divorced/Separated} group appears
+linear so that a quadratic component function could be tried.
+Not surprisingly the \texttt{Single} group shows the greatest change;
+also, $\widehat{f'}_{(2)2}(x_2)$ is approximately linear till 50
+and then flat---this suggests one could fit a piecewise quadratic
+function to model that component function up to 50 years.
+The \texttt{Widowed} group appears largely flat.
+We thus fit the parametric model
+<<>>=
+foo <- function(x, elbow = 50)
+  poly(pmin(x, elbow), 2)
+
+clist <- list("(Intercept)" = diag(3),
+             "poly(age, 2)" = rbind(1, 0, 0),
+             "foo(age)"     = rbind(0, 1, 0),
+             "age"          = rbind(0, 0, 1))
+fit2.ms <-
+    vglm(mstatus ~ poly(age, 2) + foo(age) + age,
+         family = multinomial(refLevel = 2),
+         constraints = clist,
+         data = marital.nz)
+@
+Then
+<<>>=
+coef(fit2.ms, matrix = TRUE)
+@
+confirms that one term was used for each component function.
+The plots from
+<<fig=F>>=
+par(mfrow = c(2, 2))
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[3], scol = mycol[3], which.term = 3)
+@
+are given in Figure \ref{fig:jsscat.eg.mstatus.vglm}
+and appear like
+Figure \ref{fig:jsscat.eg.mstatus}.
+
+
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
+# Plot output
+par(mfrow=c(2,2))
+ par(mar=c(4.5,4.0,1.2,2.2)+0.1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[3], scol = mycol[3], which.term = 3)
+@
+\caption{
+Parametric version of \texttt{fit.ms}: \texttt{fit2.ms}.
+The component functions are now quadratic, piecewise quadratic/zero,
+or linear.
+\label{fig:jsscat.eg.mstatus.vglm}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+
+It is possible to perform very crude inference based on the heuristic theory
+of a deviance test:
+<<>>=
+deviance(fit.ms) - deviance(fit2.ms)
+@
+is small, so it seems the parametric model is quite reasonable
+against the original nonparametric model.
+Specifically,
+the difference in the number of `parameters' is approximately
+<<>>=
+(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
+@
+which gives an approximate $p$ value of
+<<>>=
+pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
+@
+Thus \texttt{fit2.ms} appears quite reasonable.
+
+
+
+
+
+
+
+
+The estimated probabilities of the original fit can be plotted
+against \texttt{age} using
+<<fig=F>>=
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
+     type = "l", las = 1, lwd = 2, ylim = 0:1,
+     ylab = "Fitted probabilities",
+     xlab = "Age",  # main="Marital status amongst NZ Male Europeans",
+     col = c(mycol[1], "black", mycol[-1])))
+legend(x = 52.5, y = 0.62,  # x="topright",
+       col = c(mycol[1], "black", mycol[-1]),
+       lty = 1:4,
+       legend = colnames(fit.ms@y), lwd = 2)
+abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
+@
+which gives Figure \ref{fig:jsscat.eg.mstatus.fitted}.
+This shows that between 80--90\% of NZ white males
+aged from their early 30s to mid-70s
+were married/partnered.
+The proportion widowed
+started to rise steeply from 70 years onwards but remained below 0.5
+since males die younger than females on average.
+
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
+ par(mfrow = c(1,1))
+ par(mar = c(4.5,4.0,0.2,0.2)+0.1)
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
+     type = "l", las = 1, lwd = 2, ylim = 0:1,
+     ylab = "Fitted probabilities",
+     xlab = "Age",
+     col = c(mycol[1], "black", mycol[-1])))
+legend(x = 52.5, y = 0.62,
+       col = c(mycol[1], "black", mycol[-1]),
+       lty = 1:4,
+       legend = colnames(fit.ms@y), lwd = 2.1)
+abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
+@
+\caption{
+Fitted probabilities for each class for the
+NZ male European
+marital status data
+(from Equation \ref{eq:jsscat.eg.nzms.cf}).
+\label{fig:jsscat.eg.mstatus.fitted}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+
+
+
+
+\subsection{Stereotype model}
+\label{sec:jsscat.eg.grc.stereotype}
+
+We reproduce some of the analyses of \cite{ande:1984} regarding the
+progress of 101 patients with back pain
+using the data frame \texttt{backPain} from \pkg{gnm}
+\citep{Rnews:Turner+Firth:2007,Turner+Firth:2009}.
+The three prognostic variables are
+length of previous attack ($x_1=1,2$),
+pain change ($x_2=1,2,3$) 
+and lordosis ($x_3=1,2$).
+Following him, we treat these as numerical, and standardize and negate them.
+%
+The output
+<<>>=
+# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
+head(backPain, 4)
+summary(backPain)
+backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
+@
+displays the six ordered categories.
+Now a rank-1 stereotype model can be fitted with
+<<>>=
+bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
+@
+Then
+<<>>=
+Coef(bp.rrmlm1)
+@
+are the fitted \bA, \bC{} and $\bB_1$ (see Equation \ref{eq:rrr.BAC}
+and Table \ref{tab:rrvglam.jss.subset}), which agree with his Table 6.
+Here, what are known as ``corner constraints'' are used
+($(1,1)$ element of \bA{} $\equiv 1$),
+and only the intercepts are not subject to any reduced-rank regression
+by default.
+The maximized log-likelihood from \textsl{\texttt{logLik(bp.rrmlm1)}}
+is $\Sexpr{round(logLik(bp.rrmlm1), 2)}$.
+The standard errors of each parameter can be obtained by
+\textsl{\texttt{summary(bp.rrmlm1)}}.
+The negative elements of $\widehat{\bC}$ imply the
+latent variable $\widehat{\nu}$ decreases in value with increasing
+\textsl{\texttt{sx1}},
+\textsl{\texttt{sx2}} and
+\textsl{\texttt{sx3}}.
+The elements of $\widehat{\bA}$ tend to decrease so it suggests
+patients get worse as $\nu$ increases,
+i.e., get better as \textsl{\texttt{sx1}},
+\textsl{\texttt{sx2}} and
+\textsl{\texttt{sx3}} increase.
+
+
+
+
+
+
+<<echo=FALSE>>=
+set.seed(123)
+@
+A rank-2 model fitted \textit{with a different normalization}
+<<>>=
+bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
+                   Corner = FALSE, Uncor = TRUE)
+@
+produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
+In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
+so that the latent variables are also scaled to have unit variance.
+The fit was biplotted
+(rows of $\widehat{\bC}$ plotted as arrows;
+ rows of $\widehat{\bA}$ plotted as labels) using
+<<figure=F>>=
+biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
+#      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
+       xlim = c(-4.5, 2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
+       chull = TRUE, clty = 2, ccol = "blue")
+@
+to give Figure \ref{fig:jsscat.eg.rrmlm2.backPain}.
+It is interpreted via inner products due to (\ref{eq:rrr.BAC}).
+The different normalization means that the interpretation of $\nu_1$
+and $\nu_2$ has changed, e.g., increasing
+\textsl{\texttt{sx1}},
+\textsl{\texttt{sx2}} and
+\textsl{\texttt{sx3}} results in increasing $\widehat{\nu}_1$ and
+patients improve more.
+Many of the latent variable points $\widehat{\bnu}_i$ are coincident
+due to the discrete nature of the $\bix_i$. The rows of $\widehat{\bA}$
+are centered on the blue labels (rather cluttered unfortunately) and
+do not seem to vary much as a function of $\nu_2$.
+In fact this is confirmed by \cite{ande:1984} who showed a rank-1
+model is to be preferred.
+
+
+
+This example demonstrates the ability to obtain a low-dimensional view
+of higher-dimensional data. The package's website has additional
+documentation including more detailed Goodman's RC and stereotype
+examples.
+
+
+
+
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=5.3,echo=FALSE>>=
+# Plot output
+ par(mfrow=c(1,1))
+ par(mar=c(4.5,4.0,0.2,2.2)+0.1)
+
+biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
+#      xlim = c(-1,6), ylim = c(-1.2,4),  # Use this if not scaled
+       xlim = c(-4.5,2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
+       chull = TRUE, clty = 2, ccol = "blue")
+@
+\caption{
+Biplot of a rank-2 reduced-rank multinomial logit (stereotype) model
+fitted to the back pain data.
+A convex hull surrounds the latent variable scores
+$\widehat{\bnu}_i$
+(whose observation numbers are obscured because of their discrete nature).
+The position of the $j$th row of $\widehat{\bA}$
+is the center of the label ``\texttt{log(mu[,j]/mu[,6])}''.
+\label{fig:jsscat.eg.rrmlm2.backPain}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Some implementation details}
+\label{sec:jsscat.implementDetails}
+
+This section describes some implementation details of \VGAM{}
+which will be of more interest to the developer than to the casual user.
+
+
+
+\subsection{Common code}
+\label{sec:jsscat.implementDetails.code}
+
+It is good programming practice to write reusable code where possible.
+All the \VGAM{} family functions in Table \ref{tab:cat.quantities}
+process the response in the same way because the same segment of code
+is executed. This offers a degree of uniformity in terms of how input is
+handled, and also for software maintenance
+(\cite{altm:jack:2010} enumerates good programming techniques and references).
+As well, the default initial values are computed in the same manner
+based on sample proportions of each level of $Y$.
+
+
+
+
+
+\subsection[Matrix-band format of wz]{Matrix-band format of \texttt{wz}}
+\label{sec:jsscat.implementDetails.mbformat}
+
+The working weight matrices $\bW_i$ may become large for categorical
+regression models. In general, we have to evaluate the $\bW_i$
+for $i=1,\ldots,n$, and naively, this could be held in an \texttt{array} of
+dimension \texttt{c(M, M, n)}. However, since the $\bW_i$ are symmetric
+positive-definite it suffices to only store the upper or lower half of
+the matrix.
+
+
+
+The variable \texttt{wz} in \texttt{vglm.fit()}
+stores the working weight matrices $\bW_i$ in 
+a special format called the \textit{matrix-band} format. This
+format comprises an $n \times M^*$ matrix where
+\[
+M^*  =  \sum_{i=1}^{\footnotesize \textit{hbw}} \;
+\left(M-i+1\right)  =  
+\frac12 \, \textit{hbw}\, \left(2\,M - \textit{hbw} +1\right)
+\]
+is the number of columns. Here, \textit{hbw} refers to the
+\textit{half-bandwidth} of the matrix, which is an integer
+between 1 and $M$ inclusive. A diagonal matrix has
+unit half-bandwidth, a tridiagonal matrix has half-bandwidth 2, etc.
+
+
+Suppose $M=4$. Then \texttt{wz} will have up to $M^*=10$ columns
+enumerating the unique elements of $\bW_i$ as follows:
+\begin{eqnarray}
+\bW_i  =  
+\left( \begin{array}{rrrr}
+1 & 5 & 8 & 10 \\
+  & 2 & 6 & 9 \\
+  &   & 3 & 7 \\
+  &   &   & 4 
+\end{array} \right).
+\label{eqn:hbw.eg}
+\end{eqnarray}
+That is, the order is firstly the diagonal, then the band above that,
+followed by the second band above the diagonal etc.
+Why is such a format adopted? 
+For this example, if $\bW_i$ is diagonal then only the first 4 columns
+of \texttt{wz} are needed. If $\bW_i$ is tridiagonal then only the
+first 7 columns of \texttt{wz} are needed. 
+If $\bW_i$ \textit{is} banded then \texttt{wz} need not have
+all $\frac12 M(M+1)$ columns; only $M^*$ columns suffice, and the
+rest of the elements of $\bW_i$ are implicitly zero.
+As well as reducing the size of \texttt{wz} itself in most cases, the
+matrix-band format often makes the computation of \texttt{wz} very
+simple and efficient. Furthermore, a Cholesky decomposition of a
+banded matrix will be banded. A final reason is that sometimes we
+want to input $\bW_i$ into \VGAM: if \texttt{wz} is $M \times M \times
+n$ then \texttt{vglm(\ldots, weights = wz)} will result in an error
+whereas it will work if \texttt{wz} is an $n \times M^*$ matrix.
+
+
+
+To facilitate the use of the matrix-band format,
+a few auxiliary functions have been written.
+In particular, there is \texttt{iam()} which gives the indices
+for an array-to-matrix mapping.
+In the $4\times 4$ example above,
+<<>>=
+iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
+@
+returns the indices for the respective array coordinates for
+successive columns of matrix-band format
+(see Equation \ref{eqn:hbw.eg}).
+If \texttt{diag = FALSE} then the first 4 elements in each vector
+are omitted. Note that the first two arguments of 
+\texttt{iam()} are not used here and have been assigned
+\texttt{NA}s for simplicity.
+For its use on the multinomial logit model, where
+$(\bW_i)_{jj} = w_i\,\mu_{ij} (1-\mu_{ij}),\ j=1,\ldots,M$, and 
+$(\bW_i)_{jk} = -w_i\,\mu_{ij} \mu_{ik},\ j\neq k$,
+this can be programmed succinctly like
+\begin{Code}
+wz <- mu[, 1:M] * (1 - mu[, 1:M])
+if (M > 1) {
+  index <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
+  wz <- cbind(wz, -mu[, index$row] * mu[, index$col])
+}
+wz <- w * wz
+\end{Code}
+(the actual code is slightly more complicated).
+In general, \VGAM{} family functions can be remarkably compact,
+e.g.,
+\texttt{acat()},
+\texttt{cratio()}
+and
+\texttt{multinomial()} are all less than 120 lines of code each.
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Extensions and utilities}
+\label{sec:jsscat.extnUtil}
+
+This section describes some useful utilities/extensions of the above.
+
+
+
+\subsection{Marginal effects}
+\label{sec:jsscat.extnUtil.margeff}
+
+
+Models such as the multinomial logit and cumulative link models
+specify the posterior probability $p_{j} = \pr(Y=j|\bix)$ directly.
+In some applications, knowing the derivative of $p_{j}$
+with respect to some of the $x_k$ is useful;
+in fact, often just knowing the sign is important.
+The function \texttt{margeff()} computes the derivatives and
+returns them as a $p \times (M+1) \times n$ array.
+For the multinomial logit model it is easy to show
+\begin{eqnarray}
+\frac{\partial \, p_{j}(\bix_i)}{\partial \,
+\bix_{i}}
+&=&
+p_{j}(\bix_i)
+\left\{
+ \bbeta_{j} -
+\sum_{s=1}^{M+1}
+p_{s}(\bix_i)
+\,
+ \bbeta_{s}
+\right\},
+\label{eqn:multinomial.marginalEffects}
+\end{eqnarray}
+while for
+\texttt{cumulative(reverse = FALSE)}
+we have
+$p_{j} = \gamma_{j} - \gamma_{j-1} = h(\eta_{j}) - h(\eta_{j-1})$
+where $h=g^{-1}$ is the inverse of the link function
+(cf. Table \ref{tab:cat.quantities})
+so that
+\begin{eqnarray}
+\frac{\partial \, p_{j}(\bix_{})}{\partial \,
+\bix}
+&=&
+h'(\eta_{j}) \, \bbeta_{j} -
+h'(\eta_{j-1}) \, \bbeta_{j-1} .
+\label{eqn:cumulative.marginalEffects}
+\end{eqnarray}
+
+
+
+
+The function \texttt{margeff()} returns an array with these
+derivatives and should handle any value of
+\texttt{reverse} and \texttt{parallel}.
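+For example (a sketch, applied to the multinomial \texttt{vglm()} fit
+\texttt{fit2.ms} of Section \ref{sec:jsscat.eg.mstatus}):
+\begin{Code}
+marg <- margeff(fit2.ms)  # A p x (M+1) x n array
+dim(marg)
+marg[, , 1]  # Derivatives evaluated at the first observation
+\end{Code}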
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\subsection[The xij argument]{The \texttt{xij} argument}
+\label{sec:jsscat.extnUtil.xij}
+
+There are many models, including those for categorical data,
+where the value of an explanatory variable $x_k$ differs depending
+on which linear/additive predictor $\eta_{j}$ it appears in.
+Here is a well-known example from consumer choice modeling.
+Suppose an econometrician is interested in peoples'
+choice of transport for travelling to work
+and that there are four choices:
+$Y=1$ for ``bus'',
+$Y=2$ for ``train'',
+$Y=3$ for ``car'' and
+$Y=4$ for ``walking''.
+Assume that people only choose one means to go to work.
+Suppose there are three covariates:
+$X_2=$ cost,
+$X_3=$ journey time, and
+$X_4=$ distance.
+Of the covariates only $X_4$ (and the intercept $X_1$)
+is the same for all transport choices;
+the cost and journey time differ according to the means chosen.
+Suppose a random sample of $n$ people is collected
+from some population, and that each person has
+access to all these transport modes.
+For such data, a natural regression model would be a 
+multinomial logit model with $M=3$:
+for $j=1,\ldots,M$, we have
+\begin{eqnarray}
+\eta_{j}  = 
+\log \frac{\pr(Y=j)}{\pr(Y=M+1)}
+&=&
+\beta_{(j)1}^{*} +
+\beta_{(1)2}^{*} \, (x_{i2j}-x_{i24}) +
+\beta_{(1)3}^{*} \, (x_{i3j}-x_{i34}) +
+\beta_{(1)4}^{*} \, x_{i4},
+\label{eqn:xij.eg.gotowork}
+\end{eqnarray}
+where, for the $i$th person,
+$x_{i2j}$ is the cost for the $j$th transport means, and
+$x_{i3j}$ is the journey time of the $j$th transport means.
+The distance to get to work is $x_{i4}$; it has the same value
+regardless of the transport means.
+
+
+Equation \ref{eqn:xij.eg.gotowork}
+implies $\bH_1=\bI_3$ and $\bH_2=\bH_3=\bH_4=\bone_3$.
+Note
+also that if the last response category is used as the baseline or
+reference group (the default of \texttt{multinomial()}) then $x_{ik,M+1}$
+can be subtracted from $x_{ikj}$ for $j=1,\ldots,M$---this
+is the natural way $x_{ik,M+1}$ enters into the model.
+
+
+
+
+Recall from (\ref{gammod2}) that we had
+\begin{equation}
+\eta_j(\bix_i)  =  \bbeta_j^{\top} \bix_i  = 
+\sum_{k=1}^{p} \, x_{ik} \, \beta_{(j)k} .
+\label{eqn:xij0}
+\end{equation}
+Importantly, this can be generalized to
+\begin{equation}
+\eta_j(\bix_{ij})  =  \bbeta_j^{\top} \bix_{ij}  = 
+\sum_{k=1}^{p} \, x_{ikj} \, \beta_{(j)k} ,
+\label{eqn:xij}
+\end{equation}
+or writing this another way (as a mixture or hybrid),
+\begin{equation}
+\eta_j(\bix_{i}^{*},\bix_{ij}^{*})  =  
+\bbeta_{j}^{*T} \bix_{i}^{*} + \bbeta_{j}^{**T} \bix_{ij}^{*} .
+\label{eqn:xij2}
+\end{equation}
+Often $\bbeta_{j}^{**} = \bbeta_{}^{**}$, say.
+In (\ref{eqn:xij2}) the variables in $\bix_{i}^{*}$ are common to
+all $\eta_{j}$, and the variables in $\bix_{ij}^{*}$ have
+different values for differing $\eta_{j}$.
+This allows for covariate values that are specific to each $\eta_j$,
+a facility which is very important in many applications.
+
+
+The use of the \texttt{xij} argument with the \VGAM{} family function
+\texttt{multinomial()} has very important applications in economics.
+In that field the term ``multinomial logit model'' includes a variety of
+models such as the ``generalized logit model'' where (\ref{eqn:xij0})
+holds, the ``conditional logit model'' where (\ref{eqn:xij}) holds,
+and the ``mixed logit model,'' which is a combination of the two,
+where (\ref{eqn:xij2}) holds.
+The generalized logit model focusses on the individual as the unit of
+analysis, and uses individual characteristics as explanatory variables,
+e.g., age of the person in the transport example.
+The conditional logit model uses covariates that take different values
+for each alternative, and the impact of a unit change in $x_k$ is assumed
+constant across alternatives, e.g., journey time in the choice of transport mode.
+Unfortunately, there is confusion in the literature for the terminology
+of the models. Some authors call \texttt{multinomial()}
+with (\ref{eqn:xij0}) the ``generalized logit model''.
+Others call the mixed
+logit model the ``multinomial logit model'' and view the generalized
+logit and conditional logit models as special cases.
+In \VGAM{} terminology there is no need to give different names to
+all these slightly differing special cases. They are all still called
+multinomial logit models, although it may be added that there are
+some covariate-specific linear/additive predictors.
+The important thing is that the framework accommodates $\bix_{ij}$,
+so one tries to avoid making life unnecessarily complicated.
+And \texttt{xij} can apply in theory to any VGLM and not just to the
+multinomial logit model.
+\cite{imai:king:lau:2008} present another perspective on the
+$\bix_{ij}$ problem with illustrations from \pkg{Zelig}
+\citep{Zelig:2009}.
+
+
+
+
+
+\subsubsection[Using the xij argument]{Using the \texttt{xij} argument}
+\label{sec:xij.sub}
+
+\VGAM{} handles variables whose values depend on $\eta_{j}$,
+as in (\ref{eqn:xij2}), using the \texttt{xij} argument.
+It is assigned an \proglang{S} formula or a list of \proglang{S} formulas.
+Each formula, which must have $M$ \textit{different} terms,
+forms a matrix that premultiplies a constraint matrix.
+In detail, (\ref{eqn:xij0}) can be written in vector form as
+\begin{equation}
+\boldeta(\bix_i)  =  \bB^{\top} \bix_i  = 
+\sum_{k=1}^{p} \, \bH_{k} \, \bbeta_{k}^{*} \, x_{ik},
+\label{eqn:xij0.vector}
+\end{equation}
+where
+$\bbeta_{k}^{*} =
+\left( \beta_{(1)k}^{*},\ldots,\beta_{(r_k)k}^{*} \right)^{\top}$
+is to be estimated.
+This may be written
+\begin{eqnarray}
+\boldeta(\bix_{i})
+&=&
+\sum_{k=1}^{p} \, \diag(x_{ik},\ldots,x_{ik}) \,
+\bH_k \, \bbeta_{k}^{*}.
+\label{eqn:xij.d.vector}
+\end{eqnarray}
+To handle (\ref{eqn:xij})--(\ref{eqn:xij2})
+we can generalize (\ref{eqn:xij.d.vector}) to
+\begin{eqnarray}
+\boldeta_i
+&=&
+\sum_{k=1}^{p} \, \diag(x_{ik1},\ldots,x_{ikM}) \;
+\bH_k \, \bbeta_{k}^{*}
+\ \ \ \ \left(=
+\sum_{k=1}^{p} \, \bX_{(ik)}^{*} \,
+\bH_k \, \bbeta_{k}^{*} ,
+\mathrm{\ say} \right).
+\label{eqn:xij.vector}
+\end{eqnarray}
+Each component of the list \texttt{xij} is a formula having $M$ terms
+(ignoring the intercept) which
+specifies the successive diagonal elements of the matrix $\bX_{(ik)}^{*}$.
+Thus each row of the constraint matrix may be multiplied by a different
+vector of values.
+The constraint matrices themselves are not affected by the
+\texttt{xij} argument.
+
+
+
+
+
+How can one fit such models in \VGAM{}?
+Let us fit (\ref{eqn:xij.eg.gotowork}).
+Suppose the journey cost and time variables have had the
+cost and time of walking subtracted from them.
+Then,
+using ``\texttt{.trn}'' to denote train,
+\begin{Code}
+fit2 <- vglm(cbind(bus, train, car, walk) ~ Cost + Time + Distance,
+             fam = multinomial(parallel = TRUE ~ Cost + Time + Distance - 1),
+             xij = list(Cost ~ Cost.bus + Cost.trn + Cost.car,
+                        Time ~ Time.bus + Time.trn + Time.car),
+             form2 = ~  Cost.bus + Cost.trn + Cost.car +
+                        Time.bus + Time.trn + Time.car +
+                        Cost + Time + Distance,
+             data = gotowork)
+\end{Code}
+should do the job.
+Here, the argument \texttt{form2} is assigned a second \proglang{S} formula which
+is used in some special circumstances or by certain types
+of \VGAM{} family functions.
+The model has $\bH_{1} = \bI_{3}$ and $\bH_{2} = \bH_{3} = \bH_{4} = \bone_{3}$
+because the lack of parallelism only applies to the intercept.
+However, unless \texttt{Cost} is the same as \texttt{Cost.bus} and
+\texttt{Time} is the same as \texttt{Time.bus},
+this model should not be plotted with \texttt{plotvgam()};
+see the author's homepage for further documentation.
+
+
+By the way,
+suppose 
+$\beta_{(1)4}^{*}$
+in (\ref{eqn:xij.eg.gotowork})
+is replaced by $\beta_{(j)4}^{*}$.
+Then the above code but with
+\begin{Code}
+  fam = multinomial(parallel = FALSE ~ 1 + Distance),
+\end{Code}
+should fit this model.
+Equivalently, one could use
+\begin{Code}
+  fam = multinomial(parallel = TRUE ~ Cost + Time - 1),
+\end{Code}
+
+
+
+
+
+
+\subsubsection{A more complicated example}
+\label{sec:xij.complicated}
+
+The above example is straightforward because the
+variables were entered linearly. However, things
+become more tricky if data-dependent functions are used in
+any \texttt{xij} terms, e.g., \texttt{bs()}, \texttt{ns()} or \texttt{poly()}.
+In particular, regression splines such as \texttt{bs()} and \texttt{ns()}
+can be used to estimate a general smooth function $f(x_{ij})$, which is
+very useful for exploratory data analysis.
+
+
+
+Suppose we wish to fit the variable \texttt{Cost} with a smoother.
+This is possible using regression splines and a trick.
+Firstly note that
+\begin{Code}
+fit3 <- vglm(cbind(bus, train, car, walk) ~ ns(Cost) + Time + Distance,
+             multinomial(parallel = TRUE ~ ns(Cost) + Time + Distance - 1),
+             xij = list(ns(Cost) ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car),
+                        Time ~ Time.bus + Time.trn + Time.car),
+             form2 = ~  ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car) +
+                        Time.bus + Time.trn + Time.car +
+                        ns(Cost) + Cost + Time + Distance,
+             data = gotowork)
+\end{Code}
+will \textit{not} work because the basis functions for
+\texttt{ns(Cost.bus)}, \texttt{ns(Cost.trn)} and \texttt{ns(Cost.car)}
+are not identical since the knots differ.
+Consequently, they represent different functions despite
+having common regression coefficients.
+
+
+Fortunately, it is possible to force the \texttt{ns()} terms
+to have identical basis functions by using a trick:
+combine the vectors temporarily.
+To do this, one can let
+\begin{Code}
+NS <- function(x, ..., df = 3)
+      sm.ns(c(x, ...), df = df)[1:length(x), , drop = FALSE]
+\end{Code}
+This computes a natural cubic B-spline evaluated at \texttt{x} but it uses the
+other arguments as well to form an overall vector from which to obtain
+the (common) knots.
+Then the usage of \texttt{NS()} can be something like
+\begin{Code}
+fit4 <- vglm(cbind(bus, train, car, walk) ~ NS(Cost.bus, Cost.trn, Cost.car)
+                                          + Time + Distance,
+             multinomial(parallel = TRUE ~  NS(Cost.bus, Cost.trn, Cost.car)
+                                          + Time + Distance - 1),
+             xij = list(NS(Cost.bus, Cost.trn, Cost.car) ~
+                        NS(Cost.bus, Cost.trn, Cost.car) +
+                        NS(Cost.trn, Cost.car, Cost.bus) +
+                        NS(Cost.car, Cost.bus, Cost.trn),
+                        Time ~ Time.bus + Time.trn + Time.car),
+             form2 = ~  NS(Cost.bus, Cost.trn, Cost.car) +
+                        NS(Cost.trn, Cost.car, Cost.bus) +
+                        NS(Cost.car, Cost.bus, Cost.trn) +
+                        Time.bus + Time.trn + Time.car +
+                        Cost.bus + Cost.trn + Cost.car +
+                        Time + Distance,
+             data = gotowork)
+\end{Code}
+So \texttt{NS(Cost.bus, Cost.trn, Cost.car)}
+is the smooth term for
+\texttt{Cost.bus}, etc.
+Furthermore, \texttt{plotvgam()} may be applied to
+\texttt{fit4}, in which case the fitted regression spline is plotted
+against its first inner argument, viz. \texttt{Cost.bus}.
+
+
+One of the reasons why it will also predict correctly
+is ``smart prediction''
+\citep{Rnews:Yee:2008}.
+
+
+
+\subsubsection{Implementation details} 
+\label{sec:jss.xij.implementationDetails} 
+
+The \texttt{xij} argument operates \textit{after} the
+ordinary $\bX_{\sVLM}$ matrix is created. Then selected columns
+of $\bX_{\sVLM}$ are modified using the constraint matrices and the
+\texttt{xij} and \texttt{form2} arguments, i.e., using \texttt{form2}'s model
+matrix $\bX_{\sformtwo}$ and the $\bH_k$. This whole operation
+is possible because $\bX_{\sVLM}$ remains structurally the same.
+The crucial equation is (\ref{eqn:xij.vector}).
+
+
+Other \texttt{xij} examples are given in the online help of
+\texttt{fill()} and \texttt{vglm.control()},
+as well as at the package's webpage.
+
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Discussion}
+\label{sec:jsscat.discussion}
+
+
+This article has sought to convey how VGLMs/VGAMs are well suited for
+fitting regression models for categorical data. Their primary strength
+is a simple and unified framework which, when reflected in software,
+makes practical CDA more understandable and efficient.
+there are natural extensions such as a reduced-rank variant and
+covariate-specific $\eta_{j}$. The \VGAM{} package potentially offers
+a wide selection of models and utilities.
+
+
+There is much future work to do.
+Some useful additions to the package include:
+\begin{enumerate}
+
+\item
+Bias-reduction \citep{firt:1993} is a method for removing the $O(n^{-1})$
+bias from a maximum likelihood estimate. For a substantial class of
+models including GLMs it can be formulated in terms of a minor adjustment
+of the score vector within an IRLS algorithm \citep{kosm:firt:2009}.
+One by-product, for logistic regression, is that while the maximum
+likelihood estimate (MLE) can be infinite, the adjustment leads to
+estimates that are always finite. At present the \R{} package \pkg{brglm}
+\citep{Kosmidis:2008} implements bias-reduction for a number of models.
+Bias-reduction might be implemented by adding an argument
+\texttt{bred = FALSE}, say, to some existing \VGAM{} family functions.
+
+
+\item
+Nested logit models were developed to overcome a fundamental shortcoming
+related to the multinomial logit model, viz. the independence of
+irrelevant alternatives (IIA) assumption. Roughly, the multinomial logit
+model assumes the ratio of the choice probabilities of two alternatives
+is not dependent on the presence or absence of other alternatives in
+the model. This presents problems that are often illustrated by the
+famed red bus-blue bus problem.
+
+
+
+
+\item
+The generalized estimating equations (GEE) methodology is largely
+amenable to IRLS and this should be added to the package in the future
+\citep{wild:yee:1996}.
+
+
+\item
+For logistic regression, \proglang{SAS}'s \code{proc logistic} gives
+a warning if the data are completely separated or quasi-completely
+separated. The effect is that some regression coefficients tend to $\pm
+\infty$. With such data, all (to my knowledge) \R{} implementations
+give warnings that are vague, if any at all, and this is rather
+unacceptable \citep{alli:2004}. The \pkg{safeBinaryRegression} package
+\citep{Konis:2009} overloads \code{glm()} so that a check for the
+existence of the MLE is made before fitting a binary response GLM.
+
+
+\end{enumerate}
+
+
+In closing, the \pkg{VGAM} package is continually being developed;
+therefore some future changes in the implementation details and usage
+may occur. These may include non-backward-compatible changes (see the
+\code{NEWS} file). Further documentation and updates are available at
+the author's homepage whose URL is given in the \code{DESCRIPTION} file.
+
+
+
+% ----------------------------------------------------------------------
+\section*{Acknowledgments}
+
+The author thanks Micah Altman, David Firth and Bill Venables for helpful
+conversations, and Ioannis Kosmidis for a reprint.
+Thanks also to The Institute for Quantitative Social Science at Harvard
+University for their hospitality while this document was written during a
+sabbatical visit.
+
+
+
+
+
+\bibliography{categoricalVGAMbib}
+
+\end{document}
+
+
+
+
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
new file mode 100644
index 0000000..73b83fa
Binary files /dev/null and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/man/G1G2G3.Rd b/man/A1A2A3.Rd
similarity index 63%
rename from man/G1G2G3.Rd
rename to man/A1A2A3.Rd
index bfa9eb7..392378a 100644
--- a/man/G1G2G3.Rd
+++ b/man/A1A2A3.Rd
@@ -1,14 +1,14 @@
-\name{G1G2G3}
-\alias{G1G2G3}
+\name{A1A2A3}
+\alias{A1A2A3}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ The G1G2G3 Blood Group System }
+\title{ The A1A2A3 Blood Group System }
 \description{
   Estimates the three independent parameters of the 
-  the G1G2G3 blood group system.
+  A1A2A3 blood group system.
 
 }
 \usage{
-G1G2G3(link = "logit", ip1 = NULL, ip2 = NULL, iF = NULL)
+A1A2A3(link = "logit", inbreeding = TRUE, ip1 = NULL, ip2 = NULL, iF = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -17,6 +17,14 @@ G1G2G3(link = "logit", ip1 = NULL, ip2 = NULL, iF = NULL)
   See \code{\link{Links}} for more choices.
 
   }
+  \item{inbreeding}{
+    Logical. Is there inbreeding, i.e., is the inbreeding coefficient \eqn{f} estimated?
+
+
+%   Logical. Is the HWE assumption to be made?
+
+
+  }
   \item{ip1, ip2, iF}{
   Optional initial value for \code{p1}, \code{p2} and \code{f}.
 
@@ -25,7 +33,10 @@ G1G2G3(link = "logit", ip1 = NULL, ip2 = NULL, iF = NULL)
 \details{
   The parameters \code{p1} and \code{p2} are probabilities, so that
   \code{p3=1-p1-p2} is the third probability.
-  The parameter \code{f} is the third independent parameter.
+  The parameter \code{f} is the third independent parameter if
+  \code{inbreeding = TRUE}.
+  If \code{inbreeding = FALSE} then \eqn{f = 0}.
+
 
 
 }
@@ -48,12 +59,12 @@ Lange, K. (2002)
 \note{ 
   The input can be a 6-column matrix of counts,
   with columns corresponding to   
-  \code{G1G1},
-  \code{G1G2},
-  \code{G1G3},
-  \code{G2G2},
-  \code{G2G3},
-  \code{G3G3} (in order). 
+  \code{A1A1},
+  \code{A1A2},
+  \code{A1A3},
+  \code{A2A2},
+  \code{A2A3},
+  \code{A3A3} (in order). 
   Alternatively, the input can be a 6-column matrix of 
   proportions (so each row adds to 1) and the \code{weights}
   argument is used to specify the total number of counts for each row.
@@ -64,20 +75,23 @@ Lange, K. (2002)
 \seealso{
   \code{\link{AA.Aa.aa}},
   \code{\link{AB.Ab.aB.ab}},
-  \code{\link{AB.Ab.aB.ab2}},
   \code{\link{ABO}},
   \code{\link{MNSs}}.
 
 
+% \code{\link{AB.Ab.aB.ab2}},
+
+
+
 }
 \examples{
 ymat <- cbind(108, 196, 429, 143, 513, 559)
-fit <- vglm(ymat ~ 1, G1G2G3(link = probit), trace = TRUE, crit = "coef")
-fit <- vglm(ymat ~ 1, G1G2G3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
-           trace = TRUE, crit = "coef")
-fit <- vglm(ymat ~ 1, G1G2G3(link = "identitylink"), trace = TRUE)
+fit <- vglm(ymat ~ 1, A1A2A3(link = probit), trace = TRUE, crit = "coef")
+fit <- vglm(ymat ~ 1, A1A2A3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
+            trace = TRUE, crit = "coef")
+fit <- vglm(ymat ~ 1, A1A2A3(link = "identitylink"), trace = TRUE)
 Coef(fit)  # Estimated p1, p2 and f
-rbind(ymat, sum(ymat)*fitted(fit))
+rbind(ymat, sum(ymat) * fitted(fit))
 sqrt(diag(vcov(fit)))
 }
 \keyword{models}
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index bfd1f11..5cb6afe 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -4,24 +4,54 @@
 \title{ The AA-Aa-aa Blood Group System }
 \description{
    Estimates the parameter of the 
-   AA-Aa-aa blood group system.
+   AA-Aa-aa blood group system,
+   with or without Hardy-Weinberg equilibrium.
+
 }
 \usage{
-AA.Aa.aa(link = "logit", init.pA = NULL)
+AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = TRUE,
+         ipA = NULL, ifp = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{
-  Link function applied to \code{pA}.
+  \item{linkp, linkf}{
+  Link functions applied to \code{pA} and \code{f}.
   See \code{\link{Links}} for more choices.
 
   }
-  \item{init.pA}{ Optional initial value for \code{pA}. }
+  \item{ipA, ifp}{
+  Optional initial values for \code{pA} and \code{f}.
+
+
+  }
+  \item{inbreeding}{
+    Logical. Is there inbreeding, i.e., is the inbreeding coefficient \eqn{f} estimated?
+
+
+%HWE assumption to be made?
+
+
+
+
+  }
+  \item{zero}{
+    See \code{\link{CommonVGAMffArguments}} for information.
+
+
+  }
 }
 \details{
-  This one parameter model involves a probability called \code{pA}.
+  This one- or two-parameter model involves a probability called \code{pA}.
   The probability of getting a count in the first column of the
   input (an AA) is \code{pA*pA}.
+  When \code{inbreeding = TRUE}, an additional parameter \code{f} is used.
+  If \code{inbreeding = FALSE} then \eqn{f = 0}.
+
+
+
+% With Hardy Weinberg equilibrium (HWE),
+% Without the HWE assumption, an additional parameter \code{f} is used.
+
 
 
 }
@@ -53,21 +83,36 @@ Sunderland, MA: Sinauer Associates, Inc.
 
 
 }
+
+\section{Warning }{
+  Setting \code{inbreeding = FALSE} makes estimation difficult
+  with non-intercept-only models.
+  Currently, this code seems to work with intercept-only models.
+
+
+}
+
+
 \seealso{
   \code{\link{AB.Ab.aB.ab}},
-  \code{\link{AB.Ab.aB.ab2}},
   \code{\link{ABO}},
-  \code{\link{G1G2G3}},
+  \code{\link{A1A2A3}},
   \code{\link{MNSs}}.
 
 
+% \code{\link{AB.Ab.aB.ab2}},
+
+
+
 }
 \examples{
 y <- cbind(53, 95, 38)
-fit <- vglm(y ~ 1, AA.Aa.aa(link = "probit"), trace = TRUE)
-rbind(y, sum(y) * fitted(fit))
-Coef(fit)  # Estimated pA
-summary(fit)
+fit1 <- vglm(y ~ 1, AA.Aa.aa(linkp = "probit"), trace = TRUE)
+fit2 <- vglm(y ~ 1, AA.Aa.aa(inbreeding = FALSE), trace = TRUE)
+rbind(y, sum(y) * fitted(fit1))
+Coef(fit1)  # Estimated pA
+Coef(fit2)  # Estimated pA and f
+summary(fit1)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index 0801dce..0903854 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -55,17 +55,20 @@ Lange, K. (2002)
 
 \seealso{
   \code{\link{AA.Aa.aa}},
-  \code{\link{AB.Ab.aB.ab2}},
   \code{\link{ABO}},
-  \code{\link{G1G2G3}},
+  \code{\link{A1A2A3}},
   \code{\link{MNSs}}.
 
 
+% \code{\link{AB.Ab.aB.ab2}},
+
+
+
 }
 
 \examples{
 ymat <- cbind(AB=1997, Ab=906, aB=904, ab=32)  # Data from Fisher (1925)
-fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identitylink", init.p = 0.9), trace = TRUE)
+fit <- vglm(ymat ~ 1, AB.Ab.aB.ab(link = "identitylink"), trace = TRUE)
 fit <- vglm(ymat ~ 1, AB.Ab.aB.ab, trace = TRUE)
 rbind(ymat, sum(ymat)*fitted(fit))
 Coef(fit)  # Estimated p
diff --git a/man/AB.Ab.aB.ab2.Rd b/man/AB.Ab.aB.ab2.Rd
deleted file mode 100644
index a324f23..0000000
--- a/man/AB.Ab.aB.ab2.Rd
+++ /dev/null
@@ -1,76 +0,0 @@
-\name{AB.Ab.aB.ab2}
-\alias{AB.Ab.aB.ab2}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ The AB-Ab-aB-ab2 Blood Group System }
-\description{
-  Estimates the parameter of the 
-  the AB-Ab-aB-ab2 blood group system.
-
-}
-\usage{
-AB.Ab.aB.ab2(link = "logit", init.p = NULL)
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{link}{ 
-  Link function applied to \code{p}.
-  See \code{\link{Links}} for more choices.
-
-  }
-  \item{init.p}{ Optional initial value for \code{p}. }
-}
-\details{
-  This one parameter model involves a probability called \code{p}.
-
-
-}
-\value{
-  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
-  The object is used by modelling functions such as \code{\link{vglm}}
-  and \code{\link{vgam}}.
-
-
-}
-\references{
-
-  Elandt-Johnson, R. C. (1971)
-  \emph{Probability Models and Statistical Methods in Genetics},
-  New York: Wiley.
-
-
-}
-\author{ T. W. Yee }
-\note{
-  The input can be a 4-column matrix of counts.
-  Alternatively, the input can be a 4-column matrix of
-  proportions (so each row adds to 1) and the \code{weights}
-  argument is used to specify the total number of counts for each row.
-
-
-}
-
-\section{Warning}{
-  There may be a bug in the \code{deriv} and \code{weight} slot of the
-  family function.
-
-
-}
-\seealso{
-  \code{\link{AA.Aa.aa}},
-  \code{\link{AB.Ab.aB.ab}},
-  \code{\link{ABO}},
-  \code{\link{G1G2G3}},
-  \code{\link{MNSs}}.
-
-
-}
-
-\examples{
-ymat <- cbind(68, 11, 13, 21)  # See Elandt-Johnson, pp.430,427
-fit <- vglm(ymat ~ 1, AB.Ab.aB.ab2(link = cloglog), trace = TRUE, crit = "coef")
-Coef(fit)  # Estimated p
-rbind(ymat, sum(ymat) * fitted(fit))
-sqrt(diag(vcov(fit)))  # Estimated variance is approx 0.0021
-}
-\keyword{models}
-\keyword{regression}
diff --git a/man/ABO.Rd b/man/ABO.Rd
index c102aec..b0f24cc 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -68,11 +68,14 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
 \seealso{
   \code{\link{AA.Aa.aa}},
   \code{\link{AB.Ab.aB.ab}},
-  \code{\link{AB.Ab.aB.ab2}},
-  \code{\link{G1G2G3}},
+  \code{\link{A1A2A3}},
   \code{\link{MNSs}}.
 
 
+% \code{\link{AB.Ab.aB.ab2}},
+
+
+
 }
 \examples{
 ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073)  # Order matters, not the name
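
For orientation, the counts above follow the classical one-locus,
three-allele ABO model; a minimal base-R sketch of the usual
Hardy-Weinberg phenotype probabilities (allele frequencies hypothetical):

pA <- 0.25; pB <- 0.10; pO <- 1 - pA - pB  # Hypothetical allele frequencies
phenotype <- c(A  = pA^2 + 2 * pA * pO,
               B  = pB^2 + 2 * pB * pO,
               AB = 2 * pA * pB,
               O  = pO^2)
phenotype        # Order matches cbind(A, B, AB, O) above
sum(phenotype)   # Should be exactly 1
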
diff --git a/man/AICvlm.Rd b/man/AICvlm.Rd
index 9889bab..1f04b05 100644
--- a/man/AICvlm.Rd
+++ b/man/AICvlm.Rd
@@ -4,7 +4,7 @@
 \alias{AICvgam}
 \alias{AICrrvglm}
 \alias{AICqrrvglm}
-\alias{AICcao}
+\alias{AICrrvgam}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Akaike's Information Criterion }
 \description{
@@ -17,7 +17,7 @@
    AICvgam(object, \dots, k = 2)
  AICrrvglm(object, \dots, k = 2)
 AICqrrvglm(object, \dots, k = 2)
-    AICcao(object, \dots, k = 2)
+ AICrrvgam(object, \dots, k = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
diff --git a/man/Coef.Rd b/man/Coef.Rd
index 0ced124..4b84a88 100644
--- a/man/Coef.Rd
+++ b/man/Coef.Rd
@@ -40,7 +40,7 @@ Coef(object, ...)
 
   For quadratic and additive ordination models, this function
  can return ecologically meaningful quantities such as tolerances,
-  optima, maxima.
+  optimums, maximums.
 
 
 }
@@ -85,7 +85,7 @@ Reduced-rank vector generalized linear models.
 \examples{
 nn <- 1000
 bdata <- data.frame(y = rbeta(nn, shape1 = 1, shape2 = 3))  # Original scale
-fit <- vglm(y ~ 1, beta.ab, data = bdata, trace = TRUE)  # Intercept-only model
+fit <- vglm(y ~ 1, betaR, data = bdata, trace = TRUE)  # Intercept-only model
 coef(fit, matrix = TRUE)  # Both on a log scale
 Coef(fit)  # On the original scale
 }
diff --git a/man/Coef.qrrvglm-class.Rd b/man/Coef.qrrvglm-class.Rd
index c3037fd..424d73a 100644
--- a/man/Coef.qrrvglm-class.Rd
+++ b/man/Coef.qrrvglm-class.Rd
@@ -44,11 +44,11 @@ linear predictors and \eqn{n} is the number of observations.
           }
     \item{\code{Maximum}:}{Of class \code{"numeric"}, the 
           \eqn{M} maximum fitted values. That is, the fitted values 
-          at the optima for \code{noRRR = ~ 1} models.
+          at the optimums for \code{noRRR = ~ 1} models.
     If \code{noRRR} is not \code{~ 1} then these will be \code{NA}s. }
     \item{\code{NOS}:}{Number of species.}
     \item{\code{Optimum}:}{Of class \code{"matrix"}, the values
-          of the latent variables where the optima are. 
+          of the latent variables where the optimums are. 
           If the curves are not bell-shaped, then the value will
           be \code{NA} or \code{NaN}.}
     \item{\code{Optimum.order}:}{Of class \code{"matrix"}, the permutation
diff --git a/man/Coef.qrrvglm.Rd b/man/Coef.qrrvglm.Rd
index f954f15..adb1f29 100644
--- a/man/Coef.qrrvglm.Rd
+++ b/man/Coef.qrrvglm.Rd
@@ -9,7 +9,7 @@
 
 }
 \usage{
-Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
+Coef.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -17,6 +17,8 @@ Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
 % A CQO or UQO object.
   A CQO object.
   The former has class \code{"qrrvglm"}.
+
+
   }
 
 
@@ -27,20 +29,25 @@ Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
   and this option stretches or shrinks the ordination axes if \code{TRUE}.
   See below for further details.
 
+
+
   }
-  \item{reference}{
+  \item{refResponse}{
     Integer or character.
-    Specifies the \emph{reference species}. By default, the reference
+    Specifies the \emph{reference response} or \emph{reference species}.
+    By default, the reference
     species is found by searching sequentially starting from the first
     species until a positive-definite tolerance matrix is found. Then
     this tolerance matrix is transformed to the identity matrix. Then
    the site scores (latent variables) are made uncorrelated.
     See below for further details.
 
+
 %   If \code{eq.tolerances=FALSE}, then transformations occur so that
 %   the reference species has a tolerance matrix equal to the rank-\eqn{R}
 %   identity matrix.
 
+
   }
   \item{\dots}{ Currently unused. }
 }
@@ -58,11 +65,11 @@ Coef.qrrvglm(object, varI.latvar = FALSE, reference = NULL, ...)
   angle of its major axis and minor axis is zero, i.e., parallel to
  the ordination axes.  This means that the latent variables act
  independently on that species, and that its tolerance matrix is diagonal.
-  The argument \code{reference} allows one to choose which is the reference
+  The argument \code{refResponse} allows one to choose which is the reference
   species, which must have a positive-definite tolerance matrix, i.e.,
-  is bell-shaped.  If \code{reference} is not specified, then the code will
+  is bell-shaped.  If \code{refResponse} is not specified, then the code will
   try to choose some reference species starting from the first species.
-  Although the \code{reference} argument could possibly be offered as
+  Although the \code{refResponse} argument could possibly be offered as
   an option when fitting the model, it is currently available after
   fitting the model, e.g., in the functions \code{\link{Coef.qrrvglm}} and
   \code{\link{lvplot.qrrvglm}}.
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 2000bcc..2aca39f 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -19,8 +19,8 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
                           link.list = list("(Default)" = "identitylink",
                                            x2          = "loge",
                                            x3          = "logoff",
-                                           x4          = "mlogit",
-                                           x5          = "mlogit"),
+                                           x4          = "multilogit",
+                                           x5          = "multilogit"),
                           earg.list = list("(Default)" = list(),
                                            x2          = list(),
                                            x3          = list(offset = -1),
@@ -28,13 +28,13 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
                                            x5          = list()),
                           gsigma = exp(-5:5),
                           parallel = TRUE,
-                          shrinkage.init = 0.95,
+                          ishrinkage = 0.95,
                           nointercept = NULL, imethod = 1,
                           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
                           probs.x = c(0.15, 0.85),
                           probs.y = c(0.25, 0.50, 0.75),
                           mv = FALSE, earg.link = FALSE,
-                          whitespace = FALSE, bred = FALSE,
+                          whitespace = FALSE, bred = FALSE, lss = TRUE,
                           oim = FALSE, nsimEIM = 100, zero = NULL)
 }
 \arguments{
@@ -72,7 +72,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
   \code{earg.list = list("(Default)" = list(), x2 = list(), x3 = "list(offset = -1)")}.
   Then any unnamed terms will have the default link with its
   corresponding extra argument.
-  Note: the \code{\link{mlogit}} link is also possible, and if so,
+  Note: the \code{\link{multilogit}} link is also possible, and if so,
   at least two instances of it are necessary.
   Then the last term is the baseline/reference group.
 
@@ -226,6 +226,19 @@ except for \eqn{X_2}.
 
 
   }
+  \item{lss}{
+  Logical.
+  This stands for the ordering: location, scale and shape.
+  Should the ordering of the parameters be in this order?
+  Almost all \pkg{VGAM} family functions have this order by default,
+  but in order to match the arguments of existing R functions, one
+  might need to set \code{lss = FALSE}.
+  For example, the arguments of \code{\link{weibullR}} are
+  scale and shape, whereas those of \code{\link[stats]{rweibull}}
+  are shape and scale.
+
+
+  }
   \item{whitespace}{
   Logical.
   Should white spaces (\code{" "}) be used in the
@@ -291,7 +304,7 @@ except for \eqn{X_2}.
 
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   Shrinkage factor \eqn{s} used for obtaining initial values.
   Numeric, between 0 and 1.
   In general, the formula used is something like
@@ -414,7 +427,7 @@ Bias reduction in exponential family nonlinear models.
   \code{\link{Links}},
   \code{\link{vglmff-class}},
   \code{\link{normal.vcm}},
-  \code{\link{mlogit}}.
+  \code{\link{multilogit}}.
 
 
 }
@@ -438,7 +451,7 @@ cumulative(link = "probit", reverse = TRUE, parallel = TRUE)
 wdata <- data.frame(x2 = runif(nn <- 1000))
 wdata <- transform(wdata,
          y = rweibull(nn, shape = 2 + exp(1 + x2), scale = exp(-0.5)))
-fit <- vglm(y ~ x2, weibull(lshape = logoff(offset = -2), zero = 2), data = wdata)
+fit <- vglm(y ~ x2, weibullR(lshape = logoff(offset = -2), zero = 2), data = wdata)
 coef(fit, mat = TRUE)
 
 # Example 3; multivariate (multiple) response
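
A minimal sketch of the ordering difference that the new lss argument
documents, assuming VGAM 0.9-5 is attached: stats::rweibull() takes shape
before scale, whereas the lss convention places scale before shape.

library(VGAM)
set.seed(1)
wdat <- data.frame(y = rweibull(500, shape = 2, scale = 3))  # shape first
fit <- vglm(y ~ 1, weibullR, data = wdat)  # lss = TRUE is the default
coef(fit, matrix = TRUE)  # The column labels reveal the parameter ordering
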
diff --git a/man/Links.Rd b/man/Links.Rd
index 1a2cdd9..8d4c991 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -275,10 +275,10 @@ fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua)  # not okay
 
 # No matter what the link, the estimated var-cov matrix is the same
 y <- rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1))
-fit1 <- vglm(y ~ 1, beta.ab(lshape1 = "identitylink", lshape2 = "identitylink"),
+fit1 <- vglm(y ~ 1, betaR(lshape1 = "identitylink", lshape2 = "identitylink"),
              trace = TRUE, crit = "coef")
-fit2 <- vglm(y ~ 1, beta.ab(lshape1 = logoff(offset = 1.1),
-                            lshape2 = logoff(offset = 1.1)),
+fit2 <- vglm(y ~ 1, betaR(lshape1 = logoff(offset = 1.1),
+                          lshape2 = logoff(offset = 1.1)),
             trace = TRUE, crit = "coef")
 vcov(fit1, untransform = TRUE)
 vcov(fit1, untransform = TRUE) - vcov(fit2, untransform = TRUE)  # Should be all 0s
diff --git a/man/MNSs.Rd b/man/MNSs.Rd
index f07c9cd..153e803 100644
--- a/man/MNSs.Rd
+++ b/man/MNSs.Rd
@@ -60,9 +60,11 @@ MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
 \seealso{
   \code{\link{AA.Aa.aa}},
   \code{\link{AB.Ab.aB.ab}},
-  \code{\link{AB.Ab.aB.ab2}},
   \code{\link{ABO}},
-  \code{\link{G1G2G3}}.
+  \code{\link{A1A2A3}}.
+
+
+% \code{\link{AB.Ab.aB.ab2}},
 
 
 }
diff --git a/man/Max.Rd b/man/Max.Rd
index 2041f36..89bc628 100644
--- a/man/Max.Rd
+++ b/man/Max.Rd
@@ -1,9 +1,9 @@
 \name{Max}
 \alias{Max}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Maxima }
+\title{ Maximums }
 \description{
-  Generic function for the \emph{maxima} (maximums) of a model.
+  Generic function for the \emph{maximums} (maxima) of a model.
 
 }
 \usage{
@@ -13,7 +13,7 @@ Max(object, ...)
 \arguments{
   \item{object}{ An object for which the computation or
     extraction of
-    a maximum (or maxima) is meaningful.
+    a maximum (or maximums) is meaningful.
 
   }
   \item{\dots}{ Other arguments fed into the specific
@@ -27,7 +27,7 @@ Max(object, ...)
   Many models have no such notion or definition.
 
 
-  Maxima occur in quadratic and additive ordination,
+  Maximums occur in quadratic and additive ordination,
   e.g., CQO or CAO.
   For these models the maximum is the fitted value at the
   optimum. For quadratic ordination models there is a formula
diff --git a/man/Opt.Rd b/man/Opt.Rd
index 792b2d9..cf2f9a2 100644
--- a/man/Opt.Rd
+++ b/man/Opt.Rd
@@ -1,9 +1,9 @@
 \name{Opt}
 \alias{Opt}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Maxima }
+\title{ Optimums }
 \description{
-  Generic function for the \emph{optima} (or optimums) of a model.
+  Generic function for the \emph{optimums} (or optima) of a model.
 }
 \usage{
 Opt(object, ...)
@@ -11,7 +11,7 @@ Opt(object, ...)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{object}{ An object for which the computation or
-    extraction of an optimum (or optima) is meaningful.
+    extraction of an optimum (or optimums) is meaningful.
 
 
   }
@@ -27,7 +27,7 @@ Opt(object, ...)
   Many models have no such notion or definition.
 
 
-  Optima occur in quadratic and additive ordination,
+  Optimums occur in quadratic and additive ordination,
   e.g., CQO or CAO.
   For these models the optimum is the value of the latent
   variable where the maximum occurs, i.e., where the fitted value
@@ -94,7 +94,7 @@ hspider[,1:6] <- scale(hspider[,1:6])  # Standardized environmental vars
 
 \dontrun{
 index <- 1:ncol(depvar(p1))
-persp(p1, col = index, las = 1, lwd = 2, main = "Vertical lines at the optima")
+persp(p1, col = index, las = 1, lwd = 2, main = "Vertical lines at the optimums")
 abline(v = Opt(p1), lty = 2, col = index)
 }
 }
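
To make the quadratic-ordination formula alluded to above concrete, a
small base-R sketch with hypothetical coefficients (eta is on the
linear-predictor scale):

aa <- -0.5; bb <- 1.2; cc <- 2           # eta(v) = cc + bb*v + aa*v^2
v.opt <- -bb / (2 * aa)                  # Optimum; requires aa < 0
c(Optimum = v.opt,
  Maximum.eta = cc + bb * v.opt + aa * v.opt^2)  # Equals cc - bb^2/(4*aa)
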
diff --git a/man/Pareto.Rd b/man/ParetoUC.Rd
similarity index 71%
rename from man/Pareto.Rd
rename to man/ParetoUC.Rd
index b950ff2..1b299ac 100644
--- a/man/Pareto.Rd
+++ b/man/ParetoUC.Rd
@@ -8,20 +8,20 @@
 \description{
   Density, distribution function, quantile function and random
   generation for the Pareto(I) distribution with parameters
-  \code{location} and \code{shape}.
+  \code{scale} and \code{shape}.
 
 }
 \usage{
-dpareto(x, location, shape, log = FALSE)
-ppareto(q, location, shape)
-qpareto(p, location, shape)
-rpareto(n, location, shape)
+dpareto(x, scale = 1, shape, log = FALSE)
+ppareto(q, scale = 1, shape)
+qpareto(p, scale = 1, shape)
+rpareto(n, scale = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations. Must be a single positive integer. }
-  \item{location, shape}{the \eqn{\alpha}{alpha} and \eqn{k} parameters.}
+  \item{scale, shape}{the \eqn{\alpha}{alpha} and \eqn{k} parameters.}
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
@@ -67,18 +67,18 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 \examples{
 alpha <- 3; k <- exp(1); x <- seq(2.8, 8, len = 300)
 \dontrun{
-plot(x, dpareto(x, location = alpha, shape = k), type = "l",
+plot(x, dpareto(x, scale = alpha, shape = k), type = "l",
      main = "Pareto density split into 10 equal areas")
 abline(h = 0, col = "blue", lty = 2)
-qvec <- qpareto(seq(0.1,0.9,by = 0.1),location = alpha,shape = k)
-lines(qvec, dpareto(qvec, loc = alpha, shape = k),
+qvec <- qpareto(seq(0.1, 0.9, by = 0.1), scale = alpha, shape = k)
+lines(qvec, dpareto(qvec, scale = alpha, shape = k),
       col = "purple", lty = 3, type = "h")
 }
 pvec <- seq(0.1, 0.9, by = 0.1)
-qvec <- qpareto(pvec, location = alpha, shape = k)
-ppareto(qvec, location = alpha, shape = k)
-qpareto(ppareto(qvec, loc = alpha, shape = k),
-        loc = alpha, shape = k) - qvec  # Should be 0
+qvec <- qpareto(pvec, scale = alpha, shape = k)
+ppareto(qvec, scale = alpha, shape = k)
+qpareto(ppareto(qvec, scale = alpha, shape = k),
+        scale = alpha, shape = k) - qvec  # Should be 0
 }
 \keyword{distribution}
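
A quick numerical check of the renamed arguments, assuming VGAM 0.9-5:
the Pareto(I) density and distribution function should match the closed
forms f(y) = k * alpha^k / y^(k+1) and F(y) = 1 - (alpha/y)^k for y > alpha.

library(VGAM)
alpha <- 3; k <- exp(1); y <- seq(3.1, 8, by = 0.7)
max(abs(dpareto(y, scale = alpha, shape = k) - k * alpha^k / y^(k+1)))  # ~ 0
max(abs(ppareto(y, scale = alpha, shape = k) - (1 - (alpha/y)^k)))      # ~ 0
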
 
diff --git a/man/QvarUC.Rd b/man/QvarUC.Rd
index 6b692e6..9dba3e8 100644
--- a/man/QvarUC.Rd
+++ b/man/QvarUC.Rd
@@ -57,6 +57,7 @@ Qvar(object, factorname = NULL, which.linpred = 1,
   Character.
   Optional, for labelling the variance-covariance matrix.
 
+
 }
 \item{dispersion}{
   Numeric.
@@ -147,7 +148,13 @@ Qvar(object, factorname = NULL, which.linpred = 1,
   Quasi-variances.
   \emph{Biometrika} \bold{91}, 65--80.
 
-  
+
+Yee, T. W. and Hadi, A. F. (2014)
+Row-column interaction models, with an R implementation.
+\emph{Computational Statistics},
+\bold{29}, in press.
+
+
 }
 
 \author{
@@ -222,9 +229,9 @@ qvar(fit1, se = TRUE)   # Easy method to get the quasi-standard errors
 (quasiSE  <- sqrt(quasiVar))
 
 # Another form of input
-fit2 <- rcim(Qvar(Shipmodel, coef.ind = c(0,2:5), reference.name = "typeA"),
+fit2 <- rcim(Qvar(Shipmodel, coef.ind = c(0, 2:5), reference.name = "typeA"),
              uninormal("explink"), maxit = 99)
-\dontrun{ plotqvar(fit2, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
+\dontrun{ qvplot(fit2, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
 
 # The variance-covariance matrix is another form of input (not recommended)
 fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
@@ -234,36 +241,32 @@ fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
 (QuasiVar <- exp(diag(fitted(fit3))) / 2)                 # Version 1
 (QuasiVar <- diag(predict(fit3)[, c(TRUE, FALSE)]) / 2)   # Version 2
 (QuasiSE  <- sqrt(quasiVar))
-\dontrun{ plotqvar(fit3) }
+\dontrun{ qvplot(fit3) }
 
 
 # Example 2: a model with M > 1 linear predictors
 \dontrun{ require("VGAMdata")
 xs.nz.f <- subset(xs.nz, sex == "F")
-xs.nz.f <- subset(xs.nz.f, !is.na(babies)  & !is.na(age) & !is.na(ethnic))
-xs.nz.f$babies <- as.numeric(as.character(xs.nz.f$babies))
-xs.nz.f <- subset(xs.nz.f, babies >=  0)
-xs.nz.f <- subset(xs.nz.f, as.numeric(as.character(ethnic)) <=  2)
+xs.nz.f <- subset(xs.nz.f, !is.na(babies)  & !is.na(age) & !is.na(ethnicity))
+xs.nz.f <- subset(xs.nz.f, ethnicity != "Other")
 
 clist <- list("sm.bs(age, df = 4)" = rbind(1, 0),
               "sm.bs(age, df = 3)" = rbind(0, 1),
-              "ethnic" = diag(2),
-              "(Intercept)" = diag(2))
-fit1 <- vglm(babies ~ sm.bs(age, df = 4) + sm.bs(age, df = 3) + ethnic,
+              "ethnicity"          = diag(2),
+              "(Intercept)"        = diag(2))
+fit1 <- vglm(babies ~ sm.bs(age, df = 4) + sm.bs(age, df = 3) + ethnicity,
             zipoissonff(zero = NULL), xs.nz.f,
             constraints = clist, trace = TRUE)
-Fit1 <- rcim(Qvar(fit1, "ethnic", which.linpred = 1),
+Fit1 <- rcim(Qvar(fit1, "ethnicity", which.linpred = 1),
              uninormal("explink", imethod = 1), maxit = 99, trace = TRUE)
-Fit2 <- rcim(Qvar(fit1, "ethnic", which.linpred = 2),
+Fit2 <- rcim(Qvar(fit1, "ethnicity", which.linpred = 2),
              uninormal("explink", imethod = 1), maxit = 99, trace = TRUE)
 }
 \dontrun{ par(mfrow = c(1, 2))
-plotqvar(Fit1, scol = "blue", pch = 16,
-         main = expression(eta[1]),
-         slwd = 1.5, las = 1, length.arrows = 0.07)
-plotqvar(Fit2, scol = "blue", pch = 16,
-         main = expression(eta[2]),
-         slwd = 1.5, las = 1, length.arrows = 0.07)
+qvplot(Fit1, scol = "blue", pch = 16, main = expression(eta[1]),
+       slwd = 1.5, las = 1, length.arrows = 0.07)
+qvplot(Fit2, scol = "blue", pch = 16, main = expression(eta[2]),
+       slwd = 1.5, las = 1, length.arrows = 0.07)
 }
 }
 % Add one or more standard keywords, see file 'KEYWORDS' in the
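
As background to the qvplot() calls above: the defining property of
quasi-variances is that var(b_i - b_j) is approximated by qvar_i + qvar_j
for any two factor levels i and j.  A one-line base-R sketch with
hypothetical numbers:

qvar <- c(typeA = 0.010, typeB = 0.015)  # Hypothetical quasi-variances
sqrt(qvar["typeA"] + qvar["typeB"])      # Approximate SE of typeA - typeB
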
diff --git a/man/SUR.Rd b/man/SURff.Rd
similarity index 93%
rename from man/SUR.Rd
rename to man/SURff.Rd
index 5344656..99a9ead 100644
--- a/man/SUR.Rd
+++ b/man/SURff.Rd
@@ -1,5 +1,5 @@
-\name{SUR}
-\alias{SUR}
+\name{SURff}
+\alias{SURff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Seemingly Unrelated Regressions
 %%  ~~function to do ... ~~
@@ -9,9 +9,9 @@ Fits a system of seemingly unrelated regressions.
 %%  ~~ A concise (1-5 lines) description of what the function does. ~~
 }
 \usage{
-SUR(mle.normal = FALSE,
-    divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
-    parallel = FALSE, Varcov = NULL, matrix.arg = FALSE)
+SURff(mle.normal = FALSE,
+      divisor = c("n", "n-max(pj,pk)", "sqrt((n-pj)*(n-pk))"),
+      parallel = FALSE, Varcov = NULL, matrix.arg = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 %apply.parint = TRUE,
@@ -171,7 +171,7 @@ clist <- list("(Intercept)" = diag(2),
               "value.w"     = rbind(0, 1))
 zef1 <- vglm(cbind(invest.g, invest.w) ~
              capital.g + value.g + capital.w + value.w,
-             SUR(divisor = "sqrt"), maxit = 1,
+             SURff(divisor = "sqrt"), maxit = 1,
              data = gew, trace = TRUE, constraints = clist)
 
 round(coef(zef1, matrix = TRUE), digits = 4)  # ZEF
 zef1@extra$ncols.X.lm
 zef1@misc$divisor
 zef1@misc$values.divisor
 round(sqrt(diag(vcov(zef1))),    digits = 4)  # SEs
+nobs(zef1, type = "lm")
+df.residual(zef1, type = "lm")
+
 
 mle1 <- vglm(cbind(invest.g, invest.w) ~
              capital.g + value.g + capital.w + value.w,
-             SUR(mle.normal = TRUE, divisor = "n-max"),
+             SURff(mle.normal = TRUE, divisor = "n-max"),
              epsilon = 1e-11,
              data = gew, trace = TRUE, constraints = clist)
 round(coef(mle1, matrix = TRUE), digits = 4)  # MLE
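
The divisor argument shown above controls how the error covariances
sigma_jk are estimated from pairs of residual vectors; a minimal base-R
sketch of the three choices (simulated residuals, not the gew data):

set.seed(1)
n <- 100; pj <- 3; pk <- 4                       # Hypothetical dimensions
e.j <- rnorm(n); e.k <- rnorm(n)                 # Stand-ins for residuals
crossprod(e.j, e.k) / n                          # divisor = "n"
crossprod(e.j, e.k) / (n - max(pj, pk))          # divisor = "n-max(pj,pk)"
crossprod(e.j, e.k) / sqrt((n - pj) * (n - pk))  # divisor = "sqrt(...)"
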
diff --git a/man/Select.Rd b/man/Select.Rd
index 45fccf7..e8054c8 100644
--- a/man/Select.Rd
+++ b/man/Select.Rd
@@ -1,6 +1,7 @@
 \name{Select}
 \alias{Select}
-\alias{subsetc}
+\alias{subsetcol}
+% \alias{subsetc}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{
   Select variables for a formula response or the RHS of a formula
@@ -20,10 +21,6 @@ Select(data = list(), prefix = "y",
        lhs = NULL, rhs = NULL, rhs2 = NULL, rhs3 = NULL,
        as.character = FALSE, as.formula.arg = FALSE, tilde = TRUE,
        exclude = NULL, sort.arg = TRUE)
-subsetc(data = list(), prefix = "y",
-        lhs = NULL, rhs = NULL, rhs2 = NULL, rhs3 = NULL,
-        as.character = FALSE, as.formula.arg = FALSE, tilde = TRUE,
-        exclude = NULL, sort.arg = TRUE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -182,7 +179,7 @@ subsetc(data = list(), prefix = "y",
 
 
 
-  Currently \code{subsetc()} and \code{Select()} are identical.
+  Currently \code{subsetcol()} and \code{Select()} are identical.
   One of these functions might be withdrawn in the future.
 
 
@@ -240,7 +237,7 @@ fit.th <-
         form2 = ~ x2 + x3.tij + t01 + t02 + t03 + t04 + t05 + t06 + t07 + t08 +
                                 t09 + t10)
 # Short way to do it:
-Fit.th <- vglm(Select(tab1, "y", sort = FALSE) ~ x2 + x3.tij,
+Fit.th <- vglm(Select(tab1, "y") ~ x2 + x3.tij,
                xij = list(Select(tab1, "t", as.formula = TRUE,
                                  sort = FALSE, lhs = "x3.tij", rhs = "0")),
                posbernoulli.t(parallel.t = TRUE ~ x2 + x3.tij),
@@ -259,4 +256,5 @@ Fit.th <- vglm(Select(tab1, "y", sort = FALSE) ~ x2 + x3.tij,
 %                             rhs = "x2 + x3.tij"))
 
 
+% dim(subset(prinia, TRUE, select = grepl("^y", colnames(prinia))))
 
diff --git a/man/SurvS4.Rd b/man/SurvS4.Rd
index 981603e..10e222b 100644
--- a/man/SurvS4.Rd
+++ b/man/SurvS4.Rd
@@ -135,7 +135,7 @@ coding in this case.
   \code{(start, end]} and not
   \code{[start, end]} as previously.
   (This makes a difference for discrete data, such as for
-  \code{\link{cenpoisson}}).
+  \code{\link{cens.poisson}}).
   All \pkg{VGAM} family functions beginning with \code{"cen"} require
   the packaging function \code{Surv} to format the input.
 
@@ -159,7 +159,7 @@ coding in this case.
 
 \seealso{
   \code{\link{SurvS4-class}},
-  \code{\link{cenpoisson}},
+  \code{\link{cens.poisson}},
   \code{\link[survival]{survreg}},
   \code{\link{leukemia}}.
 
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index a91b4e3..301b2b5 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -111,7 +111,7 @@ Maintainer: Thomas Yee \email{t.yee at auckland.ac.nz}.
 \references{
 
 
-Yee, T. W.
+Yee, T. W. (2014)
 Vector Generalized Linear and Additive Models.
 \emph{Monograph in preparation}.
 
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 598d0bc..862c94b 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -7,22 +7,24 @@
 \description{
    Maximum likelihood estimation of
    the 1, 2 and 3-parameter asymmetric Laplace distributions (ALDs).
-   The 1-parameter ALD may be used for quantile regression.
+   The 2-parameter ALD may,
+   with trepidation and lots of skill,
+   sometimes be used as an approximation of quantile regression.
 
 
 }
 \usage{
 alaplace1(tau = NULL, llocation = "identitylink",
           ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
-          shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
-          dfmu.init = 3, intparloc = FALSE, imethod = 1)
+          ishrinkage = 0.95, parallel.locat = TRUE  ~ 0, digt = 4,
+          idf.mu = 3, zero = NULL, imethod = 1)
 
 alaplace2(tau = NULL,  llocation = "identitylink", lscale = "loge",
           ilocation = NULL, iscale = NULL, kappa = sqrt(tau/(1 - tau)),
-          shrinkage.init = 0.95,
-          parallelLocation = FALSE, digt = 4, eq.scale = TRUE,
-          dfmu.init = 3, intparloc = FALSE, 
-          imethod = 1, zero = -2)
+          ishrinkage = 0.95,
+          parallel.locat =  TRUE ~ 0,
+          parallel.scale = FALSE ~ 0,
+          digt = 4, idf.mu = 3, imethod = 1, zero = -2)
 
 alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
           ilocation = NULL, iscale = NULL, ikappa = 1,
@@ -61,30 +63,41 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
 
 
   }
-  \item{parallelLocation, intparloc}{ Logical.
-    Should the quantiles be parallel on the transformed scale
-    (argument \code{llocation})?
-    Assigning this argument to \code{TRUE} circumvents the
-    seriously embarrassing quantile crossing problem.
-    The argument \code{intparloc} applies to intercept term;
-    the argument \code{parallelLocation} applies to other terms.
+  \item{parallel.locat, parallel.scale}{
+  See the \code{parallel} argument of \code{\link{CommonVGAMffArguments}}.
+  These arguments apply to the location and scale parameters.
+  It generally only makes sense for the scale parameters
+  to be equal, hence set \code{parallel.scale = TRUE}.
+  Note that
+  assigning \code{parallel.locat} the value \code{TRUE} circumvents the
+  seriously embarrassing quantile crossing problem because all
+  constraint matrices except for the intercept correspond to a
+  parallelism assumption.
+
 
   }
-  \item{eq.scale}{ Logical.
-    Should the scale parameters be equal? It is advised
-    to keep \code{eq.scale = TRUE} unchanged because it
-    does not make sense to have different values for each
-    \code{tau} value.
 
 
-  }
+% \item{intparloc}{ Logical.
+%   Defunct.
+% }
+
+
+% \item{eq.scale}{ Logical.
+%   Should the scale parameters be equal? It is advised
+%   to keep \code{eq.scale = TRUE} unchanged because it
+%   does not make sense to have different values for each
+%   \code{tau} value.
+% }
+
+
   \item{imethod}{
   Initialization method.
   Either the value 1, 2, 3 or 4.
 
 
   }
-  \item{dfmu.init}{
+  \item{idf.mu}{
   Degrees of freedom for the cubic smoothing spline fit applied to
   get an initial estimate of the location parameter.
   See \code{\link{vsmooth.spline}}.
@@ -92,7 +105,7 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
 
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   How much shrinkage is used when initializing \eqn{\xi}{xi}.
   The value must be between 0 and 1 inclusive, and
   a value of 0 means the individual response values are used,
@@ -108,7 +121,7 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
   different \eqn{\tau}{tau} values from an existing fitted
   \code{alaplace2()} model (practical only if it has a
   single value).
-  If the model has \code{parallelLocation = TRUE} then
+  If the model has \code{parallel.locat = TRUE} then
   only the intercept need be estimated; use an offset.
   See below for an example.
 
@@ -180,16 +193,17 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
    \eqn{\tau = \kappa^2 / (1 + \kappa^2)}{tau = kappa^2 / (1 + kappa^2)}
    so that
    \eqn{\kappa =  \sqrt{\tau / (1-\tau)}}{kappa = sqrt(tau / (1-tau))}.
-   Thus \code{alaplace1()} might be used as an alternative to \code{rq}
+   Thus \code{alaplace2()} might be used as an alternative to \code{rq}
    in the \pkg{quantreg} package.
 
 
    Both \code{alaplace1()} and \code{alaplace2()} can handle
    multiple responses, and the number of linear/additive
    predictors is dictated by the length of \code{tau} or
-   \code{kappa}.  The function \code{alaplace2()} can also
-   handle a matrix response with a single-valued \code{tau}
-   or \code{kappa}.
+   \code{kappa}.  The functions \code{alaplace1()}
+   and \code{alaplace2()} can also
+   handle multiple responses (i.e., a matrix response)
+   but only with a \emph{single-valued} \code{tau} or \code{kappa}.
 
 
 }
@@ -219,19 +233,30 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
   Boston: Birkhauser.
 
 
-  Yee, T. W. (2012)
-  Quantile regression for counts and proportions.
-  In preparation.
+Yee, T. W. (2014)
+Vector Generalized Linear and Additive Models.
+\emph{Monograph in preparation}.
+
+
+
+
+%  Yee, T. W. (2014)
+%  Quantile regression for counts and proportions.
+%  In preparation.
 
 
 }
 \author{ Thomas W. Yee }
 \section{Warning}{
+  These functions are experimental and especially subject to
+  change or withdrawal.
   The MLE regularity conditions do not hold for this distribution
   so that misleading inferences may result,
   e.g., in the \code{summary} and \code{vcov} of the object.
 
 
+
+
   Care is needed with \code{tau} values which are too small, e.g.,
   for count data with \code{llocation = "loge"} and if the sample
   proportion of zeros is greater than \code{tau}.
@@ -253,15 +278,18 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
   is the best model and then use \code{maxit} to choose
   that model)
   due to the regularity conditions not holding.
+  Often the iterations slowly crawl towards the solution, so
+  monitoring the convergence (set \code{trace = TRUE}) is highly
+  recommended.
 
 
   For large data sets it is a very good idea to keep the length of
   \code{tau}/\code{kappa} low to avoid large memory requirements.
   Then
-  for \code{parallelLoc = FALSE} one can repeatedly fit a model with
+  for \code{parallel.locat = FALSE} one can repeatedly fit a model with
   \code{alaplace1()} with one \eqn{\tau}{tau} at a time;
   and
-  for \code{parallelLoc = TRUE} one can refit a model with
+  for \code{parallel.locat = TRUE} one can refit a model with
   \code{alaplace1()} with one \eqn{\tau}{tau} at a time but
   using offsets and an intercept-only model.
 
@@ -269,7 +297,7 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
   A second method for solving the noncrossing quantile problem is
   illustrated below in Example 3.
   This is called the \emph{accumulative quantile method} (AQM)
-  and details are in Yee (2012).
+  and details are in Yee (2014).
   It does not make the strong parallelism assumption.
 
 
@@ -282,64 +310,66 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
 \seealso{
   \code{\link{ralap}},
   \code{\link{laplace}},
+  \code{\link{CommonVGAMffArguments}},
   \code{\link{lms.bcn}},
   \code{\link{amlnormal}},
-  \code{\link{koenker}},
+  \code{\link{sc.studentt2}},
   \code{\link{simulate.vlm}}.
 
 
 }
 
+
+% set.seed(1)
+
+
 \examples{
 \dontrun{
 # Example 1: quantile regression with smoothing splines
-adata <- data.frame(x = sort(runif(n <- 500)))
+set.seed(123); adata <- data.frame(x2 = sort(runif(n <- 500)))
 mymu <- function(x) exp(-2 + 6*sin(2*x-0.2) / (x+0.5)^2)
-adata <- transform(adata, y = rpois(n, lambda = mymu(x)))
+adata <- transform(adata, y = rpois(n, lambda = mymu(x2)))
 mytau <- c(0.25, 0.75); mydof <- 4
 
-fit <- vgam(y ~ s(x, df = mydof),
-            alaplace1(tau = mytau, llocation = "loge",
-                      parallelLoc = FALSE), data = adata, trace = TRUE)
-fitp <- vgam(y ~ s(x, df = mydof), data = adata, trace = TRUE,
-             alaplace1(tau = mytau, llocation = "loge", parallelLoc = TRUE))
+fit <- vgam(y ~ s(x2, df = mydof), data = adata, trace = TRUE, maxit = 900,
+            alaplace2(tau = mytau, llocat = "loge",
+                      parallel.locat = FALSE))
+fitp <- vgam(y ~ s(x2, df = mydof), data = adata, trace = TRUE, maxit = 900,
+             alaplace2(tau = mytau, llocat = "loge", parallel.locat = TRUE))
  
-par(las = 1); mylwd = 1.5
-with(adata, plot(x, jitter(y, factor = 0.5), col = "orange",
-                 main = "Example 1; green: parallelLoc = TRUE",
+par(las = 1); mylwd <- 1.5
+with(adata, plot(x2, jitter(y, factor = 0.5), col = "orange",
+                 main = "Example 1; green: parallel.locat = TRUE",
                  ylab = "y", pch = "o", cex = 0.75))
-with(adata, matlines(x, fitted(fit ), col = "blue",
+with(adata, matlines(x2, fitted(fit ), col = "blue",
                      lty = "solid", lwd = mylwd))
-with(adata, matlines(x, fitted(fitp), col = "green",
+with(adata, matlines(x2, fitted(fitp), col = "green",
                      lty = "solid", lwd = mylwd))
 finexgrid <- seq(0, 1, len = 1001)
 for (ii in 1:length(mytau))
-    lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)),
-          col = "blue", lwd = mylwd)
+  lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)),
+        col = "blue", lwd = mylwd)
 fit@extra  # Contains useful information
 
 
 # Example 2: regression quantile at a new tau value from an existing fit
 # Nb. regression splines are used here since it is easier.
-fitp2 <- vglm(y ~ sm.bs(x, df = mydof),
-              family = alaplace1(tau = mytau, llocation = "loge",
-                                 parallelLoc = TRUE),
-              data = adata, trace = TRUE)
+fitp2 <- vglm(y ~ sm.bs(x2, df = mydof), data = adata, trace = TRUE,
+              alaplace1(tau = mytau, llocation = "loge",
+                        parallel.locat = TRUE))
 
 newtau <- 0.5  # Want to refit the model with this tau value
-fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[,1]),
-              family = alaplace1(tau = newtau, llocation = "loge"),
-              data = adata)
-with(adata, plot(x, jitter(y, factor = 0.5), col = "orange",
+fitp3 <- vglm(y ~ 1 + offset(predict(fitp2)[, 1]),
+              alaplace1(tau = newtau, llocation = "loge"), data = adata)
+with(adata, plot(x2, jitter(y, factor = 0.5), col = "orange",
                pch = "o", cex = 0.75, ylab = "y",
-               main = "Example 2; parallelLoc = TRUE"))
-with(adata, matlines(x, fitted(fitp2), col = "blue", 
+               main = "Example 2; parallel.locat = TRUE"))
+with(adata, matlines(x2, fitted(fitp2), col = "blue", 
                      lty = 1, lwd = mylwd))
-with(adata, matlines(x, fitted(fitp3), col = "black",
+with(adata, matlines(x2, fitted(fitp3), col = "black",
                      lty = 1, lwd = mylwd))
 
 
-
 # Example 3: noncrossing regression quantiles using a trick: obtain
 # successive solutions which are added to previous solutions; use a log
 # link to ensure increasing quantiles at any value of x.
@@ -349,28 +379,28 @@ answer <- matrix(0, nrow(adata), length(mytau))  # Stores the quantiles
 adata <- transform(adata, offsety = y*0)
 usetau <- mytau
 for (ii in 1:length(mytau)) {
-#   cat("\n\nii  = ", ii, "\n")
+# cat("\n\nii  = ", ii, "\n")
   adata <- transform(adata, usey = y-offsety)
   iloc <- ifelse(ii == 1, with(adata, median(y)), 1.0)  # Well-chosen!
   mydf <- ifelse(ii == 1, 5, 3)  # Maybe less smoothing will help
-  lloc <- ifelse(ii == 1, "identitylink", "loge")  # 2nd value must be "loge"
-  fit3 <- vglm(usey ~ sm.ns(x, df = mydf), data = adata, trace = TRUE,
-               alaplace1(tau = usetau[ii], lloc = lloc, iloc = iloc))
-  answer[,ii] <- (if(ii == 1) 0 else answer[,ii-1]) + fitted(fit3)
-  adata <- transform(adata, offsety = answer[,ii])
+# lloc <- ifelse(ii == 1, "loge", "loge")  # 2nd value must be "loge"
+  fit3 <- vglm(usey ~ sm.ns(x2, df = mydf), data = adata, trace = TRUE,
+               alaplace2(tau = usetau[ii], lloc = "loge", iloc = iloc))
+  answer[, ii] <- (if(ii == 1) 0 else answer[, ii-1]) + fitted(fit3)
+  adata <- transform(adata, offsety = answer[, ii])
 }
 
 # Plot the results.
-with(adata, plot(x, y, col = "blue",
+with(adata, plot(x2, y, col = "blue",
      main = paste("Noncrossing and nonparallel; tau  = ",
                 paste(mytau, collapse = ", "))))
-with(adata, matlines(x, answer, col = "orange", lty = 1))
+with(adata, matlines(x2, answer, col = "orange", lty = 1))
 
 # Zoom in near the origin.
-with(adata, plot(x, y, col = "blue", xlim = c(0, 0.2), ylim = 0:1,
+with(adata, plot(x2, y, col = "blue", xlim = c(0, 0.2), ylim = 0:1,
      main = paste("Noncrossing and nonparallel; tau  = ",
                 paste(mytau, collapse = ", "))))
-with(adata, matlines(x, answer, col = "orange", lty = 1))
+with(adata, matlines(x2, answer, col = "orange", lty = 1))
 }
 }
 \keyword{models}
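
A one-line sanity check, in base R, of the tau <-> kappa mapping that
alaplace1() and alaplace2() rely on above:

tau <- c(0.1, 0.25, 0.5, 0.75, 0.9)
kappa <- sqrt(tau / (1 - tau))
kappa^2 / (1 + kappa^2) - tau  # Should be all 0s
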
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index 1cee24e..9bca196 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -144,9 +144,9 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identitylink",
 \dontrun{
 # Example 1
 ooo <- with(bmi.nz, order(age))
-bmi.nz <- bmi.nz[ooo,]  # Sort by age
-(fit <- vglm(BMI ~ sm.bs(age), fam = amlnormal(w.aml = 0.1), bmi.nz))
-fit@extra # Gives the w value and the percentile
+bmi.nz <- bmi.nz[ooo, ]  # Sort by age
+(fit <- vglm(BMI ~ sm.bs(age), amlnormal(w.aml = 0.1), data = bmi.nz))
+fit@extra  # Gives the w value and the percentile
 coef(fit, matrix = TRUE)
 
 # Quantile plot
@@ -157,8 +157,8 @@ with(bmi.nz, lines(age, c(fitted(fit)), col = "black"))
 
 # Example 2
 # Find the w values that give the 25, 50 and 75 percentiles
-findw <- function(w, percentile = 50) {
-  fit2 <- vglm(BMI ~ sm.bs(age), fam = amlnormal(w = w), data = bmi.nz)
+find.w <- function(w, percentile = 50) {
+  fit2 <- vglm(BMI ~ sm.bs(age), amlnormal(w = w), data = bmi.nz)
   fit2 at extra$percentile - percentile
 }
 # Quantile plot
@@ -166,18 +166,18 @@ with(bmi.nz, plot(age, BMI, col = "blue", las = 1, main =
      "25, 50 and 75 expectile-percentile curves"))
 for (myp in c(25, 50, 75)) {
 # Note: uniroot() can only find one root at a time
-  bestw <- uniroot(f = findw, interval = c(1/10^4, 10^4), percentile = myp)
-  fit2 <- vglm(BMI ~ sm.bs(age), fam = amlnormal(w = bestw$root), data = bmi.nz)
-  with(bmi.nz, lines(age, c(fitted(fit2)), col = "red"))
+  bestw <- uniroot(f = find.w, interval = c(1/10^4, 10^4), percentile = myp)
+  fit2 <- vglm(BMI ~ sm.bs(age), amlnormal(w = bestw$root), data = bmi.nz)
+  with(bmi.nz, lines(age, c(fitted(fit2)), col = "orange"))
 }
 
 # Example 3; this is Example 1 but with smoothing splines and
 # a vector w and a parallelism assumption.
 ooo <- with(bmi.nz, order(age))
-bmi.nz <- bmi.nz[ooo,]  # Sort by age
-fit3 <- vgam(BMI ~ s(age, df = 4), bmi.nz, trace = TRUE,
-             fam = amlnormal(w = c(0.1, 1, 10), parallel = TRUE))
-fit3@extra # The w values, percentiles and weighted deviances
+bmi.nz <- bmi.nz[ooo, ]  # Sort by age
+fit3 <- vgam(BMI ~ s(age, df = 4), data = bmi.nz, trace = TRUE,
+             amlnormal(w = c(0.1, 1, 10), parallel = TRUE))
+fit3@extra  # The w values, percentiles and weighted deviances
 
 # The linear components of the fit; not for human consumption:
 coef(fit3, matrix = TRUE)
diff --git a/man/benini.Rd b/man/benini.Rd
index 1d50fc5..ee4893f 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -1,5 +1,5 @@
-\name{benini}
-\alias{benini}
+\name{benini1}
+\alias{benini1}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Benini Distribution Family Function }
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-benini(y0 = stop("argument 'y0' must be specified"), lshape = "loge",
-       ishape = NULL, imethod = 1, zero = NULL)
+benini1(y0 = stop("argument 'y0' must be specified"), lshape = "loge",
+        ishape = NULL, imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -37,20 +37,21 @@ benini(y0 = stop("argument 'y0' must be specified"), lshape = "loge",
 \details{
   The Benini distribution
   has a probability density function that can be written
-  \deqn{f(y) = 2 b \exp(-b[(\log(y/y_0))^2]) \log(y/y_0) / y }{%
-        f(y) = 2*b*exp(-b * [(log(y/y0))^2]) * log(y/y0) / y}
-  for \eqn{0 < y_0 < y}{0 < y0 < y}, and \eqn{b > 0}.
+  \deqn{f(y) = 2 s \exp(-s[(\log(y/y_0))^2]) \log(y/y_0) / y }{%
+        f(y) = 2*s*exp(-s * [(log(y/y0))^2]) * log(y/y0) / y}
+  for \eqn{0 < y_0 < y}{0 < y0 < y}, and shape \eqn{s > 0}.
   The cumulative distribution function for \eqn{Y} is
-  \deqn{F(y) = 1 - \exp(-b[(\log(y/y_0))^2]).}{%
-        F(y) = 1 - exp(-b * [(log(y / y0))^2]). }
+  \deqn{F(y) = 1 - \exp(-s[(\log(y/y_0))^2]).}{%
+        F(y) = 1 - exp(-s * [(log(y / y0))^2]). }
   Here, Newton-Raphson and Fisher scoring coincide.
-  The median of \eqn{Y} is now returned as the fitted values.
+  The median of \eqn{Y} is now returned as the fitted values, by default.
   This \pkg{VGAM} family function can handle multiple
   responses, which are inputted as a matrix.
 
 
-  On fitting, the \code{extra}  slot has a component called \code{y0} which 
-  contains the value of the \code{y0} argument.
+  On fitting, the \code{extra} slot has a component called
+  \code{y0} which contains the value of the \code{y0}
+  argument.
 
 
 }
@@ -76,12 +77,15 @@ Kleiber, C. and Kotz, S. (2003)
 Hoboken, NJ, USA: Wiley-Interscience.
 
 
+% Section 7.1, pp.235--8
+
+
 }
 \author{ T. W. Yee }
 \note{
-  Yet to do: the 2-parameter Benini distribution estimates \eqn{y_0}{y0}
-  as well, and the 3-parameter Benini distribution estimates another
+  Yet to do: the 2-parameter Benini distribution estimates another
   shape parameter \eqn{a}{a} too.
+  Hence, the code may change in the future.
 
 
 }
@@ -93,7 +97,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 \examples{
 y0 <- 1; nn <- 3000
 bdata <- data.frame(y  = rbenini(nn, y0 = y0, shape = exp(2)))
-fit <- vglm(y ~ 1, benini(y0 = y0), data = bdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, benini1(y0 = y0), data = bdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 fit@extra$y0
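
The cdf F(y) = 1 - exp(-s * (log(y/y0))^2) quoted above yields the median
in closed form, y0 * exp(sqrt(log(2)/s)); a quick check against qbenini(),
assuming VGAM 0.9-5:

library(VGAM)
y0 <- 1; s <- exp(2)
y0 * exp(sqrt(log(2) / s))        # Median from the closed form
qbenini(0.5, y0 = y0, shape = s)  # Should agree
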
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
index 93e2f45..7fe8280 100644
--- a/man/beniniUC.Rd
+++ b/man/beniniUC.Rd
@@ -13,10 +13,10 @@
 
 }
 \usage{
-dbenini(x, shape, y0, log = FALSE)
-pbenini(q, shape, y0)
-qbenini(p, shape, y0)
-rbenini(n, shape, y0)
+dbenini(x, y0, shape, log = FALSE)
+pbenini(q, y0, shape)
+qbenini(p, y0, shape)
+rbenini(n, y0, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -26,11 +26,11 @@ rbenini(n, shape, y0)
 
 
   }
-  \item{shape}{the shape parameter \eqn{b}.
+  \item{y0}{the scale parameter \eqn{y_0}{y0}.
 
 
   }
-  \item{y0}{the scale parameter \eqn{y_0}{y0}.
+  \item{shape}{the shape parameter \eqn{b}.
 
 
   }
@@ -61,8 +61,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{benini}}, the \pkg{VGAM} family function
-  for estimating the parameter \eqn{b} by maximum likelihood estimation,
+  See \code{\link{benini1}}, the \pkg{VGAM} family function
+  for estimating the parameter \eqn{s} by maximum likelihood estimation,
   for the formula of the probability density function and other details.
 
 
@@ -71,7 +71,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 %  
 %}
 \seealso{
-  \code{\link{benini}}.
+  \code{\link{benini1}}.
 
 
 }
@@ -79,7 +79,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 \dontrun{
 y0 <- 1; shape <- exp(1)
 xx <- seq(0.0, 4, len = 101)
-plot(xx, dbenini(xx, y0 = y0,shape = shape), type = "l", col = "blue",
+plot(xx, dbenini(xx, y0 = y0, shape = shape), type = "l", col = "blue",
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles", ylim = 0:1,
      las = 1, ylab = "", xlab = "x")
diff --git a/man/betaII.Rd b/man/betaII.Rd
index 57ed59a..f59b198 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -84,10 +84,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
     \code{\link{dagum}},
     \code{\link{sinmad}},
     \code{\link{fisk}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}}.
+    \code{\link{inv.paralogistic}}.
 
 
 }
diff --git a/man/beta.ab.Rd b/man/betaR.Rd
similarity index 91%
rename from man/beta.ab.Rd
rename to man/betaR.Rd
index 8272443..47ce185 100644
--- a/man/beta.ab.Rd
+++ b/man/betaR.Rd
@@ -1,5 +1,5 @@
-\name{beta.ab}
-\alias{beta.ab}
+\name{betaR}
+\alias{betaR}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ The Two-parameter Beta Distribution Family Function }
 \description{
@@ -8,9 +8,9 @@
 
 }
 \usage{
-beta.ab(lshape1 = "loge", lshape2 = "loge",
-        i1 = NULL, i2 = NULL, trim = 0.05,
-        A = 0, B = 1, parallel = FALSE, zero = NULL)
+betaR(lshape1 = "loge", lshape2 = "loge",
+      i1 = NULL, i2 = NULL, trim = 0.05,
+      A = 0, B = 1, parallel = FALSE, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -121,7 +121,7 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
   \code{\link[stats:Beta]{Beta}},
   \code{\link{genbetaII}},
   \code{\link{betaII}},
-  \code{\link{betabinomial.ab}},
+  \code{\link{betabinomialff}},
   \code{\link{betageometric}},
   \code{\link{betaprime}},
   \code{\link{rbetageom}},
@@ -133,14 +133,14 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
 }
 \examples{
 bdata <- data.frame(y = rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1)))
-fit <- vglm(y ~ 1, beta.ab(lshape1 = "identitylink", lshape2 = "identitylink"),
+fit <- vglm(y ~ 1, betaR(lshape1 = "identitylink", lshape2 = "identitylink"),
             data = bdata, trace = TRUE, crit = "coef")
-fit <- vglm(y ~ 1, beta.ab, data = bdata, trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, betaR, data = bdata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)  # Useful for intercept-only models
 
 bdata <- transform(bdata, Y = 5 + 8 * y)  # From 5 to 13, not 0 to 1
-fit <- vglm(Y ~ 1, beta.ab(A = 5, B = 13), data = bdata, trace = TRUE)
+fit <- vglm(Y ~ 1, betaR(A = 5, B = 13), data = bdata, trace = TRUE)
 Coef(fit)
 c(meanY = with(bdata, mean(Y)), head(fitted(fit),2))
 }
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index c00b4e3..6026b2e 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -88,7 +88,7 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
   mean or the probability of success.
 
 
-  See \code{\link{betabinomial}} and \code{\link{betabinomial.ab}},
+  See \code{\link{betabinomial}} and \code{\link{betabinomialff}},
   the \pkg{VGAM} family functions for
   estimating the parameters, for the formula of the probability density
   function and other details.
@@ -109,7 +109,7 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
 }
 \seealso{
   \code{\link{betabinomial}},
-  \code{\link{betabinomial.ab}}.
+  \code{\link{betabinomialff}}.
 
 
 }
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index c68c33c..f12a697 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -9,9 +9,8 @@
 
 }
 \usage{
-betabinomial(lmu = "logit", lrho = "logit",
-             irho = NULL, imethod = 1, shrinkage.init = 0.95,
-             nsimEIM = NULL, zero = 2)
+betabinomial(lmu = "logit", lrho = "logit", irho = NULL, imethod = 1,
+             ishrinkage = 0.95, nsimEIM = NULL, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -51,9 +50,9 @@ betabinomial(lmu = "logit", lrho = "logit",
 
 
   }
-  \item{shrinkage.init, nsimEIM}{ 
+  \item{ishrinkage, nsimEIM}{ 
   See \code{\link{CommonVGAMffArguments}} for more information.
-  The argument \code{shrinkage.init} is used only if \code{imethod = 2}.
+  The argument \code{ishrinkage} is used only if \code{imethod = 2}.
   Using the argument \code{nsimEIM} may offer large advantages for large
   values of \eqn{N} and/or large data sets.
 
@@ -84,11 +83,11 @@ betabinomial(lmu = "logit", lrho = "logit",
   \deqn{P(T=t) = {N \choose t} \frac{B(\alpha+t, \beta+N-t)}
                   {B(\alpha, \beta)}}{%
         P(T=t) = choose(N,t) B(alpha+t, beta+N-t) / B(alpha, beta)}
-   where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the
-   \code{\link[base:Special]{beta}} function
-   with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
-   Recall \eqn{Y = T/N} is the real response being modelled.
-   
+  where \eqn{t=0,1,\ldots,N}, and \eqn{B} is the
+  \code{\link[base:Special]{beta}} function
+  with shape parameters \eqn{\alpha}{alpha} and \eqn{\beta}{beta}.
+  Recall \eqn{Y = T/N} is the real response being modelled.
+
 
   The default model is \eqn{\eta_1 = logit(\mu)}{eta1 =logit(mu)}
   and \eqn{\eta_2 = logit(\rho)}{eta2 = logit(rho)} because both
@@ -182,7 +181,7 @@ betabinomial(lmu = "logit", lrho = "logit",
 
 }
 \seealso{
-  \code{\link{betabinomial.ab}},
+  \code{\link{betabinomialff}},
   \code{\link{Betabinom}},
   \code{\link{binomialff}},
   \code{\link{betaff}},
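
The beta-binomial pmf displayed above can be checked directly in base R
against VGAM's dbetabinom.ab() (hypothetical shape values; assumes
VGAM 0.9-5):

library(VGAM)
N <- 10; a <- exp(1); b <- exp(2); tt <- 0:N
pmf <- choose(N, tt) * beta(a + tt, b + N - tt) / beta(a, b)
max(abs(pmf - dbetabinom.ab(tt, size = N, shape1 = a, shape2 = b)))  # ~ 0
sum(pmf)  # Should be 1
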
diff --git a/man/betabinomial.ab.Rd b/man/betabinomialff.Rd
similarity index 93%
rename from man/betabinomial.ab.Rd
rename to man/betabinomialff.Rd
index f7de53c..9202876 100644
--- a/man/betabinomial.ab.Rd
+++ b/man/betabinomialff.Rd
@@ -1,5 +1,5 @@
-\name{betabinomial.ab}
-\alias{betabinomial.ab}
+\name{betabinomialff}
+\alias{betabinomialff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Beta-binomial Distribution Family Function }
 \description{
@@ -10,9 +10,8 @@
 
 }
 \usage{
-betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
-                imethod = 1, shrinkage.init = 0.95, nsimEIM = NULL,
-                zero = NULL)
+betabinomialff(lshape12 = "loge", i1 = 1, i2 = NULL,
+               imethod = 1, ishrinkage = 0.95, nsimEIM = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -41,9 +40,9 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
 
 
   }
-  \item{shrinkage.init, nsimEIM, imethod}{
+  \item{ishrinkage, nsimEIM, imethod}{
   See \code{\link{CommonVGAMffArguments}} for more information.
-  The argument \code{shrinkage.init} is used only if \code{imethod = 2}.
+  The argument \code{ishrinkage} is used only if \code{imethod = 2}.
   Using the argument \code{nsimEIM} may offer large advantages for large
   values of \eqn{N} and/or large data sets.
 
@@ -190,7 +189,7 @@ betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
 # Example 1
 N <- 10; s1 <- exp(1); s2 <- exp(2)
 y <- rbetabinom.ab(n = 100, size = N, shape1 = s1, shape2 = s2)
-fit <- vglm(cbind(y, N-y) ~ 1, betabinomial.ab, trace = TRUE)
+fit <- vglm(cbind(y, N-y) ~ 1, betabinomialff, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fit@misc$rho)  # The correlation parameter
@@ -198,7 +197,7 @@ head(cbind(depvar(fit), weights(fit, type = "prior")))
 
 
 # Example 2
-fit <- vglm(cbind(R, N-R) ~ 1, betabinomial.ab, data = lirat,
+fit <- vglm(cbind(R, N-R) ~ 1, betabinomialff, data = lirat,
             trace = TRUE, subset = N > 1)
 coef(fit, matrix = TRUE)
 Coef(fit)
@@ -215,7 +214,7 @@ all.equal(c(fitted(fit)),
 # Example 3, which is more complicated
 lirat <- transform(lirat, fgrp = factor(grp))
 summary(lirat)  # Only 5 litters in group 3
-fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomial.ab(zero = 2),
+fit2 <- vglm(cbind(R, N-R) ~ fgrp + hb, betabinomialff(zero = 2),
            data = lirat, trace = TRUE, subset = N > 1)
 coef(fit2, matrix = TRUE)
 coef(fit2, matrix = TRUE)[, 1] -
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 64a4bec..8355110 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -66,7 +66,7 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
 
 
   Another parameterization of the beta distribution involving the raw
-  shape parameters is implemented in \code{\link{beta.ab}}.
+  shape parameters is implemented in \code{\link{betaR}}.
 
 
   For general \eqn{A} and \eqn{B}, the variance of \eqn{Y} is
@@ -117,11 +117,11 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
 }
 
 \seealso{ 
-  \code{\link{beta.ab}},
+  \code{\link{betaR}},
   \code{\link[stats:Beta]{Beta}},
   \code{\link{genbetaII}},
   \code{\link{betaII}},
-  \code{\link{betabinomial.ab}},
+  \code{\link{betabinomialff}},
   \code{\link{betageometric}},
   \code{\link{betaprime}},
   \code{\link{rbetageom}},
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
index 05894b8..daba7df 100644
--- a/man/betageometric.Rd
+++ b/man/betageometric.Rd
@@ -67,7 +67,9 @@ betageometric(lprob = "logit", lshape = "loge",
   of the parameters is maintained.
   The mean of \eqn{Y} is
   \eqn{E(Y) = shape2 / (shape1-1) = (1-p) / (p-\phi)}{E(Y) = 
-       shape2 / (shape1-1) = (1-prob) / (prob-phi)}.
+       shape2 / (shape1-1) = (1-prob) / (prob-phi)}
+  if \code{shape1 > 1}, in which case this is returned as
+  the fitted values.
 
 
   The geometric distribution is a special case of the beta-geometric
diff --git a/man/amh.Rd b/man/biamhcop.Rd
similarity index 85%
rename from man/amh.Rd
rename to man/biamhcop.Rd
index 92c53b3..f68fe84 100644
--- a/man/amh.Rd
+++ b/man/biamhcop.Rd
@@ -1,5 +1,5 @@
-\name{amh}
-\alias{amh}
+\name{biamhcop}
+\alias{biamhcop}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Ali-Mikhail-Haq Distribution Family Function }
 \description{
@@ -9,11 +9,11 @@
 
 }
 \usage{
-amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
+biamhcop(lapar = "rhobit", iapar = NULL, imethod = 1, nsimEIM = 250)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lalpha}{
+  \item{lapar}{
   Link function applied to the association parameter
   \eqn{\alpha}{alpha}, which is real
   and \eqn{-1 < \alpha < 1}{-1 < alpha < 1}.
@@ -21,7 +21,7 @@ amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
 
 
   }
-  \item{ialpha}{
+  \item{iapar}{
   Numeric. Optional initial value for \eqn{\alpha}{alpha}.
   By default, an initial value is chosen internally.
   If a convergence failure occurs try assigning a different value.
@@ -32,7 +32,7 @@ amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
   \item{imethod}{
   An integer with value \code{1} or \code{2} which
   specifies the initialization method. If failure to converge occurs
-  try the other value, or else specify a value for \code{ialpha}.
+  try the other value, or else specify a value for \code{iapar}.
 
 
   }
@@ -53,6 +53,8 @@ amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
   The marginal distributions are the standard uniform distributions.
   When \eqn{\alpha = 0}{alpha = 0} the random variables are
   independent.
+  This is an Archimedean copula.
+
 
 
 % A variant of Newton-Raphson is used, which only seems to work for an
@@ -94,16 +96,17 @@ New York: Springer.
 }
 
 \seealso{
-  \code{\link{ramh}},
-  \code{\link{fgm}},
-  \code{\link{bigumbelI}},
+  \code{\link{rbiamhcop}},
+  \code{\link{bifgmcop}},
+  \code{\link{bigumbelIexp}},
+  \code{\link{rbilogis}},
   \code{\link{simulate.vlm}}.
 
 
 }
 \examples{
-ymat <- ramh(1000, alpha = rhobit(2, inverse = TRUE))
-fit <- vglm(ymat ~ 1, amh, trace = TRUE)
+ymat <- rbiamhcop(1000, apar = rhobit(2, inverse = TRUE))
+fit <- vglm(ymat ~ 1, biamhcop, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
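
Since the details section above notes that apar = 0 gives independence,
the renamed generator can be sanity-checked directly (a small sketch):

library(VGAM)
set.seed(1)
round(cor(rbiamhcop(2000, apar = 0)), 2)    # Off-diagonals near 0
round(cor(rbiamhcop(2000, apar = 0.9)), 2)  # Clearly positive
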
diff --git a/man/amhUC.Rd b/man/biamhcopUC.Rd
similarity index 58%
rename from man/amhUC.Rd
rename to man/biamhcopUC.Rd
index 2b67d89..3783277 100644
--- a/man/amhUC.Rd
+++ b/man/biamhcopUC.Rd
@@ -1,8 +1,8 @@
-\name{Amh}
-\alias{Amh}
-\alias{damh}
-\alias{pamh}
-\alias{ramh}
+\name{Biamhcop}
+\alias{Biamhcop}
+\alias{dbiamhcop}
+\alias{pbiamhcop}
+\alias{rbiamhcop}
 \title{Ali-Mikhail-Haq Bivariate Distribution}
 \description{
   Density, distribution function, and random
@@ -12,9 +12,9 @@
 
 }
 \usage{
-damh(x1, x2, alpha, log = FALSE)
-pamh(q1, q2, alpha)
-ramh(n, alpha)
+dbiamhcop(x1, x2, apar, log = FALSE)
+pbiamhcop(q1, q2, apar)
+rbiamhcop(n, apar)
 }
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
@@ -23,7 +23,7 @@ ramh(n, alpha)
 
 
   }
-  \item{alpha}{the association parameter.}
+  \item{apar}{the association parameter.}
   \item{log}{
   Logical.
   If \code{TRUE} then the logarithm is returned.
@@ -32,9 +32,9 @@ ramh(n, alpha)
   }
 }
 \value{
-  \code{damh} gives the density,
-  \code{pamh} gives the distribution function, and
-  \code{ramh} generates random deviates (a two-column matrix).
+  \code{dbiamhcop} gives the density,
+  \code{pbiamhcop} gives the distribution function, and
+  \code{rbiamhcop} generates random deviates (a two-column matrix).
 
 
 }
@@ -43,7 +43,7 @@ ramh(n, alpha)
 %}
 \author{ T. W. Yee and C. S. Chee}
 \details{
-  See \code{\link{amh}}, the \pkg{VGAM}
+  See \code{\link{biamhcop}}, the \pkg{VGAM}
   family functions for estimating the
   parameter by maximum likelihood estimation, for the formula of the
   cumulative distribution function and other details.
@@ -53,19 +53,19 @@ ramh(n, alpha)
 %\note{
 %}
 \seealso{
-  \code{\link{amh}}.
+  \code{\link{biamhcop}}.
 
 
 }
-\examples{ x <- seq(0, 1, len = (N <- 101)); alpha <- 0.7
+\examples{ x <- seq(0, 1, len = (N <- 101)); apar <- 0.7
 ox <- expand.grid(x, x)
-zedd <- damh(ox[, 1], ox[, 2], alpha = alpha)
+zedd <- dbiamhcop(ox[, 1], ox[, 2], apar = apar)
 \dontrun{
 contour(x, x, matrix(zedd, N, N), col = "blue")
-zedd <- pamh(ox[, 1], ox[, 2], alpha = alpha)
+zedd <- pbiamhcop(ox[, 1], ox[, 2], apar = apar)
 contour(x, x, matrix(zedd, N, N), col = "blue")
 
-plot(r <- ramh(n = 1000, alpha = alpha), col = "blue")
+plot(r <- rbiamhcop(n = 1000, apar = apar), col = "blue")
 par(mfrow = c(1, 2))
 hist(r[, 1])  # Should be uniform
 hist(r[, 2])  # Should be uniform
diff --git a/man/biclaytoncop.Rd b/man/biclaytoncop.Rd
index dbe98b7..2ecc230 100644
--- a/man/biclaytoncop.Rd
+++ b/man/biclaytoncop.Rd
@@ -9,12 +9,12 @@
 
 }
 \usage{
-biclaytoncop(lalpha = "loge", ialpha = NULL, imethod = 1,
+biclaytoncop(lapar = "loge", iapar = NULL, imethod = 1,
              parallel = FALSE, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lalpha, ialpha, imethod}{
+  \item{lapar, iapar, imethod}{
   Details at \code{\link{CommonVGAMffArguments}}.
   See \code{\link{Links}} for more link function choices.
 
@@ -34,6 +34,7 @@ biclaytoncop(lalpha = "loge", ialpha = NULL, imethod = 1,
   \deqn{P(u_1, u_2;\alpha) = (u_1^{-\alpha} + u_2^{-\alpha}-1)^{-1/\alpha}}{%
         P(u1,u2,alpha) = (u1^(-alpha) + u2^(-alpha)-1)^(-1/alpha)}
   for \eqn{0 \leq \alpha }{0 <= alpha}.
+  Here, \eqn{\alpha}{alpha} is the association parameter.
   The support of the function is the interior of the unit square;
   however, values of 0 and/or 1 are not allowed (currently).
   The marginal distributions are the standard uniform distributions.
@@ -99,7 +100,7 @@ Derivatives and Fisher information of bivariate copulas.
 
 }
 \examples{
-ymat <- rbiclaytoncop(n = (nn <- 1000), alpha = exp(2))
+ymat <- rbiclaytoncop(n = (nn <- 1000), apar = exp(2))
 bdata <- data.frame(y1 = ymat[, 1],
                     y2 = ymat[, 2],
                     y3 = ymat[, 1],
@@ -117,13 +118,13 @@ Coef(fit1)
 head(fitted(fit1))
 summary(fit1)
 
-# Another example; alpha is a function of x2
-bdata <- transform(bdata, alpha = exp(-0.5 + x2))
-ymat <- rbiclaytoncop(n = nn, alpha = with(bdata, alpha))
+# Another example; apar is a function of x2
+bdata <- transform(bdata, apar = exp(-0.5 + x2))
+ymat <- rbiclaytoncop(n = nn, apar = with(bdata, apar))
 bdata <- transform(bdata, y5 = ymat[, 1],
                           y6 = ymat[, 2])
 fit2 <- vgam(cbind(y5, y6) ~ s(x2), data = bdata,
-             biclaytoncop(lalpha = "loge"), trace = TRUE)
+             biclaytoncop(lapar = "loge"), trace = TRUE)
 \dontrun{ plot(fit2, lcol = "blue", scol = "orange", se = TRUE, las = 1) }
 }
 \keyword{models}
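
The Clayton CDF displayed in that file is simple enough to evaluate by
hand; a sketch (pbiclaytoncop itself is still commented out in the docs):

# CDF written straight from the formula in biclaytoncop.Rd
pclayton <- function(u1, u2, apar)
  (u1^(-apar) + u2^(-apar) - 1)^(-1 / apar)
pclayton(0.3, 0.6, apar = 2)
# As apar -> 0 this tends to the independence copula u1 * u2:
pclayton(0.3, 0.6, apar = 1e-8)
0.3 * 0.6
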
diff --git a/man/biclaytoncopUC.Rd b/man/biclaytoncopUC.Rd
index f35fa83..1e51b3a 100644
--- a/man/biclaytoncopUC.Rd
+++ b/man/biclaytoncopUC.Rd
@@ -11,8 +11,8 @@
 
 }
 \usage{
-dbiclaytoncop(x1, x2, alpha = 0, log = FALSE)
-rbiclaytoncop(n, alpha = 0)
+dbiclaytoncop(x1, x2, apar = 0, log = FALSE)
+rbiclaytoncop(n, apar = 0)
 }
 %pbiclaytoncop(q1, q2, rho = 0)
 \arguments{
@@ -25,7 +25,7 @@ rbiclaytoncop(n, alpha = 0)
     Same as \code{\link[stats]{rnorm}}.
 
   }
-  \item{alpha}{the association parameter.
+  \item{apar}{the association parameter.
   Should be in the interval \eqn{[0, \infty)}{[0, Inf)}.
   The default corresponds to independence.
 
@@ -34,6 +34,8 @@ rbiclaytoncop(n, alpha = 0)
   \item{log}{
   Logical.
   If \code{TRUE} then the logarithm is returned.
+
+
 %   Same as \code{\link[stats]{rnorm}}.
 
 
@@ -43,6 +45,7 @@ rbiclaytoncop(n, alpha = 0)
   \code{dbiclaytoncop} gives the density at point (\code{x1},\code{x2}),
   \code{rbiclaytoncop} generates random deviates (a two-column matrix).
 
+
 % \code{pbiclaytoncop} gives the distribution function, and
 
 
@@ -82,6 +85,7 @@ A model for association in bivariate survival data.
 
 }
 \seealso{
+  \code{\link{biclaytoncop}},
   \code{\link{binormalcop}},
   \code{\link{binormal}}.
 
@@ -91,7 +95,7 @@ A model for association in bivariate survival data.
 \dontrun{ edge <- 0.01  # A small positive value
 N <- 101; x <- seq(edge, 1.0 - edge, len = N); Rho <- 0.7
 ox <- expand.grid(x, x)
-zedd <- dbiclaytoncop(ox[, 1], ox[, 2], alpha = Rho, log = TRUE)
+zedd <- dbiclaytoncop(ox[, 1], ox[, 2], apar = Rho, log = TRUE)
 par(mfrow = c(1, 2))
 contour(x, x, matrix(zedd, N, N), col = "blue", labcex = 1.5, las = 1)
 plot(rbiclaytoncop(1000, 2), col = "blue", las = 1)
@@ -100,7 +104,7 @@ plot(rbiclaytoncop(1000, 2), col = "blue", las = 1)
 \keyword{distribution}
 
 
-%plot(r <- rbiclaytoncop(n = 3000, alpha = exp(2)), col = "blue")
+%plot(r <- rbiclaytoncop(n = 3000, apar = exp(2)), col = "blue")
 %par(mfrow = c(1, 2))
 %hist(r[, 1])  # Should be uniform
 %hist(r[, 2])  # Should be uniform
diff --git a/man/fgm.Rd b/man/bifgmcop.Rd
similarity index 90%
rename from man/fgm.Rd
rename to man/bifgmcop.Rd
index f56b1aa..9a4f424 100644
--- a/man/fgm.Rd
+++ b/man/bifgmcop.Rd
@@ -1,5 +1,5 @@
-\name{fgm}
-\alias{fgm}
+\name{bifgmcop}
+\alias{bifgmcop}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Farlie-Gumbel-Morgenstern's Bivariate Distribution Family Function }
 \description{
@@ -9,7 +9,7 @@
 
 }
 \usage{
-fgm(lapar = "rhobit", iapar = NULL, imethod = 1)
+bifgmcop(lapar = "rhobit", iapar = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -77,17 +77,17 @@ Invariance theorems for Fisher information.
 }
 
 \seealso{
-  \code{\link{rfgm}},
+  \code{\link{rbifgmcop}},
   \code{\link{bifrankcop}},
-  \code{\link{morgenstern}},
+  \code{\link{bifgmexp}},
   \code{\link{simulate.vlm}}.
 
 
 }
 \examples{
-ymat <- rfgm(n = 1000, alpha = rhobit(3, inverse = TRUE))
+ymat <- rbifgmcop(n = 1000, apar = rhobit(3, inverse = TRUE))
 \dontrun{plot(ymat, col = "blue")}
-fit <- vglm(ymat ~ 1, fam = fgm, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = bifgmcop, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
diff --git a/man/fgmUC.Rd b/man/bifgmcopUC.Rd
similarity index 58%
rename from man/fgmUC.Rd
rename to man/bifgmcopUC.Rd
index 20f26f6..8485060 100644
--- a/man/fgmUC.Rd
+++ b/man/bifgmcopUC.Rd
@@ -1,8 +1,8 @@
-\name{Fgm}
-\alias{Fgm}
-\alias{dfgm}
-\alias{pfgm}
-\alias{rfgm}
+\name{Bifgmcop}
+\alias{Bifgmcop}
+\alias{dbifgmcop}
+\alias{pbifgmcop}
+\alias{rbifgmcop}
 \title{Farlie-Gumbel-Morgenstern's Bivariate Distribution}
 \description{
   Density, distribution function, and random
@@ -12,15 +12,15 @@
 
 }
 \usage{
-dfgm(x1, x2, alpha, log = FALSE)
-pfgm(q1, q2, alpha)
-rfgm(n, alpha)
+dbifgmcop(x1, x2, apar, log = FALSE)
+pbifgmcop(q1, q2, apar)
+rbifgmcop(n, apar)
 }
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
   \item{n}{number of observations.
     Must be a positive integer of length 1.}
-  \item{alpha}{the association parameter.}
+  \item{apar}{the association parameter.}
   \item{log}{
   Logical.
   If \code{TRUE} then the logarithm is returned.
@@ -28,9 +28,9 @@ rfgm(n, alpha)
   }
 }
 \value{
-  \code{dfgm} gives the density,
-  \code{pfgm} gives the distribution function, and
-  \code{rfgm} generates random deviates (a two-column matrix).
+  \code{dbifgmcop} gives the density,
+  \code{pbifgmcop} gives the distribution function, and
+  \code{rbifgmcop} generates random deviates (a two-column matrix).
 
 
 }
@@ -39,7 +39,7 @@ rfgm(n, alpha)
 %}
 \author{ T. W. Yee }
 \details{
-  See \code{\link{fgm}}, the \pkg{VGAM}
+  See \code{\link{bifgmcop}}, the \pkg{VGAM}
   family functions for estimating the
   parameter by maximum likelihood estimation, for the formula of the
   cumulative distribution function and other details.
@@ -49,19 +49,19 @@ rfgm(n, alpha)
 %\note{
 %}
 \seealso{
-  \code{\link{fgm}}.
+  \code{\link{bifgmcop}}.
 
 
 }
 \examples{
-\dontrun{ N <- 101; x <- seq(0.0, 1.0, len = N); alpha <- 0.7
+\dontrun{ N <- 101; x <- seq(0.0, 1.0, len = N); apar <- 0.7
 ox <- expand.grid(x, x)
-zedd <- dfgm(ox[, 1], ox[, 2], alpha = alpha)
+zedd <- dbifgmcop(ox[, 1], ox[, 2], apar = apar)
 contour(x, x, matrix(zedd, N, N), col = "blue")
-zedd <- pfgm(ox[, 1], ox[, 2], alpha = alpha)
+zedd <- pbifgmcop(ox[, 1], ox[, 2], apar = apar)
 contour(x, x, matrix(zedd, N, N), col = "blue")
 
-plot(r <- rfgm(n = 3000, alpha = alpha), col = "blue")
+plot(r <- rbifgmcop(n = 3000, apar = apar), col = "blue")
 par(mfrow = c(1, 2))
 hist(r[, 1])  # Should be uniform
 hist(r[, 2])  # Should be uniform
diff --git a/man/morgenstern.Rd b/man/bifgmexp.Rd
similarity index 84%
rename from man/morgenstern.Rd
rename to man/bifgmexp.Rd
index 628c3f2..d4bc2e1 100644
--- a/man/morgenstern.Rd
+++ b/man/bifgmexp.Rd
@@ -1,14 +1,15 @@
-\name{morgenstern}
-\alias{morgenstern}
+\name{bifgmexp}
+\alias{bifgmexp}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Morgenstern's Bivariate Distribution Family Function }
+\title{ Bivariate Farlie-Gumbel-Morgenstern Exponential Distribution
+        Family Function }
 \description{
-  Estimate the association parameter of Morgenstern's bivariate
-  distribution by maximum likelihood estimation.
+  Estimate the association parameter of the FGM bivariate
+  exponential distribution by maximum likelihood estimation.
 
 }
 \usage{
-morgenstern(lapar = "rhobit", iapar = NULL, tola0 = 0.01, imethod = 1)
+bifgmexp(lapar = "rhobit", iapar = NULL, tola0 = 0.01, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -103,17 +104,17 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \seealso{
-  \code{\link{fgm}},
-  \code{\link{bigumbelI}}.
+  \code{\link{bifgmcop}},
+  \code{\link{bigumbelIexp}}.
 
 
 }
 \examples{
 N <- 1000; mdata <- data.frame(y1 = rexp(N), y2 = rexp(N))
 \dontrun{plot(mdata)}
-fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, data = mdata, trace = TRUE)
-# This may fail:
-fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, data = mdata, trace = TRUE, crit = "coef")
+fit <- vglm(cbind(y1, y2) ~ 1, bifgmexp, data = mdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, bifgmexp, data = mdata, # This may fail
+            trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
diff --git a/man/bifrankcop.Rd b/man/bifrankcop.Rd
index bd420f1..be37172 100644
--- a/man/bifrankcop.Rd
+++ b/man/bifrankcop.Rd
@@ -97,14 +97,14 @@ Frank's family of bivariate distributions.
 
 \seealso{
   \code{\link{rbifrankcop}},
-  \code{\link{fgm}},
+  \code{\link{bifgmcop}},
   \code{\link{simulate.vlm}}.
 
 
 }
 \examples{
 \dontrun{
-ymat <- rbifrankcop(n = 2000, alpha = exp(4))
+ymat <- rbifrankcop(n = 2000, apar = exp(4))
 plot(ymat, col = "blue")
 fit <- vglm(ymat ~ 1, fam = bifrankcop, trace = TRUE)
 coef(fit, matrix = TRUE)
diff --git a/man/bifrankcopUC.Rd b/man/bifrankcopUC.Rd
index 0b3955d..0eb198d 100644
--- a/man/bifrankcopUC.Rd
+++ b/man/bifrankcopUC.Rd
@@ -10,15 +10,15 @@
 
 }
 \usage{
-dbifrankcop(x1, x2, alpha, log = FALSE)
-pbifrankcop(q1, q2, alpha)
-rbifrankcop(n, alpha)
+dbifrankcop(x1, x2, apar, log = FALSE)
+pbifrankcop(q1, q2, apar)
+rbifrankcop(n, apar)
 }
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
   \item{n}{number of observations.
     Must be a positive integer of length 1.}
-  \item{alpha}{the positive association parameter \eqn{\alpha}{alpha}.}
+  \item{apar}{the positive association parameter. }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
@@ -59,19 +59,17 @@ Frank's family of bivariate distributions.
 
 }
 \examples{
-\dontrun{N <- 100; alpha <- exp(2)
+\dontrun{N <- 100; apar <- exp(2)
 xx <- seq(-0.30, 1.30, len = N)
 ox <- expand.grid(xx, xx)
-zedd <- dbifrankcop(ox[, 1], ox[, 2], alpha = alpha)
+zedd <- dbifrankcop(ox[, 1], ox[, 2], apar = apar)
 contour(xx, xx, matrix(zedd, N, N))
-zedd <- pbifrankcop(ox[, 1], ox[, 2], alpha = alpha)
+zedd <- pbifrankcop(ox[, 1], ox[, 2], apar = apar)
 contour(xx, xx, matrix(zedd, N, N))
 
-alpha <- exp(4)
-plot(rr <- rbifrankcop(n = 3000, alpha = alpha))
+plot(rr <- rbifrankcop(n = 3000, apar = exp(4)))
 par(mfrow = c(1, 2))
-hist(rr[, 1])  # Should be uniform
-hist(rr[, 2])  # Should be uniform
+hist(rr[, 1]); hist(rr[, 2])  # Should be uniform
 }
 }
 \keyword{distribution}
diff --git a/man/bivgamma.mckay.Rd b/man/bigamma.mckay.Rd
similarity index 98%
rename from man/bivgamma.mckay.Rd
rename to man/bigamma.mckay.Rd
index cf4ac8f..a84f4ef 100644
--- a/man/bivgamma.mckay.Rd
+++ b/man/bigamma.mckay.Rd
@@ -10,7 +10,7 @@
 \usage{
 bigamma.mckay(lscale = "loge", lshape1 = "loge", lshape2 = "loge",
               iscale = NULL, ishape1 = NULL, ishape2 = NULL,
-              imethod = 1, zero = 1)
+              imethod = 1, zero = 2:3)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -108,10 +108,12 @@ New York: Springer.
   \eqn{y_2}{y2} (all evaluated at the final iteration).
 
 
+
 % The data are sorted internally and the user need not input the
 % data presorted.
 
 
+
 }
 
 \seealso{
diff --git a/man/bigumbelI.Rd b/man/bigumbelIexp.Rd
similarity index 78%
rename from man/bigumbelI.Rd
rename to man/bigumbelIexp.Rd
index 5e20f4b..ee267fe 100644
--- a/man/bigumbelI.Rd
+++ b/man/bigumbelIexp.Rd
@@ -1,5 +1,5 @@
-\name{bigumbelI}
-\alias{bigumbelI}
+\name{bigumbelIexp}
+\alias{bigumbelIexp}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Gumbel's Type I Bivariate Distribution Family Function }
 \description{
@@ -9,7 +9,7 @@
 
 }
 \usage{
-bigumbelI(lapar = "identitylink", iapar = NULL, imethod = 1)
+bigumbelIexp(lapar = "identitylink", iapar = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -65,9 +65,22 @@ bigumbelI(lapar = "identitylink", iapar = NULL, imethod = 1)
 
 \references{
 
-Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
-\emph{Extreme Value and Related Models with Applications in Engineering and Science},
-Hoboken, NJ, USA: Wiley-Interscience.
+
+
+%Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
+%\emph{Extreme Value and Related Models with Applications in Engineering and Science},
+%Hoboken, NJ, USA: Wiley-Interscience.
+
+
+
+Gumbel, E. J. (1960)
+Bivariate Exponential Distributions.
+\emph{Journal of the American Statistical Association},
+\bold{55}, 698--707.
+
+% Journal of the American Statistical Association.
+% Vol. 55, No. 292, Dec., 1960   >  Bivariate Exponential.
+
 
 
 }
@@ -85,7 +98,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \seealso{
-  \code{\link{morgenstern}}.
+  \code{\link{bifgmexp}}.
 
 
 }
@@ -93,7 +106,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 nn <- 1000
 gdata <- data.frame(y1 = rexp(nn), y2 = rexp(nn))
 \dontrun{ with(gdata, plot(cbind(y1, y2))) }
-fit <- vglm(cbind(y1, y2) ~ 1, fam = bigumbelI, data = gdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, bigumbelIexp, data = gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
diff --git a/man/bilogis4UC.Rd b/man/bilogisUC.Rd
similarity index 53%
rename from man/bilogis4UC.Rd
rename to man/bilogisUC.Rd
index 42b92b8..ee0a04a 100644
--- a/man/bilogis4UC.Rd
+++ b/man/bilogisUC.Rd
@@ -1,8 +1,8 @@
-\name{bilogis4}
-\alias{bilogis4}
-\alias{dbilogis4}
-\alias{pbilogis4}
-\alias{rbilogis4}
+\name{bilogis}
+\alias{bilogis}
+\alias{dbilogis}
+\alias{pbilogis}
+\alias{rbilogis}
 \title{Bivariate Logistic Distribution}
 \description{
   Density, distribution function, quantile function and random generation
@@ -11,9 +11,9 @@
 
 }
 \usage{
-dbilogis4(x1, x2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1, log = FALSE)
-pbilogis4(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
-rbilogis4(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
+dbilogis(x1, x2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1, log = FALSE)
+pbilogis(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
+rbilogis(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
 }
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
@@ -33,9 +33,9 @@ rbilogis4(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1)
 
 }
 \value{
-  \code{dbilogis4} gives the density,
-  \code{pbilogis4} gives the distribution function, and
-  \code{rbilogis4} generates random deviates (a two-column matrix).
+  \code{dbilogis} gives the density,
+  \code{pbilogis} gives the distribution function, and
+  \code{rbilogis} generates random deviates (a two-column matrix).
 
 
 }
@@ -50,22 +50,34 @@ Bivariate logistic distributions.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{bilogis4}}, the \pkg{VGAM} family function for
+  See \code{\link{bilogis}}, the \pkg{VGAM} family function for
   estimating the four parameters by maximum likelihood estimation, for
   the formula of the cumulative distribution function and other details.
 
 
 }
-%\note{
-%}
+\note{
+  Gumbel (1961) proposed two bivariate logistic distributions with
+  logistic distribution marginals, which he called Type I and Type II.
+  The Type I is this one.
+  The Type II belongs to the Morgenstern type.
+  The \code{\link{biamhcop}} distribution has this distribution as a
+  special case, occurring when the random variables are independent.
+
+
+% This note added 20140920
+
+
+}
 \seealso{
-  \code{\link{bilogistic4}}.
+  \code{\link{bilogistic}},
+  \code{\link{biamhcop}}.
 
 
 }
 \examples{
 \dontrun{ par(mfrow = c(1, 3))
-ymat <- rbilogis4(n = 2000, loc1 = 5, loc2 = 7, scale2 = exp(1))
+ymat <- rbilogis(n = 2000, loc1 = 5, loc2 = 7, scale2 = exp(1))
 myxlim <- c(-2, 15); myylim <- c(-10, 30)
 plot(ymat, xlim = myxlim, ylim = myylim)
 
@@ -73,9 +85,9 @@ N <- 100
 x1 <- seq(myxlim[1], myxlim[2], len = N)
 x2 <- seq(myylim[1], myylim[2], len = N)
 ox <- expand.grid(x1, x2)
-z <- dbilogis4(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
+z <- dbilogis(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
 contour(x1, x2, matrix(z, N, N), main = "density")
-z <- pbilogis4(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
+z <- pbilogis(ox[,1], ox[,2], loc1 = 5, loc2 = 7, scale2 = exp(1))
 contour(x1, x2, matrix(z, N, N), main = "cdf") }
 }
 \keyword{distribution}
diff --git a/man/bilogistic4.Rd b/man/bilogistic.Rd
similarity index 91%
rename from man/bilogistic4.Rd
rename to man/bilogistic.Rd
index 9bbd788..85c690e 100644
--- a/man/bilogistic4.Rd
+++ b/man/bilogistic.Rd
@@ -1,5 +1,5 @@
-\name{bilogistic4}
-\alias{bilogistic4}
+\name{bilogistic}
+\alias{bilogistic}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Bivariate Logistic Distribution Family Function }
 \description{
@@ -8,9 +8,9 @@
 
 }
 \usage{
-bilogistic4(llocation = "identitylink", lscale = "loge",
-            iloc1 = NULL, iscale1 = NULL, iloc2 = NULL, iscale2 = NULL,
-            imethod = 1, zero = NULL)
+bilogistic(llocation = "identitylink", lscale = "loge",
+           iloc1 = NULL, iscale1 = NULL, iloc2 = NULL, iscale2 = NULL,
+           imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -126,14 +126,14 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 \seealso{
   \code{\link{logistic}},
-  \code{\link{rbilogis4}}.
+  \code{\link{rbilogis}}.
 
 
 }
 \examples{
-ymat <- rbilogis4(n <- 1000, loc1 = 5, loc2 = 7, scale2 = exp(1))
+ymat <- rbilogis(n <- 1000, loc1 = 5, loc2 = 7, scale2 = exp(1))
 \dontrun{plot(ymat)}
-fit <- vglm(ymat ~ 1, fam = bilogistic4, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = bilogistic, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 6f4d00e..9184f49 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -212,7 +212,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
 \examples{
 # Fit the model in Table 6.7 in McCullagh and Nelder (1989)
 coalminers <- transform(coalminers, Age = (age - 42) / 5)
-fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ Age, binom2.or(zero = NULL), coalminers)
+fit <- vglm(cbind(nBnW, nBW, BnW, BW) ~ Age,
+            binom2.or(zero = NULL), data = coalminers)
 fitted(fit)
 summary(fit)
 coef(fit, matrix = TRUE)
@@ -229,9 +230,9 @@ legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
 
 
 # Another model: pet ownership
-\dontrun{ require("VGAMdata")
+\dontrun{ data(xs.nz, package = "VGAMdata")
 # More homogeneous:
-petdata <- subset(xs.nz, ethnic == "0" & age < 70 & sex == "M")
+petdata <- subset(xs.nz, ethnicity == "European" & age < 70 & sex == "M")
 petdata <- na.omit(petdata[, c("cat", "dog", "age")])
 summary(petdata)
 with(petdata, table(cat, dog))  # Can compute the odds ratio
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index a83c30f..f1d2267 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -37,7 +37,13 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
   Multivariate response? If \code{TRUE}, then the response is interpreted
   as \eqn{M} independent binary responses, where \eqn{M} is the number
   of columns of the response matrix. In this case, the response matrix
-  should have zero/one values only.
+  should have \eqn{M} columns consisting of counts (successes),
+  and the \code{weights} argument should have \eqn{M} columns
+  consisting of the number of trials (successes plus failures).
+
+
+% zero/one values only.
+
 
 
   If \code{FALSE} and the response is a (2-column) matrix, then the number
@@ -201,15 +207,19 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
     \code{\link{posbinomial}},
     \code{\link{zibinomial}},
     \code{\link{double.expbinomial}},
-    \code{\link{matched.binomial}},
     \code{\link{seq2binomial}},
     \code{\link{amlbinomial}},
     \code{\link{simplex}},
     \code{\link[stats:Binomial]{binomial}},
-   \code{\link{simulate.vlm}},
+    \code{\link{simulate.vlm}},
     \pkg{safeBinaryRegression}.
 
 
+
+%   \code{\link{matched.binomial}},
+
+
+
 }
 \section{Warning }{
     With a multivariate response, assigning a known dispersion parameter
diff --git a/man/binormal.Rd b/man/binormal.Rd
index d349f05..eb4ef69 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -110,8 +110,8 @@ constraints(fit1)
 summary(fit1)
 
 # Estimated P(Y1 <= y1, Y2 <= y2) under the fitted model
-var1  <- loge(2 * predict(fit1)[, "log(sd1)"], inverse = TRUE)
-var2  <- loge(2 * predict(fit1)[, "log(sd2)"], inverse = TRUE)
+var1  <- loge(2 * predict(fit1)[, "loge(sd1)"], inverse = TRUE)
+var2  <- loge(2 * predict(fit1)[, "loge(sd2)"], inverse = TRUE)
 cov12 <- rhobit(predict(fit1)[, "rhobit(rho)"], inverse = TRUE)
 head(with(bdata, pnorm2(y1, y2,
                         mean1 = predict(fit1)[, "mean1"],
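
The loge(2 * predict(...), inverse = TRUE) idiom in that hunk works
because the linear predictor is eta = loge(sd), so doubling eta before
applying the inverse link returns the variance; a minimal sketch:

library(VGAM)
sd1 <- 1.7
eta <- loge(sd1)               # The linear predictor, log(sd1)
loge(2 * eta, inverse = TRUE)  # exp(2 * log(sd1)) = sd1^2
sd1^2                          # 2.89
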
diff --git a/man/binormalUC.Rd b/man/binormalUC.Rd
index ad738d5..8ce2d68 100644
--- a/man/binormalUC.Rd
+++ b/man/binormalUC.Rd
@@ -16,13 +16,13 @@
 % quantile function
 \usage{
 dbinorm(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0, log = FALSE)
-pbinorm(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
+pbinorm(q1, q2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 rbinorm(n,      mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
  pnorm2(x1, x2, mean1 = 0, mean2 = 0, var1 = 1, var2 = 1, cov12 = 0)
 }
 % dbinorm(x1, x2, mean1 = 0, mean2 = 0,  sd1 = 1,  sd2 = 1,   rho = 0, log = FALSE)
 \arguments{
-  \item{x1, x2}{vector of quantiles.}
+  \item{x1, x2, q1, q2}{vector of quantiles.}
   \item{mean1, mean2, var1, var2, cov12}{
   vector of means, variances and the covariance.
 
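
The renamed q1/q2 arguments of pbinorm() in action; a sketch using the
standard bivariate normal orthant probability as a reference value:

library(VGAM)
pbinorm(q1 = 0, q2 = 0, cov12 = 0.5)  # 1/4 + asin(0.5)/(2*pi) = 1/3
pbinorm(q1 = 0, q2 = 0)               # Independence: pnorm(0)^2 = 0.25
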
diff --git a/man/plackett.Rd b/man/biplackettcop.Rd
similarity index 87%
rename from man/plackett.Rd
rename to man/biplackettcop.Rd
index da6a1c0..20c03d1 100644
--- a/man/plackett.Rd
+++ b/man/biplackettcop.Rd
@@ -1,15 +1,16 @@
-\name{plackett}
-\alias{plackett}
+\name{biplackettcop}
+\alias{biplackettcop}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Plackett's Bivariate Distribution Family Function }
+\title{ Plackett's Bivariate Copula Family Function }
 \description{
   Estimate the association parameter of Plackett's bivariate distribution
+  (copula)
   by maximum likelihood estimation.
 
 
 }
 \usage{
-plackett(link = "loge", ioratio = NULL, imethod = 1, nsimEIM = 200)
+biplackettcop(link = "loge", ioratio = NULL, imethod = 1, nsimEIM = 200)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -62,7 +63,7 @@ plackett(link = "loge", ioratio = NULL, imethod = 1, nsimEIM = 200)
   As the odds ratio tends to 0 one has \eqn{y_2=1-y_1}{y2=1-y1}.
 
 
-  Fisher scoring is implemented using \code{\link{rplack}}.
+  Fisher scoring is implemented using \code{\link{rbiplackcop}}.
   Convergence is often quite slow.
 
 
@@ -96,16 +97,16 @@ A class of bivariate distributions.
 }
 
 \seealso{
-  \code{\link{rplack}},
+  \code{\link{rbiplackcop}},
   \code{\link{bifrankcop}}.
 
 
 }
 \examples{
 \dontrun{
-ymat <- rplack(n = 2000, oratio = exp(2))
+ymat <- rbiplackcop(n = 2000, oratio = exp(2))
 plot(ymat, col = "blue")
-fit <- vglm(ymat ~ 1, fam = plackett, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = biplackettcop, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 vcov(fit)
diff --git a/man/plackUC.Rd b/man/biplackettcopUC.Rd
similarity index 59%
rename from man/plackUC.Rd
rename to man/biplackettcopUC.Rd
index 02d8da5..0f4722b 100644
--- a/man/plackUC.Rd
+++ b/man/biplackettcopUC.Rd
@@ -1,18 +1,18 @@
-\name{Plackett}
-\alias{Plackett}
-\alias{dplack}
-\alias{pplack}
-\alias{rplack}
-\title{Plackett's Bivariate Distribution}
+\name{Biplackett}
+\alias{Biplackett}
+\alias{dbiplackcop}
+\alias{pbiplackcop}
+\alias{rbiplackcop}
+\title{Plackett's Bivariate Copula }
 \description{
   Density, distribution function, and random
-  generation for the (one parameter) bivariate Plackett distribution.
+  generation for the (one parameter) bivariate Plackett copula.  %distribution.
 
 }
 \usage{
-dplack(x1, x2, oratio, log = FALSE)
-pplack(q1, q2, oratio)
-rplack(n, oratio)
+dbiplackcop(x1, x2, oratio, log = FALSE)
+pbiplackcop(q1, q2, oratio)
+rbiplackcop(n, oratio)
 }
 \arguments{
   \item{x1, x2, q1, q2}{vector of quantiles.}
@@ -27,9 +27,9 @@ rplack(n, oratio)
   }
 }
 \value{
-  \code{dplack} gives the density,
-  \code{pplack} gives the distribution function, and
-  \code{rplack} generates random deviates (a two-column matrix).
+  \code{dbiplackcop} gives the density,
+  \code{pbiplackcop} gives the distribution function, and
+  \code{rbiplackcop} generates random deviates (a two-column matrix).
 
 
 }
@@ -45,7 +45,7 @@ Some contributions to contingency-type distributions.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{plackett}}, the \pkg{VGAM}
+  See \code{\link{biplackettcop}}, the \pkg{VGAM}
   family functions for estimating the
   parameter by maximum likelihood estimation, for the formula of the
   cumulative distribution function and other details.
@@ -55,7 +55,7 @@ Some contributions to contingency-type distributions.
 %\note{
 %}
 \seealso{
-  \code{\link{plackett}},
+  \code{\link{biplackettcop}},
   \code{\link{bifrankcop}}.
 
 
@@ -64,12 +64,12 @@ Some contributions to contingency-type distributions.
 \dontrun{ N <- 101; oratio <- exp(1)
 x <- seq(0.0, 1.0, len = N)
 ox <- expand.grid(x, x)
-zedd <- dplack(ox[, 1], ox[, 2], oratio = oratio)
+zedd <- dbiplackcop(ox[, 1], ox[, 2], oratio = oratio)
 contour(x, x, matrix(zedd, N, N), col = "blue")
-zedd <- pplack(ox[, 1], ox[, 2], oratio = oratio)
+zedd <- pbiplackcop(ox[, 1], ox[, 2], oratio = oratio)
 contour(x, x, matrix(zedd, N, N), col = "blue")
 
-plot(rr <- rplack(n = 3000, oratio = oratio))
+plot(rr <- rbiplackcop(n = 3000, oratio = oratio))
 par(mfrow = c(1, 2))
 hist(rr[, 1])  # Should be uniform
 hist(rr[, 2])  # Should be uniform
diff --git a/man/bisa.Rd b/man/bisa.Rd
index aa1973c..8a25849 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -8,11 +8,14 @@
 
 }
 \usage{
-bisa(lshape = "loge", lscale = "loge",
-     ishape = NULL, iscale = 1, imethod = 1, zero = NULL)
+bisa(lscale = "loge", lshape = "loge",
+     iscale = 1, ishape = NULL, imethod = 1, zero = NULL, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
   \item{lscale, lshape}{
   Parameter link functions applied to the shape and scale parameters
   (\eqn{a} and \eqn{b} below).
@@ -128,20 +131,21 @@ New York: Wiley.
 \examples{
 bdata1 <- data.frame(x2 = runif(nn <- 1000))
 bdata1 <- transform(bdata1, shape = exp(-0.5 + x2), scale = exp(1.5))
-bdata1 <- transform(bdata1, y = rbisa(nn, shape, scale))
-fit1 <- vglm(y ~ x2, bisa(zero = 2), bdata1, trace = TRUE)
+bdata1 <- transform(bdata1, y = rbisa(nn, scale, shape))
+fit1 <- vglm(y ~ x2, bisa(zero = 1), data = bdata1, trace = TRUE)
 coef(fit1, matrix = TRUE)
 
 \dontrun{
 bdata2 <- data.frame(shape = exp(-0.5), scale = exp(0.5))
-bdata2 <- transform(bdata2, y = rbisa(nn, shape, scale))
-fit <- vglm(y ~ 1, bisa, bdata2, trace = TRUE)
+bdata2 <- transform(bdata2, y = rbisa(nn, scale, shape))
+fit <- vglm(y ~ 1, bisa, data = bdata2, trace = TRUE)
 with(bdata2, hist(y, prob = TRUE, ylim = c(0, 0.5), col = "lightblue"))
 coef(fit, matrix = TRUE)
 with(bdata2, mean(y))
 head(fitted(fit))
 x <- with(bdata2, seq(0, max(y), len = 200))
-lines(dbisa(x, Coef(fit)[1], Coef(fit)[2]) ~ x, bdata2, col = "orange", lwd = 2) }
+lines(dbisa(x, Coef(fit)[1], Coef(fit)[2]) ~ x, data = bdata2,
+      col = "orange", lwd = 2) }
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index 5c0eb15..b53dbad 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -11,10 +11,10 @@
 
 }
 \usage{
-dbisa(x, shape, scale = 1, log = FALSE)
-pbisa(q, shape, scale = 1)
-qbisa(p, shape, scale = 1)
-rbisa(n, shape, scale = 1)
+dbisa(x, scale = 1, shape, log = FALSE)
+pbisa(q, scale = 1, shape)
+qbisa(p, scale = 1, shape)
+rbisa(n, scale = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -24,8 +24,8 @@ rbisa(n, shape, scale = 1)
 
 
   }
-  \item{shape, scale}{
-  the (positive) shape and scale parameters.
+  \item{scale, shape}{
+  the (positive) scale and shape parameters.
 
 
   }
@@ -83,7 +83,7 @@ Q <- qbisa(probs, shape = shape)
 lines(Q, dbisa(Q, shape = shape), col = "purple", lty = 3, type = "h")
 pbisa(Q, shape = shape) - probs  # Should be all zero
 abline(h = probs, col = "purple", lty = 3)
-lines(Q, pbisa(Q, shape), col = "purple", lty = 3, type = "h")
+lines(Q, pbisa(Q, shape = shape), col = "purple", lty = 3, type = "h")
 }
 }
 \keyword{distribution}
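
Because the scale/shape swap above silently changes what positional
arguments mean, old calls are safest updated with named arguments; a
sketch of the new order:

library(VGAM)
set.seed(1); y1 <- rbisa(5, scale = exp(1.5), shape = 0.8)
set.seed(1); y2 <- rbisa(5, exp(1.5), 0.8)  # scale is now 2nd positionally
identical(y1, y2)  # TRUE
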
diff --git a/man/bistudentt.Rd b/man/bistudentt.Rd
index 2d89492..492b936 100644
--- a/man/bistudentt.Rd
+++ b/man/bistudentt.Rd
@@ -39,6 +39,7 @@ bistudentt(ldf = "loglog", lrho = "rhobit",
   for \eqn{-1 < \rho < 1}{-1 < rho < 1},
   and real \eqn{y_1}{y1} and \eqn{y_2}{y2}.
 
+
 % The support of the function is the interior of the unit square;
 % however, values of 0 and/or 1 are not allowed.
 % The marginal distributions are the standard uniform distributions.
@@ -80,11 +81,14 @@ with help from Thibault Vatter.
   Currently, the fitted
   value is a matrix with the same number of columns and values equal to 0.0.
 
+
 }
 \section{Warning }{
 
+
   The working weight matrices have not been fully checked.
 
+
 }
 
 \seealso{
@@ -98,18 +102,12 @@ with help from Thibault Vatter.
 nn <- 1000
 mydof <- loglog(1, inverse = TRUE)
 ymat <- cbind(rt(nn, df = mydof), rt(nn, df = mydof))
-
-bdata <- data.frame(y1 = ymat[, 1],
-                    y2 = ymat[, 2],
-                    y3 = ymat[, 1],
-                    y4 = ymat[, 2],
-                    x2 = runif(nn))
-
+bdata <- data.frame(y1 = ymat[, 1], y2 = ymat[, 2],
+                    y3 = ymat[, 1], y4 = ymat[, 2], x2 = runif(nn))
 summary(bdata)
 \dontrun{ plot(ymat, col = "blue") }
-fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1,  # 2 responses, e.g., (y1,y2) is the first
-             fam = bistudentt,
-#            crit = "coef",  # Sometimes a good idea
+fit1 <- vglm(cbind(y1, y2, y3, y4) ~ 1,  # 2 responses, e.g., (y1,y2) is the 1st
+             fam = bistudentt,  # crit = "coef",  # Sometimes a good idea
              data = bdata, trace = TRUE)
 
 coef(fit1, matrix = TRUE)
diff --git a/man/borel.tanner.Rd b/man/borel.tanner.Rd
index 299b8c0..95c6d3d 100644
--- a/man/borel.tanner.Rd
+++ b/man/borel.tanner.Rd
@@ -46,10 +46,10 @@ borel.tanner(Qsize = 1, link = "logit", imethod = 1)
   parameter to be estimated.
   The density function is
   \deqn{f(y;a) =
-  \frac{ Q! }{(y-Q)!} y^{y-Q-1} a^{y-Q}  \exp(-ay)
+  \frac{ Q }{(y-Q)!} y^{y-Q-1} a^{y-Q}  \exp(-ay)
   }{%
   f(y;a) = 
-  (Q! / (y-Q)!) * y^(y-Q-1) * a^(y-Q) * exp(-ay)}
+  (Q / (y-Q)!) * y^(y-Q-1) * a^(y-Q) * exp(-ay)}
   where \eqn{y=Q,Q+1,Q+2,\ldots}{y=Q,Q+1,Q+2,...}.
   The case \eqn{Q=1} corresponds to the \emph{Borel} distribution
   (Borel, 1942).
@@ -98,7 +98,7 @@ Application au probleme de l'attente a un guichet.
 
 Consul, P. C. and Famoye, F. (2006)
 \emph{Lagrangian Probability Distributions},
-Boston: Birkhauser.
+Boston, MA, USA: Birkhauser.
 
 
 }
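
The corrected normalizing constant (Q rather than Q!) can be verified
against dbort(); a sketch, computed on the log scale for stability:

library(VGAM)
Q <- 2; a <- 0.5; y <- Q:(Q + 400)
# log of the density as displayed in borel.tanner.Rd above
logf <- log(Q) - lgamma(y - Q + 1) + (y - Q - 1) * log(y) +
  (y - Q) * log(a) - a * y
sum(exp(logf))                        # Should be very close to 1
max(abs(exp(logf) - dbort(y, Q, a)))  # Should be essentially 0
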
diff --git a/man/bortUC.Rd b/man/bortUC.Rd
index 2ceba33..3db1994 100644
--- a/man/bortUC.Rd
+++ b/man/bortUC.Rd
@@ -75,7 +75,7 @@ rbort(n, Qsize = 1, a = 0.5)
 \examples{
 \dontrun{ qsize <- 1; a <- 0.5; x <- qsize:(qsize+10)
 plot(x, dbort(x, qsize, a), type = "h", las = 1, col = "blue",
-     ylab = paste("fbort(qsize=", qsize, ", a=", a, ")"),
+     ylab = paste("fbort(qsize=", qsize, ", a=", a, ")"), log = "y",
      main = "Borel-Tanner density function") }
 }
 \keyword{distribution}
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index 1ae6504..6310779 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -108,7 +108,7 @@ p1 <- cqo(cbind(Alopacce, Alopcune, Pardlugu, Pardnigr,
           WaterCon + BareSand + FallTwig +
           CoveMoss + CoveHerb + ReflLux,
           family = poissonff, data = hspider, I.tol = TRUE)
-sort(p1@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1, history = TRUE))  # A history of all the iterations
 
 siteNos <- 3:4  # Calibrate these sites
 cp1 <- calibrate(p1, new = data.frame(depvar(p1)[siteNos, ]), trace = TRUE)
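
The deviance(fit, history = TRUE) idiom recurs in the hunks below,
replacing direct access to the misc slot; a sketch, assuming a fitted
"qrrvglm" object p1 as in the example above:

devs <- deviance(p1, history = TRUE)  # One deviance per Bestof refit
sort(devs)                            # What the examples now print
min(devs)  # Presumably equals deviance(p1), the best of the refits
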
diff --git a/man/cao.Rd b/man/cao.Rd
index 519c5a7..208f694 100644
--- a/man/cao.Rd
+++ b/man/cao.Rd
@@ -233,7 +233,7 @@ cao(formula, family, data = list(),
 
   The fitted latent variables (site scores) are scaled to have
   unit variance.  The concept of a tolerance is undefined for
-  CAO models, but the optima and maxima are defined. The generic
+  CAO models, but the optimums and maximums are defined. The generic
   functions \code{\link{Max}} and \code{\link{Opt}} should work for
   CAO objects, but note that if the maximum occurs at the boundary then
   \code{\link{Max}} will return a \code{NA}.  Inference for CAO models
@@ -334,7 +334,7 @@ ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull) ~
            family = poissonff, data = hspider, Rank = 1,
            df1.nl = c(Pardpull= 2.7, 2.5),
            Bestof = 7, Crow1positive = FALSE)
-sort(ap1@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(ap1, history = TRUE))  # A history of all the iterations
 
 Coef(ap1)
 concoef(ap1)
diff --git a/man/cao.control.Rd b/man/cao.control.Rd
index a465fb5..3081140 100644
--- a/man/cao.control.Rd
+++ b/man/cao.control.Rd
@@ -326,7 +326,7 @@ ap1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
            family = poissonff, data = hspider,
            df1.nl = c(Zoraspin = 2.3, 2.1),
            Bestof = 10, Crow1positive = FALSE)
-sort(ap1@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(ap1, history = TRUE))  # A history of all the iterations
 
 Coef(ap1)
 
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index 53666bf..4d5622d 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -79,7 +79,7 @@ Singapore: World Scientific.
 }
 \section{Warning }{
   Numerically, this distribution can be difficult to fit because of a
-  log-likelihood having multiple maxima.
+  log-likelihood having multiple maximums.
   The user is therefore encouraged to try different starting values,
   i.e., make use of \code{imu} and \code{irho}.
 
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
index b7565a2..dcdc9b8 100644
--- a/man/cauchit.Rd
+++ b/man/cauchit.Rd
@@ -37,6 +37,7 @@ cauchit(theta, bvalue = .Machine$double.eps,
   link function is that the tail is heavier relative to the other links
   (see examples below).
 
+
   Numerical values of \code{theta} close to 0 or 1 or out of range result
   in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
 
@@ -48,15 +49,19 @@ cauchit(theta, bvalue = .Machine$double.eps,
   and if \code{inverse = TRUE} then
   \code{0.5 + atan(theta)/pi}.
 
+
   For \code{deriv = 1}, then the function returns
   \emph{d} \code{theta} / \emph{d} \code{eta} as a function of
   \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE}
   then it returns the reciprocal.
 
+
 }
 \references{
   McCullagh, P. and Nelder, J. A. (1989)
   \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
 }
 \author{ Thomas W. Yee }
 
@@ -70,6 +75,7 @@ cauchit(theta, bvalue = .Machine$double.eps,
   an ordinal response this link function corresponds to the
   Cauchy distribution (see \code{\link{cauchy1}}).
 
+
 }
 
 \seealso{ 
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index 782d468..381d3c0 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -23,11 +23,13 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
   and the scale parameter \eqn{b}{b}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{ilocation, iscale}{ 
   Optional initial value for \eqn{a}{a} and \eqn{b}{b}.
   By default, an initial value is chosen internally for each.
 
+
   }
   \item{imethod}{ 
   Integer, either 1 or 2 or 3.
@@ -37,19 +39,23 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
   Also, choose another value if convergence fails, or use
   \code{ilocation} and/or \code{iscale}.
 
+
   }
   \item{iprobs}{
   Probabilities used to find the respective sample quantiles;
   used to compute \code{iscale}.
 
+
   }
   \item{zero, nsimEIM}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
+
   }
   \item{scale.arg}{
   Known (positive) scale parameter, called \eqn{b}{b} below.
 
+
   }
 }
 \details{
@@ -59,7 +65,7 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
   where \eqn{y} and \eqn{a} are real and finite,
   and \eqn{b>0}{b>0}.
   The distribution is symmetric about \eqn{a} and has a heavy tail.
-  Its median and mode are \eqn{a} but the mean does not exist.
+  Its median and mode are \eqn{a}, but the mean does not exist.
   The fitted values are the estimates of \eqn{a}.
   Fisher scoring is the default but if \code{nsimEIM} is specified then
   Fisher scoring with simulation is used.
@@ -83,7 +89,7 @@ cauchy1(scale.arg = 1, llocation = "identitylink",
 }
 \section{Warning }{
   It is well-known that the Cauchy distribution may have local
-  maxima in its likelihood function;
+  maximums in its likelihood function;
   make full use of \code{imethod}, \code{ilocation}, \code{iscale}
   etc.
 
diff --git a/man/cgumbel.Rd b/man/cens.gumbel.Rd
similarity index 93%
rename from man/cgumbel.Rd
rename to man/cens.gumbel.Rd
index 063955c..47b0252 100644
--- a/man/cgumbel.Rd
+++ b/man/cens.gumbel.Rd
@@ -1,5 +1,5 @@
-\name{cgumbel}
-\alias{cgumbel}
+\name{cens.gumbel}
+\alias{cens.gumbel}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Censored Gumbel Distribution }
 \description{
@@ -10,8 +10,8 @@
 
 }
 \usage{
-cgumbel(llocation = "identitylink", lscale = "loge",
-        iscale = NULL, mean = TRUE, percentiles = NULL, zero = 2)
+cens.gumbel(llocation = "identitylink", lscale = "loge",
+            iscale = NULL, mean = TRUE, percentiles = NULL, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -121,7 +121,7 @@ y <- pmax(L, ystar)  # Left  censored
 y <- pmin(U, y)      # Right censored
 extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
 fit <- vglm(y ~ scale(year), data = venice, trace = TRUE, extra = extra,
-            cgumbel(mean = FALSE, perc = c(5, 25, 50, 75, 95)))
+            cens.gumbel(mean = FALSE, perc = c(5, 25, 50, 75, 95)))
 coef(fit, matrix = TRUE)
 head(fitted(fit))
 fit@extra
@@ -135,7 +135,7 @@ y <- pmax(L, ystar)  # Left  censored
 y <- pmin(U, y)      # Right censored
 \dontrun{par(mfrow = c(1, 2)); hist(ystar); hist(y);}
 extra <- list(leftcensored = ystar < L, rightcensored = ystar > U)
-fit <- vglm(y ~ 1, trace = TRUE, extra = extra, cgumbel)
+fit <- vglm(y ~ 1, trace = TRUE, extra = extra, cens.gumbel)
 coef(fit, matrix = TRUE)
 }
 \keyword{models}
diff --git a/man/cennormal.Rd b/man/cens.normal.Rd
similarity index 89%
rename from man/cennormal.Rd
rename to man/cens.normal.Rd
index d232a66..357e35f 100644
--- a/man/cennormal.Rd
+++ b/man/cens.normal.Rd
@@ -1,7 +1,8 @@
-\name{cennormal}
-\alias{cennormal}
+\name{cens.normal}
+\alias{cens.normal}
 % 20131111: just for \pkg{cg}:
-\alias{cennormal1}
+% 20140609: just for \pkg{cg}:
+\alias{cennormal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Censored Normal Distribution }
 \description{
@@ -11,7 +12,7 @@
 
 }
 \usage{
-cennormal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
+cens.normal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -76,15 +77,15 @@ cennormal(lmu = "identitylink", lsd = "loge", imethod = 1, zero = 2)
 
 
 
-% Function \code{\link{cennormal1}} will be depreciated soon.
-% It is exactly the same as \code{\link{cennormal}}.
+% Function \code{\link{cens.normal1}} will be deprecated soon.
+% It is exactly the same as \code{\link{cens.normal}}.
 
 
 }
 \seealso{
   \code{\link{tobit}},
   \code{\link{uninormal}},
-  \code{\link{double.cennormal}}.
+  \code{\link{double.cens.normal}}.
 
 
 }
@@ -101,7 +102,7 @@ cdata <- transform(cdata, y = pmin(U, y))      # Right censored
 with(cdata, hist(y))
 Extra <- list(leftcensored = with(cdata, ystar < L),
               rightcensored = with(cdata, ystar > U))
-fit1 <- vglm(y ~ x2, cennormal, data = cdata, crit = "c", extra = Extra, trace = TRUE)
+fit1 <- vglm(y ~ x2, cens.normal, data = cdata, crit = "c", extra = Extra)
 fit2 <- vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
             data = cdata, crit = "c", trace = TRUE)
 coef(fit1, matrix = TRUE)
diff --git a/man/cenpoisson.Rd b/man/cens.poisson.Rd
similarity index 91%
rename from man/cenpoisson.Rd
rename to man/cens.poisson.Rd
index 46d9a1b..516d9b9 100644
--- a/man/cenpoisson.Rd
+++ b/man/cens.poisson.Rd
@@ -1,6 +1,6 @@
-\name{cenpoisson}
-%\alias{cenpoisson}
-\alias{cenpoisson}
+\name{cens.poisson}
+%\alias{cens.poisson}
+\alias{cens.poisson}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Censored Poisson Family Function }
 \description{
@@ -9,7 +9,7 @@
 
 }
 \usage{
-cenpoisson(link = "loge", imu = NULL)
+cens.poisson(link = "loge", imu = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -88,7 +88,7 @@ cdata <- transform(cdata, status = ifelse(rcensored, 0, 1))
 with(cdata, table(cy))
 with(cdata, table(rcensored))
 with(cdata, table(ii <- print(SurvS4(cy, status))))  # Check; U+ means >= U
-fit <- vglm(SurvS4(cy, status) ~ 1, cenpoisson, data = cdata, trace = TRUE)
+fit <- vglm(SurvS4(cy, status) ~ 1, cens.poisson, data = cdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 table(print(depvar(fit)))  # Another check; U+ means >= U
 
@@ -101,7 +101,7 @@ cdata <- transform(cdata, status = ifelse(lcensored, 0, 1))
 with(cdata, table(cY))
 with(cdata, table(lcensored))
 with(cdata, table(ii <- print(SurvS4(cY, status, type = "left"))))  # Check
-fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cenpoisson, data = cdata, trace = TRUE)
+fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cens.poisson, data = cdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 
 
@@ -121,7 +121,7 @@ cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)]  # Unch
 with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval"))))  # Check
 
 fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
-            cenpoisson, data = cdata, trace = TRUE)
+            cens.poisson, data = cdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 table(print(depvar(fit)))  # Another check
 
@@ -135,7 +135,7 @@ with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status,
                                      type = "interval"))))  # Check
 
 fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
-            cenpoisson, data = cdata, trace = TRUE, crit = "c")
+            cens.poisson, data = cdata, trace = TRUE, crit = "c")
 coef(fit, matrix = TRUE)
 table(print(depvar(fit)))  # Another check
 }
diff --git a/man/concoef-methods.Rd b/man/concoef-methods.Rd
index 4570ad1..31abd4c 100644
--- a/man/concoef-methods.Rd
+++ b/man/concoef-methods.Rd
@@ -9,13 +9,13 @@
 \alias{concoef,Coef.rrvglm-method}
 \alias{concoef,Coef.qrrvglm-method}
 %
-\alias{ccoef-method}
-\alias{ccoef,cao-method}
-\alias{ccoef,Coef.cao-method}
-\alias{ccoef,rrvglm-method}
-\alias{ccoef,qrrvglm-method}
-\alias{ccoef,Coef.rrvglm-method}
-\alias{ccoef,Coef.qrrvglm-method}
+%%\alias{ccoef-method}
+%%\alias{ccoef,cao-method}
+%%\alias{ccoef,Coef.cao-method}
+%%\alias{ccoef,rrvglm-method}
+%%\alias{ccoef,qrrvglm-method}
+%%\alias{ccoef,Coef.rrvglm-method}
+%%\alias{ccoef,Coef.qrrvglm-method}
 %
 % This does not work:
 %\alias{ccoef,cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm-method}
diff --git a/man/concoef.Rd b/man/concoef.Rd
index e703b07..e7dbeca 100644
--- a/man/concoef.Rd
+++ b/man/concoef.Rd
@@ -1,6 +1,6 @@
 \name{concoef}
 \alias{concoef}
-\alias{ccoef}
+%\alias{ccoef}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Extract Model Constrained/Canonical Coefficients }
 \description{
@@ -65,8 +65,13 @@ Constrained additive ordination.
 %}
 
 \section{Warning }{
-  \code{\link{concoef}} and \code{\link{ccoef}} are identical,
-  but the latter will be deprecated soon.
+  \code{\link{concoef}} replaces \code{ccoef};
+  the latter is deprecated.
+
+
+% \code{\link{concoef}} and \code{\link{ccoef}} are identical,
+% but the latter will be deprecated soon.
+
 
 
   For QO models, there is a direct inverse relationship between the
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 57c5ff9..0b459ca 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -276,9 +276,11 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
 }
 \value{
   An object of class \code{"qrrvglm"}. 
-  Note that the slot \code{misc} has a list component called
-  \code{deviance.Bestof} which gives the history of deviances over all
-  the iterations.
+
+
+% Note that the slot \code{misc} has a list component called
+% \code{deviance.Bestof} which gives the history of deviances over all
+% the iterations.
 
 
 }
@@ -572,7 +574,7 @@ p1ut <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
             fam = poissonff, data = hspider, Crow1positive = FALSE,
             eq.tol = FALSE)
-sort(p1ut@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1ut, history = TRUE))  # A history of all the iterations
 if (deviance(p1ut) > 1177) warning("suboptimal fit obtained")
 
 S <- ncol(depvar(p1ut))  # Number of species
@@ -583,12 +585,12 @@ legend("topright", leg = colnames(depvar(p1ut)), col = clr,
        pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2)
 (cp <- Coef(p1ut))
 
-(a <- cp@latvar[cp@latvar.order])  # Ordered site scores along the gradient
+(a <- latvar(cp)[cp@latvar.order])  # Ordered site scores along the gradient
 # Names of the ordered sites along the gradient:
-rownames(cp@latvar)[cp@latvar.order]
-(aa <- (cp@Optimum)[, cp@Optimum.order])  # Ordered optima along the gradient
+rownames(latvar(cp))[cp@latvar.order]
+(aa <- Opt(cp)[, cp@Optimum.order])  # Ordered optimums along the gradient
 aa <- aa[!is.na(aa)]  # Delete the species that is not unimodal
-names(aa)  # Names of the ordered optima along the gradient
+names(aa)  # Names of the ordered optimums along the gradient
 
 trplot(p1ut, which.species = 1:3, log = "xy", type = "b", lty = 1, lwd = 2,
        col = c("blue","red","green"), label = TRUE) -> ii  # Trajectory plot
@@ -608,7 +610,7 @@ p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                   Trocterr, Zoraspin) ~
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
             poissonff, data = hspider, Crow1positive = FALSE)
-sort(p1et@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1et, history = TRUE))  # A history of all the iterations
 if (deviance(p1et) > 1586) warning("suboptimal fit obtained")
 S <- ncol(depvar(p1et))  # Number of species
 clr <- (1:(S+1))[-7]  # Omits yellow
@@ -624,7 +626,7 @@ p2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           poissonff, data = hspider, Crow1positive = FALSE,
           I.toler = TRUE, Rank = 2, Bestof = 3, isd.latvar = c(2.1, 0.9))
-sort(p2@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p2, history = TRUE))  # A history of all the iterations
 if (deviance(p2) > 1127) warning("suboptimal fit obtained")
 lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
        C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey", 
@@ -639,7 +641,7 @@ mydata <- rcqo(n, p, S, fam = "binomial", hi.abundance = 4,
 myform <- attr(mydata, "formula")
 set.seed(1234)
 b1et <- cqo(myform, binomialff(mv = TRUE, link = "cloglog"), data = mydata)
-sort(b1et@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(b1et, history = TRUE))  # A history of all the iterations
 lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1)
 Coef(b1et)
 
@@ -654,7 +656,7 @@ p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
                   Trocterr, Zoraspin) ~
             WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
             poissonff, data = hspider, eq.tol = TRUE, trace = FALSE)
-sort(p1et@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1et, history = TRUE))  # A history of all the iterations
 if (deviance(p1et) > 1586) warning("suboptimal fit obtained")
 S <- ncol(depvar(p1et))
 par(mfrow = c(3, 4))
diff --git a/man/dagum.Rd b/man/dagum.Rd
index 4fe852d..f1a6c44 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -95,10 +95,10 @@ while estimates for \eqn{a} and \eqn{p} can be considered unbiased for
     \code{\link{betaII}},
     \code{\link{sinmad}},
     \code{\link{fisk}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}},
+    \code{\link{inv.paralogistic}},
     \code{\link{simulate.vlm}}.
 
 
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index 92cabf6..91f162b 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -83,6 +83,7 @@ dirichlet(link = "loge", parallel = FALSE, zero = NULL)
 
 }
 \references{
+
 Lange, K. (2002)
 \emph{Mathematical and Statistical Methods for Genetic Analysis},
 2nd ed. New York: Springer-Verlag.
@@ -117,6 +118,12 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
+
+
+% yettodo: use the data of \citet[p.81]{mosi:1962}.
+% See also \citet[pp.8--9]{macd:2014}.
+
+
 \examples{
 ydata <- data.frame(rdiric(n = 1000,
                            shape = exp(c(y1 = -1, y2 = 1, y3 = 0))))
diff --git a/man/dirmul.old.Rd b/man/dirmul.old.Rd
index 93dfec8..55f9656 100644
--- a/man/dirmul.old.Rd
+++ b/man/dirmul.old.Rd
@@ -120,7 +120,7 @@ contains further information and examples.
 \seealso{
   \code{\link{dirmultinomial}},
   \code{\link{dirichlet}},
-  \code{\link{betabinomial.ab}},
+  \code{\link{betabinomialff}},
   \code{\link{multinomial}}.
 
 
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index a644ff7..deb2e10 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -131,7 +131,8 @@ Fisher information matrix of the Dirichlet-multinomial distribution.
 
 
 Tvedebrink, T. (2010)
-Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetics.
+Overdispersion in allelic counts and \eqn{\theta}-correction in
+forensic genetics.
 \emph{Theoretical Population Biology}, \bold{78}, 200--210.
 
 
@@ -184,7 +185,7 @@ in press;
 \seealso{
   \code{\link{dirmul.old}},
   \code{\link{betabinomial}},
-  \code{\link{betabinomial.ab}},
+  \code{\link{betabinomialff}},
   \code{\link{dirichlet}},
   \code{\link{multinomial}}.
 
@@ -196,13 +197,15 @@ nn <- 10; M <- 5
 ydata <- data.frame(round(matrix(runif(nn * M, max = 10), nn, M)))  # Integer counts
 colnames(ydata) <- paste("y", 1:M, sep = "")
 
-fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial, data = ydata, trace = TRUE)
+fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial,
+            data = ydata, trace = TRUE)
 head(fitted(fit))
 depvar(fit)  # Sample proportions
 weights(fit, type = "prior", matrix = FALSE)  # Total counts per row
 
 ydata <- transform(ydata, x2 = runif(nn))
-fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial, data = ydata, trace = TRUE)
+fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial,
+            data = ydata, trace = TRUE)
 \dontrun{ # This does not work:
 Coef(fit) }
 coef(fit, matrix = TRUE)
diff --git a/man/double.cennormal.Rd b/man/double.cens.normal.Rd
similarity index 87%
rename from man/double.cennormal.Rd
rename to man/double.cens.normal.Rd
index 5c94147..0f3164f 100644
--- a/man/double.cennormal.Rd
+++ b/man/double.cens.normal.Rd
@@ -1,5 +1,5 @@
-\name{double.cennormal}
-\alias{double.cennormal}
+\name{double.cens.normal}
+\alias{double.cens.normal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Univariate Normal Distribution with Double Censoring }
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-double.cennormal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
-                 imu = NULL, isd = NULL, zero = 2)
+double.cens.normal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
+                   imu = NULL, isd = NULL, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -73,7 +73,7 @@ double.cennormal(r1 = 0, r2 = 0, lmu = "identitylink", lsd = "loge",
 
 \seealso{
   \code{\link{uninormal}},
-  \code{\link{cennormal}},
+  \code{\link{cens.normal}},
   \code{\link{tobit}}.
 
 
@@ -85,7 +85,7 @@ r1 <- 0; r2 <- 4; nn <- 20
 for (sim in 1:SIMS) {
   y <- sort(rnorm(nn))
   y <- y[(1+r1):(nn-r2)]  # Delete r1 smallest and r2 largest
-  fit <- vglm(y ~ 1, double.cennormal(r1 = r1, r2 = r2))
+  fit <- vglm(y ~ 1, double.cens.normal(r1 = r1, r2 = r2))
   mu.save[sim] <- predict(fit)[1, 1]
   sd.save[sim] <- exp(predict(fit)[1, 2])  # Assumes a log link and ~ 1
 }
@@ -95,7 +95,7 @@ c(sd(mu.save), sd(sd.save))
 
 # Data from Sarhan and Greenberg (1962); MLEs are mu = 9.2606, sd = 1.3754
 strontium90 <- data.frame(y = c(8.2, 8.4, 9.1, 9.8, 9.9))
-fit <- vglm(y ~ 1, double.cennormal(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
+fit <- vglm(y ~ 1, double.cens.normal(r1 = 2, r2 = 3, isd = 6), strontium90, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
diff --git a/man/double.expbinomial.Rd b/man/double.expbinomial.Rd
index e7a848c..3ebbf76 100644
--- a/man/double.expbinomial.Rd
+++ b/man/double.expbinomial.Rd
@@ -132,11 +132,11 @@ toxop <- transform(toxop,
 # A fit similar (should be identical) to Section 6 of Efron (1986).
 # But does not use poly(), and M = 1.25 here, as in (5.3)
 cmlist <- list("(Intercept)"    = diag(2),
-               "I(srainfall)"   = rbind(1,0),
-               "I(srainfall^2)" = rbind(1,0),
-               "I(srainfall^3)" = rbind(1,0),
-               "I(sN)" = rbind(0,1),
-               "I(sN^2)" = rbind(0,1))
+               "I(srainfall)"   = rbind(1, 0),
+               "I(srainfall^2)" = rbind(1, 0),
+               "I(srainfall^3)" = rbind(1, 0),
+               "I(sN)" = rbind(0, 1),
+               "I(sN^2)" = rbind(0, 1))
 fit <- vglm(cbind(phat, 1 - phat) * ssize ~
             I(srainfall) + I(srainfall^2) + I(srainfall^3) +
             I(sN) + I(sN^2),
@@ -173,7 +173,7 @@ fit2 <- vglm(cbind(phat, 1 - phat) * ssize ~
                           idisp = 0.2, zero = NULL),
              toxop, trace = TRUE, constraints = cmlist2)
 \dontrun{ par(mfrow = c(1, 2))
-plotvgam(fit2, se = TRUE, lcol = "blue", scol = "orange")  # Cf. Figure 1
+plot(as(fit2, "vgam"), se = TRUE, lcol = "blue", scol = "orange")  # Cf. Figure 1
 
 # Cf. Figure 1(a)
 par(mfrow = c(1,2))
diff --git a/man/eexpUC.Rd b/man/eexpUC.Rd
index 1641d74..bc512d2 100644
--- a/man/eexpUC.Rd
+++ b/man/eexpUC.Rd
@@ -99,9 +99,9 @@ very close to 0 or 1.
 }
 
 \examples{
-my_p <- 0.25; y <- rexp(nn <- 1000)
-(myexp <- qeexp(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
+my.p <- 0.25; y <- rexp(nn <- 1000)
+(myexp <- qeexp(my.p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my.p
 
 \dontrun{ par(mfrow = c(2,1))
 yy <- seq(-0, 4, len = nn)
@@ -119,7 +119,7 @@ lines(yy, pexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) }
 %# Equivalently:
 %I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
 %I2 <- mean(y >  myexp) * mean(-myexp + y[y >  myexp])
-%I1 / (I1 + I2)  # Should be my_p
+%I1 / (I1 + I2)  # Should be my.p
 %# Or:
 %I1 <- sum( myexp - y[y <= myexp])
 %I2 <- sum(-myexp + y[y >  myexp])
@@ -128,13 +128,13 @@ lines(yy, pexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) }
 %# Non-standard exponential
 %myrate <- 8
 %yy <- rexp(nn, rate = myrate)
-%(myexp <- qeexp(my_p, rate = myrate))
-%sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
+%(myexp <- qeexp(my.p, rate = myrate))
+%sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my.p
 %peexp(-Inf, rate = myrate)      #  Should be 0
 %peexp( Inf, rate = myrate)      #  Should be 1
 %peexp(mean(yy), rate = myrate)  #  Should be 0.5
 %abs(qeexp(0.5, rate = myrate) - mean(yy))  #  Should be 0
-%abs(peexp(myexp, rate = myrate) - my_p)  #  Should be 0
+%abs(peexp(myexp, rate = myrate) - my.p)  #  Should be 0
 %integrate(f = deexp, lower = -1, upper = Inf, rate = myrate)  #  Should be 1
 
 
diff --git a/man/enormUC.Rd b/man/enormUC.Rd
index c3fbeff..58c6387 100644
--- a/man/enormUC.Rd
+++ b/man/enormUC.Rd
@@ -100,20 +100,20 @@ very close to 0 or 1.
 }
 
 \examples{
-my_p <- 0.25; y <- rnorm(nn <- 1000)
-(myexp <- qenorm(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
+my.p <- 0.25; y <- rnorm(nn <- 1000)
+(myexp <- qenorm(my.p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my.p
 
 # Non-standard normal
 mymean <- 1; mysd <- 2
 yy <- rnorm(nn, mymean, mysd)
-(myexp <- qenorm(my_p, mymean, mysd))
-sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
+(myexp <- qenorm(my.p, mymean, mysd))
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my.p
 penorm(-Inf, mymean, mysd)      #  Should be 0
 penorm( Inf, mymean, mysd)      #  Should be 1
 penorm(mean(yy), mymean, mysd)  #  Should be 0.5
 abs(qenorm(0.5, mymean, mysd) - mean(yy))  #  Should be 0
-abs(penorm(myexp, mymean, mysd) - my_p)    #  Should be 0
+abs(penorm(myexp, mymean, mysd) - my.p)    #  Should be 0
 integrate(f = denorm, lower = -Inf, upper = Inf,
           mymean, mysd)  #  Should be 1
 
@@ -134,7 +134,7 @@ lines(yy, pnorm(yy), col = "darkgreen", lty = "dotted", lwd = 2) }
 %# Equivalently:
 %I1 = mean(y <= myexp) * mean( myexp - y[y <= myexp])
 %I2 = mean(y >  myexp) * mean(-myexp + y[y >  myexp])
-%I1 / (I1 + I2)  # Should be my_p
+%I1 / (I1 + I2)  # Should be my.p
 %# Or:
 %I1 = sum( myexp - y[y <= myexp])
 %I2 = sum(-myexp + y[y >  myexp])
diff --git a/man/erf.Rd b/man/erf.Rd
index 0699e48..9a14ff5 100644
--- a/man/erf.Rd
+++ b/man/erf.Rd
@@ -1,17 +1,22 @@
 \name{erf}
 \alias{erf}
+\alias{erfc}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Error Function }
+\title{ Error Function, and variants }
 \description{
-  Computes the error function based on the normal distribution.
+  Computes the error function, or its inverse,
+  based on the normal distribution.
+  Also computes the complement of the error function, or its inverse.
 
 }
 \usage{
-erf(x)
+erf(x, inverse = FALSE)
+erfc(x, inverse = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{x}{ Numeric. }
+  \item{inverse}{ Logical. Of length 1. }
 
 
 }
@@ -20,6 +25,8 @@ erf(x)
   \deqn{Erf(x) = \frac{2}{\sqrt{\pi}} \int_0^x \exp(-t^2) dt}{%
     Erf(x) = (2/sqrt(pi)) int_0^x exp(-t^2) dt}
   so that it is closely related to \code{\link[stats:Normal]{pnorm}}.
+  The inverse function is defined for \eqn{x} in \eqn{(-1,1)}.
+
 
 
 }
@@ -47,6 +54,8 @@ New York: Dover Publications Inc.
 
   The \emph{complementary error function} \eqn{erfc(x)} is defined
   as \eqn{1-erf(x)}, and is implemented by \code{erfc}.
+  Its inverse function is defined for \eqn{x} in \eqn{(0,2)}.
+
 
 
 }
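
Since erf is defined through the standard normal distribution, the
identities behind the new inverse options can be sketched with pnorm()
and qnorm() alone (the function names below are illustrative only, not
from the package):

  erf0    <- function(x) 2 * pnorm(x * sqrt(2)) - 1        # erf via pnorm
  erfinv  <- function(x) qnorm((x + 1) / 2) / sqrt(2)      # defined on (-1, 1)
  erfcinv <- function(x) qnorm(x / 2, lower.tail = FALSE) / sqrt(2)  # on (0, 2)
  erfinv(erf0(0.3))  # Should return 0.3
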
diff --git a/man/erlang.Rd b/man/erlang.Rd
index 88c983b..3c26974 100644
--- a/man/erlang.Rd
+++ b/man/erlang.Rd
@@ -14,8 +14,9 @@ erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{shape.arg}{
-  The shape parameter.
-  The user must specify a positive integer.
+  The shape parameters.
+  The user must specify a positive integer, or integers for multiple responses.
+  They are recycled by row, as with \code{byrow = TRUE} in \code{\link[base]{matrix}}.
 
 
   }
@@ -80,14 +81,14 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 \note{
   Multiple responses are permitted.
-  The \code{rate} parameter found in \code{\link{gamma2.ab}}
+  The \code{rate} parameter found in \code{\link{gammaR}}
   is \code{1/scale} here---see also \code{\link[stats]{rgamma}}.
 
 
 }
 
 \seealso{
-  \code{\link{gamma2.ab}},
+  \code{\link{gammaR}},
   \code{\link{exponential}},
   \code{\link{simulate.vlm}}.
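
A sketch of what the new by-row recycling of shape.arg permits with
multiple responses (hypothetical data; the shapes are taken as known,
as the argument description requires):

  edata <- data.frame(y1 = rgamma(200, shape = 2, rate = exp(1)),
                      y2 = rgamma(200, shape = 3, rate = exp(1)))
  fit <- vglm(cbind(y1, y2) ~ 1, erlang(shape.arg = c(2, 3)), data = edata)
  coef(fit, matrix = TRUE)  # One column of coefficients per response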
 
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index 658f226..e2b8436 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -142,19 +142,19 @@ quantile and expectile regression.
   \code{\link{deexp}},
   \code{\link{denorm}},
   \code{\link{dunif}},
-  \code{\link{dkoenker}}.
+  \code{\link{dsc.t2}}.
 
 
 }
 
 \examples{
-my_p <- 0.25; y <- runif(nn <- 1000)
-(myexp <- qeunif(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
+my.p <- 0.25; y <- runif(nn <- 1000)
+(myexp <- qeunif(my.p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my.p
 # Equivalently:
 I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
 I2 <- mean(y >  myexp) * mean(-myexp + y[y >  myexp])
-I1 / (I1 + I2)  # Should be my_p
+I1 / (I1 + I2)  # Should be my.p
 # Or:
 I1 <- sum( myexp - y[y <= myexp])
 I2 <- sum(-myexp + y[y >  myexp])
@@ -162,14 +162,14 @@ I2 <- sum(-myexp + y[y >  myexp])
 # Non-standard uniform
 mymin <- 1; mymax <- 8
 yy <- runif(nn, mymin, mymax)
-(myexp <- qeunif(my_p, mymin, mymax))
-sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
+(myexp <- qeunif(my.p, mymin, mymax))
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my.p
 peunif(mymin, mymin, mymax)     #  Should be 0
 peunif(mymax, mymin, mymax)     #  Should be 1
 peunif(mean(yy), mymin, mymax)  #  Should be 0.5
 abs(qeunif(0.5, mymin, mymax) - mean(yy))  #  Should be 0
 abs(qeunif(0.5, mymin, mymax) - (mymin+mymax)/2)  #  Should be 0
-abs(peunif(myexp, mymin, mymax) - my_p)  #  Should be 0
+abs(peunif(myexp, mymin, mymax) - my.p)  #  Should be 0
 integrate(f = deunif, lower = mymin - 3, upper = mymax + 3,
           min = mymin, max = mymax)  # Should be 1
 
diff --git a/man/expexp.Rd b/man/expexpff.Rd
similarity index 75%
rename from man/expexp.Rd
rename to man/expexpff.Rd
index c010e80..f3aa8b5 100644
--- a/man/expexp.Rd
+++ b/man/expexpff.Rd
@@ -1,5 +1,5 @@
-\name{expexp}
-\alias{expexp}
+\name{expexpff}
+\alias{expexpff}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Exponentiated Exponential Distribution }
 \description{
@@ -9,14 +9,14 @@
 
 }
 \usage{
-expexp(lshape = "loge", lscale = "loge",
-       ishape = 1.1, iscale = NULL, tolerance = 1.0e-6, zero = NULL)
+expexpff(lrate = "loge", lshape = "loge",
+         irate = NULL, ishape = 1.1, tolerance = 1.0e-6, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape, lscale}{
+  \item{lshape, lrate}{
   Parameter link functions for the
-  \eqn{\alpha}{shape} and \eqn{\lambda}{scale} parameters.
+  \eqn{\alpha}{shape} and \eqn{\lambda}{rate} parameters.
   See \code{\link{Links}} for more choices.
   The defaults ensure both parameters are positive.
 
@@ -29,8 +29,8 @@ expexp(lshape = "loge", lscale = "loge",
 
 
   }
-  \item{iscale}{
-  Initial value for the \eqn{\lambda}{scale} parameter.
+  \item{irate}{
+  Initial value for the \eqn{\lambda}{rate} parameter.
   By default, an initial value is chosen internally using
   \code{ishape}.
 
@@ -53,22 +53,22 @@ expexp(lshape = "loge", lscale = "loge",
   The exponentiated exponential distribution is an alternative
   to the Weibull and the gamma distributions.
   The formula for the density is
-  \deqn{f(y;\alpha,\lambda) =
+  \deqn{f(y;\lambda,\alpha) =
     \alpha \lambda (1-\exp(-\lambda y))^{\alpha-1}
     \exp(-\lambda y) }{%
-    f(y;shape,scale) =
-    shape scale (1-\exp(-scale y))^(shape-1)
-    \exp(-scale y) }
+    f(y;rate,shape) =
+    shape rate (1-\exp(-rate y))^(shape-1)
+    \exp(-rate y) }
   where \eqn{y>0},
-  \eqn{\alpha>0}{shape>0} and
-  \eqn{\lambda>0}{scale>0}.
+  \eqn{\lambda>0}{rate>0} and
+  \eqn{\alpha>0}{shape>0}.
   The mean of \eqn{Y} is
-  \eqn{(\psi(\alpha+1)-\psi(1))/\lambda}{(psi(shape+1)-psi(1))/scale}
+  \eqn{(\psi(\alpha+1)-\psi(1))/\lambda}{(psi(shape+1)-psi(1))/rate}
   (returned as the fitted values)
   where \eqn{\psi}{psi} is the digamma function.
   The variance of \eqn{Y} is
   \eqn{(\psi'(1)-\psi'(\alpha+1))/\lambda^2}{(psi'(1)-psi'(shape+1))/
-    scale^2}
+    rate^2}
   where \eqn{\psi'}{psi'} is the trigamma function.
 
 
@@ -114,7 +114,7 @@ expexp(lshape = "loge", lscale = "loge",
 
 
   Another algorithm for fitting this model is implemented in
-  \code{\link{expexp1}}.
+  \code{\link{expexpff1}}.
 
 
 }
@@ -131,9 +131,9 @@ expexp(lshape = "loge", lscale = "loge",
 }
 
 \seealso{
-  \code{\link{expexp1}},
-  \code{\link{gamma2.ab}},
-  \code{\link{weibull}},
+  \code{\link{expexpff1}},
+  \code{\link{gammaR}},
+  \code{\link{weibullR}},
   \code{\link{CommonVGAMffArguments}}.
 
 
@@ -141,31 +141,31 @@ expexp(lshape = "loge", lscale = "loge",
 \examples{
 # A special case: exponential data
 edata <- data.frame(y = rexp(n <- 1000))
-fit <- vglm(y ~ 1, fam = expexp, data = edata, trace = TRUE, maxit = 99)
+fit <- vglm(y ~ 1, fam = expexpff, data = edata, trace = TRUE, maxit = 99)
 coef(fit, matrix = TRUE)
 Coef(fit)
 
 
 # Ball bearings data (number of million revolutions before failure)
-bbearings <- c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
+edata <- data.frame(bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
 48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
 68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
-128.04, 173.40)
-fit <- vglm(bbearings ~ 1, fam = expexp(iscale = 0.05, ish = 5),
-            trace = TRUE, maxit = 300)
+128.04, 173.40))
+fit <- vglm(bbearings ~ 1, fam = expexpff(irate = 0.05, ish = 5),
+            trace = TRUE, maxit = 300, data = edata)
 coef(fit, matrix = TRUE)
-Coef(fit)    # Authors get c(shape=5.2589, scale=0.0314)
+Coef(fit)    # Authors get c(rate=0.0314, shape=5.2589)
 logLik(fit)  # Authors get -112.9763
 
 
 # Failure times of the airconditioning system of an airplane
-acplane <- c(23, 261, 87, 7, 120, 14, 62, 47,
+eedata <- data.frame(acplane = c(23, 261, 87, 7, 120, 14, 62, 47,
 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
-71, 11, 14, 11, 16, 90, 1, 16, 52, 95)
-fit <- vglm(acplane ~ 1, fam = expexp(ishape = 0.8, isc = 0.15),
-            trace = TRUE, maxit = 99)
+71, 11, 14, 11, 16, 90, 1, 16, 52, 95))
+fit <- vglm(acplane ~ 1, fam = expexpff(ishape = 0.8, irate = 0.15),
+            trace = TRUE, maxit = 99, data = eedata)
 coef(fit, matrix = TRUE)
-Coef(fit)    # Authors get c(shape=0.8130, scale=0.0145)
+Coef(fit)    # Authors get c(rate=0.0145, shape=0.8130)
 logLik(fit)  # Authors get log-lik -152.264
 }
 \keyword{models}
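
The mean and variance formulas in the rewritten details block are easy
to sanity-check by simulation; a sketch using inverse-CDF sampling from
F(y) = (1 - exp(-rate * y))^shape (the sampler is derived here, not
taken from the package):

  shape <- 2; rate <- 0.5
  u <- runif(1e5)
  y <- -log(1 - u^(1/shape)) / rate  # Inverse-CDF sample from the exp-exp
  c(mean(y), (digamma(shape + 1) - digamma(1)) / rate)      # Should agree
  c(var(y),  (trigamma(1) - trigamma(shape + 1)) / rate^2)  # Should agree
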
diff --git a/man/expexp1.Rd b/man/expexpff1.Rd
similarity index 72%
rename from man/expexp1.Rd
rename to man/expexpff1.Rd
index 539f8e3..ba219a5 100644
--- a/man/expexp1.Rd
+++ b/man/expexpff1.Rd
@@ -1,5 +1,5 @@
-\name{expexp1}
-\alias{expexp1}
+\name{expexpff1}
+\alias{expexpff1}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Exponentiated Exponential Distribution }
 \description{
@@ -9,18 +9,18 @@
 
 }
 \usage{
-expexp1(lscale = "loge", iscale = NULL, ishape = 1)
+expexpff1(lrate = "loge", irate = NULL, ishape = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lscale}{
-  Parameter link function for the (positive) \eqn{\lambda}{scale} parameter.
+  \item{lrate}{
+  Parameter link function for the (positive) \eqn{\lambda}{rate} parameter.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{iscale}{
-  Initial value for the \eqn{\lambda}{scale} parameter.
+  \item{irate}{
+  Initial value for the \eqn{\lambda}{rate} parameter.
   By default, an initial value is chosen internally using \code{ishape}.
 
 
@@ -33,14 +33,14 @@ expexp1(lscale = "loge", iscale = NULL, ishape = 1)
   }
 }
 \details{
-  See \code{\link{expexp}} for details about the exponentiated
+  See \code{\link{expexpff}} for details about the exponentiated
   exponential distribution. This family function uses a different
-  algorithm for fitting the model. Given \eqn{\lambda}{scale},
+  algorithm for fitting the model. Given \eqn{\lambda}{rate},
   the MLE of \eqn{\alpha}{shape} can easily be solved in terms of
-  \eqn{\lambda}{scale}. This family function maximizes a profile
-  (concentrated) likelihood with respect to \eqn{\lambda}{scale}.
+  \eqn{\lambda}{rate}. This family function maximizes a profile
+  (concentrated) likelihood with respect to \eqn{\lambda}{rate}.
   Newton-Raphson is used, which compares with Fisher scoring with
-  \code{\link{expexp}}.
+  \code{\link{expexpff}}.
 
 
 }
@@ -80,7 +80,7 @@ expexp1(lscale = "loge", iscale = NULL, ishape = 1)
   positive.
 
 
-  Like \code{\link{expexp}}, good initial
+  Like \code{\link{expexpff}}, good initial
   values are needed. Convergence may be slow.
 
 
@@ -94,35 +94,35 @@ expexp1(lscale = "loge", iscale = NULL, ishape = 1)
 }
 
 \seealso{
-  \code{\link{expexp}},
+  \code{\link{expexpff}},
   \code{\link{CommonVGAMffArguments}}.
 
 
 }
 \examples{
 # Ball bearings data (number of million revolutions before failure)
-bbearings <- data.frame(y = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
+edata <- data.frame(bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
 48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
 68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
 128.04, 173.40))
-fit <- vglm(y ~ 1, expexp1(ishape = 4), bbearings, trace = TRUE,
-            maxit = 50, checkwz = FALSE)
+fit <- vglm(bbearings ~ 1, expexpff1(ishape = 4), trace = TRUE,
+            maxit = 250, checkwz = FALSE, data = edata)
 coef(fit, matrix = TRUE)
 Coef(fit)  # Authors get c(0.0314, 5.2589) with log-lik -112.9763
-fit at misc$shape  # Estimate of shape
 logLik(fit)
+fit at misc$shape  # Estimate of shape
 
 
 # Failure times of the airconditioning system of an airplane
-acplane <- data.frame(y = c(23, 261, 87, 7, 120, 14, 62, 47,
+eedata <- data.frame(acplane = c(23, 261, 87, 7, 120, 14, 62, 47,
 225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
 71, 11, 14, 11, 16, 90, 1, 16, 52, 95))
-fit <- vglm(y ~ 1, expexp1(ishape = 0.8), acplane, trace = TRUE,
-            maxit = 50, checkwz = FALSE)
+fit <- vglm(acplane ~ 1, expexpff1(ishape = 0.8), trace = TRUE,
+            maxit = 50, checkwz = FALSE, data = eedata)
 coef(fit, matrix = TRUE)
 Coef(fit)  # Authors get c(0.0145, 0.8130) with log-lik -152.264
-fit at misc$shape  # Estimate of shape
 logLik(fit)
+fit at misc$shape  # Estimate of shape
 }
 \keyword{models}
 \keyword{regression}
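
The profile-likelihood algorithm described above is compact enough to
sketch: given the rate, the shape MLE has the closed form
-n / sum(log(1 - exp(-rate * y))), which is substituted back into the
log-likelihood before maximizing over the rate alone. A rough
standalone illustration (not the package's implementation):

  proflik <- function(lambda, y) {  # Profile log-likelihood in the rate
    n <- length(y)
    alpha <- -n / sum(log1p(-exp(-lambda * y)))  # Shape MLE given the rate
    n * log(alpha) + n * log(lambda) +
      (alpha - 1) * sum(log1p(-exp(-lambda * y))) - lambda * sum(y)
  }
  y <- rexp(500, rate = 0.3)  # Exponential data: the shape = 1 special case
  opt <- optimize(proflik, c(0.01, 5), y = y, maximum = TRUE)
  c(rate  = opt$maximum,
    shape = -length(y) / sum(log1p(-exp(-opt$maximum * y))))
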
diff --git a/man/expgeometric.Rd b/man/expgeometric.Rd
index 603353a..33e0179 100644
--- a/man/expgeometric.Rd
+++ b/man/expgeometric.Rd
@@ -81,8 +81,8 @@ expgeometric(lscale = "loge", lshape = "logit",
 }
 \examples{
 \dontrun{
-scale <- exp(2); shape = logit(-1, inverse = TRUE);
-edata <- data.frame(y = rexpgeom(n = 2000, scale = scale, shape = shape))
+Scale <- exp(2); shape <- logit(-1, inverse = TRUE)
+edata <- data.frame(y = rexpgeom(n = 2000, scale = Scale, shape = shape))
 fit <- vglm(y ~ 1, expgeometric, edata, trace = TRUE)
 c(with(edata, mean(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
diff --git a/man/explogff.Rd b/man/explogff.Rd
index 2b5ba57..53827d2 100644
--- a/man/explogff.Rd
+++ b/man/explogff.Rd
@@ -80,9 +80,9 @@ explogff(lscale = "loge", lshape = "logit",
 
 }
 \examples{
-\dontrun{ scale <- exp(2); shape <- logit(-1, inverse = TRUE)
-edata <- data.frame(y = rexplog(n = 2000, scale = scale, shape = shape))
-fit <- vglm(y ~ 1, explogff, edata, trace = TRUE)
+\dontrun{ Scale <- exp(2); shape <- logit(-1, inverse = TRUE)
+edata <- data.frame(y = rexplog(n = 2000, scale = Scale, shape = shape))
+fit <- vglm(y ~ 1, explogff, data = edata, trace = TRUE)
 c(with(edata, median(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/exponential.Rd b/man/exponential.Rd
index e87631b..33e76df 100644
--- a/man/exponential.Rd
+++ b/man/exponential.Rd
@@ -5,10 +5,11 @@
 \description{
   Maximum likelihood estimation for the exponential distribution.
 
+
 }
 \usage{
 exponential(link = "loge", location = 0, expected = TRUE,
-            shrinkage.init = 0.95, zero = NULL)
+            ishrinkage = 0.95, parallel = FALSE, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -16,19 +17,23 @@ exponential(link = "loge", location = 0, expected = TRUE,
   Parameter link function applied to the positive parameter \eqn{rate}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{location}{
   Numeric of length 1, the known location parameter, \eqn{A}, say.
 
+
   }
   \item{expected}{
   Logical. If \code{TRUE} Fisher scoring is used,
   otherwise Newton-Raphson. The latter is usually faster.
 
+
   }
-  \item{shrinkage.init, zero}{
+  \item{ishrinkage, parallel, zero}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
+
   }
 
 }
@@ -57,6 +62,7 @@ Forbes, C., Evans, M., Hastings, N. and Peacock, B. (2011)
 Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
+
 }
 
 \author{ T. W. Yee }
@@ -75,13 +81,15 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 \seealso{
     \code{\link{amlexponential}},
+    \code{\link{gpd}},
     \code{\link{laplace}},
     \code{\link{expgeometric}},
     \code{\link{explogff}},
     \code{\link{poissonff}},
     \code{\link{mix2exp}},
     \code{\link{freund61}},
-    \code{\link{simulate.vlm}}.
+    \code{\link{simulate.vlm}},
+    \code{\link[stats]{Exponential}}.
 
 
 %   \code{\link{cexpon}},
@@ -97,11 +105,26 @@ edata <- transform(edata, rate = exp(eta))
 edata <- transform(edata, y = rexp(nn, rate = rate))
 with(edata, stem(y))
 
-fit.slow <- vglm(y ~ x2 + x3, exponential, data = edata, trace = TRUE, crit = "c")
+fit.slow <- vglm(y ~ x2 + x3, exponential, data = edata, trace = TRUE)
 fit.fast <- vglm(y ~ x2 + x3, exponential(exp = FALSE), data = edata,
                  trace = TRUE, crit = "coef")
 coef(fit.slow, mat = TRUE)
 summary(fit.slow)
+
+
+# Compare results with a GPD. Has a threshold.
+threshold <- 0.5
+gdata <- data.frame(y1 = threshold + rexp(n = 3000, rate = exp(1.5)))
+
+fit.exp <- vglm(y1 ~ 1, exponential(location = threshold), data = gdata)
+coef(fit.exp, matrix = TRUE)
+Coef(fit.exp)
+logLik(fit.exp)
+
+fit.gpd <- vglm(y1 ~ 1, gpd(threshold = threshold), data = gdata)
+coef(fit.gpd, matrix = TRUE)
+Coef(fit.gpd)
+logLik(fit.gpd)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/exppoisson.Rd b/man/exppoisson.Rd
index dc10113..61fbe2b 100644
--- a/man/exppoisson.Rd
+++ b/man/exppoisson.Rd
@@ -8,20 +8,20 @@
 
 }
 \usage{
-exppoisson(llambda = "loge", lbetave = "loge",
-           ilambda = 1.1, ibetave = 2, zero = NULL)
+exppoisson(lrate = "loge", lshape = "loge",
+           irate = 2, ishape = 1.1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{llambda, lbetave}{
+  \item{lshape, lrate}{
   Link function for the two positive parameters.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{ilambda, ibetave}{
+  \item{ishape, irate}{
   Numeric.
-  Initial values for the \code{lambda} and \code{betave} parameters.
+  Initial values for the \code{shape} and \code{rate} parameters.
   Currently this function is not intelligent enough to
   obtain better initial values.
 
@@ -35,18 +35,23 @@ exppoisson(llambda = "loge", lbetave = "loge",
 }
 \details{
   The exponential Poisson distribution has density function
-  \deqn{f(y; \lambda = shape, \beta = scale)  =
+  \deqn{f(y; \beta = rate, \lambda = shape)  =
   \frac{\lambda \beta}{1 - e^{-\lambda}} \,
   e^{-\lambda - \beta y + \lambda \exp{(-\beta y)}}}{%
-  f(y; a = shape, b = scale)  =
+  f(y; a = shape, b = rate)  =
   (a*b/(1 - e^(-a))) * e^{-a - b*y + a * e^(-b*y)}}
-  where \eqn{y > 0} and the
-  parameters shape, \eqn{\lambda}{a},
-  and scale, \eqn{\beta}{b}, are positive.
+  where \eqn{y > 0},
+  and the parameters shape, \eqn{\lambda}{a},
+  and rate, \eqn{\beta}{b}, are positive.
   The distribution implies a population facing discrete
   hazard rates which are multiples of a base hazard.
   This \pkg{VGAM} family function requires the \code{hypergeo} package
   (to use its \code{genhypergeo} function).
+  The median is returned as the fitted value.
+
+
+
+
 
 
 % This \pkg{VGAM} family function requires the \pkg{hypergeo} package
@@ -87,11 +92,11 @@ exppoisson(llambda = "loge", lbetave = "loge",
 }
 \examples{
 \dontrun{
-lambda <- exp(1); betave <- exp(2)
-rdata <- data.frame(y = rexppois(n = 1000, lambda, betave))
-library(hypergeo)
-fit <- vglm(y ~ 1, exppoisson, rdata, trace = TRUE)
-c(with(rdata, mean(y)), head(fitted(fit), 1))
+shape <- exp(1); rate <- exp(2)
+rdata <- data.frame(y = rexppois(n = 1000, rate = rate, shape = shape))
+library("hypergeo")  # Required!
+fit <- vglm(y ~ 1, exppoisson, data = rdata, trace = FALSE, maxit = 1200)
+c(with(rdata, median(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
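
Since the median is now returned as the fitted value, it can be checked
directly against the quantile function renamed in the next file's hunks:

  qexppois(0.5, rate = exp(2), shape = exp(1))  # Population median; should
  # be close to head(fitted(fit), 1) in the example above
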
diff --git a/man/exppoissonUC.Rd b/man/exppoissonUC.Rd
index ad4feea..430359b 100644
--- a/man/exppoissonUC.Rd
+++ b/man/exppoissonUC.Rd
@@ -11,17 +11,17 @@
 
 }
 \usage{
-dexppois(x, lambda, betave = 1, log = FALSE)
-pexppois(q, lambda, betave = 1)
-qexppois(p, lambda, betave = 1)  
-rexppois(n, lambda, betave = 1)
+dexppois(x, rate = 1, shape, log = FALSE)
+pexppois(q, rate = 1, shape)
+qexppois(p, rate = 1, shape)  
+rexppois(n, rate = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
     If \code{length(n) > 1} then the length is taken to be the number required. }
-  \item{lambda, betave}{ both positive parameters. }
+  \item{shape, rate}{ both positive parameters. }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
@@ -38,7 +38,10 @@ rexppois(n, lambda, betave = 1)
 
 
 }
-\author{ J. G. Lauder, jamesglauder at gmail.com }
+\author{ J. G. Lauder, jamesglauder at gmail.com
+
+
+}
 \details{
   See \code{\link{exppoisson}}, the \pkg{VGAM} family function
   for estimating the parameters, 
@@ -55,20 +58,20 @@ rexppois(n, lambda, betave = 1)
 }
 \examples{
 \dontrun{
-lambda <- 2; betave <- 2; nn <- 201
+shape <- 2; rate <- 2; nn <- 201
 x <- seq(-0.05, 1.05, len = nn)
-plot(x, dexppois(x, lambda, betave), type = "l", las = 1, ylim = c(0, 5),
-     ylab = paste("[dp]exppoisson(lambda = ", lambda, ", betave = ", betave, ")"),
+plot(x, dexppois(x, shape, rate = rate), type = "l", las = 1, ylim = c(0, 5),
+     ylab = paste("[dp]exppoisson(shape = ", shape, ", rate = ", rate, ")"),
      col = "blue", cex.main = 0.8,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
-lines(x, pexppois(x, lambda, betave), col = "orange")
+lines(x, pexppois(x, shape, rate = rate), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qexppois(probs, lambda, betave)
-lines(Q, dexppois(Q, lambda, betave), col = "purple", lty = 3, type = "h")
-lines(Q, pexppois(Q, lambda, betave), col = "purple", lty = 3, type = "h")
+Q <- qexppois(probs, shape, rate = rate)
+lines(Q, dexppois(Q, shape, rate = rate), col = "purple", lty = 3, type = "h")
+lines(Q, pexppois(Q, shape, rate = rate), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pexppois(Q, lambda, betave) - probs))  # Should be 0
+max(abs(pexppois(Q, shape, rate = rate) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/fisk.Rd b/man/fisk.Rd
index 161ec33..0e07c3f 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -83,10 +83,10 @@ Hoboken, NJ: Wiley-Interscience.
     \code{\link{betaII}},
     \code{\link{dagum}},
     \code{\link{sinmad}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}},
+    \code{\link{inv.paralogistic}},
     \code{\link{simulate.vlm}}.
 
 }
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index 278f617..c1484b1 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -11,7 +11,7 @@
 
 }
 \usage{
-fittedvlm(object, matrix.arg = TRUE, type.fitted = NULL, ...)
+fittedvlm(object, drop = FALSE, type.fitted = NULL, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,12 +20,19 @@ fittedvlm(object, matrix.arg = TRUE, type.fitted = NULL, ...)
 
 
   }
-  \item{matrix.arg}{
-  Logical. Return the answer as a matrix?
-  If \code{FALSE} then it will be a vector.
+  \item{drop}{
+  Logical.
+  If \code{FALSE} then the answer is a matrix.
+  If \code{TRUE} then the answer is a vector.
 
 
   }
+% \item{matrix.arg}{
+% Logical. Return the answer as a matrix?
+% If \code{FALSE} then it will be a vector.
+
+
+% }
   \item{type.fitted}{
   Character.
   Some \pkg{VGAM} family functions have a \code{type.fitted} argument.
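
A hedged sketch of the renamed argument in use; only the behaviour
stated above (matrix for drop = FALSE, vector for drop = TRUE) is
assumed, reusing the dirichlet example from earlier in this diff:

  fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet, data = ydata)
  dim(fitted(fit, drop = FALSE))  # A matrix
  head(fitted(fit, drop = TRUE))  # A vector
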
diff --git a/man/flourbeetle.Rd b/man/flourbeetle.Rd
new file mode 100644
index 0000000..7fa2b9a
--- /dev/null
+++ b/man/flourbeetle.Rd
@@ -0,0 +1,63 @@
+\name{flourbeetle}
+\alias{flourbeetle}
+\docType{data}
+\title{Mortality of Flour Beetles from Carbon Disulphide}
+\description{
+The \code{flourbeetle} data frame has 8 rows and 4 columns.
+Two columns are explanatory, the other two are responses.
+
+
+}
+\usage{data(flourbeetle)}
+\format{
+  This data frame contains the following columns:
+  \describe{
+    \item{logdose}{\code{\link[base]{log10}} applied to \code{CS2mgL}. }
+    \item{CS2mgL}{a numeric vector, the concentration of gaseous
+                  carbon disulphide in mg per litre. }
+    \item{exposed}{a numeric vector, counts; the number of
+                   beetles exposed to the poison. }
+    \item{killed}{a numeric vector, counts; the numbers killed. }
+  }
+}
+\details{
+These data were originally given in Table IV of Bliss (1935) and
+are the combination of
+two series of toxicological experiments involving
+\emph{Tribolium confusum}, also known as the flour beetle.
+Groups of such adult beetles were exposed for 5 hours to
+gaseous carbon disulphide at different concentrations,
+and their mortality measured.
+
+
+
+}
+\source{
+
+ Bliss, C. I. (1935)
+ The calculation of the dosage-mortality curve.
+ \emph{Annals of Applied Biology}, \bold{22}, 134--167.
+
+
+}
+\seealso{
+  \code{\link{binomialff}},
+  \code{\link{probit}}.
+
+
+}
+
+%\references{
+%
+%  
+%  
+%
+%
+%}
+
+\examples{
+fit1 <- vglm(cbind(killed, exposed - killed) ~ logdose, binomialff(link = probit),
+             data = flourbeetle, trace = TRUE)
+summary(fit1)
+}
+\keyword{datasets}
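
A natural follow-up to the probit example is the LD50, the
concentration at which half the beetles are killed; a sketch (not part
of the .Rd itself):

  # On the probit scale, eta = 0 at 50% mortality, so solve for logdose:
  LD50.log10 <- -coef(fit1)[1] / coef(fit1)[2]
  10^LD50.log10  # Estimated CS2 concentration in mg per litre
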
diff --git a/man/foldnormal.Rd b/man/foldnormal.Rd
index 5d3ac96..b58791e 100644
--- a/man/foldnormal.Rd
+++ b/man/foldnormal.Rd
@@ -154,7 +154,8 @@ hist(with(fdata, y), prob = TRUE, main = paste("foldnormal(m = ", m,
 fit <- vglm(y ~ 1, foldnormal, data = fdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 (Cfit <- Coef(fit))
-mygrid <- with(fdata, seq(min(y), max(y), len = 200))  # Add the fit to the histogram
+# Add the fit to the histogram:
+mygrid <- with(fdata, seq(min(y), max(y), len = 200))
 lines(mygrid, dfoldnorm(mygrid, Cfit[1], Cfit[2]), col = "orange")
 }
 }
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 489ea28..fc938d2 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -1,6 +1,6 @@
 \name{frechet}
-% \alias{frechet}
-\alias{frechet2}
+\alias{frechet}
+%\alias{frechet2}
 %\alias{frechet3}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Frechet Distribution Family Function }
@@ -12,8 +12,8 @@
 
 }
 \usage{
-frechet2(location = 0, lscale = "loge", lshape = logoff(offset = -2),
-         iscale = NULL, ishape = NULL, nsimEIM = 250, zero = NULL)
+frechet(location = 0, lscale = "loge", lshape = logoff(offset = -2),
+        iscale = NULL, ishape = NULL, nsimEIM = 250, zero = NULL)
 %frechet3(anchor = NULL, ldifference = "loge", lscale = "loge",
 %         lshape = "loglog",
 %         ilocation = NULL, iscale = NULL, ishape = NULL,
@@ -86,7 +86,7 @@ frechet2(location = 0, lscale = "loge", lshape = logoff(offset = -2),
   for \eqn{s > 2}.
 
 
-  Family \code{frechet2} has \eqn{a} known, and
+  Family \code{frechet} has \eqn{a} known, and
   \eqn{\log(b)}{log(b)} and
   \eqn{\log(s - 2)}{log(s - 2)} are the default linear/additive predictors.
   The working weights are estimated by simulated Fisher scoring.
@@ -128,7 +128,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 %  like \code{maxit = 200, trace = TRUE} is a good idea.
 
 
-  Family function \code{frechet2} may fail for low values of
+  Family function \code{frechet} may fail for low values of
   the shape parameter, e.g., near 2 or lower.
 
 
@@ -162,7 +162,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 set.seed(123)
 fdata <- data.frame(y1 = rfrechet(nn <- 1000, shape = 2 + exp(1)))
 with(fdata, hist(y1))
-fit2 <- vglm(y1 ~ 1, frechet2, data = fdata, trace = TRUE)
+fit2 <- vglm(y1 ~ 1, frechet, data = fdata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 Coef(fit2)
 head(fitted(fit2))
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index ae98756..99be304 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -55,7 +55,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{frechet2}}, the \pkg{VGAM}
+  See \code{\link{frechet}}, the \pkg{VGAM}
   family function for estimating the 2 parameters (without location
   parameter) by maximum likelihood estimation, for the formula
   of the probability density function and range restrictions on
@@ -66,7 +66,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 %\note{
 %}
 \seealso{
-  \code{\link{frechet2}}.
+  \code{\link{frechet}}.
 
 
 % \code{\link{frechet3}}.
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
index eaf8ab5..0f7e887 100644
--- a/man/gamma1.Rd
+++ b/man/gamma1.Rd
@@ -68,14 +68,14 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
   If \eqn{rate} is unknown use the family function
-  \code{\link{gamma2.ab}} to estimate it too.
+  \code{\link{gammaR}} to estimate it too.
 
 
 }
 
 \seealso{
-  \code{\link{gamma2.ab}} for the 2-parameter gamma distribution,
-  \code{\link{lgammaff}},
+  \code{\link{gammaR}} for the 2-parameter gamma distribution,
+  \code{\link{lgamma1}},
   \code{\link{lindley}},
   \code{\link{simulate.vlm}}.
 
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index 4f5d55f..76e316f 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -17,7 +17,7 @@ gamma2(lmu = "loge", lshape = "loge",
 \arguments{
   \item{lmu, lshape}{
   Link functions applied to the (positive) \emph{mu} and \emph{shape}
-  parameters (called \eqn{\mu}{mu} and \eqn{\lambda}{shape} respectively).
+  parameters (called \eqn{\mu}{mu} and \eqn{a}{shape} respectively).
   See \code{\link{Links}} for more choices.
 
 
@@ -80,32 +80,32 @@ gamma2(lmu = "loge", lshape = "loge",
 \details{
   This distribution can model continuous skewed responses.
   The density function is given by
-  \deqn{f(y;\mu,\lambda) = \frac{\exp(-\lambda y / \mu) \times
-               (\lambda y / \mu)^{\lambda-1}
-               \times \lambda}{
-               \mu \times \Gamma(\lambda)}}{%
+  \deqn{f(y;\mu,a) = \frac{\exp(-a y / \mu) \times
+               (a y / \mu)^{a-1}
+               \times a}{
+               \mu \times \Gamma(a)}}{%
    f(y;mu,shape) = exp(-shape * y / mu) y^(shape-1) shape^(shape) / 
           [mu^(shape) * gamma(shape)]}
   for
   \eqn{\mu > 0}{mu > 0},
-  \eqn{\lambda > 0}{shape > 0}
+  \eqn{a > 0}{shape > 0}
   and \eqn{y > 0}.
   Here,
   \eqn{\Gamma(\cdot)}{gamma()} is the gamma
   function, as in \code{\link[base:Special]{gamma}}.
   The mean of \emph{Y} is \eqn{\mu=\mu}{mu=mu} (returned as the fitted
-  values) with variance \eqn{\sigma^2 = \mu^2 / \lambda}{sigma^2 =
-  mu^2 / shape}.  If \eqn{0<\lambda<1}{0<shape<1} then the density has a
+  values) with variance \eqn{\sigma^2 = \mu^2 / a}{sigma^2 =
+  mu^2 / shape}.  If \eqn{0<a<1}{0<shape<1} then the density has a
   pole at the origin and decreases monotonically as \eqn{y} increases.
-  If \eqn{\lambda=1}{shape=1} then this corresponds to the exponential
-  distribution.  If \eqn{\lambda>1}{shape>1} then the density is zero at the
-  origin and is unimodal with mode at \eqn{y = \mu - \mu / \lambda}{y =
+  If \eqn{a=1}{shape=1} then this corresponds to the exponential
+  distribution.  If \eqn{a>1}{shape>1} then the density is zero at the
+  origin and is unimodal with mode at \eqn{y = \mu - \mu / a}{y =
   mu - mu / shape}; this can be achieved with \code{lshape="loglog"}.
 
 
   By default, the two linear/additive predictors are
   \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and
-  \eqn{\eta_2=\log(\lambda)}{eta2=log(shape)}.
+  \eqn{\eta_2=\log(a)}{eta2=log(shape)}.
   This family function implements Fisher scoring and the working
   weight matrices are diagonal.
 
@@ -122,6 +122,7 @@ gamma2(lmu = "loge", lshape = "loge",
   The object is used by modelling functions such as \code{\link{vglm}}
   and \code{\link{vgam}}.
 
+
 }
 \references{
   The parameterization of this \pkg{VGAM} family function is the
@@ -155,14 +156,16 @@ McCullagh, P. and Nelder, J. A. (1989)
 
 \seealso{
   \code{\link{gamma1}} for the 1-parameter gamma distribution,
-  \code{\link{gamma2.ab}} for another parameterization of
-  the 2-parameter gamma distribution,
+  \code{\link{gammaR}} for another parameterization of
+  the 2-parameter gamma distribution that is directly matched
+  with \code{\link[stats]{rgamma}},
   \code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution,
-  \code{\link{expexp}},
+  \code{\link{expexpff}},
   \code{\link[stats]{GammaDist}},
   \code{\link{golf}},
   \code{\link{CommonVGAMffArguments}},
-  \code{\link{simulate.vlm}}.
+  \code{\link{simulate.vlm}},
+  \code{\link{negloge}}.
 
 
 }
@@ -172,13 +175,13 @@ gdata <- data.frame(y = rgamma(n = 100, shape = exp(1)))
 fit1 <- vglm(y ~ 1, gamma1, data = gdata)
 fit2 <- vglm(y ~ 1, gamma2, data = gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
-Coef(fit2)
+c(Coef(fit2), colMeans(gdata))
 
 # Essentially a 2-parameter gamma
-gdata <- data.frame(y = rgamma(n = 500, rate = exp(1), shape = exp(2)))
+gdata <- data.frame(y = rgamma(n = 500, rate = exp(-1), shape = exp(2)))
 fit2 <- vglm(y ~ 1, gamma2, data = gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
-Coef(fit2)
+c(Coef(fit2), colMeans(gdata))
 summary(fit2)
 }
 \keyword{models}
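
The rewritten density shows that gamma2's (mu, a) pairing maps onto
rgamma() with shape = a and scale = mu / a; a quick sketch checking the
quoted mean and variance:

  mu <- 3; a <- exp(1)
  y <- rgamma(1e5, shape = a, scale = mu / a)
  c(mean(y), mu)        # Mean should be mu
  c(var(y),  mu^2 / a)  # Variance should be mu^2 / shape
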
diff --git a/man/gamma2.ab.Rd b/man/gammaR.Rd
similarity index 61%
rename from man/gamma2.ab.Rd
rename to man/gammaR.Rd
index 889efd1..c4eb840 100644
--- a/man/gamma2.ab.Rd
+++ b/man/gammaR.Rd
@@ -1,16 +1,19 @@
-\name{gamma2.ab}
-\alias{gamma2.ab}
+\name{gammaR}
+\alias{gammaR}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ 2-parameter Gamma Distribution }
 \description{ Estimates the 2-parameter gamma distribution
   by maximum likelihood estimation.
 }
 \usage{
-gamma2.ab(lrate = "loge", lshape = "loge",
-          irate = NULL,   ishape = NULL, expected = TRUE, zero = 2)
+gammaR(lrate = "loge", lshape = "loge", irate = NULL,
+       ishape = NULL, lss = TRUE, zero = ifelse(lss, -2, -1))
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+% \item{nowarning}{ Logical. Suppress a warning? }
+
+
   \item{lrate, lshape}{
   Link functions applied to the (positive) \emph{rate} and \emph{shape}
   parameters.
@@ -18,12 +21,15 @@ gamma2.ab(lrate = "loge", lshape = "loge",
 
 
   }
-  \item{expected}{
-  Logical. Use Fisher scoring? The default is yes, otherwise
-  Newton-Raphson is used.
+% \item{expected}{
+% Logical. Use Fisher scoring? The default is yes, otherwise
+% Newton-Raphson is used.
+
+% expected = TRUE,
+
+% }
 
 
-  }
   \item{irate, ishape}{
   Optional initial values for \emph{rate} and \emph{shape}.
   A \code{NULL} means a value is computed internally.
@@ -31,12 +37,19 @@ gamma2.ab(lrate = "loge", lshape = "loge",
 
 
   }
-  \item{zero}{
-  An integer specifying which
-  linear/additive predictor is to be modelled as an intercept only.
-  If assigned, the single value should be either 1 or 2 or \code{NULL}.
-  The default is to model \eqn{shape} as an intercept only.
-  A value \code{NULL} means neither 1 or 2.
+
+
+% \item{zero}{
+% An integer specifying which
+% linear/additive predictor is to be modelled as an intercept only.
+% If assigned, the single value should be either 1 or 2 or \code{NULL}.
+% The default is to model \eqn{shape} as an intercept only.
+% A value \code{NULL} means neither 1 or 2.
+% }
+
+
+  \item{zero, lss}{
+  Details at \code{\link{CommonVGAMffArguments}}.
 
 
   }
@@ -54,16 +67,20 @@ gamma2.ab(lrate = "loge", lshape = "loge",
   \eqn{\sigma^2 = \mu^2 /shape = shape/rate^2}{sigma^2 = 
        mu^2 /shape = shape/rate^2}.
   By default, the two linear/additive predictors are
-  \eqn{\eta_1 = \log(rate)}{eta1 = log(rate)} and
-  \eqn{\eta_2 = \log(shape)}{eta2 = log(shape)}.
+  \eqn{\eta_1 = \log(shape)}{eta1 = log(shape)} and
+  \eqn{\eta_2 = \log(rate)}{eta2 = log(rate)}.
+
+
+
 
 
-  The argument \code{expected} refers to the type of information
-  matrix. The expected information matrix corresponds to Fisher scoring
-  and is numerically better here. The observed information matrix
-  corresponds to the Newton-Raphson algorithm and may be withdrawn
-  from the family function in the future.  If both algorithms work then
-  the differences in the results are often not huge.
+% expected = FALSE does not work well. 20140828.
+% The argument \code{expected} refers to the type of information
+% matrix. The expected information matrix corresponds to Fisher scoring
+% and is numerically better here. The observed information matrix
+% corresponds to the Newton-Raphson algorithm and may be withdrawn
+% from the family function in the future.  If both algorithms work then
+% the differences in the results are often not huge.
 
 
 }
@@ -88,8 +105,10 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 \author{ T. W. Yee }
 \note{
   The parameters \eqn{rate} and \eqn{shape} match with the arguments
-  \code{rate} and \code{shape} of \code{\link[stats]{rgamma}}.  Often,
-  \eqn{scale = 1/rate} is used.
+  \code{rate} and \code{shape} of \code{\link[stats]{rgamma}}.
+  The order of the arguments agrees too.
+  Here, \eqn{scale = 1/rate} is used, so one can use \code{\link{negloge}}.
+  Multiple responses are handled.
 
 
   If \eqn{rate = 1} use the family function \code{\link{gamma1}} to
@@ -103,8 +122,10 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
   \code{\link{gamma2}} for another parameterization of
   the 2-parameter gamma distribution,
   \code{\link{bigamma.mckay}} for \emph{a} bivariate gamma distribution,
-  \code{\link{expexp}},
-  \code{\link{simulate.vlm}}.
+  \code{\link{expexpff}},
+  \code{\link{simulate.vlm}},
+  \code{\link[stats]{rgamma}},
+  \code{\link{negloge}}.
 
 
 }
@@ -112,13 +133,13 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 # Essentially a 1-parameter gamma
 gdata <- data.frame(y1 = rgamma(n <- 100, shape =  exp(1)))
 fit1 <- vglm(y1 ~ 1, gamma1, data = gdata, trace = TRUE)
-fit2 <- vglm(y1 ~ 1, gamma2.ab, data = gdata, trace = TRUE, crit = "coef")
+fit2 <- vglm(y1 ~ 1, gammaR, data = gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
 Coef(fit2)
 
 # Essentially a 2-parameter gamma
 gdata <- data.frame(y2 = rgamma(n = 500, rate = exp(1), shape = exp(2)))
-fit2 <- vglm(y2 ~ 1, gamma2.ab, data = gdata, trace = TRUE, crit = "coef")
+fit2 <- vglm(y2 ~ 1, gammaR, data = gdata, trace = TRUE, crit = "coef")
 coef(fit2, matrix = TRUE)
 Coef(fit2)
 summary(fit2)
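
The note that scale = 1/rate "so one can use negloge" suggests placing
a log link on the scale while staying in the rate parameterization; a
hedged sketch (negloge is assumed to be the eta = -log(theta) link the
text refers to):

  gdata <- data.frame(y = rgamma(1000, shape = exp(2), rate = exp(1)))
  fit <- vglm(y ~ 1, gammaR(lrate = "negloge"), data = gdata, trace = TRUE)
  coef(fit, matrix = TRUE)  # The rate's eta is -log(rate) = log(scale)
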
diff --git a/man/gammahyp.Rd b/man/gammahyperbola.Rd
similarity index 88%
rename from man/gammahyp.Rd
rename to man/gammahyperbola.Rd
index f6cde5a..b5f77b2 100644
--- a/man/gammahyp.Rd
+++ b/man/gammahyperbola.Rd
@@ -1,5 +1,5 @@
-\name{gammahyp}
-\alias{gammahyp}
+\name{gammahyperbola}
+\alias{gammahyperbola}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Gamma Hyperbola Bivariate Distribution }
 \description{
@@ -9,7 +9,7 @@
 
 }
 \usage{
-gammahyp(ltheta = "loge", itheta = NULL, expected = FALSE)
+gammahyperbola(ltheta = "loge", itheta = NULL, expected = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -49,6 +49,7 @@ gammahyp(ltheta = "loge", itheta = NULL, expected = FALSE)
   \eqn{1 + 1/\theta}{1 + 1/theta}.
 
 
+
   The default algorithm is Newton-Raphson because Fisher scoring tends to
   be much slower for this distribution.
 
@@ -74,7 +75,7 @@ Asymptotics and the theory of inference.
 }
 \author{ T. W. Yee }
 \note{
-  The response must be a two column matrix.
+  The response must be a two-column matrix.
 
 
 }
@@ -88,8 +89,7 @@ gdata <- data.frame(x2 = runif(nn <- 1000))
 gdata <- transform(gdata, theta = exp(-2 + x2))
 gdata <- transform(gdata, y1 = rexp(nn, rate = exp(-theta)/theta),
                           y2 = rexp(nn, rate = theta) + 1)
-fit <- vglm(cbind(y1, y2) ~ x2, gammahyp(expected = TRUE), data = gdata)
-fit <- vglm(cbind(y1, y2) ~ x2, gammahyp, data = gdata, trace = TRUE, crit = "coef")
+fit <- vglm(cbind(y1, y2) ~ x2, gammahyperbola(expected = TRUE), data = gdata)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
@@ -98,3 +98,6 @@ summary(fit)
 \keyword{models}
 \keyword{regression}
 
+% fit <- vglm(cbind(y1, y2) ~ x2, gammahyperbola, data = gdata, trace = TRUE, crit = "coef")
+
+
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index d8aa612..d186653 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -120,7 +120,7 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
   \code{\link{huber2}},
   \code{\link{lqnorm}},
   \code{\link{binormal}},
-  \code{\link{SUR}}.
+  \code{\link{SURff}}.
   \code{vlm},
   \code{\link{vglm}},
   \code{\link{vgam}},
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index c77c9e6..f4268c9 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -124,9 +124,9 @@ More improvements could be made here.
     \code{\link{sinmad}},
     \code{\link{fisk}},
     \code{\link{lomax}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}},
+    \code{\link{inv.paralogistic}},
     \code{\link{lino}}.
 
 
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index e5dcc2d..fa5cfeb 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -1,5 +1,5 @@
-\name{gengamma}
-\alias{gengamma}
+\name{gengamma.stacy}
+\alias{gengamma.stacy}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Generalized Gamma distribution family function }
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-gengamma(lscale = "loge", ld = "loge", lk = "loge",
-         iscale = NULL, id = NULL, ik = NULL, zero = NULL)
+gengamma.stacy(lscale = "loge", ld = "loge", lk = "loge",
+               iscale = NULL, id = NULL, ik = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -101,6 +101,8 @@ Rayleigh          \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
   even with two or three hundred observations.
   With covariates, even more observations are needed to increase the
   chances of convergence.
+  Using covariates is not advised unless the sample size is at least
+  a few thousand.
 
 
 }
@@ -115,7 +117,7 @@ Rayleigh          \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
 
 }
 \seealso{
-  \code{\link{rgengamma}},
+  \code{\link{rgengamma.stacy}},
   \code{\link{gamma1}},
   \code{\link{gamma2}},
   \code{\link{prentice74}},
@@ -124,21 +126,20 @@ Rayleigh          \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
 
 }
 \examples{
-\dontrun{ k <- exp(-1); Scale = exp(1)
+k <- exp(-1); Scale <- exp(1)
 gdata <- data.frame(y = rgamma(1000, shape = k, scale = Scale))
-fit <- vglm(y ~ 1, gengamma, data = gdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-
-# Another example
-gdata <- data.frame(x2 = runif(nn <- 5000))
-gdata <- transform(gdata, Scale = exp(1),
-                          d = exp( 0 + 1.2* x2),
-                          k = exp(-1 + 2  * x2))
-gdata <- transform(gdata, y = rgengamma(nn, scale = Scale, d = d, k = k))
-fit <- vglm(y ~ x2, gengamma(zero = 1, iscale = 6), data = gdata, trace = TRUE)
-fit <- vglm(y ~ x2, gengamma(zero = 1), data = gdata, trace = TRUE, maxit = 50)
+fit <- vglm(y ~ 1, gengamma.stacy, data = gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 }
-}
 \keyword{models}
 \keyword{regression}
+
+%# Another example
+%gdata <- data.frame(x2 = runif(nn <- 5000))
+%gdata <- transform(gdata, Scale = exp(1),
+%                          d = exp( 0 + 1.2* x2),
+%                          k = exp(-1 + 2  * x2))
+%gdata <- transform(gdata, y = rgengamma.stacy(nn, scale = Scale, d = d, k = k))
+%fit <- vglm(y ~ x2, gengamma.stacy(zero = 1, iscale = 6), data = gdata, trace = TRUE)
+%fit <- vglm(y ~ x2, gengamma.stacy(zero = 1), data = gdata, trace = TRUE, maxit = 50)
+%coef(fit, matrix = TRUE)
diff --git a/man/gengammaUC.Rd b/man/gengammaUC.Rd
index e957c19..b09d2ec 100644
--- a/man/gengammaUC.Rd
+++ b/man/gengammaUC.Rd
@@ -1,9 +1,9 @@
 \name{gengammaUC}
 \alias{gengammaUC}
-\alias{dgengamma}
-\alias{pgengamma}
-\alias{qgengamma}
-\alias{rgengamma}
+\alias{dgengamma.stacy}
+\alias{pgengamma.stacy}
+\alias{qgengamma.stacy}
+\alias{rgengamma.stacy}
 \title{The Generalized Gamma Distribution }
 \description{
   Density, distribution function, quantile function and random
@@ -14,10 +14,10 @@
 
 }
 \usage{
-dgengamma(x, scale = 1, d = 1, k = 1, log = FALSE)
-pgengamma(q, scale = 1, d = 1, k = 1)
-qgengamma(p, scale = 1, d = 1, k = 1)
-rgengamma(n, scale = 1, d = 1, k = 1)
+dgengamma.stacy(x, scale = 1, d = 1, k = 1, log = FALSE)
+pgengamma.stacy(q, scale = 1, d = 1, k = 1)
+qgengamma.stacy(p, scale = 1, d = 1, k = 1)
+rgengamma.stacy(n, scale = 1, d = 1, k = 1)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -34,10 +34,10 @@ rgengamma(n, scale = 1, d = 1, k = 1)
 
 }
 \value{
-  \code{dgengamma} gives the density,
-  \code{pgengamma} gives the distribution function,
-  \code{qgengamma} gives the quantile function, and
-  \code{rgengamma} generates random deviates.
+  \code{dgengamma.stacy} gives the density,
+  \code{pgengamma.stacy} gives the distribution function,
+  \code{qgengamma.stacy} gives the quantile function, and
+  \code{rgengamma.stacy} generates random deviates.
 
 
 }
@@ -51,7 +51,7 @@ Parameter estimation for a generalized gamma distribution.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{gengamma}}, the \pkg{VGAM} family function
+  See \code{\link{gengamma.stacy}}, the \pkg{VGAM} family function
   for estimating the generalized gamma distribution
   by maximum likelihood estimation,
   for formulae and other details.
@@ -63,20 +63,20 @@ Parameter estimation for a generalized gamma distribution.
 % \note{
 % }
 \seealso{
-  \code{\link{gengamma}}.
+  \code{\link{gengamma.stacy}}.
 
 
 }
 \examples{
 \dontrun{ x <- seq(0, 14, by = 0.01); d <- 1.5; Scale <- 2; k <- 6
-plot(x, dgengamma(x, Scale, d, k), type = "l", col = "blue", ylim = 0:1,
+plot(x, dgengamma.stacy(x, Scale, d = d, k = k), type = "l", col = "blue", ylim = 0:1,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
 abline(h = 0, col = "blue", lty = 2)
-lines(qgengamma(seq(0.05,0.95,by = 0.05), Scale, d, k),
-      dgengamma(qgengamma(seq(0.05,0.95,by = 0.05), Scale, d, k),
-                Scale, d, k), col = "purple", lty = 3, type = "h")
-lines(x, pgengamma(x, Scale, d, k), type = "l", col = "orange")
+lines(qgengamma.stacy(seq(0.05, 0.95, by = 0.05), Scale, d = d, k = k),
+      dgengamma.stacy(qgengamma.stacy(seq(0.05, 0.95, by = 0.05), Scale, d = d, k = k),
+                      Scale, d = d, k = k), col = "purple", lty = 3, type = "h")
+lines(x, pgengamma.stacy(x, Scale, d = d, k = k), type = "l", col = "orange")
 abline(h = 0, lty = 2) }
 }
 \keyword{distribution}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index 3f00bc4..e1bb40d 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -120,8 +120,8 @@ New York, USA: Marcel Dekker.
 }
 \examples{
 gdata <- data.frame(x2 = runif(nn <- 200))
-gdata <- transform(gdata, y = rpois(nn, exp(2 - x2)))  # Ordinary Poisson data
-fit <- vglm(y ~ x2, genpoisson(zero = 1), data = gdata, trace = TRUE)
+gdata <- transform(gdata, y1 = rpois(nn, exp(2 - x2)))  # Ordinary Poisson data
+fit <- vglm(y1 ~ x2, genpoisson(zero = 1), data = gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 summary(fit)
 }
diff --git a/man/genrayleigh.Rd b/man/genrayleigh.Rd
index df0ccd0..7a29177 100644
--- a/man/genrayleigh.Rd
+++ b/man/genrayleigh.Rd
@@ -8,20 +8,20 @@
 
 }
 \usage{
-genrayleigh(lshape = "loge", lscale = "loge",
-            ishape = NULL,   iscale = NULL,
-            tol12 = 1e-05, nsimEIM = 300, zero = 1)
+genrayleigh(lscale = "loge", lshape = "loge",
+            iscale = NULL,   ishape = NULL,
+            tol12 = 1e-05, nsimEIM = 300, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape, lscale}{
-  Link function for the two positive parameters, shape and scale.
+  \item{lscale, lshape}{
+  Link function for the two positive parameters, scale and shape.
   See \code{\link{Links}} for more choices.
 
   }
-  \item{ishape, iscale}{
+  \item{iscale, ishape}{
   Numeric.
-  Optional initial values for the shape and scale parameters.
+  Optional initial values for the scale and shape parameters.
 
   }
   \item{nsimEIM, zero}{
@@ -37,11 +37,11 @@ genrayleigh(lshape = "loge", lscale = "loge",
 }
 \details{
   The generalized Rayleigh distribution has density function
-  \deqn{f(y;a = shape,b = scale)  =
-  (2 a y/b^{2}) e^{-(y/b)^{2}} (1 - e^{-(y/b)^{2}})^{a-1}}{%
-  (2*a*y/b^2) * e^(-(y/b)^2) * (1 - e^(-(y/b)^2))^(a-1)}
+  \deqn{f(y;b = scale,s = shape)  =
+  (2 s y/b^{2}) e^{-(y/b)^{2}} (1 - e^{-(y/b)^{2}})^{s-1}}{%
+  (2*s*y/b^2) * e^(-(y/b)^2) * (1 - e^(-(y/b)^2))^(s-1)}
   where \eqn{y > 0} and the two parameters,
-  \eqn{a} and \eqn{b}, are positive.
+  \eqn{b} and \eqn{s}, are positive.
   The mean cannot be expressed nicely so the median is returned as 
   the fitted values.
   Applications of the generalized Rayleigh distribution include modeling
@@ -59,8 +59,7 @@ genrayleigh(lshape = "loge", lscale = "loge",
 }
 \references{
   Kundu, D., Raqab, M. C. (2005).
-  Generalized Rayleigh distribution: different 
-  methods of estimations.
+  Generalized Rayleigh distribution: different methods of estimations.
   \emph{Computational Statistics and Data Analysis},
   \bold{49}, 187--200.
 
@@ -81,9 +80,9 @@ genrayleigh(lshape = "loge", lscale = "loge",
 
 }
 \examples{
-shape <- exp(1); scale <- exp(1)
-rdata <- data.frame(y = rgenray(n = 1000, shape, scale))
-fit <- vglm(y ~ 1, genrayleigh, rdata, trace = TRUE)
+Scale <- exp(1); shape <- exp(1)
+rdata <- data.frame(y = rgenray(n = 1000, scale = Scale, shape = shape))
+fit <- vglm(y ~ 1, genrayleigh, data = rdata, trace = TRUE)
 c(with(rdata, mean(y)), head(fitted(fit), 1))
 coef(fit, matrix = TRUE)
 Coef(fit)
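
The median returned as the fitted value has a closed form under the
density above, since F(y) = (1 - exp(-(y/b)^2))^s; a sketch checking it
against the quantile function from the next file, reusing Scale and
shape from the example:

  Scale * sqrt(-log(1 - 0.5^(1/shape)))       # Closed-form median
  qgenray(0.5, scale = Scale, shape = shape)  # Should match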
diff --git a/man/genrayleighUC.Rd b/man/genrayleighUC.Rd
index 665f552..49c9714 100644
--- a/man/genrayleighUC.Rd
+++ b/man/genrayleighUC.Rd
@@ -12,10 +12,10 @@
 
 }
 \usage{
-dgenray(x, shape, scale = 1, log = FALSE)
-pgenray(q, shape, scale = 1)
-qgenray(p, shape, scale = 1)
-rgenray(n, shape, scale = 1)
+dgenray(x, scale = 1, shape, log = FALSE)
+pgenray(q, scale = 1, shape)
+qgenray(p, scale = 1, shape)
+rgenray(n, scale = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -61,20 +61,20 @@ rgenray(n, shape, scale = 1)
 }
 \examples{
 \dontrun{
-shape <- 0.5; scale <- 1; nn <- 501
+shape <- 0.5; Scale <- 1; nn <- 501
 x <- seq(-0.10, 3.0, len = nn)
-plot(x, dgenray(x, shape, scale), type = "l", las = 1, ylim = c(0, 1.2),
-     ylab = paste("[dp]genray(shape = ", shape, ", scale = ", scale, ")"),
+plot(x, dgenray(x, shape, scale = Scale), type = "l", las = 1, ylim = c(0, 1.2),
+     ylab = paste("[dp]genray(shape = ", shape, ", scale = ", Scale, ")"),
      col = "blue", cex.main = 0.8,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
-lines(x, pgenray(x, shape, scale), col = "orange")
+lines(x, pgenray(x, shape, scale = Scale), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qgenray(probs, shape, scale)
-lines(Q, dgenray(Q, shape, scale), col = "purple", lty = 3, type = "h")
-lines(Q, pgenray(Q, shape, scale), col = "purple", lty = 3, type = "h")
+Q <- qgenray(probs, shape, scale = Scale)
+lines(Q, dgenray(Q, shape, scale = Scale), col = "purple", lty = 3, type = "h")
+lines(Q, pgenray(Q, shape, scale = Scale), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pgenray(Q, shape, scale) - probs))  # Should be 0
+max(abs(pgenray(Q, shape, scale = Scale) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/get.smart.Rd b/man/get.smart.Rd
index c299d06..aa86c15 100644
--- a/man/get.smart.Rd
+++ b/man/get.smart.Rd
@@ -48,17 +48,7 @@ is incremented beforehand, and then written back to
 
 }
 \examples{
-"my1" <- function(x, minx = min(x)) { # Here is a smart function
-  x <- x  # Needed for nested calls, e.g., sm.bs(sm.scale(x))
-  if (smart.mode.is("read")) {
-    smart  <- get.smart()
-    minx <- smart$minx  # Overwrite its value
-  } else
-    if(smart.mode.is("write"))
-        put.smart(list(minx = minx))
-    sqrt(x - minx)
-}
-attr(my1, "smart") <- TRUE 
+print(sm.min1)
 }
 %\keyword{smart}
 \keyword{models}
diff --git a/man/gev.Rd b/man/gev.Rd
index 96fc3d3..1a1634b 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -12,11 +12,11 @@
 gev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
     percentiles = c(95, 99), iscale = NULL, ishape = NULL,
     imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
-    giveWarning = TRUE, zero = 3)
+    type.fitted = c("percentiles", "mean"), giveWarning = TRUE, zero = 2:3)
 egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
      percentiles = c(95, 99), iscale = NULL, ishape = NULL,
      imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
-     giveWarning = TRUE, zero = 3)
+     type.fitted = c("percentiles", "mean"), giveWarning = TRUE, zero = 2:3)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -53,11 +53,27 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
 
 
   \item{percentiles}{
-  Numeric vector of percentiles used
-  for the fitted values. Values should be between 0 and 100.
-  However, if \code{percentiles = NULL}, then the mean
+  Numeric vector of percentiles used for the fitted values.
+  Values should be between 0 and 100.
+  This argument is ignored if \code{type.fitted = "mean"}.
+
+
+
+% 20140912: this is still true, but using 'type.fitted' is better.
+% However, if \code{percentiles = NULL}, then the mean
+% \eqn{\mu + \sigma (\Gamma(1-\xi)-1) / \xi}{mu + sigma * (gamma(1-xi)-1)/xi}
+% is returned, and this is only defined if \eqn{\xi<1}{xi<1}.
+
+
+  }
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}} for information.
+  The default is to use the \code{percentiles} argument.
+  If \code{"mean"} is chosen, then the mean
   \eqn{\mu + \sigma (\Gamma(1-\xi)-1) / \xi}{mu + sigma * (gamma(1-xi)-1)/xi}
-  is returned, and this is only defined if \eqn{\xi<1}{xi<1}.
+  is returned as the fitted values,
+  and these are only defined for \eqn{\xi<1}{xi<1}.
+
 
 
   }
@@ -253,8 +269,8 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
   \code{\link{guplot}},
   \code{\link{rlplot.egev}},
   \code{\link{gpd}},
-  \code{\link{weibull}},
-  \code{\link{frechet2}},
+  \code{\link{weibullR}},
+  \code{\link{frechet}},
   \code{\link{elogit}},
   \code{\link{oxtemp}},
   \code{\link{venice}}.
@@ -282,7 +298,7 @@ with(venice, lines(year, fitted(fit1)[,1], lty = "dashed", col = "blue"))
 legend("topleft", lty = "dashed", col = "blue", "Fitted 95 percentile")
 
 # Univariate example
-(fit <- vglm(maxtemp ~ 1, egev, oxtemp, trace = TRUE))
+(fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
 head(fitted(fit))
 coef(fit, matrix = TRUE)
 Coef(fit)
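
The mean quoted for type.fitted = "mean" can be sanity-checked by
simulation; a small sketch with arbitrary parameter values (assumes VGAM
is attached, and xi < 1 so that the mean exists):

    mu <- 0; sigma <- 1; xi <- 0.2  # Arbitrary values with xi < 1
    mean(rgev(n = 1e5, location = mu, scale = sigma, shape = xi))
    mu + sigma * (gamma(1 - xi) - 1) / xi  # Should be close to the above
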
diff --git a/man/gew.Rd b/man/gew.Rd
index 35f4f61..092fea7 100644
--- a/man/gew.Rd
+++ b/man/gew.Rd
@@ -44,7 +44,8 @@ dollars deflated by \eqn{P_2}.
   These data are a subset of a table in Boot and de Wit (1960),
   also known as the Grunfeld data.
   It is widely used in econometrics,
-  e.g., for seemingly unrelated regressions (see \code{SUR}).
+  e.g., for seemingly unrelated regressions
+  (see \code{\link[VGAM:SURff]{SURff}}).
 
 
   Here,
@@ -77,7 +78,7 @@ dollars deflated by \eqn{P_2}.
 
 }
 \seealso{
-  \code{\link[VGAM:SUR]{SUR}},
+  \code{\link[VGAM:SURff]{SURff}},
   \url{http://statmath.wu.ac.at/~zeileis/grunfeld}.
 
 
diff --git a/man/gompertz.Rd b/man/gompertz.Rd
index 06ba745..9687123 100644
--- a/man/gompertz.Rd
+++ b/man/gompertz.Rd
@@ -8,12 +8,15 @@
 
 }
 \usage{
-gompertz(lshape = "loge", lscale = "loge",
-         ishape = NULL,   iscale = NULL,
-         nsimEIM = 500, zero = NULL)
+gompertz(lscale = "loge", lshape = "loge",
+         iscale = NULL,   ishape = NULL,
+         nsimEIM = 500, zero = NULL, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
   \item{lshape, lscale}{
   Parameter link functions applied to the
   shape parameter \code{a},
@@ -112,8 +115,8 @@ gdata <- transform(gdata, shape1 = exp(eta1),
                           shape2 = exp(eta2),
                           scale1 = exp(ceta1),
                           scale2 = exp(ceta2))
-gdata <- transform(gdata, y1 = rgompertz(nn, shape = shape1, scale = scale1),
-                          y2 = rgompertz(nn, shape = shape2, scale = scale2))
+gdata <- transform(gdata, y1 = rgompertz(nn, scale = scale1, shape = shape1),
+                          y2 = rgompertz(nn, scale = scale2, shape = shape2))
 
 fit1 <- vglm(y1 ~ 1,  gompertz, data = gdata, trace = TRUE)
 fit2 <- vglm(y2 ~ x2, gompertz, data = gdata, trace = TRUE)
diff --git a/man/gompertzUC.Rd b/man/gompertzUC.Rd
index 5d4469b..bb0c583 100644
--- a/man/gompertzUC.Rd
+++ b/man/gompertzUC.Rd
@@ -15,10 +15,10 @@
 
 }
 \usage{
-dgompertz(x, shape, scale = 1, log = FALSE)
-pgompertz(q, shape, scale = 1)
-qgompertz(p, shape, scale = 1)
-rgompertz(n, shape, scale = 1)
+dgompertz(x, scale = 1, shape, log = FALSE)
+pgompertz(q, scale = 1, shape)
+qgompertz(p, scale = 1, shape)
+rgompertz(n, scale = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -30,7 +30,7 @@ rgompertz(n, shape, scale = 1)
 
 
   }
-  \item{shape, scale}{positive shape and scale parameters. }
+  \item{scale, shape}{positive scale and shape parameters. }
 
 }
 \value{
@@ -60,20 +60,21 @@ rgompertz(n, shape, scale = 1)
 \examples{
 probs <- seq(0.01, 0.99, by = 0.01)
 Shape <- exp(1); Scale <- exp(1)
-max(abs(pgompertz(qgompertz(p = probs, Shape, Scale),
-                  Shape, Scale) - probs))  # Should be 0
+max(abs(pgompertz(qgompertz(p = probs, Scale, shape = Shape),
+                  Scale, shape = Shape) - probs))  # Should be 0
 
-\dontrun{ x <- seq(-0.1, 1.0, by = 0.01)
-plot(x, dgompertz(x, Shape, Scale), type = "l", col = "blue", las = 1,
+\dontrun{ x <- seq(-0.1, 1.0, by = 0.001)
+plot(x, dgompertz(x, Scale, shape = Shape), type = "l", col = "blue", las = 1,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles",
      ylab = "")
 abline(h = 0, col = "blue", lty = 2)
-lines(x, pgompertz(x, Shape, Scale), col = "orange")
+lines(x, pgompertz(x, Scale, shape = Shape), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qgompertz(probs, Shape, Scale)
-lines(Q, dgompertz(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
-pgompertz(Q, Shape, Scale) - probs  # Should be all zero
+Q <- qgompertz(probs, Scale, shape = Shape)
+lines(Q, dgompertz(Q, Scale, shape = Shape), col = "purple",
+      lty = 3, type = "h")
+pgompertz(Q, Scale, shape = Shape) - probs  # Should be all zero
 abline(h = probs, col = "purple", lty = 3) }
 }
 \keyword{distribution}
diff --git a/man/gpd.Rd b/man/gpd.Rd
index 22c321b..ea27ec4 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -10,7 +10,8 @@
 \usage{
 gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
     percentiles = c(90, 95), iscale = NULL, ishape = NULL,
-    tolshape0 = 0.001, giveWarning = TRUE, imethod = 1, zero = -2)
+    tolshape0 = 0.001, type.fitted = c("percentiles", "mean"),
+    giveWarning = TRUE, imethod = 1, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -18,11 +19,13 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   Numeric, values are recycled if necessary.
   The threshold value(s), called \eqn{\mu}{mu} below.
 
+
   }
   \item{lscale}{
   Parameter link function for the scale parameter \eqn{\sigma}{sigma}.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{lshape}{
   Parameter link function for the shape parameter \eqn{\xi}{xi}.
@@ -40,6 +43,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   \eqn{\xi > -A}{xi > -A}.
   The working weight matrices are positive definite if \eqn{A = 0.5}.
 
+
   }
 
 % \item{Offset}{
@@ -56,11 +60,28 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   Numeric vector of percentiles used
   for the fitted values. Values should be between 0 and 100.
   See the example below for illustration.
-  However, if \code{percentiles = NULL} then the mean
-  \eqn{\mu + \sigma / (1-\xi)}{mu + sigma / (1-xi)} is returned;
-  this is only defined if \eqn{\xi<1}{xi<1}.
+  This argument is ignored if \code{type.fitted = "mean"}.
+
+
+
+% However, if \code{percentiles = NULL} then the mean
+% \eqn{\mu + \sigma / (1-\xi)}{mu + sigma / (1-xi)} is returned;
+% this is only defined if \eqn{\xi<1}{xi<1}.
+
+
+  }
+  \item{type.fitted}{
+  See \code{\link{CommonVGAMffArguments}} for information.
+  The default is to use the \code{percentiles} argument.
+  If \code{"mean"} is chosen, then the mean
+  \eqn{\mu + \sigma / (1-\xi)}{mu + sigma / (1-xi)}
+  is returned as the fitted values,
+  and these are only defined for \eqn{\xi<1}{xi<1}.
+
+
 
   }
+
   \item{iscale, ishape}{
   Numeric. Optional initial values for \eqn{\sigma}{sigma}
   and \eqn{\xi}{xi}.
@@ -69,6 +90,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   Values of \code{ishape} should be between \eqn{-0.5} and \eqn{1}.
   Values of \code{iscale} should be positive.
 
+
   }
 % \item{rshape}{ 
 % Numeric, of length 2.
@@ -76,10 +98,12 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
 % The default values ensures the algorithm works (\eqn{\xi > -0.5}{xi > -0.5})
 % and the variance exists (\eqn{\xi < 0.5}{xi < 0.5}).
 
+
 % }
   \item{tolshape0, giveWarning}{
   Passed into \code{\link{dgpd}} when computing the log-likelihood.
 
+
   }
 
 % \item{tolshape0}{
@@ -89,12 +113,14 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
 % this value then it will be assumed zero and exponential distribution
 % derivatives etc. will be used.
 
+
 % }
   \item{imethod}{
    Method of initialization, either 1 or 2. The first is the method of
    moments, and the second is a variant of this.  If neither works, try
    assigning values to arguments \code{ishape} and/or \code{iscale}.
 
+
   }
   \item{zero}{
   An integer-valued vector specifying which
@@ -164,6 +190,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   Internally, \eqn{y-\mu}{y-mu} is computed.
   This \pkg{VGAM} family function can handle multiple
   responses, which are inputted as a matrix.
+  The response stored on the object is the original uncentred data.
 
 
 
@@ -184,6 +211,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   half-stepsizing whereas \code{\link{vgam}} doesn't. Half-stepsizing
   helps handle the problem of straying outside the parameter space.
 
+
 }  
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -225,10 +253,11 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
 
 \examples{
 # Simulated data from an exponential distribution (xi = 0)
-threshold <- 0.5
-gdata <- data.frame(y1 = threshold + rexp(n = 3000, rate = 2))
-fit <- vglm(y1 ~ 1, gpd(threshold = threshold), data = gdata, trace = TRUE)
+Threshold <- 0.5
+gdata <- data.frame(y1 = Threshold + rexp(n = 3000, rate = 2))
+fit <- vglm(y1 ~ 1, gpd(threshold = Threshold), data = gdata, trace = TRUE)
 head(fitted(fit))
+summary(depvar(fit))  # The original uncentred data
 coef(fit, matrix = TRUE)  # xi should be close to 0
 Coef(fit)
 summary(fit)
@@ -250,22 +279,24 @@ matlines(1:length(depvar(fit)), fitted(fit), lty = 2:3, lwd = 2) }
 
 # Another example
 gdata <- data.frame(x2 = runif(nn <- 2000))
-threshold <- 0; xi <- exp(-0.8) - 0.5
+Threshold <- 0; xi <- exp(-0.8) - 0.5
 gdata <- transform(gdata, y2 = rgpd(nn, scale = exp(1 + 0.1*x2), shape = xi))
-fit <- vglm(y2 ~ x2, gpd(threshold), data = gdata, trace = TRUE)
+fit <- vglm(y2 ~ x2, gpd(Threshold), data = gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 
 
 \dontrun{ # Nonparametric fits
-gdata <- transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
 # Not so recommended:
-fit1 <- vgam(yy ~ s(x2), gpd(threshold), data = gdata, trace = TRUE)
-par(mfrow = c(2,1))
-plotvgam(fit1, se = TRUE, scol = "blue")
+fit1 <- vgam(y2 ~ s(x2), gpd(Threshold), data = gdata, trace = TRUE)
+par(mfrow = c(2, 1))
+plot(fit1, se = TRUE, scol = "blue")
 # More recommended:
-fit2 <- vglm(yy ~ sm.bs(x2), gpd(threshold), data = gdata, trace = TRUE)
-plotvgam(fit2, se = TRUE, scol = "blue") }
+fit2 <- vglm(y2 ~ sm.bs(x2), gpd(Threshold), data = gdata, trace = TRUE)
+plot(as(fit2, "vgam"), se = TRUE, scol = "blue") }
 }
 \keyword{models}
 \keyword{regression}
 
+%
+% # gdata <- transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
+%
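
The same kind of check works for the GPD mean mu + sigma / (1 - xi) that
type.fitted = "mean" returns; a sketch with arbitrary parameter values
(assumes VGAM is attached):

    mu <- 0.5; sigma <- 2; xi <- 0.3  # Arbitrary values with xi < 1
    mean(rgpd(n = 1e5, location = mu, scale = sigma, shape = xi))
    mu + sigma / (1 - xi)  # Theoretical mean; defined only for xi < 1
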
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index ddf2657..fd5111b 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -97,7 +97,8 @@ London: Springer-Verlag.
 
 }
 \seealso{
-  \code{\link{gpd}}.
+  \code{\link{gpd}},
+  \code{\link[stats]{Exponential}}.
 
 
 }
diff --git a/man/grc.Rd b/man/grc.Rd
index 45a8c03..5e004da 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -390,9 +390,9 @@ model3 <- rcim(auuc, Rank = 1, fam = multinomial,
 fitted(model3)
 summary(model3)
 
-# Roughly median polish but not 100 percent reliable
+# Median polish but not 100 percent reliable. Maybe call alaplace2()...
 \dontrun{
-rcim0 <- rcim(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
+rcim0 <- rcim(auuc, fam = alaplace1(tau = 0.5), trace = FALSE, maxit = 500)
 round(fitted(rcim0), digits = 0)
 round(100 * (fitted(rcim0) - auuc) / auuc, digits = 0)  # Discrepancy
 depvar(rcim0)
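
The "median polish" comment above refers to Tukey's classical additive
row/column decomposition; stats::medpolish() gives the baseline that the
rcim() call approximates.  A sketch for comparison (the auuc counts are
coerced to a matrix):

    mp <- medpolish(as.matrix(auuc), trace.iter = FALSE)
    mp$overall + outer(mp$row, mp$col, "+")  # Classical fitted values
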
@@ -434,10 +434,10 @@ uqo.rcim1 <- rcim(Y, Rank = 1,
 
 # Plot 1
 par(mfrow = c(2, 2))
-plot(attr(pdata, "optima"), Coef(uqo.rcim1)@A,
-     col = "blue", type = "p", main = "(a) UQO optima",
-     xlab = "True optima", ylab = "Estimated (UQO) optima")
-mylm <- lm(Coef(uqo.rcim1)@A ~ attr(pdata, "optima"))
+plot(attr(pdata, "optimums"), Coef(uqo.rcim1)@A,
+     col = "blue", type = "p", main = "(a) UQO optimums",
+     xlab = "True optimums", ylab = "Estimated (UQO) optimums")
+mylm <- lm(Coef(uqo.rcim1)@A ~ attr(pdata, "optimums"))
 abline(coef = coef(mylm), col = "orange", lty = "dashed")
 
 # Plot 2
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index 5dab0c9..b6f93c4 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -186,7 +186,7 @@ egumbel(llocation = "identitylink", lscale = "loge",
 \seealso{
   \code{\link{rgumbel}},
   \code{\link{dgumbelII}},
-  \code{\link{cgumbel}},
+  \code{\link{cens.gumbel}},
   \code{\link{guplot}},
   \code{\link{gev}},
   \code{\link{egev}},
diff --git a/man/gumbelII.Rd b/man/gumbelII.Rd
index 53237bc..a93a863 100644
--- a/man/gumbelII.Rd
+++ b/man/gumbelII.Rd
@@ -10,16 +10,19 @@
 
 }
 \usage{
-gumbelII(lshape = "loge", lscale = "loge",
-         ishape = NULL,   iscale = NULL,
-         probs.y = c(0.2, 0.5, 0.8),
-         perc.out = NULL, imethod = 1, zero = -2)
+gumbelII(lscale = "loge", lshape = "loge", iscale = NULL, ishape = NULL,
+         probs.y = c(0.2, 0.5, 0.8), perc.out = NULL, imethod = 1,
+         zero = -1, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
   \item{lshape, lscale}{
   Parameter link functions applied to the 
-  (positive) shape parameter (called \eqn{a} below) and
+  (positive) shape parameter (called \eqn{s} below) and
   (positive) scale parameter (called \eqn{b} below).
   See \code{\link{Links}} for more choices.
 
@@ -39,13 +42,14 @@ gumbelII(lshape = "loge", lscale = "loge",
 
   }
   \item{imethod}{
-  See \code{\link{weibull}}.
+  See \code{\link{weibullR}}.
 
 
   }
   \item{zero, probs.y}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
+
   }
   \item{perc.out}{
   If the fitted values are to be quantiles then set this
@@ -56,18 +60,18 @@ gumbelII(lshape = "loge", lscale = "loge",
 }
 \details{
   The Gumbel-II density for a response \eqn{Y} is 
-  \deqn{f(y;a,b) = a y^{a-1} \exp[-(y/b)^a] / (b^a)}{%
-        f(y;a,b) = a y^(a-1) * exp(-(y/b)^a) / [b^a]}
-  for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+  \deqn{f(y;b,s) = s b^s y^{-s-1} \exp[-(y/b)^{-s}]}{%
+        f(y;b,s) = s * b^s * y^(-s-1) * exp(-(y/b)^(-s))}
+  for \eqn{b > 0}, \eqn{s > 0}, \eqn{y > 0}.
   The cumulative distribution function is 
-  \deqn{F(y;a,b) = \exp[-(y/b)^{-a}].}{%
-        F(y;a,b) = exp(-(y/b)^(-a)).}
-  The mean of \eqn{Y} is \eqn{b \, \Gamma(1 - 1/a)}{b * gamma(1 - 1/a)}
+  \deqn{F(y;b,s) = \exp[-(y/b)^{-s}].}{%
+        F(y;b,s) = exp(-(y/b)^(-s)).}
+  The mean of \eqn{Y} is \eqn{b \, \Gamma(1 - 1/s)}{b * gamma(1 - 1/s)}
   (returned as the fitted values)
-  when \eqn{a>1},
-  and the variance is \eqn{b^2\,\Gamma(1-2/a)}{b^2 * Gamma(1-2/a)} when
-  \eqn{a>2}.
-  This distribution looks similar to \code{\link{weibull}}, and is
+  when \eqn{s>1},
+  and the variance is
+  \eqn{b^2\,[\Gamma(1-2/s) - \Gamma^2(1-1/s)]}{b^2 * [Gamma(1-2/s) - Gamma(1-1/s)^2]}
+  when \eqn{s>2}.
+  This distribution looks similar to \code{\link{weibullR}}, and is
   due to Gumbel (1954).
 
 
@@ -98,7 +102,7 @@ U.S. Department of Commerce, National Bureau of Standards, USA.
 }
 \author{ T. W. Yee }
 \note{
-  See \code{\link{weibull}}.
+  See \code{\link{weibullR}}.
   This \pkg{VGAM} family function handles multiple responses.
 
 
@@ -127,20 +131,20 @@ U.S. Department of Commerce, National Bureau of Standards, USA.
 }
 \examples{
 gdata <- data.frame(x2 = runif(nn <- 1000))
-gdata <- transform(gdata, eta1  = +1,
-                          eta2  = -1 + 0.1 * x2,
+gdata <- transform(gdata, heta1  = +1,
+                          heta2  = -1 + 0.1 * x2,
                           ceta1 =  0,
                           ceta2 =  1)
-gdata <- transform(gdata, shape1 = exp(eta1),
-                          shape2 = exp(eta2),
+gdata <- transform(gdata, shape1 = exp(heta1),
+                          shape2 = exp(heta2),
                           scale1 = exp(ceta1),
                           scale2 = exp(ceta2))
 gdata <- transform(gdata,
-                   y1 = rgumbelII(nn, shape = shape1, scale = scale1),
-                   y2 = rgumbelII(nn, shape = shape2, scale = scale2))
+                   y1 = rgumbelII(nn, scale = scale1, shape = shape1),
+                   y2 = rgumbelII(nn, scale = scale2, shape = shape2))
 
 fit <- vglm(cbind(y1, y2) ~ x2,
-            gumbelII(zero = c(1, 2, 4)), data = gdata, trace = TRUE)
+            gumbelII(zero = c(1, 2, 3)), data = gdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 vcov(fit)
 summary(fit)
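
Both the Gumbel-II density/CDF pair and the mean b * gamma(1 - 1/s) given
in the details can be checked numerically; a minimal sketch with arbitrary
scale and shape values (assumes VGAM is attached):

    b <- 2; s <- 3  # s > 1 so that the mean exists
    integrate(dgumbelII, lower = 0.01, upper = 5, scale = b, shape = s)$value
    pgumbelII(5, scale = b, shape = s) - pgumbelII(0.01, scale = b, shape = s)
    mean(rgumbelII(n = 1e5, scale = b, shape = s))  # Simulated mean
    b * gamma(1 - 1/s)  # Theoretical mean
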
diff --git a/man/gumbelIIUC.Rd b/man/gumbelIIUC.Rd
index e2ad859..efb5a1c 100644
--- a/man/gumbelIIUC.Rd
+++ b/man/gumbelIIUC.Rd
@@ -14,10 +14,10 @@
 
 }
 \usage{
-dgumbelII(x, shape, scale = 1, log = FALSE)
-pgumbelII(q, shape, scale = 1)
-qgumbelII(p, shape, scale = 1)
-rgumbelII(n, shape, scale = 1)
+dgumbelII(x, scale = 1, shape, log = FALSE)
+pgumbelII(q, scale = 1, shape)
+qgumbelII(p, scale = 1, shape)
+rgumbelII(n, scale = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -55,21 +55,21 @@ rgumbelII(n, shape, scale = 1)
 }
 \examples{
 probs <- seq(0.01, 0.99, by = 0.01)
-Shape <- exp( 0.5); Scale <- exp(1);
-max(abs(pgumbelII(qgumbelII(p = probs, Shape, Scale),
-                  Shape, Scale) - probs))  # Should be 0
+Scale <- exp(1); Shape <- exp(0.5)
+max(abs(pgumbelII(qgumbelII(p = probs, shape = Shape, Scale),
+                  shape = Shape, Scale) - probs))  # Should be 0
 
 \dontrun{ x <- seq(-0.1, 10, by = 0.01);
-plot(x, dgumbelII(x, Shape, Scale), type = "l", col = "blue", las = 1,
+plot(x, dgumbelII(x, shape = Shape, Scale), type = "l", col = "blue", las = 1,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles",
      ylab = "", ylim = 0:1)
 abline(h = 0, col = "blue", lty = 2)
-lines(x, pgumbelII(x, Shape, Scale), col = "orange")
+lines(x, pgumbelII(x, shape = Shape, Scale), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qgumbelII(probs, Shape, Scale)
-lines(Q, dgumbelII(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
-pgumbelII(Q, Shape, Scale) - probs # Should be all zero
+Q <- qgumbelII(probs, shape = Shape, Scale)
+lines(Q, dgumbelII(Q, shape = Shape, Scale), col = "purple", lty = 3, type = "h")
+pgumbelII(Q, shape = Shape, Scale) - probs # Should be all zero
 abline(h = probs, col = "purple", lty = 3) }
 }
 \keyword{distribution}
diff --git a/man/hormone.Rd b/man/hormone.Rd
index 4375ba0..d8d2339 100644
--- a/man/hormone.Rd
+++ b/man/hormone.Rd
@@ -127,9 +127,9 @@ coef(fit2, matrix = TRUE)
 plot(Y ~ X, hormone, col = "blue")
 lines(fitted(fit2) ~ X, hormone, col = "red")
 # Add +- 2 SEs
-lines(fitted(fit2) + 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
+lines(fitted(fit2) + 2 * exp(predict(fit2)[, "loge(sd)"]) ~ X,
       hormone, col = "orange")
-lines(fitted(fit2) - 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
+lines(fitted(fit2) - 2 * exp(predict(fit2)[, "loge(sd)"]) ~ X,
       hormone, col = "orange")
 
 # Equation (3)
@@ -140,9 +140,9 @@ coef(fit3, matrix = TRUE)
 plot(Y ~ X, hormone, col = "blue")  # Does not look okay.
 lines(exp(predict(fit3)[, 1]) ~ X, hormone, col = "red")
 # Add +- 2 SEs
-lines(fitted(fit3) + 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
+lines(fitted(fit3) + 2 * exp(predict(fit3)[, "loge(sd)"]) ~ X,
       hormone, col = "orange")
-lines(fitted(fit3) - 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
+lines(fitted(fit3) - 2 * exp(predict(fit3)[, "loge(sd)"]) ~ X,
       hormone, col = "orange")
 }
 }
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
index d17a0ef..be4d593 100644
--- a/man/huberUC.Rd
+++ b/man/huberUC.Rd
@@ -95,11 +95,11 @@ rhuber(5)
 
 \dontrun{ mu <- 3; xx <- seq(-2, 7, len = 100)  # Plot CDF and PDF
 plot(xx, dhuber(xx, mu = mu), type = "l", col = "blue", las = 1, ylab = "",
-     main = "blue is density, red is cumulative distribution function",
+     main = "blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles",
      ylim = 0:1)
 abline(h = 0, col = "blue", lty = 2)
-lines(xx, phuber(xx, mu = mu), type = "l", col = "red")
+lines(xx, phuber(xx, mu = mu), type = "l", col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qhuber(probs, mu = mu)
 lines(Q, dhuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
diff --git a/man/hypersecant.Rd b/man/hypersecant.Rd
index f9ed64c..084b901 100644
--- a/man/hypersecant.Rd
+++ b/man/hypersecant.Rd
@@ -1,6 +1,6 @@
 \name{hypersecant}
 \alias{hypersecant}
-\alias{hypersecant.1}
+\alias{hypersecant01}
 \alias{nef.hs}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Hyperbolic Secant Distribution Family Function }
@@ -12,7 +12,7 @@
 }
 \usage{
   hypersecant(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
-hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
+hypersecant01(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -46,7 +46,7 @@ hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
 
 
 
-  Another parameterization is used for \code{hypersecant.1()}:
+  Another parameterization is used for \code{hypersecant01()}:
   let \eqn{Y = (logit U) / \pi}{Y = (logit U) / pi}.
   Then this uses
   \deqn{f(u;\theta)=(\cos(\theta)/\pi) \times
@@ -78,7 +78,7 @@ hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
   \emph{The Theory of Dispersion Models}.
   London: Chapman & Hall.
 % p.101, Eqn (3.37) for hypersecant().
-% p.101, Eqn (3.38) for hypersecant.1().
+% p.101, Eqn (3.38) for hypersecant01().
 
 
 Morris, C. N. (1982)
@@ -104,14 +104,14 @@ Natural exponential families with quadratic variance functions.
 \examples{
 hdata <- data.frame(x2 = rnorm(nn <- 200))
 hdata <- transform(hdata, y = rnorm(nn))  # Not very good data!
-fit <- vglm(y ~ x2, hypersecant, data = hdata, trace = TRUE, crit = "coef")
-coef(fit, matrix = TRUE)
-fit@misc$earg
+fit1 <- vglm(y ~ x2, hypersecant, data = hdata, trace = TRUE, crit = "coef")
+coef(fit1, matrix = TRUE)
+fit1@misc$earg
 
 # Not recommended:
-fit <- vglm(y ~ x2, hypersecant(link = "identitylink"), data = hdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-fit@misc$earg
+fit2 <- vglm(y ~ x2, hypersecant(link.theta = "identitylink"),
+             data = hdata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+fit2@misc$earg
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/invbinomial.Rd b/man/inv.binomial.Rd
similarity index 89%
rename from man/invbinomial.Rd
rename to man/inv.binomial.Rd
index 760d177..a6432fd 100644
--- a/man/invbinomial.Rd
+++ b/man/inv.binomial.Rd
@@ -1,5 +1,5 @@
-\name{invbinomial}
-\alias{invbinomial}
+\name{inv.binomial}
+\alias{inv.binomial}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Inverse Binomial Distribution Family Function}
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-invbinomial(lrho = elogit(min = 0.5, max = 1),
-            llambda = "loge", irho = NULL, ilambda = NULL, zero = NULL)
+inv.binomial(lrho = elogit(min = 0.5, max = 1),
+             llambda = "loge", irho = NULL, ilambda = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -95,7 +95,7 @@ Some elements of the working weight matrices use the expected
 information matrix while other elements use the observed
 information matrix.
 Yet to do: using the mean and the reciprocal of \eqn{\lambda}{lambda}
-results in a EIM that is diagonal.
+results in an EIM that is diagonal.
 
 
 }
@@ -108,7 +108,7 @@ results in a EIM that is diagonal.
 }
 \examples{
 idata <- data.frame(y = rnbinom(n <- 1000, mu = exp(3), size = exp(1)))
-fit <- vglm(y ~ 1, invbinomial, data = idata, trace = TRUE)
+fit <- vglm(y ~ 1, inv.binomial, data = idata, trace = TRUE)
 with(idata, c(mean(y), head(fitted(fit), 1)))
 summary(fit)
 coef(fit, matrix = TRUE)
@@ -119,4 +119,4 @@ sum(weights(fit, type = "work"))  # Sum of the working weights
 \keyword{models}
 \keyword{regression}
 
-%fit <- vglm(y ~ 1, invbinomial(ilambda = 1), trace = TRUE, crit = "c", checkwz = FALSE)
+%fit <- vglm(y ~ 1, inv.binomial(ilambda = 1), trace = TRUE, crit = "c", checkwz = FALSE)
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index 368c81a..893bc18 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -11,7 +11,7 @@
 \usage{
 inv.gaussianff(lmu = "loge", llambda = "loge",
                imethod = 1, ilambda = NULL,
-               parallel = FALSE, shrinkage.init = 0.99, zero = NULL)
+               parallel = FALSE, ishrinkage = 0.99, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 %apply.parint = FALSE,
@@ -30,7 +30,7 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
 
 
   }
-  \item{imethod, shrinkage.init, zero}{ 
+  \item{imethod, ishrinkage, zero}{ 
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
diff --git a/man/invlomax.Rd b/man/inv.lomax.Rd
similarity index 80%
rename from man/invlomax.Rd
rename to man/inv.lomax.Rd
index def8279..7a3203b 100644
--- a/man/invlomax.Rd
+++ b/man/inv.lomax.Rd
@@ -1,5 +1,5 @@
-\name{invlomax}
-\alias{invlomax}
+\name{inv.lomax}
+\alias{inv.lomax}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Inverse Lomax Distribution Family Function }
 \description{
@@ -7,8 +7,8 @@
   inverse Lomax distribution.
 }
 \usage{
-invlomax(lscale = "loge", lshape2.p = "loge",
-         iscale = NULL, ishape2.p = 1, zero = NULL)
+inv.lomax(lscale = "loge", lshape2.p = "loge",
+          iscale = NULL, ishape2.p = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -46,10 +46,14 @@ The inverse Lomax distribution has density
   for \eqn{b > 0}, \eqn{p > 0}, \eqn{y \geq 0}{y >= 0}.
 Here, \eqn{b} is the scale parameter \code{scale},
 and \eqn{p} is a shape parameter.
-The mean does not exist; \code{NA}s are returned as the fitted values.
+The mean does not seem to exist; the median is returned as the fitted values.
 
 
 
+% 20140826
+% The mean does not exist; \code{NA}s are returned as the fitted values.
+
+
 
 }
 \value{
@@ -77,7 +81,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \seealso{
-    \code{\link{Invlomax}},
+    \code{\link{Inv.lomax}},
     \code{\link{genbetaII}},
     \code{\link{betaII}},
     \code{\link{dagum}},
@@ -85,16 +89,16 @@ Hoboken, NJ, USA: Wiley-Interscience.
     \code{\link{fisk}},
     \code{\link{lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}},
+    \code{\link{inv.paralogistic}},
     \code{\link{simulate.vlm}}.
 
 
 }
 
 \examples{
-idata <- data.frame(y = rinvlomax(n = 2000, exp(2), exp(1)))
-fit <- vglm(y ~ 1, invlomax, data = idata, trace = TRUE)
-fit <- vglm(y ~ 1, invlomax(iscale = exp(2), ishape2.p = exp(1)), data = idata,
+idata <- data.frame(y = rinv.lomax(n = 2000, exp(2), exp(1)))
+fit <- vglm(y ~ 1, inv.lomax, data = idata, trace = TRUE)
+fit <- vglm(y ~ 1, inv.lomax(iscale = exp(2), ishape2.p = exp(1)), data = idata,
             trace = TRUE, epsilon = 1e-8)
 coef(fit, matrix = TRUE)
 Coef(fit)
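
A short check of the median-as-fitted-values behaviour, reusing the
parameter values from the example above (assumes VGAM is attached and that
fit is the second model):

    qinv.lomax(0.5, scale = exp(2), shape2.p = exp(1))  # Theoretical median
    median(rinv.lomax(n = 10000, scale = exp(2), shape2.p = exp(1)))
    head(fitted(fit), 1)  # Should be close to both of the above
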
diff --git a/man/invlomaxUC.Rd b/man/inv.lomaxUC.Rd
similarity index 62%
rename from man/invlomaxUC.Rd
rename to man/inv.lomaxUC.Rd
index 63e6b54..1c0b279 100644
--- a/man/invlomaxUC.Rd
+++ b/man/inv.lomaxUC.Rd
@@ -1,9 +1,9 @@
-\name{Invlomax}
-\alias{Invlomax}
-\alias{dinvlomax}
-\alias{pinvlomax}
-\alias{qinvlomax}
-\alias{rinvlomax}
+\name{Inv.lomax}
+\alias{Inv.lomax}
+\alias{dinv.lomax}
+\alias{pinv.lomax}
+\alias{qinv.lomax}
+\alias{rinv.lomax}
 \title{The Inverse Lomax Distribution}
 \description{
   Density, distribution function, quantile function and random
@@ -12,10 +12,10 @@
 
 }
 \usage{
-dinvlomax(x, scale = 1, shape2.p, log = FALSE)
-pinvlomax(q, scale = 1, shape2.p)
-qinvlomax(p, scale = 1, shape2.p)
-rinvlomax(n, scale = 1, shape2.p)
+dinv.lomax(x, scale = 1, shape2.p, log = FALSE)
+pinv.lomax(q, scale = 1, shape2.p)
+qinv.lomax(p, scale = 1, shape2.p)
+rinv.lomax(n, scale = 1, shape2.p)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -32,10 +32,10 @@ rinvlomax(n, scale = 1, shape2.p)
 
 }
 \value{
-  \code{dinvlomax} gives the density,
-  \code{pinvlomax} gives the distribution function,
-  \code{qinvlomax} gives the quantile function, and
-  \code{rinvlomax} generates random deviates.
+  \code{dinv.lomax} gives the density,
+  \code{pinv.lomax} gives the distribution function,
+  \code{qinv.lomax} gives the quantile function, and
+  \code{rinv.lomax} generates random deviates.
 
 
 
@@ -50,7 +50,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{invlomax}}, which is the \pkg{VGAM} family function
+  See \code{\link{inv.lomax}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
 
 
@@ -62,14 +62,14 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 \seealso{
-  \code{\link{invlomax}},
+  \code{\link{inv.lomax}},
   \code{\link{genbetaII}}.
 
 
 }
 \examples{
-idata <- data.frame(y = rinvlomax(n = 1000, exp(2), exp(1)))
-fit <- vglm(y ~ 1, invlomax, data = idata, trace = TRUE, crit = "coef")
+idata <- data.frame(y = rinv.lomax(n = 1000, exp(2), exp(1)))
+fit <- vglm(y ~ 1, inv.lomax, data = idata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
diff --git a/man/invparalogistic.Rd b/man/inv.paralogistic.Rd
similarity index 85%
rename from man/invparalogistic.Rd
rename to man/inv.paralogistic.Rd
index eec93b0..6649614 100644
--- a/man/invparalogistic.Rd
+++ b/man/inv.paralogistic.Rd
@@ -1,5 +1,5 @@
-\name{invparalogistic}
-\alias{invparalogistic}
+\name{inv.paralogistic}
+\alias{inv.paralogistic}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Inverse Paralogistic Distribution Family Function }
 \description{
@@ -7,8 +7,8 @@
   inverse paralogistic distribution.
 }
 \usage{
-invparalogistic(lshape1.a = "loge", lscale = "loge",
-                ishape1.a = 2, iscale = NULL, zero = NULL)
+inv.paralogistic(lshape1.a = "loge", lscale = "loge",
+                 ishape1.a = 2, iscale = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -77,13 +77,13 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \seealso{
-    \code{\link{Invparalogistic}},
+    \code{\link{Inv.paralogistic}},
     \code{\link{genbetaII}},
     \code{\link{betaII}},
     \code{\link{dagum}},
     \code{\link{sinmad}},
     \code{\link{fisk}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{lomax}},
     \code{\link{paralogistic}},
     \code{\link{simulate.vlm}}.
@@ -92,9 +92,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \examples{
-idata <- data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, invparalogistic, data = idata, trace = TRUE)
-fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.7, iscale = 7.3),
+idata <- data.frame(y = rinv.paralogistic(n = 3000, exp(1), exp(2)))
+fit <- vglm(y ~ 1, inv.paralogistic, data = idata, trace = TRUE)
+fit <- vglm(y ~ 1, inv.paralogistic(ishape1.a = 2.7, iscale = 7.3),
             data = idata, trace = TRUE, epsilon = 1e-8)
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/invparalogisticUC.Rd b/man/inv.paralogisticUC.Rd
similarity index 60%
rename from man/invparalogisticUC.Rd
rename to man/inv.paralogisticUC.Rd
index 6c352f1..bb1cf08 100644
--- a/man/invparalogisticUC.Rd
+++ b/man/inv.paralogisticUC.Rd
@@ -1,9 +1,9 @@
-\name{Invparalogistic}
-\alias{Invparalogistic}
-\alias{dinvparalogistic}
-\alias{pinvparalogistic}
-\alias{qinvparalogistic}
-\alias{rinvparalogistic}
+\name{Inv.paralogistic}
+\alias{Inv.paralogistic}
+\alias{dinv.paralogistic}
+\alias{pinv.paralogistic}
+\alias{qinv.paralogistic}
+\alias{rinv.paralogistic}
 \title{The Inverse Paralogistic Distribution}
 \description{
   Density, distribution function, quantile function and random
@@ -13,10 +13,10 @@
 
 }
 \usage{
-dinvparalogistic(x, shape1.a, scale = 1, log = FALSE)
-pinvparalogistic(q, shape1.a, scale = 1)
-qinvparalogistic(p, shape1.a, scale = 1)
-rinvparalogistic(n, shape1.a, scale = 1)
+dinv.paralogistic(x, shape1.a, scale = 1, log = FALSE)
+pinv.paralogistic(q, shape1.a, scale = 1)
+qinv.paralogistic(p, shape1.a, scale = 1)
+rinv.paralogistic(n, shape1.a, scale = 1)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -33,10 +33,10 @@ rinvparalogistic(n, shape1.a, scale = 1)
 
 }
 \value{
-  \code{dinvparalogistic} gives the density,
-  \code{pinvparalogistic} gives the distribution function,
-  \code{qinvparalogistic} gives the quantile function, and
-  \code{rinvparalogistic} generates random deviates.
+  \code{dinv.paralogistic} gives the density,
+  \code{pinv.paralogistic} gives the distribution function,
+  \code{qinv.paralogistic} gives the quantile function, and
+  \code{rinv.paralogistic} generates random deviates.
 
 
 }
@@ -50,7 +50,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{invparalogistic}}, which is the \pkg{VGAM} family function
+  See \code{\link{inv.paralogistic}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
 
 
@@ -62,14 +62,14 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 \seealso{
-  \code{\link{invparalogistic}},
+  \code{\link{inv.paralogistic}},
   \code{\link{genbetaII}}.
 
 
 }
 \examples{
-idata <- data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.1),
+idata <- data.frame(y = rinv.paralogistic(n = 3000, exp(1), exp(2)))
+fit <- vglm(y ~ 1, inv.paralogistic(ishape1.a = 2.1),
             data = idata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/is.parallel.Rd b/man/is.parallel.Rd
index d05d352..43e3dd4 100644
--- a/man/is.parallel.Rd
+++ b/man/is.parallel.Rd
@@ -61,8 +61,8 @@ is.parallel.vglm(object, type = c("term", "lm"), \dots)
 
 \examples{
 \dontrun{ require("VGAMdata")
-fit <- vglm(educ ~ sm.bs(age) * sex + ethnic,
-            cumulative(parallel = TRUE), xs.nz[1:200, ])
+fit <- vglm(educ ~ sm.bs(age) * sex + ethnicity,
+            cumulative(parallel = TRUE), data = head(xs.nz, 200))
 is.parallel(fit)
 is.parallel(fit, type = "lm")  # For each column of the LM matrix
 }
diff --git a/man/is.smart.Rd b/man/is.smart.Rd
index 8910685..d015353 100644
--- a/man/is.smart.Rd
+++ b/man/is.smart.Rd
@@ -44,18 +44,18 @@
 
 }
 \examples{
-is.smart(my1)  # TRUE
-is.smart(sm.poly) # TRUE
+is.smart(sm.min1)  # TRUE
+is.smart(sm.poly)  # TRUE
 library(splines)
-is.smart(sm.bs)   # TRUE
-is.smart(sm.ns)   # TRUE
+is.smart(sm.bs)  # TRUE
+is.smart(sm.ns)  # TRUE
 is.smart(tan)  # FALSE
 \dontrun{
-x <- rnorm(9)
-fit1 <- vglm(rnorm(9) ~ x, uninormal)
-is.smart(fit1)   # TRUE
-fit2 <- vglm(rnorm(9) ~ x, uninormal, smart = FALSE)
-is.smart(fit2)   # FALSE
+udata <- data.frame(x2 = rnorm(9))
+fit1 <- vglm(rnorm(9) ~ x2, uninormal, data = udata)
+is.smart(fit1)  # TRUE
+fit2 <- vglm(rnorm(9) ~ x2, uninormal, data = udata, smart = FALSE)
+is.smart(fit2)  # FALSE
 fit2@smart.prediction
 }
 }
diff --git a/man/levy.Rd b/man/levy.Rd
index 5913dba..e9e552a 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -3,37 +3,32 @@
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Levy Distribution Family Function }
 \description{
-Estimates the two parameters of the Levy distribution
+Estimates the scale parameter of the Levy distribution
 by maximum likelihood estimation.
 
 
 }
 \usage{
-levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
+levy(location = 0, lscale = "loge", iscale = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{delta}{
-  Location parameter. May be assigned a known value,
-  otherwise it is estimated (the default).
+  \item{location}{
+  Location parameter. Must have a known value.
+  Called \eqn{a} below.
 
-
-  }
-  \item{link.gamma}{
-  Parameter link function for the (positive) \eqn{\gamma}{gamma} parameter. 
-  See \code{\link{Links}} for more choices.
+% otherwise it is estimated (the default).
 
 
   }
-  \item{idelta}{
-  Initial value for the \eqn{\delta}{delta} parameter
-  (if it is to be estimated).
-  By default, an initial value is chosen internally.
+  \item{lscale}{
+  Parameter link function for the (positive) scale parameter \eqn{b}.
+  See \code{\link{Links}} for more choices.
 
 
   }
-  \item{igamma}{
-  Initial value for the \eqn{\gamma}{gamma} parameter.
+  \item{iscale}{
+  Initial value for the \eqn{b} parameter.
   By default, an initial value is chosen internally.
 
 
@@ -43,13 +38,16 @@ levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
   The Levy distribution is one of the three stable distributions
   whose density functions have a tractable form.
   The formula for the density is
- \deqn{f(y;\gamma,\delta) = \sqrt{\frac{\gamma}{2\pi}}
-       \exp \left( \frac{-\gamma}{2(y - \delta)}
-            \right) / (y - \delta)^{3/2} }{%
-  f(y;gamma,delta) = sqrt(gamma / (2 pi))
-       exp( -gamma / (2(y - delta))) / (y - \delta)^{3/2} }
-  where \eqn{\delta<y<\infty}{delta<y<Inf} and \eqn{\gamma>0}{gamma>0}.
+ \deqn{f(y;b) = \sqrt{\frac{b}{2\pi}}
+       \exp \left( \frac{-b}{2(y - a)}
+            \right) / (y - a)^{3/2} }{%
+  f(y;b) = sqrt(b / (2 pi))
+       exp( -b / (2(y - a))) / (y - a)^{3/2} }
+  where \eqn{a<y<\infty}{a<y<Inf} and \eqn{b>0}.
+  Note that if \eqn{a} is very close to \code{min(y)}
+  (where \code{y} is the response), then numerical problems will occur.
   The mean does not exist.
+  The median is returned as the fitted values.
 
 
 }
@@ -65,40 +63,52 @@ levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
   \emph{Stable Distributions: Models for Heavy Tailed Data}.
 
 
+% p.5
+
+
+
 }
 \author{ T. W. Yee }
-\note{
-  If \eqn{\delta}{delta} is given, then only one parameter is estimated
-  and the default is \eqn{\eta_1=\log(\gamma)}{eta1=log(gamma)}.
-  If \eqn{\delta}{delta} is not given, then \eqn{\eta_2=\delta}{eta2=delta}.
 
 
-}
+%\note{
+%  If \eqn{\delta}{delta} is given, then only one parameter is estimated
+%  and the default is \eqn{\eta_1=\log(\gamma)}{eta1=log(gamma)}.
+%  If \eqn{\delta}{delta} is not given, then \eqn{\eta_2=\delta}{eta2=delta}.
+%
+%
+%}
 
 
 \seealso{ 
-
   The Nolan article is at
   \url{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}.
 
 
+% \code{\link{dlevy}}.
+
+
 }
 \examples{
-nn <- 1000; delta <- 0
-mygamma <- 1  # log link ==> 0 is the answer
-ldata <- data.frame(y = delta + mygamma/rnorm(nn)^2)  # Levy(mygamma, delta)
+nn <- 1000; loc1 <- 0; loc2 <- 10
+myscale <- 1  # log link ==> 0 is the answer
+ldata <- data.frame(y1 = loc1 + myscale/rnorm(nn)^2,  # Levy(myscale, a)
+                    y2 = rlevy(nn, loc = loc2, scale = exp(+2)))
 
 # Cf. Table 1.1 of Nolan for Levy(1,0)
-with(ldata, sum(y > 1) / length(y))  # Should be 0.6827
-with(ldata, sum(y > 2) / length(y))  # Should be 0.5205
-
-fit <- vglm(y ~ 1, levy(delta = delta), data = ldata, trace = TRUE)  # 1 parameter
-fit <- vglm(y ~ 1, levy(idelta = delta, igamma = mygamma),
-           data = ldata, trace = TRUE)  # 2 parameters
-coef(fit, matrix = TRUE)
-Coef(fit)
-summary(fit)
-head(weights(fit, type = "work"))
+with(ldata, sum(y1 > 1) / length(y1))  # Should be 0.6827
+with(ldata, sum(y1 > 2) / length(y1))  # Should be 0.5205
+
+fit1 <- vglm(y1 ~ 1, levy(location = loc1), data = ldata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+summary(fit1)
+head(weights(fit1, type = "work"))
+
+fit2 <- vglm(y2 ~ 1, levy(location = loc2), data = ldata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+Coef(fit2)
+c(median = with(ldata, median(y2)), fitted.median = head(fitted(fit2), 1))
 }
 \keyword{models}
 \keyword{regression}
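
The tail probabilities quoted in the example follow from the closed-form
Levy CDF: for location a = 0 and scale b, F(y) = 2 * (1 - pnorm(sqrt(b/y))).
A sketch needing only base R:

    b <- 1  # Levy(scale = 1, location = 0), as in Nolan's Table 1.1
    1 - 2 * (1 - pnorm(sqrt(b / 1)))  # P(Y > 1), about 0.6827
    1 - 2 * (1 - pnorm(sqrt(b / 2)))  # P(Y > 2), about 0.5205
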
@@ -121,3 +131,9 @@ head(weights(fit, type = "work"))
 %%sum(y > 4) / length(y)  # Should be 0.3829
 %%sum(y > 5) / length(y)  # Should be 0.3453
 
+%fit <- vglm(y ~ 1, levy(idelta = delta, igamma = mygamma),
+%           data = ldata, trace = TRUE)  # 2 parameters
+
+
+
+
diff --git a/man/lgammaUC.Rd b/man/lgammaUC.Rd
index a5721b7..a0c809a 100644
--- a/man/lgammaUC.Rd
+++ b/man/lgammaUC.Rd
@@ -15,10 +15,10 @@
 
 }
 \usage{
-dlgamma(x, location = 0, scale = 1, k = 1, log = FALSE)
-plgamma(q, location = 0, scale = 1, k = 1)
-qlgamma(p, location = 0, scale = 1, k = 1)
-rlgamma(n, location = 0, scale = 1, k = 1)
+dlgamma(x, location = 0, scale = 1, shape = 1, log = FALSE)
+plgamma(q, location = 0, scale = 1, shape = 1)
+qlgamma(p, location = 0, scale = 1, shape = 1)
+rlgamma(n, location = 0, scale = 1, shape = 1)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -26,7 +26,7 @@ rlgamma(n, location = 0, scale = 1, k = 1)
   \item{n}{number of observations. Positive integer of length 1.}
   \item{location}{the location parameter \eqn{a}.}
   \item{scale}{the (positive) scale parameter \eqn{b}.}
-  \item{k}{the (positive) shape parameter \eqn{k}.}
+  \item{shape}{the (positive) shape parameter \eqn{k}.}
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
@@ -54,7 +54,7 @@ London: Imperial College Press.
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{lgammaff}}, the \pkg{VGAM} family function for
+  See \code{\link{lgamma1}}, the \pkg{VGAM} family function for
   estimating the one parameter standard log-gamma distribution by maximum
   likelihood estimation, for formulae and other details.  Apart from
   \code{n}, all the above arguments may be vectors and are recycled to
@@ -63,28 +63,30 @@ London: Imperial College Press.
 
 }
 \note{
-  The \pkg{VGAM} family function \code{\link{lgamma3ff}} is
+  The \pkg{VGAM} family function \code{\link{lgamma3}} is
   for the three parameter (nonstandard) log-gamma distribution.
 
 
 }
 \seealso{
-  \code{\link{lgammaff}},
+  \code{\link{lgamma1}},
   \code{\link{prentice74}}.
 
 
 }
 \examples{
-\dontrun{ loc <- 1; Scale <- 1.5; k <- 1.4
+\dontrun{ loc <- 1; Scale <- 1.5; shape <- 1.4
 x <- seq(-3.2, 5, by = 0.01)
-plot(x, dlgamma(x, loc, Scale, k), type = "l", col = "blue", ylim = 0:1,
+plot(x, dlgamma(x, loc = loc, scale = Scale, shape = shape), type = "l",
+     col = "blue", ylim = 0:1,
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
 abline(h = 0, col = "blue", lty = 2)
-lines(qlgamma(seq(0.05, 0.95, by = 0.05), loc, Scale, k),
-      dlgamma(qlgamma(seq(0.05, 0.95, by = 0.05), loc, Scale, k),
-              loc, Scale, k), col = "purple", lty = 3, type = "h")
-lines(x, plgamma(x, loc, Scale, k), type = "l", col = "orange")
+lines(qlgamma(seq(0.05, 0.95, by = 0.05), loc = loc, scale = Scale,
+              shape = shape),
+      dlgamma(qlgamma(seq(0.05, 0.95, by = 0.05), loc = loc, scale = Scale,
+                      shape = shape),
+              loc = loc, scale = Scale, shape = shape),
+      col = "purple", lty = 3, type = "h")
+lines(x, plgamma(x, loc = loc, scale = Scale, shape = shape), col = "orange")
 abline(h = 0, lty = 2) }
 }
 \keyword{distribution}
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index ff10f36..61a6f09 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -1,6 +1,6 @@
-\name{lgammaff}
-\alias{lgammaff}
-\alias{lgamma3ff}
+\name{lgamma1}
+\alias{lgamma1}
+\alias{lgamma3}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Log-gamma Distribution Family Function }
 \description{
@@ -9,9 +9,9 @@
 
 }
 \usage{
-lgammaff(link = "loge", init.k = NULL)
-lgamma3ff(llocation = "identitylink", lscale = "loge", lshape = "loge",
-          ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
+lgamma1(lshape = "loge", ishape = NULL)
+lgamma3(llocation = "identitylink", lscale = "loge", lshape = "loge",
+        ilocation = NULL, iscale = NULL, ishape = 1, zero = 2:3)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -23,14 +23,14 @@ lgamma3ff(llocation = "identitylink", lscale = "loge", lshape = "loge",
 
 
   }
-  \item{link, lshape}{
+  \item{lshape}{
   Parameter link function applied to 
   the positive shape parameter \eqn{k}. 
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{init.k, ishape}{
+  \item{ishape}{
   Initial value for \eqn{k}.
   If given, it must be positive. 
   If failure to converge occurs, try some other value.
@@ -106,15 +106,15 @@ New York: Wiley.
   type 1 extreme value distribution.
 
 
-  The standard log-gamma distribution is fitted with \code{lgammaff}
+  The standard log-gamma distribution is fitted with \code{lgamma1}
   and the non-standard (3-parameter) log-gamma distribution is fitted
-  with \code{lgamma3ff}.
+  with \code{lgamma3}.
 
 
 }
 \seealso{
   \code{\link{rlgamma}},
-  \code{\link{gengamma}},
+  \code{\link{gengamma.stacy}},
   \code{\link{prentice74}},
   \code{\link{gamma1}},
   \code{\link[base:Special]{lgamma}}.
@@ -122,16 +122,16 @@ New York: Wiley.
 
 }
 \examples{
-ldata <- data.frame(y = rlgamma(100, k = exp(1)))
-fit <- vglm(y ~ 1, lgammaff, data = ldata, trace = TRUE, crit = "coef")
+ldata <- data.frame(y = rlgamma(100, shape = exp(1)))
+fit <- vglm(y ~ 1, lgamma1, data = ldata, trace = TRUE, crit = "coef")
 summary(fit)
 coef(fit, matrix = TRUE)
 Coef(fit)
 
-ldata <- data.frame(x = runif(nn <- 5000))  # Another example
-ldata <- transform(ldata, loc = -1 + 2 * x, Scale = exp(1))
-ldata <- transform(ldata, y = rlgamma(nn, loc, scale = Scale, k = exp(0)))
-fit2 <- vglm(y ~ x, lgamma3ff(zero = 2:3), data = ldata, trace = TRUE, crit = "c")
+ldata <- data.frame(x2 = runif(nn <- 5000))  # Another example
+ldata <- transform(ldata, loc = -1 + 2 * x2, Scale = exp(1))
+ldata <- transform(ldata, y = rlgamma(nn, loc, scale = Scale, shape = exp(0)))
+fit2 <- vglm(y ~ x2, lgamma3, data = ldata, trace = TRUE, crit = "c")
 coef(fit2, matrix = TRUE)
 }
 \keyword{models}
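
For the standard log-gamma of lgamma1(), the mean is digamma(shape), which
gives a quick check on the fitted values; a sketch reusing the shape value
of the first example (assumes VGAM is attached):

    k <- exp(1)
    mean(rlgamma(n = 1e5, shape = k))  # Simulated mean
    digamma(k)  # Theoretical mean of the standard log-gamma
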
diff --git a/man/lindley.Rd b/man/lindley.Rd
index c9be3a3..7654312 100644
--- a/man/lindley.Rd
+++ b/man/lindley.Rd
@@ -79,7 +79,7 @@ Lindley distribution and its application.
 
 \seealso{
   \code{\link{dlind}},
-  \code{\link{gamma2.ab}},
+  \code{\link{gammaR}},
   \code{\link{simulate.vlm}}.
 
 
diff --git a/man/lino.Rd b/man/lino.Rd
index fca42ca..008f9fc 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -89,8 +89,14 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
 
 \author{ T. W. Yee }
 \note{
-  The fitted values, which is usually the mean, have not been implemented
-  yet and consequently are \code{NA}s.
+  The fitted values, which are usually the mean, have not
+  been implemented yet.
+  Currently the median is returned as the fitted values.
+
+
+% and consequently are \code{NA}s.
+
+
 
 
   Although Fisher scoring is used, the working weight matrices
@@ -116,18 +122,17 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
 
 \examples{
 ldata <- data.frame(y1 = rbeta(n = 1000, exp(0.5), exp(1)))  # ~ standard beta
-fit <- vglm(y1 ~ 1, lino, ldata, trace = TRUE)
+fit <- vglm(y1 ~ 1, lino, data = ldata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 head(fitted(fit))
 summary(fit)
 
 # Nonstandard beta distribution
-ldata <-
-  transform(ldata, y2 = rlino(n = 1000, shape1 = 2, shape2 = 3, lambda = exp(1)))
-fit2 <- vglm(y2 ~ 1, lino(lshape1 = identitylink,
-                          lshape2 = identitylink, ilamb = 10),
-             data = ldata)
+ldata <- transform(ldata, y2 = rlino(n = 1000, shape1 = exp(1),
+                                     shape2 = exp(2), lambda = exp(1)))
+fit2 <- vglm(y2 ~ 1, lino(lshape1 = "identitylink", lshape2 = "identitylink",
+                          ilambda = 10), data = ldata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 }
 \keyword{models}
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index 9da6467..6905833 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -9,7 +9,7 @@
 \usage{
 lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3), 
         llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
-        dfmu.init = 4, dfsigma.init = 2, ilambda = 1, isigma = NULL)
+        idf.mu = 4, idf.sigma = 2, ilambda = 1, isigma = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -30,7 +30,7 @@ lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3),
 
   }
 
-  \item{dfmu.init, dfsigma.init}{
+  \item{idf.mu, idf.sigma}{
   See \code{\link{lms.bcn}}.
   
   }
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index 27bc240..c495f77 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -10,8 +10,8 @@
 \usage{
 lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
         llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
-        dfmu.init = 4, dfsigma.init = 2, ilambda = 1,
-        isigma = NULL, tol0 = 0.001, expectiles = FALSE)
+        idf.mu = 4, idf.sigma = 2, ilambda = 1,
+        isigma = NULL, tol0 = 0.001)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -21,7 +21,13 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
   which are the quantiles.
   They will be returned as `fitted values'.
 
+
+
 % or expectiles.
+% 20140624; withdrawn 'expectiles'.
+%       isigma = NULL, tol0 = 0.001, expectiles = FALSE
+
+
 
 
   }
@@ -44,14 +50,14 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
 
 
   }
-  \item{dfmu.init}{
+  \item{idf.mu}{
   Degrees of freedom for the cubic smoothing spline fit applied to
   get an initial estimate of mu.
   See \code{\link{vsmooth.spline}}.
 
 
   }
-  \item{dfsigma.init}{
+  \item{idf.sigma}{
   Degrees of freedom for the cubic smoothing spline fit applied to
   get an initial estimate of sigma.
   See \code{\link{vsmooth.spline}}.
@@ -81,15 +87,15 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
 
 
   }
-  \item{expectiles}{
-  Experimental; please do not use.
+% \item{expectiles}{
+% Experimental; please do not use.
 
 % A single logical. If \code{TRUE} then the method is LMS-expectile
 % regression; \emph{expectiles} are returned rather than quantiles.
 % The default is LMS quantile regression based on the normal distribution.
 
 
-  }
+% }
 
 }
 \details{
@@ -243,7 +249,7 @@ contains further information and examples.
 
 \examples{
 \dontrun{ require("VGAMdata")
-mysubset <- subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
+mysubset <- subset(xs.nz, sex == "M" & ethnicity == "Maori" & study1)
 mysubset <- transform(mysubset, BMI = weight / height^2)
 BMIdata <- na.omit(mysubset)
 BMIdata <- subset(BMIdata, BMI < 80 & age < 65,
@@ -259,7 +265,7 @@ head(predict(fit))
 head(fitted(fit))
 head(BMIdata)
 head(cdf(fit))  # Person 46 is probably overweight, given his age
-100 * colMeans(c(depvar(fit)) < fitted(fit))  # Empirical proportions
+100 * colMeans(depvar(fit, drop = TRUE) < fitted(fit))  # Empirical proportions
 
 # Convergence problems? Try this trick: fit0 is a simpler model used for fit1
 fit0 <- vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1, 3)), data = BMIdata)
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index 9fd40ad..48a00d0 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -10,12 +10,12 @@
 \usage{
 lms.yjn(percentiles = c(25, 50, 75), zero = c(1, 3),
         llambda = "identitylink", lsigma = "loge",
-        dfmu.init = 4, dfsigma.init = 2,
+        idf.mu = 4, idf.sigma = 2,
         ilambda = 1, isigma = NULL, rule = c(10, 5),
         yoffset = NULL, diagW = FALSE, iters.diagW = 6)
 lms.yjn2(percentiles = c(25, 50, 75), zero = c(1, 3),
          llambda = "identitylink", lmu = "identitylink", lsigma = "loge",
-         dfmu.init = 4, dfsigma.init = 2, ilambda = 1.0,
+         idf.mu = 4, idf.sigma = 2, ilambda = 1.0,
          isigma = NULL, yoffset = NULL, nsimEIM = 250)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -34,7 +34,7 @@ lms.yjn2(percentiles=c(25,50,75), zero=c(1,3),
   See \code{\link{lms.bcn}}.
 
   }
-  \item{dfmu.init, dfsigma.init}{
+  \item{idf.mu, idf.sigma}{
   See \code{\link{lms.bcn}}.
 
   }
diff --git a/man/logF.Rd b/man/logF.Rd
index 77ab712..aa5357a 100644
--- a/man/logF.Rd
+++ b/man/logF.Rd
@@ -13,7 +13,7 @@
 }
 \usage{
  logF(lshape1 = "loge", lshape2 = "loge",
-      ishape1 = NULL, ishape2 = 1, imethod = 1) 
+      ishape1 = NULL, ishape2 = 1, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
diff --git a/man/logistic.Rd b/man/logistic.Rd
index d2e2ea5..52c1f6f 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -1,7 +1,7 @@
 \name{logistic}
 \alias{logistic}
 \alias{logistic1}
-\alias{logistic2}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Logistic Distribution Family Function }
 \description{
@@ -12,8 +12,8 @@
 }
 \usage{
 logistic1(llocation = "identitylink", scale.arg = 1, imethod = 1)
-logistic2(llocation = "identitylink", lscale = "loge",
-          ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
+logistic(llocation = "identitylink", lscale = "loge",
+         ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -64,13 +64,13 @@ logistic2(llocation = "identitylink", lscale = "loge",
 
 
   \code{logistic1} estimates the location parameter only while
-  \code{logistic2} estimates both parameters.
+  \code{logistic} estimates both parameters.
   By default,
   \eqn{\eta_1 = l}{eta1 = l} and \eqn{\eta_2 = \log(s)}{eta2 = log(s)} for
-  \code{logistic2}.
+  \code{logistic}.
 
 
-  \code{logistic2} can handle multiple responses.
+  \code{logistic} can handle multiple responses.
 
 
 }
@@ -100,7 +100,7 @@ Hoboken, NJ, USA: Wiley-Interscience, p.130.
 
 
 deCani, J. S. and Stine, R. A. (1986)
-A note on Deriving the Information Matrix for a Logistic Distribution,
+A Note on Deriving the Information Matrix for a Logistic Distribution,
 \emph{The American Statistician},
 \bold{40}, 220--222.
 
@@ -119,7 +119,7 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
   \code{\link[stats:Logistic]{rlogis}},
   \code{\link{logit}},
   \code{\link{cumulative}},
-  \code{\link{bilogistic4}},
+  \code{\link{bilogistic}},
   \code{\link{simulate.vlm}}.
 
 
@@ -133,7 +133,7 @@ coef(fit1, matrix = TRUE)
 
 # Both location and scale unknown
 ldata <- transform(ldata, y2 = rlogis(nn, loc = 1 + 5*x2, scale = exp(0 + 1*x2)))
-fit2 <- vglm(cbind(y1, y2) ~ x2, logistic2, data = ldata, trace = TRUE)
+fit2 <- vglm(cbind(y1, y2) ~ x2, logistic, data = ldata, trace = TRUE)
 coef(fit2, matrix = TRUE)
 vcov(fit2)
 summary(fit2)
diff --git a/man/logit.Rd b/man/logit.Rd
index cf82e94..f0a3b97 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -122,7 +122,7 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
     \code{\link{cauchit}},
     \code{\link{logistic1}},
     \code{\link{loge}},
-    \code{\link{mlogit}}.
+    \code{\link{multilogit}}.
 
 
  }
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index c4907a4..0d79bac 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -16,13 +16,13 @@
 \usage{
 loglaplace1(tau = NULL, llocation = "loge",
     ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
-    shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
-    dfmu.init = 3, rep0 = 0.5, minquantile = 0, maxquantile = Inf,
+    ishrinkage = 0.95, parallel.locat = FALSE, digt = 4,
+    idf.mu = 3, rep0 = 0.5, minquantile = 0, maxquantile = Inf,
     imethod = 1, zero = NULL)
 logitlaplace1(tau = NULL, llocation = "logit",
     ilocation = NULL, kappa = sqrt(tau/(1 - tau)),
-    Scale.arg = 1, shrinkage.init = 0.95, parallelLocation = FALSE,
-    digt = 4, dfmu.init = 3, rep01 = 0.5, imethod = 1, zero = NULL)
+    Scale.arg = 1, ishrinkage = 0.95, parallel.locat = FALSE,
+    digt = 4, idf.mu = 3, rep01 = 0.5, imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -53,7 +53,7 @@ logitlaplace1(tau = NULL, llocation = "logit",
 
 
   }
-  \item{parallelLocation}{ Logical.
+  \item{parallel.locat}{ Logical.
     Should the quantiles be parallel on the transformed scale
     (argument \code{llocation})?
     Assigning this argument to \code{TRUE} circumvents the
@@ -74,7 +74,7 @@ logitlaplace1(tau = NULL, llocation = "logit",
 
 
   }
-  \item{dfmu.init, shrinkage.init, Scale.arg, digt, zero}{
+  \item{idf.mu, ishrinkage, Scale.arg, digt, zero}{
   See \code{\link{alaplace1}}.
 
 
@@ -205,7 +205,7 @@ adata <- transform(adata, y = rnbinom(n, mu = mymu(x2), size = my.k))
 mytau <- c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
 # halfstepping is usual:
 fitp <- vglm(y ~ sm.bs(x2, df = mydof), data = adata, trace = TRUE,
-            loglaplace1(tau = mytau, parallelLoc = TRUE))
+            loglaplace1(tau = mytau, parallel.locat = TRUE))
  
 \dontrun{
 par(las = 1)  # Plot on a log1p() scale
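
The hunk above renames shrinkage.init, parallelLocation and dfmu.init to
ishrinkage, parallel.locat and idf.mu.  A minimal sketch with the new
argument names, assuming VGAM 0.9-5 is attached; the mean function mymu()
is an arbitrary positive choice for illustration:

library(VGAM)
set.seed(1)
mymu  <- function(x) exp(1 + 2 * sin(2 * x))  # Arbitrary positive mean
adata <- data.frame(x2 = sort(runif(n <- 500)))
adata <- transform(adata, y = rnbinom(n, mu = mymu(x2), size = exp(1)))
fitp <- vglm(y ~ sm.bs(x2, df = 3), data = adata, trace = TRUE,
             loglaplace1(tau = c(0.25, 0.5, 0.75), parallel.locat = TRUE))
coef(fitp, matrix = TRUE)
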
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index 184e119..de61cd1 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -1,6 +1,6 @@
 \name{lognormal}
 \alias{lognormal}
-\alias{lognormal3}
+%\alias{lognormal3}
 %%- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Lognormal Distribution }
 \description{
@@ -10,8 +10,6 @@
 }
 \usage{
 lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = 2)
-lognormal3(lmeanlog = "identitylink", lsdlog = "loge",
-           powers.try = (-3):3, delta = NULL, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -39,26 +37,33 @@ lognormal3(lmeanlog = "identitylink", lsdlog = "loge",
   For \code{lognormal()},
   the values must be from the set \{1,2\} which correspond to
   \code{mu}, \code{sigma}, respectively.
-  For \code{lognormal3()},
-  the values must be from the set \{1,2,3\} where 3 is for
-  \eqn{\lambda}{\lambda}.
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
-  }
-  \item{powers.try}{
-  Numerical vector. The initial \eqn{lambda} is chosen
-  as the best value from \code{min(y) - 10^powers.try} where
-  \code{y} is the response.
+% For \code{lognormal3()},
+% the values must be from the set \{1,2,3\} where 3 is for
+% \eqn{\lambda}{\lambda}.
+
 
-  }
-  \item{delta}{
-  Numerical vector. An alternative method for
-  obtaining an initial \eqn{lambda}. Here, \code{delta = min(y)-lambda}.
-  If given, this supersedes the \code{powers.try} argument.
-  The value must be positive.
 
   }
+
+
+% \item{powers.try}{
+% Numerical vector. The initial \eqn{lambda} is chosen
+% as the best value from \code{min(y) - 10^powers.try} where
+% \code{y} is the response.
+
+% }
+
+
+% \item{delta}{
+% Numerical vector. An alternative method for
+% obtaining an initial \eqn{lambda}. Here, \code{delta = min(y)-lambda}.
+% If given, this supersedes the \code{powers.try} argument.
+% The value must be positive.
+
+% }
 }
 \details{
   A random variable \eqn{Y} has a 2-parameter lognormal distribution
@@ -70,19 +75,19 @@ lognormal3(lmeanlog = "identitylink", lsdlog = "loge",
   and not \eqn{\mu}{mu}, make up the fitted values.
 
 
-  A random variable \eqn{Y} has a 3-parameter lognormal distribution
-  if \eqn{\log(Y-\lambda)}{log(Y-lambda)}
-  is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}. Here,
-  \eqn{\lambda < Y}{lambda < Y}.
-  The expected value of \eqn{Y}, which is
-  \deqn{E(Y) = \lambda + \exp(\mu + 0.5 \sigma^2)}{%
-        E(Y) =  lambda + exp(mu + 0.5 sigma^2)}
-  and not \eqn{\mu}{mu}, make up the fitted values.
+% A random variable \eqn{Y} has a 3-parameter lognormal distribution
+% if \eqn{\log(Y-\lambda)}{log(Y-lambda)}
+% is distributed \eqn{N(\mu, \sigma^2)}{N(mu, sigma^2)}. Here,
+% \eqn{\lambda < Y}{lambda < Y}.
+% The expected value of \eqn{Y}, which is
+% \deqn{E(Y) = \lambda + \exp(\mu + 0.5 \sigma^2)}{%
+%       E(Y) =  lambda + exp(mu + 0.5 sigma^2)}
+% and not \eqn{\mu}{mu}, make up the fitted values.
 
 
-  \code{lognormal()} and \code{lognormal3()} fit the 2- and 3-parameter
-  lognormal distribution respectively. Clearly, if the location
-  parameter \eqn{\lambda=0}{lambda=0} then both distributions coincide.
+% \code{lognormal()} and \code{lognormal3()} fit the 2- and 3-parameter
+% lognormal distribution respectively. Clearly, if the location
+% parameter \eqn{\lambda=0}{lambda=0} then both distributions coincide.
 
 
 }
@@ -112,13 +117,13 @@ Hoboken, NJ, USA: Wiley-Interscience.
 %}
 
 
-\section{Warning}{
-  Regularity conditions are not satisfied for the 3-parameter case:
-  results may be erroneous.
-  May withdraw it in later versions.
-
-
-}
+%\section{Warning}{
+%  Regularity conditions are not satisfied for the 3-parameter case:
+%  results may be erroneous.
+%  May withdraw it in later versions.
+%
+%
+%}
 
 
 \seealso{
@@ -134,21 +139,32 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \examples{
-ldata <- data.frame(y1 = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
-fit1 <- vglm(y1 ~ 1, lognormal, data = ldata, trace = TRUE, crit = "c")
-coef(fit1, matrix = TRUE)
-Coef(fit1)
-
 ldata2 <- data.frame(x2 = runif(nn <- 1000))
-ldata2 <- transform(ldata2, y2 = rlnorm(nn, mean = 0.5, sd = exp(x2)))
+ldata2 <- transform(ldata2, y1 = rlnorm(nn, mean = 1 + 2 * x2, sd = exp(-1)),
+                            y2 = rlnorm(nn, mean = 1, sd = exp(-1 + x2)))
+fit1 <- vglm(y1 ~ x2, lognormal(zero = 2), data = ldata2, trace = TRUE)
 fit2 <- vglm(y2 ~ x2, lognormal(zero = 1), data = ldata2, trace = TRUE)
+coef(fit1, matrix = TRUE)
 coef(fit2, matrix = TRUE)
-
-lambda <- 4
-ldata3 <- data.frame(y3 = lambda + rlnorm(1000, m = 1.5, sd = exp(-0.8)))
-fit3 <- vglm(y3 ~ 1, lognormal3, data = ldata3, trace = TRUE, crit = "c")
-coef(fit3, matrix = TRUE)
-summary(fit3)
 }
 \keyword{models}
 \keyword{regression}
+
+
+
+%lognormal3(lmeanlog = "identitylink", lsdlog = "loge",
+%           powers.try = (-3):3, delta = NULL, zero = 2)
+
+%lambda <- 4
+%ldata3 <- data.frame(y3 = lambda + rlnorm(1000, m = 1.5, sd = exp(-0.8)))
+%fit3 <- vglm(y3 ~ 1, lognormal3, data = ldata3, trace = TRUE, crit = "c")
+%coef(fit3, matrix = TRUE)
+%summary(fit3)
+
+
+%ldata <- data.frame(y1 = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
+%fit1 <- vglm(y1 ~ 1, lognormal, data = ldata, trace = TRUE, crit = "c")
+%coef(fit1, matrix = TRUE)
+%Coef(fit1)
+
+
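
With lognormal3() withdrawn, only the two-parameter family remains; the
zero argument selects which of meanlog/sdlog is intercept-only.  A minimal
sketch of the second fit in the new example, assuming VGAM is attached:

library(VGAM)
set.seed(1)
ldata2 <- data.frame(x2 = runif(nn <- 1000))
ldata2 <- transform(ldata2, y2 = rlnorm(nn, mean = 1, sd = exp(-1 + x2)))
fit2 <- vglm(y2 ~ x2, lognormal(zero = 1), data = ldata2, trace = TRUE)
coef(fit2, matrix = TRUE)  # Only sdlog is regressed on x2
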
diff --git a/man/lomax.Rd b/man/lomax.Rd
index b419148..fe0da7e 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -102,9 +102,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
     \code{\link{dagum}},
     \code{\link{sinmad}},
     \code{\link{fisk}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}},
+    \code{\link{inv.paralogistic}},
     \code{\link{simulate.vlm}}.
 
 }
diff --git a/man/lqnorm.Rd b/man/lqnorm.Rd
index c9ae9e3..248b40e 100644
--- a/man/lqnorm.Rd
+++ b/man/lqnorm.Rd
@@ -9,7 +9,7 @@
 }
 \usage{
 lqnorm(qpower = 2, link = "identitylink",
-       imethod = 1, imu = NULL, shrinkage.init = 0.95)
+       imethod = 1, imu = NULL, ishrinkage = 0.95)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -36,7 +36,7 @@ lqnorm(qpower = 2, link = "identitylink",
   The default is to use \code{imethod = 1}.
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   How much shrinkage is used when initializing the fitted values.
   The value must be between 0 and 1 inclusive, and
   a value of 0 means the individual response values are used,
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
index 15752f1..2cef1f7 100644
--- a/man/lvplot.qrrvglm.Rd
+++ b/man/lvplot.qrrvglm.Rd
@@ -10,7 +10,7 @@ y-axis are the first and second ordination axes respectively.
 
 }
 \usage{
-lvplot.qrrvglm(object, varI.latvar = FALSE, reference = NULL,
+lvplot.qrrvglm(object, varI.latvar = FALSE, refResponse = NULL,
     add = FALSE, show.plot = TRUE, 
     rug = TRUE, y = FALSE, type = c("fitted.values", "predictors"), 
     xlab = paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""), 
@@ -42,17 +42,21 @@ lvplot.qrrvglm(object, varI.latvar = FALSE, reference = NULL,
   \item{varI.latvar}{
   Logical that is fed into \code{\link{Coef.qrrvglm}}. 
 
+
   }
-  \item{reference}{
+  \item{refResponse}{
     Integer or character that is fed into \code{\link{Coef.qrrvglm}}.
 
+
   }
   \item{add}{ Logical. Add to an existing plot? If \code{FALSE}, a new
   plot is made.
 
+
  }
   \item{show.plot}{ Logical. Plot it?
 
+
  }
   \item{rug}{ Logical. If \code{TRUE}, a rug plot is plotted at the
   foot of the plot (applies to rank-1 models only).
@@ -81,7 +85,7 @@ lvplot.qrrvglm(object, varI.latvar = FALSE, reference = NULL,
  }
   \item{pcex}{ Character expansion of the points.
 Here, for rank-1 models, points are the response \emph{y} data. 
-For rank-2 models, points are the optima.
+For rank-2 models, points are the optimums.
   See the \code{cex} argument in \code{\link[graphics]{par}}.
 
  }
@@ -112,11 +116,11 @@ For rank-2 models, points are the optima.
   See the \code{lwd} argument of \code{\link[graphics]{par}}.
 
  }
-  \item{label.arg}{ Logical. Label the optima and \bold{C}? 
+  \item{label.arg}{ Logical. Label the optimums and \bold{C}? 
   (applies only to rank-2 models only).
 
  }
-  \item{adj.arg}{ Justification of text strings for labelling the optima
+  \item{adj.arg}{ Justification of text strings for labelling the optimums
   (applies only to rank-2 models only). 
   See the \code{adj} argument of \code{\link[graphics]{par}}.
 
@@ -136,7 +140,7 @@ For rank-2 models, points are the optima.
       radius \code{-ellipse}. For example, setting \code{ellipse = -1}
       will result in circular contours that have unit radius (in latent
       variable units).  If \code{ellipse} is \code{NULL} or \code{FALSE}
-      then no ellipse is drawn around the optima.
+      then no ellipse is drawn around the optimums.
 
 
   }
@@ -262,7 +266,7 @@ For rank-2 models, points are the optima.
   \code{pcex} and \code{pcol} correspond to the size and color of the
   points. Such ``\code{p}'' arguments should be vectors of length 1,
   or \eqn{n}, the number of sites.  For the rank-2 model, arguments
-  beginning with ``\code{p}'' correspond to the optima.
+  beginning with ``\code{p}'' correspond to the optimums.
 
 
 }
@@ -359,25 +363,28 @@ cdata <- transform(cdata,
             spp2 = rpois(nn, lambda2),
             spp3 = rpois(nn, lambda3))
 set.seed(111)
-# vvv p2 <- cqo(cbind(spp1,spp2,spp3) ~ x2 + x3 + x4, poissonff, 
-# vvv          data = cdata,
-# vvv          Rank = 2, I.tolerances = TRUE,
-# vvv          Crow1positive = c(TRUE, FALSE))   # deviance = 505.81
-# vvv if (deviance(p2) > 506) stop("suboptimal fit obtained")
-# vvv sort(p2 at misc$deviance.Bestof)  # A history of the fits
-# vvv Coef(p2)
+\dontrun{
+p2 <- cqo(cbind(spp1, spp2, spp3) ~ x2 + x3 + x4, poissonff,
+          data = cdata, Rank = 2, I.tolerances = TRUE,
+          Crow1positive = c(TRUE, FALSE))  # deviance = 505.81
+if (deviance(p2) > 506) stop("suboptimal fit obtained")
+sort(deviance(p2, history = TRUE))  # A history of all the iterations
+Coef(p2)
+}
 
 \dontrun{
 lvplot(p2, sites = TRUE, spch = "*", scol = "darkgreen", scex = 1.5,
        chull = TRUE, label = TRUE, Absolute = TRUE, ellipse = 140,
-       adj = -0.5, pcol = "blue", pcex = 1.3, las = 1,
-       C = TRUE, Cadj = c(-.3,-.3,1), Clwd = 2, Ccex = 1.4, Ccol = "red",
+       adj = -0.5, pcol = "blue", pcex = 1.3, las = 1, Ccol = "orange",
+       C = TRUE, Cadj = c(-0.3, -0.3, 1), Clwd = 2, Ccex = 1.4,
        main = paste("Contours at Abundance = 140 with",
                   "convex hull of the site scores")) }
-# vvv var(latvar(p2))  # A diagonal matrix, i.e., uncorrelated latent vars
-# vvv var(latvar(p2, varI.latvar = TRUE))  # Identity matrix
-# vvv Tol(p2)[, , 1:2]  # Identity matrix
-# vvv Tol(p2, varI.latvar = TRUE)[, , 1:2]  # A diagonal matrix
+\dontrun{
+var(latvar(p2))  # A diagonal matrix, i.e., uncorrelated latent vars
+var(latvar(p2, varI.latvar = TRUE))  # Identity matrix
+Tol(p2)[, , 1:2]  # Identity matrix
+Tol(p2, varI.latvar = TRUE)[, , 1:2]  # A diagonal matrix
+}
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/makeham.Rd b/man/makeham.Rd
index 16b367a..4bdc9e9 100644
--- a/man/makeham.Rd
+++ b/man/makeham.Rd
@@ -8,13 +8,17 @@
 
 }
 \usage{
-makeham(lshape = "loge", lscale = "loge", lepsilon = "loge",
-        ishape = NULL,   iscale = NULL,   iepsilon = NULL,
-        gshape = exp(-5:5), gscale = exp(-5:5), gepsilon = exp(-4:1),
-        nsimEIM = 500, oim.mean = TRUE, zero = NULL)
+makeham(lscale = "loge", lshape = "loge", lepsilon = "loge",
+        iscale = NULL,   ishape = NULL,   iepsilon = NULL,
+        gscale = exp(-5:5), gshape = exp(-5:5), gepsilon = exp(-4:1),
+        nsimEIM = 500, oim.mean = TRUE, zero = NULL, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
   \item{lshape, lscale, lepsilon}{
   Parameter link functions applied to the
   shape parameter \code{shape},
diff --git a/man/makehamUC.Rd b/man/makehamUC.Rd
index a81b788..005a1f0 100644
--- a/man/makehamUC.Rd
+++ b/man/makehamUC.Rd
@@ -15,10 +15,10 @@
 
 }
 \usage{
-dmakeham(x, shape, scale = 1, epsilon = 0, log = FALSE)
-pmakeham(q, shape, scale = 1, epsilon = 0)
-qmakeham(p, shape, scale = 1, epsilon = 0)
-rmakeham(n, shape, scale = 1, epsilon = 0)
+dmakeham(x, scale = 1, shape, epsilon = 0, log = FALSE)
+pmakeham(q, scale = 1, shape, epsilon = 0)
+qmakeham(p, scale = 1, shape, epsilon = 0)
+rmakeham(n, scale = 1, shape, epsilon = 0)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -29,7 +29,7 @@ rmakeham(n, shape, scale = 1, epsilon = 0)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
-  \item{shape, scale}{positive shape and scale parameters. }
+  \item{scale, shape}{positive scale and shape parameters. }
   \item{epsilon}{another parameter. Must be non-negative. See below. }
 
 }
@@ -75,21 +75,21 @@ Gompertz-Makeham distribution.
 \examples{
 probs <- seq(0.01, 0.99, by = 0.01)
 Shape <- exp(-1); Scale <- exp(1); eps = Epsilon <- exp(-1)
-max(abs(pmakeham(qmakeham(p = probs, Shape, sca = Scale, eps = Epsilon),
-                 Shape, sca = Scale, eps = Epsilon) - probs))  # Should be 0
+max(abs(pmakeham(qmakeham(p = probs, sca = Scale, Shape, eps = Epsilon),
+                 sca = Scale, Shape, eps = Epsilon) - probs))  # Should be 0
 
 \dontrun{ x <- seq(-0.1, 2.0, by = 0.01);
-plot(x, dmakeham(x, Shape, sca = Scale, eps = Epsilon), type = "l",
+plot(x, dmakeham(x, sca = Scale, Shape, eps = Epsilon), type = "l",
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles",
      col = "blue", las = 1, ylab = "")
 abline(h = 0, col = "blue", lty = 2)
-lines(x, pmakeham(x, Shape, sca = Scale, eps = Epsilon), col = "orange")
+lines(x, pmakeham(x, sca = Scale, Shape, eps = Epsilon), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qmakeham(probs, Shape, sca = Scale, eps = Epsilon)
-lines(Q, dmakeham(Q, Shape, sca = Scale, eps = Epsilon),
+Q <- qmakeham(probs, sca = Scale, Shape, eps = Epsilon)
+lines(Q, dmakeham(Q, sca = Scale, Shape, eps = Epsilon),
       col = "purple", lty = 3, type = "h")
-pmakeham(Q, Shape, sca = Scale, eps = Epsilon) - probs # Should be all zero
+pmakeham(Q, sca = Scale, Shape, eps = Epsilon) - probs # Should be all zero
 abline(h = probs, col = "purple", lty = 3) }
 }
 \keyword{distribution}
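
Because scale now precedes shape in dmakeham() and friends, positional
calls change meaning; naming both arguments avoids ambiguity.  A
round-trip sketch under the new argument order, assuming VGAM 0.9-5:

library(VGAM)
probs <- seq(0.01, 0.99, by = 0.01)
Scale <- exp(1); Shape <- exp(-1); Epsilon <- exp(-1)
max(abs(pmakeham(qmakeham(probs, scale = Scale, shape = Shape,
                          epsilon = Epsilon),
                 scale = Scale, shape = Shape,
                 epsilon = Epsilon) - probs))  # Should be (near) 0
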
diff --git a/man/matched.binomial.Rd b/man/matched.binomial.Rd
deleted file mode 100644
index a4fadfc..0000000
--- a/man/matched.binomial.Rd
+++ /dev/null
@@ -1,180 +0,0 @@
-\name{matched.binomial}
-\alias{matched.binomial}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ The Matched Binomial Distribution Family Function }
-\description{
-  Estimation of a binomial regression in a
-  matched case-control study.
-
-}
-\usage{
-matched.binomial(mvar = NULL, link = "logit",
-                 parallel = TRUE, smallno = .Machine$double.eps^(3/4))
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
-  \item{mvar}{ 
-  Formula specifying the matching variable.
-  This shows which observation belongs to which matching set.
-  The intercept should be suppressed from the formula, and 
-  the term must be a \code{\link[base]{factor}}.
-
-
-  }
-  \item{link}{ 
-  Parameter link function for the probability parameter.
-% called \eqn{p} below.
-  Information for these are at \code{\link{Links}}
-  and \code{\link{CommonVGAMffArguments}}.
-
-
-  }
-  \item{parallel}{ 
-  This should always be set \code{TRUE} otherwise there will be
-  too many parameters to estimate.
-  See \code{\link{CommonVGAMffArguments}} for more information.
-
-  }
-  \item{smallno}{
-  Numeric, a small positive value.
-  For a specific observation, used to nullify the linear/additive
-  predictors that are not needed.
-
-  }
-}
-\details{
-  By default, this \pkg{VGAM} family function fits a logistic
-  regression model to a binary response from a matched case-control
-  study. Here, each case \eqn{(Y = 1}) is matched with one or more
-  controls \eqn{(Y = 0}) with respect to some matching variables
-  (confounders). For example, the first matched set is all women
-  aged from 20 to 25, the second matched set is women aged between
-  26 to 30, etc. The logistic regression has a different intercept
-  for each matched set but the other regression coefficients
-  are assumed to be the same across matched sets
-  (\code{parallel = TRUE}).
-
-
-  Let \eqn{C} be the number of matched sets.
-  This \pkg{VGAM} family function uses a trick by allowing \eqn{M},
-  the number of linear/additive predictors, to be equal to \eqn{C},
-  and then nullifying all but one of them for a particular observation.
-  The term specified by the \code{mvar} argument must be a
-  \code{\link[base]{factor}}.
-  Consequently, the model matrix contains an intercept plus one
-  column for each level of the factor (except the first (this is
-  the default in R)).
-  Altogether there are \eqn{C} columns.
-  The algorithm here constructs a different constraint matrix for
-  each of the \eqn{C} columns.
-
-
-}
-\value{
-  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
-  The object is used by modelling functions such as \code{\link{vglm}}
-  and \code{\link{vgam}}.
-
-
-}
-\references{ 
-  Section 8.2 of 
-  Hastie, T. J. and Tibshirani, R. J. (1990)
-  \emph{Generalized Additive Models}, London: Chapman & Hall.
-
-
-  Pregibon, D. (1984)
-  Data analytic methods for matched case-control studies.
-  \emph{Biometrics},
-  \bold{40},
-  639--651.
-
-
-  Chapter 7 of 
-  Breslow, N. E. and Day, N. E. (1980)
-  \emph{Statistical Methods in Cancer Research I: The Analysis
-        of Case-Control Studies}.
-  Lyon: International Agency for Research on Cancer.
-
-
-  Holford, T. R. and White, C. and Kelsey, J. L. (1978)
-  Multivariate analysis for matched case-control studies.
-  \emph{American Journal of Epidemiology},
-  \bold{107}, 245--256.
-
-
-}
-
-\author{ Thomas W. Yee }
-\note{
-  The response is assumed to be in a format that can also be
-  inputted into \code{\link{binomialff}}.
-
-
-}
-\section{Warning }{
-  Both the memory requirements and computational time of this
-  \pkg{VGAM} family function grows very quickly with respect
-  to the number of matched sets. For example, the large model
-  matrix of a data set with 100 matched sets consisting of one
-  case and one control per set will take up at least (about)
-  20Mb of memory. For a constant number of cases and controls
-  per matched set, the memory requirements are \eqn{O(C^3)}
-  and the the computational time is \eqn{O(C^4)} flops.
-
-
-  The example below has been run successfully with \code{n = 700}
-  (this corresponds to \eqn{C = 350}) but only on a big machine
-  and it took over 10 minutes. The large model matrix was 670Mb.
-
-
-}
-
-\seealso{ 
-  \code{\link{binomialff}}.
-
-
-}
-\examples{
-\dontrun{
-# Cf. Hastie and Tibshirani (1990) p.209. The variable n must be even.
-# Here, the intercept for each matched set accounts for x3 which is
-# the confounder or matching variable.
-n <- 700  # Requires a big machine with lots of memory. Expensive wrt time
-n <- 100  # This requires a reasonably big machine.
-mydat <- data.frame(x2 = rnorm(n), x3 = rep(rnorm(n/2), each = 2))
-xmat <- with(mydat, cbind(x2, x3))
-mydat <- transform(mydat, eta = -0.1 + 0.2 * x2 + 0.3 * x3)
-etamat <- with(mydat, matrix(eta, n/2, 2))
-condmu <- exp(etamat[, 1]) / (exp(etamat[, 1]) + exp(etamat[, 2]))
-y1 <- ifelse(runif(n/2) < condmu, 1, 0)
-y <- cbind(y1, 1 - y1)
-mydat <- transform(mydat, y = c(y1, 1-y1),
-                         ID = factor(c(row(etamat))))
-fit <- vglm(y ~ 1 + ID + x2, trace = TRUE,
-            matched.binomial(mvar = ~ ID - 1), data = mydat)
-dimnames(coef(fit, matrix = TRUE))
-coef(fit, matrix = TRUE)
-summary(fit)
-head(fitted(fit))
-objsizemb <- function(object) round(object.size(object) / 2^20, digits = 2)
-objsizemb(fit)  # in Mb
-
-VLMX <- model.matrix(fit, type = "vlm")  # The big model matrix
-dim(VLMX)
-objsizemb(VLMX)  # in Mb
-rm(VLMX) }
-}
-\keyword{models}
-\keyword{regression}
-
-% Some summary(fit) output
-%ID347       -1.6699e-01    2.01099 -8.3039e-02
-%ID348       -3.0398e-01    2.00455 -1.5165e-01
-%ID349        1.7915e-01    2.00147  8.9509e-02
-%ID350       -3.7716e-02    2.00423 -1.8818e-02
-%x2           2.5748e-01    0.10647  2.4183e+00
-%# Use the trick of Holford et al. (1978)
-
-
-
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
index ceaeb8a..1a7fb5d 100644
--- a/man/maxwell.Rd
+++ b/man/maxwell.Rd
@@ -13,7 +13,8 @@ maxwell(link = "loge", zero = NULL)
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{link, zero}{
-  Parameter link function applied to \eqn{a}. 
+  Parameter link function applied to \eqn{a},
+  which is called the parameter \code{rate}.
   See \code{\link{Links}} for more choices and information;
   a log link is the default because the parameter is positive.
   More information is at \code{\link{CommonVGAMffArguments}}.
@@ -70,7 +71,7 @@ maxwell(link = "loge", zero = NULL)
 
 }
 \examples{
-mdata <- data.frame(y = rmaxwell(1000, a = exp(2)))
+mdata <- data.frame(y = rmaxwell(1000, rate = exp(2)))
 fit <- vglm(y ~ 1, maxwell, data = mdata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index bd9e2c5..bc81677 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -13,10 +13,10 @@
 
 }
 \usage{
-dmaxwell(x, a, log = FALSE)
-pmaxwell(q, a)
-qmaxwell(p, a)
-rmaxwell(n, a)
+dmaxwell(x, rate, log = FALSE)
+pmaxwell(q, rate)
+qmaxwell(p, rate)
+rmaxwell(n, rate)
 }
 \arguments{
   \item{x, q, p, n}{
@@ -24,7 +24,7 @@ rmaxwell(n, a)
 
 
   }
-  \item{a}{the parameter.}
+  \item{rate}{the (rate) parameter.}
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
@@ -50,9 +50,9 @@ rmaxwell(n, a)
 }
 \author{ T. W. Yee }
 \details{
-  See \code{\link{maxwell}}, the \pkg{VGAM} family function
-  for estimating the parameter \eqn{a} by maximum likelihood estimation,
-  for the formula of the probability density function.
+  See \code{\link{maxwell}}, the \pkg{VGAM} family function for
+  estimating the (rate) parameter \eqn{a} by maximum likelihood
+  estimation, for the formula of the probability density function.
 
 
 }
@@ -69,18 +69,18 @@ rmaxwell(n, a)
 
 }
 \examples{
-\dontrun{ a <- 3; x <- seq(-0.5, 3, length = 100)
-plot(x, dmaxwell(x, a = a), type = "l", col = "blue", las = 1, ylab = "",
+\dontrun{ rate <- 3; x <- seq(-0.5, 3, length = 100)
+plot(x, dmaxwell(x, rate = rate), type = "l", col = "blue", las = 1, ylab = "",
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
 abline(h = 0, col = "blue", lty = 2)
-lines(x, pmaxwell(x, a = a), type = "l", col = "orange")
+lines(x, pmaxwell(x, rate = rate), type = "l", col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qmaxwell(probs, a = a)
-lines(Q, dmaxwell(Q, a), col = "purple", lty = 3, type = "h")
-lines(Q, pmaxwell(Q, a), col = "purple", lty = 3, type = "h")
+Q <- qmaxwell(probs, rate = rate)
+lines(Q, dmaxwell(Q, rate), col = "purple", lty = 3, type = "h")
+lines(Q, pmaxwell(Q, rate), col = "purple", lty = 3, type = "h")
 abline(h = probs, col = "purple", lty = 3)
-max(abs(pmaxwell(Q, a) - probs))  # Should be zero
+max(abs(pmaxwell(Q, rate) - probs))  # Should be zero
 }
 }
 \keyword{distribution}
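
The parameter formerly called a is now named rate throughout
d/p/q/rmaxwell.  A short round-trip check using the new name, assuming
VGAM 0.9-5 is attached:

library(VGAM)
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qmaxwell(probs, rate = 3)           # 'a' is now called 'rate'
max(abs(pmaxwell(Q, rate = 3) - probs))  # Should be (near) zero
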
diff --git a/man/mmt.Rd b/man/melbmaxtemp.Rd
similarity index 83%
rename from man/mmt.Rd
rename to man/melbmaxtemp.Rd
index b2662bd..f7da7e4 100644
--- a/man/mmt.Rd
+++ b/man/melbmaxtemp.Rd
@@ -1,7 +1,7 @@
-\name{mmt}
-\alias{mmt}
+\name{melbmaxtemp}
+\alias{melbmaxtemp}
 \docType{data}
-\title{ mmt daily maximum temperatures}
+\title{ Melbourne daily maximum temperatures }
 \description{
   Melbourne daily maximum temperatures in degrees Celsius
   over the ten-year period 1981--1990.
@@ -9,7 +9,7 @@
 
 }
 \usage{
-data(mmt)
+data(melbmaxtemp)
 }
 \format{
   A vector with 3650 observations.
@@ -45,10 +45,10 @@ data(mmt)
 }
 
 \examples{
-summary(mmt)
+summary(melbmaxtemp)
 \dontrun{ par(mfrow = c(1, 1), mar = c(5, 4, 0.2, 0.1) + 0.1, las = 1)
-melb <- data.frame(today     = mmt[-1],
-                   yesterday = mmt[-length(mmt)])
+melb <- data.frame(today     = melbmaxtemp[-1],
+                   yesterday = melbmaxtemp[-length(melbmaxtemp)])
 plot(today ~ yesterday, data = melb,
      xlab = "Yesterday's Max Temperature",
      ylab = "Today's Max Temperature", cex = 1.4, type = "n")
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index 1413cc7..1bf94cc 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -47,11 +47,11 @@ mix2exp(lphi = "logit", llambda = "loge", iphi = 0.5, il1 = NULL,
   }
 }
 \details{
-  The probability function can be loosely written as 
-  \deqn{P(Y=y) = \phi\,Exponential(\lambda_1) +
+  The probability density function can be loosely written as 
+  \deqn{f(y) = \phi\,Exponential(\lambda_1) +
               (1-\phi)\,Exponential(\lambda_2)}{%
-        P(Y=y) = phi * Exponential(lambda1) +
-                (1-phi) * Exponential(lambda2)}
+        f(y) = phi * Exponential(lambda1) +
+              (1-phi) * Exponential(lambda2)}
   where \eqn{\phi}{phi} is the probability an observation belongs
   to the first group, and \eqn{y>0}.
   The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
index b93bf6b..22d5201 100644
--- a/man/mix2poisson.Rd
+++ b/man/mix2poisson.Rd
@@ -48,10 +48,12 @@ mix2poisson(lphi = "logit", llambda = "loge",
     The two values are fed in as the \code{probs} argument into
     \code{\link[stats]{quantile}}.
 
+
   }
   \item{nsimEIM, zero}{
   See \code{\link{CommonVGAMffArguments}}.
 
+
   }
 }
 \details{
@@ -134,16 +136,16 @@ mu1 <- exp(2.5)  # Also known as lambda1
 mu2 <- exp(3)
 (phi <- logit(-0.5, inverse = TRUE))
 mdata <- data.frame(y = rpois(nn, ifelse(runif(nn) < phi, mu1, mu2)))
-fit <- vglm(y ~ 1, mix2poisson, data = mdata)
-coef(fit, matrix = TRUE)
+mfit <- vglm(y ~ 1, mix2poisson, data = mdata)
+coef(mfit, matrix = TRUE)
 
 # Compare the results with the truth
-round(rbind('Estimated' = Coef(fit), 'Truth' = c(phi, mu1, mu2)), digits = 2)
+round(rbind('Estimated' = Coef(mfit), 'Truth' = c(phi, mu1, mu2)), digits = 2)
 
 ty <- with(mdata, table(y))
 plot(names(ty), ty, type = "h", main = "Orange=estimate, blue=truth",
      ylab = "Frequency", xlab = "y")
-abline(v = Coef(fit)[-1], lty = 2, col = "orange", lwd = 2)
+abline(v = Coef(mfit)[-1], lty = 2, col = "orange", lwd = 2)
 abline(v = c(mu1, mu2), lty = 2, col = "blue", lwd = 2)
 
 # Example 2: London Times data (Lange, 1997, p.31)
@@ -152,13 +154,13 @@ ltdata1 <- data.frame(deaths = 0:9,
 ltdata2 <- data.frame(y = with(ltdata1, rep(deaths, freq)))
 
 # Usually this does not work well unless nsimEIM is large
-fit <- vglm(deaths ~ 1, weight = freq, data = ltdata1,
+Mfit <- vglm(deaths ~ 1, weight = freq, data = ltdata1,
             mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5, nsimEIM = 5000))
 
 # This works better in general
-fit <- vglm(y ~ 1, mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5), ltdata2)
-coef(fit, matrix = TRUE)
-Coef(fit)
+Mfit <- vglm(y ~ 1, mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5), data = ltdata2)
+coef(Mfit, matrix = TRUE)
+Coef(Mfit)
 }
 }
 \keyword{models}
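
The example's fitted objects are now named mfit/Mfit rather than fit.
A compact sketch of Example 1 under the new names, assuming VGAM is
attached:

library(VGAM)
set.seed(1)
nn <- 1000; mu1 <- exp(2.5); mu2 <- exp(3)
phi <- logit(-0.5, inverse = TRUE)
mdata <- data.frame(y = rpois(nn, ifelse(runif(nn) < phi, mu1, mu2)))
mfit <- vglm(y ~ 1, mix2poisson, data = mdata)
round(rbind(Estimated = Coef(mfit), Truth = c(phi, mu1, mu2)), digits = 2)
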
diff --git a/man/mlogit.Rd b/man/multilogit.Rd
similarity index 71%
rename from man/mlogit.Rd
rename to man/multilogit.Rd
index 592d3ec..b89debe 100644
--- a/man/mlogit.Rd
+++ b/man/multilogit.Rd
@@ -1,16 +1,16 @@
-\name{mlogit}
-\alias{mlogit}
+\name{multilogit}
+\alias{multilogit}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Multi-logit Link Function }
 \description{
-  Computes the mlogit transformation, including its inverse and the
+  Computes the multilogit transformation, including its inverse and the
   first two derivatives.
 
 }
 \usage{
-mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
-       bvalue = NULL, inverse = FALSE, deriv = 0,
-       short = TRUE, tag = FALSE)
+multilogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
+           bvalue = NULL, inverse = FALSE, deriv = 0,
+           short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -38,7 +38,7 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
 
 }
 \details{
-  The \code{mlogit()} link function is a generalization of the
+  The \code{multilogit()} link function is a generalization of the
   \code{\link{logit}} link to \eqn{M} levels/classes.
   It forms the basis of the \code{\link{multinomial}} logit model.
   It is sometimes called the \emph{multi-logit} link
@@ -49,7 +49,7 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
 
 }
 \value{
-  For \code{mlogit} with \code{deriv = 0}, the mlogit of \code{theta},
+  For \code{multilogit} with \code{deriv = 0}, the multilogit of \code{theta},
   i.e.,
   \code{log(theta[, j]/theta[, M+1])} when \code{inverse = FALSE},
   and if \code{inverse = TRUE} then
@@ -76,7 +76,7 @@ mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
 
 \note{
   Numerical instability may occur when \code{theta} is
-  close to 1 or 0 (for \code{mlogit}).
+  close to 1 or 0 (for \code{multilogit}).
   One way of overcoming this is to use, e.g., \code{bvalue}.
   Currently \code{care.exp()} is used to avoid \code{NA}s being
   returned if the probability is too close to 1.
@@ -100,15 +100,15 @@ fit <- vglm(cbind(normal, mild, severe) ~ let,
 fitted(fit)
 predict(fit)
 
-mlogit(fitted(fit))
-mlogit(fitted(fit)) - predict(fit)  # Should be all 0s
+multilogit(fitted(fit))
+multilogit(fitted(fit)) - predict(fit)  # Should be all 0s
 
-mlogit(predict(fit), inverse = TRUE)  # rowSums() add to unity
-mlogit(predict(fit), inverse = TRUE, refLevel = 1)  # For illustration only
-mlogit(predict(fit), inverse = TRUE) - fitted(fit)  # Should be all 0s
+multilogit(predict(fit), inverse = TRUE)  # rowSums() add to unity
+multilogit(predict(fit), inverse = TRUE, refLevel = 1)  # For illustration only
+multilogit(predict(fit), inverse = TRUE) - fitted(fit)  # Should be all 0s
 
-mlogit(fitted(fit), deriv = 1)
-mlogit(fitted(fit), deriv = 2)
+multilogit(fitted(fit), deriv = 1)
+multilogit(fitted(fit), deriv = 2)
 }
 \keyword{math}
 \keyword{models}
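
The link is renamed from mlogit() to multilogit().  A round-trip sketch
mirroring the hunk's example, assuming VGAM 0.9-5 and its pneumo data set:

library(VGAM)
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
max(abs(multilogit(fitted(fit)) - predict(fit)))                  # ~0
max(abs(multilogit(predict(fit), inverse = TRUE) - fitted(fit)))  # ~0
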
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index fa39a0c..da3f1ae 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -224,7 +224,7 @@ by the \pkg{VGAM} package can be found at
     \code{\link{rrvglm}},
     \code{\link{fill1}},
     \code{\link[stats:Multinom]{Multinomial}},
-    \code{\link{mlogit}},
+    \code{\link{multilogit}},
     \code{\link[datasets]{iris}}.
   The author's homepage has further documentation about
   categorical data analysis using \pkg{VGAM}.
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
index bb7b505..fe0b00e 100644
--- a/man/nakagami.Rd
+++ b/man/nakagami.Rd
@@ -8,20 +8,24 @@
 
 }
 \usage{
-nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
+nakagami(lscale = "loge", lshape = "loge", iscale = 1, ishape = NULL,
+         nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape, lscale}{
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
+  \item{lscale, lshape}{
   Parameter link functions applied to the
-  \emph{shape} and \emph{scale} parameters.
+  \emph{scale} and \emph{shape} parameters.
   Log links ensure they are positive.
   See \code{\link{Links}} for more choices
   and information.
 
 
   }
-  \item{ishape, iscale}{
+  \item{iscale, ishape}{
   Optional initial values for the shape and scale parameters.
   For \code{ishape}, a \code{NULL} value means it is obtained in the
   \code{initialize} slot based on the value of \code{iscale}.
@@ -46,8 +50,8 @@ nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
        \Gamma(shape)}{sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)} and
   these are returned as the fitted values.
   By default, the linear/additive predictors are
-  \eqn{\eta_1=\log(shape)}{eta1=log(shape)} and
-  \eqn{\eta_2=\log(scale)}{eta2=log(scale)}.  
+  \eqn{\eta_1=\log(scale)}{eta1=log(scale)} and
+  \eqn{\eta_2=\log(shape)}{eta2=log(shape)}.
   Fisher scoring is implemented.
 
 
@@ -97,15 +101,17 @@ nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
 nn <- 1000; shape <- exp(0); Scale <- exp(1)
 ndata <- data.frame(y1 = sqrt(rgamma(nn, shape = shape, scale = Scale/shape)))
 fit <- vglm(y1 ~ 1, nakagami, data = ndata, trace = TRUE, crit = "coef")
-ndata <- transform(ndata, y2 = rnaka(nn, shape = shape, scale = Scale))
+ndata <- transform(ndata, y2 = rnaka(nn, scale = Scale, shape = shape))
 fit <- vglm(y2 ~ 1, nakagami(iscale = 3), data = ndata, trace = TRUE)
 head(fitted(fit))
 with(ndata, mean(y2))
 coef(fit, matrix = TRUE)
 (Cfit <- Coef(fit))
-\dontrun{ with(ndata,
-hist(sy <- sort(y2), prob = TRUE, main = "", xlab = "y", ylim = c(0, 0.6)))
-lines(dnaka(sy, shape = Cfit[1], scale = Cfit[2]) ~ sy, data = ndata, col = "orange") }
+\dontrun{ sy <- with(ndata, sort(y2))
+hist(with(ndata, y2), prob = TRUE, main = "", xlab = "y", ylim = c(0, 0.6),
+     col = "lightblue")
+lines(dnaka(sy, scale = Cfit["scale"], shape = Cfit["shape"]) ~ sy,
+      data = ndata, col = "orange") }
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
index 8efc6e9..7dea8ab 100644
--- a/man/nakagamiUC.Rd
+++ b/man/nakagamiUC.Rd
@@ -12,17 +12,17 @@
 
 }
 \usage{
-dnaka(x, shape, scale = 1, log = FALSE)
-pnaka(q, shape, scale = 1)
-qnaka(p, shape, scale = 1, ...)
-rnaka(n, shape, scale = 1, Smallno = 1.0e-6)
+dnaka(x, scale = 1, shape, log = FALSE)
+pnaka(q, scale = 1, shape)
+qnaka(p, scale = 1, shape, ...)
+rnaka(n, scale = 1, shape, Smallno = 1.0e-6)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
     Must be a positive integer of length 1.}
-  \item{shape, scale }{
+  \item{scale, shape}{
   arguments for the parameters of the distribution.
   See \code{\link{nakagami}} for more details.
   For \code{rnaka}, arguments \code{shape} and \code{scale} must be of
@@ -74,7 +74,7 @@ rnaka(n, shape, scale = 1, Smallno = 1.0e-6)
 \examples{
 \dontrun{ x <- seq(0, 3.2, len = 200)
 plot(x, dgamma(x, shape = 1), type = "n", col = "black", ylab = "",
-     ylim = c(0,1.5), main = "dnaka(x, shape)")
+     ylim = c(0,1.5), main = "dnaka(x, shape = shape)")
 lines(x, dnaka(x, shape = 1), col = "orange")
 lines(x, dnaka(x, shape = 2), col = "blue")
 lines(x, dnaka(x, shape = 3), col = "green")
@@ -82,7 +82,7 @@ legend(2, 1.0, col = c("orange","blue","green"), lty = rep(1, len = 3),
        legend = paste("shape =", c(1, 2, 3)))
 
 plot(x, pnorm(x), type = "n", col = "black", ylab = "",
-     ylim = 0:1, main = "pnaka(x, shape)")
+     ylim = 0:1, main = "pnaka(x, shape = shape)")
 lines(x, pnaka(x, shape = 1), col = "orange")
 lines(x, pnaka(x, shape = 2), col = "blue")
 lines(x, pnaka(x, shape = 3), col = "green")
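
As with dmakeham(), scale now precedes shape in d/p/q/rnaka, so positional
calls change meaning.  A round-trip sketch with both arguments named,
assuming VGAM 0.9-5:

library(VGAM)
probs <- seq(0.1, 0.9, by = 0.1)
Q <- qnaka(probs, scale = exp(1), shape = exp(0))           # scale first now
max(abs(pnaka(Q, scale = exp(1), shape = exp(0)) - probs))  # Should be ~0
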
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index e3e13fe..20dbd55 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -1,6 +1,7 @@
 \name{negbinomial}
 \alias{negbinomial}
 \alias{polya}
+\alias{polyaR}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Negative Binomial Distribution Family Function }
 \description{
@@ -13,10 +14,13 @@ negbinomial(lmu = "loge", lsize = "loge",
             imu = NULL, isize = NULL, probs.y = 0.75,
             nsimEIM = 100, cutoff = 0.995,
             Maxiter = 5000, deviance.arg = FALSE, imethod = 1,
-            parallel = FALSE, shrinkage.init = 0.95, zero = -2)
+            parallel = FALSE, ishrinkage = 0.95, zero = -2)
 polya(lprob = "logit", lsize = "loge",
       iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
-      imethod = 1, shrinkage.init = 0.95, zero = -2)
+      imethod = 1, ishrinkage = 0.95, zero = -2)
+polyaR(lsize = "loge", lprob = "logit", 
+       isize = NULL, iprob = NULL, probs.y = 0.75, nsimEIM = 100,
+       imethod = 1, ishrinkage = 0.95, zero = -1)
 }
 
 %     deviance.arg = FALSE,
@@ -119,7 +123,7 @@ polya(lprob = "logit", lsize = "loge",
   An integer with value \code{1} or \code{2} or \code{3} which
   specifies the initialization method for the \eqn{\mu}{mu} parameter.
   If failure to converge occurs try another value
-  and/or else specify a value for \code{shrinkage.init}
+  and/or else specify a value for \code{ishrinkage}
   and/or else specify a value for \code{isize}.
 
 
@@ -135,7 +139,7 @@ polya(lprob = "logit", lsize = "loge",
 
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   How much shrinkage is used when initializing \eqn{\mu}{mu}.
   The value must be between 0 and 1 inclusive, and
   a value of 0 means the individual response values are used,
@@ -196,10 +200,22 @@ polya(lprob = "logit", lsize = "loge",
   \deqn{f(y;p,k) ~=~ {y + k - 1 \choose y} \,
     \left( 1 - p \right)^y\,
     p^k }{%
-    f(y;p,k) = C_{y}^{y + k - 1}
+    f(y;k,p) = C_{y}^{y + k - 1}
     [1 - p]^y p^k}
   where \eqn{y=0,1,2,\ldots},
-  and \eqn{0 < p < 1}{0 < p < 1} and \eqn{k > 0}.
+  and \eqn{k > 0} and \eqn{0 < p < 1}{0 < p < 1}. 
+
+
+  Family function \code{polyaR()} is the same as \code{polya()} except
+  that the order of the two parameters is switched.
+  The reason is that \code{polyaR()} tries to match with
+  \code{\link[stats:NegBinomial]{rnbinom}} closely
+  in terms of the argument order, etc.
+  Should the probability parameter be of primary interest,
+  users will probably prefer using \code{polya()} rather than
+  \code{polyaR()}.
+  Possibly \code{polyaR()} will be decommissioned one day.
+
 
 
   The negative binomial distribution can be coerced into the
@@ -294,7 +310,7 @@ Fitting the negative binomial distribution to biological data.
 % very well.
 
 
-  These two functions implement two common parameterizations
+  These three functions implement two common parameterizations
   of the negative binomial (NB). Some people called the
   NB with integer \eqn{k} the \emph{Pascal} distribution,
   whereas if \eqn{k} is real then this is the \emph{Polya}
@@ -308,6 +324,7 @@ Fitting the negative binomial distribution to biological data.
   or \code{\link{cao}}.
 
 
+
   Suppose the response is called \code{ymat}.
   For \code{negbinomial()}
   the diagonal element of the \emph{expected information matrix}
@@ -329,7 +346,7 @@ Fitting the negative binomial distribution to biological data.
   If convergence failure occurs, try using arguments
   (in recommended decreasing order)
   \code{nsimEIM},
-  \code{shrinkage.init},
+  \code{ishrinkage},
   \code{imethod},
   \code{Maxiter}, 
   \code{cutoff},
@@ -400,7 +417,7 @@ Fitting the negative binomial distribution to biological data.
   \code{\link{negbinomial.size}} (e.g., NB-G),
   \code{\link{nbcanlink}} (NB-C),
   \code{\link{posnegbinomial}},
-  \code{\link{invbinomial}},
+  \code{\link{inv.binomial}},
   \code{\link[stats:NegBinomial]{rnbinom}},
   \code{\link{nbolf}},
   \code{\link{rrvglm}},
@@ -453,8 +470,8 @@ nb1 <- vglm(y3 ~ x2 + x3, negbinomial(parallel = TRUE, zero = NULL),
             data = mydata, trace = TRUE)
 # Extracting out some quantities:
 cnb1 <- coef(nb1, matrix = TRUE)
-mydiff <- (cnb1["(Intercept)", "log(size)"] -
-           cnb1["(Intercept)", "log(mu)"])
+mydiff <- (cnb1["(Intercept)", "loge(size)"] -
+           cnb1["(Intercept)", "loge(mu)"])
 delta0.hat <- exp(mydiff)
 (phi.hat <- 1 + 1 / delta0.hat)  # MLE of phi
 summary(nb1)
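
The new polyaR() is documented above as polya() with the two parameters
switched, to match rnbinom()'s size-then-prob argument order.  A minimal
sketch comparing the two on the same data, assuming VGAM 0.9-5; the
coefficient matrices should differ only in column order:

library(VGAM)
set.seed(1)
ndata <- data.frame(y = rnbinom(1000, size = exp(1), prob = 0.4))
fit.p  <- vglm(y ~ 1, polya,  data = ndata, trace = TRUE)
fit.pR <- vglm(y ~ 1, polyaR, data = ndata, trace = TRUE)
coef(fit.p,  matrix = TRUE)  # prob then size
coef(fit.pR, matrix = TRUE)  # size then prob
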
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index 3d102e6..8b308e7 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -10,7 +10,7 @@
 \usage{
 negbinomial.size(size = Inf, lmu = "loge", imu = NULL,
                  probs.y = 0.75, imethod = 1,
-                 shrinkage.init = 0.95, zero = NULL)
+                 ishrinkage = 0.95, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -39,7 +39,7 @@ negbinomial.size(size = Inf, lmu = "loge", imu = NULL,
 
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   Same as \code{\link{negbinomial}}.
 
 
diff --git a/man/normal.vcm.Rd b/man/normal.vcm.Rd
index 40dde16..2c51f40 100644
--- a/man/normal.vcm.Rd
+++ b/man/normal.vcm.Rd
@@ -83,9 +83,9 @@ of such models have been named \emph{varying-coefficient models} (VCMs).
   through argument \code{formula}.
 
 
-  The \code{\link{mlogit}} link allows a subset of the
+  The \code{\link{multilogit}} link allows a subset of the
   coefficients to be positive and add to unity.  Either
-  none or more than one call to \code{\link{mlogit}}
+  none or more than one call to \code{\link{multilogit}}
   is allowed. The last variable will be used as the
   baseline/reference group, and therefore excluded from
   the estimation.
@@ -187,14 +187,14 @@ of such models have been named \emph{varying-coefficient models} (VCMs).
 }
 \examples{
 ndata <- data.frame(x2 = runif(nn <- 2000))
-# Note that coeff1 + coeff2 + coeff5 == 1. So try a "mlogit" link.
+# Note that coeff1 + coeff2 + coeff5 == 1. So try a "multilogit" link.
 myoffset <- 10
 ndata <- transform(ndata,
-           coeff1 = 0.25,  # "mlogit" link
-           coeff2 = 0.25,  # "mlogit" link
+           coeff1 = 0.25,  # "multilogit" link
+           coeff2 = 0.25,  # "multilogit" link
            coeff3 = exp(-0.5),  # "loge" link
            coeff4 = logoff(+0.5, offset = myoffset, inverse = TRUE),  # "logoff" link
-           coeff5 = 0.50,  # "mlogit" link
+           coeff5 = 0.50,  # "multilogit" link
            coeff6 = 1.00,  # "identitylink" link
            v2 = runif(nn),
            v3 = runif(nn),
@@ -225,12 +225,12 @@ ndata <- transform(ndata,
 # An intercept-only model
 fit1 <- vglm(y1 ~ 1,
              form2 = ~ 1 + v2 + v3 + v4 + v5 + v6,
-             normal.vcm(link.list = list("(Intercept)" = "mlogit",
-                                         "v2"          = "mlogit",
+             normal.vcm(link.list = list("(Intercept)" = "multilogit",
+                                         "v2"          = "multilogit",
                                          "v3"          = "loge",
                                          "v4"          = "logoff",
                                          "(Default)"   = "identitylink",
-                                         "v5"          = "mlogit"),
+                                         "v5"          = "multilogit"),
                         earg.list = list("(Intercept)" = list(),
                                          "v2"          = list(),
                                          "v4"          = list(offset = myoffset),
@@ -242,17 +242,17 @@ fit1 <- vglm(y1 ~ 1,
 coef(fit1, matrix = TRUE)
 summary(fit1)
 # This works only for intercept-only models:
-mlogit(rbind(coef(fit1, matrix = TRUE)[1, c(1, 2)]), inverse = TRUE)
+multilogit(rbind(coef(fit1, matrix = TRUE)[1, c(1, 2)]), inverse = TRUE)
 
 # A model with covariate x2 for the regression coefficients
 fit2 <- vglm(y2 ~ 1 + x2,
              form2 = ~ 1 + v2 + v3 + v4 + v5 + v6,
-             normal.vcm(link.list = list("(Intercept)" = "mlogit",
-                                         "v2"          = "mlogit",
+             normal.vcm(link.list = list("(Intercept)" = "multilogit",
+                                         "v2"          = "multilogit",
                                          "v3"          = "logit",
                                          "v4"          = "loglog",
                                          "(Default)"   = "identitylink",
-                                         "v5"          = "mlogit"),
+                                         "v5"          = "multilogit"),
                         earg.list = list("(Intercept)" = list(),
                                          "v2"          = list(),
                                          "v3"          = list(),
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index eb72730..eecb3d6 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,7 +2,20 @@
 \alias{notdocumentedyet}
 %
 %
-% 201405;
+% 201408;
+\alias{dlevy}
+\alias{plevy}
+\alias{qlevy}
+\alias{rlevy}
+% 201407; expected.betabin.ab is needed for zibetabinomialff() in YBook.
+\alias{grid.search}
+\alias{expected.betabin.ab}
+% 201406;
+\alias{interleave.VGAM}
+\alias{marcumQ}
+\alias{QR.Q}
+\alias{QR.R}
+%
 %\alias{sm.bs}
 %\alias{sm.ns}
 %\alias{sm.poly}
@@ -30,7 +43,8 @@
 %
 % 201307;
 \alias{posnormal.control}
-\alias{recnormal.control}
+\alias{rec.normal.control}
+\alias{rec.exp1.control}
 %\alias{kendall.tau}
 %\alias{binormalcop}
 %\alias{dbinormcop}
@@ -52,7 +66,7 @@
 \alias{Rank}
 \alias{Rank.rrvglm}
 \alias{Rank.qrrvglm}
-\alias{Rank.cao}
+\alias{Rank.rrvgam}
 % 20121015; delete this later
 %\alias{huggins91.old}
 %
@@ -116,11 +130,11 @@
 \alias{vcovvlm}
 \alias{VGAMenv}
 \alias{nobs}
-\alias{show.Coef.cao}
+\alias{show.Coef.rrvgam}
 \alias{show.Coef.qrrvglm}
 \alias{show.Coef.rrvglm}
 \alias{show.rrvglm}
-\alias{show.summary.cao}
+\alias{show.summary.rrvgam}
 % \alias{show.summary.lms}
 \alias{show.summary.qrrvglm}
 % \alias{show.summary.rc.exponential}
@@ -185,7 +199,7 @@
 \alias{Confint.rrnb}
 \alias{Confint.nb1}
 %\alias{gala}
-% \alias{mmt}
+% \alias{melbmaxtemp}
 %
 %
 %
@@ -201,6 +215,7 @@
 %\alias{moffset}     % Has been written
 % \alias{Qvar}
 \alias{plotqvar}
+\alias{qvplot}
 \alias{depvar.vlm}
 %
 %
@@ -228,7 +243,7 @@
 %
 %
 %
-\alias{A1A2A3}
+% \alias{A1A2A3.orig}  20140909; same as A1A2A3(hwe = TRUE)
 \alias{AAaa.nohw}
 %\alias{AIC}
 %\alias{AIC.qrrvglm}
@@ -238,7 +253,7 @@
 %\alias{AIC.vlm}
 % \alias{Build.terms}
 \alias{Build.terms.vlm}
-\alias{Coef.cao}
+\alias{Coef.rrvgam}
 \alias{Coefficients}
 \alias{Cut}
 \alias{Deviance.categorical.data.vgam}
@@ -290,9 +305,9 @@
 \alias{car.all}
 \alias{care.exp}
 %
-\alias{concoef.Coef.cao}
+\alias{concoef.Coef.rrvgam}
 \alias{concoef.Coef.qrrvglm}
-\alias{concoef.cao}
+\alias{concoef.rrvgam}
 \alias{concoef.qrrvglm}
 %
 \alias{cdf}
@@ -300,9 +315,9 @@
 \alias{cdf.lms.bcn}
 \alias{cdf.lms.yjn}
 \alias{cdf.vglm}
-% \alias{cm.nointercept.vgam}
-% \alias{cm.vgam}
-% \alias{cm.zero.vgam}
+\alias{cm.VGAM}
+\alias{cm.zero.VGAM}
+\alias{cm.nointercept.VGAM}
 \alias{coefficients}
 \alias{coefqrrvglm}
 % \alias{coefvlm}  % 20140124
@@ -324,6 +339,7 @@
 %\alias{deviance.uqo}
 %\alias{deviance.vglm}
 \alias{deviance.vlm}
+\alias{deviance.qrrvglm}
 %\alias{df.residual}
 %\alias{df.residual_vlm}
 \alias{dimm}
@@ -342,7 +358,6 @@
 % \alias{effects.vlm}
 % \alias{eifun}
 % \alias{eijfun}
-\alias{erfc}
 \alias{eta2theta}
 %\alias{explink}
 % \alias{extract.arg}
@@ -379,14 +394,13 @@
 % \alias{gleg.weight.yjn.13}
 \alias{glm}
 % \alias{hypersecant}
-% \alias{hypersecant.1}
+% \alias{hypersecant01}
 % \alias{ima}
-% \alias{interleave.VGAM}
-% \alias{invbinomial}
+% \alias{inv.binomial}
 \alias{inverse.gaussianff}
 \alias{is.Numeric}
 \alias{is.bell}
-\alias{is.bell.cao}
+\alias{is.bell.rrvgam}
 \alias{is.bell.qrrvglm}
 \alias{is.bell.rrvglm}
 \alias{is.bell.vlm}
@@ -403,14 +417,14 @@
 \alias{lmscreg.control}
 % \alias{logLik.vlm}
 \alias{logLik.qrrvglm}
-% \alias{lv.Coef.cao} 20090505
+% \alias{lv.Coef.rrvgam} 20090505
 \alias{latvar.Coef.qrrvglm}
-\alias{latvar.cao}
+\alias{latvar.rrvgam}
 \alias{latvar.rrvglm}
 \alias{latvar.qrrvglm}
-\alias{lvplot.cao}
-\alias{m2adefault}
-\alias{m2avglm}
+\alias{lvplot.rrvgam}
+\alias{m2a}
+%\alias{m2avglm}
 % \alias{matrix.power}
 \alias{mbesselI0}
 \alias{mix2exp.control}
@@ -444,12 +458,12 @@
 \alias{nvar.vgam}
 \alias{nvar.rrvglm}
 \alias{nvar.qrrvglm}
-\alias{nvar.cao}
+\alias{nvar.rrvgam}
 \alias{nvar.rcim}
 % \alias{num.deriv.rrr}
 \alias{persp}
-\alias{persp.cao}
-\alias{plot.cao}
+\alias{persp.rrvgam}
+\alias{plot.rrvgam}
 \alias{plotpreplotvgam}
 %\alias{plotvglm}
 \alias{plotvlm}
@@ -457,7 +471,7 @@
 % \alias{pnorm2} done 20120910
 % \alias{poissonqn}
 \alias{predict}
-\alias{predict.cao}
+\alias{predict.rrvgam}
 \alias{predict.glm}
 \alias{predict.lm}
 \alias{predict.mlm}
@@ -466,7 +480,7 @@
 %\alias{predict.uqo}
 \alias{predict.vgam}
 \alias{predict.vlm}
-\alias{predictcao}
+\alias{predictrrvgam}
 \alias{predictors}
 \alias{predictors.vglm}
 \alias{predictvsmooth.spline}
@@ -474,9 +488,9 @@
 % \alias{preplotvgam}
 \alias{print}
 \alias{procVec}
-\alias{negzero.expression}
-\alias{process.binomial2.data.vgam}
-\alias{process.categorical.data.vgam}
+\alias{negzero.expression.VGAM}
+\alias{process.binomial2.data.VGAM}
+\alias{process.categorical.data.VGAM}
 % \alias{process.constraints}
 % \alias{proj.vgam}
 % \alias{proj.vglm}
@@ -529,14 +543,23 @@
 \alias{ResSS.vgam}
 \alias{s.vam}
 \alias{simple.exponential}
+\alias{better.exponential}
 \alias{simple.poisson}
 \alias{size.binomial}
-\alias{stdze1}
-\alias{stdze2}
+%
+%
+\alias{sm.min1}
+\alias{sm.min2}
+\alias{sm.scale1}
+\alias{sm.scale2}
+%\alias{stdze1}
+%\alias{stdze2}
+%
+%
 % \alias{step.vgam}
 % \alias{step.vglm}
 % \alias{subconstraints}
-\alias{summary.cao}
+\alias{summary.rrvgam}
 \alias{summary.grc}
 \alias{summary.lms}
 \alias{summary.qrrvglm}
@@ -556,6 +579,7 @@
 % \alias{uqo.fit}
 % \alias{valid.vglmff}
 % \alias{valid.vknotl2}
+\alias{valt.control}
 % \alias{valt}
 % \alias{valt.1iter}
 % \alias{valt.2iter}
@@ -607,7 +631,7 @@
 %
 %
 %\alias{Coef.uqo-class}
-\alias{cao-class}
+\alias{rrvgam-class}
 \alias{rcim0-class}
 \alias{rcim-class}
 \alias{grc-class}
@@ -626,8 +650,8 @@
 \alias{vlmsmall-class}
 \alias{vsmooth.spline-class}
 \alias{vsmooth.spline.fit-class}
-\alias{Coef.cao-class}
-\alias{summary.cao-class}
+\alias{Coef.rrvgam-class}
+\alias{summary.rrvgam-class}
 %
 %
 %- Also NEED an '\alias' for EACH other topic documented here.
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index 79dcf2f..dd232d4 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -84,9 +84,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
     \code{\link{betaII}},
     \code{\link{dagum}},
     \code{\link{fisk}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{lomax}},
-    \code{\link{invparalogistic}}.
+    \code{\link{inv.paralogistic}}.
 
 }
 
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
index e1dcfbd..d9a5959 100644
--- a/man/paretoIV.Rd
+++ b/man/paretoIV.Rd
@@ -12,8 +12,7 @@
 
 }
 \usage{
-paretoIV(location = 0, lscale = "loge", linequality = "loge",
-         lshape = "loge",
+paretoIV(location = 0, lscale = "loge", linequality = "loge", lshape = "loge",
          iscale = 1, iinequality = 1, ishape = NULL, imethod = 1)
 paretoIII(location = 0, lscale = "loge", linequality = "loge",
           iscale = NULL, iinequality = NULL)
@@ -84,8 +83,13 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
   inequality, provided \eqn{g \leq 1}{g<=1}.
 
 
-  The fitted values are currently \code{NA} because I
-  haven't worked out what the mean of \eqn{Y} is yet.
+  The fitted values are currently the median, e.g.,
+  \code{\link{qparetoIV}} is used for \code{paretoIV()}.
+
+
+
+% The fitted values are currently \code{NA} because I
+% haven't worked out what the mean of \eqn{Y} is yet.
 
 
 % The mean of \eqn{Y} is
@@ -126,6 +130,14 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
 }
 \references{ 
 
+
+Johnson N. L., Kotz S., and Balakrishnan N. (1994)
+\emph{Continuous Univariate Distributions, Volume 1},
+2nd ed.
+New York: Wiley.
+
+
+
 Brazauskas, V. (2003)
 Information matrix for Pareto(IV), Burr, and related distributions.
 \emph{Comm. Statist. Theory and Methods}
@@ -145,16 +157,22 @@ Fairland, Maryland: International Cooperative Publishing House.
 
 
 }
-%\section{Warning }{
-%  The Pareto(IV) distribution is very general,
-%  for example, special cases include the Pareto(I), Pareto(II),
-%  Pareto(III), and Burr family of distributions. Consequently, reasonably
-%  good initial values are recommended, and convergence to a local solution
-%  may occur. For this reason setting \code{trace=TRUE} is a good idea
-%  for monitoring the convergence.
-%  Large samples are ideally required to get reasonable results.
-%
-%}
+\section{Warning }{
+   The Pareto(IV) distribution is very general,
+   for example, special cases include the Pareto(I), Pareto(II),
+   Pareto(III), and Burr family of distributions.
+   [Johnson et al. (1994) say on p.19 that fitting Type IV by ML is
+   very difficult and rarely attempted.]
+   Consequently, reasonably good initial values are recommended,
+   and convergence to a local solution may occur. For this
+   reason setting \code{trace=TRUE} is a good idea for monitoring
+   the convergence.  Large samples are ideally required to get
+   reasonable results.
+
+
+
+ 
+ }
 \seealso{
   \code{\link{ParetoIV}},
   \code{\link{paretoff}},
@@ -168,6 +186,8 @@ pdata <- data.frame(y = rparetoIV(2000, scale = exp(1),
 \dontrun{par(mfrow = c(2, 1))
 with(pdata, hist(y)); with(pdata, hist(log(y))) }
 fit <- vglm(y ~ 1, paretoIV, data = pdata, trace = TRUE)
+head(fitted(fit))
+summary(pdata)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
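
Since the fitted values are now the median, they can be checked against
qparetoIV() at p = 0.5.  A sketch, assuming VGAM 0.9-5 and that Coef()
returns the parameters under the names used below:

library(VGAM)
set.seed(1)
pdata <- data.frame(y = rparetoIV(2000, scale = exp(1),
                                  inequality = exp(-0.3), shape = exp(1)))
fit <- vglm(y ~ 1, paretoIV, data = pdata, trace = TRUE)
cc <- Coef(fit)  # Assumed names: "scale", "inequality", "shape"
med <- qparetoIV(0.5, scale = cc["scale"],
                 inequality = cc["inequality"], shape = cc["shape"])
max(abs(fitted(fit) - med))  # Should be ~0 if fitted values are the median
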
diff --git a/man/paretoff.Rd b/man/paretoff.Rd
index b9d0cd1..a88f6bf 100644
--- a/man/paretoff.Rd
+++ b/man/paretoff.Rd
@@ -11,7 +11,7 @@
 
 }
 \usage{
-paretoff(lshape = "loge", location = NULL)
+paretoff(scale = NULL, lshape = "loge")
 truncpareto(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -23,7 +23,7 @@ truncpareto(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
 
   }
-  \item{location}{
+  \item{scale}{
   Numeric.
   The parameter \eqn{\alpha}{alpha} below.
   If the user inputs a number then it is assumed known with this value.
@@ -68,12 +68,12 @@ truncpareto(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
 
   The Pareto distribution, which is used a lot in economics,
   has a probability density function that can be written
-  \deqn{f(y;k,\alpha) = k  \alpha^k / y^{k+1}}{%
-        f(y;k,alpha) = k * alpha^k / y^(k+1)}
-  for \eqn{0<k} and \eqn{0 < \alpha < y}{0< alpha < y}.
-  The \eqn{\alpha}{alpha} is called the location parameter, and
+  \deqn{f(y;\alpha,k) = k  \alpha^k / y^{k+1}}{%
+        f(y;alpha,k) = k * alpha^k / y^(k+1)}
+  for \eqn{0 < \alpha < y}{0 < alpha < y} and \eqn{0 < k}.
+  The \eqn{\alpha}{alpha} is called the \emph{scale} parameter, and
   it is either assumed \emph{known} or else \code{min(y)} is used.
-  The parameter \eqn{k} is called the shape parameter.
+  The parameter \eqn{k} is called the \emph{shape} parameter.
   The mean of \eqn{Y} is
   \eqn{\alpha k/(k-1)}{alpha*k/(k-1)} provided \eqn{k > 1}.
   Its variance is
@@ -148,8 +148,8 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
   The usual or unbounded Pareto distribution has two
   parameters (called \eqn{\alpha}{alpha} and \eqn{k} here)
   but the family function \code{paretoff} estimates only
-  \eqn{k} using iteratively reweighted least squares. The
-  MLE of the \eqn{\alpha}{alpha} parameter lies on the
+  \eqn{k} using iteratively reweighted least squares.
+  The MLE of the \eqn{\alpha}{alpha} parameter lies on the
   boundary and is \code{min(y)} where \code{y} is the
   response. Consequently, using the default argument
   values, the standard errors are incorrect when one does a
@@ -165,13 +165,14 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
   \code{\link{Pareto}},
   \code{\link{Truncpareto}},
   \code{\link{paretoIV}},
-  \code{\link{gpd}}.
+  \code{\link{gpd}},
+  \code{\link{benini1}}.
 
 
 }
 \examples{
 alpha <- 2; kay <- exp(3)
-pdata <- data.frame(y = rpareto(n = 1000, location = alpha, shape = kay))
+pdata <- data.frame(y = rpareto(n = 1000, scale = alpha, shape = kay))
 fit <- vglm(y ~ 1, paretoff, data = pdata, trace = TRUE)
fit@extra  # The estimate of alpha is here
 head(fitted(fit))
@@ -180,7 +181,7 @@ coef(fit, matrix = TRUE)
 summary(fit)  # Standard errors are incorrect!!
 
 # Here, alpha is assumed known
-fit2 <- vglm(y ~ 1, paretoff(location = alpha), data = pdata, trace = TRUE)
+fit2 <- vglm(y ~ 1, paretoff(scale = alpha), data = pdata, trace = TRUE)
fit2@extra  # alpha stored here
 head(fitted(fit2))
 coef(fit2, matrix = TRUE)
@@ -190,7 +191,7 @@ summary(fit2)  # Standard errors are okay
 lower <- 2; upper <- 8; kay <- exp(2)
 pdata3 <- data.frame(y = rtruncpareto(n = 100, lower = lower,
                                       upper = upper, shape = kay))
-fit3 <- vglm(y ~ 1, truncpareto(lower, upper), pdata3, trace = TRUE)
+fit3 <- vglm(y ~ 1, truncpareto(lower, upper), data = pdata3, trace = TRUE)
 coef(fit3, matrix = TRUE)
 c(fit3 at misc$lower, fit3 at misc$upper)
 }
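
A base-R check of two facts stated above, with no \pkg{VGAM} calls:
the mean alpha*k/(k-1) for k > 1, and min(y) as the boundary MLE of
alpha (together with the closed-form MLE of k given alpha).

# Pareto CDF is F(y) = 1 - (alpha/y)^k, so Y = alpha * U^(-1/k), U ~ Unif(0, 1)
set.seed(1)
alpha <- 2; kay <- exp(3); nn <- 1e5
y <- alpha * runif(nn)^(-1/kay)
c(sample = mean(y), theory = alpha * kay / (kay - 1))  # k > 1, so mean exists
min(y)                    # Boundary MLE of alpha; just above the true value 2
nn / sum(log(y / alpha))  # MLE of k given alpha; close to exp(3)
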
diff --git a/man/perks.Rd b/man/perks.Rd
index ee82bbe..f15cd2b 100644
--- a/man/perks.Rd
+++ b/man/perks.Rd
@@ -8,14 +8,17 @@
 
 }
 \usage{
-perks(lshape = "loge", lscale = "loge",
-      ishape = NULL,   iscale = NULL,
-      gshape = exp(-5:5), gscale = exp(-5:5),
-      nsimEIM = 500, oim.mean = FALSE, zero = NULL)
+perks(lscale = "loge", lshape = "loge",
+      iscale = NULL,   ishape = NULL,
+      gscale = exp(-5:5), gshape = exp(-5:5), 
+      nsimEIM = 500, oim.mean = FALSE, zero = NULL, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape, lscale}{
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
+  \item{lscale, lshape}{
  Parameter link functions applied to the
  scale parameter \code{scale} and
  shape parameter \code{shape}.
@@ -31,13 +34,13 @@ perks(lshape = "loge", lscale = "loge",
 % }
 
 
-  \item{ishape, iscale}{
+  \item{iscale, ishape}{
   Optional initial values.
   A \code{NULL} means a value is computed internally.
 
 
   }
-  \item{gshape, gscale}{
+  \item{gscale, gshape}{
   See \code{\link{CommonVGAMffArguments}}.
 
 
@@ -132,9 +135,7 @@ pdata <- transform(pdata, eta1  = -1,
                           ceta1 =  1)
 pdata <- transform(pdata, shape1 = exp(eta1),
                           scale1 = exp(ceta1))
-pdata <- transform(pdata,
-                   y1 = rperks(nn, shape = shape1, scale = scale1))
-
+pdata <- transform(pdata, y1 = rperks(nn, shape = shape1, scale = scale1))
 fit1 <- vglm(y1 ~ 1, perks, data = pdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 summary(fit1)
diff --git a/man/perksUC.Rd b/man/perksUC.Rd
index effcc0f..ea90e47 100644
--- a/man/perksUC.Rd
+++ b/man/perksUC.Rd
@@ -14,10 +14,10 @@
 
 }
 \usage{
-dperks(x, shape, scale = 1, log = FALSE)
-pperks(q, shape, scale = 1)
-qperks(p, shape, scale = 1)
-rperks(n, shape, scale = 1)
+dperks(x, scale = 1, shape, log = FALSE)
+pperks(q, scale = 1, shape)
+qperks(p, scale = 1, shape)
+rperks(n, scale = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index e43f9d7..5b3fecc 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -8,7 +8,7 @@ applicable for rank-1 or rank-2 models with argument \code{noRRR = ~ 1}.
 
 }
 \usage{
-perspqrrvglm(x, varI.latvar = FALSE, reference = NULL, show.plot = TRUE, 
+perspqrrvglm(x, varI.latvar = FALSE, refResponse = NULL, show.plot = TRUE, 
              xlim = NULL, ylim = NULL, zlim = NULL,
              gridlength = if (Rank == 1) 301 else c(51,51),
              which.species = NULL,
@@ -31,7 +31,7 @@ perspqrrvglm(x, varI.latvar = FALSE, reference = NULL, show.plot = TRUE,
   Logical that is fed into \code{\link{Coef.qrrvglm}}.
 
   }
-  \item{reference}{
+  \item{refResponse}{
   Integer or character that is fed into \code{\link{Coef.qrrvglm}}.
 
   }
@@ -218,16 +218,16 @@ r2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
           isd.lv = c(2.4, 1.0), Muxfactor = 3.0, trace = FALSE,
           poissonff, data = hspider, Rank = 2, eq.tolerances = TRUE)
 
-sort(r1@misc$deviance.Bestof)  # A history of the fits
-sort(r2@misc$deviance.Bestof)  # A history of the fits
+sort(deviance(r1, history = TRUE))  # A history of all the fits
+sort(deviance(r2, history = TRUE))  # A history of all the fits
 if (deviance(r2) > 857) stop("suboptimal fit obtained")
 
-persp(r1, xlim = c(-6,5), col = 1:4, label = TRUE)
+persp(r1, xlim = c(-6, 5), col = 1:4, label = TRUE)
 
 # Involves all species 
-persp(r2, xlim = c(-6,5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220))
+persp(r2, xlim = c(-6, 5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220))
 # Omit the two dominant species to see what is behind them
-persp(r2, xlim = c(-6,5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220),
+persp(r2, xlim = c(-6, 5), ylim = c(-4, 5), theta = 10, phi = 20, zlim = c(0, 220),
       which = (1:10)[-c(8, 10)])  # Use zlim to retain the original z-scale
 }
 }
diff --git a/man/plotrcim0.Rd b/man/plotrcim0.Rd
index 1030c52..bf99532 100644
--- a/man/plotrcim0.Rd
+++ b/man/plotrcim0.Rd
@@ -208,11 +208,11 @@
 alcoff.e <- moffset(alcoff, "6", "Mon", postfix = "*")  # Effective day
 fit0 <- rcim(alcoff.e, family = poissonff)
 \dontrun{par(oma = c(0, 0, 4, 0), mfrow = 1:2)  # For all plots below too
-ii = plot(fit0, rcol = "blue", ccol = "orange",
-          lwd = 4, ylim = c(-2, 2),  # A common ylim
-          cylab = "Effective daily effects", rylab = "Hourly effects",
-          rxlab = "Hour", cxlab = "Effective day")
-ii@post # Endowed with additional information
+ii <- plot(fit0, rcol = "blue", ccol = "orange",
+           lwd = 4, ylim = c(-2, 2),  # A common ylim
+           cylab = "Effective daily effects", rylab = "Hourly effects",
+           rxlab = "Hour", cxlab = "Effective day")
+ii@post  # Endowed with additional information
 }
 
 # Negative binomial example
@@ -224,9 +224,9 @@ fit2 <- rcim(alcoff.e, uninormal, trace = TRUE)
 \dontrun{ plot(fit2, ylim = c(-200, 400)) }
 
 # Median-polish example
-fit3 <- rcim(alcoff.e, alaplace2(tau  =  0.5, intparloc  =  TRUE),
-             trace = TRUE)
-\dontrun{ plot(fit3, ylim = c(-200, 250)) }
+\dontrun{
+fit3 <- rcim(alcoff.e, alaplace1(tau = 0.5), maxit = 1000, trace = FALSE)
+plot(fit3, ylim = c(-200, 250)) }
 
 # Zero-inflated Poisson example on "crashp" (no 0s in alcoff)
 cbind(rowSums(crashp))  # Easy to see the data
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index 2b2e689..e24c92c 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -1,5 +1,6 @@
 \name{plotvgam}
 \alias{plotvgam}
+\alias{plot.vgam}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Default VGAM Plotting }
 \description{
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index bb58ecd..f7d2989 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -150,10 +150,10 @@ poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
     \code{\link{zipoisson}},
     \code{\link{skellam}},
     \code{\link{mix2poisson}},
-    \code{\link{cenpoisson}},
+    \code{\link{cens.poisson}},
     \code{\link{ordpoisson}},
     \code{\link{amlpoisson}},
-    \code{\link{invbinomial}},
+    \code{\link{inv.binomial}},
     \code{\link{simulate.vlm}},
     \code{\link{loge}},
     \code{\link{polf}},
diff --git a/man/posbernoulli.t.Rd b/man/posbernoulli.t.Rd
index 5605cf1..2404f52 100644
--- a/man/posbernoulli.t.Rd
+++ b/man/posbernoulli.t.Rd
@@ -153,6 +153,20 @@ capture--recapture experiments.
   \bold{62}, 3--135.
 
 
+Yee, T. W. and Stoklosa, J. and Huggins, R. M. (2014)
+The \pkg{VGAM} package for capture--recapture data using the
+conditional likelihood.
+In preparation.
+
+
+%\emph{Journal of Statistical Software},
+%\bold{32}, 1--34.
+%\url{http://www.jstatsoft.org/v32/i10/}.
+
+
+
+
+
 }
 \author{ Thomas W. Yee. }
 
diff --git a/man/posbernoulli.tb.Rd b/man/posbernoulli.tb.Rd
index 08c7bc1..b53624d 100644
--- a/man/posbernoulli.tb.Rd
+++ b/man/posbernoulli.tb.Rd
@@ -13,10 +13,8 @@
 
 }
 \usage{
-posbernoulli.tb(link = "logit",
-                parallel.t = FALSE ~ 1,
-                parallel.b = FALSE ~ 0,
-                drop.b     = FALSE ~ 1,
+posbernoulli.tb(link = "logit", parallel.t = FALSE ~ 1,
+                parallel.b = FALSE ~ 0, drop.b = FALSE ~ 1,
                 type.fitted = c("likelihood.cond", "mean.uncond"),
                 imethod = 1, iprob = NULL,
                 p.small = 1e-4, no.warning = FALSE,
@@ -188,6 +186,15 @@ posbernoulli.tb(link = "logit",
   \code{\link{posbernoulli.b}} or
   \code{\link{posbinomial}}.
 
+
+
+% yettodo:
+% Some time in the future it might be possible to allow for a
+% different tau value for each row.
+% Then the response would be a matrix padded with NAs on the RHS.
+
+
+
 }
 
 
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index bea6cef..f2002dd 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -10,7 +10,7 @@
 \usage{
 posnegbinomial(lmunb = "loge", lsize = "loge",
                isize = NULL, zero = -2, nsimEIM = 250,
-               shrinkage.init = 0.95, imethod = 1)
+               ishrinkage = 0.95, imethod = 1)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -46,7 +46,7 @@ posnegbinomial(lmunb = "loge", lsize = "loge",
 
 
   }
-  \item{shrinkage.init, imethod}{
+  \item{ishrinkage, imethod}{
   See \code{\link{negbinomial}}.
 
   }
diff --git a/man/powerlink.Rd b/man/powerlink.Rd
index 397977b..2b5319a 100644
--- a/man/powerlink.Rd
+++ b/man/powerlink.Rd
@@ -86,8 +86,8 @@ powerlink(x <- (-5):5, power = 0.5)  # Has NAs
 
 # 1/2 = 0.5
 pdata <- data.frame(y = rbeta(n = 1000, shape1 = 2^2, shape2 = 3^2))
-fit <- vglm(y ~ 1, beta.ab(lshape1 = powerlink(power = 0.5), i1 = 3,
-                           lshape2 = powerlink(power = 0.5), i2 = 7), pdata)
+fit <- vglm(y ~ 1, betaR(lshape1 = powerlink(power = 0.5), i1 = 3,
+                         lshape2 = powerlink(power = 0.5), i2 = 7), data = pdata)
 t(coef(fit, matrix = TRUE))
 Coef(fit)  # Useful for intercept-only models
 vcov(fit, untransform = TRUE)
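
For reference, a minimal sketch of what the power link above does, in
base R; pow() is an illustrative name, not the package's
\code{powerlink()}.

pow <- function(theta, power = 0.5, inverse = FALSE)
  if (inverse) theta^(1/power) else theta^power
theta <- c(0.04, 0.25, 1, 4)
eta <- pow(theta)         # Link scale: theta^0.5
pow(eta, inverse = TRUE)  # Round-trip recovers theta
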
diff --git a/man/prats.Rd b/man/prats.Rd
index 7b7f2ab..0c4cdf3 100644
--- a/man/prats.Rd
+++ b/man/prats.Rd
@@ -78,7 +78,7 @@ data(prats)
 }
 \seealso{
   \code{\link[VGAM]{betabinomial}},
-  \code{\link[VGAM]{betabinomial.ab}}.
+  \code{\link[VGAM]{betabinomialff}}.
 
 
 }
diff --git a/man/predictqrrvglm.Rd b/man/predictqrrvglm.Rd
index e462fb5..8fcb02d 100644
--- a/man/predictqrrvglm.Rd
+++ b/man/predictqrrvglm.Rd
@@ -11,7 +11,7 @@
 predictqrrvglm(object, newdata=NULL,
                type = c("link", "response", "latvar", "terms"),
                se.fit = FALSE, deriv = 0, dispersion = NULL,
-               extra = object@extra, varI.latvar = FALSE, reference = NULL, ...)
+               extra = object@extra, varI.latvar = FALSE, refResponse = NULL, ...)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -29,7 +29,7 @@ predictqrrvglm(object, newdata=NULL,
 
   }
   \item{deriv}{ Derivative. Currently only 0 is handled. }
-  \item{varI.latvar, reference}{
+  \item{varI.latvar, refResponse}{
   Arguments passed into \code{\link{Coef.qrrvglm}}.
 
 
@@ -83,7 +83,7 @@ set.seed(1234)
 # vvv                 Pardnigr, Pardpull, Trocterr, Zoraspin) ~
 # vvv       WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
 # vvv       poissonff, data = hspider, Crow1positive = FALSE, I.tol = TRUE)
-# vvv sort(p1@misc$deviance.Bestof)  # A history of all the iterations
+# vvv sort(deviance(p1, history = TRUE))  # A history of all the iterations
 
 # vvv head(predict(p1))
 
diff --git a/man/predictvglm.Rd b/man/predictvglm.Rd
index 75df01b..1a02d62 100644
--- a/man/predictvglm.Rd
+++ b/man/predictvglm.Rd
@@ -80,6 +80,12 @@ predictvglm(object, newdata = NULL,
   Logical. Reverses any parameter link function.
   This argument only works if
   \code{type = "link", se.fit = FALSE, deriv = 0}.
+  Setting \code{untransform = TRUE} does not work for
+  all \pkg{VGAM} family functions; only ones where there
+  is a one-to-one correspondence between a simple link function
+  and a simple parameter might work.
+
+
 
 
   }
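
A small base-R illustration of the one-to-one case that
\code{untransform = TRUE} relies on: each column of link-scale
predictions maps back through a single inverse link (a loge and a
logit column here; the numbers are made up).

eta <- cbind(loge.mu    = log(c(2, 5, 9)),
             logit.prob = qlogis(c(0.2, 0.5, 0.8)))
cbind(mu = exp(eta[, 1]), prob = plogis(eta[, 2]))  # Untransformed
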
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index fb45585..9099a8a 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -56,12 +56,12 @@ Here, \eqn{w = (y-a)q/b+\psi(1/q^2)}{w = (y-a)*q/b+psi(1/q^2)}
 where \eqn{\psi}{psi} is the digamma function,
 \code{\link[base:Special]{digamma}}.
 The mean of \eqn{Y} is \eqn{a} (returned as the fitted values).
-This is a different parameterization compared to \code{\link{lgamma3ff}}.
+This is a different parameterization compared to \code{\link{lgamma3}}.
 
 
 Special cases: 
 \eqn{q = 0} is the normal distribution with standard deviation \eqn{b},
-\eqn{q = -1} is the extreme value distribution for maxima,
+\eqn{q = -1} is the extreme value distribution for maximums,
 \eqn{q = 1} is the extreme value distribution for minima (Weibull).
 If \eqn{q > 0} then the distribution is left skew;
 if \eqn{q < 0} then it is right skew.
@@ -105,16 +105,16 @@ else \eqn{q < 0} is right skew.
 
 }
 \seealso{
-  \code{\link{lgamma3ff}},
+  \code{\link{lgamma3}},
   \code{\link[base:Special]{lgamma}},
-  \code{\link{gengamma}}.
+  \code{\link{gengamma.stacy}}.
 
 
 }
 \examples{
 pdata <- data.frame(x2 = runif(nn <- 1000))
 pdata <- transform(pdata, loc = -1 + 2*x2, Scale = exp(1))
-pdata <- transform(pdata, y = rlgamma(nn, loc = loc, scale = Scale, k = 1))
+pdata <- transform(pdata, y = rlgamma(nn, loc = loc, scale = Scale, shape = 1))
 fit <- vglm(y ~ x2, prentice74(zero = 2:3), data = pdata, trace = TRUE)
 coef(fit, matrix = TRUE)  # Note the coefficients for location
 }
diff --git a/man/put.smart.Rd b/man/put.smart.Rd
index 361b10a..58a1ca8 100644
--- a/man/put.smart.Rd
+++ b/man/put.smart.Rd
@@ -52,17 +52,7 @@ Nothing is returned.
 
 }
 \examples{
-"my1" <- function(x, minx = min(x)) { # Here is a smart function
-  x <- x  # Needed for nested calls, e.g., bs(scale(x))
-  if (smart.mode.is("read")) {
-    smart <- get.smart()
-    minx <- smart$minx  # Overwrite its value
-  } else
-    if (smart.mode.is("write"))
-      put.smart(list(minx = minx))
-  sqrt(x - minx)
-}
-attr(my1, "smart") <- TRUE
+print(sm.min1)
 }
 %\keyword{smart}
 \keyword{models}
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 4181198..04cd0e5 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -513,7 +513,7 @@ p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
                Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           quasipoissonff, data = hspider, eq.tolerances = TRUE)
-sort(p1@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1, history = TRUE))  # A history of all the iterations
 
 (isd.latvar <- apply(latvar(p1), 2, sd))  # Should be approx isd.latvar
  
@@ -524,7 +524,7 @@ p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
           WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
           I.tolerances = TRUE, quasipoissonff, data = hspider,
           isd.latvar = isd.latvar)  # Note the use of isd.latvar here
-sort(p1@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1, history = TRUE))  # A history of all the iterations
 }
 }
 \keyword{models}
@@ -541,7 +541,7 @@ sort(p1 at misc$deviance.Bestof)  # A history of all the iterations
 %          WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
 %          I.tol = FALSE, eq.tol = TRUE,  # A good idea for negbinomial
 %          fam = negbinomial, data = hspider)
-%sort(nb1@misc$deviance.Bestof)  # A history of all the iterations
+%sort(deviance(nb1, history = TRUE))  # A history of all the iterations
 %summary(nb1)
 %}
 %\dontrun{ lvplot(nb1, lcol = 1:12, y = TRUE, pcol = 1:12) }
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index 75e915d..77d022a 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -124,7 +124,7 @@ quasipoissonff()
 mydata <- rcqo(n, p, S, fam = "poisson", eq.tol = FALSE)
 myform <- attr(mydata, "formula")
 p1 <- cqo(myform, fam = quasipoissonff, eq.tol = FALSE, data = mydata)
-sort(p1@misc$deviance.Bestof)  # A history of all the iterations
+sort(deviance(p1, history = TRUE))  # A history of all the iterations
 lvplot(p1, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S)
 summary(p1)  # The dispersion parameters are estimated
 }}
diff --git a/man/qvar.Rd b/man/qvar.Rd
index 8269f74..763b28d 100644
--- a/man/qvar.Rd
+++ b/man/qvar.Rd
@@ -27,7 +27,7 @@ qvar(object, se = FALSE, ...)
 
 }
 \item{se}{
-  Logical. If \code{TRUE} then the quasi-variances are returned,
+  Logical. If \code{FALSE} then the quasi-variances are returned,
   else the square root of them, called quasi-standard errors.
 
 
@@ -53,7 +53,7 @@ qvar(object, se = FALSE, ...)
 \value{
   A vector of quasi-variances  or quasi-standard errors.
 
-  
+
 }
 %\references{
 %
@@ -97,15 +97,15 @@ Shipmodel <- vglm(incidents ~ type + year + period,
 
 # Easiest form of input
 fit1 <- rcim(Qvar(Shipmodel, "type"), uninormal("explink"), maxit = 99)
-qvar(fit1)              # Quasi-variances
-qvar(fit1, se = FALSE)  # Quasi-standard errors
+qvar(fit1)             # Quasi-variances
+qvar(fit1, se = TRUE)  # Quasi-standard errors
 
 # Manually compute them:
 (quasiVar <- exp(diag(fitted(fit1))) / 2)                 # Version 1
 (quasiVar <- diag(predict(fit1)[, c(TRUE, FALSE)]) / 2)   # Version 2
 (quasiSE  <- sqrt(quasiVar))
 
-\dontrun{ plotqvar(fit1, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
+\dontrun{ qvplot(fit1, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
 }
 % Add one or more standard keywords, see file 'KEYWORDS' in the
 % R documentation directory.
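
As a cross-check on the manual computations above, quasi-variances can
also be sketched directly in base R: choose q so that q_i + q_j matches
each contrast variance V_ii + V_jj - 2 V_ij in the least-squares sense
(Firth's idea; the 3 x 3 covariance matrix below is made up, and with
only 3 levels the fit is exact).

V <- matrix(c(0.20, 0.05, 0.02,
              0.05, 0.30, 0.04,
              0.02, 0.04, 0.25), nrow = 3)
ij <- combn(3, 2)  # All pairs of levels
d <- apply(ij, 2, function(p) V[p[1], p[1]] + V[p[2], p[2]] - 2 * V[p[1], p[2]])
A <- t(apply(ij, 2, function(p) replace(numeric(3), p, 1)))
(q <- drop(solve(crossprod(A), crossprod(A, d))))  # Quasi-variances
sqrt(q)                                            # Quasi-standard errors
cbind(exact = d, from.q = drop(A %*% q))           # Contrast variances agree
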
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index fd12669..3c6694d 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -1,6 +1,6 @@
 \name{rayleigh}
 \alias{rayleigh}
-\alias{cenrayleigh}
+\alias{cens.rayleigh}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{Rayleigh Distribution Family Function }
 \description{
@@ -11,7 +11,7 @@
 \usage{
    rayleigh(lscale = "loge", nrfs = 1/3 + 0.01,
             oim.mean = TRUE, zero = NULL)
-cenrayleigh(lscale = "loge", oim = TRUE)
+cens.rayleigh(lscale = "loge", oim = TRUE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -67,7 +67,7 @@ cenrayleigh(lscale = "loge", oim = TRUE)
   \eqn{b^2 (4-\pi)/2}{b^2 (4-pi)/2}.
 
 
-  The \pkg{VGAM} family function \code{cenrayleigh} handles right-censored
+  The \pkg{VGAM} family function \code{cens.rayleigh} handles right-censored
   data (the true value is greater than the observed value). To indicate
   which type of censoring, input \code{extra = list(rightcensored = vec2)}
   where \code{vec2} is a logical vector the same length as the response.
@@ -106,7 +106,16 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
   The  \code{\link{poisson.points}} family function is
   more general so that if \code{ostatistic = 1} and \code{dimension = 2}
   then it coincides with \code{\link{rayleigh}}.
-  Another related distribution is the Maxwell distribution.
+  Other related distributions are the Maxwell
+  and Weibull distributions.
+
+
+
+
+% http://www.math.uah.edu/stat/special/MultiNormal.html
+% The distribution of R is known as the Rayleigh distribution,
+%named for William Strutt, Lord Rayleigh. It is a member of the
+%family of Weibull distributions, named in turn for Wallodi Weibull.
 
 
 
@@ -117,6 +126,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
   \code{\link{genrayleigh}},
   \code{\link{riceff}},
   \code{\link{maxwell}},
+  \code{\link{weibullR}},
   \code{\link{poisson.points}},
   \code{\link{simulate.vlm}}.
 
@@ -137,7 +147,7 @@ rdata <- transform(rdata, y = pmin(U, ystar))
 \dontrun{ par(mfrow = c(1, 2))
 hist(with(rdata, ystar)); hist(with(rdata, y)) }
 extra <- with(rdata, list(rightcensored = ystar > U))
-fit <- vglm(y ~ 1, cenrayleigh, data = rdata, trace = TRUE, extra = extra)
+fit <- vglm(y ~ 1, cens.rayleigh, data = rdata, trace = TRUE, extra = extra)
table(fit@extra$rightcen)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
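
A quick base-R check of the variance b^2 (4 - pi)/2 quoted above (and
of the mean b*sqrt(pi/2)), simulating by inverse transform.

# F(y) = 1 - exp(-y^2 / (2 b^2)), so Y = b * sqrt(-2 * log(U))
set.seed(1)
b <- 2; y <- b * sqrt(-2 * log(runif(1e5)))
c(sample = mean(y), theory = b * sqrt(pi / 2))
c(sample = var(y),  theory = b^2 * (4 - pi) / 2)
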
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index 95c3a60..3685b6a 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -10,14 +10,14 @@ rcqo(n, p, S, Rank = 1,
      family = c("poisson", "negbinomial", "binomial-poisson",
                 "Binomial-negbinomial", "ordinal-poisson",
                 "Ordinal-negbinomial", "gamma2"),
-     eq.maxima = FALSE, eq.tolerances = TRUE, es.optima = FALSE,
-     lo.abundance = if (eq.maxima) hi.abundance else 10,
+     eq.maximums = FALSE, eq.tolerances = TRUE, es.optimums = FALSE,
+     lo.abundance = if (eq.maximums) hi.abundance else 10,
      hi.abundance = 100, sd.latvar = head(1.5/2^(0:3), Rank),
-     sd.optima = ifelse(es.optima, 1.5/Rank, 1) *
+     sd.optimums = ifelse(es.optimums, 1.5/Rank, 1) *
                        ifelse(scale.latvar, sd.latvar, 1),
      sd.tolerances = 0.25, Kvector = 1, Shape = 1,
-     sqrt.arg = FALSE, Log = FALSE, rhox = 0.5, breaks = 4,
-     seed = NULL, optima1.arg = NULL, Crow1positive = TRUE,
+     sqrt.arg = FALSE, log.arg = FALSE, rhox = 0.5, breaks = 4,
+     seed = NULL, optimums1.arg = NULL, Crow1positive = TRUE,
      xmat = NULL, scale.latvar = TRUE)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -62,8 +62,8 @@ rcqo(n, p, S, Rank = 1,
 %    else zz.
     
   }
-  \item{eq.maxima}{
-    Logical. Does each species have the same maxima?
+  \item{eq.maximums}{
+    Logical. Does each species have the same maximum?
     See arguments \code{lo.abundance} and \code{hi.abundance}.
 
 
@@ -76,15 +76,15 @@ rcqo(n, p, S, Rank = 1,
 
 
   }
-  \item{es.optima}{
-    Logical. Do the species have equally spaced optima?
+  \item{es.optimums}{
+    Logical. Do the species have equally spaced optimums?
     If \code{TRUE} then the quantity
     \eqn{S^{1/R}}{S^(1/R)} must be an
     integer with value 2 or more. That is, there has to be an
     appropriate number of species in total. This is so that a grid
     of optimum values is possible in \eqn{R}-dimensional
     latent variable space
-    in order to place the species' optima.
+    in order to place the species' optimums.
     Also see the argument \code{sd.tolerances}.
     
     
@@ -94,11 +94,11 @@ rcqo(n, p, S, Rank = 1,
     The species have a maximum
     between \code{lo.abundance} and \code{hi.abundance}. That is,
     at their optimal environment, the mean abundance of each
-    species is between the two componentwise values. If \code{eq.maxima}
+    species is between the two componentwise values. If \code{eq.maximums}
     is \code{TRUE} then \code{lo.abundance} and \code{hi.abundance}
     must have the same values.
-    If \code{eq.maxima} is \code{FALSE} then the
-    logarithm of the maxima are uniformly distributed between
+    If \code{eq.maximums} is \code{FALSE} then the
+    logarithm of the maximums are uniformly distributed between
     \code{log(lo.abundance)} and \code{log(hi.abundance)}.
     
   }
@@ -112,16 +112,16 @@ rcqo(n, p, S, Rank = 1,
     axis, etc.
         
   }
-  \item{sd.optima}{
+  \item{sd.optimums}{
     Numeric, of length \eqn{R} (recycled if necessary).
-    If \code{es.optima = FALSE} then,
+    If \code{es.optimums = FALSE} then,
     for the \eqn{r}th latent variable axis,
-    the optima of the species are generated from a
+    the optimums of the species are generated from a
     normal distribution centered about 0.
-    If \code{es.optima = TRUE} then the \eqn{S} optima
+    If \code{es.optimums = TRUE} then the \eqn{S} optimums
     are equally spaced about 0 along every latent variable axis.
-    Regardless of the value of \code{es.optima}, the optima
-    are then scaled to give standard deviation \code{sd.optima[r]}.
+    Regardless of the value of \code{es.optimums}, the optimums
+    are then scaled to give standard deviation \code{sd.optimums[r]}.
     
   }
   \item{sd.tolerances}{
@@ -165,9 +165,9 @@ rcqo(n, p, S, Rank = 1,
     problems) to estimate using something like \code{cqo(..., family="poissonff")}.
 
   }
-  \item{Log}{
+  \item{log.arg}{
     Logical. Take the logarithm of the gamma random variates?
-    Assigning \code{Log = TRUE} when \code{family="gamma2"} means
+    Assigning \code{log.arg = TRUE} when \code{family="gamma2"} means
     that the resulting species data can be considered very crudely to be
     approximately Gaussian distributed about its (quadratic) mean.
     The result is that it is much easier (less numerical
@@ -200,8 +200,8 @@ rcqo(n, p, S, Rank = 1,
     \code{\link[base:Random]{.Random.seed}} as \code{"seed"} attribute.
 
   }
-  \item{optima1.arg}{
-    If assigned and \code{Rank = 1} then these are the explicity optima.
+  \item{optimums1.arg}{
+    If assigned and \code{Rank = 1} then these are the explicit optimums.
     Recycled to length \code{S}.
 
 
@@ -229,9 +229,9 @@ rcqo(n, p, S, Rank = 1,
   data coming from a \emph{species packing model} can be generated
   with this function.
   The species packing model states that species have equal tolerances,
-  equal maxima, and optima which are uniformly distributed over
+  equal maximums, and optimums which are uniformly distributed over
   the latent variable space. This can be achieved by assigning
-  the arguments \code{es.optima = TRUE}, \code{eq.maxima = TRUE},
+  the arguments \code{es.optimums = TRUE}, \code{eq.maximums = TRUE},
   \code{eq.tolerances = TRUE}.
 
   At present, the Poisson and negative binomial abundances are
@@ -283,8 +283,8 @@ rcqo(n, p, S, Rank = 1,
     \code{\link{cqo}}.
     
   }
-  \item{"logmaxima"}{
-    The \eqn{S}-vector of species' maxima, on a log scale.
+  \item{"log.maximums"}{
+    The \eqn{S}-vector of species' maximums, on a log scale.
     These are uniformly distributed between
     \code{log(lo.abundance)} and \code{log(hi.abundance)}.
     
@@ -301,8 +301,8 @@ rcqo(n, p, S, Rank = 1,
     The linear/additive predictor value.
     
   }
-  \item{"optima"}{
-    The \eqn{S} by \eqn{R} matrix of species' optima.
+  \item{"optimums"}{
+    The \eqn{S} by \eqn{R} matrix of species' optimums.
 
   }
   \item{"tolerances"}{
@@ -316,7 +316,7 @@ rcqo(n, p, S, Rank = 1,
   Other attributes are \code{"break"},
   \code{"family"}, \code{"Rank"},
   \code{"lo.abundance"}, \code{"hi.abundance"},
-  \code{"eq.tolerances"}, \code{"eq.maxima"},
+  \code{"eq.tolerances"}, \code{"eq.maximums"},
   \code{"seed"} as used.
 
 }
@@ -397,7 +397,7 @@ attr(mydata, "concoefficients")  # The 'truth'
 
 # Example 3: gamma2 data fitted using a Gaussian model:
 n <- 200; p <- 5; S <- 3
-mydata <- rcqo(n, p, S, fam = "gamma2", Log = TRUE)
+mydata <- rcqo(n, p, S, fam = "gamma2", log.arg = TRUE)
 fit <- cqo(attr(mydata, "formula"),
            fam = gaussianff, data = mydata)  # I.tol = TRUE,
 matplot(attr(mydata, "latvar"),
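
A minimal base-R sketch of the optimum/maximum generation described
above, under assumed values; \code{rcqo()} itself also handles the
tolerances, scaling, and abundance sampling.

set.seed(1)
S <- 8
lo.abundance <- 10; hi.abundance <- 100; sd.optimums <- 1.5
opt <- seq(-1, 1, length.out = S)       # Equally spaced (es.optimums = TRUE)
opt <- opt * sd.optimums / sd(opt)      # Rescaled to sd.optimums
log.max <- runif(S, log(lo.abundance),  # eq.maximums = FALSE: log-maximums
                 log(hi.abundance))     # are uniform on (log(lo), log(hi))
cbind(optimums = opt, maximums = exp(log.max))
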
diff --git a/man/recexp1.Rd b/man/rec.exp1.Rd
similarity index 93%
rename from man/recexp1.Rd
rename to man/rec.exp1.Rd
index 66e525f..5e21a5f 100644
--- a/man/recexp1.Rd
+++ b/man/rec.exp1.Rd
@@ -1,14 +1,15 @@
-\name{recexp1}
-\alias{recexp1}
+\name{rec.exp1}
+\alias{rec.exp1}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Upper Record Values from a 1-parameter Exponential Distribution }
 \description{
   Maximum likelihood estimation of the rate parameter of a
   1-parameter exponential distribution when the observations are upper 
   record values.
+
 }
 \usage{
-recexp1(lrate = "loge", irate = NULL, imethod = 1)
+rec.exp1(lrate = "loge", irate = NULL, imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -16,18 +17,21 @@ recexp1(lrate = "loge", irate = NULL, imethod = 1)
   Link function applied to the rate parameter.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{irate}{
   Numeric. Optional initial values for the rate.
   The default value \code{NULL} means they are computed internally,
   with the help of \code{imethod}.
 
+
   }
   \item{imethod}{
   Integer, either 1, 2 or 3. Initialization method; three algorithms are
   implemented. Choose another value if convergence fails, or use
   \code{irate}.
 
+
   }
 }
 \details{
@@ -70,7 +74,7 @@ y <- unique(cummax(rawy))  # Keep only the records
 
 length(y) / y[length(y)]   # MLE of rate
 
-fit <- vglm(y ~ 1, recexp1, trace = TRUE)
+fit <- vglm(y ~ 1, rec.exp1, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
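
The closed-form MLE used above follows because the likelihood of m
upper records from an Exp(rate) distribution reduces to
rate^m * exp(-rate * y_(m)), so rate-hat = m / y_(m). A quick Monte
Carlo check in base R:

set.seed(1)
rate <- 0.5
mles <- replicate(2000, {
  y <- unique(cummax(rexp(1000, rate = rate)))  # Keep only the records
  length(y) / max(y)                            # m / y_(m)
})
c(mean(mles), rate)  # Comparable; the MLE is biased upward for small m
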
diff --git a/man/recnormal.Rd b/man/rec.normal.Rd
similarity index 93%
rename from man/recnormal.Rd
rename to man/rec.normal.Rd
index a593b77..93eba5b 100644
--- a/man/recnormal.Rd
+++ b/man/rec.normal.Rd
@@ -1,5 +1,5 @@
-\name{recnormal}
-\alias{recnormal}
+\name{rec.normal}
+\alias{rec.normal}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Upper Record Values from a Univariate Normal Distribution }
 \description{
@@ -9,7 +9,7 @@
 
 }
 \usage{
-recnormal(lmean = "identitylink", lsd = "loge",
+rec.normal(lmean = "identitylink", lsd = "loge",
           imean = NULL, isd = NULL, imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -18,18 +18,21 @@ recnormal(lmean = "identitylink", lsd = "loge",
   Link functions applied to the mean and sd parameters.
   See \code{\link{Links}} for more choices.
 
+
   }
   \item{imean, isd}{
   Numeric. Optional initial values for the mean and sd.
   The default value \code{NULL} means they are computed internally,
   with the help of \code{imethod}.
 
+
   }
   \item{imethod}{
   Integer, either 1, 2 or 3. Initialization method; three algorithms are
   implemented. Choose another value if convergence fails, or use
   \code{imean} and/or \code{isd}.
 
+
   }
   \item{zero}{
   An integer vector, containing the value 1 or 2. If so, the mean or
@@ -38,6 +41,7 @@ recnormal(lmean = "identitylink", lsd = "loge",
   The default value \code{NULL} means both linear/additive predictors
   are modelled as functions of the explanatory variables.
 
+
   }
 }
 \details{
@@ -80,7 +84,7 @@ recnormal(lmean = "identitylink", lsd = "loge",
 
 \seealso{
     \code{\link{uninormal}},
-    \code{\link{double.cennormal}}.
+    \code{\link{double.cens.normal}}.
 
 
 }
@@ -91,7 +95,7 @@ Rdata <- data.frame(rawy = c(mymean, rnorm(nn, me = mymean, sd = exp(3))))
 # Keep only observations that are records:
 rdata <- data.frame(y = unique(cummax(with(Rdata, rawy))))
 
-fit <- vglm(y ~ 1, recnormal, data = rdata, trace = TRUE, maxit = 200)
+fit <- vglm(y ~ 1, rec.normal, data = rdata, trace = TRUE, maxit = 200)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/riceUC.Rd b/man/riceUC.Rd
index 47bb250..51d6cbc 100644
--- a/man/riceUC.Rd
+++ b/man/riceUC.Rd
@@ -1,35 +1,51 @@
 \name{Rice}
 \alias{Rice}
 \alias{drice}
-%\alias{price}
-%\alias{qrice}
+\alias{price}
+\alias{qrice}
 \alias{rrice}
 \title{The Rice Distribution}
 \description{
-  Density
+  Density,
+  distribution function, quantile function
   and random generation for the
   Rician distribution.
 
 
-% distribution function, quantile function
 
 }
 
 \usage{
-drice(x, vee, sigma, log = FALSE)
-rrice(n, vee, sigma)
+drice(x, sigma, vee, log = FALSE)
+price(q, sigma, vee, lower.tail = TRUE, ...)
+qrice(p, sigma, vee, ...)
+rrice(n, sigma, vee)
 }
-%price(q, vee, sigma)
-%qrice(p, vee, sigma)
 
 
 \arguments{
-  \item{x}{vector of quantiles.}
-% \item{p}{vector of probabilities.}
- \item{n}{number of observations.
-   Must be a positive integer of length 1.}
-  \item{vee, sigma}{ See \code{\link{riceff}}.
-    }
+  \item{x, q}{vector of quantiles.}
+  \item{p}{vector of probabilities.}
+  \item{n}{number of observations.
+  Must be a positive integer of length 1.
+
+  }
+  \item{vee, sigma}{
+  See \code{\link{riceff}}.
+
+  }
+  \item{\dots}{
+  Other arguments such as
+  \code{lower.tail}.
+
+
+  }
+  \item{lower.tail}{
+  Logical.
+  If \code{TRUE} then the LHS area, else the RHS area.
+
+
+  }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
@@ -39,11 +55,11 @@ rrice(n, vee, sigma)
 }
 \value{
   \code{drice} gives the density,
+  \code{price} gives the distribution function,
+  \code{qrice} gives the quantile function, and
   \code{rrice} generates random deviates.
 
 
-% \code{price} gives the distribution function,
-% \code{qrice} gives the quantile function, and
 
 
 }
@@ -53,6 +69,10 @@ rrice(n, vee, sigma)
   for estimating the two parameters,
   for the formula of the probability density function and other details.
 
+  Formulas for \code{price()} and \code{qrice()} are based on the
+  Marcum-Q function.
+
+
 
 }
 %\section{Warning }{
plot(x, drice(x, vee = 0, sigma = 1), type = "n", las = 1, ylab = "",
      main = "Density of Rice distribution for various values of v")
 sigma <- 1; vee <- c(0, 0.5, 1, 2, 4)
 for (ii in 1:length(vee))
-  lines(x, drice(x, vee[ii], sigma), col = ii)
+  lines(x, drice(x, vee = vee[ii], sigma), col = ii)
 legend(x = 5, y = 0.6, legend = as.character(vee),
        col = 1:length(vee), lty = 1)
+
+x <- seq(0, 4, by = 0.01); vee <- 1; sigma <- 1
+probs <- seq(0.05, 0.95, by = 0.05)
+plot(x, drice(x, vee = vee, sigma = sigma), type = "l", col = "blue",
+     main = "Blue is density, orange is cumulative distribution function",
+     ylim = c(0, 1), sub = "Purple are 5, 10, ..., 95 percentiles",
+     las = 1, ylab = "", cex.main = 0.9)
+abline(h = 0:1, col = "black", lty = 2)
+Q <- qrice(probs, sigma, vee = vee)
+lines(x, price(x, sigma, vee = vee), type = "l", col = "orange")
+lines(Q, drice(Q, sigma, vee = vee), col = "purple", lty = 3, type = "h")
+lines(Q, price(Q, sigma, vee = vee), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+max(abs(price(Q, sigma, vee = vee) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
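
For orientation, a hedged base-R sketch of the Rician density and a
numerical CDF/quantile, assuming the standard parameterization
f(x) = (x/sigma^2) exp(-(x^2 + v^2)/(2 sigma^2)) I_0(x v / sigma^2);
drice0(), price0() and qrice0() are illustrative stand-ins, not the
package's Marcum-Q-based \code{price()} and \code{qrice()}.

drice0 <- function(x, sigma, vee)  # Exponentially scaled I0 for stability
  (x / sigma^2) * exp(-(x - vee)^2 / (2 * sigma^2)) *
    besselI(x * vee / sigma^2, nu = 0, expon.scaled = TRUE)
price0 <- function(q, sigma, vee)
  integrate(drice0, 0, q, sigma = sigma, vee = vee)$value
qrice0 <- function(p, sigma, vee)
  uniroot(function(q) price0(q, sigma, vee) - p,
          interval = c(0, vee + 20 * sigma))$root

sigma <- 1; vee <- 1
price0(qrice0(0.75, sigma, vee), sigma, vee)               # Should be 0.75
integrate(drice0, 0, Inf, sigma = sigma, vee = vee)$value  # Should be 1
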
diff --git a/man/riceff.Rd b/man/riceff.Rd
index c3b2778..4d991ef 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -9,11 +9,14 @@
 
 }
 \usage{
-riceff(lvee = "loge", lsigma = "loge",
-       ivee = NULL, isigma = NULL, nsimEIM = 100, zero = NULL)
+riceff(lsigma = "loge", lvee = "loge", isigma = NULL,
+       ivee = NULL, nsimEIM = 100, zero = NULL, nowarning = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{nowarning}{ Logical. Suppress a warning? }
+
+
   \item{lvee, lsigma}{
   Link functions for the \eqn{v} and \eqn{\sigma}{sigma} parameters.
   See \code{\link{Links}} for more choices and for general information.
@@ -93,8 +96,8 @@ Mathematical Analysis of Random Noise.
 
 }
 \examples{
-\dontrun{ vee <- exp(2); sigma <- exp(1)
-rdata <- data.frame(y = rrice(n <- 1000, vee, sigma))
+\dontrun{ sigma <- exp(1); vee <- exp(2)
+rdata <- data.frame(y = rrice(n <- 1000, sigma, vee = vee))
 fit <- vglm(y ~ 1, riceff, data = rdata, trace = TRUE, crit = "coef")
 c(with(rdata, mean(y)), fitted(fit)[1])
 coef(fit, matrix = TRUE)
diff --git a/man/rlplot.egev.Rd b/man/rlplot.egev.Rd
index 25e8542..578c81b 100644
--- a/man/rlplot.egev.Rd
+++ b/man/rlplot.egev.Rd
@@ -10,12 +10,13 @@
 \usage{
 rlplot.egev(object, show.plot = TRUE,
     probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
-    add.arg = FALSE, xlab = "Return Period", ylab = "Return Level",
+    add.arg = FALSE, xlab = if (log.arg) "Return Period (log-scale)" else
+    "Return Period", ylab = "Return Level",
     main = "Return Level Plot",
     pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
     llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
     slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
-    ylim = NULL, log = TRUE, CI = TRUE, epsilon = 1e-05, ...)
+    ylim = NULL, log.arg = TRUE, CI = TRUE, epsilon = 1e-05, ...)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -59,7 +60,7 @@ rlplot.egev(object, show.plot = TRUE,
 
   }
   \item{ylim}{ Limits for the y-axis. Numeric of length 2. }
-  \item{log}{ Logical. If \code{TRUE} then \code{log=""} otherwise
+  \item{log.arg}{ Logical. If \code{TRUE} then \code{log=""} otherwise
     \code{log="x"}. This changes the labelling of the x-axis only.
 
   }
@@ -145,7 +146,7 @@ coef(fit2, matrix = TRUE)
 \dontrun{
 par(mfrow = c(1, 2))
 rlplot(fit) -> i1
-rlplot(fit2, pcol = "darkorange", lcol = "blue", log = FALSE,
+rlplot(fit2, pcol = "darkorange", lcol = "blue", log.arg = FALSE,
        scol = "darkgreen", slty = "dashed", las = 1) -> i2
range(i2@post$rlplot$upper - i1@post$rlplot$upper)  # Should be near 0
range(i2@post$rlplot$lower - i1@post$rlplot$lower)  # Should be near 0
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 77f46df..c80763d 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -251,9 +251,9 @@ plot(y2 ~ x2, data = mydata, pch = "+", col = 'blue', las = 1,
 rrnb2 <- rrvglm(y2 ~ x2 + x3, negbinomial(zero = NULL),
                 data = mydata, trace = TRUE)
 
-a21.hat <- (Coef(rrnb2)@A)["log(size)", 1]
-beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "log(mu)"]
-beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "log(size)"]
+a21.hat <- (Coef(rrnb2)@A)["loge(size)", 1]
+beta11.hat <- Coef(rrnb2)@B1["(Intercept)", "loge(mu)"]
+beta21.hat <- Coef(rrnb2)@B1["(Intercept)", "loge(size)"]
 (delta1.hat <- exp(a21.hat * beta11.hat - beta21.hat))
 (delta2.hat <- 2 - a21.hat)
 # exp(a21.hat * predict(rrnb2)[1,1] - predict(rrnb2)[1,2])  # delta1.hat
@@ -274,9 +274,8 @@ lines(fitted(rrnb2)[ooo] ~ latvar(rrnb2)[ooo], col = "orange")
 
 # Example 2: stereotype model (reduced-rank multinomial logit model)
 data(car.all)
-index <- with(car.all, Country == "Germany" | Country == "USA" |
-                       Country == "Japan"   | Country == "Korea")
-scar <- car.all[index, ]  # standardized car data
+scar <- subset(car.all,
+               is.element(Country, c("Germany", "USA", "Japan", "Korea")))
 fcols <- c(13,14,18:20,22:26,29:31,33,34,36)  # These are factors
 scar[, -fcols] <- scale(scar[, -fcols])  # Standardize all numerical vars
 ones <- matrix(1, 3, 1)
@@ -299,3 +298,11 @@ biplot(fit, chull = TRUE, scores = TRUE, clty = 2, Ccex = 2,
 \keyword{models}
 \keyword{regression}
 
+%index <- with(car.all, Country == "Germany" | Country == "USA" |
+%                       Country == "Japan"   | Country == "Korea")
+%scar <- car.all[index, ]  # standardized car data
+
+
+%scar <- subset(car.all,
+%               is.element(Country, c("Germany", "USA", "Japan", "Korea")) |
+%               is.na(Country))
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index b598db9..b78a841 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -19,7 +19,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
     noRRR = ~1, Norrr = NA,
     noWarning = FALSE,
     trace = FALSE, Use.Init.Poisson.QO = FALSE, 
-    checkwz = TRUE, Check.rank = TRUE,
+    checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
     wzepsilon = .Machine$double.eps^0.75, ...)
 }
 %- maybe also `usage' for other objects documented here.
@@ -122,8 +122,8 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
   of the estimated constraint matrices (\bold{A}) are
   to be all zeros.
   These are called \emph{structural zeros}.
-  Must not have any common value with \code{Index.corner}, and
-  be a subset of the vector \code{1:M}.
+  Must not have any common value with \code{Index.corner},
+  and be a subset of the vector \code{1:M}.
   The default, \code{str0 = NULL}, means no structural zero rows at all.
 
 
@@ -186,7 +186,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
 
 
   }
-  \item{noWarning, Check.rank}{
+  \item{noWarning, Check.rank, Check.cm.rank}{
     Same as \code{\link{vglm.control}}.
 
 
@@ -198,8 +198,12 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
   }
   \item{\dots}{
     Variables in \dots are passed into
-    \code{\link{vglm.control}}. If the derivative algorithm is used, then
-    \dots are also passed into \code{\link{rrvglm.optim.control}}.
+    \code{\link{vglm.control}}.
+    If the derivative algorithm is used then \dots are also passed
+    into \code{\link{rrvglm.optim.control}};
+    and
+    if the alternating algorithm is used then \dots are also passed
+    into \code{\link{valt.control}}.
 
 
   }
diff --git a/man/rrvglm.optim.control.Rd b/man/rrvglm.optim.control.Rd
index e3f5c40..5385108 100644
--- a/man/rrvglm.optim.control.Rd
+++ b/man/rrvglm.optim.control.Rd
@@ -5,6 +5,8 @@
 \description{
   Algorithmic constants and parameters for running \code{optim} within 
   \code{rrvglm} are set using this function.
+
+
 }
 \usage{
 rrvglm.optim.control(Fnscale = 1, Maxit = 100,
@@ -27,7 +29,7 @@ rrvglm.optim.control(Fnscale = 1, Maxit = 100,
 
 }
 \details{
-See \code{\link[stats]{optim}} for more details. 
+  See \code{\link[stats]{optim}} for more details. 
 
 
 }
@@ -39,16 +41,18 @@ See \code{\link[stats]{optim}} for more details.
 %\references{ ~put references to the literature/web site here ~ }
 \author{ Thomas W. Yee }
 \note{
-The transition between optimization methods may be unstable, so users
-may have to vary the value of \code{Switch.optimizer}.
+  The transition between optimization methods may be
+  unstable, so users may have to vary the value of
+  \code{Switch.optimizer}.
 
  
-Practical experience with \code{Switch.optimizer} shows that setting
-it to too large a value may lead to a local solution, whereas setting
-it to a low value will obtain the global solution.  It appears that,
-if BFGS kicks in too late when the Nelder-Mead algorithm is starting to
-converge to a local solution, then switching to BFGS will not be sufficient
-to bypass convergence to that local solution.
+  Practical experience with \code{Switch.optimizer} shows
+  that setting it to too large a value may lead to a local
+  solution, whereas setting it to a low value will obtain
+  the global solution.  It appears that, if BFGS kicks in too
+  late when the Nelder-Mead algorithm is starting to converge
+  to a local solution, then switching to BFGS will not be
+  sufficient to bypass convergence to that local solution.
 
 
 }
@@ -63,3 +67,6 @@ to bypass convergence to that local solution.
 %}
 \keyword{models}
 \keyword{regression}
+
+
+
diff --git a/man/koenker.Rd b/man/sc.studentt2.Rd
similarity index 72%
rename from man/koenker.Rd
rename to man/sc.studentt2.Rd
index 1ac464e..a03afc4 100644
--- a/man/koenker.Rd
+++ b/man/sc.studentt2.Rd
@@ -1,16 +1,17 @@
-\name{koenker}
-\alias{koenker}
+\name{sc.studentt2}
+\alias{sc.studentt2}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Koenker's Distribution Family Function }
+\title{ Scaled Student t Distribution with 2 df Family Function }
 \description{
-  Estimates the location and scale parameters of Koenker's
-  distribution by maximum likelihood estimation.
+  Estimates the location and scale parameters of 
+  a scaled Student t distribution with 2 degrees of freedom,
+  by maximum likelihood estimation.
 
 
 }
 \usage{
-koenker(percentile = 50, llocation = "identitylink", lscale = "loge",
-        ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
+sc.studentt2(percentile = 50, llocation = "identitylink", lscale = "loge",
+             ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -35,12 +36,15 @@ koenker(percentile = 50, llocation = "identitylink", lscale = "loge",
 }
 \details{
   Koenker (1993) solved for the distribution whose quantiles are
-  equal to its expectiles. This is called Koenker's distribution
-  here. Its canonical form has mean and mode at 0 and has a heavy
-  tail (in fact, its variance is infinite).
+  equal to its expectiles.
+  Its canonical form has mean and mode at 0,
+  and has a heavy tail (in fact, its variance is infinite).
 
 
-  The standard (``canonical'') form of Koenker's 
+% This is called Koenker's distribution here.
+
+
+  The standard (``canonical'') form of this
   distribution can be endowed with a location and scale parameter.
   The standard form has a density
   that can be written as
@@ -59,7 +63,7 @@ koenker(percentile = 50, llocation = "identitylink", lscale = "loge",
   (0.5 expectile) and  median (0.5 quantile).
 
 
-  Note that if \eqn{Y} has a standard Koenker distribution
+  Note that if \eqn{Y} has a standard \code{\link{dsc.t2}} distribution
   then \eqn{Y = \sqrt{2} T_2}{Y = sqrt(2) * T_2} where \eqn{T_2}
   has a Student-t distribution with 2 degrees of freedom.
   The two parameters here can also be estimated using
@@ -93,7 +97,7 @@ When are expectiles percentiles? (solution)
 %}
 
 \seealso{ 
-  \code{\link{dkoenker}},
+  \code{\link{dsc.t2}},
   \code{\link{studentt2}}.
 
 
@@ -103,9 +107,9 @@ set.seed(123); nn <- 1000
 kdata <- data.frame(x2 = sort(runif(nn)))
 kdata <- transform(kdata, mylocat = 1 + 3 * x2,
                           myscale = 1)
-kdata <- transform(kdata, y = rkoenker(nn, loc = mylocat, scale = myscale))
-fit  <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), data = kdata, trace = TRUE)
-fit2 <- vglm(y ~ x2, studentt2(df = 2), data = kdata, trace = TRUE)  # 'same' as fit
+kdata <- transform(kdata, y = rsc.t2(nn, loc = mylocat, scale = myscale))
+fit  <- vglm(y ~ x2, sc.studentt2(perc = c(1, 50, 99)), data = kdata)
+fit2 <- vglm(y ~ x2,    studentt2(df = 2), data = kdata)  # 'same' as fit
 
 coef(fit, matrix = TRUE)
 head(fitted(fit))
@@ -114,7 +118,7 @@ head(predict(fit))
 # Nice plot of the results
 \dontrun{ plot(y ~ x2, data = kdata, col = "blue", las = 1,
      sub  = paste("n =", nn),
-     main = "Fitted quantiles/expectiles using Koenker's distribution")
+     main = "Fitted quantiles/expectiles using the sc.studentt2() distribution")
 matplot(with(kdata, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
 legend("bottomright", lty = 1:3, lwd = 3, legend = colnames(fitted(fit)),
        col = 1:3) }
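
The defining property above, that the quantiles of this distribution
equal its expectiles, can be checked empirically in base R by simulating
Y = sqrt(2) * T_2 and solving the expectile first-order condition.

set.seed(1)
y <- sqrt(2) * rt(5000, df = 2)  # Standard form of this distribution
p <- 0.25
# The p-expectile e solves (1 - p) * E(e - Y)_+ = p * E(Y - e)_+
e.hat <- uniroot(function(e)
  (1 - p) * mean(pmax(e - y, 0)) - p * mean(pmax(y - e, 0)),
  interval = range(y))$root
c(expectile = e.hat, quantile = unname(quantile(y, p)))  # Nearly equal
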
diff --git a/man/koenkerUC.Rd b/man/sc.t2UC.Rd
similarity index 52%
rename from man/koenkerUC.Rd
rename to man/sc.t2UC.Rd
index ad34e11..a087d3a 100644
--- a/man/koenkerUC.Rd
+++ b/man/sc.t2UC.Rd
@@ -1,22 +1,22 @@
-\name{Expectiles-Koenker}
-\alias{Expectiles-Koenker}
-\alias{dkoenker}
-\alias{pkoenker}
-\alias{qkoenker}
-\alias{rkoenker}
-\title{ Expectiles/Quantiles of the Koenker Distribution }
+\name{Expectiles-sc.t2}
+\alias{Expectiles-sc.t2}
+\alias{dsc.t2}
+\alias{psc.t2}
+\alias{qsc.t2}
+\alias{rsc.t2}
+\title{ Expectiles/Quantiles of the Scaled Student t Distribution with 2 Df}
 \description{
   Density function, distribution function, and
   quantile/expectile function and random generation for the
-  Koenker distribution.
+  scaled Student t distribution with 2 degrees of freedom.
 
 
 }
 \usage{
-dkoenker(x, location = 0, scale = 1, log = FALSE)
-pkoenker(q, location = 0, scale = 1, log = FALSE)
-qkoenker(p, location = 0, scale = 1)
-rkoenker(n, location = 0, scale = 1)
+dsc.t2(x, location = 0, scale = 1, log = FALSE)
+psc.t2(q, location = 0, scale = 1, log = FALSE)
+qsc.t2(p, location = 0, scale = 1)
+rsc.t2(n, location = 0, scale = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -43,18 +43,19 @@ rkoenker(n, location = 0, scale = 1)
 }
 \details{
   A Student-t distribution with 2 degrees of freedom and
-  a scale parameter of \code{sqrt(2)} is equivalent to the
-  standard Koenker distribution.
+  a scale parameter of \code{sqrt(2)} is equivalent to
+  the standard form of this distribution
+  (called Koenker's distribution below).
   Further details about this distribution are given in
-  \code{\link{koenker}}.
+  \code{\link{sc.studentt2}}.
 
 
 }
 \value{
-  \code{dkoenker(x)} gives the density function.
-  \code{pkoenker(q)} gives the distribution function.
-  \code{qkoenker(p)} gives the expectile and quantile function.
-  \code{rkoenker(n)} gives \eqn{n} random variates.
+  \code{dsc.t2(x)} gives the density function.
+  \code{psc.t2(q)} gives the distribution function.
+  \code{qsc.t2(p)} gives the expectile and quantile function.
+  \code{rsc.t2(n)} gives \eqn{n} random variates.
 
 
 }
@@ -65,37 +66,37 @@ rkoenker(n, location = 0, scale = 1)
 
 \seealso{
   \code{\link[stats:TDist]{dt}},
-  \code{\link{koenker}}.
+  \code{\link{sc.studentt2}}.
 
 
 }
 
 \examples{
-my_p <- 0.25; y <- rkoenker(nn <- 5000)
-(myexp <- qkoenker(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my_p
+my.p <- 0.25; y <- rsc.t2(nn <- 5000)
+(myexp <- qsc.t2(my.p))
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y))  # Should be my.p
 # Equivalently:
 I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
 I2 <- mean(y >  myexp) * mean(-myexp + y[y >  myexp])
-I1 / (I1 + I2)  # Should be my_p
+I1 / (I1 + I2)  # Should be my.p
 # Or:
 I1 <- sum( myexp - y[y <= myexp])
 I2 <- sum(-myexp + y[y >  myexp])
 
 # Non-standard Koenker distribution
 myloc <- 1; myscale <- 2
-yy <- rkoenker(nn, myloc, myscale)
-(myexp <- qkoenker(my_p, myloc, myscale))
-sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my_p
-pkoenker(mean(yy), myloc, myscale)  # Should be 0.5
-abs(qkoenker(0.5, myloc, myscale) - mean(yy))  # Should be 0
-abs(pkoenker(myexp, myloc, myscale) - my_p)  # Should be 0
-integrate(f = dkoenker, lower = -Inf, upper = Inf,
+yy <- rsc.t2(nn, myloc, myscale)
+(myexp <- qsc.t2(my.p, myloc, myscale))
+sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy))  # Should be my.p
+psc.t2(mean(yy), myloc, myscale)  # Should be 0.5
+abs(qsc.t2(0.5, myloc, myscale) - mean(yy))  # Should be 0
+abs(psc.t2(myexp, myloc, myscale) - my.p)  # Should be 0
+integrate(f = dsc.t2, lower = -Inf, upper = Inf,
           locat = myloc, scale = myscale)  # Should be 1
 
 y <- seq(-7, 7, len = 201)
-max(abs(dkoenker(y) - dt(y / sqrt(2), df = 2) / sqrt(2)))  # Should be 0
-\dontrun{ plot(y, dkoenker(y), type = "l", col = "blue", las = 1,
+max(abs(dsc.t2(y) - dt(y / sqrt(2), df = 2) / sqrt(2)))  # Should be 0
+\dontrun{ plot(y, dsc.t2(y), type = "l", col = "blue", las = 1,
      ylim = c(0, 0.4), main = "Blue = Koenker; orange = N(0, 1)")
 lines(y, dnorm(y), type = "l", col = "orange")
 abline(h = 0, v = 0, lty = 2) }
diff --git a/man/simplex.Rd b/man/simplex.Rd
index 3cee9b4..6acda67 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -11,7 +11,7 @@
 \usage{
 simplex(lmu = "logit", lsigma = "loge",
         imu = NULL, isigma = NULL,
-        imethod = 1, shrinkage.init = 0.95, zero = 2)
+        imethod = 1, ishrinkage = 0.95, zero = 2)
 
 }
 %- maybe also 'usage' for other objects documented here.
@@ -28,7 +28,7 @@ simplex(lmu = "logit", lsigma = "loge",
 
 
   }
-  \item{imethod, shrinkage.init, zero}{
+  \item{imethod, ishrinkage, zero}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
diff --git a/man/simulate.vlm.Rd b/man/simulate.vlm.Rd
index ade8362..b541bbe 100644
--- a/man/simulate.vlm.Rd
+++ b/man/simulate.vlm.Rd
@@ -40,6 +40,8 @@
 %   random generator state, and return \code{\link{.Random.seed}} as the
 %   \code{"seed"} attribute, see \sQuote{Value}.
 % }
+
+
   \item{\dots}{additional optional arguments.}
 
 
@@ -47,9 +49,8 @@
 }
 \value{
   Similar to \code{\link[stats]{simulate}}.
-  Note that many \pkg{VGAM} family functions can handle
-  multiple responses. This can result in a longer data frame
-  with more rows
+  Note that many \pkg{VGAM} family functions can handle multiple responses. 
+  This can result in a longer data frame with more rows
   (\code{nsim} multiplied by \code{n} rather than the
    ordinary \code{n}).
   In the future an argument may be available so that there
@@ -90,13 +91,13 @@
   \code{simslot} slot are:
   \code{\link{alaplace1}},
   \code{\link{alaplace2}},
-  \code{\link{amh}},
   \code{\link{betabinomial}},
-  \code{\link{betabinomial.ab}},
-  \code{\link{beta.ab}},
+  \code{\link{betabinomialff}},
+  \code{\link{betaR}},
   \code{\link{betaff}},
+  \code{\link{biamhcop}},
   \code{\link{bifrankcop}},
-  \code{\link{bilogistic4}},
+  \code{\link{bilogistic}},
   \code{\link{binomialff}},
   \code{\link{binormal}},
   \code{\link{binormalcop}},
@@ -108,28 +109,27 @@
   \code{\link{dagum}},
   \code{\link{erlang}},
   \code{\link{exponential}},
-  \code{\link{fgm}},
+  \code{\link{bifgmcop}},
   \code{\link{fisk}},
   \code{\link{gamma1}},
   \code{\link{gamma2}},
-  \code{\link{gamma2.ab}},
-  \code{\link{gengamma}},
+  \code{\link{gammaR}},
+  \code{\link{gengamma.stacy}},
   \code{\link{geometric}},
   \code{\link{gompertz}},
   \code{\link{gumbelII}},
   \code{\link{hzeta}},
-  \code{\link{invlomax}},
-  \code{\link{invparalogistic}},
+  \code{\link{inv.lomax}},
+  \code{\link{inv.paralogistic}},
   \code{\link{kumar}},
-  \code{\link{lgammaff}},
-  \code{\link{lgamma3ff}},
+  \code{\link{lgamma1}},
+  \code{\link{lgamma3}},
   \code{\link{lindley}},
   \code{\link{lino}},
   \code{\link{logff}},
   \code{\link{logistic1}},
-  \code{\link{logistic2}},
+  \code{\link{logistic}},
   \code{\link{lognormal}},
-  \code{\link{lognormal3}},
   \code{\link{lomax}},
   \code{\link{makeham}},
   \code{\link{negbinomial}},
@@ -141,6 +141,7 @@
   \code{\link{posnormal}},
   \code{\link{pospoisson}},
   \code{\link{polya}},
+  \code{\link{polyaR}},
   \code{\link{posbinomial}},
   \code{\link{rayleigh}},
   \code{\link{riceff}},
@@ -177,28 +178,67 @@
   \code{\link{vglm}}, \code{\link{vgam}} for model fitting.
 
 
+}
 
+\section{Warning}{
+  With multiple responses and/or a multivariate response,
+  the order of the elements may differ.
+  For some \pkg{VGAM} families, the order is 
+  \eqn{n \times N \times F}{n x N x F},
+  where \eqn{n} is the sample size,
+  \eqn{N} is \code{nsim} and
+  \eqn{F} is \code{ncol(fitted(vglmObject))}.
+  For other \pkg{VGAM} families, the order is 
+  \eqn{n \times F \times N}{n x F x N}.
+  An example of each is given below.
+  
 
 }
+
+
 \examples{
-nn <- 10; mysize <- 20
-set.seed(123)
+nn <- 10; mysize <- 20; set.seed(123)
 bdata <- data.frame(x2 = rnorm(nn))
 bdata <- transform(bdata,
   y1   = rbinom(nn, size = mysize, p = logit(1+x2, inverse = TRUE)),
   y2   = rbinom(nn, size = mysize, p = logit(1+x2, inverse = TRUE)),
   f1   = factor(as.numeric(rbinom(nn, size = 1,
-                                   p = logit(1+x2, inverse = TRUE)))))
+                                  p = logit(1+x2, inverse = TRUE)))))
 (fit1 <- vglm(cbind(y1, aaa = mysize - y1) ~ x2,  # Matrix response (2-colns)
               binomialff, data = bdata))
-(fit2 <- vglm(f1 ~ x2,  # Factor response
-              binomialff, model = TRUE, data = bdata))
-
-set.seed(123)
-simulate(fit1, nsim = 8)
-
-set.seed(123)
-c(simulate(fit2, nsim = 3))  # Use c() when model = TRUE
+(fit2 <- vglm(f1 ~ x2, binomialff, model = TRUE, data = bdata)) # Factor response
+
+set.seed(123); simulate(fit1, nsim = 8)
+set.seed(123); c(simulate(fit2, nsim = 3))  # Use c() when model = TRUE
+
+# An n x N x F example
+set.seed(123); n <- 100
+bdata <- data.frame(x2 = runif(n), x3 = runif(n))
+bdata <- transform(bdata, y1 = rnorm(n, 1 + 2 * x2),
+                          y2 = rnorm(n, 3 + 4 * x2))
+fit1 <- vglm(cbind(y1, y2) ~ x2, binormal(eq.sd = TRUE), data = bdata)
+nsim <- 1000  # Number of simulations for each observation
+my.sims <- simulate(fit1, nsim = nsim)
+dim(my.sims)  # A data frame
+aaa <- array(unlist(my.sims), c(n, nsim, ncol(fitted(fit1))))  # n by N by F
+summary(rowMeans(aaa[, , 1]) - fitted(fit1)[, 1])  # Should be all 0s
+summary(rowMeans(aaa[, , 2]) - fitted(fit1)[, 2])  # Should be all 0s
+
+# An n x F x N example
+n <- 100; set.seed(111); nsim <- 1000
+zdata <- data.frame(x2 = runif(n))
+zdata <- transform(zdata, lambda1 =  loge(-0.5 + 2 * x2, inverse = TRUE),
+                          lambda2 =  loge( 0.5 + 2 * x2, inverse = TRUE),
+                          pstr01  = logit( 0,            inverse = TRUE),
+                          pstr02  = logit(-1.0,          inverse = TRUE))
+zdata <- transform(zdata, y1 = rzipois(n, lambda = lambda1, pstr0 = pstr01),
+                          y2 = rzipois(n, lambda = lambda2, pstr0 = pstr02))
+zip.fit  <- vglm(cbind(y1, y2) ~ x2, zipoissonff, data = zdata, crit = "coef")
+my.sims <- simulate(zip.fit, nsim = nsim)
+dim(my.sims)  # A data frame
+aaa <- array(unlist(my.sims), c(n, ncol(fitted(zip.fit)), nsim))  # n by F by N
+summary(rowMeans(aaa[, 1, ]) - fitted(zip.fit)[, 1])  # Should be all 0s
+summary(rowMeans(aaa[, 2, ]) - fitted(zip.fit)[, 2])  # Should be all 0s
 }
 \keyword{models}
 \keyword{datagen}
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 0b83cf6..c3a19e0 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -90,10 +90,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
     \code{\link{betaII}},
     \code{\link{dagum}},
     \code{\link{fisk}},
-    \code{\link{invlomax}},
+    \code{\link{inv.lomax}},
     \code{\link{lomax}},
     \code{\link{paralogistic}},
-    \code{\link{invparalogistic}},
+    \code{\link{inv.paralogistic}},
     \code{\link{simulate.vlm}}.
 
 
diff --git a/man/skewnormal.Rd b/man/skewnormal.Rd
index 86c340e..0b61d1b 100644
--- a/man/skewnormal.Rd
+++ b/man/skewnormal.Rd
@@ -69,6 +69,9 @@ skewnormal(lshape = "identitylink", ishape = NULL, nsimEIM = NULL)
   \bold{61}, 579--602.
 
 
+
+
+
 }
 
 \author{ Thomas W. Yee }
diff --git a/man/smart.expression.Rd b/man/smart.expression.Rd
index 06f2f12..ad39be9 100644
--- a/man/smart.expression.Rd
+++ b/man/smart.expression.Rd
@@ -21,16 +21,7 @@
 
 }
 \examples{
-"my2" <- function(x, minx = min(x)) { # Here is a smart function
-  x <- x  # Needed for nested calls, e.g., sm.bs(sm.scale(x))
-  if (smart.mode.is("read")) {
-    return(eval(smart.expression))
-  } else
-  if (smart.mode.is("write"))
-    put.smart(list(minx = minx, match.call = match.call()))
-  (x - minx)^2
-}
-attr(my2, "smart") <- TRUE
+print(sm.min2)
 }
 %\keyword{smartpred}
 \keyword{models}
diff --git a/man/smart.mode.is.Rd b/man/smart.mode.is.Rd
index 3bda619..98936ca 100644
--- a/man/smart.mode.is.Rd
+++ b/man/smart.mode.is.Rd
@@ -5,7 +5,7 @@
 Determine which of three modes the smart prediction is currently in.
 }
 \usage{
-smart.mode.is(mode.arg=NULL)
+smart.mode.is(mode.arg = NULL)
 }
 \arguments{
 \item{mode.arg}{
@@ -45,18 +45,7 @@ smart.mode.is(mode.arg=NULL)
 
 }
 \examples{
-my1 <- function(x, minx = min(x)) { # Here is a smart function
-  x <- x  # Needed for nested calls, e.g., bs(scale(x))
-  if (smart.mode.is("read")) {
-    smart <- get.smart()
-    minx <- smart$minx  # Overwrite its value 
-  } else
-  if (smart.mode.is("write"))
-    put.smart(list(minx = minx))
-  sqrt(x - minx)
-}
-attr(my1, "smart") <- TRUE
-
+print(sm.min1)
 smart.mode.is()  # Returns "neutral"
 smart.mode.is(smart.mode.is())  # Returns TRUE
 }
diff --git a/man/studentt.Rd b/man/studentt.Rd
index 42c744c..c9735df 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -73,8 +73,8 @@ studentt3(llocation = "identitylink", lscale = "loge", ldf = "loglog",
   \code{\link{cauchy1}}.
   When \eqn{\nu=2}{nu=2} with a scale parameter of \code{sqrt(2)} then
   the Student \eqn{t}-distribution 
-  corresponds to the standard Koenker distribution,
-  \code{\link{koenker}}.
+  corresponds to the standard (Koenker) distribution,
+  \code{\link{sc.studentt2}}.
   The degrees of freedom can be treated as a parameter to be estimated,
   and as a real and not an integer.
   The Student t distribution is used for a variety of reasons
@@ -148,7 +148,7 @@ application to financial econometrics.
   \code{\link{cauchy1}},
   \code{\link{logistic}},
   \code{\link{huber2}},
-  \code{\link{koenker}},
+  \code{\link{sc.studentt2}},
   \code{\link[stats]{TDist}},
   \code{\link{simulate.vlm}}.
 
@@ -161,8 +161,11 @@ tdata <- transform(tdata, y1 = rt(nn, df = exp(exp(0.5 - x2))),
 fit1 <- vglm(y1 ~ x2, studentt, data = tdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 
-fit2 <- vglm(cbind(y1, y2) ~ x2, studentt3, data = tdata, trace = TRUE)
-coef(fit2, matrix = TRUE)
+fit2 <- vglm(y1 ~ x2, studentt2(df = exp(exp(0.5))), data = tdata)
+coef(fit2, matrix = TRUE)  # df inputted into studentt2() not quite right
+
+fit3 <- vglm(cbind(y1, y2) ~ x2, studentt3, data = tdata, trace = TRUE)
+coef(fit3, matrix = TRUE)
 }
 \keyword{models}
 \keyword{regression}
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 44946ad..1f8286e 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -7,8 +7,7 @@
 
 }
 \usage{
-tikuv(d, lmean = "identitylink", lsigma = "loge",
-      isigma = NULL, zero = 2)
+tikuv(d, lmean = "identitylink", lsigma = "loge", isigma = NULL, zero = 2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
diff --git a/man/tobit.Rd b/man/tobit.Rd
index dda5560..ef3a7d4 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -153,16 +153,16 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   The fitted object has components \code{censoredL} and \code{censoredU}
   in the \code{extra} slot which specifies whether observations
   are censored in that direction.
-  The function \code{\link{cennormal}} is an alternative
+  The function \code{\link{cens.normal}} is an alternative
   to \code{tobit()}.
 
 
 }
 \seealso{
   \code{\link{rtobit}},
-  \code{\link{cennormal}},
+  \code{\link{cens.normal}},
   \code{\link{uninormal}},
-  \code{\link{double.cennormal}},
+  \code{\link{double.cens.normal}},
   \code{\link{posnormal}},
   \code{\link[stats:Normal]{rnorm}}.
 
diff --git a/man/truncweibull.Rd b/man/truncweibull.Rd
index af7b3e3..8dee297 100644
--- a/man/truncweibull.Rd
+++ b/man/truncweibull.Rd
@@ -23,7 +23,7 @@ truncweibull(lower.limit = 1e-5,
     Recycled to the same dimension as the response, going
     across rows first.
     The default, being close to 0, should mean effectively the same
-    results as \code{\link{weibull}} if there are no response
+    results as \code{\link{weibullR}} if there are no response
     values that are smaller.
 
 
@@ -44,7 +44,7 @@ truncweibull(lower.limit = 1e-5,
 
   }
   \item{imethod, nrfs, zero, probs.y}{
-  Details at \code{\link{weibull}}.
+  Details at \code{\link{weibullR}}.
 
   }
 }
@@ -56,14 +56,14 @@ truncweibull(lower.limit = 1e-5,
   For a particular observation this is any known positive value.
   This function is currently based directly on
   Wingo (1989) and his parameterization is used (it differs
-  from \code{\link{weibull}}.)
+  from \code{\link{weibullR}}).
   In particular,
   \eqn{\beta = a} and \eqn{\alpha = (1/b)^a}
-  where \eqn{a} and \eqn{b} are as in \code{\link{weibull}} and
+  where \eqn{a} and \eqn{b} are as in \code{\link{weibullR}} and
   \code{\link[stats:Weibull]{dweibull}}.
 
 
-% More details about the Weibull density are \code{\link{weibull}}.
+% More details about the Weibull density are \code{\link{weibullR}}.
 
   
   Upon fitting the \code{extra} slot has a component called
@@ -101,7 +101,7 @@ truncweibull(lower.limit = 1e-5,
 }
 \section{Warning}{
   This function may be converted to the same parameterization as
-  \code{\link{weibull}} at any time.
+  \code{\link{weibullR}} at any time.
   Yet to do: one element of the EIM may be wrong (due to
   two interpretations of a formula; but it seems to work).
   Convergence is slower than usual and this may imply something
@@ -116,7 +116,7 @@ truncweibull(lower.limit = 1e-5,
 }
 
 \seealso{
-  \code{\link{weibull}},
+  \code{\link{weibullR}},
   \code{\link[stats:Weibull]{dweibull}},
   \code{\link{pgamma.deriv}},
   \code{\link{pgamma.deriv.unscaled}}.
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 9d1ee8f..2de37f4 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -5,6 +5,19 @@
 %
 %
 %
+% 201407
+\alias{concoef,ANY-method}
+\alias{concoef,rrvgam-method}
+\alias{concoef,Coef.rrvgam-method}
+%
+%
+% 201406
+\alias{QR.R,ANY-method}
+\alias{QR.R,vglm-method}
+\alias{QR.Q,ANY-method}
+\alias{QR.Q,vglm-method}
+%
+%
 % 201312
 \alias{simulate,ANY-method}
 \alias{simulate,vlm-method}
@@ -20,12 +33,12 @@
 \alias{BIC,vgam-method}
 \alias{BIC,rrvglm-method}
 \alias{BIC,qrrvglm-method}
-\alias{BIC,cao-method}
+\alias{BIC,rrvgam-method}
 %
 % 20121105
 \alias{Rank,qrrvglm-method}
 \alias{Rank,rrvglm-method}
-\alias{Rank,cao-method}
+\alias{Rank,rrvgam-method}
 % 20120821
 \alias{model.matrix,vsmooth.spline-method}
 %
@@ -51,7 +64,7 @@
 \alias{logLik,ANY-method}
 \alias{plot,ANY-method}
 \alias{vcov,ANY-method}
-\alias{plot,cao,ANY-method}
+\alias{plot,rrvgam,ANY-method}
 \alias{plot,qrrvglm,ANY-method}
 \alias{plot,rcim,ANY-method}
 \alias{plot,rcim0,ANY-method}
@@ -70,7 +83,7 @@
 \alias{AIC,vgam-method}
 \alias{AIC,rrvglm-method}
 \alias{AIC,qrrvglm-method}
-\alias{AIC,cao-method}
+\alias{AIC,rrvgam-method}
 \alias{AICc,vlm-method}
 \alias{AICc,vglm-method}
 %\alias{AICc,vgam-method}
@@ -78,11 +91,11 @@
 %\alias{AICc,qrrvglm-method}
 \alias{attrassign,lm-method}
 \alias{calibrate,qrrvglm-method}
-\alias{calibrate,cao-method}
+\alias{calibrate,rrvgam-method}
 %\alias{calibrate,uqo-method}
 \alias{cdf,vglm-method}
 \alias{cdf,vgam-method}
-\alias{coefficients,cao-method}
+\alias{coefficients,rrvgam-method}
 \alias{coefficients,vlm-method}
 \alias{coefficients,vglm-method}
 \alias{coefficients,qrrvglm-method}
@@ -92,7 +105,7 @@
 \alias{coefficients,summary.vglm-method}
 \alias{coefficients,summary.rrvglm-method}
 \alias{Coefficients,vlm-method}
-\alias{coef,cao-method}
+\alias{coef,rrvgam-method}
 \alias{coef,vlm-method}
 \alias{coef,vglm-method}
 \alias{coef,qrrvglm-method}
@@ -101,7 +114,7 @@
 \alias{coef,vsmooth.spline.fit-method}
 \alias{coef,summary.vglm-method}
 \alias{coef,summary.rrvglm-method}
-\alias{Coef,cao-method}
+\alias{Coef,rrvgam-method}
 \alias{Coef,vlm-method}
 \alias{Coef,qrrvglm-method}
 \alias{Coef,rrvglm-method}
@@ -111,13 +124,14 @@
 \alias{deplot,vgam-method}
 %
 \alias{depvar,ANY-method}
-\alias{depvar,cao-method}
+\alias{depvar,rrvgam-method}
 \alias{depvar,qrrvglm-method}
 \alias{depvar,rcim-method}
 \alias{depvar,rrvglm-method}
 \alias{depvar,vlm-method}
 \alias{depvar,vsmooth.spline-method}
 %
+\alias{deviance,rrvgam-method}
 \alias{deviance,qrrvglm-method}
 \alias{deviance,vlm-method}
 %\alias{deviance,vglm-method}
@@ -168,7 +182,7 @@
 \alias{hatvalues,ANY-method}
 \alias{hatvalues,vlm-method}
 \alias{hatvalues,vglm-method}
-\alias{hatvalues,cao-method}
+\alias{hatvalues,rrvgam-method}
 \alias{hatvalues,qrrvglm-method}
 \alias{hatvalues,rcim-method}
 \alias{hatvalues,rrvglm-method}
@@ -178,7 +192,7 @@
 \alias{hatplot,matrix-method}
 \alias{hatplot,vlm-method}
 \alias{hatplot,vglm-method}
-\alias{hatplot,cao-method}
+\alias{hatplot,rrvgam-method}
 \alias{hatplot,qrrvglm-method}
 \alias{hatplot,rcim-method}
 \alias{hatplot,rrvglm-method}
@@ -188,7 +202,7 @@
 \alias{dfbeta,matrix-method}
 \alias{dfbeta,vlm-method}
 \alias{dfbeta,vglm-method}
-\alias{dfbeta,cao-method}
+\alias{dfbeta,rrvgam-method}
 \alias{dfbeta,qrrvglm-method}
 \alias{dfbeta,rcim-method}
 \alias{dfbeta,rrvglm-method}
@@ -201,7 +215,7 @@
 \alias{model.frame,vlm-method}
 %\alias{plot,rcim0,ANY-method}
 %\alias{plot,rcim,ANY-method}
-%\alias{plot,cao,ANY-method}
+%\alias{plot,rrvgam,ANY-method}
 %\alias{plot,vlm,ANY-method}
 %\alias{plot,vglm,ANY-method}
 %\alias{plot,vgam,ANY-method}
@@ -215,29 +229,29 @@
 \alias{is.bell,qrrvglm-method}
 \alias{is.bell,rrvglm-method}
 \alias{is.bell,vlm-method}
-\alias{is.bell,cao-method}
+\alias{is.bell,rrvgam-method}
 \alias{is.bell,Coef.qrrvglm-method}
 \alias{logLik,vlm-method}
 \alias{logLik,summary.vglm-method}
 \alias{logLik,vglm-method}
 \alias{logLik,vgam-method}
 \alias{logLik,qrrvglm-method}
-\alias{logLik,cao-method}
+\alias{logLik,rrvgam-method}
 %
-\alias{lvplot,cao-method}
+\alias{lvplot,rrvgam-method}
 \alias{lvplot,qrrvglm-method}
 \alias{lvplot,rrvglm-method}
 %\alias{lvplot,uqo-method}
 %
 \alias{lv,rrvglm-method}
 \alias{lv,qrrvglm-method}
-\alias{lv,cao-method}
+\alias{lv,rrvgam-method}
 \alias{lv,Coef.rrvglm-method}
 \alias{lv,Coef.qrrvglm-method}
-\alias{lv,Coef.cao-method}
+\alias{lv,Coef.rrvgam-method}
 % \alias{lv,uqo-method} defunct
 %\alias{latvar,uqo-method}
-\alias{latvar,cao-method}
+\alias{latvar,rrvgam-method}
 \alias{latvar,Coef.qrrvglm-method}
 \alias{latvar,Coef.rrvglm-method}
 \alias{latvar,rrvglm-method}
@@ -246,7 +260,7 @@
 \alias{Max,qrrvglm-method}
 \alias{Max,Coef.qrrvglm-method}
 %\alias{Max,uqo-method}
-\alias{Max,cao-method}
+\alias{Max,rrvgam-method}
 \alias{meplot,numeric-method}
 \alias{meplot,vlm-method}
 %\alias{model.matrix,ANY-method}
@@ -257,7 +271,7 @@
 \alias{nobs,vlm-method}
 \alias{npred,ANY-method}
 \alias{npred,vlm-method}
-\alias{npred,cao-method}
+\alias{npred,rrvgam-method}
 \alias{npred,qrrvglm-method}
 \alias{npred,rcim-method}
 \alias{npred,rrvglm-method}
@@ -266,17 +280,17 @@
 \alias{nvar,vgam-method}
 \alias{nvar,rrvglm-method}
 \alias{nvar,qrrvglm-method}
-\alias{nvar,cao-method}
+\alias{nvar,rrvgam-method}
 \alias{nvar,vlm-method}
 \alias{nvar,rcim-method}
 \alias{Opt,qrrvglm-method}
 \alias{Opt,Coef.qrrvglm-method}
 %\alias{Opt,uqo-method}
-\alias{Opt,cao-method}
-\alias{persp,cao-method}
+\alias{Opt,rrvgam-method}
+\alias{persp,rrvgam-method}
 \alias{persp,qrrvglm-method}
 %\alias{persp,uqo-method}
-\alias{predict,cao-method}
+\alias{predict,rrvgam-method}
 \alias{predict,qrrvglm-method}
 \alias{predict,vgam-method}
 \alias{predict,vglm-method}
@@ -300,8 +314,8 @@
 %
 %
 %
-\alias{print,Coef.cao-method}
-\alias{print,summary.cao-method}
+\alias{print,Coef.rrvgam-method}
+\alias{print,summary.rrvgam-method}
 \alias{print,qrrvglm-method}
 \alias{print,Coef.qrrvglm-method}
 \alias{print,rrvglm-method}
@@ -319,7 +333,7 @@
 %\alias{print,Coef.uqo-method}
 %\alias{print,summary.uqo-method}
 \alias{print,vsmooth.spline-method}
-\alias{print,cao-method}
+\alias{print,rrvgam-method}
 \alias{qtplot,vglm-method}
 \alias{qtplot,vgam-method}
 \alias{residuals,qrrvglm-method}
@@ -334,8 +348,8 @@
 \alias{resid,vgam-method}
 %\alias{resid,uqo-method}
 \alias{resid,vsmooth.spline-method}
-\alias{show,Coef.cao-method}
-\alias{show,summary.cao-method}
+\alias{show,Coef.rrvgam-method}
+\alias{show,summary.rrvgam-method}
 \alias{show,qrrvglm-method}
 \alias{show,Coef.qrrvglm-method}
 \alias{show,rrvglm-method}
@@ -353,9 +367,9 @@
 %\alias{show,Coef.uqo-method}
 %\alias{show,summary.uqo-method}
 \alias{show,vsmooth.spline-method}
-\alias{show,cao-method}
+\alias{show,rrvgam-method}
 \alias{summary,grc-method}
-\alias{summary,cao-method}
+\alias{summary,rrvgam-method}
 \alias{summary,qrrvglm-method}
 \alias{summary,rcim-method}
 \alias{summary,rcim0-method}
@@ -364,14 +378,14 @@
 \alias{summary,vglm-method}
 \alias{summary,vlm-method}
 %\alias{summary,uqo-method}
-\alias{Tol,cao-method}
+\alias{Tol,rrvgam-method}
 \alias{Tol,qrrvglm-method}
 \alias{Tol,Coef.qrrvglm-method}
 %\alias{Tol,uqo-method}
 %\alias{Tol,Coef.uqo-method}
 \alias{trplot,qrrvglm-method}
 %\alias{trplot,uqo-method}
-\alias{trplot,cao-method}
+\alias{trplot,rrvgam-method}
 \alias{vcov,rrvglm-method}
 \alias{vcov,qrrvglm-method}
 \alias{vcov,vlm-method}
@@ -399,7 +413,7 @@
 
 }
 %\usage{
-%  \S4method{ccoef}{cao,Coef.cao,rrvglm,qrrvglm,
+%  \S4method{ccoef}{rrvgam,Coef.rrvgam,rrvglm,qrrvglm,
 %                   Coef.rrvglm,Coef.qrrvglm}(object, ...)
 %}
 
diff --git a/man/uninormal.Rd b/man/uninormal.Rd
index be99c38..74962d2 100644
--- a/man/uninormal.Rd
+++ b/man/uninormal.Rd
@@ -109,11 +109,11 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
     \code{\link{normal.vcm}},
     \code{\link{Qvar}},
     \code{\link{tobit}},
-    \code{\link{cennormal}},
+    \code{\link{cens.normal}},
     \code{\link{foldnormal}},
     \code{\link{skewnormal}},
-    \code{\link{double.cennormal}},
-    \code{\link{SUR}},
+    \code{\link{double.cens.normal}},
+    \code{\link{SURff}},
     \code{\link{huber2}},
     \code{\link{studentt}},
     \code{\link{binormal}},
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index c19e35e..74e84b0 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -11,7 +11,7 @@
 vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30, 
              checkwz=TRUE, criterion = names(.min.criterion.VGAM),
              epsilon = 1e-07, maxit = 30, na.action = na.fail,
-             nk = NULL, save.weight = FALSE, se.fit = TRUE,
+             nk = NULL, save.weights = FALSE, se.fit = TRUE,
              trace = FALSE, wzepsilon = .Machine$double.eps^0.75,
              ...)
 }
@@ -90,7 +90,7 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
   If specified, \code{nk} overrides the automatic knot selection procedure.
 
   }
-  \item{save.weight}{
+  \item{save.weights}{
   logical indicating whether the \code{weights} slot
   of a \code{"vglm"} object will be saved on the object.
   If not, it will be reconstructed when needed, e.g., \code{summary}.
@@ -135,11 +135,11 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
   very similar.
 
 
-  Setting \code{save.weight=FALSE} is useful for some
+  Setting \code{save.weights=FALSE} is useful for some
   models because the \code{weights} slot of the object is
   often the largest and so less memory is used to store the
  object. However, for some \pkg{VGAM} family functions,
-  it is necessary to set \code{save.weight=TRUE} because
+  it is necessary to set \code{save.weights=TRUE} because
   the \code{weights} slot cannot be reconstructed later.
 
 
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index 0e69b43..57fff4d 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -8,11 +8,11 @@
 
 }
 \usage{
-vglm.control(checkwz = TRUE, Check.rank = TRUE,
+vglm.control(checkwz = TRUE, Check.rank = TRUE, Check.cm.rank = TRUE,
              criterion = names(.min.criterion.VGAM),
              epsilon = 1e-07, half.stepsizing = TRUE,
              maxit = 30, noWarning = FALSE,
-             stepsize = 1, save.weight = FALSE,
+             stepsize = 1, save.weights = FALSE,
              trace = FALSE, wzepsilon = .Machine$double.eps^0.75, 
              xij = NULL, ...)
 }
@@ -36,6 +36,15 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE,
 
 
   }
+  \item{Check.cm.rank}{
+  logical indicating whether the rank of each constraint matrix
+  should be checked. If any constraint matrix is not of full column
+  rank then an error will occur. Under no circumstances should a
+  constraint matrix have rank less than its number of columns.
+
+
+
+  }
   \item{criterion}{
   character variable describing what criterion is to be
   used to test for convergence. The possibilities are
@@ -90,12 +99,12 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE,
 
 
   }
-  \item{save.weight}{
+  \item{save.weights}{
   logical indicating whether the \code{weights} slot of a
   \code{"vglm"} object will be saved on the object. If not,
   it will be reconstructed when needed, e.g., \code{summary}.
-  Some family functions have \code{save.weight = TRUE} and
-  others have \code{save.weight = FALSE} in their control
+  Some family functions have \code{save.weights = TRUE} and
+  others have \code{save.weights = FALSE} in their control
   functions.
 
 
@@ -171,10 +180,10 @@ vglm.control(checkwz = TRUE, Check.rank = TRUE,
   understand the full details.
 
 
-  Setting \code{save.weight = FALSE} is useful for some models because
+  Setting \code{save.weights = FALSE} is useful for some models because
   the \code{weights} slot of the object is the largest and so less
   memory is used to store the object. However, for some \pkg{VGAM}
-  family function, it is necessary to set \code{save.weight = TRUE}
+  family functions, it is necessary to set \code{save.weights = TRUE}
   because the \code{weights} slot cannot be reconstructed later.
 
 
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index 2d207c5..1ae6c93 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -106,7 +106,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 }
 \section{Warning }{
   Numerically, the von Mises can be difficult to fit because of a
-  log-likelihood having multiple maxima.
+  log-likelihood having multiple maximums.
   The user is therefore encouraged to try different starting values,
   i.e., make use of \code{ilocation} and \code{iscale}.
 
diff --git a/man/weibull.Rd b/man/weibullR.Rd
similarity index 93%
rename from man/weibull.Rd
rename to man/weibullR.Rd
index d733131..88e18f1 100644
--- a/man/weibull.Rd
+++ b/man/weibullR.Rd
@@ -1,5 +1,5 @@
-\name{weibull}
-\alias{weibull}
+\name{weibullR}
+\alias{weibullR}
 %\alias{weibullff}
 %\alias{weibull.lsh}
 %\alias{weibull3}
@@ -11,9 +11,9 @@
 
 }
 \usage{
-weibull(lshape = "loge", lscale = "loge", 
-        ishape = NULL,   iscale = NULL, nrfs = 1,
-        probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = -2)
+weibullR(lscale = "loge", lshape = "loge",
+         iscale = NULL,   ishape = NULL, lss = TRUE, nrfs = 1,
+         probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = ifelse(lss, -2, -1))
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -46,7 +46,7 @@ weibull(lshape = "loge", lscale = "loge",
 
 
   }
-  \item{zero, probs.y}{
+  \item{zero, probs.y, lss}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
   }
@@ -187,6 +187,10 @@ Concerns about Maximum Likelihood Estimation for
   support of the distribution depends on one of the parameters.
 
 
+  Other related distributions are the Maxwell and Rayleigh
+  distributions.
+
+
 
 }
 \section{Warning}{
@@ -208,7 +212,9 @@ Concerns about Maximum Likelihood Estimation for
     \code{\link{truncweibull}},
     \code{\link{gev}},
     \code{\link{lognormal}},
-    \code{\link{expexp}}.
+    \code{\link{expexpff}},
+    \code{\link{maxwell}},
+    \code{\link{rayleigh}},
     \code{\link{gumbelII}}.
 
 
@@ -218,7 +224,7 @@ wdata <- data.frame(x2 = runif(nn <- 1000))  # Complete data
 wdata <- transform(wdata,
             y1 = rweibull(nn, shape = exp(1 + x2), scale = exp(-2)),
             y2 = rweibull(nn, shape = exp(2 - x2), scale = exp( 1)))
-fit <- vglm(cbind(y1, y2) ~ x2, weibull, data = wdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, weibullR, data = wdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 vcov(fit)
 summary(fit)
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index 2100529..a81f378 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -128,12 +128,12 @@ weights(fit, type = "prior", matrix = FALSE)  # Number of observations
 nn <- nrow(model.matrix(fit, type = "lm"))
 M <- ncol(predict(fit))
 
-temp <- weights(fit, type = "working", deriv = TRUE)
-wz <- m2adefault(temp$weights, M = M)  # In array format
+wwt <- weights(fit, type = "working", deriv = TRUE)  # In matrix-band format
+wz <- m2a(wwt$weights, M = M)  # In array format
 wzinv <- array(apply(wz, 3, solve), c(M, M, nn))
 wresid <- matrix(NA, nn, M)  # Working residuals 
 for (ii in 1:nn)
-  wresid[ii,] <- wzinv[, , ii, drop = TRUE] \%*\% temp$deriv[ii, ]
+  wresid[ii, ] <- wzinv[, , ii, drop = TRUE] \%*\% wwt$deriv[ii, ]
 max(abs(c(resid(fit, type = "work")) - c(wresid)))  # Should be 0
 
 (zedd <- predict(fit) + wresid)  # Adjusted dependent vector
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 7a03aee..bfa64dc 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -13,11 +13,11 @@
 zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
               type.fitted = c("mean", "pobs0"),
               ipobs0 = NULL, isize = NULL, zero = -3, imethod = 1,
-              nsimEIM = 250, shrinkage.init = 0.95)
+              nsimEIM = 250, ishrinkage = 0.95)
 zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
                 type.fitted = c("mean", "pobs0", "onempobs0"),
                 isize = NULL, ionempobs0 = NULL, zero = c(-2, -3),
-                imethod = 1, nsimEIM = 250, shrinkage.init = 0.95)
+                imethod = 1, nsimEIM = 250, ishrinkage = 0.95)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -95,7 +95,7 @@ zanegbinomialff(lmunb = "loge", lsize = "loge", lonempobs0 = "logit",
   See \code{\link{CommonVGAMffArguments}}.
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   See \code{\link{negbinomial}}
   and \code{\link{CommonVGAMffArguments}}.
 
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 5b6ee6b..8a39f4d 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -12,11 +12,11 @@
 zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
               type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
               ipstr0 = NULL, isize = NULL, zero = -3,
-              imethod = 1, shrinkage.init = 0.95, nsimEIM = 250)
+              imethod = 1, ishrinkage = 0.95, nsimEIM = 250)
 zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
                 type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
                 isize = NULL, ionempstr0 = NULL, zero = c(-2, -3),
-                imethod = 1, shrinkage.init = 0.95, nsimEIM = 250)
+                imethod = 1, ishrinkage = 0.95, nsimEIM = 250)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -61,7 +61,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
   An integer with value \code{1} or \code{2} or \code{3} which
   specifies the initialization method for the mean parameter.
   If failure to converge occurs try another value
-  and/or else specify a value for \code{shrinkage.init}.
+  and/or specify a value for \code{ishrinkage}.
 
   }
   \item{zero}{ 
@@ -73,7 +73,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
   See \code{\link{CommonVGAMffArguments}} for more information.
 
   }
-  \item{shrinkage.init, nsimEIM}{ 
+  \item{ishrinkage, nsimEIM}{ 
   See \code{\link{CommonVGAMffArguments}} for information.
 
   }
@@ -171,7 +171,7 @@ zinegbinomialff(lmunb = "loge", lsize = "loge", lonempstr0 = "logit",
   If failure to converge occurs, try using combinations of arguments
   \code{stepsize} (in \code{\link{vglm.control}}),
   \code{imethod},
-  \code{shrinkage.init},
+  \code{ishrinkage},
   \code{ipstr0},
   \code{isize}, and/or
   \code{zero} if there are explanatory variables.
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index c6218a5..821a7dc 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -12,11 +12,11 @@
 zipoisson(lpstr0 = "logit", llambda = "loge",
           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
           ipstr0 = NULL, ilambda = NULL,
-          imethod = 1, shrinkage.init = 0.8, zero = NULL)
+          imethod = 1, ishrinkage = 0.8, zero = NULL)
 zipoissonff(llambda = "loge", lonempstr0 = "logit",
             type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
             ilambda = NULL,   ionempstr0 = NULL,
-            imethod = 1, shrinkage.init = 0.8, zero = -2)
+            imethod = 1, ishrinkage = 0.8, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -63,13 +63,13 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
   An integer with value \code{1} or \code{2} which
   specifies the initialization method for \eqn{\lambda}{lambda}.
   If failure to converge occurs try another value
-  and/or else specify a value for \code{shrinkage.init}
+  and/or specify a value for \code{ishrinkage}
  and/or \code{ipstr0}.
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
   }
-  \item{shrinkage.init}{
+  \item{ishrinkage}{
   How much shrinkage is used when initializing \eqn{\lambda}{lambda}.
   The value must be between 0 and 1 inclusive, and 
   a value of 0 means the individual response values are used,
@@ -212,7 +212,7 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
   Half-stepping is not uncommon.
   If failure to converge occurs, try using combinations of
   \code{imethod},
-  \code{shrinkage.init},
+  \code{ishrinkage},
   \code{ipstr0}, and/or
   \code{zipoisson(zero = 1)} if there are explanatory variables.
   The default for \code{zipoissonff()} is to model the
diff --git a/vignettes/categoricalVGAM.Rnw b/vignettes/categoricalVGAM.Rnw
new file mode 100644
index 0000000..8394144
--- /dev/null
+++ b/vignettes/categoricalVGAM.Rnw
@@ -0,0 +1,2325 @@
+\documentclass[article,shortnames,nojss]{jss}
+\usepackage{thumbpdf}
+%% need no \usepackage{Sweave.sty}
+
+\SweaveOpts{engine=R,eps=FALSE}
+%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
+%\VignetteDepends{VGAM}
+%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
+%\VignettePackage{VGAM}
+
+%% new commands
+\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
+\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
+\newcommand{\pr}{\mbox{$P$}}
+\newcommand{\logit}{\mbox{\rm logit}}
+\newcommand{\bzero}{{\bf 0}}
+\newcommand{\bone}{{\bf 1}}
+\newcommand{\bid}{\mbox{\boldmath $d$}}
+\newcommand{\bie}{\mbox{\boldmath $e$}}
+\newcommand{\bif}{\mbox{\boldmath $f$}}
+\newcommand{\bix}{\mbox{\boldmath $x$}}
+\newcommand{\biy}{\mbox{\boldmath $y$}}
+\newcommand{\biz}{\mbox{\boldmath $z$}}
+\newcommand{\biY}{\mbox{\boldmath $Y$}}
+\newcommand{\bA}{\mbox{\rm \bf A}}
+\newcommand{\bB}{\mbox{\rm \bf B}}
+\newcommand{\bC}{\mbox{\rm \bf C}}
+\newcommand{\bH}{\mbox{\rm \bf H}}
+\newcommand{\bI}{\mbox{\rm \bf I}}
+\newcommand{\bX}{\mbox{\rm \bf X}}
+\newcommand{\bW}{\mbox{\rm \bf W}}
+\newcommand{\bY}{\mbox{\rm \bf Y}}
+\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
+\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
+\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
+\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
+\newcommand{\diag}{ \mbox{\rm diag} }
+\newcommand{\Var}{ \mbox{\rm Var} }
+\newcommand{\R}{{\textsf{R}}}
+\newcommand{\VGAM}{\pkg{VGAM}}
+
+
+\author{Thomas W. Yee\\University of Auckland}
+\Plainauthor{Thomas W. Yee}
+
+\title{The \pkg{VGAM} Package for Categorical Data Analysis}
+\Plaintitle{The VGAM Package for Categorical Data Analysis}
+
+\Abstract{
+  Classical categorical regression models such as the multinomial logit and
+  proportional odds models are shown to be readily handled by the  vector
+  generalized linear and additive model (VGLM/VGAM) framework. Additionally,
+  there are natural extensions, such as reduced-rank VGLMs for
+  dimension reduction, and allowing covariates that have values
+  specific to each linear/additive predictor,
+  e.g., for consumer choice modeling. This article describes some of the
+  framework behind the \pkg{VGAM} \R{} package, its usage and implementation
+  details.
+}
+\Keywords{categorical data analysis, Fisher scoring,
+  iteratively reweighted least squares,
+  multinomial distribution, nominal and ordinal polytomous responses,
+  smoothing, vector generalized linear and additive models,
+  \VGAM{} \R{} package}
+\Plainkeywords{categorical data analysis, Fisher scoring,
+  iteratively reweighted least squares, multinomial distribution,
+  nominal and ordinal polytomous responses, smoothing,
+  vector generalized linear and additive models, VGAM R package}
+
+\Address{
+  Thomas W. Yee \\
+  Department of Statistics \\
+  University of Auckland, Private Bag 92019 \\
+  Auckland Mail Centre \\
+  Auckland 1142, New Zealand \\
+  E-mail: \email{t.yee at auckland.ac.nz}\\
+  URL: \url{http://www.stat.auckland.ac.nz/~yee/}
+}
+
+
+\begin{document}
+
+
+<<echo=FALSE, results=hide>>=
+library("VGAM")
+library("VGAMdata")
+ps.options(pointsize = 12)
+options(width = 72, digits = 4)
+options(SweaveHooks = list(fig = function() par(las = 1)))
+options(prompt = "R> ", continue = "+")
+@
+
+
+% ----------------------------------------------------------------------
+\section{Introduction}
+\label{sec:jsscat.intoduction}
+
+
+This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
+based on \cite{Yee:2010}.
+Any subsequent features (especially non-backward compatible ones)
+will appear here.
+
+The subject of CDA is concerned with
+analyses where the response is categorical regardless of whether
+the explanatory variables are continuous or categorical. It is a
+very frequent form of data. Over the years several CDA regression
+models for polytomous responses have become popular, e.g., those
+in Table \ref{tab:cat.quantities}. Not surprisingly, the models
+are interrelated: their foundation is the multinomial distribution
+and consequently they share similar and overlapping properties which
+modellers should know and exploit. Unfortunately, software has been
+slow to reflect their commonality and this makes analyses unnecessarily
+difficult for the practitioner on several fronts, e.g., using different
+functions/procedures to fit different models, which does not aid the
+understanding of their connections.
+
+
+This historical misfortune can be seen by considering \R{} functions
+for CDA. From the Comprehensive \proglang{R} Archive Network
+(CRAN, \url{http://CRAN.R-project.org/}) there is \texttt{polr()}
+\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
+model and \texttt{multinom()}
+\citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
+logit model. However, both of these can be considered `one-off'
+modeling functions rather than providing a unified offering for CDA.
+The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
+has greater functionality: it can fit the proportional odds model
+(and the forward continuation ratio model upon preprocessing). Neither
+\texttt{polr()} nor \texttt{lrm()} appears able to fit the nonproportional
+odds model. There are non-CRAN packages too, such as the modeling
+function \texttt{nordr()} \citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit
+the proportional odds, continuation ratio and adjacent categories models;
+however it calls \texttt{nlm()} and the user must supply starting values.
+In general these \R{} \citep{R} modeling functions are not modular
+and often require preprocessing and sometimes are not self-starting.
+The implementations can be perceived as scattered and piecemeal
+in nature. Consequently if the practitioner wishes to fit the models
+of Table \ref{tab:cat.quantities} then there is a need to master several
+modeling functions from several packages each having different syntaxes
+etc. This is a hindrance to efficient CDA.
+
+
+ 
+\begin{table}[tt]
+\centering
+\begin{tabular}{|c|c|l|}
+\hline
+Quantity & Notation &
+%Range of $j$ &
+\VGAM{} family function \\
+\hline
+%
+$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
+%$1,\ldots,M$ &
+\texttt{acat()} \\
+%
+$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
+%$2,\ldots,M+1$ &
+\texttt{acat(reverse = TRUE)} \\
+%
+$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
+%$1,\ldots,M$ & 
+\texttt{cratio()} \\
+%
+$\pr(Y<j|Y \leq j)$ &$\delta_{j}^{*R}$ &
+%$2,\ldots,M+1$ &
+\texttt{cratio(reverse = TRUE)} \\
+%
+$\pr(Y\leq j)$ &$\gamma_{j}$ &
+%$1,\ldots,M$ &
+\texttt{cumulative()} \\
+%
+$\pr(Y\geq j)$ &$\gamma_{j}^R$&
+%$2,\ldots,M+1$ &
+\texttt{cumulative(reverse = TRUE)} \\
+%
+$\log\{\pr(Y=j)/\pr(Y=M+1)\}$ & &
+%$1,\ldots,M$ &
+\texttt{multinomial()} \\
+%
+$\pr(Y=j|Y \geq j)$ &$\delta_{j}$ &
+%$1,\ldots,M$ &
+\texttt{sratio()} \\
+%
+$\pr(Y=j|Y \leq j)$ &$\delta_{j}^R$ &
+%$2,\ldots,M+1$ &
+\texttt{sratio(reverse = TRUE)} \\
+%
+\hline
+\end{tabular}
+\caption{
+Quantities defined in \VGAM{} for a
+categorical response $Y$ taking values $1,\ldots,M+1$.
+Covariates \bix{} have been omitted for clarity.
+The LHS quantities are $\eta_{j}$
+or $\eta_{j-1}$ for $j=1,\ldots,M$ (not reversed)
+and $j=2,\ldots,M+1$ (if reversed), respectively.
+All models are estimated by minimizing the deviance.
+All except for \texttt{multinomial()} are suited to ordinal $Y$.
+\label{tab:cat.quantities}
+}
+\end{table}
+ 
+
+
+
+\proglang{SAS} \citep{SAS} does not fare much better than \R. Indeed,
+it could be considered as having an \textit{excess} of options which
+bewilders the non-expert user; there is little coherent overriding
+structure. Its \code{proc logistic} handles the multinomial logit
+and proportional odds models, as well as exact logistic regression
+\citep[see][which is for Version 8 of \proglang{SAS}]{stok:davi:koch:2000}.
+The fact that the proportional odds model may be fitted by \code{proc
+logistic}, \code{proc genmod} and \code{proc probit} arguably leads
+to possible confusion rather than the making of connections, e.g.,
+\code{genmod} is primarily for GLMs and the proportional odds model is not
+a GLM in the classical \cite{neld:wedd:1972} sense. Also, \code{proc
+phreg} fits the multinomial logit model, and \code{proc catmod} with
+its WLS implementation adds to further potential confusion.
+
+
+This article attempts to show how these deficiencies can be addressed
+by considering the vector generalized linear and additive model
+(VGLM/VGAM) framework, as implemented by the author's \pkg{VGAM}
+package for \R{}. The main purpose of this paper is to demonstrate
+how the framework is very well suited to many `classical' regression
+models for categorical responses, and to describe the implementation and
+usage of \pkg{VGAM} for such. To this end an outline of this article
+is as follows. Section \ref{sec:jsscat.VGLMVGAMoverview} summarizes
+the basic VGLM/VGAM framework. Section \ref{sec:jsscat.vgamff}
+centers on functions for CDA in \VGAM. Given an adequate framework,
+some natural extensions of Section \ref{sec:jsscat.VGLMVGAMoverview} are
+described in Section \ref{sec:jsscat.othermodels}. Users of \pkg{VGAM}
+can benefit from Section \ref{sec:jsscat.userTopics} which shows how
+the software reflects their common theory. Some examples are given in
+Section \ref{sec:jsscat.eg}. Section \ref{sec:jsscat.implementDetails}
+contains selected topics in statistical computing that are
+more relevant to programmers interested in the underlying code.
+Section \ref{sec:jsscat.extnUtil} discusses several utilities and
+extensions needed for advanced CDA modeling, and the article concludes
+with a discussion. This document was run using \pkg{VGAM} 0.7-10
+\citep{yee:VGAM:2010} under \R{} 2.10.0.
+
+
+Some general references for categorical data providing
+background to this article include
+\cite{agre:2010},
+\cite{agre:2013},
+\cite{fahr:tutz:2001},
+\cite{leon:2000},
+\cite{lloy:1999},
+\cite{long:1997},
+\cite{mccu:neld:1989},
+\cite{simo:2003},
+\citet{smit:merk:2013} and
+\cite{tutz:2012}.
+An overview of models for ordinal responses is \cite{liu:agre:2005},
+and a manual for fitting common models found in \cite{agre:2002}
+to polytomous responses with various software is \cite{thom:2009}.
+A package for visualizing categorical data in \R{} is \pkg{vcd}
+\citep{Meyer+Zeileis+Hornik:2006,Meyer+Zeileis+Hornik:2009}.
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{VGLM/VGAM overview}
+\label{sec:jsscat.VGLMVGAMoverview}
+
+
+This section summarizes the VGLM/VGAM framework with a particular emphasis
+on categorical models, since these classes encapsulate many multivariate
+response models in, e.g., survival analysis, extreme value analysis,
+quantile and expectile regression, time series, bioassay data, nonlinear
+least-squares models, and scores of standard and nonstandard univariate
+and continuous distributions. The framework is partially summarized by
+Table \ref{tab:rrvglam.jss.subset}. More general details about VGLMs
+and VGAMs can be found in \cite{yee:hast:2003} and \cite{yee:wild:1996}
+respectively. An informal and practical article connecting the general
+framework with the software is \cite{Rnews:Yee:2008}.
+
+
+
+\subsection{VGLMs}
+\label{sec:wffc.appendixa.vglms}
+
+Suppose the observed response \biy{} is a $q$-dimensional vector.
+VGLMs are defined as a model for which the conditional distribution
+of $\biY$ given explanatory $\bix$ is of the form
+\begin{eqnarray}
+f(\biy | \bix ; \bB, \phi)  =  h(\biy, \eta_1,\ldots, \eta_M, \phi)
+\label{gammod}
+\end{eqnarray}
+for some known function $h(\cdot)$, where $\bB = (\bbeta_1 \,
+\bbeta_2 \, \cdots \, \bbeta_M)$ is a $p \times M$ matrix of
+unknown regression coefficients,
+and the $j$th linear predictor is
+\begin{equation}
+\eta_j  =  \eta_j(\bix)  =  \bbeta_j^{\top} \bix  = 
+\sum_{k=1}^p \beta_{(j)k} \, x_k ,  \qquad j=1,\ldots,M.
+\label{gammod2}
+\end{equation}
+Here $\bix=(x_1,\ldots,x_p)^{\top}$ with $x_1 = 1$ if there is an intercept.
+Note that (\ref{gammod2}) means that \textit{all} the parameters may be
+potentially modelled as functions of \bix. It can be seen that VGLMs are
+like GLMs but allow for multiple linear predictors, and they encompass
+models outside the small confines of the exponential family.
+In (\ref{gammod}) the quantity $\phi$ is an optional scaling parameter
+which is included for backward compatibility with common adjustments
+to overdispersion, e.g., with respect to GLMs.
+
+
+In general there is no relationship between $q$ and $M$: it
+depends specifically on the model or distribution to be fitted.
+However, for the `classical' categorical regression models of
+Table \ref{tab:cat.quantities} we have $M=q-1$ since $q$ is the number
+of levels the multi-category response $Y$ has.
+
+
+
+
+
+The $\eta_j$ of VGLMs may be applied directly to parameters of a
+distribution rather than just to a mean for GLMs. A simple example is
+a univariate distribution with a location parameter $\xi$ and a scale
+parameter $\sigma > 0$, where we may take $\eta_1 = \xi$ and $\eta_2 =
+\log\,\sigma$. In general, $\eta_{j}=g_{j}(\theta_{j})$ for some parameter
+link function $g_{j}$ and parameter $\theta_{j}$.
+For example, the adjacent categories quantities in
+Table \ref{tab:cat.quantities} are ratios of two probabilities; therefore
+a log link of $\zeta_{j}^{R}$ or $\zeta_{j}$ is the default.
+In \VGAM{}, there are currently over a dozen links to choose from, of
+which any can be assigned to any parameter, ensuring maximum flexibility.
+Table \ref{tab:jsscat.links} lists some of them.
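+
+
+As a concrete sketch of the above (not run here, and using simulated
+data and object names of our own choosing), one might fit a
+location--scale model in which $\eta_1 = \mu$ and $\eta_2 = \log\,\sigma$
+both depend on a covariate, with the links assigned through arguments
+of the family function:
+<<eval=FALSE>>=
+set.seed(1)
+ldata <- data.frame(x2 = runif(nn <- 500))
+ldata <- transform(ldata, y1 = rnorm(nn, mean = 1 + 2 * x2,
+                                     sd = exp(-1 + x2)))
+fit.ls <- vglm(y1 ~ x2, uninormal(lmean = "identitylink", lsd = "loge"),
+               data = ldata)
+coef(fit.ls, matrix = TRUE)  # One column per linear predictor eta_j
+@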
+
+
+
+\begin{table}[tt]
+\centering
+%\  ~~~ \par
+\begin{tabular}{|l|l|l|l|}
+\hline
+\qquad \qquad $\boldeta$ & 
+Model & Modeling & Reference \\
+ & & function & \\
+%-------------------------------------------------------------
+\hline
+\hline
+%-------------------------------------------------------------
+ &&&\\[-1.1ex]
+$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
+VGLM & \texttt{vglm()}
+&
+\cite{yee:hast:2003} \\[1.6ex]
+%Yee \& Hastie (2003) \\[1.6ex]
+%-------------------------------------------------------------
+\hline
+ &&&\\[-1.1ex]
+$\bB_1^{\top} \bix_{1} +
+ \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
+%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
+VGAM & \texttt{vgam()}
+&
+\cite{yee:wild:1996} \\[2.2ex]
+%Yee \& Wild (1996) \\[2.2ex]
+%-------------------------------------------------------------
+\hline
+ &&&\\[-1.1ex]
+$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
+RR-VGLM & \texttt{rrvglm()}
+&
+\cite{yee:hast:2003} \\[1.8ex]
+%Yee \& Hastie (2003) \\[1.8ex]
+%-------------------------------------------------------------
+\hline
+ &&&\\[-1.1ex]
+See \cite{yee:hast:2003} &
+Goodman's RC & \texttt{grc()}
+&
+%\cite{yee:hast:2003} \\[1.8ex]
+\cite{good:1981} \\[1.8ex]
+%-------------------------------------------------------------
+\hline
+\end{tabular}
+\caption{
+Some of 
+the package \VGAM{} and
+its framework.
+The vector of latent variables $\bnu = \bC^{\top} \bix_2$
+where
+$\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
+\label{tab:rrvglam.jss.subset}
+}
+%\medskip
+\end{table}
+
+
+
+
+
+
+VGLMs are estimated using iteratively reweighted least squares (IRLS) 
+which is particularly suitable for categorical models
+\citep{gree:1984}.
+All models in this article have a log-likelihood
+\begin{equation}
+\ell  =  \sum_{i=1}^n \, w_i \, \ell_i
+\label{eq:log-likelihood.VGAM}
+\end{equation}
+where the $w_i$ are known positive prior weights.
+Let $\bix_i$ denote the explanatory vector for the $i$th observation,
+for $i=1,\dots,n$.
+Then one can write
+\begin{eqnarray}
+\boldeta_i &=& \boldeta(\bix_i)  = 
+\left(
+\begin{array}{c}
+\eta_1(\bix_i) \\
+\vdots \\
+\eta_M(\bix_i)
+\end{array} \right)  = 
+\bB^{\top} \bix_i  =  
+\left(
+\begin{array}{c}
+\bbeta_1^{\top} \bix_i \\
+\vdots \\
+\bbeta_M^{\top} \bix_i
+\end{array} \right)
+\nonumber
+\\
+&=& 
+\left(
+\begin{array}{cccc}
+\beta_{(1)1} & \cdots & \beta_{(1)p} \\
+\vdots \\
+\beta_{(M)1} & \cdots & \beta_{(M)p} \\
+\end{array} \right)
+\bix_i  = 
+\left(
+\bbeta_{(1)} \; \cdots \; \bbeta_{(p)}
+\right)
+\bix_i .
+\label{eq:lin.pred}
+\end{eqnarray}
+In IRLS,
+an adjusted dependent vector $\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$
+is regressed upon a large (VLM) model matrix, with
+$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
+The working weights $\bW_i$ here are 
+$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
+(which, under regularity conditions, is equal to
+$-w_i \, E[ \partial^2 \ell_i / (\partial \boldeta_i \,
+\partial \boldeta_i^{\top})]$),
+giving rise to the Fisher scoring algorithm.
+
+
+Let $\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$
+(LM) model matrix
+obtained from the \texttt{formula} argument of \texttt{vglm()}.
+Given $\biz_i$, $\bW_i$ and $\bX{}$ at the current IRLS iteration,
+a weighted multivariate regression is performed.
+To do this, a \textit{vector linear model} (VLM) model matrix 
+$\bX_{\sVLM}$ is formed from $\bX{}$ and $\bH_k$
+(see Section \ref{sec:wffc.appendixa.vgams}).
+This has $nM$ rows, and if there are no constraints then $Mp$ columns.
+Then $\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
+upon $\bX_{\sVLM}$
+with variance-covariance matrix $\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$.
+This system of linear equations is converted to one large
+WLS fit by premultiplication of the output of
+a Cholesky decomposition of the $\bW_i$.
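+
+
+For readers who wish to look inside the IRLS iterations, the working
+weight matrices $\bW_i$ and the score vectors can be extracted from a
+fitted model. A minimal sketch (assuming \texttt{fit} is some
+previously fitted \texttt{vglm()} object) is
+<<eval=FALSE>>=
+M <- ncol(predict(fit))
+wwt <- weights(fit, type = "working", deriv = TRUE)  # Matrix-band format
+wz <- m2a(wwt$weights, M = M)  # The W_i as an M x M x n array
+dim(wz)
+@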
+
+
+Fisher scoring usually has good numerical stability
+because the $\bW_i$ are positive-definite over a larger
+region of parameter space than Newton-Raphson. 
+For the categorical models in this article the expected
+information matrices are simpler than the observed
+information matrices, and are easily derived;
+therefore all the families in Table \ref{tab:cat.quantities}
+implement Fisher scoring.
+
+
+
+\subsection{VGAMs and constraint matrices}
+\label{sec:wffc.appendixa.vgams}
+
+
+VGAMs provide additive-model extensions to VGLMs, that is,
+(\ref{gammod2}) is generalized to
+\begin{equation}
+\eta_j(\bix)  =  \beta_{(j)1} +
+\sum_{k=2}^p \; f_{(j)k}(x_k), \qquad j = 1,\ldots, M,
+\label{addmod}
+\end{equation}
+a sum of smooth functions of the individual covariates, just as
+with ordinary GAMs \citep{hast:tibs:1990}. The $\bif_k =
+(f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered for uniqueness,
+and are estimated simultaneously using \textit{vector smoothers}.
+VGAMs are thus a visual data-driven method that is well suited to
+exploring data, and they retain the simplicity of interpretation that
+GAMs possess.
+
+
+
+An important concept, especially for CDA, is the idea of
+`constraints-on-the-functions'.
+In practice we often wish to constrain the effect of a covariate to
+be the same for some of the $\eta_j$ and to have no effect for others.
+We shall see below that this constraints idea is important
+for several categorical models because of a popular parallelism assumption.
+As a specific example, for VGAMs we may wish to take
+\begin{eqnarray*}
+\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
+\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
+\end{eqnarray*}
+so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$.
+For VGAMs, we can represent these models using
+\begin{eqnarray}
+\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
+\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
+\label{eqn:constraints.VGAM}
+\end{eqnarray}
+where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
+\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
+possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
+vector of unknown intercepts. With no constraints at all, $\bH_1 =
+\bH_2 = \cdots = \bH_p = \bI_M$ and $\bbeta_{(1)}^* = \bbeta_{(1)}$.
+Like the $\bif_k$, the $\bif_k^*$ are centered for uniqueness.
+For VGLMs, the $\bif_k$ are linear so that
+\begin{eqnarray}
+{\bB}^{\top} &=&
+\left(
+\bH_1 \bbeta_{(1)}^*
+ \;
+\Bigg|
+ \;
+\bH_2 \bbeta_{(2)}^*
+ \;
+\Bigg|
+ \;
+\cdots
+ \;
+\Bigg|
+ \;
+\bH_p \bbeta_{(p)}^*
+\right) 
+\label{eqn:lin.coefs4}
+\end{eqnarray}
+for some vectors
+$\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
+
+
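+In \texttt{vglm()} and \texttt{vgam()} the $\bH_k$ may be supplied
+through the \texttt{constraints} argument as a named list of matrices.
+As a minimal sketch, a parallelism constraint on a single covariate
+(here $M=2$ because the ordinal response has three levels) can be
+requested explicitly by
+<<eval=FALSE>>=
+pneumo <- transform(pneumo, let = log(exposure.time))
+clist <- list("(Intercept)" = diag(2),  # H_1 = I_2
+              let = rbind(1, 1))        # Same effect of 'let' on both etas
+fit.c <- vglm(cbind(normal, mild, severe) ~ let,
+              cumulative, data = pneumo, constraints = clist)
+constraints(fit.c)  # Echoes the H_k actually used
+@
+
+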
+The
+$\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$ using
+Kronecker product operations.
+For example, with trivial constraints,
+$\bX_{\sVLM} = \bX \otimes \bI_M$.
+More generally,
+\begin{eqnarray}
+\bX_{\sVLM} &=& 
+\left(
+\left( \bX \, \bie_{1} \right) \otimes \bH_1
+ \;
+\Bigg|
+ \;
+\left( \bX \, \bie_{2} \right) \otimes \bH_2
+ \;
+\Bigg|
+ \;
+\cdots
+ \;
+\Bigg|
+ \;
+\left( \bX \, \bie_{p} \right) \otimes \bH_p
+\right)
+\label{eqn:X_vlm_Hk}
+\end{eqnarray}
+($\bie_{k}$ is a vector of zeros except for a one in the $k$th position)
+so that 
+$\bX_{\sVLM}$ is $(nM) \times p^*$ where
+$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
+of columns of all the constraint matrices.
+Note that $\bX_{\sVLM}$ and \bX{} can be obtained by
+\texttt{model.matrix(vglmObject, type = "vlm")}
+and
+\texttt{model.matrix(vglmObject, type = "lm")}
+respectively.
+Equation \ref{eqn:lin.coefs4} focusses on the rows of \bB{} whereas
+(\ref{eq:lin.pred}) focusses on the columns.
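+
+For example (a sketch, again assuming some previously fitted object
+\texttt{fit}):
+<<eval=FALSE>>=
+dim(model.matrix(fit, type = "lm"))   # n x p
+dim(model.matrix(fit, type = "vlm"))  # (nM) x p*
+@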
+
+
+VGAMs are estimated by applying a modified vector backfitting algorithm
+\citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.
+
+
+
+\subsection{Vector splines and penalized likelihood}
+\label{sec:ex.vspline}
+
+If (\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
+natural extension of the cubic smoothing spline to vector responses)
+then it can be shown that the resulting solution maximizes a penalized
+likelihood; some details are sketched in \cite{yee:step:2007}. In fact,
+knot selection for vector splines follows the same idea as O-splines
+\citep[see][]{wand:orme:2008} in order to lower the computational cost.
+
+
+The usage of \texttt{vgam()} with smoothing is very similar
+to \texttt{gam()} \citep{gam:pack:2009}, e.g.,
+to fit a nonparametric proportional odds model
+\citep[cf. p.179 of][]{mccu:neld:1989}
+to the pneumoconiosis data one could try
+<<label = pneumocat, eval=T>>=
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+            cumulative(reverse = TRUE, parallel = TRUE), data = pneumo)
+@
+Here, setting \texttt{df = 1} means a linear fit so that
+\texttt{df = 2} affords a little nonlinearity.
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section[VGAM family functions]{\pkg{VGAM} family functions}
+\label{sec:jsscat.vgamff}
+
+
+
+This section summarizes and comments on the \VGAM{} family functions
+of Table \ref{tab:cat.quantities} for a categorical response variable
+taking values $Y=1,2,\ldots,M+1$. In its most basic invocation, the usage
+entails a trivial change compared to \texttt{glm()}: use \texttt{vglm()}
+instead and assign the \texttt{family} argument a \VGAM{} family function.
+The use of a \VGAM{} family function to fit a specific model is far
+simpler than having a different modeling function for each model.
+Options specific to that model appear as arguments of that \VGAM{} family
+function.
+
+
+
+
+
+While writing \texttt{cratio()} it was found that various authors defined
+the quantity ``continuation ratio'' differently; it therefore became
+necessary to define a ``stopping ratio''. Table \ref{tab:cat.quantities}
+defines these quantities for \VGAM{}.
+
+
+
+
+The multinomial logit model is usually described by choosing the first or
+last level of the factor to be baseline. \VGAM{} chooses the last level
+(Table \ref{tab:cat.quantities}) by default; however, that can be changed
+to any other level by use of the \texttt{refLevel} argument.
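+
+
+For example (a sketch with a hypothetical factor response \texttt{ycat}
+in a hypothetical data frame \texttt{mdata}):
+<<eval=FALSE>>=
+fit.m1 <- vglm(ycat ~ x2, multinomial, data = mdata)  # Last level baseline
+fit.m2 <- vglm(ycat ~ x2, multinomial(refLevel = 1), data = mdata)
+@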
+
+
+
+
+If the proportional odds assumption is inadequate then one strategy is
+to try a different link function (see Section \ref{sec:jsscat.links}
+for a selection). Another alternative is to add extra terms such as
+interaction terms into the linear predictor
+\citep[available in the \proglang{S} language;][]{cham:hast:1993}.
+Another is to fit the so-called \textit{partial}
+proportional odds model \citep{pete:harr:1990}
+which \VGAM{} can fit via constraint matrices.
+
+
+
+In the terminology of \cite{agre:2002},
+\texttt{cumulative()} fits the class of \textit{cumulative link models},
+e.g.,
+\texttt{cumulative(link = probit)} is a cumulative probit model.
+For \texttt{cumulative()}
+it was difficult to decide whether
+\texttt{parallel = TRUE}
+or
+\texttt{parallel = FALSE}
+should be the default.
+In fact, the latter is the default (for now?).
+Users need to set
+\texttt{cumulative(parallel = TRUE)} explicitly to
+fit a proportional odds model---hopefully this will alert
+them to the fact that they are making
+the proportional odds assumption and
+check its validity (\cite{pete:1990}; e.g., through a deviance or
+likelihood ratio test). However, this default means numerical problems
+are far more likely to occur.
+Thus there is tension between the two options.
+As a compromise there is now a \VGAM{} family function
+called \texttt{propodds(reverse = TRUE)} which is equivalent to
+\texttt{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}.
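+
+
+Thus the following two calls should give essentially the same fit
+(a sketch using the \texttt{pneumo} data, with \texttt{let} defined
+as in Section \ref{sec:ex.vspline}):
+<<eval=FALSE>>=
+fit.po1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
+fit.po2 <- vglm(cbind(normal, mild, severe) ~ let,
+                cumulative(parallel = TRUE, reverse = TRUE), data = pneumo)
+coef(fit.po1, matrix = TRUE)  # Same as coef(fit.po2, matrix = TRUE)
+@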
+
+
+
+By the way, note that arguments such as 
+\texttt{parallel}
+can handle a slightly more complex syntax.
+A call such as
+\code{parallel = TRUE ~ x2 + x5 - 1} means the parallelism assumption
+is only applied to $X_2$ and $X_5$.
+This might be equivalent to something like
+\code{parallel = FALSE ~ x3 + x4}, i.e., to the remaining
+explanatory variables.
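+
+
+For instance, a partial proportional odds model in the spirit of
+\cite{pete:harr:1990} might be requested as follows (a sketch with
+hypothetical variables):
+<<eval=FALSE>>=
+fit.pp <- vglm(ymat ~ x2 + x3 + x4 + x5,
+               cumulative(parallel = TRUE ~ x2 + x5 - 1), data = mydat)
+@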
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Other models}
+\label{sec:jsscat.othermodels}
+
+
+Given the VGLM/VGAM framework of Section \ref{sec:jsscat.VGLMVGAMoverview}
+it is found that natural extensions are readily proposed in several
+directions. This section describes some such extensions.
+
+
+
+
+\subsection{Reduced-rank VGLMs}
+\label{sec:jsscat.RRVGLMs}
+
+
+Consider a multinomial logit model where $p$ and $M$ are both large.
+A (not-too-convincing) example might be the data frame \texttt{vowel.test}
+in the package \pkg{ElemStatLearn} \citep[see][]{hast:tibs:buja:1994}.
+The vowel recognition data set involves $q=11$ symbols produced from
+8 speakers with 6 replications of each. The training data comprises
+$10$ input features (not including the intercept) based on digitized
+utterances. A multinomial logit model fitted to these data would
+have $\widehat{\bB}$ comprising $p \times (q-1) = 110$ regression
+coefficients for $n=8\times 6\times 11 = 528$ observations. The ratio
+of $n$ to the number of parameters is small, and it would be good to
+introduce some parsimony into the model.
+
+
+
+A simple and elegant solution is to represent $\widehat{\bB}$ by
+its reduced-rank approximation. To do this, partition $\bix$ into
+$(\bix_1^{\top}, \bix_2^{\top})^{\top}$ and $\bB = (\bB_1^{\top} \;
+\bB_2^{\top})^{\top}$ so that the reduced-rank regression is applied
+to $\bix_2$. In general, \bB{} is a dense matrix of full rank, i.e., rank
+$=\min(M,p)$, and since there are $M \times p$ regression coefficients
+to estimate this is `too' large for some models and/or data sets.
+If we approximate $\bB_2$ by a reduced-rank regression
+\begin{equation}
+\label{eq:rrr.BAC}
+\bB_2  =  \bC{} \, \bA^{\top}
+\end{equation}
+and if
+the rank $R$ is kept low then this can cut down the number of regression
+coefficients dramatically. If $R=2$ then the results may be biplotted
+(\texttt{biplot()} in \VGAM{}). Here, \bC{} and \bA{} are $p_2 \times R$
+and $M \times R$ respectively, and usually they are `thin'.
+
+
+More generally, the class of \textit{reduced-rank VGLMs} (RR-VGLMs)
+is simply a VGLM where $\bB_2$ is expressed as a product of two thin
+estimated matrices (Table \ref{tab:rrvglam.jss.subset}). Indeed,
+\cite{yee:hast:2003} show that RR-VGLMs are VGLMs with constraint
+matrices that are unknown and estimated. Computationally, this is
+done using an alternating method: in (\ref{eq:rrr.BAC}) estimate \bA{}
+given the current estimate of \bC{}, and then estimate \bC{} given the
+current estimate of \bA{}. This alternating algorithm is repeated until
+convergence within each IRLS iteration.
+
+
+Incidentally, special cases of RR-VGLMs have appeared in the
+literature. For example, an RR-multinomial logit model is known as the
+\textit{stereotype} model \citep{ande:1984}. Another is \cite{good:1981}'s
+RC model (see Section \ref{sec:jsscat.rrr.goodman}), which is a reduced-rank
+multivariate Poisson model. Note that the parallelism assumption of the
+proportional odds model \citep{mccu:neld:1989} can be thought of as a
+type of reduced-rank regression where the constraint matrices are thin
+($\bone_M$, actually) and known.
+
+
+
+The modeling function \texttt{rrvglm()} should work with any \VGAM{}
+family function compatible with \texttt{vglm()}. Of course, its
+applicability should be restricted to models where a reduced-rank
+regression of $\bB_2$ makes sense.
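+
+As a sketch, assuming \pkg{ElemStatLearn} is installed and that
+\texttt{vowel.test} has response \texttt{y} and inputs
+\texttt{x.1}--\texttt{x.10}, a rank-2 fit might be
+\begin{Code}
+library("ElemStatLearn")
+fit.rr <- rrvglm(factor(y) ~ ., multinomial, data = vowel.test, Rank = 2)
+biplot(fit.rr)  # Possible because R = 2
+\end{Code}
+which replaces the 100 coefficients of $\bB_2$ by the far fewer in
+\bA{} and \bC.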
+
+
+
+
+
+
+
+
+
+\subsection[Goodman's R x C association model]{Goodman's $R \times C$ association model}
+\label{sec:jsscat.rrr.goodman}
+
+
+
+
+
+Let $\bY = [(y_{ij})]$ be an $n \times M$ matrix of counts.
+Section 4.2 of \cite{yee:hast:2003} shows that Goodman's RC$(R)$ association
+model \citep{good:1981} fits within the VGLM framework by setting up
+the appropriate indicator variables, structural zeros and constraint
+matrices. Goodman's model fits a reduced-rank type model to \bY{}
+by firstly assuming that $Y_{ij}$ has a Poisson distribution, and that
+\begin{eqnarray}
+\log \, \mu_{ij} &=& \mu + \alpha_{i} + \gamma_{j} + 
+\sum_{k=1}^R a_{ik} \, c_{jk} , 
+\ \ \ i=1,\ldots,n;\ \ j=1,\ldots,M,
+\label{eqn:goodmanrc}
+\end{eqnarray}
+where $\mu_{ij} = E(Y_{ij})$ is the mean of the $i$-$j$ cell, and the
+rank $R$ satisfies $R < \min(n,M)$.
+
+
+The modeling function \texttt{grc()} should work on any two-way
+table \bY{} of counts generated by (\ref{eqn:goodmanrc}) provided
+the number of 0's is not too large. Its usage is quite simple, e.g.,
+\texttt{grc(Ymatrix, Rank = 2)} fits a rank-2 model to a matrix of counts.
+By default a \texttt{Rank = 1} model is fitted.
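+
+As a minimal sketch on simulated counts:
+\begin{Code}
+set.seed(1)
+Ymat <- matrix(rpois(8 * 5, lambda = 10), nrow = 8, ncol = 5)
+gfit <- grc(Ymat, Rank = 1)
+summary(gfit)
+\end{Code}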
+
+
+
+
+\subsection{Bradley-Terry models}
+\label{sec:jsscat.brat}
+
+Consider
+an experiment consisting of $n_{ij}$ judges who compare
+pairs of items $T_i$, $i=1,\ldots,M+1$.
+They express their preferences between $T_i$ and $T_j$. 
+Let $N=\sum \sum_{i<j} n_{ij}$ be the total number of pairwise
+comparisons, and assume independence for ratings of the same pair
+by different judges and for ratings of different pairs by the same judge.
+Let $\pi_i$ be the \textit{worth} of item $T_i$,
+\[
+\pr(T_i > T_j)  =  p_{i/ij}  =  \frac{\pi_i}{\pi_i + \pi_j},
+\  \qquad i \neq {j},
+\]
+where ``$T_i>T_j$'' means $i$ is preferred over $j$.
+Suppose that $\pi_i > 0$.
+Let $Y_{ij}$ be the number of times that $T_i$ is preferred
+over $T_j$ in the $n_{ij}$ comparisons of the pairs.
+Then $Y_{ij} \sim {\rm Bin}(n_{ij},p_{i/ij})$.
+This is a Bradley-Terry model (without ties),
+and the \VGAM{} family function is \texttt{brat()}.
+
+
+Maximum likelihood estimation of the parameters $\pi_1,\ldots,\pi_{M+1}$
+involves maximizing
+\[
+\prod_{i<j}^{M+1}
+\left(
+\begin{array}{c}
+n_{ij} \\
+y_{ij}
+\end{array} \right)
+\left(
+\frac{\pi_i}{\pi_i + \pi_j}
+\right)^{y_{ij}}
+\left(
+\frac{\pi_j}{\pi_i + \pi_j}
+\right)^{n_{ij}-y_{ij}} .
+\]
+By default, $\pi_{M+1} \equiv 1$ is used for identifiability;
+however, this can be changed very easily.
+Note that one can define 
+linear predictors $\eta_{ij}$ of the form
+\begin{equation}
+\label{eq:bradter.logit}
+\logit 
+\left(
+\frac{\pi_i}{\pi_i + \pi_j}
+\right)  =  \log 
+\left(
+\frac{\pi_i}{\pi_j}
+\right)  =  \lambda_i - \lambda_j .
+\end{equation}
+The \VGAM{} framework can handle the Bradley-Terry model only for
+intercept-only models; it has
+\begin{equation}
+\label{eq:bradter}
+\lambda_j  =  \eta_j  =  \log\, \pi_j = \beta_{(1)j},
+\ \ \ \ j=1,\ldots,M.
+\end{equation}
+
+
+As well as having many applications in the field of preferences,
+the Bradley-Terry model has many uses in modeling `contests' between
+teams $i$ and $j$, where only one of the teams can win in each
+contest (ties are not allowed under the classical model).
+The packaging function \texttt{Brat()} can be used to
+convert a square matrix into one that has more columns, to
+serve as input to \texttt{vglm()}.
+For example,
+consider journal citation data, where a citation of article B
+by article A is a win for article B and a loss for article A.
+On a specific data set,
+<<>>=
+journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
+squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
+                      498, 68, NA, 325,   221, 17, 142, NA), 4, 4)
+dimnames(squaremat) <- list(winner = journal, loser = journal)
+@
+then \texttt{Brat(squaremat)} returns a $1 \times 12$ matrix.
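+A sketch of the corresponding fit
+(we recall \texttt{Coef()} returning the fitted worths) is
+\begin{Code}
+fit.bt <- vglm(Brat(squaremat) ~ 1, brat, trace = TRUE)
+Coef(fit.bt)  # Estimated worths, with the last fixed at unity
+\end{Code}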
+
+
+
+
+
+
+
+\subsubsection{Bradley-Terry model with ties}
+\label{sec:cat.bratt}
+
+
+The \VGAM{} family function \texttt{bratt()} implements
+a Bradley-Terry model with ties (no preference), e.g.,
+where both $T_i$ and $T_j$ are equally good or bad.
+Here we assume
+\begin{eqnarray*}
+ \pr(T_i > T_j) &=& \frac{\pi_i}{\pi_i + \pi_j + \pi_0},
+\ \qquad
+ \pr(T_i = T_j)  =  \frac{\pi_0}{\pi_i + \pi_j + \pi_0},
+\end{eqnarray*}
+with $\pi_0 > 0$ as an extra parameter.
+It has 
+\[
+\boldeta=(\log \pi_1,\ldots, \log \pi_{M-1}, \log \pi_{0})^{\top}
+\]
+by default, where there are $M$ competitors and $\pi_M \equiv 1$.
+Like \texttt{brat()}, one can choose a different reference group
+and reference value.
+
+
+Other \R{} packages for the Bradley-Terry model
+include \pkg{BradleyTerry2}
+by H. Turner and D. Firth
+\citep[with and without ties;][]{firth:2005,firth:2008}
+and \pkg{prefmod} \citep{Hatzinger:2009}.
+
+
+
+
+\begin{table}[tt]
+\centering
+\begin{tabular}{|l|c|}
+\hline
+\pkg{VGAM} family function & Independent parameters \\
+\hline
+\texttt{ABO()} & $p, q$ \\
+\texttt{MNSs()} & $m_S, m_s, n_S$ \\
+\texttt{AB.Ab.aB.ab()} & $p$ \\
+\texttt{AB.Ab.aB.ab2()} & $p$ \\
+\texttt{AA.Aa.aa()} & $p_A$ \\
+\texttt{G1G2G3()} & $p_1, p_2, f$ \\
+\hline
+\end{tabular}
+\caption{Some genetic models currently implemented
+and their unique parameters.
+\label{tab:gen.all}
+}
+\end{table}
+
+
+
+
+
+\subsection{Genetic models}
+\label{sec:jsscat.genetic}
+
+
+There are quite a number of population genetic models based on the
+multinomial distribution,
+e.g., \cite{weir:1996}, \cite{lang:2002}.
+Table \ref{tab:gen.all} lists some \pkg{VGAM} family functions for such models.
+
+
+
+
+For example, the ABO blood group system
+has two independent parameters $p$ and $q$, say.
+Here,
+the blood groups A, B and O form six possible combinations (genotypes)
+consisting of AA, AO, BB, BO, AB, OO
+(see Table \ref{tab:ABO}). A and B are dominant over
+blood type O. Let $p$, $q$ and $r$ be the probabilities
+for A, B and O respectively (so that
+$p+q+r=1$) for a given population. 
+The log-likelihood function is 
+\[
+\ell(p,q) \;=\; n_A\, \log(p^2 + 2pr) + n_B\, \log(q^2 + 2qr) + n_{AB}\,
+\log(2pq) + 2 n_O\, \log(1-p-q),
+\]
+where $r = 1 - p -q$, $p \in (\,0,1\,)$,
+$q \in (\,0,1\,)$, $p+q<1$.
+We let $\boldeta = (g(p), g(q))^{\top}$ where $g$ is the link function.
+Any $g$ from Table \ref{tab:jsscat.links} appropriate for
+a parameter $\theta \in (0,1)$ will do.
+
+
+A toy example where $p=p_A$ and $q=p_B$ is
+<<>>=
+abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
+fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
+coef(fit, matrix = TRUE)
+Coef(fit)  # Estimated pA and pB
+@
+The function \texttt{Coef()}, which applies only to intercept-only models,
+inverts $g_{j}(\theta_{j})=\eta_{j}$:
+it applies the inverse link function $g_{j}^{-1}$ to $\widehat{\eta}_{j}$
+to give $\widehat{\theta}_{j}$.
+
+
+
+
+
+
+
+\begin{table}[tt]
+% Same as Table 14.1 of E-J, and Table 2.6 of Weir 1996
+\begin{center}
+\begin{tabular}{|l|cc|cc|c|c|}
+\hline
+Genotype   & AA  & AO  & BB  &  BO  & AB  &  OO  \\
+Probability&$p^2$&$2pr$&$q^2$&$ 2qr$&$2pq$& $r^2$\\
+Blood group&  A  &  A  &  B  &  B   &  AB &  O \\
+\hline
+\end{tabular}
+\end{center}
+\caption{Probability table for the ABO blood group system.
+Note that $p$ and $q$ are the parameters and $r=1-p-q$.
+\label{tab:ABO}
+}
+\end{table}
+
+
+
+
+
+\subsection{Three main distributions}
+\label{sec:jsscat.3maindist}
+
+\cite{agre:2002} discusses three main distributions for categorical
+variables: binomial, multinomial, and Poisson
+\citep{thom:2009}.
+All these are well-represented in the \VGAM{} package,
+accompanied by variant forms.
+For example,
+there is a \VGAM{} family function named \texttt{mbinomial()}
+which implements a matched binomial model
+(suitable for matched case-control studies).
+Other variants include
+Poisson ordination (useful in ecology for multi-species environmental data),
+negative binomial families,
+positive, zero-altered and zero-inflated variants,
+and the bivariate odds ratio model
+\citep[\texttt{binom2.or()}; see Section 6.5.6 of][]{mccu:neld:1989}.
+The latter has an \texttt{exchangeable} argument to allow for an
+exchangeable error structure:
+\begin{eqnarray}
+\bH_1  = 
+\left( \begin{array}{cc}
+1 & 0 \\
+1 & 0 \\
+0 & 1 \\
+\end{array} \right), \qquad
+\bH_k  = 
+\left( \begin{array}{c}
+1 \\
+1 \\
+0 \\
+\end{array} \right), \quad k=2,\ldots,p,
+\label{eqn:blom.exchangeable}
+\end{eqnarray}
+since, for data $(Y_1,Y_2,\bix)$,
+$\logit \, P\!\left( Y_{j} = 1 \,\Big|\, \bix \right) = 
+\eta_{j}$ for ${j}=1,2$, and
+$\log \, \psi = \eta_{3}$
+where $\psi$ is the odds ratio,
+and so $\eta_{1}=\eta_{2}$.
+Here, \texttt{binom2.or()} has \texttt{zero = 3} by default, meaning $\psi$ is
+modelled as intercept-only
+(in general, \texttt{zero} may be assigned an integer vector
+such that the value $j$ means $\eta_{j} = \beta_{(j)1}$,
+i.e., the $j$th linear/additive predictor is an intercept-only).
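+A sketch using the \texttt{coalminers} data in \VGAM{}
+(we recall the four count columns being \texttt{nBnW}, \texttt{nBW},
+\texttt{BnW} and \texttt{BW}) is
+\begin{Code}
+fit.or <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age,
+               binom2.or(exchangeable = TRUE, zero = 3),
+               data = coalminers)
+coef(fit.or, matrix = TRUE)  # The eta1 and eta2 columns coincide
+\end{Code}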
+See the online help for all of these models.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Some user-oriented topics}
+\label{sec:jsscat.userTopics}
+
+
+Making the most of \VGAM{} requires an understanding of the general
+VGLM/VGAM framework described in Section \ref{sec:jsscat.VGLMVGAMoverview}.
+In this section we connect elements of that framework with the software.
+Before doing so it is noted that
+a fitted \VGAM{} categorical model has access to the usual
+generic functions, e.g.,
+\texttt{coef()} for
+$\left(\widehat{\bbeta}_{(1)}^{*T},\ldots,\widehat{\bbeta}_{(p)}^{*T}\right)^{\top}$
+(see Equation \ref{eqn:lin.coefs4}),
+\texttt{constraints()} for $\bH_k$,
+\texttt{deviance()} for $2\left(\ell_{\mathrm{max}} - \ell\right)$,
+\texttt{fitted()} for $\widehat{\bmu}_i$,
+\texttt{logLik()} for $\ell$,
+\texttt{predict()} for $\widehat{\boldeta}_i$,
+\texttt{print()},
+\texttt{residuals(..., type = "response")} for $\biy_i - \widehat{\bmu}_i$ etc.,
+\texttt{summary()},
+\texttt{vcov()} for $\widehat{\Var}(\widehat{\bbeta})$,
+etc.
+The methods function for the extractor function
+\texttt{coef()} has an argument \texttt{matrix}
+which, when set to \texttt{TRUE}, returns $\widehat{\bB}$
+(see Equation \ref{gammod}) as a $p \times M$ matrix,
+and this is particularly useful for confirming that a fit
+has made a parallelism assumption.
+
+
+
+
+
+
+
+\subsection{Common arguments}
+\label{sec:jsscat.commonArgs}
+
+
+The structure of the unified framework given in
+Section \ref{sec:jsscat.VGLMVGAMoverview}
+appears clearly through
+the pool of common arguments
+shared by the
+\VGAM{} family functions in Table \ref{tab:cat.quantities}.
+In particular,
+\texttt{reverse} and
+\texttt{parallel}
+are prominent in CDA.
+These are merely convenient shortcuts for the argument \texttt{constraints},
+which accepts a named list of constraint matrices $\bH_k$.
+For example, setting
+\texttt{cumulative(parallel = TRUE)} would constrain the coefficients $\beta_{(j)k}$
+in (\ref{gammod2}) to be equal for all $j=1,\ldots,M$,
+each separately for $k=2,\ldots,p$.
+That is, $\bH_k = \bone_M$.
+The argument \texttt{reverse} determines the `direction' of
+the parameter or quantity.
+
+Another argument, used less often in CDA, is \texttt{zero};
+this accepts a vector specifying which $\eta_j$ are to be modelled as
+intercept-only; assigning \texttt{NULL} means none are.
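+
+As a hypothetical sketch with $M = 3$ (\texttt{ymat} and \texttt{mydat}
+are made-up names), the parallelism shortcut above corresponds to
+\begin{Code}
+clist <- list("(Intercept)" = diag(3), "x2" = matrix(1, 3, 1))
+fit <- vglm(ymat ~ x2, cumulative, constraints = clist, data = mydat)
+\end{Code}
+where \texttt{matrix(1, 3, 1)} is $\bone_3$.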
+
+
+
+
+
+
+
+
+\subsection{Link functions}
+\label{sec:jsscat.links}
+
+Almost all \VGAM{} family functions
+(one notable exception is \texttt{multinomial()})
+allow, in theory, for any link function to be assigned to each $\eta_j$.
+This provides maximum capability.
+If a link has an associated known parameter then there is an extra
+argument to pass it in.
+For example, \texttt{link = "logoff", earg = list(offset = 1)}
+signifies a log link with a unit offset:
+$\eta_{j} = \log(\theta_{j} + 1)$ for some parameter $\theta_{j}\ (> -1)$.
+The name \texttt{earg} stands for ``extra argument''.
+Table \ref{tab:jsscat.links} lists some links relevant to categorical data.
+While the default gives a reasonable first choice,
+users are encouraged to try different links.
+For example, fitting a binary regression model
+(\texttt{binomialff()}) to the coal miners data set \texttt{coalminers} with
+respect to the response wheeze gives a
+nonsignificant regression coefficient for $\beta_{(1)3}$ with probit analysis
+but not with a logit link when
+$\eta = \beta_{(1)1} + \beta_{(1)2} \, \mathrm{age} + \beta_{(1)3} \, \mathrm{age}^2$.
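+A sketch of that comparison, assuming the \texttt{coalminers} count
+columns used earlier (wheeze cases \texttt{nBW + BW}, non-cases
+\texttt{nBnW + BnW}), is
+\begin{Code}
+fit.pro <- vglm(cbind(nBW + BW, nBnW + BnW) ~ age + I(age^2),
+                binomialff(link = "probit"), data = coalminers)
+fit.lgt <- vglm(cbind(nBW + BW, nBnW + BnW) ~ age + I(age^2),
+                binomialff(link = "logit"), data = coalminers)
+summary(fit.pro)  # Compare the age^2 Wald statistic with fit.lgt's
+\end{Code}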
+Developers and serious users are encouraged to write and use
+new link functions compatible with \VGAM.
+
+
+
+
+
+
+\begin{table*}[tt]
+\centering
+\medskip
+\begin{tabular}{|l|c|c|}
+\hline
+Link function & $g(\theta)$ & Range of $\theta$ \\
+\hline
+\texttt{cauchit()} & $\tan(\pi(\theta-\frac12))$ & $(0,1)$ \\
+\texttt{cloglog()} & $\log_e\{-\log_e(1 - \theta)\}$ & $(0,1)$ \\
+\texttt{fisherz()} & 
+$\frac12\,\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
+\texttt{identity()} & $\theta$ & $(-\infty,\infty)$ \\
+\texttt{logc()} & $\log_e(1 - \theta)$ & $(-\infty,1)$ \\
+\texttt{loge()} & $\log_e(\theta)$ & $(0,\infty)$ \\
+\texttt{logit()} & $\log_e(\theta/(1 - \theta))$ & $(0,1)$ \\
+\texttt{logoff()} & $\log_e(\theta + A)$ & $(-A,\infty)$ \\
+\texttt{probit()} & $\Phi^{-1}(\theta)$ & $(0,1)$ \\
+\texttt{rhobit()} & $\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
+\hline
+\end{tabular}
+\caption{
+Some \VGAM{} link functions pertinent to this article.
+\label{tab:jsscat.links}
+}
+\end{table*}
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Examples}
+\label{sec:jsscat.eg}
+
+This section illustrates CDA modeling on three
+data sets in order to give a flavour of what is available in the package.
+
+
+
+
+%20130919
+%Note: 
+%\subsection{2008 World Fly Fishing Championships}
+%\label{sec:jsscat.eg.WFFC}
+%are deleted since there are problems with accessing the \texttt{wffc.nc}
+%data etc. since they are now in \pkg{VGAMdata}.
+
+
+
+
+
+
+
+\subsection{Marital status data}
+\label{sec:jsscat.eg.mstatus}
+
+We fit a nonparametric multinomial logit model to data collected
+by a self-administered questionnaire in a large New Zealand
+workforce observational study conducted during 1992--3.
+The data were augmented by a second study consisting of retirees.
+For homogeneity, this analysis is restricted
+to a subset of 6053 European males with no missing values.
+The ages ranged between 16 and 88 years.
+The data can be considered a reasonable representation of the white
+male New Zealand population in the early 1990s, and
+are detailed in \cite{macm:etal:1995} and \cite{yee:wild:1996}.
+We are interested in exploring how $Y=$ marital status varies as a function
+of $x_2=$ age. The nominal response $Y$ has four levels;
+in sorted order, they are divorced or separated, married or partnered,
+single and widower.
+We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
+and will choose the married/partnered (second level) as the reference group
+because the other levels emanate directly from it.
+
+Suppose the data are in a data frame called \texttt{marital.nz}
+and look like
+<<>>=
+head(marital.nz, 4)
+summary(marital.nz)
+@
+We fit the VGAM
+<<>>=
+fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
+               data = marital.nz)
+@
+
+Once again, let us first check the input.
+<<>>=
+head(depvar(fit.ms), 4)
+colSums(depvar(fit.ms))
+@
+This seems okay.
+
+
+
+
+Now the estimated component functions $\widehat{f}_{(s)2}(x_2)$
+may be plotted with
+<<fig=F>>=
+# Plot output
+mycol <- c("red", "darkgreen", "blue")
+par(mfrow = c(2, 2))
+plot(fit.ms, se = TRUE, scale = 12,
+         lcol = mycol, scol = mycol)
+
+# Plot output overlayed
+#par(mfrow=c(1,1))
+plot(fit.ms, se = TRUE, scale = 12,
+         overlay = TRUE,
+         llwd = 2,
+         lcol = mycol, scol = mycol)
+@
+to produce Figure \ref{fig:jsscat.eg.mstatus}.
+The \texttt{scale} argument is used here to ensure that the $y$-axes have
+a common scale---this makes comparisons between the component functions
+less susceptible to misinterpretation.
+The first three plots are the (centered) $\widehat{f}_{(s)2}(x_2)$ for
+$\eta_1$,
+$\eta_2$,
+$\eta_3$,
+where
+\begin{eqnarray}
+\label{eq:jsscat.eg.nzms.cf}
+\eta_{s}  = 
+\log(\pr(Y={t}) / \pr(Y={2}))  = 
+\beta_{(s)1} + f_{(s)2}(x_2),
+\end{eqnarray}
+$(s,t) = (1,1), (2,3), (3,4)$,
+and $x_2$ is \texttt{age}.
+The last plot shows the smooths overlaid to aid comparison.
+
+
+It may be seen that the $\pm 2$ standard error bands
+about the \texttt{Widowed} group are particularly wide at
+young ages because of a paucity of data, and
+likewise at old ages for the \texttt{Single} group.
+The $\widehat{f}_{(s)2}(x_2)$ appear as one would expect.
+The log relative risk of
+being single relative to being married/partnered drops sharply from
+ages 16 to 40.
+The fitted function for the \texttt{Widowed} group increases
+with \texttt{age} and looks reasonably linear.
+The $\widehat{f}_{(1)2}(x_2)$
+suggests a possible maximum around 50 years old---this
+could indicate the greatest marital conflict occurs during
+the mid-life crisis years!
+
+
+
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
+# Plot output
+mycol <- c("red", "darkgreen", "blue")
+ par(mfrow = c(2, 2))
+ par(mar = c(4.2, 4.0, 1.2, 2.2) + 0.1)
+plot(fit.ms, se = TRUE, scale = 12,
+         lcol = mycol, scol = mycol)
+
+# Plot output overlaid
+#par(mfrow = c(1, 1))
+plot(fit.ms, se = TRUE, scale = 12,
+         overlay = TRUE,
+         llwd = 2,
+         lcol = mycol, scol = mycol)
+@
+\caption{
+Fitted (and centered) component functions
+$\widehat{f}_{(s)2}(x_2)$
+from the NZ marital status data
+(see Equation \ref{eq:jsscat.eg.nzms.cf}).
+The bottom RHS plot shows the smooths overlaid.
+\label{fig:jsscat.eg.mstatus}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+The methods function for \texttt{plot()} can also plot the
+derivatives of the smooths.
+The call
+<<fig=F>>=
+plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
+@
+results in Figure \ref{fig:jsscat.eg.mstatus.cf.deriv}.
+Once again the $y$-axis scales are commensurate.
+
+\setkeys{Gin}{width=\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=7.2,height=2.4,echo=FALSE>>=
+# Plot output
+ par(mfrow = c(1, 3))
+ par(mar = c(4.5, 4.0, 0.2, 2.2) + 0.1)
+plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
+@
+\caption{
+Estimated first derivatives of the component functions,
+$\widehat{f'}_{(s)2}(x_2)$,
+from the NZ marital status data
+(see Equation \ref{eq:jsscat.eg.nzms.cf}).
+\label{fig:jsscat.eg.mstatus.cf.deriv}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+The derivative for the \texttt{Divorced/Separated} group appears
+linear so that a quadratic component function could be tried.
+Not surprisingly the \texttt{Single} group shows the greatest change;
+also, $\widehat{f'}_{(2)2}(x_2)$ is approximately linear till 50
+and then flat---this suggests one could fit a piecewise quadratic
+function to model that component function up to 50 years.
+The \texttt{Widowed} group appears largely flat.
+We thus fit the parametric model
+<<>>=
+foo <- function(x, elbow = 50)
+  poly(pmin(x, elbow), 2)
+
+clist <- list("(Intercept)" = diag(3),
+             "poly(age, 2)" = rbind(1, 0, 0),
+             "foo(age)"     = rbind(0, 1, 0),
+             "age"          = rbind(0, 0, 1))
+fit2.ms <-
+    vglm(mstatus ~ poly(age, 2) + foo(age) + age,
+         family = multinomial(refLevel = 2),
+         constraints = clist,
+         data = marital.nz)
+@
+Then
+<<>>=
+coef(fit2.ms, matrix = TRUE)
+@
+confirms that one term was used for each component function.
+The plots from
+<<fig=F>>=
+par(mfrow = c(2, 2))
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[3], scol = mycol[3], which.term = 3)
+@
+are given in Figure \ref{fig:jsscat.eg.mstatus.vglm}
+and appear like
+Figure \ref{fig:jsscat.eg.mstatus}.
+
+
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
+# Plot output
+par(mfrow=c(2,2))
+ par(mar=c(4.5,4.0,1.2,2.2)+0.1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+         lcol = mycol[3], scol = mycol[3], which.term = 3)
+@
+\caption{
+Parametric version of \texttt{fit.ms}: \texttt{fit2.ms}.
+The component functions are now quadratic, piecewise quadratic/zero,
+or linear.
+\label{fig:jsscat.eg.mstatus.vglm}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+
+It is possible to perform very crude inference based on the heuristic
+theory of a deviance test:
+<<>>=
+deviance(fit.ms) - deviance(fit2.ms)
+@
+is small, so it seems the parametric model is quite reasonable
+against the original nonparametric model.
+Specifically,
+the difference in the number of `parameters' is approximately
+<<>>=
+(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
+@
+which gives an approximate $p$ value of
+<<>>=
+pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
+@
+Thus \texttt{fit2.ms} appears quite reasonable.
+
+
+
+
+
+
+
+
+The estimated probabilities of the original fit can be plotted
+against \texttt{age} using
+<<fig=F>>=
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
+     type = "l", las = 1, lwd = 2, ylim = 0:1,
+     ylab = "Fitted probabilities",
+     xlab = "Age",  # main="Marital status amongst NZ Male Europeans",
+     col = c(mycol[1], "black", mycol[-1])))
+legend(x = 52.5, y = 0.62,  # x="topright",
+       col = c(mycol[1], "black", mycol[-1]),
+       lty = 1:4,
+       legend = colnames(fit.ms@y), lwd = 2)
+abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1),
+       col = "gray", lty = "dashed")
+@
+which gives Figure \ref{fig:jsscat.eg.mstatus.fitted}.
+This shows that 80--90\% of NZ white males
+aged from their early 30s to mid-70s
+were married/partnered.
+The proportion widowed
+started to rise steeply from 70 years onwards but remained below 0.5
+since males die younger than females on average.
+
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
+ par(mfrow = c(1,1))
+ par(mar = c(4.5,4.0,0.2,0.2)+0.1)
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
+     type = "l", las = 1, lwd = 2, ylim = 0:1,
+     ylab = "Fitted probabilities",
+     xlab = "Age",
+     col = c(mycol[1], "black", mycol[-1])))
+legend(x = 52.5, y = 0.62,
+       col = c(mycol[1], "black", mycol[-1]),
+       lty = 1:4,
+       legend = colnames(fit.ms@y), lwd = 2.1)
+abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1),
+       col = "gray", lty = "dashed")
+@
+\caption{
+Fitted probabilities for each class for the
+NZ male European
+marital status data
+(from Equation \ref{eq:jsscat.eg.nzms.cf}).
+\label{fig:jsscat.eg.mstatus.fitted}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+
+
+
+
+\subsection{Stereotype model}
+\label{sec:jsscat.eg.grc.stereotype}
+
+We reproduce some of the analyses of \cite{ande:1984} regarding the
+progress of 101 patients with back pain
+using the data frame \texttt{backPain} from \pkg{gnm}
+\citep{Rnews:Turner+Firth:2007,Turner+Firth:2009}.
+The three prognostic variables are
+length of previous attack ($x_1=1,2$),
+pain change ($x_2=1,2,3$) 
+and lordosis ($x_3=1,2$).
+Like Anderson, we treat these as numerical, standardizing and negating them.
+%
+The output
+<<>>=
+# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
+head(backPain, 4)
+summary(backPain)
+backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
+@
+displays the six ordered categories.
+Now a rank-1 stereotype model can be fitted with
+<<>>=
+bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
+@
+Then
+<<>>=
+Coef(bp.rrmlm1)
+@
+are the fitted \bA, \bC{} and $\bB_1$ (see Equation \ref{eq:rrr.BAC}
+and Table \ref{tab:rrvglam.jss.subset}), which agree with his Table 6.
+Here, what are known as ``corner constraints'' are used
+($(1,1)$ element of \bA{} $\equiv 1$),
+and only the intercepts are not subject to any reduced-rank regression
+by default.
+The maximized log-likelihood from \textsl{\texttt{logLik(bp.rrmlm1)}}
+is $\Sexpr{round(logLik(bp.rrmlm1), 2)}$.
+The standard errors of each parameter can be obtained by
+\textsl{\texttt{summary(bp.rrmlm1)}}.
+The negative elements of $\widehat{\bC}$ imply the
+latent variable $\widehat{\nu}$ decreases in value with increasing
+\textsl{\texttt{sx1}},
+\textsl{\texttt{sx2}} and
+\textsl{\texttt{sx3}}.
+The elements of $\widehat{\bA}$ tend to decrease so it suggests
+patients get worse as $\nu$ increases,
+i.e., get better as \textsl{\texttt{sx1}},
+\textsl{\texttt{sx2}} and
+\textsl{\texttt{sx3}} increase.
+
+
+
+
+
+
+<<echo=FALSE>>=
+set.seed(123)
+@
+A rank-2 model fitted \textit{with a different normalization}
+<<>>=
+bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
+                   Corner = FALSE, Uncor = TRUE)
+@
+produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
+In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
+so that the latent variables are also scaled to have unit variance.
+The fit was biplotted
+(rows of $\widehat{\bC}$ plotted as arrows;
+ rows of $\widehat{\bA}$ plotted as labels) using
+<<fig=F>>=
+biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
+#      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
+       xlim = c(-4.5, 2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
+       chull = TRUE, clty = 2, ccol = "blue")
+@
+to give Figure \ref{fig:jsscat.eg.rrmlm2.backPain}.
+It is interpreted via inner products due to (\ref{eq:rrr.BAC}).
+The different normalization means that the interpretation of $\nu_1$
+and $\nu_2$ has changed, e.g., increasing
+\textsl{\texttt{sx1}},
+\textsl{\texttt{sx2}} and
+\textsl{\texttt{sx3}} results in increasing $\widehat{\nu}_1$ and
+patients improve more.
+Many of the latent variable points $\widehat{\bnu}_i$ coincide
+due to the discrete nature of the $\bix_i$. The rows of $\widehat{\bA}$
+are centered on the blue labels (rather cluttered unfortunately) and
+do not seem to vary much as a function of $\nu_2$.
+In fact this is confirmed by \cite{ande:1984} who showed a rank-1
+model is to be preferred.
+
+
+
+This example demonstrates the ability to obtain a low dimensional view
+of higher dimensional data. The package's website has additional
+documentation including more detailed Goodman's RC and stereotype
+examples.
+
+
+
+
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+\begin{figure}[tt]
+\begin{center}
+<<fig=TRUE,width=8,height=5.3,echo=FALSE>>=
+# Plot output
+ par(mfrow=c(1,1))
+ par(mar=c(4.5,4.0,0.2,2.2)+0.1)
+
+biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
+#      xlim = c(-1,6), ylim = c(-1.2,4),  # Use this if not scaled
+       xlim = c(-4.5,2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
+       chull = TRUE, clty = 2, ccol = "blue")
+@
+\caption{
+Biplot of a rank-2 reduced-rank multinomial logit (stereotype) model
+fitted to the back pain data.
+A convex hull surrounds the latent variable scores
+$\widehat{\bnu}_i$
+(whose observation numbers are obscured because of their discrete nature).
+The position of the $j$th row of $\widehat{\bA}$
+is the center of the label ``\texttt{log(mu[,j]/mu[,6])}''.
+\label{fig:jsscat.eg.rrmlm2.backPain}
+}
+\end{center}
+\end{figure}
+
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Some implementation details}
+\label{sec:jsscat.implementDetails}
+
+This section describes some implementation details of \VGAM{}
+which will be more of interest to the developer than to the casual user.
+
+
+
+\subsection{Common code}
+\label{sec:jsscat.implementDetails.code}
+
+It is good programming practice to write reusable code where possible.
+All the \VGAM{} family functions in Table \ref{tab:cat.quantities}
+process the response in the same way because the same segment of code
+is executed. This offers a degree of uniformity in terms of how input is
+handled, and also for software maintenance
+(\cite{altm:jack:2010} enumerates good programming techniques and references).
+As well, the default initial values are computed in the same manner
+based on sample proportions of each level of $Y$.
+
+
+
+
+
+\subsection[Matrix-band format of wz]{Matrix-band format of \texttt{wz}}
+\label{sec:jsscat.implementDetails.mbformat}
+
+The working weight matrices $\bW_i$ may become large for categorical
+regression models. In general, we have to evaluate the $\bW_i$
+for $i=1,\ldots,n$, and naively, this could be held in an \texttt{array} of
+dimension \texttt{c(M, M, n)}. However, since the $\bW_i$ are symmetric
+positive-definite it suffices to only store the upper or lower half of
+the matrix.
+
+
+
+The variable \texttt{wz} in \texttt{vglm.fit()}
+stores the working weight matrices $\bW_i$ in 
+a special format called the \textit{matrix-band} format. This
+format comprises an $n \times M^*$ matrix where
+\[
+M^*  =  \sum_{i=1}^{\footnotesize \textit{hbw}} \;
+\left(M-i+1\right)  =  
+\frac12 \, \textit{hbw}\, \left(2\,M - \textit{hbw} +1\right)
+\]
+is the number of columns. Here, \textit{hbw} refers to the
+\textit{half-bandwidth} of the matrix, which is an integer
+between 1 and $M$ inclusive. A diagonal matrix has
+unit half-bandwidth, a tridiagonal matrix has half-bandwidth 2, etc.
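+As a worked instance of the formula for $M^*$:
+\begin{Code}
+M <- 4; hbw <- 2                        # Tridiagonal working weights
+(Mstar <- hbw * (2 * M - hbw + 1) / 2)  # 7 columns, versus M*(M+1)/2 = 10
+\end{Code}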
+
+
+Suppose $M=4$. Then \texttt{wz} will have up to $M^*=10$ columns
+enumerating the unique elements of $\bW_i$ as follows:
+\begin{eqnarray}
+\bW_i  =  
+\left( \begin{array}{rrrr}
+1 & 5 & 8 & 10 \\
+  & 2 & 6 & 9 \\
+  &   & 3 & 7 \\
+  &   &   & 4 
+\end{array} \right).
+\label{eqn:hbw.eg}
+\end{eqnarray}
+That is, the order is firstly the diagonal, then the band above that,
+followed by the second band above the diagonal etc.
+Why is such a format adopted? 
+For this example, if $\bW_i$ is diagonal then only the first 4 columns
+of \texttt{wz} are needed. If $\bW_i$ is tridiagonal then only the
+first 7 columns of \texttt{wz} are needed. 
+If $\bW_i$ \textit{is} banded then \texttt{wz} need not have
+all $\frac12 M(M+1)$ columns; only $M^*$ columns suffice, and the
+rest of the elements of $\bW_i$ are implicitly zero.
+As well as reducing the size of \texttt{wz} itself in most cases, the
+matrix-band format often makes the computation of \texttt{wz} very
+simple and efficient. Furthermore, a Cholesky decomposition of a
+banded matrix will be banded. A final reason is that sometimes we
+want to input $\bW_i$ into \VGAM: if \texttt{wz} is $M \times M \times
+n$ then \texttt{vglm(\ldots, weights = wz)} will result in an error
+whereas it will work if \texttt{wz} is an $n \times M^*$ matrix.
+
+
+
+To facilitate the use of the matrix-band format,
+a few auxiliary functions have been written.
+In particular, there is \texttt{iam()} which gives the indices
+for the array-to-matrix mapping.
+In the $4\times 4$ example above,
+<<>>=
+iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
+@
+returns the indices for the respective array coordinates for
+successive columns of matrix-band format
+(see Equation \ref{eqn:hbw.eg}).
+If \texttt{diag = FALSE} then the first 4 elements in each vector
+are omitted. Note that the first two arguments of 
+\texttt{iam()} are not used here and have been assigned
+\texttt{NA}s for simplicity.
+For its use on the multinomial logit model, where
+$(\bW_i)_{jj} = w_i\,\mu_{ij} (1-\mu_{ij}),\ j=1,\ldots,M$, and 
+$(\bW_i)_{jk} = -w_i\,\mu_{ij} \mu_{ik},\ j\neq k$,
+this can be programmed succinctly like
+\begin{Code}
+wz <- mu[, 1:M] * (1 - mu[, 1:M])
+if (M > 1) {
+  index <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
+  wz <- cbind(wz, -mu[, index$row] * mu[, index$col])
+}
+wz <- w * wz
+\end{Code}
+(the actual code is slightly more complicated).
+In general, \VGAM{} family functions can be remarkably compact,
+e.g.,
+\texttt{acat()},
+\texttt{cratio()}
+and
+\texttt{multinomial()} are all less than 120 lines of code each.
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Extensions and utilities}
+\label{sec:jsscat.extnUtil}
+
+This section describes some useful utilities/extensions of the above.
+
+
+
+\subsection{Marginal effects}
+\label{sec:jsscat.extnUtil.margeff}
+
+
+Models such as the multinomial logit and cumulative link models
+specify the posterior probability $p_{j} = \pr(Y=j|\bix)$ directly.
+In some applications, knowing the derivative of $p_{j}$
+with respect to some of the $x_k$ is useful;
+in fact, often just knowing the sign is important.
+The function \texttt{margeff()} computes the derivatives and
+returns them as a $p \times (M+1) \times n$ array.
+For the multinomial logit model it is easy to show
+\begin{eqnarray}
+\frac{\partial \, p_{j}(\bix_i)}{\partial \,
+\bix_{i}}
+&=&
+p_{j}(\bix_i)
+\left\{
+ \bbeta_{j} -
+\sum_{s=1}^{M+1}
+p_{s}(\bix_i)
+\,
+ \bbeta_{s}
+\right\},
+\label{eqn:multinomial.marginalEffects}
+\end{eqnarray}
+while for
+\texttt{cumulative(reverse = FALSE)}
+we have
+$p_{j} = \gamma_{j} - \gamma_{j-1} = h(\eta_{j}) - h(\eta_{j-1})$
+where $h=g^{-1}$ is the inverse of the link function
+(cf. Table \ref{tab:cat.quantities})
+so that
+\begin{eqnarray}
+\frac{\partial \, p_{j}(\bix_{})}{\partial \,
+\bix}
+&=&
+h'(\eta_{j}) \, \bbeta_{j} -
+h'(\eta_{j-1}) \, \bbeta_{j-1} .
+\label{eqn:cumulative.marginalEffects}
+\end{eqnarray}
+
+
+
+
+The function \texttt{margeff()} returns an array with these
+derivatives and should handle any value of
+\texttt{reverse} and \texttt{parallel}.
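+A sketch of its use, assuming \texttt{margeff()} accepts the earlier
+parametric marital status fit, is
+\begin{Code}
+marg <- margeff(fit2.ms)
+dim(marg)    # p x (M+1) x n
+marg[, , 1]  # Derivatives evaluated at the first observation
+\end{Code}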
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\subsection[The xij argument]{The \texttt{xij} argument}
+\label{sec:jsscat.extnUtil.xij}
+
+There are many models, including those for categorical data,
+where the value of an explanatory variable $x_k$ differs depending
+on which linear/additive predictor $\eta_{j}$ it applies to.
+Here is a well-known example from consumer choice modeling.
+Suppose an econometrician is interested in people's
+choice of transport for travelling to work
+and that there are four choices:
+$Y=1$ for ``bus'',
+$Y=2$ for ``train'',
+$Y=3$ for ``car'' and
+$Y=4$ for ``walking''.
+Assume that people only choose one means to go to work.
+Suppose there are three covariates:
+$X_2=$ cost,
+$X_3=$ journey time, and
+$X_4=$ distance.
+Of the covariates only $X_4$ (and the intercept $X_1$)
+is the same for all transport choices;
+the cost and journey time differ according to the means chosen.
+Suppose a random sample of $n$ people is collected
+from some population, and that each person has
+access to all these transport modes.
+For such data, a natural regression model would be a 
+multinomial logit model with $M=3$:
+for $j=1,\ldots,M$, we have
+$\eta_{j} =$
+\begin{eqnarray}
+\log \frac{\pr(Y=j)}{\pr(Y=M+1)}
+&=&
+\beta_{(j)1}^{*} +
+\beta_{(1)2}^{*} \, (x_{i2j}-x_{i24}) +
+\beta_{(1)3}^{*} \, (x_{i3j}-x_{i34}) +
+\beta_{(1)4}^{*} \, x_{i4},
+\label{eqn:xij.eg.gotowork}
+\end{eqnarray}
+where, for the $i$th person,
+$x_{i2j}$ is the cost for the $j$th transport means, and
+$x_{i3j}$ is the journey time of the $j$th transport means.
+The distance to get to work is $x_{i4}$; it has the same value
+regardless of the transport means.
+
+
+Equation \ref{eqn:xij.eg.gotowork}
+implies $\bH_1=\bI_3$ and $\bH_2=\bH_3=\bH_4=\bone_3$.
+Note
+also that if the last response category is used as the baseline or
+reference group (the default of \texttt{multinomial()}) then $x_{ik,M+1}$
+can be subtracted from $x_{ikj}$ for $j=1,\ldots,M$---this
+is the natural way $x_{ik,M+1}$ enters into the model.
+
+
+
+
+Recall from (\ref{gammod2}) that we had
+\begin{equation}
+\eta_j(\bix_i)  =  \bbeta_j^{\top} \bix_i  = 
+\sum_{k=1}^{p} \, x_{ik} \, \beta_{(j)k} .
+\label{eqn:xij0}
+\end{equation}
+Importantly, this can be generalized to
+\begin{equation}
+\eta_j(\bix_{ij})  =  \bbeta_j^{\top} \bix_{ij}  = 
+\sum_{k=1}^{p} \, x_{ikj} \, \beta_{(j)k} ,
+\label{eqn:xij}
+\end{equation}
+or writing this another way (as a mixture or hybrid),
+\begin{equation}
+\eta_j(\bix_{i}^{*},\bix_{ij}^{*})  =  
+\bbeta_{j}^{*T} \bix_{i}^{*} + \bbeta_{j}^{**T} \bix_{ij}^{*} .
+\label{eqn:xij2}
+\end{equation}
+Often $\bbeta_{j}^{**} = \bbeta_{}^{**}$, say.
+In (\ref{eqn:xij2}) the variables in $\bix_{i}^{*}$ are common to
+all $\eta_{j}$, and the variables in $\bix_{ij}^{*}$ have
+different values for differing $\eta_{j}$.
+This allows for covariate values that are specific to each $\eta_j$,
+a facility which is very important in many applications.
+
+
+The use of the \texttt{xij} argument with the \VGAM{} family function
+\texttt{multinomial()} has very important applications in economics.
+In that field the term ``multinomial logit model'' includes a variety of
+models such as the ``generalized logit model'' where (\ref{eqn:xij0})
+holds, the ``conditional logit model'' where (\ref{eqn:xij}) holds,
+and the ``mixed logit model,'' which is a combination of the two,
+where (\ref{eqn:xij2}) holds.
+The generalized logit model focusses on the individual as the unit of
+analysis, and uses individual characteristics as explanatory variables,
+e.g., age of the person in the transport example.
+The conditional logit model assumes different values for each
+alternative and the impact of a unit of $x_k$ is assumed to be constant
+across alternatives, e.g., journey time in the choice of transport mode.
+Unfortunately, there is confusion in the literature for the terminology
+of the models. Some authors call \texttt{multinomial()}
+with (\ref{eqn:xij0}) the ``generalized logit model''.
+Others call the mixed
+logit model the ``multinomial logit model'' and view the generalized
+logit and conditional logit models as special cases.
+In \VGAM{} terminology there is no need to give different names to
+all these slightly differing special cases. They are all still called
+multinomial logit models, although it may be added that there are
+some covariate-specific linear/additive predictors.
+The important thing is that the framework accommodates $\bix_{ij}$,
+so one tries to avoid making life unnecessarily complicated.
+And \texttt{xij} can apply in theory to any VGLM and not just to the
+multinomial logit model.
+\cite{imai:king:lau:2008} present another perspective on the
+$\bix_{ij}$ problem with illustrations from \pkg{Zelig}
+\citep{Zelig:2009}.
+
+
+
+
+
+\subsubsection[Using the xij argument]{Using the \texttt{xij} argument}
+\label{sec:xij.sub}
+
+\VGAM{} handles variables whose values depend on $\eta_{j}$,
+as in (\ref{eqn:xij2}), using the \texttt{xij} argument.
+It is assigned an \proglang{S} formula or a list of \proglang{S} formulas.
+Each formula, which must have $M$ \textit{different} terms,
+forms a matrix that premultiplies a constraint matrix.
+In detail, (\ref{eqn:xij0}) can be written in vector form as
+\begin{equation}
+\boldeta(\bix_i)  =  \bB^{\top} \bix_i  = 
+\sum_{k=1}^{p} \, \bH_{k} \, \bbeta_{k}^{*} \, x_{ik},
+\label{eqn:xij0.vector}
+\end{equation}
+where
+$\bbeta_{k}^{*} =
+\left( \beta_{(1)k}^{*},\ldots,\beta_{(r_k)k}^{*} \right)^{\top}$
+is to be estimated.
+This may be written
+\begin{eqnarray}
+\boldeta(\bix_{i})
+&=&
+\sum_{k=1}^{p} \, \diag(x_{ik},\ldots,x_{ik}) \,
+\bH_k \, \bbeta_{k}^{*}.
+\label{eqn:xij.d.vector}
+\end{eqnarray}
+To handle (\ref{eqn:xij})--(\ref{eqn:xij2})
+we can generalize (\ref{eqn:xij.d.vector}) to
+\begin{eqnarray}
+\boldeta_i
+&=&
+\sum_{k=1}^{p} \, \diag(x_{ik1},\ldots,x_{ikM}) \;
+\bH_k \, \bbeta_{k}^{*}
+\ \ \ \ \left(=
+\sum_{k=1}^{p} \, \bX_{(ik)}^{*} \,
+\bH_k \, \bbeta_{k}^{*} ,
+\mathrm{\ say} \right).
+\label{eqn:xij.vector}
+\end{eqnarray}
+Each component of the list \texttt{xij} is a formula having $M$ terms
+(ignoring the intercept) which
+specifies the successive diagonal elements of the matrix $\bX_{(ik)}^{*}$.
+Thus each row of the constraint matrix may be multiplied by a different
+vector of values.
+The constraint matrices themselves are not affected by the
+\texttt{xij} argument.
+
+
+
+
+
+How can one fit such models in \VGAM{}?
+Let us fit (\ref{eqn:xij.eg.gotowork}).
+Suppose the journey cost and time variables have had the
+cost and time of walking subtracted from them.
+Then,
+using ``\texttt{.trn}'' to denote train,
+\begin{Code}
+fit2 <- vglm(cbind(bus, train, car, walk) ~ Cost + Time + Distance,
+             fam = multinomial(parallel = TRUE ~ Cost + Time + Distance - 1),
+             xij = list(Cost ~ Cost.bus + Cost.trn + Cost.car,
+                        Time ~ Time.bus + Time.trn + Time.car),
+             form2 = ~  Cost.bus + Cost.trn + Cost.car +
+                        Time.bus + Time.trn + Time.car +
+                        Cost + Time + Distance,
+             data = gotowork)
+\end{Code}
+should do the job.
+Here, the argument \texttt{form2} is assigned a second \proglang{S} formula which
+is used in some special circumstances or by certain types
+of \VGAM{} family functions.
+The model has $\bH_{1} = \bI_{3}$ and $\bH_{2} = \bH_{3} = \bH_{4} = \bone_{3}$
+because the lack of parallelism only applies to the intercept.
+However, unless \texttt{Cost} is the same as \texttt{Cost.bus} and
+\texttt{Time} is the same as \texttt{Time.bus},
+this model should not be plotted with \texttt{plotvgam()};
+see the author's homepage for further documentation.
+
+
+By the way,
+suppose 
+$\beta_{(1)4}^{*}$
+in (\ref{eqn:xij.eg.gotowork})
+is replaced by $\beta_{(j)4}^{*}$.
+Then the above code but with
+\begin{Code}
+  fam = multinomial(parallel = FALSE ~ 1 + Distance),
+\end{Code}
+should fit this model.
+Equivalently,
+\begin{Code}
+  fam = multinomial(parallel = TRUE ~ Cost + Time - 1),
+\end{Code}
+should also work.
+
+
+
+
+
+
+\subsubsection{A more complicated example}
+\label{sec:xij.complicated}
+
+The above example is straightforward because the
+variables were entered linearly. However, things
+become more tricky if data-dependent functions are used in
+any \texttt{xij} terms, e.g., \texttt{bs()}, \texttt{ns()} or \texttt{poly()}.
+In particular, regression splines such as \texttt{bs()} and \texttt{ns()}
+can be used to estimate a general smooth function $f(x_{ij})$, which is
+very useful for exploratory data analysis.
+
+
+
+Suppose we wish to fit the variable \texttt{Cost} with a smoother.
+This is possible using regression splines and a trick.
+Firstly note that
+\begin{Code}
+fit3 <- vglm(cbind(bus, train, car, walk) ~ ns(Cost) + Time + Distance,
+             multinomial(parallel = TRUE ~ ns(Cost) + Time + Distance - 1),
+             xij = list(ns(Cost) ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car),
+                        Time ~ Time.bus + Time.trn + Time.car),
+             form2 = ~  ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car) +
+                        Time.bus + Time.trn + Time.car +
+                        ns(Cost) + Cost + Time + Distance,
+             data = gotowork)
+\end{Code}
+will \textit{not} work because the basis functions for
+\texttt{ns(Cost.bus)}, \texttt{ns(Cost.trn)} and \texttt{ns(Cost.car)}
+are not identical since the knots differ.
+Consequently, they represent different functions despite
+having common regression coefficients.
+
+
+Fortunately, it is possible to force the \texttt{ns()} terms
+to have identical basis functions by using a trick:
+combine the vectors temporarily.
+To do this, one can let
+\begin{Code}
+NS <- function(x, ..., df = 3)
+      sm.ns(c(x, ...), df = df)[1:length(x), , drop = FALSE]
+\end{Code}
+This computes a natural cubic B-spline evaluated at \texttt{x} but it uses the
+other arguments as well to form an overall vector from which to obtain
+the (common) knots.
+Then the usage of \texttt{NS()} can be something like
+\begin{Code}
+fit4 <- vglm(cbind(bus, train, car, walk) ~ NS(Cost.bus, Cost.trn, Cost.car)
+                                          + Time + Distance,
+             multinomial(parallel = TRUE ~  NS(Cost.bus, Cost.trn, Cost.car)
+                                          + Time + Distance - 1),
+             xij = list(NS(Cost.bus, Cost.trn, Cost.car) ~
+                        NS(Cost.bus, Cost.trn, Cost.car) +
+                        NS(Cost.trn, Cost.car, Cost.bus) +
+                        NS(Cost.car, Cost.bus, Cost.trn),
+                        Time ~ Time.bus + Time.trn + Time.car),
+             form2 = ~  NS(Cost.bus, Cost.trn, Cost.car) +
+                        NS(Cost.trn, Cost.car, Cost.bus) +
+                        NS(Cost.car, Cost.bus, Cost.trn) +
+                        Time.bus + Time.trn + Time.car +
+                        Cost.bus + Cost.trn + Cost.car +
+                        Time + Distance,
+             data = gotowork)
+\end{Code}
+So \texttt{NS(Cost.bus, Cost.trn, Cost.car)}
+is the smooth term for
+\texttt{Cost.bus}, etc.
+Furthermore, \texttt{plotvgam()} may be applied to
+\texttt{fit4}, in which case the fitted regression spline is plotted
+against its first inner argument, viz. \texttt{Cost.bus}.
+
+
+One of the reasons why it predicts correctly, too,
+is ``smart prediction''
+\citep{Rnews:Yee:2008}.
+
+
+
+\subsubsection{Implementation details} 
+\label{sec:jss.xij.implementationDetails} 
+
+The \texttt{xij} argument operates \textit{after} the
+ordinary $\bX_{\sVLM}$ matrix is created. Then selected columns
+of $\bX_{\sVLM}$ are modified using the constraint matrices and the
+\texttt{xij} and \texttt{form2} arguments; that is, using \texttt{form2}'s
+model matrix $\bX_{\sformtwo}$ and the $\bH_k$. This whole operation
+is possible because $\bX_{\sVLM}$ remains structurally the same.
+The crucial equation is (\ref{eqn:xij.vector}).
+
+
+Other \texttt{xij} examples are given in the online help of
+\texttt{fill()} and \texttt{vglm.control()},
+as well as at the package's webpage.
+
+
+
+
+
+
+
+
+
+
+
+% ----------------------------------------------------------------------
+\section{Discussion}
+\label{sec:jsscat.discussion}
+
+
+This article has sought to convey how VGLMs/VGAMs are well suited for
+fitting regression models for categorical data. Their primary strength
+is a simple and unified framework which, when reflected in software,
+makes practical CDA more understandable and efficient. Furthermore,
+there are natural extensions such as a reduced-rank variant and
+covariate-specific $\eta_{j}$. The \VGAM{} package potentially offers
+a wide selection of models and utilities.
+
+
+There is much future work to do.
+Some useful additions to the package include:
+\begin{enumerate}
+
+\item
+Bias-reduction \citep{firt:1993} is a method for removing the $O(n^{-1})$
+bias from a maximum likelihood estimate. For a substantial class of
+models including GLMs it can be formulated in terms of a minor adjustment
+of the score vector within an IRLS algorithm \citep{kosm:firt:2009}.
+One by-product, for logistic regression, is that while the maximum
+likelihood estimate (MLE) can be infinite, the adjustment leads to
+estimates that are always finite. At present the \R{} package \pkg{brglm}
+\citep{Kosmidis:2008} implements bias-reduction for a number of models.
+Bias-reduction might be implemented by adding an argument
+\texttt{bred = FALSE}, say, to some existing \VGAM{} family functions.
+
+
+\item
+Nested logit models were developed to overcome a fundamental shortcoming
+related to the multinomial logit model, viz. the independence of
+irrelevant alternatives (IIA) assumption. Roughly, the multinomial logit
+model assumes the ratio of the choice probabilities of two alternatives
+is not dependent on the presence or absence of other alternatives in
+the model. This presents problems that are often illustrated by the
+famed red bus-blue bus problem.
+
+
+
+
+\item
+The generalized estimating equations (GEE) methodology is largely
+amenable to IRLS and this should be added to the package in the future
+\citep{wild:yee:1996}.
+
+
+\item
+For logistic regression \proglang{SAS}'s \code{proc logistic} gives
+a warning if the data are completely separated or quasi-completely
+separated. The effect is that some regression coefficients tend to $\pm
+\infty$. With such data, all (to my knowledge) \R{} implementations
+give warnings that are vague, if any at all, and this is rather
+unacceptable \citep{alli:2004}. The \pkg{safeBinaryRegression} package
+\citep{Konis:2009} overloads \code{glm()} so that a check for the
+existence of the MLE is made before fitting a binary response GLM.
+
+
+\end{enumerate}
+
+
+In closing, the \pkg{VGAM} package is continually being developed;
+therefore some future changes in the implementation details and usage
+may occur. These may include non-backward-compatible changes (see the
+\code{NEWS} file). Further documentation and updates are available at
+the author's homepage whose URL is given in the \code{DESCRIPTION} file.
+
+
+
+% ----------------------------------------------------------------------
+\section*{Acknowledgments}
+
+The author thanks Micah Altman, David Firth and Bill Venables for helpful
+conversations, and Ioannis Kosmidis for a reprint.
+Thanks also to The Institute for Quantitative Social Science at Harvard
+University for their hospitality while this document was written during a
+sabbatical visit.
+
+
+
+
+
+\bibliography{categoricalVGAMbib}
+
+\end{document}
+
+
+
+
diff --git a/vignettes/categoricalVGAMbib.bib b/vignettes/categoricalVGAMbib.bib
new file mode 100644
index 0000000..7367aff
--- /dev/null
+++ b/vignettes/categoricalVGAMbib.bib
@@ -0,0 +1,653 @@
+@article{yee:wild:1996,
+    Author = {Yee, T. W. and Wild, C. J.},
+    Title = {Vector Generalized Additive Models},
+    Year = 1996,
+   JOURNAL = {Journal of the Royal Statistical Society~B},
+    Volume = 58,
+    Pages = {481--493},
+    Keywords = {Nonparametric regression; Smoothing},
+    Number = 3,
+}
+
+@article{gree:1984,
+    Author = {Green, P. J.},
+    Title = {Iteratively Reweighted Least Squares for Maximum Likelihood
+            Estimation, and Some Robust and Resistant Alternatives},
+    Year = 1984,
+   JOURNAL = {Journal of the Royal Statistical Society~B},
+    Volume = 46,
+    Pages = {149--192},
+    Keywords = {Scoring; Generalized linear model; Regression; Residual},
+    Number = 2,
+}
+
+@book{hast:tibs:1990,
+    Author = {Hastie, T. J. and Tibshirani, R. J.},
+    Title = {Generalized Additive Models},
+    Year = 1990,
+    Publisher = {Chapman \& Hall},
+    Address = {London},
+    Pages = {335},
+    Keywords = {Regression; Nonparametric; Generalized linear model}
+}
+
+@Manual{gam:pack:2009,
+    title = {\pkg{gam}: Generalized Additive Models},
+    author = {Trevor Hastie},
+    year = {2008},
+    note = {\proglang{R}~package version~1.01},
+    url = {http://CRAN.R-project.org/package=gam}
+}
+
+@article{ande:1984,
+    Author = {Anderson, J. A.},
+    Title = {Regression and Ordered Categorical Variables},
+    Year = 1984,
+   JOURNAL = {Journal of the Royal Statistical Society~B},
+    Volume = 46,
+    Pages = {1--30},
+    Keywords = {Assessed variable; Logistic regression; Stereotype
+               regression; Maximum likelihood},
+    Number = 1,
+}
+
+@article{firt:1993,
+author = {Firth, D.},
+title = {Bias Reduction of Maximum Likelihood Estimates},
+journal = {Biometrika},
+volume = {80},
+pages = {27--38},
+year = {1993},
+number = {1},
+abstract = {It is shown how, in regular parametric problems, the
+first-order term is removed from the asymptotic bias of maximum likelihood
+estimates by a suitable modification of the score function. In exponential
+families with canonical parameterization the effect is to penalize the
+likelihood by the Jeffreys invariant prior. In binomial logistic models,
+Poisson log linear models and certain other generalized linear models,
+the Jeffreys prior penalty function can be imposed in standard regression
+software using a scheme of iterative adjustments to the data.},
+}
+
+@InProceedings{alli:2004,
+    Author = {Allison, P.},
+    Title = {Convergence Problems in Logistic Regression},
+    chapter = {10},
+    Year = 2004,
+    Crossref = {altm:gill:mcdo:2004},
+    Pages = {238--252},
+ BookTITLE = {Numerical Issues in Statistical Computing for the Social
+              Scientist},
+ PUBLISHER = {Wiley-Interscience},
+   ADDRESS = {Hoboken, NJ, USA},
+}
+
+@book{altm:gill:mcdo:2004,
+    AUTHOR = {Altman, Micah and Gill, Jeff and McDonald, Michael P.},
+     TITLE = {Numerical Issues in Statistical Computing for the Social
+              Scientist},
+ PUBLISHER = {Wiley-Interscience},
+   ADDRESS = {Hoboken, NJ, USA},
+      YEAR = {2004},
+     PAGES = {xvi+323},
+   MRCLASS = {62-02 (62-04 62P25 65-02 91-02)},
+  MRNUMBER = {MR2020104},
+}
+
+@article{yee:2010v,
+    Author = {Yee, T. W.},
+    Title = {{VGLM}s and {VGAM}s:
+             An Overview for Applications in Fisheries Research},
+    Year = 2010,
+    Journal = {Fisheries Research},
+   FJournal = {Fisheries Research},
+    Volume = {101},
+    Pages = {116--126},
+    Number = {1--2},
+}
+
+@article{imai:king:lau:2008,
+    AUTHOR = {Imai, Kosuke and King, Gary and Lau, Olivia},
+     TITLE = {Toward A Common Framework for Statistical Analysis and
+              Development},
+  JOURNAL = {Journal of Computational and Graphical Statistics},
+  YEAR = 2008,
+  VOLUME = 17,
+  PAGES = {892--913},
+  NUMBER = 4,
+}
+
+@book{stok:davi:koch:2000,
+    Author = {Stokes, W. and Davis, J. and Koch, W.},
+    Title = {Categorical Data Analysis Using The \proglang{SAS} System},
+    Year = 2000,
+    Edition = {2nd},
+    Publisher = {SAS Institute Inc.},
+    Address = {Cary, NC, USA},
+    PAGES = {648},
+}
+
+@article{neld:wedd:1972,
+    Author = {Nelder, J. A. and Wedderburn, R. W. M.},
+    Title = {Generalized Linear Models},
+    Year = 1972,
+   JOURNAL = {Journal of the Royal Statistical Society~A},
+    Volume = 135,
+    Pages = {370--384},
+    Keywords = {Probit analysis; Analysis of variance; Contingency table;
+               Exponential family; Quantal response; Weighted least
+               squares},
+    Number = 3,
+}
+
+@book{agre:2002,
+    Author = {Agresti, Alan},
+    Title = {Categorical Data Analysis},
+    Year = 2002,
+    Publisher = {John Wiley \& Sons},
+    Address = {New York, USA},
+    Edition = {2nd},
+}
+
+
+@book{agre:2013,
+  Author = {Agresti, Alan},
+  Title = {Categorical Data Analysis},
+  Year = 2013,
+  Publisher = {Wiley},
+  Address = {Hoboken, NJ, USA},
+  Edition = {Third},
+}
+
+@book{agre:2010,
+      Author = {Agresti, Alan},
+      Title = {Analysis of Ordinal Categorical Data},
+      Year = 2010,
+      Publisher = {Wiley},
+      Edition = {Second},
+      Address = {Hoboken, NJ, USA},
+      Pages = {396},
+}
+
+@book{tutz:2012,
+    AUTHOR = {Tutz, G.},
+     TITLE = {Regression for Categorical Data},
+      YEAR = {2012},
+ PUBLISHER = {Cambridge University Press},
+   ADDRESS = {Cambridge},
+}
+
+@book{fahr:tutz:2001,
+    Author = {Fahrmeir, L. and Tutz, G.},
+    Title = {Multivariate Statistical Modelling Based on Generalized Linear
+            Models},
+    Year = 2001,
+    Edition = {2nd},
+    Publisher = {Springer-Verlag},
+    ADDRESS = {New York, USA},
+}
+
+@book{leon:2000,
+    Author = {Leonard, Thomas},
+    Title = {A Course in Categorical Data Analysis},
+    Year = 2000,
+    Publisher = {Chapman \& Hall/CRC},
+    Address = {Boca Raton, FL, USA},
+}
+
+@book{lloy:1999,
+    Author = {Lloyd, C. J.},
+    Title = {Statistical Analysis of Categorical Data},
+    Year = 1999,
+    Publisher = {John Wiley \& Sons},
+    Address = {New York, USA}
+}
+
+@book{long:1997,
+    Author = {Long, J. S.},
+    Title = {Regression Models for Categorical and Limited Dependent Variables},
+    Year = 1997,
+    Publisher = {Sage Publications},
+    ADDRESS = {Thousand Oaks, CA, USA},
+}
+
+@book{mccu:neld:1989,
+    Author = {McCullagh, P. and Nelder, J. A.},
+    Title = {Generalized Linear Models},
+    Year = 1989,
+    Edition = {2nd},
+    Publisher = {Chapman \& Hall},
+    Address = {London},
+    Pages = {500}
+}
+
+@book{simo:2003,
+    Author = {Simonoff, J. S.},
+    Title = {Analyzing Categorical Data},
+    Year = 2003,
+    Pages = {496},
+    Publisher = {Springer-Verlag},
+    Address = {New York, USA}
+}
+
+@article{liu:agre:2005,
+    Author = {Liu, I. and Agresti, A.},
+    Title = {The Analysis of Ordered Categorical Data:
+             An Overview and a Survey of Recent Developments},
+    Year = 2005,
+    Journal = {Sociedad Estad{\'i}stica e Investigaci{\'o}n Operativa Test},
+    Volume = 14,
+    Pages = {1--73},
+    Number = 1,
+}
+
+@MANUAL{thom:2009,
+    TITLE = {\proglang{R} (and \proglang{S-PLUS}) Manual to Accompany
+             Agresti's \textit{Categorical Data Analysis}~(2002),
+             2nd edition},
+    AUTHOR = {Thompson, L. A.},
+    YEAR = {2009},
+    URL = {https://home.comcast.net/~lthompson221/Splusdiscrete2.pdf},
+}
+
+@article{yee:2008c,
+    Author = {Yee, T. W.},
+    Title = {The \pkg{VGAM} Package},
+    Year = 2008,
+    Journal = {\proglang{R} {N}ews},
+    Volume = 8,
+    Pages = {28--39},
+    Number = 2,
+}
+
+@article{Rnews:Yee:2008,
+  author = {Thomas W. Yee},
+  title = {The \pkg{VGAM} Package},
+  journal = {\proglang{R}~News},
+  year = 2008,
+  volume = 8,
+  pages = {28--39},
+  month = {October},
+  url = {http://CRAN.R-project.org/doc/Rnews/},
+  number = 2,
+}
+
+@article{yee:hast:2003,
+    AUTHOR = {Yee, T. W. and Hastie, T. J.},
+     TITLE = {Reduced-rank Vector Generalized Linear Models},
+   JOURNAL = {Statistical Modelling},
+    Volume = 3,
+     Pages = {15--41},
+      YEAR = {2003},
+     Number = 1,
+}
+
+@article{yee:wild:1996,
+    Author = {Yee, T. W. and Wild, C. J.},
+    Title = {Vector Generalized Additive Models},
+    Year = 1996,
+   JOURNAL = {Journal of the Royal Statistical Society~B},
+    Volume = 58,
+    Pages = {481--493},
+    Keywords = {Nonparametric regression; Smoothing},
+    Number = 3,
+}
+
+@article{good:1981,
+    Author = {Goodman, L. A.},
+    Title = {Association Models and Canonical Correlation in the Analysis
+             of Cross-classifications Having Ordered Categories},
+    Year = 1981,
+    Journal = {Journal of the American Statistical Association},
+    Volume = 76,
+    Pages = {320--334},
+    Number = 374,
+}
+
+@article{buja:hast:tibs:1989,
+    Author = {Buja, Andreas and Hastie, Trevor and Tibshirani, Robert},
+    Title = {Linear Smoothers and Additive Models},
+    Year = 1989,
+   JOURNAL = {The Annals of Statistics},
+    Volume = 17,
+    Pages = {453--510},
+    Keywords = {Nonparametric; Regression; Kernel estimator},
+    Number = 2,
+}
+
+@article{yee:step:2007,
+    AUTHOR = {Yee, Thomas W. and Stephenson, Alec G.},
+     TITLE = {Vector Generalized Linear and Additive Extreme Value Models},
+   JOURNAL = {Extremes},
+  FJOURNAL = {Extremes. Statistical Theory and Applications in Science,
+              Engineering and Economics},
+    VOLUME = {10},
+      YEAR = {2007},
+     PAGES = {1--19},
+   MRCLASS = {Database Expansion Item},
+  MRNUMBER = {MR2407639},
+    NUMBER = {1--2},
+}
+
+@article{wand:orme:2008,
+    Author = {Wand, M. P. and Ormerod, J. T.},
+    Title = {On Semiparametric Regression with {O}'{S}ullivan Penalized Splines},
+    Year = 2008,
+    Journal = {The Australian and New Zealand Journal of Statistics},
+    Volume = 50,
+    Pages = {179--198},
+    Number = 2,
+}
+
+@book{cham:hast:1993,
+    Editor = {Chambers, John M. and Hastie, Trevor J.},
+    Title = {Statistical Models in \proglang{S}},
+    Publisher = {Chapman \& Hall},
+    Year = 1993,
+    Pages = {608},
+    Address = {New York, USA},
+    Keywords = {Computing},
+}
+
+@Article{pete:harr:1990,
+    Author = {Peterson, B. and Harrell, Frank E.},
+    Title = {Partial Proportional Odds Models for Ordinal Response Variables},
+    Year = 1990,
+    Journal = {Applied Statistics},
+    Volume = 39,
+    Pages = {205--217},
+    Number = 2,
+}
+
+@article{pete:1990,
+    Author = {Peterson, B.},
+    Title = {Letter to the Editor: Ordinal Regression Models for
+             Epidemiologic Data},
+    Year = 1990,
+    Journal = {American Journal of Epidemiology},
+    Volume = 131,
+    Pages = {745--746}
+}
+
+@article{hast:tibs:buja:1994,
+    AUTHOR = {Hastie, Trevor and Tibshirani, Robert and Buja, Andreas},
+     TITLE = {Flexible Discriminant Analysis by Optimal Scoring},
+   JOURNAL = {Journal of the American Statistical Association},
+    VOLUME = {89},
+      YEAR = {1994},
+     PAGES = {1255--1270},
+     CODEN = {JSTNAL},
+   MRCLASS = {62H30},
+  MRNUMBER = {95h:62099},
+    NUMBER = {428},
+}
+
+@article{firth:2005,
+    Author = {Firth, David},
+    Title = {{B}radley-{T}erry Models in \proglang{R}},
+    Year = 2005,
+    Journal = {Journal of Statistical Software},
+    Volume = 12,
+    Number = 1,
+    Pages = {1--12},
+    URL = {http://www.jstatsoft.org/v12/i01/},
+}
+
+@book{weir:1996,
+    Author = {Weir, Bruce S.},
+    Title = {Genetic Data Analysis II: Methods for Discrete Population
+             Genetic Data},
+    Year = 1996,
+    Publisher = {Sinauer Associates, Inc.},
+    Address = {Sunderland, MA, USA}
+}
+
+@book{lang:2002,
+    Author = {Lange, Kenneth},
+    Title = {Mathematical and Statistical Methods for Genetic Analysis},
+    Year = 2002,
+    Edition = {2nd},
+    Publisher = {Springer-Verlag},
+    Address = {New York, USA},
+}
+
+@article{macm:etal:1995,
+    Author = {MacMahon, S. and Norton, R. and Jackson, R. and Mackie, M. J. and
+              Cheng, A. and
+              Vander Hoorn, S. and Milne, A. and McCulloch, A.},
+    Title = {Fletcher {C}hallenge-{U}niversity of {A}uckland {H}eart \&
+             {H}ealth {S}tudy: Design and Baseline Findings},
+    Year = 1995,
+    Journal = {New Zealand Medical Journal},
+    Volume = 108,
+    Pages = {499--502},
+}
+
+@article{altm:jack:2010,
+  author =      {Altman, M. and Jackman, S.},
+  title =       {Nineteen Ways of Looking at Statistical Software},
+  journal =     {Journal of Statistical Software},
+  year =        {2010},
+  note =        {Forthcoming},
+}
+
+@article{fox:hong:2009,
+  author =   {John Fox and Jangman Hong},
+  title =    {Effect Displays in \proglang{R} for Multinomial and
+              Proportional-Odds Logit Models:
+              Extensions to the \pkg{effects} Package},
+  journal =  {Journal of Statistical Software},
+  volume =   {32},
+  number =   {1},
+  pages =    {1--24},
+  year =     {2009},
+  URL =      {http://www.jstatsoft.org/v32/i01/},
+}
+
+@article{wild:yee:1996,
+    Author = {Wild, C. J. and Yee, T. W.},
+    Title = {Additive Extensions to Generalized Estimating Equation
+            Methods},
+    Year = 1996,
+   JOURNAL = {Journal of the Royal Statistical Society~B},
+    Volume = 58,
+    Pages = {711--725},
+    Keywords = {Longitudinal data; Nonparametric; Regression; Smoothing},
+    NUMBER = {4},
+}
+
+@Article{Yee:2010,
+   author        = {Thomas W. Yee},
+   title         = {The \pkg{VGAM} Package for Categorical Data Analysis},
+   journal       = {Journal of Statistical Software},
+   year          = {2010},
+   volume        = {32},
+   number        = {10},
+   pages         = {1--34},
+   url           = {http://www.jstatsoft.org/v32/i10/}
+}
+
+@Manual{R,
+   title         = {\proglang{R}: {A} Language and Environment
+                    for Statistical Computing},
+   author        = {{\proglang{R} Development Core Team}},
+   organization  = {\proglang{R} Foundation for Statistical Computing},
+   address       = {Vienna, Austria},
+   year          = {2009},
+   note          = {{ISBN} 3-900051-07-0},
+   url           = {http://www.R-project.org/}
+}
+
+@Book{Venables+Ripley:2002,
+   author        = {William N. Venables and Brian D. Ripley},
+   title         = {Modern Applied Statistics with \proglang{S}},
+   edition       = {4th},
+   year          = {2002},
+   pages         = {495},
+   publisher     = {Springer-Verlag},
+   address       = {New York},
+    url = {http://www.stats.ox.ac.uk/pub/MASS4/},
+}
+
+@Manual{SAS,
+   author        = {{\proglang{SAS} Institute Inc.}},
+   title         = {The \proglang{SAS} System, Version 9.1},
+   year          = {2003},
+   address       = {Cary, NC},
+   url           = {http://www.sas.com/}
+}
+
+@Manual{yee:VGAM:2010,
+   title        = {\pkg{VGAM}: Vector Generalized Linear and Additive Models},
+   author       = {Yee, T. W.},
+   year         = {2010},
+   note         = {\proglang{R}~package version~0.7-10},
+   url          = {http://CRAN.R-project.org/package=VGAM}
+}
+
+@Manual{Harrell:2009,
+   title        = {\pkg{rms}: Regression Modeling Strategies},
+   author       = {Frank E. {Harrell, Jr.}},
+   year         = {2009},
+   note         = {\proglang{R}~package version~2.1-0},
+   url          = {http://CRAN.R-project.org/package=rms}
+}
+
+@Manual{Meyer+Zeileis+Hornik:2009,
+   title        = {\pkg{vcd}: Visualizing Categorical Data},
+   author       = {David Meyer and Achim Zeileis and Kurt Hornik},
+   year         = {2009},
+   note         = {\proglang{R}~package version~1.2-7},
+   url          = {http://CRAN.R-project.org/package=vcd}
+}
+
+@Article{Meyer+Zeileis+Hornik:2006,
+   author       = {David Meyer and Achim Zeileis and Kurt Hornik},
+   title        = {The Strucplot Framework: Visualizing Multi-Way
+                   Contingency Tables with \pkg{vcd}},
+   journal      = {Journal of Statistical Software},
+   year         = {2006},
+   volume       = {17},
+   number       = {3},
+   pages        = {1--48},
+   url          = {http://www.jstatsoft.org/v17/i03/}
+}
+
+@Manual{Turner+Firth:2009,
+   title        = {Generalized Nonlinear Models in \proglang{R}:
+                   An Overview of the \pkg{gnm} Package},
+   author       = {Heather Turner and David Firth},
+   year         = {2009},
+   note         = {\proglang{R}~package version~0.10-0},
+   url          = {http://CRAN.R-project.org/package=gnm},
+}
+
+@Article{Rnews:Turner+Firth:2007,
+  author       = {Heather Turner and David Firth},
+  title        = {\pkg{gnm}: A Package for Generalized Nonlinear Models},
+  journal      = {\proglang{R}~News},
+  year         = 2007,
+  volume       = 7,
+  number       = 2,
+  pages        = {8--12},
+  month        = {October},
+  url = {http://CRAN.R-project.org/doc/Rnews/},
+}
+
+@Manual{ElemStatLearn:2009,
+    title = {\pkg{ElemStatLearn}: Data Sets, Functions and
+             Examples from the Book `The Elements
+             of Statistical Learning, Data Mining, Inference, and
+             Prediction' by Trevor Hastie, Robert Tibshirani and Jerome
+             Friedman},
+    author = {Kjetil Halvorsen},
+    year = {2009},
+    note = {\proglang{R}~package version~0.1-7},
+    url = {http://CRAN.R-project.org/package=ElemStatLearn},
+}
+
+@Manual{Zelig:2009,
+    title = {\pkg{Zelig}: Everyone's Statistical Software},
+    author = {Kosuke Imai and Gary King and Olivia Lau},
+    year = {2009},
+    note = {\proglang{R}~package version~3.4-5},
+    url = {http://CRAN.R-project.org/package=Zelig},
+}
+
+@article{kosm:firt:2009,
+    author = {Kosmidis, I. and Firth, D.},
+     title = {Bias Reduction in Exponential Family Nonlinear Models},
+      year = {2009},
+   JOURNAL = {Biometrika},
+  FJOURNAL = {Biometrika},
+    volume = {96},
+     PAGES = {793--804},
+    NUMBER = {4},
+}
+
+@techreport{kosm:firt:2008,
+    author = {Kosmidis, I. and Firth, D.},
+    title = {Bias Reduction in Exponential Family Nonlinear Models},
+    Note = {CRiSM Paper No.~08-05v2},
+    year = {2008},
+    URL = {http://www.warwick.ac.uk/go/crism},
+    Institution = {Department of Statistics, University of Warwick},
+}
+
+@Manual{Kosmidis:2008,
+   title        = {\pkg{brglm}: Bias Reduction in Binary-Response {GLMs}},
+   author       = {Ioannis Kosmidis},
+   year         = {2008},
+   note         = {\proglang{R}~package version~0.5-4},
+   url          = {http://CRAN.R-project.org/package=brglm},
+}
+
+@Manual{Hatzinger:2009,
+    title = {\pkg{prefmod}: Utilities to Fit Paired Comparison
+             Models for Preferences},
+    author = {Reinhold Hatzinger},
+    year = {2009},
+    note = {\proglang{R}~package version~0.8-16},
+    url = {http://CRAN.R-project.org/package=prefmod},
+}
+
+@Manual{firth:2008,
+    title = {\pkg{BradleyTerry}: Bradley-Terry Models},
+    author = {David Firth},
+    year = {2008},
+    note = {\proglang{R}~package version~0.8-7},
+    url = {http://CRAN.R-project.org/package=BradleyTerry},
+}
+
+@Manual{gnlm:2007,
+    title = {\pkg{gnlm}: Generalized Nonlinear Regression Models},
+    author = {Jim Lindsey},
+    year = {2007},
+    note = {\proglang{R}~package version~1.0},
+    url = {http://popgen.unimaas.nl/~jlindsey/rcode.html},
+}
+
+@Manual{Konis:2009,
+    title = {\pkg{safeBinaryRegression}: Safe Binary Regression},
+    author = {Kjell Konis},
+    year = {2009},
+    note = {\proglang{R}~package version~0.1-2},
+    url = {http://CRAN.R-project.org/package=safeBinaryRegression},
+}
+
+@book{smit:merk:2013,
+     TITLE = {Generalized Linear Models for Categorical and
+              Continuous Limited Dependent Variables},
+    AUTHOR = {Smithson, M. and Merkle, E. C.},
+      YEAR = {2013},
+ Publisher = {Chapman \& Hall/CRC},
+   Address = {London},
+}
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git


