[r-cran-vgam] 47/63: Import Upstream version 1.0-0

Andreas Tille tille at debian.org
Tue Jan 24 13:54:39 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-vgam.

commit 0638d282b4690a6fa560cc379021a024d8dc0738
Author: Andreas Tille <tille at debian.org>
Date:   Tue Jan 24 14:17:04 2017 +0100

    Import Upstream version 1.0-0
---
 ChangeLog                    |  17 +
 DESCRIPTION                  |  20 +-
 LICENCE.note                 |  73 ++++
 MD5                          | 263 +++++++-------
 NAMESPACE                    |  68 +++-
 NEWS                         |  63 ++++
 R/Links.R                    |  14 +-
 R/build.terms.vlm.q          |  36 +-
 R/calibrate.q                |  15 +-
 R/coef.vlm.q                 |   9 +
 R/confint.vlm.R              |  85 +++++
 R/family.aunivariate.R       |  20 +-
 R/family.basics.R            |  30 +-
 R/family.binomial.R          | 147 +++++---
 R/family.bivariate.R         |  66 ++--
 R/family.categorical.R       |  37 +-
 R/family.censored.R          | 268 ++++++++++++++-
 R/family.circular.R          |   4 +-
 R/family.exp.R               |   2 +-
 R/family.extremes.R          |  14 +-
 R/family.genetic.R           |  20 +-
 R/family.glmgam.R            |  70 ++--
 R/family.loglin.R            |   4 +-
 R/family.mixture.R           |   8 +-
 R/family.nonlinear.R         |   4 +-
 R/family.normal.R            | 752 ++++++++++++++++++++++++++--------------
 R/family.others.R            |  12 +-
 R/family.positive.R          |  31 +-
 R/family.qreg.R              | 144 +++++---
 R/family.rcim.R              |  25 +-
 R/family.robust.R            |   2 +-
 R/family.rrr.R               | 321 ++++++++---------
 R/family.survival.R          |   4 +-
 R/family.ts.R                | 130 ++++---
 R/family.univariate.R        | 199 +++++++----
 R/family.zeroinf.R           |  31 +-
 R/formula.vlm.q              |  99 +++++-
 R/links.q                    | 797 ++++++++++++++++++++++++-------------------
 R/model.matrix.vglm.q        |  38 ++-
 R/qtplot.q                   |  20 +-
 R/summary.vglm.q             |  39 ++-
 R/vglm.R                     |   6 +-
 R/vglm.fit.q                 |   2 +-
 R/vlm.wfit.q                 |   2 +-
 build/vignette.rds           | Bin 479 -> 480 bytes
 data/Huggins89.t1.rda        | Bin 442 -> 443 bytes
 data/Huggins89table1.rda     | Bin 444 -> 445 bytes
 data/alclevels.rda           | Bin 549 -> 550 bytes
 data/alcoff.rda              | Bin 546 -> 547 bytes
 data/auuc.rda                | Bin 245 -> 246 bytes
 data/backPain.rda            | Bin 484 -> 488 bytes
 data/beggs.rda               | Bin 196 -> 198 bytes
 data/car.all.rda             | Bin 6969 -> 6965 bytes
 data/cfibrosis.rda           | Bin 264 -> 264 bytes
 data/corbet.rda              | Bin 237 -> 240 bytes
 data/crashbc.rda             | Bin 373 -> 374 bytes
 data/crashf.rda              | Bin 340 -> 340 bytes
 data/crashi.rda              | Bin 490 -> 491 bytes
 data/crashmc.rda             | Bin 345 -> 385 bytes
 data/crashp.rda              | Bin 375 -> 376 bytes
 data/crashtr.rda             | Bin 360 -> 361 bytes
 data/deermice.rda            | Bin 394 -> 392 bytes
 data/ducklings.rda           | Bin 560 -> 561 bytes
 data/finney44.rda            | Bin 209 -> 210 bytes
 data/flourbeetle.rda         | Bin 343 -> 344 bytes
 data/hspider.rda             | Bin 1343 -> 1344 bytes
 data/lakeO.rda               | Bin 334 -> 335 bytes
 data/leukemia.rda            | Bin 328 -> 329 bytes
 data/marital.nz.rda          | Bin 10440 -> 10456 bytes
 data/melbmaxtemp.rda         | Bin 4262 -> 4265 bytes
 data/pneumo.rda              | Bin 266 -> 267 bytes
 data/prinia.rda              | Bin 1228 -> 1229 bytes
 data/ruge.rda                | Bin 254 -> 258 bytes
 data/toxop.rda               | Bin 473 -> 473 bytes
 data/venice.rda              | Bin 982 -> 976 bytes
 data/venice90.rda            | Bin 8004 -> 8072 bytes
 data/wine.rda                | Bin 270 -> 269 bytes
 inst/CITATION                |  61 +++-
 inst/doc/categoricalVGAM.pdf | Bin 735468 -> 735199 bytes
 inst/doc/crVGAM.R            |   4 +-
 inst/doc/crVGAM.Rnw          |  46 ++-
 inst/doc/crVGAM.pdf          | Bin 496617 -> 511655 bytes
 man/AA.Aa.aa.Rd              |   1 +
 man/AR1.Rd                   |  11 +-
 man/CommonVGAMffArguments.Rd |  29 +-
 man/Links.Rd                 | 127 +++++--
 man/SURff.Rd                 |   2 +-
 man/VGAM-package.Rd          |   9 +-
 man/acat.Rd                  |   4 +-
 man/alaplace3.Rd             |   7 +-
 man/betabinomUC.Rd           |  20 +-
 man/binomialff.Rd            |  10 +-
 man/binormal.Rd              |  10 +-
 man/brat.Rd                  |   4 +-
 man/bratUC.Rd                |   4 +-
 man/cauchit.Rd               |   2 +-
 man/cens.poisson.Rd          |  32 +-
 man/cloglog.Rd               |   5 +-
 man/confintvglm.Rd           | 106 ++++++
 man/cratio.Rd                |   4 +-
 man/cumulative.Rd            |   6 +-
 man/explink.Rd               |   2 +-
 man/fisherz.Rd               |   2 +-
 man/foldsqrt.Rd              |   2 +-
 man/formulavlm.Rd            |  88 +++++
 man/gengamma.Rd              |  10 +-
 man/golf.Rd                  |  24 +-
 man/has.intercept.Rd         |  86 +++++
 man/identitylink.Rd          |   2 +-
 man/logc.Rd                  |   2 +-
 man/loge.Rd                  |   2 +-
 man/logit.Rd                 |  18 +-
 man/lognormal.Rd             |   6 +
 man/multilogit.Rd            |   2 +-
 man/multinomial.Rd           |   4 +-
 man/nbcanlink.Rd             |  19 +-
 man/nbolf.Rd                 |  16 +-
 man/negbinomial.Rd           |  16 +-
 man/notdocumentedyet.Rd      |  34 +-
 man/polf.Rd                  |  17 +-
 man/posnormal.Rd             |  69 +++-
 man/probit.Rd                |   2 +-
 man/propodds.Rd              |   2 +-
 man/rayleigh.Rd              |   3 +-
 man/rhobit.Rd                |   7 +-
 man/sratio.Rd                |   6 +-
 man/summaryvglm.Rd           | 139 ++++++++
 man/tobit.Rd                 | 102 +++---
 man/triangle.Rd              |   9 +-
 man/undocumented-methods.Rd  |  12 +
 man/vcovvlm.Rd               | 104 ++++++
 man/vglm.Rd                  |  17 +-
 man/weibull.mean.Rd          | 115 +++++++
 man/weibullR.Rd              |   1 +
 man/wine.Rd                  |   6 +-
 vignettes/crVGAM.Rnw         |  46 ++-
 vignettes/crVGAM.bib         |  20 ++
 137 files changed, 3947 insertions(+), 1580 deletions(-)

diff --git a/ChangeLog b/ChangeLog
new file mode 100755
index 0000000..73b0616
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,17 @@
+2015-10-26  Thomas Yee <t.yee at auckland dot ac dot nz>
+
+	* R/links.q (all link functions): big changes, when
+	  deriv >= 1 wrt 'inverse' argument. For example,
+	  logit(p, deriv = 1, inverse = TRUE) is now
+	  logit(p, deriv = 1, inverse = FALSE).
+	  Models fitted under <= VGAM 0.9-9 and saved might not work
+	  under >= VGAM 1.0-0.
+
+
+2015-10-26  Thomas Yee <t.yee at auckland dot ac dot nz>
+
+	* R/family.normal.R (tobit): tobit()@weight implements
+	  Fisher scoring entirely.
+
+
+	  
diff --git a/DESCRIPTION b/DESCRIPTION
index 51ba3d1..e1df98d 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: VGAM
-Version: 0.9-8
-Date: 2015-05-11
+Version: 1.0-0
+Date: 2015-10-29
 Title: Vector Generalized Linear and Additive Models
 Author: Thomas W. Yee <t.yee at auckland.ac.nz>
 Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -9,7 +9,10 @@ Suggests: VGAMdata, MASS
 Description: An implementation of about 6 major classes of
     statistical regression models. At the heart of it are the
     vector generalized linear and additive model (VGLM/VGAM)
-    classes. Currently only fixed-effects models are implemented,
+    classes, and the book "Vector Generalized Linear and
+    Additive Models: With an Implementation in R" (Yee, 2015)
+    gives details of the statistical framework and VGAM package.
+    Currently only fixed-effects models are implemented,
     i.e., no random-effects models. Many (150+) models and
     distributions are estimated by maximum likelihood estimation
     (MLE) or penalized MLE, using Fisher scoring. VGLMs can be
@@ -19,15 +22,14 @@ Description: An implementation of about 6 major classes of
     RCIMs (row-column interaction models)---these classes perform
     constrained and unconstrained quadratic ordination (CQO/UQO)
     models in ecology, as well as constrained additive ordination
-    (CAO). Note that these functions are subject to change,
-    especially before version 1.0.0 is released; see the NEWS
-    file for latest changes.
-License: GPL-2
+    (CAO). Note that these functions are subject to change;
+    see the NEWS and ChangeLog files for latest changes.
+License: GPL-2 | GPL-3
 URL: https://www.stat.auckland.ac.nz/~yee/VGAM
 NeedsCompilation: yes
 BuildVignettes: yes
 LazyLoad: yes
 LazyData: yes
-Packaged: 2015-05-11 03:49:53 UTC; tyee001
+Packaged: 2015-10-29 02:09:52 UTC; tyee001
 Repository: CRAN
-Date/Publication: 2015-05-11 11:14:54
+Date/Publication: 2015-10-29 08:29:12
diff --git a/LICENCE.note b/LICENCE.note
new file mode 100755
index 0000000..8f4badf
--- /dev/null
+++ b/LICENCE.note
@@ -0,0 +1,73 @@
+Software and datasets to support 'Vector Generalized Linear and
+Additive Models: With an Implementation in R', 
+first edition, by T. W. Yee.
+Springer, 2015.
+
+
+This file is intended to clarify ownership and copyright: where
+possible individual files also carry brief copyright notices.
+This file was adapted from the file of the same name from the
+MASS bundle.
+
+
+Copyrights
+==========
+
+Some slightly-modified FORTRAN subroutines from
+http://pages.cs.wisc.edu/~deboor/pgs/
+are used for the B-spline computations.
+
+
+Some modified LINPACK subroutines appear in the files
+./src/vlinpack?.f
+
+
+Portions of the smoothing code called by vsmooth.spline() is
+based on an adaptation of F. O'Sullivan's BART code.
+
+
+Regarding file ./src/lerchphi.c, this program is copyright by
+
+Sergej V. Aksenov (http://www.geocities.com/saksenov) and 
+Ulrich D. Jentschura (jentschura at physik.tu-dresden.de), 2002.
+Version 1.00 (May 1, 2002)
+
+
+R function pgamma.deriv() operates by a wrapper function to a
+Fortran subroutine written by R. J. Moore. The subroutine was
+modified to run using double precision. The original code came
+from http://lib.stat.cmu.edu/apstat/187.
+
+
+R functions expint(x), expexpint(x), expint.E1(x)
+operate by wrapper functions to code downloaded from
+http://www.netlib.org/specfun/ei
+
+
+My understanding is that the dataset files VGAM/data/* and
+VGAMdata/data/* are not copyright.
+
+
+All other files are copyright (C) 1998-2015 T. W. Yee.
+
+
+Licence
+=======
+
+    This is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 or 3 of the License
+    (at your option).
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+Files share/licenses/GPL-2 and share/licenses/GPL-3 in the R
+(source or binary) distribution are copies of versions 2 and 3
+of the 'GNU General Public License'.
+These can also be viewed at https://www.r-project.org/Licenses/
+
+t.yee at auckland.ac.nz
+
diff --git a/MD5 b/MD5
index 6d4fd77..09c8638 100644
--- a/MD5
+++ b/MD5
@@ -1,59 +1,62 @@
 66414b6ed296192426033f4ac29a6af2 *BUGS
-6506758e0e7e02c861ae3b7ca8e214b7 *DESCRIPTION
-febc599169d12ea006018ae570d5918b *NAMESPACE
-a7c3248f29ee5bcb29fd6559ba66325c *NEWS
-46f97e789f77839767fc3d3511d2f990 *R/Links.R
+7ee5b2dc375f5ec613dffed100ca7b3d *ChangeLog
+ca856d175101c0ee856cd1e2b7dff965 *DESCRIPTION
+e640665d8993539374917f850992ddc7 *LICENCE.note
+60d8c06d89c4c07cb251d84568bc8706 *NAMESPACE
+0b078da380c69041235063cf7c3fe68a *NEWS
+31e60bca4249bc261445355bd6496609 *R/Links.R
 b6b017bdea768a643afc8171516d193b *R/aamethods.q
 4ffc1530ca8113d2f2d8b0d5cc1db282 *R/add1.vglm.q
 29b192ec0239f8f013e99ef759823732 *R/attrassign.R
 19fd9a65f33bfc01a56d0ee1f4752159 *R/bAIC.q
-b35d7499e726674163cd7eeeefbe39b0 *R/build.terms.vlm.q
-6ba194b23f60728f70575d799af9c3c1 *R/calibrate.q
+f96b47c7279f6b68a3946245deff4429 *R/build.terms.vlm.q
+560f0250d8606fc4c7bbcba5474ef9ff *R/calibrate.q
 8fa625cc47ab28b74bd41019d20b7b02 *R/cao.R
 ce3d85bf00ad08175321e2098ae87462 *R/cao.fit.q
-5a26e4f96581a1ce75aba0587b261573 *R/coef.vlm.q
+4ded73a0a27a728457ca3ecfa02bb9ed *R/coef.vlm.q
+523b3faf78519c00346b1843bd5db02d *R/confint.vlm.R
 77638f2e22a3dd774115c472bf0c33e8 *R/cqo.R
 9a4e3479392194fbe0c6e55cacb03f62 *R/cqo.fit.q
 d411a1bf3bfbe7057b4211255c33ba53 *R/deviance.vlm.q
 54b928344dc9efab031bf3e83d04f21f *R/effects.vglm.q
 73607c9675d480649795248cf79816db *R/family.actuary.R
-0a2e88e04cab2be01307a0a27ed659f7 *R/family.aunivariate.R
-267ab38b53898ea5bd37c5a8e090d3e2 *R/family.basics.R
-7006c9efcd3263c79ca7c13366cfa238 *R/family.binomial.R
-be15ddf47d35b2ce3de5d69b9acbfbe9 *R/family.bivariate.R
-9e76ec4bc461e0c06e6069054a060f54 *R/family.categorical.R
-6cf1f264328d97878e0d069ea7fa48b5 *R/family.censored.R
-0c13f3a38a685b978dde42ace40e55e8 *R/family.circular.R
-b9fedec9d690c434a69d89c9e6845bb7 *R/family.exp.R
-54d3654ea2ec5f5b11428c672ad11c03 *R/family.extremes.R
+622eef73eae77e8f11a3be61c1d177de *R/family.aunivariate.R
+a92c19967dad3ac7f28a999be60bdd35 *R/family.basics.R
+d9b278484e9eeb0977f4cf37449f6d81 *R/family.binomial.R
+7b722a4d252e8889459cd4dccc734ee6 *R/family.bivariate.R
+fb37a29e583745096fdd1ca4c6b20e87 *R/family.categorical.R
+4d9023a91086b21b57ba417816b791ab *R/family.censored.R
+290eb0cf20c680e3822312da99e778c8 *R/family.circular.R
+fde7624d1a27f4c981dbea13dfca9f8d *R/family.exp.R
+b5a955403628ff48d9bd5137a72b5358 *R/family.extremes.R
 251b551aaf906c754d9e75342e7ea1af *R/family.functions.R
-8a51bab8f687ffa92c116663af075ba3 *R/family.genetic.R
-281657d44d72ef287c94653979aa5aa5 *R/family.glmgam.R
-688abce2e4312da5c9e03a6fb2cdf935 *R/family.loglin.R
+5870ba488892a27748d73c96fe09fd9e *R/family.genetic.R
+81bc7044f78ed67dedfe721b45e70c9f *R/family.glmgam.R
+040039ac1ac77acc7355986786188113 *R/family.loglin.R
 5679a8a30b54ac8f66dd945b2d1ccd2a *R/family.math.R
-f3a38cecabef0846e4f0c7bdb5c9ee81 *R/family.mixture.R
-f1a35abe66015033d743f525578179d1 *R/family.nonlinear.R
-1c0d93ee5dc8d6a3aa044c449ae6e33e *R/family.normal.R
-a3ea06d6b5e0c41585333addbe62ffe0 *R/family.others.R
-88039b35018615a28e631b670d2e971b *R/family.positive.R
-9041d6d34c26ffff3f40e816b3263e29 *R/family.qreg.R
-c86c5bac3606eb3ba284b4b6854a7684 *R/family.rcim.R
+40b0c38439d400fa0ec5004104f472b1 *R/family.mixture.R
+8a6c638eb360f7a74881ab5d18721600 *R/family.nonlinear.R
+ae9004a896cfc5a6c0aec0ee9137901e *R/family.normal.R
+8e71759e50f7fdbc320f4b3dfb57b304 *R/family.others.R
+2c3afca36be4104086c9b132e05561b6 *R/family.positive.R
+6c41acf5b9e4e2e43eb7b8520196b8e0 *R/family.qreg.R
+5bdb4590aaaff9bdde698a20fbaaac84 *R/family.rcim.R
 eaf63cac3cffe7fd0bd9352fe8223a60 *R/family.rcqo.R
-ffd541fd4b00179e6595b9e6926f3291 *R/family.robust.R
-d8845f8b8bf363485bcf7f530617358f *R/family.rrr.R
+303fbdf3b0b917cdf71e170b50934d49 *R/family.robust.R
+5b373c7cddc6faad4894c9ff7738c8f2 *R/family.rrr.R
 943ff0caa6e0cf7294b32a0a8dc1ad98 *R/family.sur.R
-ebb9d8fde5a537c58b25f47482cad8d2 *R/family.survival.R
-fe810bd8c50c5b6102e67afc15e13b68 *R/family.ts.R
-fcf0a36d2fe04e88d2dd74de20e7a0bc *R/family.univariate.R
+d8765cca44c6676d5c3761e609fd6476 *R/family.survival.R
+b88f86145cb3ad38d701edf852208a3f *R/family.ts.R
+b6a1108501f71db2e93afa194e8d678b *R/family.univariate.R
 8d7d5df3e005750453f8ed0977c0c4f6 *R/family.vglm.R
-be19cb2f6dba872ae43dd12a5d21d620 *R/family.zeroinf.R
+aa4b052796fac2667f825c9fcdb7b4bc *R/family.zeroinf.R
 e5a738b6ba3f59a3962eb089e56e5786 *R/fittedvlm.R
-d706ca44527adda488800e2834d5d278 *R/formula.vlm.q
+27dae12416e0840c1f75f4f18e0146f0 *R/formula.vlm.q
 1c7d28893d43c88a934731219098fd5c *R/generic.q
-b0401a9b8b838e57e6918f420d2f84be *R/links.q
+542665d45f3a87c4fe8e8c549a39ac11 *R/links.q
 06929b2f0a102fcca301a9f265279e04 *R/logLik.vlm.q
 92736375efccc88013c357fd287aa4cb *R/lrwaldtest.R
-ed5f231217b28cd1448cef2529a7e084 *R/model.matrix.vglm.q
+3c2bc6b07e880eb2f6ae5bfc3ee8f55e *R/model.matrix.vglm.q
 c9f890ae5310b45be85da9fd237b98e4 *R/mux.q
 ec00a9fdace1922ca78877ac43605737 *R/nobs.R
 8c7a83a2e5c10a871e722e3c307ad79b *R/plot.vglm.q
@@ -63,7 +66,7 @@ b9109db7f638db25728c3118e6baf41d *R/predict.vlm.q
 cfb0659e61f097d41e0266ee71d15a9d *R/print.vglm.q
 74f7393a57eec9a96cc7d04a569037ca *R/print.vlm.q
 c431e12369752358904f6704832decd5 *R/qrrvglm.control.q
-18ab7fc886450762ed4e982558776159 *R/qtplot.q
+d767ac65a1275661aa88e8e3cfe214cf *R/qtplot.q
 78e9224292be8718824d53dd2165dad4 *R/residuals.vlm.q
 9d5826ad08d66734f7403d17fcbba5f6 *R/rrvglm.R
 e278dec435eddcc0345a59bd9dd56f6d *R/rrvglm.control.q
@@ -74,70 +77,70 @@ cf62bdb183fe009cd47316eacbe3b14e *R/s.q
 366887aff30fbfac5afb77ed10597005 *R/smart.R
 89968d39bff60306bab87cc1e3ebdca1 *R/step.vglm.q
 ea860d4429fbcfb1c8e494a198e72adc *R/summary.vgam.q
-24bddb3d9264aa2079dc0bc1949dac08 *R/summary.vglm.q
+5e33c1f3af46348ed7ac16fff8cc3307 *R/summary.vglm.q
 8233ae7e692d6254ac739541a4774109 *R/summary.vlm.q
 f53cc75eb61ade505b6ee2d55f0ac377 *R/vgam.R
 3a7ea81a3f0c6509e71466cfae4c108c *R/vgam.control.q
 f6da05ed223f0cac5b7731c8f5da2095 *R/vgam.fit.q
 c7836fc6514f090c9852ef7427b68a95 *R/vgam.match.q
-80b44e86fa2bf5753e95397653aead65 *R/vglm.R
+602d47027ca2488c44bf5aa5299049e1 *R/vglm.R
 714a3a58e7584c7f2545aed04187a167 *R/vglm.control.q
-826df11f31484937475bd4270742900d *R/vglm.fit.q
+08ce604d1ce30bb186f59cc23faab9a2 *R/vglm.fit.q
 d3c11b3c2876d98a37ea6f4a5658a4a6 *R/vlm.R
-50d41f729a51f21ac03d717a33d708fb *R/vlm.wfit.q
+568fedfc13182adbd374ec27d7a75600 *R/vlm.wfit.q
 9c9d0afc47501544ea1da2703e60b4e9 *R/vsmooth.spline.q
-d00804c5f46ec827f32f4eb64029553e *build/vignette.rds
-3870f8993c4bea0c5192b50640e10c84 *data/Huggins89.t1.rda
-a613e4fafb1a688f5aa446073014b01f *data/Huggins89table1.rda
+6163dccd84afd0591286d3d3d44f5393 *build/vignette.rds
+b7d1c6c8f8393b07c7e9b604adc07a98 *data/Huggins89.t1.rda
+c89278fea9afbea65d5a5c67ea8920ca *data/Huggins89table1.rda
 d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
-02b718733148b399711af0138fabfb99 *data/alclevels.rda
-c6b2edafb26be2f1f0e55be907faa271 *data/alcoff.rda
-3acc92474abbbfe3dc30752e370913ff *data/auuc.rda
-fb8c3190d2030185534ec3d67d1048da *data/backPain.rda
+44af01b902591edbe25947cfb93b82a2 *data/alclevels.rda
+51600a9d117c2e4e508498c2b8c5b062 *data/alcoff.rda
+64a9bda0da78dc1be3934317c4022344 *data/auuc.rda
+9af66d0bf992be0147dd449c0b60d236 *data/backPain.rda
 4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
-26dafe8e06fbca22ca07861091270805 *data/beggs.rda
+340932cda23a74745e46571a70dc3882 *data/beggs.rda
 e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-ceaf646e909dcc16b9bd89edcb058e8e *data/car.all.rda
-642479ab63be9250e1950a2b7499d876 *data/cfibrosis.rda
+fc97edacd7b4a480edd006de2413bb55 *data/car.all.rda
+4a83e2e836f3a9708daed64dfcbbcd2f *data/cfibrosis.rda
 b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
 4df5fd8b5db905c4c19071e1e6a698a4 *data/chinese.nz.txt.gz
 3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-eb5202e2ea25642262ab857b7d514bf8 *data/corbet.rda
-aecd77d50f22e2c4f482b7c22b7edb49 *data/crashbc.rda
-7c3b7f5539427a0da536c31003d06540 *data/crashf.rda
-08808e8f40be4371114793765f431c48 *data/crashi.rda
-7a3e0f5a401ecc681e47b98b921f4cee *data/crashmc.rda
-36b8fbbafcbde4304a392d592bbfa99c *data/crashp.rda
-1e427d92814a65f98fd1d8446f37fc06 *data/crashtr.rda
-24fc8bf721df45e0e2b861c2b2d50696 *data/deermice.rda
-b5042793e4d3cde7b66c78403ef57074 *data/ducklings.rda
+c6ee8d21ed687cad8123dc3162865c9e *data/corbet.rda
+0f1edaf1442a9006da63acfd7fb59a0d *data/crashbc.rda
+f8e248b3a1082019db47ec264c9f7d5c *data/crashf.rda
+34bee33037d6199dfbbfc2d52ba96158 *data/crashi.rda
+f74434eca35fd11b2ecf33b601178f62 *data/crashmc.rda
+e76aab67b2e6c2ec1ed3e39f50b7f474 *data/crashp.rda
+ab1e91319254f296f302090e8d9abda5 *data/crashtr.rda
+a2c09c5f8c31f870be7131ecd160639e *data/deermice.rda
+c9dd72b680bd6d9c7861e3dc5f3bcb83 *data/ducklings.rda
 08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-87a43a19f87ca93b45a672cd9e549c8c *data/finney44.rda
-35f3600d34f7376e7b0da2c151c997dc *data/flourbeetle.rda
+968c21409ce398b3d22466d70d79fb31 *data/finney44.rda
+9a25176f2b7870d254248d522e548726 *data/flourbeetle.rda
 3125b7b004c671f9d4516999c8473eac *data/gew.txt.gz
 bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
 9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
-e87b6cf3f2bd1413111f075b69651512 *data/hspider.rda
+e9b5697bdd74940ed221d71f70485f1a *data/hspider.rda
 dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-b86e36a995219a44437a844bc5b8a70c *data/lakeO.rda
-41ad3d75f07b78392324f1c66a03fdb2 *data/leukemia.rda
+fea8d38efa5ed3a8141dd1566eb89fc1 *data/lakeO.rda
+3a63fd948d478efcee0a4439ca9571b7 *data/leukemia.rda
 aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
 7d7e59127af09903659c5727d71acc56 *data/machinists.txt.gz
-40d0a1b7719f9a76b76b332be23ae83a *data/marital.nz.rda
-9b9739d472f4c9e15d2f175ad7908437 *data/melbmaxtemp.rda
+04eefb3f13e372e5b0370b0f04d9ab8b *data/marital.nz.rda
+a2fc80eba077edd682ac20c42d40890d *data/melbmaxtemp.rda
 56490506642d6415ac67d9b6a7f7aff6 *data/olym08.txt.gz
 fe334fe839d5efbe61aa3a757c38faeb *data/olym12.txt.gz
 3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-ac721c748208e66ad2068d61c3528179 *data/pneumo.rda
+93121d35f3ce58883f92d5d76f697083 *data/pneumo.rda
 0cd66b7ce4e596ad3ca75e1e2ec0a73c *data/prats.txt.gz
-a20303b93a38645c9310e6ce51d3db95 *data/prinia.rda
-dae1c1a61019b400f1ee8f21796fdda4 *data/ruge.rda
-bc3d45e95e47f05c97d2b7e542ba5719 *data/toxop.rda
+9f2a88b4c56838b56329eb2c11d310be *data/prinia.rda
+b08aebe141c9d5fa30c8864930836015 *data/ruge.rda
+5b3d2c05e50f5083846d15f115399209 *data/toxop.rda
 1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-e46b189f709219e3d6edb70094adcc43 *data/venice.rda
-fb2409f3985d6bb29e62b74641b92e0a *data/venice90.rda
+11fc4f6aa2d660a7a178b41990ec9b60 *data/venice.rda
+314b5a505fb5ba5e55efcde3a706cc34 *data/venice90.rda
 e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-d074784b6dfe8d86e15c45a289148d09 *data/wine.rda
+d739b5c0e33ebee609294cb35283fbc7 *data/wine.rda
 81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
 9327dcfa4015cf47172717bac166f353 *demo/binom2.or.R
 b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
@@ -145,19 +148,19 @@ b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
 541e1a831b9abf6a2a5bfe193b03b1b4 *demo/lmsqreg.R
 ab8081763fe2144558be25f3a154327b *demo/vgam.R
 65570d10948785994d70d817f574bd96 *demo/zipoisson.R
-01decdf5ff5cdc5cbeca6c4c42244e58 *inst/CITATION
+d2fcbc6a325172d058671fd977d0b5e5 *inst/CITATION
 4ff0e35d38b3c5bb38f1f7232b9af863 *inst/doc/categoricalVGAM.R
 bfa11dbdbff271fb20342560f2bacd53 *inst/doc/categoricalVGAM.Rnw
-162de766cebba3dde055eae8e91766ab *inst/doc/categoricalVGAM.pdf
-1a11a44235dac378645ca48db2c58495 *inst/doc/crVGAM.R
-8b5466d6da7f3c0e97030bd8364a9ca4 *inst/doc/crVGAM.Rnw
-6c712d6436cedaccc8a5f42e1e6ca1b8 *inst/doc/crVGAM.pdf
+849d8750de988da008419f2ceac54902 *inst/doc/categoricalVGAM.pdf
+2f57d2a0610fd514e05aae8ea94d8ebc *inst/doc/crVGAM.R
+8e489008d8b8b8f769e5e93e351c9c42 *inst/doc/crVGAM.Rnw
+792ac4fba77b864da03f3af65b90b2db *inst/doc/crVGAM.pdf
 9b97006cdc82d3a0c0ace3d43c9758de *man/A1A2A3.Rd
-cc9d465fc9db15abb65061e0b41a0f9e *man/AA.Aa.aa.Rd
+4bc543c785c8a213c46693e2e37f5f00 *man/AA.Aa.aa.Rd
 26a120083d1d9d77ac0a5193d0c186b9 *man/AB.Ab.aB.ab.Rd
 c6c2a703e0f76c8b0f9e0a7d36f13386 *man/ABO.Rd
 38647708600610216a454c61450810ff *man/AICvlm.Rd
-028b3edf4cf6f4de340796b6635343a3 *man/AR1.Rd
+30130df5de09e7ef03e6a85a34e6e100 *man/AR1.Rd
 e7f6a39f61b6403d60cf99f0e17f3dc1 *man/AR1UC.Rd
 0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
 32daae0afb71eae3cdeefc042f4241c6 *man/Coef.Rd
@@ -166,25 +169,25 @@ e7f6a39f61b6403d60cf99f0e17f3dc1 *man/AR1UC.Rd
 a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
 4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
 9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-92a1fb3e3a10e90414a8565eb5e3ac71 *man/CommonVGAMffArguments.Rd
+5c3794b2da0ebcbd6461a95bda2b7e2c *man/CommonVGAMffArguments.Rd
 098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
 ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
-fed1d90172d0b6b35b8d708076378a7e *man/Links.Rd
+b9505b66dea5b1311aa8d2700d3d6a34 *man/Links.Rd
 e53a7b5f977320e9a2b3cfba16e097ee *man/MNSs.Rd
 5ddd860d2b28b025dbf94b80062e3fc6 *man/Max.Rd
 00dce9ac476270fc8ce02ea1e75de191 *man/Opt.Rd
 d315bc4396e206c1ec3c5219e4efc677 *man/ParetoUC.Rd
 f84dea8ac6b2c1e857d25faaceb706d2 *man/QvarUC.Rd
 bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
-ea581f4824e64871d53376a9751c8a2e *man/SURff.Rd
+d39629f7598851d50262b1075321525a *man/SURff.Rd
 685985b08b4668ae66206e9d72170b45 *man/Select.Rd
 20a760cb2a7468d974d2de5c88d870e3 *man/SurvS4-class.Rd
 6ed5239b716d4aaef069b66f248503f0 *man/SurvS4.Rd
 21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
 6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
-caae3db8a87eac92853fc85446377c7c *man/VGAM-package.Rd
-c7ed6cfd12ca60f833e67541f00ad317 *man/acat.Rd
-b346a61c9c3965d8ca97f3c98d9cacc0 *man/alaplace3.Rd
+3656d1dde004b1de74846eaf813a2f69 *man/VGAM-package.Rd
+93acacd4fef4b73ba027faff69619938 *man/acat.Rd
+8320c9356f95587835bb7503df9ad125 *man/alaplace3.Rd
 8c0d8e4d9e634a0c2539e3a052afa9cc *man/alaplaceUC.Rd
 8e181f4f03b718c6c9825ea3b6c4b8d6 *man/amlbinomial.Rd
 f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
@@ -199,7 +202,7 @@ afa1ccbe6dd6e769dc1bbbc5702148dd *man/benini.Rd
 12d28242eea600b3e6f52db5d71d871f *man/beniniUC.Rd
 dbf1d7ee255da6a85fbafbc84f2c0650 *man/betaII.Rd
 3a31e0a304c2ccab10469d866ae8acdb *man/betaR.Rd
-6d202361c5c1981d29c597fd716050f0 *man/betabinomUC.Rd
+d489f43e8771ddb6f32e121be29b838a *man/betabinomUC.Rd
 bbb0ddef9113d1b8d1e036ac66f9bb87 *man/betabinomial.Rd
 4e9c0e3075be1050db8ad3fe1e8dce6e *man/betabinomialff.Rd
 29d0247eaef9f6447e173c8ac994acbd *man/betaff.Rd
@@ -224,8 +227,8 @@ c7a7e2b700c4358fb65489876ead2d79 *man/binom2.or.Rd
 129f6be1cf1a039f137e5ef3da503fca *man/binom2.orUC.Rd
 a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
 20cb304b16a9073488621b104549e361 *man/binom2.rhoUC.Rd
-7a1cc63530a0082533d6f267a7647fa2 *man/binomialff.Rd
-53f8bc3da41aabe202d80304f2f84b63 *man/binormal.Rd
+29a9e5aa565832fad506a6a45c7b2897 *man/binomialff.Rd
+92806ec6cd9c65373fffb732eda114b5 *man/binormal.Rd
 3e2bebdf7d5db7a0c7960d6b6f1597b5 *man/binormalUC.Rd
 ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
 9758ba4618c9c24caafec486b01238f5 *man/binormcopUC.Rd
@@ -239,8 +242,8 @@ f0816002d3fb698dbc17a6e55d91c18f *man/bistudentt.Rd
 81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
 214e2f5b25156e937a5af65d1e6e1b58 *man/borel.tanner.Rd
 a25a019943aa0d82d35d6c46ec726c67 *man/bortUC.Rd
-37886ad1c2aea4c4cee78bad74c92d5d *man/brat.Rd
-4b158e93b6c981f016ed121e987c50b7 *man/bratUC.Rd
+3bb03f3211ee3ce9848b85060c6de4d3 *man/brat.Rd
+a20b8cf27b3285f4ffcd0648862edc46 *man/bratUC.Rd
 c7322bedb2b3d8ba4e7c0a19a2098ced *man/bratt.Rd
 f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
 b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
@@ -250,29 +253,30 @@ afbb7b695f652a4bccfb0e6cb80a8739 *man/cao.Rd
 4005c8bdb2b1a2e7d0ff5f1a800f4224 *man/cao.control.Rd
 10f72289cb33f5f734d39826893a280b *man/cardUC.Rd
 53ff522ff00f7bcfe443309762441150 *man/cardioid.Rd
-f4674b1787a58c87fbabdb369dc8a1ca *man/cauchit.Rd
+a458bca3e32bdc653cd924dd564ee58d *man/cauchit.Rd
 d361f0253fb328f70a716c09fd597fdc *man/cauchy.Rd
 4973007c9a18278e2130994b68a2e47d *man/cdf.lmscreg.Rd
 6c41f48884c2e92fa7842266d02a5a6d *man/cens.gumbel.Rd
 f96d45016bcca1b72249a3548520a2cf *man/cens.normal.Rd
-d5293110487b396f767fbd2224774b58 *man/cens.poisson.Rd
+72901f13efe7d772fc5ed78bd6c58cea *man/cens.poisson.Rd
 94e6c5ea5488d93e0400ce9675e4d692 *man/cfibrosis.Rd
 a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
 1d5073eb8aded1b67fc52855c72fbc8d *man/chest.nz.Rd
 922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
 9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
 aff05a422130d8ced689190eec1b09dd *man/clo.Rd
-452920d20020b6be8eb6ead2cdcbdc0e *man/cloglog.Rd
+f0fa4d5fd65cc5d53012b586f24b3fb3 *man/cloglog.Rd
 b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
 e492f5f148514df05cc4bf101b7505e2 *man/coefvlm.Rd
 1409b01c52bad85c87e9740fb003699a *man/concoef-methods.Rd
 e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
+19ee88e086b371be838206bd11b5479e *man/confintvglm.Rd
 30bff4a27550ca7e9a699e5b5cba007e *man/constraints.Rd
 523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
 5314268c4257680ac10edf26e9222944 *man/cqo.Rd
 8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
-ca3db2c26abb8120651e1d179ac6fbb3 *man/cratio.Rd
-21c6374195045745b28c854b70312efa *man/cumulative.Rd
+72ae26906f75fb658caf9ced32ba15a7 *man/cratio.Rd
+002568187283dd7faf83534553674e94 *man/cumulative.Rd
 f2ce3a3f6ad52abbbb75eddf5baf1893 *man/dagum.Rd
 12192f19751804a540e6d0852e29726c *man/dagumUC.Rd
 d5439d37875ba50990406c5c5f8595eb *man/deermice.Rd
@@ -296,7 +300,7 @@ cb83f77886603d8f133964c227915d08 *man/expexpff.Rd
 eccfa33017118bc7314ef168695a595e *man/expgeometric.Rd
 f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
 93cc460d2fd8c787aa6feaf5347f1685 *man/expint.Rd
-59e10a79028eef76da5bdc868e6bb38e *man/explink.Rd
+6ab5a59ea1b5f61fbe676577b3882529 *man/explink.Rd
 89ce96662b931aa17182192618085ed0 *man/explogUC.Rd
 e51211ad603eeecbe72cd7f6db0e76e0 *man/explogff.Rd
 fdbbdfc5e8f244b0ec6759aa8894bced *man/exponential.Rd
@@ -307,14 +311,15 @@ c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
 09fc6553edb037bc708396a30fe3c8f2 *man/fff.Rd
 9d679a175cfe7165b89906441e5efebc *man/fill.Rd
 b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
-5fd279ebc2d6ec3df74557cdca6940c0 *man/fisherz.Rd
+460448c26c4268e7870bbff5f9d2fb66 *man/fisherz.Rd
 c75c1ffce51c2de0fec04f54bbaf466b *man/fisk.Rd
 5966dbc9e396bd3cbb15b2650d885177 *man/fiskUC.Rd
 c75d3ae0a8669fed4a71f54b8be64266 *man/fittedvlm.Rd
 742b72298fd6b2ca944812681ad625a6 *man/flourbeetle.Rd
 c0269f789f9739dc6aeeb20b446ae751 *man/foldnormUC.Rd
 3909f1a56c381d71501b6fde8d6647fe *man/foldnormal.Rd
-a1e9f04937cb86ba7027d812faabfe3d *man/foldsqrt.Rd
+e1413cdef7d5b35f976738561f60a91a *man/foldsqrt.Rd
+628edb6d51c54d246702e9521ba6470c *man/formulavlm.Rd
 7af865ab486ea1d5d043bdef4bbf81cc *man/frechet.Rd
 dabb4b7cdd3422f239888fb85ca5a70b *man/frechetUC.Rd
 cad07bc11ec21b13ecdbc3b93ec8efc0 *man/freund61.Rd
@@ -326,7 +331,7 @@ edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
 e0fdd50e95e43075ac79c911f05c0b61 *man/gaussianff.Rd
 a666a1118f74b8bff779fa283e483cbc *man/genbetaII.Rd
 45999add2a92fc243422b25bfc8f8198 *man/genbetaIIUC.Rd
-59fb27b205e8ff10daca7d8d37a5d3f1 *man/gengamma.Rd
+00ace61cf251e01ebf8144a503c4305d *man/gengamma.Rd
 588e10d5c3fd9ff745c679435c5f2457 *man/gengammaUC.Rd
 0a765eb0392ad75d94c0b0f0c517f9fb *man/genpoisUC.Rd
 296e471d13459805b0cb9d98e2de2a00 *man/genpoisson.Rd
@@ -338,7 +343,7 @@ d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
 7d533bf53d40503606dda3a614245aa1 *man/gev.Rd
 0496867739918b68919e42a4018a338c *man/gevUC.Rd
 fd070015282f2cca2b0a4b8200822551 *man/gew.Rd
-598ef24d82351a3cb69dd2a7a482ea4e *man/golf.Rd
+7ac66cc25e3d13cc7fed08bb6b85e1db *man/golf.Rd
 9a635d01c2a0f08b71517df675b20a92 *man/gompertz.Rd
 8170cb9545cf35f1768db069b13a893e *man/gompertzUC.Rd
 7ec773041e29285cfe05226d6d58a30e *man/gpd.Rd
@@ -350,6 +355,7 @@ bd6be76e82363793b9186e55d0e35bd0 *man/gumbelII.Rd
 5099d1835eebc1b4610481e77463a50c *man/gumbelIIUC.Rd
 6a66a220a209ae6d1c7eb0bf57f59671 *man/gumbelUC.Rd
 fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
+2c4e81cce3a305291fc9493d3f128b07 *man/has.intercept.Rd
 d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
 2be497a8d77472f00279d19f735863b5 *man/hormone.Rd
 93557c7aca25514dc023773bdd045d76 *man/hspider.Rd
@@ -361,7 +367,7 @@ e3a9765eba431e1f55e2fdc11ff52b4b *man/hypersecant.Rd
 2bf15af91bb331e94b94dd69050589c0 *man/hzeta.Rd
 04198bb4e2bf6a230e17b4e84251887f *man/hzetaUC.Rd
 7f0e64784914835bb11c6f43643aae15 *man/iam.Rd
-c2796439b1c32144c3a1ffcbd7f6da72 *man/identitylink.Rd
+c978905e9ad1554330e74b3088faa909 *man/identitylink.Rd
 857cbf6f8c5970a18867fe560f275f6f *man/inv.binomial.Rd
 745b6c5557776c23bed67b268f03f432 *man/inv.gaussianff.Rd
 c64f106b3cd1010819641b86b926440a *man/inv.lomax.Rd
@@ -401,17 +407,17 @@ b0a070fdafa635bab794c5cf3ac88ba0 *man/lms.yjn.Rd
 06a1ce6e6f01fca7e7037eabc6cf3dad *man/logF.UC.Rd
 9f80bd504e1c75b0c7b29b3449cf7362 *man/logLikvlm.Rd
 236716ee0347bd21a08aec9fec2a810b *man/logUC.Rd
-e956c4aae749e9034b7cf7fdf8661a64 *man/logc.Rd
-8c871e5697ed43662cd313fc777c2bcd *man/loge.Rd
+34497f2200a115323b8be4c181dc5b09 *man/logc.Rd
+1e7009d720bba4d0201441cd02be84d7 *man/loge.Rd
 20cc0c73ee555790179879533cb526f7 *man/logff.Rd
 12d3a7e35301ecb632191ccf31a63296 *man/logistic.Rd
-753257abec4546ba43587133aa77dd6d *man/logit.Rd
+b283163521ea21b87f21463b719fc75f *man/logit.Rd
 8822ba593955e90e63a8779aaf74d29b *man/loglapUC.Rd
 0f6dd1a9c0fc77dd6521af733693f52e *man/loglaplace.Rd
 49d5183ac04d29b5427b9159fa101dc3 *man/loglinb2.Rd
 22ad47055f4be0a62a6f418b0024c911 *man/loglinb3.Rd
 f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
-a56f1a0e81c3dfdc8620c4cef1b87450 *man/lognormal.Rd
+a6cbcf688c21d36c440c24b56dd36113 *man/lognormal.Rd
 e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
 1a96739cc02213e306e77d33c5dec358 *man/lomax.Rd
 dbc62e15528097b42fb64d49be5f22f3 *man/lomaxUC.Rd
@@ -437,16 +443,16 @@ b1d15dda4a8aae6193ce4283ec7251bd *man/micmen.Rd
 131aaa836a137554786e8bda01d8e334 *man/model.framevlm.Rd
 3d875985c00b26af9cb66e0ae0e3aef8 *man/model.matrixvlm.Rd
 199ef13d300d6fe1210885af1647c13b *man/moffset.Rd
-764cafd682a3364a495cdf243e3a528e *man/multilogit.Rd
-900df9b977e6f2d0726620e2d7f6469e *man/multinomial.Rd
+a725287719f6c4119913108ee4824ddb *man/multilogit.Rd
+363cdcfbb07a4c10a8b29aae89f293f1 *man/multinomial.Rd
 c3248f9d509aecb0726bd0e6e36a13d4 *man/nakagami.Rd
 61319d756fcb8509696cc1aa55ae4ed2 *man/nakagamiUC.Rd
-7669f124f04f2912a3b004d509f9d15d *man/nbcanlink.Rd
-869ec0706195a833c57365fc8507c1bf *man/nbolf.Rd
-fd9adceacaf591a824a2eebd442000af *man/negbinomial.Rd
+a47f3ed802d871c374f92151f813e3cb *man/nbcanlink.Rd
+0c0ef87d1221196cdc7fc0d156ac150a *man/nbolf.Rd
+e4ed5c80c412d9c80bab940d61854dbc *man/negbinomial.Rd
 01e4d3c6a45020bef55cbadbad8388d3 *man/negbinomial.size.Rd
 14c4a7db111d0d9f41e5a810a3afdea2 *man/normal.vcm.Rd
-9c60d91960d5448bbcdb2486bba6275f *man/notdocumentedyet.Rd
+5f5f3d9146d7342cc48ecbd7d7c084d1 *man/notdocumentedyet.Rd
 5e590acdda3ff0a9e2df0db8d233f848 *man/nparamvglm.Rd
 98b83e406ea1968ba3e8b17d0933b2cf *man/olym.Rd
 858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
@@ -472,7 +478,7 @@ cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
 606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
 8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
 8d1096d9bfeee36841be53ebe7300e49 *man/poissonff.Rd
-035cdf64257014f6ebc1a4d553a4037a *man/polf.Rd
+83497c4069d8c74dc15f0308de0dac89 *man/polf.Rd
 696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
 2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
 a746161f043ec5c5517df4b9cf71501e *man/posbernoulli.b.Rd
@@ -484,7 +490,7 @@ dc19e3d023a2a46c670e431a2cc853e0 *man/posgeomUC.Rd
 2963a956fa63f0bd9452b10b432d4fc8 *man/posnegbinUC.Rd
 d1594d0598d420affef6f14a1c263685 *man/posnegbinomial.Rd
 45b528182d1c01bc352dea7b84fd7671 *man/posnormUC.Rd
-e3daf452b9b545aa37cefad2a93da0bd *man/posnormal.Rd
+e22de041c65d80b12a971cc0207aa1da *man/posnormal.Rd
 137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
 89e1ac898695d90f1d6075cafa971460 *man/pospoisson.Rd
 cc06ad7f82789c3703e4977cc39828ed *man/powerlink.Rd
@@ -493,8 +499,8 @@ ee31e58dfd33c2c3b0d51eac95b553ad *man/predictqrrvglm.Rd
 cb6a8c644c31d6ec5e8977ea7b1198df *man/predictvglm.Rd
 4b6da0d45912d1b7fbd9d833f20ec3e9 *man/prentice74.Rd
 5f4fbb060b2d8386d8d2bfde926d9d5d *man/prinia.Rd
-d1b88140c378a21755511fb4a6ae6bce *man/probit.Rd
-a80e37fe75a14efcfe236f6181cc58ac *man/propodds.Rd
+889d24cbaa36abd8df4c54fbf88609e2 *man/probit.Rd
+0dc0ebdd8538489ac38a624176612691 *man/propodds.Rd
 241402d089ef4159f01fb4cd2c72b9a3 *man/prplot.Rd
 ab1399d5d5f71707fd46960dc3efad04 *man/put.smart.Rd
 8f4e6ebea74037334377e346c5b476f6 *man/qrrvglm.control.Rd
@@ -503,14 +509,14 @@ b10bad72776d283be77901e730593f2e *man/qtplot.lmscreg.Rd
 bf8b2681beaeae00d54c8cb5422ad069 *man/quasibinomialff.Rd
 1dbf7bc4c97a7aafebcd736cf1baddbb *man/quasipoissonff.Rd
 bbde69d1bad346cd4ad04763c96d6ffe *man/qvar.Rd
-2ff61f599fb26b31315233d793fdded4 *man/rayleigh.Rd
+9941ff94abd604ccf9bf44d3819e60ee *man/rayleigh.Rd
 a95c0df100dedc0b4e80be0659858441 *man/rayleighUC.Rd
 6c45f58f39a63abc2ce8a0923c75cecc *man/rcqo.Rd
 97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
 585af0deb3deb7b61388d6d4557994d8 *man/rec.exp1.Rd
 64ea5646e75515a8b40fbd136fa6065e *man/rec.normal.Rd
 49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
-acfa691d70513cd589bcab400e529f4a *man/rhobit.Rd
+8e6ffaeea6e88d46925e60f343364a0d *man/rhobit.Rd
 d907e0bbe40b4fb02b0763ab6076309e *man/riceUC.Rd
 85498654134f98f8aa887bed07b4985a *man/riceff.Rd
 9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
@@ -540,37 +546,40 @@ b62da6a60b01916a10d691e980253bc0 *man/skewnormUC.Rd
 21bada3a13aca65ba49fb28127575144 *man/smart.expression.Rd
 5726ef8bb900532df62b24bd4b7b8fe4 *man/smart.mode.is.Rd
 21a1d3bd045859ceab377610a53ba976 *man/smartpred.Rd
-a9dd55f0d6949934e7c89abc6c124e83 *man/sratio.Rd
+736fffd7cddf8065fb1dd167f2aa236c *man/sratio.Rd
 0c48da9ab33eb24273c6348320a64f64 *man/studentt.Rd
+2b5cebdae54f21ad3fc0b3df37c6dd9a *man/summaryvglm.Rd
 0258a94ee53da230fb2aea74fd90192a *man/tikuv.Rd
 ccaa57b076049fdf3cee1c321a2ab456 *man/tikuvUC.Rd
-2e766ddeb8f6fb02dcd50740639de6ad *man/tobit.Rd
+d9f889c35db05e7eef26be323a3842cb *man/tobit.Rd
 5130a86e60a3b1010b1364155a1afdd0 *man/tobitUC.Rd
 b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
-9b6a285a017b9928ae92a76eaf9e502d *man/triangle.Rd
+59e040af3616943e93946ddf0ba96aba *man/triangle.Rd
 4b120eb41d1983a4afbe2b45793dc11e *man/triangleUC.Rd
 1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
 c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
 aeaf42ac6e475f1dc3f180450d56c2ee *man/truncparetoUC.Rd
 1d47c3a8f732ea01782c7e0b9929a921 *man/truncweibull.Rd
 50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-39838a7245484fda644f275531575884 *man/undocumented-methods.Rd
+5d46e81b3078ef071d0d2afe8cfae91d *man/undocumented-methods.Rd
 f8f257cf6c91bb3c2765bc9c1d5fd4f1 *man/uninormal.Rd
+6a60d8e09c890e47042be1203aee9547 *man/vcovvlm.Rd
 f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
 8ab09ea32a3839db780ac641218c322e *man/vgam-class.Rd
 77ac046fef4733e8dc8a26ecb61d201d *man/vgam.Rd
 ea3fe248b860921783367037c8302c49 *man/vgam.control.Rd
 126b55b4567a63cf2edb04a8b6d91506 *man/vglm-class.Rd
-def3c8ac9a4ab8a58b88a3d10df8958d *man/vglm.Rd
+71c4c86e48be338c410905722e51afb8 *man/vglm.Rd
 0fb3b6b60182efdce44c9d225bcf0a64 *man/vglm.control.Rd
 7cab64090aec93a3edb1a7df961a1fe0 *man/vglmff-class.Rd
 95420c89f2280b447cbd7784f83e7454 *man/vonmises.Rd
 25b2ef45238e3f61e82dcf52f3d17090 *man/vsmooth.spline.Rd
 c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
 9b9bdfbbf8060eb284c84e8ed9273154 *man/waldff.Rd
-9e36f5a354e39e4d645e105c7252ad00 *man/weibullR.Rd
+c7bfab9a73e5d5914f5adeac357a54c6 *man/weibull.mean.Rd
+3f1be522e8c9beebe0835912ca81c8db *man/weibullR.Rd
 e41e54f8623a002d20e55df65c5b6a87 *man/weightsvglm.Rd
-3557b17f6054a1699cb653b36f6d1a37 *man/wine.Rd
+e7fd9c7165410545d49481aeded2b317 *man/wine.Rd
 a814b37503a9534c86789482ab81333f *man/wrapup.smart.Rd
 622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
 ebfff81b0f4730417de95f80b7c82c41 *man/yip88.Rd
@@ -625,5 +634,5 @@ e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
 b19585d2495c46800b0c95f347fe89f9 *src/zeta3.c
 bfa11dbdbff271fb20342560f2bacd53 *vignettes/categoricalVGAM.Rnw
 d7beca978b587625654f981f7dc433d0 *vignettes/categoricalVGAMbib.bib
-8b5466d6da7f3c0e97030bd8364a9ca4 *vignettes/crVGAM.Rnw
-f1aac71e04f29f2b6c28434f90617ffa *vignettes/crVGAM.bib
+8e489008d8b8b8f769e5e93e351c9c42 *vignettes/crVGAM.Rnw
+b5d97a0617a50e3ac2396b54a6f9db67 *vignettes/crVGAM.bib
diff --git a/NAMESPACE b/NAMESPACE
index 35e9d7e..7d5aacc 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -7,6 +7,63 @@
 useDynLib(VGAM)
 
 
+
+export(as.char.expression)
+
+
+
+
+
+
+
+
+
+S3method(vcov, vlm, vcovvlm)
+S3method(coef, vlm, coefvlm)
+S3method(df.residual, vlm, df.residual_vlm)
+S3method(model.matrix, vlm, model.matrixvlm)
+S3method(formula, vlm, formulavlm)
+export(vcov.vlm, coef.vlm, formula.vlm, model.matrix.vlm)
+export(has.interceptvlm)
+exportMethods(has.intercept)
+export(term.namesvlm)
+exportMethods(term.names)
+export(responseNamevlm)
+exportMethods(responseName)
+
+
+
+
+  importFrom("grDevices", "chull")
+  importFrom("graphics", "abline", "arrows", "axis", "lines", "matlines",
+             "matplot", "matpoints", "mtext", "par", "points", "rug",
+             "segments", "text")
+  importFrom("methods", "as", "is", "new", "slot", "slot<-", "slotNames",
+             "show")
+  importFrom("stats", ".getXlevels", "as.formula", "contrasts<-",
+             "dbeta", "dbinom", "delete.response", "deriv3", "dgamma",
+             "dgeom", "dnbinom", "dt", "dweibull", "getCall",
+             "integrate", "is.empty.model", "lm.fit", "median",
+             "model.offset", "model.response", "model.weights",
+             "na.fail", "napredict", "optim", "pbeta", "pbinom",
+             "pgamma", "pgeom", "pnbinom", "polym", "printCoefmat",
+             "pweibull", "qbeta", "qbinom", "qchisq", "qf", "qgamma",
+             "qgeom", "qnbinom", "qt", "quantile", "qweibull", "rbeta",
+             "rbinom", "rgamma", "rgeom", "rlnorm", "rlogis", "rnbinom",
+             "runif", "rweibull", "sd", "spline", "terms.formula",
+             "time", "uniroot", "update.formula", "var", "weighted.mean")
+  importFrom("utils", "flush.console", "getS3method", "head")
+
+
+
+
+importFrom("stats4", confint)  # For S4, not S3
+export(confintvglm)  # For S4, not S3
+export(confintrrvglm)  # For S4, not S3
+export(confintvgam)  # For S4, not S3
+exportMethods(confint)  # For S4, not S3
+
+
 export(dgenpois)
 export(AR1)
 export(dAR1)
@@ -171,7 +228,7 @@ export(is.empty.list)
 
 export(
 Build.terms.vlm,
-interleave.VGAM,
+interleave.VGAM, interleave.cmat,
 procVec,
 ResSS.vgam,
 valt.control,
@@ -357,6 +414,7 @@ dlevy, plevy, qlevy, rlevy, levy,
 lms.bcg.control, lms.bcn.control, lmscreg.control,
 lms.yjn.control, 
 lms.bcg, lms.bcn, lms.yjn, lms.yjn2,
+dlms.bcn, qlms.bcn,
 lqnorm,
 dbilogis, pbilogis, rbilogis, bilogistic,
 logistic1, logistic,
@@ -422,7 +480,9 @@ summary.rrvgam, summary.grc,
   summary.qrrvglm,
 summary.rrvglm,
 summaryvgam, summaryvglm, summaryvlm,
-s.vam, terms.vlm, 
+s.vam,
+terms.vlm, 
+termsvlm, 
 Tol.Coef.qrrvglm, Tol.qrrvglm,
 triangle, dtriangle, ptriangle, qtriangle, rtriangle, 
   vcovvlm,
@@ -608,7 +668,7 @@ poissonff,
 dposbinom, pposbinom, qposbinom, rposbinom, posbinomial,
 dposgeom, pposgeom, qposgeom, rposgeom, # posgeometric,
 dposnegbin, pposnegbin, qposnegbin, rposnegbin, posnegbinomial,
-dposnorm, pposnorm, qposnorm, rposnorm, posnormal, posnormal.control,
+dposnorm, pposnorm, qposnorm, rposnorm, posnormal,
 dpospois, ppospois, qpospois, rpospois, pospoisson,
 qtplot.lmscreg, quasibinomialff, quasipoissonff, rdiric, rigff,
 rrar, rrvglm.control,
@@ -627,7 +687,7 @@ cqo,
 qrrvglm.control,
 vgam.control, vgam, vglm.control, vglm,
 vsmooth.spline,
-weibullR, yip88,
+weibull.mean, weibullR, yip88,
 dzabinom, pzabinom, qzabinom, rzabinom, zabinomial, zabinomialff,
 dzageom, pzageom, qzageom, rzageom, zageometric, zageometricff,
 dzanegbin, pzanegbin, qzanegbin, rzanegbin, zanegbinomial, zanegbinomialff,
diff --git a/NEWS b/NEWS
index b5b570f..a86913e 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,69 @@
 
 
 
+                CHANGES IN VGAM VERSION 1.0-0
+
+NEW FEATURES
+
+    o   Official version that goes with the just-released book
+        "Vector Generalized Linear and Additive Models:
+        With an Implementation in R" by T. W. Yee (2015),
+        Springer: New York, USA.
+    o   gengamma.stacy() implements a grid search wrt
+        all its parameters.
+    o   New functions: [dp]lms.bcn().
+    o   New family function: weibull.mean().
+    o   triangle.control() slows down the speed of the iterations
+        towards the MLE, because the regularity conditions do not hold.
+    o   New arguments: AR1(nodrift = FALSE).
+    o   binormal has arguments eq.mean and eq.sd which now operate
+        independently.
+    o   confint() should work for objects that are "vglm"s.
+        Thanks to Tingting Zhan for suggesting this.
+    o   Tested okay on R 3.2.2.
+    o   Methods functions for responseName() and term.names() and
+        has.intercept().
+
+
+BUG FIXES and CHANGES
+
+    o   Link functions have changed a lot!!!
+        They return different values when deriv = 1 and deriv = 2,
+        coupled with inverse = TRUE and inverse = FALSE.
+        Type ?Links to see examples. 
+        The first derivatives become reciprocals of each other
+        when inverse = TRUE and inverse = FALSE, however the
+        2nd derivatives are no longer reciprocals of each other.
+        Also affected are dtheta.deta() and d2theta.deta2(), etc.
+    o   'show()' added to importMethods('methods') in NAMESPACE.
+    o   The following currently do not work:
+        golf(), nbolf(), polf().
+    o   AA.Aa.aa() used the OIM and worked for intercept-only models,
+        but now it uses the EIM.
+    o   logneg("a", short = TRUE) has been modified.
+    o   posnormal(): the first and second derivatives have been modified
+        for both SFS and ordinary FS,
+        and the default is zero = -2 now ('sd' is intercept-only).
+        Several other improvements have been done.
+    o   binomialff()@deviance is assigned all the time now.
+    o   dbetabin.ab() better handles extremes in the shape
+        parameters (very close to 0 and larger than 1e6, say).
+        Thanks to Juraj Medzihorsky for picking this up.
+    o   Family functions: zigeometric()@weight and zigeometricff()@weight
+        had one element incorrect.
+    o   logit("a+b", short = FALSE) was labelled incorrectly, etc.
+    o   Family function tobit()@weights implements Fisher scoring
+        entirely. And it handles observations whose fitted
+        values are (relatively) large and positive; thanks to
+        Victor Champonnois for picking up this bug.
+    o   S3 methods function df.residual_vlm() also called by df.residual.vlm().
+        This is to avoid a bug picked up by car::linearHypothesis().
+        Ditto for vcovvlm() by vcov.vlm().
+        Also model.matrix() and formula().
+        Thanks to Michael Friendly and John Fox for help here.
+
+
+
                 CHANGES IN VGAM VERSION 0.9-8
 
 NEW FEATURES
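
The NEWS entry above is the headline change of this release: with VGAM >= 1.0-0
a link function called with deriv = 1 returns different values for
inverse = TRUE and inverse = FALSE, and those two first derivatives are
reciprocals of each other (the second derivatives are not). A minimal check,
assuming VGAM >= 1.0-0 is installed; the directional comments follow the
convention suggested by the Links.R change just below (dtheta.deta() now forces
inverse = TRUE), and only the reciprocal relationship is taken from NEWS:

    library(VGAM)
    p <- seq(0.1, 0.9, by = 0.2)
    d1.fwd <- logit(p, deriv = 1, inverse = FALSE)  # presumably d eta / d theta
    d1.inv <- logit(p, deriv = 1, inverse = TRUE)   # presumably d theta / d eta
    all.equal(d1.fwd * d1.inv, rep(1, length(p)))   # reciprocals, per NEWS
    # deriv = 2 values are NOT reciprocals of each other, so code written
    # against VGAM <= 0.9-9 that uses deriv >= 1 should be audited.
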
diff --git a/R/Links.R b/R/Links.R
index 2cbb39b..b83e5c1 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -19,12 +19,11 @@
   function(theta,
            link = "identitylink",
            earg = list(theta = theta,  # Needed
-                       inverse = FALSE,
+                       inverse = TRUE,  # 20150711: big change!!!!
                        deriv = 1,
                        short = TRUE,
                        tag = FALSE)) {
 
-
   function.name  <- link
 
   function.name2 <- attr(earg, "function.name")
@@ -34,6 +33,10 @@
 
   earg[["theta"]] <- theta  # New data
 
+  if (length(earg$inverse))
+    earg[["inverse"]] <- TRUE else
+    earg$inverse <- TRUE
+
   earg[["deriv"]] <- 1  # New
 
 
@@ -48,7 +51,7 @@
   function(theta,
            link = "identitylink",
            earg = list(theta = theta,  # Needed
-                       inverse = FALSE,
+                       inverse = TRUE,  # 20150711: big change!!!!
                        deriv = 2,
                        short = TRUE,
                        tag = FALSE)) {
@@ -62,6 +65,11 @@
 
   earg[["theta"]] <- theta  # New data
 
+
+  if (length(earg$inverse))
+    earg[["inverse"]] <- TRUE else
+    earg$inverse <- TRUE
+
   earg[["deriv"]] <- 2  # New
 
   do.call(what = function.name, args = earg)
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index b7aa8ab..509b40a 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -9,19 +9,39 @@ if (!isGeneric("terms"))
 
 
 
+terms.vlm <- function(x, ...) {
+  termsvlm(x, ...)
+}
 
 
-terms.vlm <- function(x, ...) {
-  v <- x@terms
-  if (!length(v))
-    stop("terms slot is empty")
-  v <- v$terms
-  if (!length(v))
-    stop("no terms component")
-  v
+
+termsvlm <- function(x, form.number = 1, ...) {
+  if (!is.Numeric(form.number, integer.valued = TRUE,
+                  length.arg = 1, positive = TRUE) ||
+      form.number > 2)
+    stop("argument 'form.number' must be 1 or 2")
+
+
+  v <- if (form.number == 1) {
+    v <- x@terms
+    if (!length(v))
+      stop("terms slot is empty")
+    v$terms
+  } else if (form.number == 2) {
+    x@misc$Terms2
+  }
+  if (length(v)) {
+    v
+  } else {
+    warning("no terms component; returning a NULL")
+    NULL
+  }
 }
 
 
+
+
+
 setMethod("terms", "vlm", function(x, ...) terms.vlm(x, ...))
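
The rewritten termsvlm() above gains a 'form.number' argument: 1 returns the
usual terms slot, 2 returns x@misc$Terms2, and a missing component now gives a
warning and NULL rather than an error. A hedged usage sketch, using the stock
pneumo/propodds example from the VGAM documentation (output not asserted):

    library(VGAM)
    fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    terms(fit)                        # dispatches to terms.vlm(), form.number = 1
    # termsvlm(fit, form.number = 2)  # NULL plus a warning here, since this fit
    #                                 # carries no second terms component
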
 
 
diff --git a/R/calibrate.q b/R/calibrate.q
index 1f0eb7a..83d1c43 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -83,6 +83,9 @@ calibrate.qrrvglm <-
     newdata <- as.matrix(newdata)
   newdata <- newdata[, object@misc$ynames, drop = FALSE]
 
+
+
+
   obfunct <- slot(object@family, object@misc$criterion)
   minimize.obfunct <-
     if (Quadratic) object@control$min.criterion else
@@ -135,7 +138,7 @@ calibrate.qrrvglm <-
               method = optim.control$Method.optim,  # "BFGS" or "CG" or...
               control = c(fnscale = ifelse(minimize.obfunct, 1, -1),
                           use.optim.control),
-              y = newdata[i1, ],
+              y = newdata[i1, , drop = FALSE],  # drop added 20150624
               extra = object@extra,
               objfun = obfunct,
               Coefs = Coefobject,
@@ -147,7 +150,7 @@ calibrate.qrrvglm <-
               method = optim.control$Method.optim,  # "BFGS" or "CG" or...
               control = c(fnscale = ifelse(minimize.obfunct, 1, -1),
                           use.optim.control),
-              y = newdata[i1, ],
+              y = newdata[i1, , drop = FALSE],  # drop added 20150624
               extra = object@extra,
                 objfun = obfunct,
                 object = object,
@@ -210,7 +213,7 @@ calibrate.qrrvglm <-
     for (i1 in 1:nn) {
       ans <- if (Quadratic)
                .my.calib.objfunction.qrrvglm(BestOFpar[i1, ],
-                    y = newdata[i1, ],
+                    y = newdata[i1, , drop = FALSE],  # drop added 20150624
                     extra = object@extra,
                     objfun = obfunct,
                     Coefs = Coefobject,
@@ -218,7 +221,7 @@ calibrate.qrrvglm <-
                     everything = TRUE,
                     mu.function = slot(object@family, "linkinv")) else
             .my.calib.objfunction.rrvgam(BestOFpar[i1, ],
-                    y = newdata[i1, ],
+                    y = newdata[i1, , drop = FALSE],  # drop added 20150624
                     extra = object@extra,
                     objfun = obfunct,
                     object = object,
@@ -295,7 +298,7 @@ calibrate.qrrvglm <-
     for (s in 1:M) {
       vec1 <- cbind(Coefs@A[s, ]) +
               2 * matrix(Coefs@D[, , s], Rank, Rank) %*% bnumat
-      vcmat <- vcmat + mu[1,s] * vec1 %*% t(vec1)
+      vcmat <- vcmat + mu[1, s] * vec1 %*% t(vec1)
     }
     vcmat <- solve(vcmat)
   } else {
@@ -324,7 +327,7 @@ calibrate.qrrvglm <-
     eta <- matrix(as.numeric(NA), 1, NOS)
     for (jlocal in 1:NOS) {
       eta[1, jlocal] <- predictrrvgam(object, grid = bnu, sppno = jlocal,
-                                   Rank = Rank, deriv = 0)$yvals
+                                      Rank = Rank, deriv = 0)$yvals
     }
     mu <- rbind(mu.function(eta, extra))  # Make sure it has one row 
     value <- objfun(mu = mu, y = y,
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 5904b6f..17115c2 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -7,6 +7,13 @@
 
 
 
+coef.vlm <- function(object, ...) {
+
+  coefvlm(object, ...)
+}
+
+
+
 coefvlm <- function(object, matrix.out = FALSE, label = TRUE,
                     colon = FALSE) {
 
@@ -80,6 +87,8 @@ setMethod("coef", "vglm", function(object, ...)
 
 
 
+
+
   
 setMethod("coefficients", "summary.vglm", function(object, ...)
           object@coef3)
diff --git a/R/confint.vlm.R b/R/confint.vlm.R
new file mode 100644
index 0000000..88fec34
--- /dev/null
+++ b/R/confint.vlm.R
@@ -0,0 +1,85 @@
+# 20150827; confint.vlm.R
+
+# Last modified: 20150827,
+# 1. 20150827; Tingting Zhan prodded me to do this.
+# 2. 
+# 3.
+
+
+
+confintvglm <- function(object, parm, level = 0.95, ...) {
+  cf <- coef(object)
+  pnames <- names(cf)
+  if (missing(parm))
+    parm <- pnames else
+  if (is.numeric(parm))
+    parm <- pnames[parm]
+  a <- (1 - level)/2
+  a <- c(a, 1 - a)
+# ":::" may lead to warning in \pkg{devtools}:
+# pct <- stats:::format.perc(a, 3)
+  format.perc <- function(probs, digits)
+  paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits),
+        "%")
+  pct <- format.perc(a, 3)
+  fac <- qnorm(a)
+  ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct))
+  ses <- sqrt(diag(vcov(object)))[parm]
+  ci[] <- cf[parm] + ses %o% fac
+  ci
+}
+
+
+
+confintrrvglm <- function(object, parm, level = 0.95, ...) {
+  stop("currently this function has not been written")
+
+
+# 20150828; yettodo: write this function... it's not too difficult.
+# Base it on vcovrrvglm() and summaryrrvglm() because they do all
+# the work, which involves elements in A and C and B1.
+# 
+
+  
+}
+
+
+confintvgam <- function(object, parm, level = 0.95, ...) {
+  stop("currently this function has not been written")
+}
+
+
+
+
+
+
+# ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+
+# if (FALSE) {
+if (!isGeneric("confint"))
+    setGeneric("confint",
+               function(object, parm, level = 0.95, ...)
+               standardGeneric("confint"),
+           package = "VGAM")
+
+
+setMethod("confint", "vglm",
+          function(object, parm, level = 0.95, ...)
+            confintvglm(object = object, parm = parm, level = level, ...))
+
+
+# Stop other types of models from working... its dangerous:
+setMethod("confint", "rrvglm",
+          function(object, parm, level = 0.95, ...)
+            confintrrvglm(object = object, parm = parm, level = level, ...))
+
+setMethod("confint", "vgam",
+          function(object, parm, level = 0.95, ...)
+            confintvgam(object = object, parm = parm, level = level, ...))
+# }
+
+
+# ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+
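
The new confintvglm() above computes default Wald intervals: coef(object)[parm]
plus the normal quantiles at (1 - level)/2 and 1 - (1 - level)/2 times the
standard errors sqrt(diag(vcov(object)))[parm]. A hedged usage sketch (the
pneumo/propodds call is the stock VGAM example; exact numbers are not
asserted):

    library(VGAM)
    fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
    confint(fit)                # 95% Wald intervals via the new "vglm" method
    confint(fit, level = 0.99)  # wider intervals
    # confint() on "rrvglm" or "vgam" fits currently stops with an error,
    # because confintrrvglm() and confintvgam() above are placeholders.
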
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 4920142..2b7f432 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -12,6 +12,7 @@
 
 
 
+
 dkumar <- function(x, shape1, shape2, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -375,6 +376,7 @@ riceff.control <- function(save.weights = TRUE, ...) {
 }
 
 
+
  riceff <- function(lsigma = "loge", lvee = "loge",
                     isigma = NULL, ivee = NULL,
                     nsimEIM = 100, zero = NULL, nowarning = FALSE) {
@@ -413,7 +415,7 @@ riceff.control <- function(save.weights = TRUE, ...) {
             "besselI(-z/2, nu = 0) - z * besselI(-z/2, nu = 1)) ",
             "where z=-vee^2/(2*sigma^2)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -669,7 +671,7 @@ skellam.control <- function(save.weights = TRUE, ...) {
                            bool = .parallel , 
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1272,15 +1274,15 @@ rlind <- function(n, theta) {
 
     dl.dtheta <- 2 / theta - 1 / (1 + theta) - y
 
-    dtheta.deta <- dtheta.deta(theta, .link , earg = .earg )
+    DTHETA.DETA <- dtheta.deta(theta, .link , earg = .earg )
 
-    c(w) * dl.dtheta * dtheta.deta
+    c(w) * dl.dtheta * DTHETA.DETA
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
 
     ned2l.dtheta2 <- (theta^2 + 4 * theta + 2) / (theta * (1 + theta))^2
 
-    c(w) * ned2l.dtheta2 * dtheta.deta^2
+    c(w) * ned2l.dtheta2 * DTHETA.DETA^2
   }), list( .zero = zero ))))
 }
 
@@ -1467,9 +1469,9 @@ if (FALSE)
 
     dl.dtheta <- 2 / theta + 1 / (y + 2 + theta) - (y + 3) / (theta + 1)
 
-    dtheta.deta <- dtheta.deta(theta, .link , earg = .earg )
+    DTHETA.DETA <- dtheta.deta(theta, .link , earg = .earg )
 
-    c(w) * dl.dtheta * dtheta.deta
+    c(w) * dl.dtheta * DTHETA.DETA
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
 
@@ -1486,7 +1488,7 @@ if (FALSE)
         matrix(colMeans(cbind(run.var)),
                n, M, byrow = TRUE) else cbind(run.var)
 
-    wz <- wz * dtheta.deta^2
+    wz <- wz * DTHETA.DETA^2
 
 
     c(w) * wz
@@ -1650,7 +1652,7 @@ slash.control <- function(save.weights = TRUE, ...) {
          "\n1/(2*sigma*sqrt(2*pi))",
          "\t\t\t\t\t\t\ty=mu\n")),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.basics.R b/R/family.basics.R
index c11a563..0c06722 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -202,7 +202,7 @@ subsetc <-
 
 
   myvec <- objvals[ans == vov]  # Could be a vector
-  if (ret.objfun) c(ans, myvec[1]) else ans
+  if (ret.objfun) c(Value = ans, ObjFun = myvec[1]) else ans
 }
 
 
@@ -391,7 +391,7 @@ cm.nointercept.VGAM <- function(constraints, x, nointercept, M) {
 
 
 
- cm.zero.VGAM <- function(constraints, x, zero, M) {
+ cm.zero.VGAM <- function(constraints, x, zero = NULL, M = 1) {
 
   asgn <- attr(x, "assign")
   nasgn <- names(asgn)
@@ -1192,7 +1192,7 @@ negzero.expression.VGAM <- expression({
   z.Index <- if (!length(dotzero)) NULL else
                    unique(sort(c(zneg.index, zpos.index)))
 
-  constraints <- cm.zero.VGAM(constraints, x, z.Index, M)
+  constraints <- cm.zero.VGAM(constraints, x = x, z.Index, M = M)
 })
 
 
@@ -1217,6 +1217,30 @@ interleave.VGAM <- function(L, M)
 
 
 
+interleave.cmat <- function(cmat1, cmat2) {
+  ncol1 <- ncol(cmat1)
+  ncol2 <- ncol(cmat2)
+  if (ncol1 == 1) {
+    return(cbind(cmat1, cmat2))
+  } else {  # ncol1 > 1
+    if (ncol2 == 1) {
+      return(cbind(cmat1[, 1], cmat2, cmat1[, -1]))
+    } else
+    if (ncol1 != ncol2) {
+      warning("this function is confused. Returning cbind(cmat1, cmat2)")
+      return(cbind(cmat1[, 1], cmat2, cmat1[, -1]))
+    } else {  # ncol1 == ncol2 and both are > 1.
+      kronecker(cmat1, cbind(1, 0)) +
+      kronecker(cmat2, cbind(0, 1))        
+    }
+  }
+}
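+
+
+# Illustrative sketch (hypothetical matrices A and B): with two 2-column
+# inputs the kronecker() branch interleaves the columns as a1, b1, a2, b2.
+if (FALSE) {
+  A <- matrix(1:4, 2, 2)  # columns a1, a2
+  B <- matrix(5:8, 2, 2)  # columns b1, b2
+  interleave.cmat(A, B)   # same as cbind(A[, 1], B[, 1], A[, 2], B[, 2])
+}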
+
+
+
+
+
+
 w.wz.merge <- function(w, wz, n, M, ndepy,
                        intercept.only = FALSE) {
 
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 0ae3887..c323b20 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1365,22 +1365,16 @@ my.dbinom <- function(x,
 
 
  dbetabinom.ab <- function(x, size, shape1, shape2, log = FALSE,
-                           .dontuse.prob = NULL) {
+                           Inf.shape = 1e6
+                          ) {
 
 
+  Bigg <- Inf.shape
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-  if (!is.Numeric(x))
-    stop("bad input for argument 'x'")
-  if (!is.Numeric(size, integer.valued = TRUE))
-    stop("bad input for argument 'size'")
-  if (any(shape1 < 0, na.rm = TRUE))
-    stop("negative values for argument 'shape1' not allowed")
-  if (any(shape2 < 0, na.rm = TRUE))
-    stop("negative values for argument 'shape2' not allowed")
 
 
   LLL <- max(length(x), length(size), length(shape1), length(shape2))
@@ -1389,39 +1383,95 @@ my.dbinom <- function(x,
   if (length(shape1) != LLL) shape1 <- rep(shape1, len = LLL)
   if (length(shape2) != LLL) shape2 <- rep(shape2, len = LLL)
 
-  ans <- 0 * x
+  ans <- x
+  ans[TRUE] <- log(0)
+  ans[is.na(x)]  <- NA
+  ans[is.nan(x)] <- NaN
+
+
+  ok0 <- !is.na(shape1) & !is.na(shape2) & !is.na(x) & !is.na(size)
   ok <- (round(x) == x) & (x >= 0) & (x <= size) &
-        is.finite(shape1) & is.finite(shape2)
+        is.finite(shape1) & is.finite(shape2) & ok0
   if (any(ok)) {
     ans[ok] <- lchoose(size[ok], x[ok]) +
-               lbeta(shape1[ok] + x[ok], shape2[ok] + size[ok] - x[ok]) -
+               lbeta(shape1[ok]            + x[ok],
+                     shape2[ok] + size[ok] - x[ok]) -
                lbeta(shape1[ok], shape2[ok])
-    if (log.arg) {
-    } else {
-      ans[ok] <- exp(ans[ok])
+
+
+    endpt <- (x == size) & ((shape1 < 1/Bigg) | (shape2 < 1/Bigg)) & ok0
+    if (any(endpt)) {
+      ans[endpt] <- lgamma(size[endpt] + shape1[endpt]) +
+                    lgamma(shape1[endpt] + shape2[endpt]) -
+                   (lgamma(size[endpt] + shape1[endpt] + shape2[endpt]) +
+                    lgamma(shape1[endpt]))
+    }
+
+
+
+
+    endpt <- (x == 0) & ((shape1 < 1/Bigg) | (shape2 < 1/Bigg)) & ok0
+    if (any(endpt)) {
+      ans[endpt] <- lgamma(size[endpt] + shape2[endpt]) +
+                    lgamma(shape1[endpt] + shape2[endpt]) -
+                   (lgamma(size[endpt] + shape1[endpt] + shape2[endpt]) +
+                    lgamma(shape2[endpt]))
     }
+
+
+
+
+
+    endpt <- ((shape1 > Bigg) | (shape2 > Bigg)) & ok0
+    if (any(endpt)) {
+      ans[endpt] <- lchoose(size[endpt], x[endpt]) +
+                    lgamma(x[endpt] + shape1[endpt]) +
+                    lgamma(size[endpt] - x[endpt] + shape2[endpt]) +
+                    lgamma(shape1[endpt] + shape2[endpt]) -
+                   (lgamma(size[endpt] + shape1[endpt] + shape2[endpt]) +
+                    lgamma(shape1[endpt]) +
+                    lgamma(shape2[endpt]))
+    }
+  }  # if (any(ok))
+
+
+
+  if (!log.arg) {
+    ans <- exp(ans)
   }
 
-  okay1 <- is.na(shape1)       & is.infinite(shape2)  # rho = 0 and prob == 0
-  okay2 <- is.infinite(shape1) & is.na(shape2)       # rho = 0 and prob == 1
-  okay3 <- is.infinite(shape1) & is.infinite(shape2)  # rho = 0 and 0 < prob < 1
 
-  if (sum.okay1 <- sum(okay1))
-    ans[okay1] <- dbinom(x = x[okay1], size = size[okay1],
-                         prob = 0,
-                         log = log.arg)
-  if (sum.okay2 <- sum(okay2))
-    ans[okay2] <- dbinom(x = x[okay2], size = size[okay2],
-                         prob = 1,
-                         log = log.arg)
-  if (sum.okay3 <- sum(okay3)) {
-    if (length(.dontuse.prob)   != LLL)
-      .dontuse.prob   <- rep( .dontuse.prob ,   len = LLL)
-    ans[okay3] <- dbinom(x = x[okay3], size = size[okay3],
-                         prob = .dontuse.prob[okay3],
-                         log = log.arg)
+
+  if (FALSE) {
+    ok1 <- is.na(shape1)       & is.infinite(shape2)  # rho==0 & prob==0
+    ok2 <- is.infinite(shape1) & is.na(shape2)        # rho==0 & prob==1
+    ok3 <- is.infinite(shape1) & is.infinite(shape2)  # rho==0 & 0<prob<1
+  } else {
+    ok1 <-   is.finite(shape1) & is.infinite(shape2)  # rho==0 & prob==0
+    ok2 <- is.infinite(shape1) &   is.finite(shape2)  # rho==0 & prob==1
+    ok3 <- is.infinite(shape1) & is.infinite(shape2)  # prob undefined
+
+  }
+
+  if (any(ok1))
+    ans[ok1] <- dbinom(x = x[ok1], size = size[ok1],
+                       prob = shape1[ok1] / (shape1[ok1]+shape2[ok1]),  # 0,
+                       log = log.arg)
+  if (any(ok2))
+    ans[ok2] <- dbinom(x = x[ok2], size = size[ok2],
+                       prob = 1,  # Inf / (finite + Inf) == 1
+                       log = log.arg)
+  if (any(ok3)) {
+    ans[ok3] <- dbinom(x = x[ok3], size = size[ok3],
+                       prob = shape1[ok3] / (shape1[ok3]+shape2[ok3]),
+                       log = log.arg)
   }
 
+
+  ans[shape1 < 0] <- NaN
+  ans[shape2 < 0] <- NaN
+
+
   ans
 }
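+
+# Illustrative check (hypothetical values): with shape parameters above
+# 'Inf.shape' the lgamma() branch is used, and the beta-binomial collapses
+# towards a binomial with prob = shape1 / (shape1 + shape2).
+if (FALSE) {
+  dbetabinom.ab(0:4, size = 4, shape1 = 2e7, shape2 = 6e7)
+  dbinom(0:4, size = 4, prob = 0.25)  # almost the same values
+}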
 
@@ -1445,34 +1495,36 @@ my.dbinom <- function(x,
   if (length(q)       != LLL) q      <- rep(q,      len = LLL)
   if (length(shape1)  != LLL) shape1 <- rep(shape1, len = LLL)
   if (length(shape2)  != LLL) shape2 <- rep(shape2, len = LLL)
-  if (length(size)    != LLL) size   <- rep(size,   len = LLL);
+  if (length(size)    != LLL) size   <- rep(size,   len = LLL)
 
-  ans <- q * 0  # Retains names(q)
+  ans <- q   # Retains names(q)
+  ans[] <- 0  #  Set all elements to 0
 
   if (max(abs(size   -   size[1])) < 1.0e-08 &&
       max(abs(shape1 - shape1[1])) < 1.0e-08 &&
       max(abs(shape2 - shape2[1])) < 1.0e-08) {
-    qstar <- floor(q)
+    if (any(is.infinite(qstar <- floor(q))))
+      stop("argument 'q' must be finite")
     temp <- if (max(qstar) >= 0) {
-             dbetabinom.ab(0:max(qstar), size = size[1],
-                           shape1 = shape1[1],
-                           shape2 = shape2[1])
-           } else {
-             0 * qstar
-           }
+              dbetabinom.ab(0:max(qstar), size = size[1],
+                            shape1 = shape1[1],
+                            shape2 = shape2[1])
+            } else {
+              0 * qstar
+            }
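+    # All of size/shape1/shape2 are (numerically) constant here, so the pmf
+    # is evaluated just once on 0:max(qstar) and each CDF value is a partial
+    # sum of 'temp'.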
       unq <- unique(qstar)
     for (ii in unq) {
-      index <- qstar == ii
+      index <- (qstar == ii)
       ans[index] <- if (ii >= 0) sum(temp[1:(1+ii)]) else 0
     }
   } else {
     for (ii in 1:LLL) {
       qstar <- floor(q[ii])
       ans[ii] <- if (qstar >= 0) {
-                  sum(dbetabinom.ab(x = 0:qstar, size = size[ii],
-                                    shape1 = shape1[ii],
-                                    shape2 = shape2[ii]))
-                } else 0
+                   sum(dbetabinom.ab(x = 0:qstar, size = size[ii],
+                                     shape1 = shape1[ii],
+                                     shape2 = shape2[ii]))
+                 } else 0
     }
   }
   if (log.p) log(ans) else ans
@@ -1534,8 +1586,7 @@ my.dbinom <- function(x,
 
  dbetabinom <- function(x, size, prob, rho = 0, log = FALSE) {
   dbetabinom.ab(x = x, size = size, shape1 = prob*(1-rho)/rho,
-                shape2 = (1-prob)*(1-rho)/rho, log = log,
-                .dontuse.prob = prob)
+                shape2 = (1-prob)*(1-rho)/rho, log = log)
 }
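+
+# Illustrative sketch (hypothetical values): the (prob, rho) parameterization
+# maps onto shape1 = prob*(1-rho)/rho and shape2 = (1-prob)*(1-rho)/rho,
+# so these two calls agree.
+if (FALSE) {
+  dbetabinom(2, size = 10, prob = 0.3, rho = 0.1)
+  dbetabinom.ab(2, size = 10, shape1 = 0.3 * 0.9 / 0.1,
+                              shape2 = 0.7 * 0.9 / 0.1)
+}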
 
 
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index ecf4f2c..531a3a0 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1021,7 +1021,7 @@ bilogistic.control <- function(save.weights = TRUE, ...) {
             "\n", "\n",
             "Means:     location1, location2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -1306,7 +1306,7 @@ rbilogis <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
                            bool = .independent ,
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list(.independent = independent, .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -3150,8 +3150,6 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
 
   trivial1 <- is.logical(eq.mean) && length(eq.mean) == 1 && !eq.mean
   trivial2 <- is.logical(eq.sd  ) && length(eq.sd  ) == 1 && !eq.sd
-  if (!trivial1 && !trivial2)
-    stop("only one of 'eq.mean' and 'eq.sd' can be assigned a value")
 
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
@@ -3167,17 +3165,45 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
             namesof("sd2",   lsd2,   earg = esd2   ), ", ",
             namesof("rho",   lrho,   earg = erho   )),
   constraints = eval(substitute(expression({
-    temp8.m <- diag(5)[, -2]
-    temp8.m[2, 1] <- 1
-    temp8.s <- diag(5)[, -4]
-    temp8.s[4, 3] <- 1
-    constraints <- cm.VGAM(temp8.m, x = x,
-                           bool = .eq.mean ,
-                           constraints = constraints, apply.int = TRUE)
-    constraints <- cm.VGAM(temp8.s, x = x,
-                           bool = .eq.sd ,
-                           constraints = constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+
+
+    constraints.orig <- constraints
+    M1 <- 5
+    NOS <- M / M1
+
+    cm1.m <-
+    cmk.m <- kronecker(diag(NOS), rbind(diag(2), matrix(0, 3, 2)))
+    con.m <- cm.VGAM(kronecker(diag(NOS), rbind(1, 1, 0, 0, 0)),
+                     x = x,
+                     bool = .eq.mean ,  #
+                     constraints = constraints.orig,
+                     apply.int = TRUE, 
+                     cm.default           = cmk.m,
+                     cm.intercept.default = cm1.m)
+
+
+    cm1.s <-
+    cmk.s <- kronecker(diag(NOS),
+                       rbind(matrix(0, 2, 2), diag(2), matrix(0, 1, 2)))
+    con.s <- cm.VGAM(kronecker(diag(NOS), rbind(0, 0, 1, 1, 0)),
+                     x = x,
+                     bool = .eq.sd ,  #
+                     constraints = constraints.orig,
+                     apply.int = TRUE,
+                     cm.default           = cmk.s,
+                     cm.intercept.default = cm1.s)
+
+
+    con.use <- con.m
+    for (klocal in 1:length(con.m)) {
+      con.use[[klocal]] <-
+        cbind(con.m[[klocal]],
+              con.s[[klocal]],
+              kronecker(matrix(1, NOS, 1), diag(5)[, 5]))
+
+    }
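+    # Each constraint matrix now holds its mean column(s) first, then its
+    # sd column(s), then a single column for rho (diag(5)[, 5]); eq.mean
+    # and eq.sd collapse the two mean/sd columns into one when requested.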
+
+    constraints <- cm.zero.VGAM(con.use, x = x, .zero , M = M)
   }), list( .zero = zero,
             .eq.sd   = eq.sd,
             .eq.mean = eq.mean ))),
@@ -3186,20 +3212,22 @@ rbinorm <- function(n, mean1 = 0, mean2 = 0,
     list(M1 = 5,
          Q1 = 2,
          eq.mean = .eq.mean ,
-         eq.sd   = .eq.sd   )
+         eq.sd   = .eq.sd   ,
+         zero    = .zero )
     }, list( .zero    = zero,
              .eq.mean = eq.mean,
              .eq.sd   = eq.sd    ))),
 
   initialize = eval(substitute(expression({
+    Q1 <- 2
 
     temp5 <-
     w.y.check(w = w, y = y,
               ncol.w.max = 1,
-              ncol.y.max = 2,
-              ncol.y.min = 2,
+              ncol.y.max = Q1,
+              ncol.y.min = Q1,
               out.wy = TRUE,
-              colsyperw = 2,
+              colsyperw = Q1,
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 2eb10e4..e680f98 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -97,7 +97,6 @@ Deviance.categorical.data.vgam <-
 
 
 
-
   if (ncol(y) == 1 || ncol(mu) == 1)
     stop("arguments 'y' and 'mu' must have at least 2 columns")
 
@@ -235,7 +234,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = Deviance.categorical.data.vgam,
 
@@ -432,7 +431,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
 
 
@@ -703,7 +702,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                            bool = .parallel ,
                            apply.int = TRUE,
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
     constraints <- cm.nointercept.VGAM(constraints, x, .nointercept , M)
   }), list( .parallel = parallel, .zero = zero,
             .nointercept = nointercept,
@@ -1313,7 +1312,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
 
   deviance = Deviance.categorical.data.vgam,
@@ -2026,7 +2025,7 @@ InverseBrat <-
                            bool = .parallel ,
                            apply.int = TRUE,
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     orig.y <- cbind(y)  # Convert y into a matrix if necessary
@@ -2224,10 +2223,11 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
   ii <- ii.save <- subset
   if (!is(object, "vglm"))
     stop("'object' is not a vglm() object")
-  if (!any(temp.logical <- is.element(c("multinomial","cumulative"),
-                                     object at family@vfamily)))
-    stop("'object' is not a 'multinomial' or 'cumulative' VGLM!")
-  model.multinomial <- temp.logical[1]
+  if (!any(temp.logical <-
+    is.element(c("multinomial", "cumulative", "acat"),
+               object at family@vfamily)))
+    stop("'object' is not a 'multinomial' or 'acat' or 'cumulative' VGLM!")
+  vfamily <- object at family@vfamily
   if (is(object, "vgam"))
     stop("'object' is a vgam() object")
   if (length(object at control$xij))
@@ -2244,7 +2244,7 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
   M <- object at misc$M  # ncol(B)  # length(pvec) - 1
 
 
-    if (model.multinomial) {
+    if (any(vfamily == "multinomial")) {
     rlev <- object at misc$refLevel
     cfit <- coefvlm(object, matrix.out = TRUE)
     B <- if (!length(rlev)) {
@@ -2301,6 +2301,21 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
         }
         ans
     }
+    } else if (any(vfamily == "acat")) {
+    stop("currently the 'acat' family is unsupported here")
+    reverse <- object at misc$reverse
+    linkfunctions <- object at misc$link
+    all.eargs <- object at misc$earg
+    B <- cfit <- coefvlm(object, matrix.out = TRUE)
+    ppp <- nrow(B)
+    etamat <- predict(object)  # nnn x M
+
+
+
+
+
+
+
     } else {
 
     if (is.logical(is.multivariateY <- object at misc$multiple.responses) &&
diff --git a/R/family.censored.R b/R/family.censored.R
index 3e29ee9..47d0bc2 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -159,7 +159,7 @@
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
     d2lambda.deta2 <- d2theta.deta2(theta = lambda,
-                                   link = .link, earg = .earg)
+                                    link = .link, earg = .earg )
     d2l.dlambda2 <- 1 / lambda # uncensored; Fisher scoring
 
     if (any(cenU)) {
@@ -362,7 +362,7 @@ if (FALSE)
                           namesof("sd", lsd, tag = TRUE), "\n",
             "Conditional variance: sd^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -626,6 +626,260 @@ if (FALSE)
 
 
 
+
+ weibull.mean <-
+  function(lmean = "loge", lshape = "loge",
+           imean = NULL,   ishape = NULL,
+           probs.y = c(0.2, 0.5, 0.8),
+           imethod = 1, zero = -2) {
+
+
+
+
+  imeann <- imean
+
+
+  lshape <- as.list(substitute(lshape))
+  eshape <- link2list(lshape)
+  lshape <- attr(eshape, "function.name")
+
+  lmeann <- as.list(substitute(lmean))
+  emeann <- link2list(lmeann)
+  lmeann <- attr(emeann, "function.name")
+
+
+  if (length(zero) &&
+      !is.Numeric(zero, integer.valued = TRUE))
+    stop("bad input for argument 'zero'")
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
+
+  if (!is.Numeric(probs.y, positive  = TRUE) ||
+      length(probs.y) < 2 ||
+      max(probs.y) >= 1)
+    stop("bad input for argument 'probs.y'")
+
+
+  if (length(ishape))
+    if (!is.Numeric(ishape, positive = TRUE))
+      stop("argument 'ishape' values must be positive")
+
+  if (length(imeann))
+    if (!is.Numeric(imeann, positive = TRUE))
+      stop("argument 'imean' values must be positive")
+
+  blurb.vec <- c(namesof("mean",  lmeann, earg = emeann),
+                 namesof("shape", lshape, earg = eshape))
+
+  new("vglmff",
+  blurb = c("Weibull distribution (parameterized by the mean)\n\n",
+            "Links:    ",
+            blurb.vec[1], ", ",
+            blurb.vec[2], "\n",
+            "Mean:     mean\n",
+            "Variance: mean^2 * (gamma(1 + 2/shape) / ",
+                      "gamma(1 + 1/shape)^2 - 1)"),
+ constraints = eval(substitute(expression({
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+  }), list( .zero = zero,
+            .lmeann = lmeann ))),
+
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 1,
+         expected = TRUE,
+         multipleResponses = TRUE,
+         zero = .zero )
+  }, list( .zero = zero ))),
+
+  initialize = eval(substitute(expression({
+
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+    ncoly <- ncol(y)
+    M1 <- 2
+    extra$ncoly <- ncoly
+    extra$M1 <- M1
+    M <- M1 * ncoly
+
+
+    if (is.SurvS4(y))
+      stop("only uncensored observations are allowed; ",
+           "don't use SurvS4()")
+
+
+    mynames1 <- param.names("mean" , ncoly)
+    mynames2 <- param.names("shape", ncoly)
+    predictors.names <-
+        c(namesof(mynames1, .lmeann , earg = .emeann , tag = FALSE),
+          namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
+
+
+    Meann.init <- matrix(if (length( .imeann )) .imeann else 0.5 * colMeans(y),
+                         n, ncoly, byrow = TRUE) + 0.5 * y
+    Shape.init <- matrix(if (length( .ishape )) .ishape else 0 + NA,
+                         n, ncoly, byrow = TRUE)
+
+    if (!length(etastart)) {
+      if (!length( .ishape ) ||
+          !length( .imeann )) {
+        for (ilocal in 1:ncoly) {
+
+          anyc <- FALSE  # extra$leftcensored | extra$rightcensored
+          i11 <- if ( .imethod == 1) anyc else FALSE  # Can be all data
+          probs.y <- .probs.y
+          xvec <- log(-log1p(-probs.y))
+          fit0 <- lsfit(x  = xvec,
+                        y  = log(quantile(y[!i11, ilocal],
+                                 probs = probs.y )))
+
+
+          if (!is.Numeric(Shape.init[, ilocal]))
+            Shape.init[, ilocal] <- 1 / fit0$coef["X"]
+        }  # ilocal
+
+        etastart <- 
+          cbind(theta2eta(Meann.init, .lmeann , earg = .emeann ),
+                theta2eta(Shape.init, .lshape , earg = .eshape ))[,
+                interleave.VGAM(M, M = M1)]
+      }
+    }
+  }), list( .lmeann = lmeann, .lshape = lshape,
+            .emeann = emeann, .eshape = eshape,
+            .imeann = imeann, .ishape = ishape,
+            .probs.y = probs.y,
+            .imethod = imethod ) )),
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    Meann <- eta2theta(eta[, c(TRUE, FALSE)], .lmeann , earg = .emeann )
+    Meann
+  }, list( .lmeann = lmeann, .lshape = lshape,
+           .emeann = emeann, .eshape = eshape ) )),
+  last = eval(substitute(expression({
+    regnotok <- any(Shape <= 2)
+    if (any(Shape <= 1)) {
+      warning("MLE regularity conditions are violated",
+              "(shape <= 1) at the final iteration: ",
+              "MLEs are not consistent")
+    } else if (any(1 < Shape & Shape < 2)) {
+      warning("MLE regularity conditions are violated",
+              "(1 < shape < 2) at the final iteration: ",
+              "MLEs exist but are not asymptotically normal")
+    } else if (any(2 == Shape)) {
+      warning("MLE regularity conditions are violated",
+              "(shape == 2) at the final iteration: ",
+              "MLEs exist and are normal and asymptotically ",
+              "efficient but with a slower convergence rate than when ",
+              "shape > 2")
+    }
+
+
+
+    M1 <- extra$M1
+    avector <- c(rep( .lmeann , length = ncoly),
+                 rep( .lshape , length = ncoly))
+    misc$link <- avector[interleave.VGAM(M, M = M1)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = M1)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:ncoly) {
+      misc$earg[[M1*ii-1]] <- .emeann
+      misc$earg[[M1*ii  ]] <- .eshape
+    }
+
+    misc$M1 <- M1
+    misc$imethod <- .imethod
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+
+
+    misc$RegCondOK <- !regnotok # Save this for later
+    misc$expected <- TRUE   # all(cen0)
+  }), list( .lmeann = lmeann, .lshape = lshape,
+            .emeann = emeann, .eshape = eshape,
+            .imethod = imethod ) )),
+  loglikelihood = eval(substitute(
+          function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    Meann <- eta2theta(eta[, c(TRUE, FALSE)], .lmeann , earg = .emeann )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(c(w) * dweibull(x = y, shape = Shape,
+                          scale = Meann / gamma(1 + 1/Shape),
+                          log = TRUE))
+    }
+  }, list( .lmeann = lmeann, .lshape = lshape,
+           .emeann = emeann, .eshape = eshape ) )),
+  vfamily = c("weibull.mean"),
+  deriv = eval(substitute(expression({
+    M1 <- 2
+    Meann <- eta2theta(eta[, c(TRUE, FALSE)], .lmeann , earg = .emeann )
+    Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+
+    if (FALSE) {
+    } else {
+      EulerM <- -digamma(1.0)
+      AA <- (EulerM - 1)^2 + (pi^2) / 6
+      BB <- digamma(1 + 1/Shape)
+      CC <- y * gamma(1 + 1/Shape) / Meann
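+      # Note: CC equals y / scale, since scale = mean / gamma(1 + 1/shape)
+      # under this parameterization.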
+      dl.dmeann <- (CC^Shape - 1) * Shape / Meann  # Agrees
+      dl.dshape <- 1/Shape -
+                   (log(y/Meann) + lgamma(1 + 1/Shape)) * (CC^Shape - 1) + 
+                   (BB / Shape) * (CC^Shape - 1)
+    }
+
+
+    dmeann.deta <- dtheta.deta(Meann, .lmeann , earg = .emeann )
+    dshape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
+
+    myderiv <- c(w) * cbind(dl.dmeann * dmeann.deta,
+                            dl.dshape * dshape.deta)
+    myderiv[, interleave.VGAM(M, M = M1)]
+  }), list( .lmeann = lmeann, .lshape = lshape,
+            .emeann = emeann, .eshape = eshape ) )),
+  weight = eval(substitute(expression({
+
+    if (FALSE) {
+    } else {
+      ned2l.dmeann <- (Shape / Meann)^2  #
+      ned2l.dshape <- AA / Shape^2  # Unchanged
+      ned2l.dshapemeann <- (EulerM - 1 + BB) / Meann
+    }
+
+
+    wz <- array(c(c(w) * ned2l.dmeann * dmeann.deta^2,
+                  c(w) * ned2l.dshape * dshape.deta^2,
+                  c(w) * ned2l.dshapemeann * dmeann.deta * dshape.deta),
+                dim = c(n, M / M1, 3))
+    wz <- arwz2wz(wz, M = M, M1 = M1)
+
+
+    wz
+  }), list( .eshape = eshape ))))
+}
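+
+
+# Illustrative sketch (hypothetical data frame 'wdata'): fitting the
+# mean-parameterized Weibull and reading off the fitted mean and shape.
+if (FALSE) {
+  wdata <- data.frame(y1 = rweibull(500, shape = 2, scale = 3))
+  fit1 <- vglm(y1 ~ 1, weibull.mean, data = wdata, trace = TRUE)
+  coef(fit1, matrix = TRUE)  # loge(mean) and loge(shape) columns
+}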
+
+
+
+
+
+
  weibullR <-
   function(lscale = "loge", lshape = "loge",
            iscale = NULL,   ishape = NULL,
@@ -850,7 +1104,7 @@ if (FALSE)
 
     if (residuals) stop("loglikelihood residuals not ",
                         "implemented yet") else {
-      sum(c(w) * dweibull(x = y, shape = Shape, scale = Scale, log = TRUE))
+      sum(c(w) * dweibull(y, shape = Shape, scale = Scale, log = TRUE))
     }
   }, list( .lscale = lscale, .lshape = lshape,
            .escale = escale, .eshape = eshape,
@@ -881,10 +1135,16 @@ if (FALSE)
     EulerM <- -digamma(1.0)
 
 
-    ned2l.dshape <- (6*(EulerM - 1)^2 + pi^2)/(6*Shape^2)  # KK (2003)
+
+
+
     ned2l.dscale <- (Shape / Scale)^2
+    ned2l.dshape <- (6*(EulerM - 1)^2 + pi^2)/(6*Shape^2)  # KK (2003)
     ned2l.dshapescale <- (EulerM-1) / Scale
 
+
+
+
     wz <- if ( .lss )
             array(c(c(w) * ned2l.dscale * dscale.deta^2,
                     c(w) * ned2l.dshape * dshape.deta^2,
diff --git a/R/family.circular.R b/R/family.circular.R
index e597e04..26c0531 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -215,7 +215,7 @@ cardioid.control <- function(save.weights = TRUE, ...) {
             "pi + (rho/pi) *",
             "((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -381,7 +381,7 @@ cardioid.control <- function(save.weights = TRUE, ...) {
             "\n", "\n",
             "Mean:     location"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
diff --git a/R/family.exp.R b/R/family.exp.R
index b5297f1..1fc1bde 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -566,7 +566,7 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
             "Mean:     location\n",
             "Variance: infinite"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 7dfa3eb..1db2f36 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -284,7 +284,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0,
             namesof("scale",    lscale, escale), ", ",
             namesof("shape",    lshape, eshape)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 3,
@@ -761,7 +761,7 @@ dgammadx <- function(x, deriv.arg = 1) {
           namesof("scale",    link = lscale, earg = escale), ", ",
           namesof("shape",    link = lshape, earg = eshape)),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 3,
@@ -1154,7 +1154,7 @@ pgumbel <- function(q, location = 0, scale = 1,
             namesof("location", llocat,  earg = elocat ), ", ",
             namesof("scale",    lscale, earg = escale )),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2054,7 +2054,7 @@ setMethod("guplot", "vlm",
             "Mean:     location + scale*0.5772..\n",
             "Variance: pi^2 * scale^2 / 6"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- cbind(y)
@@ -2211,7 +2211,7 @@ setMethod("guplot", "vlm",
             "Mean:     location + scale*0.5772..\n",
             "Variance: pi^2 * scale^2 / 6"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- cbind(y)
@@ -2509,7 +2509,7 @@ frechet.control <- function(save.weights = TRUE, ...) {
             namesof("scale", link = lscale, earg = escale ), ", ",
             namesof("shape", link = lshape, earg = eshape )),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -2720,7 +2720,7 @@ rec.normal.control <- function(save.weights = TRUE, ...) {
             "\n",
             "Variance: sd^2"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 8f4e045..4d57e30 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -358,7 +358,7 @@
            .zero = zero ))),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
 
@@ -695,11 +695,11 @@
           eta2theta(eta[, 2], link = .linkf , earg = .eargf ) else 0
 
     if ( .inbreeding ) {
-      dP1 <- cbind(fp + 2*pA*(1-fp),
-                    2*(1-fp)*(1-2*pA),
-                   -2*(1-pA) + fp*(1-2*pA))
+      dP1 <- cbind(2*pA*(1-fp) + fp,
+                   2*(1-fp)*(1-2*pA),
+                  -2*(1-pA) + fp*(1-2*pA))
       dP2 <- cbind(pA*(1-pA),
-                   -2*pA*(1-pA),
+                  -2*pA*(1-pA),
                    pA*(1-pA))
       dl1 <- rowSums(y * dP1 / mu)
       dl2 <- rowSums(y * dP2 / mu)
@@ -710,9 +710,9 @@
       c(w) * cbind(dPP.deta * dl1,
                    dfp.deta * dl2)      
     } else {
-      nAA <- w * y[, 1]
-      nAa <- w * y[, 2]
-      naa <- w * y[, 3]
+      nAA <- c(w) * y[, 1]
+      nAa <- c(w) * y[, 2]
+      naa <- c(w) * y[, 3]
       dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
       dpA.deta <- dtheta.deta(pA, link = .linkp , earg = .eargp )
       dl.dpA * dpA.deta
@@ -735,8 +735,8 @@
         }
       c(w) * wz
     } else {
-      ned2l.dp2 <- (2*nAA + nAa) / pA^2 + (nAa + 2*naa) / (1-pA)^2
-      wz <- cbind((dpA.deta^2) * ned2l.dp2)
+      ned2l.dp2 <- 2 / (pA * (1-pA))
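+      # Expected (not observed) information: under HWE, E(2*nAA + nAa) =
+      # 2*w*pA and E(nAa + 2*naa) = 2*w*(1-pA), so E(-d2l/dpA^2) =
+      # 2*w / (pA*(1-pA)); the weights w are multiplied in below.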
+      wz <- cbind(c(w) * ned2l.dp2 * dpA.deta^2)
       wz
     }
   }), list( .linkp = linkp, .linkf = linkf,
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 0bc2452..c639c6e 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -58,7 +58,7 @@
                            constraints = constraints,
                            apply.int = .apply.parint )
 
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
   infos = eval(substitute(function(...) {
@@ -339,7 +339,12 @@
 
       varY <- mu * (1 - mu) / w  # Is a matrix if M>1. Seems the most correct.
       d1.ADJ <-   dtheta.deta(mu, .link , earg = .earg )
-      d2.ADJ <- d2theta.deta2(mu, .link , earg = .earg )
+
+      temp.earg <- .earg
+      temp.earg$inverse <- FALSE
+      temp.earg$inverse <- TRUE
+      d2.ADJ <- d2theta.deta2(mu, .link , earg = temp.earg )
+
 
 
       yBRED <- y + matrix(Hvector, n, M, byrow = TRUE) *
@@ -389,16 +394,28 @@
 
 
 
-  if (!multiple.responses)
+
+
+
     ans at deviance <- 
-      function(mu, y, w, residuals = FALSE, eta, extra = NULL,
-               summation = TRUE) {
-    Deviance.categorical.data.vgam(mu = cbind(mu, 1-mu),
-                                   y = cbind(y, 1-y),
-                                   w = w, residuals = residuals,
-                                   eta = eta, extra = extra,
-                                   summation = summation)
-  }
+      if (multiple.responses)
+        function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+                 summation = TRUE) {
+      Deviance.categorical.data.vgam(mu  = mu,
+                                     y   = y,
+                                     w   = w, residuals = residuals,
+                                     eta = eta, extra = extra,
+                                     summation = summation)
+        } else
+        function(mu, y, w, residuals = FALSE, eta, extra = NULL,
+                 summation = TRUE) {
+      Deviance.categorical.data.vgam(mu  = cbind(mu, 1-mu),
+                                     y   = cbind(y , 1-y),
+                                     w   = w, residuals = residuals,
+                                     eta = eta, extra = extra,
+                                     summation = summation)
+        }
+
 
   ans
 }
@@ -766,7 +783,7 @@ rinv.gaussian <- function(n, mu, lambda) {
                            constraints = constraints,
                            apply.int = .apply.parint )
 
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
   infos = eval(substitute(function(...) {
@@ -968,7 +985,7 @@ rinv.gaussian <- function(n, mu, lambda) {
     constraints <- cm.VGAM(matrix(1, M, 1), x = x, 
                            bool = .parallel , 
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
 
   deviance =
@@ -1272,7 +1289,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             "Mean:     ", "mean\n",
             "Variance: mean / dispersion"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -1374,8 +1391,9 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 
- double.expbinomial <- function(lmean = "logit", ldispersion = "logit",
-                          idispersion = 0.25, zero = 2) {
+ double.expbinomial <-
+  function(lmean = "logit", ldispersion = "logit",
+           idispersion = 0.25, zero = 2) {
 
   lmean <- as.list(substitute(lmean))
   emean <- link2list(lmean)
@@ -1398,7 +1416,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             namesof("dispersion", ldisp, earg = edisp), "\n",
             "Mean:     ", "mean\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (!all(w == 1))
@@ -1420,8 +1438,7 @@ rinv.gaussian <- function(n, mu, lambda) {
           y[w == 0] <- 0
           if (!all(y == 0 | y == 1))
             stop("response values 'y' must be 0 or 1")
-          init.mu <-
-          mustart <- (0.5 + w * y) / (1 + w)
+          init.mu <- (0.5 + w * y) / (1 + w)
 
 
           no.successes <- y
@@ -1438,8 +1455,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             nvec <- y[, 1] + y[, 2]
             y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
             w <- w * nvec
-            init.mu <-
-            mustart <- (0.5 + nvec * y) / (1 + nvec)
+            init.mu <- (0.5 + nvec * y) / (1 + nvec)
         } else
             stop("for the double.expbinomial family, response 'y' must be a ",
                  "vector of 0 and 1's\n",
@@ -1449,15 +1465,11 @@ rinv.gaussian <- function(n, mu, lambda) {
                      "successes and col 2 is the no. of failures")
 
     dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
-    dn2 <- if (length(dn2)) {
-        paste("E[", dn2, "]", sep = "") 
-    } else {
-        "mu"
-    }
+    dn2 <- if (length(dn2)) paste("E[", dn2, "]", sep = "") else "mu"
 
     predictors.names <-
-    c(namesof(dn2,          .lmean, earg = .emean, short = TRUE),
-      namesof("dispersion", .ldisp, earg = .edisp, short = TRUE))
+    c(namesof(dn2,          .lmean , earg = .emean , short = TRUE),
+      namesof("dispersion", .ldisp , earg = .edisp , short = TRUE))
 
     tmp2 <- rep( .idisp , len = n)
 
@@ -1468,7 +1480,7 @@ rinv.gaussian <- function(n, mu, lambda) {
             .ldisp = ldisp, .edisp = edisp,
             .idisp = idisp ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    eta2theta(eta[, 1], link = .lmean, earg = .emean)
+    eta2theta(eta[, 1], link = .lmean , earg = .emean )
   }, list( .lmean = lmean, .emean = emean,
            .ldisp = ldisp, .edisp = edisp ))),
   last = eval(substitute(expression({
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 70aac99..2b93cea 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -28,7 +28,7 @@
                            apply.int = TRUE,
                            cm.default           = cm.intercept.default,
                            cm.intercept.default = cm.intercept.default)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   initialize = expression({
 
@@ -161,7 +161,7 @@
                            apply.int = TRUE,
                            cm.default           = cm.intercept.default,
                            cm.intercept.default = cm.intercept.default)
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .exchangeable = exchangeable, .zero = zero ))),
   initialize = expression({
     predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
diff --git a/R/family.mixture.R b/R/family.mixture.R
index 821b8ba..c43b162 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -91,7 +91,7 @@ mix2normal.control <- function(trace = TRUE, ...) {
                            bool = .eq.sd ,
                            constraints = constraints,
                            apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero, .eq.sd = eq.sd ))),
   initialize = eval(substitute(expression({
 
@@ -320,7 +320,7 @@ mix2poisson.control <- function(trace = TRUE, ...) {
             namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
             "Mean:     phi*lambda1 + (1 - phi)*lambda2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -492,6 +492,8 @@ mix2exp.control <- function(trace = TRUE, ...) {
   list(trace = trace)
 }
 
+
+
  mix2exp <- function(lphi = "logit", llambda = "loge",
                      iphi = 0.5, il1 = NULL, il2 = NULL,
                      qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1) {
@@ -535,7 +537,7 @@ mix2exp.control <- function(trace = TRUE, ...) {
             "Mean:     phi / lambda1 + (1 - phi) / lambda2\n"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index 385272a..bdef4f1 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -131,7 +131,7 @@ micmen.control <- function(save.weights = TRUE, ...) {
             "Variance: constant"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M = 2)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = 2)
   }), list( .zero = zero))),
 
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -409,7 +409,7 @@ skira.control <- function(save.weights = TRUE, ...) {
             namesof("theta1", link1, earg = earg1), ", ",
             namesof("theta2", link2, earg = earg2)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M = 2)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = 2)
   }), list( .zero = zero ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     M <- if (is.matrix(y))
diff --git a/R/family.normal.R b/R/family.normal.R
index 54fa678..c171898 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -56,7 +56,7 @@ VGAM.weights.function <- function(w, M, n) {
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel , 
                            constraints = constraints)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel = parallel, .zero = zero ))),
   deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
     M <- if (is.matrix(y)) ncol(y) else 1
@@ -77,7 +77,8 @@ VGAM.weights.function <- function(w, M, n) {
   infos = eval(substitute(function(...) {
     list(M1 = 1,
          Q1 = 1,
-         zero = .zero)
+         multipleResponses = TRUE,
+         zero = .zero )
   }, list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -89,6 +90,7 @@ VGAM.weights.function <- function(w, M, n) {
        (is.logical( .parallel ) && .parallel )))
         stop("cannot handle non-default arguments for cqo() and cao()")
 
+
     temp5 <-
     w.y.check(w = w, y = y,
               ncol.w.max = Inf,
@@ -99,7 +101,6 @@ VGAM.weights.function <- function(w, M, n) {
     w <- temp5$w
     y <- temp5$y
 
-
     M <- if (is.matrix(y)) ncol(y) else 1
     dy <- dimnames(y)
 
@@ -288,6 +289,8 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
 
 
+
+if (FALSE)
  posnormal.control <- function(save.weights = TRUE, ...) {
   list(save.weights = save.weights)
 }
@@ -296,11 +299,21 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
 
  posnormal <- function(lmean = "identitylink", lsd = "loge",
-                        imean = NULL, isd = NULL,
-                        nsimEIM = 100, zero = NULL) {
- warning("this VGAM family function is not working properly yet")
+                       eq.mean = FALSE, eq.sd = FALSE,
+                       gmean = exp((-5:5)/2), gsd = exp((-1:5)/2),
+                       imean = NULL, isd = NULL, probs.y = 0.10,
+                       imethod = 1,
+                       nsimEIM = NULL, zero = -2) {
+
+
 
 
+
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+      imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
+
   lmean <- as.list(substitute(lmean))
   emean <- link2list(lmean)
   lmean <- attr(emean, "function.name")
@@ -309,10 +322,13 @@ rposnorm <- function(n, mean = 0, sd = 1) {
   esd <- link2list(lsd)
   lsd <- attr(esd, "function.name")
 
-
+  if (!is.logical(eq.mean) || length(eq.mean) != 1)
+    stop("bad input for argument 'eq.mean'")
+  if (!is.logical(eq.sd  ) || length(eq.sd  ) != 1)
+    stop("bad input for argument 'eq.sd'")
 
   if (length(zero) &&
-      !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+      !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
   if (length(isd) &&
       !is.Numeric(isd, positive = TRUE))
@@ -331,63 +347,196 @@ rposnorm <- function(n, mean = 0, sd = 1) {
           "Links:    ",
           namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
           namesof("sd",   lsd,   earg = esd,   tag = TRUE)),
+
+
+
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
-  }), list( .zero = zero ))),
+
+
+    constraints.orig <- constraints
+    M1 <- 2
+    NOS <- M / M1
+
+    cm1.m <-
+    cmk.m <- kronecker(diag(NOS), rbind(1, 0))
+    con.m <- cm.VGAM(kronecker(matrix(1, NOS, 1), rbind(1, 0)),
+                     x = x,
+                     bool = .eq.mean ,  #
+                     constraints = constraints.orig,
+                     apply.int = TRUE,
+                     cm.default           = cmk.m,
+                     cm.intercept.default = cm1.m)
+
+
+    cm1.s <-
+    cmk.s <- kronecker(diag(NOS), rbind(0, 1))
+    con.s <- cm.VGAM(kronecker(matrix(1, NOS, 1), rbind(0, 1)),
+                     x = x,
+                     bool = .eq.sd ,  #
+                     constraints = constraints.orig,
+                     apply.int = TRUE,
+                     cm.default           = cmk.s,
+                     cm.intercept.default = cm1.s)
+
+
+    con.use <- con.m
+    for (klocal in 1:length(con.m)) {
+
+
+      con.use[[klocal]] <- interleave.cmat(con.m[[klocal]], con.s[[klocal]])
+
+    }
+    constraints <- con.use
+
+    dotzero <- .zero
+    M1 <- 2
+    eval(negzero.expression.VGAM)
+
+
+  }), list( .zero    = zero,
+            .eq.sd   = eq.sd,
+            .eq.mean = eq.mean ))),
+
+
+
+
+
+
+
+
+
   infos = eval(substitute(function(...) {
     list(M1 = 2,
+         Q1 = 1,
+         eq.mean = .eq.mean ,
+         eq.sd   = .eq.sd   ,
+         multipleResponses = TRUE,
          par.names = c("mean", "sd"),
          zero = .zero )
-  }, list( .zero = zero
+  }, list( .zero = zero,
+           .eq.mean = eq.mean,
+           .eq.sd   = eq.sd
          ))),
 
 
 
 
   initialize = eval(substitute(expression({
-
+    M1 <- 2
     temp5 <-
     w.y.check(w = w, y = y,
               Is.positive.y = TRUE,
-              ncol.w.max = 1,
-              ncol.y.max = 1,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
               out.wy = TRUE,
+              colsyperw = 1,
               maximize = TRUE)
     w <- temp5$w
     y <- temp5$y
+    NOS <- ncol(y)
+    M <- NOS * M1
 
+    mean.names  <- param.names("mean",     NOS)
+    sdev.names  <- param.names("sd",       NOS)
 
     predictors.names <-
-      c(namesof("mean", .lmean , earg = .emean, tag = FALSE),
-        namesof("sd",   .lsd ,   earg = .esd,   tag = FALSE))
+      c(namesof(mean.names , .lmean     , earg = .emean     , tag = FALSE),
+        namesof(sdev.names , .lsd       , earg = .esd       , tag = FALSE))
+    predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
 
     if (!length(etastart)) {
-      init.me <- if (length( .imean)) rep( .imean, len = n) else NULL
-      init.sd <- if (length( .isd  )) rep( .isd  , len = n) else NULL
-      if (!length(init.me))
-        init.me <- rep(quantile(y, probs = 0.40), len = n)
-      if (!length(init.sd))
-        init.sd <- rep(sd(c(y)) * 1.2, len = n)
+      init.me <- matrix( if (length( .imean )) .imean else as.numeric(NA),
+                        n, NOS, byrow = TRUE)
+      init.sd <-  matrix( if (length( .isd  )) .isd   else as.numeric(NA),
+                        n, NOS, byrow = TRUE)
+
+      mean.grid.orig <- .gmean
+      sdev.grid.orig <- .gsd
+
+
+      for (jay in 1:NOS) {
+        yvec <- y[, jay]
+        wvec <- w[, jay]
+        if (any(is.na(init.me[, jay]))) {
+          init.me[, jay] <- if ( .imethod == 1) {
+            weighted.mean(yvec, wvec)
+          } else if ( .imethod == 2) {
+            quantile(yvec, probs = .probs.y )
+          } else if ( .imethod == 3) {
+            median(yvec)
+          }
+        }
+        if (any(is.na(init.sd[, jay])))
+          init.sd[, jay] <- sd(yvec)
+
+
+        ll.posnormal <- function(sdev.val, y, x, w, extraargs) {
+          ans <-
+          sum(c(w) * dposnorm(x = y, mean = extraargs$Mean,
+                              sd = sdev.val, log = TRUE))
+          ans
+        }
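+        # Crude profile search: for each candidate mean in 'mean.grid',
+        # grid.search() maximizes the log-likelihood over 'sdev.grid' and,
+        # with ret.objfun = TRUE, also returns that maximized value; the
+        # best (mean, sd) pair is then used for the initial values.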
+
+
+        sdev.grid <- sdev.grid.orig * init.sd[1, jay]
+        mean.grid <- mean.grid.orig * init.me[1, jay]
+        mean.grid <- sort(c(-mean.grid,
+                             mean.grid))
+        allmat1 <- expand.grid(Mean = mean.grid)
+        allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+
+        for (iloc in 1:nrow(allmat1)) {
+          allmat2[iloc, ] <-
+            grid.search(sdev.grid, objfun = ll.posnormal,
+                        y = yvec, x = x, w = wvec,
+                        ret.objfun = TRUE,  # 2nd value is the loglik
+                        extraargs = list(Mean = allmat1[iloc, "Mean"]))
+        }
+        ind5 <- which.max(allmat2[, 2])  # 2nd value is the loglik
+
+        if (!length( .imean ))
+          init.me[, jay] <- allmat1[ind5, "Mean"]
+        if (!length( .isd   ))
+          init.sd[, jay] <- allmat2[ind5, 1]
+      }  # jay
+
+
+
+
       etastart <- cbind(theta2eta(init.me, .lmean , earg = .emean ),
-                        theta2eta(init.sd, .lsd ,   earg = .esd ))
+                        theta2eta(init.sd, .lsd ,   earg = .esd   ))
+      etastart <- etastart[, interleave.VGAM(M, M = M1)]
+
     }
   }), list( .lmean = lmean, .lsd = lsd,
+            .emean = emean, .esd = esd,
+            .gmean = gmean, .gsd = gsd,
             .imean = imean, .isd = isd,
-            .emean = emean, .esd = esd
+            .imethod = imethod, .probs.y = probs.y
            ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
-    mysd <- eta2theta(eta[, 2], .lsd ,   earg = .esd )
+    mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmean , earg = .emean )
+    mysd <- eta2theta(eta[, c(FALSE, TRUE)], .lsd   , earg = .esd  )
     mymu + mysd * dnorm(-mymu/mysd) / pnorm(mymu/mysd)
   }, list( .lmean = lmean, .lsd = lsd,
            .emean = emean, .esd = esd
          ))),
   last = eval(substitute(expression({
-    misc$link <-    c("mean" = .lmean , "sd" = .lsd )
+    misc$link <- c(rep( .lmean , length = NOS),
+                   rep( .lsd   , length = NOS))[interleave.VGAM(M, M = M1)]
+    temp.names <- c(mean.names, sdev.names)
+    names(misc$link) <- temp.names[interleave.VGAM(M, M = M1)]
 
-    misc$earg <- list("mean" = .emean , "sd" = .esd )
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for (ii in 1:NOS) {
+      misc$earg[[M1*ii-1]] <- .emean
+      misc$earg[[M1*ii  ]] <- .esd
+    }
+
+    misc$expected          <- TRUE
+    misc$multipleResponses <- TRUE
 
-    misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
   }), list( .lmean = lmean, .lsd = lsd,
             .emean = emean, .esd = esd,
@@ -396,12 +545,11 @@ rposnorm <- function(n, mean = 0, sd = 1) {
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
              summation = TRUE) {
-    mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
-    mysd <- eta2theta(eta[, 2], .lsd ,   earg = .esd )
+    mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmean , earg = .emean )
+    mysd <- eta2theta(eta[, c(FALSE, TRUE)], .lsd   , earg = .esd  )
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-
       ll.elts <- c(w) * dposnorm(x = y, m = mymu, sd = mysd, log = TRUE)
       if (summation) {
         sum(ll.elts)
@@ -417,14 +565,13 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
   simslot = eval(substitute(
   function(object, nsim) {
-
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
     if (any(pwts != 1)) 
       warning("ignoring prior weights")
     eta <- predict(object)
-    mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
-    mysd <- eta2theta(eta[, 2], .lsd   , earg = .esd   )
+    mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmean , earg = .emean )
+    mysd <- eta2theta(eta[, c(FALSE, TRUE)], .lsd   , earg = .esd   )
     rposnorm(nsim * length(mymu), mean = mymu, sd = mysd)
   }, list( .lmean = lmean, .lsd = lsd,
            .emean = emean, .esd = esd ))),
@@ -436,56 +583,97 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
 
   deriv = eval(substitute(expression({
-    mymu <- eta2theta(eta[, 1], .lmean , earg = .emean )
-    mysd <- eta2theta(eta[, 2], .lsd ,  earg = .esd )
+    mymu <- eta2theta(eta[, c(TRUE, FALSE)], .lmean , earg = .emean )
+    mysd <- eta2theta(eta[, c(FALSE, TRUE)], .lsd   , earg = .esd   )
+
 
     zedd <- (y-mymu) / mysd
-    temp7 <- dnorm(-mymu/mysd)
-    temp8 <- pnorm(mymu/mysd) * mysd
+    temp0 <-   mymu  / mysd
+    imratio <- dnorm(temp0) / pnorm(temp0)
 
-    dl.dmu <- zedd / mysd^2 - temp7 / temp8
-    dl.dsd <- (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
+    dl.dmu <- (zedd - imratio) / mysd
+    dl.dsd <- (temp0 * imratio + zedd^2 - 1) / mysd
 
     dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
-    dsd.deta <- dtheta.deta(mysd, .lsd ,   earg = .esd )
+    dsd.deta <- dtheta.deta(mysd, .lsd   , earg = .esd   )
     dthetas.detas <- cbind(dmu.deta, dsd.deta)
-    c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
+    myderiv <- c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
+    myderiv <- myderiv[, interleave.VGAM(M, M = M1)]
+    myderiv
   }), list( .lmean = lmean, .lsd = lsd,
             .emean = emean, .esd = esd ))),
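+  # Editor's note (illustrative, not part of the package source): writing
+  # z = (y - mu)/sd and r = dnorm(mu/sd)/pnorm(mu/sd) (an inverse Mills
+  # ratio), the scores above are dl/dmu = (z - r)/sd and
+  # dl/dsd = ((mu/sd)*r + z^2 - 1)/sd.  They can be checked against a
+  # central difference of log dposnorm(), assuming VGAM is attached:
+  #   mu <- 1; sdev <- 2; y <- 3; h <- 1e-6
+  #   (dposnorm(y, mu + h, sdev, log = TRUE) -
+  #    dposnorm(y, mu - h, sdev, log = TRUE)) / (2 * h)       # numerical
+  #   ((y - mu)/sdev - dnorm(mu/sdev)/pnorm(mu/sdev)) / sdev  # analytic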
   weight = eval(substitute(expression({
     run.varcov <- 0
     ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
     if (length( .nsimEIM )) {
-        for (ii in 1:( .nsimEIM )) {
-          ysim <- rposnorm(n, m=mymu, sd = mysd)
-          zedd <- (ysim-mymu) / mysd
-          temp7 <- dnorm(-mymu/mysd)
-          temp8 <- pnorm(mymu/mysd) * mysd
-          dl.dmu <- zedd / mysd^2 - temp7 / temp8
-          dl.dsd <- (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
-
-          rm(ysim)
-          temp3 <- matrix(c(dl.dmu, dl.dsd), n, 2)
-          run.varcov <- ((ii-1) * run.varcov +
-               temp3[, ind1$row.index]*temp3[, ind1$col.index]) / ii
+
+
+
+      NOS <- M / M1
+      dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = M1)]
+
+      wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal' 
+
+      ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
+
+      for (spp. in 1:NOS) {
+        run.varcov <- 0
+        Mymu <- mymu[, spp.]
+        Mysd <- mysd[, spp.]
+
+      for (ii in 1:( .nsimEIM )) {
+        ysim <- rposnorm(n, m = Mymu, sd = Mysd)
+
+
+        zedd <- (ysim - Mymu) / Mysd
+        Temp0 <- Mymu / Mysd
+        Imratio <- dnorm(Temp0) / pnorm(Temp0)
+        dl.dmu <- (zedd - Imratio) / Mysd
+        dl.dsd <- (Temp0 * Imratio + zedd^2 - 1) / Mysd
+
+        
+        temp7 <- cbind(dl.dmu, dl.dsd)
+        run.varcov <- run.varcov +
+                      temp7[, ind1$row.index] *
+                      temp7[, ind1$col.index]
       }
-        wz <- if (intercept.only)
-            matrix(colMeans(run.varcov),
-                   n, ncol(run.varcov), byrow = TRUE) else run.varcov
+      run.varcov <- cbind(run.varcov / .nsimEIM )
+
+
+
+      wz1 <- if (intercept.only)
+          matrix(colMeans(run.varcov),
+                 nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
+          run.varcov
+
+      wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
+                   dThetas.detas[, M1 * (spp. - 1) + ind1$col]
+
+
+      for (jay in 1:M1)
+        for (kay in jay:M1) {
+          cptr <- iam((spp. - 1) * M1 + jay,
+                      (spp. - 1) * M1 + kay,
+                      M = M)
+          wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
+        }
+      }  # End of for (spp.) loop
+
+
+
+      wz <- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / M1)
+
 
-      wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
-      wz <- c(w) * matrix(wz, n, dimm(M))
     } else {
-      wz <- matrix(as.numeric(NA), n, dimm(M))
-      ned2l.dmu2 <- (1 - temp7*mymu/temp8) / mysd^2  - (temp7/temp8)^2
-      ned2l.dmusd <- (temp7 /(mysd * temp8)) * (1 + (mymu/mysd)^2 +
-                    mymu*temp7 / temp8)
-      ned2l.dsd2 <- 2 / mysd^2  - (temp7 * mymu /(mysd^2 * temp8)) *
-                   (1 + (mymu/mysd)^2 + mymu*temp7/temp8)
-      wz[, iam(1, 1, M)] <- ned2l.dmu2  * dmu.deta^2
-      wz[, iam(2, 2, M)] <- ned2l.dsd2  * dsd.deta^2
-      wz[, iam(1, 2, M)] <- ned2l.dmusd * dsd.deta * dmu.deta
-      wz = c(w) * wz
+
+      ned2l.dmu2 <- (1 - imratio * (temp0 + imratio)) / mysd^2
+      ned2l.dmusd <- imratio * (1 + temp0 * (temp0 + imratio)) / mysd^2
+      ned2l.dsd2 <- (2 - imratio * (temp0 * (1 + temp0 *
+                    (temp0 + imratio)))) / mysd^2
+  
+      wz <- array(c(c(w) * ned2l.dmu2  * dmu.deta^2,
+                    c(w) * ned2l.dsd2  * dsd.deta^2,
+                    c(w) * ned2l.dmusd * dmu.deta * dsd.deta),
+                  dim = c(n, M/M1, M1*(M1+1)/2))
+      wz <- arwz2wz(wz, M = M, M1 = M1)
     }
     wz
   }), list( .lmean = lmean, .lsd = lsd,
@@ -496,6 +684,7 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
 
 
+
 dbetanorm <- function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -579,7 +768,7 @@ ptikuv <- function(q, d, mean = 0, sigma = 1,
 
   if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
     stop("bad input for argument 'log.p'")
-  rm(log.p)   # 20141231 KaiH
+  rm(log.p)  # 20141231 KaiH
 
   L <- max(length(q), length(mean), length(sigma))
   if (length(q)     != L) q     <- rep(q,     len = L)
@@ -741,12 +930,13 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
           "\n", "\n",
           "Mean:     mean"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
-         zero = .zero)
+         multipleResponses = FALSE,
+         zero = .zero )
   }, list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -858,6 +1048,7 @@ rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
 
 
 
+
 dfoldnorm <- function(x, mean = 0, sd = 1, a1 = 1, a2 = 1,
                       log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
@@ -887,7 +1078,6 @@ pfoldnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2 = 1,
     stop("bad input for argument 'log.p'")
 
 
-
   if (lower.tail) {
     if (log.p) {
       ans <- log(pnorm(q =  q/(a1*sd) - mean/sd) - 
@@ -947,7 +1137,7 @@ qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1,
   for (ii in 1:L) {
     mytheta <- mean[ii] / sd[ii]
     EY <- sd[ii] * ((a1[ii] + a2[ii]) *
-         (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
+          (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
           a2[ii] * mytheta)
     Upper <- 2 * EY
     while (pfoldnorm(q = Upper, mean = mean[ii], sd = sd[ii],
@@ -967,8 +1157,7 @@ qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1,
 
 
 
-rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
-
+rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2 = 1) {
   X <- rnorm(n, mean = mean, sd = sd)
   ans <- pmax(a1 * X, -a2*X)
   ans[a1 <= 0 | a2 <= 0] <- NA
@@ -980,9 +1169,9 @@ rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
 
 
  foldnormal <- function(lmean = "identitylink", lsd = "loge",
-                      imean = NULL,       isd = NULL,
-                      a1 = 1, a2 = 1,
-                      nsimEIM = 500, imethod = 1, zero = NULL) {
+                        imean = NULL,       isd = NULL,
+                        a1 = 1, a2 = 1,
+                        nsimEIM = 500, imethod = 1, zero = NULL) {
   if (!is.Numeric(a1, positive = TRUE, length.arg = 1) ||
       !is.Numeric(a2, positive = TRUE, length.arg = 1))
     stop("bad input for arguments 'a1' and 'a2'")
@@ -1433,12 +1622,12 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
   cenL <- (ans < Lower)
   cenU <- (ans > Upper)
   if (FALSE) {
-  ans[cenL] <- Lower[cenL]
-  ans[cenU] <- Upper[cenU]
-} else {
-  ans <- pmax(ans, Lower)
-  ans <- pmin(ans, Upper)
-}
+    ans[cenL] <- Lower[cenL]
+    ans[cenU] <- Upper[cenU]
+  } else {
+    ans <- pmax(ans, Lower)
+    ans <- pmin(ans, Upper)
+  }
   
   attr(ans, "Lower") <- Lower
   attr(ans, "Upper") <- Upper
@@ -1450,16 +1639,11 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
 
 
 
-tobit.control <- function(save.weights = TRUE, ...) {
-  list(save.weights = save.weights)
-}
-
-
- tobit <- function(Lower = 0, Upper = Inf,
+ tobit <- function(Lower = 0, Upper = Inf,  # See the trick described below.
                    lmu = "identitylink",  lsd = "loge",
-                   nsimEIM = 250,
                    imu = NULL,        isd = NULL,
                    type.fitted = c("uncensored", "censored", "mean.obs"),
+                   byrow.arg = FALSE,
                    imethod = 1, zero = -2) {
 
 
@@ -1469,6 +1653,7 @@ tobit.control <- function(save.weights = TRUE, ...) {
 
 
 
+
   lmu <- as.list(substitute(lmu))
   emu <- link2list(lmu)
   lmu <- attr(emu, "function.name")
@@ -1481,22 +1666,18 @@ tobit.control <- function(save.weights = TRUE, ...) {
 
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
-    imethod > 2)
-    stop("argument 'imethod' must be 1 or 2")
+    imethod > 3)
+    stop("argument 'imethod' must be 1 or 2 or 3")
   if ( # length(Lower) != 1 || length(Upper) != 1 ||
       !is.numeric(Lower) ||
       !is.numeric(Upper) ||
       any(Lower >= Upper))
-    stop("Lower and Upper must ",
-         "be numeric with Lower < Upper")
+    stop("arguments 'Lower' and 'Upper' must be numeric and ",
+         "satisfy Lower < Upper")
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
-  if (!is.Numeric(nsimEIM, length.arg = 1,
-                  integer.valued = TRUE) ||
-      nsimEIM <= 10)
-    stop("argument 'nsimEIM' should be an integer greater than 10")
 
   if (mode(type.fitted) != "character" && mode(type.fitted) != "name")
     type.fitted <- as.character(substitute(type.fitted))
@@ -1505,12 +1686,12 @@ tobit.control <- function(save.weights = TRUE, ...) {
 
 
   stdTobit <- all(Lower == 0.0) &&
-              all(!is.finite(Upper)) &&
+              all(is.infinite(Upper)) &&
               all(lmu == "identitylink")
 
 
   new("vglmff",
-  blurb = c("Tobit model\n\n",
+  blurb = c("Tobit model (censored normal)\n\n",
             "Links:    ",
             namesof("mu", lmu, earg = emu, tag = TRUE), "; ",
             namesof("sd", lsd, earg = esd, tag = TRUE), "\n",
@@ -1526,10 +1707,16 @@ tobit.control <- function(save.weights = TRUE, ...) {
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
+         Q1 = 1,
          type.fitted = .type.fitted ,
          zero = .zero ,
-         nsimEIM = .nsimEIM )
-  }, list( .zero = zero, .nsimEIM = nsimEIM,
+         expected = TRUE,
+         byrow.arg = .byrow.arg ,
+         stdTobit = .stdTobit )
+  }, list( .zero = zero,
+           .byrow.arg = byrow.arg,
+           .stdTobit = stdTobit,
            .type.fitted = type.fitted ))),
 
   initialize = eval(substitute(expression({
@@ -1547,83 +1734,84 @@ tobit.control <- function(save.weights = TRUE, ...) {
 
 
 
-
     ncoly <- ncol(y)
     M <- M1 * ncoly
 
-    Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
-    Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
+    Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = .byrow.arg )
+    Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = .byrow.arg )
 
     extra$type.fitted <- .type.fitted
     extra$censoredL <- (y <= Lowmat)
     extra$censoredU <- (y >= Uppmat)
-    if (any(y < Lowmat)) {
-      warning("replacing response values less than the value ",
-              .Lower , " by ", .Lower )
-      y[y < Lowmat] <- Lowmat[y < Lowmat]
+    if (any(matTF <- (y < Lowmat))) {
+      warning("replacing response values less than 'Lower' by 'Lower'")
+      y[matTF] <- Lowmat[matTF]
     }
-    if (any(y > Uppmat)) {
-      warning("replacing response values greater than the value ",
-              .Upper, " by ", .Upper)
-      y[y > Uppmat] <- Uppmat[y > Uppmat]
+    if (any(matTF <- (y > Uppmat))) {
+      warning("replacing response values greater than 'Upper' by 'Upper'")
+      y[matTF] <- Uppmat[matTF]
     }
 
-    temp1.names <-
-      if (ncoly == 1) "mu" else paste("mu", 1:ncoly, sep = "")
-    temp2.names <-
-      if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = "")
+    temp1.names <- param.names("mu", ncoly)
+    temp2.names <- param.names("sd", ncoly)
     predictors.names <-
       c(namesof(temp1.names, .lmu , earg = .emu , tag = FALSE),
         namesof(temp2.names, .lsd , earg = .esd , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
 
 
-    if ( .stdTobit ) {
-      save.weights <- control$save.weights <- FALSE
-    }
-
     if (!length(etastart)) {
       anyc <- cbind(extra$censoredL | extra$censoredU)
-      i11 <- if ( .imethod == 1) anyc else FALSE  # can be all data
+      i11 <- if ( .imethod == 1) anyc else
+             matrix(FALSE, n, ncoly)  # can be all data
 
       mu.init <-
       sd.init <- matrix(0.0, n, ncoly)
-      for (ii in 1:ncol(y)) {
-        use.i11 <- i11[, ii]
-
-        if (sum(!use.i11) < ncol(x)) {
-          use.i11 <- rep(FALSE, length = n)
-        }
-        mylm <- lm.wfit(x = x[!use.i11,    , drop = FALSE],
-                        y = y[!use.i11, ii],
-                        w = w[!use.i11, ii])
+      for (jay in 1:ncol(y)) {
+        if ( .imethod >  2) {
+          mu.init[, jay] <- (y[, jay] + weighted.mean(y[, jay], w[, jay]))/2
+          sd.init[, jay] <- pmax(weighted.mean((y[, jay] - mu.init[, jay])^2,
+                                                w[, jay])^0.5,
+                                 0.001)
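+          # Editor's note (not part of the package source): imethod > 2
+          # ignores the censoring and uses simple moment-type starting
+          # values, shrinking each observation towards its weighted mean.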
+        } else {  # .imethod <= 2
+
+          use.i11 <- i11[, jay]
+
+          if (sum(!use.i11) < ncol(x)) {
+            use.i11 <- rep(FALSE, length = n)
+          }
+          mylm <- lm.wfit(x = x[!use.i11,     , drop = FALSE],
+                          y = y[!use.i11, jay],
+                          w = w[!use.i11, jay])
 
                      
 
-        sd.init[, ii] <- sqrt( sum(w[!use.i11, ii] * mylm$resid^2)
-                              / mylm$df.residual ) * 1.5
-        mu.init[!use.i11, ii] <- mylm$fitted.values
-        if (any(anyc[, ii]))
-          mu.init[anyc[, ii], ii] <- x[anyc[, ii],, drop = FALSE] %*%
-                                     mylm$coeff
-      }
+          sd.init[, jay] <- sqrt( sum(w[!use.i11, jay] * mylm$resid^2)
+                                / mylm$df.residual ) * 1.5
+          mu.init[!use.i11, jay] <- mylm$fitted.values
+          if (any(anyc[, jay]))
+            mu.init[anyc[, jay], jay] <- x[anyc[, jay],, drop = FALSE] %*%
+                                         mylm$coeff
+        }  # .imethod <= 2
+      }  # for (jay in 1:ncol(y))
 
       if (length( .Imu ))
-        mu.init <- matrix( .Imu , n, ncoly, byrow = TRUE)
+        mu.init <- matrix( .Imu , n, ncoly, byrow = .byrow.arg )
       if (length( .isd ))
-        sd.init <- matrix( .isd , n, ncoly, byrow = TRUE)
+        sd.init <- matrix( .isd , n, ncoly, byrow = .byrow.arg )
 
       etastart <- cbind(theta2eta(mu.init, .lmu , earg = .emu ),
                         theta2eta(sd.init, .lsd , earg = .esd ))
 
       etastart <- etastart[, interleave.VGAM(M, M = M1), drop = FALSE]
-    }
+    }   # if (!length(etastart))
  }), list( .Lower = Lower, .Upper = Upper,
            .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd,
            .Imu = imu, .isd = isd,
            .type.fitted = type.fitted,
            .stdTobit = stdTobit,
+           .byrow.arg = byrow.arg,
            .imethod = imethod ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
     M1 <- 2
@@ -1631,13 +1819,13 @@ tobit.control <- function(save.weights = TRUE, ...) {
     mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
                      .lmu , earg = .emu )
 
-
-
-    type.fitted <- if (length(extra$type.fitted)) extra$type.fitted else {
-                     warning("cannot find 'type.fitted'. ",
-                             "Returning 'uncensored'.")
-                     "uncensored"
-                   }
+    type.fitted <-
+      if (length(extra$type.fitted)) {
+        extra$type.fitted
+      } else {
+        warning("cannot find 'type.fitted'. Returning 'uncensored'.")
+        "uncensored"
+      }
 
     type.fitted <- match.arg(type.fitted,
                              c("uncensored", "censored", "mean.obs"))[1]
@@ -1645,8 +1833,10 @@ tobit.control <- function(save.weights = TRUE, ...) {
     if ( type.fitted == "uncensored")
       return(mum)
 
-    Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
-    Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+    Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly,
+                      byrow = .byrow.arg )
+    Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly,
+                      byrow = .byrow.arg )
     if ( type.fitted == "censored") {
       mum[mum < Lowmat] <- Lowmat[mum < Lowmat]
       mum[mum > Uppmat] <- Uppmat[mum > Uppmat]
@@ -1670,16 +1860,17 @@ tobit.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd,
+           .byrow.arg = byrow.arg,
            .Lower = Lower, .Upper = Upper ))),
   last = eval(substitute(expression({
 
-    temp0303 <- c(rep( .lmu, length = ncoly),
+    temp0303 <- c(rep( .lmu , length = ncoly),
                   rep( .lsd , length = ncoly))
-    names(temp0303) =
-      c(if (ncoly == 1) "mu" else paste("mu", 1:ncoly, sep = ""),
-        if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = ""))
+    names(temp0303) <-
+      c(param.names("mu", ncoly),
+        param.names("sd", ncoly))
     temp0303 <- temp0303[interleave.VGAM(M, M = M1)]
-    misc$link <- temp0303 # Already named
+    misc$link <- temp0303  # Already named
 
     misc$earg <- vector("list", M)
     names(misc$earg) <- names(misc$link)
@@ -1691,7 +1882,6 @@ tobit.control <- function(save.weights = TRUE, ...) {
     misc$multipleResponses <- TRUE
     misc$expected <- TRUE
     misc$imethod <- .imethod
-    misc$nsimEIM <- .nsimEIM
     misc$M1 <- M1
     misc$stdTobit <- .stdTobit
     misc$Lower <- Lowmat
@@ -1700,7 +1890,7 @@ tobit.control <- function(save.weights = TRUE, ...) {
 
   }), list( .lmu = lmu, .lsd = lsd,
             .emu = emu, .esd = esd,
-            .nsimEIM = nsimEIM, .imethod = imethod,
+            .imethod = imethod,
             .stdTobit = stdTobit,
             .Lower = Lower,
             .Upper = Upper ))),
@@ -1715,8 +1905,10 @@ tobit.control <- function(save.weights = TRUE, ...) {
     cenL <- extra$censoredL
     cenU <- extra$censoredU
     cen0 <- !cenL & !cenU  # uncensored obsns
-    Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
-    Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+    Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly,
+                      byrow = .byrow.arg )
+    Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly,
+                      byrow = .byrow.arg )
 
 
     mum <- eta2theta(eta[, M1*(1:ncoly)-1, drop = FALSE],
@@ -1736,9 +1928,9 @@ tobit.control <- function(save.weights = TRUE, ...) {
       stop("loglikelihood residuals not implemented yet")
     } else {
       ll.elts <- y  # Right dimension only
-      ll.elts[cen0]<- wmat[cen0] * ell0
-      ll.elts[cenL]<- wmat[cenL] * ellL
-      ll.elts[cenU]<- wmat[cenU] * ellU
+      ll.elts[cen0] <- wmat[cen0] * ell0
+      ll.elts[cenL] <- wmat[cenL] * ellL
+      ll.elts[cenU] <- wmat[cenU] * ellU
       if (summation) {
         sum(ll.elts)
       } else {
@@ -1747,6 +1939,7 @@ tobit.control <- function(save.weights = TRUE, ...) {
     }
   }, list( .lmu = lmu, .lsd = lsd,
            .emu = emu, .esd = esd,
+           .byrow.arg = byrow.arg,
            .Lower = Lower, .Upper = Upper ))),
   vfamily = c("tobit"),
 
@@ -1763,8 +1956,44 @@ tobit.control <- function(save.weights = TRUE, ...) {
     y <- cbind(y)
     ncoly <- ncol(y)
 
-    Lowmat <- matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
-    Uppmat <- matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
+    mills.ratio1 <- function(x) {
+      ans <- exp(dnorm(x, log = TRUE) - pnorm(x, log = TRUE))
+      ind <- (x < -1e2)
+      ans[ind] <- -x[ind] / (1 - 1/x[ind]^2 + 3 / x[ind]^4)
+      ans
+    }
+
+
+    mills.ratio2 <- function(x) {
+      ans <- exp(2 * dnorm(x, log = TRUE) - pnorm(x, log = TRUE))
+      ans[x < -40] <- 0
+      ans
+    }
+
+
+
+    moment.k.dnorm <- function(z, k = 0) {
+      if (any(k < 0))
+        stop("this function works only for non-negative 'k'")
+      ans <- dnorm(z) * z^k
+      ans[is.infinite(z)] <- 0
+      ans
+    }
+
+
+
+    moment.millsratio2 <- function(zedd) {
+      ans <- exp(2 * (log(abs(zedd)) + dnorm(zedd, log = TRUE)) -
+                 pnorm(zedd, log = TRUE))
+      ans[is.infinite(zedd)] <- 0  # Needed for zedd == Inf and -Inf
+      ans
+    }
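+    # Editor's note (illustrative, not part of the package source): the four
+    # local helpers above compute, in a numerically stable way,
+    #   mills.ratio1(x)       = dnorm(x) / pnorm(x)    (inverse Mills ratio),
+    #   mills.ratio2(x)       = dnorm(x)^2 / pnorm(x),
+    #   moment.k.dnorm(z, k)  = z^k * dnorm(z)         (0 at z = +/-Inf),
+    #   moment.millsratio2(z) = z^2 * dnorm(z)^2 / pnorm(z),
+    # which are the building blocks of the expected information entries
+    # below.  For example, mills.ratio1(0) should equal
+    # dnorm(0)/pnorm(0) ~= 0.7979.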
+
+
+
+    Lowmat <- matrix( .Lower , nrow = nrow(eta), ncol = ncoly,
+                      byrow = .byrow.arg )
+    Uppmat <- matrix( .Upper , nrow = nrow(eta), ncol = ncoly,
+                      byrow = .byrow.arg )
 
     cenL <- extra$censoredL
     cenU <- extra$censoredU
@@ -1785,20 +2014,16 @@ tobit.control <- function(save.weights = TRUE, ...) {
     if (any(cenL)) {
       mumL <- Lowmat - mum
       temp21L <- mumL[cenL] / sdm[cenL]
-      PhiL <- pnorm(temp21L)
-      phiL <- dnorm(temp21L)
-      fred21 <- phiL / PhiL
+      fred21 <- mills.ratio1(temp21L)
       dl.dmu[cenL] <- -fred21 / sdm[cenL]
-      dl.dsd[cenL] <-  fred21 * (-mumL[cenL] / sdm[cenL]^2)
+      dl.dsd[cenL] <-  fred21 * (-temp21L / sdm[cenL])
     }
     if (any(cenU)) {
       mumU <- Uppmat - mum
       temp21U <- mumU[cenU] / sdm[cenU]
-      PhiU <- pnorm(temp21U, lower.tail = FALSE)
-      phiU <- dnorm(temp21U)
-      fred21 <- -phiU / PhiU
+      fred21 <- -mills.ratio1(-temp21U)
       dl.dmu[cenU] <- -fred21 / sdm[cenU]  # Negated
-      dl.dsd[cenU] <-  fred21 * (-mumU[cenU] / sdm[cenU]^2)
+      dl.dsd[cenU] <-  fred21 * (-temp21U / sdm[cenU])
     }
 
     dthetas.detas <- cbind(dmu.deta, dsd.deta)
@@ -1809,101 +2034,56 @@ tobit.control <- function(save.weights = TRUE, ...) {
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lmu = lmu, .lsd = lsd,
             .emu = emu, .esd = esd,
+            .byrow.arg = byrow.arg,
             .Lower = Lower, .Upper = Upper ))),
   weight = eval(substitute(expression({
 
-    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
-    ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
 
 
-    if (is.numeric( .nsimEIM ) &&
-      ! .stdTobit ) {
+    v.large <-  3.5
+    v.small <- -5.0  # pnorm(-5) == 2.9e-07; overridden just below
 
+    v.large <-  5.5
+    v.small <- -6.5  # pnorm(-6.5) == 4.0e-11
 
-    run.varcov <- 0
-
-    for (spp. in 1:ncoly) {
-      run.varcov <- 0
-      muvec <- mum[, spp.]
-      sdvec <- sdm[, spp.]
+    if ( .stdTobit ) {
+      wz  <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
+      wz1 <- matrix(0.0, n, dimm(M1))
+      ind1 <- iam(NA, NA, M = M1, both = TRUE, diag = TRUE)
 
-      for (ii in 1:( .nsimEIM )) {
-        ysim <- rtobit(n = n, mean = muvec, sd = sdvec,
-                      Lower = Lowmat[, spp.], Upper = Uppmat[, spp.])
-        cenL <- attr(ysim, "cenL")
-        cenU <- attr(ysim, "cenU")
-        cen0 <- !cenL & !cenU   # uncensored obsns
-
-        zedd <- (ysim - muvec) / sdvec
-        dl.dmu <-   zedd / sdvec
-        dl.dsd <- (zedd^2 - 1) / sdvec
-      if (any(cenL)) {
-        mumL <- Lowmat[, spp.] - muvec
-        temp21L <- mumL[cenL] / sdvec[cenL]
-        PhiL <- pnorm(temp21L)
-        phiL <- dnorm(temp21L)
-        fred21 <- phiL / PhiL
-        dl.dmu[cenL] <- -fred21 / sdvec[cenL]
-        dl.dsd[cenL] <-  fred21 * (-mumL[cenL] / sdvec[cenL]^2)
-      }
-      if (any(cenU)) {
-        mumU <- Uppmat[, spp.] - muvec
-        temp21U <- mumU[cenU] / sdvec[cenU]
-        PhiU <- pnorm(temp21U, lower.tail = FALSE)
-        phiU <- dnorm(temp21U)
-        fred21 <- -phiU / PhiU
-        dl.dmu[cenU] <- -fred21 / sdvec[cenU]  # Negated
-        dl.dsd[cenU] <-  fred21 * (-mumU[cenU] / sdvec[cenU]^2)
-      }
+      for (spp. in 1:ncoly) {
+        zedd0 <- mum[, spp.] / sdm[, spp.]
+        phivec  <- dnorm(zedd0)
+        Phivec  <- pnorm(zedd0)
+        phicPhi <- mills.ratio1(-zedd0)
 
-      rm(ysim)
-      temp3 <- cbind(dl.dmu, dl.dsd)
-      run.varcov <- run.varcov +
-                   temp3[, ind1$row.index] *
-                   temp3[, ind1$col.index]
-    }
-    run.varcov <- run.varcov / .nsimEIM
+        wz1[, iam(1, 2, M = M1)] <- phivec * (1 + zedd0 *
+                                    (zedd0 - phicPhi))
 
-    wz1 <- if (intercept.only && FALSE)
-        matrix(colMeans(run.varcov),
-               n, ncol(run.varcov), byrow = TRUE) else
-        run.varcov
 
+        wz1[, iam(1, 1, M = M1)] <- Phivec +
+                                    mills.ratio2(-zedd0) +
+                                    moment.k.dnorm(-zedd0, k = 1)
+        wz1[, iam(2, 2, M = M1)] <- 2 * Phivec +
+                                    moment.k.dnorm(-zedd0, k = 2) *
+                                    mills.ratio1(-zedd0) +
+                                    moment.k.dnorm(-zedd0, k = 1) +
+                                    moment.k.dnorm(-zedd0, k = 3)
 
-      wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
-                   dThetas.detas[, M1 * (spp. - 1) + ind1$col]
 
 
-      for (jay in 1:M1)
-        for (kay in jay:M1) {
-          cptr <- iam((spp. - 1) * M1 + jay,
-                      (spp. - 1) * M1 + kay,
-                      M = M)
-          wz[, cptr] = wz1[, iam(jay, kay, M = M1)]
+        if (FALSE && any(index1 <- (zedd0 < v.small))) {
+          wz1[index1, iam(1, 1, M = M1)] <- 1e-7
+          wz1[index1, iam(1, 2, M = M1)] <- 0
+          wz1[index1, iam(2, 2, M = M1)] <- 1e-7
+        }
+        if (FALSE && any(index1 <- (zedd0 > v.large))) {
+          wz1[index1, iam(1, 1, M = M1)] <- 1
+          wz1[index1, iam(1, 2, M = M1)] <- 0
+          wz1[index1, iam(2, 2, M = M1)] <- 2
         }
-    }  # End of for (spp.) loop
 
-    } else {
 
-      wz1 <- matrix(0.0, n, dimm(M1))
-      for (spp. in 1:ncoly) {
-        zedd  <- (y[, spp.] - mum[, spp.]) / sdm[, spp.]
-        zedd0 <- (            mum[, spp.]) / sdm[, spp.]
-        phivec <- dnorm(zedd0)
-        Phivec <- pnorm(zedd0)
-
-        wz1[, iam(1, 1, M = M1)] <-   -(phivec * zedd0 -
-                                        phivec^2 / (1 - Phivec) -
-                                        Phivec)
-        wz1[, iam(2, 2, M = M1)] <-   -(phivec   * zedd0^3 +
-                                        phivec   * zedd0 -
-                                        phivec^2 * zedd0^2 / (1 - Phivec) -
-                                        2 * Phivec)
-        wz1[, iam(1, 2, M = M1)] <- +(phivec   * zedd0^2 +
-                                        phivec   -
-                                        phivec^2 * zedd0 / (1 - Phivec))
-
-        wz1 <- wz1 / sdm[, spp.]^2
       wz1 <- wz1 * dThetas.detas[, M1 * (spp. - 1) + ind1$row] *
                    dThetas.detas[, M1 * (spp. - 1) + ind1$col]
 
@@ -1913,20 +2093,70 @@ tobit.control <- function(save.weights = TRUE, ...) {
                       (spp. - 1) * M1 + kay,
                       M = M)
           wz[, cptr] <- wz1[, iam(jay, kay, M = M1)]
-      }
+        }
       }  # End of for (spp.) loop
 
-    }  # End of EIM
+    } else {  # Not a standard Tobit model ,,,,,,,,,,,,,,,,,,,,,,,,,,,,
 
 
-    temp <- w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
 
+      A.i <- (Lowmat - mum) / sdm
+      B.i <- (Uppmat - mum) / sdm
+      phivec.A  <- dnorm(A.i)
+      phivec.B  <- dnorm(B.i)
+      Phivec.A  <- pnorm(A.i)
+      Phivec.B  <- pnorm(B.i)
+      Phivec.BB <- pnorm(-B.i)
+      phiPhi.A  <- mills.ratio1( A.i)
+      phicPhi.B <- mills.ratio1(-B.i)
 
-    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+
+                         
+
+
+      ned2l.dmumu <- Phivec.B - Phivec.A +
+                     moment.k.dnorm( A.i, k = 1) + mills.ratio2( A.i) +
+                     moment.k.dnorm(-B.i, k = 1) + mills.ratio2(-B.i)
+      ned2l.dsdsd <- 2 * (Phivec.B - Phivec.A) +
+                     3 * (moment.k.dnorm( A.i, k = 1) +
+                          moment.k.dnorm(-B.i, k = 1)) -
+
+                     2 * moment.k.dnorm(-B.i, k = 1) +
+                     moment.k.dnorm(-B.i, k = 3) +
+                     moment.millsratio2(-B.i) -
+                         
+                     2 * moment.k.dnorm( A.i, k = 1) +
+                     moment.k.dnorm( A.i, k = 3) +
+                     moment.millsratio2( A.i)
+      ned2l.dmusd <- phivec.A - phivec.B +
+                     moment.k.dnorm( A.i, k = 2) +
+                     moment.k.dnorm( A.i, k = 1) * mills.ratio1( A.i) +
+                     moment.k.dnorm( B.i, k = 2) +
+                     moment.k.dnorm(-B.i, k = 1) * mills.ratio1(-B.i)
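+      # Editor's note (not part of the package source): these appear to be
+      # the expected second derivatives for a normal response censored at
+      # A.i (left) and B.i (right), written per unit variance; the common
+      # 1/sd^2 factor is supplied through 'w / sdm^2' in w.wz.merge() below.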
+
+
+
+      if (TRUE && any(index1 <- (A.i < v.small))) {
+        ned2l.dmusd[index1] <- 0
+      }
+      if (TRUE && any(index1 <- (B.i > v.large))) {
+        ned2l.dmusd[index1] <- 0
+      }
+
+
+      wz <- array(c(ned2l.dmumu * dmu.deta^2,
+                    ned2l.dsdsd * dsd.deta^2,
+                    ned2l.dmusd * dmu.deta * dsd.deta),
+                    dim = c(n, M / M1, 3))
+      wz <- arwz2wz(wz, M = M, M1 = M1)
+
+
+    }  # Not a standard Tobit model
+
+    w.wz.merge(w = w / sdm^2, wz = wz, n = n, M = M, ndepy = ncoly)
   }), list( .lmu = lmu, .Lower = Lower, .Upper = Upper,
             .lsd = lsd,
-            .stdTobit = stdTobit,
-            .nsimEIM = nsimEIM ))))
+            .stdTobit = stdTobit ))))
 }  # End of tobit()
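+# Editor's note (illustrative, not part of the package source): a minimal
+# usage sketch of the family function above, assuming VGAM is attached.
+# Responses at the 'Lower' bound are treated as left-censored:
+#   set.seed(1)
+#   tdata <- data.frame(x2 = runif(200))
+#   tdata <- transform(tdata, y = rtobit(200, mean = 1 + 4 * x2, Lower = 0))
+#   fit <- vglm(y ~ x2, tobit(Lower = 0), data = tdata, trace = TRUE)
+#   coef(fit, matrix = TRUE)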
 
 
@@ -2029,7 +2259,7 @@ tobit.control <- function(save.weights = TRUE, ...) {
          Q1 = 1,
          expected = TRUE,
          multipleResponses = TRUE,
-         zero = .zero)
+         zero = .zero )
   }, list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -2965,7 +3195,7 @@ tobit.control <- function(save.weights = TRUE, ...) {
           namesof("meanlog", lmulog, earg = emulog, tag = TRUE), ", ",
           namesof("sdlog",   lsdlog, earg = esdlog, tag = TRUE)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.others.R b/R/family.others.R
index b220cdc..75bca8d 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -189,7 +189,7 @@ rexppois <- function(n, rate = 1, shape) {
 
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero))),
 
   initialize = eval(substitute(expression({
@@ -476,7 +476,7 @@ genrayleigh.control <- function(save.weights = TRUE, ...) {
             namesof("scale", lscale, earg = escale), ", ",
             namesof("shape", lshape, earg = eshape), "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -743,7 +743,7 @@ expgeometric.control <- function(save.weights = TRUE, ...) {
             "shape) / (shape / scale)"), 
                            
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
  
 
@@ -1020,7 +1020,7 @@ explogff.control <- function(save.weights = TRUE, ...) {
             "Mean:     ", "(-polylog(2, 1 - p) * scale) / log(shape)"),
 
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   initialize = eval(substitute(expression({
@@ -1356,7 +1356,7 @@ tpnff <- function(llocation = "identitylink", lscale = "loge",
             namesof("scale",     lscale,  earg = escale), "\n\n",
             "Mean: "),
   constraints = eval(substitute(expression({
-          constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+          constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -1529,7 +1529,7 @@ tpnff3 <- function(llocation = "identitylink",
             namesof("skewpar",  lscale, earg = eskewp),  "\n\n",
             "Mean: "),
   constraints = eval(substitute(expression({
-          constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+          constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.positive.R b/R/family.positive.R
index 7f369ef..7349555 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -70,8 +70,8 @@ N.hat.posbernoulli <-
 
 
   if (length(extra$p.small) &&
-     any(pibbeta < extra$p.small) &&
-     !extra$no.warning)
+      any(pibbeta < extra$p.small) &&
+      !extra$no.warning)
     warning("The abundance estimation for this model can be unstable")
 
 
@@ -85,7 +85,8 @@ N.hat.posbernoulli <-
     for (jay in 1:tau) {
       linpred.index <- jay.index[jay]
       Index0 <- Hmatrices[linpred.index, ] != 0
-      X.lm.jay <- X.vlm[(0:(n.lm - 1)) * M + linpred.index, Index0,
+      X.lm.jay <- X.vlm[(0:(n.lm - 1)) * M + linpred.index,
+                        Index0,
                         drop = FALSE]
 
       dvect[, Index0] <-
@@ -1257,7 +1258,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   list( .link = link, .earg = earg,
         .multiple.responses = multiple.responses ))),
   last = eval(substitute(expression({
-    extra$w   <- NULL # Kill it off 
+    extra$w <- NULL  # Kill it off 
 
 
     misc$link <- rep( .link , length = M)
@@ -1278,17 +1279,17 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
 
 
-if (length(extra$tau)) {
-    R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
-    R[lower.tri(R)] <- 0
-    tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
-                               R = R, w = w,
-                               X.vlm = X.vlm.save,
-                               Hlist = Hlist,  # 20150428; bug fixed here
-                               extra = extra, model.type = "0")
-    extra$N.hat    <- tmp6$N.hat
-    extra$SE.N.hat <- tmp6$SE.N.hat
-}
+    if (length(extra$tau)) {
+      R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
+      R[lower.tri(R)] <- 0
+      tmp6 <- N.hat.posbernoulli(eta = eta, link = .link , earg = .earg ,
+                                 R = R, w = w,
+                                 X.vlm = X.vlm.save,
+                                 Hlist = Hlist,  # 20150428; bug fixed here
+                                 extra = extra, model.type = "0")
+      extra$N.hat    <- tmp6$N.hat
+      extra$SE.N.hat <- tmp6$SE.N.hat
+    }
 
     
   }), list( .link = link, .earg = earg,
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 3ce6df0..7be8ef0 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -25,6 +25,38 @@
 
 
 
+dlms.bcn <- function(x, lambda = 1, mu = 0, sigma = 1,
+                     tol0 = 0.001, log = FALSE) {
+  if (!is.logical(log.arg <- log) || length(log) != 1)
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  zedd   <- ((x/mu)^lambda - 1) / (lambda * sigma)
+  log.dz.dy <- (lambda - 1) * log(x/mu) - log(mu * sigma)
+
+  is.eff.0 <- abs(lambda) < tol0
+  if (any(is.eff.0)) {
+    zedd[is.eff.0] <- log(x[is.eff.0] / mu[is.eff.0]) / sigma[is.eff.0]
+    log.dz.dy[is.eff.0] <- -log(x[is.eff.0] * sigma[is.eff.0])
+  }
+  logden <- dnorm(zedd, log = TRUE) + log.dz.dy
+  if (log.arg) logden else exp(logden)
+}
+
+
+
+qlms.bcn <- function(p, lambda = 1, mu = 0, sigma = 1) {
+
+  answer <- mu * (1 + lambda * sigma * qnorm(p))^(1/lambda)
+  answer
+}
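+# Editor's note (illustrative, not part of the package source): qlms.bcn()
+# inverts the Box-Cox transformation underlying dlms.bcn(), i.e. the
+# p-quantile is  mu * (1 + lambda * sigma * qnorm(p))^(1/lambda)  for
+# lambda away from 0.  A quick consistency check:
+#   lambda <- 0.4; mu <- 10; sigma <- 0.1; p <- 0.75
+#   qq <- qlms.bcn(p, lambda, mu, sigma)
+#   pnorm(((qq/mu)^lambda - 1) / (lambda * sigma))   # recovers p = 0.75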
+
+
+
+
+
+
+
 lms.bcn.control <-
 lms.bcg.control <-
 lms.yjn.control <- function(trace = TRUE, ...)
@@ -33,15 +65,15 @@ lms.yjn.control <- function(trace = TRUE, ...)
 
 
  lms.bcn <- function(percentiles = c(25, 50, 75),
-                      zero = c(1, 3),
-                      llambda = "identitylink",
-                      lmu = "identitylink",
-                      lsigma = "loge",
-                      idf.mu = 4,
-                      idf.sigma = 2,
-                      ilambda = 1,
-                      isigma = NULL,
-                      tol0 = 0.001) {
+                     zero = c(1, 3),
+                     llambda = "identitylink",
+                     lmu = "identitylink",
+                     lsigma = "loge",
+                     idf.mu = 4,
+                     idf.sigma = 2,
+                     ilambda = 1,
+                     isigma = NULL,
+                     tol0 = 0.001) {
   llambda <- as.list(substitute(llambda))
   elambda <- link2list(llambda)
   llambda <- attr(elambda, "function.name")
@@ -76,7 +108,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
             namesof("mu",     link = lmu,     earg = emu), ", ",
             namesof("sigma",  link = lsigma,  earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -156,19 +188,12 @@ lms.yjn.control <- function(trace = TRUE, ...)
     muvec  <- eta2theta(eta[, 2], .lmu     , earg = .emu )
     sigma  <- eta2theta(eta[, 3], .lsigma  , earg = .esigma )
 
-    zedd   <- ((y/muvec)^lambda - 1) / (lambda * sigma)
-    log.dz.dy <- (lambda - 1) * log(y/muvec) - log(muvec * sigma)
-
-    is.eff.0 <- abs(lambda) < .tol0
-    if (any(is.eff.0)) {
-      zedd[is.eff.0] <- log(y[is.eff.0] / muvec[is.eff.0]) / sigma[is.eff.0]
-      log.dz.dy[is.eff.0] <- -log(y[is.eff.0] * sigma[is.eff.0])
-    }
 
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
     } else {
-      ll.elts <- c(w) * (dnorm(zedd, log = TRUE) + log.dz.dy)
+      ll.elts <- c(w) * dlms.bcn(x = y, lambda = lambda, mu = muvec,
+                                 sigma = sigma, tol0 = .tol0 , log = TRUE)
       if (summation) {
         sum(ll.elts)
       } else {
@@ -256,7 +281,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
             namesof("mu", link = lmu, earg = emu), ", ",
             namesof("sigma", link = lsigma, earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list(.zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -650,6 +675,7 @@ gleg.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
 
 
 
+
 lms.yjn2.control <- function(save.weights = TRUE, ...) {
     list(save.weights = save.weights)
 }
@@ -696,7 +722,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
             ", ",
             namesof("sigma", link = lsigma, earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list(.zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -898,7 +924,7 @@ lms.yjn2.control <- function(save.weights = TRUE, ...) {
             ", mu, ",
             namesof("sigma", link = lsigma, earg = esigma)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list(.zero = zero))),
   initialize = eval(substitute(expression({
 
@@ -1545,14 +1571,14 @@ amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
     devy[nz] <- devy[nz] + (1 - y[nz]) * log1p(-y[nz])
     devmu <- y * log(mu) + (1 - y) * log1p(-mu)
     if (any(small <- mu * (1 - mu) < .Machine$double.eps)) {
-        warning("fitted values close to 0 or 1")
-        smu <- mu[small]
-        sy <- y[small]
-        smu <- ifelse(smu < .Machine$double.eps,
-                      .Machine$double.eps, smu)
-        onemsmu <- ifelse((1 - smu) < .Machine$double.eps,
-                          .Machine$double.eps, 1 - smu)
-        devmu[small] <- sy * log(smu) + (1 - sy) * log(onemsmu)
+      warning("fitted values close to 0 or 1")
+      smu <- mu[small]
+      sy <- y[small]
+      smu <- ifelse(smu < .Machine$double.eps,
+                    .Machine$double.eps, smu)
+      onemsmu <- ifelse((1 - smu) < .Machine$double.eps,
+                        .Machine$double.eps, 1 - smu)
+      devmu[small] <- sy * log(smu) + (1 - sy) * log(onemsmu)
     }
     devi <- 2 * (devy - devmu)
     if (residuals) {
@@ -2331,7 +2357,7 @@ dprobitlap <-
   if (meth2) {
     dx.dy <- x
     use.x <- probit(x[index1])  # earg = earg
-    logdensity[index1] =
+    logdensity[index1] <-
       dalap(x = use.x, location = location.ald[index1],
             scale = scale.ald[index1], tau = tau[index1],
             kappa = kappa[index1], log = TRUE)
@@ -2354,7 +2380,8 @@ dprobitlap <-
 
   if (meth2) {
     dx.dy[index1] <- probit(x[index1],  # earg = earg,
-                           inverse = FALSE, deriv = 1)
+                            inverse = TRUE,
+                            deriv = 1)
     dx.dy[!index1] <- 0
     dx.dy[!indexTF] <- NaN
     if (log.arg) logdensity - log(abs(dx.dy)) else
@@ -2423,9 +2450,9 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
 
   NN <- max(length(x), length(location.ald), length(scale.ald),
            length(kappa))
-  location.ald <- rep(location.ald, length.out = NN);
+  location.ald <- rep(location.ald, length.out = NN)
   scale.ald <- rep(scale.ald, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
+  kappa <- rep(kappa, length.out = NN)
   x <- rep(x, length.out = NN)
   tau <- rep(tau, length.out = NN)
 
@@ -2457,7 +2484,7 @@ dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
 
   if (meth2) {
     dx.dy[index1] <- cloglog(x[index1],  # earg = earg,
-                            inverse = FALSE, deriv = 1)
+                             inverse = TRUE, deriv = 1)
     dx.dy[!index1] <- 0
     dx.dy[!indexTF] <- NaN
     if (log.arg) logdensity - log(abs(dx.dy)) else
@@ -2644,7 +2671,7 @@ alaplace2.control <- function(maxit = 100, ...) {
     dotzero <- .zero
     M1 <- 2
     eval(negzero.expression.VGAM)
-    constraints <- cm.zero.VGAM(constraints, x, z.Index, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, z.Index, M = M)
   }), list( .parallel.locat = parallel.locat,
             .parallel.scale = parallel.scale,
             .zero = zero,
@@ -3031,7 +3058,7 @@ alaplace1.control <- function(maxit = 100, ...) {
     
     constraints <- con.locat
 
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel.locat = parallel.locat,
             .zero = zero,
             .apply.parint.locat = apply.parint.locat ))),
@@ -3311,7 +3338,7 @@ alaplace3.control <- function(maxit = 100, ...) {
             "\n",
             "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 3,
@@ -3599,7 +3626,7 @@ rlaplace <- function(n, location = 0, scale = 1) {
             "Mean:     location", "\n",
             "Variance: 2*scale^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -3744,7 +3771,7 @@ fff.control <- function(save.weights = TRUE, ...) {
             "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
             "provided df2>4 and ncp = 0"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -4011,7 +4038,7 @@ fff.control <- function(save.weights = TRUE, ...) {
                    trigamma(1+Nvec*prob-yvec) -
                    trigamma(1+Nvec*(1-prob)-w+yvec))
     }
-    d2prob.deta2 <- d2theta.deta2(prob, .lprob, earg = .earg )
+    d2prob.deta2 <- d2theta.deta2(prob, .lprob , earg = .earg )
 
     wz <- -(dprob.deta^2) * d2l.dprob2
     wz <- c(w) * wz
@@ -4558,6 +4585,13 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
 
 
 
+
+
+triangle.control <- function(stepsize = 0.33, maxit = 100, ...) {
+  list(stepsize = stepsize, maxit = maxit)
+}
+
+
  triangle <-
   function(lower = 0, upper = 1,
            link = extlogit(min = 0, max = 1),
@@ -4568,8 +4602,6 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
 
 
 
-
-
   if (!is.Numeric(lower))
     stop("bad input for argument 'lower'")
   if (!is.Numeric(upper))
@@ -4613,8 +4645,8 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
 
 
 
-    extra$lower <- rep( .lower, length.out = n)
-    extra$upper <- rep( .upper, length.out = n)
+    extra$lower <- rep( .lower , length.out = n)
+    extra$upper <- rep( .upper , length.out = n)
 
     if (any(y <= extra$lower | y >= extra$upper))
       stop("some y values in [lower,upper] detected")
@@ -4690,8 +4722,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
 
 
   deriv = eval(substitute(expression({
-    Theta <- eta2theta(eta, .link , earg = .earg ) 
-
+    Theta       <- eta2theta(eta,     .link , earg = .earg )
     dTheta.deta <- dtheta.deta(Theta, .link , earg = .earg )
 
     pos <- y > Theta
@@ -4703,11 +4734,11 @@ ptriangle <- function(q, theta, lower = 0, upper = 1,
     dl.dTheta[neg] <-  -1 / (Theta[neg]-lower[neg])
     dl.dTheta[pos] <-   1 / (upper[pos]-Theta[pos])
 
-    w * dl.dTheta * dTheta.deta
+    c(w) * dl.dTheta * dTheta.deta
   }), list( .link = link, .earg = earg ))),
   weight = eval(substitute(expression({
-    d2l.dTheta2 <-  1 / ((Theta - lower) * (upper - Theta))
-    wz <- d2l.dTheta2 * dTheta.deta^2
+    var.dl.dTheta <-  1 / ((Theta - lower) * (upper - Theta))
+    wz <- var.dl.dTheta * dTheta.deta^2
     c(w) * wz
   }), list( .link = link, .earg = earg ))))
 }
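+# Editor's note (illustrative, not part of the package source): the working
+# weight above uses E[(dl/dTheta)^2] = 1 / ((Theta - lower) * (upper - Theta))
+# for the vertex of the triangular density.  A hypothetical usage sketch,
+# assuming VGAM is attached:
+#   tdata <- data.frame(y = rtriangle(500, theta = 0.6))
+#   fit <- vglm(y ~ 1, triangle(lower = 0, upper = 1), data = tdata,
+#               trace = TRUE)
+#   Coef(fit)   # estimate of theta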
@@ -4729,6 +4760,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
   list(maxit = maxit)
 }
 
+
  loglaplace1 <- function(tau = NULL,
                      llocation = "loge",
                      ilocation = NULL,
@@ -4814,7 +4846,7 @@ loglaplace1.control <- function(maxit = 300, ...) {
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel.locat ,
                            constraints = constraints, apply.int = FALSE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel.locat = parallel.locat,
             .Scale.arg = Scale.arg, .zero = zero ))),
   initialize = eval(substitute(expression({
@@ -5087,9 +5119,9 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
             "Quantiles:  location", "\n",
             "Variance:   zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
   constraints = eval(substitute(expression({
-      .ZERO <- .zero
-      if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
-      .PARALLEL <- .parallel.locat
+  .ZERO <- .zero
+  if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
+  .PARALLEL <- .parallel.locat
       parelHmat <- if (is.logical( .PARALLEL ) && .PARALLEL )
                   matrix(1, M/2, 1) else diag(M/2)
       scaleHmat <- if (is.logical( .eq.scale ) && .eq.scale )
@@ -5100,7 +5132,7 @@ loglaplace2.control <- function(save.weights = TRUE, ...) {
                              bool = .PARALLEL ,
                              constraints = constraints,
                              apply.int = FALSE)
-      constraints <- cm.zero.VGAM(constraints, x, .ZERO, M)
+  constraints <- cm.zero.VGAM(constraints, x = x, .ZERO , M = M)
 
       if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
           parelHmat <- diag(M/2)
@@ -5416,7 +5448,7 @@ adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel.locat ,
                            constraints = constraints, apply.int = FALSE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .parallel.locat = parallel.locat,
             .Scale.arg = Scale.arg, .zero = zero ))),
   initialize = eval(substitute(expression({
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 3331b36..8c99499 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -745,7 +745,13 @@ Confint.nb1 <- function(nb1, level = 0.95) {
   delta0.hat <- exp(mydiff)
   (phi0.hat <- 1 + 1 / delta0.hat)  # MLE of phi0
 
-  myvcov <- vcovvlm(as(nb1, "vglm"))  # Not great; improve this!
+
+
+
+  myvcov <- vcov(as(nb1, "vglm"))  # Not great; improve this!
+
+
+
   myvec <- cbind(c(-1, 1, rep(0, len = nrow(myvcov) - 2)))
   (se.mydiff <- sqrt(t(myvec) %*%  myvcov %*%  myvec))
 
@@ -968,7 +974,12 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
       }
 
 
-      Covmat <- vcovvlm(model, dispersion = dispersion)
+
+
+      Covmat <- vcov(model, dispersion = dispersion)
+
+
+
       covmat <- Covmat[unlist(coef.indices),
                        unlist(coef.indices), drop = FALSE]
       covmat <- if (M > 1) {
@@ -986,7 +997,15 @@ plota21 <- function(rrvglm2, show.plot = TRUE, nseq.a21 = 31,
         refPos <- which(coef.indices == 0)
         coef.indices <- coef.indices[-refPos]
       }
-      covmat <- vcovvlm(model, dispersion = dispersion)
+
+
+
+
+      covmat <- vcov(model, dispersion = dispersion)
+
+
+
+
       covmat <- covmat[coef.indices, coef.indices, drop = FALSE]
 
       if (is.null(estimates))
diff --git a/R/family.robust.R b/R/family.robust.R
index 84a0d52..88afece 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -217,7 +217,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
             namesof("scale",     lscale,  earg = escale), "\n\n",
             "Mean: location"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero, M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.rrr.R b/R/family.rrr.R
index dff26b1..4afedab 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -363,36 +363,36 @@ valt.1iter <- function(x, z, U, Hlist, C, control,
         zedd[,Index.corner] <- zedd[,Index.corner] - latvar.mat
 
     if (nice31 && MSratio == 1) {
-        fit <- list(mat.coef = NULL, fitted.values = NULL, ResSS = 0)
-
-        clist2 <- NULL # for vlm.wfit
-
-        i5 <- rep(0, length.out = MSratio)
-        for (ii in 1:NOS) {
-          i5 <- i5 + 1:MSratio
-
-            tmp100 <- vlm.wfit(xmat = new.latvar.model.matrix,
-                               zedd[, i5, drop = FALSE],
-                               Hlist = clist2,
-                               U = U[i5,, drop = FALSE],
-                               matrix.out = TRUE,
-                               is.vlmX = FALSE, ResSS = TRUE,
-                               qr = FALSE,
-                               Eta.range = control$Eta.range,
-                               xij = control$xij,
-                               lp.names = lp.names[i5])
-            fit$ResSS <- fit$ResSS + tmp100$ResSS
-            fit$mat.coef <- cbind(fit$mat.coef, tmp100$mat.coef)
-            fit$fitted.values <- cbind(fit$fitted.values,
-                                       tmp100$fitted.values)
-        }
+      fit <- list(mat.coef = NULL, fitted.values = NULL, ResSS = 0)
+
+      clist2 <- NULL # for vlm.wfit
+
+      i5 <- rep(0, length.out = MSratio)
+      for (ii in 1:NOS) {
+        i5 <- i5 + 1:MSratio
+
+        tmp100 <- vlm.wfit(xmat = new.latvar.model.matrix,
+                           zedd[, i5, drop = FALSE],
+                           Hlist = clist2,
+                           U = U[i5,, drop = FALSE],
+                           matrix.out = TRUE,
+                           is.vlmX = FALSE, ResSS = TRUE,
+                           qr = FALSE,
+                           Eta.range = control$Eta.range,
+                           xij = control$xij,
+                           lp.names = lp.names[i5])
+        fit$ResSS <- fit$ResSS + tmp100$ResSS
+        fit$mat.coef <- cbind(fit$mat.coef, tmp100$mat.coef)
+        fit$fitted.values <- cbind(fit$fitted.values,
+                                   tmp100$fitted.values)
+      }
     } else {
-        fit <- vlm.wfit(xmat = new.latvar.model.matrix,
-                       zedd, Hlist = clist2, U = U,
-                       matrix.out = TRUE,
-                       is.vlmX = FALSE, ResSS = TRUE, qr = FALSE,
-                       Eta.range = control$Eta.range,
-                       xij = control$xij, lp.names = lp.names)
+      fit <- vlm.wfit(xmat = new.latvar.model.matrix,
+                      zedd, Hlist = clist2, U = U,
+                      matrix.out = TRUE,
+                      is.vlmX = FALSE, ResSS = TRUE, qr = FALSE,
+                      Eta.range = control$Eta.range,
+                      xij = control$xij, lp.names = lp.names)
     }
     A <- if (tmp833$NoA) matrix(0, M, Rank) else
         t(fit$mat.coef[1:Rank,, drop = FALSE])
@@ -447,11 +447,11 @@ rrr.init.expression <- expression({
     if (modelno == 3 || modelno == 5) {
 
 
-        M <- 2 * ifelse(is.matrix(y), ncol(y), 1)
-          control$str0 <-
-        rrcontrol$str0 <- seq(from = 2, to = M, by = 2)  # Handles A
-          control$Dzero <-
-        rrcontrol$Dzero <- seq(from = 2, to = M, by = 2)  # Handles D
+      M <- 2 * ifelse(is.matrix(y), ncol(y), 1)
+        control$str0 <-
+      rrcontrol$str0 <- seq(from = 2, to = M, by = 2)  # Handles A
+        control$Dzero <-
+      rrcontrol$Dzero <- seq(from = 2, to = M, by = 2)  # Handles D
 
 
     }
@@ -502,7 +502,7 @@ rrr.alternating.expression <- expression({
           elts <- matrix(elts, 1, Rank)
         Dk <- m2a(elts, M = Rank)[, , 1]
         Dk <- matrix(Dk, Rank, Rank)
-        Dk <- t(Mmat) %*% Dk  %*% Mmat  # 22/8/03; Not diagonal in general
+        Dk <- t(Mmat) %*% Dk  %*% Mmat  # 20030822; Not diagonal in general.
         Dmat[kay, ] <- Dk[cbind(ind0$row.index[1:ncol(Dmat)],
                                 ind0$col.index[1:ncol(Dmat)])] 
       }
@@ -905,6 +905,7 @@ Coef.qrrvglm <-
 
 
 
+
   if (length(varI.latvar) != 1 || !is.logical(varI.latvar)) 
     stop("'varI.latvar' must be TRUE or FALSE")
   if (length(refResponse) > 1)
@@ -1362,7 +1363,7 @@ predictqrrvglm <-
       setup.smart("read", smart.prediction = object at smart.prediction)
     }
 
-    tt <- object@terms$terms  # terms(object)  # 11/8/03; object@terms$terms
+    tt <- object@terms$terms  # terms(object)  # 20030811; object@terms$terms
     X <- model.matrix(delete.response(tt), newdata,
                       contrasts = if (length(object@contrasts))
                                   object@contrasts else NULL,
@@ -1546,7 +1547,8 @@ summary.rrvglm <- function(object, correlation = FALSE,
                            h.step = 0.0001, 
                            kill.all = FALSE, omit13 = FALSE,
                            fixA = FALSE, 
-                           presid = TRUE, ...) {
+                           presid = TRUE, 
+                           nopredictors = FALSE, ...) {
 
 
 
@@ -1561,10 +1563,10 @@ summary.rrvglm <- function(object, correlation = FALSE,
       stop("bad input for 'h.step'")
 
     if (!object@control$Corner)
-        stop("this function works with corner constraints only")
+      stop("this function works with corner constraints only")
 
     if (is.null(dispersion))
-        dispersion <- object@misc$dispersion
+      dispersion <- object@misc$dispersion
 
     newobject <- as(object, "vglm")
 
@@ -1630,6 +1632,8 @@ summary.rrvglm <- function(object, correlation = FALSE,
     answer@sigma <- dispersion^0.5
 
 
+    answer@misc$nopredictors <- nopredictors  # 20150925
+
     answer
 }
 
@@ -1639,9 +1643,9 @@ summary.rrvglm <- function(object, correlation = FALSE,
 
 
 get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
-                          numerical = TRUE,
-                          fixA = FALSE, h.step = 0.0001,
-                          trace.arg = FALSE, ...) {
+                           numerical = TRUE,
+                           fixA = FALSE, h.step = 0.0001,
+                           trace.arg = FALSE, ...) {
 
 
 
@@ -1653,7 +1657,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
 
  if (!length(fit@x))
-    stop("fix@x is empty. Run rrvglm(... , x= TRUE)")
+    stop("fix@x is empty. Run rrvglm(... , x = TRUE)")

  colx1.index <- fit@control$colx1.index  # May be NULL
  colx2.index <- fit@control$colx2.index
@@ -1677,7 +1681,7 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
 
  wz <- weights(fit, type = "work")  # old: wweights(fit)  #fit@weights
  if (!length(wz))
-      stop("cannot get fit@weights")
+    stop("cannot get fit@weights")

  M <- fit@misc$M
  n <- fit@misc$n
@@ -1686,20 +1690,20 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
   theta <- c(Amat[-c(Index.corner,str0), ])
  if (fit@control$checkwz)
    wz <- checkwz(wz, M = M, trace = trace,
-                 wzepsilon = fit@control$wzepsilon)
+                  wzepsilon = fit@control$wzepsilon)
    U <- vchol(wz, M = M, n = n, silent= TRUE)
 
-  if (numerical) {
-    delct.da <- num.deriv.rrr(fit, M = M, r = Rank,
-                    x1mat = x1mat, x2mat = x2mat, p2 = p2, 
-                    Index.corner, Aimat = Amat,
-                    B1mat = B1mat, Cimat = Cmat,
-                    h.step = h.step,
-                    colx2.index = colx2.index,
-                    xij = fit@control$xij,
-                    str0 = str0)
+  delct.da <- if (numerical) {
+    num.deriv.rrr(fit, M = M, r = Rank,
+                  x1mat = x1mat, x2mat = x2mat, p2 = p2, 
+                  Index.corner, Aimat = Amat,
+                  B1mat = B1mat, Cimat = Cmat,
+                  h.step = h.step,
+                  colx2.index = colx2.index,
+                  xij = fit@control$xij,
+                  str0 = str0)
   } else {
-    delct.da <- dctda.fast.only(theta = theta, wz = wz,
+    dctda.fast.only(theta = theta, wz = wz,
                     U = U, zmat,
                     M = M, r = Rank, x1mat = x1mat,
                     x2mat = x2mat, p2 = p2,
@@ -1722,13 +1726,13 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
  cov2233 <- solve(sfit2233@cov.unscaled)  # Includes any intercepts
   dimnames(cov2233) <- list(d8, d8)
 
-    log.vec33 <- NULL 
-    nassign <- names(fit@constraints) 
-    choose.from <-  varassign(fit@constraints, nassign)
-    for (ii in nassign)
-      if (any(ii == names(colx2.index))) {
-        log.vec33 <- c(log.vec33, choose.from[[ii]])
-      }
+  log.vec33 <- NULL 
+  nassign <- names(fit@constraints) 
+  choose.from <-  varassign(fit@constraints, nassign)
+  for (ii in nassign)
+    if (any(ii == names(colx2.index))) {
+      log.vec33 <- c(log.vec33, choose.from[[ii]])
+    }
     cov33 <- cov2233[ log.vec33, log.vec33, drop = FALSE]  # r*p2 by r*p2
     cov23 <- cov2233[-log.vec33, log.vec33, drop = FALSE]
     cov22 <- cov2233[-log.vec33,-log.vec33, drop = FALSE]
@@ -1921,9 +1925,9 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
 
 
 dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
-                           p2, Index.corner, Aimat, B1mat, Cimat,
-                           xij = NULL,
-                           str0 = NULL) {
+                            p2, Index.corner, Aimat, B1mat, Cimat,
+                            xij = NULL,
+                            str0 = NULL) {
 
 
   if (length(str0))
@@ -1985,7 +1989,7 @@ dctda.fast.only <- function(theta, wz, U, zmat, M, r, x1mat, x2mat,
 
 
 dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
-                     intercept= TRUE, xij = NULL) {
+                      intercept = TRUE, xij = NULL) {
 
 
 
@@ -2033,29 +2037,29 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
   cbindex <- (1:M)[-Index.corner]
   resid2 <- mux22(t(wz),
                   z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
-                  upper = FALSE, as.matrix = TRUE)  # mat= TRUE,
+                  upper = FALSE, as.matrix = TRUE)  # mat = TRUE,
 
   for (s in 1:r)
-      for (tt in cbindex) {
-          fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
-                   t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
-          temp2 <- kronecker(I.col(s, r), rowSums(fred))
-
-          temp4 <- rep(0,pp)
-          for (k in 1:r) {
-              Wiak <- mux22(t(wz),
-                            matrix(Aimat[, k], nn, M, byrow = TRUE),
-                            M = M, upper = FALSE, as.matrix = TRUE)
-              wxx <- Wiak[,tt] * (if (intercept)
-                                  xmat[, -1, drop = FALSE] else
-                                  xmat)
-              blocki <- (if (intercept)
-                        t(xmat[, -1, drop = FALSE]) else
-                        t(xmat)) %*% wxx
-              temp4 <- temp4 + blocki %*% Cimat[, k]
-          }
-          dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
+    for (tt in cbindex) {
+      fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
+               t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
+      temp2 <- kronecker(I.col(s, r), rowSums(fred))
+
+      temp4 <- rep(0,pp)
+      for (k in 1:r) {
+        Wiak <- mux22(t(wz),
+                      matrix(Aimat[, k], nn, M, byrow = TRUE),
+                      M = M, upper = FALSE, as.matrix = TRUE)
+        wxx <- Wiak[,tt] * (if (intercept)
+                            xmat[, -1, drop = FALSE] else
+                            xmat)
+        blocki <- (if (intercept)
+                  t(xmat[, -1, drop = FALSE]) else
+                  t(xmat)) %*% wxx
+        temp4 <- temp4 + blocki %*% Cimat[, k]
       }
+      dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
+    }
   ans1 <- dc.da[,,cbindex,, drop = FALSE]  # pp x r x (M-r) x r 
   ans1 <- aperm(ans1, c(2, 1, 3, 4))   # r x pp x (M-r) x r 
 
@@ -2064,12 +2068,12 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
 
   detastar.da <- array(0,c(M,r,r,nn))
   for (s in 1:r)
-      for (j in 1:r) {
-          t1 <- t(dc.da[,j,,s])
-          t1 <- matrix(t1, M, pp)
-          detastar.da[,j,s,] <- t1 %*% (if (intercept)
-                                t(xmat[,-1, drop = FALSE]) else t(xmat))
-      }
+    for (j in 1:r) {
+      t1 <- t(dc.da[,j,,s])
+      t1 <- matrix(t1, M, pp)
+      detastar.da[,j,s,] <- t1 %*% (if (intercept)
+                            t(xmat[,-1, drop = FALSE]) else t(xmat))
+    }
 
   etastar <- (if (intercept) xmat[,-1, drop = FALSE] else xmat) %*% Cimat
   eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
@@ -2082,7 +2086,7 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
   AtWi <- array(t(AtWi), c(r, M, nn))
   for (ss in 1:r) {
     temp90 <- (m2a(t(colSums(etastar[, ss]*wz)), M = M))[, , 1]  # MxM
-    temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
+    temp92 <- array(detastar.da[,,ss,], c(M, r, nn))
     temp93 <- mux7(temp92, AtWi)
     temp91 <- rowSums(temp93, dims = 2)  # M x M
     deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
@@ -2097,8 +2101,8 @@ dcda.fast <- function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
 
 
 rrr.deriv.ResSS <- function(theta, wz, U, z, M, r, xmat,
-                         pp, Index.corner, intercept = TRUE,
-                         xij = NULL) {
+                            pp, Index.corner, intercept = TRUE,
+                            xij = NULL) {
 
   Amat <- matrix(as.numeric(NA), M, r)
   Amat[Index.corner,] <- diag(r)
@@ -2158,27 +2162,27 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
       stop("Cimat wrong shape")
 
   fred <- kronecker(matrix(1, 1,r),
-                   if (intercept) xmat[, -1, drop = FALSE] else xmat)
+                    if (intercept) xmat[, -1, drop = FALSE] else xmat)
   fred <- kronecker(fred, matrix(1, M, 1))
   barney <- kronecker(Aimat, matrix(1, 1, pp))
   barney <- kronecker(matrix(1, nn, 1), barney)
 
   temp <- array(t(barney*fred), c(r*pp, M, nn))
   temp <- aperm(temp, c(2, 1, 3))
-  temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
+  temp <- mux5(wz, temp, M = M, matrix.arg = TRUE)
   temp <- m2a(temp, M = r * pp)  # Note M != M here!
   G <- solve(rowSums(temp, dims = 2))
 
   dc.da <- array(NA,c(pp,r,r,M))
   cbindex <- (1:M)[-Index.corner]
   resid2 <- mux22(t(wz), z - matrix(int.vec, nn, M, byrow = TRUE),
-                 M = M,
-                 upper = FALSE, as.matrix = TRUE)
+                  M = M,
+                  upper = FALSE, as.matrix = TRUE)
 
   for (s in 1:r)
     for (tt in cbindex) {
-      fred <- (if (intercept) t(xmat[,-1, drop = FALSE]) else
-               t(xmat)) * matrix(resid2[,tt],pp,nn,byrow = TRUE) 
+      fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
+               t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE) 
       temp2 <- kronecker(I.col(s, r), rowSums(fred))
 
       temp4 <- rep(0,pp)
@@ -2187,10 +2191,10 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
                      matrix(Aimat[, k], nn, M, byrow = TRUE),
                      M = M, upper = FALSE, as.matrix = TRUE)
         wxx <- Wiak[,tt] * (if (intercept)
-                           xmat[, -1, drop = FALSE] else xmat)
+                            xmat[, -1, drop = FALSE] else xmat)
         blocki <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
                   t(xmat)) %*% wxx 
-        temp4 <- temp4 + blocki %*% Cimat[,k]
+        temp4 <- temp4 + blocki %*% Cimat[, k]
       }
       dc.da[,,s,tt] <- G %*% (temp2 - 2 * kronecker(I.col(s, r), temp4))
     }
@@ -2237,7 +2241,7 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
     ans[,s] <- a1 + a2 + a3
   }
 
-  ans <- -2 * c(ans[cbindex,])
+  ans <- -2 * c(ans[cbindex, ])
 
   ans
 }
@@ -2250,8 +2254,9 @@ rrr.deriv.gradient.fast <- function(theta, wz, U, z, M, r, xmat,
 
 
 vellipse <- function(R, ratio = 1, orientation = 0,
-                    center = c(0, 0), N = 300) {
-  if (length(center) != 2) stop("center must be of length 2")
+                     center = c(0, 0), N = 300) {
+  if (length(center) != 2)
+    stop("argument 'center' must be of length 2")
   theta <-       2*pi*(0:N)/N
   x1 <-       R*cos(theta)
   y1 <- ratio*R*sin(theta)
@@ -2284,7 +2289,7 @@ biplot.qrrvglm <- function(x, ...) {
           chull.arg = FALSE, clty = 2, ccol = par()$col, clwd = par()$lwd,
               cpch = "   ",
           C = FALSE,
-              OriginC = c("origin","mean"),
+              OriginC = c("origin", "mean"),
               Clty = par()$lty, Ccol = par()$col, Clwd = par()$lwd,
               Ccex = par()$cex, Cadj.arg = -0.1, stretchC = 1, 
           sites = FALSE, spch = NULL, scol = par()$col, scex = par()$cex,
@@ -2307,7 +2312,7 @@ biplot.qrrvglm <- function(x, ...) {
 
     Rank <- object@control$Rank
     if (Rank > 2)
-        stop("can only handle rank 1 or 2 models")
+      stop("can only handle rank 1 or 2 models")
     M <- object@misc$M
     NOS <- ncol(object@y)
     MSratio <- M / NOS  # First value is g(mean) = quadratic form in latvar
@@ -2333,7 +2338,7 @@ biplot.qrrvglm <- function(x, ...) {
                 if ( y && type == "fitted.values")
                 object@y else r.curves,
                 type = "n", xlab = xlab, ylab = ylab, ...)
-      } else { # Rank == 2
+      } else {  # Rank == 2
         matplot(c(Coef.list@Optimum[1, ], nustar[, 1]),
                 c(Coef.list@Optimum[2, ], nustar[, 2]),
                 type = "n", xlab = xlab, ylab = ylab, ...)
@@ -2395,22 +2400,22 @@ biplot.qrrvglm <- function(x, ...) {
             lty = clty, col = ccol, lwd = clwd)
     }
     if (length(ellipse)) {
-        ellipse.temp <- if (ellipse > 0) ellipse else 0.95
-        if (ellipse < 0 && (!object@control$eq.tolerances || varI.latvar))
-          stop("an equal-tolerances assumption and 'varI.latvar = FALSE' ",
-               "is needed for 'ellipse' < 0")
-        if ( check.ok ) {
-          colx1.index <- object@control$colx1.index
-          if (!(length(colx1.index) == 1 &&
-                names(colx1.index) == "(Intercept)"))
-            stop("can only plot ellipses for intercept models only")
-        }
-        for (i in 1:ncol(r.curves)) {
-          cutpoint <- object@family@linkfun( if (Absolute) ellipse.temp
-                          else Coef.list@Maximum[i] * ellipse.temp,
-                          extra = object@extra)
-          if (MSratio > 1) 
-            cutpoint <- cutpoint[1, 1]
+      ellipse.temp <- if (ellipse > 0) ellipse else 0.95
+      if (ellipse < 0 && (!object@control$eq.tolerances || varI.latvar))
+        stop("an equal-tolerances assumption and 'varI.latvar = FALSE' ",
+             "is needed for 'ellipse' < 0")
+      if ( check.ok ) {
+        colx1.index <- object@control$colx1.index
+        if (!(length(colx1.index) == 1 &&
+              names(colx1.index) == "(Intercept)"))
+          stop("can only plot ellipses for intercept models only")
+      }
+      for (i in 1:ncol(r.curves)) {
+        cutpoint <- object@family@linkfun( if (Absolute) ellipse.temp
+                        else Coef.list@Maximum[i] * ellipse.temp,
+                        extra = object@extra)
+        if (MSratio > 1) 
+          cutpoint <- cutpoint[1, 1]
 
            cutpoint <- object@family@linkfun(Coef.list@Maximum[i],
                        extra = object@extra) - cutpoint
@@ -2418,8 +2423,8 @@ biplot.qrrvglm <- function(x, ...) {
            Mmat <- diag(rep(ifelse(object@control$Crow1positive, 1, -1),
                             length.out = Rank))
            etoli <- eigen(t(Mmat) %*% Coef.list@Tolerance[,,i] %*% Mmat)
-            A <- ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
-            B <- ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
+            A <- ifelse(etoli$val[1]>0, sqrt(2*cutpoint*etoli$val[1]), Inf)
+            B <- ifelse(etoli$val[2]>0, sqrt(2*cutpoint*etoli$val[2]), Inf)
             if (ellipse < 0)
               A <- B <- -ellipse / 2
 
@@ -2430,7 +2435,7 @@ biplot.qrrvglm <- function(x, ...) {
             if (all(is.finite(c(A,B))))
               lines(vellipse(R = 2*A, ratio = B/A,
                              orientation = theta.angle,
-                             center = Coef.list@Optimum[,i],
+                             center = Coef.list@Optimum[, i],
                              N = egrid),
                     lwd = elwd[i], col =ecol[i], lty = elty[i])
             }
@@ -2469,35 +2474,35 @@ biplot.qrrvglm <- function(x, ...) {
 
 
 lvplot.rrvglm <- function(object,
-                         A = TRUE,
-                         C = TRUE,
-                         scores = FALSE, show.plot = TRUE,
-                         groups = rep(1, n),
-                         gapC = sqrt(sum(par()$cxy^2)), scaleA = 1,
-                         xlab = "Latent Variable 1",
-                         ylab = "Latent Variable 2",
+                          A = TRUE,
+                          C = TRUE,
+                          scores = FALSE, show.plot = TRUE,
+                          groups = rep(1, n),
+                          gapC = sqrt(sum(par()$cxy^2)), scaleA = 1,
+                          xlab = "Latent Variable 1",
+                          ylab = "Latent Variable 2",
          Alabels= if (length(object@misc$predictors.names))
          object@misc$predictors.names else paste("LP", 1:M, sep = ""),
-                         Aadj = par()$adj,
-                         Acex = par()$cex,
-                         Acol = par()$col,
-                         Apch = NULL,
-                         Clabels=rownames(Cmat),
-                         Cadj = par()$adj,
-                         Ccex = par()$cex,
-                         Ccol = par()$col, 
-                         Clty = par()$lty, 
-                         Clwd = par()$lwd, 
-                         chull.arg = FALSE,
-                         ccex = par()$cex,
-                         ccol = par()$col,
-                         clty = par()$lty,
-                         clwd = par()$lwd,
-                         spch = NULL,
-                         scex = par()$cex,
-                         scol = par()$col,
-                         slabels=rownames(x2mat),
-                         ...) {
+                          Aadj = par()$adj,
+                          Acex = par()$cex,
+                          Acol = par()$col,
+                          Apch = NULL,
+                          Clabels=rownames(Cmat),
+                          Cadj = par()$adj,
+                          Ccex = par()$cex,
+                          Ccol = par()$col, 
+                          Clty = par()$lty, 
+                          Clwd = par()$lwd, 
+                          chull.arg = FALSE,
+                          ccex = par()$cex,
+                          ccol = par()$col,
+                          clty = par()$lty,
+                          clwd = par()$lwd,
+                          spch = NULL,
+                          scex = par()$cex,
+                          scol = par()$col,
+                          slabels = rownames(x2mat),
+                          ...) {
 
 
     if (object@control$Rank != 2 && show.plot)
@@ -2689,7 +2694,8 @@ show.Coef.rrvglm <- function(x, ...) {
     setGeneric("biplot", function(x, ...) standardGeneric("biplot")) 
 
 
-setMethod("Coef", "qrrvglm", function(object, ...) Coef.qrrvglm(object, ...))
+setMethod("Coef", "qrrvglm", function(object, ...)
+          Coef.qrrvglm(object, ...))
 
 
 
@@ -2765,6 +2771,9 @@ show.summary.qrrvglm <- function(x, ...) {
 
 
 
+
+
+
 setMethod("summary", "qrrvglm",
           function(object, ...)
           summary.qrrvglm(object, ...))
diff --git a/R/family.survival.R b/R/family.survival.R
index 641187b..ce55513 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -41,7 +41,7 @@
             "\n",
             "Variance: sd^2"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }) , list( .zero = zero))),
 
 
@@ -315,7 +315,7 @@ rbisa <- function(n, scale = 1, shape) {
             namesof("scale", lscale, earg = escale, tag = TRUE), "; ",
             namesof("shape", lshape, earg = eshape, tag = TRUE)),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }) , list( .zero = zero))),
   initialize = eval(substitute(expression({
     if (ncol(y <- cbind(y)) != 1)
diff --git a/R/family.ts.R b/R/family.ts.R
index 27aa4b1..695683e 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -245,7 +245,8 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
   }),
   vfamily = "rrar",
   deriv = expression({
-    temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
+    temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
+                       aa, uu, nn, new.coeffs)
     X.vlm.save <- temp8$UU %*% temp8$Ht 
 
     extra$coeffs <- new.coeffs
@@ -260,7 +261,7 @@ rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
       resmat <- resmat - y.save[tt - ii, , drop = FALSE] %*%
                          t(Ak1 %*% Di %*% t(Ci))
     }
-    omegahat <- (t(resmat) %*% resmat) / n # MM x MM
+    omegahat <- (t(resmat) %*% resmat) / n  # MM x MM
     omegainv <- solve(omegahat)
 
     omegainv <- solve(omegahat)
@@ -319,7 +320,7 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
     tt.index <- (1 + plag):nrow(x)
     p.lm <- ncol(x)
 
-    copy.X.vlm <- TRUE   # x matrix changes at each iteration 
+    copy.X.vlm <- TRUE  # x matrix changes at each iteration 
 
     if ( .link == "logit"   || .link == "probit" ||
          .link == "cloglog" || .link == "cauchit") {
@@ -331,9 +332,9 @@ vglm.garma.control <- function(save.weights = TRUE, ...) {
     }
 
 
-    x.save <- x # Save the original
-    y.save <- y # Save the original
-    w.save <- w # Save the original
+    x.save <- x  # Save the original
+    y.save <- y  # Save the original
+    w.save <- w  # Save the original
 
     new.coeffs <- .coefstart  # Needed for iter = 1 of @weight
     new.coeffs <- if (length(new.coeffs))
@@ -538,6 +539,7 @@ setMethod("show", "Coef.rrar",
            ishrinkage = 0.9, 
            type.likelihood = c("exact", "conditional"),
            var.arg = FALSE,  # TRUE,
+           nodrift = FALSE,  # TRUE,
            almost1 = 0.99,
            zero = c(-2, -3)) {
   imethod <- 1
@@ -562,6 +564,10 @@ setMethod("show", "Coef.rrar",
 
 
 
+  if (!is.logical(nodrift) ||
+      length(nodrift) != 1)
+    stop("argument 'nodrift' must be a single logical")
+
   if (!is.logical(var.arg) ||
       length(var.arg) != 1)
     stop("argument 'var.arg' must be a single logical")
@@ -591,31 +597,35 @@ setMethod("show", "Coef.rrar",
 
 
   new("vglmff", 
-  blurb = c("Three-parameter autoregressive process of order-1\n\n",
-            "Links:        ",
-            namesof("drift", lsmn, earg = esmn), ", ",
+  blurb = c(ifelse(nodrift, "Two", "Three"),
+            "-parameter autoregressive process of order-1\n\n",
+            "Links:       ",
+            if (nodrift) "" else
+              paste(namesof("drift", lsmn, earg = esmn), ", ", sep = ""),
             namesof(n.sc     , l.sc, earg = e.sc), ", ",
             namesof("ARcoef1", lrho, earg = erho), "\n",
             "Model:       Y_t = drift + rho * Y_{t-1} + error_{t},", "\n",
             "             where 'error_{2:n}' ~ N(0, sigma^2) independently",
+            if (nodrift) ", and drift = 0" else "",
                           "\n",
             "Mean:        drift / (1 - rho)", "\n",
             "Correlation: rho = ARcoef1", "\n",
             "Variance:    sd^2 / (1 - rho^2)"),
   constraints = eval(substitute(expression({
 
-    M1 <- 3
+    M1 <- 3 - .nodrift
     dotzero <- .zero
     eval(negzero.expression.VGAM)
-  }), list( .zero = zero ))),
+  }), list( .zero = zero,
+            .nodrift = nodrift ))),
   infos = eval(substitute(function(...) {
-    list(M1 = 3, 
+    list(M1 = 3 - nodrift, 
          Q1 = 1, 
          expected = TRUE, 
          multipleResponse = TRUE,
          type.likelihood = .type.likelihood ,
-         ldrift = .lsmn ,
-         edrift = .esmn ,
+         ldrift = if ( .nodrift) NULL else .lsmn ,
+         edrift = if ( .nodrift) NULL else .esmn ,
          lvar = .lvar ,
          lsd  = .lsdv ,
          evar = .evar ,
@@ -627,9 +637,10 @@ setMethod("show", "Coef.rrar",
   }, list( .lsmn = lsmn, .lvar = lvar, .lsdv = lsdv, .lrho = lrho,
            .esmn = esmn, .evar = evar, .esdv = esdv, .erho = erho,
            .type.likelihood = type.likelihood,
+           .nodrift = nodrift,
            .almost1 = almost1, .zero = zero))),
   initialize = eval(substitute(expression({
-    extra$M1 <- M1 <- 3
+    extra$M1 <- M1 <- 3 - .nodrift
     check <- w.y.check(w = w, y = y,
                        Is.positive.y = FALSE,
                        ncol.w.max = Inf,
@@ -647,7 +658,8 @@ setMethod("show", "Coef.rrar",
     M <- M1*NOS
     var.names <- param.names("var",     NOS)
     sdv.names <- param.names("sd",      NOS)
-    smn.names <- param.names("drift", NOS)
+    smn.names <- if ( .nodrift ) NULL else
+                 param.names("drift",   NOS)
     rho.names <- param.names("rho",     NOS)
 
     mynames1 <- smn.names
@@ -655,7 +667,8 @@ setMethod("show", "Coef.rrar",
     mynames3 <- rho.names
 
     predictors.names <-
-      c(namesof(smn.names, .lsmn , earg = .esmn , tag = FALSE),
+      c(if ( .nodrift ) NULL else
+        namesof(smn.names, .lsmn , earg = .esmn , tag = FALSE),
         if ( .var.arg ) 
         namesof(var.names, .lvar , earg = .evar , tag = FALSE) else
         namesof(sdv.names, .lsdv , earg = .esdv , tag = FALSE),
@@ -689,7 +702,8 @@ setMethod("show", "Coef.rrar",
       }  
 
       etastart <-
-        cbind(theta2eta(init.smn, .lsmn , earg = .esmn ),
+        cbind(if ( .nodrift ) NULL else
+              theta2eta(init.smn, .lsmn , earg = .esmn ),
               if ( .var.arg ) 
               theta2eta(init.var, .lvar , earg = .evar ) else
               theta2eta(init.sdv, .lsdv , earg = .esdv ),
@@ -701,17 +715,20 @@ setMethod("show", "Coef.rrar",
             .ismn = ismn, .irho = irho, .isdv = isd , .ivar = ivar,
             .type.likelihood = type.likelihood, .ishrinkage = ishrinkage,
             .var.arg = var.arg,
+            .nodrift = nodrift,
             .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    M1  <- 3
+    M1  <- 3 - .nodrift
     NOS <- ncol(eta)/M1
-    ar.smn <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
-                        .lsmn , earg = .esmn )
+    ar.smn <- if ( .nodrift ) 0 else
+      eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+                .lsmn , earg = .esmn )
     ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                         .lrho , earg = .erho )
     ar.smn / (1 - ar.rho)
   }, list ( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
             .var.arg = var.arg, .type.likelihood = type.likelihood,
+            .nodrift = nodrift,
             .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
   last = eval(substitute(expression({
     if (any(abs(ar.rho) > 1)) 
@@ -728,10 +745,12 @@ setMethod("show", "Coef.rrar",
     names(misc$link) <-
     names(misc$earg) <- temp.names
     for (ii in 1:ncoly) {
-      misc$link[ M1*ii-2 ] <- .lsmn
+      if ( !( .nodrift ))
+        misc$link[ M1*ii-2 ] <- .lsmn
       misc$link[ M1*ii-1 ] <- if ( .var.arg ) .lvar else .lsdv
       misc$link[ M1*ii   ] <- .lrho
-      misc$earg[[M1*ii-2]] <- .esmn
+      if ( !( .nodrift ))
+        misc$earg[[M1*ii-2]] <- .esmn
       misc$earg[[M1*ii-1]] <- if ( .var.arg ) .evar else .esdv
       misc$earg[[M1*ii  ]] <- .erho
     }
@@ -742,17 +761,19 @@ setMethod("show", "Coef.rrar",
     misc$expected <- TRUE
     misc$imethod <- .imethod
     misc$multipleResponses <- TRUE
+    misc$nodrift <- .nodrift
 
 
   }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
             .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
             .irho = irho, .isdv = isd , .ivar = ivar,
+            .nodrift = nodrift,
             .var.arg = var.arg, .type.likelihood = type.likelihood,
             .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals= FALSE, eta, 
              extra = NULL, summation = TRUE) {
-      M1  <- 3
+      M1  <- 3 - .nodrift
       NOS <- ncol(eta)/M1
 
       if ( .var.arg ) {
@@ -763,8 +784,9 @@ setMethod("show", "Coef.rrar",
                             .lsdv , earg = .esdv )
         ar.var <- ar.sdv^2
       }  
-      ar.smn <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
-                          .lsmn , earg = .esmn )
+      ar.smn <- if ( .nodrift ) 0 else
+        eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+                  .lsmn , earg = .esmn )
       ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                           .lrho , earg = .erho )
     
@@ -788,6 +810,7 @@ setMethod("show", "Coef.rrar",
           
     }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
              .var.arg = var.arg, .type.likelihood = type.likelihood,
+             .nodrift = nodrift,
              .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
 
   vfamily = c("AR1"),
@@ -802,7 +825,7 @@ setMethod("show", "Coef.rrar",
         warning("ignoring prior weights")
       eta <- predict(object)
       fva <- fitted(object)      
-      M1  <- 3
+      M1  <- 3 - .nodrift
       NOS <- ncol(eta)/M1
 
       if ( .var.arg ) {
@@ -813,8 +836,9 @@ setMethod("show", "Coef.rrar",
                             .lsdv , earg = .esdv )
         ar.var <- ar.sdv^2
       }  
-      ar.smn <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
-                          .lsmn , earg = .esmn )
+      ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
+        eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+                  .lsmn , earg = .esmn )
       ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                           .lrho , earg = .erho )
 
@@ -831,13 +855,14 @@ setMethod("show", "Coef.rrar",
     ans
   }, list( .lsmn = lsmn, .lrho = lrho , .lsdv = lsdv, .lvar = lvar ,
            .var.arg = var.arg, .type.likelihood = type.likelihood,
+           .nodrift = nodrift,
            .esmn = esmn, .erho = erho , .esdv = esdv, .evar = evar ))),
 
 
 
 
   deriv = eval(substitute(expression({
-    M1  <- 3
+    M1  <- 3 - .nodrift
     NOS <- ncol(eta)/M1
     ncoly <- ncol(as.matrix(y))
     
@@ -850,8 +875,9 @@ setMethod("show", "Coef.rrar",
       ar.var <- ar.sdv^2
     }  
     
-    ar.smn <- eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
-                        .lsmn , earg = .esmn )
+    ar.smn <- if ( .nodrift ) matrix(0, n, NOS) else
+      eta2theta(eta[, M1*(1:NOS) - 2, drop = FALSE],
+                .lsmn , earg = .esmn )
     ar.rho <- eta2theta(eta[, M1*(1:NOS)    , drop = FALSE],
                         .lrho , earg = .erho )
 
@@ -880,7 +906,8 @@ setMethod("show", "Coef.rrar",
                     ar.rho[1, ] / temp5[1, ]
     if ( .var.arg ) {
       dl.dvar[1, ] <- -0.5 / ar.var[1, ] +
-                       0.5 *  temp5[1, ] * ((y[1, ] - mu[1, ]) / ar.var[1, ])^2
+                       0.5 *  temp5[1, ] *
+                       ((y[1, ] - mu[1, ]) / ar.var[1, ])^2
     } else {
       dl.dsdv[1, ] <- -1 / ar.sdv[1, ] +
                        temp5[1, ] * (y[1, ] - mu[1, ])^2 / (ar.sdv[1, ])^3
@@ -897,7 +924,7 @@ setMethod("show", "Coef.rrar",
       dsdv.deta <- dtheta.deta(ar.sdv, .lsdv , earg = .esdv )
     }
     myderiv <-
-      c(w) * cbind(dl.dsmn * dsmn.deta,
+      c(w) * cbind(if ( .nodrift ) NULL else dl.dsmn * dsmn.deta,
                    if ( .var.arg ) dl.dvar * dvar.deta else
                                    dl.dsdv * dsdv.deta,
                    dl.drho * drho.deta)
@@ -905,24 +932,29 @@ setMethod("show", "Coef.rrar",
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lsmn = lsmn, .lrho = lrho, .lsdv = lsdv, .lvar = lvar,
             .esmn = esmn, .erho = erho, .esdv = esdv, .evar = evar,
+            .nodrift = nodrift,
             .var.arg = var.arg, .type.likelihood = type.likelihood ))),
   weight = eval(substitute(expression({
     if ( .var.arg ) {
       ned2l.dvar    <- 0.5 / ar.var^2
       ned2l.drhovar <-  matrix(0, n, ncoly)
-      ned2l.drhovar[1, ] <- .almost1 * ar.rho[1, ] / (ar.var[1, ] * temp5[1, ])
+      ned2l.drhovar[1, ] <- .almost1 * ar.rho[1, ] / (ar.var[1, ] *
+                                                      temp5[1, ])
     } else {
       ned2l.dsdv    <- 2 / ar.var
       ned2l.drhosdv <-  matrix(0, n, ncoly)
       ned2l.drhosdv[1, ] <- 2 *
-                            .almost1 * ar.rho[1, ] / (ar.sdv[1, ] * temp5[1, ])
+                            .almost1 * ar.rho[1, ] / (ar.sdv[1, ] *
+                                                      temp5[1, ])
     }
     
-    ned2l.dsmn <- 1 / ar.var
-    ned2l.dsmn[1, ] <- (1 + ar.rho[1, ]) / ((1 - ar.rho[1, ]) * ar.var[1, ])
-
-    ned2l.dsmnrho <- mu / ar.var
-    ned2l.dsmnrho[1, ] <- 0
+    if ( !( .nodrift )) {
+      ned2l.dsmn <- 1 / ar.var
+      ned2l.dsmn[1, ] <- (1 + ar.rho[1, ]) / ((1 - ar.rho[1, ]) *
+                                              ar.var[1, ])
+      ned2l.dsmnrho <- mu / ar.var
+      ned2l.dsmnrho[1, ] <- 0
+    }
 
     ned2l.drho <- ((    mu[-n, , drop = FALSE])^2 +
                     ar.var[-n, , drop = FALSE] /
@@ -930,24 +962,28 @@ setMethod("show", "Coef.rrar",
     ned2l.drho <- rbind(0, ned2l.drho)
     ned2l.drho[1, ]  <- 2 * (ar.rho[1, ] / temp5[1, ])^2
     
-    wz <- matrix(0, n, M + (M - 1) + (M - 2))
-    wz[, M1*(1:NOS) - 2] <- ned2l.dsmn * dsmn.deta^2
+    ncol.wz <- M + (M - 1) + ifelse( .nodrift , 0, M - 2)
+    wz <- matrix(0, n, ncol.wz)
+
+    if ( !( .nodrift ))
+      wz[, M1*(1:NOS) - 2] <- ned2l.dsmn * dsmn.deta^2
     wz[, M1*(1:NOS) - 1] <-
       if ( .var.arg ) ned2l.dvar * dvar.deta^2 else
                       ned2l.dsdv * dsdv.deta^2
     wz[, M1*(1:NOS)    ] <- ned2l.drho * drho.deta^2
 
-    wz[, M1*(1:NOS) + M + (M - 1) - M1 + 1] <- ned2l.dsmnrho *
-                                               dsmn.deta * drho.deta
+    if ( !( .nodrift ))
+      wz[, M1*(1:NOS) + M + (M - 1) - M1 + 1] <- ned2l.dsmnrho *
+                                                 dsmn.deta * drho.deta
     wz[, M1*(1:NOS) + M - 1] <-
       if ( .var.arg ) ned2l.drhovar * drho.deta * dvar.deta else
                       ned2l.drhosdv * drho.deta * dsdv.deta
 
+    wz <- w.wz.merge(w = w, wz = wz, n = n, M = ncol.wz, ndepy = NOS)
 
-    wz <- w.wz.merge(w = w, wz = wz, n = n, M = M + (M - 1) + (M - 2),
-                     ndepy = NOS)
     wz
   }), list( .var.arg = var.arg, .type.likelihood = type.likelihood,
+            .nodrift = nodrift,
             .almost1 = almost1)))
   )
 }  # End of function 'AR1'
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 17596ed..bde34c1 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -63,7 +63,7 @@
             "\n", "\n",
             "Mean:     nu*theta/(1+nu)"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     w.y.check(w, y)
@@ -422,7 +422,7 @@ rhzeta <- function(n, alpha) {
     constraints <- cm.VGAM(mycmatrix, x = x,
                            bool = .PARALLEL ,
                            constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .ZERO , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .ZERO , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
@@ -738,7 +738,7 @@ dirmul.old <- function(link = "loge", ialpha = 0.01,
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- as.matrix(y)
@@ -912,7 +912,7 @@ rdiric <- function(n, shape, dimension = NULL,
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   initialize = eval(substitute(expression({
     y <- as.matrix(y)
@@ -1542,7 +1542,7 @@ cauchy.control <- function(save.weights = TRUE, ...) {
             "Mean:     NA\n",
             "Variance: NA"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     predictors.names <- c(
@@ -2491,7 +2491,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
             namesof("mu",  lmu,  earg = emu),  ", ",
             namesof("phi", lphi, earg = ephi)),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (min(y) <= .A || max(y) >= .B)
@@ -2676,7 +2676,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
     constraints <- cm.VGAM(matrix(1, M, 1), x = x,
                            bool = .parallel ,
                            constraints, apply.int  = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 2,
@@ -2878,7 +2878,7 @@ simple.exponential <- function() {
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
                            constraints = constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 1, Q1 = 1, multipleResponses = TRUE, zero = .zero )
@@ -3004,7 +3004,7 @@ simple.exponential <- function() {
   constraints = eval(substitute(expression({
     constraints <- cm.VGAM(matrix(1, M, 1), x = x, bool = .parallel ,
                            constraints = constraints, apply.int = TRUE)
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .parallel = parallel, .zero = zero ))),
   infos = eval(substitute(function(...) {
     list(M1 = 1,
@@ -3599,7 +3599,7 @@ simple.exponential <- function() {
         dotzero <- .zero
         M1 <- 2
         eval(negzero.expression.VGAM)
-        constraints <- cm.zero.VGAM(constraints, x, z.Index, M)
+        constraints <- cm.zero.VGAM(constraints, x = x, z.Index, M)
   }), list( .zero = zero,
             .parallel = parallel, .apply.parint = apply.parint ))),
 
@@ -3635,9 +3635,9 @@ simple.exponential <- function() {
       NOS <- ncoly <- ncol(y)  # Number of species
 
 
-      temp1.names =
+      temp1.names <-
         if (NOS == 1) "mu"    else paste("mu",    1:NOS, sep = "")
-      temp2.names =
+      temp2.names <-
         if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = "")
       predictors.names <-
           c(namesof(temp1.names, .lmu ,    earg = .emu ,    tag = FALSE),
@@ -3657,9 +3657,9 @@ simple.exponential <- function() {
         init.shape <- matrix(1.0, n, NOS)
         mymu <- y # + 0.167 * (y == 0)  # imethod == 1 (the default)
         if ( .imethod == 2) {
-                for (ii in 1:ncol(y)) {
-                    mymu[, ii] <- weighted.mean(y[, ii], w = w[, ii])
-                }
+            for (ii in 1:ncol(y)) {
+              mymu[, ii] <- weighted.mean(y[, ii], w = w[, ii])
+            }
         }
         for (spp in 1:NOS) {
           junk <- lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
@@ -3772,9 +3772,9 @@ simple.exponential <- function() {
     NOS <- ncol(eta) / M1
 
     mymu  <- eta2theta(eta[, M1 * (1:NOS) - 1],
-                      .lmu ,    earg = .emu    )
+                       .lmu ,    earg = .emu    )
     shape <- eta2theta(eta[, M1 * (1:NOS)],
-                      .lshape , earg = .eshape )
+                       .lshape , earg = .eshape )
 
     dl.dmu <- shape * (y / mymu - 1) / mymu
     dl.dshape <- log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
@@ -3784,21 +3784,20 @@ simple.exponential <- function() {
     dshape.deta <- dtheta.deta(shape, .lshape , earg = .eshape )
 
     myderiv <- c(w) * cbind(dl.dmu    * dmu.deta,
-                           dl.dshape * dshape.deta)
+                            dl.dshape * dshape.deta)
     myderiv[, interleave.VGAM(M, M = M1)]
   }), list( .lmu = lmu, .lshape = lshape,
             .emu = emu, .eshape = eshape))),
   weight = eval(substitute(expression({
     ned2l.dmu2 <- shape / (mymu^2)
     ned2l.dshape2 <- trigamma(shape) - 1 / shape
-    wz <- matrix(as.numeric(NA), n, M)  # 2 = M; diagonal!
+    wz <- matrix(as.numeric(NA), n, M)  # 2 = M1; diagonal!
 
     wz[, M1*(1:NOS)-1] <- ned2l.dmu2 * dmu.deta^2
     wz[, M1*(1:NOS)  ] <- ned2l.dshape2 * dshape.deta^2
 
 
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-
   }), list( .lmu = lmu ))))
 
 
@@ -4096,7 +4095,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
            probs.y = 0.75,
            nsimEIM = 250, cutoff.prob = 0.995,  # Maxiter = 5000,
            max.qnbinom = 1000,
-           max.chunk.Mb = 20,  # max.memory = Inf is allowed
+           max.chunk.MB = 20,  # max.memory = Inf is allowed
            deviance.arg = FALSE, imethod = 1,
            gsize = exp((-4):4),
            parallel = FALSE,
@@ -4350,7 +4349,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
       misc$earg[[M1*ii  ]] <- .esize
     }
 
-    misc$max.chunk.Mb <- .max.chunk.Mb
+    misc$max.chunk.MB <- .max.chunk.MB
     misc$cutoff.prob <- .cutoff.prob
     misc$imethod <- .imethod 
     misc$nsimEIM <- .nsimEIM
@@ -4360,7 +4359,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
   }), list( .lmuuu = lmuuu, .lsize = lsize,
             .emuuu = emuuu, .esize = esize,
             .cutoff.prob = cutoff.prob,  # .min.size = min.size,
-            .max.chunk.Mb = max.chunk.Mb,
+            .max.chunk.MB = max.chunk.MB,
             .nsimEIM = nsimEIM,
             .ishrinkage = ishrinkage,
             .imethod = imethod ))),
@@ -4508,7 +4507,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
 
     dl.dmu <- y / mu - (y + kmat) / (mu + kmat)
     dl.dk  <- digamma(y + kmat) - digamma(kmat) -
-              (y + kmat) / (mu + kmat) + 1 + log(kmat / (kmat + mu))
+              (y - mu) / (mu + kmat) + log(kmat / (kmat + mu))
 
     if ( .lmuuu == "nbcanlink")
       newemu$wrt.eta <- 1
@@ -4554,7 +4553,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
 
 
     max.qnbinom <- .max.qnbinom
-    max.chunk.Mb <- .max.chunk.Mb
+    max.chunk.MB <- .max.chunk.MB
 
 
     EIM.NB.special2 <- function(mu, size, y.max = NULL,
@@ -4588,12 +4587,13 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
 
     ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
     for (jay in 1:NOS) {
-      Q.maxs <- qnbinom(p = .cutoff.prob , mu = mu[, jay], size = kmat[, jay])
-      ind1 <- if (max.chunk.Mb > 0) (Q.maxs < max.qnbinom) else FALSE
+      Q.maxs <- qnbinom(p = .cutoff.prob , mu = mu[, jay],
+                        size = kmat[, jay])
+      ind1 <- if (max.chunk.MB > 0) (Q.maxs < max.qnbinom) else FALSE
         if ((NN <- sum(ind1)) > 0) {
-          Object.Size <- NN * 8 * max(Q.maxs) / (2^20)  # Mb; 8 bytes / double
+          Object.Size <- NN * 8 * max(Q.maxs) / (2^20)  # Mb; 8 bytes/double
           n.chunks <- if (intercept.only) 1 else
-                      max(1, ceiling( Object.Size / max.chunk.Mb))
+                      max(1, ceiling( Object.Size / max.chunk.MB))
           chunk.rows <- ceiling(NN / n.chunks)
           ind2[, jay] <- ind1  # Save this
           wind2 <- which(ind1)
@@ -4652,9 +4652,12 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
 
 
 
-    if ( .lmuuu == "nbcanlink") {
+    if ( FALSE && .lmuuu == "nbcanlink") {
       if ( iter %% 2 == 0) {
+
         wz[, M1*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
+
+
       } else {
       }
     }
@@ -4665,7 +4668,7 @@ negbinomial.control <- function(save.weights = TRUE, ...) {
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
   }), list( .cutoff.prob = cutoff.prob,
             .max.qnbinom = max.qnbinom,
-            .max.chunk.Mb = max.chunk.Mb,
+            .max.chunk.MB = max.chunk.MB,
             .lmuuu = lmuuu,
             .nsimEIM = nsimEIM ))))
 
@@ -6542,7 +6545,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
             "Mean:              mu\n",
             "Variance function: V(mu) = mu^3 * (1 - mu)^3"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (any(y <= 0.0 | y >= 1.0))
@@ -7147,7 +7150,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
             "Mean:     lambda*(1-rho)/(2*rho-1)\n",
             "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -7709,7 +7712,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
             namesof("shape",    lshape, earg = eshape), "\n\n",
             "Mean:     a + b * digamma(k)", "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -7891,7 +7894,7 @@ rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
             namesof("shape", lshape, earg = eshape), "\n", "\n",
             "Mean:     a", "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -8033,9 +8036,10 @@ dgengamma.stacy <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
   Loglik <- rep(log(0), length.out = N)
   xok <- x > 0
   if (any(xok)) {
-    zedd <- (x[xok]/scale[xok])^d[xok]
-    Loglik[xok] <- log(d[xok]) + (-d[xok]*k[xok]) * log(scale[xok]) +
-               (d[xok]*k[xok]-1) * log(x[xok]) - zedd - lgamma(k[xok])
+    zedd <- (x[xok]/scale[xok])^(d[xok])
+    Loglik[xok] <- log(d[xok]) + (-d[xok] * k[xok]) * log(scale[xok]) +
+                   (d[xok] * k[xok]-1) * log(x[xok]) - zedd -
+                   lgamma(k[xok])
   }
 
 
@@ -8089,9 +8093,15 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
 }
 
 
+
  gengamma.stacy <-
   function(lscale = "loge", ld = "loge", lk = "loge",
-           iscale = NULL, id = NULL, ik = NULL, zero = NULL) {
+           iscale = NULL, id = NULL, ik = NULL,
+           gscale    = exp(-5:5),
+           gshape1.d = exp(-5:5),
+           gshape2.k = exp(-5:5),
+           zero = NULL) {
+
 
   lscale <- as.list(substitute(lscale))
   escale <- link2list(lscale)
@@ -8127,14 +8137,20 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
             namesof("k", lk, earg = ek), "\n", "\n",
             "Mean:     b * gamma(k+1/d) / gamma(k)", "\n"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
+    temp5 <-
     w.y.check(w = w, y = y,
               Is.positive.y = TRUE,
               ncol.w.max = 1,
-              ncol.y.max = 1)
+              ncol.y.max = 1,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
 
 
 
@@ -8144,32 +8160,71 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
         namesof("k", .lk , earg = .ek , tag = FALSE))
 
 
+    NOS <- 1  # For now
+
+
+
     if (!length(etastart)) {
-      b.init <- if (length( .iscale ))
-          rep( .iscale , length.out = length(y)) else {
-          rep(mean(y^2) / mean(y), length.out = length(y))
-      }
-      k.init <- if (length( .ik ))
-          rep( .ik , length.out = length(y)) else {
-          rep(mean(y) / b.init, length.out = length(y))
-      }
-      d.init <- if (length( .id ))
-          rep( .id , length.out = length(y)) else {
-          rep(digamma(k.init) / mean(log(y / b.init)),
-              length.out = length(y))
-      }
-        etastart <-
-          cbind(theta2eta(b.init, .lscale , earg = .escale ),
-                theta2eta(d.init, .ld , earg = .ed ),
-                theta2eta(k.init, .lk , earg = .ek ))
-    }
+      sc.init <-
+      dd.init <-
+      kk.init <- matrix(as.numeric(NA), n, NOS)
+          
+      for (spp. in 1:NOS) {  # For each response 'y_spp.'... do:
+        yvec <- y[, spp.]
+        wvec <- w[, spp.]
+
+          gscale     <- .gscale
+          gshape1.d  <- .gshape1.d
+          gshape2.k  <- .gshape2.k        
+          if (length( .iscale ))
+            gscale    <-  rep( .iscale    , length = NOS)
+          if (length( .id        ))
+            gshape1.d <-  rep( .id        , length = NOS)
+          if (length( .ik        ))
+            gshape2.p <-  rep( .ik        , length = NOS)
+          allmat1 <- expand.grid(shape1.d = gshape1.d,
+                                 shape2.k = gshape2.k)
+          allmat2 <- matrix(as.numeric(NA), nrow(allmat1), 2)
+
+          ll.gstacy <- function(scaleval, x = x, y = y, w = w, extraargs) { 
+            ans <- sum(c(w) * dgengamma.stacy(x = y,
+                                              scale    = scaleval,
+                                              d        = extraargs$Shape1.d,
+                                              k        = extraargs$Shape2.k,
+                                              log = TRUE))
+            ans
+          }
+
+          for (iloc in 1:nrow(allmat1)) {
+            allmat2[iloc, ] <-
+              grid.search(gscale, objfun = ll.gstacy,
+                            y = yvec, x = x, w = wvec,
+                            ret.objfun = TRUE,  # 2nd value is the loglik
+                            extraargs = list(Shape1.d = allmat1[iloc, 1],
+                                             Shape2.k = allmat1[iloc, 2]))
+          }
+          ind5 <- which.max(allmat2[, 2])  # 2nd value is the loglik
+          sc.init[, spp.] <- allmat2[ind5, 1]
+          dd.init[, spp.] <- allmat1[ind5, 1]
+          kk.init[, spp.] <- allmat1[ind5, 2]
+      }  # End of for (spp. ...)
+
+
+      etastart <-
+        cbind(theta2eta(sc.init,  .lscale    , earg = .escale  ),
+              theta2eta(dd.init , .ld        , earg = .ed      ),
+              theta2eta(kk.init , .lk        , earg = .ek      ))
+    }  # End of etastart.
   }), list( .lscale = lscale, .ld = ld, .lk = lk,
             .escale = escale, .ed = ed, .ek = ek,
-            .iscale = iscale, .id = id, .ik = ik ))),
+            .iscale = iscale, .id = id, .ik = ik,
+            .gscale = gscale, .gshape1.d = gshape1.d,           
+                              .gshape2.k = gshape2.k
+           ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     b <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    d <- eta2theta(eta[, 2], .ld , earg = .ed )
-    k <- eta2theta(eta[, 3], .lk , earg = .ek )
+    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
     b * gamma(k + 1 / d) / gamma(k)
   }, list( .lscale = lscale, .lk = lk, .ld = ld,
            .escale = escale, .ek = ek, .ed = ed ))),
@@ -8184,8 +8239,8 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
              extra = NULL,
              summation = TRUE) {
     b <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    d <- eta2theta(eta[, 2], .ld , earg = .ed )
-    k <- eta2theta(eta[, 3], .lk , earg = .ek )
+    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
 
     if (residuals) {
       stop("loglikelihood residuals not implemented yet")
@@ -8227,8 +8282,8 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
 
   deriv = eval(substitute(expression({
     b <- eta2theta(eta[, 1], .lscale , earg = .escale )
-    d <- eta2theta(eta[, 2], .ld , earg = .ed )
-    k <- eta2theta(eta[, 3], .lk , earg = .ek )
+    d <- eta2theta(eta[, 2], .ld     , earg = .ed )
+    k <- eta2theta(eta[, 3], .lk     , earg = .ek )
 
     tmp22 <- (y/b)^d
     tmp33 <- log(y/b)
@@ -8237,8 +8292,8 @@ rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
     dl.dk <- d * tmp33 - digamma(k)
 
     db.deta <- dtheta.deta(b, .lscale , earg = .escale )
-    dd.deta <- dtheta.deta(d, .ld , earg = .ed )
-    dk.deta <- dtheta.deta(k, .lk , earg = .ek )
+    dd.deta <- dtheta.deta(d, .ld     , earg = .ed )
+    dk.deta <- dtheta.deta(k, .lk     , earg = .ek )
 
     c(w) * cbind(dl.db * db.deta,
                  dl.dd * dd.deta,
@@ -8805,7 +8860,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
             namesof("lambda", llambda, earg = elambda), "\n", 
             "Mean:     something complicated"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
     if (min(y) <= 0 || max(y) >= 1)
@@ -8974,7 +9029,7 @@ rlino <- function(n, shape1, shape2, lambda = 1) {
             namesof("shape2", link, earg = earg), "\n",
             "Mean:     shape1/(shape2-1) provided shape2>1"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
@@ -9152,7 +9207,7 @@ rmaxwell <- function(n, rate) {
             "\n", "\n",
             "Mean:    sqrt(8 / (rate * pi))"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -11008,7 +11063,7 @@ rtruncpareto <- function(n, lower, upper, shape) {
              namesof("shape", lshape, earg = eshape), "\n",
              "Mean:     (digamma(shape+1)-digamma(1)) / rate"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M)
   }), list( .zero = zero ))),
   initialize = eval(substitute(expression({
 
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index e03dcd9..6e428fd 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -2326,13 +2326,15 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
             namesof("prob" , lprob , earg = eprob ), "\n",
             "Mean:     (1 - pstr0) * prob"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
 
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          type.fitted  = .type.fitted ,
+         expected = TRUE,
+         multiple.responses  = FALSE,
          zero = .zero )
   }, list( .zero = zero,
            .type.fitted = type.fitted
@@ -2537,6 +2539,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
 
 
 
+
     ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
                      (1 - mubin - w * mubin *
                      (1 - mubin)^w * phi / pobs0)
@@ -2607,7 +2610,7 @@ zanegbinomialff.control <- function(save.weights = TRUE, ...) {
             namesof("onempstr0", lonempstr0, earg = eonempstr0), "\n",
             "Mean:     onempstr0 * prob"),
   constraints = eval(substitute(expression({
-    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+    constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
 
@@ -4599,6 +4602,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
            zero = NULL) {
 
 
+
   expected <- TRUE
 
 
@@ -4859,7 +4863,7 @@ rzigeom <- function(n, prob, pstr0 = 0) {
     prob   <- eta2theta(eta[, c(FALSE, TRUE)], .lprob  , earg = .eprob  )
 
 
-    prob0 <- prob  # P(Y == 0) from parent distribution
+    prob0 <- prob  # P(Y == 0) from parent distribution, aka f(0)
     pobs0 <- pstr0 + (1 - pstr0) * prob0  # P(Y == 0)
     index0 <- (y == 0)
 
@@ -4882,8 +4886,13 @@ rzigeom <- function(n, prob, pstr0 = 0) {
             .eprob = eprob, .epstr0 = epstr0 ))),
   weight = eval(substitute(expression({
     if ( .expected ) {
-      ned2l.dprob2 <- (1 - pstr0) * (1 / (prob^2 * (1 - prob)) +
-                                    (1 - pstr0) / pobs0)
+
+
+      ned2l.dprob2 <- (1 - pstr0)^2 / pobs0 +
+                      (1 - pstr0) * ((1 - prob) / prob) *
+                                    (1 / prob + 1 / (1 - prob)^2)
+
+
       ned2l.dpstr0.prob <- 1 / pobs0
       ned2l.dpstr02 <- (1 - prob0) / ((1 - pstr0) * pobs0)
     } else {
@@ -5223,8 +5232,12 @@ rzigeom <- function(n, prob, pstr0 = 0) {
             .eprob = eprob, .eonempstr0 = eonempstr0 ))),
   weight = eval(substitute(expression({
     if ( .expected ) {
-      ned2l.dprob2 <- (    onempstr0) * (1 / (prob^2 * (1 - prob)) +
-                                    ( onempstr0) / pobs0)
+
+      ned2l.dprob2 <- (onempstr0)^2 / pobs0 +
+                      (onempstr0) * ((1 - prob) / prob) *
+                                    (1 / prob + 1 / (1 - prob)^2)
+
+
       ned2l.donempstr0.prob <- -1 / pobs0
       ned2l.donempstr02 <- (1 - prob0) / ((    onempstr0) * pobs0)
     } else {
@@ -5496,7 +5509,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             namesof("prob" ,   lprob,  earg = eprob),  "\n",
             "Mean:     (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
@@ -5800,7 +5813,7 @@ rzabinom <- function(n, size, prob, pobs0 = 0) {
             namesof("onempobs0", lonempobs0, earg = eonempobs0), "\n",
             "Mean:     onempobs0 * prob / (1 - (1 - prob)^size)"),
   constraints = eval(substitute(expression({
-      constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+      constraints <- cm.zero.VGAM(constraints, x = x, .zero , M = M)
   }), list( .zero = zero ))),
 
   infos = eval(substitute(function(...) {
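
A hedged aside, not part of the patch: the corrected expected-information entry for the zero-inflated geometric families above can be restated numerically as follows; the parameter values are purely illustrative, and pobs0 follows the same definition used in the family code.

## Illustrative values only (not taken from the patch).
pstr0 <- 0.2                          # structural-zero probability
prob  <- 0.4                          # geometric probability parameter
pobs0 <- pstr0 + (1 - pstr0) * prob   # P(Y == 0), as in the family code
## Corrected entry, matching the new ned2l.dprob2 expression:
(1 - pstr0)^2 / pobs0 +
  (1 - pstr0) * ((1 - prob) / prob) * (1 / prob + 1 / (1 - prob)^2)
## Previous (replaced) expression, shown for comparison:
(1 - pstr0) * (1 / (prob^2 * (1 - prob)) + (1 - pstr0) / pobs0)
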
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 333699d..04f538a 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -8,16 +8,28 @@
 
 
 
-formulavlm <- function(x, fnumber = 1, ...) {
-  if (!is.Numeric(fnumber, integer.valued = TRUE,
+
+
+formula.vlm <- function(x, ...)
+  formulavlm(x, ...)
+
+
+
+
+
+
+
+
+formulavlm <- function(x, form.number = 1, ...) {
+  if (!is.Numeric(form.number, integer.valued = TRUE,
                   length.arg = 1, positive = TRUE) ||
-      fnumber > 2)
-    stop("argument 'fnumber' must be 1 or 2")
+      form.number > 2)
+    stop("argument 'form.number' must be 1 or 2")
 
   if (!any(slotNames(x) == "misc"))
     stop("cannot find slot 'misc'")
 
-  if (fnumber == 1) x@misc$formula else x@misc$form2
+  if (form.number == 1) x@misc$formula else x@misc$form2
 }
 
 
@@ -176,11 +188,88 @@ setMethod("case.names", "grc",
 
 
 
+has.interceptvlm <- function(object, form.number = 1, ...) {
+  if (!is.Numeric(form.number, integer.valued = TRUE,
+                  length.arg = 1, positive = TRUE) ||
+      form.number > 2)
+    stop("argument 'form.number' must be 1 or 2")
+
+
+  if (form.number == 1) {
+    if (is.numeric(aa <- attr(terms(object), "intercept")))
+      as.logical(aa) else
+      FALSE
+  } else if (form.number == 2) {
+    if (is.numeric(aa <- attr(terms(object, form.number = 2), "intercept")))
+      as.logical(aa) else
+      FALSE
+  }
+}
+
+
+if (!isGeneric("has.intercept"))
+    setGeneric("has.intercept", function(object, ...)
+               standardGeneric("has.intercept"),
+               package = "VGAM")
+
+
+setMethod("has.intercept",  "vlm", function(object, ...)
+           has.interceptvlm(object, ...))
+
+
+
+
+
+
+
+term.namesvlm <- function(model, form.number = 1, ...) {
+  if (!is.Numeric(form.number, integer.valued = TRUE,
+                  length.arg = 1, positive = TRUE) ||
+      form.number > 2)
+    stop("argument 'form.number' must be 1 or 2")
+
+    aa <- if (has.intercept(model, form.number = form.number))
+          "(Intercept)" else NULL
+    bb <- attr(terms(model, form.number = form.number), "term.labels")
+    c(aa, bb)
+}
+
+
+if (!isGeneric("term.names"))
+    setGeneric("term.names", function(model, ...)
+               standardGeneric("term.names"),
+               package = "VGAM")
+
+
+setMethod("term.names",  "vlm", function(model, ...)
+           term.namesvlm(model, ...))
+
+
+
+
 
 
 
+responseNamevlm <- function(model, form.number = 1, ...) {
+  TERMS.MODEL <- terms(model, form.number = form.number)
+  if (length(aa <- attr(TERMS.MODEL, "dataClasses")) &&
+      length(bb <- attr(TERMS.MODEL, "response"   )) &&
+      bb == 1) {
+    names(aa)[1]
+  } else {
+    NULL
+  }
+}
+
+
+if (!isGeneric("responseName"))
+  setGeneric("responseName", function(model, ...)
+             standardGeneric("responseName"),
+             package = "VGAM")
 
 
+setMethod("responseName",  "vlm", function(model, ...)
+           responseNamevlm(model, ...))
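
A hedged usage sketch, not part of the patch, of the extractor generics defined above; the pneumo data and propodds family are only placeholders for any fitted vglm/vlm object.

library(VGAM)
fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
formula(fit)        # dispatches to formulavlm() via the new formula.vlm()
has.intercept(fit)  # expected TRUE for this fit
term.names(fit)     # "(Intercept)" followed by the term labels, e.g. "let"
responseName(fit)   # the response name recorded in the terms object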
 
 
 
diff --git a/R/links.q b/R/links.q
index c044bab..f1672ad 100644
--- a/R/links.q
+++ b/R/links.q
@@ -17,6 +17,30 @@ ToString <- function(x)
 
 
 
+
+
+ as.char.expression <- function(x) {
+  answer <- x
+  for (i in length(x)) {
+    charvec <- substring(x[i], 1:nchar(x[i]), 1:nchar(x[i]))
+    if (!all(is.element(charvec,
+                        c(letters, LETTERS, as.character(0:9), ".", "_"))))
+      answer[i] <- paste("(", x[i], ")", sep = "")
+  }
+  answer
+}
+
+
+
+if (FALSE) {
+  as.char.expression("a")
+  as.char.expression("a+b")
+  as.char.expression(c("a", "a+b"))
+}
+
+
+
+
  TypicalVGAMfamilyFunction <-
   function(lsigma = "loge",
            isigma = NULL,
@@ -39,13 +63,13 @@ ToString <- function(x)
            probs.y = c(0.25, 0.50, 0.75),
            multiple.responses = FALSE, earg.link = FALSE,
            whitespace = FALSE, bred = FALSE, lss = TRUE,
-           oim = FALSE, nsimEIM = 100,
+           oim = FALSE, nsimEIM = 100, byrow.arg = FALSE,
            zero = NULL) {
   NULL
 }
 
 
-TypicalVGAMlinkFunction <-
+TypicalVGAMlink <-
   function(theta,
            someParameter = 0,
            bvalue = NULL,  # .Machine$double.xmin is an alternative
@@ -91,23 +115,24 @@ care.exp <- function(x,
     theta[theta <= 0.0] <- bvalue
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      exp(theta)
-    }
+    switch(deriv+1,
+           exp(theta),
+           theta,
+           theta,
+           stop("argument 'deriv' unmatched"))
   } else {
-    switch(deriv + 1, {
-       log(theta)},
-       theta,
-       theta)
+    switch(deriv + 1,
+       log(theta),
+       1 / theta,
+       -1 / theta^2,
+       stop("argument 'deriv' unmatched"))
   }
 }
 
 
 
+
+
  logneg <- function(theta,
                     bvalue = NULL,  # .Machine$double.xmin is an alternative
                     inverse = FALSE, deriv = 0,
@@ -116,7 +141,7 @@ care.exp <- function(x,
 
   if (is.character(theta)) {
     string <- if (short)
-        paste("log(-(",  theta, "))", sep = "") else
+        paste("logneg(",  theta, ")", sep = "") else
         paste("log(-(",  theta, "))", sep = "")
     if (tag)
       string <- paste("Log negative:", string)
@@ -127,23 +152,22 @@ care.exp <- function(x,
     theta[theta <= 0.0] <- bvalue
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      -exp(theta)
-    }
+    switch(deriv + 1,
+           -exp(theta),
+           theta,
+           theta)
   } else {
-    switch(deriv + 1, {
-       log(-theta)},
-       theta,
-       theta)
+    switch(deriv + 1,
+           log(-theta),
+           1 / theta,
+           -1 / theta^2)
   }
 }
 
 
 
+
+
  logoff <- function(theta,
                     offset = 0,
                     inverse = FALSE, deriv = 0,
@@ -160,7 +184,7 @@ care.exp <- function(x,
       paste("log(",
             as.character(offset),
             "+",
-            theta,
+            as.char.expression(theta),
             ")", sep = "")
     if (tag) 
       string <- paste("Log with offset:", string) 
@@ -168,18 +192,15 @@ care.exp <- function(x,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 offset = offset,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      exp(theta) - offset
-    }
+    switch(deriv + 1,
+           exp(theta) - offset,
+           theta + offset,
+           theta + offset)
   } else {
     switch(deriv + 1,
-       log(theta + offset),
-       theta + offset,
-       theta + offset)
+           log(theta + offset),
+           1 / (theta + offset),
+           -1 / (theta + offset)^2)
   }
 }
 
@@ -198,19 +219,11 @@ care.exp <- function(x,
     return(string)
   }
 
-  if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      theta
-    }
-  } else {
-    switch(deriv+1,
-       theta,
-       theta * 0 + 1,
-       theta * 0)
-  }
+  switch(deriv+1,
+         theta,
+         theta * 0 + 1,
+         theta * 0,  # zz Does not handle Inf and -Inf
+         stop("argument 'deriv' unmatched"))
 }
 
 
@@ -220,25 +233,18 @@ care.exp <- function(x,
                       inverse = FALSE, deriv = 0,
                       short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
+    theta <- as.char.expression(theta)
     string <- paste("-", theta, sep = "")
     if (tag) 
       string <- paste("Negative-identity:", string) 
     return(string)
   }
 
-  if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      -theta
-    }
-  } else {
-    switch(deriv+1,
-       -theta,
-       theta*0 - 1,
-       theta*0)
-  }
+  switch(deriv+1,
+         -theta,
+         theta * 0 - 1,
+         theta * 0,  # zz Does not handle Inf and -Inf
+         stop("argument 'deriv' unmatched"))
 }
 
 
@@ -252,8 +258,14 @@ care.exp <- function(x,
            short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
-        paste("logit(", theta, ")", sep = "") else
-        paste("log(",   theta, "/(1-", theta, "))", sep = "")
+        paste("logit(",
+               theta,
+              ")", sep = "") else
+        paste("log(",
+              as.char.expression(theta),
+              "/(1-",
+              as.char.expression(theta),
+              "))", sep = "")
     if (tag) 
       string <- paste("Logit:", string) 
     return(string)
@@ -264,17 +276,19 @@ care.exp <- function(x,
     theta[theta >= 1.0] <- 1.0 - bvalue
   }
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta, bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
+    switch(deriv+1, {
         yy <- theta
         Neg <- (theta <  0) & !is.na(theta)
         yy[ Neg] <- exp(theta[Neg]) / (1 + exp(theta[Neg]))
         Pos <- (theta >= 0) & !is.na(theta)
         yy[Pos] <- 1 / (1 + exp(-theta[Pos]))
         yy
-      }
+           },
+           1 / Recall(theta = theta,
+                      bvalue = bvalue,
+                      inverse = FALSE, deriv = deriv),
+           exp(log(theta) + log1p(-theta)) * (1 - 2 * theta),
+           stop("argument 'deriv' unmatched"))
   } else {
     switch(deriv+1, {
        temp2 <- log(theta) - log1p(-theta)
@@ -282,8 +296,9 @@ care.exp <- function(x,
          temp2[near0.5] <- log(theta[near0.5] / (1 - theta[near0.5]))
        temp2
        },
-       exp(log(theta) + log1p(-theta)),
-       exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
+       exp(-log(theta) - log1p(-theta)),
+       (2 * theta - 1) / (exp(log(theta) + log1p(-theta)))^2,
+       stop("argument 'deriv' unmatched"))
   }
 }
 
@@ -309,19 +324,19 @@ care.exp <- function(x,
     theta[theta <= 1.0] <- bvalue
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      exp(exp(theta))
-    }
+    switch(deriv+1,
+           exp(exp(theta)),
+           (theta * log(theta)),
+           {  junk <- log(theta)
+              theta  * junk * (1 + junk)
+           },
+           stop("argument 'deriv' unmatched"))
   } else {
     switch(deriv+1, {
            log(log(theta))},
-           theta * log(theta),
+           1 / (theta * log(theta)),
            {  junk <- log(theta)
-              -junk^2 / (1 + junk)
+              -(1 + junk) / (theta * junk)^2
            },
            stop("argument 'deriv' unmatched"))
   }
@@ -336,9 +351,11 @@ care.exp <- function(x,
                      inverse = FALSE, deriv = 0,
                      short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("cloglog(",    theta, ")",  sep = "") else
-        paste("log(-log(1-", theta, "))", sep = "")
+        paste("log(-log(1-",
+              as.char.expression(theta),
+              "))", sep = "")
     if (tag) 
       string <- paste("Complementary log-log:", string) 
     return(string)
@@ -350,20 +367,20 @@ care.exp <- function(x,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      junk <- exp(theta)
-      -expm1(-junk)
-    }
-  } else {
     switch(deriv+1, {
-           log(-log1p(-theta)) },
-           -(1 - theta) * log1p(-theta),
+           junk <- exp(theta)
+           -expm1(-junk)
+           },
+           -((1 - theta) * log1p(-theta)),
+           {  junk <- log1p(-theta)
+              -(1 - theta) * (1 + junk) * junk },
+           stop("argument 'deriv' unmatched"))
+  } else {
+    switch(deriv+1,
+           log(-log1p(-theta)),
+           -1 / ((1 - theta) * log1p(-theta)),
            {  junk <- log1p(-theta)
-              -(1 - theta) * (1 + junk) * junk
+               -(1 + junk) / ((1 - theta) * junk)^2
            },
            stop("argument 'deriv' unmatched"))
   }
@@ -391,31 +408,49 @@ care.exp <- function(x,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
+    switch(deriv+1, {
       ans <- pnorm(theta)
       if (is.matrix(theta))
         dim(ans) <- dim(theta)
       ans
-    }
+     }, {  # 1st deriv
+      1 / Recall(theta = theta,
+                 bvalue = bvalue,
+                 inverse = FALSE, deriv = deriv)
+    }, {  # 2nd deriv
+        junk <- qnorm(theta)
+        ans <- -junk * dnorm(junk)
+        if (is.vector(theta)) ans else
+        if (is.matrix(theta)) {
+          dim(ans) <- dim(theta)
+          ans
+        } else {
+          warning("can only handle vectors and matrices;",
+                  " converting to vector")
+          ans
+        }
+    })
   } else {
     switch(deriv+1, {
         ans <- qnorm(theta)
         if (is.matrix(theta))
             dim(ans) <- dim(theta)
         ans
-     }, {
+     }, {  # 1st deriv
        if (is.matrix(theta)) {
-         ans <- dnorm(qnorm(theta))
+         ans <- 1 / dnorm(qnorm(theta))
          dim(ans) <- dim(theta)
          ans
-       } else dnorm(qnorm(as.vector(theta)))
-      }, {
+       } else {
+         1 / dnorm(qnorm(as.vector(theta)))
+       }
+      }, {  # 2nd deriv
         junk <- qnorm(theta)
-        ans <- -junk * dnorm(junk)
+
+        ans <- junk / (dnorm(junk))^2
+
+
+
         if (is.vector(theta)) ans else
         if (is.matrix(theta)) {
           dim(ans) <- dim(theta)
@@ -452,18 +487,15 @@ care.exp <- function(x,
   if (!inverse && length(bvalue))
     theta[theta <= 0.0] <- bvalue
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      log(theta)
-    }
+    switch(deriv+1,
+           log(theta),
+           exp(-theta),
+           exp(-2 * theta))
   } else {
-    switch(deriv+1, {
-       exp(theta)},
-        1 / exp(theta),
-       -1 / exp(theta * 2))
+    switch(deriv+1,
+          exp(theta),
+          exp(theta),
+          exp(theta))
   }
 }
 
@@ -476,6 +508,7 @@ care.exp <- function(x,
                         inverse = FALSE, deriv = 0,
                         short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
+    theta <- as.char.expression(theta)
     string <- paste("1/", theta, sep = "")
     if (tag) 
       string <- paste("Reciprocal:", string) 
@@ -486,18 +519,15 @@ care.exp <- function(x,
     theta[theta == 0.0] <- bvalue
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      1/theta
-    }
+    switch(deriv+1,
+           1/theta,
+           -theta^2,
+           2 * theta^3)
   } else {
-    switch(deriv+1, {
-       1/theta},
-       -theta^2,
-       2*theta^3)
+    switch(deriv+1,
+           1/theta,
+           -1/theta^2,
+           2 / theta^3)
   }
 }
 
@@ -522,18 +552,15 @@ care.exp <- function(x,
   if (!inverse && length(bvalue))
     theta[theta <= 0.0] <- bvalue
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      exp(-theta)
-    }
+    switch(deriv+1,
+           exp(-theta),
+           -theta,
+            theta)
   } else {
-    switch(deriv+1, {
-       -log(theta)},
-       -theta,
-       theta)
+    switch(deriv+1,
+           -log(theta),
+           -1/theta,
+            1/theta^2)
   }
 }
 
@@ -546,6 +573,7 @@ care.exp <- function(x,
            inverse = FALSE,
            deriv = 0, short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
+    theta <- as.char.expression(theta)
     string <- paste("-1/", theta, sep = "")
     if (tag) 
       string <- paste("Negative reciprocal:", string) 
@@ -557,30 +585,28 @@ care.exp <- function(x,
     theta[theta == 0.0] <- bvalue
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      -1/theta
-    }
+    switch(deriv+1,
+           -1/theta,
+            theta^2,
+           2 * theta^3)
   } else {
-    switch(deriv+1, {
-       -1/theta},
-       theta^2,
-       2*theta^3)
+    switch(deriv+1,
+           -1/theta,
+           1/theta^2,
+           -2 / theta^3)
   }
 }
 
 
 
- natural.ig <-
+  igcanlink <-
   function(theta,
            bvalue = NULL,  # .Machine$double.eps is an alternative
            inverse = FALSE, deriv = 0,
            short = TRUE, tag = FALSE) {
 
   if (is.character(theta)) {
+    theta <- as.char.expression(theta)
     string <- paste("-1/", theta, sep = "")
     if (tag) 
       string <- paste("Negative inverse:", string) 
@@ -588,18 +614,15 @@ care.exp <- function(x,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / negreciprocal(theta,
-                        bvalue = bvalue,
-                        inverse = FALSE, deriv = deriv)
-    } else {
-      1 / sqrt(-2*theta)
-    }
+    switch(deriv+1,
+           1 / sqrt(-2*theta),
+           theta^3,
+           3 * theta^5)
   } else {
     switch(deriv+1,
-       -1 / (2 * theta^2),
-       theta^3,
-       3 * theta^5)
+           -1 / (2 * theta^2),
+           1 / theta^3,
+           -3 / theta^4)
   }
 }
 
@@ -614,9 +637,13 @@ care.exp <- function(x,
                     inverse = FALSE, deriv = 0,
                     short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
-    string <- if (short) 
+    string <- if (short)
         paste("rhobit(", theta, ")", sep = "") else
-        paste("log((1+", theta, ")/(1-", theta, "))", sep = "")
+        paste("log((1+",
+              as.char.expression(theta),
+              ")/(1-",
+              as.char.expression(theta),
+              "))", sep = "")
     if (tag) 
       string <- paste("Rhobit:", string) 
     return(string)
@@ -628,20 +655,16 @@ care.exp <- function(x,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bminvalue = bminvalue,
-                 bmaxvalue = bmaxvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      junk <- exp(theta)
-      expm1(theta) / (junk + 1.0)
-    }
+      switch(deriv+1, {
+             junk <- exp(theta)
+             expm1(theta) / (junk + 1.0) },
+             (1 - theta^2) / 2,
+             (-theta / 2) * (1 - theta^2))
   } else {
       switch(deriv+1, {
-          log1p(theta) - log1p(-theta)},
-          (1 - theta^2) / 2,
-          (1 - theta^2)^2 / (4*theta))
+             log1p(theta) - log1p(-theta)},
+             2 / (1 - theta^2),
+             (4*theta) / (1 - theta^2)^2)
   }
 }
 
@@ -656,7 +679,11 @@ care.exp <- function(x,
   if (is.character(theta)) {
     string <- if (short) 
         paste("fisherz(", theta, ")", sep = "") else
-        paste("(1/2) * log((1+", theta, ")/(1-", theta, "))", sep = "")
+        paste("(1/2) * log((1+",
+              as.char.expression(theta),
+              ")/(1-",
+              as.char.expression(theta),
+              "))", sep = "")
     if (tag) 
       string <- paste("Fisher's Z transformation:", string) 
     return(string)
@@ -668,19 +695,15 @@ care.exp <- function(x,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bminvalue = bminvalue,
-                 bmaxvalue = bmaxvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-      tanh(theta)
-    }
+    switch(deriv+1,
+           tanh(theta),
+           1 - theta^2,
+           (-theta) * (1 - theta^2))
   } else {
-      switch(deriv+1,
-         atanh(theta),
-         1.0 - theta^2,
-         (1.0 - theta^2)^2 / (2*theta))
+    switch(deriv+1,
+           atanh(theta),
+           1 / (1.0 - theta^2),
+           (2*theta) / (1 - theta^2)^2)
     }
 }
 
@@ -728,8 +751,11 @@ care.exp <- function(x,
 
   if (is.character(theta)) {
     is.M <- is.finite(M) && is.numeric(M)
-    string <- if (short)
-        paste("multilogit(", theta, ")", sep = "") else {
+    string <- if (short) {
+        paste("multilogit(", theta, ")", sep = "")
+    } else {
+        theta <- as.char.expression(theta)
+
          if (refLevel < 0) {
            ifelse(whitespace,
              paste("log(", theta, "[,j] / ",
@@ -809,14 +835,11 @@ care.exp <- function(x,
 
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 refLevel = refLevel,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-       foo(theta, refLevel, M = M)  # log(theta[, -jay] / theta[, jay])
-    }
+    switch(deriv + 1, {
+           foo(theta, refLevel, M = M)  # log(theta[, -jay] / theta[, jay])
+           },
+           care.exp(log(theta) + log1p(-theta)),
+           care.exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
   } else {
     switch(deriv + 1, {
       ans <- if (refLevel < 0) {
@@ -828,8 +851,8 @@ care.exp <- function(x,
       colnames(ans) <- NULL
       ans
       },
-      care.exp(log(theta) + log1p(-theta)),
-      care.exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
+      care.exp(-log(theta) - log1p(-theta)),
+      (2 * theta - 1) / care.exp(2*log(theta) + 2*log1p(-theta)))
   }
 }  # end of multilogit
 
@@ -855,6 +878,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
   if (is.character(theta)) {
     string <- if (short) 
       paste("foldsqrt(", theta, ")", sep = "") else {
+    theta <- as.char.expression(theta)
       if (abs(mux-sqrt(2)) < 1.0e-10)
         paste("sqrt(2*", theta, ") - sqrt(2*(1-", theta, "))",
               sep = "") else
@@ -869,11 +893,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 min = min, max = max, mux = mux,
-                 inverse = FALSE, deriv = deriv)
-    } else {
+    switch(deriv+1, {
       mid <- (min + max) / 2
       boundary <- mux * sqrt(max - min)
       temp <- pmax(0, (theta/mux)^2 * (2*(max-min) - (theta/mux)^2))
@@ -885,12 +905,14 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
       ans[theta < -boundary] <- NA
       ans[theta >  boundary] <- NA
       ans
-    }
+        },
+       (2 / mux ) / (1/sqrt(theta-min) + 1/sqrt(max-theta)),
+       stop("use the chain rule formula to obtain this"))
   } else {
     switch(deriv+1,
-        mux * (sqrt(theta-min) - sqrt(max-theta)),
-       (2 / mux) / (1/sqrt(theta-min) + 1/sqrt(max-theta)),
-       -(4 / mux) / ((theta-min)^(-3/2) - (max-theta)^(-3/2)))
+           mux * (sqrt(theta-min) - sqrt(max-theta)),
+           (1/sqrt(theta-min) + 1/sqrt(max-theta)) * mux / 2,
+           -(mux / 4) * ((theta-min)^(-3/2) - (max-theta)^(-3/2)))
   }
 }
 
@@ -910,29 +932,23 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
         paste("powerlink(", theta, ", power = ",
               as.character(exponent), ")",
               sep = "") else
-        paste(theta, "^(", as.character(exponent), ")", sep = "")
+        paste(as.char.expression(theta),
+              "^(", as.character(exponent), ")", sep = "")
     if (tag) 
       string <- paste("Power link:", string)
     return(string)
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 power = power,
-                 inverse = FALSE, deriv = deriv)
-      } else {
-          theta^(1/exponent)
-      }
+    switch(deriv+1,
+           theta^(1/exponent),
+           (theta^(1-exponent)) / exponent,
+           ((1-exponent) / exponent^2) * (theta^(1 - 2*exponent)))
   } else {
     switch(deriv+1,
-    {
-      theta^exponent
-    }, {
-      (theta^(1-exponent)) / exponent
-    }, {
-      (theta^(2-exponent)) / (exponent * (exponent-1))
-    })
+           theta^exponent,
+           exponent / (theta^(1-exponent)),
+           exponent * (exponent-1) * (theta^(exponent-2)))
   }
 }
 
@@ -963,7 +979,11 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
               ", max = ", B, ")", sep = "") else
         paste("extlogit(", theta, ")", sep = "")
     } else {
-      paste("log((", theta, "-min)/(max-", theta, "))", sep = "")
+      paste("log((",
+            as.char.expression(theta),
+            "-min)/(max-",
+            as.char.expression(theta),
+            "))", sep = "")
     }
     if (tag) 
       string <- paste("Extended logit:", string) 
@@ -971,21 +991,16 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
   }
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 min = min, max = max,
-                 bminvalue = bminvalue,
-                 bmaxvalue = bmaxvalue,
-                 inverse = FALSE, deriv = deriv)
-      } else {
-        junk <- care.exp(theta)
-        (A + B * junk) / (1.0 + junk)
-      }
+    switch(deriv+1, {
+           junk <- care.exp(theta)
+           (A + B * junk) / (1.0 + junk) },
+           ((theta - A) * (B - theta)) / (B-A),
+           (A + B - 2 * theta) * (theta - A) * (B - theta) / (B-A)^2)
   } else {
     switch(deriv+1, {
            log((theta - A)/(B - theta))},
-           (theta - A) * (B - theta) / (B-A),
-           (theta - A) * (B - theta) * (B - 2 * theta + A) / (B-A)^2)
+           (B-A) / ((theta - A) * (B - theta)),
+           ((2 * theta - A - B) * (B-A)) / ((theta - A) * (B - theta))^2)
   }
 }
 
@@ -1000,8 +1015,10 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                   short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
-        paste("logc(", theta, ")", sep = "") else
+        paste("logc(", theta, ")", sep = "") else {
+        theta <- as.char.expression(theta)
         paste("log(1-", theta, ")", sep = "")
+    }
     if (tag) 
       string <- paste("Log Complementary:", string) 
     return(string)
@@ -1012,18 +1029,15 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
     theta[theta >= 1.0] <- bvalue;
   }
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
-        -expm1(theta)
-    }
+    switch(deriv+1,
+           -expm1(theta),
+           theta - 1,
+           theta - 1)
   } else {
-    switch(deriv+1, {
-           log1p(-theta)},
-           -(1.0 - theta),
-           -(1.0 - theta)^2)
+    switch(deriv+1,
+           log1p(-theta),
+           1 / (theta - 1),
+           -1 / (1 - theta)^2)
   }
 }
 
@@ -1040,8 +1054,10 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                      short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short) 
-        paste("cauchit(", theta, ")", sep = "") else
+        paste("cauchit(", theta, ")", sep = "") else {
+        theta <- as.char.expression(theta)
         paste("tan(pi*(", theta, "-0.5))", sep = "")
+    }
     if (tag) 
       string <- paste("Cauchit:", string) 
     return(string)
@@ -1052,19 +1068,21 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
     theta[theta >= 1.0] <- 1.0 - bvalue
   }
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-      } else {
-        0.5 + atan(theta) / pi
-      }
+    switch(deriv+1,
+           0.5 + atan(theta) / pi,
+           (cos(pi * (theta-0.5)))^2  / pi, {
+           temp2 <- cos(pi * (theta-0.5))
+           temp4 <- sin(pi * (theta-0.5))
+           -2 * temp4 * temp2^3 / pi
+         })
   } else {
-      switch(deriv+1,
-             tan(pi * (theta-0.5)),
-             cos(pi * (theta-0.5))^2 / pi,
-            -sin(pi * (theta-0.5) * 2)
-            )
+    switch(deriv+1,
+           tan(pi * (theta-0.5)),
+           pi / (cos(pi * (theta-0.5)))^2, {
+           temp2 <- cos(pi * (theta-0.5))
+           temp3 <- tan(pi * (theta-0.5))
+           (temp3 * 2 * pi^2) / temp2^2
+         })
   }
 }
 
@@ -1080,6 +1098,8 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
 
 
 
+
+
   if (!is.Numeric(lambda, positive = TRUE))
     stop('could not determine lambda or lambda has negative values')
   if (is.Numeric(cutpoint))
@@ -1105,6 +1125,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
       sep = "") else "",
                   ")", sep = "")
     } else {
+      theta <- as.char.expression(theta)
       if (is.Numeric(cutpoint)) {
         paste("-3*log(1-qnorm(", theta,
               ")/(3*sqrt(lambda)))",
@@ -1137,18 +1158,20 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
 
 
   answer <- if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 lambda = lambda,
-                 cutpoint = cutpoint,
-                 inverse = FALSE, deriv = deriv)
-    } else {
+    switch(deriv+1, {
       if (is.Numeric(cutpoint)) {
         pnorm((1-care.exp(-(theta-log(cutpoint))/3)) * 3 * sqrt(lambda))
       } else {
         pnorm((1-care.exp(-theta/3)) * 3 * sqrt(lambda))
       }
-    }
+    },
+
+      1 / Recall(theta = theta,
+                 lambda = lambda,
+                 cutpoint = cutpoint,
+                 inverse = FALSE, deriv = deriv),
+      stop('cannot currently handle deriv = 2')
+    )
   } else {
     smallno <- 1 * .Machine$double.eps
     Theta <- theta
@@ -1158,14 +1181,18 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
     switch(deriv+1, {
         temp <- Ql / (3*sqrt(lambda))
         temp <- pmin(temp, 1.0 - smallno)  # 100 / .Machine$double.eps
-        -3*log1p(-temp) +
-        if (is.Numeric(cutpoint)) log(cutpoint) else 0},
-        (1 - Ql / (3*sqrt(lambda))) * sqrt(lambda) * dnorm(Ql),
-        {  stop('cannot handle deriv = 2') },
+        origans <- -3*log1p(-temp) +
+        if (is.Numeric(cutpoint)) log(cutpoint) else 0
+        1 / origans
+      }, {
+        origans <- (1 - Ql / (3*sqrt(lambda))) * sqrt(lambda) * dnorm(Ql)
+        1 / origans
+      },
+        {  stop('cannot currently handle deriv = 2') },
         stop("argument 'deriv' unmatched"))
   }
   if (!is.Numeric(answer))
-    stop("the answer contains some NAs")
+    warning("the answer contains some NAs")
   answer
 }
 
@@ -1177,6 +1204,8 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                   cutpoint = NULL,
                   inverse = FALSE, deriv = 0,
                   short = TRUE, tag = FALSE) {
+
+
   if (!is.Numeric(cutpoint))
     stop("could not determine the cutpoint")
   if (any(cutpoint < 0) ||
@@ -1194,9 +1223,11 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
             ToString(cutpoint),
             if (lenc) ")" else "",
             ")", sep = "") 
-    } else
+    } else {
+      theta <- as.char.expression(theta)
       paste("2*log(0.5*qnorm(", theta,
             ") + sqrt(cutpoint+7/8))", sep = "")
+    }
     if (tag) 
       string <- paste("Poisson-ordinal link function:", string) 
     return(string)
@@ -1217,11 +1248,9 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
 
   answer <-
   if (inverse) {
-      if (deriv > 0) {
-          1 / Recall(theta = theta,
-                     cutpoint = cutpoint,
-                     inverse = FALSE, deriv = deriv)
-      } else {
+      switch(deriv+1, {
+ # deriv == 0
+          origans <- 
           if (any(cp.index <- cutpoint == 0)) {
               tmp <- theta
               tmp[cp.index] <- 
@@ -1234,11 +1263,22 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
           } else {
             pnorm(2 * exp(theta/2) - 2 * sqrt(cutpoint + 7/8))
           }
-      }
+        1 / origans
+      },
+                1 / Recall(theta = theta,
+                     cutpoint = cutpoint,
+                     inverse = FALSE, deriv = deriv),
+             stop('cannot currently handle deriv = 2')
+             )
+
+
+      
   } else {
     if (any(cp.index <- cutpoint == 0)) {
         cloglog(theta = theta,
                 inverse = inverse, deriv = deriv)
+
+        
     } else {
       smallno <- 1 * .Machine$double.eps
       SMALLNO <- 1 * .Machine$double.xmin
@@ -1246,17 +1286,24 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
       Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
       Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
       Ql <- qnorm(Theta)
+
+      
       switch(deriv+1, {
       temp <- 0.5 * Ql + sqrt(cutpoint + 7/8)
       temp <- pmax(temp, SMALLNO)
-      2 * log(temp)},
-      (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql),
-      {  stop('cannot handle deriv = 2') },
+      origans <- 2 * log(temp)
+      1 / origans
+    }, {
+      origans <- (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql)
+      1 / origans
+      },
+             
+      {  stop('cannot currently handle deriv = 2') },
       stop("argument 'deriv' unmatched"))
     }
   }
   if (!is.Numeric(answer))
-    stop("the answer contains some NAs")
+    warning("the answer contains some NAs")
   answer
 }
 
@@ -1270,6 +1317,10 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                    inverse = FALSE, deriv = 0,
                    short = TRUE, tag = FALSE) {
 
+
+
+
+
   kay <- k
   if (!is.Numeric(kay, positive = TRUE))
     stop("could not determine 'k' or it is not positive-valued")
@@ -1294,10 +1345,12 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
               ToString(kay),
               if (lenk) ")" else "",
               ")", sep = "")
-      } else
+      } else {
+        theta <- as.char.expression(theta)
         paste("2*log(sqrt(k) * sinh(qnorm(", theta,
               ")/(2*sqrt(k)) + ",
               "asinh(sqrt(cutpoint/k))))", sep = "")
+      }
       if (tag) 
         string <- paste("Negative binomial-ordinal link function:",
                         string)
@@ -1305,63 +1358,78 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
   }
 
 
-    thmat <- cbind(theta)
-    kay <- rep(kay, len = ncol(thmat))  # Allow recycling for kay
-    cutpoint <- rep(cutpoint, len = ncol(thmat))  # Allow recycling for cutpoint
-    if (ncol(thmat) > 1) {
-      answer <- thmat
-      for (ii in 1:ncol(thmat))
-          answer[, ii] <- Recall(theta = thmat[, ii],
-                               cutpoint = cutpoint[ii],
-                               k = kay[ii],
-                               inverse = inverse, deriv = deriv)
-      return(answer)
-    }
+  thmat <- cbind(theta)
+  kay <- rep(kay, len = ncol(thmat))  # Allow recycling for kay
+  cutpoint <- rep(cutpoint, len = ncol(thmat))  # Allow recycling for cutpoint
+  if (ncol(thmat) > 1) {
+    answer <- thmat
+    for (ii in 1:ncol(thmat))
+        answer[, ii] <- Recall(theta = thmat[, ii],
+                             cutpoint = cutpoint[ii],
+                             k = kay[ii],
+                             inverse = inverse, deriv = deriv)
+    return(answer)
+  }
 
-    answer <-
-    if (inverse) {
-      if (deriv > 0) {
-        1 / Recall(theta = theta,
-                   cutpoint = cutpoint,
-                   k = kay,
-                   inverse = FALSE, deriv = deriv)
-      } else {
-        if (cutpoint == 0) {
-          1.0 - (kay / (kay + care.exp(theta)))^kay
-        } else {
-            pnorm((asinh(exp(theta/2)/sqrt(kay)) -
-                   asinh(sqrt(cutpoint/kay))) * 2 * sqrt(kay))
-        }
-      }
-    } else {
-      smallno <- 1 * .Machine$double.eps
-      SMALLNO <- 1 * .Machine$double.xmin
-      Theta <- theta
-      Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
-      Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
+  answer <-
+  if (inverse) {
+      switch(deriv+1, {
       if (cutpoint == 0) {
-        switch(deriv+1, {
-        temp <- (1 - Theta)^(-1/kay) - 1
-        temp <- pmax(temp, SMALLNO)
-        log(kay) + log(temp)},
-        (kay / (1 - Theta)^(1/kay) - kay) * (1 - Theta)^(kay+1/kay),
-        {  stop('cannot handle deriv = 2') },
-        stop("argument 'deriv' unmatched"))
+        1.0 - (kay / (kay + care.exp(theta)))^kay
       } else {
-        Ql <- qnorm(Theta)
-        switch(deriv+1, {
-              temp <- sqrt(kay) * sinh(Ql/(2*sqrt(kay)) +
-                     asinh(sqrt(cutpoint/kay)))
-              temp <- pmax(temp, SMALLNO)
-              2 * log(temp)}, {
-              arg1 <- (Ql/(2*sqrt(kay)) + asinh(sqrt(cutpoint/kay)))
-              sqrt(kay) * tanh(arg1) * dnorm(Ql) },
-              {  stop('cannot handle deriv = 2') },
-              stop("argument 'deriv' unmatched"))
+          pnorm((asinh(exp(theta/2)/sqrt(kay)) -
+                 asinh(sqrt(cutpoint/kay))) * 2 * sqrt(kay))
       }
+       },  {
+      1 / Recall(theta = theta,
+                 cutpoint = cutpoint,
+                 k = kay,
+                 inverse = FALSE, deriv = deriv)
+    }, {
+     stop('cannot currently handle deriv = 2')
+   })
+
+
+
+      
+  } else {
+    smallno <- 1 * .Machine$double.eps
+    SMALLNO <- 1 * .Machine$double.xmin
+    Theta <- theta
+    Theta <- pmin(Theta, 1 - smallno)  # Since theta == 1 is a possibility
+    Theta <- pmax(Theta, smallno)  # Since theta == 0 is a possibility
+    if (cutpoint == 0) {
+      switch(deriv+1, {
+      temp <- (1 - Theta)^(-1/kay) - 1
+      temp <- pmax(temp, SMALLNO)
+      origans <- log(kay) + log(temp)
+      1 / origans
+    }, {
+      origans <- (kay / (1 - Theta)^(1/kay) - kay) * (1 - Theta)^(kay+1/kay)
+      1 / origans
+      },
+      {  stop('cannot handle deriv = 2') },
+      stop("argument 'deriv' unmatched"))
+    } else {
+      Ql <- qnorm(Theta)
+      switch(deriv+1, {
+            temp <- sqrt(kay) * sinh(Ql/(2*sqrt(kay)) +
+                   asinh(sqrt(cutpoint/kay)))
+            temp <- pmax(temp, SMALLNO)
+            origans <- 2 * log(temp)
+            1 / origans
+          }, {
+            arg1 <- (Ql/(2*sqrt(kay)) + asinh(sqrt(cutpoint/kay)))
+            origans <- sqrt(kay) * tanh(arg1) * dnorm(Ql)
+            1 / origans
+          },
+            {  stop('cannot currently handle deriv = 2') },
+            stop("argument 'deriv' unmatched"))
     }
-    if (!is.Numeric(answer)) stop("the answer contains some NAs")
-    answer
+  }
+  if (!is.Numeric(answer))
+    warning("the answer contains some NAs")
+  answer
 }
 
 
@@ -1375,6 +1443,11 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                     inverse = FALSE, deriv = 0,
                     short = TRUE, tag = FALSE) {
 
+warning("20150711; this function has not been updated")
+
+
+
+
   kay <- k
   if (!is.Numeric(kay, positive = TRUE))
     stop("could not determine argument 'k' or ",
@@ -1401,6 +1474,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
             if (lenk) ")" else "",
             "))", sep = "")
   } else {
+    theta <- as.char.expression(theta)
     paste("3*log(<a complicated expression>)", sep = "")
   }
   if (tag) 
@@ -1495,12 +1569,13 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                  dA.dtheta <- (-denomin * BB - numerat * CC) / denomin^2
                  argmax1 / (3 * dA.dtheta)
                 },
-                {  stop('cannot handle deriv = 2') },
+                {  stop('cannot currently handle deriv = 2') },
                 stop("argument 'deriv' unmatched"))
-        }
-    }
-    if (!is.Numeric(answer)) stop("the answer contains some NAs")
-    answer
+      }
+  }
+  if (!is.Numeric(answer))
+    warning("the answer contains some NAs")
+  answer
 }
 
 
@@ -1512,7 +1587,7 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
   temp <- cut(y, breaks = breaks, labels = FALSE)
   temp <- c(temp)  # integer vector of integers
   if (any(is.na(temp)))
-    stop("there are NAs")
+    warning("there are NAs")
   answer <- if (ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
   if (ncol(y) > 1) {
     ynames <- dimnames(y)[[2]]
@@ -1558,8 +1633,10 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
                        short = TRUE, tag = FALSE) {
   if (is.character(theta)) {
     string <- if (short)
-      paste("nbcanlink(", theta, ")", sep = "") else
+      paste("nbcanlink(", theta, ")", sep = "") else {
+      theta <- as.char.expression(theta)
       paste("log(", theta, " / (", theta, " + size))", sep = "")
+    }
     if (tag)
       string <- paste("Nbcanlink:", string)
     return(string)
@@ -1588,29 +1665,33 @@ foldsqrt <- function(theta,  #  = NA  , = NULL,
     theta[theta <= 0.0] <- bvalue
 
   if (inverse) {
-    if (deriv > 0) {
-      1 / Recall(theta = theta,
-                 size = size,
-                 wrt.eta = wrt.eta,
-                 bvalue = bvalue,
-                 inverse = FALSE, deriv = deriv)
-    } else {
+    switch(deriv+1, {
        ans <- (kmatrix / expm1(-theta))
        if (is.matrix(ans))
          dimnames(ans) <- NULL else
          names(ans) <- NULL
        ans
-    }
+       },
+
+        if (wrt.eta == 1) (theta * (theta + kmatrix)) / kmatrix else
+        -(theta + kmatrix),
+
+       if (wrt.eta == 1)
+       (2 * theta + kmatrix) * theta * (theta + kmatrix) / kmatrix^2 else
+        theta + kmatrix)
   } else {
     ans <-
     switch(deriv+1,
-        (log(theta / (theta + kmatrix))),
-       if (wrt.eta == 1) theta * (theta + kmatrix) / kmatrix else
-       -(theta + kmatrix),
+        log(theta / (theta + kmatrix)) ,
+
+        if (wrt.eta == 1) kmatrix / (theta * (theta + kmatrix)) else
+        -1 / (theta + kmatrix),
+
        if (wrt.eta == 1)
-       -(theta * (theta + kmatrix))^2 / ((2 * theta + kmatrix) *
-         kmatrix) else
-       (theta + kmatrix)^2)
+       (2 * theta + kmatrix) *
+         (-kmatrix) / (theta * (theta + kmatrix))^2 else
+        1 / (theta + kmatrix)^2)
+
      if (is.matrix(ans))
        dimnames(ans) <- NULL else
        names(ans) <- NULL
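
A hedged sketch, not part of the patch, of the revised derivative convention the rewritten links above appear to follow: with inverse = FALSE, deriv = 1 now returns d eta / d theta (previously the reciprocal was returned), while inverse = TRUE returns d theta / d eta; loge() and logit() are used only as convenient examples.

library(VGAM)
theta <- 0.25
loge(theta,  deriv = 1, inverse = FALSE)  # 1/theta = 4     (d eta / d theta)
loge(theta,  deriv = 1, inverse = TRUE)   # theta   = 0.25  (d theta / d eta)
logit(theta, deriv = 1, inverse = FALSE)  # 1 / (theta * (1 - theta))
logit(theta, deriv = 2, inverse = FALSE)  # (2*theta - 1) / (theta*(1-theta))^2
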
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index 660eedc..9bc6581 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -235,9 +235,19 @@ attrassignlm <- function(object, ...)
 
 
 
+
+
+
+model.matrix.vlm <- function(object, ...)
+  model.matrixvlm(object, ...)
+
+
+
+
+
  model.matrixvlm <- function(object,
-                            type = c("vlm", "lm", "lm2", "bothlmlm2"),
-                            linpred.index = NULL,
+                             type = c("vlm", "lm", "lm2", "bothlmlm2"),
+                             linpred.index = NULL,
                             ...) {
 
 
@@ -505,7 +515,7 @@ npred.vlm <- function(object,
   type.arg <- match.arg(type, c("total", "one.response"))[1]
 
 
-  ans <- 
+  MM <- 
     if (length(object@misc$M))
       object@misc$M else
     if (ncol(as.matrix(predict(object))) > 0)
@@ -513,30 +523,28 @@ npred.vlm <- function(object,
     stop("cannot seem to obtain 'M'")
 
 
-  ans <-
   if (type.arg == "one.response") {
-    ans.infos <- ans.y <- NULL
+    M1.infos <- NULL
     infos.fun <- object@family@infos
     Ans.infos <- infos.fun()
     if (is.list(Ans.infos) && length(Ans.infos$M1))
-      ans.infos <- Ans.infos$M1
+      M1.infos <- Ans.infos$M1
 
     Q1 <- Ans.infos$Q1
     if (is.numeric(Q1)) {
-      ans.y <- ncol(depvar(object)) / Q1
-      if (is.numeric(ans.infos) && ans.infos != ans.y)
+      S <- ncol(depvar(object)) / Q1  # Number of (multiple) responses
+      if (is.numeric(M1.infos) && M1.infos * S != MM)
         warning("contradiction in values after computing it two ways")
     }
 
 
-    if (is.numeric(ans.infos)) ans.infos else
-    if (is.numeric(ans.y    )) ans.y     else
-    ans
-  } else {
-    ans
+    M1 <- if (is.numeric(M1.infos)) M1.infos else
+          if (is.numeric(MM      )) MM       else
+          stop("failed to compute 'M'")
+    M1
+  } else {  # One response is assumed, by default
+    MM
   }
-
-  ans
 }
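
A hedged sketch, not part of the patch, of the reworked accessors above; the simulated two-response normal fit is purely illustrative.

library(VGAM)
set.seed(1)
udata <- data.frame(x2 = runif(30))
udata <- transform(udata, y1 = rnorm(30, 1 + 2 * x2), y2 = rnorm(30, 3 - x2))
fit <- vglm(cbind(y1, y2) ~ x2, uninormal, data = udata)
npred(fit)                          # M, expected to be 4 here (2 responses x 2)
npred(fit, type = "one.response")   # M1, expected to be 2 here
model.matrix(fit, type = "lm")      # dispatches via the new model.matrix.vlm()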
 
 
diff --git a/R/qtplot.q b/R/qtplot.q
index d7fac96..c08644f 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -13,6 +13,8 @@
 
 
  
+ 
+ 
 qtplot.lms.bcn <- function(percentiles = c(25, 50, 75),
                            eta = NULL, yoffset = 0) {
 
@@ -21,13 +23,16 @@ qtplot.lms.bcn <- function(percentiles = c(25, 50, 75),
                    dimnames = list(dimnames(eta)[[1]],
                    paste(as.character(percentiles), "%", sep = "")))
   for (ii in 1:lp) {
-    answer[, ii] <- eta[, 2] * (1+eta[, 1] * eta[, 3] *
-                    qnorm(percentiles[ii]/100))^(1/eta[, 1])
+    answer[, ii] <- qlms.bcn(p      = percentiles[ii]/100,
+                             lambda = eta[, 1],
+                             mu     = eta[, 2],
+                             sigma  = eta[, 3])
   }
   answer 
 }
  
  
+ 
 qtplot.lms.bcg <- function(percentiles = c(25,50,75),
                            eta = NULL, yoffset = 0) {
 
@@ -795,7 +800,16 @@ rlplot.gev <-
                                          extra = extra2)[1, ]
       zpp[, ii] <- (zpp[, ii] - zp) / epsilon  # On the transformed scale
     }
-    VCOV <- vcovvlm(object, untransform = TRUE)
+
+
+
+
+    VCOV <- vcov(object, untransform = TRUE)
+
+
+
+
+
     vv <- numeric(nrow(zpp))
     for (ii in 1:nrow(zpp))
       vv[ii] <- t(as.matrix(zpp[ii, ])) %*% VCOV %*% as.matrix(zpp[ii, ])
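
A hedged check, not part of the patch, that the new qlms.bcn() call in qtplot.lms.bcn() should reproduce the closed-form expression it replaces; the parameter values below are arbitrary.

library(VGAM)
lambda <- 0.4; mu <- 10; sigma <- 0.1; p <- 0.75
qlms.bcn(p, lambda = lambda, mu = mu, sigma = sigma)
mu * (1 + lambda * sigma * qnorm(p))^(1 / lambda)   # the replaced expression
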
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index d1d1399..d985002 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -25,7 +25,8 @@ summaryvglm <-
   function(object, correlation = FALSE,
            dispersion = NULL, digits = NULL,
            presid = TRUE,
-           signif.stars = getOption("show.signif.stars")
+           signif.stars = getOption("show.signif.stars"),
+           nopredictors = FALSE
           ) {
 
 
@@ -88,6 +89,7 @@ summaryvglm <-
 
 
   answer@misc$signif.stars <- signif.stars  # 20140728
+  answer@misc$nopredictors <- nopredictors  # 20150831
 
 
   if (is.numeric(stuff@dispersion))
@@ -113,7 +115,8 @@ show.summary.vglm <-
            prefix = "",
            presid = TRUE,
            signif.stars = NULL,  # Use this if logical; 20140728
-           nopredictors = FALSE) {
+           nopredictors = NULL   # Use this if logical; 20150831
+           ) {
 
   M <- x@misc$M
   coef <- x@coef3  # icients
@@ -151,6 +154,14 @@ show.summary.vglm <-
     use.signif.stars <- getOption("show.signif.stars")
 
 
+  use.nopredictors <- if (is.logical(nopredictors))
+    nopredictors else x@misc$nopredictors  # 20140728
+  if (!is.logical(use.nopredictors)) {
+    warning("cannot determine 'nopredictors'; choosing FALSE")
+    use.nopredictors <- FALSE
+  }
+
+
   cat("\nCoefficients:\n")
   printCoefmat(coef, digits = digits,
                signif.stars = use.signif.stars,
@@ -160,7 +171,7 @@ show.summary.vglm <-
 
   cat("\nNumber of linear predictors: ", M, "\n")
 
-  if (!is.null(x@misc$predictors.names) && !nopredictors) {
+  if (!is.null(x@misc$predictors.names) && !use.nopredictors) {
     if (M == 1) {
       cat("\nName of linear predictor:",
           paste(x@misc$predictors.names, collapse = ", "), "\n") 
@@ -267,6 +278,15 @@ vcovdefault <- function(object, ...) {
 
 
 
+
+
+
+vcov.vlm <- function(object, ...) {
+
+  vcovvlm(object, ...)
+}
+
+
  vcovvlm <-
 function(object, dispersion = NULL, untransform = FALSE) {
 
@@ -357,13 +377,16 @@ function(object, dispersion = NULL, untransform = FALSE) {
 
 
     if (new.way) {
-      use.earg[["inverse"]] <- TRUE # New
-      use.earg[["theta"]] <- TTheta # New
+      use.earg[["inverse"]] <- TRUE  # New
+      use.earg[["theta"]] <- TTheta  # New
       Theta <- do.call(function.name, use.earg)
 
-      use.earg[["inverse"]] <- FALSE # Reset this
-      use.earg[["deriv"]] <- 1 # New
-      use.earg[["theta"]] <- Theta # Renew this
+
+      use.earg[["inverse"]] <- TRUE  # Reset this
+
+
+      use.earg[["deriv"]] <- 1  # New
+      use.earg[["theta"]] <- Theta  # Renew this
       tvector[ii] <- do.call(function.name, use.earg)
     } else {
       stop("link functions handled in the new way now")
diff --git a/R/vglm.R b/R/vglm.R
index 138e066..4e4c121 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -172,8 +172,10 @@ vglm <- function(formula,
     slot(answer, "Xm2") <- Xm2  # The second (lm) design matrix
   if (y.arg && length(Ym2))
     slot(answer, "Ym2") <- as.matrix(Ym2)  # The second response
-  if (!is.null(form2))
+  if (!is.null(form2)) {
     slot(answer, "callXm2") <- retlist$call
+    answer@misc$Terms2 <- retlist$Terms2
+  }
   answer@misc$formula <- formula
   answer@misc$form2 <- form2
 
@@ -254,7 +256,7 @@ shadowvglm <-
          matrix(, NROW(y), 0)
     attr(x, "assign") <- attrassigndefault(x, mt)
 
-    list(Xm2 = x, Ym2 = y, call = ocall)
+    list(Xm2 = x, Ym2 = y, call = ocall, Terms2 = mt)
 }
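
A hedged sketch, not part of the patch, of where the newly retained second terms object surfaces when a 'form2' formula is supplied to vglm(); the model below is only a placeholder.

library(VGAM)
fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo,
            form2 = ~ let)
class(fit@misc$Terms2)   # expected to be a "terms" object built from form2
fit@misc$form2           # the second formula itself, stored as before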
 
 
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 1b349dc..bbd8efb 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -77,7 +77,7 @@ vglm.fit <-
       eta <- slot(family, "linkfun")(mu, extra)
     } else {
       warning("argument 'mustart' assigned a value ",
-              "but there is no 'link' slot to use it")
+              "but there is no 'linkfun' slot to use it")
     }
   }
 
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 35aef02..e6e8844 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -128,7 +128,7 @@ vlm.wfit <-
 
   dx2 <- if (is.vlmX) NULL else dimnames(xmat)[[2]]
   B <- matrix(as.numeric(NA),
-             nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
+              nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
   if (is.null(Hlist)) {
     Hlist <- replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
   }
diff --git a/build/vignette.rds b/build/vignette.rds
index aed19ee..109ed29 100644
Binary files a/build/vignette.rds and b/build/vignette.rds differ
diff --git a/data/Huggins89.t1.rda b/data/Huggins89.t1.rda
index 08db65e..4473871 100644
Binary files a/data/Huggins89.t1.rda and b/data/Huggins89.t1.rda differ
diff --git a/data/Huggins89table1.rda b/data/Huggins89table1.rda
index 5e8462f..1004846 100644
Binary files a/data/Huggins89table1.rda and b/data/Huggins89table1.rda differ
diff --git a/data/alclevels.rda b/data/alclevels.rda
index ad1edbc..2c3e528 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index 4fced4c..e9fcf0d 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index 0e31d6a..b053803 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index efece85..38527f9 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/beggs.rda b/data/beggs.rda
index 8c11c3d..81e7ca8 100644
Binary files a/data/beggs.rda and b/data/beggs.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 302d432..93a0348 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/cfibrosis.rda b/data/cfibrosis.rda
index a99d26c..54e60bf 100644
Binary files a/data/cfibrosis.rda and b/data/cfibrosis.rda differ
diff --git a/data/corbet.rda b/data/corbet.rda
index c590311..18d0b14 100644
Binary files a/data/corbet.rda and b/data/corbet.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index 8cc43af..c66a522 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 6da8d17..5051a1d 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index 38f5bbe..08a6913 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 3ff885e..8e1f2c5 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 3c750ec..26d272a 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 6e25c0f..88df8ba 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/deermice.rda b/data/deermice.rda
index 148801f..2c3e32e 100644
Binary files a/data/deermice.rda and b/data/deermice.rda differ
diff --git a/data/ducklings.rda b/data/ducklings.rda
index 7c20f4b..8fe331c 100644
Binary files a/data/ducklings.rda and b/data/ducklings.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 9444e49..18e5657 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/flourbeetle.rda b/data/flourbeetle.rda
index 2651903..b84be12 100644
Binary files a/data/flourbeetle.rda and b/data/flourbeetle.rda differ
diff --git a/data/hspider.rda b/data/hspider.rda
index b8ccf81..78b3f91 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/lakeO.rda b/data/lakeO.rda
index 6d9b8a6..14982c5 100644
Binary files a/data/lakeO.rda and b/data/lakeO.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index a098722..a306ba2 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index 568bab1..476e299 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/melbmaxtemp.rda b/data/melbmaxtemp.rda
index e7306a2..69c7442 100644
Binary files a/data/melbmaxtemp.rda and b/data/melbmaxtemp.rda differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index 9fafa1d..affea33 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/prinia.rda b/data/prinia.rda
index 94afdad..5055c0b 100644
Binary files a/data/prinia.rda and b/data/prinia.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index ee3472e..f96920c 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index f75c1ed..f9179eb 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index d8e3738..3c06750 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index ca21185..9900e20 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wine.rda b/data/wine.rda
index b95b5bf..e80e170 100644
Binary files a/data/wine.rda and b/data/wine.rda differ
diff --git a/inst/CITATION b/inst/CITATION
index c9a82b0..207d42d 100644
--- a/inst/CITATION
+++ b/inst/CITATION
@@ -6,6 +6,20 @@ year <- sub("-.*", "", meta$Date)
 note <- sprintf("R package version %s", meta$Version)
 
 
+citEntry(entry = "Book",
+  title        = "Vector Generalized Linear and Additive Models: With an Implementation in R",
+  author       = personList(as.person("Thomas W. Yee")),
+  year         = "2015",
+  publisher    = "Springer",
+  address      = "New York, USA",
+  textVersion  =
+  paste("Thomas W. Yee (2015).",
+        "Vector Generalized Linear and Additive Models:",
+        "With an Implementation in R. New York, USA: Springer.")
+)
+
+
+
 citEntry(entry = "Article",
   title        = "Vector Generalized Additive Models",
   author       = personList(as.person("Thomas W. Yee"),
@@ -22,6 +36,8 @@ citEntry(entry = "Article",
         "Journal of Royal Statistical Society, Series B, 58(3), 481-493.")
 )
 
+
+
 citEntry(entry = "Article",
   title        = "The {VGAM} Package for Categorical Data Analysis",
   author       = personList(as.person("Thomas W. Yee")),
@@ -41,27 +57,26 @@ citEntry(entry = "Article",
 )
 
 
+
 citEntry(entry = "Article",
-  title        = "The {VGAM} Package for Categorical Data Analysis",
-  author       = personList(as.person("Thomas W. Yee")),
-  journal      = "Journal of Statistical Software",
-  year         = "2010",
-  volume       = "32",
-  number       = "10",
-  pages        = "1--34",
-  url          = "http://www.jstatsoft.org/v32/i10/",
+  title        = "Row-column interaction models, with an {R} implementation",
+  author       = personList(as.person("Thomas W. Yee"),
+                            as.person("Alfian F. Hadi")),
+  journal      = "Computational Statistics",
+  year         = "2014",
+  volume       = "29",
+  number       = "6",
+  pages        = "1427--1445",
 
   textVersion  =
-  paste("Thomas W. Yee (2010).",
-        "The VGAM Package for Categorical Data Analysis.",
-        "Journal of Statistical Software, 32(10), 1-34.",
-        "URL http://www.jstatsoft.org/v32/i10/."),
+  paste("Thomas W. Yee, Alfian F. Hadi (2014).",
+        "Row-column interaction models, with an R implementation.",
+        "Computational Statistics, 29(6), 1427--1445."),
   header = "and/or"
 )
 
 
 
-
 citEntry(entry = "Manual",
          title = "{VGAM}: Vector Generalized Linear and Additive Models",
          author = personList(as.person("Thomas W. Yee")),
@@ -81,6 +96,23 @@ citEntry(entry = "Manual",
 
 
 citEntry(entry = "Article",
+  title        = "Two-parameter reduced-rank vector generalized linear models",
+  author       = personList(as.person("Thomas W. Yee")),
+  journal      = "Computational Statistics and Data Analysis",
+  year         = "2013",
+  url          = "http://ees.elsevier.com/csda",
+
+  textVersion  =
+  paste("Thomas W. Yee (2013).",
+        "Two-parameter reduced-rank vector generalized linear models.",
+        "Computational Statistics and Data Analysis.",
+        "URL http://ees.elsevier.com/csda."),
+  header = "and/or"
+)
+
+
+
+citEntry(entry = "Article",
   title        = "The {VGAM} Package for Capture-Recapture Data Using the Conditional Likelihood",
   author       = personList(as.person("Thomas W. Yee"),
                    as.person("Jakub Stoklosa"),
@@ -101,6 +133,3 @@ citEntry(entry = "Article",
 
 
 
-
-
-
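
To see how R renders the CITATION entries added above (the 2015 Springer book, the journal articles and the manual entry), one can query the installed package; a small sketch, assuming the package is installed:

  # utils::citation() assembles inst/CITATION into printable references:
  citation("VGAM")
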
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index eb6c7d2..d3093fb 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/inst/doc/crVGAM.R b/inst/doc/crVGAM.R
index cbff521..c4c00a1 100644
--- a/inst/doc/crVGAM.R
+++ b/inst/doc/crVGAM.R
@@ -175,6 +175,7 @@ summary(fit.bh)
 ###################################################
 ### code chunk number 18: plot-deermice
 ###################################################
+getOption("SweaveHooks")[["fig"]]()
 par(mfrow = c(2, 2))
 par(las = 1, cex = 1.1, mar = c(3.8, 4, 0.5, 0.2) + 0.1)
 par(mgp = c(2.3, 1, 0))  # Default is c(3, 1, 0)
@@ -189,8 +190,6 @@ plot(fit.bh, se = TRUE, las = 1, lcol = "blue", scol = "orange",
 
 
 
-
-
 ###################################################
 ### code chunk number 19: birds91read
 ###################################################
@@ -269,6 +268,7 @@ plot.info <- plot(M.h.GAM,
 ###################################################
 ### code chunk number 24: plot-bird
 ###################################################
+getOption("SweaveHooks")[["fig"]]()
 par(mfrow = c(1, 1))
 
 
diff --git a/inst/doc/crVGAM.Rnw b/inst/doc/crVGAM.Rnw
index 6a46807..86f64d7 100644
--- a/inst/doc/crVGAM.Rnw
+++ b/inst/doc/crVGAM.Rnw
@@ -126,6 +126,15 @@ options(prompt = "R> ", continue = "+")
 \label{sec:intro}
 
 
+
+Note: this vignette is essentially \cite{yee:stok:hugg:2015}.
+
+
+
+\bigskip
+
+
+
 Capture--recapture (CR) surveys are widely used in ecology and 
 epidemiology to estimate population sizes. In essence they are 
 sampling schemes that allow the estimation of both $n$ and $p$ 
@@ -378,7 +387,7 @@ function based only on the individuals observed at least once is
 \begin{eqnarray}
 \label{eq:posbern.condlikelihood}
 L_{c} & \propto & \prod_{i=1}^{n} 
-\frac{\prod\limits_{j=1}^{\tau} p_{ij}^{y_{ij}} (1-p_{ij})^{1 - y_{ij}}}
+\frac{\prod_{j=1}^{\tau} p_{ij}^{y_{ij}} (1-p_{ij})^{1 - y_{ij}}}
 {1-\prod_{s=1}^{\tau}(1-p_{is}^{\dagger})}.
 \end{eqnarray}
 is used. Here $p_{is}^{\dagger}$ are the $p_{ij}$ computed as if the
@@ -508,7 +517,8 @@ Appendix A.
 \label{sec:Nhat}
 
 
-In the above linear models, to estimate $N$ let $\pi_{i}(\btheta)=1-\prod\limits_{s=1}^{\tau}(1-p_{is}^{\dagger})$ 
+In the above linear models, to estimate $N$
+let $\pi_{i}(\btheta)=1-\prod_{s=1}^{\tau}(1-p_{is}^{\dagger})$ 
 be the probability that individual $i$ is captured at least once 
 in the course of the study. Then, if $\btheta$ is known, 
 the Horvitz--Thompson \citep[HT;][]{horv:thom:1952} estimator
@@ -1470,9 +1480,12 @@ plot(fit.bh, se = TRUE, las = 1, lcol = "blue", scol = "orange",
 
 
 % ---------------------------------------------------------------------
+
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
+
 \begin{figure}[tt]
 \begin{center}
-<<plot-deermice, width=6.0, height=5, echo=FALSE, message=FALSE, warning=FALSE>>=
+<<fig=TRUE,label=plot-deermice, width=6.0, height=5, echo=FALSE, message=FALSE, warning=FALSE>>=
 par(mfrow = c(2, 2))
 par(las = 1, cex = 1.1, mar = c(3.8, 4, 0.5, 0.2) + 0.1)
 par(mgp = c(2.3, 1, 0))  # Default is c(3, 1, 0)
@@ -1485,8 +1498,6 @@ plot(fit.bh, se = TRUE, las = 1, lcol = "blue", scol = "orange",
 # < < poz-posbernoulli-eg-deermice-smooth-shadow> >
 
 
-
-
 @
 \caption{Estimated component functions with approximate $\pm 2$ 
 pointwise SE bands fitting a $\calM_{bh}$-VGAM, using 
@@ -1497,6 +1508,9 @@ each covariate value $x_{ik}$.\label{fig:poz:deermice}
 \end{figure}
 
 
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
 % ---------------------------------------------------------------------
 
 
@@ -1604,11 +1618,13 @@ legend("topleft", legend = c("Fat present", "Fat not present"), bty = "n",
 
 
 
+% ---------------------------------------------------------------------
 
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
 
 \begin{figure}[tt]
 \begin{center}
-<<plot-bird, width=6.0, height=5.5, echo=FALSE, message=FALSE, warning=FALSE>>=
+<<fig=TRUE, label=plot-bird, width=6.0, height=5.5, echo=FALSE, message=FALSE, warning=FALSE>>=
 par(mfrow = c(1, 1))
 
 
@@ -1668,6 +1684,12 @@ Notice that the standard errors are wider at the boundaries.
 \end{figure}
 
 
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+% ---------------------------------------------------------------------
+
+
 Both the estimates for the population size and shape of the fitted 
 capture probabilities with smoothing (Figure \ref{fig:bird}) matched 
 those in previous studies, e.g., see Figure 1 of \citet{hwan:hugg:2007}. 
@@ -2112,7 +2134,7 @@ We give the first and (expected) second derivatives of the models.
 Let $z_{ij}= 1$ if individual $i$ has been captured before occasion $j$,
 else $=0$. Also, let $p_{cj}$ and $p_{rj}$ be the probability that an
 individual is captured/recaptured at sampling occasion $j$, 
-and $Q_{s:t} = \prod\limits_{j=s}^{t} (1-p_{cj})$.
+and $Q_{s:t} = \prod_{j=s}^{t} (1-p_{cj})$.
 Occasionally, subscripts $i$ are omitted for clarity.
 \cite{hugg:1989} gives a general form for the derivatives of the
 conditional likelihood (\ref{eq:posbern.condlikelihood}).
@@ -2218,9 +2240,15 @@ to the probabilities in the conditional
 likelihood function (\ref{eq:posbern.condlikelihood}), viz.
 \[
 {\widehat{p}}_{ij}^{ y_{ij}} \left(1-\widehat{p}_{ij}\right)^{1-y_{ij}}
-\cdot\left[1 - \prod\limits_{s=1}^{\tau}
-\left( 1 - \widehat{p}_{i,cs}^{}\right)\right]^{-1}.
+\cdot
+\left[
+ 1 - \prod_{s=1}^{\tau}
+\left( 1 - \widehat{p}_{i,cs}^{}\right)
+\right]^{-1}.
 \]
+
+
+
 Alternatively, the unconditional means of the $Y_j$ can be
 returned as the fitted values upon selecting
 \code{type.fitted = "mean"} argument.
diff --git a/inst/doc/crVGAM.pdf b/inst/doc/crVGAM.pdf
index 06786af..188fb99 100644
Binary files a/inst/doc/crVGAM.pdf and b/inst/doc/crVGAM.pdf differ
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index 618802f..a7b1e0b 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -47,6 +47,7 @@ AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = FALSE,
   When \code{inbreeding = TRUE}, an additional parameter \code{f} is used.
   If \code{inbreeding = FALSE} then \eqn{f = 0} and Hardy-Weinberg
   Equilibrium (HWE) is assumed.
+  The EIM is used if \code{inbreeding = FALSE}.
 
 
 
diff --git a/man/AR1.Rd b/man/AR1.Rd
index 651395a..541d880 100644
--- a/man/AR1.Rd
+++ b/man/AR1.Rd
@@ -12,7 +12,7 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge",
     lrho = "rhobit", idrift  = NULL,
     isd  = NULL, ivar = NULL, irho = NULL,
     ishrinkage = 0.9, type.likelihood = c("exact", "conditional"),
-    var.arg = FALSE, almost1 = 0.99, zero = c(-2, -3))
+    var.arg = FALSE, nodrift = FALSE, almost1 = 0.99, zero = c(-2, -3))
 }
 
 %     deviance.arg = FALSE,
@@ -51,6 +51,13 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge",
 
 
   }
+  \item{nodrift}{
+  Logical, for determining whether to estimate the drift parameter.
+  The default is to estimate it.
+  If \code{TRUE}, the drift parameter is set to 0 and not estimated.
+
+
+  }
   \item{type.likelihood}{
     What type of likelihood function is maximized.
     The first choice (default) is the sum of the marginal likelihood
@@ -155,7 +162,7 @@ AR1(ldrift = "identitylink", lsd  = "loge", lvar = "loge",
 
 }
 \examples{
-# Example 1: using  arimia.sim() to generate a stationary time series
+# Example 1: using  arima.sim() to generate a stationary time series
 nn <- 100; set.seed(1)
 tsdata <- data.frame(x2 =  runif(nn))
 tsdata  <- transform(tsdata,
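
The new nodrift argument documented above fixes the drift parameter at 0 instead of estimating it. A minimal sketch of its use, assuming a vglm() call along the lines of the Example section above (the simulated series and the intercept-only formula are made up for illustration):

  # Simulate a driftless AR(1) series and fit it with the drift fixed at 0:
  set.seed(1)
  tsdata2 <- data.frame(y = as.numeric(arima.sim(model = list(ar = 0.8), n = 100)))
  fit.nd <- vglm(y ~ 1, AR1(nodrift = TRUE), data = tsdata2, trace = TRUE)
  coef(fit.nd, matrix = TRUE)  # the drift parameter should no longer appear
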
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 23ce7c2..e482eb8 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -35,7 +35,8 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
                           probs.y = c(0.25, 0.50, 0.75),
                           multiple.responses = FALSE, earg.link = FALSE,
                           whitespace = FALSE, bred = FALSE, lss = TRUE,
-                          oim = FALSE, nsimEIM = 100, zero = NULL)
+                          oim = FALSE, nsimEIM = 100, byrow.arg = FALSE,
+                          zero = NULL)
 }
 \arguments{
 %                         apply.parint = FALSE,
@@ -327,6 +328,8 @@ except for \eqn{X_2}.
   Often this argument is only used if
   the argument \code{imethod} is assigned a certain value.
 
+
+
   }
   \item{nointercept}{
   An integer-valued vector specifying which
@@ -334,6 +337,8 @@ except for \eqn{X_2}.
   Any values must be from the set \{1,2,\ldots,\eqn{M}\}.
   A value of \code{NULL} means no such constraints.
 
+
+
   }
 
   \item{multiple.responses}{
@@ -352,6 +357,8 @@ except for \eqn{X_2}.
   \code{weights} argument is of the same dimension as the
   response and contains the number of trials.
 
+
+
   }
   \item{earg.link}{
   Sometimes the link argument can receive \code{earg}-type input,
@@ -359,12 +366,30 @@ except for \eqn{X_2}.
   This argument should be generally ignored.
 
 
+
+  }
+  \item{byrow.arg}{
+  Logical.
+  Some \pkg{VGAM} family functions that handle multiple responses
+  have arguments that accept input affecting
+  all the responses,
+  e.g., \code{imu} for initializing a \code{mu} parameter.
+  In such cases it is sometimes more convenient
+  to input one value per response by
+  setting \code{byrow.arg = TRUE}; then values are recycled
+  in order to form a matrix of the appropriate dimension.
+  This argument matches \code{byrow} in \code{\link[base]{matrix}};
+  in fact the input is fed into \code{matrix(..., byrow = byrow.arg)}.
+  This argument has no effect when there is one response.
+
+
   }
   \item{bred}{
   Logical.
   Some \pkg{VGAM} family functions will allow bias-reduction based
   on the work by Kosmidis and Firth. 
-  Currently none are working yet!
+  Sometimes half-stepping is a good idea; set \code{stepsize = 0.5}
+  and monitor convergence by setting \code{trace = TRUE}.
 
 
 
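
The byrow.arg item above says that one value per response is recycled into a matrix via matrix(..., byrow = byrow.arg). A tiny base-R illustration of that recycling (the dimensions, 4 observations by 3 responses, are made up):

  # One value per response (column), recycled across the rows:
  matrix(c(1, 2, 3), nrow = 4, ncol = 3, byrow = TRUE)
  # Each column is constant: 1, 2 and 3 respectively.
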
diff --git a/man/Links.Rd b/man/Links.Rd
index 6ab737d..6952148 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -1,23 +1,22 @@
 \name{Links}
 \alias{Links}
-\alias{TypicalVGAMlinkFunction}
+\alias{TypicalVGAMlink}
 \title{Link functions for VGLM/VGAM/etc. families}
 \description{
   The \pkg{VGAM} package provides a number of (parameter) link functions
   which are described in general here. Collectively, they offer the user
-  considerable flexibility for modelling data.
+  considerable choice and flexibility for modelling data.
 
 }
 \usage{
-TypicalVGAMlinkFunction(theta, someParameter = 0,
-                        bvalue = NULL,
-                        inverse = FALSE, deriv = 0,
-                        short = TRUE, tag = FALSE)
+TypicalVGAMlink(theta, someParameter = 0, bvalue = NULL, inverse = FALSE,
+                deriv = 0, short = TRUE, tag = FALSE)
 }
 \arguments{
   \item{theta}{
   Numeric or character.
-  Actually this can be \eqn{\theta}{theta} (default) or \eqn{\eta}{eta},
+  This is usually \eqn{\theta}{theta} (default) but can sometimes
+  be \eqn{\eta}{eta},
   depending on the other arguments.
   If \code{theta} is character then \code{inverse} and 
   \code{deriv} are ignored.
@@ -71,14 +70,17 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
 
   \item{inverse}{
-  Logical. If \code{TRUE} the inverse link value
+    Logical. If \code{TRUE} and \code{deriv = 0}  then
+    the inverse link value
   \eqn{\theta}{theta} is returned, hence the argument
   \code{theta} is really \eqn{\eta}{eta}.
+  In all other cases, the argument \code{theta} is
+  really \eqn{\theta}{theta}.
 
 
   }
   \item{deriv}{
-  Integer. Either 0, 1, or 2 specifying the order of the derivative.
+  Integer. Either 0, 1, or 2, specifying the order of the derivative.
 
 
   }
@@ -96,7 +98,8 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 }
 \value{
 
-  Returns one of the link function value or its first or second derivative,
+  Returns one of:
+  the link function value or its first or second derivative,
   the inverse link or its first or second derivative,
   or a character description of the link.
 
@@ -105,23 +108,62 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
   If \code{inverse = FALSE} and \code{deriv = 0} (default) then the
   ordinary link
   function \eqn{\eta = g(\theta)}{eta = g(theta)} is returned.
+
+
+  If \code{inverse = TRUE} and \code{deriv = 0} then the inverse
+  link function value is returned, hence \code{theta} is really
+  \eqn{\eta}{eta} (the only occasion this happens).
+
+
   If \code{inverse = FALSE} and \code{deriv = 1} then it is
-  \eqn{d\theta / d\eta}{d theta / d eta} 
+  \eqn{d\eta / d\theta}{d eta / d theta} 
   \emph{as a function of} \eqn{\theta}{theta}.
   If \code{inverse = FALSE} and \code{deriv = 2} then it is
+  \eqn{d^2\eta / d\theta^2}{d^2 eta / d theta^2}
+  \emph{as a function of} \eqn{\theta}{theta}.
+
+
+  If \code{inverse = TRUE} and \code{deriv = 1} then it is
+  \eqn{d\theta / d\eta}{d theta / d eta} 
+  \emph{as a function of} \eqn{\theta}{theta}.
+  If \code{inverse = TRUE} and \code{deriv = 2} then it is
   \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2}
   \emph{as a function of} \eqn{\theta}{theta}.
 
 
-  If \code{inverse = TRUE} and \code{deriv = 0} then the inverse
-  link function is returned, hence \code{theta} is really
-  \eqn{\eta}{eta}.
-  If \code{inverse = TRUE} and \code{deriv} is positive then the 
-  \emph{reciprocal} of the same link function with
-  \code{(theta = theta, someParameter, inverse = TRUE, deriv = deriv)}
-  is returned.
+
+  It is only when \code{deriv = 1} that
+  \code{linkfun(theta, deriv = 1, inverse = TRUE)}
+  and
+  \code{linkfun(theta, deriv = 1, inverse = FALSE)}
+    are \emph{reciprocals} of each other.
+    In particular,
+  \code{linkfun(theta, deriv = 2, inverse = TRUE)}
+  and
+  \code{linkfun(theta, deriv = 2, inverse = FALSE)}
+  are \emph{not} reciprocals of each other in general.
+
 
 
+% Prior to 20150711; this was what it was:
+  
+% If \code{inverse = FALSE} and \code{deriv = 1} then it is
+% \eqn{d\theta / d\eta}{d theta / d eta} 
+% \emph{as a function of} \eqn{\theta}{theta}.
+% If \code{inverse = FALSE} and \code{deriv = 2} then it is
+% \eqn{d^2\theta / d\eta^2}{d^2 theta / d eta^2}
+% \emph{as a function of} \eqn{\theta}{theta}.
+
+
+% If \code{inverse = TRUE} and \code{deriv = 0} then the inverse
+% link function is returned, hence \code{theta} is really
+% \eqn{\eta}{eta}.
+% If \code{inverse = TRUE} and \code{deriv} is positive then the 
+% \emph{reciprocal} of the same link function with
+% \code{(theta = theta, someParameter, inverse = TRUE, deriv = deriv)}
+% is returned.
+  
+
 }
 \details{
   Almost all \pkg{VGAM} link functions have something similar to
@@ -129,6 +171,9 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
   In this help file we have \eqn{\eta = g(\theta)}{eta = g(theta)}
   where \eqn{g} is the link function, \eqn{\theta}{theta} is the
   parameter and \eqn{\eta}{eta} is the linear/additive predictor.
+  The link \eqn{g} must be strictly monotonic and
+  twice-differentiable in its range.
+
 
 
 % The arguments \code{short} and \code{tag} are used only if
@@ -143,6 +188,8 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
   The following is a brief enumeration of all \pkg{VGAM} link functions.
 
+
+
   For parameters lying between 0 and 1 (e.g., probabilities):
   \code{\link{logit}},
   \code{\link{probit}},
@@ -192,6 +239,27 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
 }
 
+
+\section{Warning }{
+  The output of link functions changed at \pkg{VGAM} \code{0.9-9}
+  (date was around 2015-07).
+  Formerly, \code{linkfun(theta, deriv = 1)} is now
+  \code{linkfun(theta, deriv = 1, inverse = TRUE)}, or equivalently,
+  \code{1 / linkfun(theta, deriv = 1)}.
+  Also, formerly, \code{linkfun(theta, deriv = 2)} was
+  \code{1 / linkfun(theta, deriv = 2, inverse = TRUE)}.
+  This was a bug.
+  Altogether, these are big changes and the user should beware!
+
+
+  
+  One day in the future, \emph{all} \pkg{VGAM} link functions
+  may be renamed to end in the characters \code{"link"}.
+
+
+  
+}
+
 \seealso{
   \code{\link{TypicalVGAMfamilyFunction}},
   \code{\link{linkfun}},
@@ -222,7 +290,7 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
   \code{binomialff(link = c("logit", "probit", "cloglog",
   "cauchit", "identitylink"), ...)}
   it is now
-  \code{binomialff(link = "logit", ...)}
+  \code{binomialff(link = "logit", ...)}.
   No checking will be done to see if the user's choice is reasonable.
   This means that the user can write his/her own \pkg{VGAM} link function
   and use it within any \pkg{VGAM} family function.
@@ -239,9 +307,11 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
   during the 2nd half of 2012).
   The major change is that arguments such as \code{offset} that used to
   be passed in via those arguments can done directly through
-  the link function. For example, \code{gev(lshape = "logoff",
-  eshape = list(offset = 0.5))} is replaced by \code{gev(lshape
-  = logoff(offset = 0.5))}. The \code{@misc} slot no longer
+  the link function. For example,
+  \code{gev(lshape = "logoff", eshape = list(offset = 0.5))}
+  is replaced by
+  \code{gev(lshape = logoff(offset = 0.5))}.
+  The \code{@misc} slot no longer
   has \code{link} and \code{earg} components, but two other
   components replace these. Functions such as \code{dtheta.deta()},
   \code{d2theta.deta2()}, \code{eta2theta()}, \code{theta2eta()}
@@ -266,8 +336,8 @@ logoff(1:5, earg = list(offset = 1))
 powerlink(1:5, earg = list(power = 2))
 }
 
-fit1 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua)  # okay
-fit2 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua)  # okay
+fit1 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua)  # best
+fit2 <- vgam(agaaus ~ altitude, binomialff(link =  cloglog ), hunua)  # okay
 
 \dontrun{
 # This no longer works since "clog" is not a valid VGAM link function:
@@ -279,8 +349,7 @@ y <- rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1))
 fit1 <- vglm(y ~ 1, betaR(lshape1 = "identitylink", lshape2 = "identitylink"),
              trace = TRUE, crit = "coef")
 fit2 <- vglm(y ~ 1, betaR(lshape1 = logoff(offset = 1.1),
-                          lshape2 = logoff(offset = 1.1)),
-            trace = TRUE, crit = "coef")
+                          lshape2 = logoff(offset = 1.1)), trace = TRUE)
 vcov(fit1, untransform = TRUE)
 vcov(fit1, untransform = TRUE) - vcov(fit2, untransform = TRUE)  # Should be all 0s
 \dontrun{ # This is old:
@@ -290,12 +359,12 @@ fit2 at misc$earg  # Some 'special' parameters are here
 
 
 par(mfrow = c(2, 2))
-p <- seq(0.01, 0.99, len = 200)
+p <- seq(0.05, 0.95, len = 200)  # A rather restricted range
 x <- seq(-4, 4, len = 200)
 plot(p, logit(p), type = "l", col = "blue")
 plot(x, logit(x, inverse = TRUE), type = "l", col = "blue")
-plot(p, logit(p, deriv = 1), type = "l", col = "blue")  # reciprocal!
-plot(p, logit(p, deriv = 2), type = "l", col = "blue")  # reciprocal!
+plot(p, logit(p, deriv = 1), type = "l", col = "blue")  # 1 / (p*(1-p))
+plot(p, logit(p, deriv = 2), type = "l", col = "blue")  # (2*p-1)/(p*(1-p))^2
 }
 }
 \keyword{models}
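
The revised value and Warning sections above state that, under the post-0.9-9 semantics, the deriv = 1 values with inverse = TRUE and inverse = FALSE are reciprocals of each other, whereas the deriv = 2 pair are not in general. A small numerical check, assuming VGAM 0.9-9 or later is attached:

  p <- seq(0.05, 0.95, by = 0.05)
  d1  <- logit(p, deriv = 1)                  # d eta / d theta = 1/(p*(1-p))
  d1i <- logit(p, deriv = 1, inverse = TRUE)  # d theta / d eta
  max(abs(d1 * d1i - 1))                      # ~0: reciprocals when deriv = 1
  d2  <- logit(p, deriv = 2)
  d2i <- logit(p, deriv = 2, inverse = TRUE)
  max(abs(d2 * d2i - 1))                      # generally not 0 (see Warning above)
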
diff --git a/man/SURff.Rd b/man/SURff.Rd
index 64545d0..fbb2d5b 100644
--- a/man/SURff.Rd
+++ b/man/SURff.Rd
@@ -1,7 +1,7 @@
 \name{SURff}
 \alias{SURff}
 %- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Seemingly Unrelated Regressions
+\title{ Seemingly Unrelated Regressions Family Function
 %%  ~~function to do ... ~~
 }
 \description{
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index 4864d52..d464b2b 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -69,7 +69,7 @@ the covariates.
 For a complete list of this package, use \code{library(help = "VGAM")}.
 New \pkg{VGAM} family functions are continually being written and
 added to the package.
-A monograph about VGLM and VGAMs etc. is currently in the making.
+A monograph about VGLMs and VGAMs etc. appeared in October 2015.
 
 
 
@@ -118,7 +118,8 @@ Maintainer: Thomas Yee \email{t.yee at auckland.ac.nz}.
 Yee, T. W. (2015)
 Vector Generalized Linear and Additive Models:
 With an Implementation in R.
-\emph{Springer} (to appear).
+New York, USA: \emph{Springer}.
+
 
 
 Yee, T. W. and Hastie, T. J. (2003)
@@ -170,8 +171,8 @@ The \pkg{VGAM} package for categorical data analysis.
 
 
 
-My website for the \pkg{VGAM} package is at
-\url{https://www.stat.auckland.ac.nz/~yee/VGAM}
+My website for the \pkg{VGAM} package and book is at
+\url{https://www.stat.auckland.ac.nz/~yee}
 and I hope to put more resources there in the future,
 especially as relating to my book.
 
diff --git a/man/acat.Rd b/man/acat.Rd
index e606181..158062c 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -69,9 +69,9 @@ acat(link = "loge", parallel = FALSE, reverse = FALSE,
 
 }
 \references{
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 
 Simonoff, J. S. (2003)
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 862c94b..cdca950 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -233,9 +233,10 @@ alaplace3(llocation = "identitylink", lscale = "loge", lkappa = "loge",
   Boston: Birkhauser.
 
 
-Yee, T. W. (2014)
-Vector Generalized Linear and Additive Models.
-\emph{Monograph in preparation}.
+Yee, T. W. (2015)
+Vector Generalized Linear and Additive Models: With an
+Implementation in R.
+New York, USA: \emph{Springer}.
 
 
 
diff --git a/man/betabinomUC.Rd b/man/betabinomUC.Rd
index 6026b2e..9741559 100644
--- a/man/betabinomUC.Rd
+++ b/man/betabinomUC.Rd
@@ -19,10 +19,15 @@
 dbetabinom(x, size, prob, rho = 0, log = FALSE)
 pbetabinom(q, size, prob, rho, log.p = FALSE)
 rbetabinom(n, size, prob, rho = 0)
-dbetabinom.ab(x, size, shape1, shape2, log = FALSE, .dontuse.prob = NULL)
+dbetabinom.ab(x, size, shape1, shape2, log = FALSE, Inf.shape = 1e6)
 pbetabinom.ab(q, size, shape1, shape2, log.p = FALSE)
 rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
 }
+
+
+%             Infinity.shape = 1e5 .dontuse.prob = NULL
+
+
 \arguments{
   \item{x, q}{vector of quantiles.}
 % \item{p}{vector of probabilities.}
@@ -60,6 +65,19 @@ rbetabinom.ab(n, size, shape1, shape2, .dontuse.prob = NULL)
 
   }
 
+  \item{Inf.shape}{
+  Numeric. A large value such that,
+  if \code{shape1} or \code{shape2} exceeds this, then
+  it is taken to be \code{Inf}.
+  Also, if \code{shape1} or \code{shape2} is less than the
+  reciprocal of \code{Inf.shape},
+  then it might be loosely thought of as being effectively \code{0}
+  (although it is not treated exactly so in the code).
+  This feature/approximation is needed to avoid numerical
+  problems with catastrophic cancellation arising from
+  multiple \code{\link[base:Special]{lbeta}} calls.
+
+
+  }
   \item{.dontuse.prob}{
   An argument that should be ignored and unused.
 
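
The Inf.shape item above treats very large shape parameters as infinite; in that limit the beta-binomial collapses to a binomial with prob = shape1 / (shape1 + shape2). A hedged check of that limiting behaviour (the particular shape values are made up; they exceed the default Inf.shape = 1e6):

  x <- 0:10
  # Both shapes are effectively Inf, so the result should be close to a
  # Binomial(10, 0.5) density:
  max(abs(dbetabinom.ab(x, size = 10, shape1 = 1e7, shape2 = 1e7) -
          dbinom(x, size = 10, prob = 0.5)))
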
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index 198df78..00e80b5 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -156,7 +156,8 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
 \author{ Thomas W. Yee }
 
 \note{
-  If \code{multiple.responses} is \code{FALSE} (default) then the response can be of one
+  If \code{multiple.responses} is \code{FALSE} (default) then
+  the response can be of one
   of two formats:
   a factor (first level taken as failure),
   or a 2-column matrix (first column = successes) of counts.
@@ -172,8 +173,8 @@ binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
   predictors.
 
 
-  If \code{multiple.responses} is \code{TRUE}, then the matrix response can only be of
-  one format: a matrix of 1's and 0's (1 = success).
+  If \code{multiple.responses} is \code{TRUE}, then the matrix response
+  can only be of one format: a matrix of 1's and 0's (1 = success).
 
 
   The call \code{binomialff(dispersion = 0, ...)} is equivalent to
@@ -246,7 +247,8 @@ quasibinomialff()
 quasibinomialff(link = "probit")
 
 shunua <- hunua[sort.list(with(hunua, altitude)), ]  # Sort by altitude
-fit <- vglm(agaaus ~ poly(altitude, 2), binomialff(link = cloglog), shunua)
+fit <- vglm(agaaus ~ poly(altitude, 2), binomialff(link = cloglog),
+            data = shunua)
 \dontrun{
 plot(agaaus ~ jitter(altitude), shunua, col = "blue", ylab = "P(Agaaus = 1)",
      main = "Presence/absence of Agathis australis", las = 1)
diff --git a/man/binormal.Rd b/man/binormal.Rd
index eb4ef69..c8d91bc 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -38,7 +38,13 @@ binormal(lmean1 = "identitylink", lmean2 = "identitylink",
   \item{eq.mean, eq.sd}{ 
   Logical or formula.
   Constrains the means or the standard deviations to be equal.
-  Only one of these arguments may be assigned a value.
+
+
+
+
+% 20150530; FALSE now; they work separately:
+% Only one of these arguments may be assigned a value.
+
 
 
   }
@@ -104,7 +110,7 @@ bdata <- data.frame(x2 = runif(nn), x3 = runif(nn))
 bdata <- transform(bdata, y1 = rnorm(nn, 1 + 2 * x2),
                           y2 = rnorm(nn, 3 + 4 * x2))
 fit1 <- vglm(cbind(y1, y2) ~ x2,
-            binormal(eq.sd = TRUE), data = bdata, trace = TRUE)
+             binormal(eq.sd = TRUE), data = bdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 constraints(fit1)
 summary(fit1)
diff --git a/man/brat.Rd b/man/brat.Rd
index c47d316..61ed918 100644
--- a/man/brat.Rd
+++ b/man/brat.Rd
@@ -74,9 +74,9 @@ brat(refgp = "last", refvalue = 1, ialpha = 1)
 
 }
 \references{ 
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 
 Stigler, S. (1994)
diff --git a/man/bratUC.Rd b/man/bratUC.Rd
index 89f2f80..b941fb8 100644
--- a/man/bratUC.Rd
+++ b/man/bratUC.Rd
@@ -71,9 +71,9 @@ Brat(mat, ties = 0 * mat, string = c(">", "=="), whitespace = FALSE)
 \references{
 
 
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 
 }
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
index dcdc9b8..4b4c098 100644
--- a/man/cauchit.Rd
+++ b/man/cauchit.Rd
@@ -51,7 +51,7 @@ cauchit(theta, bvalue = .Machine$double.eps,
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of
   \code{theta} if \code{inverse = FALSE}, else if \code{inverse = TRUE}
   then it returns the reciprocal.
 
diff --git a/man/cens.poisson.Rd b/man/cens.poisson.Rd
index 516d9b9..7937aa8 100644
--- a/man/cens.poisson.Rd
+++ b/man/cens.poisson.Rd
@@ -70,6 +70,7 @@ cens.poisson(link = "loge", imu = NULL)
   Consequently the input must be tweaked to conform to the
   \code{(start, end]} format.
 
+
 }
 
 \seealso{
@@ -77,6 +78,7 @@ cens.poisson(link = "loge", imu = NULL)
     \code{\link{poissonff}},
     \code{\link{Links}}.
 
+
 }
 \examples{
 # Example 1: right censored data
@@ -95,30 +97,40 @@ table(print(depvar(fit)))  # Another check; U+ means >= U
 
 # Example 2: left censored data
 L <- 15
-cdata <- transform(cdata, cY = pmax(L, y),
-                          lcensored = y <  L)  # Note y < L, not cY == L or y <= L
+cdata <- transform(cdata,
+                   cY = pmax(L, y),
+                   lcensored = y <  L)  # Note y < L, not cY == L or y <= L
 cdata <- transform(cdata, status = ifelse(lcensored, 0, 1))
 with(cdata, table(cY))
 with(cdata, table(lcensored))
 with(cdata, table(ii <- print(SurvS4(cY, status, type = "left"))))  # Check
-fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cens.poisson, data = cdata, trace = TRUE)
+fit <- vglm(SurvS4(cY, status, type = "left") ~ 1, cens.poisson,
+            data = cdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 
 
 # Example 3: interval censored data
 cdata <- transform(cdata, Lvec = rep(L, len = N),
                           Uvec = rep(U, len = N))
-cdata <- transform(cdata, icensored = Lvec <= y & y < Uvec)  # Not lcensored or rcensored
+cdata <-
+  transform(cdata,
+            icensored = Lvec <= y & y < Uvec)  # Not lcensored or rcensored
 with(cdata, table(icensored))
-cdata <- transform(cdata, status = rep(3, N))       # 3 means interval censored
-cdata <- transform(cdata, status = ifelse(rcensored, 0, status))  # 0 means right censored
-cdata <- transform(cdata, status = ifelse(lcensored, 2, status))  # 2 means left  censored
+cdata <- transform(cdata, status = rep(3, N))  # 3 means interval censored
+cdata <-
+  transform(cdata,
+            status = ifelse(rcensored, 0, status))  # 0 means right censored
+cdata <-
+  transform(cdata,
+            status = ifelse(lcensored, 2, status))  # 2 means left  censored
 # Have to adjust Lvec and Uvec because of the (start, end] format:
 cdata$Lvec[with(cdata, icensored)] <- cdata$Lvec[with(cdata, icensored)] - 1
 cdata$Uvec[with(cdata, icensored)] <- cdata$Uvec[with(cdata, icensored)] - 1
-cdata$Lvec[with(cdata, lcensored)] <- cdata$Lvec[with(cdata, lcensored)]  # Unchanged
-cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)]  # Unchanged
-with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval"))))  # Check
+# Unchanged:
+cdata$Lvec[with(cdata, lcensored)] <- cdata$Lvec[with(cdata, lcensored)]
+cdata$Lvec[with(cdata, rcensored)] <- cdata$Uvec[with(cdata, rcensored)]
+with(cdata,
+     table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval"))))  # Check
 
 fit <- vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
             cens.poisson, data = cdata, trace = TRUE)
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index 37de246..abab645 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -40,11 +40,11 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 \value{
   For \code{deriv = 0}, the complimentary log-log of \code{theta},
   i.e., \code{log(-log(1 - theta))} when \code{inverse = FALSE}, and if
-  \code{inverse = TRUE} then \code{1-exp(-exp(theta))},.
+  \code{inverse = TRUE} then \code{1-exp(-exp(theta))}.
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
@@ -57,6 +57,7 @@ cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
     McCullagh, P. and Nelder, J. A. (1989)
     \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
     
+    
 }
 \author{ Thomas W. Yee }
 
diff --git a/man/confintvglm.Rd b/man/confintvglm.Rd
new file mode 100644
index 0000000..202abb3
--- /dev/null
+++ b/man/confintvglm.Rd
@@ -0,0 +1,106 @@
+\name{confintvglm}
+%\name{confint}
+\alias{confint}
+\alias{confintvglm}
+\alias{confintrrvglm}
+\alias{confintvgam}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Confidence Intervals for VGLM Parameters }
+\description{
+  Computes confidence intervals for one or more parameters in a fitted
+  model.  Currently the object must be a
+  \code{"\link{vglm}"} object.
+
+}
+\usage{
+confint(object, parm, level = 0.95, \dots)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{ A fitted model object.
+
+
+  }
+ \item{parm, level, \dots}{Same as \code{\link[stats]{confint}}.
+ }
+}
+\details{
+  This methods function is based on \code{\link[stats]{confint.default}}
+  and assumes
+  asymptotic normality. In particular,
+  the \code{\link[VGAM:coefvlm]{coef}} and
+  \code{vcov} methods functions are used for
+  \code{\link[VGAM]{vglm-class}} objects.
+  Unlike for \code{\link[stats]{glm}} objects, there is no
+  profiling currently implemented.
+
+
+
+  For
+  \code{\link[VGAM]{rrvglm-class}}
+  objects, currently an error message is produced because
+  I haven't gotten around to writing the methods function;
+  it's not too hard, but I am too busy!
+  An interim measure is to 
+  coerce the object into a \code{"\link{vglm}"} object,
+  but then the confidence intervals will tend to be too narrow because
+  the estimated constraint matrices are treated as known.
+  
+  
+  For
+  \code{\link[VGAM]{vgam-class}}
+  objects, currently an error message is produced because
+  the theory is undeveloped.
+
+
+
+}
+\value{
+  Same as \code{\link[stats]{confint}}.
+
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+%\section{Warning }{
+%}
+
+\seealso{
+  \code{\link{vcovvlm}},
+  \code{\link{summaryvglm}},
+  \code{\link[stats]{confint}}.
+
+
+}
+
+\examples{
+# Example 1: this is based on a glm example
+counts <- c(18,17,15,20,10,20,25,13,12)
+outcome <- gl(3, 1, 9); treatment <- gl(3, 3)
+ glm.D93 <-  glm(counts ~ outcome + treatment, family = poisson())
+vglm.D93 <- vglm(counts ~ outcome + treatment, family = poissonff)
+confint(glm.D93) # needs MASS to be present on the system
+confint.default(glm.D93)  # based on asymptotic normality
+confint(vglm.D93)
+confint(vglm.D93) - confint(glm.D93)    # Should be all 0s
+confint(vglm.D93) - confint.default(glm.D93)  # based on asymptotic normality
+
+# Example 2: simulated negative binomial data with multiple responses
+ndata <- data.frame(x2 = runif(nn <- 300))
+ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
+                          y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
+fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+confint(fit1)
+confint(fit1, "x2:1")  #  This might be improved to "x2" some day...
+fit2 <- rrvglm(y1 ~ x2, negbinomial(zero = NULL), data = ndata)
+confint(as(fit2, "vglm"))  # Too narrow (SEs are biased downwards)
+}
+\keyword{models}
+\keyword{regression}
+
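
The Details section above says confint for "vglm" objects follows confint.default: a Wald interval built from coef() and vcov() under asymptotic normality. A minimal sketch of that computation (the helper name wald.ci is made up for illustration):

  wald.ci <- function(fit, level = 0.95) {
    est <- coef(fit)
    se  <- sqrt(diag(vcov(fit)))
    z   <- qnorm(1 - (1 - level) / 2)
    cbind(lower = est - z * se, upper = est + z * se)
  }
  # e.g., wald.ci(vglm.D93) should roughly reproduce confint(vglm.D93) above.
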
diff --git a/man/cratio.Rd b/man/cratio.Rd
index f2b077a..6d403d7 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -76,9 +76,9 @@ cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
 }
 \references{
 
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 
 Simonoff, J. S. (2003)
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index b358040..93cbf28 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -160,14 +160,14 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
 }
 \references{
 
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 
 Agresti, A. (2010)
 \emph{Analysis of Ordinal Categorical Data},
-2nd ed. New York: Wiley.
+2nd ed. Hoboken, NJ, USA: Wiley.
 
 
 Dobson, A. J. and Barnett, A. (2008)
diff --git a/man/explink.Rd b/man/explink.Rd
index 7586535..0fda797 100644
--- a/man/explink.Rd
+++ b/man/explink.Rd
@@ -54,7 +54,7 @@ explink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FA
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
index 880eb30..73c9709 100644
--- a/man/fisherz.Rd
+++ b/man/fisherz.Rd
@@ -59,7 +59,7 @@ fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/foldsqrt.Rd b/man/foldsqrt.Rd
index 65156f9..60b4797 100644
--- a/man/foldsqrt.Rd
+++ b/man/foldsqrt.Rd
@@ -52,7 +52,7 @@ foldsqrt(theta, min = 0, max = 1, mux = sqrt(2),
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/formulavlm.Rd b/man/formulavlm.Rd
new file mode 100644
index 0000000..942cf5d
--- /dev/null
+++ b/man/formulavlm.Rd
@@ -0,0 +1,88 @@
+\name{formulavlm}
+%\name{confint}
+\alias{formula.vlm}
+\alias{formulavlm}
+\alias{term.names}
+\alias{term.namesvlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Model Formulae and Term Names for VGLMs }
+\description{
+     The methods function for \code{formula} to
+     extract the formula from a fitted object,
+     as well as a methods function to return the names
+     of the terms in the formula.
+
+}
+\usage{
+\method{formula}{vlm}(x, \dots)
+formulavlm(x, form.number = 1, \dots)
+term.names(model, \dots)
+term.namesvlm(model, form.number = 1, \dots)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{x, model}{ A fitted model object.
+
+
+  }
+ \item{form.number}{Formula number, either 1 or 2,
+       corresponding to the arguments \code{formula}
+       and \code{form2} respectively.
+
+
+  }
+ \item{\dots}{Same as \code{\link[stats]{formula}}.
+ }
+}
+\details{
+  The \code{formula} methods function is
+  based on \code{\link[stats]{formula}}.
+
+
+}
+\value{
+  The \code{formula} methods function should return something similar to
+  \code{\link[stats]{formula}}.
+  The \code{term.names} methods function should return a character vector
+  with the names of the terms in the formula; this includes any intercept
+  (which is denoted by \code{"(Intercept)"} as the first element).
+
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+
+
+
+%\note{
+%}
+%\section{Warning }{
+%}
+
+
+
+\seealso{
+  \code{\link{has.interceptvlm}}.
+% \code{termsvlm}.
+
+
+}
+
+\examples{
+# Example: this is based on a glm example
+counts <- c(18,17,15,20,10,20,25,13,12)
+outcome <- gl(3, 1, 9); treatment <- gl(3, 3)
+vglm.D93 <- vglm(counts ~ outcome + treatment, family = poissonff)
+formula(vglm.D93)
+pdata <- data.frame(counts, outcome, treatment)  # Better style
+vglm.D93 <- vglm(counts ~ outcome + treatment, poissonff, data = pdata)
+formula(vglm.D93)
+term.names(vglm.D93)
+responseName(vglm.D93)
+has.intercept(vglm.D93)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index fa5cfeb..9a9b264 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -9,7 +9,9 @@
 }
 \usage{
 gengamma.stacy(lscale = "loge", ld = "loge", lk = "loge",
-               iscale = NULL, id = NULL, ik = NULL, zero = NULL)
+               iscale = NULL, id = NULL, ik = NULL,
+               gscale    = exp(-5:5), gshape1.d = exp(-5:5),
+               gshape2.k = exp(-5:5), zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -26,6 +28,12 @@ gengamma.stacy(lscale = "loge", ld = "loge", lk = "loge",
 
 
   }
+  \item{gscale, gshape1.d, gshape2.k}{
+    See \code{\link{CommonVGAMffArguments}} for information.
+    Replaced by \code{iscale}, \code{id} etc. if given.
+
+
+  }
   \item{zero}{
   An integer-valued vector specifying which
   linear/additive predictors are modelled as intercepts only.
diff --git a/man/golf.Rd b/man/golf.Rd
index c3c3a7d..caabb70 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -104,20 +104,21 @@ golf(theta, lambda = 1, cutpoint = NULL,
 
 }
 \examples{
+\dontrun{
 golf("p", lambda = 1, short = FALSE)
 golf("p", lambda = 1, tag = TRUE)
 
 p <- seq(0.02, 0.98, len = 201)
 y <- golf(p, lambda = 1)
-y. <- golf(p, lambda = 1, deriv = 1)
-max(abs(golf(y, lambda = 1, inv = TRUE) - p))  # Should be 0
-
-\dontrun{par(mfrow = c(2, 1), las = 1)
-plot(p, y, type = "l", col = "blue", main = "golf()")
-abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
-plot(p, y., type = "l", col = "blue",
-     main = "(Reciprocal of) first GOLF derivative")
-}
+y. <- golf(p, lambda = 1, deriv = 1, inverse = TRUE)
+max(abs(golf(y, lambda = 1, inverse = TRUE) - p))  # Should be 0
+
+#\ dontrun{par(mfrow = c(2, 1), las = 1)
+#plot(p, y, type = "l", col = "blue", main = "golf()")
+#abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
+#plot(p, y., type = "l", col = "blue",
+#     main = "(Reciprocal of) first GOLF derivative")
+#}
 
 # Another example
 gdata <- data.frame(x2 = sort(runif(nn <- 1000)))
@@ -129,8 +130,8 @@ gdata <- transform(gdata,
 cutpoints <- c(-Inf, 10, 20, Inf)
 gdata <- transform(gdata, cuty = Cut(y1, breaks = cutpoints))
 
-\dontrun{ par(mfrow = c(1, 1), las = 1)
-with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
+#\ dontrun{ par(mfrow = c(1, 1), las = 1)
+#with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
 with(gdata, table(cuty) / sum(table(cuty)))
 fit <- vglm(cuty ~ x2 + x3, cumulative(multiple.responses = TRUE,
            reverse = TRUE, parallel = FALSE ~ -1,
@@ -144,6 +145,7 @@ coef(fit, matrix = TRUE)
 constraints(fit)
 fit at misc
 }
+}
 \keyword{math}
 \keyword{models}
 \keyword{regression}
diff --git a/man/has.intercept.Rd b/man/has.intercept.Rd
new file mode 100644
index 0000000..279e614
--- /dev/null
+++ b/man/has.intercept.Rd
@@ -0,0 +1,86 @@
+\name{has.interceptvlm}
+%\name{confint}
+\alias{has.intercept}
+%\alias{has.intercept.vlm}
+\alias{has.interceptvlm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Has a Fitted VGLM Got an Intercept Term? }
+\description{
+     Looks at the \code{formula} to
+     see if it has an intercept term.
+
+}
+\usage{
+has.intercept(object, \dots)
+has.interceptvlm(object, form.number = 1, \dots)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{ A fitted model object.
+
+
+  }
+ \item{form.number}{Formula number, either 1 or 2,
+       corresponding to the arguments \code{formula}
+       and \code{form2} respectively.
+
+
+  }
+ \item{\dots}{Arguments that might be passed from
+  one function to another.
+
+ }
+}
+
+\details{
+  This methods function is a simple way to determine whether a
+  fitted \code{\link{vglm}} object etc. has an intercept term
+  or not.
+  It is not entirely foolproof because one might suppress the
+  intercept from the formula and then add in a variable in the
+  formula that has a constant value.
+
+
+}
+\value{
+  Returns a single logical.
+
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+%\section{Warning }{
+%}
+
+\seealso{
+  \code{\link{formulavlm}},
+  \code{termsvlm}.
+
+
+}
+
+\examples{
+# Example: this is based on a glm example
+counts <- c(18,17,15,20,10,20,25,13,12)
+outcome <- gl(3, 1, 9); treatment <- gl(3, 3)
+pdata <- data.frame(counts, outcome, treatment)  # Better style
+vglm.D93 <- vglm(counts ~ outcome + treatment, poissonff, data = pdata)
+formula(vglm.D93)
+term.names(vglm.D93)
+responseName(vglm.D93)
+has.intercept(vglm.D93)
+}
+\keyword{models}
+\keyword{regression}
+
+
+
+% \method{has.intercept}{vlm}(object, \dots)
+
+
+
diff --git a/man/identitylink.Rd b/man/identitylink.Rd
index c5a4289..f5e5ec3 100644
--- a/man/identitylink.Rd
+++ b/man/identitylink.Rd
@@ -52,7 +52,7 @@ identitylink(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
   \code{theta} when \code{inverse = FALSE},
   and if \code{inverse = TRUE} then \code{theta}.
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/logc.Rd b/man/logc.Rd
index 7822998..ada9b6c 100644
--- a/man/logc.Rd
+++ b/man/logc.Rd
@@ -49,7 +49,7 @@ logc(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/loge.Rd b/man/loge.Rd
index 5394939..854d311 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -66,7 +66,7 @@ logneg(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
   when \code{inverse = FALSE}, and if \code{inverse = TRUE} then
   \code{exp(theta)}.
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/logit.Rd b/man/logit.Rd
index 66e5723..b50ac12 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -84,7 +84,7 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
@@ -125,7 +125,7 @@ extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
     \code{\link{multilogit}}.
 
 
- }
+}
 \examples{
 p <- seq(0.01, 0.99, by = 0.01)
 logit(p)
@@ -144,14 +144,16 @@ extlogit(p, min = 1, max = 2,
 y <- seq(-4, 4, length = 100)
 p <- seq(0.01, 0.99, by = 0.01)
 for (d in 0:1) {
-  matplot(p, cbind(logit(p, deriv = d), probit(p, deriv = d)),
+  myinv <- (d > 0)
+  matplot(p, cbind( logit(p, deriv = d, inverse = myinv),
+                   probit(p, deriv = d, inverse = myinv)),
           type = "n", col = "purple", ylab = "transformation", las = 1,
           main = if (d ==  0) "Some probability link functions"
-          else "First derivative")
-  lines(p,   logit(p, deriv = d), col = "limegreen")
-  lines(p,  probit(p, deriv = d), col = "purple")
-  lines(p, cloglog(p, deriv = d), col = "chocolate")
-  lines(p, cauchit(p, deriv = d), col = "tan")
+          else "1 / first derivative")
+  lines(p,   logit(p, deriv = d, inverse = myinv), col = "limegreen")
+  lines(p,  probit(p, deriv = d, inverse = myinv), col = "purple")
+  lines(p, cloglog(p, deriv = d, inverse = myinv), col = "chocolate")
+  lines(p, cauchit(p, deriv = d, inverse = myinv), col = "tan")
   if (d ==  0) {
     abline(v = 0.5, h = 0, lty = "dashed")
     legend(0, 4.5, c("logit", "probit", "cloglog", "cauchit"),
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index de61cd1..b7cb034 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -73,6 +73,10 @@ lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = 2)
   \deqn{E(Y) = \exp(\mu + 0.5 \sigma^2)}{%
         E(Y) = exp(mu + 0.5 sigma^2)}
   and not \eqn{\mu}{mu}, make up the fitted values.
+  The variance of \eqn{Y} is
+  \deqn{Var(Y) = [\exp(\sigma^2) -1] \exp(2\mu + \sigma^2).}{%
+        Var(Y) = [exp(sigma^2) -1] * exp(2 mu + sigma^2).}
+
 
 
 % A random variable \eqn{Y} has a 3-parameter lognormal distribution
@@ -85,11 +89,13 @@ lognormal(lmeanlog = "identitylink", lsdlog = "loge", zero = 2)
 % and not \eqn{\mu}{mu}, make up the fitted values.
 
 
+
 % \code{lognormal()} and \code{lognormal3()} fit the 2- and 3-parameter
 % lognormal distribution respectively. Clearly, if the location
 % parameter \eqn{\lambda=0}{lambda=0} then both distributions coincide.
 
 
+
 }
 \value{
   An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
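
The text added above gives both E(Y) and Var(Y) for the lognormal. A quick simulation check in base R (approximate because of Monte Carlo error; the parameter values are made up):

  set.seed(1)
  mu <- 1; sigma <- 0.5
  y <- rlnorm(1e6, meanlog = mu, sdlog = sigma)
  c(mean(y), exp(mu + 0.5 * sigma^2))                     # E(Y)
  c(var(y),  (exp(sigma^2) - 1) * exp(2 * mu + sigma^2))  # Var(Y)
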
diff --git a/man/multilogit.Rd b/man/multilogit.Rd
index b89debe..8021165 100644
--- a/man/multilogit.Rd
+++ b/man/multilogit.Rd
@@ -57,7 +57,7 @@ multilogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index ec04e32..d4318ee 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -111,9 +111,9 @@ McCullagh, P. and Nelder, J. A. (1989)
 London: Chapman & Hall.
 
 
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York, USA: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 
 Hastie, T. J., Tibshirani, R. J. and Friedman, J. H. (2009)
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index 3eeacb2..2b06596 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -66,7 +66,7 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
   when \code{inverse = FALSE}, and if \code{inverse = TRUE} then
   \code{kmatrix / expm1(-theta)}.
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
 
@@ -91,7 +91,8 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
 
 \section{Warning}{
   This function currently does not work very well with \code{\link{negbinomial}}!
-  The NB-C model is sensitive to the initial values and may converge to a local solution.
+  The NB-C model is sensitive to the initial values and may converge to a
+  local solution.
   Pages 210 and 309 of Hilbe (2011) notes convergence difficulties (of
   Newton-Raphson type algorithms), and this applies here.
   This function should work okay with \code{\link{negbinomial.size}}.
@@ -147,7 +148,7 @@ plot(nbcanlink(mymu, size = kmatrix) ~ mymu, las = 1,
 set.seed(123)
 ndata <- data.frame(x2 = runif(nn <- 1000 ))
 size1 <- exp(1); size2 <- exp(2)
-ndata <- transform(ndata, eta1 = -1 - 2 * x2,  # eta1 < 0
+ndata <- transform(ndata, eta1 = -1 - 1 * x2,  # eta1 < 0
                           size1 = size1,
                           size2 = size2)
 ndata <- transform(ndata,
@@ -158,8 +159,10 @@ ndata <- transform(ndata, y1 = rnbinom(nn, mu = mu1, size = size1),
 head(ndata)
 summary(ndata)
 
-fit <- vglm(cbind(y1, y2) ~ x2, negbinomial("nbcanlink", imethod = 3),
-            stepsize = 0.5, data = ndata,  # Deliberately slow the convergence rate
+fit <- vglm(cbind(y1, y2) ~ x2,
+#           negbinomial("nbcanlink", imethod = 1, max.chunk.MB = 9),
+            negbinomial("nbcanlink", imethod = 2),
+            stepsize = 0.25, data = ndata,  # Deliberately slow the convergence rate
             maxit = 100, trace = TRUE)  # Warning: may converge to a local soln
 coef(fit, matrix = TRUE)
 summary(fit)
@@ -172,3 +175,9 @@ summary(fit)
 % The variance-covariance matrix may be wrong when the
 % canonical link is used.
 % vcov(fit)  # May be wrong
+
+
+
+% 20150714; yettodo: fix up this and getting it going.
+% Hint: the working weights are treated as diagonal, whereas it isn't!
+
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index 3e8e987..b17f39e 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -99,6 +99,7 @@ nbolf(theta, cutpoint = NULL, k = NULL,
 
 }
 \examples{
+\dontrun{
 nbolf("p", cutpoint = 2, k = 1, short = FALSE)
 nbolf("p", cutpoint = 2, k = 1, tag = TRUE)
 
@@ -107,12 +108,12 @@ y <- nbolf(p,cutpoint = 2, k = 1)
 y. <- nbolf(p,cutpoint = 2, k = 1, deriv = 1)
 max(abs(nbolf(y,cutpoint = 2, k = 1, inv = TRUE) - p))  # Should be 0
 
-\dontrun{ par(mfrow = c(2, 1), las = 1)
-plot(p, y, type = "l", col = "blue", main = "nbolf()")
-abline(h = 0, v = 0.5, col = "red", lty = "dashed")
-
-plot(p, y., type = "l", col = "blue",
-     main = "(Reciprocal of) first NBOLF derivative") }
+#\ dontrun{ par(mfrow = c(2, 1), las = 1)
+#plot(p, y, type = "l", col = "blue", main = "nbolf()")
+#abline(h = 0, v = 0.5, col = "red", lty = "dashed")
+#
+#plot(p, y., type = "l", col = "blue",
+#     main = "(Reciprocal of) first NBOLF derivative") }
 
 # Another example
 nn <- 1000
@@ -123,7 +124,7 @@ k <- 4
 y1 <- rnbinom(nn, mu = mymu, size = k)
 cutpoints <- c(-Inf, 10, 20, Inf)
 cuty <- Cut(y1, breaks = cutpoints)
-\dontrun{ plot(x2, x3, col = cuty, pch = as.character(cuty)) }
+#\ dontrun{ plot(x2, x3, col = cuty, pch = as.character(cuty)) }
 table(cuty) / sum(table(cuty))
 fit <- vglm(cuty ~ x2 + x3, trace = TRUE,
             cumulative(reverse = TRUE, multiple.responses = TRUE,
@@ -137,6 +138,7 @@ coef(fit, matrix = TRUE)
 constraints(fit)
 fit at misc
 }
+}
 \keyword{math}
 \keyword{models}
 \keyword{regression}
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index 7cdffe6..95402be 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -13,7 +13,7 @@
 negbinomial(lmu = "loge", lsize = "loge",
             imu = NULL, isize = NULL, probs.y = 0.75,
             nsimEIM = 250, cutoff.prob = 0.995,
-            max.qnbinom = 1000, max.chunk.Mb = 20,
+            max.qnbinom = 1000, max.chunk.MB = 20,
             deviance.arg = FALSE, imethod = 1, gsize = exp((-4):4),
             parallel = FALSE, ishrinkage = 0.95, zero = -2)
 polya(lprob = "logit", lsize = "loge",
@@ -94,7 +94,7 @@ polyaR(lsize = "loge", lprob = "logit",
 
 
   }
-  \item{max.chunk.Mb, max.qnbinom}{
+  \item{max.chunk.MB, max.qnbinom}{
     \code{max.qnbinom} is used to describe the eligibility of 
     individual observations
     to have their EIM computed by the \emph{exact method}.
@@ -105,15 +105,15 @@ polyaR(lsize = "loge", lprob = "logit",
   and it constructs a large matrix provided that the number of columns
   is less than \code{max.qnbinom}.
   If so, then the computations are done in chunks, so
-  that no more than about \code{max.chunk.Mb} megabytes
+  that no more than about \code{max.chunk.MB} megabytes
   of memory is used at a time (actually, it is proportional to this amount).
   Regarding eligibility of this algorithm, each observation must
   have the \code{cutoff.prob} quantile less than \code{max.qnbinom}
   as its approximate support.
   If you have abundant memory then you might try setting
-  \code{max.chunk.Mb = Inf}, but then the computations might take
+  \code{max.chunk.MB = Inf}, but then the computations might take
   a very long time.
-  Setting \code{max.chunk.Mb = 0} or \code{max.qnbinom = 0}
+  Setting \code{max.chunk.MB = 0} or \code{max.qnbinom = 0}
   will force the EIM to be computed using the SFS algorithm only
   (this \emph{used to be} the default method for \emph{all} the observations).
   When the fitted values of the model are large and \eqn{k} is small,
@@ -329,12 +329,12 @@ polyaR(lsize = "loge", lprob = "logit",
 
   If one wants to force SFS
   to be used on all observations, then
-  set \code{max.qnbinom = 0} or \code{max.chunk.Mb = 0}.
+  set \code{max.qnbinom = 0} or \code{max.chunk.MB = 0}.
   If one wants to force the exact method
   to be used for all observations, then
   set \code{max.qnbinom = Inf}.
   If the computer has \emph{much} memory, then trying
-  \code{max.chunk.Mb = Inf} may provide a small speed increase.
+  \code{max.chunk.MB = Inf} may provide a small speed increase.
   If SFS is used at all, then the \code{@weights} slot of the
   fitted object will be a matrix;
   otherwise that slot will be a \code{0 x 0} matrix.
@@ -438,7 +438,7 @@ Fitting the negative binomial distribution to biological data.
   \code{imethod},
   \code{isize},
   \code{zero},
-  \code{max.chunk.Mb}.
+  \code{max.chunk.MB}.
 
 
   The function \code{negbinomial} can be used by the fast algorithm in
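
A minimal R sketch of the EIM-algorithm choice described above for negbinomial():
by default the exact method may be used (computed in memory-limited chunks), while
setting max.chunk.MB = 0 (or max.qnbinom = 0) forces SFS for every observation.
The data below are simulated purely for illustration and assume the VGAM package
is attached.

library(VGAM)
set.seed(1)
ndata <- data.frame(x2 = runif(500))
ndata <- transform(ndata, y1 = rnbinom(500, mu = exp(2 + x2), size = exp(1)))

fit.default <- vglm(y1 ~ x2, negbinomial, data = ndata, trace = TRUE)
fit.sfs     <- vglm(y1 ~ x2, negbinomial(max.chunk.MB = 0),  # SFS for all observations
                    data = ndata, trace = TRUE)

dim(fit.default@weights)  # 0 x 0 if SFS was never used (see above)
dim(fit.sfs@weights)      # A matrix whenever SFS is used
coef(fit.default, matrix = TRUE)
coef(fit.sfs, matrix = TRUE)
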
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index c285e0c..f326f8b 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -3,8 +3,22 @@
 %
 %
 %
-%
-% 201503, 201504, 201505;
+% 201509, for a bug in car::linearHypothesis() and car:::Anova():
+\alias{as.char.expression}
+\alias{coef.vlm}
+\alias{vcov.vlm}
+\alias{model.matrix.vlm}
+%\alias{has.intercept}
+%\alias{has.interceptvlm}
+%\alias{term.names}
+%\alias{term.namesvlm}
+\alias{responseName}
+\alias{responseNamevlm}
+%
+% 201503, 201504, 201505, 201508;
+%\alias{confintvglm}
+\alias{qlms.bcn}
+\alias{dlms.bcn}
 \alias{dbetaII}
 \alias{AR1.control}
 \alias{param.names}
@@ -23,6 +37,7 @@
 \alias{expected.betabin.ab}
 % 201406;
 \alias{interleave.VGAM}
+\alias{interleave.cmat}  % 201506;
 \alias{marcumQ}
 \alias{QR.Q}
 \alias{QR.R}
@@ -53,7 +68,7 @@
 %\alias{biclaytoncop}
 %
 % 201307;
-\alias{posnormal.control}
+%\alias{posnormal.control}
 \alias{rec.normal.control}
 \alias{rec.exp1.control}
 %\alias{kendall.tau}
@@ -136,9 +151,8 @@
 \alias{coef}
 \alias{logLik}
 \alias{plot}
-\alias{show.summary.vglm}
-\alias{vcov}
-\alias{vcovvlm}
+%\alias{vcov}      % 20150828
+%\alias{vcovvlm}      % 20150828
 \alias{VGAMenv}
 \alias{nobs}
 \alias{show.Coef.rrvgam}
@@ -152,7 +166,7 @@
 \alias{show.summary.rrvglm}
 %\alias{show.summary.uqo}
 \alias{show.summary.vgam}
-\alias{show.summary.vglm}
+% \alias{show.summary.vglm} % 20150831
 \alias{show.summary.vlm}
 %\alias{show.uqo}
 \alias{show.vanova}
@@ -387,7 +401,6 @@
 \alias{case.namesvlm}
 %
 \alias{formula}
-\alias{formulavlm}
 \alias{formulaNA.VGAM}
 \alias{gammaff}
 % \alias{get.arg}
@@ -579,10 +592,11 @@
 \alias{summary.rrvglm}
 %\alias{summary.uqo}
 \alias{summaryvgam}
-\alias{summaryvglm}
+%\alias{summaryvglm}  % 20150831
 \alias{summaryvlm}
 % \alias{tapplymat1}
 \alias{terms.vlm}
+\alias{termsvlm}
 \alias{theta2eta}
 % \alias{trivial.constraints}
 % \alias{update.vgam}
@@ -602,7 +616,7 @@
 \alias{vcontrol.expression}
 % \alias{vcovdefault}
 % \alias{vcovqrrvglm}
-\alias{vcovrrvglm}
+%\alias{vcovrrvglm}   % 20150828
 % \alias{vcovvlm}
 % \alias{veigen}
 % \alias{vellipse}
diff --git a/man/polf.Rd b/man/polf.Rd
index 0d659c8..ef0c1b0 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -94,6 +94,7 @@ polf(theta, cutpoint = NULL,
 
 }
 \examples{
+\dontrun{
 polf("p", cutpoint = 2, short = FALSE)
 polf("p", cutpoint = 2, tag = TRUE)
 
@@ -102,12 +103,13 @@ y <- polf(p, cutpoint = 2)
 y. <- polf(p, cutpoint = 2, deriv = 1)
 max(abs(polf(y, cutpoint = 2, inv = TRUE) - p))  # Should be 0
 
-\dontrun{par(mfrow = c(2, 1), las = 1)
-plot(p, y, type = "l", col = "blue", main = "polf()")
-abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
-
-plot(p, y., type = "l", col = "blue",
-     main = "(Reciprocal of) first POLF derivative") }
+#\ dontrun{ par(mfrow = c(2, 1), las = 1)
+#plot(p, y, type = "l", col = "blue", main = "polf()")
+#abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
+#
+#plot(p, y., type = "l", col = "blue",
+#     main = "(Reciprocal of) first POLF derivative")
+#}
 
 
 # Rutherford and Geiger data
@@ -128,7 +130,7 @@ pdata <- transform(pdata, mymu = exp( 3 + 1 * x2 - 2 * x3))
 pdata <- transform(pdata, y1 = rpois(nn, lambda = mymu))
 cutpoints <- c(-Inf, 10, 20, Inf)
 pdata <- transform(pdata, cuty = Cut(y1, breaks = cutpoints))
-\dontrun{ with(pdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
+#\ dontrun{ with(pdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
 with(pdata, table(cuty) / sum(table(cuty)))
 fit <- vglm(cuty ~ x2 + x3, data = pdata, trace = TRUE,
             cumulative(reverse = TRUE,
@@ -143,6 +145,7 @@ coef(fit, matrix = TRUE)
 constraints(fit)
 fit@misc$earg
 }
+}
 \keyword{math}
 \keyword{models}
 \keyword{regression}
diff --git a/man/posnormal.Rd b/man/posnormal.Rd
index 6250b24..a5607ca 100644
--- a/man/posnormal.Rd
+++ b/man/posnormal.Rd
@@ -7,7 +7,10 @@
 }
 \usage{
 posnormal(lmean = "identitylink", lsd = "loge",
-          imean = NULL, isd = NULL, nsimEIM = 100, zero = NULL)
+          eq.mean = FALSE, eq.sd = FALSE,
+          gmean = exp((-5:5)/2), gsd = exp((-1:5)/2),
+          imean = NULL, isd = NULL, probs.y = 0.10, imethod = 1,
+          nsimEIM = NULL, zero = -2)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -28,25 +31,53 @@ posnormal(lmean = "identitylink", lsd = "loge",
 %  }
 
 
+  \item{gmean, gsd, imethod}{
+  See \code{\link{CommonVGAMffArguments}} for more information.
+  \code{gmean} and \code{gsd}
+  currently operate on a multiplicative scale, on the sample mean
+  and the sample standard deviation, respectively.
+
+
+  }
   \item{imean, isd}{
   Optional initial values for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
   A \code{NULL} means a value is computed internally.
+  See \code{\link{CommonVGAMffArguments}} for more information.
+
 
   }
-  \item{nsimEIM}{
+  \item{eq.mean, eq.sd}{
   See \code{\link{CommonVGAMffArguments}} for more information.
+  The fact that these arguments are supported results in
+  default constraint matrices being a \emph{permutation} of the
+  identity matrix (effectively \emph{trivial} constraints).
+
 
   }
-  \item{zero}{
-  An integer-valued vector specifying which
-  linear/additive predictors are modelled as intercepts only.
-  The values must be from the set \{1,2\} corresponding
-  respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}.
-  If \code{zero = NULL} then all linear/additive predictors are modelled as
-  a linear combination of the explanatory variables.
-  For many data sets having \code{zero = 2} is a good idea.
+  \item{zero, nsimEIM, probs.y}{
+  See \code{\link{CommonVGAMffArguments}} for more information.
+
 
   }
+% \item{zero}{
+% See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+
+% An integer-valued vector specifying which
+% linear/additive predictors are modelled as intercepts only.
+% The values must be from the set \{1,2\} corresponding
+% respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}.
+% If \code{zero = NULL} then all linear/additive predictors are modelled as
+% a linear combination of the explanatory variables.
+% For many data sets having \code{zero = 2} is a good idea.
+
+
+% }
+
+
+
+
 }
 \details{
   The positive normal distribution is the ordinary normal distribution
@@ -67,10 +98,13 @@ posnormal(lmean = "identitylink", lsd = "loge",
   where \eqn{\phi()}{dnorm()} is the probability density function of a 
   standard normal distribution (\code{\link[stats:Normal]{dnorm}}).
 
+
   The mean of \eqn{Y} is
   \deqn{E(Y) = \mu + \sigma \frac{\phi(-\mu/ \sigma)}{
                1-\Phi(-\mu/ \sigma)}. }{%
         E(Y) = mu + sigma * dnorm((y-mu)/sigma) / [1-pnorm(-mu/ sigma)]. }
+  This family function handles multiple responses.
+
 
 }
 \value{
@@ -93,15 +127,20 @@ posnormal(lmean = "identitylink", lsd = "loge",
   The response variable for this family function is the same as
   \code{\link{uninormal}} except positive values are required.
   Reasonably good initial values are needed.
-  Fisher scoring is implemented.
+  
 
 
   The distribution of the reciprocal of a positive normal random variable
   is known as an alpha distribution.
 
+
 }
 
 \section{Warning }{
+  It is recommended that \code{trace = TRUE} be used to monitor convergence;
+  sometimes the estimated mean is \code{-Inf} and the
+  estimated standard deviation is \code{Inf}, especially
+  when the sample size is small.
   Under- or over-flow may occur if the data is ill-conditioned.
 
 
@@ -114,16 +153,16 @@ posnormal(lmean = "identitylink", lsd = "loge",
 }
 
 \examples{
-pdata <- data.frame(m = 1.0, SD = exp(1.0))
-pdata <- transform(pdata, y = rposnorm(n <- 1000, m = m, sd = SD))
+pdata <- data.frame(Mean = 1.0, SD = exp(1.0))
+pdata <- transform(pdata, y = rposnorm(n <- 1000, m = Mean, sd = SD))
 
 \dontrun{with(pdata, hist(y, prob = TRUE, border = "blue",
-         main = paste("posnorm(m =", m[1], ", sd =", round(SD[1], 2),")"))) }
+  main = paste("posnorm(m =", Mean[1], ", sd =", round(SD[1], 2),")"))) }
 fit <- vglm(y ~ 1, posnormal, data = pdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 (Cfit <- Coef(fit))
 mygrid <- with(pdata, seq(min(y), max(y), len = 200))  # Add the fit to the histogram
-\dontrun{lines(mygrid, dposnorm(mygrid, Cfit[1], Cfit[2]), col = "red")}
+\dontrun{lines(mygrid, dposnorm(mygrid, Cfit[1], Cfit[2]), col = "orange")}
 }
 \keyword{models}
 \keyword{regression}
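
A short base-R check of the mean formula quoted above (no VGAM calls needed;
mu = 1 and sigma = exp(1) mirror the example data): since dnorm(-x) = dnorm(x)
and 1 - pnorm(-x) = pnorm(x), E(Y) reduces to
mu + sigma * dnorm(mu/sigma) / pnorm(mu/sigma), and numerical integration of the
positive-normal density gives essentially the same value.

mu <- 1.0; sigma <- exp(1.0)
closed.form <- mu + sigma * dnorm(mu / sigma) / pnorm(mu / sigma)

# Positive-normal density: dnorm(y; mu, sigma) / (1 - pnorm(-mu/sigma)), y > 0
dposn <- function(y) dnorm(y, mean = mu, sd = sigma) / pnorm(mu / sigma)
numer <- integrate(function(y) y * dposn(y), lower = 0, upper = Inf)$value

c(closed.form = closed.form, numerical = numer)  # Should agree closely
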
diff --git a/man/probit.Rd b/man/probit.Rd
index ba5a095..a9c985c 100644
--- a/man/probit.Rd
+++ b/man/probit.Rd
@@ -46,7 +46,7 @@ probit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
 
 
   For \code{deriv = 1}, then the function returns
-  \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+  \emph{d} \code{eta} / \emph{d} \code{theta} as a function of \code{theta}
   if \code{inverse = FALSE},
   else if \code{inverse = TRUE} then it returns the reciprocal.
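
A quick numerical illustration of the derivative convention documented above
(assuming the VGAM package is attached): with deriv = 1 and inverse = FALSE the
returned value is d eta / d theta, which for the probit link equals
1/dnorm(qnorm(theta)); a central finite difference of the link itself gives the
same thing, and inverse = TRUE returns the reciprocal.

library(VGAM)
p <- seq(0.05, 0.95, by = 0.15)
eps <- 1e-6

cbind(deriv1      = probit(p, deriv = 1),                       # d eta / d theta
      closed.form = 1 / dnorm(qnorm(p)),
      finite.diff = (probit(p + eps) - probit(p - eps)) / (2 * eps))

max(abs(probit(p, deriv = 1, inverse = TRUE) -
        1 / probit(p, deriv = 1)))  # The reciprocal, as documented; should be ~ 0
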
 
diff --git a/man/propodds.Rd b/man/propodds.Rd
index 27db074..245b9d2 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -44,7 +44,7 @@ propodds(reverse = TRUE, whitespace = FALSE)
 
 Agresti, A. (2010)
 \emph{Analysis of Ordinal Categorical Data},
-2nd ed. New York: Wiley.
+2nd ed. Hoboken, NJ, USA: Wiley.
 
 
 Yee, T. W. (2010)
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index 3c6694d..b851c19 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -147,7 +147,8 @@ rdata <- transform(rdata, y = pmin(U, ystar))
 \dontrun{ par(mfrow = c(1, 2))
 hist(with(rdata, ystar)); hist(with(rdata, y)) }
 extra <- with(rdata, list(rightcensored = ystar > U))
-fit <- vglm(y ~ 1, cens.rayleigh, data = rdata, trace = TRUE, extra = extra)
+fit <- vglm(y ~ 1, cens.rayleigh, data = rdata, trace = TRUE,
+            extra = extra, crit = "coef")
 table(fit@extra$rightcen)
 coef(fit, matrix = TRUE)
 head(fitted(fit))
diff --git a/man/rhobit.Rd b/man/rhobit.Rd
index dec88fe..b3d39ad 100644
--- a/man/rhobit.Rd
+++ b/man/rhobit.Rd
@@ -53,9 +53,10 @@ rhobit(theta, bminvalue = NULL, bmaxvalue = NULL,
   TRUE} then \code{(exp(theta) - 1)/(exp(theta) + 1)}.
 
 
-  For \code{deriv = 1}, then the function returns \emph{d} \code{theta}
-  / \emph{d} \code{eta} as a function of \code{theta} if \code{inverse =
-  FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal.
+  For \code{deriv = 1}, then the function
+  returns \emph{d} \code{eta} / \emph{d} \code{theta} as a
+  function of \code{theta} if \code{inverse = FALSE},
+  else if \code{inverse = TRUE} then it returns the reciprocal.
 
 
 }
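
The same convention applies to rhobit() per the corrected text above; a small
sketch (assuming VGAM is attached) checks the forward/inverse round trip stated
above and compares deriv = 1 with a central finite difference.

library(VGAM)
rho <- seq(-0.9, 0.9, by = 0.3)
eps <- 1e-6

eta <- rhobit(rho)                              # Forward transform
max(abs(rhobit(eta, inverse = TRUE) - rho))     # (exp(eta)-1)/(exp(eta)+1); ~ 0

cbind(deriv1      = rhobit(rho, deriv = 1),     # d eta / d rho
      finite.diff = (rhobit(rho + eps) - rhobit(rho - eps)) / (2 * eps))
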
diff --git a/man/sratio.Rd b/man/sratio.Rd
index 554e825..7445bd8 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -71,13 +71,13 @@ sratio(link = "logit", parallel = FALSE, reverse = FALSE,
 
 }
 \references{
-Agresti, A. (2002)
+Agresti, A. (2013)
 \emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+3rd ed. Hoboken, NJ, USA: Wiley.
 
 Simonoff, J. S. (2003)
 \emph{Analyzing Categorical Data},
-New York: Springer-Verlag.
+New York, USA: Springer-Verlag.
 
 McCullagh, P. and Nelder, J. A. (1989)
 \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
diff --git a/man/summaryvglm.Rd b/man/summaryvglm.Rd
new file mode 100644
index 0000000..c0c384a
--- /dev/null
+++ b/man/summaryvglm.Rd
@@ -0,0 +1,139 @@
+% Adapted from file src/library/stats/man/summary.glm.Rd
+% Part of the R package, http://www.R-project.org
+% Copyright 1995-2013 R Core Team
+% Distributed under GPL 2 or later
+
+
+\name{summaryvglm}
+\alias{summaryvglm}
+\alias{show.summary.vglm}
+\title{Summarizing Vector Generalized Linear Model Fits}
+\usage{
+summaryvglm(object, correlation = FALSE,
+            dispersion = NULL, digits = NULL, presid = TRUE,
+            signif.stars = getOption("show.signif.stars"),
+           nopredictors = FALSE)
+\method{show}{summary.vglm}(x, digits = max(3L, getOption("digits") - 3L),
+           quote = TRUE, prefix = "", presid = TRUE,
+           signif.stars = NULL, nopredictors = NULL)
+}
+\arguments{
+  \item{object}{an object of class \code{"vglm"}, usually, a result of a
+    call to \code{\link{vglm}}.}
+  \item{x}{an object of class \code{"summary.vglm"}, usually, a result of a
+    call to \code{summaryvglm()}.}
+  \item{dispersion}{
+    used mainly for GLMs.
+    See \code{\link[stats]{summary.glm}}. }
+  \item{correlation}{logical; if \code{TRUE}, the correlation matrix of
+    the estimated parameters is returned and printed.}
+  \item{digits}{the number of significant digits to use when printing. }
+% \item{symbolic.cor}{logical; if \code{TRUE}, print the correlations in
+%   a symbolic form (see \code{\link{symnum}}) rather than as numbers.}
+  \item{signif.stars}{logical; if \code{TRUE}, \sQuote{significance stars}
+    are printed for each coefficient. }
+% \item{\dots}{further arguments passed to or from other methods.}
+  \item{presid}{logical; if \code{TRUE}, some summary statistics of
+                the Pearson residuals are printed out. }
+  \item{quote}{ Fed into \code{print()}. }
+  \item{nopredictors}{ logical;
+                       if \code{TRUE} the names of the linear predictors
+                       are not printed out.
+                       The default is that they are. }
+  \item{prefix}{ Not used. }
+
+
+}
+\description{
+  These functions are all \code{\link{methods}} for class \code{vglm} or
+  \code{summary.vglm} objects.
+
+
+}
+\details{
+  \code{show.summary.vglm()} tries to be smart about formatting the
+  coefficients, standard errors, etc. and additionally gives
+  \sQuote{significance stars} if \code{signif.stars} is \code{TRUE}.
+  The \code{coefficients} component of the result gives the estimated
+  coefficients and their estimated standard errors, together with their
+  ratio. 
+  This third column is labelled \code{z value} regardless of
+  whether the
+  dispersion is estimated or known
+  (or fixed by the family).  A fourth column gives the two-tailed
+  p-value corresponding to the z ratio based on a 
+  Normal reference distribution.
+% (It is possible that the dispersion is
+% not known and there are no residual degrees of freedom from which to
+% estimate it.  In that case the estimate is \code{NaN}.)
+%
+%
+%
+In general, the normal distribution is used as the reference
+distribution, rather than the t distribution.
+
+
+% Aliased coefficients are omitted in the returned object but restored
+% by the \code{print} method.
+
+  
+  Correlations are printed to two decimal places (or symbolically): to
+  see the actual correlations print \code{summary(object)@correlation}
+  directly.
+
+
+
+% The dispersion of a GLM is not used in the fitting process, but it is
+% needed to find standard errors.
+% If \code{dispersion} is not supplied or \code{NULL},
+% the dispersion is taken as \code{1} for the \code{binomial} and
+% \code{Poisson} families, and otherwise estimated by the residual
+% Chisquared statistic (calculated from cases with non-zero weights)
+% divided by the residual degrees of freedom.
+
+
+
+% \code{summary} can be used with Gaussian \code{glm} fits to handle the
+% case of a linear regression with known error variance, something not
+% handled by \code{\link{summary.lm}}.
+
+
+}
+\value{
+  \code{summaryvglm} returns an object of class \code{"summary.vglm"};
+  see \code{\link{summary.vglm-class}}.
+
+
+}
+\seealso{
+  \code{\link{vglm}},
+  \code{\link{confintvglm}},
+  \code{\link{vcovvlm}},
+  \code{\link[stats]{summary.glm}},
+  \code{\link[stats]{summary.lm}},
+  \code{\link[base]{summary}}.
+
+
+}
+\examples{
+## For examples see example(glm)
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, acat, data = pneumo))
+coef(fit, matrix = TRUE)
+summary(fit)
+coef(summary(fit))
+}
+\keyword{models}
+\keyword{regression}
+
+
+
+
+%\method{summary}{vglm}(object, correlation = FALSE,
+%           dispersion = NULL, digits = NULL,
+%           presid = TRUE,
+%           signif.stars = getOption("show.signif.stars"))
+
+
+
+
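
A sketch reproducing the Wald table described above by hand from coef() and
vcov(), using the pneumo example from the help file: the z value is the estimate
divided by its standard error, and the p-value is two-tailed from a normal
reference distribution (assumes the VGAM package is attached).

library(VGAM)
pneumo <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let, acat, data = pneumo)

est  <- coef(fit)
se   <- sqrt(diag(vcov(fit)))
zval <- est / se
pval <- 2 * pnorm(-abs(zval))          # Two-tailed, normal reference

cbind(Estimate = est, SE = se, z = zval, p = pval)
coef(summary(fit))                     # Should match, up to column names
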
diff --git a/man/tobit.Rd b/man/tobit.Rd
index a6adee3..72b27dd 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -8,10 +8,19 @@
 }
 \usage{
 tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
-      nsimEIM = 250, imu = NULL, isd = NULL, 
+      imu = NULL, isd = NULL, 
       type.fitted = c("uncensored", "censored", "mean.obs"),
-      imethod = 1, zero = -2)
+      byrow.arg = FALSE, imethod = 1, zero = -2)
 }
+% 20151024 yettodo: maybe add a new option to 'type.fitted':
+%     type.fitted = c("uncensored", "censored", "mean.obs", "truncated"),
+% where "truncated" is only concerned with values of y > Lower;
+% values of y <= Lower are ignored.
+%
+%
+%
+%
+%
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{Lower}{
@@ -45,7 +54,7 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
 
   }
 
-  \item{imu, isd}{
+  \item{imu, isd, byrow.arg}{
   See \code{\link{CommonVGAMffArguments}} for information.
 
 
@@ -59,17 +68,14 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   this is a doubly truncated normal distribution
   augmented by point masses at the truncation points
   (see \code{\link{dtobit}}).
+  See \code{\link{CommonVGAMffArguments}} for more information.
 
 
-  }
-  \item{imethod}{
-  Initialization method. Either 1 or 2, this specifies
-  two methods for obtaining initial values for the parameters.
-
 
   }
-  \item{nsimEIM}{
-  Used for the nonstandard Tobit model.
+  \item{imethod}{
+  Initialization method. Either 1, 2 or 3; this specifies
+  the method for obtaining initial values for the parameters.
   See \code{\link{CommonVGAMffArguments}} for information.
 
 
@@ -99,9 +105,11 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   responses if it is below or above certain cutpoints.
 
 
+
   The defaults for \code{Lower} and \code{Upper} and
   \code{lmu} correspond to the \emph{standard} Tobit model.
-  Then Fisher scoring is used, else simulated Fisher scoring.
+  Fisher scoring is used for the standard and nonstandard
+  models.
   By default, the mean \eqn{x_i^T \beta}{x_i^T beta} is
   the first linear/additive predictor, and the log of
   the standard deviation is the second linear/additive
@@ -110,21 +118,19 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   of \eqn{x_i^T \beta}{x_i^T beta}.
 
 
+
 }
 \section{Warning }{
-  Convergence is often slow. Setting \code{crit = "coeff"}
-  is recommended since premature convergence of the log-likelihood
-  is common.
-  Simulated Fisher scoring is implemented for the nonstandard
-  Tobit model. For this, the working weight matrices for
-  some observations are prone to not being positive-definite;
-  if so then some checking of the final model is recommended
-  and/or try inputting some initial values.
-
-
-%  The working weight matrices for most observations 
-%  are not positive-definite. These responses would otherwise have a
-%  fitted value much less than the cutpoint.
+  If values of the response and \code{Lower} and/or \code{Upper}
+  are not integers then there is a danger that such values are
+  wrongly interpreted as uncensored.
+  For example, if the first 10 values of the response were
+  \code{runif(10)} and \code{Lower} was assigned these values then
+  testing \code{y[1:10] == Lower[1:10]} is numerically fraught.
+  Currently, if any \code{y < Lower} or \code{y > Upper} then
+  a warning is issued.
+
+ 
 }
 
 \value{
@@ -148,11 +154,14 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   If so, then \code{Lower} and \code{Upper}
   are recycled into a matrix with the number of columns equal
   to the number of responses,
-  and the recycling is done row-wise (\code{byrow = TRUE}).
+  and the recycling is done row-wise \emph{if} \code{byrow.arg = TRUE}.
+  The default order is as \code{\link[base]{matrix}}, which
+  is \code{byrow.arg = FALSE}.
   For example, these are returned in \code{fit4@misc$Lower} and
   \code{fit4@misc$Upper} below.
 
 
+
   If there is no censoring then
   \code{\link{uninormal}} is recommended instead. Any value of the
   response less than \code{Lower} or greater than \code{Upper} will
@@ -166,7 +175,6 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
 
 
 
-
 % 20150417; McClelland Kemp bug:
 
 
@@ -185,12 +193,13 @@ tobit(Lower = 0, Upper = Inf, lmu = "identitylink", lsd = "loge",
   \code{\link{uninormal}},
   \code{\link{double.cens.normal}},
   \code{\link{posnormal}},
+  \code{\link{CommonVGAMffArguments}},
   \code{\link[stats:Normal]{rnorm}}.
 
 
+
 }
 \examples{
-\dontrun{
 # Here, fit1 is a standard Tobit model and fit2 is a nonstandard Tobit model
 tdata <- data.frame(x2 = seq(-1, 1, length = (nn <- 100)))
 set.seed(1)
@@ -212,40 +221,38 @@ with(tdata, table(y2 == Lower | y2 == Upper))  # How many censored values?
 with(tdata, table(attr(y2, "cenL")))
 with(tdata, table(attr(y2, "cenU")))
 
-fit1 <- vglm(y1 ~ x2, tobit, data = tdata, trace = TRUE,
-             crit = "coeff")  # crit = "coeff" is recommended
+fit1 <- vglm(y1 ~ x2, tobit, data = tdata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 summary(fit1)
 
 fit2 <- vglm(y2 ~ x2, tobit(Lower = Lower, Upper = Upper, type.f = "cens"),
-            data = tdata, crit = "coeff", trace = TRUE)  # ditto
+             data = tdata, trace = TRUE)
 table(fit2@extra$censoredL)
 table(fit2@extra$censoredU)
 coef(fit2, matrix = TRUE)
 
-fit3 <- vglm(y3 ~ x2,
-            tobit(Lower = with(tdata, Lower.vec),
-                  Upper = with(tdata, Upper.vec), type.f = "cens"),
-            data = tdata, crit = "coeff", trace = TRUE)  # ditto
+fit3 <- vglm(y3 ~ x2, tobit(Lower = with(tdata, Lower.vec),
+                            Upper = with(tdata, Upper.vec), type.f = "cens"),
+             data = tdata, trace = TRUE)
 table(fit3@extra$censoredL)
 table(fit3@extra$censoredU)
 coef(fit3, matrix = TRUE)
 
 # fit4 is fit3 but with type.fitted = "uncen".
 fit4 <- vglm(cbind(y3, y4) ~ x2,
-            tobit(Lower = rep(with(tdata, Lower.vec), each = 2),
-                  Upper = rep(with(tdata, Upper.vec), each = 2)),
-            data = tdata, crit = "coeff", trace = TRUE)  # ditto
+             tobit(Lower = rep(with(tdata, Lower.vec), each = 2),
+                   Upper = rep(with(tdata, Upper.vec), each = 2),
+                   byrow.arg = TRUE),
+             data = tdata, crit = "coeff", trace = TRUE)
 head(fit4@extra$censoredL)  # A matrix
 head(fit4@extra$censoredU)  # A matrix
 head(fit4@misc$Lower)       # A matrix
 head(fit4@misc$Upper)       # A matrix
 coef(fit4, matrix = TRUE)
-}
 
-\dontrun{ # Plot the results
+\dontrun{ # Plot fit1--fit4
 par(mfrow = c(2, 2))
-# Plot fit1
+
 plot(y1 ~ x2, tdata, las = 1, main = "Standard Tobit model",
      col = as.numeric(attr(y1, "cenL")) + 3,
      pch = as.numeric(attr(y1, "cenL")) + 1)
@@ -258,7 +265,6 @@ lines(fitted(fit1) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y1 ~ x2, tdata)) ~ x2, tdata, col = "black",
       lty = 2, lwd = 2)  # This is simplest but wrong!
 
-# Plot fit2
 plot(y2 ~ x2, data = tdata, las = 1, main = "Tobit model",
      col = as.numeric(attr(y2, "cenL")) + 3 +
            as.numeric(attr(y2, "cenU")),
@@ -273,13 +279,12 @@ lines(fitted(fit2) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y2 ~ x2, tdata)) ~ x2, tdata, col = "black",
       lty = 2, lwd = 2)  # This is simplest but wrong!
 
-# Plot fit3
 plot(y3 ~ x2, data = tdata, las = 1,
      main = "Tobit model with nonconstant censor levels",
-     col = as.numeric(attr(y3, "cenL")) + 3 +
-           as.numeric(attr(y3, "cenU")),
+     col = as.numeric(attr(y3, "cenL")) + 2 +
+           as.numeric(attr(y3, "cenU") * 2),
      pch = as.numeric(attr(y3, "cenL")) + 1 +
-           as.numeric(attr(y3, "cenU")))
+           as.numeric(attr(y3, "cenU") * 2))
 legend(x = "topleft", leg = c("censored", "uncensored"),
        pch = c(2, 1), col = c("blue", "green"))
 legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
@@ -289,13 +294,12 @@ lines(fitted(fit3) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
 lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
       lty = 2, lwd = 2)  # This is simplest but wrong!
 
-# Plot fit4
 plot(y3 ~ x2, data = tdata, las = 1,
      main = "Tobit model with nonconstant censor levels",
-     col = as.numeric(attr(y3, "cenL")) + 3 +
-           as.numeric(attr(y3, "cenU")),
+     col = as.numeric(attr(y3, "cenL")) + 2 +
+           as.numeric(attr(y3, "cenU") * 2),
      pch = as.numeric(attr(y3, "cenL")) + 1 +
-           as.numeric(attr(y3, "cenU")))
+           as.numeric(attr(y3, "cenU") * 2))
 legend(x = "topleft", leg = c("censored", "uncensored"),
        pch = c(2, 1), col = c("blue", "green"))
 legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
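
The Warning above concerns testing a non-integer response for exact equality with
Lower/Upper; a tiny base-R illustration of why '==' is fraught for such values,
together with a tolerance-based comparison (the tolerance 1e-8 is only
illustrative).

set.seed(1)
Lower <- runif(10)
y <- Lower * (1 + 1e-15)     # A censored value that has picked up rounding error

y == Lower                   # Typically FALSE: exact equality is fraught
abs(y - Lower) < 1e-8        # A tolerance-based comparison behaves as intended
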
diff --git a/man/triangle.Rd b/man/triangle.Rd
index afdb1cc..64272b5 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -74,9 +74,12 @@ World Scientific: Singapore.
 }
 \author{ T. W. Yee }
 \section{Warning}{
-  The MLE regularity conditions do not seem to hold for this
-  distribution so that misleading inferences may result, e.g., in
-  the \code{summary} and \code{vcov} of the object.
+  The MLE regularity conditions do not hold for this
+  distribution
+  (e.g., the first derivative evaluated at the mode
+  does not exist because it is not continuous)
+  so that misleading inferences may result, e.g.,
+  in the \code{summary} and \code{vcov} of the object.
   Additionally, convergence to the MLE often appears to fail.
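
A self-contained numerical illustration of the Warning above (the density is
written out by hand; dtri is just a local helper, not a VGAM function): the
one-sided derivatives of the log-likelihood in theta differ at theta = y, so the
first derivative at the mode does not exist and the usual MLE regularity
conditions fail.

# Standard triangular density on [0, 1] with mode theta, written out by hand
dtri <- function(y, theta) ifelse(y < theta, 2 * y / theta, 2 * (1 - y) / (1 - theta))

y <- 0.4; eps <- 1e-6
loglik <- function(theta) log(dtri(y, theta))

c(left  = (loglik(y) - loglik(y - eps)) / eps,   # ~  1 / (1 - y)
  right = (loglik(y + eps) - loglik(y)) / eps)   # ~ -1 / y
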
 
 
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index e9e69c0..7a1ef0f 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -4,6 +4,18 @@
 %\alias{ccoef-method}
 %
 %
+% 201509:
+\alias{term.names,ANY-method}
+\alias{term.names,vlm-method}
+\alias{responseName,ANY-method}
+\alias{responseName,vlm-method}
+\alias{has.intercept,ANY-method}
+\alias{has.intercept,vlm-method}
+% 201508, for R 3.2.2:
+\alias{confint,ANY-method}
+\alias{confint,vglm-method}
+\alias{confint,vgam-method}
+\alias{confint,rrvglm-method}
 %
 % 201503, for R 3.1.3:
 \alias{is.buggy,ANY-method}
diff --git a/man/vcovvlm.Rd b/man/vcovvlm.Rd
new file mode 100644
index 0000000..ac028fd
--- /dev/null
+++ b/man/vcovvlm.Rd
@@ -0,0 +1,104 @@
+\name{vcovvlm}
+%\name{vcov}
+\alias{vcov}
+\alias{vcovvlm}
+\alias{vcovrrvglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Calculate Variance-Covariance Matrix for a Fitted
+        VLM or RR-VGLM
+        Object
+}
+\description{
+  Returns the variance-covariance matrix of the %main
+  parameters of
+  a fitted \code{\link[VGAM]{vlm-class}} object or
+  a fitted \code{\link[VGAM]{rrvglm-class}} object.
+
+
+}
+\usage{
+vcov(object, \dots)
+vcovvlm(object, dispersion = NULL, untransform = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{ A fitted model object,
+  having class \code{\link[VGAM]{vlm-class}} or
+  \code{\link[VGAM]{rrvglm-class}} or
+  a superclass of such.
+  The former includes a \code{\link{vglm}} object.
+
+
+  }
+  \item{dispersion}{
+  Numerical. A value may be specified, else it
+  is estimated for quasi-GLMs (e.g., method of moments).
+  For almost all other types of VGLMs it is usually unity.
+  The value is multiplied by the raw variance-covariance matrix.
+
+
+  }
+  \item{untransform}{
+  logical. For intercept-only models with trivial constraints;
+  if set \code{TRUE} then the parameter link function is inverted
+  to give the answer for the untransformed/raw parameter.
+
+
+
+  }
+ \item{\dots}{Same as \code{\link[stats]{vcov}}.
+ }
+}
+\details{
+  This methods function is based on the QR decomposition
+  of the (large) VLM model matrix and working weight matrices.
+  Currently
+  \code{\link{vcovvlm}}
+  operates on the fundamental
+  \code{\link[VGAM]{vlm-class}} objects because pretty well
+  all modelling functions in \pkg{VGAM} inherit from this.
+  Currently
+  \code{\link{vcovrrvglm}}
+  is not entirely reliable because the elements of the
+  \bold{A}--\bold{C} part of the matrix sometimes cannot be
+  computed very accurately, so that the entire matrix is
+  not positive-definite. 
+
+
+}
+\value{
+  Same as \code{\link[stats]{vcov}}.
+
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+%\section{Warning }{
+%}
+
+\seealso{
+  \code{\link{confintvglm}},
+  \code{\link{summaryvglm}},
+  \code{\link[stats]{vcov}}.
+
+
+}
+
+\examples{
+ndata <- data.frame(x2 = runif(nn <- 300))
+ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
+                          y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
+fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
+fit2 <- rrvglm(y1 ~ x2, negbinomial(zero = NULL), data = ndata)
+coef(fit1, matrix = TRUE)
+vcov(fit1)
+vcov(fit2)
+}
+\keyword{models}
+\keyword{regression}
+
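
A short sketch of the untransform argument documented above (assumes VGAM is
attached; data simulated for illustration): for an intercept-only fit with
trivial constraints, vcovvlm(fit, untransform = TRUE) is described as giving the
covariance matrix for the untransformed (raw) parameters, matching the
parameterization reported by Coef().

library(VGAM)
set.seed(1)
ndata <- data.frame(y1 = rnbinom(300, mu = exp(2), size = exp(1)))
fit0 <- vglm(y1 ~ 1, negbinomial, data = ndata)  # Intercept-only, trivial constraints

vcov(fit0)                           # On the eta (link) scale
vcovvlm(fit0, untransform = TRUE)    # On the raw (mu, size) scale, per the above
Coef(fit0)                           # Untransformed parameter estimates
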
diff --git a/man/vglm.Rd b/man/vglm.Rd
index 7da2c59..c282d95 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -336,6 +336,13 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
 \references{ 
 
 
+Yee, T. W. (2015)
+\emph{Vector Generalized Linear and Additive Models:
+With an Implementation in R}.
+New York, USA: Springer.
+
+
+
 Yee, T. W. and Hastie, T. J. (2003)
 Reduced-rank vector generalized linear models.
 \emph{Statistical Modelling},
@@ -348,6 +355,14 @@ Vector generalized additive models.
 \bold{58}, 481--493.
 
 
+
+  Yee, T. W. (2014)
+  Reduced-rank vector generalized linear models with two linear predictors.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
+
+
+
 Yee, T. W. (2008)
 The \code{VGAM} Package.
 \emph{R News}, \bold{8}, 28--39.
@@ -428,7 +443,7 @@ The \code{VGAM} Package.
   \code{\link{rrvglm}},
   \code{\link{vgam}}.
   Methods functions include 
-  \code{coef.vlm},
+  \code{\link{coefvlm}},
   \code{\link{constraints.vlm}},
   \code{\link{hatvaluesvlm}},
   \code{\link{linkfun.vglm}},
diff --git a/man/weibull.mean.Rd b/man/weibull.mean.Rd
new file mode 100644
index 0000000..96de32d
--- /dev/null
+++ b/man/weibull.mean.Rd
@@ -0,0 +1,115 @@
+\name{weibull.mean}
+\alias{weibull.mean}
+%\alias{weibullff}
+%\alias{weibull.lsh}
+%\alias{weibull3}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Weibull Distribution Family Function, Parameterized by the Mean }
+\description{
+  Maximum likelihood estimation of the 2-parameter Weibull distribution.
+  The mean is one of the  parameters.
+  No observations should be censored.
+
+}
+\usage{
+weibull.mean(lmean = "loge", lshape = "loge", imean = NULL,
+             ishape = NULL, probs.y = c(0.2, 0.5, 0.8),
+             imethod = 1, zero = -2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{lmean, lshape}{
+  Parameter link functions applied to the 
+  (positive) mean parameter (called \eqn{mu} below) and
+  (positive) shape parameter (called \eqn{a} below).
+  See \code{\link{Links}} for more choices.
+
+
+  }
+  \item{imean, ishape}{
+  Optional initial values for the mean and shape parameters.
+
+
+  }
+  \item{imethod, zero, probs.y}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+
+  }
+}
+\details{
+  See \code{\link{weibullR}} for most of the details
+  for this family function too.
+  The mean of \eqn{Y} is \eqn{b \, \Gamma(1+ 1/a)}{b * gamma(1+ 1/a)}
+  (returned as the fitted values),
+  and this is the first parameter (a \code{\link{loge}}
+  link is the default because it is positive).
+  The other parameter is the positive shape parameter \eqn{a},
+  also having a default \code{\link{loge}} link.
+
+
+  This \pkg{VGAM} family function currently does not handle 
+  censored data.
+  Fisher scoring is used to estimate the two parameters.
+  Although the expected information matrices used here are valid
+  in all regions of the parameter space,
+  the regularity conditions for maximum
+  likelihood estimation are satisfied only if \eqn{a>2}
+  (according to Kleiber and Kotz (2003)).
+  If this is violated then a warning message is issued.
+  One can enforce \eqn{a>2} by
+  choosing \code{lshape = logoff(offset = -2)}.
+  Common values of the shape parameter lie between 0.5 and 3.5.
+
+
+}
+\value{
+  An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+  The object is used by modelling functions such as \code{\link{vglm}},
+  and \code{\link{vgam}}.
+
+
+
+}
+\author{ T. W. Yee }
+\note{
+  See \code{\link{weibullR}} for more details.
+  This \pkg{VGAM} family function handles multiple responses.
+
+
+}
+%\section{Warning}{
+% This function is under development to handle other censoring situations.
+% The version of this function which will handle censored data will be
+
+
+%}
+
+\seealso{
+    \code{\link{weibullR}},
+    \code{\link[stats:Weibull]{dweibull}},
+    \code{\link{truncweibull}},
+    \code{\link{gev}},
+    \code{\link{lognormal}},
+    \code{\link{expexpff}},
+    \code{\link{maxwell}},
+    \code{\link{rayleigh}},
+    \code{\link{gumbelII}}.
+
+
+}
+\examples{
+wdata <- data.frame(x2 = runif(nn <- 1000))  # Complete data
+wdata <- transform(wdata, mu     = exp(-1 + 1 * x2),
+                          x3     = rnorm(nn),
+                          shape1 = exp(1),
+                          shape2 = exp(2))
+wdata <- transform(wdata,
+  y1 = rweibull(nn, shape = shape1, scale = mu / gamma(1 + 1/shape1)),
+  y2 = rweibull(nn, shape = shape2, scale = mu / gamma(1 + 1/shape2)))
+fit <- vglm(cbind(y1, y2) ~ x2 + x3, weibull.mean, data = wdata, trace = TRUE)
+coef(fit, matrix = TRUE)
+sqrt(diag(vcov(fit)))  # SEs
+summary(fit, presid = FALSE)
+}
+\keyword{models}
+\keyword{regression}
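
A base-R check of the relationship used in the example above,
mu = b * gamma(1 + 1/a), i.e., scale b = mu / gamma(1 + 1/a); the sample mean of
simulated Weibull data should sit close to the target mean.

set.seed(1)
shape <- exp(1); mu <- exp(0.5)
scale <- mu / gamma(1 + 1 / shape)          # As in the rweibull() calls above

y <- rweibull(1e5, shape = shape, scale = scale)
c(target.mean = mu, sample.mean = mean(y))  # Should be close
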
diff --git a/man/weibullR.Rd b/man/weibullR.Rd
index 053a427..f0c27b7 100644
--- a/man/weibullR.Rd
+++ b/man/weibullR.Rd
@@ -208,6 +208,7 @@ Concerns about Maximum Likelihood Estimation for
 }
 
 \seealso{
+    \code{\link{weibull.mean}},
     \code{\link[stats:Weibull]{dweibull}},
     \code{\link{truncweibull}},
     \code{\link{gev}},
diff --git a/man/wine.Rd b/man/wine.Rd
index 11e973a..65af8f7 100644
--- a/man/wine.Rd
+++ b/man/wine.Rd
@@ -66,7 +66,11 @@ Christensen, R. H. B. (2013)
 Analysis of ordinal data with cumulative link models---estimation
 with the R-package \pkg{ordinal}.
 R Package Version 2013.9-30.
-\url{http://www.cran.r-project.org/package=ordinal}.
+\url{http://cran.r-project.org/package=ordinal}.
+%\url{http://www.r-project.org/package=ordinal}.
+%\url{http://www.cran.r-project.org/package=ordinal}.  % Prior to 20150728
+
+
 
 
   Randall, J. H. (1989) 
diff --git a/vignettes/crVGAM.Rnw b/vignettes/crVGAM.Rnw
index 6a46807..86f64d7 100644
--- a/vignettes/crVGAM.Rnw
+++ b/vignettes/crVGAM.Rnw
@@ -126,6 +126,15 @@ options(prompt = "R> ", continue = "+")
 \label{sec:intro}
 
 
+
+Note: this vignette is essentially \cite{yee:stok:hugg:2015}.
+
+
+
+\bigskip
+
+
+
 Capture--recapture (CR) surveys are widely used in ecology and 
 epidemiology to estimate population sizes. In essence they are 
 sampling schemes that allow the estimation of both $n$ and $p$ 
@@ -378,7 +387,7 @@ function based only on the individuals observed at least once is
 \begin{eqnarray}
 \label{eq:posbern.condlikelihood}
 L_{c} & \propto & \prod_{i=1}^{n} 
-\frac{\prod\limits_{j=1}^{\tau} p_{ij}^{y_{ij}} (1-p_{ij})^{1 - y_{ij}}}
+\frac{\prod_{j=1}^{\tau} p_{ij}^{y_{ij}} (1-p_{ij})^{1 - y_{ij}}}
 {1-\prod_{s=1}^{\tau}(1-p_{is}^{\dagger})}.
 \end{eqnarray}
 is used. Here $p_{is}^{\dagger}$ are the $p_{ij}$ computed as if the
@@ -508,7 +517,8 @@ Appendix A.
 \label{sec:Nhat}
 
 
-In the above linear models, to estimate $N$ let $\pi_{i}(\btheta)=1-\prod\limits_{s=1}^{\tau}(1-p_{is}^{\dagger})$ 
+In the above linear models, to estimate $N$
+let $\pi_{i}(\btheta)=1-\prod_{s=1}^{\tau}(1-p_{is}^{\dagger})$ 
 be the probability that individual $i$ is captured at least once 
 in the course of the study. Then, if $\btheta$ is known, 
 the Horvitz--Thompson \citep[HT;][]{horv:thom:1952} estimator
@@ -1470,9 +1480,12 @@ plot(fit.bh, se = TRUE, las = 1, lcol = "blue", scol = "orange",
 
 
 % ---------------------------------------------------------------------
+
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
+
 \begin{figure}[tt]
 \begin{center}
-<<plot-deermice, width=6.0, height=5, echo=FALSE, message=FALSE, warning=FALSE>>=
+<<fig=TRUE,label=plot-deermice, width=6.0, height=5, echo=FALSE, message=FALSE, warning=FALSE>>=
 par(mfrow = c(2, 2))
 par(las = 1, cex = 1.1, mar = c(3.8, 4, 0.5, 0.2) + 0.1)
 par(mgp = c(2.3, 1, 0))  # Default is c(3, 1, 0)
@@ -1485,8 +1498,6 @@ plot(fit.bh, se = TRUE, las = 1, lcol = "blue", scol = "orange",
 # < < poz-posbernoulli-eg-deermice-smooth-shadow> >
 
 
-
-
 @
 \caption{Estimated component functions with approximate $\pm 2$ 
 pointwise SE bands fitting a $\calM_{bh}$-VGAM, using 
@@ -1497,6 +1508,9 @@ each covariate value $x_{ik}$.\label{fig:poz:deermice}
 \end{figure}
 
 
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
 % ---------------------------------------------------------------------
 
 
@@ -1604,11 +1618,13 @@ legend("topleft", legend = c("Fat present", "Fat not present"), bty = "n",
 
 
 
+% ---------------------------------------------------------------------
 
+\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
 
 \begin{figure}[tt]
 \begin{center}
-<<plot-bird, width=6.0, height=5.5, echo=FALSE, message=FALSE, warning=FALSE>>=
+<<fig=TRUE, label=plot-bird, width=6.0, height=5.5, echo=FALSE, message=FALSE, warning=FALSE>>=
 par(mfrow = c(1, 1))
 
 
@@ -1668,6 +1684,12 @@ Notice that the standard errors are wider at the boundaries.
 \end{figure}
 
 
+\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
+
+
+% ---------------------------------------------------------------------
+
+
 Both the estimates for the population size and shape of the fitted 
 capture probabilities with smoothing (Figure \ref{fig:bird}) matched 
 those in previous studies, e.g., see Figure 1 of \citet{hwan:hugg:2007}. 
@@ -2112,7 +2134,7 @@ We give the first and (expected) second derivatives of the models.
 Let $z_{ij}= 1$ if individual $i$ has been captured before occasion $j$,
 else $=0$. Also, let $p_{cj}$ and $p_{rj}$ be the probability that an
 individual is captured/recaptured at sampling occasion $j$, 
-and $Q_{s:t} = \prod\limits_{j=s}^{t} (1-p_{cj})$.
+and $Q_{s:t} = \prod_{j=s}^{t} (1-p_{cj})$.
 Occasionally, subscripts $i$ are omitted for clarity.
 \cite{hugg:1989} gives a general form for the derivatives of the
 conditional likelihood (\ref{eq:posbern.condlikelihood}).
@@ -2218,9 +2240,15 @@ to the probabilities in the conditional
 likelihood function (\ref{eq:posbern.condlikelihood}), viz.
 \[
 {\widehat{p}}_{ij}^{ y_{ij}} \left(1-\widehat{p}_{ij}\right)^{1-y_{ij}}
-\cdot\left[1 - \prod\limits_{s=1}^{\tau}
-\left( 1 - \widehat{p}_{i,cs}^{}\right)\right]^{-1}.
+\cdot
+\left[
+ 1 - \prod_{s=1}^{\tau}
+\left( 1 - \widehat{p}_{i,cs}^{}\right)
+\right]^{-1}.
 \]
+
+
+
 Alternatively, the unconditional means of the $Y_j$ can be
 returned as the fitted values upon selecting
 \code{type.fitted = "mean"} argument.
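
A sketch of the Horvitz--Thompson arithmetic referenced above: with
pi_i = 1 - prod_s(1 - p_is), the population-size estimate is the sum over the
observed individuals of 1/pi_i-hat. The capture probabilities below are made up
purely for illustration rather than taken from a fitted model.

set.seed(1)
tau <- 4; n.obs <- 100                                     # Occasions; animals seen at least once
phat <- matrix(runif(n.obs * tau, 0.2, 0.6), n.obs, tau)   # Stand-in fitted p_{is}
pi.hat <- 1 - apply(1 - phat, 1, prod)                     # P(captured at least once)
sum(1 / pi.hat)                                            # Horvitz--Thompson estimate of N
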
diff --git a/vignettes/crVGAM.bib b/vignettes/crVGAM.bib
index fb184d4..a5ec8c0 100644
--- a/vignettes/crVGAM.bib
+++ b/vignettes/crVGAM.bib
@@ -514,3 +514,23 @@
   pages =	 {114--120},
   number =	 2,
 }
+
+
+
+ at article{yee:stok:hugg:2015,
+  author =      {Yee, T. W. and Stoklosa, J. and Huggins, R. M.},
+  title =       {The \textsf{VGAM} package for capture--recapture data
+                 using the conditional likelihood},
+  journal =     {J. Statist. Soft.},
+ Fjournal =     {Journal of Statistical Software},
+  year =        {2015},
+  volume =      {65},
+  number =      {5},
+  pages =       {1--33},
+  url   = "http://www.jstatsoft.org/v65/i05/",
+}
+
+
+
+
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git



More information about the debian-science-commits mailing list