[r-cran-vgam] 43/63: Import Upstream version 0.9-7

Andreas Tille tille at debian.org
Tue Jan 24 13:54:37 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-vgam.

commit b6a4c86643750cebc3ec0ab2a819b20a4faea954
Author: Andreas Tille <tille at debian.org>
Date:   Tue Jan 24 14:17:02 2017 +0100

    Import Upstream version 0.9-7
---
 DESCRIPTION                      |    8 +-
 MD5                              |  415 ++++---
 NAMESPACE                        |   43 +-
 NEWS                             |   52 +-
 R/Links.R                        |    2 +-
 R/aamethods.q                    |    2 +-
 R/add1.vglm.q                    |    2 +-
 R/attrassign.R                   |    2 +-
 R/bAIC.q                         |  233 ++--
 R/build.terms.vlm.q              |    2 +-
 R/calibrate.q                    |    2 +-
 R/cao.R                          |    2 +-
 R/cao.fit.q                      |    4 +-
 R/coef.vlm.q                     |    2 +-
 R/cqo.R                          |    2 +-
 R/cqo.fit.q                      |    6 +-
 R/deviance.vlm.q                 |    2 +-
 R/effects.vglm.q                 |    2 +-
 R/family.actuary.R               |  776 ++++++++++---
 R/family.aunivariate.R           |  336 +++++-
 R/family.basics.R                |   20 +-
 R/family.binomial.R              |  119 +-
 R/family.bivariate.R             |   26 +-
 R/family.categorical.R           |   54 +-
 R/family.censored.R              |    2 +-
 R/family.circular.R              |  136 ++-
 R/family.exp.R                   |  316 ++++--
 R/family.extremes.R              |  344 ++++--
 R/family.functions.R             |    2 +-
 R/family.genetic.R               |  269 +++--
 R/family.glmgam.R                |  118 +-
 R/family.loglin.R                |    2 +-
 R/family.math.R                  |    2 +-
 R/family.mixture.R               |    2 +-
 R/family.nonlinear.R             |   14 +-
 R/family.normal.R                |  321 ++++--
 R/family.others.R                |  178 ++-
 R/family.positive.R              |   66 +-
 R/family.qreg.R                  |  507 +++++++--
 R/family.quantal.R               |    4 +-
 R/family.rcim.R                  |    2 +-
 R/family.rcqo.R                  |    2 +-
 R/family.robust.R                |   77 +-
 R/family.rrr.R                   |    6 +-
 R/family.sur.R                   |    2 +-
 R/family.survival.R              |   82 +-
 R/family.ts.R                    |   10 +-
 R/family.univariate.R            |  836 +++++++++-----
 R/family.vglm.R                  |    2 +-
 R/family.zeroinf.R               |   34 +-
 R/fittedvlm.R                    |    2 +-
 R/formula.vlm.q                  |    2 +-
 R/generic.q                      |    2 +-
 R/links.q                        |   73 +-
 R/logLik.vlm.q                   |    2 +-
 R/lrwaldtest.R                   |    2 +-
 R/model.matrix.vglm.q            |    6 +-
 R/mux.q                          |    2 +-
 R/nobs.R                         |    2 +-
 R/plot.vglm.q                    |    2 +-
 R/predict.vgam.q                 |    2 +-
 R/predict.vglm.q                 |    2 +-
 R/predict.vlm.q                  |    2 +-
 R/print.vglm.q                   |    2 +-
 R/print.vlm.q                    |    2 +-
 R/qrrvglm.control.q              |    2 +-
 R/qtplot.q                       |    2 +-
 R/residuals.vlm.q                |    2 +-
 R/rrvglm.R                       |    2 +-
 R/rrvglm.control.q               |    2 +-
 R/rrvglm.fit.q                   |    6 +-
 R/s.q                            |    2 +-
 R/s.vam.q                        |    2 +-
 R/smart.R                        |    2 +-
 R/step.vglm.q                    |    2 +-
 R/summary.vgam.q                 |    2 +-
 R/summary.vglm.q                 |    2 +-
 R/summary.vlm.q                  |    2 +-
 R/vgam.R                         |    2 +-
 R/vgam.control.q                 |    2 +-
 R/vgam.fit.q                     |    6 +-
 R/vgam.match.q                   |    2 +-
 R/vglm.R                         |    2 +-
 R/vglm.control.q                 |    2 +-
 R/vglm.fit.q                     |    6 +-
 R/vlm.R                          |    4 +-
 R/vlm.wfit.q                     |    2 +-
 R/vsmooth.spline.q               |   17 +-
 build/vignette.rds               |  Bin 381 -> 0 bytes
 data/ducklings.rda               |  Bin 0 -> 561 bytes
 inst/doc/categoricalVGAM.R       |  278 -----
 inst/doc/categoricalVGAM.Rnw     | 2325 --------------------------------------
 inst/doc/categoricalVGAM.pdf     |  Bin 734870 -> 0 bytes
 man/A1A2A3.Rd                    |   10 +-
 man/AA.Aa.aa.Rd                  |   11 +-
 man/ABO.Rd                       |   19 +-
 man/CommonVGAMffArguments.Rd     |   25 +-
 man/Links.Rd                     |    5 +-
 man/ParetoUC.Rd                  |   12 +-
 man/QvarUC.Rd                    |    2 +-
 man/VGAM-package.Rd              |   12 +-
 man/alaplaceUC.Rd                |   14 +-
 man/benfUC.Rd                    |   11 +-
 man/beniniUC.Rd                  |   12 +-
 man/betabinomialff.Rd            |   17 +-
 man/betaff.Rd                    |   10 +-
 man/betanormUC.Rd                |   13 +-
 man/bilogistic.Rd                |    5 +
 man/binomialff.Rd                |   23 +-
 man/bisa.Rd                      |    6 +-
 man/bisaUC.Rd                    |   12 +-
 man/cardUC.Rd                    |   13 +-
 man/cardioid.Rd                  |    6 +-
 man/cloglog.Rd                   |    6 +-
 man/cqo.Rd                       |    8 +-
 man/cumulative.Rd                |    6 +-
 man/dagum.Rd                     |   16 +-
 man/dagumUC.Rd                   |   16 +-
 man/dirichlet.Rd                 |    5 +-
 man/dirmultinomial.Rd            |   10 +-
 man/double.expbinomial.Rd        |    8 +-
 man/ducklings.Rd                 |   70 ++
 man/eexpUC.Rd                    |   13 +-
 man/enormUC.Rd                   |   15 +-
 man/eunifUC.Rd                   |   13 +-
 man/exppoissonUC.Rd              |   42 +-
 man/felix.Rd                     |    2 +-
 man/fisk.Rd                      |   15 +-
 man/fiskUC.Rd                    |   20 +-
 man/foldnormUC.Rd                |   18 +-
 man/{fsqrt.Rd => foldsqrt.Rd}    |   32 +-
 man/frechetUC.Rd                 |   14 +-
 man/genbetaII.Rd                 |   26 +-
 man/gengammaUC.Rd                |   14 +-
 man/genpoisson.Rd                |    2 +-
 man/genrayleighUC.Rd             |   15 +-
 man/gev.Rd                       |    6 +-
 man/gevUC.Rd                     |   10 +-
 man/golf.Rd                      |    6 +-
 man/gompertz.Rd                  |    6 +-
 man/gompertzUC.Rd                |   12 +-
 man/gpd.Rd                       |    4 +-
 man/gpdUC.Rd                     |   16 +-
 man/grc.Rd                       |    2 +-
 man/gumbelIIUC.Rd                |   14 +-
 man/gumbelUC.Rd                  |   24 +-
 man/hormone.Rd                   |    3 +-
 man/hspider.Rd                   |    3 +-
 man/huberUC.Rd                   |   22 +-
 man/hypersecant.Rd               |    6 +-
 man/hzetaUC.Rd                   |   10 +-
 man/inv.binomial.Rd              |    2 +-
 man/inv.lomaxUC.Rd               |   10 +-
 man/inv.paralogistic.Rd          |   15 +-
 man/inv.paralogisticUC.Rd        |   18 +-
 man/kumarUC.Rd                   |   11 +-
 man/laplaceUC.Rd                 |   12 +-
 man/lgammaUC.Rd                  |   16 +-
 man/lindUC.Rd                    |   10 +-
 man/linkfun.Rd                   |   69 ++
 man/linkfun.vglm.Rd              |   83 ++
 man/linoUC.Rd                    |   12 +-
 man/logit.Rd                     |   20 +-
 man/loglapUC.Rd                  |   12 +-
 man/lomaxUC.Rd                   |   12 +-
 man/makeham.Rd                   |    6 +-
 man/makehamUC.Rd                 |   13 +-
 man/maxwellUC.Rd                 |   12 +-
 man/nakagamiUC.Rd                |   10 +-
 man/nbcanlink.Rd                 |    3 +-
 man/nbolf.Rd                     |    2 +-
 man/negbinomial.Rd               |  216 ++--
 man/notdocumentedyet.Rd          |   11 +-
 man/nparamvglm.Rd                |  103 ++
 man/olym.Rd                      |   14 +-
 man/paralogistic.Rd              |   15 +-
 man/paralogisticUC.Rd            |   21 +-
 man/paretoIVUC.Rd                |   32 +-
 man/perks.Rd                     |    6 +-
 man/perksUC.Rd                   |   13 +-
 man/polf.Rd                      |    2 +-
 man/posbernoulli.t.Rd            |    7 +-
 man/posbinomUC.Rd                |    2 +-
 man/posbinomial.Rd               |    8 +-
 man/posnormUC.Rd                 |   17 +-
 man/quasibinomialff.Rd           |   47 +-
 man/rayleighUC.Rd                |   12 +-
 man/riceUC.Rd                    |   12 +-
 man/riceff.Rd                    |    6 +-
 man/rrvglm.Rd                    |    9 +-
 man/rrvglm.control.Rd            |    2 +
 man/sc.t2UC.Rd                   |   12 +-
 man/sinmad.Rd                    |   25 +-
 man/sinmadUC.Rd                  |   20 +-
 man/slashUC.Rd                   |    9 +-
 man/tikuvUC.Rd                   |   12 +-
 man/tobitUC.Rd                   |   54 +-
 man/triangle.Rd                  |    8 +-
 man/triangleUC.Rd                |   12 +-
 man/truncparetoUC.Rd             |   18 +-
 man/undocumented-methods.Rd      |   18 +-
 man/vgam.Rd                      |    8 +-
 man/vglm.Rd                      |    1 +
 man/vonmises.Rd                  |    2 +-
 man/vsmooth.spline.Rd            |   20 +-
 man/weibullR.Rd                  |    4 +-
 man/yulesimonUC.Rd               |    8 +-
 man/zanegbinomial.Rd             |    3 +-
 man/zapoisson.Rd                 |    3 +-
 man/zibinomial.Rd                |   17 +-
 man/zipfUC.Rd                    |    7 +-
 man/zipoisson.Rd                 |    3 +-
 vignettes/categoricalVGAM.Rnw    | 2325 --------------------------------------
 vignettes/categoricalVGAMbib.bib |  653 -----------
 214 files changed, 5345 insertions(+), 7801 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 5d4c6e8..70dc0e0 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
 Package: VGAM
-Version: 0.9-6
-Date: 2014-12-08
+Version: 0.9-7
+Date: 2015-03-06
 Title: Vector Generalized Linear and Additive Models
 Author: Thomas W. Yee <t.yee at auckland.ac.nz>
 Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -26,6 +26,6 @@ NeedsCompilation: yes
 BuildVignettes: yes
 LazyLoad: yes
 LazyData: yes
-Packaged: 2014-12-08 03:56:18 UTC; tyee001
+Packaged: 2015-03-06 09:11:02 UTC; tyee001
 Repository: CRAN
-Date/Publication: 2014-12-08 08:23:11
+Date/Publication: 2015-03-06 10:38:52
diff --git a/MD5 b/MD5
index f0a3e12..069a649 100644
--- a/MD5
+++ b/MD5
@@ -1,93 +1,92 @@
 66414b6ed296192426033f4ac29a6af2 *BUGS
-56b5a065696061695eb04927c166cce3 *DESCRIPTION
-80e495256dd8946f3468ec738fc98bc6 *NAMESPACE
-3a21077264e7f4a39e7aad9f086afd11 *NEWS
-21f682d56c4fc9327d48e2179788422e *R/Links.R
-50817106214d06314a10a612ab1fc95a *R/aamethods.q
-a0675022e96fd5fb7fddac8f0a57fd30 *R/add1.vglm.q
-b2b2be3f5bab46f3400b3e0448dd1e37 *R/attrassign.R
-c6e91684ada0ba6cadd66223b65ffb88 *R/bAIC.q
-749604f5f400577a87ba72d3776660c4 *R/build.terms.vlm.q
-62254e8579894244f5fe24914f534de1 *R/calibrate.q
-1a1baf9981c434c5e24795e241bc719b *R/cao.R
-25144d1bbfed30056f51b9c1583e7876 *R/cao.fit.q
-4dc53e3eae827b9729fb799d6d8b3d7a *R/coef.vlm.q
-c8c22372a0a69c23aeb94fddab38428e *R/cqo.R
-13a22d60defef24b2a813cde2323f321 *R/cqo.fit.q
-4efd72fef4a0ae5b14bd8a05377b14f7 *R/deviance.vlm.q
-3d38c5950f4f9e52085e9ba782feeb90 *R/effects.vglm.q
-f7c7f03bc4324676171f90c5635b2ad1 *R/family.actuary.R
-ef2622e509ead5e9e7e2c6fa962dd35e *R/family.aunivariate.R
-968e7fe2052ece7ad00a01b2b13270bf *R/family.basics.R
-96d7aa9d86b2877fbc25c71762ec9e67 *R/family.binomial.R
-15c543df3e3f471cd6a944827b9977d6 *R/family.bivariate.R
-69d07ebc28871093dc5afa23679c5f8d *R/family.categorical.R
-49a734273e6984063a90345afa6baa76 *R/family.censored.R
-70190886f33094f971e04d36895f80e6 *R/family.circular.R
-d46827c37e9fb9ba5d17afb1b4bb8359 *R/family.exp.R
-c85261079319b92e74b49c310d73c9aa *R/family.extremes.R
-216cab69ae6b0960bdefcedebf260217 *R/family.functions.R
-86505922fc0f9dfba086820523c6aef1 *R/family.genetic.R
-70345321a3bf8553aed029b758fef4e9 *R/family.glmgam.R
-cf4a5403b58f1f1f8ff459488f5b0843 *R/family.loglin.R
-93eb79120f4e952cf7b378a83267d010 *R/family.math.R
-0de0891b2e1603fd5eed723f3984bc50 *R/family.mixture.R
-7a559228e6eddad64acd3f662a579b74 *R/family.nonlinear.R
-2f417428f0e564d8c90b45dc2887c740 *R/family.normal.R
-b7e285f97e1c7aa401b71485a639ec4b *R/family.others.R
-e765d07abcd78f51028896740e4cb33b *R/family.positive.R
-fc390b75cf9c8f6c882b905968d2d645 *R/family.qreg.R
-c05609977f1289db83e83a11f19302d4 *R/family.quantal.R
-3d2ce925abe6b8dcd367759b115d7486 *R/family.rcim.R
-b333e00eb110446c92c2edad114072fd *R/family.rcqo.R
-80d6bdca76c8a853483497dc403e971a *R/family.robust.R
-95dc68427644418097fba1b3f2fa588c *R/family.rrr.R
-6aaa42bed6ddb4f4429fac5d4ffbf7d9 *R/family.sur.R
-76b17657c66fa8405996ccdeeb20d82c *R/family.survival.R
-b32cbe9457bc213c65a6e937e2892b01 *R/family.ts.R
-d691f008e7de2d0a162aa5b233e5b757 *R/family.univariate.R
-23e25d09aed81048919b15e35d2a3fdf *R/family.vglm.R
-89df0901db87c9c811fa7686b633cf8b *R/family.zeroinf.R
-e77bfa0f3a2e6a802a308611f84b75c1 *R/fittedvlm.R
-e0f39e9a543e616c179f53c153ada17b *R/formula.vlm.q
-66dceb0aa1906a6358ddf5840e5c3a10 *R/generic.q
-bdc951d12740944bbb70b27d98511f25 *R/links.q
-a111fc4dd1dbd7280c07009277eec304 *R/logLik.vlm.q
-9b3ae4a5273f991e6ac2feac29320343 *R/lrwaldtest.R
-8a1d472a05bbe4a0fbcbea6969ef9ae1 *R/model.matrix.vglm.q
-6aa9138e0343b812d000949720fa3036 *R/mux.q
-26d9b0e5861ac9ecb838253d7fa89aa7 *R/nobs.R
-f3eeccb2f0f1740637f48308484ddf80 *R/plot.vglm.q
-d3050d098e945c23a2617410e4698b9a *R/predict.vgam.q
-b503ba5f6eb50d39b773c79d7e57a2d8 *R/predict.vglm.q
-b0473f38d6b4fb6358ea912ae739777b *R/predict.vlm.q
-6b6c07d978349b4c2dd82d1e3768f763 *R/print.vglm.q
-77e6d91384fb55e09abdabfc0648084d *R/print.vlm.q
-6b18d42adf25ab762686f45111fc9908 *R/qrrvglm.control.q
-13edf9b27deeec4720ce3c63805c0826 *R/qtplot.q
-cd95e96c163efedcf7dc452b5b9b80aa *R/residuals.vlm.q
-2343981c7ca4d3f31b09234ee76de9aa *R/rrvglm.R
-ee1d3fe8e9731d47aab09577d16f296d *R/rrvglm.control.q
-fcd355968577b16ae0572413f3126232 *R/rrvglm.fit.q
-4d6331d1b7f7e9f49511ac68ed7ab830 *R/s.q
-e692982477e8969b40987bef7739c508 *R/s.vam.q
+d7c7e332462be2810169b8dd45f28660 *DESCRIPTION
+58a9357fcae44be200423c8bd8f156b6 *NAMESPACE
+0fa7e63737588c9dcbc71f7d3dc964a1 *NEWS
+46f97e789f77839767fc3d3511d2f990 *R/Links.R
+b6b017bdea768a643afc8171516d193b *R/aamethods.q
+4ffc1530ca8113d2f2d8b0d5cc1db282 *R/add1.vglm.q
+29b192ec0239f8f013e99ef759823732 *R/attrassign.R
+19fd9a65f33bfc01a56d0ee1f4752159 *R/bAIC.q
+b35d7499e726674163cd7eeeefbe39b0 *R/build.terms.vlm.q
+6ba194b23f60728f70575d799af9c3c1 *R/calibrate.q
+8fa625cc47ab28b74bd41019d20b7b02 *R/cao.R
+ce3d85bf00ad08175321e2098ae87462 *R/cao.fit.q
+5a26e4f96581a1ce75aba0587b261573 *R/coef.vlm.q
+77638f2e22a3dd774115c472bf0c33e8 *R/cqo.R
+9a4e3479392194fbe0c6e55cacb03f62 *R/cqo.fit.q
+d411a1bf3bfbe7057b4211255c33ba53 *R/deviance.vlm.q
+54b928344dc9efab031bf3e83d04f21f *R/effects.vglm.q
+4147f1613b1576d3ecefb7ab744a10d7 *R/family.actuary.R
+0a2e88e04cab2be01307a0a27ed659f7 *R/family.aunivariate.R
+f92ab9ae022219eebb512a18fec30f9f *R/family.basics.R
+e2ff7c9d8835dd42e6dd608c7acf40cc *R/family.binomial.R
+be15ddf47d35b2ce3de5d69b9acbfbe9 *R/family.bivariate.R
+55b5b0aa2ad15e68615b495b38dfa616 *R/family.categorical.R
+2a1cc1122243a38c4ee678d7692293e6 *R/family.censored.R
+0c13f3a38a685b978dde42ace40e55e8 *R/family.circular.R
+b9fedec9d690c434a69d89c9e6845bb7 *R/family.exp.R
+e74415fd99041e7645255d20ffd58ee2 *R/family.extremes.R
+251b551aaf906c754d9e75342e7ea1af *R/family.functions.R
+5586e8d6e71cb830f2c6b852a10b3beb *R/family.genetic.R
+842bd10d4a8da3b0d367517c272eda56 *R/family.glmgam.R
+688abce2e4312da5c9e03a6fb2cdf935 *R/family.loglin.R
+5679a8a30b54ac8f66dd945b2d1ccd2a *R/family.math.R
+f3a38cecabef0846e4f0c7bdb5c9ee81 *R/family.mixture.R
+0d5e4c835d8bab66576a0b66830afbf9 *R/family.nonlinear.R
+22b5c53812a48db4711e61da40f5aba4 *R/family.normal.R
+a3ea06d6b5e0c41585333addbe62ffe0 *R/family.others.R
+1663b0fa1af37724fd51607db5699d01 *R/family.positive.R
+9041d6d34c26ffff3f40e816b3263e29 *R/family.qreg.R
+2cf55e062652004b40b734273199f35d *R/family.quantal.R
+03397070df548d5a86ca5930b156073c *R/family.rcim.R
+990f65032a38762d9cebec6f66794235 *R/family.rcqo.R
+ffd541fd4b00179e6595b9e6926f3291 *R/family.robust.R
+d8845f8b8bf363485bcf7f530617358f *R/family.rrr.R
+943ff0caa6e0cf7294b32a0a8dc1ad98 *R/family.sur.R
+ebb9d8fde5a537c58b25f47482cad8d2 *R/family.survival.R
+d66b2f805fff817efd32ba5c5c7a56aa *R/family.ts.R
+bca7c0c7874d0c5601dfdfc25d475fee *R/family.univariate.R
+8d7d5df3e005750453f8ed0977c0c4f6 *R/family.vglm.R
+9a0ddbb47ed714430e122b85e8e254b9 *R/family.zeroinf.R
+e5a738b6ba3f59a3962eb089e56e5786 *R/fittedvlm.R
+d706ca44527adda488800e2834d5d278 *R/formula.vlm.q
+1c7d28893d43c88a934731219098fd5c *R/generic.q
+b0401a9b8b838e57e6918f420d2f84be *R/links.q
+06929b2f0a102fcca301a9f265279e04 *R/logLik.vlm.q
+92736375efccc88013c357fd287aa4cb *R/lrwaldtest.R
+ed5f231217b28cd1448cef2529a7e084 *R/model.matrix.vglm.q
+c9f890ae5310b45be85da9fd237b98e4 *R/mux.q
+ec00a9fdace1922ca78877ac43605737 *R/nobs.R
+8c7a83a2e5c10a871e722e3c307ad79b *R/plot.vglm.q
+a2547eed9a5570094efec6573e6f9f9b *R/predict.vgam.q
+6e8e3c05882565d21c582f369a13b673 *R/predict.vglm.q
+b9109db7f638db25728c3118e6baf41d *R/predict.vlm.q
+cfb0659e61f097d41e0266ee71d15a9d *R/print.vglm.q
+74f7393a57eec9a96cc7d04a569037ca *R/print.vlm.q
+c431e12369752358904f6704832decd5 *R/qrrvglm.control.q
+18ab7fc886450762ed4e982558776159 *R/qtplot.q
+78e9224292be8718824d53dd2165dad4 *R/residuals.vlm.q
+9d5826ad08d66734f7403d17fcbba5f6 *R/rrvglm.R
+e278dec435eddcc0345a59bd9dd56f6d *R/rrvglm.control.q
+7f713596fe6bb361c2e4e6a7520daec8 *R/rrvglm.fit.q
+cf62bdb183fe009cd47316eacbe3b14e *R/s.q
+156f02ed65d0d90c17241cbada6d0c00 *R/s.vam.q
 400c72e71f4820b58f292523029c6245 *R/simulate.vglm.R
-277ba59aa1a252dbfb97c6ca24e95b66 *R/smart.R
-40b65c32c98ed7fe00459f089182209f *R/step.vglm.q
-df48678099b0d4b643d22d0a25adc5f1 *R/summary.vgam.q
-02d08e22bbacafdecfb36cf581a04ccb *R/summary.vglm.q
-b27c8d54a7efff12d6e6940459ddf2d7 *R/summary.vlm.q
-4d61781fd7a1619eeae76a619599c247 *R/vgam.R
-aee3a2ac9b9b36985e8f8c3d15709590 *R/vgam.control.q
-2aa25abd6c64976065b527635b5ce52a *R/vgam.fit.q
-58bb89dc29480d468782ac4e410716de *R/vgam.match.q
-546998232ec7d26de97f27780b611a81 *R/vglm.R
-da6d3cf2bc2312861b94e3c7c630cd47 *R/vglm.control.q
-35e660f31d1d739eb5b8f62fd178024e *R/vglm.fit.q
-6625b38a35a6485139fed10b5e2e3a90 *R/vlm.R
-090851ac7b3e8f536ba4626140dba2f0 *R/vlm.wfit.q
-128ebb1abdd41656f5662b00a16600cc *R/vsmooth.spline.q
-fccfbabb1be99d6b15eb5e5449d1b66e *build/vignette.rds
+366887aff30fbfac5afb77ed10597005 *R/smart.R
+89968d39bff60306bab87cc1e3ebdca1 *R/step.vglm.q
+ea860d4429fbcfb1c8e494a198e72adc *R/summary.vgam.q
+24bddb3d9264aa2079dc0bc1949dac08 *R/summary.vglm.q
+8233ae7e692d6254ac739541a4774109 *R/summary.vlm.q
+3d8dbd2d0163b95334a891c45b66b468 *R/vgam.R
+3a7ea81a3f0c6509e71466cfae4c108c *R/vgam.control.q
+f6da05ed223f0cac5b7731c8f5da2095 *R/vgam.fit.q
+c7836fc6514f090c9852ef7427b68a95 *R/vgam.match.q
+80b44e86fa2bf5753e95397653aead65 *R/vglm.R
+714a3a58e7584c7f2545aed04187a167 *R/vglm.control.q
+826df11f31484937475bd4270742900d *R/vglm.fit.q
+d3c11b3c2876d98a37ea6f4a5658a4a6 *R/vlm.R
+50d41f729a51f21ac03d717a33d708fb *R/vlm.wfit.q
+9c9d0afc47501544ea1da2703e60b4e9 *R/vsmooth.spline.q
 2cdabbff91d4f47a58705b2fff199298 *data/Huggins89.t1.rda
 3faa9073b7ae52defc01fde39527c39a *data/Huggins89table1.rda
 d89f69ab78bc3c7a526960c8bdb9454b *data/V1.txt.gz
@@ -111,6 +110,7 @@ eed6cd50d7aaef10522b1085fec41c11 *data/corbet.rda
 1d3c4a6ebff20d079a6a3ed3c6fbdc74 *data/crashp.rda
 c6df5decc6ce502fecc236c65248eede *data/crashtr.rda
 2360553382387ee92888f6ada418d819 *data/deermice.rda
+5117494b87c6dbac229830f421734f85 *data/ducklings.rda
 08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
 67e2d5489a51805dcb70a8ed17113be1 *data/finney44.rda
 3f07cf57e178c098bb51d3bd9d8d00d5 *data/flourbeetle.rda
@@ -146,13 +146,10 @@ b9f0af62a654d77a3052997eb4cc15e2 *demo/cqo.R
 ab8081763fe2144558be25f3a154327b *demo/vgam.R
 65570d10948785994d70d817f574bd96 *demo/zipoisson.R
 60616e1e78fe61c1fd4acdf0d3129747 *inst/CITATION
-4ff0e35d38b3c5bb38f1f7232b9af863 *inst/doc/categoricalVGAM.R
-bfa11dbdbff271fb20342560f2bacd53 *inst/doc/categoricalVGAM.Rnw
-009bdce7afc060ca4590c33e0eeddc8a *inst/doc/categoricalVGAM.pdf
-5ecb530e834d36b923e5167e587e5301 *man/A1A2A3.Rd
-c0d1e33c2b490cfa5d2bfcf15d8df7b4 *man/AA.Aa.aa.Rd
+9b97006cdc82d3a0c0ace3d43c9758de *man/A1A2A3.Rd
+cc9d465fc9db15abb65061e0b41a0f9e *man/AA.Aa.aa.Rd
 26a120083d1d9d77ac0a5193d0c186b9 *man/AB.Ab.aB.ab.Rd
-e1d0ae13a5a827f23b54e5ba209ddb40 *man/ABO.Rd
+c6c2a703e0f76c8b0f9e0a7d36f13386 *man/ABO.Rd
 38647708600610216a454c61450810ff *man/AICvlm.Rd
 0f4a799e95b245cfa0b5a37280a446ef *man/BICvlm.Rd
 32daae0afb71eae3cdeefc042f4241c6 *man/Coef.Rd
@@ -161,15 +158,15 @@ e1d0ae13a5a827f23b54e5ba209ddb40 *man/ABO.Rd
 a89beda3a48d5ff1cfdfae4636032a62 *man/Coef.rrvglm-class.Rd
 4da595e2cf6fffc2227871e745a5ee77 *man/Coef.rrvglm.Rd
 9d39d6e12ea6e56f687a10f76cb1803c *man/Coef.vlm.Rd
-d174c63ffcca8c67e21f3d0726a71eda *man/CommonVGAMffArguments.Rd
+92a1fb3e3a10e90414a8565eb5e3ac71 *man/CommonVGAMffArguments.Rd
 098a57d6e5525de04157c61dea2e1b9b *man/Huggins89.t1.Rd
 ce79d0626711d299c9c0cc2efab3abac *man/Inv.gaussian.Rd
-744e8c69d6102c5fca0dba602ce4dde2 *man/Links.Rd
+fed1d90172d0b6b35b8d708076378a7e *man/Links.Rd
 e53a7b5f977320e9a2b3cfba16e097ee *man/MNSs.Rd
 5ddd860d2b28b025dbf94b80062e3fc6 *man/Max.Rd
 00dce9ac476270fc8ce02ea1e75de191 *man/Opt.Rd
-f813042e34ab98120e210c18432defd5 *man/ParetoUC.Rd
-9012ad8444a0b750e3155cd43d8965bc *man/QvarUC.Rd
+d315bc4396e206c1ec3c5219e4efc677 *man/ParetoUC.Rd
+f84dea8ac6b2c1e857d25faaceb706d2 *man/QvarUC.Rd
 bd689bfc27028aea403c93863cf2e207 *man/Rcim.Rd
 ea581f4824e64871d53376a9751c8a2e *man/SURff.Rd
 685985b08b4668ae66206e9d72170b45 *man/Select.Rd
@@ -177,10 +174,10 @@ ea581f4824e64871d53376a9751c8a2e *man/SURff.Rd
 6ed5239b716d4aaef069b66f248503f0 *man/SurvS4.Rd
 21dc3918d6b5375c18dcc6cc05be554e *man/Tol.Rd
 6930cfc91e602940cafeb95cbe4a60d3 *man/V1.Rd
-af55f5a996cd8f44d8ec443481a0ea5e *man/VGAM-package.Rd
+f53b5bbb06501b7531361f0914d79a78 *man/VGAM-package.Rd
 f27b784569a22f080ff1ded6d9bbd17a *man/acat.Rd
 b346a61c9c3965d8ca97f3c98d9cacc0 *man/alaplace3.Rd
-573cdf092fc48b9b1c1f10e9af6b0fe5 *man/alaplaceUC.Rd
+8c0d8e4d9e634a0c2539e3a052afa9cc *man/alaplaceUC.Rd
 8e181f4f03b718c6c9825ea3b6c4b8d6 *man/amlbinomial.Rd
 f6c521d0142c7e65e7d5aad6880616ee *man/amlexponential.Rd
 cf9c3d4f8799980be2f9e965eb809b42 *man/amlnormal.Rd
@@ -189,18 +186,18 @@ ec213548ebb41e47b727541566160dfb *man/amlpoisson.Rd
 c8efe93df8799ff106b6784e1bf50597 *man/auxposbernoulli.t.Rd
 bcddb8c1df8893cf14a4400ee5dee6df *man/backPain.Rd
 6ac5a3f07851ac3f7e19eaa977365e0f *man/beggs.Rd
-80c65642cf41be59e4b49be5d05d93f2 *man/benfUC.Rd
+65a5426c021e0a6c90731c14786a3395 *man/benfUC.Rd
 afa1ccbe6dd6e769dc1bbbc5702148dd *man/benini.Rd
-c36237b73998bac0f19a3983cdb1df85 *man/beniniUC.Rd
+12d28242eea600b3e6f52db5d71d871f *man/beniniUC.Rd
 f4cabec88ec30505db5785b1aaf1eb48 *man/betaII.Rd
 d27525262d9c6975b15a77219afeb362 *man/betaR.Rd
 6d202361c5c1981d29c597fd716050f0 *man/betabinomUC.Rd
 bbb0ddef9113d1b8d1e036ac66f9bb87 *man/betabinomial.Rd
-481a382185943fa003bfe9c09ec4459c *man/betabinomialff.Rd
-581c39d3abaefd4d1a67e2e92ae1d925 *man/betaff.Rd
+4e9c0e3075be1050db8ad3fe1e8dce6e *man/betabinomialff.Rd
+d4fbb7ebcc599765b2e0df4ff840876f *man/betaff.Rd
 4b590ee6208b2f3025109b82c1f6d67c *man/betageomUC.Rd
 725a8c9d8b4a9facb0c3cb815d75266b *man/betageometric.Rd
-151cdf70cb16f8095369b88093ba48c7 *man/betanormUC.Rd
+7553029f69c2be7dbb20c864b97102e5 *man/betanormUC.Rd
 5a0a047bcd18649d5076999057bd1d49 *man/betaprime.Rd
 f41bc1b37620bca37ba4d2f16fdae05d *man/biamhcop.Rd
 495e32601db2c4f22462811e27436c9d *man/biamhcopUC.Rd
@@ -214,12 +211,12 @@ faeb492060203a0d89d5cf4f40b0e4c4 *man/bifgmcopUC.Rd
 3996c974a214c0d706d20d820a9a1fa0 *man/bigamma.mckay.Rd
 7a1c045834b0bd9de92a4aa97f52ab3c *man/bigumbelIexp.Rd
 ffcbfc72f334094f6dfd4842ab522e96 *man/bilogisUC.Rd
-cd241d3985e2b0dcf817f19417406596 *man/bilogistic.Rd
+e913aabb8e3808c637d264f28c90bf52 *man/bilogistic.Rd
 cebfba7c59c17329f50eb34c40c0b810 *man/binom2.or.Rd
 129f6be1cf1a039f137e5ef3da503fca *man/binom2.orUC.Rd
 a8cc7cbfa4c21672956a187c4ffba22d *man/binom2.rho.Rd
 20cb304b16a9073488621b104549e361 *man/binom2.rhoUC.Rd
-3a1ba0a046fd6c1147c675f0b87e4ddb *man/binomialff.Rd
+7a1cc63530a0082533d6f267a7647fa2 *man/binomialff.Rd
 53f8bc3da41aabe202d80304f2f84b63 *man/binormal.Rd
 3e2bebdf7d5db7a0c7960d6b6f1597b5 *man/binormalUC.Rd
 ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
@@ -227,8 +224,8 @@ ad66bf95a28851ff1f77b8675352cc04 *man/binormalcop.Rd
 1d943aad478481e7bf4c4b1a9540706c *man/biplackettcop.Rd
 79d9cd96d00531b88793d55a07d29842 *man/biplackettcopUC.Rd
 bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-4b35070bbd74b15afd585110514a55f7 *man/bisa.Rd
-9901ef6bbaed14ee55eda08dc810867e *man/bisaUC.Rd
+03369be2b6898192a83d14253ca3b1d8 *man/bisa.Rd
+8b2718247258cfa11b0857a922c512ab *man/bisaUC.Rd
 f0816002d3fb698dbc17a6e55d91c18f *man/bistudentt.Rd
 0489e2ceeed7b2aaf9cbcf6cfcabae81 *man/bistudenttUC.Rd
 81a2433effb7547679702256a5536b04 *man/bmi.nz.Rd
@@ -243,8 +240,8 @@ b121ffb4e604644ef7082d777b4411df *man/calibrate.Rd
 22e9a881f2f077f7e01e1dde9043dc7d *man/calibrate.qrrvglm.control.Rd
 8a71703f9846bdda282e59f67832e941 *man/cao.Rd
 4005c8bdb2b1a2e7d0ff5f1a800f4224 *man/cao.control.Rd
-f4172189ec92d9218a05c06ed8e88487 *man/cardUC.Rd
-7d96d29fad17cf0d10564c04f00c3ecb *man/cardioid.Rd
+10f72289cb33f5f734d39826893a280b *man/cardUC.Rd
+53ff522ff00f7bcfe443309762441150 *man/cardioid.Rd
 f4674b1787a58c87fbabdb369dc8a1ca *man/cauchit.Rd
 d361f0253fb328f70a716c09fd597fdc *man/cauchy.Rd
 9035d92ae411d748c08d35086d5d3be1 *man/cdf.lmscreg.Rd
@@ -257,34 +254,35 @@ a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
 922ebc06682ee2090eb1804d9939ec03 *man/chinese.nz.Rd
 9dc1deb6ea4940257ebab8f072584b74 *man/chisq.Rd
 aff05a422130d8ced689190eec1b09dd *man/clo.Rd
-66677ed162d3e368ad0f330c49467a25 *man/cloglog.Rd
+452920d20020b6be8eb6ead2cdcbdc0e *man/cloglog.Rd
 b1985e33c967fdddf79e10cbb646b974 *man/coalminers.Rd
 e492f5f148514df05cc4bf101b7505e2 *man/coefvlm.Rd
 1409b01c52bad85c87e9740fb003699a *man/concoef-methods.Rd
 e9a2bf379aac3e4035b8259463a5374b *man/concoef.Rd
 e9cef803313f5a964f99b76995dd235f *man/constraints.Rd
 523567ea78adcaaeab2d9629b2aa2cf2 *man/corbet.Rd
-d90f189cfb8abe5e452f220b59c8ab3d *man/cqo.Rd
+0a020921c3d1686d817fc73eb9067cff *man/cqo.Rd
 8b1b3a39d15fe353a7eceec9f6a327d4 *man/crashes.Rd
 ca3db2c26abb8120651e1d179ac6fbb3 *man/cratio.Rd
-d7fe2dd88f14e6c9a3bc2fc1f7f2211a *man/cumulative.Rd
-36ffbf6456fae216d7c2eb26ee4c81bf *man/dagum.Rd
-97868e30408a4a35750f9692f5e87b68 *man/dagumUC.Rd
+db26c5eb26f1a3cc591502bca797489f *man/cumulative.Rd
+a7ccaa9a82bc79f77514dca45f2d1100 *man/dagum.Rd
+12192f19751804a540e6d0852e29726c *man/dagumUC.Rd
 8fa6a29bde444a45be31b3d8979afc00 *man/deermice.Rd
 dbebc9542906034905fe1137e86a1256 *man/deplot.lmscreg.Rd
 0e0f2e7368fa906e837d8432bb3cfb36 *man/depvar.Rd
 bffbb780b54bd3c8c76cf546ec87e4a0 *man/df.residual.Rd
-2e4c60a120c5a942ba0c0efe8037ae5b *man/dirichlet.Rd
+276aebb1ed4a71af9f9096e9f9c4515d *man/dirichlet.Rd
 6ea8579fe8a75bec917b2c26019c9e0a *man/dirmul.old.Rd
-a9d177e01da25b52fe69fb04950436b3 *man/dirmultinomial.Rd
+7a63063be35f8510ea5198556bf1c192 *man/dirmultinomial.Rd
 ed927db10e5cf69502d5485f300a9aa7 *man/double.cens.normal.Rd
-8a470177087d891a5c58e512acc0133f *man/double.expbinomial.Rd
-9c2ddeb18b92c9c5db1c7126f8abb21a *man/eexpUC.Rd
-64ff48f7c2f32c485bd3c523f0263421 *man/enormUC.Rd
+7557104d36b3087ed4d34345bdab7017 *man/double.expbinomial.Rd
+1da4d63047f620bd38bc5fadf56ebfaf *man/ducklings.Rd
+90481ad7be6cb76a82e99694a2a8e016 *man/eexpUC.Rd
+92007c408a76e89f46e756eba4724a44 *man/enormUC.Rd
 ca3e766bd344902d3b8bf05c65d6c12b *man/enzyme.Rd
 980efa41e75a65ef1c0a8ccf943f6398 *man/erf.Rd
 bce699d9d485230ad940142978689709 *man/erlang.Rd
-537ee9a86645761b5e71629458fa9edb *man/eunifUC.Rd
+b557620d84ef23c76ac3012a8fc7c35d *man/eunifUC.Rd
 cb83f77886603d8f133964c227915d08 *man/expexpff.Rd
 772ca8da2a38dbc5a2ffcb2138f91368 *man/expexpff1.Rd
 eccfa33017118bc7314ef168695a595e *man/expgeometric.Rd
@@ -295,92 +293,94 @@ f39dd0be93d3e24eda78f08310ff4b2f *man/expgeometricUC.Rd
 e51211ad603eeecbe72cd7f6db0e76e0 *man/explogff.Rd
 4e490ef9e08ab74a3af274a720a988d3 *man/exponential.Rd
 f3cca02f31b091259c7a8cf690f93148 *man/exppoisson.Rd
-51ab1a41f49477bb283fe56d97cdbcf6 *man/exppoissonUC.Rd
-9a0ac8c5f8e7cc3d5fe05e1f937944ed *man/felix.Rd
+79f43e2f29b5cca093569fd81aea3abe *man/exppoissonUC.Rd
+0712cad8a071a24a0676bbea9b09094c *man/felix.Rd
 c5d0b237e64605d008502da6b8f4f64c *man/felixUC.Rd
 09fc6553edb037bc708396a30fe3c8f2 *man/fff.Rd
 741f6474d688a5bc6ed61042d9a12eb6 *man/fill.Rd
 b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
 5fd279ebc2d6ec3df74557cdca6940c0 *man/fisherz.Rd
-1dd130b6f110b2d9ab9bbe8f7439ac08 *man/fisk.Rd
-8215ca60f756bf8f9f2e3b404741fbd7 *man/fiskUC.Rd
+7f7753b3325004cdfcc7cc145115fc99 *man/fisk.Rd
+5966dbc9e396bd3cbb15b2650d885177 *man/fiskUC.Rd
 c75d3ae0a8669fed4a71f54b8be64266 *man/fittedvlm.Rd
 742b72298fd6b2ca944812681ad625a6 *man/flourbeetle.Rd
-cd73efab4c3e718d1a77a603eb5e341c *man/foldnormUC.Rd
+c0269f789f9739dc6aeeb20b446ae751 *man/foldnormUC.Rd
 3909f1a56c381d71501b6fde8d6647fe *man/foldnormal.Rd
+a1e9f04937cb86ba7027d812faabfe3d *man/foldsqrt.Rd
 7af865ab486ea1d5d043bdef4bbf81cc *man/frechet.Rd
-537fb4f91167bddf5e76d732b9c4ad38 *man/frechetUC.Rd
+dabb4b7cdd3422f239888fb85ca5a70b *man/frechetUC.Rd
 cad07bc11ec21b13ecdbc3b93ec8efc0 *man/freund61.Rd
-17c995a0692e2f600397ade32fcd6399 *man/fsqrt.Rd
 c4aea59df1932e36cd6fb2ec38110e6d *man/gamma1.Rd
 6b32b9c30d5243afb42c0e403e70f842 *man/gamma2.Rd
 c173815d95bd553fa952911bd2ca71aa *man/gammaR.Rd
 3558584dfba54663dc4de34e21cc9aa9 *man/gammahyperbola.Rd
 edd2c4cefb99138667d2528f3d878bad *man/garma.Rd
 e0fdd50e95e43075ac79c911f05c0b61 *man/gaussianff.Rd
-a3a18ab32413faddd08a064dc1a07d9b *man/genbetaII.Rd
+4332a0e0d6a9585eb5a222e923f03464 *man/genbetaII.Rd
 59fb27b205e8ff10daca7d8d37a5d3f1 *man/gengamma.Rd
-a0a3f2e41580717ca6abb5b9fa1811eb *man/gengammaUC.Rd
-efe7d101e0303a53133b5b2dfcc21c94 *man/genpoisson.Rd
+588e10d5c3fd9ff745c679435c5f2457 *man/gengammaUC.Rd
+231a6af41c4b7ed78907ffb0542cac18 *man/genpoisson.Rd
 15429ac99e67921a77cb78e47210d7fc *man/genrayleigh.Rd
-65c6a7b53c50b4e20c9c9b2acfec6d0a *man/genrayleighUC.Rd
+2b8ec736188410b1502ce23ba1852463 *man/genrayleighUC.Rd
 94c6189883bf1848735e23156e25cdc0 *man/geometric.Rd
 ea16a72ebd8739cd2133e91fd9c92662 *man/get.smart.Rd
 d89a22500e2031841b7bcfa1d8607d44 *man/get.smart.prediction.Rd
-333a904359456c8b2e0d8054aa6ae3a7 *man/gev.Rd
-838c81d8d6c94f4f3ae49df0b25d1cfa *man/gevUC.Rd
+7d533bf53d40503606dda3a614245aa1 *man/gev.Rd
+0496867739918b68919e42a4018a338c *man/gevUC.Rd
 fd070015282f2cca2b0a4b8200822551 *man/gew.Rd
-711704243b30d0270d3ac2a51e2768a8 *man/golf.Rd
-5e388a2ffa43825bb7ff93d9344385e2 *man/gompertz.Rd
-f2d5a04084d6e07fba5d10115a20f63d *man/gompertzUC.Rd
-4d6b5b18dc48d7884f978c040d2ac4cd *man/gpd.Rd
-abb05712cc0126954637a4aeacc603e2 *man/gpdUC.Rd
+598ef24d82351a3cb69dd2a7a482ea4e *man/golf.Rd
+9a635d01c2a0f08b71517df675b20a92 *man/gompertz.Rd
+8170cb9545cf35f1768db069b13a893e *man/gompertzUC.Rd
+7ec773041e29285cfe05226d6d58a30e *man/gpd.Rd
+9cbfd18331d52c4fb66f0221d76be01f *man/gpdUC.Rd
 7e50fed7b6ffe72b14e243fcc601fc50 *man/grain.us.Rd
-87ec862c14d795b891259f1e4af22946 *man/grc.Rd
+6e28498b6d44f47f2663a6be72f68529 *man/grc.Rd
 00bd52370e6b9e28b1ec106c6ecb2b09 *man/gumbel.Rd
 bd6be76e82363793b9186e55d0e35bd0 *man/gumbelII.Rd
-24cf151e3c905d76c581e6bf4ef4d87d *man/gumbelIIUC.Rd
-6e8fe2f3bce2e1f173f97fcd5f25d38d *man/gumbelUC.Rd
+5099d1835eebc1b4610481e77463a50c *man/gumbelIIUC.Rd
+6a66a220a209ae6d1c7eb0bf57f59671 *man/gumbelUC.Rd
 fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
 d5ad348b7727127369874c7e7faf49bd *man/hatvalues.Rd
-1fcc98c5f0e2cc306ef01b7367f3acf8 *man/hormone.Rd
-8ef9d44522eaef45b284b7f98794d48b *man/hspider.Rd
+2be497a8d77472f00279d19f735863b5 *man/hormone.Rd
+93557c7aca25514dc023773bdd045d76 *man/hspider.Rd
 f4fc4645d2d190ef9b82cce1ee8b29d2 *man/huber.Rd
-8dce67314ab9d642694d267ea911c6f4 *man/huberUC.Rd
+bddbb4682e3ee5c97f116acfc15d3f3f *man/huberUC.Rd
 d3df700bb2a4f9ae85b13abe7ffea123 *man/hunua.Rd
 592f01af00d4309ecb01ed58b764e12e *man/hyperg.Rd
-77a4c2eb25f5db0e5fe8fb885de5bf38 *man/hypersecant.Rd
+e3a9765eba431e1f55e2fdc11ff52b4b *man/hypersecant.Rd
 2bf15af91bb331e94b94dd69050589c0 *man/hzeta.Rd
-db89dbd9462622d0e70f1648fd4ccfcd *man/hzetaUC.Rd
+04198bb4e2bf6a230e17b4e84251887f *man/hzetaUC.Rd
 c4b8cf96eae282e0746bf8126231a7f5 *man/iam.Rd
 c2796439b1c32144c3a1ffcbd7f6da72 *man/identitylink.Rd
-5a54823ff9d06736a8616aa6642a3b50 *man/inv.binomial.Rd
+857cbf6f8c5970a18867fe560f275f6f *man/inv.binomial.Rd
 745b6c5557776c23bed67b268f03f432 *man/inv.gaussianff.Rd
 ef005dcdf1e63aa98280b927adcb7820 *man/inv.lomax.Rd
-16fab00f1fdf4a2ec604edc74245d10d *man/inv.lomaxUC.Rd
-cdcbd3ab8696b74085b082ec0296377a *man/inv.paralogistic.Rd
-a72ba9b4b12830c8ea31a64a33949d20 *man/inv.paralogisticUC.Rd
+4492e4a4f91d5fe7d4ec75a128bf4e07 *man/inv.lomaxUC.Rd
+43bff747dfa6b3c2af61853823f5b0da *man/inv.paralogistic.Rd
+6f740a890a174ff4ff3879fa8719ec58 *man/inv.paralogisticUC.Rd
 a501c3d3de4a744a0e0cdbc0673b543d *man/is.parallel.Rd
 e68a1f19e55cd95da21eec0b119c0ad8 *man/is.smart.Rd
 1b33dcd08e9f444146fb7fe03a425add *man/is.zero.Rd
 5cf973ee22fcfd1442e61458a9d91ce9 *man/kendall.tau.Rd
 149c759079151bd06084810c29f6c72c *man/kumar.Rd
-2e07c2e87f84e59aac2c1d4d6d7a3789 *man/kumarUC.Rd
+255a587274163051c7c5e81b79bb24cd *man/kumarUC.Rd
 1bcedd3ac3a0c7467e5dee8ba1de9ace *man/lakeO.Rd
 decbd103cc5311735e70d906d170c742 *man/lambertW.Rd
 e80a85ec4d067a1549cc8249666f75c2 *man/laplace.Rd
-1e0d24321650e214570c5ee3b703a261 *man/laplaceUC.Rd
+55f7da75a7695c5f00b10d600711bab9 *man/laplaceUC.Rd
 16b21ecf83bb8fce76079502877b2fbd *man/latvar.Rd
 2cd5151baff29f9d8dd996dc48293301 *man/leipnik.Rd
 2e88465ad75446bbbccf208661193a8c *man/lerch.Rd
 8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
 632c83ea2a7b229a64a4679f9fa6b52f *man/levy.Rd
-d375ce578b139d336603f32c4b3f4512 *man/lgammaUC.Rd
+d3fb68f03d6cc946da6b48772bea3297 *man/lgammaUC.Rd
 745ab1fea005b7572910ae5919111054 *man/lgammaff.Rd
-66acbe44a180b5adb8fa0c5ea4897a18 *man/lindUC.Rd
+1bb4af539f983579a19c180c3ab29aec *man/lindUC.Rd
 271536a592dedaff73d9cde20c844d76 *man/lindley.Rd
+53b900fd7a3bc5a1f4ff6a9b9353d4e9 *man/linkfun.Rd
+79a20f167d06958b953c5a7a8dfe16f0 *man/linkfun.vglm.Rd
 20873e71a07de6b42d07fc6e0008ea05 *man/lino.Rd
-0729b015342bba6152263c4ff9b07b8f *man/linoUC.Rd
+f56802c0fe3ec1b61cd313c370b9ff58 *man/linoUC.Rd
 b5dfa4faa955b15ebade0a3bdc8f93fe *man/lirat.Rd
 1ecc473854215d5c5209ea54ad206370 *man/lms.bcg.Rd
 194627e9dc632ec82df59b116971582a *man/lms.bcn.Rd
@@ -394,8 +394,8 @@ e956c4aae749e9034b7cf7fdf8661a64 *man/logc.Rd
 8c871e5697ed43662cd313fc777c2bcd *man/loge.Rd
 20cc0c73ee555790179879533cb526f7 *man/logff.Rd
 12d3a7e35301ecb632191ccf31a63296 *man/logistic.Rd
-bb38e0972a038145ee81a2b28dea5d75 *man/logit.Rd
-15e03c1d93d5eef749c03ecb446945b3 *man/loglapUC.Rd
+753257abec4546ba43587133aa77dd6d *man/logit.Rd
+8822ba593955e90e63a8779aaf74d29b *man/loglapUC.Rd
 0f6dd1a9c0fc77dd6521af733693f52e *man/loglaplace.Rd
 49d5183ac04d29b5427b9159fa101dc3 *man/loglinb2.Rd
 a569b31d918209e8b54a62e8594a3268 *man/loglinb3.Rd
@@ -403,19 +403,19 @@ f5f48817604ad9b59304d4fb571359dd *man/loglog.Rd
 a56f1a0e81c3dfdc8620c4cef1b87450 *man/lognormal.Rd
 e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
 ad3e8f3b35bfbd792e8a8cb6105a2121 *man/lomax.Rd
-1c4a4a2ce7661905273c47b4d8b6f898 *man/lomaxUC.Rd
+dbc62e15528097b42fb64d49be5f22f3 *man/lomaxUC.Rd
 ac49f1d5575295a237328c2de3cbab10 *man/lqnorm.Rd
 fc9ca61a4c495cf650cba5a458b0dae1 *man/lrtest.Rd
 f0a38f0b82c1525dcd51687a2f2768c1 *man/lvplot.Rd
 7dcf0051720ee4587304e819ecc8de71 *man/lvplot.qrrvglm.Rd
 16b238586876d84bad0a1420402b5718 *man/lvplot.rrvglm.Rd
 c5760c3960748f906230ded119478271 *man/machinists.Rd
-eb7e6bf84eead25f006dc2fb6bfa55f7 *man/makeham.Rd
-053679132aed00d872f73cc2954defee *man/makehamUC.Rd
+3c2901cca3e665cc792cfbc5ca9c260d *man/makeham.Rd
+7785dc7e94e63e94e688d9553a9c7b2a *man/makehamUC.Rd
 583f3f406844c550079d2592ecba0c25 *man/margeff.Rd
 b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
 b2f1aa9cecaec318a14cc5d4fbb20d67 *man/maxwell.Rd
-5eee0079954bf17587bc495e45cc4c7f *man/maxwellUC.Rd
+c7fcbd341df77f76494a92836715789a *man/maxwellUC.Rd
 bd8250aaa1bc17c017c0b201642882dd *man/mccullagh89.Rd
 c007d94fac5c46a26baae899a04aaf9d *man/melbmaxtemp.Rd
 4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
@@ -429,23 +429,24 @@ c007d94fac5c46a26baae899a04aaf9d *man/melbmaxtemp.Rd
 764cafd682a3364a495cdf243e3a528e *man/multilogit.Rd
 d2ecbe308776f1e5065b0399959e2d99 *man/multinomial.Rd
 c3248f9d509aecb0726bd0e6e36a13d4 *man/nakagami.Rd
-15f93d300e50fe4c89470dccbc1b9fd8 *man/nakagamiUC.Rd
-892ee6d069216d6568be506a7460c1c4 *man/nbcanlink.Rd
-798f2e547a94356359c3d50a57ccef17 *man/nbolf.Rd
-e707b37436b27c43ce07b77492e4fde2 *man/negbinomial.Rd
+61319d756fcb8509696cc1aa55ae4ed2 *man/nakagamiUC.Rd
+7669f124f04f2912a3b004d509f9d15d *man/nbcanlink.Rd
+869ec0706195a833c57365fc8507c1bf *man/nbolf.Rd
+e83e0c32f33d41bd3c3d6816d81acb39 *man/negbinomial.Rd
 01e4d3c6a45020bef55cbadbad8388d3 *man/negbinomial.size.Rd
 14c4a7db111d0d9f41e5a810a3afdea2 *man/normal.vcm.Rd
-e03710346340cd0b2b8bb818110f8c62 *man/notdocumentedyet.Rd
-d361e050435d7a4e64474487ecfd782c *man/olym.Rd
+9872fa02f51e95fc254c1ed7ce95df69 *man/notdocumentedyet.Rd
+5e590acdda3ff0a9e2df0db8d233f848 *man/nparamvglm.Rd
+98b83e406ea1968ba3e8b17d0933b2cf *man/olym.Rd
 858c73ce3c458d33e5151342a4e36707 *man/ordpoisson.Rd
 025c5545a37dd996931ea7d2b42211b5 *man/oxtemp.Rd
-687d43f8b77241bea9e7cbee86333fdb *man/paralogistic.Rd
-73228cd851fcf468b1fe1ff209ef5eca *man/paralogisticUC.Rd
+2fdefe9211b855ae8a00e0ec0f88fe35 *man/paralogistic.Rd
+383805a5130a512c207a6a30c28553d3 *man/paralogisticUC.Rd
 b8a1bd0580460ec6155b7c7bb2dae503 *man/paretoIV.Rd
-bb67a2455bfecfa9b6244178a15ced06 *man/paretoIVUC.Rd
+9e30cad5872ffef80576a429e37cdaca *man/paretoIVUC.Rd
 c0c60830c70e697aeab8bc6d11472b78 *man/paretoff.Rd
-97cf8349af611f4a6acf10e445e6587e *man/perks.Rd
-3b6b40a1a031e3158efb9ab7c2760eb2 *man/perksUC.Rd
+28a8a9fa1e219d71dcb68cfdb6f88d1b *man/perks.Rd
+a0d64aa4469a9ca70fcfa4e5af26956a *man/perksUC.Rd
 60fac0e03c8dce88e04e2c3f6def20b9 *man/persp.qrrvglm.Rd
 a38168dd57b4be503cf47732714e441b *man/pgamma.deriv.Rd
 8e0120c68b69d0760218c483490aed8e *man/pgamma.deriv.unscaled.Rd
@@ -460,18 +461,18 @@ cea29349aed21cbaf8c70f81b7900b15 *man/plotqrrvglm.Rd
 606c4d8331ff8e0e4241f0284aba98cd *man/poisson.points.Rd
 8c7d77fdf6933ab63d412be61e3fa0ec *man/poisson.pointsUC.Rd
 8d1096d9bfeee36841be53ebe7300e49 *man/poissonff.Rd
-c0578de27756a8b6912b7940f2de96e5 *man/polf.Rd
+035cdf64257014f6ebc1a4d553a4037a *man/polf.Rd
 696c74487d4cebf0251299be00d545c7 *man/polonoUC.Rd
 2f4dfc6a802a52da2e14e9789e0170ae *man/posbernUC.Rd
 a746161f043ec5c5517df4b9cf71501e *man/posbernoulli.b.Rd
-de03a99d1f36509f75b4a428eb36c76b *man/posbernoulli.t.Rd
+1ecd67b130cd5c4f6d2d3066fdbe849b *man/posbernoulli.t.Rd
 936b86f4b44e438536136d1aec313be4 *man/posbernoulli.tb.Rd
-ca1949d75cb146d17b16d46009f55b9a *man/posbinomUC.Rd
-a0ff19c3e87fa3697f2d3a48a4230473 *man/posbinomial.Rd
+c2c82f9a71f8a7d20e991dee48a9c734 *man/posbinomUC.Rd
+aab909e407aa248772db0235e64890dd *man/posbinomial.Rd
 dc19e3d023a2a46c670e431a2cc853e0 *man/posgeomUC.Rd
 2963a956fa63f0bd9452b10b432d4fc8 *man/posnegbinUC.Rd
 d1594d0598d420affef6f14a1c263685 *man/posnegbinomial.Rd
-7176035d384054db426d3f3322429372 *man/posnormUC.Rd
+45b528182d1c01bc352dea7b84fd7671 *man/posnormUC.Rd
 e130fade4adc7216d9d825d73cf83dd6 *man/posnormal.Rd
 137d3986fcbad41bf77c10585dace0b0 *man/pospoisUC.Rd
 02066c793ac6cc88cdcb14ceb9b67fcb *man/pospoisson.Rd
@@ -488,105 +489,105 @@ ab1399d5d5f71707fd46960dc3efad04 *man/put.smart.Rd
 8f4e6ebea74037334377e346c5b476f6 *man/qrrvglm.control.Rd
 0b4cf628cd3e15b0668ae4ddae4d3ee6 *man/qtplot.gumbel.Rd
 19419758045a8282b21c6c7a8412a725 *man/qtplot.lmscreg.Rd
-2d496ded26329ff563f7d838c1f6a2cd *man/quasibinomialff.Rd
+bf8b2681beaeae00d54c8cb5422ad069 *man/quasibinomialff.Rd
 1dbf7bc4c97a7aafebcd736cf1baddbb *man/quasipoissonff.Rd
 bbde69d1bad346cd4ad04763c96d6ffe *man/qvar.Rd
 2ff61f599fb26b31315233d793fdded4 *man/rayleigh.Rd
-45b293604a0e71f14b2dad2976d7b845 *man/rayleighUC.Rd
+a95c0df100dedc0b4e80be0659858441 *man/rayleighUC.Rd
 6c45f58f39a63abc2ce8a0923c75cecc *man/rcqo.Rd
 97b7c30ea27ac4fa16167599c35b136e *man/rdiric.Rd
 585af0deb3deb7b61388d6d4557994d8 *man/rec.exp1.Rd
 64ea5646e75515a8b40fbd136fa6065e *man/rec.normal.Rd
 49abf27f1c088a43cda71f0723cf188b *man/reciprocal.Rd
 a56ddce8598af2320fdadb94c42a9b24 *man/rhobit.Rd
-e17b5680243d2c545139e60ff084ab47 *man/riceUC.Rd
-728fcd45a64fbe92638143f6b1800038 *man/riceff.Rd
+d907e0bbe40b4fb02b0763ab6076309e *man/riceUC.Rd
+85498654134f98f8aa887bed07b4985a *man/riceff.Rd
 9dd5a151bfc05adcce0ae88a02eb08a8 *man/rigff.Rd
 0e12c48578228c300e8c04ab3b08c04a *man/rlplot.egev.Rd
 3c6afb0af10ae003dfa8cf9caa567d9b *man/rrar.Rd
 21af7f47c09e9758460cbf6d2ebf79cc *man/rrvglm-class.Rd
-b95a04698f6a2a7163a03717d72f7dc0 *man/rrvglm.Rd
-cf46faf7bd3cb7bbe65811130f78084f *man/rrvglm.control.Rd
+78c5e9f5ae598e17f06957873e645c96 *man/rrvglm.Rd
+71e3f19a37b6f429458eb9060f5e2ef4 *man/rrvglm.control.Rd
 eb0e4a0a8b0c63cd0c17120e9ca8df53 *man/rrvglm.optim.control.Rd
 ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
 b60106c185ce93eb2c09bc34d1f7b349 *man/s.Rd
 3ebe2abf58080c4588a912c695adae77 *man/sc.studentt2.Rd
-e5c019ffe15b61578ec4c5ed894d70ea *man/sc.t2UC.Rd
+114f55f02750721179c9fc78d93f686c *man/sc.t2UC.Rd
 c3096134b4f765a7d1d893fb9388488b *man/seq2binomial.Rd
 9985ea15444cc317e3e8fc2aad7200da *man/setup.smart.Rd
 451a726465c8e82555ba50a857e86ce0 *man/simplex.Rd
 f158e6c60a4e6b6e13f2a9519515a021 *man/simplexUC.Rd
 41af17badd0ef1b17cee591a35d46a12 *man/simulate.vlm.Rd
-8b660b0d990b62c07cb5222e5966a1a9 *man/sinmad.Rd
-702d8c7998205774dde5a93d2e5a49fe *man/sinmadUC.Rd
+86da93168db7b3bf3153e7b0eca85439 *man/sinmad.Rd
+9a4c16a6d079f7d9e5c22914e30497dc *man/sinmadUC.Rd
 5327f9644795a6ed4e1909159156b656 *man/skellam.Rd
 2424940e3cff6d5a3ddd0ee99565ea39 *man/skellamUC.Rd
 b62da6a60b01916a10d691e980253bc0 *man/skewnormUC.Rd
 3797084c4e552d460e8b3942a661260a *man/skewnormal.Rd
 9f34bfb220e6d0400971a1efa5db28c6 *man/slash.Rd
-d36053a053c3cdf9619cbc7b1f27f3bc *man/slashUC.Rd
+9fc90a85fdd63c0b3c49203f5e3d776f *man/slashUC.Rd
 21bada3a13aca65ba49fb28127575144 *man/smart.expression.Rd
 5726ef8bb900532df62b24bd4b7b8fe4 *man/smart.mode.is.Rd
 3d5d3a55f66ef8048b446da063e36ceb *man/smartpred.Rd
 098bc8b943b6ae2e0de9a4da57fcfd22 *man/sratio.Rd
 0c48da9ab33eb24273c6348320a64f64 *man/studentt.Rd
 0258a94ee53da230fb2aea74fd90192a *man/tikuv.Rd
-18fb4965cd111cd04fd37a8a8ba1cde2 *man/tikuvUC.Rd
+ccaa57b076049fdf3cee1c321a2ab456 *man/tikuvUC.Rd
 5fbf542c18e27e990c98bacedd614a39 *man/tobit.Rd
-2b4e875a4415043bf0cd019e71e955cd *man/tobitUC.Rd
+5130a86e60a3b1010b1364155a1afdd0 *man/tobitUC.Rd
 b70afa170b0cf98a6c2a9eea9dc58483 *man/toxop.Rd
-5a424c4e215899bc18b87099fcaf98e1 *man/triangle.Rd
-b35739c390fd5566b8851cd070b09492 *man/triangleUC.Rd
+9b6a285a017b9928ae92a76eaf9e502d *man/triangle.Rd
+4b120eb41d1983a4afbe2b45793dc11e *man/triangleUC.Rd
 1d13e92969384eebec80c2b5901bc5db *man/trplot.Rd
 c786330c607d69d19e59fc3823d1e2f2 *man/trplot.qrrvglm.Rd
-d77a2419400b9ae1059949803b8a1dd2 *man/truncparetoUC.Rd
+aeaf42ac6e475f1dc3f180450d56c2ee *man/truncparetoUC.Rd
 1d47c3a8f732ea01782c7e0b9929a921 *man/truncweibull.Rd
 50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
-db1902b011f19b59642d53797848dcc8 *man/undocumented-methods.Rd
+c21d431f9341336ff44cc7019e755c98 *man/undocumented-methods.Rd
 2fd783dbf5c2dbcb81727fe479729163 *man/uninormal.Rd
 f787bf505e7e68f5f16a49f48abb9bcb *man/venice.Rd
 215970e9b9824a503e8984e432c5c924 *man/vgam-class.Rd
-6db59f46bb2fbdbd6329f07498eca6d5 *man/vgam.Rd
+a04996c86d14b710cac6615958b50caf *man/vgam.Rd
 ea3fe248b860921783367037c8302c49 *man/vgam.control.Rd
 d11e5c5279c115678bb103f5b4575938 *man/vglm-class.Rd
-c29f7ab8b46a70949458bbc9b2412c69 *man/vglm.Rd
+042568190fce50db76f5574b790e36e1 *man/vglm.Rd
 c21cd55efce9d242cbe555cb65aea5e3 *man/vglm.control.Rd
 8d9fa0cc290e49e459947c38c292df4c *man/vglmff-class.Rd
-d1e31ea42a122762891de9a8589e2a4e *man/vonmises.Rd
-7787a423c41dec21ed7c4440288ef9b7 *man/vsmooth.spline.Rd
+95420c89f2280b447cbd7784f83e7454 *man/vonmises.Rd
+25b2ef45238e3f61e82dcf52f3d17090 *man/vsmooth.spline.Rd
 c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
 9b9bdfbbf8060eb284c84e8ed9273154 *man/waldff.Rd
-46cc302f6a200187ec753320ff6381a3 *man/weibullR.Rd
+9e36f5a354e39e4d645e105c7252ad00 *man/weibullR.Rd
 e41e54f8623a002d20e55df65c5b6a87 *man/weightsvglm.Rd
 3557b17f6054a1699cb653b36f6d1a37 *man/wine.Rd
 f5a3078b689d511325cb1dc0fd4e21f3 *man/wrapup.smart.Rd
 622f0105b04159f54fcfb361972e4fb7 *man/yeo.johnson.Rd
 ebfff81b0f4730417de95f80b7c82c41 *man/yip88.Rd
 225fcd19868f17b4a5d2590e834cb888 *man/yulesimon.Rd
-c4ec36c55401ffa660f63e3a03900465 *man/yulesimonUC.Rd
+ef96177f3ee5b07478b717529111adea *man/yulesimonUC.Rd
 ae671324c0f93f66adc72f053ef9ebd9 *man/zabinomUC.Rd
 87b0b38fe7357a2259edc9f1159add84 *man/zabinomial.Rd
 7d5df5fee6f78c5cf37faaf71adbbb91 *man/zageomUC.Rd
 925e2c8e227ffb6a26192aeeb1fd4f28 *man/zageometric.Rd
 78eef8b541d039b00e9990ff758e53e9 *man/zanegbinUC.Rd
-2c7cf46a95acba72a8d4315e057a4de0 *man/zanegbinomial.Rd
+7292195daf3dd8898a1eb971f9f46d21 *man/zanegbinomial.Rd
 b4bcb3a52a6e60efbdaa5d3cfed6fbf4 *man/zapoisUC.Rd
-9fddb7dcd81ef0e4d6777a4ae2a56bff *man/zapoisson.Rd
+0122299a628d1aea9cf560d1470d1367 *man/zapoisson.Rd
 41b375aed0074b0d0e87b2913685cda9 *man/zero.Rd
 7985338d08e88fa23cce9cc0a09724b6 *man/zeta.Rd
 e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
 648342ad0677587e55e4f92d906d0d42 *man/zetaff.Rd
 bce8783175ca63f89475e705b2fb1709 *man/zibinomUC.Rd
-476f5935d0a6fcbe67f6e8cb39509a35 *man/zibinomial.Rd
+ae0388e04ce39367e9c14bf1ad39ef06 *man/zibinomial.Rd
 7b1d2ee37f339b9a218f1db4abb30cdd *man/zigeomUC.Rd
 8de969235239ce10332c2b91304931f5 *man/zigeometric.Rd
 025dd2763701ec5b6880bcd6f4a9f35a *man/zinegbinUC.Rd
 87def1c11bb8e7e5f4857a8c7eeda491 *man/zinegbinomial.Rd
 a9b1d67033daa03a9880227187041ae5 *man/zipebcom.Rd
 abfe2e5adf8a4fcd610adccf060e4f45 *man/zipf.Rd
-24ccbcefd8c1d93f609a39a1d29e4c17 *man/zipfUC.Rd
+fd2adf6acc7093de70cb3c16d3819f23 *man/zipfUC.Rd
 0b8c923247c77bffa3dc24440e5d8bae *man/zipoisUC.Rd
-93b8b3cb5ce61536968440f227416f03 *man/zipoisson.Rd
+ce9bd4504bdb369c39394ece70c0beb0 *man/zipoisson.Rd
 f306f4262366ba8c13d31e6afd0e393b *src/caqo3.c
 ec1b60ab786ea922f9c9665ae352b147 *src/cqof.f
 8daac3d03d7cb7a355a4c5ba548c9793 *src/ei.f
@@ -611,5 +612,3 @@ e9187111f5c6ce1e5808bbb3dc088c17 *src/vlinpack3.f
 753359563526a9cd5ebac104dab2d754 *src/vmux.f
 9083b462bcc275ee6dda47e97f1ebf94 *src/vmux3.c
 b19585d2495c46800b0c95f347fe89f9 *src/zeta3.c
-bfa11dbdbff271fb20342560f2bacd53 *vignettes/categoricalVGAM.Rnw
-d7beca978b587625654f981f7dc433d0 *vignettes/categoricalVGAMbib.bib
diff --git a/NAMESPACE b/NAMESPACE
index 802cfcd..db489f4 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -11,6 +11,8 @@ importMethodsFrom("splines")
 importFrom("splines", splineDesign, bs, ns)
 
 
+export(nparam, nparam.vlm, nparam.vgam, nparam.rrvglm, nparam.qrrvglm, nparam.rrvgam)
+export(linkfun, linkfun.vglm)
 export(sm.bs, sm.ns, sm.scale.default, sm.poly, sm.scale)
 exportMethods(coefficients, coef)
 importFrom("stats", coefficients, coef)
@@ -39,9 +41,9 @@ importFrom("stats", simulate)
 
 
 
-export(family.name.vlm)
-export(family.name.vglmff)
-exportMethods(family.name)
+export(familyname.vlm)
+export(familyname.vglmff)
+exportMethods(familyname)
 export(logLik.qrrvglm)
 
 
@@ -292,7 +294,36 @@ dirmultinomial, dirmul.old,
 dtheta.deta, d2theta.deta2)
 
 
-export(cloglog,cauchit,elogit,explink,fisherz,logc,loge,logneg,logit,
+
+S3method(anova, vgam)
+S3method(anova, vglm)
+S3method(as.character, SurvS4)
+S3method(biplot, qrrvglm)
+S3method(biplot,  rrvglm)
+S3method(deviance, qrrvglm)
+S3method(deviance, vlm)
+S3method(logLik, qrrvglm)
+S3method(logLik, vlm)
+S3method(model.matrix, qrrvglm)
+S3method(nobs, vlm)
+S3method(persp, rrvgam)
+S3method(plot, rrvgam)
+S3method(plot, vgam)
+S3method(predict, rrvgam)
+S3method(predict, rrvglm)
+S3method(predict, vgam)
+S3method(predict, vlm)
+S3method(simulate, vlm)
+S3method(sm.scale, default)
+S3method(summary, grc)
+S3method(summary, qrrvglm)
+S3method(summary, rrvgam)
+S3method(summary, rrvglm)
+S3method(terms, vlm)
+
+
+
+export(cloglog,cauchit,extlogit,explink,fisherz,logc,loge,logneg,logit,
        logoff,negreciprocal,
        probit,reciprocal,rhobit,
        golf,polf,nbolf,nbolf2,Cut)
@@ -305,7 +336,7 @@ export(poisson.points, dpois.points)
 export(m2a, 
 erlang,
 dfelix, felix,
-fittedvlm, fittedvsmooth.spline, fsqrt,
+fittedvlm, fittedvsmooth.spline, foldsqrt,
 formulavlm, formulaNA.VGAM,
 garma, gaussianff,
 hypersecant, hypersecant01, 
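
A minimal R sketch of the renamed exports in the NAMESPACE hunk above
(familyname() for family.name(), extlogit() for elogit(), foldsqrt()
for fsqrt()). This is illustrative only, not part of the commit, and
assumes VGAM 0.9-7 is installed:

    library(VGAM)

    ## extlogit() replaces elogit(): an extended logit link for a
    ## parameter constrained to the interval (min, max).
    extlogit(0.5, min = 0, max = 2)

    ## foldsqrt() replaces fsqrt(): the folded square-root link.
    foldsqrt(0.25)

    ## familyname() replaces family.name():
    set.seed(1)
    bdata <- data.frame(y = rbinom(20, size = 10, prob = 0.4), N = 10)
    fit <- vglm(cbind(y, N - y) ~ 1, binomialff, data = bdata)
    familyname(fit)   # "binomialff"
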
diff --git a/NEWS b/NEWS
index e53096e..73afb0a 100755
--- a/NEWS
+++ b/NEWS
@@ -6,11 +6,61 @@
 
 
 
+                CHANGES IN VGAM VERSION 0.9-7
+
+NEW FEATURES
+
+    o   Tested okay on R 3.1.2.
+    o   linkfun() and nparam() are new generic functions.
+    o   betabinomialff() replaces 'lshape12' with 'lshape1' and 'lshape2'.
+        Arguments 'i1' and 'i2' are now 'ishape1' and 'ishape2'.
+    o   ABO() has more arguments.
+    o   Arguments lower.tail and log.p have been added to quite
+        a few pq-type functions (work done by Kai Huang).
+
+
+BUG FIXES and CHANGES
+
+    o   family.name() renamed to familyname().
+    o   Argument 'mv' has been renamed to 'multiple.responses'.
+        This applies to about 10 family functions such as binomialff().
+    o   Argument 'lss' added to betaII(), dagum(), fisk(),
+        genbetaII(), inv.paralogistic(), paralogistic(),
+        sinmad(). Note that the order of the arguments of
+        these functions will change in the near future, and
+        consequently the order of the parameters.
+        The [dpqr]-type functions of all these distributions
+        have had their arguments rearranged.
+    o   All d-type functions handle 'x = Inf' and 'x = -Inf'.
+        Much help from Kai Huang here.
+        Thanks to Ott Toomet for alerting me to this type of bug.
+    o   vsmooth.spline() has two argument-name changes and some
+        reordering of its arguments.
+    o   More p-type functions handle 'q = Inf' and 'q = -Inf'.
+        More q-type functions handle 'p = 0' and 'p = 1'.
+        Much help from Kai Huang here.
+    o   AA.Aa.aa() and A1A2A3() handled 'inbreeding' the wrong way round.
+    o   pposnorm() returned wrong answers.
+    o   ptobit(log.p = TRUE) was incorrect, and there were some
+        other bugs in [dpqr]tobit(); these have been fixed.
+        The 'Lower' and 'Upper' arguments of dtobit() have changed.
+    o   negbinomial() now computes the EIM wrt the 'size' parameter based
+        on a finite approximation to an infinite series (provided the
+        mu and size parameters have values lying in a certain range).
+        This may be time- and/or memory-hungry, but the user has
+        control over this via some arguments such as max.mu, min.size
+        and chunk.max.Mb.
+    o   Renamed functions:
+        elogit() is now called extlogit(),
+        fsqrt() is now called foldsqrt().
+
+
+
                 CHANGES IN VGAM VERSION 0.9-6
 
 NEW FEATURES
 
-    o   All d-type functions handle the 'n' argument the same
+    o   All r-type functions handle the 'n' argument the same
         way as runif(). This was done with the help of Kai Huang.
 
 
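A short usage sketch of the 0.9-7 additions described in the NEWS
hunk above: the new generics nparam() and linkfun(), plus the new
lower.tail/log.p arguments. Illustrative only, with made-up data;
pgompertz() is assumed here to be among the "quite a few pq-type
functions" that gained the new arguments:

    library(VGAM)

    set.seed(123)
    pdata <- data.frame(y = rpois(50, lambda = 4))
    fit <- vglm(y ~ 1, poissonff, data = pdata)

    nparam(fit)    # new generic: number of estimated parameters
    linkfun(fit)   # new generic: the link function(s) of the fit

    ## lower.tail and log.p on a pq-type function:
    pgompertz(2, scale = 1, shape = 1, lower.tail = FALSE, log.p = TRUE)
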
diff --git a/R/Links.R b/R/Links.R
index 61eff8c..2cbb39b 100644
--- a/R/Links.R
+++ b/R/Links.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/aamethods.q b/R/aamethods.q
index 9c6c2c4..7416079 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
index f45ea88..2bbe725 100644
--- a/R/add1.vglm.q
+++ b/R/add1.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/attrassign.R b/R/attrassign.R
index 430ab2c..c1a0f24 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/bAIC.q b/R/bAIC.q
index 26d5294..4742990 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -12,14 +12,6 @@
 
 
 
-if (!isGeneric("AIC"))
-  setGeneric("AIC", function(object, ..., k = 2)
-             standardGeneric("AIC"),
-             package = "VGAM")
-
-
-
-
 
 check.omit.constant <- function(object) {
 
@@ -38,59 +30,56 @@ check.omit.constant <- function(object) {
 
 
 
-AICvlm <- function(object, ..., 
-                   corrected = FALSE,
-                   k = 2) {
-  estdisp <- object@misc$estimated.dispersion
 
 
-  check.omit.constant(object)
+if (!isGeneric("nparam"))
+  setGeneric("nparam", function(object, ...)
+             standardGeneric("nparam"),
+             package = "VGAM")
+
 
 
+nparam.vlm <- function(object, dpar = TRUE, ...) {
+
+  estdisp <- object@misc$estimated.dispersion
+
+  check.omit.constant(object)
+
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
     length(object@misc$dispersion) else 0
 
-  tot.par <- length(coefvlm(object)) + no.dpar
-  ans <- (-2) * logLik.vlm(object, ...) + k * tot.par
-
-  if (corrected) {
-    ans <- ans + k * tot.par * (tot.par + 1) / (
-           nobs(object) - tot.par - 1)
-  }
-  ans
+  tot.par <- length(coefvlm(object)) + as.numeric(dpar) * no.dpar
+  tot.par
 }
 
 
 
 
-AICvgam <- function(object, ...,
-                    k = 2) {
 
-  estdisp <- object@misc$estimated.dispersion
+nparam.vgam <- function(object, dpar = TRUE,
+                        linear.only = FALSE, ...) {
 
+  estdisp <- object@misc$estimated.dispersion
 
   check.omit.constant(object)
 
-
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
              length(object@misc$dispersion) else 0 
   nldf <- if (is.Numeric(object@nl.df)) sum(object@nl.df) else 0
 
-  -2 * logLik.vlm(object, ...) +
-  k * (length(coefvlm(object)) + no.dpar + nldf)
-
+  if (linear.only) {
+    length(coefvlm(object)) + as.numeric(dpar) * no.dpar
+  } else {
+    length(coefvlm(object)) + as.numeric(dpar) * no.dpar + nldf
+  }
 }
 
 
 
-
-AICrrvglm <- function(object, ...,
-                      k = 2) {
-
+nparam.rrvglm <- function(object, dpar = TRUE, ...) {
 
   check.omit.constant(object)
 
-
   estdisp <- object@misc$estimated.dispersion
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
     length(object@misc$dispersion) else 0 
@@ -101,20 +90,15 @@ AICrrvglm <- function(object, ...,
 
 
 
-  -2 * logLik.vlm(object, ...) +
-  k * (length(coefvlm(object)) + no.dpar + elts.tildeA)
+  length(coefvlm(object)) + as.numeric(dpar) * no.dpar + elts.tildeA
 }
 
 
 
-
-AICqrrvglm <- function(object, ...,
-                       k = 2) {
-
+nparam.qrrvglm <- function(object, dpar = TRUE, ...) {
 
   check.omit.constant(object)
 
-
   estdisp <- object@misc$estimated.dispersion
   no.dpar <- if (length(estdisp) && is.logical(estdisp) && estdisp)
              length(object@misc$dispersion) else 0 
@@ -150,35 +134,18 @@ AICqrrvglm <- function(object, ...,
 
 
 
-  loglik.try <- logLik.qrrvglm(object, ...)
-  if (!is.numeric(loglik.try))
-    warning("cannot compute the log-likelihood of 'object'. ",
-            "Returning NULL")
-
-
-
-
   elts.B1 <- length(object@extra$B1)
   elts.C  <- length(object@extra$Cmat)
   num.params <- elts.B1 + elts.tildeA  + elts.D + elts.C
 
-
-  if (is.numeric(loglik.try)) {
-    (-2) * loglik.try     + k * num.params
-  } else {
-
-    NULL
-  }
+  num.params
 }
 
 
 
+nparam.rrvgam <- function(object, dpar = TRUE, ...) {
 
 
- 
- AICrrvgam <- function(object, ...,
-                       k = 2) {
-
 
   check.omit.constant(object)
 
@@ -201,13 +168,6 @@ AICqrrvglm <- function(object, ...,
 
 
 
-  loglik.try <- logLik(object, ...)
-  if (!is.numeric(loglik.try))
-    warning("cannot compute the log-likelihood of 'object'. ",
-            "Returning NULL")
-
-
-
 
   elts.B1     <- length(object@extra$B1)  # 0 since a NULL
   elts.C      <- length(object@extra$Cmat)
@@ -218,6 +178,145 @@ AICqrrvglm <- function(object, ...,
                 (Rank + length(str0)) * Rank
 
 
+  num.params
+}
+
+
+
+setMethod("nparam", "vlm",
+           function(object, ...)
+           nparam.vlm(object, ...))
+
+setMethod("nparam", "vglm",
+           function(object, ...)
+           nparam.vlm(object, ...))
+
+setMethod("nparam", "vgam",
+           function(object, ...)
+           nparam.vgam(object, ...))
+
+setMethod("nparam", "rrvglm",
+           function(object, ...)
+           nparam.rrvglm(object, ...))
+
+setMethod("nparam", "qrrvglm",
+           function(object, ...)
+           nparam.qrrvglm(object, ...))
+
+
+setMethod("nparam", "rrvgam",
+           function(object, ...)
+           nparam.rrvgam(object, ...))
+
+
+
+
+
+
+
+
+
+if (!isGeneric("AIC"))
+  setGeneric("AIC", function(object, ..., k = 2)
+             standardGeneric("AIC"),
+             package = "VGAM")
+
+
+
+
+
+
+AICvlm <- function(object, ..., 
+                   corrected = FALSE,
+                   k = 2) {
+  estdisp <- object@misc$estimated.dispersion
+
+
+  tot.par <- nparam.vlm(object, dpar = TRUE)
+  ans <- (-2) * logLik.vlm(object, ...) + k * tot.par
+
+  if (corrected) {
+    ans <- ans + k * tot.par * (tot.par + 1) / (
+           nobs(object) - tot.par - 1)
+  }
+  ans
+}
+
+
+
+
+AICvgam <- function(object, ...,
+                    k = 2) {
+
+
+  sum.lco.no.dpar.nldf <- nparam.vgam(object, dpar = TRUE,
+                                      linear.only = FALSE)
+
+  -2 * logLik.vlm(object, ...) + k * sum.lco.no.dpar.nldf
+}
+
+
+
+
+AICrrvglm <- function(object, ...,
+                      k = 2) {
+
+
+
+  sum.lco.no.dpar.A <- nparam.rrvglm(object, dpar = TRUE)
+  (-2) * logLik.vlm(object, ...) + k * sum.lco.no.dpar.A
+}
+
+
+
+
+AICqrrvglm <- function(object, ...,
+                       k = 2) {
+
+
+
+
+
+
+
+  loglik.try <- logLik.qrrvglm(object, ...)
+  if (!is.numeric(loglik.try))
+    warning("cannot compute the log-likelihood of 'object'. ",
+            "Returning NULL")
+
+  num.params <- nparam.qrrvglm(object, dpar = TRUE)
+
+
+  if (is.numeric(loglik.try)) {
+    (-2) * loglik.try     + k * num.params
+  } else {
+
+    NULL
+  }
+}
+
+
+
+
+
+ 
+ AICrrvgam <- function(object, ...,
+                       k = 2) {
+
+
+
+
+
+
+
+  loglik.try <- logLik(object, ...)
+  if (!is.numeric(loglik.try))
+    warning("cannot compute the log-likelihood of 'object'. ",
+            "Returning NULL")
+
+  num.params <- nparam.rrvgam(object, dpar = TRUE)
+
+
   if (is.numeric(loglik.try)) {
     (-2) * loglik.try     + k * num.params
   } else {
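
A note on the bAIC.q rewrite above: parameter counting has been factored out
of the AIC*() functions into a new nparam() generic (with methods for vlm,
vgam, rrvglm, qrrvglm and rrvgam), and AICvlm() now applies
AIC = -2*logLik + k*p, plus the small-sample correction
AICc = AIC + k*p*(p + 1)/(n - p - 1) when corrected = TRUE. A minimal usage
sketch, assuming the AIC method forwards 'corrected' through '...' to
AICvlm(); the data frame 'mydat' is hypothetical:

    library(VGAM)
    fit <- vglm(cbind(r, n - r) ~ x2, binomialff, data = mydat)  # hypothetical data
    p <- nparam(fit)             # number of estimated parameters
    AIC(fit)                     # -2 * logLik(fit) + 2 * p
    AIC(fit, corrected = TRUE)   # AICc: adds 2 * p * (p + 1) / (nobs(fit) - p - 1)
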
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 951a472..b7aa8ab 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/calibrate.q b/R/calibrate.q
index c5e43d0..1f0eb7a 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/cao.R b/R/cao.R
index b54cfde..f41501c 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 1955557..1225a8e 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -29,7 +29,7 @@ cao.fit <-
   check.rank <- TRUE
   nonparametric <- TRUE
   optim.maxit <- control$optim.maxit
-  save.weight <- control$save.weight
+  save.weights <- control$save.weights
   trace <- control$trace
   minimize.criterion <- control$min.criterion
 
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 0935a86..5904b6f 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/cqo.R b/R/cqo.R
index bb7f8ee..8d42e47 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index bda3f30..1282312 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -317,7 +317,7 @@ cqo.fit <- function(x, y, w = rep(1, length(x[, 1])),
   nonparametric <- FALSE
   epsilon <- control$epsilon
   maxitl <- control$maxitl
-  save.weight <- control$save.weight
+  save.weights <- control$save.weights
   trace <- control$trace
   orig.stepsize <- control$stepsize
 
@@ -546,7 +546,7 @@ ny <- names(y)
     if (M == 1) {
       wz <- as.vector(wz)  # Convert wz into a vector
     }
-    fit$weights <- if (save.weight) wz else NULL
+    fit$weights <- if (save.weights) wz else NULL
 
     misc <- list(
         colnames.x = xn,
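
Note that the control-list element 'save.weight' becomes 'save.weights' here
and in the *.control() functions further below, so the working weight
matrices are stored on the fit under the new spelling. A sketch, assuming
vglm() still forwards the argument to the family's control function as it
did for 'save.weight' ('mydat' is hypothetical):

    fit <- vglm(y ~ 1, perks(), data = mydat, save.weights = FALSE)
    # the working weight matrices are then not kept on the fitted object
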
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index fd9a613..542290c 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index 55fe5d9..0056821 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.actuary.R b/R/family.actuary.R
index fb502df..6b69361 100644
--- a/R/family.actuary.R
+++ b/R/family.actuary.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -47,24 +47,69 @@ dgumbelII <- function(x, scale = 1, shape, log = FALSE) {
 }
 
 
-pgumbelII <- function(q, scale = 1, shape) {
 
-  LLL <- max(length(q), length(shape), length(scale))
+pgumbelII <- function(q, scale = 1, shape,
+                      lower.tail = TRUE, log.p = FALSE) {
+
+  # 20150121 KaiH
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  # 20150121 KaiH
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  LLL <- max(length(q), length(shape), length(scale)) 
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
   if (length(shape)   != LLL) shape   <- rep(shape,   length.out = LLL)
   if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
-
-
-  ans <- exp(-(q / scale)^(-shape))
-  ans[(q <= 0)] <- 0
+  
+  # 20150121 KaiH 
+  if (lower.tail) {
+    if (log.p) { 
+      ans <- -(q / scale)^(-shape)
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else { 
+      ans <- exp(-(q / scale)^(-shape))
+      ans[q <= 0] <- 0 
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log(-expm1(-(q / scale)^(-shape)))
+      ans[q <= 0] <- 0 
+      ans[q == Inf] <- -Inf
+    } else { 
+      ans <- -expm1(-(q / scale)^(-shape))
+      ans[q <= 0] <- 1 
+      ans[q == Inf] <- 0
+    }
+  } 
   ans[shape <= 0 | scale <= 0] <- NaN
-  ans[q == Inf] <- 1
   ans
 }
 
 
 
-qgumbelII <- function(p, scale = 1, shape) {
+
+
+
+
+
+
+
+qgumbelII <- function(p, scale = 1, shape,
+                      lower.tail = TRUE, log.p = FALSE) {
+
+
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
 
   LLL <- max(length(p), length(shape), length(scale))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
@@ -72,11 +117,32 @@ qgumbelII <- function(p, scale = 1, shape) {
   if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
 
 
-  ans <- scale * (-log(p))^(-1 / shape)
-  ans[p < 0] <- NaN
-  ans[p == 0] <- 0
-  ans[p == 1] <- Inf
-  ans[p > 1] <- NaN
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * (-ln.p)^(-1 / shape)
+      ans[ln.p > 0] <- NaN
+    } else { # Default
+      ans <- scale * (-log(p))^(-1 / shape)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * (-log(-expm1(ln.p)))^(-1 / shape)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- scale * (-log1p(-p))^(-1 / shape)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+      ans[p > 1] <- NaN
+    }
+  }
+
   ans[shape <= 0 | scale <= 0] <- NaN
   ans
 }
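
This is the first of many p/q functions in this release that gain
'lower.tail' and 'log.p' arguments via the same four-branch pattern, with
expm1()/log1p() keeping the tail computations accurate when probabilities
are near 0 or 1. A self-check sketch of the contract the branches are meant
to satisfy:

    q  <- c(0.5, 1, 2, 5)
    p1 <- pgumbelII(q, scale = 1, shape = 2)                      # F(q) = exp(-(q/scale)^(-shape))
    p2 <- pgumbelII(q, scale = 1, shape = 2, lower.tail = FALSE)  # 1 - F(q)
    stopifnot(all.equal(p1 + p2, rep(1, length(q))))
    stopifnot(all.equal(qgumbelII(p1, scale = 1, shape = 2), q))  # q inverts p
    stopifnot(all.equal(exp(pgumbelII(q, 1, 2, log.p = TRUE)), p1))
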
@@ -103,10 +169,6 @@ rgumbelII <- function(n, scale = 1, shape) {
 
 
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
-
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -605,7 +667,16 @@ dperks <- function(x, scale = 1, shape, log = FALSE) {
 
 
 
-pperks <- function(q, scale = 1, shape) {
+pperks <- function(q, scale = 1, shape,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
 
   LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -614,34 +685,89 @@ pperks <- function(q, scale = 1, shape) {
 
   logS <- -q + (log1p(shape) -
           log(shape + exp(-q * scale))) / scale
-  ans <- -expm1(logS)
 
-  ans[(q <= 0)] <- 0
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(-expm1(logS))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- -expm1(logS)
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- logS
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- exp(logS)
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
+
   ans[shape <= 0 | scale <= 0] <- NaN
-  ans[q == Inf] <- 1
   ans
 }
 
 
-qperks <- function(p, scale = 1, shape) {
+qperks <- function(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   LLL <- max(length(p), length(shape), length(scale))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
   if (length(shape)   != LLL) shape   <- rep(shape,   length.out = LLL)
   if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
 
-  tmp <- scale * log1p(-p)
-  onemFb <- exp(tmp)
-  ans <- (log1p(shape - onemFb) - log(shape) - tmp) / scale
-  ans[p < 0] <- NaN
-  ans[p == 0] <- 0
-  ans[p > 1] <- NaN
-  ans[p == 1] <- Inf
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      tmp <- scale * log(-expm1(ln.p))
+      onemFb <- exp(tmp)
+      ans <- (log1p(shape - onemFb) - log(shape) - tmp) / scale
+      ans[ln.p > 0] <- NaN
+    } else {
+      tmp <- scale * log1p(-p)
+      onemFb <- exp(tmp)
+      ans <- (log1p(shape - onemFb) - log(shape) - tmp) / scale
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      tmp <- scale * ln.p
+      onemFb <- exp(tmp)
+      ans <- (log1p(shape - onemFb) - log(shape) - tmp) / scale
+      ans[ln.p > 0] <- NaN
+    } else {
+      tmp <- scale * log(p)
+      onemFb <- exp(tmp)
+      ans <- (log1p(shape - onemFb) - log(shape) - tmp) / scale
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+      ans[p > 1] <- NaN
+    }
+  }
+
   ans[shape <= 0 | scale <= 0] <- NaN
   ans
 }
 
 
+
 rperks <- function(n, scale = 1, shape) {
   qperks(runif(n), scale = scale, shape = shape)
 }
@@ -650,8 +776,8 @@ rperks <- function(n, scale = 1, shape) {
 
 
 
-perks.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+perks.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -664,9 +790,6 @@ perks.control <- function(save.weight = TRUE, ...) {
            zero = NULL, nowarning = FALSE) {
 
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -1027,7 +1150,15 @@ dmakeham <- function(x, scale = 1, shape, epsilon = 0, log = FALSE) {
 
 
 
-pmakeham <- function(q, scale = 1, shape, epsilon = 0) {
+pmakeham <- function(q, scale = 1, shape, epsilon = 0,
+                     lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   LLL <- max(length(q), length(shape), length(scale), length(epsilon))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
@@ -1035,17 +1166,42 @@ pmakeham <- function(q, scale = 1, shape, epsilon = 0) {
   if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
   if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
 
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(-expm1(-q * epsilon - (shape / scale) * expm1(scale * q)))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- -expm1(-q * epsilon - (shape / scale) * expm1(scale * q))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- -q * epsilon - (shape / scale) * expm1(scale * q)
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- exp(-q * epsilon - (shape / scale) * expm1(scale * q))
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
 
-  ans <- -expm1(-q * epsilon - (shape / scale) * expm1(scale * q))
-  ans[(q <= 0)] <- 0
   ans[shape <= 0 | scale <= 0 | epsilon < 0] <- NaN
-  ans[q == Inf] <- 1
   ans
 }
 
 
 
-qmakeham <- function(p, scale = 1, shape, epsilon = 0) {
+qmakeham <- function(p, scale = 1, shape, epsilon = 0,
+                     lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   LLL <- max(length(p), length(shape), length(scale), length(epsilon))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
@@ -1054,22 +1210,55 @@ qmakeham <- function(p, scale = 1, shape, epsilon = 0) {
   if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)
 
 
-  ans <- shape / (scale * epsilon) - log1p(-p) / epsilon -
-  lambertW((shape / epsilon) * exp(shape / epsilon) *
-          (1 - p)^(-(scale / epsilon))) / scale
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- shape / (scale * epsilon) - log(-expm1(ln.p)) / epsilon -
+        lambertW((shape / epsilon) * exp(shape / epsilon) *
+                   exp(log(-expm1(ln.p)) * (-scale / epsilon))) / scale
+      ans[ln.p == 0] <- Inf
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- shape / (scale * epsilon) - log1p(-p) / epsilon -
+        lambertW((shape / epsilon) * exp(shape / epsilon) *
+                   exp( (-scale / epsilon) * log1p(-p) )) / scale
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <-  shape / (scale * epsilon) - ln.p / epsilon -
+        lambertW((shape / epsilon) * exp(shape / epsilon) *
+                  exp(ln.p * (-scale / epsilon))) / scale
+      ans[ln.p == -Inf] <- Inf
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- shape / (scale * epsilon) - log(p) / epsilon -
+        lambertW((shape / epsilon) * exp(shape / epsilon) *
+                  p^(-scale / epsilon)) / scale
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+      ans[p > 1] <- NaN
+    }
+  }
+
   ans[epsilon == 0] <-
     qgompertz(p     =     p[epsilon == 0],
               shape = shape[epsilon == 0],
-              scale = scale[epsilon == 0])
-  ans[p < 0] <- NaN
-  ans[p == 0] <- 0
-  ans[p == 1] <- Inf
-  ans[p > 1] <- NaN
+              scale = scale[epsilon == 0],
+              lower.tail = lower.tail,
+              log.p = log.p)
+
   ans[shape <= 0 | scale <= 0 | epsilon < 0] <- NaN
   ans
 }
 
 
+
 rmakeham <- function(n, scale = 1, shape, epsilon = 0) {
   qmakeham(runif(n), scale = scale, shape = shape, epsilon = epsilon)
 }
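
The Makeham survival function is
S(q) = exp(-epsilon*q - (shape/scale)*expm1(scale*q)), and qmakeham() above
inverts it in closed form through VGAM's lambertW(); entries with
epsilon == 0 are delegated to qgompertz(), now with the tail/log arguments
passed along. A round-trip sketch:

    p <- c(0.1, 0.5, 0.9)
    x <- qmakeham(p, scale = 2, shape = 0.5, epsilon = 0.1)
    stopifnot(all.equal(pmakeham(x, scale = 2, shape = 0.5, epsilon = 0.1), p))
    all.equal(qmakeham(p, 2, 0.5, epsilon = 0),       # epsilon = 0 collapses
              qgompertz(p, scale = 2, shape = 0.5))   # to the Gompertz quantiles
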
@@ -1077,8 +1266,8 @@ rmakeham <- function(n, scale = 1, shape, epsilon = 0) {
 
 
 
-makeham.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+makeham.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1096,9 +1285,6 @@ makeham.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
 
 
   lepsil <- lepsilon
@@ -1482,38 +1668,95 @@ dgompertz <- function(x, scale = 1, shape, log = FALSE) {
 
 
 
-pgompertz <- function(q, scale = 1, shape) {
+pgompertz <- function(q, scale = 1, shape,
+                      lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   LLL <- max(length(q), length(shape), length(scale))
   if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
   if (length(shape)   != LLL) shape   <- rep(shape,   length.out = LLL)
   if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
 
-  ans <- -expm1((-shape / scale) * expm1(scale * q))
-  ans[(q <= 0)] <- 0
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log1p(-exp((-shape / scale) * expm1(scale * q)))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- -expm1((-shape / scale) * expm1(scale * q))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- (-shape / scale) * expm1(scale * q)
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- exp((-shape / scale) * expm1(scale * q))
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
   ans[shape <= 0 | scale <= 0] <- NaN
-  ans[q == Inf] <- 1
   ans
 }
 
 
-qgompertz <- function(p, scale = 1, shape) {
+
+qgompertz <- function(p, scale = 1, shape,
+                      lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   LLL <- max(length(p), length(shape), length(scale))
   if (length(p)       != LLL) p       <- rep(p,       length.out = LLL)
   if (length(shape)   != LLL) shape   <- rep(shape,   length.out = LLL)
   if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
 
-  ans <- log1p((-scale / shape) * log1p(-p)) / scale
-  ans[p < 0] <- NaN
-  ans[p == 0] <- 0
-  ans[p == 1] <- Inf
-  ans[p > 1] <- NaN
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- log1p((-scale / shape) * log(-expm1(ln.p))) / scale
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- log1p((-scale / shape) * log1p(-p)) / scale
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- log1p((-scale / shape) * ln.p) / scale
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- log1p((-scale / shape) * log(p)) / scale
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+      ans[p > 1] <- NaN
+    }
+  }
   ans[shape <= 0 | scale <= 0] <- NaN
   ans
 }
 
 
+
+
+
 rgompertz <- function(n, scale = 1, shape) {
   qgompertz(runif(n), scale = scale, shape = shape)
 }
@@ -1524,8 +1767,8 @@ rgompertz <- function(n, scale = 1, shape) {
 
 
 
-gompertz.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+gompertz.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1538,9 +1781,6 @@ gompertz.control <- function(save.weight = TRUE, ...) {
 
 
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
 
   lshape <- as.list(substitute(lshape))
   eshape <- link2list(lshape)
@@ -1880,8 +2120,8 @@ rmoe <- function (n, alpha = 1, lambda = 1) {
 
 
 
-exponential.mo.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+exponential.mo.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -2173,7 +2413,8 @@ if (ii < 3) {
 
 
 
- genbetaII <- function(lshape1.a = "loge",
+ genbetaII <- function(lss,
+                       lshape1.a = "loge",
                        lscale = "loge",
                        lshape2.p = "loge",
                        lshape3.q = "loge",
@@ -2183,6 +2424,10 @@ if (ii < 3) {
                        ishape3.q = 1.0,
                        zero = NULL) {
 
+  if (!is.logical(lss) || lss)
+    stop("argument 'lss' not specified correctly. ",
+         "See online help for important information")
+
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -2209,14 +2454,14 @@ if (ii < 3) {
 
   new("vglmff",
   blurb = c("Generalized Beta II distribution\n\n",
-          "Links:    ",
-          namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ", 
-          namesof("scale",    lscale,    earg = escale), ", ", 
-          namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ", 
-          namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n", 
-          "Mean:     scale * gamma(shape2.p + 1/shape1.a) * ",
-                    "gamma(shape3.q - 1/shape1.a) / ",
-                    "(gamma(shape2.p) * gamma(shape3.q))"),
+            "Links:    ",
+            namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ", 
+            namesof("scale",    lscale,    earg = escale), ", ", 
+            namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ", 
+            namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n", 
+            "Mean:     scale * gamma(shape2.p + 1/shape1.a) * ",
+                      "gamma(shape3.q - 1/shape1.a) / ",
+                      "(gamma(shape2.p) * gamma(shape3.q))"),
   constraints = eval(substitute(expression({
     constraints <- cm.zero.VGAM(constraints, x, .zero , M)
   }), list( .zero = zero ))),
@@ -2233,7 +2478,7 @@ if (ii < 3) {
         namesof("shape3.q", .lshape3.q , earg = .eshape3.q , tag = FALSE))
 
     if (!length( .ishape1.a ) || !length( .iscale )) {
-      qvec <- c( .25, .5, .75)   # Arbitrary; could be made an argument
+      qvec <- c( .25, .5, .75)  # Arbitrary; could be made an argument
       ishape3.q <- if (length( .ishape3.q)) .ishape3.q else 1
       xvec <- log( (1-qvec)^(-1/ ishape3.q ) - 1 )
       fit0 <- lsfit(x = xvec, y = log(quantile(y, qvec )))
@@ -2363,13 +2608,13 @@ if (ii < 3) {
     temp5b <- trigamma(qq)
 
     ned2l.da <- (1 + parg+qq + parg * qq * (temp5a + temp5b +
-              (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
-              (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+                (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+                (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
     ned2l.dscale <- aa^2 * parg * qq / (scale^2 * (1+parg+qq))
     ned2l.dp <- temp5a - temp5
     ned2l.dq <- temp5b - temp5
     ned2l.dascale <- (parg - qq - parg * qq *
-                   (temp3a -temp3b)) / (scale*(1 + parg+qq))
+                     (temp3a -temp3b)) / (scale*(1 + parg+qq))
     ned2l.dap <- -(qq   * (temp3a -temp3b) -1) / (aa*(parg+qq))
     ned2l.daq <- -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
     ned2l.dscalep <-  aa * qq   / (scale*(parg+qq))
@@ -2396,41 +2641,51 @@ if (ii < 3) {
 }
 
 
-rsinmad <- function(n, shape1.a, scale = 1, shape3.q)
-  qsinmad(runif(n), shape1.a, scale = scale, shape3.q)
+rsinmad <- function(n, scale = 1, shape1.a, shape3.q)
+  qsinmad(runif(n), shape1.a = shape1.a, scale = scale,
+          shape3.q = shape3.q)
 
 
 rlomax <- function(n, scale = 1, shape3.q)
-  rsinmad(n, shape1.a = 1, scale = scale, shape3.q)
+  rsinmad(n, scale = scale, shape1.a = 1, shape3.q = shape3.q)
 
 
-rfisk <- function(n, shape1.a, scale = 1)
-  rsinmad(n, shape1.a, scale = scale, shape3.q = 1)
+rfisk <- function(n, scale = 1, shape1.a)
+  rsinmad(n, scale = scale, shape1.a = shape1.a, shape3.q = 1)
 
 
-rparalogistic <- function(n, shape1.a, scale = 1)
-  rsinmad(n, shape1.a, scale = scale, shape1.a)
+rparalogistic <- function(n, scale = 1, shape1.a)
+  rsinmad(n, scale = scale, shape1.a = shape1.a, shape3.q = shape1.a)
 
 
-rdagum <- function(n, shape1.a, scale = 1, shape2.p)
-  qdagum(runif(n), shape1.a = shape1.a, scale = scale,
+rdagum <- function(n, scale = 1, shape1.a, shape2.p)
+  qdagum(runif(n), scale = scale, shape1.a = shape1.a,
          shape2.p = shape2.p)
 
 
 rinv.lomax <- function(n, scale = 1, shape2.p)
-  rdagum(n, shape1.a = 1, scale = scale, shape2.p)
+  rdagum(n, scale = scale, shape1.a = 1, shape2.p = shape2.p)
+
+
+rinv.paralogistic <- function(n, scale = 1, shape1.a)
+  rdagum(n, scale = scale, shape1.a = shape1.a, shape2.p = shape1.a)
+
+
+
+
+qsinmad <- function(p, scale = 1, shape1.a, shape3.q,
+                    lower.tail = TRUE,
+                    log.p = FALSE) {
 
 
-rinv.paralogistic <- function(n, shape1.a, scale = 1)
-  rdagum(n, shape1.a, scale = scale, shape1.a)
 
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
 
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
 
-qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
-  bad <- (p < 0) | (p > 1) | is.na(p)
-  ans <- NA * p
-  ans[is.nan(p)] <- NaN
 
   LLL <- max(length(p), length(shape1.a), length(scale),
                         length(shape3.q))
@@ -2443,30 +2698,62 @@ qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
   if (length(shape3.q) != LLL)
     shape3.q  <- rep(shape3.q,  length.out = LLL)
 
-  Shape1.a <- shape1.a[!bad]
-  Scale    <- scale[!bad]
-  Shape3.q <- shape3.q[!bad]
 
-  QQ <- p[!bad]
-  ans[!bad] <- Scale * ((1 - QQ)^(-1/Shape3.q) - 1)^(1/Shape1.a)
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * expm1((-1/shape3.q) * log(-expm1(ln.p)))^(1/shape1.a)
+    } else {
+      ans <- scale * expm1((-1/shape3.q) * log1p(-p))^(1/shape1.a)
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * expm1(-ln.p / shape3.q)^(1/shape1.a)
+    } else {
+      ans <- scale * expm1(-log(p) / shape3.q)^(1/shape1.a)
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+    }
+  }
+
+  ans[scale    <= 0 | shape1.a <= 0 | shape3.q <= 0] <- NaN
   ans
 }
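
The d/p/q/r functions of this Burr/beta family now take 'scale' before the
shape parameters, and the convenience wrappers above pass every parameter by
name, so special cases such as shape3.q = shape1.a (paralogistic) no longer
depend on fragile positional matching. Scripts written against 0.9-6 need
updating; a sketch of the hazard:

    rsinmad(5, 2, 1, 3)  # 0.9-6 meaning: shape1.a = 2, scale = 1, shape3.q = 3
                         # 0.9-7 meaning: scale = 2, shape1.a = 1, shape3.q = 3
    rsinmad(5, scale = 1, shape1.a = 2, shape3.q = 3)  # unambiguous in both
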
 
 
-qlomax <- function(p, scale = 1, shape3.q)
-  qsinmad(p, shape1.a = 1, scale = scale, shape3.q)
 
 
-qfisk <- function(p, shape1.a, scale = 1)
-  qsinmad(p, shape1.a, scale = scale, shape3.q = 1)
+qlomax <- function(p, scale = 1, shape3.q,
+                   lower.tail = TRUE, log.p = FALSE)
+  qsinmad(p, shape1.a = 1, scale = scale, shape3.q = shape3.q,
+          lower.tail = lower.tail, log.p = log.p)
+
+qfisk <- function(p, scale = 1, shape1.a,
+                  lower.tail = TRUE, log.p = FALSE)
+  qsinmad(p, shape1.a = shape1.a, scale = scale, shape3.q = 1,
+          lower.tail = lower.tail, log.p = log.p)
+
+qparalogistic <- function(p, scale = 1, shape1.a,
+                          lower.tail = TRUE, log.p = FALSE)
+  qsinmad(p, shape1.a = shape1.a, scale = scale,
+          shape3.q = shape1.a,  ## 20150121 KaiH; add shape3.q = shape1.a
+          lower.tail = lower.tail, log.p = log.p)
 
 
-qparalogistic <- function(p, shape1.a, scale = 1)
-  qsinmad(p, shape1.a, scale = scale, shape1.a)
 
 
 
-qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
+qdagum <- function(p, scale = 1, shape1.a, shape2.p,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
 
   LLL <- max(length(p), length(shape1.a), length(scale),
                         length(shape2.p))
@@ -2479,31 +2766,63 @@ qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
   if (length(shape2.p) != LLL)
     shape2.p  <- rep(shape2.p,  length.out = LLL)
 
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * (expm1(-ln.p/shape2.p))^(-1/shape1.a)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- scale * (expm1(-log(p)/shape2.p))^(-1/shape1.a)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * (expm1(-log(-expm1(ln.p))/shape2.p))^(-1/shape1.a)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- scale * (expm1(-log1p(-p)/shape2.p))^(-1/shape1.a)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+      ans[p > 1] <- NaN
+    }
+  }
 
-  bad <- (p < 0) | (p > 1) | (scale <= 0) | is.na(p)
-
-  ans <- NA * p
-  ans[is.nan(p)] <- NaN
-  ans[!bad] <- scale[!bad] *
-             (p[!bad]^(-1/shape2.p[!bad]) - 1)^(-1/shape1.a[!bad])
+  ans[scale <= 0 | shape1.a <= 0 | shape2.p <= 0] <- NaN
   ans
 }
 
 
 
-qinv.lomax <- function(p, scale = 1, shape2.p)
-  qdagum(p, shape1.a = 1, scale = scale, shape2.p)
 
+qinv.lomax <- function(p, scale = 1, shape2.p,
+                       lower.tail = TRUE, log.p = FALSE)
+  qdagum(p, scale = scale, shape1.a = 1, shape2.p = shape2.p,
+         lower.tail = lower.tail, log.p = log.p)
 
-qinv.paralogistic <- function(p, shape1.a, scale = 1)
-  qdagum(p, shape1.a, scale = scale, shape1.a)
 
+qinv.paralogistic <- function(p, scale = 1, shape1.a,
+                              lower.tail = TRUE, log.p = FALSE)
+  qdagum(p, scale = scale, shape1.a = shape1.a,
+         shape2.p = shape1.a,   ##  20150121 Kai; add shape2.p = shape1.a
+         lower.tail = lower.tail, log.p = log.p)
 
 
 
 
+psinmad <- function(q, scale = 1, shape1.a, shape3.q,
+                    lower.tail = TRUE, log.p = FALSE) {
 
-psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
 
   LLL <- max(length(q), length(shape1.a), length(scale),
@@ -2517,40 +2836,70 @@ psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
   if (length(shape3.q) != LLL)
     shape3.q  <- rep(shape3.q,  length.out = LLL)
 
+  # 20150121 KaiH
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log1p(-(1 + (q / scale)^shape1.a)^(-shape3.q))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- exp(log1p(-(1 + (q / scale)^shape1.a)^(-shape3.q)))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- (-shape3.q) * log1p((q / scale)^shape1.a)
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- (1 + (q / scale)^shape1.a)^(-shape3.q)
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
+  ans[scale <= 0 | shape1.a <= 0 | shape3.q <= 0] <- NaN
+  ans
+}
+
 
-  notpos <- (q <= 0) & !is.na(q)
-  Shape1.a <- shape1.a[!notpos]
-  Scale    <-    scale[!notpos]
-  Shape3.q <- shape3.q[!notpos]
-  QQ       <-        q[!notpos]
 
-  ans <- 0 * q # rep(0.0, len = LLL)
-  ans[!notpos] <- 1 - (1 + (QQ / Scale)^Shape1.a)^(-Shape3.q)
 
-  ans[scale    <= 0] <- NaN
-  ans[shape1.a <= 0] <- NaN
-  ans[shape3.q <= 0] <- NaN
 
-  ans[q == -Inf] <- 0
 
-  ans
-}
+plomax <- function(q, scale = 1, shape3.q,   # Change the order
+                   lower.tail = TRUE, log.p = FALSE)
+  psinmad(q, shape1.a = 1, scale = scale, shape3.q = shape3.q,
+          lower.tail = lower.tail, log.p = log.p)
+
+
+pfisk <- function(q, scale = 1, shape1.a,
+                  lower.tail = TRUE, log.p = FALSE)
+  psinmad(q, shape1.a = shape1.a, scale = scale, shape3.q = 1,
+          lower.tail = lower.tail, log.p = log.p)
+
+
+pparalogistic <- function(q, scale = 1, shape1.a,   # Change the order
+                          lower.tail = TRUE, log.p = FALSE)
+  psinmad(q, shape1.a = shape1.a, scale = scale,
+          shape3.q = shape1.a,  # Add shape3.q = shape1.a
+          lower.tail = lower.tail, log.p = log.p)
 
 
-plomax <- function(q, scale = 1, shape3.q)
-  psinmad(q, shape1.a = 1, scale = scale, shape3.q)
 
 
-pfisk <- function(q, shape1.a, scale = 1)
-  psinmad(q, shape1.a, scale = scale, shape3.q = 1)
 
 
-pparalogistic <- function(q, shape1.a, scale = 1)
-  psinmad(q, shape1.a, scale = scale, shape1.a)
+pdagum <- function(q, scale = 1, shape1.a, shape2.p,
+                   lower.tail = TRUE, log.p = FALSE) {
 
 
 
-pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
 
   LLL <- max(length(q), length(shape1.a), length(scale),
@@ -2564,20 +2913,30 @@ pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
   if (length(shape2.p) != LLL)
     shape2.p  <- rep(shape2.p,  length.out = LLL)
 
-  notpos   <- (q <= 0) & !is.na(q)
-  Shape1.a <- shape1.a[!notpos]
-  Scale    <-    scale[!notpos]
-  Shape2.p <- shape2.p[!notpos]
-  QQ       <-        q[!notpos]
-
-  ans <- 0 * q
-  ans[!notpos] <- (1 + (QQ/Scale)^(-Shape1.a))^(-Shape2.p)
 
-  ans[scale    <= 0] <- NaN
-  ans[shape1.a <= 0] <- NaN
-  ans[shape2.p <= 0] <- NaN
-  ans[q == -Inf] <- 0
+  if (lower.tail) {
+    if (log.p) {
+      ans <- (-shape2.p) * log1p((q/scale)^(-shape1.a))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- exp( (-shape2.p) * log1p((q/scale)^(-shape1.a)) )
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log1p(-(1 + (q/scale)^(-shape1.a))^(-shape2.p))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      stop("unfinished")
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
 
+  ans[shape1.a <= 0 | scale <= 0 | shape2.p <= 0] <- NaN
   ans
 }
 
@@ -2585,16 +2944,21 @@ pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
 
 
 
-pinv.lomax <- function(q, scale = 1, shape2.p)
-  pdagum(q, shape1.a = 1, scale = scale, shape2.p)
+pinv.lomax <- function(q, scale = 1, shape2.p,
+                       lower.tail = TRUE, log.p = FALSE)
+  pdagum(q, scale = scale, shape1.a = 1, shape2.p = shape2.p,
+         lower.tail = lower.tail, log.p = log.p)
 
 
-pinv.paralogistic <- function(q, shape1.a, scale = 1)
-  pdagum(q, shape1.a, scale = scale, shape1.a)
+pinv.paralogistic <- function(q, scale = 1, shape1.a,
+                              lower.tail = TRUE, log.p = FALSE)
+  pdagum(q, scale = scale, shape1.a = shape1.a,
+         shape2.p = shape1.a,
+         lower.tail = lower.tail, log.p = log.p)
 
 
 
-dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
+dsinmad <- function(x, scale = 1, shape1.a, shape3.q, log = FALSE) {
 
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -2624,20 +2988,24 @@ dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
 }
 
 
+
 dlomax <- function(x, scale = 1, shape3.q, log = FALSE)
-  dsinmad(x, shape1.a = 1, scale = scale, shape3.q, log = log)
+  dsinmad(x, scale = scale, shape1.a = 1, shape3.q = shape3.q, log = log)
+
+
 
+dfisk <- function(x, scale = 1, shape1.a, log = FALSE)
+  dsinmad(x, scale = scale, shape1.a = shape1.a, shape3.q = 1, log = log)
 
-dfisk <- function(x, shape1.a, scale = 1, log = FALSE)
-  dsinmad(x, shape1.a, scale = scale, shape3.q = 1, log = log)
 
 
-dparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
-  dsinmad(x, shape1.a, scale = scale, shape1.a, log = log)
+dparalogistic <- function(x, scale = 1, shape1.a, log = FALSE)
+  dsinmad(x, scale = scale, shape1.a = shape1.a, shape3.q = shape1.a,
+          log = log)
 
 
 
-ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
+ddagum <- function(x, scale = 1, shape1.a, shape2.p, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
@@ -2674,16 +3042,20 @@ ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
 }
 
 
+
 dinv.lomax <- function(x, scale = 1, shape2.p, log = FALSE)
-  ddagum(x, shape1.a = 1, scale = scale, shape2.p, log = log)
+  ddagum(x, scale = scale, shape1.a = 1, shape2.p = shape2.p,
+         log = log)
 
 
-dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
-  ddagum(x, shape1.a, scale = scale, shape1.a, log = log)
+dinv.paralogistic <- function(x, scale = 1, shape1.a, log = FALSE)
+  ddagum(x, scale = scale, shape1.a = shape1.a, shape2.p = shape1.a,
+         log = log)
 
 
 
- sinmad <- function(lshape1.a = "loge",
+ sinmad <- function(lss,
+                    lshape1.a = "loge",
                     lscale = "loge",
                     lshape3.q = "loge",
                     ishape1.a = NULL, 
@@ -2691,6 +3063,12 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
                     ishape3.q = 1.0, 
                     zero = NULL) {
 
+  if (!is.logical(lss) || lss)
+    stop("argument 'lss' not specified correctly. ",
+         "See online help for important information")
+
+
+
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -2905,7 +3283,8 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 }
 
 
- dagum <- function(lshape1.a = "loge",
+ dagum <- function(lss,
+                   lshape1.a = "loge",
                    lscale = "loge",
                    lshape2.p = "loge",
                    ishape1.a = NULL, 
@@ -2913,6 +3292,12 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
                    ishape2.p = 1.0, 
                    zero = NULL) {
 
+  if (!is.logical(lss) || lss)
+    stop("argument 'lss' not specified correctly. ",
+         "See online help for important information")
+
+
+
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
       stop("bad input for argument 'zero'")
@@ -3120,10 +3505,13 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 
 
  betaII <-
-   function(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+   function(  # lss,
+            lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
             iscale = NULL,   ishape2.p = 2,      ishape3.q = 2,
             zero = NULL) {
 
+
+
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
@@ -3314,7 +3702,8 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 
 
 
- lomax <- function(lscale = "loge",    lshape3.q = "loge",
+ lomax <- function(  # lss,
+                   lscale = "loge",    lshape3.q = "loge",
                    iscale = NULL,      ishape3.q = NULL,  # 2.0, 
                                        gshape3.q = exp(-5:5),
                    zero = NULL) {
@@ -3519,10 +3908,17 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 
 
 
- fisk <- function(lshape1.a = "loge", lscale = "loge",
+ fisk <- function(lss,
+                  lshape1.a = "loge", lscale = "loge",
                   ishape1.a = NULL,   iscale = NULL,
                   zero = NULL) {
 
+  if (!is.logical(lss) || lss)
+    stop("argument 'lss' not specified correctly. ",
+         "See online help for important information")
+
+
+
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
@@ -3692,11 +4088,12 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 }
 
 
- inv.lomax <- function(lscale = "loge",
-                      lshape2.p = "loge",
-                      iscale = NULL,
-                      ishape2.p = 1.0, 
-                      zero = NULL) {
+ inv.lomax <- function(  # lss,
+                       lscale = "loge",
+                       lshape2.p = "loge",
+                       iscale = NULL,
+                       ishape2.p = 1.0, 
+                       zero = NULL) {
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -3854,12 +4251,19 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 }
 
 
- paralogistic <- function(lshape1.a = "loge",
+ paralogistic <- function(lss,
+                          lshape1.a = "loge",
                           lscale = "loge",
                           ishape1.a = 2,
                           iscale = NULL,
                           zero = NULL) {
 
+  if (!is.logical(lss) || lss)
+    stop("argument 'lss' not specified correctly. ",
+         "See online help for important information")
+
+
+
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -4040,9 +4444,17 @@ dinv.paralogistic <- function(x, shape1.a, scale = 1, log = FALSE)
 }
 
 
- inv.paralogistic <- function(lshape1.a = "loge", lscale = "loge",
-                             ishape1.a = 2,      iscale = NULL,
-                             zero = NULL) {
+
+ inv.paralogistic <- function(lss,
+                              lshape1.a = "loge", lscale = "loge",
+                              ishape1.a = 2,      iscale = NULL,
+                              zero = NULL) {
+
+  if (!is.logical(lss) || lss)
+    stop("argument 'lss' not specified correctly. ",
+         "See online help for important information")
+
+
 
   if (length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 7cde7e6..4920142 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -36,6 +36,7 @@ dkumar <- function(x, shape1, shape2, log = FALSE) {
 }
 
 
+
 rkumar <- function(n, shape1, shape2) {
   ans <- (1 - (runif(n))^(1/shape2))^(1/shape1)
   ans[(shape1 <= 0) | (shape2 <= 0)] <- NaN
@@ -43,22 +44,81 @@ rkumar <- function(n, shape1, shape2) {
 }
 
 
-qkumar <- function(p, shape1, shape2) {
 
+qkumar <- function(p, shape1, shape2,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
 
-  ans <- (1.0 - (1.0 - p)^(1/shape2))^(1/shape1)
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- (-expm1((1/shape2) * log(-expm1(ln.p))))^(1/shape1)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- (-expm1((1/shape2) * log1p(-p)))^(1/shape1)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- 1
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- (-expm1(ln.p / shape2))^(1/shape1)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- (-expm1((1/shape2) * log(p)))^(1/shape1)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 1
+      ans[p == 1] <- 0
+      ans[p > 1] <- NaN
+    }
+  }
   ans[(shape1 <= 0) | (shape2 <= 0)] = NaN
-  ans[p <  0] <- NaN
-  ans[p >  1] <- NaN
   ans
 }
 
 
-pkumar <- function(q, shape1, shape2) {
 
-  ans <- 1.0 - (1.0 - q^shape1)^shape2
-  ans[q <= 0] <- 0
-  ans[q >= 1] <- 1
+pkumar <- function(q, shape1, shape2,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(-expm1(shape2 * log1p(-q^shape1)))
+      ans[q <= 0 ] <- -Inf
+      ans[q >= 1] <- 0
+    } else {
+      ans <- -expm1(shape2 * log1p(-q^shape1))
+      ans[q <= 0] <- 0
+      ans[q >= 1] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- shape2 * log1p(-q^shape1)
+      ans[q <= 0] <- 0
+      ans[q >= 1] <- -Inf
+    } else {
+      ans <- exp(shape2 * log1p(-q^shape1))
+      ans[q <= 0] <- 1
+      ans[q >= 1] <- 0
+    }
+  }
+
   ans[(shape1 <= 0) | (shape2 <= 0)] <- NaN
   ans
 }
@@ -261,6 +321,9 @@ drice <- function(x, sigma, vee, log = FALSE) {
                      x.abs
   logdensity[sigma <= 0] <- NaN
   logdensity[vee < 0] <- NaN
+
+  logdensity[is.infinite(x)] <- -Inf  # 20141209 KaiH
+
   if (log.arg) logdensity else exp(logdensity)
 }
 
@@ -276,18 +339,27 @@ rrice <- function(n, sigma, vee) {
 
 
 
-marcumQ <- function(a, b, m = 1, lower.tail = TRUE, ... ) {
-  pchisq(b^2, df = 2*m, ncp = a^2, lower.tail = lower.tail, ... )
+marcumQ <- function(a, b, m = 1,
+                    lower.tail = TRUE, log.p = FALSE, ... ) {
+  pchisq(b^2, df = 2*m, ncp = a^2,
+         lower.tail = lower.tail, log.p = log.p, ... )
 }
 
 
-price <- function(q, sigma, vee, lower.tail = TRUE, ...) {
-  marcumQ(vee/sigma, q/sigma, m = 1, lower.tail = lower.tail, ... )
+
+price <- function(q, sigma, vee,
+                  lower.tail = TRUE, log.p = FALSE, ...) {
+  ans <- marcumQ(vee/sigma, q/sigma, m = 1,
+                 lower.tail = lower.tail, log.p = log.p, ... )
+  ans
 }
 
 
-qrice <- function(p, sigma, vee, ... ) {
-  sqrt(qchisq(p, df = 2, ncp = (vee/sigma)^2, ... )) * sigma
+
+qrice <- function(p, sigma, vee,
+                  lower.tail = TRUE, log.p = FALSE, ... ) {
+  sqrt(qchisq(p, df = 2, ncp = (vee/sigma)^2,
+              lower.tail = lower.tail, log.p = log.p, ... )) * sigma
 }
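
The Rice CDF is computed through its noncentral chi-squared representation,
price(q) = pchisq((q/sigma)^2, df = 2, ncp = (vee/sigma)^2), so the new
'lower.tail' and 'log.p' arguments simply thread through to pchisq() and
qchisq(). A round-trip sketch:

    p <- price(3, sigma = 1, vee = 2)
    all.equal(qrice(p, sigma = 1, vee = 2), 3)  # should be TRUE
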
 
 
@@ -298,8 +370,8 @@ qrice <- function(p, sigma, vee, ... ) {
 
 
 
-riceff.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+riceff.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -307,9 +379,6 @@ riceff.control <- function(save.weight = TRUE, ...) {
                     isigma = NULL, ivee = NULL,
                     nsimEIM = 100, zero = NULL, nowarning = FALSE) {
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
 
 
   lvee     <- as.list(substitute(lvee))
@@ -556,8 +625,8 @@ rskellam <- function(n, mu1, mu2) {
 
 
 
-skellam.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+skellam.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -788,19 +857,23 @@ ryules <- function(n, rho) {
 
 
 
-pyules <- function(q, rho) {
+pyules <- function(q, rho, log.p = FALSE) {
+
+
   tq <- trunc(q)
   ans <- 1 - tq * beta(abs(tq), rho+1)
   ans[q < 1] <- 0
+  ans[is.infinite(q) & q > 0] <- 1  # 20141215 KaiH
-  ans[(rho <= 0) | (rho <= 0)] <- NA
-  ans
+  ans[rho <= 0] <- NA
+  if (log.p) log(ans) else ans
 }
 
 
 
 
-yulesimon.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+yulesimon.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -995,22 +1068,49 @@ dlind <- function(x, theta, log = FALSE) {
 
   if ( log.arg ) {
     ans <- 2 * log(theta) + log1p(x) - theta * x - log1p(theta)
-    ans[(x < 0)] <- log(0)
+    ans[x < 0 | is.infinite(x)] <- log(0)  # 20141209 KaiH
   } else {
     ans <- theta^2 * (1 + x) * exp(-theta * x) / (1 + theta)
-    ans[(x < 0)] <- 0
+    ans[x < 0 | is.infinite(x)] <- 0  # 20141209 KaiH
   }
-  ans[(theta <= 0)] <- NaN
+  ans[theta <= 0] <- NaN
   ans
 }
 
 
 
-plind <- function(q, theta) {
+plind <- function(q, theta, lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
-  ifelse(q > 0,
-         1 - (theta + 1 + theta * q) * exp(-theta * q) / (1 + theta),
-         0)
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(-expm1(-theta * q + log1p(q / (1 + 1/theta))))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- -expm1(-theta * q + log1p(q / (1 + 1/theta)))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- -theta * q + log1p(q / (1 + 1/theta))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- exp(-theta * q + log1p(q / (1 + 1/theta)))
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
+  ans[theta <= 0] <- NaN
+  ans
 }
 
 
@@ -1026,7 +1126,7 @@ rlind <- function(n, theta) {
 
 
 
-  ifelse(runif(use.n) < rep(theta / (1 + theta), length = use.n),
+  ifelse(runif(use.n) < rep(1 / (1 + 1/theta), length = use.n),
          rexp(use.n, theta),
          rgamma(use.n, shape = 2, scale = 1 / theta))
 }
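
plind() now evaluates the Lindley CDF
F(q) = 1 - (1 + theta*q/(1 + theta)) * exp(-theta*q) on the log scale as
-theta*q + log1p(q/(1 + 1/theta)), and rlind() samples from the equivalent
exponential/gamma mixture; writing the mixing weight theta/(1 + theta) as
1/(1 + 1/theta) gives the correct limit 1 when theta is huge. A quick check
sketch:

    plind(1, theta = 2)               # 1 - (5/3) * exp(-2), about 0.7744
    set.seed(1)
    mean(rlind(1e5, theta = 2) <= 1)  # Monte Carlo agreement, about 0.77
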
@@ -1220,8 +1320,8 @@ ppoislindley <- function(q, theta) {
 
 
 if (FALSE)
-poislindley.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+poislindley.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1428,7 +1528,8 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
 
 
 
-pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000) {
+pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000,
+                   lower.tail = TRUE, log.p = FALSE) {
   if (any(is.na(q)))
     stop("argument 'q' must have non-missing values")
   if (!is.Numeric(mu))
@@ -1438,6 +1539,13 @@ pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000) {
   if (!is.Numeric(very.negative, length.arg = 1) ||
      (very.negative >= 0))
     stop("argument 'very.negative' must be quite negative")
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
   L <- max(length(q), length(mu), length(sigma))
   if (length(q)     != L) q     <- rep(q,     len = L)
   if (length(mu)    != L) mu    <- rep(mu,    len = L)
@@ -1469,7 +1577,12 @@ pslash <- function(q, mu = 0, sigma = 1, very.negative = -10000) {
   }
   if (extreme.q)
     warning("returning 0 or 1 values for extreme values of argument 'q'")
-  ans
+
+  if (lower.tail) {
+    if (log.p) log(ans) else ans
+  } else {
+    if (log.p) log1p(-ans) else -expm1(log(ans))
+  }
 }
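
Unlike the closed-form families above, pslash() obtains the lower-tail
probability by numerical integration of the slash density and only converts
the result afterwards; -expm1(log(ans)) is an expm1-style spelling of
1 - ans. A sketch:

    pslash(0)                      # about 0.5 by symmetry (numerical integration)
    pslash(0, lower.tail = FALSE)  # also about 0.5: converted from the CDF
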
 
 
@@ -1481,8 +1594,8 @@ rslash <- function (n, mu = 0, sigma = 1) {
 
 
 
-slash.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+slash.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1828,6 +1941,7 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
   logdensity <- shape1*x - lbeta(shape1, shape2) -
                 (shape1 + shape2) * log1pexp(x)
 
+  logdensity[is.infinite(x)] <- -Inf  # 20141209 KaiH
 
   if (log.arg) logdensity else exp(logdensity)
 }
@@ -1992,6 +2106,8 @@ dlogF <- function(x, shape1, shape2, log = FALSE) {
 
 
 
+
+
 dbenf <- function(x, ndigits = 1, log = FALSE) {
   if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
@@ -2017,6 +2133,7 @@ dbenf <- function(x, ndigits = 1, log = FALSE) {
 }
 
 
+
 rbenf <- function(n, ndigits = 1) {
   if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
@@ -2040,7 +2157,14 @@ rbenf <- function(n, ndigits = 1) {
 }
 
 
-pbenf <- function(q, ndigits = 1, log.p = FALSE) {
+
+pbenf <- function(q, ndigits = 1, lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
   if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       ndigits > 2)
@@ -2051,17 +2175,60 @@ pbenf <- function(q, ndigits = 1, log.p = FALSE) {
   ans <- q * NA
   floorq <- floor(q)
   indexTF <- is.finite(q) & (floorq >= lowerlimit)
-  ans[indexTF] <- log10(1 + floorq[indexTF]) -
-                  ifelse(ndigits == 1, 0, 1)
-  ans[!is.na(q) & !is.nan(q) & (q >= upperlimit)] <- 1
-  ans[!is.na(q) & !is.nan(q) & (q <  lowerlimit)] <- 0
-  if (log.p) log(ans) else ans
-}
 
+  if (ndigits == 1) {
+    if (lower.tail) {
+      if (log.p) {
+        ans[indexTF] <- log(log10(1 + floorq[indexTF]))
+        ans[q <  lowerlimit ] <- -Inf
+        ans[q >= upperlimit] <- 0
+      } else {
+        ans[indexTF] <- log10(1 + floorq[indexTF])
+        ans[q <  lowerlimit] <- 0
+        ans[q >= upperlimit] <- 1
+      }
+    } else {
+      if (log.p) {
+        ans[indexTF] <- log1p(-log10(1 + floorq[indexTF]))
+        ans[q <  lowerlimit] <- 0
+        ans[q >= upperlimit] <- -Inf
+      } else {
+        ans[indexTF] <- log10(10 / (1 + floorq[indexTF]))
+        ans[q <  lowerlimit] <- 1
+        ans[q >= upperlimit] <- 0
+      }
+    }
+  } else {
+    if (lower.tail) {
+      if (log.p) {
+        ans[indexTF] <- log(log10((1 + floorq[indexTF])/10))
+        ans[q <  lowerlimit ] <- -Inf
+        ans[q >= upperlimit] <- 0
+      } else {
+        ans[indexTF] <- log10((1 + floorq[indexTF])/10)
+        ans[q <  lowerlimit] <- 0
+        ans[q >= upperlimit] <- 1
+     }
+    } else {
+      if (log.p) {
+        ans[indexTF] <- log(log10(100/(1 + floorq[indexTF])))
+        ans[q <  lowerlimit] <- 0
+        ans[q >= upperlimit] <- -Inf
+      } else {
+        ans[indexTF] <- log10(100/(1 + floorq[indexTF]))
+        ans[q <  lowerlimit] <- 1
+        ans[q >= upperlimit] <- 0
+      }
+    }
+  }
+  ans
+}
 
 
 
+if (FALSE)
 qbenf <- function(p, ndigits = 1) {
+
   if (!is.Numeric(ndigits, length.arg = 1,
                   positive = TRUE, integer.valued = TRUE) ||
       ndigits > 2)
@@ -2089,6 +2256,83 @@ qbenf <- function(p, ndigits = 1) {
 
 
 
+qbenf <- function(p, ndigits = 1,
+                  lower.tail = TRUE, log.p = FALSE) {
+  if (!is.Numeric(ndigits, length.arg = 1,
+                  positive = TRUE, integer.valued = TRUE) ||
+      ndigits > 2)
+    stop("argument 'ndigits' must be 1 or 2")
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (log.p) { 
+    bad <- ((p > 0) | is.na(p) | is.nan(p))
+  } else {
+    bad <- ((p < 0) | (p > 1) | is.na(p) | is.nan(p))
+  }
+  if (any(bad))
+    stop("bad input for argument 'p'")
+
+  lowerlimit <- ifelse(ndigits == 1, 1, 10)
+  upperlimit <- ifelse(ndigits == 1, 9, 99)
+  ans <- rep(lowerlimit, length = length(p))
+
+  if (lower.tail) {
+    for (ii in (lowerlimit+1):upperlimit) {
+      indexTF <- is.finite(p) &
+                 (pbenf(ii-1, ndigits = ndigits,
+                        lower.tail = lower.tail, log.p = log.p) < p) &
+              (p <= pbenf(ii, ndigits = ndigits,
+                          lower.tail = lower.tail, log.p = log.p))
+      ans[indexTF] <- ii
+    }
+  } else {  ## when lower.tail = FALSE, pbenf(ii-1) >= p & pbenf(ii) < p
+    for (ii in (lowerlimit+1):upperlimit) {
+      indexTF <- is.finite(p) &
+                 (pbenf(ii-1, ndigits = ndigits,
+                        lower.tail = lower.tail, log.p = log.p) >= p) &
+                 (p > pbenf(ii, ndigits = ndigits,
+                            lower.tail = lower.tail, log.p = log.p))
+      ans[indexTF] <- ii
+    }
+  }
+
+  if (lower.tail) {
+    if (log.p) {
+      ans[p > 0] <- NaN
+      ans[p == -Inf] <- lowerlimit
+    } else {
+      ans[p < 0] <- NaN
+      ans[p == 0] <- lowerlimit
+      ans[p == 1] <- upperlimit
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ans[p > 0] <- NaN
+      ans[p == -Inf] <- upperlimit
+    } else {
+      ans[p < 0] <- NaN
+      ans[p == 0] <- upperlimit
+      ans[p == 1] <- lowerlimit
+      ans[p > 1] <- NaN
+    }
+  }
+  ans
+}
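
# Sketch of what the search loops above compute: qbenf() returns the
# smallest digit whose cumulative Benford probability reaches p, so it
# inverts pbenf() on the support (assumes both are in scope).
stopifnot(qbenf(pbenf(5)) == 5,
          qbenf(0) == 1,
          qbenf(1) == 9)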
+
+
+
+
+
+
+
+
+
 
 
 
diff --git a/R/family.basics.R b/R/family.basics.R
index 0412571..ab6a683 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -1428,33 +1428,33 @@ vweighted.mean.default <- function (x, w, ..., na.rm = FALSE) {
 
 
 
-family.name.vlm <- function(object, all = FALSE, ...) {
+familyname.vlm <- function(object, all = FALSE, ...) {
   ans <- object@family@vfamily
   if (all) ans else ans[1]
 }
 
 
-family.name.vglmff <- function(object, all = FALSE, ...) {
+familyname.vglmff <- function(object, all = FALSE, ...) {
   ans <- object@vfamily
   if (all) ans else ans[1]
 }
 
 
 
-if (!isGeneric("family.name"))
-    setGeneric("family.name",
-  function(object, ...) standardGeneric("family.name"))
+if (!isGeneric("familyname"))
+    setGeneric("familyname",
+  function(object, ...) standardGeneric("familyname"))
 
 
-setMethod("family.name", "vglmff",
+setMethod("familyname", "vglmff",
          function(object, ...)
-         family.name.vglmff(object, ...))
+         familyname.vglmff(object, ...))
 
 
 
-setMethod("family.name", "vlm",
+setMethod("familyname", "vlm",
          function(object, ...)
-         family.name.vlm(object, ...))
+         familyname.vlm(object, ...))
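
# Usage sketch for the renamed generic (the old spelling was
# family.name()); the vglmff method needs no fitted model:
familyname(binomialff())              # presumably "binomialff"
familyname(binomialff(), all = TRUE)  # the whole @vfamily vector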
 
 
 
diff --git a/R/family.binomial.R b/R/family.binomial.R
index e8df510..9ee26c7 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -67,8 +67,8 @@ process.binomial2.data.VGAM <- expression({
 
 
 
-betabinomial.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+betabinomial.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -119,8 +119,8 @@ betabinomial.control <- function(save.weight = TRUE, ...) {
     if (!all(w == 1))
       extra$orig.w <- w
 
-    if (is.null( .nsimEIM)) {
-      save.weight <- control$save.weight <- FALSE
+    if (is.null( .nsimEIM )) {
+      save.weights <- control$save.weights <- FALSE
     }
 
     mustart.orig <- mustart
@@ -785,8 +785,8 @@ rbinom2.rho <-
 
 
 
-binom2.rho.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+binom2.rho.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -866,8 +866,8 @@ binom2.rho.control <- function(save.weight = TRUE, ...) {
         namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE),
         namesof("rho", .lrho ,  earg = .erho,  short = TRUE))
 
-    if (is.null( .nsimEIM)) {
-      save.weight <- control$save.weight <- FALSE
+    if (is.null( .nsimEIM )) {
+      save.weights <- control$save.weights <- FALSE
     }
 
 
@@ -1583,38 +1583,41 @@ my.dbinom <- function(x,
 
 
 
-betabinomialff.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+betabinomialff.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
 
 
  betabinomialff <-
-  function(lshape12 = "loge",
-           i1 = 1, i2 = NULL, imethod = 1,
+  function(lshape1 = "loge",lshape2 = "loge",
+           ishape1 = 1, ishape2 = NULL, imethod = 1,
            ishrinkage = 0.95, nsimEIM = NULL,
            zero = NULL) {
 
 
 
 
-  lshape12 <- as.list(substitute(lshape12))
-  earg <- link2list(lshape12)
-  lshape12 <- attr(earg, "function.name")
+  lshape1 <- as.list(substitute(lshape1))
+  earg1 <- link2list(lshape1)
+  lshape1 <- attr(earg1, "function.name")
 
 
+  lshape2 <- as.list(substitute(lshape2))
+  earg2 <- link2list(lshape2)
+  lshape2 <- attr(earg2, "function.name")
 
 
-  if (!is.Numeric(i1, positive = TRUE))
-    stop("bad input for argument 'i1'")
+  if (!is.Numeric(ishape1, positive = TRUE))
+    stop("bad input for argument 'ishape1'")
   if (!is.Numeric(imethod, length.arg = 1,
                   integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
     stop("argument 'imethod' must be 1, 2 or 3")
 
-  if (length(i2) && !is.Numeric(i2, positive = TRUE))
-    stop("bad input for argument 'i2'")
+  if (length(ishape2) && !is.Numeric(ishape2, positive = TRUE))
+    stop("bad input for argument 'ishape2'")
 
   if (!is.null(nsimEIM)) {
     if (!is.Numeric(nsimEIM, length.arg = 1,
@@ -1628,8 +1631,8 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
   new("vglmff",
   blurb = c("Beta-binomial model\n",
             "Links:    ",
-            namesof("shape1", lshape12, earg = earg), ", ",
-            namesof("shape2", lshape12, earg = earg), "\n",
+            namesof("shape1", lshape1, earg = earg1), ", ",
+            namesof("shape2", lshape2, earg = earg2), "\n",
             "Mean:     mu = shape1 / (shape1+shape2)", "\n",
             "Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
                        "where rho = 1 / (shape1+shape2+1)"),
@@ -1640,8 +1643,8 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
     if (!all(w == 1))
       extra$orig.w <- w
 
-    if (is.null( .nsimEIM)) {
-      save.weight <- control$save.weight <- FALSE
+    if (is.null( .nsimEIM )) {
+      save.weights <- control$save.weights <- FALSE
     }
 
     mustart.orig <- mustart
@@ -1649,17 +1652,17 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
     if (length(mustart.orig))
       mustart <- mustart.orig  # Retain it if inputted
     predictors.names <-
-         c(namesof("shape1", .lshape12 , earg = .earg, tag = FALSE),
-           namesof("shape2", .lshape12 , earg = .earg, tag = FALSE))
+         c(namesof("shape1", .lshape1 , earg = .earg1 , tag = FALSE),
+           namesof("shape2", .lshape2 , earg = .earg2 , tag = FALSE))
 
     if (!length(etastart)) {
 
       mustart.use <- if (length(mustart.orig)) mustart.orig else
                     mustart
 
-      shape1 <- rep( .i1 , len = n)
-      shape2 <- if (length( .i2 )) {
-                  rep( .i2 , len = n)
+      shape1 <- rep( .ishape1 , len = n)
+      shape2 <- if (length( .ishape2 )) {
+                  rep( .ishape2 , len = n)
                 } else if (length(mustart.orig)) {
                   shape1 * (1 / mustart.use - 1)
                 } else if ( .imethod == 1) {
@@ -1677,31 +1680,35 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
          warning("the response (as counts) does not appear to ",
                  "be integer-valued. Am rounding to integer values.")
       ycounts <- round(ycounts)  # Make sure it is an integer
-      etastart <- cbind(theta2eta(shape1, .lshape12 , earg = .earg ),
-                        theta2eta(shape2, .lshape12 , earg = .earg ))
+      etastart <- cbind(theta2eta(shape1, .lshape1 , earg = .earg1 ),
+                        theta2eta(shape2, .lshape2 , earg = .earg2 ))
       mustart <- NULL  # Since etastart has been computed.
     }
-  }), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+            .earg1 = earg1, .earg2 = earg2,
+            .ishape1 = ishape1, .ishape2 = ishape2,
             .nsimEIM = nsimEIM,
             .imethod = imethod, .ishrinkage = ishrinkage ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
-    shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
-    shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
     shape1 / (shape1 + shape2)
-  }, list( .lshape12 = lshape12, .earg = earg ))),
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+           .earg1 = earg1, .earg2 = earg2 ))),
   last = eval(substitute(expression({
-    misc$link <-    c("shape1" = .lshape12 , "shape2" = .lshape12 )
+    misc$link <-    c("shape1" = .lshape1 , "shape2" = .lshape2 )
 
-    misc$earg <- list("shape1" = .earg ,     "shape2" = .earg )
+    misc$earg <- list("shape1" = .earg1 ,   "shape2" = .earg2   )
 
-    shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
-    shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
 
     misc$rho <- 1 / (shape1 + shape2 + 1)
     misc$expected <- TRUE
     misc$nsimEIM <- .nsimEIM
     misc$zero <- .zero
-  }), list( .lshape12 = lshape12, .earg = earg,
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+            .earg1 = earg1, .earg2 = earg2,
             .nsimEIM = nsimEIM, .zero = zero ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
@@ -1714,8 +1721,8 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
       warning("converting 'ycounts' to integer in @loglikelihood")
     ycounts <- round(ycounts)
 
-    shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
-    shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
               round(w)
     if (residuals) {
@@ -1731,7 +1738,8 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
         ll.elts
       }
     }
-  }, list( .lshape12 = lshape12, .earg = earg ))),
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2,
+           .earg1 = earg1, .earg2 = earg2 ))),
   vfamily = c("betabinomialff"),
 
 
@@ -1748,14 +1756,15 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
     w <- pwts
     eta <- predict(object)
     extra <- object@extra
-    shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
-    shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
     nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
               round(w)
     rbetabinom.ab(nsim * length(shape1), size = nvec,
                   shape1 = shape1,
                   shape2 = shape2)
-  }, list( .lshape12 = lshape12, .earg = earg ))),
+  }, list( .lshape1 = lshape1, .lshape2 = lshape2, 
+           .earg1 = earg1, .earg2 = earg2 ))),
 
 
 
@@ -1766,11 +1775,11 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
               round(w)
     ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
               y * w # Convert proportions to counts
-    shape1 <- eta2theta(eta[, 1], .lshape12 , earg = .earg )
-    shape2 <- eta2theta(eta[, 2], .lshape12 , earg = .earg )
+    shape1 <- eta2theta(eta[, 1], .lshape1 , earg = .earg1 )
+    shape2 <- eta2theta(eta[, 2], .lshape2 , earg = .earg2 )
 
-    dshape1.deta <- dtheta.deta(shape1, .lshape12 , earg = .earg )
-    dshape2.deta <- dtheta.deta(shape2, .lshape12 , earg = .earg )
+    dshape1.deta <- dtheta.deta(shape1, .lshape1 , earg = .earg1 )
+    dshape2.deta <- dtheta.deta(shape2, .lshape2 , earg = .earg2 )
 
     dl.dshape1 <- digamma(shape1+ycounts) -
                   digamma(shape1+shape2+nvec) -
@@ -1782,7 +1791,8 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
     (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
     cbind(dl.dshape1 * dshape1.deta,
           dl.dshape2 * dshape2.deta)
-  }), list( .lshape12 = lshape12, .earg = earg ))),
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+            .earg1 = earg1, .earg2 = earg2 ))),
   weight = eval(substitute(expression({
     if (is.null( .nsimEIM)) {
       wz <- matrix(as.numeric(NA), n, dimm(M))  #3=dimm(2)
@@ -1828,7 +1838,8 @@ betabinomialff.control <- function(save.weight = TRUE, ...) {
       wz <- wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
       wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
     }
-  }), list( .lshape12 = lshape12, .earg = earg,
+  }), list( .lshape1 = lshape1, .lshape2 = lshape2, 
+            .earg1 = earg1, .earg2 = earg2,
             .nsimEIM = nsimEIM ))))
 }
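
# Hedged migration sketch for the renamed arguments: the single
# 'lshape12'/'i1'/'i2' interface is now split per shape parameter, so an
# old call like betabinomialff(lshape12 = "loge", i1 = 1) becomes
# (mydat, R and N are placeholders for the user's own data):
#   vglm(cbind(R, N - R) ~ 1,
#        betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1),
#        data = mydat)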
 
@@ -2600,8 +2611,8 @@ if (FALSE)
                   namesof("mu1", .lmu12 , earg = .emu12 , short = TRUE),
                   namesof("mu2", .lmu12 , earg = .emu12 , short = TRUE))
 
-    if (is.null( .nsimEIM)) {
-         save.weight <- control$save.weight <- FALSE
+    if (is.null( .nsimEIM )) {
+         save.weights <- control$save.weights <- FALSE
     }
     if (is.null(etastart)) {
       mu1.init= if (is.Numeric(.imu1))
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index b246b17..ecf4f2c 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -337,6 +337,8 @@ dbistudentt <- function(x1, x2, df, rho = 0, log = FALSE) {
 
   logdensity[df <= 0] <- NaN  # Not picked up by dt().
 
+  logdensity[is.infinite(x1) | is.infinite(x2)] <- log(0)  # 20141216  KaiH
+
   if (log.arg) logdensity else exp(logdensity)
 }
 
@@ -983,8 +985,8 @@ rbinormcop <- function(n, rho = 0  #, inverse = FALSE
 
 
 
-bilogistic.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+bilogistic.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1233,6 +1235,9 @@ dbilogis <- function(x1, x2, loc1 = 0, scale1 = 1,
                 log(scale1) - 3 * log1p(exp(-zedd1) + exp(-zedd2))
 
 
+  logdensity[x1 == -Inf | x2 == -Inf] <- log(0)  # 20141216 KaiH
+
+
   if (log.arg) logdensity else exp(logdensity)
 }
 
@@ -1780,8 +1785,8 @@ dbifrankcop <- function(x1, x2, apar, log = FALSE) {
 
 
 
-bifrankcop.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+bifrankcop.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -2636,8 +2641,8 @@ dbiplackcop <- function(x1, x2, oratio, log = FALSE) {
 
 
 
-biplackettcop.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+biplackettcop.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -2883,8 +2888,8 @@ rbiamhcop <- function(n, apar) {
 }
 
 
-biamhcop.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+biamhcop.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -3083,6 +3088,9 @@ dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0,
   logpdf <- -log(2 * pi) - log(sd1) - log(sd2) -
               0.5 * log1p(-rho^2) +
             -(0.5 / temp5)  * (zedd1^2 + (-2 * rho * zedd1 + zedd2) * zedd2)
+
+  logpdf[is.infinite(x1) | is.infinite(x2)] <- log(0)  # 20141216 KaiH
+
   if (log.arg) logpdf else exp(logpdf)
 }
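
# Two quick checks: at the default zero correlation the density
# factorises (assuming the default standard-normal marginals), and the
# new guard above zeroes the density at infinite arguments.
stopifnot(all.equal(dbinorm(1, -0.5), dnorm(1) * dnorm(-0.5)),
          dbinorm(Inf, 0) == 0)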
 
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 1af19f3..1f3d22e 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -830,7 +830,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
  cumulative <- function(link = "logit",
                         parallel = FALSE,  # Does not apply to the intercept
                         reverse = FALSE, 
-                        mv = FALSE,
+                        multiple.responses = FALSE,
                         whitespace = FALSE) {
 
 
@@ -848,14 +848,14 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   fillerChar <- ifelse(whitespace, " ", "")
 
 
-  if (!is.logical(mv) || length(mv) != 1)
-    stop("argument 'mv' must be a single logical")
+  if (!is.logical(multiple.responses) || length(multiple.responses) != 1)
+    stop("argument 'multiple.responses' must be a single logical")
   if (!is.logical(reverse) || length(reverse) != 1)
     stop("argument 'reverse' must be a single logical")
 
 
   new("vglmff",
-  blurb = if ( mv )
+  blurb = if ( multiple.responses )
           c(paste("Multivariate cumulative", link, "model\n\n"),
           "Links:   ",
           namesof(if (reverse) 
@@ -870,7 +870,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                   ifelse(whitespace, "P[Y <= j]",   "P[Y<=j]"),
                   link, earg = earg)),
   constraints = eval(substitute(expression({
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       if ( !length(constraints) ) {
           Llevels <- extra$Llevels
           NOS <- extra$NOS
@@ -886,13 +886,14 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                              apply.int = .apply.parint ,
                              constraints = constraints)
     }
-  }), list( .parallel = parallel, .mv = mv,
+  }), list( .parallel = parallel,
+            .multiple.responses = multiple.responses,
             .apply.parint = apply.parint ))),
   deviance = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
 
     answer <-
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       totdev <- 0
       NOS <- extra$NOS
       Llevels <- extra$Llevels
@@ -916,7 +917,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
                                      summation = TRUE)
     }
     answer
-  }, list( .earg = earg, .link = link, .mv = mv ) )),
+  }, list( .earg = earg, .link = link,
+           .multiple.responses = multiple.responses ) )),
 
   initialize = eval(substitute(expression({
 
@@ -928,8 +930,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       warning("response should be ordinal---see ordered()")
 
 
-    extra$mv <- .mv
-    if ( .mv ) {
+    extra$multiple.responses <- .multiple.responses
+    if ( .multiple.responses ) {
       checkCut(y)  # Check the input; stops if there is an error.
       if (any(w != 1) || ncol(cbind(w)) != 1)
           stop("the 'weights' argument must be a vector of all ones")
@@ -987,7 +989,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
       if (length(dimnames(y)))
         extra$dimnamesy2 <- dimnames(y)[[2]]
   }
-  }), list( .reverse = reverse, .mv = mv,
+  }), list( .reverse = reverse, .multiple.responses = multiple.responses,
             .link = link, .earg = earg,
             .fillerChar = fillerChar,
             .whitespace = whitespace ))),
@@ -995,7 +997,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
 
     linkinv = eval(substitute( function(eta, extra = NULL) {
         answer <-
-        if ( .mv ) {
+        if ( .multiple.responses ) {
           NOS <- extra$NOS
           Llevels <- extra$Llevels
           fv.matrix <- matrix(0, nrow(eta), NOS*Llevels)
@@ -1036,10 +1038,10 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
         answer
     }, list( .reverse = reverse,
              .link = link, .earg = earg,
-             .mv = mv ))),
+             .multiple.responses = multiple.responses ))),
 
   last = eval(substitute(expression({
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       misc$link <- .link
       misc$earg <- list( .earg )
 
@@ -1060,16 +1062,17 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     misc$parameters <- mynames
     misc$reverse <- .reverse
     misc$parallel <- .parallel
-    misc$mv <- .mv
+    misc$multiple.responses <- .multiple.responses
   }), list(
             .reverse = reverse, .parallel = parallel,
             .link = link, .earg = earg,
-            .fillerChar = fillerChar, .mv = mv,
+            .fillerChar = fillerChar,
+            .multiple.responses = multiple.responses,
             .whitespace = whitespace ))),
 
   linkfun = eval(substitute( function(mu, extra = NULL) {
     answer <- 
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       NOS <- extra$NOS
       Llevels <- extra$Llevels
       eta.matrix <- matrix(0, nrow(mu), NOS*(Llevels-1))
@@ -1092,7 +1095,8 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     answer
   }, list(
            .link = link, .earg = earg,
-           .reverse = reverse, .mv = mv ))),
+           .reverse = reverse,
+           .multiple.responses = multiple.responses ))),
   loglikelihood =
     function(mu, y, w, residuals = FALSE, eta, extra = NULL,
              summation = TRUE) {
@@ -1124,7 +1128,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
   deriv = eval(substitute(expression({
     mu.use <- pmax(mu, .Machine$double.eps * 1.0e-0)
     deriv.answer <-
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       NOS <- extra$NOS
       Llevels <- extra$Llevels
       dcump.deta <- resmat <- matrix(0, n, NOS * (Llevels-1))
@@ -1149,9 +1153,9 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     deriv.answer
   }), list( .link = link, .earg = earg,
             .reverse = reverse,
-            .mv = mv ))),
+            .multiple.responses = multiple.responses ))),
   weight = eval(substitute(expression({
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       NOS <- extra$NOS
       Llevels <- extra$Llevels
       wz <- matrix(0, n, NOS*(Llevels-1))  # Diagonal elts only for a start
@@ -1193,7 +1197,7 @@ dmultinomial <- function(x, size = NULL, prob, log = FALSE,
     }
     wz
   }), list( .earg = earg, .link = link,
-            .mv = mv ))))
+            .multiple.responses = multiple.responses ))))
 }
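
# Hedged migration note for the renamed 'mv' argument, e.g. with
# placeholder objects ymat, x2 and mydat:
#   vglm(ymat ~ x2, cumulative(multiple.responses = TRUE, reverse = TRUE),
#        data = mydat)  # formerly cumulative(mv = TRUE, ...)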
 
 
@@ -2466,9 +2470,9 @@ ordpoissonProbs <- function(extra, mu, deriv = 0) {
     }
     } else {
 
-    if (is.logical(is.multivariateY <- object@misc$mv) &&
+    if (is.logical(is.multivariateY <- object@misc$multiple.responses) &&
         is.multivariateY)
-      stop("cannot handle cumulative(mv = TRUE)")
+      stop("cannot handle cumulative(multiple.responses = TRUE)")
     reverse <- object at misc$reverse
     linkfunctions <- object at misc$link
     all.eargs  <- object at misc$earg
diff --git a/R/family.censored.R b/R/family.censored.R
index 49e67a8..e472d0e 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.circular.R b/R/family.circular.R
index 35fcd43..e597e04 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -33,25 +33,45 @@ dcard <- function(x, mu, rho, log = FALSE) {
 }
 
 
-pcard <- function(q, mu, rho) {
-  if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
-    stop("'mu' must be between 0 and 2*pi inclusive")
-  if (!is.Numeric(rho) || max(abs(rho) > 0.5))
-    stop("'rho' must be between -0.5 and 0.5 inclusive")
-  ans <- (q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
-  ans[q >= (2*pi)] <- 1
-  ans[q <= 0] <- 0
+
+pcard <- function(q, mu, rho, lower.tail = TRUE, log.p = FALSE) {
+  
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log((q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi))
+      ans[q <= 0] <- -Inf
+      ans[q >= (2*pi)] <- 0
+    } else {
+      ans <- (q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
+      ans[q <= 0] <- 0
+      ans[q >= (2*pi)] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log1p(-(q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi))
+      ans[q <= 0] <- 0
+      ans[q >= (2*pi)]  <- -Inf
+    } else {
+      ans <- (2*pi - q - 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
+      ans[q <= 0] <- 1
+      ans[q >= (2*pi)] <- 0
+    }
+  } 
+  ans[mu < 0 | mu > 2*pi] <- NaN  # A warning() may be a good idea here
+  ans[abs(rho) > 0.5] <- NaN
   ans
 }
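
# Endpoint and tail-symmetry checks for the rewritten cardioid CDF;
# mu = pi and rho = 0.25 are arbitrary valid parameter values.
stopifnot(pcard(0, mu = pi, rho = 0.25) == 0,
          pcard(2*pi, mu = pi, rho = 0.25) == 1,
          all.equal(pcard(1, pi, 0.25),
                    1 - pcard(1, pi, 0.25, lower.tail = FALSE)))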
 
 
 
-qcard <- function(p, mu, rho, tolerance=1.0e-7, maxits=500) {
-  if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
-    stop("'mu' must be between 0 and 2*pi inclusive")
-  if (!is.Numeric(rho) || max(abs(rho) > 0.5))
-    stop("'rho' must be between -0.5 and 0.5 inclusive")
-  if (!is.Numeric(p, positive = TRUE) || any(p > 1))
+qcard <- function(p, mu, rho, tolerance = 1.0e-7, maxits = 500,
+                  lower.tail = TRUE, log.p = FALSE) {
+  if (isTRUE(log.p)) {  # p is on the log scale, so -Inf..0 is valid
+    if (any(is.na(p) | p > 0))
+      stop("bad input for argument 'p'")
+  } else {
+    if (!is.Numeric(p) || any(p < 0) || any(p > 1))
+      stop("'p' must be between 0 and 1")
+  }
 
   nn <- max(length(p), length(mu), length(rho))
@@ -60,19 +80,73 @@ qcard <- function(p, mu, rho, tolerance=1.0e-7, maxits=500) {
   if (length(rho) != nn) rho <- rep(rho, len = nn)
 
 
-  oldans <- 2 * pi * p
-
-  for (its in 1:maxits) {
-    ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
-           2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
-    index <- (ans <= 0) | (ans > 2*pi)
-    if (any(index)) {
-      ans[index] <- runif (sum(index), 0, 2*pi)
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      oldans <- 2 * pi * exp(ln.p)  # initialise once, before the loop
+      for (its in 1:maxits) {
+        ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
+                           2*pi*exp(ln.p)) / (1 + 2 * rho * cos(oldans - mu))
+        index <- (ans < 0) | (ans > 2*pi)  # 20141216 KaiH  Remove ans == 0
+        if (any(index)) {
+          ans[index] <- runif(sum(index), 0, 2*pi)
+        }
+        if (max(abs(ans - oldans)) < tolerance) break;
+        if (its == maxits) {warning("did not converge"); break}
+        oldans <- ans
+      }
+    } else {
+      oldans <- 2 * pi * p  # initialise once, before the loop
+      for (its in 1:maxits) {
+        ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
+               2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
+        index <- (ans < 0) | (ans > 2*pi)  # 20141216 KaiH  Remove ans == 0
+        if (any(index)) {
+          ans[index] <- runif(sum(index), 0, 2*pi)
+        }
+        if (max(abs(ans - oldans)) < tolerance) break;
+        if (its == maxits) {warning("did not converge"); break}
+        oldans <- ans
+      }
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      oldans <- - 2 * pi * expm1(ln.p)  # initialise once, before the loop
+      for (its in 1:maxits) {
+        ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) +
+               2*pi*expm1(ln.p)) / (1 + 2 * rho * cos(oldans - mu))
+        index <- (ans < 0) | (ans > 2*pi)
+        if (any(index)) {
+          ans[index] <- runif(sum(index), 0, 2*pi)
+        }
+        if (max(abs(ans - oldans)) < tolerance) break;
+        if (its == maxits) {warning("did not converge"); break}
+        oldans <- ans
+      }
+    } else { 
+      oldans <- 2 * pi - 2 * pi * p  # initialise once, before the loop
+      for (its in 1:maxits) {
+        ans <- oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
+               2*pi + 2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
+        index <- (ans < 0) | (ans > 2*pi)  
+        if (any(index)) {
+          ans[index] <- runif(sum(index), 0, 2*pi)
+        }
+        if (max(abs(ans - oldans)) < tolerance) break;
+        if (its == maxits) {warning("did not converge"); break}
+        oldans <- ans
       }
-    if (max(abs(ans - oldans)) < tolerance) break;
-    if (its == maxits) {warning("did not converge"); break}
-    oldans <- ans
+    }
   }
+
+  ans[mu < 0 | mu > 2*pi] <- NaN  # A warning() may be a good idea here
+  ans[abs(rho) > 0.5] <- NaN
   ans
 }
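
# The loops above are plain Newton-Raphson on
# x + 2*rho*(sin(x-mu) + sin(mu)) - 2*pi*p = 0, whose derivative is
# 1 + 2*rho*cos(x - mu); iterates escaping (0, 2*pi) are restarted
# uniformly at random, so exact reproducibility needs set.seed().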
 
@@ -98,15 +172,15 @@ rcard <- function(n, mu, rho, ...) {
 
 
 
-cardioid.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+cardioid.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
 
  cardioid <- function(
-     lmu  = elogit(min = 0, max = 2*pi),
-     lrho = elogit(min = -0.5, max = 0.5),
+     lmu  = extlogit(min = 0, max = 2*pi),
+     lrho = extlogit(min = -0.5, max = 0.5),
      imu = NULL, irho = 0.3,
      nsimEIM = 100, zero = NULL) {
 
@@ -272,7 +346,7 @@ cardioid.control <- function(save.weight = TRUE, ...) {
 
 
 
- vonmises <- function(llocation = elogit(min = 0, max = 2*pi),
+ vonmises <- function(llocation = extlogit(min = 0, max = 2*pi),
                       lscale  = "loge",
                       ilocation = NULL, iscale  = NULL,
                       imethod = 1, zero = NULL) {
diff --git a/R/family.exp.R b/R/family.exp.R
index aa2ceae..b5297f1 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -13,7 +13,21 @@
 
 
 
-qeunif <- function(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6) {
+qeunif <- function(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)   # 20150102 KaiH
+  
+  if (lower.tail) {
+    if (log.arg)
+      p <- exp(p)
+  } else {
+    p <- if (log.arg) -expm1(p) else 1 - p
+  }
+
 
   ppp <- p
   vsmallno <- sqrt(.Machine$double.eps)
@@ -52,25 +66,44 @@ qeunif <- function(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6) {
 }
 
 
-peunif <- function(q, min = 0, max = 1, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+
+peunif <- function(q, min = 0, max = 1,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
   if (any(min >= max))
     stop("argument 'min' has values greater or equal to argument 'max'")
 
   eee <- (q - min) / (max - min)
-  if (log.arg) {
-    logGofy <- 2 * log(eee) - log1p(2 * eee * (eee - 1))
-    logGofy[eee < 0] <- -Inf
-    logGofy[eee > 1] <- 0.0
-    logGofy
+
+  if (lower.tail) {
+    if (log.p) {
+      Gofy <- -log1p((1/eee - 1)^2)
+      Gofy[eee < 0] <- -Inf
+      Gofy[eee > 1] <- 0.0
+    } else {
+      # Equivalent form (KaiH): eee^2 / (exp(2*log1p(-eee)) + eee^2)
+      Gofy <- 1 / (1 + (1/eee - 1)^2)
+      Gofy[eee < 0] <- 0.0
+      Gofy[eee > 1] <- 1.0
+    }
   } else {
-    Gofy <- eee^2 / (1 + 2 * eee * (eee - 1))
-    Gofy[eee < 0] <- 0.0
-    Gofy[eee > 1] <- 1.0
-    Gofy
+    if (log.p) {
+      Gofy <- 2*log1p(-eee) - log(exp(2*log1p(-eee)) + eee^2)
+      Gofy[eee < 0] <- 0.0
+      Gofy[eee > 1] <- -Inf
+    } else {
+      Gofy <- exp(2*log1p(-eee)) / (exp(2*log1p(-eee)) + eee^2)
+      Gofy[eee < 0] <- 1
+      Gofy[eee > 1] <- 0
+    }
   }
+  Gofy
 }
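
# Spot check of the tail/log combinations at an interior point of the
# default unit interval (assumes peunif() as defined above):
stopifnot(all.equal(peunif(0.3), exp(peunif(0.3, log.p = TRUE))),
          all.equal(peunif(0.3), 1 - peunif(0.3, lower.tail = FALSE)))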
 
 
@@ -93,6 +126,7 @@ deunif <- function(x, min = 0, max = 1, log = FALSE) {
     gunif <- function(y)
         as.numeric(y >= 0 & y <= 1) * 2*y*(1-y) / (2*y*(1-y) - 1)^2
     ans <- gunif(eee) / (max - min)
+    ans[is.infinite(x)] <- 0  # 20141209 KaiH
   }
   ans
 }
@@ -109,8 +143,16 @@ reunif <- function(n, min = 0, max = 1) {
 
 
 
-qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10,
-                   Tol.nr = 1.0e-6) {
+
+
+qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
+                   lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
   ppp <- p
   if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
       Tol.nr > 0.10)
@@ -118,50 +160,103 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit.nr = 10,
          "positive value, or is too large")
   nrok <- is.finite(ppp)
 
-  eee <-  qnorm(ppp, sd = 2/3)
+  eee <- qnorm(ppp, sd = 2/3, lower.tail = lower.tail, log.p = log.p)
+
 
 
   gnorm <- function(y) dnorm(y) / (y * (1-2*pnorm(y)) - 2*dnorm(y))^2
 
+
   for (iii in 1:Maxit.nr) {
-    realdiff <- (penorm(eee[nrok]) - ppp[nrok]) / gnorm(eee[nrok])
+    if (lower.tail) {
+      realdiff <- if (log.p) {
+        ln.ppp <- ppp
+        (penorm(eee[nrok]) - exp(ln.ppp[nrok])) / gnorm(eee[nrok])
+      } else {
+        (penorm(eee[nrok]) - ppp[nrok]) / gnorm(eee[nrok])
+      }
+    } else {
+      realdiff <- if (log.p) {
+        ln.ppp <- ppp
+        (penorm(eee[nrok]) + expm1(ln.ppp[nrok])) / gnorm(eee[nrok])
+      } else {
+        (penorm(eee[nrok]) + expm1(log(ppp[nrok]))) / gnorm(eee[nrok])
+      }
+    }
     eee[nrok] <- eee[nrok] - realdiff
-    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol.nr )) break
-    if (iii == Maxit.nr) warning("did not converge")
+    if (all(abs(realdiff) / (1.0 + abs(realdiff)) < Tol.nr ))
+      break
+    if (iii == Maxit.nr)
+      warning("did not converge")
   }
 
+
+
   if (max(abs(penorm(eee[nrok]) - ppp[nrok])) > Tol.nr)
     warning("did not converge on the second check")
 
-  eee[ppp == 0] <- -Inf
-  eee[ppp == 1] <-  Inf
-  eee[ppp <  0] <- NA
-  eee[ppp >  1] <- NA
+
+  if (lower.tail) {
+    if (log.p) {
+      eee[ln.ppp > 0] <- NaN
+    } else {
+      eee[ppp == 0] <- -Inf
+      eee[ppp == 1] <-  Inf
+      eee[ppp <  0] <- NaN
+      eee[ppp >  1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      eee[ln.ppp > 0] <- NaN
+    } else { 
+      eee[ppp == 0] <- Inf
+      eee[ppp == 1] <- -Inf
+      eee[ppp <  0] <- NaN
+      eee[ppp >  1] <- NaN
+    }
+  }
   eee * ifelse(sd >= 0, sd, NaN) + mean
 }
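
# As in qeunif(), the loop above is Newton-Raphson: penorm() supplies
# the target CDF value, gnorm() the derivative used in the update, and
# qnorm(p, sd = 2/3) the starting value; only the finite entries of p
# (those flagged in nrok) are iterated.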
 
 
-penorm <- function(q, mean = 0, sd = 1, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+
+penorm <- function(q, mean = 0, sd = 1,
+                   lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   eee <- (q - mean) / sd
   tmp1 <- -dnorm(eee) - eee * pnorm(eee)
-  if (log.arg) {
-    logGofy <- log(tmp1) - log(2 * tmp1 + eee)
-    logGofy[eee <= -Inf] <- -Inf
-    logGofy[eee >=  Inf] <- 0.0
-    logGofy
+
+  if (lower.tail) {
+    if (log.p) {
+      Gofy <- log(tmp1 / (2 * tmp1 + eee))
+      Gofy[eee <= -Inf] <- -Inf
+      Gofy[eee >=  Inf] <- 0
+    } else {
+      Gofy <- tmp1 / (2 * tmp1 + eee)
+      Gofy[eee <= -Inf] <- 0.0
+      Gofy[eee >=  Inf] <- 1.0
+    }
   } else {
-    Gofy <- tmp1 / (2 * tmp1 + eee)
-    Gofy[eee <= -Inf] <- 0.0
-    Gofy[eee >=  Inf] <- 1.0
-    Gofy
+    if (log.p) {
+      Gofy <- log((tmp1 + eee) / (2 * tmp1 + eee))
+      Gofy[eee <= -Inf] <- 0
+      Gofy[eee >=  Inf] <- -Inf
+    } else {
+      Gofy <- (tmp1 + eee) / (2 * tmp1 + eee)
+      Gofy[eee <= -Inf] <- 1
+      Gofy[eee >=  Inf] <- 0
+    }
   }
+  Gofy
 }
 
 
+
 denorm <- function(x, mean = 0, sd = 1, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
@@ -193,7 +288,18 @@ renorm <- function(n, mean = 0, sd = 1) {
 
 
 
-qeexp <- function(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6) {
+qeexp <- function(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
+                  lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)   # 20150102 KaiH
+  
+  if (lower.tail) {
+    if (log.arg) p <- exp(p)
+  } else {
+    p <- if (log.arg) -expm1(p) else 1 - p
+  }
+  
   ppp <- p
   vsmallno <- sqrt(.Machine$double.eps)
   if (!is.Numeric( Tol.nr, length.arg = 1, positive = TRUE) ||
@@ -230,25 +336,39 @@ qeexp <- function(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6) {
 }
 
 
-peexp <- function(q, rate = 1, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+
+peexp <- function(q, rate = 1, lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   eee <- q * rate
-  if (log.arg) {
-    tmp1 <- -expm1(-eee) - eee
-    logGofy <- log1p(- eee - exp(-eee)) - log(2 * tmp1 + eee - 1.0)
-    logGofy[eee <    0] <- log(0.0)
-    logGofy[eee >= Inf] <- log(1.0)
-    logGofy
+  tmp1 <- -expm1(-eee) - eee
+
+  if (lower.tail) {
+    if (log.p) {
+      Gofy <- log(-tmp1) - log(expm1(-eee) + exp(-eee) + eee)
+      Gofy[eee < 0] <- -Inf
+      Gofy[eee == Inf] <- 0
+    } else {
+      Gofy <- tmp1 / (-expm1(-eee) - exp(-eee) - eee)
+      Gofy[eee < 0] <- 0
+      Gofy[eee == Inf] <- 1
+    }
   } else {
-    tmp1 <- -expm1(-eee) - eee
-    Gofy <- tmp1 / (2 * tmp1 + eee - 1.0)
-    Gofy[eee <    0] <- 0.0
-    Gofy[eee >= Inf] <- 1.0
-    Gofy
+    if (log.p) {
+      Gofy <- -eee - log(expm1(-eee) + exp(-eee) + eee)
+      Gofy[eee < 0] <- 0
+      Gofy[eee == Inf] <- -Inf
+    } else {
+      Gofy <- exp(-eee) / (expm1(-eee) + exp(-eee) + eee)
+      Gofy[eee < 0] <- 1
+      Gofy[eee == Inf] <- 0
+    }
   }
+  Gofy
 }
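
# Spot check at q = 1 with unit rate: the two tails must sum to one and
# the log form must agree with the plain one.
stopifnot(all.equal(peexp(1) + peexp(1, lower.tail = FALSE), 1),
          all.equal(peexp(1, log.p = TRUE), log(peexp(1))))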
 
 
@@ -264,11 +384,13 @@ deexp <- function(x, rate = 1, log = FALSE) {
 
   if (log.arg) {
     ans <- log(eee) - eee + 2.0 * log(abs((1-eee) - 2 * exp(-eee))) + log(rate)
+    ans[is.infinite(x)] <- log(0)
   } else {
     gexp <- function(y)
       as.numeric(y >= 0) * y * exp(-y) / ((1-y) - 2 * exp(-y))^2
     ans <- gexp(eee) * rate
     ans[rate <=  0.0] <- NaN
+    ans[is.infinite(x)] <- 0
   }
   ans
 }
@@ -299,33 +421,89 @@ dsc.t2 <- function(x, location = 0, scale = 1, log = FALSE) {
 
 
 
-psc.t2 <- function(q, location = 0, scale = 1, log = FALSE) {
-  if (!is.logical(log.arg <- log) || length(log) != 1)
-    stop("bad input for argument 'log'")
-  rm(log)
+
+
+psc.t2 <- function(q, location = 0, scale = 1,
+                   lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   zedd <- (q - location) / scale
   zedd[scale <= 0] <- NaN
 
-  if (log.arg) {
-    -log(2) + log1p(zedd / sqrt(4 + zedd^2))
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(0.5) + log1p(zedd / sqrt(4 + zedd^2))
+      ans[q == -Inf] <- log(0)
+      ans[q == Inf] <- log(1)
+    } else {
+      ans <- 0.5 * (1 + zedd / sqrt(4 + zedd^2))
+      ans[q == -Inf] <- 0
+      ans[q == Inf] <- 1
+    }
   } else {
-    0.5 * (1 + zedd / sqrt(4 + zedd^2))
+    if (log.p) {
+      ans <- log(0.5) + log1p(-zedd / sqrt(4 + zedd^2))
+      ans[q == -Inf] <- log(1)
+      ans[q == Inf] <- log(0)
+    } else {
+      ans <- 0.5 * exp(log1p(-zedd / sqrt(4 + zedd^2)))
+      ans[q == -Inf] <- 1 
+      ans[q == Inf] <- 0 
+    }
   }
+  ans 
 }
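
# Identity check: this is a Student t with 2 degrees of freedom
# rescaled by sqrt(2), so the CDF should satisfy
stopifnot(all.equal(psc.t2(1.5), pt(1.5 / sqrt(2), df = 2)))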
 
 
 
 
-qsc.t2 <- function(p, location = 0, scale = 1) {
 
-  answer <- -2 * (1 - 2*p) / sqrt(1 - (1 - 2*p)^2)
-  answer[p  < 0] <- NaN
-  answer[p  > 1] <- NaN
-  answer[p == 0] <- -Inf
-  answer[p == 1] <- +Inf
 
-  answer <- answer * scale + location
+qsc.t2 <- function(p, location = 0, scale = 1,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- exp(0.5*(ln.p - log(-expm1(ln.p)))) -
+             exp(0.5*(log(-expm1(ln.p)) - ln.p))
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- exp(0.5*(log(p) - log1p(-p))) -
+             exp(0.5*(log1p(-p) - log(p)))
+      ans[p < 0] <- NaN
+      ans[p == 0] <- -Inf
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- exp(0.5*(log(-expm1(ln.p)) - ln.p)) -
+             exp(0.5*(ln.p - log(-expm1(ln.p))))
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- exp(0.5*(log1p(-p) - log(p))) -
+             exp(0.5*(log(p) - log1p(-p)))
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- -Inf
+      ans[p > 1] <- NaN
+    }
+  }
+
+  answer <- ans * scale + location
   answer[scale <= 0] <- NaN
   answer
 }
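
# Round-trip check: the closed form above (sqrt(p/(1-p)) - sqrt((1-p)/p)
# on the probability scale) exactly inverts psc.t2().
stopifnot(all.equal(qsc.t2(psc.t2(0.7)), 0.7))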
@@ -343,6 +521,8 @@ rsc.t2 <- function(n, location = 0, scale = 1) {
 
 
 
+
+
  sc.studentt2 <- function(percentile = 50,
                      llocation = "identitylink", lscale = "loge",
                      ilocation = NULL,   iscale = NULL,
diff --git a/R/family.extremes.R b/R/family.extremes.R
index 303fc44..4d20d5e 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -99,76 +99,111 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
   }
 
   logdensity[scale <= 0] <- NaN
+
+  logdensity[is.infinite(x)] <- log(0)  # 20141209 KaiH
+
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
 
-pgev <- function(q, location = 0, scale = 1, shape = 0) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(location))
-    stop("bad input for argument 'location'")
-  if (!is.Numeric(shape))
-    stop("bad input for argument 'shape'")
+pgev <- function(q, location = 0, scale = 1, shape = 0,
+                 lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
 
   use.n <- max(length(q), length(location), length(scale), length(shape))
-  ans <- numeric(use.n)
   if (length(shape)    != use.n)
     shape    <- rep(shape,        length.out = use.n)
   if (length(location) != use.n)
-    location <- rep(location,     length.out = use.n); 
+    location <- rep(location,     length.out = use.n)
   if (length(scale)    != use.n)
     scale    <- rep(scale,        length.out = use.n)
   if (length(q)        != use.n)
     q        <- rep(q,            length.out = use.n)
 
-  scase <- abs(shape) < sqrt( .Machine$double.eps )
-  nscase <- sum(scase)
+  scase0 <- abs(shape) < sqrt( .Machine$double.eps )  # Effectively 0
   zedd <- (q - location) / scale
-  if (use.n - nscase) {
-    use.zedd <- pmax(0, 1 + shape * zedd)
-    ans[!scase] <- exp(-use.zedd[!scase]^(-1 / shape[!scase]))
+  use.zedd <- pmax(0, 1 + shape * zedd)
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- -use.zedd^(-1 / shape)
+    } else {
+      ans <- exp(-use.zedd^(-1 / shape))
+    }
+  } else {
+    if (log.p) {
+      ans <- log(-expm1(-use.zedd^(-1 / shape)))
+    } else {
+      ans <- -expm1(-use.zedd^(-1 / shape))
+    }
   }
-  if (nscase) {
-    ans[scase] <- pgumbel(q[scase], location = location[scase],
-                          scale = scale[scase])
+
+  if (any(scase0)) {
+    ans[scase0] <- pgumbel(q[scase0], location = location[scase0],
+                           scale = scale[scase0],
+                           lower.tail = lower.tail, log.p = log.p)
   }
+
   ans[scale <= 0] <- NaN
   ans
 }
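
# Note on the branching above: when |shape| is below
# sqrt(.Machine$double.eps) the computation is handed to pgumbel(), the
# shape -> 0 limit of the GEV, for all four tail/log combinations.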
 
 
 
-qgev <- function(p, location = 0, scale = 1, shape = 0) {
-  if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
-    stop("0 < p < 1 is required")
-  if (!is.Numeric(location))
-    stop("bad input for argument 'location'")
-  if (!is.Numeric(shape))
-    stop("bad input for argument 'shape'")
+qgev <- function(p, location = 0, scale = 1, shape = 0,
+                 lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
 
   use.n <- max(length(p), length(location), length(scale), length(shape))
-  ans <- numeric(use.n)
   if (length(shape)    != use.n)
     shape    <- rep(shape,        length.out = use.n)
   if (length(location) != use.n)
-    location <- rep(location,     length.out = use.n); 
+    location <- rep(location,     length.out = use.n)
   if (length(scale)    != use.n)
     scale    <- rep(scale,        length.out = use.n)
   if (length(p)        != use.n)
     p        <- rep(p,            length.out = use.n)
 
 
-  scase <- abs(shape) < sqrt( .Machine$double.eps )
-  nscase <- sum(scase)
-  if (use.n - nscase) {
-    ans[!scase] <- location[!scase] + scale[!scase] *
-        ((-log(p[!scase]))^(-shape[!scase]) - 1) / shape[!scase]
+  scase0 <- abs(shape) < sqrt( .Machine$double.eps )
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- location + scale * ((-ln.p)^(-shape) - 1) / shape
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- location + scale * ((-log(p))^(-shape) - 1) / shape
+      ans[p <  0] <- NaN
+      ans[p == 1] <-  Inf
+      ans[p >  1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- location + scale * ((-log1p(-exp(ln.p)))^(-shape) - 1) / shape
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- location + scale * ((-log1p(-p))^(-shape) - 1) / shape
+      ans[p == 1] <-  Inf
+      ans[p >  1] <- NaN
+      ans[p <  0] <- NaN
+    }
   }
-  if (nscase)
-    ans[scase] <- qgumbel(p[scase], location = location[scase],
-                          scale = scale[scase])
+
+  if (any(scase0))
+    ans[scase0] <- qgumbel(p[scase0], location = location[scase0],
+                           scale = scale[scase0],
+                           lower.tail = lower.tail, log.p = log.p)
   ans[scale <= 0] <- NaN
   ans
 }
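
# Consistency sketch for the default shape = 0 (handled via qgumbel()):
# the two tails coincide at the median.
stopifnot(all.equal(qgev(0.5), qgumbel(0.5)),
          all.equal(qgev(0.5), qgev(0.5, lower.tail = FALSE)))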
@@ -313,7 +348,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
         rep( .ishape, length.out = nrow(y)) else NULL
       LIST.lshape <- .lshape
 
-      if ( .lshape == "elogit" && length(init.xi) &&
+      if ( .lshape == "extlogit" && length(init.xi) &&
           (any(init.xi <= LIST.lshape$min |
                init.xi >= LIST.lshape$max)))
           stop("bad input for an argument in 'lshape'")
@@ -758,7 +793,7 @@ dgammadx <- function(x, deriv.arg = 1) {
       init.xi  <- if (length( .ishape ))
                   rep( .ishape , length.out = length(y)) else NULL
       eshape <- .eshape
-      if ( .lshape == "elogit" && length(init.xi) && 
+      if ( .lshape == "extlogit" && length(init.xi) && 
          (any(init.xi <= eshape$min | init.xi >= eshape$max)))
         stop("bad input for argument 'eshape'")
       if ( .imethod == 1) {
@@ -989,12 +1024,7 @@ dgammadx <- function(x, deriv.arg = 1) {
 
 
 rgumbel <- function(n, location = 0, scale = 1) {
-  use.n <- if ((length.n <- length(n)) > 1) length.n else
-           if (!is.Numeric(n, integer.valued = TRUE,
-                           length.arg = 1, positive = TRUE))
-              stop("bad input for argument 'n'") else n
-
-  answer <- location - scale * log(-log(runif(use.n)))
+  answer <- location - scale * log(-log(runif(n)))
   answer[scale <= 0] <- NaN
   answer
 }
@@ -1007,25 +1037,77 @@ dgumbel <- function(x, location = 0, scale = 1, log = FALSE) {
 
   zedd <- (x - location) / scale
   logdensity <- -zedd - exp(-zedd) - log(scale)
+  logdensity[is.infinite(x)] <- log(0)  # 20141209 KaiH
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-qgumbel <- function(p, location = 0, scale = 1) {
-  answer <- location - scale * log(-log(p))
-  answer[scale <= 0] <- NaN
-  answer[p <  0] <- NaN
-  answer[p >  1] <- NaN
-  answer[p == 0] <- -Inf
-  answer[p == 1] <-  Inf
-  answer
+
+qgumbel <- function(p, location = 0, scale = 1,
+                    lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- location - scale * log(-ln.p)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- location - scale * log(-log(p))
+      ans[p == 0] <- -Inf
+      ans[p == 1] <-  Inf
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- location - scale * log(-log(-expm1(ln.p)))
+      ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- location - scale * log(-log1p(-p))
+      ans[p == 0] <-  Inf
+      ans[p == 1] <- -Inf
+    }
+  }
+  ans[scale <= 0] <- NaN
+  ans
 }
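
# Round trip through the new arguments; location = 1 and scale = 2 are
# arbitrary valid values.
stopifnot(all.equal(pgumbel(qgumbel(0.3, 1, 2), 1, 2), 0.3),
          all.equal(qgumbel(log(0.3), 1, 2, log.p = TRUE),
                    qgumbel(0.3, 1, 2)))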
 
 
-pgumbel <- function(q, location = 0, scale = 1) {
-  answer <- exp(-exp(-(q - location) / scale))
-  answer[scale <= 0] <- NaN
-  answer
+
+pgumbel <- function(q, location = 0, scale = 1,
+                    lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ans <- -exp(-(q - location) / scale)
+      ans[q <= -Inf] <- -Inf
+      ans[q ==  Inf] <- 0
+    } else {
+      ans <- exp(-exp(-(q - location) / scale))
+      ans[q <= -Inf] <- 0
+      ans[q ==  Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log(-expm1(-exp(-(q - location) / scale)))
+      ans[q <= -Inf] <- 0
+      ans[q ==  Inf] <- -Inf
+    } else {
+      ans <- -expm1(-exp(-(q - location) / scale))
+      ans[q <= -Inf] <- 1
+      ans[q ==  Inf] <- 0
+    }
+  } 
+
+  ans[scale <= 0] <- NaN
+  ans
 }
 
 
@@ -1300,8 +1382,6 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
 
 
 
-
-
   logdensity <- rep(log(0), length.out = L)
   scase <- abs(shape) < tolshape0
   nscase <- sum(scase)
@@ -1339,13 +1419,15 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
 
 
 
-pgpd <- function(q, location = 0, scale = 1, shape = 0) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(location))
-    stop("bad input for argument 'location'")
-  if (!is.Numeric(shape))
-    stop("bad input for argument 'shape'")
+pgpd <- function(q, location = 0, scale = 1, shape = 0,
+                 lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
 
   use.n <- max(length(q), length(location), length(scale), length(shape))
 
@@ -1363,24 +1445,44 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0) {
   use.zedd <- pmax(zedd, 0)
 
 
-  scase <- abs(shape) < sqrt( .Machine$double.eps )
-  nscase <- sum(scase)
-  if (use.n - nscase) {
+  scase0 <- abs(shape) < sqrt( .Machine$double.eps )
+  nscase0 <- sum(scase0)
+  if (use.n - nscase0) {
     ans <- 1 - pmax(1 + shape * use.zedd, 0)^(-1/shape)
   }
-  if (nscase) {
+  if (nscase0) {
     pos <- (zedd >= 0)
-    ind9 <- ( pos & scase)
+    ind9 <- ( pos & scase0)
     ans[ind9] <-  -expm1(-use.zedd[ind9])
-    ind9 <- (!pos & scase)
+    ind9 <- (!pos & scase0)
     ans[ind9] <- 0
   }
   ans[scale <= 0] <- NaN
-  ans
+
+  if (lower.tail) {
+    if (log.p) log(ans) else ans
+  } else {
+    if (log.p) log1p(-ans) else 1-ans
+  }
 }
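
# Note: here the tail/log transforms are applied to the completed
# lower-tail probability, so log1p(-ans) can lose accuracy deep in the
# upper tail, where ans is within rounding error of 1.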
 
 
-qgpd <- function(p, location = 0, scale = 1, shape = 0) {
+
+qgpd <- function(p, location = 0, scale = 1, shape = 0,
+                 lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)
+  
+  if (lower.tail) {
+    if (log.arg) p <- exp(p)
+  } else {
+    p <- if (log.arg) -expm1(p) else 1 - p
+  }
 
   use.n <- max(length(p), length(location), length(scale), length(shape))
 
@@ -1388,7 +1490,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
   if (length(shape)    != use.n)
     shape    <- rep(shape,        length.out = use.n)
   if (length(location) != use.n)
-    location <- rep(location,     length.out = use.n); 
+    location <- rep(location,     length.out = use.n)
   if (length(scale)    != use.n)
     scale    <- rep(scale,        length.out = use.n)
   if (length(p)        != use.n)
@@ -2283,29 +2385,76 @@ dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
 }
 
 
-pfrechet <- function(q, location = 0, scale = 1, shape) {
-  if (!is.Numeric(scale, positive = TRUE))
-    stop("scale must be positive")
-  if (!is.Numeric(shape, positive = TRUE))
-    stop("shape must be positive")
+
+pfrechet <- function(q, location = 0, scale = 1, shape,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
   rzedd <- scale / (q - location)
-  ans <- exp(-(rzedd^shape))
-  ans[q <= location] <- 0
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- -(rzedd^shape)
+      ans[q <= location] <- -Inf
+    } else {
+      ans <- exp(-(rzedd^shape))
+      ans[q <= location] <- 0
+    }
+  } else {
+    if (log.p) {
+      ans <- log(-expm1(-(rzedd^shape)))
+      ans[q <= location] <- 0
+    } else {
+      ans <- -expm1(-(rzedd^shape)) 
+      ans[q <= location] <- 1
+    }
+  }
   ans
 }
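
# Endpoint and tail checks; shape = 2 is an arbitrary positive value.
stopifnot(pfrechet(0, shape = 2) == 0,
          all.equal(pfrechet(3, shape = 2) +
                    pfrechet(3, shape = 2, lower.tail = FALSE), 1))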
 
 
-qfrechet <- function(p, location = 0, scale = 1, shape) {
-  if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
-    stop("0 < p < 1 is required")
-  if (!is.Numeric(scale, positive = TRUE))
-    stop("scale must be positive")
-  if (!is.Numeric(shape, positive = TRUE))
-    stop("shape must be positive")
-  location + scale * (-log(p))^(-1/shape)
+
+qfrechet <- function(p, location = 0, scale = 1, shape,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- location + scale * (-ln.p)^(-1 / shape)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- location + scale * (-log(p))^(-1 / shape)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- location
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- location + scale * (-log(-expm1(ln.p)))^(-1 / shape)
+      ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- location + scale * (-log1p(-p))^(-1 / shape)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- location
+      ans[p > 1] <- NaN
+    }
+  }
+  ans
 }
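
A consistency check one might run against the new arguments (illustrative
only, assuming the definitions above):

    p <- c(0.1, 0.5, 0.9)
    q <- qfrechet(p, scale = 1, shape = 2)
    all.equal(pfrechet(q, scale = 1, shape = 2), p)                     # TRUE
    all.equal(qfrechet(log(p), scale = 1, shape = 2, log.p = TRUE), q)  # TRUE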
 
 
+
 rfrechet <- function(n, location = 0, scale = 1, shape) {
   if (!is.Numeric(scale, positive = TRUE))
     stop("scale must be positive")
@@ -2322,8 +2471,8 @@ rfrechet <- function(n, location = 0, scale = 1, shape) {
 
 
 
-frechet.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+frechet.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -2531,8 +2680,8 @@ frechet.control <- function(save.weight = TRUE, ...) {
 
 
 
-frechet3.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+frechet3.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -2779,8 +2928,8 @@ if (FALSE)
 }
 
 
-rec.normal.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+rec.normal.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -2920,8 +3069,8 @@ rec.normal.control <- function(save.weight = TRUE, ...) {
 
 
 
-rec.exp1.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+rec.exp1.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -3050,10 +3199,12 @@ dpois.points <- function(x, lambda, ostatistic,
   ans2 <- log(2) + ostatistic * log(pi * lambda) -
           lgamma(ostatistic) + (2 * ostatistic - 1) * log(x) -
           lambda * pi * x^2
+  ans2[x < 0 | is.infinite(x)] <- log(0)  # 20141209 KaiH
 
   ans3 <- log(3) + ostatistic * log(4 * pi * lambda / 3) -
           lgamma(ostatistic) + (3 * ostatistic - 1) * log(x) -
           (4/3) * lambda * pi * x^3
+  ans3[x < 0 | is.infinite(x)] <- log(0)  # 20141209 KaiH
 
   ans <- ifelse(dimension == 2, ans2, ans3)
 
@@ -3062,6 +3213,7 @@ dpois.points <- function(x, lambda, ostatistic,
 }
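
The two new guards assign zero density (log(0) = -Inf on the log scale) to
points outside the support before the 2-D and 3-D answers are merged, so a
call like the following sketch (assuming the default density scale) no longer
propagates NaN from log(x) at negative x:

    dpois.points(c(-1, Inf, 0.5), lambda = 1, ostatistic = 2)
    # the first two entries are now 0; only x = 0.5 gives a positive density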
 
 
+
  poisson.points <-
   function(ostatistic, dimension = 2,
            link = "loge",
diff --git a/R/family.functions.R b/R/family.functions.R
index 99d5ca3..e37a468 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.genetic.R b/R/family.genetic.R
index f3eb6a1..a4aece0 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -13,11 +13,14 @@
 
 
 
- A1A2A3 <- function(link = "logit", inbreeding = TRUE,
+ A1A2A3 <- function(link = "logit",
+                    inbreeding = FALSE,  # HWE assumption is the default
                     ip1 = NULL, ip2 = NULL, iF = NULL) {
 
 
 
+
+
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
@@ -29,22 +32,23 @@
 
   new("vglmff",
   blurb = c("G1-G2-G3 phenotype (",
-            ifelse(inbreeding, "with", "without"),
+            ifelse(inbreeding, "without", "with"),
             " the Hardy-Weinberg equilibrium assumption)\n\n",
             "Links:    ",
             namesof("p1", link, earg = earg, tag = FALSE), ", ", 
             namesof("p2", link, earg = earg, tag = FALSE),
-            if (!inbreeding) paste(",",
+            if (inbreeding) paste(",",
             namesof("f",  link, earg = earg, tag = FALSE)) else
             ""),
   deviance = Deviance.categorical.data.vgam,
   infos = eval(substitute(function(...) {
     list(Q1 = 6,
-         M1 = ifelse( .inbreeding , 2, 3),
+         M1 = ifelse( .inbreeding , 3, 2),
          expected = TRUE,
          multipleResponses = FALSE,
-         link = if ( .inbreeding ) c("p1" = .link , "p2" = .link ) else
-                            c("p1" = .link , "p2" = .link , "f" = .link ))
+         link = if ( .inbreeding )
+                  c("p1" = .link , "p2" = .link , "f" = .link ) else
+                  c("p1" = .link , "p2" = .link  ))
   }, list( .link = link, .inbreeding = inbreeding ))),
 
   initialize = eval(substitute(expression({
@@ -68,8 +72,8 @@
     predictors.names <-
      c(namesof("p1", .link , earg = .earg , tag = FALSE),
        namesof("p2", .link , earg = .earg , tag = FALSE),
-       if ( .inbreeding ) NULL else
-       namesof("f",  .link , earg = .earg , tag = FALSE))
+       if ( .inbreeding )
+       namesof("f",  .link , earg = .earg , tag = FALSE) else NULL)
     mustart <- (y + mustart) / 2
 
 
@@ -100,8 +104,8 @@
       etastart <-
         cbind(theta2eta(p1, .link , earg = .earg ),
               theta2eta(p2, .link , earg = .earg ),
-              if ( .inbreeding ) NULL else
-              theta2eta(ff, .link , earg = .earg ))
+              if ( .inbreeding )
+              theta2eta(ff, .link , earg = .earg ) else NULL)
       mustart <- NULL  # Since etastart has been computed.
 
     }
@@ -111,8 +115,8 @@
   linkinv = eval(substitute(function(eta, extra = NULL) {
     p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
     p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
-    f  <- if ( .inbreeding ) 0 else
-          eta2theta(eta[, 3], link = .link , earg = .earg )
+    f  <- if ( .inbreeding )
+          eta2theta(eta[, 3], link = .link , earg = .earg ) else 0
     p3 <- abs(1 - p1 - p2)
       cbind("A1A1" = f*p1+(1-f)*p1^2,
             "A1A2" = 2*p1*p2*(1-f),
@@ -124,11 +128,11 @@
 
   last = eval(substitute(expression({
     if ( .inbreeding ) {
-      misc$link <-    c(p1 = .link , p2 = .link )
-      misc$earg <- list(p1 = .earg , p2 = .earg )
-    } else {
       misc$link <-    c(p1 = .link , p2 = .link , f = .link )
       misc$earg <- list(p1 = .earg , p2 = .earg , f = .earg )
+    } else {
+      misc$link <-    c(p1 = .link , p2 = .link )
+      misc$earg <- list(p1 = .earg , p2 = .earg )
     }
 
     misc$expected <- TRUE
@@ -146,60 +150,58 @@
     p1 <- eta2theta(eta[, 1], link = .link , earg = .earg )
     p2 <- eta2theta(eta[, 2], link = .link , earg = .earg )
     p3 <- 1-p1-p2
-    f  <- if ( .inbreeding ) 0 else
-          eta2theta(eta[, 3], link = .link , earg = .earg )
+    f  <- if ( .inbreeding )
+          eta2theta(eta[, 3], link = .link , earg = .earg ) else 0
     if ( .inbreeding ) {
-    dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 -
-              (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
-    dl.dp2 <- (2*y[, 3]+y[, 2]+y[,5])/p2 -
-              (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+      dP1 <- cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
+                  0, -2*(1-f)*p2, -f - 2*p3*(1-f))
+      dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
+                   2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
+      dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3, 
+                   p3*(1-p3))
+      dl1 <- rowSums(y * dP1 / mu)
+      dl2 <- rowSums(y * dP2 / mu)
+      dl3 <- rowSums(y * dP3 / mu)
+      dPP.deta <- dtheta.deta(cbind(p1, p2, f),
+                              link = .link , earg = .earg )
+      c(w) * cbind(dPP.deta[, 1] * dl1,
+                   dPP.deta[, 2] * dl2, 
+                   dPP.deta[, 3] * dl3)
+    } else {
+      dl.dp1 <- (2*y[, 1]+y[, 2]+y[, 4])/p1 -
+                (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+      dl.dp2 <- (2*y[, 3]+y[, 2]+y[,5])/p2 -
+                (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
 
-    dp1.deta <- dtheta.deta(p1, link = .link , earg = .earg )
-    dp2.deta <- dtheta.deta(p2, link = .link , earg = .earg )
+      dp1.deta <- dtheta.deta(p1, link = .link , earg = .earg )
+      dp2.deta <- dtheta.deta(p2, link = .link , earg = .earg )
 
-    c(w) * cbind(dl.dp1 * dp1.deta,
-                 dl.dp2 * dp2.deta)
-    } else {
-    dP1 <- cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
-                0, -2*(1-f)*p2, -f - 2*p3*(1-f))
-    dP2 <- cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
-                 2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
-    dP3 <- cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3, 
-                 p3*(1-p3))
-    dl1 <- rowSums(y * dP1 / mu)
-    dl2 <- rowSums(y * dP2 / mu)
-    dl3 <- rowSums(y * dP3 / mu)
-    dPP.deta <- dtheta.deta(cbind(p1, p2, f),
-                            link = .link , earg = .earg )
-    c(w) * cbind(dPP.deta[, 1] * dl1,
-                 dPP.deta[, 2] * dl2, 
-                 dPP.deta[, 3] * dl3)
-  }
+      c(w) * cbind(dl.dp1 * dp1.deta,
+                   dl.dp2 * dp2.deta)
+    }
   }), list( .link = link, .earg = earg, .inbreeding = inbreeding ))),
   weight = eval(substitute(expression({
     if ( .inbreeding ) {
-    qq <- 1-p1-p2
-    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
-    ned2l.dp12  <-  2 * (1/p1 + 1/qq)
-    ned2l.dp22  <-  2 * (1/p2 + 1/qq)
-    ned2l.dp1dp2 <-  2 / qq
-    wz[, iam(1, 1, M)] <- ned2l.dp12 * dp1.deta^2
-    wz[, iam(2, 2, M)] <- ned2l.dp22 * dp2.deta^2
-    wz[, iam(1, 2, M)] <- ned2l.dp1dp2 * dp1.deta * dp2.deta
-    c(w) * wz
+      dPP <- array(c(dP1, dP2, dP3), c(n, 6, 3))
+      wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==6 because M==3
+      for (i1 in 1:M)
+        for (i2 in i1:M) {
+          index <- iam(i1, i2, M)
+          wz[, index] <- rowSums(dPP[, , i1, drop = TRUE] *
+                                 dPP[, , i2, drop = TRUE] / mu) *
+                                 dPP.deta[, i1] * dPP.deta[, i2]
+      }
     } else {
-    dPP <- array(c(dP1, dP2, dP3), c(n, 6, 3))
-
-    wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==6 because M==3
-    for (i1 in 1:M)
-      for (i2 in i1:M) {
-        index <- iam(i1,i2, M)
-        wz[,index] <- rowSums(dPP[, , i1, drop = TRUE] *
-                              dPP[, , i2, drop = TRUE] / mu) *
-                              dPP.deta[, i1] * dPP.deta[, i2]
+      qq <- 1-p1-p2
+      wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
+      ned2l.dp12  <-  2 * (1/p1 + 1/qq)
+      ned2l.dp22  <-  2 * (1/p2 + 1/qq)
+      ned2l.dp1dp2 <-  2 / qq
+      wz[, iam(1, 1, M)] <- ned2l.dp12 * dp1.deta^2
+      wz[, iam(2, 2, M)] <- ned2l.dp22 * dp2.deta^2
+      wz[, iam(1, 2, M)] <- ned2l.dp1dp2 * dp1.deta * dp2.deta
     }
     c(w) * wz
-  }
   }), list( .link = link, .earg = earg, .inbreeding = inbreeding ))))
 }
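
With the flipped default, inbreeding = FALSE fits the two-parameter
Hardy-Weinberg model and inbreeding = TRUE adds the inbreeding coefficient f
as a third linear predictor. A minimal sketch (ymat and gdata are a
hypothetical 6-column genotype-count response and data frame):

    fit.hwe <- vglm(ymat ~ 1, A1A2A3, data = gdata)                     # p1, p2
    fit.inb <- vglm(ymat ~ 1, A1A2A3(inbreeding = TRUE), data = gdata)  # p1, p2, f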
 
@@ -650,18 +652,41 @@ if (FALSE)
 
 
 
- ABO <- function(link = "logit", ipA = NULL, ipO = NULL) {
-  link <- as.list(substitute(link))
-  earg <- link2list(link)
-  link <- attr(earg, "function.name")
+ ABO <- function(link.pA = "logit", link.pB = "logit",
+                 ipA = NULL, ipB = NULL, ipO = NULL,
+                 zero = NULL) {
+  link.pA <- as.list(substitute(link.pA))
+  earg.pA <- link2list(link.pA)
+  link.pA <- attr(earg.pA, "function.name")
+
+  link.pB <- as.list(substitute(link.pB))
+  earg.pB <- link2list(link.pB)
+  link.pB <- attr(earg.pB, "function.name")
 
 
   new("vglmff",
   blurb = c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
             "Links:    ",
-            namesof("pA", link, earg = earg), ", ", 
-            namesof("pB", link, earg = earg, tag = FALSE)),
+            namesof("pA", link.pA, earg = earg.pA, tag = FALSE), ", ", 
+            namesof("pB", link.pB, earg = earg.pB, tag = FALSE)),
   deviance = Deviance.categorical.data.vgam,
+  infos = eval(substitute(function(...) {
+    list(M1 = 2,
+         Q1 = 4,
+         multipleResponses = FALSE,
+         expected = TRUE,
+         zero = .zero ,
+         link = c("pA" = .link.pA , "pB" = .link.pB ),
+         earg = c("pA" = .earg.pA , "pB" = .earg.pB )
+        )
+  }, list( .link.pA = link.pA, .link.pB = link.pB,
+           .earg.pA = earg.pA, .earg.pB = earg.pB,
+           .zero = zero ))),
+
+  constraints = eval(substitute(expression({
+    constraints <- cm.zero.VGAM(constraints, x, .zero , M)
+  }), list( .zero = zero ))),
+
 
   initialize = eval(substitute(expression({
     mustart.orig <- mustart
@@ -682,40 +707,44 @@ if (FALSE)
 
 
     predictors.names <-
-      c(namesof("pA", .link , earg = .earg , tag = FALSE),
-        namesof("pB", .link , earg = .earg , tag = FALSE))
+      c(namesof("pA", .link.pA , earg = .earg.pA , tag = FALSE),
+        namesof("pB", .link.pB , earg = .earg.pB , tag = FALSE))
     mustart <- (y + mustart) / 2
 
     if (!length(etastart)) {
       pO <- if (is.Numeric( .ipO )) rep( .ipO , len = n) else
         rep(c(sqrt( weighted.mean(mustart[, 4], w)) ), len = n)
       pA <- if (is.Numeric( .ipA )) rep( .ipA , len = n) else
-        rep(c(1 - sqrt(weighted.mean(mustart[, 2] + mustart[, 4], w))), len = n)
-      pB <- abs(1 - pA - pO)
-      etastart <- cbind(theta2eta(pA, .link , earg = .earg ),
-                        theta2eta(pB, .link , earg = .earg ))
+        rep(c(1 - sqrt(weighted.mean(mustart[, 2] + mustart[, 4], w))),
+            len = n)
+      pB <- if (is.Numeric( .ipB )) rep( .ipB , len = n) else
+            abs(1 - pA - pO)
+      etastart <- cbind(theta2eta(pA, .link.pA , earg = .earg.pA ),
+                        theta2eta(pB, .link.pB , earg = .earg.pB ))
       mustart <- NULL  # Since etastart has been computed.
     }
-  }), list( .link = link, .ipO = ipO, .ipA = ipA, .earg = earg))),
+  }), list( .link.pA = link.pA, .link.pB = link.pB,
+            .ipO = ipO, .ipA = ipA, .ipB = ipB,
+            .earg.pA = earg.pA, .earg.pB = earg.pB ))),
 
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
-      pA <- eta2theta(eta[, 1], link = .link , earg = .earg )
-      pB <- eta2theta(eta[, 2], link = .link , earg = .earg )
+      pA <- eta2theta(eta[, 1], link = .link.pA , earg = .earg.pA )
+      pB <- eta2theta(eta[, 2], link = .link.pB , earg = .earg.pB )
       pO <- abs(1 - pA - pB)
       cbind(A  = pA*(pA+2*pO),
             B  = pB*(pB+2*pO),
             AB = 2*pA*pB,
             O  = pO*pO) 
-  }, list( .link = link, .earg = earg))),
+  }, list( .link.pA = link.pA, .link.pB = link.pB,
+           .earg.pA = earg.pA, .earg.pB = earg.pB ))),
 
   last = eval(substitute(expression({
-    misc$link <-    c(pA = .link , pB = .link )
-
-    misc$earg <- list(pA = .earg , pB = .earg )
-
+    misc$link <-    c(pA = .link.pA , pB = .link.pB )
+    misc$earg <- list(pA = .earg.pA , pB = .earg.pB )
     misc$expected <- TRUE
-  }), list( .link = link, .earg = earg))),
+  }), list( .link.pA = link.pA, .link.pB = link.pB,
+            .earg.pA = earg.pA, .earg.pB = earg.pB ))),
 
 
   loglikelihood =
@@ -729,8 +758,8 @@ if (FALSE)
   vfamily = c("ABO", "vgenetic"),
 
   deriv = eval(substitute(expression({
-    ppp <- eta2theta(eta[, 1], link = .link , earg = .earg )
-    qqq <- eta2theta(eta[, 2], link = .link , earg = .earg )
+    ppp <- eta2theta(eta[, 1], link = .link.pA , earg = .earg.pA )
+    qqq <- eta2theta(eta[, 2], link = .link.pB , earg = .earg.pB )
     rrr <- abs(1 - ppp - qqq)
 
 
@@ -743,12 +772,13 @@ if (FALSE)
 
     dl.dp <- (naa+nab)/ppp -   naa/pbar - 2*nbb/qbar - 2*noo/rrr
     dl.dq <- (nbb+nab)/qqq - 2*naa/pbar -   nbb/qbar - 2*noo/rrr
-    dp.deta <- dtheta.deta(ppp, link = .link , earg = .earg )
-    dq.deta <- dtheta.deta(qqq, link = .link , earg = .earg )
+    dp.deta <- dtheta.deta(ppp, link = .link.pA , earg = .earg.pA )
+    dq.deta <- dtheta.deta(qqq, link = .link.pB , earg = .earg.pB )
 
     c(w) * cbind(dl.dp * dp.deta,
                  dl.dq * dq.deta)
-  }), list( .link = link, .earg = earg))),
+  }), list( .link.pA = link.pA, .link.pB = link.pB,
+            .earg.pA = earg.pA, .earg.pB = earg.pB ))),
 
   weight = eval(substitute(expression({
     wz <- matrix(as.numeric(NA), n, dimm(M))  # dimm(M)==3 because M==2
@@ -757,11 +787,12 @@ if (FALSE)
     ned2l.dq2  <- (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
     ned2l.dpdq <- 2 * (1 + qqq/qbar + ppp/pbar)
 
-    wz[, iam(1, 1, M)] <- ned2l.dp2 * dp.deta^2
-    wz[, iam(2, 2, M)] <- ned2l.dq2 * dq.deta^2
+    wz[, iam(1, 1, M)] <- ned2l.dp2  * dp.deta^2
+    wz[, iam(2, 2, M)] <- ned2l.dq2  * dq.deta^2
     wz[, iam(1, 2, M)] <- ned2l.dpdq * dp.deta * dq.deta
     c(w) * wz
-  }), list( .link = link, .earg = earg))))
+  }), list( .link.pA = link.pA, .link.pB = link.pB,
+            .earg.pA = earg.pA, .earg.pB = earg.pB ))))
 }
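
The split into link.pA/link.pB means the two allele frequencies no longer have
to share one link function. A sketch (bdata is a hypothetical data frame of
phenotype counts in the order A, B, AB, O):

    fit <- vglm(cbind(nA, nB, nAB, nO) ~ 1,
                ABO(link.pA = "logit", link.pB = "cloglog"), data = bdata)
    Coef(fit)   # estimated allele frequencies pA and pB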
 
 
@@ -865,10 +896,12 @@ if (FALSE)
 
 
 
+
+
  AA.Aa.aa <-
   function(linkp = "logit",
            linkf = "logit",
-           inbreeding = TRUE,
+           inbreeding = FALSE,  # HWE assumption is the default
            ipA = NULL,
            ifp = NULL,
            zero = NULL) {
@@ -888,22 +921,22 @@ if (FALSE)
 
   new("vglmff",
   blurb = c("AA-Aa-aa phenotype (",
-            ifelse(inbreeding, "with", "without"),
+            ifelse(inbreeding, "without", "with"),
             " the Hardy-Weinberg equilibrium assumption)\n\n",
             "Links:    ",
             namesof("pA", linkp, earg = eargp, tag = FALSE),
-            if (!inbreeding) paste(",",
+            if (inbreeding) paste(",",
             namesof("f",  linkf, earg = eargf, tag = FALSE)) else
             ""),
   deviance = Deviance.categorical.data.vgam,
   infos = eval(substitute(function(...) {
-    list(M1 = ifelse( .inbreeding , 1, 2),
+    list(M1 = ifelse( .inbreeding , 2, 1),
          Q1 = 3,
          multipleResponses = FALSE,
          expected = TRUE,
          zero = .zero ,
-         link = if ( .inbreeding ) c("pA" = .linkp ) else
-                            c("pA" = .linkp , "f" = .linkf ))
+         link = if ( .inbreeding ) c("pA" = .linkp , "f" = .linkf ) else
+                            c("pA" = .linkp ))
   }, list( .linkp = linkp,
            .linkf = linkf, .inbreeding = inbreeding,
            .zero = zero ))),
@@ -928,8 +961,8 @@ if (FALSE)
 
     predictors.names <-
       c(namesof("pA", .linkp , earg = .eargp , tag = FALSE),
-        if ( .inbreeding ) NULL else
-        namesof("f",  .linkf , earg = .eargf , tag = FALSE))
+        if ( .inbreeding )
+        namesof("f",  .linkf , earg = .eargf , tag = FALSE) else NULL)
     mustart <- (y + mustart) / 2
         
 
@@ -939,8 +972,8 @@ if (FALSE)
       fp <- if (is.numeric( .ifp )) rep( .ifp , len = n) else
               runif(n)  # 1- mustart[, 2]/(2*pA*(1-pA))
       etastart <- cbind(theta2eta(pA, .linkp , earg = .eargp ),
-                        if ( .inbreeding ) NULL else
-                        theta2eta(fp, .linkf , earg = .eargf ) )
+                        if ( .inbreeding )
+                        theta2eta(fp, .linkf , earg = .eargf ) else NULL)
       mustart <- NULL  # Since etastart has been computed.
     }
   }), list( .linkp = linkp, .linkf = linkf,
@@ -949,8 +982,8 @@ if (FALSE)
   linkinv = eval(substitute(function(eta, extra = NULL) {
     eta <- as.matrix(eta)
     pA <- eta2theta(eta[, 1], link = .linkp , earg = .eargp )
-    fp <- if ( .inbreeding ) 0 else
-          eta2theta(eta[, 2], link = .linkf , earg = .eargf )
+    fp <- if ( .inbreeding )
+          eta2theta(eta[, 2], link = .linkf , earg = .eargf ) else 0
 
     cbind(AA = pA^2 + pA * (1-pA) * fp,
           Aa = 2 * pA * (1-pA) * (1 - fp),
@@ -961,11 +994,11 @@ if (FALSE)
 
   last = eval(substitute(expression({
     if ( .inbreeding ) {
-      misc$link <-    c("pA" = .linkp )
-      misc$earg <- list("pA" = .eargp )
-    } else {
       misc$link <-    c("pA" = .linkp, "f" = .linkf )
       misc$earg <- list("pA" = .eargp, "f" = .eargf )
+    } else {
+      misc$link <-    c("pA" = .linkp )
+      misc$earg <- list("pA" = .eargp )
     }
     misc$expected <- TRUE
   }), list( .linkp = linkp, .linkf = linkf,
@@ -984,17 +1017,10 @@ if (FALSE)
   deriv = eval(substitute(expression({
     eta <- as.matrix(eta)
     pA <- eta2theta(eta[, 1], link = .linkp , earg = .eargp )
-    fp <- if ( .inbreeding ) 0 else
-          eta2theta(eta[, 2], link = .linkf , earg = .eargf )
+    fp <- if ( .inbreeding )
+          eta2theta(eta[, 2], link = .linkf , earg = .eargf ) else 0
 
     if ( .inbreeding ) {
-      nAA <- w * y[, 1]
-      nAa <- w * y[, 2]
-      naa <- w * y[, 3]
-      dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
-      dpA.deta <- dtheta.deta(pA, link = .linkp , earg = .eargp )
-      dl.dpA * dpA.deta
-    } else {
       dP1 <- cbind(fp + 2*pA*(1-fp),
                     2*(1-fp)*(1-2*pA),
                    -2*(1-pA) + fp*(1-2*pA))
@@ -1009,16 +1035,19 @@ if (FALSE)
 
       c(w) * cbind(dPP.deta * dl1,
                    dfp.deta * dl2)      
+    } else {
+      nAA <- w * y[, 1]
+      nAa <- w * y[, 2]
+      naa <- w * y[, 3]
+      dl.dpA <- (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
+      dpA.deta <- dtheta.deta(pA, link = .linkp , earg = .eargp )
+      dl.dpA * dpA.deta
     }  
   }), list( .linkp = linkp, .linkf = linkf,
             .eargp = eargp, .eargf = eargf,
             .inbreeding = inbreeding ))),
   weight = eval(substitute(expression({
     if ( .inbreeding ) {
-      ned2l.dp2 <- (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
-      wz <- cbind((dpA.deta^2) * ned2l.dp2)
-      wz
-    } else {
       dPP <- array(c(dP1, dP2), c(n, 3, 2))
       dPP.deta <- cbind(dtheta.deta(pA, link = .linkp , earg = .eargp ),
                         dtheta.deta(fp, link = .linkf , earg = .eargf ))
@@ -1031,6 +1060,10 @@ if (FALSE)
                                  dPP.deta[, i1] * dPP.deta[, i2]
         }
       c(w) * wz
+    } else {
+      ned2l.dp2 <- (2*nAA + nAa) / pA^2 + (nAa + 2*naa) / (1-pA)^2
+      wz <- cbind((dpA.deta^2) * ned2l.dp2)
+      wz
     }
   }), list( .linkp = linkp, .linkf = linkf,
             .eargp = eargp, .eargf = eargf,
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 762c923..deb5d44 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -13,12 +13,14 @@
 
 
 
- binomialff <- function(link = "logit",
-                        dispersion = 1, mv = FALSE, onedpar = !mv,
-                        parallel = FALSE,  # apply.parint = FALSE,
-                        zero = NULL,
-                        bred = FALSE,
-                        earg.link = FALSE) {
+ binomialff <-
+  function(link = "logit",
+           dispersion = 1,
+           multiple.responses = FALSE, onedpar = !multiple.responses,
+           parallel = FALSE,  # apply.parint = FALSE,
+           zero = NULL,
+           bred = FALSE,
+           earg.link = FALSE) {
 
 
  if (!is.logical(bred) || length(bred) > 1)
@@ -44,7 +46,7 @@
 
   ans <-
   new("vglmff",
-  blurb = if (mv) c("Multiple binomial model\n\n", 
+  blurb = if (multiple.responses) c("Multiple binomial model\n\n", 
          "Link:     ", namesof("mu[,j]", link, earg = earg), "\n",
          "Variance: mu[,j]*(1-mu[,j])") else
          c("Binomial model\n\n", 
@@ -82,7 +84,7 @@
 
 
 
-    if ( .mv ) {
+    if ( .multiple.responses ) {
       temp5 <-
       w.y.check(w = w, y = y,
                 Is.nonnegative.y = TRUE,
@@ -128,7 +130,7 @@
 
 
 
-      extra$mv <- TRUE
+      extra$multiple.responses <- TRUE
 
     } else {
 
@@ -178,14 +180,14 @@
 
 
     if ( .bred ) {
-      if ( !control$save.weight ) {
-       save.weight <- control$save.weight <- TRUE
+      if ( !control$save.weights ) {
+       save.weights <- control$save.weights <- TRUE
       }
     }
 
 
 
-    }), list( .link = link, .mv = mv,
+    }), list( .link = link, .multiple.responses = multiple.responses,
               .earg = earg, .bred = bred ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -208,7 +210,7 @@
       temp87 <- (y-mu)^2 * wz / (
                 dtheta.deta(mu, link = .link ,
                             earg = .earg )^2)  # w cancel
-      if (.mv && ! .onedpar ) {
+      if (.multiple.responses && ! .onedpar ) {
         dpar <- rep(as.numeric(NA), len = M)
         temp87 <- cbind(temp87)
         nrow.mu <- if (is.matrix(mu)) nrow(mu) else length(mu)
@@ -221,7 +223,7 @@
       }
     }
 
-    misc$mv <- .mv
+    misc$multiple.responses <- .multiple.responses
     misc$dispersion <- dpar
     misc$default.dispersion <- 1
     misc$estimated.dispersion <- .estimated.dispersion
@@ -238,7 +240,7 @@
 
   }), list( .dispersion = dispersion,
             .estimated.dispersion = estimated.dispersion,
-            .onedpar = onedpar, .mv = mv,
+            .onedpar = onedpar, .multiple.responses = multiple.responses,
             .bred = bred,
             .link = link, .earg = earg))),
 
@@ -265,7 +267,7 @@
         warning("converting 'ycounts' to integer in @loglikelihood")
       ycounts <- round(ycounts)
 
-      ll.elts <- if ( .mv ) {
+      ll.elts <- if ( .multiple.responses ) {
         c(w) * (    ycounts  * log(   mu) +
                (1 - ycounts) * log1p(-mu))
       } else {
@@ -278,7 +280,7 @@
         ll.elts
       }
     }
-  }, list( .mv = mv ))),
+  }, list( .multiple.responses = multiple.responses ))),
 
   vfamily = c("binomialff", "vcategorical"),
 
@@ -387,7 +389,7 @@
 
 
 
-  if (!mv)
+  if (!multiple.responses)
    ans@deviance <-
       function(mu, y, w, residuals = FALSE, eta, extra = NULL,
                summation = TRUE) {
@@ -1030,8 +1032,8 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
     if ( .bred ) {
-      if ( !control$save.weight ) {
-       save.weight <- control$save.weight <- TRUE
+      if ( !control$save.weights ) {
+       save.weights <- control$save.weights <- TRUE
       }
     }
 
@@ -1186,7 +1188,7 @@ rinv.gaussian <- function(n, mu, lambda) {
  quasibinomialff <-
   function(
            link = "logit",
-           mv = FALSE, onedpar = !mv,
+           multiple.responses = FALSE, onedpar = !multiple.responses,
            parallel = FALSE, zero = NULL) {
 
 
@@ -1197,7 +1199,8 @@ rinv.gaussian <- function(n, mu, lambda) {
   dispersion <- 0 # Estimated; this is the only difference with binomialff()
   ans <- binomialff(link = earg, earg.link = TRUE,
                     dispersion = dispersion,
-                    mv = mv, onedpar = onedpar,
+                    multiple.responses = multiple.responses,
+                    onedpar = onedpar,
                     parallel = parallel, zero = zero)
   ans@vfamily <- "quasibinomialff"
   ans@infos <- eval(substitute(function(...) {
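
The mv -> multiple.responses rename is mechanical but visible at call sites;
a hypothetical updated call:

    fit <- vglm(cbind(y1, y2) ~ x2,
                binomialff(multiple.responses = TRUE), data = bdata)
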
@@ -1537,9 +1540,10 @@ rinv.gaussian <- function(n, mu, lambda) {
 
 
 if (FALSE)
- matched.binomial <- function(mvar = NULL, link = "logit",
-                              parallel = TRUE,
-                              smallno = .Machine$double.eps^(3/4)) {
+ matched.binomial <-
+  function(multiple.responses = NULL, link = "logit",
+           parallel = TRUE,
+           smallno = .Machine$double.eps^(3/4)) {
   link <- as.list(substitute(link))
   earg <- link2list(link)
   link <- attr(earg, "function.name")
@@ -1552,11 +1556,13 @@ if (FALSE)
   if (is.logical(parallel) && !parallel)
     stop("'parallel' must be TRUE")
 
-    temp <- terms(mvar)
-    mvar <- attr(temp,"term.labels")
-    if (length(mvar) != 1) stop("cannot obtain the matching variable")
-    if (!is.character(mvar) || length(mvar) != 1) {
-        stop("bad input for 'mvar'")
+    temp <- terms(multiple.responses)
+    multiple.responses <- attr(temp,"term.labels")
+    if (length(multiple.responses) != 1)
+      stop("cannot obtain the matching variable")
+    if (!is.character(multiple.responses) ||
+        length(multiple.responses) != 1) {
+      stop("bad input for 'multiple.responses'")
     }
 
     new("vglmff",
@@ -1567,20 +1573,20 @@ if (FALSE)
                                bool = .parallel ,
                                constraints = constraints,
                                apply.int = TRUE)
-        constraints[[extra$mvar]] <- diag(M)
+        constraints[[extra$multiple.responses]] <- diag(M)
 
         specialCM <- list(a = vector("list", M-1))
         for (ii in 1:(M-1)) {
           specialCM[[1]][[ii]] <-
-            (constraints[[extra$mvar]])[, 1+ii,drop = FALSE]
+            (constraints[[extra$multiple.responses]])[, 1+ii,drop = FALSE]
         }
-        names(specialCM) = extra$mvar
+        names(specialCM) = extra$multiple.responses
     }), list( .parallel = parallel ))),
     initialize = eval(substitute(expression({
         if (!all(w == 1))
             extra$orig.w = w
 
-        mvar <- .mvar
+        multiple.responses <- .multiple.responses
 
         NCOL <- function (x) 
             if (is.array(x) && length(dim(x)) > 1 ||
@@ -1608,22 +1614,23 @@ if (FALSE)
     temp1 <- attr(x, "assign")
     if (colnames(x)[1] != "(Intercept)")
       stop("x must have an intercept")
-    M <- CCC <- length(temp1[[mvar]]) +
+    M <- CCC <- length(temp1[[multiple.responses]]) +
                 (colnames(x)[1] == "(Intercept)")
-    temp9 <- x[,temp1[[mvar]],drop = FALSE]
+    temp9 <- x[,temp1[[multiple.responses]],drop = FALSE]
     temp9 <- temp9 * matrix(2:CCC, n, CCC-1, byrow = TRUE)
     temp9 <- apply(temp9, 1, max)
       temp9[temp9 == 0] <- 1
       extra$NoMatchedSets <- CCC
       extra$n <- n
       extra$M <- M
-      extra$mvar <- mvar
+      extra$multiple.responses <- multiple.responses
       extra$index9 <- temp9
 
     predictors.names <-
       namesof("mu", .link , earg = .earg , short = TRUE)
     predictors.names <- rep(predictors.names, len = M)
-  }), list( .link = link, .earg = earg, .mvar = mvar ))),
+  }), list( .link = link, .earg = earg,
+            .multiple.responses = multiple.responses ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     mu <- eta2theta(eta, link = .link , earg = .earg )
     mu[cbind(1:extra$n, extra$index9)]
@@ -1782,16 +1789,17 @@ mypool <- function(x, index) {
           etastart <- theta2eta(mustart, link = "logit", earg = list())
 
         temp1 = attr(x, "assign")
-        mvar = extra$mvar
-        if (length(mvar) != n) stop("input extra$mvar doesn't look right")
+        multiple.responses = extra$multiple.responses
+        if (length(multiple.responses) != n)
+          stop("input extra$multiple.responses doesn't look right")
 
         if (any(y != 0 & y != 1))
             stop("response vector must have 0 or 1 values only")
-        xrle = rle(mvar)
-        if (length(unique(mvar)) != length(xrel$length))
-            stop("extra$mvar must take on contiguous values")
+        xrle = rle(multiple.responses)
+        if (length(unique(multiple.responses)) != length(xrle$lengths))
+          stop("extra$multiple.responses must take on contiguous values")
 
-        temp9 = factor(mvar)
+        temp9 = factor(multiple.responses)
         extra$NoMatchedSets = levels(temp9)
         extra$n = n
         extra$M = M
@@ -1799,10 +1807,11 @@ mypool <- function(x, index) {
         extra$index9 = temp9
         predictors.names <-
           namesof("mu", .link , earg = .earg , short = TRUE)
-  }), list( .link = link, .earg = earg, .mvar = mvar ))),
+  }), list( .link = link, .earg = earg,
+            .multiple.responses = multiple.responses ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
       denominator <- exp(eta)
-      numerator <- mypool(denominator, extra$mvar)
+      numerator <- mypool(denominator, extra$multiple.responses)
       numerator / denominator
   }, list( .link = link, .earg = earg  ))),
   last = eval(substitute(expression({
@@ -1856,7 +1865,7 @@ mypool <- function(x, index) {
 
 
 
- augbinomial <- function(link = "logit", mv = FALSE,
+ augbinomial <- function(link = "logit", multiple.responses = FALSE,
                         parallel = TRUE) {
 
     if (!is.logical(parallel) ||
@@ -1870,7 +1879,8 @@ mypool <- function(x, index) {
 
 
   new("vglmff",
-  blurb = if (mv) c("Augmented multivariate binomial model\n\n", 
+  blurb = if (multiple.responses)
+          c("Augmented multivariate binomial model\n\n", 
          "Link:     ",
          namesof("mu.1[,j]", link, earg = earg), ", ",
          namesof("mu.2[,j]", link, earg = earg),
@@ -1897,7 +1907,7 @@ mypool <- function(x, index) {
 
     M1 = 2
 
-    if ( .mv ) {
+    if ( .multiple.responses ) {
         y = as.matrix(y)
         M = M1 * ncol(y)
         if (!all(y == 0 | y == 1))
@@ -1971,7 +1981,8 @@ mypool <- function(x, index) {
             c(namesof("mu.1", .link , earg = .earg , short = TRUE),
               namesof("mu.2", .link , earg = .earg , short = TRUE))
       }
-  }), list( .link = link, .mv = mv, .earg = earg))),
+  }), list( .link = link,
+            .multiple.responses = multiple.responses, .earg = earg))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     Mdiv2  =  ncol(eta) / 2
     index1 =  2*(1:Mdiv2) - 1
@@ -1990,8 +2001,9 @@ mypool <- function(x, index) {
 
     misc$parallel <- .parallel
     misc$expected <- TRUE
-    misc$mv <- .mv
-  }), list( .link = link, .mv = mv, .earg = earg,
+    misc$multiple.responses <- .multiple.responses
+  }), list( .link = link,
+            .multiple.responses = multiple.responses, .earg = earg,
             .parallel = parallel ))),
   linkfun = eval(substitute(function(mu, extra = NULL) {
     usualanswer = theta2eta(mu, .link , earg = .earg )
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 9b23924..70aac99 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.math.R b/R/family.math.R
index 94adc65..7d7370d 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.mixture.R b/R/family.mixture.R
index a6ea3cf..821b8ba 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index f79cfb3..1e3a21d 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -12,11 +12,11 @@
 
 
 
-vnonlinear.control <- function(save.weight = TRUE, ...) {
+vnonlinear.control <- function(save.weights = TRUE, ...) {
 
 
 
-    list(save.weight = as.logical(save.weight)[1])
+    list(save.weights = as.logical(save.weights)[1])
 }
 
 
@@ -71,8 +71,8 @@ subset.lohi <- function(xvec, yvec,
 
 
 
-micmen.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+micmen.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -356,8 +356,8 @@ micmen.control <- function(save.weight = TRUE, ...) {
 
 
 
-skira.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+skira.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
diff --git a/R/family.normal.R b/R/family.normal.R
index 8192deb..e354cac 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -223,7 +223,6 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
     stop("bad input for argument 'log'")
   rm(log)
 
-
   L <- max(length(x), length(mean), length(sd))
   if (length(x)    != L) x    <- rep(x,    len = L)
   if (length(mean) != L) mean <- rep(mean, len = L)
@@ -238,23 +237,47 @@ dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
 }
 
 
-pposnorm <- function(q, mean = 0, sd = 1) {
-  L <- max(length(q), length(mean), length(sd))
-  if (length(q)    != L) q    <- rep(q,    len = L)
-  if (length(mean) != L) mean <- rep(mean, len = L)
-  if (length(sd)   != L) sd   <- rep(sd,   len = L)
 
-  ifelse(q < 0, 0, (pnorm(q, mean = mean, sd = sd) -
-                    pnorm(0, mean = mean, sd = sd)) / pnorm(q = mean / sd))
+pposnorm <- function(q, mean = 0, sd = 1,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+  ans <- (pnorm(q, mean = mean, sd = sd) -
+          pnorm(0, mean = mean, sd = sd)) / pnorm(mean / sd)
+  ans[q <= 0] <- 0
+
+  if (lower.tail) {
+    if (log.p) log(ans) else ans
+  } else {
+    if (log.p) log1p(-ans) else 1-ans
+  }
 }
 
 
-qposnorm <- function(p, mean = 0, sd = 1) {
+
+qposnorm <- function(p, mean = 0, sd = 1,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)   # 20150102 KaiH
+  
+  if (lower.tail) {
+    if (log.arg) p <- exp(p)
+  } else {
+    p <- if (log.arg) -expm1(p) else 1 - p
+  }
+
   qnorm(p = p + (1 - p) * pnorm(0, mean = mean, sd = sd),
         mean = mean, sd = sd)
 }
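
Round-trip sketch for the positive normal under the new arguments (assumed
values):

    p <- 0.3
    q <- qposnorm(p, mean = 1, sd = 2)
    all.equal(pposnorm(q, mean = 1, sd = 2), p)           # TRUE
    all.equal(qposnorm(log(p), 1, 2, log.p = TRUE), q)    # TRUE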
 
 
+
 rposnorm <- function(n, mean = 0, sd = 1) {
   qnorm(p = runif(n, min = pnorm(0, mean = mean, sd = sd)),
         mean = mean, sd = sd)
@@ -262,8 +285,8 @@ rposnorm <- function(n, mean = 0, sd = 1) {
 
 
 
- posnormal.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+ posnormal.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -476,20 +499,15 @@ dbetanorm <- function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
   rm(log)
 
 
-  ans <- if (log.arg) {
+  logden <-
     dnorm(x = x, mean = mean, sd = sd, log = TRUE) +
     (shape1-1) * pnorm(q = x, mean = mean, sd = sd, log.p = TRUE) +
     (shape2-1) * pnorm(q = x, mean = mean, sd = sd, log.p = TRUE,
                        lower.tail = FALSE) -
     lbeta(shape1, shape2)
-  } else {
-    dnorm(x = x, mean = mean, sd = sd) *
-    pnorm(q = x, mean = mean, sd = sd)^(shape1-1) *
-    pnorm(q = x, mean = mean, sd = sd,
-          lower.tail = FALSE)^(shape2-1) / beta(shape1, shape2)
-  }
 
-  ans
+  logden[is.infinite(x)] <- log(0)  # 20141210 KaiH
+  if (log.arg) logden else exp(logden)
 }
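
The density is now assembled once on the log scale and exponentiated only on
return, so both branches share the same numerics. Illustrative check:

    x <- 0.7
    all.equal(dbetanorm(x, shape1 = 2, shape2 = 3),
              exp(dbetanorm(x, shape1 = 2, shape2 = 3, log = TRUE)))   # TRUE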
 
 
@@ -503,12 +521,16 @@ pbetanorm <- function(q, shape1, shape2, mean = 0, sd = 1,
 }
 
 
-qbetanorm <- function(p, shape1, shape2, mean = 0, sd = 1) {
-  qnorm(p = qbeta(p = p, shape1 = shape1, shape2 = shape2),
+
+qbetanorm <- function(p, shape1, shape2, mean = 0, sd = 1,
+                      lower.tail = TRUE, log.p = FALSE) {
+  qnorm(p = qbeta(p = p, shape1 = shape1, shape2 = shape2,
+                  lower.tail = lower.tail, log.p = log.p),
         mean = mean, sd = sd)
 }
 
 
+
 rbetanorm <- function(n, shape1, shape2, mean = 0, sd = 1) {
   qnorm(p = qbeta(p = runif(n), shape1 = shape1, shape2 = shape2),
         mean = mean, sd = sd)
@@ -535,21 +557,27 @@ dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
 
   hh <- 2 - d
   KK <- 1 / (1 + 1/hh + 0.75/hh^2)
-  if (log.arg) {
-    dnorm(x = x, mean = mean, sd = sigma, log = TRUE) + log(KK) +
+  logden <- dnorm(x = x, mean = mean, sd = sigma, log = TRUE) + log(KK) +
     2 * log1p(((x-mean)/sigma)^2 / (2*hh))
-  } else {
-    dnorm(x = x, mean = mean, sd = sigma) * KK *
-    (1 + ((x-mean)/sigma)^2 / (2*hh))^2
-  }
+  logden[is.infinite(x)] <- log(0)  # 20141209 KaiH
+  if (log.arg) logden else exp(logden)
 }
 
 
-ptikuv <- function(q, d, mean = 0, sigma = 1) {
+
+ptikuv <- function(q, d, mean = 0, sigma = 1,
+                   lower.tail = TRUE, log.p = FALSE) {
   if (!is.Numeric(d, length.arg = 1) ||
       max(d) >= 2)
     stop("bad input for argument 'd'")
 
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)   # 20141231 KaiH
+
   L <- max(length(q), length(mean), length(sigma))
   if (length(q)     != L) q     <- rep(q,     len = L)
   if (length(mean)  != L) mean  <- rep(mean,  len = L)
@@ -569,26 +597,39 @@ ptikuv <- function(q, d, mean = 0, sigma = 1) {
     ans[rhs] <- 1.0 - Recall(q = (2*mean[rhs] - q[rhs]), d = d,
                              mean = mean[rhs], sigma = sigma[rhs])
   }
-  ans
+
+  if (lower.tail) {
+    if (log.arg) log(ans) else ans
+  } else {
+    if (log.arg) log1p(-ans) else 1 - ans
+  }
 }
 
 
-qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
-  if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
-    stop("bad input for argument 'p'")
+
+
+qtikuv <- function(p, d, mean = 0, sigma = 1, 
+                   lower.tail = TRUE, log.p = FALSE, ...) {
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
   if (!is.Numeric(d, length.arg = 1) || max(d) >= 2)
     stop("bad input for argument 'd'")
-  if (!is.Numeric(mean))
-    stop("bad input for argument 'mean'")
-  if (!is.Numeric(sigma))
-    stop("bad input for argument 'sigma'")
 
+  orig.p <- p
+  if (lower.tail) {
+    if (log.p) p <- exp(p)
+  } else {
+    p <- if (log.p) -expm1(p) else 1 - p
+  }
+  
   L <- max(length(p), length(mean), length(sigma))
   if (length(p)     != L) p     <- rep(p,     len = L)
   if (length(mean)  != L) mean  <- rep(mean,  len = L)
   if (length(sigma) != L) sigma <- rep(sigma, len = L)
   ans <- rep(0.0, len = L)
 
+
   myfun <- function(x, d, mean = 0, sigma = 1, p)
     ptikuv(q = x, d = d, mean = mean, sigma = sigma) - p
 
@@ -605,6 +646,15 @@ qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
                        d = d, p = p[ii],
                        mean = mean[ii], sigma = sigma[ii], ...)$root
   }
+
+
+  if (log.p) {
+    ans[orig.p > 0] <- NaN
+  } else {
+    ans[orig.p < 0] <- NaN
+    ans[orig.p > 1] <- NaN
+  }
+
   ans
 }
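
Because qtikuv() inverts ptikuv() numerically, the new log.p handling simply
transforms p before the root search, e.g. (illustrative):

    all.equal(qtikuv(0.25, d = 1),
              qtikuv(log(0.25), d = 1, log.p = TRUE))   # TRUE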
 
@@ -823,26 +873,63 @@ dfoldnorm <- function(x, mean = 0, sd = 1, a1 = 1, a2 = 1,
 }
 
 
-pfoldnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2 = 1) {
 
+pfoldnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2 = 1,
+                      lower.tail = TRUE, log.p = FALSE) {
 
-  L <- max(length(q), length(mean), length(sd))
-  if (length(q)    != L) q    <- rep(q,    len = L)
-  if (length(mean) != L) mean <- rep(mean, len = L)
-  if (length(sd)   != L) sd   <- rep(sd,   len = L)
 
-  ans <- ifelse(q < 0, 0, pnorm(q =  q/(a1*sd) - mean/sd) -
-                          pnorm(q = -q/(a2*sd) - mean/sd))
-  ans[a1 <= 0 | a2 <= 0] <- NA
-  ans[sd <= 0] <- NA
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(pnorm(q =  q/(a1*sd) - mean/sd) - 
+                 pnorm(q = -q/(a2*sd) - mean/sd))
+      ans[q <= 0] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- pnorm(q =  q/(a1*sd) - mean/sd) - 
+             pnorm(q = -q/(a2*sd) - mean/sd)
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log(pnorm(q =  q/(a1*sd) - mean/sd, lower.tail = FALSE) + 
+                 pnorm(q = -q/(a2*sd) - mean/sd))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- pnorm(q =  q/(a1*sd) - mean/sd, lower.tail = FALSE) + 
+             pnorm(q = -q/(a2*sd) - mean/sd)
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  } 
+  ans[a1 <= 0 | a2 <= 0] <- NaN
+  ans[sd <= 0] <- NaN
   ans
 }
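
Invalid parameters now give NaN rather than NA, matching base R's d/p/q
convention, and q = Inf is handled explicitly in every branch. Sketch:

    pfoldnorm(2, sd = -1)     # NaN
    pfoldnorm(Inf, a1 = 2)    # 1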
 
 
-qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
-  if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
-    stop("bad input for argument 'p'")
 
+qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1,
+                      lower.tail = TRUE, log.p = FALSE, ...) {
+
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)
+  
+  if (lower.tail) {
+    if (log.arg) p <- exp(p)
+  } else {
+    p <- if (log.arg) -expm1(p) else 1 - p
+  }
+  
   L <- max(length(p), length(mean), length(sd), length(a1), length(a2))
   if (length(p)    != L) p    <- rep(p,    len = L)
   if (length(mean) != L) mean <- rep(mean, len = L)
@@ -855,8 +942,8 @@ qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
     pfoldnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
 
   for (ii in 1:L) {
-    mytheta <- mean[ii]/sd[ii]
-    EY <- sd[ii] * ((a1[ii]+a2[ii]) *
+    mytheta <- mean[ii] / sd[ii]
+    EY <- sd[ii] * ((a1[ii] + a2[ii]) *
          (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
           a2[ii] * mytheta)
     Upper <- 2 * EY
@@ -869,13 +956,14 @@ qfoldnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
                        p = p[ii], ...)$root
   }
 
-  ans[a1 <= 0 | a2 <= 0] <- NA
-  ans[sd <= 0] <- NA
+  ans[a1 <= 0 | a2 <= 0] <- NaN
+  ans[sd <= 0] <- NaN
 
   ans
 }
 
 
+
 rfoldnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
 
   X <- rnorm(n, mean = mean, sd = sd)
@@ -1232,60 +1320,48 @@ dtobit <- function(x, mean = 0, sd = 1,
   if (length(Lower) != L) Lower <- rep(Lower, len = L)
   if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
+  if (!all(Lower < Upper, na.rm = TRUE))
+    stop("all(Lower < Upper) is not TRUE")
+
   ans <- dnorm(x = x, mean = mean, sd = sd, log = log.arg)
   ans[x <  Lower] <- if (log.arg) log(0.0) else 0.0
   ans[x >  Upper] <- if (log.arg) log(0.0) else 0.0
 
 
   ind3 <- x == Lower
-  ans[ind3] <- if (log.arg) {
-        log(exp(ans[ind3]) +
-            pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3]))
-      } else {
-        ans[ind3] +
-        pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3])
-      }
+  ans[ind3] <- pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3],
+                     log.p = log.arg)
 
   ind4 <- x == Upper
-  ans[ind4] <- if (log.arg) {
-        log(exp(ans[ind4]) +
-            pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
-                  lower.tail = FALSE))
-      } else {
-        ans[ind4] +
-        pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
-              lower.tail = FALSE)
-      }
+  ans[ind4] <- pnorm(q = Upper[ind4], mean = mean[ind4], sd = sd[ind4],
+                     lower.tail = FALSE, log.p = log.arg)
+
   ans
 }
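
The probability mass at a censoring limit is now the censored tail probability
alone; the old code also added the continuous density there. Sketch (assumed
values):

    all.equal(dtobit(0, mean = 1, sd = 1, Lower = 0), pnorm(0, 1, 1))   # TRUE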
 
 
 
-ptobit <- function(q, mean = 0, sd = 1,
-                  Lower = 0, Upper = Inf,
-                  lower.tail = TRUE, log.p = FALSE) {
+ptobit <- function(q, mean = 0, sd = 1, Lower = 0, Upper = Inf,
+                   lower.tail = TRUE, log.p = FALSE) {
 
   if (!is.logical(lower.tail) || length(lower.tail) != 1)
     stop("argument 'lower.tail' must be a single logical")
   if (!is.logical(log.p) || length(log.p) != 1)
     stop("argument 'log.p' must be a single logical")
 
-  L <- max(length(q), length(mean), length(sd),
-           length(Lower), length(Upper))
-  if (length(q)     != L) q     <- rep(q,     len = L)
-  if (length(mean)  != L) mean  <- rep(mean,  len = L)
-  if (length(sd)    != L) sd    <- rep(sd,    len = L)
-  if (length(Lower) != L) Lower <- rep(Lower, len = L)
-  if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
-  ans <- pnorm(q = q, mean = mean, sd = sd, lower.tail = lower.tail)
+  if (!all(Lower < Upper, na.rm = TRUE))
+    stop("all(Lower < Upper) is not TRUE")
+
+
+  ans <- pnorm(q = q, mean = mean, sd = sd, 
+               lower.tail = lower.tail, log.p = log.p)
   ind1 <- (q <  Lower)
   ans[ind1] <- if (lower.tail) ifelse(log.p, log(0.0), 0.0) else
                                ifelse(log.p, log(1.0), 1.0)
   ind2 <- (Upper <= q)
   ans[ind2] <- if (lower.tail) ifelse(log.p, log(1.0), 1.0) else
                                ifelse(log.p, log(0.0), 0.0)
-
   ans
 }
 
@@ -1293,25 +1369,39 @@ ptobit <- function(q, mean = 0, sd = 1,
 
 
 qtobit <- function(p, mean = 0, sd = 1,
-                  Lower = 0, Upper = Inf) {
-
-  L <- max(length(p), length(mean), length(sd),
-           length(Lower), length(Upper))
-  if (length(p)     != L) p     <- rep(p,     len = L)
-  if (length(mean)  != L) mean  <- rep(mean,  len = L)
-  if (length(sd)    != L) sd    <- rep(sd,    len = L)
-  if (length(Lower) != L) Lower <- rep(Lower, len = L)
-  if (length(Upper) != L) Upper <- rep(Upper, len = L)
-
-  ans <- qnorm(p = p, mean = mean, sd = sd)
-  pnorm.Lower <- ptobit(q = Lower, mean = mean, sd = sd)
-  pnorm.Upper <- ptobit(q = Upper, mean = mean, sd = sd)
-
-  ind1 <- (p <= pnorm.Lower)
-  ans[ind1] <- Lower[ind1]
-
-  ind2 <- (pnorm.Upper <= p)
-  ans[ind2] <- Upper[ind2]
+                   Lower = 0, Upper = Inf,
+                   lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!all(Lower < Upper, na.rm = TRUE))
+    stop("all(Lower < Upper) is not TRUE")
+
+  # 20150127 KaiH; add lower.tail = lower.tail, log.p = log.p
+  ans <- qnorm(p, mean = mean, sd = sd, 
+               lower.tail = lower.tail, log.p = log.p)
+  pnorm.Lower <- ptobit(q = Lower, mean = mean, sd = sd, 
+                        lower.tail = lower.tail, log.p = log.p)
+  pnorm.Upper <- ptobit(q = Upper, mean = mean, sd = sd, 
+                        lower.tail = lower.tail, log.p = log.p)
+
+  if (FALSE) {
+    if (lower.tail) {
+      ind1 <- (p <= pnorm.Lower)
+      ans[ind1] <- Lower[ind1]
+      ind2 <- (pnorm.Upper <= p)
+      ans[ind2] <- Upper[ind2]
+    } else {
+      ind1 <- (p >= pnorm.Lower)
+      ans[ind1] <- Lower[ind1]
+      ind2 <- (pnorm.Upper >= p)
+      ans[ind2] <- Upper[ind2]
+    }
+  } else {
+    ans <- qnorm(p = p, mean = mean, sd = sd,
+                 lower.tail = lower.tail, log.p = log.p)
+    ans <- pmax(ans, Lower)
+    ans <- pmin(ans, Upper)
+  }
 
   ans
 }
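
In the live branch the quantile is simply the normal quantile clamped to the
censoring limits. Sketch:

    qtobit(c(0.01, 0.5, 0.99), mean = 1, Lower = 0, Upper = 2)
    # -> 0 (left-censored), 1, 2 (right-censored)
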
@@ -1333,12 +1423,20 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
   if (length(Lower) != L) Lower <- rep(Lower, len = L)
   if (length(Upper) != L) Upper <- rep(Upper, len = L)
 
+  if (!all(Lower < Upper, na.rm = TRUE))
+    stop("all(Lower < Upper) is not TRUE")
+
   ans <- rnorm(n = use.n, mean = mean, sd = sd)
   cenL <- (ans < Lower)
-  ans[cenL] <- Lower[cenL]
   cenU <- (ans > Upper)
+  if (FALSE) {
+  ans[cenL] <- Lower[cenL]
   ans[cenU] <- Upper[cenU]
-
+} else {
+  ans <- pmax(ans, Lower)
+  ans <- pmin(ans, Upper)
+}
+  
   attr(ans, "Lower") <- Lower
   attr(ans, "Upper") <- Upper
   attr(ans, "cenL") <- cenL
@@ -1349,8 +1447,8 @@ rtobit <- function(n, mean = 0, sd = 1, Lower = 0, Upper = Inf) {
 
 
 
-tobit.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+tobit.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1476,6 +1574,11 @@ tobit.control <- function(save.weight = TRUE, ...) {
         namesof(temp2.names, .lsd , earg = .esd , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
 
+
+    if ( .stdTobit ) {
+      save.weights <- control$save.weights <- FALSE
+    }
+
     if (!length(etastart)) {
       anyc <- cbind(extra$censoredL | extra$censoredU)
       i11 <- if ( .imethod == 1) anyc else FALSE  # can be all data
@@ -1509,6 +1612,7 @@ tobit.control <- function(save.weight = TRUE, ...) {
            .emu = emu, .esd = esd,
            .Imu = imu, .isd = isd,
            .type.fitted = type.fitted,
+           .stdTobit = stdTobit,
            .imethod = imethod ))),
   linkinv = eval(substitute( function(eta, extra = NULL) {
     M1 <- 2
@@ -1583,12 +1687,6 @@ tobit.control <- function(save.weight = TRUE, ...) {
     misc$Upper <- Uppmat
 
 
-    if ( .stdTobit ) {
-      save.weight <- control$save.weight <- FALSE
-      fit$weights <- NULL
-    }
-
-
   }), list( .lmu = lmu, .lsd = lsd,
             .emu = emu, .esd = esd,
             .nsimEIM = nsimEIM, .imethod = imethod,
@@ -3166,6 +3264,9 @@ dskewnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
   zedd <- (x - location) / scale
   loglik <- log(2) + dnorm(zedd, log = TRUE) +
             pnorm(shape * zedd, log.p = TRUE) - log(scale)
+
+  loglik[is.infinite(x)] <- log(0)  # 20141209 KaiH
+
   if (log.arg) {
     loglik
   } else {
diff --git a/R/family.others.R b/R/family.others.R
index d3ded89..b220cdc 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -46,28 +46,95 @@ dexppois <- function(x, rate = 1, shape, log = FALSE) {
 }
 
 
-qexppois<- function(p, rate = 1, shape) {
-  ans <- -log(log(p * -(expm1(shape)) + exp(shape)) / shape) / rate
-  ans[(shape <= 0) | (rate <= 0)] = NaN
-  ans[p < 0] <- NaN
-  ans[p > 1] <- NaN
+
+
+
+
+qexppois <- function(p, rate = 1, shape,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- -log(log(exp(ln.p) * (-expm1(shape)) + exp(shape)) / shape) / rate
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- -log(log(p * (-expm1(shape)) + exp(shape)) / shape) / rate
+      ans[p < 0] <- NaN
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- -log(log(expm1(ln.p) * expm1(shape) + exp(shape)) / shape) / rate
+      ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- -log(log(p * expm1(shape) + 1) / shape) / rate
+      ans[p < 0] <- NaN
+      ans[p > 1] <- NaN
+    }
+  }
+  ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans
 }
 
 
 
-pexppois<- function(q, rate = 1, shape) {
-  ans <-(exp(shape * exp(-rate * q)) -
-         exp(shape)) / -expm1(shape)  
-  ans[q <= 0] <- 0
+
+
+
+
+
+
+
+
+
+
+
+pexppois <- function(q, rate = 1, shape,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log((exp(shape * exp(-rate * q)) -
+                    exp(shape)) / -expm1(shape))
+      ans[q <= 0] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- (exp(shape * exp(-rate * q)) - exp(shape)) / (-expm1(shape))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log(expm1(shape * exp(-rate * q)) / expm1(shape))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- expm1(shape * exp(-rate * q)) / expm1(shape)
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  } 
   ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans
 }
 
 
 
+
 rexppois <- function(n, rate = 1, shape) {
-  ans <- -log(log(runif(n) * -(expm1(shape)) +
+  ans <- -log(log(runif(n) * (-expm1(shape)) +
          exp(shape)) / shape) / rate
   ans[(shape <= 0) | (rate <= 0)] <- NaN
   ans
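
A consistency check for the exponential-Poisson pair under the new arguments
(illustrative):

    p <- 0.4; rate <- 2; shape <- 1.5
    q <- qexppois(p, rate, shape)
    all.equal(pexppois(q, rate, shape), p)                      # TRUE
    all.equal(qexppois(log1p(-p), rate, shape,
                       lower.tail = FALSE, log.p = TRUE), q)    # TRUE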
@@ -270,6 +337,7 @@ dgenray <- function(x, scale = 1, shape, log = FALSE) {
                        (shape[xok] - 1) * log1p(-exp(-temp1^2))
   }
   logdensity[(shape <= 0) | (scale <= 0)] <- NaN
+  logdensity[is.infinite(x)] <- log(0)  # 20141209 KaiH
   if (log.arg) {
     logdensity
   } else {
@@ -278,27 +346,85 @@ dgenray <- function(x, scale = 1, shape, log = FALSE) {
 }
 
 
-pgenray <- function(q, scale = 1, shape) {
-  ans <- (-expm1(-(q/scale)^2))^shape
-  ans[q <= 0] <- 0
+
+
+
+
+pgenray <- function(q, scale = 1, shape, 
+                    lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log((-expm1(-(q/scale)^2))^shape)
+      ans[q <= 0] <- -Inf
+    } else {
+      ans <- (-expm1(-(q/scale)^2))^shape
+      ans[q <= 0] <- 0
+    }
+  } else {
+    if (log.p) {
+      ans <- log(-expm1(shape*log(-expm1(-(q/scale)^2))))
+      ans[q <= 0] <- 0
+    } else {
+      ans <- -expm1(shape*log(-expm1(-(q/scale)^2)))
+      ans[q <= 0] <- 1
+    }
+  } 
   ans[(shape <= 0) | (scale <= 0)] <- NaN
   ans
 }
 
 
-qgenray <- function(p, scale = 1, shape) {
-  ans <- scale * sqrt(-log1p(-(p^(1/shape))))
+
+
+
+
+
+
+
+qgenray <- function(p, scale = 1, shape, 
+                    lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * sqrt(-log1p(-(exp(ln.p)^(1/shape))))
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- scale * sqrt(-log1p(-(p^(1/shape))))
+      ans[p < 0] <- NaN
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * sqrt(-log1p(-((-expm1(ln.p))^(1/shape))))
+      ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- scale * sqrt(-log1p(-exp((1/shape)*log1p(-p))))
+      ans[p < 0] <- NaN
+      ans[p > 1] <- NaN
+    }
+  }
   ans[(shape <= 0) | (scale <= 0)] <- NaN
-  ans[p < 0] <- NaN
-  ans[p > 1] <- NaN
-  ans[p == 0] <- 0
-  ans[p == 1] <- Inf
   ans
 }
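
For the generalized Rayleigh pair the CDF is (-expm1(-(q/scale)^2))^shape,
so the two tails and the quantile function can be cross-checked directly
(a minimal sketch, assuming the patched package is attached):

  q <- c(0.3, 1, 2)
  all.equal(pgenray(q, scale = 1.2, shape = 2, lower.tail = FALSE),
            1 - pgenray(q, scale = 1.2, shape = 2))    # should be TRUE
  all.equal(qgenray(pgenray(q, scale = 1.2, shape = 2),
                    scale = 1.2, shape = 2), q)        # should be TRUE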
 
 
 
 
+
+
 rgenray <- function(n, scale = 1, shape) {
   ans <- qgenray(runif(n), shape = shape, scale = scale)
   ans[(shape <= 0) | (scale <= 0)] <- NaN
@@ -308,8 +434,8 @@ rgenray <- function(n, scale = 1, shape) {
 
 
 
-genrayleigh.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+genrayleigh.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -567,8 +693,8 @@ rexpgeom <- function(n, scale = 1, shape) {
 
 
 
-expgeometric.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+expgeometric.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -845,8 +971,8 @@ rexplog <- function(n, scale = 1, shape) {
 
 
 
-explogff.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+explogff.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
diff --git a/R/family.positive.R b/R/family.positive.R
index da7f7f9..8456add 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -417,8 +417,8 @@ qposnegbin <- function(p, size, prob = NULL, munb = NULL) {
 
 
 
-posnegbinomial.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+posnegbinomial.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1105,7 +1105,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
  posbinomial <-
   function(link = "logit",
-           mv = FALSE, parallel = FALSE,
+           multiple.responses = FALSE, parallel = FALSE,
            omit.constant = FALSE,
 
            p.small = 1e-4, no.warning = FALSE,
@@ -1121,13 +1121,13 @@ dposbinom <- function(x, size, prob, log = FALSE) {
 
 
 
-  if (!is.logical(mv) || length(mv) != 1)
-    stop("bad input for argument 'mv'")
+  if (!is.logical(multiple.responses) || length(multiple.responses) != 1)
+    stop("bad input for argument 'multiple.responses'")
 
   if (!is.logical(omit.constant) || length(omit.constant) != 1)
     stop("bad input for argument 'omit.constant'")
 
-  if (mv && length(zero) &&
+  if (multiple.responses && length(zero) &&
       !is.Numeric(zero, integer.valued = TRUE))
     stop("bad input for argument 'zero'")
 
@@ -1139,7 +1139,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   new("vglmff",
   blurb = c("Positive-binomial distribution\n\n",
             "Links:    ",
-            if (mv)
+            if (multiple.responses)
             c(namesof("prob1", link, earg = earg, tag = FALSE),
               ",...,",
               namesof("probM", link, earg = earg, tag = FALSE)) else
@@ -1167,7 +1167,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   initialize = eval(substitute(expression({
 
     mustart.orig <- mustart
-    if ( .mv ) {
+    if ( .multiple.responses ) {
     temp5 <-
     w.y.check(w = w, y = y,
               Is.positive.y = TRUE,
@@ -1200,7 +1200,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     }
 
 
-    if ( .mv ) {
+    if ( .multiple.responses ) {
 
       dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
       dn2 <- if (length(dn2)) {
@@ -1240,12 +1240,12 @@ dposbinom <- function(x, size, prob, log = FALSE) {
   }), list( .link = link,
             .p.small    = p.small,
             .no.warning = no.warning,
-            .earg = earg, .mv = mv ))),
+            .earg = earg, .multiple.responses = multiple.responses ))),
 
   linkinv = eval(substitute(function(eta, extra = NULL) {
     w <- extra$w
     binprob <- eta2theta(eta, .link , earg = .earg )
-    nvec <- if ( .mv ) {
+    nvec <- if ( .multiple.responses ) {
              w
            } else {
              if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1254,7 +1254,8 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     binprob / (1.0 - (1.0 - binprob)^nvec)
   },
 
-  list( .link = link, .earg = earg, .mv = mv ))),
+  list( .link = link, .earg = earg,
+        .multiple.responses = multiple.responses ))),
   last = eval(substitute(expression({
     extra$w   <- NULL # Kill it off 
 
@@ -1272,7 +1273,7 @@ dposbinom <- function(x, size, prob, log = FALSE) {
     misc$needto.omit.constant <- TRUE  # Safety mechanism
     
     
-    misc$mv   <- .mv
+    misc$multiple.responses   <- .multiple.responses
     w <- as.numeric(w)
 
 
@@ -1289,7 +1290,8 @@ if (length(extra$tau)) {
 }
 
     
-  }), list( .link = link, .earg = earg, .mv = mv,
+  }), list( .link = link, .earg = earg,
+            .multiple.responses = multiple.responses,
             .omit.constant = omit.constant ))),
 
   loglikelihood = eval(substitute(
@@ -1297,13 +1299,13 @@ if (length(extra$tau)) {
              extra = NULL,
              summation = TRUE) {
 
-      ycounts <- if ( .mv ) {
+      ycounts <- if ( .multiple.responses ) {
                   round(y * extra$orig.w)
                  } else {
                    if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
                    y * w  # Convert proportions to counts
                  }
-      nvec <- if ( .mv ) {
+      nvec <- if ( .multiple.responses ) {
                 w
               } else {
                 if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1328,7 +1330,7 @@ if (length(extra$tau)) {
       }
     }
   }, list( .link = link, .earg = earg,
-          .mv = mv,
+          .multiple.responses = multiple.responses,
           .omit.constant = omit.constant ))),
 
   vfamily = c("posbinomial"),
@@ -1342,8 +1344,8 @@ if (length(extra$tau)) {
     pwts <- if (length(pwts <- object at prior.weights) > 0)
               pwts else weights(object, type = "prior")
 
-    if ( .mv )
-      stop("cannot run simulate() when 'mv = TRUE'")
+    if ( .multiple.responses )
+      stop("cannot run simulate() when 'multiple.responses = TRUE'")
 
     eta <- predict(object)
     binprob <- eta2theta(eta, .link , earg = .earg )
@@ -1353,7 +1355,7 @@ if (length(extra$tau)) {
     w <- pwts  # 20140101
 
 
-    nvec <- if ( .mv ) {
+    nvec <- if ( .multiple.responses ) {
               w
             } else {
               if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1361,7 +1363,7 @@ if (length(extra$tau)) {
             }
     rposbinom(nsim * length(eta), size = nvec, prob = binprob)
   }, list( .link = link, .earg = earg,
-          .mv = mv,
+          .multiple.responses = multiple.responses,
           .omit.constant = omit.constant ))),
 
 
@@ -1372,7 +1374,7 @@ if (length(extra$tau)) {
     use.orig.w <- if (is.numeric(extra$orig.w)) extra$orig.w else
                   rep(1, n)
 
-    nvec <- if ( .mv ) {
+    nvec <- if ( .multiple.responses ) {
               w
             } else {
               if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
@@ -1389,7 +1391,8 @@ if (length(extra$tau)) {
              (1 - binprob) * temp3 / temp1
 
     c(w) * dl.dmu * dmu.deta
-  }), list( .link = link, .earg = earg, .mv = mv ))),
+  }), list( .link = link, .earg = earg,
+            .multiple.responses = multiple.responses ))),
   weight = eval(substitute(expression({
 
     ned2l.dmu2 <- 1 / (binprob * temp1) +
@@ -1401,7 +1404,8 @@ if (length(extra$tau)) {
 
     wz <- c(w) * ned2l.dmu2 * dmu.deta^2
     wz
-  }), list( .link = link, .earg = earg, .mv = mv ))))
+  }), list( .link = link, .earg = earg,
+            .multiple.responses = multiple.responses ))))
 }
 
 
@@ -1563,8 +1567,8 @@ if (length(extra$tau)) {
     for (ii in 1:M) misc$earg[[ii]] <- .earg
 
 
-    misc$mv           <- TRUE
-    misc$iprob        <- .iprob
+    misc$multiple.responses  <- TRUE
+    misc$iprob               <- .iprob
 
 
     R <- tfit$qr$qr[1:ncol.X.vlm, 1:ncol.X.vlm, drop = FALSE]
@@ -1890,8 +1894,8 @@ if (length(extra$tau)) {
     misc$earg[[1]] <- .earg
     misc$earg[[2]] <- .earg
 
-    misc$expected    <- TRUE
-    misc$mv          <- TRUE
+    misc$expected           <- TRUE
+    misc$multiple.responses <- TRUE
     misc$ipcapture   <- .ipcapture
     misc$iprecapture <- .iprecapture
     misc$drop.b      <- .drop.b
@@ -2326,8 +2330,8 @@ if (length(extra$tau)) {
       misc$earg[[ii]] <- .earg
 
 
-    misc$mv           <- TRUE
-    misc$iprob        <- .iprob
+    misc$multiple.responses <- TRUE
+    misc$iprob              <- .iprob
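
The former 'mv' argument of posbinomial() (and the matching misc$mv slot) is
now spelt out as 'multiple.responses'; only the name changes, not the
behaviour. E.g.:

  fam <- posbinomial(multiple.responses = TRUE)  # was: posbinomial(mv = TRUE)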
 
 
 
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 6d74454..3ce6df0 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -650,8 +650,8 @@ gleg.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
 
 
 
-lms.yjn2.control <- function(save.weight = TRUE, ...) {
-    list(save.weight=save.weight)
+lms.yjn2.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
  lms.yjn2 <- function(percentiles = c(25, 50, 75),
@@ -1914,8 +1914,16 @@ ralap <- function(n, location = 0, scale = 1, tau = 0.5,
 }
 
 
+
 palap <- function(q, location = 0, scale = 1, tau = 0.5,
-                  kappa = sqrt(tau/(1-tau))) {
+                  kappa = sqrt(tau/(1-tau)),
+                  lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   NN <- max(length(q), length(location), length(scale), length(kappa),
             length(tau))
@@ -1926,20 +1934,46 @@ palap <- function(q, location = 0, scale = 1, tau = 0.5,
   if (length(tau)      != NN) tau      <- rep(tau,      length.out = NN)
 
   exponent <- -(sqrt(2) / scale) * abs(q - location) *
-             ifelse(q >= location, kappa, 1/kappa)
+              ifelse(q >= location, kappa, 1/kappa)
   temp5 <- exp(exponent) / (1 + kappa^2)
-  ans <- 1 - temp5
   index1 <- (q < location)
-  ans[index1] <- (kappa[index1])^2 * temp5[index1]
 
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log1p(-exp(exponent) / (1 + kappa^2))
+      logtemp5 <- exponent - log1p(kappa^2)
+      ans[index1] <- 2 * log(kappa[index1]) + logtemp5[index1]
+    } else {
+      ans <- (kappa^2 - expm1(exponent)) / (1 + kappa^2)
+      ans[index1] <- (kappa[index1])^2 * temp5[index1]
+    }
+  } else {
+    if (log.p) {
+      ans <- exponent - log1p(kappa^2)  # logtemp5
+      ans[index1] <- log1p(-(kappa[index1])^2 * temp5[index1])
+    } else {
+      ans <- temp5
+      ans[index1] <- (1 + (kappa[index1])^2 *
+                     (-expm1(exponent[index1]))) / (1+(kappa[index1])^2)
+      }
+  } 
   indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
   ans
 }
 
 
+
 qalap <- function(p, location = 0, scale = 1, tau = 0.5,
-                  kappa = sqrt(tau / (1 - tau))) {
+                  kappa = sqrt(tau / (1 - tau)),
+                  lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   NN <- max(length(p), length(location), length(scale), length(kappa),
             length(tau))
@@ -1950,22 +1984,53 @@ qalap <- function(p, location = 0, scale = 1, tau = 0.5,
   if (length(tau)      != NN) tau      <- rep(tau,      length.out = NN)
 
 
-  ans <- p
+
   temp5 <- kappa^2 / (1 + kappa^2)
-  index1 <- (p <= temp5)
-  exponent <- p[index1] / temp5[index1]
-  ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
-                log(exponent) / sqrt(2)
-  ans[!index1] <- location[!index1] - (scale[!index1] / kappa[!index1]) *
-                 (log1p((kappa[!index1])^2) +
-                  log1p(-p[!index1])) / sqrt(2)
-
-  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) &
-            (p >= 0) & (p <= 1)
+  if (lower.tail) {
+    if (log.p) {
+      ans <- exp(p) 
+      index1 <- (exp(p) <= temp5)
+      exponent <- exp(p[index1]) / temp5[index1]
+      ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
+        log(exponent) / sqrt(2)
+      ans[!index1] <- location[!index1] - (scale[!index1] / kappa[!index1]) *
+        (log1p((kappa[!index1])^2) +
+           log(-expm1(p[!index1]))) / sqrt(2)
+    } else {
+      ans <- p 
+      index1 <- (p <= temp5)
+      exponent <- p[index1] / temp5[index1]
+      ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
+        log(exponent) / sqrt(2)
+      ans[!index1] <- location[!index1] - (scale[!index1] / kappa[!index1]) *
+                      (log1p((kappa[!index1])^2) +
+                      log1p(-p[!index1])) / sqrt(2)
+    }
+  } else {
+    if (log.p) {
+      ans <- -expm1(p) 
+      index1 <- (-expm1(p)  <= temp5)
+      exponent <- -expm1(p[index1]) / temp5[index1]
+      ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
+        log(exponent) / sqrt(2)
+      ans[!index1] <- location[!index1] - (scale[!index1] / kappa[!index1]) *
+        (log1p((kappa[!index1])^2) +
+           p[!index1]) / sqrt(2)
+    } else {
+      ans <- exp(log1p(-p)) 
+      index1 <- (p >= (1 / (1+kappa^2))) 
+      exponent <- exp(log1p(-p[index1])) / temp5[index1]
+      ans[index1] <- location[index1] + (scale[index1] * kappa[index1]) *
+        log(exponent) / sqrt(2)
+      ans[!index1] <- location[!index1] - (scale[!index1] / kappa[!index1]) *
+        (log1p((kappa[!index1])^2) +
+           log(p[!index1])) / sqrt(2)
+    }
+  }
+
+  indexTF <- (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
-  ans[p == 0 & indexTF] <- -Inf
-  ans[p == 1 & indexTF] <-  Inf
-  ans
+  ans  
 }
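
The asymmetric-Laplace quantile function should invert the CDF for any tau
in (0, 1), on both the probability and the log-probability scale; a minimal
check of the new arguments:

  p <- c(0.05, 0.5, 0.95); tau <- 0.25
  x <- qalap(p, location = 1, scale = 2, tau = tau)
  all.equal(palap(x, location = 1, scale = 2, tau = tau), p)      # TRUE
  all.equal(qalap(log(p), location = 1, scale = 2, tau = tau,
                  log.p = TRUE), x)                               # TRUE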
 
 
@@ -1991,6 +2056,7 @@ rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
 }
 
 
+
 dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
                     kappa = sqrt(tau/(1-tau)), log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
@@ -2025,31 +2091,66 @@ dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
 
 
 
-
 qloglap <- function(p, location.ald = 0, scale.ald = 1,
-                    tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+                    tau = 0.5, kappa = sqrt(tau/(1-tau)),
+                    lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
   NN <- max(length(p), length(location.ald), length(scale.ald),
             length(kappa))
   p        <- rep(p,            length.out = NN)
-  location <- rep(location.ald, length.out = NN);
+  location <- rep(location.ald, length.out = NN)
   scale    <- rep(scale.ald,    length.out = NN)
-  kappa    <- rep(kappa,        length.out = NN);
+  kappa    <- rep(kappa,        length.out = NN)
   tau      <- rep(tau,          length.out = NN)
 
 
   Alpha <- sqrt(2) * kappa / scale.ald
   Beta  <- sqrt(2) / (scale.ald * kappa)
   Delta <- exp(location.ald)
-
   temp9 <- Alpha + Beta
-  ans <- Delta * (p * temp9 / Alpha)^(1/Beta)
-  index1 <- (p > Alpha / temp9)
-  ans[index1] <- (Delta * ((1-p) * temp9 / Beta)^(-1/Alpha))[index1]
-  ans[p == 0] <- 0
-  ans[p == 1] <- Inf
 
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- ifelse((exp(ln.p) > Alpha / temp9), 
+                    Delta * (-expm1(ln.p) * temp9 / Beta)^(-1/Alpha),
+                    Delta * (exp(ln.p) * temp9 / Alpha)^(1/Beta))
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- ifelse((p > Alpha / temp9), 
+                    Delta * exp((-1/Alpha) * (log1p(-p) + log(temp9/Beta))),
+                    Delta * (p * temp9 / Alpha)^(1/Beta))
+      ans[p <  0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+      ans[p >  1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- ifelse((-expm1(ln.p) > Alpha / temp9), 
+                    Delta * (exp(ln.p) * temp9 / Beta)^(-1/Alpha),
+                    Delta * (-expm1(ln.p) * temp9 / Alpha)^(1/Beta))
+      ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- ifelse((p < (temp9 - Alpha) / temp9), 
+                    Delta * (p * temp9 / Beta)^(-1/Alpha),
+                    Delta * exp((1/Beta)*(log1p(-p) + log(temp9/Alpha))))
+      ans[p <  0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+      ans[p >  1] <- NaN
+    }
+  }
   indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
-            (p >= 0) & (p <= 1)  # &
   ans[!indexTF] <- NaN
   ans
 }
@@ -2057,24 +2158,56 @@ qloglap <- function(p, location.ald = 0, scale.ald = 1,
 
 
 ploglap <- function(q, location.ald = 0, scale.ald = 1,
-                    tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+                    tau = 0.5, kappa = sqrt(tau/(1-tau)),
+                    lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
   NN <- max(length(q), length(location.ald), length(scale.ald),
             length(kappa))
-  location <- rep(location.ald, length.out = NN);
-  scale <- rep(scale.ald, length.out = NN)
-  kappa <- rep(kappa, length.out = NN);
-  q <- rep(q, length.out = NN)
-  tau <- rep(tau, length.out = NN)
+  location <- rep(location.ald, length.out = NN)
+  scale    <- rep(scale.ald,    length.out = NN)
+  kappa    <- rep(kappa,        length.out = NN)
+  q        <- rep(q,            length.out = NN)
+  tau      <- rep(tau,          length.out = NN)
 
   Alpha <- sqrt(2) * kappa / scale.ald
   Beta  <- sqrt(2) / (scale.ald * kappa)
   Delta <- exp(location.ald)
 
   temp9 <- Alpha + Beta
-  ans <- (Alpha / temp9) * (q / Delta)^(Beta)
-  ans[q <= 0] <- 0
-  index1 <- (q >= Delta)
-  ans[index1] <- (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
+  index1 <- (Delta <= q)
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log((Alpha / temp9) * (q / Delta)^(Beta))
+      ans[index1] <- log1p((-(Beta/temp9) * (Delta/q)^(Alpha))[index1])
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- (Alpha / temp9) * (q / Delta)^(Beta)
+      ans[index1] <- -expm1((log(Beta/temp9) + Alpha * log(Delta/q)))[index1]
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log1p(-(Alpha / temp9) * (q / Delta)^(Beta))
+      ans[index1] <- log(((Beta/temp9) * (Delta/q)^(Alpha))[index1])
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- -expm1(log(Alpha/temp9) + Beta * log(q/Delta))
+      ans[index1] <- ((Beta/temp9) * (Delta/q)^(Alpha))[index1]
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  } 
 
   indexTF <- (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)  # &
   ans[!indexTF] <- NaN
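
The log-Laplace pair branches at q = Delta = exp(location.ald); both sides
can be exercised with a simple round trip (a sketch, assuming the patched
package is attached):

  p <- c(0.1, 0.6, 0.9)
  x <- qloglap(p, location.ald = 0.5, scale.ald = 1, tau = 0.3)
  all.equal(ploglap(x, location.ald = 0.5, scale.ald = 1, tau = 0.3), p)
  all.equal(ploglap(x, location.ald = 0.5, scale.ald = 1, tau = 0.3,
                    lower.tail = FALSE, log.p = TRUE), log1p(-p))  # TRUE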
@@ -2092,6 +2225,7 @@ rlogitlap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
 }
 
 
+
 dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
                       kappa = sqrt(tau/(1-tau)), log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
@@ -2125,6 +2259,7 @@ dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
 }
 
 
+
 qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
                       tau = 0.5, kappa = sqrt(tau/(1-tau))) {
   qqq <- qalap(p = p, location = location.ald, scale = scale.ald,
@@ -2172,6 +2307,7 @@ rprobitlap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
 }
 
 
+
 dprobitlap <-
   function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
            kappa = sqrt(tau/(1-tau)), log = FALSE,
@@ -2266,6 +2402,7 @@ pprobitlap <- function(q, location.ald = 0, scale.ald = 1,
 
 
 
+
 rclogloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
                         kappa = sqrt(tau/(1-tau))) {
   cloglog(ralap(n = n, location = location.ald, scale = scale.ald,
@@ -2274,6 +2411,7 @@ rclogloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
 }
 
 
+
 dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
                         kappa = sqrt(tau/(1-tau)), log = FALSE,
                         meth2 = TRUE) {
@@ -3323,33 +3461,82 @@ dlaplace <- function(x, location = 0, scale = 1, log = FALSE) {
 }
 
 
-plaplace <- function(q, location = 0, scale = 1) {
-  if (!is.Numeric(scale, positive = TRUE)) 
-    stop("argument 'scale' must be positive")
-  zedd <- (q-location) / scale
 
+plaplace <- function(q, location = 0, scale = 1,
+                     lower.tail = TRUE, log.p = FALSE) {
+  zedd <- (q - location) / scale
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
   L <- max(length(q), length(location), length(scale))
   if (length(q)        != L) q        <- rep(q,        length.out = L)
   if (length(location) != L) location <- rep(location, length.out = L)
   if (length(scale)    != L) scale    <- rep(scale,    length.out = L)
 
-  ifelse(q < location, 0.5 * exp(zedd), 1 - 0.5 * exp(-zedd))
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- ifelse(q < location, log(0.5) + zedd, log1p(-0.5 * exp(-zedd)))
+    } else {
+      ans <- ifelse(q < location, 0.5 * exp(zedd), 1 - 0.5 * exp(-zedd))
+    }
+  } else {
+    if (log.p) {
+      ans <- ifelse(q < location, log1p(-0.5 * exp(zedd)), log(0.5) - zedd)
+    } else {
+      ans <- ifelse(q < location, 1 - 0.5 * exp(zedd), 0.5 * exp(-zedd))
+    }
+  }
+  ans[scale <= 0] <- NaN
+  ans
 }
 
 
-qlaplace <- function(p, location = 0, scale = 1) {
-  if (!is.Numeric(scale, positive = TRUE)) 
-    stop("argument 'scale' must be positive")
+
+qlaplace <- function(p, location = 0, scale = 1,
+                     lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
 
   L <- max(length(p), length(location), length(scale))
   if (length(p)        != L) p        <- rep(p,        length.out = L)
   if (length(location) != L) location <- rep(location, length.out = L)
   if (length(scale)    != L) scale    <- rep(scale,    length.out = L)
 
-  location - sign(p-0.5) * scale * log(2 * ifelse(p < 0.5, p, 1-p))
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- location - sign(exp(ln.p)-0.5) * scale *
+             log(2 * ifelse(exp(ln.p) < 0.5, exp(ln.p), -expm1(ln.p)))
+    } else {
+      ans <- location - sign(p-0.5) * scale * log(2 * ifelse(p < 0.5, p, 1-p))
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- location - sign(0.5 - exp(ln.p)) * scale *
+             log(2 * ifelse(-expm1(ln.p) < 0.5, -expm1(ln.p), exp(ln.p)))
+     # ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- location - sign(0.5 - p) * scale *
+             log(2 * ifelse(p > 0.5, 1 - p, p))
+    }
+  }
+
+  ans[scale <= 0] <- NaN
+  ans  
 }
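
For the Laplace pair the new arguments reduce to simple identities, e.g.:

  q <- c(-2, 0, 3)
  all.equal(plaplace(q, 1, 2, log.p = TRUE), log(plaplace(q, 1, 2)))  # TRUE
  all.equal(qlaplace(plaplace(q, 1, 2), 1, 2), q)   # quantile inverts CDF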
 
 
+
 rlaplace <- function(n, location = 0, scale = 1) {
 
   use.n <- if ((length.n <- length(n)) > 1) length.n else
@@ -3513,8 +3700,8 @@ rlaplace <- function(n, location = 0, scale = 1) {
 
 
 
-fff.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+fff.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -3836,13 +4023,13 @@ fff.control <- function(save.weight = TRUE, ...) {
 
 
 
+
 dbenini <- function(x, y0, shape, log = FALSE) {
   if (!is.logical(log.arg <- log) || length(log) != 1)
     stop("bad input for argument 'log'")
   rm(log)
 
 
-
   N <- max(length(x), length(shape), length(y0))
   if (length(x)        != N) x        <- rep(x,        length.out = N)
   if (length(shape)    != N) shape    <- rep(shape,    length.out = N)
@@ -3853,17 +4040,24 @@ dbenini <- function(x, y0, shape, log = FALSE) {
   tempxok <- log(x[xok]/y0[xok])
   logdensity[xok] <- log(2*shape[xok]) - shape[xok] * tempxok^2 +
                     log(tempxok) - log(x[xok])
+  logdensity[is.infinite(x)] <- log(0)  # 20141209 KaiH
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-pbenini <- function(q, y0, shape) {
+
+pbenini <- function(q, y0, shape, lower.tail = TRUE, log.p = FALSE) {
   if (!is.Numeric(q))
     stop("bad input for argument 'q'")
   if (!is.Numeric(shape, positive = TRUE))
     stop("bad input for argument 'shape'")
   if (!is.Numeric(y0, positive = TRUE))
     stop("bad input for argument 'y0'")
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
   N <- max(length(q), length(shape), length(y0))
   if (length(q)        != N) q        <- rep(q,        length.out = N)
   if (length(shape)    != N) shape    <- rep(shape,    length.out = N)
@@ -3871,23 +4065,59 @@ pbenini <- function(q, y0, shape) {
 
   ans <- y0 * 0
   ok <- q > y0
-  ans[ok] <-   -expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2)
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans[ok] <- log(-expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2))
+      ans[q <= y0 ] <- -Inf
+    } else {
+      ans[ok] <- -expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2)
+    }
+  } else {
+    if (log.p) {
+      ans[ok] <- -shape[ok] * (log(q[ok]/y0[ok]))^2
+      ans[q <= y0] <- 0
+    } else {
+      ans[ok] <- exp(-shape[ok] * (log(q[ok]/y0[ok]))^2)
+      ans[q <= y0] <- 1
+    }
+  } 
+
   ans
 }
 
 
-qbenini <- function(p, y0, shape) {
-  if (!is.Numeric(p, positive = TRUE) ||
-      any(p >= 1)) 
-    stop("bad input for argument 'p'")
-  if (!is.Numeric(shape, positive = TRUE))
-  stop("bad input for argument 'shape'")
-  if (!is.Numeric(y0, positive = TRUE))
-    stop("bad input for argument 'y0'")
-  y0 * exp(sqrt(-log1p(-p) / shape))
+
+qbenini <- function(p, y0, shape, lower.tail = TRUE, log.p = FALSE) {
+
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- y0 * exp(sqrt(-log(-expm1(ln.p)) / shape))
+    } else {
+      ans <- y0 * exp(sqrt(-log1p(-p) / shape))
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- y0 * exp(sqrt(-ln.p / shape))
+    } else { 
+      ans <-  y0 * exp(sqrt(-log(p) / shape))
+    }
+  }
+  ans[y0 <= 0] <- NaN
+  ans
 }
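
A round trip through the Benini pair, including the upper-tail/log-scale
path (log1p(-p) is the log of the upper-tail probability):

  p <- c(0.2, 0.5, 0.8)
  x <- qbenini(p, y0 = 1, shape = 2)
  all.equal(pbenini(x, y0 = 1, shape = 2), p)                 # TRUE
  all.equal(qbenini(log1p(-p), y0 = 1, shape = 2,
                    lower.tail = FALSE, log.p = TRUE), x)     # TRUE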
 
 
+
 rbenini <- function(n, y0, shape) {
   y0 * exp(sqrt(-log(runif(n)) / shape))
 }
@@ -4076,6 +4306,9 @@ rbenini <- function(n, y0, shape) {
     if (abs(x) > floor(x)) { # zero prob for -ve or non-integer
       0
     } else
+    if (x == Inf) { # 20141215 KaiH  
+      0
+    } else
     if (x > bigx) {
       z <- (log(x) - meanlog) / sdlog
       (1 + (z^2 + log(x) - meanlog - 1) / (2 * x * sdlog^2)) *
@@ -4191,59 +4424,71 @@ rtriangle <- function(n, theta, lower = 0, upper = 1) {
 }
 
 
-qtriangle <- function(p, theta, lower = 0, upper = 1) {
-  if (!is.Numeric(p, positive = TRUE))
-    stop("bad input for argument 'p'")
-  if (!is.Numeric(theta))
-    stop("bad input for argument 'theta'")
-  if (!is.Numeric(lower))
-    stop("bad input for argument 'lower'")
-  if (!is.Numeric(upper))
-    stop("bad input for argument 'upper'")
-  if (!all(lower < theta & theta < upper))
-    stop("lower < theta < upper values are required")
 
+qtriangle <- function(p, theta, lower = 0, upper = 1,
+                      lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  
   N <- max(length(p), length(theta), length(lower), length(upper))
   if (length(p)     != N) p     <- rep(p,     length.out = N)
   if (length(theta) != N) theta <- rep(theta, length.out = N)
   if (length(lower) != N) lower <- rep(lower, length.out = N)
   if (length(upper) != N) upper <- rep(upper, length.out = N)
 
-  bad <- (p < 0) | (p > 1)
-  if (any(bad))
-    stop("bad input for argument 'p'")
-
-  Neg <- (p <= (theta - lower)/(upper - lower))
   ans <- as.numeric(NA) * p
-  temp1 <- p * (upper - lower) * (theta - lower)
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      Neg <- (exp(ln.p) <= (theta - lower) / (upper - lower))
+      temp1 <- exp(ln.p) * (upper - lower) * (theta - lower)
+      Pos <- (exp(ln.p) >= (theta - lower) / (upper - lower))
+      pstar <- (exp(ln.p) - (theta - lower) / (upper - lower)) /
+               ((upper - theta) / (upper - lower))
+    } else {
+      Neg <- (p <= (theta - lower) / (upper - lower))
+      temp1 <- p * (upper - lower) * (theta - lower)
+      Pos <- (p >= (theta - lower) / (upper - lower))
+      pstar <- (p - (theta - lower) / (upper - lower)) /
+               ((upper - theta) / (upper - lower))
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      Neg <- (exp(ln.p) >= (upper- theta) / (upper - lower))
+      temp1 <- -expm1(ln.p) * (upper - lower) * (theta - lower)
+      Pos <- (exp(ln.p) <= (upper- theta) / (upper - lower))
+      pstar <- (-expm1(ln.p) - (theta - lower) / (upper - lower)) /
+               ((upper - theta) / (upper - lower))
+    } else { 
+      Neg <- (p >= (upper- theta) / (upper - lower))
+      temp1 <- (1 - p) * (upper - lower) * (theta - lower)
+      Pos <- (p <= (upper- theta) / (upper - lower))
+      pstar <- ((upper- theta) / (upper - lower) - p) /
+               ((upper - theta) / (upper - lower))
+    }
+  }
   ans[ Neg] <- lower[ Neg] + sqrt(temp1[ Neg])
-
-  Pos <- (p >= (theta - lower)/(upper - lower))
   if (any(Pos)) {
-    pstar <- (p - (theta - lower)/(upper - lower)) / (1 -
-            (theta - lower) / (upper - lower))
     qstar <- cbind(1 - sqrt(1-pstar), 1 + sqrt(1-pstar))
     qstar <- qstar[Pos,, drop = FALSE]
     qstar <- ifelse(qstar[, 1] >= 0 & qstar[, 1] <= 1,
-                   qstar[, 1],
-                   qstar[, 2])
+                    qstar[, 1],
+                    qstar[, 2])
     ans[Pos] <- theta[Pos] + qstar * (upper - theta)[Pos]
   }
+  
+  ans[theta < lower | theta > upper] <- NaN
   ans
 }
 
 
-ptriangle <- function(q, theta, lower = 0, upper = 1) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(theta))
-    stop("bad input for argument 'theta'")
-  if (!is.Numeric(lower))
-    stop("bad input for argument 'lower'")
-  if (!is.Numeric(upper))
-    stop("bad input for argument 'upper'")
-  if (!all(lower < theta & theta < upper))
-    stop("lower < theta < upper values are required")
+
+ptriangle <- function(q, theta, lower = 0, upper = 1, 
+                      lower.tail = TRUE, log.p = FALSE) {
 
   N <- max(length(q), length(theta), length(lower), length(upper))
   if (length(q)     != N) q     <- rep(q,     length.out = N)
@@ -4251,17 +4496,61 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
   if (length(lower) != N) lower <- rep(lower, length.out = N)
   if (length(upper) != N) upper <- rep(upper, length.out = N)
 
-  ans <- q * 0
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+  
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
-  qstar <- (q - lower)^2 / ((upper-lower) * (theta-lower))
+  ans <- q * 0
+  qstar <- (q - lower)^2 / ((upper - lower) * (theta - lower))
   Neg <- (lower <= q & q <= theta)
-  ans[Neg] <- (qstar)[Neg]
+
+
+  ans[Neg] <- if (lower.tail) {
+    if (log.p) {
+      (log(qstar))[Neg]
+    } else {
+      qstar[Neg]
+    }
+  } else {
+    if (log.p) {
+      (log1p(-qstar))[Neg]
+    } else {
+      1 - qstar[Neg]
+    }
+  } 
 
   Pos <- (theta <= q & q <= upper)
   qstar <- (q - theta) / (upper-theta)
-  ans[Pos] <- ((theta-lower)/(upper-lower))[Pos] +
-             (qstar * (2-qstar) * (upper-theta) / (upper - lower))[Pos]
-  ans[q >= upper] <- 1
+  
+  if (lower.tail) {
+    if (log.p) {
+      ans[Pos] <- log(((theta-lower)/(upper-lower))[Pos] +
+                  (qstar * (2-qstar) * (upper-theta) / (upper - lower))[Pos])
+      ans[q <= lower] <- -Inf
+      ans[q >= upper] <- 0
+    } else {
+      ans[Pos] <- ((theta-lower)/(upper-lower))[Pos] +
+                  (qstar * (2-qstar) * (upper-theta) / (upper - lower))[Pos]
+      ans[q <= lower] <- 0
+      ans[q >= upper] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans[Pos] <- log(((upper - theta)/(upper-lower))[Pos] -
+                  (qstar * (2-qstar) * (upper-theta) / (upper - lower))[Pos])
+      ans[q <= lower] <- 0
+      ans[q >= upper] <- -Inf
+    } else {
+      ans[Pos] <- ((upper - theta)/(upper-lower))[Pos] -
+                  (qstar * (2-qstar) * (upper-theta) / (upper - lower))[Pos]
+      ans[q <= lower] <- 1
+      ans[q >= upper] <- 0
+    }
+  } 
+
+  ans[theta < lower | theta > upper] <- NaN
   ans
 }
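
On [theta, upper] the upper tail of the triangle CDF is
(1 - qstar)^2 * (upper - theta)/(upper - lower), which is what the
subtraction in the Pos branches computes; the tails are then complementary
and qtriangle() inverts ptriangle():

  q <- c(0.2, 0.5, 0.9); theta <- 0.4
  all.equal(ptriangle(q, theta, lower.tail = FALSE),
            1 - ptriangle(q, theta))                      # should be TRUE
  all.equal(qtriangle(ptriangle(q, theta), theta), q)     # should be TRUE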
 
@@ -4271,7 +4560,7 @@ ptriangle <- function(q, theta, lower = 0, upper = 1) {
 
  triangle <-
   function(lower = 0, upper = 1,
-           link = elogit(min = 0, max = 1),
+           link = extlogit(min = 0, max = 1),
            itheta = NULL) {
 
 
@@ -4716,8 +5005,8 @@ loglaplace1.control <- function(maxit = 300, ...) {
 
 
 
-loglaplace2.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+loglaplace2.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
  loglaplace2 <- function(tau = NULL,
diff --git a/R/family.quantal.R b/R/family.quantal.R
index e46d5c4..9162af1 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -226,7 +226,7 @@
 
 
 if (FALSE)
- Abbott <- function(lprob1 = elogit(min = 0, max = 1),  # For now, that is
+ Abbott <- function(lprob1 = extlogit(min = 0, max = 1),  # For now, that is
                    lprob0 = "logit",
                    iprob0 = NULL, iprob1 = NULL,
                    nointercept = 2,  # NULL,
diff --git a/R/family.rcim.R b/R/family.rcim.R
index 4c63236..7c31a24 100644
--- a/R/family.rcim.R
+++ b/R/family.rcim.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index d380f1d..36a393a 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.robust.R b/R/family.robust.R
index f4988fb..84a0d52 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -93,28 +93,60 @@ rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
 
 
 
-qhuber <- function (p, k = 0.862, mu = 0, sigma = 1) {
-  if (min(sigma) <= 0)
-    stop("argument 'sigma' must be positive")
-  if (min(k)     <= 0)
-    stop("argument 'k' must be positive")
+qhuber <- function (p, k = 0.862, mu = 0, sigma = 1,
+                    lower.tail = TRUE, log.p = FALSE ) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'") 
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   cnorm <- sqrt(2 * pi) * ((2 * pnorm(k) - 1) + 2 * dnorm(k) / k)
-  x <- pmin(p, 1 - p)
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p 
+      x <- pmin(exp(ln.p), -expm1(ln.p))
+    } else {
+      x <- pmin(p, 1 - p)
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      x <- pmin(-expm1(ln.p), exp(ln.p))
+    } else { 
+      x <- pmin(1 - p, p)
+    }
+  }
   q <- ifelse(x <= sqrt(2 * pi) * dnorm(k) / ( k * cnorm),
               log(k * cnorm * x) / k - k / 2,
               qnorm(abs(1 - pnorm(k) + x * cnorm / sqrt(2 * pi) -
                     dnorm(k) / k)))
-  ifelse(p < 0.5, mu + q * sigma,
-                  mu - q * sigma)
+  ans <- if (lower.tail) {
+    if (log.p) {
+      ifelse(exp(ln.p) < 0.5, mu + q * sigma, mu - q * sigma)
+    } else {
+      ifelse(p < 0.5, mu + q * sigma, mu - q * sigma)
+    }
+  } else {
+    if (log.p) {
+      ifelse(exp(ln.p) > 0.5, mu + q * sigma, mu - q * sigma)
+    } else {
+      ifelse(p > 0.5, mu + q * sigma, mu - q * sigma)
+    }
+  }
+  ans[k <= 0 | sigma <= 0] <- NaN
+  ans
 }
 
 
 
 
-phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
-  if (any(sigma <= 0))
-    stop("argument 'sigma' must be positive")
+phuber <- function(q, k = 0.862, mu = 0, sigma = 1,
+                   lower.tail = TRUE, log.p = FALSE ) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'") 
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
   A1  <- (2 * dnorm(k) / k - 2 * pnorm(-k))
   eps <- A1 / (1 + A1)
@@ -123,8 +155,25 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
   p <- ifelse(x <= -k ,
               exp(k^2 / 2) / k * exp(k * x) / sqrt(2 * pi),
               dnorm(k) / k + pnorm(x) - pnorm(-k))
-  p <- p * (1 - eps)
-  ifelse(zedd <= 0, p, 1 - p)
+  
+  if (lower.tail) {
+    if (log.p) {
+      ans <- ifelse(zedd <= 0, log(p) + log1p(-eps),
+                               log1p(-exp(log(p) + log1p(-eps))))
+    } else {
+      ans <- ifelse(zedd <= 0, exp(log(p) + log1p(-eps)),
+                               -expm1(log(p) + log1p(-eps)))
+    }
+  } else {
+    if (log.p) {
+      ans <- ifelse(zedd <= 0, log1p(-exp(log(p) + log1p(-eps))),
+                               log(p) + log1p(-eps))
+    } else {
+      ans <- ifelse(zedd <= 0, -expm1(log(p) + log1p(-eps)),
+                               exp(log(p) + log1p(-eps)))
+    }
+  } 
+  ans
 }
 
 
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 9f27063..dff26b1 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -1789,11 +1789,11 @@ get.rrvglm.se1 <- function(fit, omit13 = FALSE, kill.all = FALSE,
                vlm(bb,
                    constraints = Hlist, criterion = "d", weights = wz,
                    data = bbdata,
-                   save.weight = TRUE, smart = FALSE, trace = trace.arg,
+                   save.weights = TRUE, smart = FALSE, trace = trace.arg,
                    x.arg = TRUE) else
                vlm(bb,
                    constraints = Hlist, criterion = "d", weights = wz,
-                   save.weight = TRUE, smart = FALSE, trace = trace.arg,
+                   save.weights = TRUE, smart = FALSE, trace = trace.arg,
                    x.arg = TRUE)
 
 
diff --git a/R/family.sur.R b/R/family.sur.R
index acad9d6..a0679ed 100644
--- a/R/family.sur.R
+++ b/R/family.sur.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.survival.R b/R/family.survival.R
index 4ec7b0e..641187b 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -193,35 +193,74 @@ dbisa <- function(x, scale = 1, shape, log = FALSE) {
 }
 
 
-pbisa <- function(q, scale = 1, shape) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(shape, positive = TRUE))
-    stop("bad input for argument 'shape'")
-  if (!is.Numeric(scale, positive = TRUE))
-    stop("bad input for argument 'scale'")
-  ans <- pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape)
-  ans[scale < 0 | shape < 0] <- NA
-  ans[q <= 0] <- 0
+
+pbisa <- function(q, scale = 1, shape,
+                  lower.tail = TRUE, log.p = FALSE) {
+
+  
+  ans <- pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape,
+               lower.tail = lower.tail, log.p = log.p)
+  ans[scale < 0 | shape < 0] <- NaN
+  ans[q <= 0] <- if (lower.tail) ifelse(log.p, log(0), 0) else
+                                 ifelse(log.p, log(1), 1)
   ans
 }
 
 
-qbisa <- function(p, scale = 1, shape) {
-  if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
-      stop("argument 'p' must have values inside the interval (0,1)")
-  if (!is.Numeric(shape, positive = TRUE))
-    stop("bad input for argument 'shape'")
-  if (!is.Numeric(scale, positive = TRUE))
-    stop("bad input for argument 'scale'")
-  A <- qnorm(p)
+
+qbisa <- function(p, scale = 1, shape,
+                  lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+  A <- qnorm(p, lower.tail = lower.tail, log.p = log.p)
   temp1 <- A * shape * sqrt(4 + A^2 * shape^2)
   ans1 <- (2 + A^2 * shape^2 + temp1) * scale / 2
   ans2 <- (2 + A^2 * shape^2 - temp1) * scale / 2
-  ifelse(p < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- ifelse(exp(ln.p) < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+      ans[ln.p == -Inf] <- 0
+      ans[ln.p == 0] <- Inf
+     #ans[ln.p > 0] <- NaN
+    } else {
+      ans <- ifelse(p < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+     #ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+     #ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- ifelse(-expm1(ln.p) < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+      ans[ln.p == -Inf] <- Inf
+      ans[ln.p == 0] <- 0
+     #ans[ln.p > 0] <- NaN
+    } else { 
+      ans <- ifelse(p > 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
+     #ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- 0
+     #ans[p > 1] <- NaN
+    }
+  }
+
+  ans[scale < 0 | shape < 0] <- NaN
+  ans
 }
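
qbisa() solves sqrt(q/scale) - sqrt(scale/q) = shape * qnorm(p) via the
quadratic whose two roots are ans1 and ans2, the tail picking the right one;
a minimal round trip on both scales:

  p <- c(0.1, 0.5, 0.9)
  x <- qbisa(p, scale = 2, shape = 0.7)
  all.equal(pbisa(x, scale = 2, shape = 0.7), p)                     # TRUE
  all.equal(qbisa(log(p), scale = 2, shape = 0.7, log.p = TRUE), x)  # TRUE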
 
 
+
 rbisa <- function(n, scale = 1, shape) {
 
   A <- rnorm(n)
@@ -249,9 +288,6 @@ rbisa <- function(n, scale = 1, shape) {
                   iscale = 1,      ishape = NULL,
                   imethod = 1, zero = NULL, nowarning = FALSE) {
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
 
 
   lshape <- as.list(substitute(lshape))
diff --git a/R/family.ts.R b/R/family.ts.R
index 56c7ae8..f223a11 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -129,14 +129,14 @@ rrar.Wmat <- function(y, Ranks., MM, ki, plag, aa, uu, n, coeffs) {
 
 
 
-rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
+rrar.control <- function(stepsize = 0.5, save.weights = TRUE, ...) {
 
   if (stepsize <= 0 || stepsize > 1) {
     warning("bad value of stepsize; using 0.5 instead")
     stepsize <- 0.5
   }
   list(stepsize = stepsize,
-       save.weight = as.logical(save.weight)[1])
+       save.weights = as.logical(save.weights)[1])
 }
 
 
@@ -282,8 +282,8 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
 
 
 
-vglm.garma.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = as.logical(save.weight)[1])
+vglm.garma.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = as.logical(save.weights)[1])
 }
 
 
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 4bb3b49..fa02e3e 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -167,8 +167,8 @@
 
 
 
-hzeta.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+hzeta.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -322,7 +322,8 @@ dhzeta <- function(x, alpha, log = FALSE) {
 }
 
 
-phzeta <- function(q, alpha) {
+
+phzeta <- function(q, alpha, log.p = FALSE) {
 
 
   nn <- max(length(q), length(alpha))
@@ -334,12 +335,15 @@ phzeta <- function(q, alpha) {
   ans <- 0 * q
   ans[!zero] <- 1 - (2*q[!zero]+1)^(-alpha[!zero])
 
-  ans[alpha <= 0] <- NaN
+  ans[q == -Inf] <- 0  # 20141215 KaiH
+  ans[q ==  Inf] <- 1  # 20141215 KaiH
 
-  ans
+  ans[alpha <= 0] <- NaN
+  if (log.p) log(ans) else ans
 }
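
phzeta() only gains a 'log.p' argument, applied after the usual computation,
so the identity below is immediate:

  q <- c(1, 5, 50); alpha <- 1.2
  all.equal(phzeta(q, alpha, log.p = TRUE), log(phzeta(q, alpha)))  # TRUE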
 
 
+
 qhzeta <- function(p, alpha) {
 
   if (!is.Numeric(p, positive = TRUE) ||
@@ -878,7 +882,8 @@ rdiric <- function(n, shape, dimension = NULL,
 
 
 
- dirichlet <- function(link = "loge", parallel = FALSE, zero = NULL) {
+ dirichlet <- function(link = "loge", parallel = FALSE, zero = NULL,
+                       imethod = 1) {
 
 
   link <- as.list(substitute(link))
@@ -886,6 +891,12 @@ rdiric <- function(n, shape, dimension = NULL,
   link <- attr(earg, "function.name")
 
 
+  if (!is.Numeric(imethod, length.arg = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
+
+
   if (length(zero) &&
     !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
     stop("bad input for argument 'zero'")
@@ -922,11 +933,16 @@ rdiric <- function(n, shape, dimension = NULL,
     predictors.names <-
       namesof(mynames1, .link , earg = .earg , short = TRUE)
     if (!length(etastart)) {
-      yy <- matrix(t(y) %*% rep(1 / nrow(y), nrow(y)), nrow(y), M,
-                   byrow = TRUE)
+      yy <- if ( .imethod == 2) {
+        matrix(colMeans(y), nrow(y), M, byrow = TRUE)
+      } else {
+        0.5 * (y + matrix(colMeans(y), nrow(y), M, byrow = TRUE))
+      }
+
       etastart <- theta2eta(yy, .link , earg = .earg )
     }
-  }), list( .link = link, .earg = earg ))),
+  }), list( .link = link, .earg = earg,
+            .imethod = imethod ))),
   linkinv = eval(substitute(function(eta, extra = NULL) {
     shape <- eta2theta(eta, .link , earg = .earg )
     prop.table(shape, 1)
@@ -941,7 +957,9 @@ rdiric <- function(n, shape, dimension = NULL,
       misc$earg[[ii]] <- .earg
 
     misc$expected <- TRUE
-  }), list( .link = link, .earg = earg ))),
+    misc$imethod <- .imethod
+  }), list( .link = link, .earg = earg,
+            .imethod = imethod ))),
   loglikelihood = eval(substitute(
     function(mu, y, w, residuals = FALSE, eta,
              extra = NULL,
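
dirichlet() gains an 'imethod' switch for the initial shape values:
imethod = 1 (the default) shrinks each observation halfway towards the
column means, while imethod = 2 uses the column means alone. A hypothetical
fit selecting the latter (the data and variable names are made up):

  # ddata: rows of (y1, y2, y3) are proportions summing to 1
  fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet(imethod = 2), data = ddata)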
@@ -1106,25 +1124,26 @@ dzeta <- function(x, p, log = FALSE) {
   rm(log)
 
 
-    if (!is.Numeric(p, positive = TRUE))  # || min(p) <= 1
-        stop("'p' must be numeric and > 0")
-    LLL <- max(length(p), length(x))
-    x <- rep(x, length.out = LLL);
-    p <- rep(p, length.out = LLL)
+  if (!is.Numeric(p, positive = TRUE))  # || min(p) <= 1
+      stop("'p' must be numeric and > 0")
+  LLL <- max(length(p), length(x))
+  x <- rep(x, length.out = LLL);
+  p <- rep(p, length.out = LLL)
 
-    ox <- !is.finite(x)
-    zero <- ox | round(x) != x | x < 1
-    if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
-    ans <- rep(if (log.arg) log(0) else 0, length.out = LLL)
-    if (any(!zero)) {
-        if (log.arg) {
-            ans[!zero] <- (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
-        } else {
-            ans[!zero] <- x[!zero]^(-p[!zero]-1) / zeta(p[!zero]+1)
-        }
-    }
-    if (any(ox)) ans[ox] <- NA
-    ans
+  ox <- !is.finite(x)
+  zero <- ox | round(x) != x | x < 1
+  if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
+  ans <- rep(if (log.arg) log(0) else 0, length.out = LLL)
+  if (any(!zero)) {
+      if (log.arg) {
+          ans[!zero] <- (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
+      } else {
+          ans[!zero] <- x[!zero]^(-p[!zero]-1) / zeta(p[!zero]+1)
+      }
+  }
+  if (any(ox))
+    ans[ox] <- 0.0  # 20141215 KaiH
+  ans
 }
 
 
@@ -1330,7 +1349,7 @@ dzipf <- function(x, N, s, log = FALSE) {
 
 
 
-pzipf <- function(q, N, s) {
+pzipf <- function(q, N, s, log.p = FALSE) {
     if (!is.Numeric(q))
         stop("bad input for argument 'q'")
     if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
@@ -1350,10 +1369,11 @@ pzipf <- function(q, N, s) {
     if (any(!zeroOR1))
         ans[!zeroOR1] <- gharmonic(floorq[!zeroOR1], s[!zeroOR1]) /
                         gharmonic(N[!zeroOR1], s[!zeroOR1])
-    ans
+    if (log.p) log(ans) else ans
 }
 
 
+
  zipf <- function(N = NULL, link = "loge", init.s = NULL) {
 
   if (length(N) &&
@@ -1473,8 +1493,8 @@ pzipf <- function(q, N, s) {
 
 
 
-cauchy.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+cauchy.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -2323,7 +2343,7 @@ dfelix <- function(x, a = 0.25, log = FALSE) {
 
 
 
- felix <- function(link = elogit(min = 0, max = 0.5), imethod = 1) {
+ felix <- function(link = extlogit(min = 0, max = 0.5), imethod = 1) {
 
   link <- as.list(substitute(link))
   earg <- link2list(link)
@@ -4064,8 +4084,8 @@ rbetageom <- function(n, shape1, shape2) {
 
 
 
-negbinomial.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+negbinomial.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -4074,8 +4094,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
   function(lmu = "loge", lsize = "loge",
            imu = NULL,   isize = NULL,
            probs.y = 0.75,
-           nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
+           nsimEIM = 250, cutoff.prob = 0.995, # Maxiter = 5000,
+           max.qnbinom = 1000,
+           max.chunk.Mb = 20,  # max.memory = Inf is allowed
            deviance.arg = FALSE, imethod = 1,
+           gsize = exp((-4):4),
            parallel = FALSE,
            ishrinkage = 0.95, zero = -2) {
 
@@ -4087,10 +4110,6 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-
-
-
-
   alternate.derivs <- FALSE  # 20130823; added for 'nbcanlink'
 
 
@@ -4102,6 +4121,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
   lmuuu <- as.list(substitute(lmu))
   emuuu <- link2list(lmuuu)
   lmuuu <- attr(emuuu, "function.name")
+  
   imuuu <- imu
 
   lsize <- as.list(substitute(lsize))
@@ -4114,13 +4134,10 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
   if (length(isize) && !is.Numeric(isize, positive = TRUE))
     stop("bad input for argument 'isize'")
 
-  if (!is.Numeric(cutoff, length.arg = 1) ||
-    cutoff < 0.8 ||
-    cutoff >= 1)
-    stop("range error in the argument 'cutoff'")
-  if (!is.Numeric(Maxiter, integer.valued = TRUE, length.arg = 1) ||
-    Maxiter < 100)
-    stop("bad input for argument 'Maxiter'")
+  if (!is.Numeric(cutoff.prob, length.arg = 1) ||
+    cutoff.prob < 0.95 ||
+    cutoff.prob >= 1)
+    stop("range error in the argument 'cutoff.prob'")
   if (!is.Numeric(imethod, length.arg = 1,
     integer.valued = TRUE, positive = TRUE) ||
      imethod > 3)
@@ -4130,13 +4147,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
      ishrinkage > 1)
     stop("bad input for argument 'ishrinkage'")
 
-  if (!is.null(nsimEIM)) {
     if (!is.Numeric(nsimEIM, length.arg = 1, integer.valued = TRUE))
       stop("bad input for argument 'nsimEIM'")
     if (nsimEIM <= 10)
       warning("argument 'nsimEIM' should be an integer ",
                "greater than 10, say")
-  }
 
 
     if (!is.logical( parallel ) || length( parallel ) != 1)
@@ -4174,13 +4189,14 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-
-
   infos = eval(substitute(function(...) {
     list(M1 = 2,
          Q1 = 1,
-         zero = .zero)
-  }, list( .zero = zero ))),
+         multipleResponses = TRUE,
+         lmu = .lmuuu ,
+         lsize = .lsize ,
+         zero = .zero )
+  }, list( .zero = zero, .lsize = lsize, .lmuuu = lmuuu ))),
 
 
 
@@ -4197,10 +4213,6 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     y <- temp5$y
 
 
-
-
-
-
     assign("CQO.FastAlgorithm",
           ( .lmuuu == "loge") && ( .lsize == "loge"),
            envir = VGAMenv)
@@ -4223,14 +4235,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     NOS <- ncoly <- ncol(y)  # Number of species
     predictors.names <-
      c(namesof(if (NOS == 1) "mu"   else paste("mu",   1:NOS, sep = ""),
-                .lmuuu, earg = .emuuu, tag = FALSE),
+                .lmuuu , earg = .emuuu , tag = FALSE),
        namesof(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
                 .lsize , earg = .esize , tag = FALSE))
     predictors.names <- predictors.names[interleave.VGAM(M, M = M1)]
 
-    if (is.null( .nsimEIM )) {
-       save.weight <- control$save.weight <- FALSE
-    }
 
     if (is.numeric( .mu.init ))
       MU.INIT <- matrix( .mu.init , nrow(y), ncol(y), byrow = TRUE)
@@ -4238,36 +4247,35 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
     if (!length(etastart)) {
       mu.init <- y
-      for (iii in 1:ncol(y)) {
+      for (jay in 1:ncol(y)) {
         use.this <- if ( .imethod == 1) {
-          weighted.mean(y[, iii], w[, iii]) + 1/16
+          weighted.mean(y[, jay], w[, jay]) + 1/16
         } else if ( .imethod == 3) {
-          c(quantile(y[, iii], probs = .probs.y ) + 1/16)
+          c(quantile(y[, jay], probs = .probs.y ) + 1/16)
         } else {
-          median(y[, iii]) + 1/16
+          median(y[, jay]) + 1/16
         }
 
         if (is.numeric( .mu.init )) {
-          mu.init[, iii] <- MU.INIT[, iii]
+          mu.init[, jay] <- MU.INIT[, jay]
         } else {
-          medabsres <- median(abs(y[, iii] - use.this)) + 1/32
+          medabsres <- median(abs(y[, jay] - use.this)) + 1/32
           allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
-          mu.init[, iii] <- use.this + (1 - .ishrinkage ) *
-                           allowfun(y[, iii] - use.this, maxtol = medabsres)
+          mu.init[, jay] <- use.this + (1 - .ishrinkage ) *
+                           allowfun(y[, jay] - use.this, maxtol = medabsres)
 
-          mu.init[, iii] <- abs(mu.init[, iii]) + 1 / 1024
+          mu.init[, jay] <- abs(mu.init[, jay]) + 1 / 1024
         }
-      }  # of for (iii)
+      }  # of for (jay)
 
       if ( is.Numeric( .k.init )) {
-        kay.init <- matrix( .k.init, nrow = n, ncol = NOS, byrow = TRUE)
+        kay.init <- matrix( .k.init , nrow = n, ncol = NOS, byrow = TRUE)
       } else {
         negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
             mu <- extraargs
             sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
         }
-        k.grid <- 2^((-7):7)
-        k.grid <- 2^(seq(-8, 8, length = 40))
+        k.grid <- .gsize
         kay.init <- matrix(0, nrow = n, ncol = NOS)
         for (spp. in 1:NOS) {
           kay.init[, spp.] <- grid.search(k.grid,
@@ -4295,7 +4303,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
       }
   }), list( .lmuuu = lmuuu, .lsize = lsize,
             .emuuu = emuuu, .esize = esize,
-            .mu.init = imu,
+            .mu.init = imu, .gsize = gsize,
             .deviance.arg = deviance.arg,
             .k.init = isize, .probs.y = probs.y,
             .ishrinkage = ishrinkage, .nsimEIM = nsimEIM,
@@ -4326,8 +4334,12 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     if (exists("CQO.FastAlgorithm", envir = VGAMenv))
         rm("CQO.FastAlgorithm", envir = VGAMenv)
 
-    temp0303 <- c(rep( .lmuuu, length = NOS),
-                 rep( .lsize , length = NOS))
+
+    save.weights <- control$save.weights <- !all(ind2)
+
+    
+    temp0303 <- c(rep( .lmuuu , length = NOS),
+                  rep( .lsize , length = NOS))
     names(temp0303) =
       c(if (NOS == 1) "mu"   else paste("mu",   1:NOS, sep = ""),
         if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
@@ -4341,7 +4353,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
       misc$earg[[M1*ii  ]] <- .esize
     }
 
-    misc$cutoff <- .cutoff 
+    misc$max.chunk.Mb <- .max.chunk.Mb
+    misc$cutoff.prob <- .cutoff.prob
     misc$imethod <- .imethod 
     misc$nsimEIM <- .nsimEIM
     misc$expected <- TRUE
@@ -4349,7 +4362,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     misc$multipleResponses <- TRUE
   }), list( .lmuuu = lmuuu, .lsize = lsize,
             .emuuu = emuuu, .esize = esize,
-            .cutoff = cutoff,
+            .cutoff.prob = cutoff.prob,  # .min.size = min.size,
+            .max.chunk.Mb = max.chunk.Mb,
             .nsimEIM = nsimEIM,
             .ishrinkage = ishrinkage,
             .imethod = imethod ))),
@@ -4388,7 +4402,8 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     NOS <- ncol(eta) / M1
 
     eta.k <- eta[, M1*(1:NOS), drop = FALSE]
-    if ( .lsize == "loge") {
+
+    if ( FALSE && .lsize == "loge") {
         bigval <- 68
         eta.k <- ifelse(eta.k >  bigval,  bigval, eta.k)
         eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
@@ -4397,8 +4412,6 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-
-
     newemu <- .emuuu
     if ( .lmuuu == "nbcanlink") {
       newemu$size <- kmat
@@ -4475,15 +4488,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-
-
-
-
     M1 <- 2
     NOS <- ncol(eta) / M1
     M <- ncol(eta)
     eta.k <- eta[, M1*(1:NOS)  , drop = FALSE]
-    if ( .lsize == "loge") {
+    if (FALSE && .lsize == "loge") {
       bigval <- 68
       eta.k <- ifelse(eta.k >  bigval,  bigval, eta.k)
       eta.k <- ifelse(eta.k < -bigval, -bigval, eta.k)
@@ -4512,7 +4521,7 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
       newemu$wrt.eta <- 2
     dk.deta1 <- dtheta.deta(mu, .lmuuu , earg = newemu)  # eta2
 
-    dk.deta2 <- dtheta.deta(kmat, .lsize , earg = .esize)
+    dk.deta2 <- dtheta.deta(kmat, .lsize , earg = .esize )
 
 
 
@@ -4528,15 +4537,6 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
     }
 
 
-
-
-    if ( FALSE && .lmuuu == "nbcanlink") {  # 20130823 FALSE added
-      if ( iter%% 2 == 1)
-      myderiv[, 1:NOS] <-
-      myderiv[, 1:NOS] + c(w) * dl.dk * dk.deta1 * 1  # 20130823 Annul this
-    }
-
-
     myderiv <- myderiv[, interleave.VGAM(M, M = M1)]
 
 
@@ -4550,88 +4550,129 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
             .deviance.arg = deviance.arg,
             .emuuu = emuuu, .esize = esize))),
 
+
+
   weight = eval(substitute(expression({
     wz <- matrix(as.numeric(NA), n, M)
 
 
-    if (is.null( .nsimEIM )) {
-      fred2 <- .Fortran("enbin9", ans = double(n*NOS),
-                  as.double(kmat), as.double(mu), as.double( .cutoff ),
-                  as.integer(n), ok = as.integer(1), as.integer(NOS),
-                  sumpdf = double(1), as.double( .Machine$double.eps ),
-                  as.integer( .Maxiter ))
-      if (fred2$ok != 1)
-        stop("error in Fortran subroutine exnbin9")
-      dim(fred2$ans) <- c(n, NOS)
-      ned2l.dk2 <- -fred2$ans - 1/kmat + 1/(kmat+mu)
-      wz[, M1*(1:NOS)] <- dk.deta2^2 * ned2l.dk2
+    max.qnbinom <- .max.qnbinom
+    max.chunk.Mb <- .max.chunk.Mb
 
 
+    EIM.NB.special2 <- function(mu, size, y.max = NULL,
+                                cutoff.prob = 0.995,
+                                intercept.only = FALSE) {
 
-    } else {
 
-      run.varcov <- matrix(0, n, NOS)
 
-      for (ii in 1:( .nsimEIM )) {
-        ysim <- rnbinom(n = n*NOS, mu = c(mu), size = c(kmat))
-        if (NOS > 1) dim(ysim) = c(n, NOS)
-        dl.dk <- digamma(ysim + kmat) - digamma(kmat) -
-                (ysim + kmat) / (mu + kmat) +
-                1 + log(kmat / (kmat + mu))
-        run.varcov <- run.varcov + dl.dk^2
-      }  # end of for loop
-
-      run.varcov <- cbind(run.varcov / .nsimEIM )
-      ned2l.dk2 <- if (intercept.only)
-          matrix(colMeans(run.varcov),
-                 n, ncol(run.varcov), byrow = TRUE) else run.varcov
+      if (intercept.only) {
+        mu <- mu[1]
+        size <- size[1]
+      }
 
-      wz[, M1*(1:NOS)] <- ned2l.dk2 * dk.deta2^2
-    }  # end of else
+      if (!is.numeric(y.max)) {
+        y.max <- max(qnbinom(p = cutoff.prob, mu = mu, size = size)) + 2
+      }
 
+      Y.mat <- if (intercept.only) 0:y.max else
+               matrix(0:y.max, length(mu), y.max+1, byrow = TRUE)
+      trigg.term <- if (intercept.only) {
+         dnbinom(Y.mat, size = size, mu = mu) %*% trigamma(Y.mat + size)
+      } else {
+         rowSums(dnbinom(Y.mat, size = size, mu = mu) *
+                 trigamma(Y.mat + size))
+      }
+      ned2l.dk2 <- trigamma(size) - 1 / size + 1 / (size + mu) - trigg.term
+      ned2l.dk2
+    }  # end of EIM.NB.special2()
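
Note: EIM.NB.special2() computes the exact expected information for the size
parameter k by summing a trigamma series over the (truncated) support:

    E[-d^2 l/dk^2] = trigamma(k) - 1/k + 1/(k + mu) - E[trigamma(Y + k)],

where Y ~ NegBin(mu, k) and the expectation is cut off at the cutoff.prob
quantile (y.max).  This replaces the Fortran 'enbin9' computation removed
above.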
 
-    ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
-    wz[, M1*(1:NOS) - 1] <- ned2l.dmu2 * dmu.deta^2
+    
 
+    ind2 <- matrix(FALSE, n, NOS)  # Used for SFS
+    for (jay in 1:NOS) {
+      Q.maxs <- qnbinom(p = .cutoff.prob , mu = mu[, jay], size = kmat[, jay])
+      ind1 <- if (max.chunk.Mb > 0) (Q.maxs < max.qnbinom) else FALSE
+        if ((NN <- sum(ind1)) > 0) {
+          Object.Size <- NN * 8 * max(Q.maxs) / (2^20)  # Mb; 8 bytes / double
+          n.chunks <- if (intercept.only) 1 else
+                      max(1, ceiling( Object.Size / max.chunk.Mb))
+          chunk.rows <- ceiling(NN / n.chunks)
+          ind2[, jay] <- ind1  # Save this
+          wind2 <- which(ind1)
+
+
+          upr.ptr <- 0
+          lwr.ptr <- upr.ptr + 1
+          while (lwr.ptr <= NN) {
+            upr.ptr <- min(upr.ptr + chunk.rows, NN)
+            sind2 <- wind2[lwr.ptr:upr.ptr]
+            wz[sind2, M1*jay] <-
+              EIM.NB.special2(mu          =   mu[sind2, jay],
+                              size        = kmat[sind2, jay],
+                              y.max = max(Q.maxs[sind2]),
+                              cutoff.prob = .cutoff.prob ,
+                              intercept.only = intercept.only) *
+              (dk.deta2[sind2, jay])^2
+            lwr.ptr <- upr.ptr + 1
+          }  # while
 
+      }
+    }  # end of for (jay in 1:NOS)
 
 
 
-    if ( .lmuuu == "nbcanlink") {
-      if ( iter%% 2 == 0) {
-        wz[, M1*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
-      } else {
-      }
-    }
 
 
 
+    for (jay in 1:NOS) {
+      run.varcov <- 0
+      ii.TF <- !ind2[, jay]  # Not assigned above
+      if (any(ii.TF)) {
+        kkvec <- kmat[ii.TF, jay]
+        muvec <-   mu[ii.TF, jay]
+        for (ii in 1:( .nsimEIM )) {
+          ysim <- rnbinom(sum(ii.TF), mu = muvec, size = kkvec)
+          dl.dk <- digamma(ysim + kkvec) - digamma(kkvec) -
+                   (ysim + kkvec) / (muvec + kkvec) +
+                   1 + log(kkvec / (kkvec + muvec))
+          run.varcov <- run.varcov + dl.dk^2
+        }  # end of for loop
+
+        run.varcov <- c(run.varcov / .nsimEIM )
+        ned2l.dk2 <- if (intercept.only) mean(run.varcov) else run.varcov
+
+        wz[ii.TF, M1*jay] <- ned2l.dk2 * (dk.deta2[ii.TF, jay])^2
+      }
+    }
 
 
-    if ( FALSE && .lmuuu == "nbcanlink") {  # 20130823 FALSE added
-      if ( iter%% 2 == 1)
-      wz[, M1*(1:NOS)-1] <-
-      wz[, M1*(1:NOS)-1] + ned2l.dk2 * dk.deta1^2 * 1  # 20130823
+    save.weights <- !all(ind2)
 
-      if (FALSE)
-      wz <- cbind(wz,
-                  kronecker(ned2l.dk2 * dk.deta1 * dk.deta2,
-                            if (NOS > 1) cbind(1, 0) else 1))
-    }
 
+    ned2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
+    wz[, M1*(1:NOS) - 1] <- ned2l.dmu2 * dmu.deta^2
 
 
 
+    if ( .lmuuu == "nbcanlink") {
+      if ( iter %% 2 == 0) {
+        wz[, M1*(1:NOS) - 1] <- ned2l.dk2 * dk.deta1^2
+      } else {
+      }
+    }
 
 
 
 
     w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
-  }), list( .cutoff = cutoff,
-            .Maxiter = Maxiter,
+  }), list( .cutoff.prob = cutoff.prob,
+            .max.qnbinom = max.qnbinom,
+            .max.chunk.Mb = max.chunk.Mb,
             .lmuuu = lmuuu,
             .nsimEIM = nsimEIM ))))
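
Note: the new weight slot splits the work in two.  Rows whose cutoff.prob
quantile Q.maxs stays below max.qnbinom get the exact EIM, processed in chunks
so the dnbinom() matrix never exceeds roughly max.chunk.Mb megabytes; the
remaining rows fall back to simulated Fisher scoring with nsimEIM draws, and
save.weights is switched on whenever any row took that route.  A minimal
sketch of the chunk arithmetic, with made-up numbers:

    NN <- 5000                        # rows eligible for the exact EIM
    Q.max <- 400                      # largest retained support point
    Object.Size <- NN * 8 * Q.max / 2^20           # Mb for the density matrix
    n.chunks <- max(1, ceiling(Object.Size / 30))  # with max.chunk.Mb = 30
    ceiling(NN / n.chunks)            # rows handled per pass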
 
+  
 
 
   if (deviance.arg) {
@@ -4682,8 +4723,11 @@ negbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-polya.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+
+
+
+polya.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
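
Note: the rename from 'save.weight' to 'save.weights' in the control functions
recurs throughout this commit (see also rrvglm.fit.q, vgam.fit.q, vglm.fit.q
and vlm.R below), so user code passing the old spelling must be updated, e.g.
(a sketch; the data and formula are placeholders):

    fit <- vglm(y ~ x2, polya, data = pdata,
                control = vglm.control(save.weights = FALSE))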
 
 
@@ -4787,7 +4831,7 @@ polya.control <- function(save.weight = TRUE, ...) {
     predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
 
     if (is.null( .nsimEIM )) {
-       save.weight <- control$save.weight <- FALSE
+       save.weights <- control$save.weights <- FALSE
     }
 
     
@@ -5072,8 +5116,8 @@ polya.control <- function(save.weight = TRUE, ...) {
 
 
 
-polyaR.control <- function(save.weight = TRUE, ...) {
-    list(save.weight = save.weight)
+polyaR.control <- function(save.weights = TRUE, ...) {
+    list(save.weights = save.weights)
 }
 
 
@@ -5178,7 +5222,7 @@ polyaR.control <- function(save.weight = TRUE, ...) {
     predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
 
     if (is.null( .nsimEIM )) {
-       save.weight <- control$save.weight <- FALSE
+       save.weights <- control$save.weights <- FALSE
     }
 
     
@@ -6765,7 +6809,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- hypersecant <- function(link.theta = elogit(min = -pi/2, max = pi/2),
+ hypersecant <- function(link.theta = extlogit(min = -pi/2, max = pi/2),
                          init.theta = NULL) {
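
Note: elogit() is renamed extlogit() here and in the families below
(hypersecant01, inv.binomial, genpoisson); it is the extended logit, mapping
(min, max) onto the whole real line.  A quick round-trip sketch:

    library(VGAM)
    eta <- extlogit(0.25, min = -pi/2, max = pi/2)
    extlogit(eta, min = -pi/2, max = pi/2, inverse = TRUE)  # back to 0.25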
 
 
@@ -6841,7 +6885,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- hypersecant01 <- function(link.theta = elogit(min = -pi/2, max = pi/2),
+ hypersecant01 <- function(link.theta = extlogit(min = -pi/2, max = pi/2),
                            init.theta = NULL) {
 
 
@@ -7074,7 +7118,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- inv.binomial <- function(lrho = elogit(min = 0.5, max = 1),
+ inv.binomial <- function(lrho = extlogit(min = 0.5, max = 1),
                           llambda = "loge",
                           irho = NULL,
                           ilambda = NULL,
@@ -7216,7 +7260,7 @@ rsimplex <- function(n, mu = 0.5, dispersion = 1) {
 
 
 
- genpoisson <- function(llambda = elogit(min = -1, max = 1),
+ genpoisson <- function(llambda = extlogit(min = -1, max = 1),
                         ltheta = "loge",
                         ilambda = NULL, itheta = NULL,
                         use.approx = TRUE,
@@ -7402,33 +7446,38 @@ dlgamma <- function(x, location = 0, scale = 1, shape = 1, log = FALSE) {
   if (!is.Numeric(shape, positive = TRUE))
     stop("bad input for argument 'shape'")
   z <- (x-location) / scale
-  if (log.arg) {
-    shape * z - exp(z) - log(scale) - lgamma(shape)
-  } else {
-    exp(shape * z - exp(z)) / (scale * gamma(shape))
-  }
+  logden <- shape * z - exp(z) - log(scale) - lgamma(shape)
+  logden[is.infinite(x)] <- log(0)  # 20141210
+  if (log.arg) logden else exp(logden)
 }
 
 
-plgamma <- function(q, location = 0, scale = 1, shape = 1) {
+
+plgamma <- function(q, location = 0, scale = 1, shape = 1,
+                    lower.tail = TRUE, log.p = FALSE) {
+
+
 
   zedd <- (q - location) / scale
-  ans <- pgamma(exp(zedd), shape)
+  ans <- pgamma(exp(zedd), shape, lower.tail = lower.tail, log.p = log.p)
   ans[scale <  0] <- NaN
   ans
 }
 
 
-qlgamma <- function(p, location = 0, scale = 1, shape = 1) {
-  if (!is.Numeric(scale, positive = TRUE))
-    stop("bad input for argument 'scale'")
 
-  ans <- location + scale * log(qgamma(p, shape))
+qlgamma <- function(p, location = 0, scale = 1, shape = 1,
+                    lower.tail = TRUE, log.p = FALSE) {
+
+
+  ans <- location + scale * log(qgamma(p, shape,
+                                       lower.tail = lower.tail, log.p = log.p))
   ans[scale <  0] <- NaN
   ans
 }
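
Note: plgamma()/qlgamma() now accept lower.tail and log.p and hand them
straight to pgamma()/qgamma(); the same pair of arguments is threaded through
most of the p/q functions below (gengamma.stacy, lino, maxwell, nakagami,
rayleigh, the Pareto family, truncpareto).  They follow the base-R
conventions, so for instance these two expressions should agree (a sketch):

    plgamma(2, shape = 1.5, lower.tail = FALSE, log.p = TRUE)
    log1p(-plgamma(2, shape = 1.5))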
 
 
+
 rlgamma <- function(n, location = 0, scale = 1, shape = 1) {
   ans <- location + scale * log(rgamma(n, shape))
   ans[scale < 0] <- NaN
@@ -7926,24 +7975,27 @@ dgengamma.stacy <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
 
 
 
-
-pgengamma.stacy <- function(q, scale = 1, d = 1, k = 1) {
+pgengamma.stacy <- function(q, scale = 1, d = 1, k = 1,
+                            lower.tail = TRUE, log.p = FALSE) {
   zedd <- (q / scale)^d
-  ans <- pgamma(zedd, k)
+  ans <- pgamma(zedd, k, lower.tail = lower.tail, log.p = log.p)
   ans[scale <  0] <- NaN
   ans[d     <= 0] <- NaN
   ans
 }
 
 
-qgengamma.stacy <- function(p, scale = 1, d = 1, k = 1) {
-  ans <- scale * qgamma(p, k)^(1/d)
+
+qgengamma.stacy <- function(p, scale = 1, d = 1, k = 1,
+                            lower.tail = TRUE, log.p = FALSE) {
+  ans <- scale * qgamma(p, k, lower.tail = lower.tail, log.p = log.p)^(1/d)
   ans[scale <  0] <- NaN
   ans[d     <= 0] <- NaN
   ans
 }
 
 
+
 rgengamma.stacy <- function(n, scale = 1, d = 1, k = 1) {
 
   ans <- scale * rgamma(n, k)^(1/d)
@@ -8139,27 +8191,27 @@ dlog <- function(x, prob, log = FALSE) {
     stop("bad input for argument 'log'")
   rm(log)
 
-    if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
-        stop("bad input for argument 'prob'")
-    N <- max(length(x), length(prob))
-    if (length(x) != N)
-        x <- rep(x, length.out = N)
-    if (length(prob) != N)
-        prob <- rep(prob, length.out = N)
-    ox <- !is.finite(x)
-    zero <- ox | round(x) != x | x < 1
-    ans <- rep(0.0, length.out = length(x))
-        if (log.arg) {
-            ans[ zero] <- log(0.0)
-            ans[!zero] <- x[!zero] * log(prob[!zero]) - log(x[!zero]) -
-                         log(-log1p(-prob[!zero]))
-        } else {
-            ans[!zero] <- -(prob[!zero]^(x[!zero])) / (x[!zero] *
-                         log1p(-prob[!zero]))
-        }
-    if (any(ox))
-        ans[ox] <- NA
-    ans
+  if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
+      stop("bad input for argument 'prob'")
+  N <- max(length(x), length(prob))
+  if (length(x) != N)
+    x <- rep(x, length.out = N)
+  if (length(prob) != N)
+    prob <- rep(prob, length.out = N)
+  ox <- !is.finite(x)
+  zero <- ox | round(x) != x | x < 1
+  ans <- rep(0.0, length.out = length(x))
+  if (log.arg) {
+    ans[ zero] <- log(0.0)
+    ans[!zero] <- x[!zero] * log(prob[!zero]) - log(x[!zero]) -
+                  log(-log1p(-prob[!zero]))
+    ans[ox] <- log(0)  # 20141212 KaiH
+  } else {
+    ans[!zero] <- -(prob[!zero]^(x[!zero])) / (x[!zero] *
+                   log1p(-prob[!zero]))
+    ans[ox] <- 0.0  # 20141212 KaiH
+  }
+  ans
 }
 
 
@@ -8597,26 +8649,34 @@ dlino <- function(x, shape1, shape2, lambda = 1, log = FALSE) {
   loglik <-  dbeta(x = x, shape1 = shape1, shape2 = shape2, log = TRUE) +
              shape1 * log(lambda) -
             (shape1+shape2) * log1p(-(1-lambda) * x)
+  loglik[is.infinite(x)] <- log(0)  # 20141208 KaiH
   if (log.arg) loglik else exp(loglik)
 }
 
 
-plino <- function(q, shape1, shape2, lambda = 1) {
-  ans <- pbeta(q = lambda * q / (1 - (1-lambda) * q),
-               shape1 = shape1, shape2 = shape2)
+
+plino <- function(q, shape1, shape2, lambda = 1,
+                  lower.tail = TRUE, log.p = FALSE) {
+  ans <- pbeta(q = 1 / (1 + (1/q - 1) / lambda),  # lambda * q / (1 - (1-lambda) * q),
+               shape1 = shape1, shape2 = shape2,
+               lower.tail = lower.tail, log.p = log.p)
   ans[lambda <= 0] <- NaN
   ans
 }
 
 
-qlino <- function(p, shape1, shape2, lambda = 1) {
-  Y <- qbeta(p = p, shape1 = shape1, shape2 = shape2)
+
+qlino <- function(p, shape1, shape2, lambda = 1,
+                  lower.tail = TRUE, log.p = FALSE) {
+  Y <- qbeta(p = p, shape1 = shape1, shape2 = shape2,
+             lower.tail = lower.tail, log.p = log.p)
   ans <- Y / (lambda + (1-lambda)*Y)
   ans[lambda <= 0] <- NaN
   ans
 }
 
 
+
 rlino <- function(n, shape1, shape2, lambda = 1) {
   Y <- rbeta(n = n, shape1 = shape1, shape2 = shape2)
   ans <- Y / (lambda + (1 - lambda) * Y)
@@ -8938,24 +8998,47 @@ dmaxwell <- function(x, rate, log = FALSE) {
 
 
 
-pmaxwell <- function(q, rate) {
-  L <- max(length(q), length(rate))
-  q    <- rep(q,    length.out = L)
-  rate <- rep(rate, length.out = L) 
-  ans <- ifelse(q > 0,
-                erf(q*sqrt(rate/2)) - q*exp(-0.5*rate*q^2) * sqrt(2*rate/pi),
-                0)
-  ans[rate <= 0] <- NaN
+pmaxwell <- function(q, rate, lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(erf(q*sqrt(rate/2)) - q*exp(-0.5*rate*q^2) * sqrt(2*rate/pi))
+      ans[q <= 0 ] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- erf(q*sqrt(rate/2)) - q*exp(-0.5*rate*q^2) * sqrt(2*rate/pi)
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log1p(-erf(q*sqrt(rate/2)) + q*exp(-0.5*rate*q^2) * sqrt(2*rate/pi))
+      ans[q <= 0] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- exp(log1p(-erf(q*sqrt(rate/2)) + q*exp(-0.5*rate*q^2) * sqrt(2*rate/pi)))
+      ans[q <= 0] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
   ans
 }
 
 
-qmaxwell <- function(p, rate) {
 
-  sqrt(2 * qgamma(p = p, 1.5) / rate)
+qmaxwell <- function(p, rate, lower.tail = TRUE, log.p = FALSE) {
+
+  sqrt(2 * qgamma(p = p, 1.5, lower.tail = lower.tail, log.p = log.p) / rate)
 }
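
Note: qmaxwell() uses the fact that rate * X^2 / 2 ~ Gamma(3/2) when X is
Maxwell(rate), which is also how rmaxwell() simulates.  A one-line consistency
check (a sketch):

    pmaxwell(qmaxwell(0.7, rate = 2), rate = 2)  # should return 0.7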
 
 
+
 rmaxwell <- function(n, rate) {
 
   sqrt(2 * rgamma(n = n, 1.5) / rate)
@@ -8963,8 +9046,6 @@ rmaxwell <- function(n, rate) {
 
 
 
-
-
  maxwell <- function(link = "loge", zero = NULL) {
 
 
@@ -9127,27 +9208,23 @@ dnaka <- function(x, scale = 1, shape, log = FALSE) {
                               scale = scale[xok] / shape[xok],
                               log = TRUE) +
                       log(2) + log(x[xok])
+    logdensity[is.infinite(x)] <- log(0)  # 20141208 KaiH
+
     if (log.arg) logdensity else exp(logdensity)
 }
 
 
-pnaka <- function(q, scale = 1, shape) {
-    if (!is.Numeric(q))
-      stop("bad input for argument 'q'")
-    if (!is.Numeric(shape, positive = TRUE))
-      stop("bad input for argument 'shape'")
-    if (!is.Numeric(scale, positive = TRUE))
-      stop("bad input for argument 'scale'")
 
-    L <- max(length(q), length(shape), length(scale))
-    q     <- rep(q,     length.out = L)
-    shape <- rep(shape, length.out = L)
-    scale <- rep(scale, length.out = L)
+pnaka <- function(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE) {
 
-    ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
+  ans <- pgamma(shape * q^2 / scale, shape = shape,
+                lower.tail = lower.tail, log.p = log.p)
+  ans[scale <  0] <- NaN
+  ans
 }
 
 
+
 qnaka <- function(p, scale = 1, shape, ...) {
   if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
     stop("bad input for argument 'p'")
@@ -9239,9 +9316,6 @@ rnaka <- function(n, scale = 1, shape, Smallno = 1.0e-6) {
   lscale <- attr(escale, "function.name")
 
 
-  if (!nowarning)
-    warning("order of the linear/additive predictors has been changed",
-            " in VGAM version 0.9-5")
 
   new("vglmff",
   blurb = c("Nakagami distribution f(y) = 2 * (shape/scale)^shape *\n",
@@ -9360,31 +9434,80 @@ drayleigh <- function(x, scale = 1, log = FALSE) {
   xok <- (x > 0)
   logdensity[xok] <- log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
                      2 * log(scale[xok])
+  logdensity[is.infinite(x)] <- log(0)  # 20141208 KaiH
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-prayleigh <- function(q, scale = 1) {
-  if (any(scale <= 0, na.rm = TRUE))
-    stop("argument 'scale' must be positive")
 
-  L     <- max(length(q), length(scale)) 
-  q     <- rep(q,     length.out = L)
-  scale <- rep(scale, length.out = L)
+prayleigh <- function(q, scale = 1, lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
 
-  ifelse(q > 0, -expm1(-0.5 * (q / scale)^2), 0)
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log(-expm1(-0.5 * (q / scale)^2))
+      ans[q <= 0 ] <- -Inf
+    } else {
+      ans <- -expm1(-0.5 * (q / scale)^2)
+      ans[q <= 0] <- 0
+    }
+  } else {
+      if (log.p) {
+        ans <- -0.5 * (q / scale)^2
+        ans[q <= 0] <- 0
+      } else {
+        ans <- exp(-0.5 * (q / scale)^2)
+        ans[q <= 0] <- 1
+      }
+    }
+  ans[scale <  0] <- NaN
+  ans
 }
 
 
-qrayleigh <- function(p, scale = 1) {
-  if (any(p <= 0, na.rm = TRUE) || any(p >= 1, na.rm = TRUE))
-    stop("argument 'p' must be between 0 and 1")
-  ans <- scale * sqrt(-2 * log1p(-p))
+
+qrayleigh <- function(p, scale = 1,
+                      lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * sqrt(-2 * log(-expm1(ln.p)))
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- scale * sqrt(-2 * log1p(-p))
+      ans[p < 0] <- NaN
+      ans[p == 0] <- 0
+      ans[p == 1] <- Inf
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale * sqrt(-2 * ln.p)
+      ans[ln.p > 0] <- NaN
+      ans
+    } else {
+      ans <- scale * sqrt(-2 * log(p))
+      ans[p > 1] <- NaN
+    }
+  }
   ans[scale <= 0] <- NaN
   ans
 }
 
 
+
 rrayleigh <- function(n, scale = 1) {
   ans <- scale * sqrt(-2 * log(runif(n)))
   ans[scale <= 0] <- NaN
@@ -9584,52 +9707,92 @@ dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
                     (1/inequality[xok]-1) * log(zedd[xok]) - 
                     (shape[xok]+1) *
                       log1p(zedd[xok]^(1/inequality[xok]))
+  logdensity[is.infinite(x)] <- log(0)  # 20141208 KaiH
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
+
 pparetoIV <-
-  function(q, location = 0, scale = 1, inequality = 1, shape = 1) {
-  if (!is.Numeric(q))
-    stop("bad input for argument 'q'")
-  if (!is.Numeric(scale, positive = TRUE)) 
-    stop("bad input for argument 'scale'")
-  if (!is.Numeric(inequality, positive = TRUE)) 
-    stop("bad input for argument 'inequality'")
-  if (!is.Numeric(shape, positive = TRUE)) 
-    stop("bad input for argument 'shape'")
+  function(q, location = 0, scale = 1, inequality = 1, shape = 1,
+           lower.tail = TRUE, log.p = FALSE) {
 
-  N <- max(length(q), length(location), length(scale),
-          length(inequality), length(shape))
-  if (length(q)          != N) q          <- rep(q,          length.out = N)
-  if (length(location)   != N) location   <- rep(location,   length.out = N)
-  if (length(inequality) != N) inequality <- rep(inequality, length.out = N)
-  if (length(shape)      != N) shape      <- rep(shape,      length.out = N)
-  if (length(scale)      != N) scale      <- rep(scale,      length.out = N)
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
 
-  answer <- q * 0
-  ii <- q > location
-  zedd <- (q[ii] - location[ii]) / scale[ii]
-  answer[ii] <- 1 - (1 + zedd^(1/inequality[ii]))^(-shape[ii])
+
+  zedd <- (q - location) / scale
+
+  if (lower.tail) {
+    if (log.p) {
+      answer <- log(-expm1(log1p(zedd^(1/inequality)) * (-shape)))
+      answer[q <= location] <- -Inf
+      answer[q == Inf] <- 0
+    } else {
+      answer <- -expm1(log1p(zedd^(1/inequality)) * (-shape))
+      answer[q <= location] <- 0
+      answer[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      answer <- log1p(zedd^(1/inequality)) * (-shape)
+      answer[q <= location] <- 0
+      answer[q == Inf] <- -Inf
+    } else {
+      answer <- exp(log1p(zedd^(1/inequality)) * (-shape))
+      answer[q <= location] <- 1
+      answer[q == Inf] <- 0
+    }
+  }
+  answer[scale <= 0 | shape <= 0 | inequality <= 0] <- NaN
   answer
 }
 
 
+
 qparetoIV <-
-  function(p, location = 0, scale = 1, inequality = 1, shape = 1) {
-  if (!is.Numeric(p, positive = TRUE) || any(p >= 1)) 
-    stop("bad input for argument 'p'")
-  if (!is.Numeric(inequality, positive = TRUE)) 
-    stop("bad input for argument 'inequality'")
-  if (!is.Numeric(shape, positive = TRUE)) 
-    stop("bad input for argument 'shape'")
-  ans <- location + scale * (-1 + (1-p)^(-1/shape))^inequality
-  ans[scale <= 0] <- NaN
-  ans[shape <= 0] <- NaN
+  function(p, location = 0, scale = 1, inequality = 1, shape = 1,
+           lower.tail = TRUE, log.p = FALSE) {
+
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- location + scale * (expm1((-1/shape)*log(-expm1(ln.p))))^inequality
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- location + scale * (expm1((-1/shape) * log1p(-p)))^inequality
+      ans[p < 0] <- NaN
+      ans[p == 0] <- location  # lower end of the support, not 0
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- location + scale * (expm1((-1/shape)*ln.p))^inequality
+      ans[ln.p > 0] <- NaN
+      ans
+    } else {
+      ans <- location + scale * (expm1((-1/shape)*log(p)))^inequality
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- location  # lower end of the support, not 0
+      ans[p > 1] <- NaN
+    }
+  }
+  ans[scale <= 0 | shape <= 0 | inequality <= 0] <- NaN
   ans
 }
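
Note: the rewritten pparetoIV()/qparetoIV() work through expm1()/log1p() for
accuracy in the tails.  With z = (q - location)/scale they implement

    F(q) = 1 - (1 + z^(1/inequality))^(-shape)

and its inverse q = location + scale * ((1 - p)^(-1/shape) - 1)^inequality;
invalid scale, shape or inequality values now give NaN elementwise instead of
aborting via stop().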
 
 
+
 rparetoIV <-
   function(n, location = 0, scale = 1, inequality = 1, shape = 1) {
   if (!is.Numeric(inequality, positive = TRUE)) 
@@ -9646,13 +9809,17 @@ dparetoIII <- function(x, location = 0, scale = 1, inequality = 1,
   dparetoIV(x = x, location = location, scale = scale,
             inequality = inequality, shape = 1, log = log)
 
-pparetoIII <- function(q, location = 0, scale = 1, inequality = 1)
+pparetoIII <- function(q, location = 0, scale = 1, inequality = 1,
+                       lower.tail = TRUE, log.p = FALSE)
   pparetoIV(q = q, location = location, scale = scale,
-            inequality = inequality, shape = 1)
+            inequality = inequality, shape = 1,
+            lower.tail = lower.tail, log.p = log.p)
 
-qparetoIII <- function(p, location = 0, scale = 1, inequality = 1)
+qparetoIII <- function(p, location = 0, scale = 1, inequality = 1,
+                       lower.tail = TRUE, log.p = FALSE)
   qparetoIV(p = p, location = location, scale = scale,
-            inequality = inequality, shape = 1)
+            inequality = inequality, shape = 1,
+            lower.tail = lower.tail, log.p = log.p)
 
 rparetoIII <- function(n, location = 0, scale = 1, inequality = 1)
   rparetoIV(n = n, location= location, scale = scale,
@@ -9662,16 +9829,19 @@ rparetoIII <- function(n, location = 0, scale = 1, inequality = 1)
 
 dparetoII <- function(x, location = 0, scale = 1, shape = 1, log = FALSE)
   dparetoIV(x = x, location = location, scale = scale,
-            inequality = 1, shape = shape,
-            log = log)
+            inequality = 1, shape = shape, log = log)
 
-pparetoII <- function(q, location = 0, scale = 1, shape = 1)
+pparetoII <- function(q, location = 0, scale = 1, shape = 1,
+                      lower.tail = TRUE, log.p = FALSE)
   pparetoIV(q = q, location = location, scale = scale,
-            inequality = 1, shape = shape)
+            inequality = 1, shape = shape,
+            lower.tail = lower.tail, log.p = log.p)
 
-qparetoII <- function(p, location = 0, scale = 1, shape = 1)
+qparetoII <- function(p, location = 0, scale = 1, shape = 1,
+                      lower.tail = TRUE, log.p = FALSE)
   qparetoIV(p = p, location = location, scale = scale,
-            inequality = 1, shape = shape)
+            inequality = 1, shape = shape,
+            lower.tail = lower.tail, log.p = log.p)
 
 rparetoII <- function(n, location = 0, scale = 1, shape = 1)
   rparetoIV(n = n, location = location, scale = scale,
@@ -9682,13 +9852,17 @@ dparetoI <- function(x, scale = 1, shape = 1, log = FALSE)
   dparetoIV(x = x, location = scale, scale = scale, inequality = 1,
             shape = shape, log = log)
 
-pparetoI <- function(q, scale = 1, shape = 1)
+pparetoI <- function(q, scale = 1, shape = 1,
+                     lower.tail = TRUE, log.p = FALSE)
   pparetoIV(q = q, location = scale, scale = scale, inequality = 1,
-            shape = shape)
+            shape = shape,
+            lower.tail = lower.tail, log.p = log.p)
 
-qparetoI <- function(p, scale = 1, shape = 1)
+qparetoI <- function(p, scale = 1, shape = 1,
+                     lower.tail = TRUE, log.p = FALSE)
   qparetoIV(p = p, location = scale, scale = scale, inequality = 1,
-            shape = shape)
+            shape = shape,
+            lower.tail = lower.tail, log.p = log.p)
 
 rparetoI <- function(n, scale = 1, shape = 1)
   rparetoIV(n = n, location = scale, scale = scale, inequality = 1,
@@ -10192,38 +10366,91 @@ dpareto <- function(x, scale = 1, shape, log = FALSE) {
   shape <- rep(shape, length.out = L)
 
   logdensity <- rep(log(0), length.out = L)
-  xok <- (x > scale)
+  xok <- (x >= scale)  # 20141212 KaiH
   logdensity[xok] <- log(shape[xok]) + shape[xok] * log(scale[xok]) -
                       (shape[xok]+1) * log(x[xok])
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-ppareto <- function(q, scale = 1, shape) {
 
-  L <- max(length(q), length(scale), length(shape))
-  q <- rep(q, length.out = L);
-  scale <- rep(scale, length.out = L);
-  shape <- rep(shape, length.out = L)
+ppareto <- function(q, scale = 1, shape,
+                    lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
 
-  ans <- ifelse(q > scale, 1 - (scale/q)^shape, 0)
-  ans[scale <= 0] <- NaN
-  ans[shape <= 0] <- NaN
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+
+  if (lower.tail) {
+    if (log.p) {
+      ans <- log1p(-(scale/q)^shape)
+      ans[q <= scale] <- -Inf
+      ans[q == Inf] <- 0
+    } else {
+      ans <- exp(log1p(-(scale/q)^shape))
+      ans[q <= scale] <- 0
+      ans[q == Inf] <- 1
+    }
+  } else {
+    if (log.p) {
+      ans <- log((scale/q)^shape)
+      ans[q <= scale] <- 0
+      ans[q == Inf] <- -Inf
+    } else {
+      ans <- (scale/q)^shape
+      ans[q <= scale] <- 1
+      ans[q == Inf] <- 0
+    }
+  }
+
+  ans[shape <= 0 | scale <= 0] <- NaN
   ans
 }
 
 
-qpareto <- function(p, scale = 1, shape) {
-  if (any(p <= 0) || any(p >= 1))
-    stop("argument 'p' must be between 0 and 1")
 
-  ans <- scale / (1 - p)^(1/shape)
-  ans[scale <= 0] <- NaN
-  ans[shape <= 0] <- NaN
+qpareto <- function(p, scale = 1, shape,
+                    lower.tail = TRUE, log.p = FALSE) {
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+
+  if (lower.tail) {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale / (-expm1(ln.p))^(1/shape)
+      ans[ln.p > 0] <- NaN
+    } else {
+      ans <- scale / exp(log1p(-p) * (1/shape))
+      ans[p < 0] <- NaN
+      ans[p == 0] <- scale
+      ans[p == 1] <- Inf
+      ans[p > 1] <- NaN
+    }
+  } else {
+    if (log.p) {
+      ln.p <- p
+      ans <- scale / exp(ln.p)^(1/shape)
+      ans[ln.p > 0] <- NaN
+      ans
+    } else {
+      ans <- scale / p^(1/shape)
+      ans[p < 0] <- NaN
+      ans[p == 0] <- Inf
+      ans[p == 1] <- scale
+      ans[p > 1] <- NaN
+    }
+  }
+  ans[shape <= 0 | scale <= 0] <- NaN
   ans
 }
 
 
+
 rpareto <- function(n, scale = 1, shape) {
   ans <- scale / runif(n)^(1/shape)
   ans[scale <= 0] <- NaN
@@ -10338,8 +10565,6 @@ dtruncpareto <- function(x, lower, upper, shape, log = FALSE) {
     stop("bad input for argument 'log'")
   rm(log)
 
-  if (!is.Numeric(x))
-    stop("bad input for argument 'x'")
   if (!is.Numeric(lower, positive = TRUE))
     stop("argument 'lower' must be positive")
   if (!is.Numeric(upper, positive = TRUE))
@@ -10364,14 +10589,24 @@ dtruncpareto <- function(x, lower, upper, shape, log = FALSE) {
   logdensity[shape <= 0] <- NaN
   logdensity[upper < lower] <- NaN
   logdensity[0 > lower] <- NaN
+
   if (log.arg) logdensity else exp(logdensity)
 }
 
 
-ptruncpareto <- function(q, lower, upper, shape) {
+
+ptruncpareto <- function(q, lower, upper, shape,
+                         lower.tail = TRUE, log.p = FALSE) {
   if (!is.Numeric(q))
     stop("bad input for argument 'q'")
 
+  if (!is.logical(lower.tail) || length(lower.tail ) != 1)
+    stop("bad input for argument 'lower.tail'")
+
+  if (!is.logical(log.arg <- log.p) || length(log.p) != 1)
+    stop("bad input for argument 'log.p'")
+  rm(log.p)   # 20141231 KaiH
+
   L <- max(length(q), length(lower), length(upper), length(shape)) 
   if (length(q)     != L) q     <- rep(q,     length.out = L)
   if (length(shape) != L) shape <- rep(shape, length.out = L)
@@ -10389,10 +10624,15 @@ ptruncpareto <- function(q, lower, upper, shape) {
   ans[upper <= 0] <- NaN
   ans[shape <= 0] <- NaN
 
-  ans
+  if (lower.tail) {
+    if (log.arg) log(ans) else ans
+  } else {
+    if (log.arg) log1p(-ans) else exp(log1p(-ans))
+  }
 }
 
 
+
 qtruncpareto <- function(p, lower, upper, shape) {
   if (!is.Numeric(p, positive = TRUE))
     stop("bad input for argument 'p'")
diff --git a/R/family.vglm.R b/R/family.vglm.R
index 5c693c0..1d24ec9 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 1f13b78..dc6b77f 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -1024,8 +1024,8 @@ rzipois <- function(n, lambda, pstr0 = 0) {
 
 
 
-zanegbinomial.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+zanegbinomial.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -1486,8 +1486,8 @@ zanegbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-zanegbinomialff.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+zanegbinomialff.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -2331,9 +2331,9 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            ipstr0 = NULL,
            zero = NULL,  # 20130917; was originally zero = 1,
-           mv = FALSE, imethod = 1) {
-  if (as.logical(mv))
-    stop("argument 'mv' must be FALSE")
+           multiple.responses = FALSE, imethod = 1) {
+  if (as.logical(multiple.responses))
+    stop("argument 'multiple.responses' must be FALSE")
 
   lpstr0 <- as.list(substitute(lpstr0))
   epstr0 <- link2list(lpstr0)
@@ -2606,15 +2606,15 @@ dposnegbin <- function(x, munb, size, log = FALSE) {
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            ionempstr0 = NULL,
            zero = 2,
-           mv = FALSE, imethod = 1) {
+           multiple.responses = FALSE, imethod = 1) {
 
 
 
 
 
 
-  if (as.logical(mv))
-    stop("argument 'mv' must be FALSE")
+  if (as.logical(multiple.responses))
+    stop("argument 'multiple.responses' must be FALSE")
 
   lprob <- as.list(substitute(lprob))
   eprob <- link2list(lprob)
@@ -3184,8 +3184,8 @@ rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
 
 
 
-zinegbinomial.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+zinegbinomial.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
@@ -3644,8 +3644,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
     for (spp. in 1:NOS) {
       wz1[,, spp.] <- wz1[,, spp.] *
-                     dthetas.detas[, M1 * (spp. - 1) + ind3$row] *
-                     dthetas.detas[, M1 * (spp. - 1) + ind3$col]
+                      dthetas.detas[, M1 * (spp. - 1) + ind3$row] *
+                      dthetas.detas[, M1 * (spp. - 1) + ind3$col]
     }
 
     for (spp. in 1:NOS) {
@@ -3672,8 +3672,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...) {
 
 
 
-zinegbinomialff.control <- function(save.weight = TRUE, ...) {
-  list(save.weight = save.weight)
+zinegbinomialff.control <- function(save.weights = TRUE, ...) {
+  list(save.weights = save.weights)
 }
 
 
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 5e580e6..7d70107 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index b5c1d58..333699d 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/generic.q b/R/generic.q
index cea5d02..02015d3 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/links.q b/R/links.q
index 93a3250..c044bab 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -37,7 +37,7 @@ ToString <- function(x)
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
            probs.x = c(0.15, 0.85),
            probs.y = c(0.25, 0.50, 0.75),
-           mv = FALSE, earg.link = FALSE,
+           multiple.responses = FALSE, earg.link = FALSE,
            whitespace = FALSE, bred = FALSE, lss = TRUE,
            oim = FALSE, nsimEIM = 100,
            zero = NULL) {
@@ -839,10 +839,10 @@ care.exp <- function(x,
 
 
 
-fsqrt <- function(theta,  #  = NA  , = NULL,
-                  min = 0, max = 1, mux = sqrt(2),
-                  inverse = FALSE, deriv = 0,
-                  short = TRUE, tag = FALSE) {
+foldsqrt <- function(theta,  #  = NA  , = NULL,
+                     min = 0, max = 1, mux = sqrt(2),
+                     inverse = FALSE, deriv = 0,
+                     short = TRUE, tag = FALSE) {
   if (!is.Numeric(min, length.arg = 1))
     stop("bad input for 'min' component")
   if (!is.Numeric(max, length.arg = 1))
@@ -854,7 +854,7 @@ fsqrt <- function(theta,  #  = NA  , = NULL,
 
   if (is.character(theta)) {
     string <- if (short) 
-      paste("fsqrt(", theta, ")", sep = "") else {
+      paste("foldsqrt(", theta, ")", sep = "") else {
       if (abs(mux-sqrt(2)) < 1.0e-10)
         paste("sqrt(2*", theta, ") - sqrt(2*(1-", theta, "))",
               sep = "") else
@@ -940,7 +940,8 @@ fsqrt <- function(theta,  #  = NA  , = NULL,
 
 
 
- elogit <- function(theta,
+
+ extlogit <- function(theta,
                     min = 0, max = 1,
                     bminvalue = NULL,
                     bmaxvalue = NULL,
@@ -957,10 +958,10 @@ fsqrt <- function(theta,  #  = NA  , = NULL,
   if (is.character(theta)) {
     string <- if (short) {
       if (A != 0 || B != 1)
-        paste("elogit(", theta,
+        paste("extlogit(", theta,
               ", min = ", A,
               ", max = ", B, ")", sep = "") else
-        paste("elogit(", theta, ")", sep = "")
+        paste("extlogit(", theta, ")", sep = "")
     } else {
       paste("log((", theta, "-min)/(max-", theta, "))", sep = "")
     }
@@ -1620,3 +1621,55 @@ fsqrt <- function(theta,  #  = NA  , = NULL,
 
 
 
+
+
+linkfun.vglm <- function(object, earg = FALSE, ...) {
+  if (!any(slotNames(object) == "extra"))
+    stop("cannot access the 'extra' slot of the object")
+  if (!any(slotNames(object) == "misc"))
+    stop("cannot access the 'misc' slot of the object")
+
+  M <- npred(object)
+
+  misc <- object@misc
+  LINKS1 <- misc$link
+  EARGS1 <- misc$earg
+
+  extra <- object@extra
+  LINKS2 <- extra$link
+  EARGS2 <- extra$earg
+
+  if (length(LINKS1) != M && length(LINKS2) != M) {
+    if (LINKS1 != "multilogit" && LINKS2 != "multilogit")
+      warning("the length of the 'links' component is not ", M)
+  }
+
+  if (length(LINKS1)) {
+    if (earg) list(link = LINKS1, earg = EARGS1) else LINKS1
+  } else {
+    if (earg) list(link = LINKS2, earg = EARGS2) else LINKS2
+  }
+}
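
Note: linkfun() becomes a generic for fitted "vglm" objects, returning the
link names from misc$link and falling back to extra$link.  A sketch of its
use, assuming the usual pneumo example data:

    fit <- vglm(cbind(normal, mild, severe) ~ log(exposure.time),
                propodds, data = pneumo)
    linkfun(fit)               # the per-predictor link names
    linkfun(fit, earg = TRUE)  # links together with their 'earg' lists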
+
+
+
+if (!isGeneric("linkfun"))
+  setGeneric("linkfun", function(object, ...) standardGeneric("linkfun"))
+
+
+
+setMethod("linkfun", "vglm", function(object, ...)
+    linkfun.vglm(object, ...))
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index a15df7a..72e2376 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
index 4ee7635..d1e5efb 100644
--- a/R/lrwaldtest.R
+++ b/R/lrwaldtest.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index e9a300c..660eedc 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -532,7 +532,9 @@ npred.vlm <- function(object,
     if (is.numeric(ans.infos)) ans.infos else
     if (is.numeric(ans.y    )) ans.y     else
     ans
-  } else ans
+  } else {
+    ans
+  }
 
   ans
 }
diff --git a/R/mux.q b/R/mux.q
index dc7fe0e..051288f 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/nobs.R b/R/nobs.R
index 0b7dca3..36e2d84 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index bfdd684..8870903 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index e0f94d0..7bbe8a7 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 8a882c5..2a13df6 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 5e3524c..6759cd3 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/print.vglm.q b/R/print.vglm.q
index d50fb3b..daaab0f 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/print.vlm.q b/R/print.vlm.q
index 90b4bf1..c9c9a49 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index 9b9721c..14e86cd 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/qtplot.q b/R/qtplot.q
index 2ffb996..d7fac96 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index a31772b..b5b1e90 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 7983c04..70d2991 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index cc187c4..76c4be3 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 4939d70..2961d66 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -28,7 +28,7 @@ rrvglm.fit <-
     nonparametric <- FALSE
     epsilon <- control$epsilon
     maxit <- control$maxit
-    save.weight <- control$save.weight
+    save.weights <- control$save.weights
     trace <- control$trace
     orig.stepsize <- control$stepsize
     minimize.criterion <- control$min.criterion
@@ -597,7 +597,7 @@ rrvglm.fit <-
     if (M == 1) {
       wz <- as.vector(wz)  # Convert wz into a vector
     } # else
-    fit$weights <- if (save.weight) wz else NULL
+    fit$weights <- if (save.weights) wz else NULL
 
 
     misc <- list(
diff --git a/R/s.q b/R/s.q
index 6859086..403f2d3 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/s.vam.q b/R/s.vam.q
index 2e3eadd..b2e9db0 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/smart.R b/R/smart.R
index acda626..91249e0 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/step.vglm.q b/R/step.vglm.q
index 1b18d06..f5f3bea 100644
--- a/R/step.vglm.q
+++ b/R/step.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 62e24fd..87ca084 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index e4cfee2..d1d1399 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index 69c2281..71a5b1b 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vgam.R b/R/vgam.R
index 7c85ea3..142069b 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vgam.control.q b/R/vgam.control.q
index e0703f1..7f198b1 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 3d557c1..dae6af9 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -24,7 +24,7 @@ vgam.fit <-
   check.Rank <- TRUE  # Set this to false for family functions vppr() etc.
   epsilon <- control$epsilon
   maxit <- control$maxit
-  save.weight <- control$save.weight
+  save.weights <- control$save.weights
   trace <- control$trace
 
   bf.maxit <- control$bf.maxit
@@ -316,7 +316,7 @@ vgam.fit <-
     if (M == 1) {
       wz <- as.vector(wz)  # Convert wz into a vector
     } # else
-    fit$weights <- if (save.weight) wz else NULL
+    fit$weights <- if (save.weights) wz else NULL
 
 
 
diff --git a/R/vgam.match.q b/R/vgam.match.q
index 7d04320..9fc6ea7 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vglm.R b/R/vglm.R
index ad01b9a..138e066 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vglm.control.q b/R/vglm.control.q
index a143ce9..3f15876 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 60dfe32..1b349dc 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -29,7 +29,7 @@ vglm.fit <-
   nonparametric <- FALSE
   epsilon <- control$epsilon
   maxit <- control$maxit
-  save.weight <- control$save.weight
+  save.weights <- control$save.weights
   trace <- control$trace
   orig.stepsize <- control$stepsize
   minimize.criterion <- control$min.criterion
@@ -419,7 +419,7 @@ vglm.fit <-
   if (M == 1) {
     wz <- as.vector(wz)  # Convert wz into a vector
   } # else
-  fit$weights <- if (save.weight) wz else NULL
+  fit$weights <- if (save.weights) wz else NULL
 
 
   misc <- list(
diff --git a/R/vlm.R b/R/vlm.R
index 9ad719a..ab6be50 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -181,7 +181,7 @@ vlm <- function(formula,
   }
   if (x.arg)
     slot(answer, "x") <- x # The 'small' design matrix
-  if (control$save.weight)
+  if (control$save.weights)
     slot(answer, "weights") <- wz
   if (length(xlev))
     slot(answer, "xlevels") <- xlev
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 23cbd5d..35aef02 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index 3da89fd..6fc0e65 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,5 @@
 # These functions are
-# Copyright (C) 1998-2014 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2015 T.W. Yee, University of Auckland.
 # All rights reserved.
 
 
@@ -108,10 +108,11 @@ setMethod("depvar",  "vsmooth.spline", function(object, ...)
 vsmooth.spline <-
   function(x, y, w = NULL, df = rep(5, M),
            spar = NULL,  #rep(0,M),
+           i.constraint = diag(M),
+           x.constraint = diag(M),
+           constraints = list("(Intercepts)" = i.constraint,
+                              x = x.constraint),
            all.knots = FALSE, 
-           iconstraint = diag(M),
-           xconstraint = diag(M),
-           constraints = list("(Intercepts)" = diag(M), x = diag(M)),
            var.arg = FALSE,
            scale.w = TRUE,
            nk = NULL,
@@ -198,8 +199,8 @@ vsmooth.spline <-
 
  
   if (missing.constraints) {
-    constraints <- list("(Intercepts)" = eval(iconstraint),
-                        "x"            = eval(xconstraint))
+    constraints <- list("(Intercepts)" = eval(i.constraint),
+                        "x"            = eval(x.constraint))
   }
   constraints <- eval(constraints)
   if (is.matrix(constraints)) {
@@ -253,7 +254,7 @@ vsmooth.spline <-
 
       lfit <- vlm(yinyin ~ 1 + x,  # xxx
                  constraints = constraints,
-                 save.weight = FALSE,
+                 save.weights = FALSE,
                  qr.arg = FALSE, x.arg = FALSE, y.arg = FALSE,
                  smart = FALSE,
                  weights = matrix(collaps$wzbar, neff, dim2wz))
@@ -314,7 +315,7 @@ vsmooth.spline <-
   nknots <- nk
   if (all.knots) {
     knot <- if (noround) {
-      valid.vknotl2(c(rep(xbar[1],3), xbar, rep(xbar[neff],3)))
+      valid.vknotl2(c(rep(xbar[1], 3), xbar, rep(xbar[neff], 3)))
     } else { 
       c(rep(xbar[1], 3), xbar, rep(xbar[neff], 3))
     }
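
Note: in vsmooth.spline() the constraint arguments gain a dot (iconstraint
becomes i.constraint, xconstraint becomes x.constraint) and the 'constraints'
default is now expressed in terms of them.  Keyword calls need the new
spelling, e.g. (a sketch with placeholder data):

    fit <- vsmooth.spline(x = runif(100), y = rnorm(100), df = 4,
                          i.constraint = diag(1))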
diff --git a/build/vignette.rds b/build/vignette.rds
deleted file mode 100644
index e9fd57c..0000000
Binary files a/build/vignette.rds and /dev/null differ
diff --git a/data/ducklings.rda b/data/ducklings.rda
new file mode 100644
index 0000000..eb2ec67
Binary files /dev/null and b/data/ducklings.rda differ
diff --git a/inst/doc/categoricalVGAM.R b/inst/doc/categoricalVGAM.R
deleted file mode 100644
index badcc3c..0000000
--- a/inst/doc/categoricalVGAM.R
+++ /dev/null
@@ -1,278 +0,0 @@
-### R code from vignette source 'categoricalVGAM.Rnw'
-
-###################################################
-### code chunk number 1: categoricalVGAM.Rnw:84-90
-###################################################
-library("VGAM")
-library("VGAMdata")
-ps.options(pointsize = 12)
-options(width = 72, digits = 4)
-options(SweaveHooks = list(fig = function() par(las = 1)))
-options(prompt = "R> ", continue = "+")
-
-
-###################################################
-### code chunk number 2: pneumocat
-###################################################
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
-            cumulative(reverse = TRUE, parallel = TRUE), data = pneumo)
-
-
-###################################################
-### code chunk number 3: categoricalVGAM.Rnw:903-907
-###################################################
-journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
-                      498, 68, NA, 325,   221, 17, 142, NA), 4, 4)
-dimnames(squaremat) <- list(winner = journal, loser = journal)
-
-
-###################################################
-### code chunk number 4: categoricalVGAM.Rnw:1007-1011
-###################################################
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
-coef(fit, matrix = TRUE)
-Coef(fit)  # Estimated pA and pB
-
-
-###################################################
-### code chunk number 5: categoricalVGAM.Rnw:1289-1291
-###################################################
-head(marital.nz, 4)
-summary(marital.nz)
-
-
-###################################################
-### code chunk number 6: categoricalVGAM.Rnw:1294-1296
-###################################################
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
-               data = marital.nz)
-
-
-###################################################
-### code chunk number 7: categoricalVGAM.Rnw:1300-1302
-###################################################
-head(depvar(fit.ms), 4)
-colSums(depvar(fit.ms))
-
-
-###################################################
-### code chunk number 8: categoricalVGAM.Rnw:1311-1323
-###################################################
-# Plot output
-mycol <- c("red", "darkgreen", "blue")
-par(mfrow = c(2, 2))
-plot(fit.ms, se = TRUE, scale = 12,
-         lcol = mycol, scol = mycol)
-
-# Plot output overlaid
-#par(mfrow=c(1,1))
-plot(fit.ms, se = TRUE, scale = 12,
-         overlay = TRUE,
-         llwd = 2,
-         lcol = mycol, scol = mycol)
-
-
-###################################################
-### code chunk number 9: categoricalVGAM.Rnw:1366-1379
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
-mycol <- c("red", "darkgreen", "blue")
- par(mfrow = c(2, 2))
- par(mar = c(4.2, 4.0, 1.2, 2.2) + 0.1)
-plot(fit.ms, se = TRUE, scale = 12,
-         lcol = mycol, scol = mycol)
-
-# Plot output overlaid
-#par(mfrow = c(1, 1))
-plot(fit.ms, se = TRUE, scale = 12,
-         overlay = TRUE,
-         llwd = 2,
-         lcol = mycol, scol = mycol)
-
-
-###################################################
-### code chunk number 10: categoricalVGAM.Rnw:1399-1400
-###################################################
-plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
-
-
-###################################################
-### code chunk number 11: categoricalVGAM.Rnw:1409-1413
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
- par(mfrow = c(1, 3))
- par(mar = c(4.5, 4.0, 0.2, 2.2) + 0.1)
-plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
-
-
-###################################################
-### code chunk number 12: categoricalVGAM.Rnw:1436-1448
-###################################################
-foo <- function(x, elbow = 50)
-  poly(pmin(x, elbow), 2)
-
-clist <- list("(Intercept)" = diag(3),
-             "poly(age, 2)" = rbind(1, 0, 0),
-             "foo(age)"     = rbind(0, 1, 0),
-             "age"          = rbind(0, 0, 1))
-fit2.ms <-
-    vglm(mstatus ~ poly(age, 2) + foo(age) + age,
-         family = multinomial(refLevel = 2),
-         constraints = clist,
-         data = marital.nz)
-
-
-###################################################
-### code chunk number 13: categoricalVGAM.Rnw:1451-1452
-###################################################
-coef(fit2.ms, matrix = TRUE)
-
-
-###################################################
-### code chunk number 14: categoricalVGAM.Rnw:1456-1463
-###################################################
-par(mfrow = c(2, 2))
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[2], scol=mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[3], scol = mycol[3], which.term = 3)
-
-
-###################################################
-### code chunk number 15: categoricalVGAM.Rnw:1474-1483
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
-par(mfrow=c(2,2))
- par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[3], scol = mycol[3], which.term = 3)
-
-
-###################################################
-### code chunk number 16: categoricalVGAM.Rnw:1501-1502
-###################################################
-deviance(fit.ms) - deviance(fit2.ms)
-
-
-###################################################
-### code chunk number 17: categoricalVGAM.Rnw:1508-1509
-###################################################
-(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
-
-
-###################################################
-### code chunk number 18: categoricalVGAM.Rnw:1512-1513
-###################################################
-pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
-
-
-###################################################
-### code chunk number 19: categoricalVGAM.Rnw:1526-1537
-###################################################
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
-     type = "l", las = 1, lwd = 2, ylim = 0:1,
-     ylab = "Fitted probabilities",
-     xlab = "Age",  # main="Marital status amongst NZ Male Europeans",
-     col = c(mycol[1], "black", mycol[-1])))
-legend(x = 52.5, y = 0.62,  # x="topright",
-       col = c(mycol[1], "black", mycol[-1]),
-       lty = 1:4,
-       legend = colnames(fit.ms@y), lwd = 2)
-abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
-
-
-###################################################
-### code chunk number 20: categoricalVGAM.Rnw:1552-1565
-###################################################
-getOption("SweaveHooks")[["fig"]]()
- par(mfrow = c(1,1))
- par(mar = c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
-     type = "l", las = 1, lwd = 2, ylim = 0:1,
-     ylab = "Fitted probabilities",
-     xlab = "Age",
-     col = c(mycol[1], "black", mycol[-1])))
-legend(x = 52.5, y = 0.62,
-       col = c(mycol[1], "black", mycol[-1]),
-       lty = 1:4,
-       legend = colnames(fit.ms@y), lwd = 2.1)
-abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
-
-
-###################################################
-### code chunk number 21: categoricalVGAM.Rnw:1599-1603
-###################################################
-# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
-head(backPain, 4)
-summary(backPain)
-backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
-
-
-###################################################
-### code chunk number 22: categoricalVGAM.Rnw:1607-1608
-###################################################
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
-
-
-###################################################
-### code chunk number 23: categoricalVGAM.Rnw:1611-1612
-###################################################
-Coef(bp.rrmlm1)
-
-
-###################################################
-### code chunk number 24: categoricalVGAM.Rnw:1640-1641
-###################################################
-set.seed(123)
-
-
-###################################################
-### code chunk number 25: categoricalVGAM.Rnw:1644-1646
-###################################################
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
-                   Corner = FALSE, Uncor = TRUE)
-
-
-###################################################
-### code chunk number 26: categoricalVGAM.Rnw:1654-1658
-###################################################
-biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
-#      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
-       xlim = c(-4.5, 2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
-       chull = TRUE, clty = 2, ccol = "blue")
-
-
-###################################################
-### code chunk number 27: categoricalVGAM.Rnw:1690-1698
-###################################################
-getOption("SweaveHooks")[["fig"]]()
-# Plot output
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-
-biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
-#      xlim = c(-1,6), ylim = c(-1.2,4),  # Use this if not scaled
-       xlim = c(-4.5,2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
-       chull = TRUE, clty = 2, ccol = "blue")
-
-
-###################################################
-### code chunk number 28: categoricalVGAM.Rnw:1812-1813
-###################################################
-iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
-
-
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
deleted file mode 100644
index 8394144..0000000
--- a/inst/doc/categoricalVGAM.Rnw
+++ /dev/null
@@ -1,2325 +0,0 @@
-\documentclass[article,shortnames,nojss]{jss}
-\usepackage{thumbpdf}
-%% need no \usepackage{Sweave.sty}
-
-\SweaveOpts{engine=R,eps=FALSE}
-%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
-%\VignetteDepends{VGAM}
-%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
-%\VignettePackage{VGAM}
-
-%% new commands
-\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
-\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
-\newcommand{\pr}{\mbox{$P$}}
-\newcommand{\logit}{\mbox{\rm logit}}
-\newcommand{\bzero}{{\bf 0}}
-\newcommand{\bone}{{\bf 1}}
-\newcommand{\bid}{\mbox{\boldmath $d$}}
-\newcommand{\bie}{\mbox{\boldmath $e$}}
-\newcommand{\bif}{\mbox{\boldmath $f$}}
-\newcommand{\bix}{\mbox{\boldmath $x$}}
-\newcommand{\biy}{\mbox{\boldmath $y$}}
-\newcommand{\biz}{\mbox{\boldmath $z$}}
-\newcommand{\biY}{\mbox{\boldmath $Y$}}
-\newcommand{\bA}{\mbox{\rm \bf A}}
-\newcommand{\bB}{\mbox{\rm \bf B}}
-\newcommand{\bC}{\mbox{\rm \bf C}}
-\newcommand{\bH}{\mbox{\rm \bf H}}
-\newcommand{\bI}{\mbox{\rm \bf I}}
-\newcommand{\bX}{\mbox{\rm \bf X}}
-\newcommand{\bW}{\mbox{\rm \bf W}}
-\newcommand{\bY}{\mbox{\rm \bf Y}}
-\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
-\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
-\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
-\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
-\newcommand{\diag}{ \mbox{\rm diag} }
-\newcommand{\Var}{ \mbox{\rm Var} }
-\newcommand{\R}{{\textsf{R}}}
-\newcommand{\VGAM}{\pkg{VGAM}}
-
-
-\author{Thomas W. Yee\\University of Auckland}
-\Plainauthor{Thomas W. Yee}
-
-\title{The \pkg{VGAM} Package for Categorical Data Analysis}
-\Plaintitle{The VGAM Package for Categorical Data Analysis}
-
-\Abstract{
-  Classical categorical regression models such as the multinomial logit and
-  proportional odds models are shown to be readily handled by the  vector
-  generalized linear and additive model (VGLM/VGAM) framework. Additionally,
-  there are natural extensions, such as reduced-rank VGLMs for
-  dimension reduction, and allowing covariates that have values
-  specific to each linear/additive predictor,
-  e.g., for consumer choice modeling. This article describes some of the
-  framework behind the \pkg{VGAM} \R{} package, its usage and implementation
-  details.
-}
-\Keywords{categorical data analysis, Fisher scoring,
-  iteratively reweighted least squares,
-  multinomial distribution, nominal and ordinal polytomous responses,
-  smoothing, vector generalized linear and additive models,
-  \VGAM{} \R{} package}
-\Plainkeywords{categorical data analysis, Fisher scoring,
-  iteratively reweighted least squares, multinomial distribution,
-  nominal and ordinal polytomous responses, smoothing,
-  vector generalized linear and additive models, VGAM R package}
-
-\Address{
-  Thomas W. Yee \\
-  Department of Statistics \\
-  University of Auckland, Private Bag 92019 \\
-  Auckland Mail Centre \\
-  Auckland 1142, New Zealand \\
-  E-mail: \email{t.yee at auckland.ac.nz}\\
-  URL: \url{http://www.stat.auckland.ac.nz/~yee/}
-}
-
-
-\begin{document}
-
-
-<<echo=FALSE, results=hide>>=
-library("VGAM")
-library("VGAMdata")
-ps.options(pointsize = 12)
-options(width = 72, digits = 4)
-options(SweaveHooks = list(fig = function() par(las = 1)))
-options(prompt = "R> ", continue = "+")
-@
-
-
-% ----------------------------------------------------------------------
-\section{Introduction}
-\label{sec:jsscat.intoduction}
-
-
-This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
-based on \cite{Yee:2010}.
-Any subsequent features (especially non-backward compatible ones)
-will appear here.
-
-The subject of CDA is concerned with
-analyses where the response is categorical regardless of whether
-the explanatory variables are continuous or categorical. It is a
-very frequent form of data. Over the years several CDA regression
-models for polytomous responses have become popular, e.g., those
-in Table \ref{tab:cat.quantities}. Not surprisingly, the models
-are interrelated: their foundation is the multinomial distribution
-and consequently they share similar and overlapping properties which
-modellers should know and exploit. Unfortunately, software has been
-slow to reflect their commonality and this makes analyses unnecessarily
-difficult for the practitioner on several fronts, e.g., using different
-functions/procedures to fit different models which does not aid the
-understanding of their connections.
-
-
-This historical misfortune can be seen by considering \R{} functions
-for CDA. From the Comprehensive \proglang{R} Archive Network
-(CRAN, \url{http://CRAN.R-project.org/}) there is \texttt{polr()}
-\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
-model and \texttt{multinom()}
-\citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
-logit model. However, both of these can be considered `one-off'
-modeling functions rather than providing a unified offering for CDA.
-The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
-has greater functionality: it can fit the proportional odds model
-(and the forward continuation ratio model upon preprocessing). Neither
-\texttt{polr()} or \texttt{lrm()} appear able to fit the nonproportional
-odds model. There are non-CRAN packages too, such as the modeling
-function \texttt{nordr()} \citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit
-the proportional odds, continuation ratio and adjacent categories models;
-however it calls \texttt{nlm()} and the user must supply starting values.
-In general these \R{} \citep{R} modeling functions are not modular
-and often require preprocessing and sometimes are not self-starting.
-The implementations can be perceived as a smattering and piecemeal
-in nature. Consequently if the practitioner wishes to fit the models
-of Table \ref{tab:cat.quantities} then there is a need to master several
-modeling functions from several packages each having different syntaxes
-etc. This is a hindrance to efficient CDA.
-
-
- 
-\begin{table}[tt]
-\centering
-\begin{tabular}{|c|c|l|}
-\hline
-Quantity & Notation &
-%Range of $j$ &
-\VGAM{} family function \\
-\hline
-%
-$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{acat()} \\
-%
-$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
-%$2,\ldots,M+1$ &
-\texttt{acat(reverse = TRUE)} \\
-%
-$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
-%$1,\ldots,M$ & 
-\texttt{cratio()} \\
-%
-$\pr(Y<j|Y \leq j)$ &$\delta_{j}^{*R}$ &
-%$2,\ldots,M+1$ &
-\texttt{cratio(reverse = TRUE)} \\
-%
-$\pr(Y\leq j)$ &$\gamma_{j}$ &
-%$1,\ldots,M$ &
-\texttt{cumulative()} \\
-%
-$\pr(Y\geq j)$ &$\gamma_{j}^R$&
-%$2,\ldots,M+1$ &
-\texttt{cumulative(reverse = TRUE)} \\
-%
-$\log\{\pr(Y=j)/\pr(Y=M+1)\}$ & &
-%$1,\ldots,M$ &
-\texttt{multinomial()} \\
-%
-$\pr(Y=j|Y \geq j)$ &$\delta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{sratio()} \\
-%
-$\pr(Y=j|Y \leq j)$ &$\delta_{j}^R$ &
-%$2,\ldots,M+1$ &
-\texttt{sratio(reverse = TRUE)} \\
-%
-\hline
-\end{tabular}
-\caption{
-Quantities defined in \VGAM{} for a
-categorical response $Y$ taking values $1,\ldots,M+1$.
-Covariates \bix{} have been omitted for clarity.
-The LHS quantities are $\eta_{j}$
-or $\eta_{j-1}$ for $j=1,\ldots,M$ (not reversed)
-and $j=2,\ldots,M+1$ (if reversed), respectively.
-All models are estimated by minimizing the deviance.
-All except for \texttt{multinomial()} are suited to ordinal $Y$.
-\label{tab:cat.quantities}
-}
-\end{table}
- 
-
-
-
-\proglang{SAS} \citep{SAS} does not fare much better than \R. Indeed,
-it could be considered as having an \textit{excess} of options which
-bewilders the non-expert user; there is little coherent overriding
-structure. Its \code{proc logistic} handles the multinomial logit
-and proportional odds models, as well as exact logistic regression
-\citep[see][which is for Version 8 of \proglang{SAS}]{stok:davi:koch:2000}.
-The fact that the proportional odds model may be fitted by \code{proc
-logistic}, \code{proc genmod} and \code{proc probit} arguably leads
-to possible confusion rather than the making of connections, e.g.,
-\code{genmod} is primarily for GLMs and the proportional odds model is not
-a GLM in the classical \cite{neld:wedd:1972} sense. Also, \code{proc
-phreg} fits the multinomial logit model, and \code{proc catmod} with
-its WLS implementation adds to further potential confusion.
-
-
-This article attempts to show how these deficiencies can be addressed
-by considering the vector generalized linear and additive model
-(VGLM/VGAM) framework, as implemented by the author's \pkg{VGAM}
-package for \R{}. The main purpose of this paper is to demonstrate
-how the framework is very well suited to many `classical' regression
-models for categorical responses, and to describe the implementation and
-usage of \pkg{VGAM} for such. To this end an outline of this article
-is as follows. Section \ref{sec:jsscat.VGLMVGAMoverview} summarizes
-the basic VGLM/VGAM framework. Section \ref{sec:jsscat.vgamff}
-centers on functions for CDA in \VGAM. Given an adequate framework,
-some natural extensions of Section \ref{sec:jsscat.VGLMVGAMoverview} are
-described in Section \ref{sec:jsscat.othermodels}. Users of \pkg{VGAM}
-can benefit from Section \ref{sec:jsscat.userTopics} which shows how
-the software reflects their common theory. Some examples are given in
-Section \ref{sec:jsscat.eg}. Section \ref{sec:jsscat.implementDetails}
-contains selected topics in statistical computing that are
-more relevant to programmers interested in the underlying code.
-Section \ref{sec:jsscat.extnUtil} discusses several utilities and
-extensions needed for advanced CDA modeling, and the article concludes
-with a discussion. This document was run using \pkg{VGAM} 0.7-10
-\citep{yee:VGAM:2010} under \R 2.10.0.
-
-
-Some general references for categorical data providing
-background to this article include
-\cite{agre:2010},
-\cite{agre:2013},
-\cite{fahr:tutz:2001},
-\cite{leon:2000},
-\cite{lloy:1999},
-\cite{long:1997},
-\cite{mccu:neld:1989},
-\cite{simo:2003},
-\citet{smit:merk:2013} and
-\cite{tutz:2012}.
-An overview of models for ordinal responses is \cite{liu:agre:2005},
-and a manual for fitting common models found in \cite{agre:2002}
-to polytomous responses with various software is \cite{thom:2009}.
-A package for visualizing categorical data in \R{} is \pkg{vcd}
-\citep{Meyer+Zeileis+Hornik:2006,Meyer+Zeileis+Hornik:2009}.
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{VGLM/VGAM overview}
-\label{sec:jsscat.VGLMVGAMoverview}
-
-
-This section summarizes the VGLM/VGAM framework with a particular emphasis
-toward categorical models since the classes encapsulate many multivariate
-response models in, e.g., survival analysis, extreme value analysis,
-quantile and expectile regression, time series, bioassay data, nonlinear
-least-squares models, and scores of standard and nonstandard univariate
-and continuous distributions. The framework is partially summarized by
-Table \ref{tab:rrvglam.jss.subset}. More general details about VGLMs
-and VGAMs can be found in \cite{yee:hast:2003} and \cite{yee:wild:1996}
-respectively. An informal and practical article connecting the general
-framework with the software is \cite{Rnews:Yee:2008}.
-
-
-
-\subsection{VGLMs}
-\label{sec:wffc.appendixa.vglms}
-
-Suppose the observed response \biy{} is a $q$-dimensional vector.
-VGLMs are defined as a model for which the conditional distribution
-of $\biY$ given explanatory $\bix$ is of the form
-\begin{eqnarray}
-f(\biy | \bix ; \bB, \phi)  =  h(\biy, \eta_1,\ldots, \eta_M, \phi)
-\label{gammod}
-\end{eqnarray}
-for some known function $h(\cdot)$, where $\bB = (\bbeta_1 \,
-\bbeta_2 \, \cdots \, \bbeta_M)$ is a $p \times M$ matrix of
-unknown regression coefficients,
-and the $j$th linear predictor is
-\begin{equation}
-\eta_j  =  \eta_j(\bix)  =  \bbeta_j^{\top} \bix  = 
-\sum_{k=1}^p \beta_{(j)k} \, x_k ,  \qquad j=1,\ldots,M.
-\label{gammod2}
-\end{equation}
-Here $\bix=(x_1,\ldots,x_p)^{\top}$ with $x_1 = 1$ if there is an intercept.
-Note that (\ref{gammod2}) means that \textit{all} the parameters may be
-potentially modelled as functions of \bix. It can be seen that VGLMs are
-like GLMs but allow for multiple linear predictors, and they encompass
-models outside the small confines of the exponential family.
-In (\ref{gammod}) the quantity $\phi$ is an optional scaling parameter
-which is included for backward compatibility with common adjustments
-to overdispersion, e.g., with respect to GLMs.
-
-
-In general there is no relationship between $q$ and $M$: it
-depends specifically on the model or distribution to be fitted.
-However, for the `classical' categorical regression models of
-Table \ref{tab:cat.quantities} we have $M=q-1$ since $q$ is the number
-of levels the multi-category response $Y$ has.
-
-
-
-
-
-The $\eta_j$ of VGLMs may be applied directly to parameters of a
-distribution rather than just to a mean for GLMs. A simple example is
-a univariate distribution with a location parameter $\xi$ and a scale
-parameter $\sigma > 0$, where we may take $\eta_1 = \xi$ and $\eta_2 =
-\log\,\sigma$. In general, $\eta_{j}=g_{j}(\theta_{j})$ for some parameter
-link function $g_{j}$ and parameter $\theta_{j}$.
-For example, the adjacent categories models in
-Table \ref{tab:cat.quantities} are ratios of two probabilities, therefore
-a log link of $\zeta_{j}^{R}$ or $\zeta_{j}$ is the default.
-In \VGAM{}, there are currently over a dozen links to choose from, of
-which any can be assigned to any parameter, ensuring maximum flexibility.
-Table \ref{tab:jsscat.links} lists some of them.
-
-
-
-\begin{table}[tt]
-\centering
-%\  ~~~ \par
-\begin{tabular}{|l|l|l|l|}
-\hline
-\qquad \qquad $\boldeta$ & 
-Model & Modeling & Reference \\
- & & function & \\
-%-------------------------------------------------------------
-\hline
-\hline
-%-------------------------------------------------------------
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
-VGLM & \texttt{vglm()}
-&
-\cite{yee:hast:2003} \\[1.6ex]
-%Yee \& Hastie (2003) \\[1.6ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} +
- \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
-%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
-VGAM & \texttt{vgam()}
-&
-\cite{yee:wild:1996} \\[2.2ex]
-%Yee \& Wild (1996) \\[2.2ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
-RR-VGLM & \texttt{rrvglm()}
-&
-\cite{yee:hast:2003} \\[1.8ex]
-%Yee \& Hastie (2003) \\[1.8ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-See \cite{yee:hast:2003} &
-Goodman's RC & \texttt{grc()}
-&
-%\cite{yee:hast:2003} \\[1.8ex]
-\cite{good:1981} \\[1.8ex]
-%-------------------------------------------------------------
-\hline
-\end{tabular}
-\caption{
-Some of the models handled by
-the package \VGAM{} and
-its framework.
-The vector of latent variables $\bnu = \bC^{\top} \bix_2$
-where
-$\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
-\label{tab:rrvglam.jss.subset}
-}
-%\medskip
-\end{table}
-
-
-
-
-
-
-VGLMs are estimated using iteratively reweighted least squares (IRLS) 
-which is particularly suitable for categorical models
-\citep{gree:1984}.
-All models in this article have a log-likelihood
-\begin{equation}
-\ell  =  \sum_{i=1}^n \, w_i \, \ell_i
-\label{eq:log-likelihood.VGAM}
-\end{equation}
-where the $w_i$ are known positive prior weights.
-Let $\bix_i$ denote the explanatory vector for the $i$th observation,
-for $i=1,\dots,n$.
-Then one can write
-\begin{eqnarray}
-\boldeta_i &=& \boldeta(\bix_i)  = 
-\left(
-\begin{array}{c}
-\eta_1(\bix_i) \\
-\vdots \\
-\eta_M(\bix_i)
-\end{array} \right)  = 
-\bB^{\top} \bix_i  =  
-\left(
-\begin{array}{c}
-\bbeta_1^{\top} \bix_i \\
-\vdots \\
-\bbeta_M^{\top} \bix_i
-\end{array} \right)
-\nonumber
-\\
-&=& 
-\left(
-\begin{array}{cccc}
-\beta_{(1)1} & \cdots & \beta_{(1)p} \\
-\vdots \\
-\beta_{(M)1} & \cdots & \beta_{(M)p} \\
-\end{array} \right)
-\bix_i  = 
-\left(
-\bbeta_{(1)} \; \cdots \; \bbeta_{(p)}
-\right)
-\bix_i .
-\label{eq:lin.pred}
-\end{eqnarray}
-In IRLS,
-an adjusted dependent vector $\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$
-is regressed upon a large (VLM) model matrix, with
-$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
-The working weights $\bW_i$ here are 
-$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
-(which, under regularity conditions, is equal to
-$-w_i \, E[ \partial^2 \ell_i / (\partial \boldeta_i \,
-\partial \boldeta_i^{\top})]$),
-giving rise to the Fisher scoring algorithm.
-
-
-Let $\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$
-(LM) model matrix
-obtained from the \texttt{formula} argument of \texttt{vglm()}.
-Given $\biz_i$, $\bW_i$ and $\bX{}$ at the current IRLS iteration,
-a weighted multivariate regression is performed.
-To do this, a \textit{vector linear model} (VLM) model matrix 
-$\bX_{\sVLM}$ is formed from $\bX{}$ and $\bH_k$
-(see Section \ref{sec:wffc.appendixa.vgams}).
-This has $nM$ rows and, if there are no constraints, $Mp$ columns.
-Then $\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
-upon $\bX_{\sVLM}$
-with variance-covariance matrix $\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$.
-This system of linear equations is converted to one large
-WLS fit by premultiplication of the output of
-a Cholesky decomposition of the $\bW_i$.
-
-
-Fisher scoring usually has good numerical stability
-because the $\bW_i$ are positive-definite over a larger
-region of parameter space than Newton-Raphson. 
-For the categorical models in this article the expected
-information matrices are simpler than the observed
-information matrices, and are easily derived,
-therefore all the families in Table \ref{tab:cat.quantities}
-implement Fisher scoring.
-
-
-
-\subsection{VGAMs and constraint matrices}
-\label{sec:wffc.appendixa.vgams}
-
-
-VGAMs provide additive-model extensions to VGLMs, that is,
-(\ref{gammod2}) is generalized to
-\begin{equation}
-\eta_j(\bix)  =  \beta_{(j)1} +
-\sum_{k=2}^p \; f_{(j)k}(x_k), \qquad j = 1,\ldots, M,
-\label{addmod}
-\end{equation}
-a sum of smooth functions of the individual covariates, just as
-with ordinary GAMs \citep{hast:tibs:1990}. The $\bif_k =
-(f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered for uniqueness,
-and are estimated simultaneously using \textit{vector smoothers}.
-VGAMs are thus a visual data-driven method that is well suited to
-exploring data, and they retain the simplicity of interpretation that
-GAMs possess.
-
-
-
-An important concept, especially for CDA, is the idea of
-`constraints-on-the functions'.
-In practice we often wish to constrain the effect of a covariate to
-be the same for some of the $\eta_j$ and to have no effect for others.
-We shall see below that this constraints idea is important
-for several categorical models because of a popular parallelism assumption.
-As a specific example, for VGAMs we may wish to take
-\begin{eqnarray*}
-\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
-\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
-\end{eqnarray*}
-so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$.
-For VGAMs, we can represent these models using
-\begin{eqnarray}
-\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
-\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
-\label{eqn:constraints.VGAM}
-\end{eqnarray}
-where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
-\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
-possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
-vector of unknown intercepts. With no constraints at all, $\bH_1 =
-\bH_2 = \cdots = \bH_p = \bI_M$ and $\bbeta_{(1)}^* = \bbeta_{(1)}$.
-Like the $\bif_k$, the $\bif_k^*$ are centered for uniqueness.
-For VGLMs, the $\bif_k$ are linear so that
-\begin{eqnarray}
-{\bB}^{\top} &=&
-\left(
-\bH_1 \bbeta_{(1)}^*
- \;
-\Bigg|
- \;
-\bH_2 \bbeta_{(2)}^*
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\bH_p \bbeta_{(p)}^*
-\right) 
-\label{eqn:lin.coefs4}
-\end{eqnarray}
-for some vectors
-$\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
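-
-In \pkg{VGAM}, such constraints are supplied as a named list of
-matrices via the \texttt{constraints} argument.
-As a sketch (not run), the $M=2$ example above might be coded as
-follows, where \texttt{mydat}, with a three-level factor response
-\texttt{Y} and covariates \texttt{x2} and \texttt{x3}, is hypothetical:
-<<eval=FALSE>>=
-clist <- list("(Intercept)" = diag(2),      # H_1 = I_2
-              "s(x2)"       = rbind(1, 1),  # f_(1)2 == f_(2)2
-              "s(x3)"       = rbind(1, 0))  # f_(2)3 == 0
-fit.c <- vgam(Y ~ s(x2) + s(x3), multinomial, constraints = clist,
-              data = mydat)
-constraints(fit.c)  # Returns the H_k actually used
-@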
-
-
-The
-$\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$ using
-Kronecker product operations.
-For example, with trivial constraints,
-$\bX_{\sVLM} = \bX \otimes \bI_M$.
-More generally,
-\begin{eqnarray}
-\bX_{\sVLM} &=& 
-\left(
-\left( \bX \, \bie_{1} \right) \otimes \bH_1
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{2} \right) \otimes \bH_2
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{p} \right) \otimes \bH_p
-\right)
-\label{eqn:X_vlm_Hk}
-\end{eqnarray}
-($\bie_{k}$ is a vector of zeros except for a one in the $k$th position)
-so that 
-$\bX_{\sVLM}$ is $(nM) \times p^*$ where
-$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
-of columns of all the constraint matrices.
-Note that $\bX_{\sVLM}$ and \bX{} can be obtained by
-\texttt{model.matrix(vglmObject, type = "vlm")}
-and
-\texttt{model.matrix(vglmObject, type = "lm")}
-respectively.
-Equation (\ref{eqn:lin.coefs4}) focusses on the rows of \bB{} whereas
-(\ref{eq:lin.pred}) focusses on the columns.
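-
-As a small illustration, consider a proportional odds fit to the
-pneumoconiosis data \texttt{pneumo} (a sketch, not run; the dimension
-comments assume the usual 8-row version of these data):
-<<eval=FALSE>>=
-fit.po <- vglm(cbind(normal, mild, severe) ~ let,
-               propodds, data = pneumo)
-dim(model.matrix(fit.po, type = "lm"))   # n x p     (8 x 2 here)
-dim(model.matrix(fit.po, type = "vlm"))  # (nM) x p* (16 x 3 here)
-@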
-
-
-VGAMs are estimated by applying a modified vector backfitting algorithm
-\citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.
-
-
-
-\subsection{Vector splines and penalized likelihood}
-\label{sec:ex.vspline}
-
-If (\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
-natural extension of the cubic smoothing spline to vector responses)
-then it can be shown that the resulting solution maximizes a penalized
-likelihood; some details are sketched in \cite{yee:step:2007}. In fact,
-knot selection for vector spline follows the same idea as O-splines
-\citep[see][]{wand:orme:2008} in order to lower the computational cost.
-
-
-The usage of \texttt{vgam()} with smoothing is very similar
-to \texttt{gam()} \citep{gam:pack:2009}, e.g.,
-to fit a nonparametric proportional odds model
-\citep[cf. p.179 of][]{mccu:neld:1989}
-to the pneumoconiosis data one could try
-<<label = pneumocat, eval=T>>=
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
-            cumulative(reverse = TRUE, parallel = TRUE), data = pneumo)
-@
-Here, setting \texttt{df = 1} means a linear fit so that
-\texttt{df = 2} affords a little nonlinearity.
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section[VGAM family functions]{\pkg{VGAM} family functions}
-\label{sec:jsscat.vgamff}
-
-
-
-This section summarizes and comments on the \VGAM{} family functions
-of Table \ref{tab:cat.quantities} for a categorical response variable
-taking values $Y=1,2,\ldots,M+1$. In its most basic invocation, the usage
-entails a trivial change compared to \texttt{glm()}: use \texttt{vglm()}
-instead and assign the \texttt{family} argument a \VGAM{} family function.
-The use of a \VGAM{} family function to fit a specific model is far
-simpler than having a different modeling function for each model.
-Options specific to that model appear as arguments of that \VGAM{} family
-function.
-
-
-
-
-
-While writing \texttt{cratio()} it was found that various authors defined
-the quantity ``continuation ratio'' differently; therefore it became
-necessary to define a ``stopping ratio''. Table \ref{tab:cat.quantities}
-defines these quantities for \VGAM{}.
-
-
-
-
-The multinomial logit model is usually described by choosing the first or
-last level of the factor to be baseline. \VGAM{} chooses the last level
-(Table \ref{tab:cat.quantities}) by default, however that can be changed
-to any other level by use of the \texttt{refLevel} argument.
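-
-For instance, as a sketch with a hypothetical data frame
-\texttt{mydat} containing a factor response \texttt{Y}:
-<<eval=FALSE>>=
-fit1 <- vglm(Y ~ x2, multinomial,               data = mydat)  # Last level is baseline
-fit2 <- vglm(Y ~ x2, multinomial(refLevel = 1), data = mydat)  # First level instead
-@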
-
-
-
-
-If the proportional odds assumption is inadequate then one strategy is
-to try a different link function (see Section \ref{sec:jsscat.links}
-for a selection). Another alternative is to add extra terms such as
-interaction terms into the linear predictor
-\citep[available in the \proglang{S} language;][]{cham:hast:1993}.
-Another is to fit the so-called \textit{partial}
-proportional odds model \citep{pete:harr:1990}
-which \VGAM{} can fit via constraint matrices.
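-
-As a sketch (with a hypothetical data frame \texttt{mydat} and a
-three-level ordinal response \texttt{ymat}, so $M=2$), a partial
-proportional odds model with parallelism for \texttt{x2} only might
-be coded as
-<<eval=FALSE>>=
-clist <- list("(Intercept)" = diag(2),
-              "x2"          = rbind(1, 1),  # Parallel: H_k = 1_M
-              "x3"          = diag(2))      # Level-specific coefficients
-fit.ppo <- vglm(ymat ~ x2 + x3, cumulative(reverse = TRUE),
-                constraints = clist, data = mydat)
-@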
-
-
-
-In the terminology of \cite{agre:2002},
-\texttt{cumulative()} fits the class of \textit{cumulative link models},
-e.g.,
-\texttt{cumulative(link = probit)} is a cumulative probit model.
-For \texttt{cumulative()}
-it was difficult to decide whether
-\texttt{parallel = TRUE}
-or
-\texttt{parallel = FALSE}
-should be the default.
-In fact, the latter is (for now?).
-Users need to set
-\texttt{cumulative(parallel = TRUE)} explicitly to
-fit a proportional odds model---hopefully this will alert
-them to the fact that they are making
-the proportional odds assumption and
-check its validity (\cite{pete:1990}; e.g., through a deviance or
-likelihood ratio test). However the default means numerical problems
-can occur with far greater likelihood.
-Thus there is tension between the two options.
-As a compromise there is now a \VGAM{} family function
-called \texttt{propodds(reverse = TRUE)} which is equivalent to
-\texttt{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}.
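-
-The equivalence can be checked directly, e.g., on the
-pneumoconiosis data (a sketch, not run):
-<<eval=FALSE>>=
-fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
-fit2 <- vglm(cbind(normal, mild, severe) ~ let,
-             cumulative(parallel = TRUE, reverse = TRUE, link = "logit"),
-             data = pneumo)
-max(abs(coef(fit1) - coef(fit2)))  # Should be essentially zero
-@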
-
-
-
-By the way, note that arguments such as 
-\texttt{parallel}
-can handle a slightly more complex syntax.
-A call such as
-\code{parallel = TRUE ~ x2 + x5 - 1} means the parallelism assumption
-is only applied to $X_2$ and $X_5$.
-This might be equivalent to something like
-\code{parallel = FALSE ~ x3 + x4}, i.e., to the remaining
-explanatory variables.
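-
-A sketch of this syntax (again with hypothetical data \texttt{mydat}):
-<<eval=FALSE>>=
-fit.pp <- vglm(ymat ~ x2 + x3 + x4 + x5,
-               cumulative(parallel = TRUE ~ x2 + x5 - 1, reverse = TRUE),
-               data = mydat)
-constraints(fit.pp)  # H_k for x2 and x5 should equal 1_M
-@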
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Other models}
-\label{sec:jsscat.othermodels}
-
-
-Given the VGLM/VGAM framework of Section \ref{sec:jsscat.VGLMVGAMoverview}
-it is found that natural extensions are readily proposed in several
-directions. This section describes some such extensions.
-
-
-
-
-\subsection{Reduced-rank VGLMs}
-\label{sec:jsscat.RRVGLMs}
-
-
-Consider a multinomial logit model where $p$ and $M$ are both large.
-A (not-too-convincing) example might be the data frame \texttt{vowel.test}
-in the package \pkg{ElemStatLearn} \citep[see][]{hast:tibs:buja:1994}.
-The vowel recognition data set involves $q=11$ symbols produced from
-8 speakers with 6 replications of each. The training data comprises
-$10$ input features (not including the intercept) based on digitized
-utterances. A multinomial logit model fitted to these data would
-have $\widehat{\bB}$ comprising $p \times (q-1) = 110$ regression
-coefficients for $n=8\times 6\times 11 = 528$ observations. The ratio
-of $n$ to the number of parameters is small, and it would be good to
-introduce some parsimony into the model.
-
-
-
-A simple and elegant solution is to represent $\widehat{\bB}$ by
-its reduced-rank approximation. To do this, partition $\bix$ into
-$(\bix_1^{\top}, \bix_2^{\top})^{\top}$ and $\bB = (\bB_1^{\top} \;
-\bB_2^{\top})^{\top}$ so that the reduced-rank regression is applied
-to $\bix_2$. In general, \bB{} is a dense matrix of full rank, i.e., rank
-$=\min(M,p)$, and since there are $M \times p$ regression coefficients
-to estimate this is `too' large for some models and/or data sets.
-If we approximate $\bB_2$ by a reduced-rank regression \begin{equation}
-\label{eq:rrr.BAC} \bB_2  =  \bC{} \, \bA^{\top} \end{equation} and if
-the rank $R$ is kept low then this can cut down the number of regression
-coefficients dramatically. If $R=2$ then the results may be biplotted
-(\texttt{biplot()} in \VGAM{}). Here, \bC{} and \bA{} are $p_2 \times R$
-and $M \times R$ respectively, and usually they are `thin'.
-
-
-More generally, the class of \textit{reduced-rank VGLMs} (RR-VGLMs)
-is simply a VGLM where $\bB_2$ is expressed as a product of two thin
-estimated matrices (Table \ref{tab:rrvglam.jss.subset}). Indeed,
-\cite{yee:hast:2003} show that RR-VGLMs are VGLMs with constraint
-matrices that are unknown and estimated. Computationally, this is
-done using an alternating method: in (\ref{eq:rrr.BAC}) estimate \bA{}
-given the current estimate of \bC{}, and then estimate \bC{} given the
-current estimate of \bA{}. This alternating algorithm is repeated until
-convergence within each IRLS iteration.
-
-
-Incidentally, special cases of RR-VGLMs have appeared in the
-literature. For example, an RR-multinomial logit model is known as the
-\textit{stereotype} model \citep{ande:1984}. Another is \cite{good:1981}'s
-RC model (see Section \ref{sec:jsscat.rrr.goodman}), which is a reduced-rank
-multivariate Poisson model. Note that the parallelism assumption of the
-proportional odds model \citep{mccu:neld:1989} can be thought of as a
-type of reduced-rank regression where the constraint matrices are thin
-($\bone_M$, actually) and known.
-
-
-
-The modeling function \texttt{rrvglm()} should work with any \VGAM{}
-family function compatible with \texttt{vglm()}. Of course, its
-applicability should be restricted to models where a reduced-rank
-regression of $\bB_2$ makes sense.
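-
-As a sketch for the vowel recognition example above (assuming
-\pkg{ElemStatLearn} is installed and that \texttt{vowel.test} has
-columns named \texttt{y} and \texttt{x.1},\ldots,\texttt{x.10}):
-<<eval=FALSE>>=
-library("ElemStatLearn")
-vowel.test <- transform(vowel.test, y = factor(y))
-fit.rr <- rrvglm(y ~ x.1 + x.2 + x.3 + x.4 + x.5 +
-                     x.6 + x.7 + x.8 + x.9 + x.10,
-                 multinomial, data = vowel.test, Rank = 2)
-biplot(fit.rr)  # Possible because Rank = 2
-@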
-
-
-
-
-
-
-
-
-
-\subsection[Goodman's R x C association model]{Goodman's $R \times C$ association model}
-\label{sec:jsscat.rrr.goodman}
-
-
-
-
-
-Let $\bY = [(y_{ij})]$ be an $n \times M$ matrix of counts.
-Section 4.2 of \cite{yee:hast:2003} shows that Goodman's RC$(R)$ association
-model \citep{good:1981} fits within the VGLM framework by setting up
-the appropriate indicator variables, structural zeros and constraint
-matrices. Goodman's model fits a reduced-rank type model to \bY{}
-by firstly assuming that $Y_{ij}$ has a Poisson distribution, and that
-\begin{eqnarray}
-\log \, \mu_{ij} &=& \mu + \alpha_{i} + \gamma_{j} + 
-\sum_{k=1}^R a_{ik} \, c_{jk} , 
-\ \ \ i=1,\ldots,n;\ \ j=1,\ldots,M,
-\label{eqn:goodmanrc}
-\end{eqnarray}
-where $\mu_{ij} = E(Y_{ij})$ is the mean of the $i$-$j$ cell, and the
-rank $R$ satisfies $R < \min(n,M)$.
-
-
-The modeling function \texttt{grc()} should work on any two-way
-table \bY{} of counts generated by (\ref{eqn:goodmanrc}) provided
-the number of 0's is not too large. Its usage is quite simple, e.g.,
-\texttt{grc(Ymatrix, Rank = 2)} fits a rank-2 model to a matrix of counts.
-By default a \texttt{Rank = 1} model is fitted.
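-
-A sketch (here \texttt{Ymatrix} is a hypothetical two-way table of
-counts):
-<<eval=FALSE>>=
-grc1 <- grc(Ymatrix)            # A rank-1 model, by default
-grc2 <- grc(Ymatrix, Rank = 2)  # A rank-2 model
-biplot(grc2)                    # Rank-2 fits may be biplotted
-@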
-
-
-
-
-\subsection{Bradley-Terry models}
-\label{sec:jsscat.brat}
-
-Consider
-an experiment consisting of $n_{ij}$ judges who compare
-pairs of items $T_i$, $i=1,\ldots,M+1$.
-They express their preferences between $T_i$ and $T_j$. 
-Let $N=\sum \sum_{i<j} n_{ij}$ be the total number of pairwise
-comparisons, and assume independence for ratings of the same pair
-by different judges and for ratings of different pairs by the same judge.
-Let $\pi_i$ be the \textit{worth} of item $T_i$,
-\[
-\pr(T_i > T_j)  =  p_{i/ij}  =  \frac{\pi_i}{\pi_i + \pi_j},
-\  \qquad i \neq {j},
-\]
-where ``$T_i>T_j$'' means $i$ is preferred over $j$.
-Suppose that $\pi_i > 0$.
-Let $Y_{ij}$ be the number of times that $T_i$ is preferred
-over $T_j$ in the $n_{ij}$ comparisons of the pairs.
-Then $Y_{ij} \sim {\rm Bin}(n_{ij},p_{i/ij})$.
-This is a Bradley-Terry model (without ties),
-and the \VGAM{} family function is \texttt{brat()}.
-
-
-Maximum likelihood estimation of the parameters $\pi_1,\ldots,\pi_{M+1}$
-involves maximizing
-\[
-\prod_{i<j}^{M+1}
-\left(
-\begin{array}{c}
-n_{ij} \\
-y_{ij}
-\end{array} \right)
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right)^{y_{ij}}
-\left(
-\frac{\pi_j}{\pi_i + \pi_j}
-\right)^{n_{ij}-y_{ij}} .
-\]
-By default, $\pi_{M+1} \equiv 1$ is used for identifiability;
-however, this can be changed very easily.
-Note that one can define 
-linear predictors $\eta_{ij}$ of the form
-\begin{equation}
-\label{eq:bradter.logit}
-\logit 
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right)  =  \log 
-\left(
-\frac{\pi_i}{\pi_j}
-\right)  =  \lambda_i - \lambda_j .
-\end{equation}
-The \VGAM{} framework can handle the Bradley-Terry model only for
-intercept-only models; it has
-\begin{equation}
-\label{eq:bradter}
-\lambda_j  =  \eta_j  =  \log\, \pi_j = \beta_{(1)j},
-\ \ \ \ j=1,\ldots,M.
-\end{equation}
-
-
-As well as having many applications in the field of preferences,
-the Bradley-Terry model has many uses in modeling `contests' between
-teams $i$ and $j$, where only one of the teams can win in each
-contest (ties are not allowed under the classical model).
-The {packaging} function \texttt{Brat()} can be used to
-convert a square matrix into one that has more columns, to
-serve as input to \texttt{vglm()}.
-For example, consider
-journal citation data where a citation of article B
-by article A is a win for article B and a loss for article A.
-On a specific data set,
-<<>>=
-journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
-                      498, 68, NA, 325,   221, 17, 142, NA), 4, 4)
-dimnames(squaremat) <- list(winner = journal, loser = journal)
-@
-then \texttt{Brat(squaremat)} returns a $1 \times 12$ matrix.
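-
-A fit to these data might then look something like the following
-sketch (not run):
-<<eval=FALSE>>=
-fit <- vglm(Brat(squaremat) ~ 1, brat, trace = TRUE)
-Coef(fit)  # Estimated worths pi_i; the last journal is the reference
-@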
-
-
-
-
-
-
-
-\subsubsection{Bradley-Terry model with ties}
-\label{sec:cat.bratt}
-
-
-The \VGAM{} family function \texttt{bratt()} implements
-a Bradley-Terry model with ties (no preference), e.g.,
-where both $T_i$ and $T_j$ are equally good or bad.
-Here we assume
-\begin{eqnarray*}
- \pr(T_i > T_j) &=& \frac{\pi_i}{\pi_i + \pi_j + \pi_0},
-\ \qquad
- \pr(T_i = T_j)  =  \frac{\pi_0}{\pi_i + \pi_j + \pi_0},
-\end{eqnarray*}
-with $\pi_0 > 0$ as an extra parameter.
-It has 
-\[
-\boldeta=(\log \pi_1,\ldots, \log \pi_{M-1}, \log \pi_{0})^{\top}
-\]
-by default, where there are $M$ competitors and $\pi_M \equiv 1$.
-Like \texttt{brat()}, one can choose a different reference group
-and reference value.
-
-
-Other \R{} packages for the Bradley-Terry model
-include \pkg{BradleyTerry2}
-by H. Turner and D. Firth
-\citep[with and without ties;][]{firth:2005,firth:2008}
-and \pkg{prefmod} \citep{Hatzinger:2009}.
-
-
-
-
-\begin{table}[tt]
-\centering
-\begin{tabular}{|l|c|}
-\hline
-\pkg{VGAM} family function & Independent parameters \\
-\hline
-\texttt{ABO()} & $p, q$ \\
-\texttt{MNSs()} & $m_S, m_s, n_S$ \\
-\texttt{AB.Ab.aB.ab()} & $p$ \\
-\texttt{AB.Ab.aB.ab2()} & $p$ \\
-\texttt{AA.Aa.aa()} & $p_A$ \\
-\texttt{G1G2G3()} & $p_1, p_2, f$ \\
-\hline
-\end{tabular}
-\caption{Some genetic models currently implemented
-and their unique parameters.
-\label{tab:gen.all}
-}
-\end{table}
-
-
-
-
-
-\subsection{Genetic models}
-\label{sec:jsscat.genetic}
-
-
-There are quite a number of population genetic models based on the
-multinomial distribution,
-e.g., \cite{weir:1996}, \cite{lang:2002}.
-Table \ref{tab:gen.all} lists some \pkg{VGAM} family functions for such.
-
-
-
-
-For example, the ABO blood group system
-has two independent parameters $p$ and $q$, say.
-Here,
-the blood groups A, B and O form six possible combinations (genotypes)
-consisting of AA, AO, BB, BO, AB, OO
-(see Table \ref{tab:ABO}). A and B are dominant over
-blood type O. Let $p$, $q$ and $r$ be the probabilities
-for A, B and O respectively (so that
-$p+q+r=1$) for a given population. 
-The log-likelihood function is 
-\[
-\ell(p,q) \;=\; n_A\, \log(p^2 + 2pr) + n_B\, \log(q^2 + 2qr) + n_{AB}\,
-\log(2pq) + 2 n_O\, \log(1-p-q),
-\]
-where $r = 1 - p -q$, $p \in (\,0,1\,)$,
-$q \in (\,0,1\,)$, $p+q<1$.
-We let $\boldeta = (g(p), g(r))^{\top}$ where $g$ is the link function.
-Any $g$ from Table \ref{tab:jsscat.links} appropriate for
-a parameter $\theta \in (0,1)$ will do.
-
-
-A toy example where $p=p_A$ and $q=p_B$ is
-<<>>=
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
-coef(fit, matrix = TRUE)
-Coef(fit)  # Estimated pA and pB
-@
-The function \texttt{Coef()}, which applies only to intercept-only models,
-applies to $g_{j}(\theta_{j})=\eta_{j}$
-the inverse link function $g_{j}^{-1}$ to $\widehat{\eta}_{j}$
-to give $\widehat{\theta}_{j}$.
-
-
-
-
-
-
-
-\begin{table}[tt]
-% Same as Table 14.1 of E-J, and Table 2.6 of Weir 1996
-\begin{center}
-\begin{tabular}{|l|cc|cc|c|c|}
-\hline
-Genotype   & AA  & AO  & BB  &  BO  & AB  &  OO  \\
-Probability&$p^2$&$2pr$&$q^2$&$ 2qr$&$2pq$& $r^2$\\
-Blood group&  A  &  A  &  B  &  B   &  AB &  O \\
-\hline
-\end{tabular}
-\end{center}
-\caption{Probability table for the ABO blood group system.
-Note that $p$ and $q$ are the parameters and $r=1-p-q$.
-\label{tab:ABO}
-}
-\end{table}
-
-
-
-
-
-\subsection{Three main distributions}
-\label{sec:jsscat.3maindist}
-
-\cite{agre:2002} discusses three main distributions for categorical
-variables: binomial, multinomial, and Poisson
-\citep{thom:2009}.
-All these are well-represented in the \VGAM{} package,
-accompanied by variant forms.
-For example,
-there is a
-\VGAM{} family function named \texttt{mbinomial()}
-which implements a
-matched binomial (suitable for matched case-control studies);
-other examples include Poisson ordination
-(useful in ecology for multi-species environmental data),
-negative binomial families,
-positive, zero-altered and zero-inflated variants,
-and the bivariate odds ratio model
-\citep[\texttt{binom2.or()}; see Section 6.5.6 of][]{mccu:neld:1989}.
-The latter has an \texttt{exchangeable} argument to allow for an
-exchangeable error structure:
-\begin{eqnarray}
-\bH_1  = 
-\left( \begin{array}{cc}
-1 & 0 \\
-1 & 0 \\
-0 & 1 \\
-\end{array} \right), \qquad
-\bH_k  = 
-\left( \begin{array}{cc}
-1 \\
-1 \\
-0 \\
-\end{array} \right), \quad k=2,\ldots,p,
-\label{eqn:blom.exchangeable}
-\end{eqnarray}
-since, for data $(Y_1,Y_2,\bix)$,
-$\logit \, P\!\left( Y_{j} = 1 \Big{|} \bix \right) = 
-\eta_{j}$ for ${j}=1,2$, and
-$\log \, \psi = \eta_{3}$
-where $\psi$ is the odds ratio,
-and so $\eta_{1}=\eta_{2}$.
-Here, \texttt{binom2.or()} has \texttt{zero = 3} by default, meaning $\psi$ is
-modelled as intercept-only
-(in general, \texttt{zero} may be assigned an integer vector
-such that the value $j$ means $\eta_{j} = \beta_{(j)1}$,
-i.e., the $j$th linear/additive predictor is an intercept-only).
-See the online help for all of these models.
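-
-As a sketch, an exchangeable bivariate odds ratio model for the coal
-miners data might be fitted as below (assuming the response columns
-of \texttt{coalminers} hold the four counts in the order
-\texttt{nBnW}, \texttt{nBW}, \texttt{BnW}, \texttt{BW}):
-<<eval=FALSE>>=
-fit.cm <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age,
-               binom2.or(exchangeable = TRUE, zero = 3),
-               data = coalminers)
-coef(fit.cm, matrix = TRUE)  # Columns 1 and 2 equal; eta_3 intercept-only
-@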
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some user-oriented topics}
-\label{sec:jsscat.userTopics}
-
-
-Making the most of \VGAM{} requires an understanding of the general
-VGLM/VGAM framework described in Section \ref{sec:jsscat.VGLMVGAMoverview}.
-In this section we connect elements of that framework with the software.
-Before doing so it is noted that
-a fitted \VGAM{} categorical model has access to the usual
-generic functions, e.g.,
-\texttt{coef()} for
-$\left(\widehat{\bbeta}_{(1)}^{*T},\ldots,\widehat{\bbeta}_{(p)}^{*T}\right)^{\top}$
-(see Equation \ref{eqn:lin.coefs4}),
-\texttt{constraints()} for $\bH_k$,
-\texttt{deviance()} for $2\left(\ell_{\mathrm{max}} - \ell\right)$,
-\texttt{fitted()} for $\widehat{\bmu}_i$,
-\texttt{logLik()} for $\ell$,
-\texttt{predict()} for $\widehat{\boldeta}_i$,
-\texttt{print()},
-\texttt{residuals(..., type = "response")} for $\biy_i - \widehat{\bmu}_i$ etc.,
-\texttt{summary()},
-\texttt{vcov()} for $\widehat{\Var}(\widehat{\bbeta})$,
-etc.
-The methods function for the extractor function
-\texttt{coef()} has an argument \texttt{matrix}
-which, when set \texttt{TRUE}, returns $\widehat{\bB}$
-(see Equation \ref{gammod}) as a $p \times M$ matrix,
-and this is particularly useful for confirming that a fit
-has made a parallelism assumption.
-
-
-
-
-
-
-
-\subsection{Common arguments}
-\label{sec:jsscat.commonArgs}
-
-
-The structure of the unified framework given in
-Section \ref{sec:jsscat.VGLMVGAMoverview}
-appears clearly through
-the pool of common arguments
-shared by the
-\VGAM{} family functions in Table \ref{tab:cat.quantities}.
-In particular,
-\texttt{reverse} and
-\texttt{parallel}
-are prominent with CDA.
-These are merely convenient shortcuts for the argument \texttt{constraints},
-which accepts a named list of constraint matrices $\bH_k$.
-For example, setting
-\texttt{cumulative(parallel = TRUE)} would constrain the coefficients $\beta_{(j)k}$
-in (\ref{gammod2}) to be equal for all $j=1,\ldots,M$,
-each separately for $k=2,\ldots,p$.
-That is, $\bH_k = \bone_M$.
-The argument \texttt{reverse} determines the `direction' of
-the parameter or quantity.
-
-Another argument not so much used with CDA is \texttt{zero};
-this accepts a vector specifying which $\eta_j$ is to be modelled as
-an intercept-only; assigning a \texttt{NULL} means none.
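-
-That these arguments are indeed shortcuts can be checked by supplying
-the constraint matrices directly (a sketch on the pneumoconiosis
-data):
-<<eval=FALSE>>=
-fit.a <- vglm(cbind(normal, mild, severe) ~ let,
-              cumulative(parallel = TRUE), data = pneumo)
-clist <- list("(Intercept)" = diag(2), "let" = rbind(1, 1))  # H_k = 1_M
-fit.b <- vglm(cbind(normal, mild, severe) ~ let, cumulative,
-              constraints = clist, data = pneumo)
-max(abs(coef(fit.a) - coef(fit.b)))  # Should be essentially zero
-@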
-
-
-
-
-
-
-
-
-\subsection{Link functions}
-\label{sec:jsscat.links}
-
-Almost all \VGAM{} family functions
-(one notable exception is \texttt{multinomial()})
-allow, in theory, for any link function to be assigned to each $\eta_j$.
-This provides maximum capability.
-If so then there is an extra argument to pass in any known parameter
-associated with the link function.
-For example, \texttt{link = "logoff", earg = list(offset = 1)}
-signifies a log link with a unit offset:
-$\eta_{j} = \log(\theta_{j} + 1)$ for some parameter $\theta_{j}\ (> -1)$.
-The name \texttt{earg} stands for ``extra argument''.
-Table \ref{tab:jsscat.links} lists some links relevant to categorical data.
-While the default gives a reasonable first choice,
-users are encouraged to try different links.
-For example, fitting a binary regression model
-(\texttt{binomialff()}) to the coal miners data set \texttt{coalminers} with
-respect to the response wheeze gives a
-nonsignificant regression coefficient for $\beta_{(1)3}$ with probit analysis
-but not with a logit link when
-$\eta = \beta_{(1)1} + \beta_{(1)2} \, \mathrm{age} + \beta_{(1)3} \, \mathrm{age}^2$.
-Developers and serious users are encouraged to write and use
-new link functions compatible with \VGAM.
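-
-As a small sketch of trying different links, again on the
-pneumoconiosis data:
-<<eval=FALSE>>=
-fit.lgt <- vglm(cbind(normal, mild, severe) ~ let,
-                cumulative(parallel = TRUE), data = pneumo)
-fit.pbt <- vglm(cbind(normal, mild, severe) ~ let,
-                cumulative(link = "probit", parallel = TRUE), data = pneumo)
-c(logLik(fit.lgt), logLik(fit.pbt))  # Compare the two links
-@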
-
-
-
-
-
-
-\begin{table*}[tt]
-\centering
-\medskip
-\begin{tabular}{|l|c|c|}
-\hline
-Link function & $g(\theta)$ & Range of $\theta$ \\
-\hline
-\texttt{cauchit()} & $\tan(\pi(\theta-\frac12))$ & $(0,1)$ \\
-\texttt{cloglog()} & $\log_e\{-\log_e(1 - \theta)\}$ & $(0,1)$ \\
-\texttt{fisherz()} & 
-$\frac12\,\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\texttt{identity()} & $\theta$ & $(-\infty,\infty)$ \\
-\texttt{logc()} & $\log_e(1 - \theta)$ & $(-\infty,1)$ \\
-\texttt{loge()} & $\log_e(\theta)$ & $(0,\infty)$ \\
-\texttt{logit()} & $\log_e(\theta/(1 - \theta))$ & $(0,1)$ \\
-\texttt{logoff()} & $\log_e(\theta + A)$ & $(-A,\infty)$ \\
-\texttt{probit()} & $\Phi^{-1}(\theta)$ & $(0,1)$ \\
-\texttt{rhobit()} & $\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\hline
-\end{tabular}
-\caption{
-Some \VGAM{} link functions pertinent to this article.
-\label{tab:jsscat.links}
-}
-\end{table*}
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Examples}
-\label{sec:jsscat.eg}
-
-This section illustrates CDA modeling on three
-data sets in order to give a flavour of what is available in the package.
-
-
-
-
-%20130919
-%Note: 
-%\subsection{2008 World Fly Fishing Championships}
-%\label{sec:jsscat.eg.WFFC}
-%are deleted since there are problems with accessing the \texttt{wffc.nc}
-%data etc. since they are now in \pkg{VGAMdata}.
-
-
-
-
-
-
-
-\subsection{Marital status data}
-\label{sec:jsscat.eg.mstatus}
-
-We fit a nonparametric multinomial logit model to data collected from
-a self-administered questionnaire administered in a large New Zealand
-workforce observational study conducted during 1992--3.
-The data were augmented by a second study consisting of retirees.
-For homogeneity, this analysis is restricted
-to a subset of 6053 European males with no missing values.
-The ages ranged between 16 and 88 years.
-The data can be considered a reasonable representation of the white
-male New Zealand population in the early 1990s, and
-are detailed in \cite{macm:etal:1995} and \cite{yee:wild:1996}.
-We are interested in exploring how $Y=$ marital status varies as a function
-of $x_2=$ age. The nominal response $Y$ has four levels;
-in sorted order, they are divorced or separated, married or partnered,
-single and widower.
-We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
-and will choose the married/partnered (second level) as the reference group
-because the other levels emanate directly from it.
-
-Suppose the data is in a data frame called \texttt{marital.nz}
-and looks like
-<<>>=
-head(marital.nz, 4)
-summary(marital.nz)
-@
-We fit the VGAM
-<<>>=
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
-               data = marital.nz)
-@
-
-Once again let's firstly check the input.
-<<>>=
-head(depvar(fit.ms), 4)
-colSums(depvar(fit.ms))
-@
-This seems okay.
-
-
-
-
-Now the estimated component functions $\widehat{f}_{(s)2}(x_2)$
-may be plotted with
-<<fig=F>>=
-# Plot output
-mycol <- c("red", "darkgreen", "blue")
-par(mfrow = c(2, 2))
-plot(fit.ms, se = TRUE, scale = 12,
-         lcol = mycol, scol = mycol)
-
-# Plot output overlaid
-#par(mfrow=c(1,1))
-plot(fit.ms, se = TRUE, scale = 12,
-         overlay = TRUE,
-         llwd = 2,
-         lcol = mycol, scol = mycol)
-@
-to produce Figure \ref{fig:jsscat.eg.mstatus}.
-The \texttt{scale} argument is used here to ensure that the $y$-axes have
-a common scale---this makes comparisons between the component functions
-less susceptible to misinterpretation.
-The first three plots are the (centered) $\widehat{f}_{(s)2}(x_2)$ for
-$\eta_1$,
-$\eta_2$,
-$\eta_3$,
-where
-\begin{eqnarray}
-\label{eq:jsscat.eg.nzms.cf}
-\eta_{s}  = 
-\log(\pr(Y={t}) / \pr(Y={2}))  = 
-\beta_{(s)1} + f_{(s)2}(x_2),
-\end{eqnarray}
-$(s,t) = (1,1), (2,3), (3,4)$,
-and $x_2$ is \texttt{age}.
-The last plot shows the smooths overlaid to aid comparison.
-
-
-It may be seen that the $\pm 2$ standard error bands
-about the \texttt{Widowed} group are particularly wide at
-young ages because of a paucity of data, and
-likewise at old ages amongst the \texttt{Single}s.
-The $\widehat{f}_{(s)2}(x_2)$ appear as one would expect.
-The log relative risk of
-being single relative to being married/partnered drops sharply from
-ages 16 to 40.
-The fitted function for the \texttt{Widowed} group increases
-with \texttt{age} and looks reasonably linear.
-The $\widehat{f}_{(1)2}(x_2)$
-suggests a possible maximum around 50 years old---this
-could indicate the greatest marital conflict occurs during
-the mid-life crisis years!
-
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-mycol <- c("red", "darkgreen", "blue")
- par(mfrow = c(2, 2))
- par(mar = c(4.2, 4.0, 1.2, 2.2) + 0.1)
-plot(fit.ms, se = TRUE, scale = 12,
-         lcol = mycol, scol = mycol)
-
-# Plot output overlaid
-#par(mfrow = c(1, 1))
-plot(fit.ms, se = TRUE, scale = 12,
-         overlay = TRUE,
-         llwd = 2,
-         lcol = mycol, scol = mycol)
-@
-\caption{
-Fitted (and centered) component functions
-$\widehat{f}_{(s)2}(x_2)$
-from the NZ marital status data
-(see Equation \ref{eq:jsscat.eg.nzms.cf}).
-The bottom RHS plot shows the smooths overlaid.
-\label{fig:jsscat.eg.mstatus}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-The methods function for \texttt{plot()} can also plot the
-derivatives of the smooths.
-The call
-<<fig=F>>=
-plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
-@
-results in Figure \ref{fig:jsscat.eg.mstatus.cf.deriv}.
-Once again the $y$-axis scales are commensurate.
-
-\setkeys{Gin}{width=\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=7.2,height=2.4,echo=FALSE>>=
-# Plot output
- par(mfrow = c(1, 3))
- par(mar = c(4.5, 4.0, 0.2, 2.2) + 0.1)
-plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
-@
-\caption{
-Estimated first derivatives of the component functions,
-$\widehat{f'}_{(s)2}(x_2)$,
-from the NZ marital status data
-(see Equation \ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.cf.deriv}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-The derivative for the \texttt{Divorced/Separated} group appears
-linear so that a quadratic component function could be tried.
-Not surprisingly the \texttt{Single} group shows the greatest change;
-also, $\widehat{f'}_{(2)2}(x_2)$ is approximately linear till 50
-and then flat---this suggests one could fit a piecewise quadratic
-function to model that component function up to 50 years.
-The \texttt{Widowed} group appears largely flat.
-We thus fit the parametric model
-<<>>=
-foo <- function(x, elbow = 50)
-  poly(pmin(x, elbow), 2)
-
-clist <- list("(Intercept)" = diag(3),
-             "poly(age, 2)" = rbind(1, 0, 0),
-             "foo(age)"     = rbind(0, 1, 0),
-             "age"          = rbind(0, 0, 1))
-fit2.ms <-
-    vglm(mstatus ~ poly(age, 2) + foo(age) + age,
-         family = multinomial(refLevel = 2),
-         constraints = clist,
-         data = marital.nz)
-@
-Then
-<<>>=
-coef(fit2.ms, matrix = TRUE)
-@
-confirms that one term was used for each component function.
-The plots from
-<<fig=F>>=
-par(mfrow = c(2, 2))
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-are given in Figure \ref{fig:jsscat.eg.mstatus.vglm}
-and appear similar to
-Figure \ref{fig:jsscat.eg.mstatus}.
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-par(mfrow=c(2,2))
- par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-\caption{
-Parametric version of \texttt{fit.ms}: \texttt{fit2.ms}.
-The component functions are now quadratic, piecewise quadratic/zero,
-or linear.
-\label{fig:jsscat.eg.mstatus.vglm}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-It is possible to perform very crude inference based on the heuristic
-theory of a deviance test:
-<<>>=
-deviance(fit.ms) - deviance(fit2.ms)
-@
-is small, so the parametric model seems quite reasonable relative to
-the original nonparametric model.
-Specifically,
-the difference in the number of `parameters' is approximately
-<<>>=
-(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
-@
-which gives an approximate $p$ value of
-<<>>=
-pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
-@
-Thus \texttt{fit2.ms} appears quite reasonable.
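-
-An informal alternative is to compare information criteria
-(a sketch; the \texttt{AIC()} methods for these two object classes
-are assumed here):
-\begin{Code}
-AIC(fit.ms)   # Nonparametric fit
-AIC(fit2.ms)  # Parametric fit; smaller is preferable
-\end{Code}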
-
-
-
-
-
-
-
-
-The estimated probabilities of the original fit can be plotted
-against \texttt{age} using
-<<fig=F>>=
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
-     type = "l", las = 1, lwd = 2, ylim = 0:1,
-     ylab = "Fitted probabilities",
-     xlab = "Age",  # main="Marital status amongst NZ Male Europeans",
-     col = c(mycol[1], "black", mycol[-1])))
-legend(x = 52.5, y = 0.62,  # x="topright",
-       col = c(mycol[1], "black", mycol[-1]),
-       lty = 1:4,
-       legend = colnames(fit.ms@y), lwd = 2)
-abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
-@
-which gives Figure \ref{fig:jsscat.eg.mstatus.fitted}.
-This shows that some 80--90\% of NZ white males
-aged from their early 30s to their mid-70s
-were married/partnered.
-The proportion widowed
-started to rise steeply from 70 years onwards but remained below 0.5
-since males die younger than females on average.
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
- par(mfrow = c(1,1))
- par(mar = c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
-     type = "l", las = 1, lwd = 2, ylim = 0:1,
-     ylab = "Fitted probabilities",
-     xlab = "Age",
-     col = c(mycol[1], "black", mycol[-1])))
-legend(x = 52.5, y = 0.62,
-       col = c(mycol[1], "black", mycol[-1]),
-       lty = 1:4,
-       legend = colnames(fit.ms@y), lwd = 2.1)
-abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
-@
-\caption{
-Fitted probabilities for each class for the
-NZ male European
-marital status data
-(from Equation \ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.fitted}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-\subsection{Stereotype model}
-\label{sec:jsscat.eg.grc.stereotype}
-
-We reproduce some of the analyses of \cite{ande:1984} regarding the
-progress of 101 patients with back pain
-using the data frame \texttt{backPain} from \pkg{gnm}
-\citep{Rnews:Turner+Firth:2007,Turner+Firth:2009}.
-The three prognostic variables are
-length of previous attack ($x_1=1,2$),
-pain change ($x_2=1,2,3$) 
-and lordosis ($x_3=1,2$).
-Like Anderson, we treat these as numerical and standardize and negate them.
-%
-The output
-<<>>=
-# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
-head(backPain, 4)
-summary(backPain)
-backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
-@
-displays the six ordered categories.
-Now a rank-1 stereotype model can be fitted with
-<<>>=
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
-@
-Then
-<<>>=
-Coef(bp.rrmlm1)
-@
-are the fitted \bA, \bC{} and $\bB_1$ (see Equation \ref{eq:rrr.BAC} and
-Table \ref{tab:rrvglam.jss.subset}), which agree with his Table 6.
-Here, what are known as ``corner constraints'' are used
-(the $(1,1)$ element of \bA{} $\equiv 1$),
-and only the intercepts are not subject to any reduced-rank regression
-by default.
-The maximized log-likelihood from \textsl{\texttt{logLik(bp.rrmlm1)}}
-is $\Sexpr{round(logLik(bp.rrmlm1), 2)}$.
-The standard errors of each parameter can be obtained by
-\textsl{\texttt{summary(bp.rrmlm1)}}.
-The negative elements of $\widehat{\bC}$ imply the
-latent variable $\widehat{\nu}$ decreases in value with increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}}.
-The elements of $\widehat{\bA}$ tend to decrease, which suggests
-patients get worse as $\nu$ increases,
-i.e., get better as \textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} increase.
-
-
-
-
-
-
-<<echo=FALSE>>=
-set.seed(123)
-@
-A rank-2 model fitted \textit{with a different normalization}
-<<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
-                   Corner = FALSE, Uncor = TRUE)
-@
-produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
-In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
-so that the latent variables are also scaled to have unit variance.
-The fit was biplotted
-(rows of $\widehat{\bC}$ plotted as arrows;
- rows of $\widehat{\bA}$ plotted as labels) using
-<<fig=F>>=
-biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
-#      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
-       xlim = c(-4.5, 2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
-       chull = TRUE, clty = 2, ccol = "blue")
-@
-to give Figure \ref{fig:jsscat.eg.rrmlm2.backPain}.
-It is interpreted via inner products due to (\ref{eq:rrr.BAC}).
-The different normalization means that the interpretation of $\nu_1$
-and $\nu_2$ has changed, e.g., increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} results in increasing $\widehat{\nu}_1$ and
-patients improve more.
-Many of the latent variable points $\widehat{\bnu}_i$ are coincident
-due to the discrete nature of the $\bix_i$. The rows of $\widehat{\bA}$
-are centered on the blue labels (rather cluttered unfortunately) and
-do not seem to vary much as a function of $\nu_2$.
-In fact this is confirmed by \cite{ande:1984} who showed a rank-1
-model is to be preferred.
-
-
-
-This example demonstrates the ability to obtain a low dimensional view
-of higher dimensional data. The package's website has additional
-documentation, including more detailed Goodman's RC and stereotype model
-examples.
-
-
-
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.3,echo=FALSE>>=
-# Plot output
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-
-biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
-#      xlim = c(-1,6), ylim = c(-1.2,4),  # Use this if not scaled
-       xlim = c(-4.5,2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
-       chull = TRUE, clty = 2, ccol = "blue")
-@
-\caption{
-Biplot of a rank-2 reduced-rank multinomial logit (stereotype) model
-fitted to the back pain data.
-A convex hull surrounds the latent variable scores
-$\widehat{\bnu}_i$
-(whose observation numbers are obscured because of their discrete nature).
-The position of the $j$th row of $\widehat{\bA}$
-is the center of the label ``\texttt{log(mu[,j]/mu[,6])}''.
-\label{fig:jsscat.eg.rrmlm2.backPain}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some implementation details}
-\label{sec:jsscat.implementDetails}
-
-This section describes some implementation details of \VGAM{}
-which will be more of interest to the developer than to the casual user.
-
-
-
-\subsection{Common code}
-\label{sec:jsscat.implementDetails.code}
-
-It is good programming practice to write reusable code where possible.
-All the \VGAM{} family functions in Table \ref{tab:cat.quantities}
-process the response in the same way because the same segment of code
-is executed. This offers a degree of uniformity in terms of how input is
-handled, and also for software maintenance
-(\cite{altm:jack:2010} enumerates good programming techniques and references).
-As well, the default initial values are computed in the same manner
-based on sample proportions of each level of $Y$.
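-
-As a rough sketch of that initialization idea (an illustration only,
-not the actual internal code), the sample proportions for
-\texttt{fit.ms} could be computed as
-\begin{Code}
-# Sample proportions of each level of Y (illustrative sketch)
-init.probs <- colSums(depvar(fit.ms)) / sum(depvar(fit.ms))
-init.probs  # One value per marital status level
-\end{Code}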
-
-
-
-
-
-\subsection[Matrix-band format of wz]{Matrix-band format of \texttt{wz}}
-\label{sec:jsscat.implementDetails.mbformat}
-
-The working weight matrices $\bW_i$ may become large for categorical
-regression models. In general, we have to evaluate the $\bW_i$
-for $i=1,\ldots,n$, and naively, this could be held in an \texttt{array} of
-dimension \texttt{c(M, M, n)}. However, since the $\bW_i$ are symmetric
-positive-definite, it suffices to store only the upper or lower half of
-the matrix.
-
-
-
-The variable \texttt{wz} in \texttt{vglm.fit()}
-stores the working weight matrices $\bW_i$ in 
-a special format called the \textit{matrix-band} format. This
-format comprises an $n \times M^*$ matrix where
-\[
-M^*  =  \sum_{i=1}^{\footnotesize \textit{hbw}} \;
-\left(M-i+1\right)  =  
-\frac12 \, \textit{hbw}\, \left(2\,M - \textit{hbw} +1\right)
-\]
-is the number of columns. Here, \textit{hbw} refers to the
-\textit{half-bandwidth} of the matrix, which is an integer
-between 1 and $M$ inclusive. A diagonal matrix has
-unit half-bandwidth, a tridiagonal matrix has half-bandwidth 2, etc.
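-
-As a small numerical check of the formula above, consider the
-following sketch (\texttt{M.star()} is a hypothetical helper,
-not part of \VGAM{}):
-\begin{Code}
-M.star <- function(M, hbw) hbw * (2 * M - hbw + 1) / 2
-M.star(M = 4, hbw = 1)  #  4 columns: diagonal W_i
-M.star(M = 4, hbw = 2)  #  7 columns: tridiagonal W_i
-M.star(M = 4, hbw = 4)  # 10 columns: dense W_i, i.e., M(M+1)/2
-\end{Code}
-These counts agree with the $M=4$ example below.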
-
-
-Suppose $M=4$. Then \texttt{wz} will have up to $M^*=10$ columns
-enumerating the unique elements of $\bW_i$ as follows:
-\begin{eqnarray}
-\bW_i  =  
-\left( \begin{array}{rrrr}
-1 & 5 & 8 & 10 \\
-  & 2 & 6 & 9 \\
-  &   & 3 & 7 \\
-  &   &   & 4 
-\end{array} \right).
-\label{eqn:hbw.eg}
-\end{eqnarray}
-That is, the order is firstly the diagonal, then the band above that,
-followed by the second band above the diagonal, and so on.
-Why is such a format adopted? 
-For this example, if $\bW_i$ is diagonal then only the first 4 columns
-of \texttt{wz} are needed. If $\bW_i$ is tridiagonal then only the
-first 7 columns of \texttt{wz} are needed. 
-If $\bW_i$ \textit{is} banded then \texttt{wz} need not have
-all $\frac12 M(M+1)$ columns; only $M^*$ columns suffice, and the
-rest of the elements of $\bW_i$ are implicitly zero.
-As well as reducing the size of \texttt{wz} itself in most cases, the
-matrix-band format often makes the computation of \texttt{wz} very
-simple and efficient. Furthermore, a Cholesky decomposition of a
-banded matrix will be banded. A final reason is that sometimes we
-want to input $\bW_i$ into \VGAM: if \texttt{wz} is $M \times M \times
-n$ then \texttt{vglm(\ldots, weights = wz)} will result in an error
-whereas it will work if \texttt{wz} is an $n \times M^*$ matrix.
-
-
-
-To facilitate the use of the matrix-band format,
-a few auxiliary functions have been written.
-In particular, there is \texttt{iam()}, which gives the indices
-for the array-to-matrix-band mapping.
-In the $4\times 4$ example above,
-<<>>=
-iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
-@
-returns the indices for the respective array coordinates for
-successive columns of matrix-band format
-(see Equation \ref{eqn:hbw.eg}).
-If \texttt{diag = FALSE} then the first 4 elements in each vector
-are omitted. Note that the first two arguments of 
-\texttt{iam()} are not used here and have been assigned
-\texttt{NA}s for simplicity.
-For its use on the multinomial logit model, where
-$(\bW_i)_{jj} = w_i\,\mu_{ij} (1-\mu_{ij}),\ j=1,\ldots,M$, and 
-$(\bW_i)_{jk} = -w_i\,\mu_{ij} \mu_{ik},\ j\neq k$,
-this can be programmed succinctly as
-\begin{Code}
-wz <- mu[, 1:M] * (1 - mu[, 1:M])  # Diagonal elements (W_i)_{jj}
-if (M > 1) {
-  index <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
-  wz <- cbind(wz, -mu[, index$row] * mu[, index$col])  # Off-diagonal bands
-}
-wz <- w * wz  # Scale by the prior weights w_i
-\end{Code}
-(the actual code is slightly more complicated).
-In general, \VGAM{} family functions can be remarkably compact,
-e.g.,
-\texttt{acat()},
-\texttt{cratio()}
-and
-\texttt{multinomial()} are all less than 120 lines of code each.
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Extensions and utilities}
-\label{sec:jsscat.extnUtil}
-
-This section describes some useful utilities/extensions of the above.
-
-
-
-\subsection{Marginal effects}
-\label{sec:jsscat.extnUtil.margeff}
-
-
-Models such as the multinomial logit and cumulative link models
-specify the posterior probability $p_{j} = \pr(Y=j|\bix)$ directly.
-In some applications, knowing the derivative of $p_{j}$
-with respect to some of the $x_k$ is useful;
-in fact, often just knowing the sign is important.
-The function \texttt{margeff()} computes the derivatives and
-returns them as a $p \times (M+1) \times n$ array.
-For the multinomial logit model it is easy to show
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix_i)}{\partial \,
-\bix_{i}}
-&=&
-p_{j}(\bix_i)
-\left\{
- \bbeta_{j} -
-\sum_{s=1}^{M+1}
-p_{s}(\bix_i)
-\,
- \bbeta_{s}
-\right\},
-\label{eqn:multinomial.marginalEffects}
-\end{eqnarray}
-while for
-\texttt{cumulative(reverse = FALSE)}
-we have
-$p_{j} = \gamma_{j} - \gamma_{j-1} = h(\eta_{j}) - h(\eta_{j-1})$
-where $h=g^{-1}$ is the inverse of the link function
-(cf. Table \ref{tab:cat.quantities})
-so that
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix)}{\partial \,
-\bix}
-&=&
-h'(\eta_{j}) \, \bbeta_{j} -
-h'(\eta_{j-1}) \, \bbeta_{j-1} .
-\label{eqn:cumulative.marginalEffects}
-\end{eqnarray}
-
-
-
-
-The function \texttt{margeff()} returns an array with these
-derivatives and should handle any value of
-\texttt{reverse} and \texttt{parallel}.
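-
-For instance, a usage sketch on a simple linear-terms fit to the
-marital status data of Section \ref{sec:jsscat.eg.mstatus}
-(\texttt{fit.lin} is a new illustrative fit, not one from above) is
-\begin{Code}
-fit.lin <- vglm(mstatus ~ age, multinomial(refLevel = 2),
-                data = marital.nz)
-mar.eff <- margeff(fit.lin)  # A p x (M+1) x n array
-dim(mar.eff)                 # Here p = 2: intercept and age
-mar.eff[, , 1]               # Marginal effects for the first person
-\end{Code}
-where, as noted above, often just the signs are of interest.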
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\subsection[The xij argument]{The \texttt{xij} argument}
-\label{sec:jsscat.extnUtil.xij}
-
-There are many models, including those for categorical data,
-where the value of an explanatory variable $x_k$ differs depending
-on which linear/additive predictor $\eta_{j}$ it appears in.
-Here is a well-known example from {consumer choice} modeling.
-Suppose an econometrician is interested in people's
-choice of transport for travelling to work
-and that there are four choices:
-$Y=1$ for ``bus'',
-$Y=2$ for ``train'',
-$Y=3$ for ``car'' and
-$Y=4$ for ``walking''.
-Assume that people choose only one means to go to work.
-Suppose there are three covariates:
-$X_2=$ cost,
-$X_3=$ journey time, and
-$X_4=$ distance.
-Of the covariates only $X_4$ (and the intercept $X_1$)
-is the same for all transport choices;
-the cost and journey time differ according to the means chosen.
-Suppose a random sample of $n$ people is collected
-from some population, and that each person has
-access to all these transport modes.
-For such data, a natural regression model would be a 
-multinomial logit model with $M=3$:
-for $j=1,\ldots,M$, we have
-\begin{eqnarray}
-\eta_{j}  =  \log \frac{\pr(Y=j)}{\pr(Y=M+1)}
-&=&
-\beta_{(j)1}^{*} +
-\beta_{(1)2}^{*} \, (x_{i2j}-x_{i24}) +
-\beta_{(1)3}^{*} \, (x_{i3j}-x_{i34}) +
-\beta_{(1)4}^{*} \, x_{i4},
-\label{eqn:xij.eg.gotowork}
-\end{eqnarray}
-where, for the $i$th person,
-$x_{i2j}$ is the cost for the $j$th transport means, and
-$x_{i3j}$ is the journey time of the $j$th transport means.
-The distance to get to work is $x_{i4}$; it has the same value
-regardless of the transport means.
-
-
-Equation \ref{eqn:xij.eg.gotowork}
-implies $\bH_1=\bI_3$ and $\bH_2=\bH_3=\bH_4=\bone_3$.
-Note
-also that if the last response category is used as the baseline or
-reference group (the default of \texttt{multinomial()}) then $x_{ik,M+1}$
-can be subtracted from $x_{ikj}$ for $j=1,\ldots,M$---this
-is the natural way $x_{ik,M+1}$ enters into the model.
-
-
-
-
-Recall from (\ref{gammod2}) that we had
-\begin{equation}
-\eta_j(\bix_i)  =  \bbeta_j^{\top} \bix_i  = 
-\sum_{k=1}^{p} \, x_{ik} \, \beta_{(j)k} .
-\label{eqn:xij0}
-\end{equation}
-Importantly, this can be generalized to
-\begin{equation}
-\eta_j(\bix_{ij})  =  \bbeta_j^{\top} \bix_{ij}  = 
-\sum_{k=1}^{p} \, x_{ikj} \, \beta_{(j)k} ,
-\label{eqn:xij}
-\end{equation}
-or writing this another way (as a mixture or hybrid),
-\begin{equation}
-\eta_j(\bix_{i}^{*},\bix_{ij}^{*})  =  
-\bbeta_{j}^{*\top} \bix_{i}^{*} + \bbeta_{j}^{**\top} \bix_{ij}^{*} .
-\label{eqn:xij2}
-\end{equation}
-Often $\bbeta_{j}^{**} = \bbeta^{**}$, say.
-In (\ref{eqn:xij2}) the variables in $\bix_{i}^{*}$ are common to
-all $\eta_{j}$, and the variables in $\bix_{ij}^{*}$ have
-different values for differing $\eta_{j}$.
-This allows for covariate values that are specific to each $\eta_j$,
-a facility which is very important in many applications.
-
-
-The use of the \texttt{xij} argument with the \VGAM{} family function
-\texttt{multinomial()} has very important applications in economics.
-In that field the term ``multinomial logit model'' includes a variety of
-models such as the ``generalized logit model'' where (\ref{eqn:xij0})
-holds, the ``conditional logit model'' where (\ref{eqn:xij}) holds,
-and the ``mixed logit model,'' which is a combination of the two,
-where (\ref{eqn:xij2}) holds.
-The generalized logit model focusses on the individual as the unit of
-analysis, and uses individual characteristics as explanatory variables,
-e.g., age of the person in the transport example.
-The conditional logit model uses covariates whose values differ for each
-alternative, and the impact of a unit change in $x_k$ is assumed constant
-across alternatives, e.g., journey time in the choice of transport mode.
-Unfortunately, there is confusion in the literature over the terminology
-of the models. Some authors call \texttt{multinomial()}
-with (\ref{eqn:xij0}) the ``generalized logit model''.
-Others call the mixed
-logit model the ``multinomial logit model'' and view the generalized
-logit and conditional logit models as special cases.
-In \VGAM{} terminology there is no need to give different names to
-all these slightly differing special cases. They are all still called
-multinomial logit models, although it may be added that there are
-some covariate-specific linear/additive predictors.
-The important thing is that the framework accommodates $\bix_{ij}$,
-so one tries to avoid making life unnecessarily complicated.
-And \texttt{xij} can apply in theory to any VGLM and not just to the
-multinomial logit model.
-\cite{imai:king:lau:2008} present another perspective on the
-$\bix_{ij}$ problem with illustrations from \pkg{Zelig}
-\citep{Zelig:2009}.
-
-
-
-
-
-\subsubsection[Using the xij argument]{Using the \texttt{xij} argument}
-\label{sec:xij.sub}
-
-\VGAM{} handles variables whose values depend on $\eta_{j}$,
-as in (\ref{eqn:xij2}), using the \texttt{xij} argument.
-It is assigned an \proglang{S} formula or a list of \proglang{S} formulas.
-Each formula, which must have $M$ \textit{different} terms,
-forms a matrix that premultiplies a constraint matrix.
-In detail, (\ref{eqn:xij0}) can be written in vector form as
-\begin{equation}
-\boldeta(\bix_i)  =  \bB^{\top} \bix_i  = 
-\sum_{k=1}^{p} \, \bH_{k} \, \bbeta_{k}^{*} \, x_{ik},
-\label{eqn:xij0.vector}
-\end{equation}
-where
-$\bbeta_{k}^{*} =
-\left( \beta_{(1)k}^{*},\ldots,\beta_{(r_k)k}^{*} \right)^{\top}$
-is to be estimated.
-This may be written
-\begin{eqnarray}
-\boldeta(\bix_{i})
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik},\ldots,x_{ik}) \,
-\bH_k \, \bbeta_{k}^{*}.
-\label{eqn:xij.d.vector}
-\end{eqnarray}
-To handle (\ref{eqn:xij})--(\ref{eqn:xij2})
-we can generalize (\ref{eqn:xij.d.vector}) to
-\begin{eqnarray}
-\boldeta_i
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik1},\ldots,x_{ikM}) \;
-\bH_k \, \bbeta_{k}^{*}
-\ \ \ \ \left(=
-\sum_{k=1}^{p} \, \bX_{(ik)}^{*} \,
-\bH_k \, \bbeta_{k}^{*} ,
-\mathrm{\ say} \right).
-\label{eqn:xij.vector}
-\end{eqnarray}
-Each component of the list \texttt{xij} is a formula having $M$ terms
-(ignoring the intercept) which
-specifies the successive diagonal elements of the matrix $\bX_{(ik)}^{*}$.
-Thus each row of the constraint matrix may be multiplied by a different
-vector of values.
-The constraint matrices themselves are not affected by the
-\texttt{xij} argument.
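-
-As a toy numerical illustration of the $k$th summand of
-(\ref{eqn:xij.vector}) for a single observation $i$ (a sketch with
-made-up values; here $M=3$ and $\bH_k=\bone_3$, i.e., parallelism):
-\begin{Code}
-Hk   <- matrix(1, nrow = 3, ncol = 1)  # H_k = 1_3
-xik  <- c(1.2, 0.8, 2.1)   # x_{ik1}, x_{ik2}, x_{ik3} (made up)
-beta <- 0.5                # beta_k^*; a scalar since ncol(H_k) = 1
-diag(xik) %*% Hk %*% beta  # The kth contribution to eta_i
-\end{Code}
-Each $\eta_j$ thus receives the same coefficient but its own
-covariate value.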
-
-
-
-
-
-How can one fit such models in \VGAM{}?
-Let us fit (\ref{eqn:xij.eg.gotowork}).
-Suppose the journey cost and time variables have had the
-cost and time of walking subtracted from them.
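-
-This preprocessing might be arranged with something like the
-following sketch (the \texttt{Cost.wlk} and \texttt{Time.wlk}
-variable names are assumptions for illustration):
-\begin{Code}
-gotowork <- transform(gotowork,
-  Cost.bus = Cost.bus - Cost.wlk,  # Subtract the baseline (walk) values
-  Cost.trn = Cost.trn - Cost.wlk,
-  Cost.car = Cost.car - Cost.wlk,
-  Time.bus = Time.bus - Time.wlk,
-  Time.trn = Time.trn - Time.wlk,
-  Time.car = Time.car - Time.wlk)
-\end{Code}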
-Then,
-using ``\texttt{.trn}'' to denote train,
-\begin{Code}
-fit2 <- vglm(cbind(bus, train, car, walk) ~ Cost + Time + Distance,
-             fam = multinomial(parallel = TRUE ~ Cost + Time + Distance - 1),
-             xij = list(Cost ~ Cost.bus + Cost.trn + Cost.car,
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~  Cost.bus + Cost.trn + Cost.car +
-                        Time.bus + Time.trn + Time.car +
-                        Cost + Time + Distance,
-             data = gotowork)
-\end{Code}
-should do the job.
-Here, the argument \texttt{form2} is assigned a second \proglang{S} formula which
-is used in some special circumstances or by certain types
-of \VGAM{} family functions.
-The model has $\bH_{1} = \bI_{3}$ and $\bH_{2} = \bH_{3} = \bH_{4} = \bone_{3}$
-because the lack of parallelism applies only to the intercept.
-However, unless \texttt{Cost} is the same as \texttt{Cost.bus} and
-\texttt{Time} is the same as \texttt{Time.bus},
-this model should not be plotted with \texttt{plotvgam()};
-see the author's homepage for further documentation.
-
-
-By the way,
-suppose 
-$\beta_{(1)4}^{*}$
-in (\ref{eqn:xij.eg.gotowork})
-is replaced by $\beta_{(j)4}^{*}$.
-Then the above code but with
-\begin{Code}
-  fam = multinomial(parallel = FALSE ~ 1 + Distance),
-\end{Code}
-should fit this model.
-Equivalently,
-\begin{Code}
-  fam = multinomial(parallel = TRUE ~ Cost + Time - 1),
-\end{Code}
-specifies the same model.
-
-
-
-
-
-
-\subsubsection{A more complicated example}
-\label{sec:xij.complicated}
-
-The above example is straightforward because the
-variables were entered linearly. However, things
-become trickier if data-dependent functions are used in
-any \texttt{xij} terms, e.g., \texttt{bs()}, \texttt{ns()} or \texttt{poly()}.
-In particular, regression splines such as \texttt{bs()} and \texttt{ns()}
-can be used to estimate a general smooth function $f(x_{ij})$, which is
-very useful for exploratory data analysis.
-
-
-
-Suppose we wish to fit the variable \texttt{Cost} with a smoother.
-This is possible using regression splines and a trick.
-Firstly note that
-\begin{Code}
-fit3 <- vglm(cbind(bus, train, car, walk) ~ ns(Cost) + Time + Distance,
-             multinomial(parallel = TRUE ~ ns(Cost) + Time + Distance - 1),
-             xij = list(ns(Cost) ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car),
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~  ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car) +
-                        Time.bus + Time.trn + Time.car +
-                        ns(Cost) + Cost + Time + Distance,
-             data = gotowork)
-\end{Code}
-will \textit{not} work because the basis functions for
-\texttt{ns(Cost.bus)}, \texttt{ns(Cost.trn)} and \texttt{ns(Cost.car)}
-are not identical since the knots differ.
-Consequently, they represent different functions despite
-having common regression coefficients.
-
-
-Fortunately, it is possible to force the \texttt{ns()} terms
-to have identical basis functions by using a trick:
-combine the vectors temporarily.
-To do this, one can let
-\begin{Code}
-NS <- function(x, ..., df = 3)
-      sm.ns(c(x, ...), df = df)[1:length(x), , drop = FALSE]
-\end{Code}
-This computes a natural cubic B-spline evaluated at \texttt{x} but it uses the
-other arguments as well to form an overall vector from which to obtain
-the (common) knots.
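-
-One can check the common-basis property directly with a sketch like
-the following (assuming \texttt{sm.ns()}, like \texttt{ns()}, records
-its knots as an attribute):
-\begin{Code}
-set.seed(1)
-x1 <- runif(6); x2 <- runif(6); x3 <- runif(6)
-# The knots depend only on the combined vector, so any ordering matches:
-identical(attr(sm.ns(c(x1, x2, x3), df = 3), "knots"),
-          attr(sm.ns(c(x2, x3, x1), df = 3), "knots"))  # TRUE
-\end{Code}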
-Then the usage of \texttt{NS()} can be something like
-\begin{Code}
-fit4 <- vglm(cbind(bus, train, car, walk) ~ NS(Cost.bus, Cost.trn, Cost.car)
-                                          + Time + Distance,
-             multinomial(parallel = TRUE ~  NS(Cost.bus, Cost.trn, Cost.car)
-                                          + Time + Distance - 1),
-             xij = list(NS(Cost.bus, Cost.trn, Cost.car) ~
-                        NS(Cost.bus, Cost.trn, Cost.car) +
-                        NS(Cost.trn, Cost.car, Cost.bus) +
-                        NS(Cost.car, Cost.bus, Cost.trn),
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~  NS(Cost.bus, Cost.trn, Cost.car) +
-                        NS(Cost.trn, Cost.car, Cost.bus) +
-                        NS(Cost.car, Cost.bus, Cost.trn) +
-                        Time.bus + Time.trn + Time.car +
-                        Cost.bus + Cost.trn + Cost.car +
-                        Time + Distance,
-             data = gotowork)
-\end{Code}
-So \texttt{NS(Cost.bus, Cost.trn, Cost.car)}
-is the smooth term for
-\texttt{Cost.bus}, etc.
-Furthermore, \texttt{plotvgam()} may be applied to
-\texttt{fit4}, in which case the fitted regression spline is plotted
-against its first inner argument, viz. \texttt{Cost.bus}.
-
-
-One reason why it will also predict correctly
-is ``smart prediction''
-\citep{Rnews:Yee:2008}.
-
-
-
-\subsubsection{Implementation details} 
-\label{sec:jss.xij.implementationDetails} 
-
-The \texttt{xij} argument operates \textit{after} the
-ordinary $\bX_{\sVLM}$ matrix is created. Then selected columns
-of $\bX_{\sVLM}$ are modified using the constraint matrices and the
-\texttt{xij} and \texttt{form2} arguments; that is, using \texttt{form2}'s
-model matrix $\bX_{\sformtwo}$ and the $\bH_k$. This whole operation
-is possible because $\bX_{\sVLM}$ remains structurally the same.
-The crucial equation is (\ref{eqn:xij.vector}).
-
-
-Other \texttt{xij} examples are given in the online help of
-\texttt{fill()} and \texttt{vglm.control()},
-as well as at the package's webpage.
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Discussion}
-\label{sec:jsscat.discussion}
-
-
-This article has sought to convey how VGLMs/VGAMs are well suited for
-fitting regression models for categorical data. Their primary strength
-is a simple and unified framework which, when reflected in software,
-makes practical CDA more understandable and efficient. Furthermore,
-there are natural extensions such as a reduced-rank variant and
-covariate-specific $\eta_{j}$. The \VGAM{} package potentially offers
-a wide selection of models and utilities.
-
-
-There is much future work to do.
-Some useful additions to the package include:
-\begin{enumerate}
-
-\item
-Bias-reduction \citep{firt:1993} is a method for removing the $O(n^{-1})$
-bias from a maximum likelihood estimate. For a substantial class of
-models including GLMs it can be formulated in terms of a minor adjustment
-of the score vector within an IRLS algorithm \citep{kosm:firt:2009}.
-One by-product, for logistic regression, is that while the maximum
-likelihood estimate (MLE) can be infinite, the adjustment leads to
-estimates that are always finite. At present the \R{} package \pkg{brglm}
-\citep{Kosmidis:2008} implements bias-reduction for a number of models.
-Bias-reduction might be implemented by adding an argument
-\texttt{bred = FALSE}, say, to some existing \VGAM{} family functions.
-
-
-\item
-Nested logit models were developed to overcome a fundamental shortcoming
-related to the multinomial logit model, viz. the independence of
-irrelevant alternatives (IIA) assumption. Roughly, the multinomial logit
-model assumes the ratio of the choice probabilities of two alternatives
-is not dependent on the presence or absence of other alternatives in
-the model. This presents problems that are often illustrated by the
-famed red bus-blue bus problem.
-
-
-
-
-\item
-The generalized estimating equations (GEE) methodology is largely
-amenable to IRLS and this should be added to the package in the future
-\citep{wild:yee:1996}.
-
-
-\item
-For logistic regression \proglang{SAS}'s \code{proc logistic} gives
-a warning if the data exhibit {complete separation} or {quasi-complete
-separation}. The effect is that some regression coefficients tend to $\pm
-\infty$. With such data, all (to my knowledge) \R{} implementations
-give warnings that are vague, if any at all, and this is rather
-unacceptable \citep{alli:2004}. The \pkg{safeBinaryRegression} package
-\citep{Konis:2009} overloads \code{glm()} so that a check for the
-existence of the MLE is made before fitting a binary response GLM.
-
-
-\end{enumerate}
-
-
-In closing, the \pkg{VGAM} package is continually being developed;
-therefore some future changes in the implementation details and usage
-may occur. These may include non-backward-compatible changes (see the
-\code{NEWS} file). Further documentation and updates are available at
-the author's homepage whose URL is given in the \code{DESCRIPTION} file.
-
-
-
-% ----------------------------------------------------------------------
-\section*{Acknowledgments}
-
-The author thanks Micah Altman, David Firth and Bill Venables for helpful
-conversations, and Ioannis Kosmidis for a reprint.
-Thanks also to The Institute for Quantitative Social Science at Harvard
-University for their hospitality while this document was written during a
-sabbatical visit.
-
-
-
-
-
-\bibliography{categoricalVGAMbib}
-
-\end{document}
-
-
-
-
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
deleted file mode 100644
index f5a2faf..0000000
Binary files a/inst/doc/categoricalVGAM.pdf and /dev/null differ
diff --git a/man/A1A2A3.Rd b/man/A1A2A3.Rd
index 392378a..4932e5b 100644
--- a/man/A1A2A3.Rd
+++ b/man/A1A2A3.Rd
@@ -8,7 +8,7 @@
 
 }
 \usage{
-A1A2A3(link = "logit", inbreeding = TRUE, ip1 = NULL, ip2 = NULL, iF = NULL)
+A1A2A3(link = "logit", inbreeding = FALSE, ip1 = NULL, ip2 = NULL, iF = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -18,7 +18,7 @@ A1A2A3(link = "logit", inbreeding = TRUE, ip1 = NULL, ip2 = NULL, iF = NULL)
 
   }
   \item{inbreeding}{
-    Logical. Is the inbreeding coefficient \eqn{f} equal to 0?
+    Logical. Is there inbreeding?
 
 
 %   Logical. Is the HWE assumption to be made?
@@ -35,7 +35,8 @@ A1A2A3(link = "logit", inbreeding = TRUE, ip1 = NULL, ip2 = NULL, iF = NULL)
   \code{p3=1-p1-p2} is the third probability.
   The parameter \code{f} is the third independent parameter if
   \code{inbreeding = TRUE}.
-  If \code{inbreeding = FALSE} then \eqn{f = 0}.
+  If \code{inbreeding = FALSE} then \eqn{f = 0} and Hardy-Weinberg
+  Equilibrium (HWE) is assumed.
 
 
 
@@ -89,8 +90,7 @@ ymat <- cbind(108, 196, 429, 143, 513, 559)
 fit <- vglm(ymat ~ 1, A1A2A3(link = probit), trace = TRUE, crit = "coef")
 fit <- vglm(ymat ~ 1, A1A2A3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
             trace = TRUE, crit = "coef")
-fit <- vglm(ymat ~ 1, A1A2A3(link = "identitylink"), trace = TRUE)
-Coef(fit)  # Estimated p1, p2 and f
+Coef(fit)  # Estimated p1 and p2 
 rbind(ymat, sum(ymat) * fitted(fit))
 sqrt(diag(vcov(fit)))
 }
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index 5cb6afe..618802f 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -9,7 +9,7 @@
 
 }
 \usage{
-AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = TRUE,
+AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = FALSE,
          ipA = NULL, ifp = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -25,7 +25,7 @@ AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = TRUE,
 
   }
   \item{inbreeding}{
-    Logical. Is the inbreeding coefficient \eqn{f} equal to 0?
+    Logical. Is there inbreeding?
 
 
 %HWE assumption to be made?
@@ -45,7 +45,8 @@ AA.Aa.aa(linkp = "logit", linkf = "logit", inbreeding = TRUE,
   The probability of getting a count in the first column of the
   input (an AA) is \code{pA*pA}.
   When \code{inbreeding = TRUE}, an additional parameter \code{f} is used.
-  If \code{inbreeding = FALSE} then \eqn{f = 0}.
+  If \code{inbreeding = FALSE} then \eqn{f = 0} and Hardy-Weinberg
+  Equilibrium (HWE) is assumed.
 
 
 
@@ -107,8 +108,8 @@ Sunderland, MA: Sinauer Associates, Inc.
 }
 \examples{
 y <- cbind(53, 95, 38)
-fit1 <- vglm(y ~ 1, AA.Aa.aa(linkp = "probit"), trace = TRUE)
-fit2 <- vglm(y ~ 1, AA.Aa.aa(inbreeding = FALSE), trace = TRUE)
+fit1 <- vglm(y ~ 1, AA.Aa.aa, trace = TRUE)
+fit2 <- vglm(y ~ 1, AA.Aa.aa(inbreeding = TRUE), trace = TRUE)
 rbind(y, sum(y) * fitted(fit1))
 Coef(fit1)  # Estimated pA
 Coef(fit2)  # Estimated pA and f
diff --git a/man/ABO.Rd b/man/ABO.Rd
index b0f24cc..2db7930 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -8,22 +8,28 @@
 
 }
 \usage{
-ABO(link = "logit", ipA = NULL, ipO = NULL)
+ABO(link.pA = "logit", link.pB = "logit", ipA = NULL, ipB = NULL,
+    ipO = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link}{
-  Link function applied to \code{pA} and \code{pB}.
+  \item{link.pA, link.pB}{
+  Link functions applied to \code{pA} and \code{pB}.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{ipA, ipO}{
-  Optional initial value for \code{pA} and \code{pO}.
+  \item{ipA, ipB, ipO}{
+  Optional initial values for \code{pA}, \code{pB} and \code{pO}.
   A \code{NULL} value means values are computed internally.
 
 
   }
+  \item{zero}{
+  Details at \code{\link{CommonVGAMffArguments}}.
+
+  
+  }
 
 }
 \details{
@@ -79,7 +85,8 @@ ABO(link = "logit", ipA = NULL, ipO = NULL)
 }
 \examples{
 ymat <- cbind(A = 725, B = 258, AB = 72, O = 1073)  # Order matters, not the name
-fit <- vglm(ymat ~ 1, ABO(link = identitylink), trace = TRUE, cri = "coef")
+fit <- vglm(ymat ~ 1, ABO(link.pA = identitylink,
+                          link.pB = identitylink), trace = TRUE, cri = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)  # Estimated pA and pB
 rbind(ymat, sum(ymat) * fitted(fit))
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index 2aca39f..23ce7c2 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -33,7 +33,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge",
                           type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
                           probs.x = c(0.15, 0.85),
                           probs.y = c(0.25, 0.50, 0.75),
-                          mv = FALSE, earg.link = FALSE,
+                          multiple.responses = FALSE, earg.link = FALSE,
                           whitespace = FALSE, bred = FALSE, lss = TRUE,
                           oim = FALSE, nsimEIM = 100, zero = NULL)
 }
@@ -236,6 +236,15 @@ except for \eqn{X_2}.
   For example, the arguments of \code{\link{weibullR}} are
   scale and shape, whereas \code{\link[stats]{rweibull}}
   are shape and scale.
+  As a temporary measure
+  (from \pkg{VGAM} 0.9-7 onwards but prior to version 1.0-0),
+  some family functions such as \code{\link{sinmad}} have an
+  \code{lss} argument without a default. For these,
+  setting \code{lss = FALSE} will work.
+  Later, \code{lss = TRUE} will be the default.
+  Be careful with the \code{dpqr}-type functions, e.g.,
+  \code{\link{rsinmad}}.
+
 
 
   }
@@ -327,7 +336,7 @@ except for \eqn{X_2}.
 
   }
 
-  \item{mv}{
+  \item{multiple.responses}{
   Logical.
   Some \pkg{VGAM} family functions allow a multivariate or vector response.
   If so, then usually the response is a matrix with columns
@@ -335,10 +344,10 @@ except for \eqn{X_2}.
   They are all fitted simultaneously.
   Arguments such as \code{parallel} may then be useful to allow
   for relationships between the regressions of each response variable.
-  If \code{mv = TRUE} then sometimes the response is interpreted
+  If \code{multiple.responses = TRUE} then sometimes the response is interpreted
   differently, e.g., \code{\link{posbinomial}} chooses the first
   column of a matrix response as success and combines the other
-  columns as failure, but when \code{mv = TRUE} then each column
+  columns as failure, but when \code{multiple.responses = TRUE} then each column
   of the response matrix is the number of successes and the
   \code{weights} argument is of the same dimension as the
   response and contains the number of trials.
@@ -398,6 +407,14 @@ except for \eqn{X_2}.
   \code{vcov(fit)},
   etc. may be misleading.
 
+
+  Changes relating to the \code{lss} argument have very important
+  consequences and users must beware.
+  Good programming style is to rely on the argument names and not
+  on the order.
+
+
+
 }
 
 \details{
diff --git a/man/Links.Rd b/man/Links.Rd
index 8d4c991..6ab737d 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -148,7 +148,7 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
   \code{\link{probit}},
   \code{\link{cloglog}},
   \code{\link{cauchit}},
-  \code{\link{fsqrt}},
+  \code{\link{foldsqrt}},
   \code{\link{logc}},
   \code{\link{golf}},
   \code{\link{polf}},
@@ -171,7 +171,7 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
 
   For parameters between \eqn{A} and \eqn{B}:
-  \code{\link{elogit}},
+  \code{\link{extlogit}},
   \code{\link{logoff}} (\eqn{B = \infty}{B = Inf}).
 
 
@@ -194,6 +194,7 @@ TypicalVGAMlinkFunction(theta, someParameter = 0,
 
 \seealso{
   \code{\link{TypicalVGAMfamilyFunction}},
+  \code{\link{linkfun}},
   \code{\link{vglm}},
   \code{\link{vgam}},
   \code{\link{rrvglm}}.
diff --git a/man/ParetoUC.Rd b/man/ParetoUC.Rd
index 7aff0d5..c21d8f0 100644
--- a/man/ParetoUC.Rd
+++ b/man/ParetoUC.Rd
@@ -13,8 +13,8 @@
 }
 \usage{
 dpareto(x, scale = 1, shape, log = FALSE)
-ppareto(q, scale = 1, shape)
-qpareto(p, scale = 1, shape)
+ppareto(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qpareto(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rpareto(n, scale = 1, shape)
 }
 \arguments{
@@ -31,6 +31,12 @@ rpareto(n, scale = 1, shape)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -49,7 +55,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{paretoff}}, the \pkg{VGAM} family function
   for estimating the parameter \eqn{k} by maximum likelihood estimation,
diff --git a/man/QvarUC.Rd b/man/QvarUC.Rd
index 9dba3e8..eeedaa2 100644
--- a/man/QvarUC.Rd
+++ b/man/QvarUC.Rd
@@ -152,7 +152,7 @@ Qvar(object, factorname = NULL, which.linpred = 1,
 Yee, T. W. and Hadi, A. F. (2014)
 Row-column interaction models, with an R implementation.
 \emph{Computational Statistics},
-\bold{29}, in press.
+\bold{29}, 1427--1445.
 
 
 }
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index 301b2b5..26739d3 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
 Maintainer: Thomas Yee \email{t.yee@auckland.ac.nz}.
 \references{
 
 
-Yee, T. W. (2014)
-Vector Generalized Linear and Additive Models.
-\emph{Monograph in preparation}.
+Yee, T. W. (2015)
+Vector Generalized Linear and Additive Models:
+With an Implementation in R.
+\emph{Springer} (to appear).
 
 
 Yee, T. W. and Hastie, T. J. (2003)
@@ -159,7 +160,8 @@ The \pkg{VGAM} package for categorical data analysis.
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
 
@@ -212,7 +214,7 @@ coef(fit2, matrix = TRUE)  # These should agree with the above values
 
 # Example 3; fit a two species GAM simultaneously
 fit3 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
-            binomialff(mv = TRUE), hunua)
+             binomialff(multiple.responses = TRUE), data = hunua)
 coef(fit3, matrix = TRUE)   # Not really interpretable
 \dontrun{ plot(fit3, se = TRUE, overlay = TRUE, lcol = 3:4, scol = 3:4)
 
diff --git a/man/alaplaceUC.Rd b/man/alaplaceUC.Rd
index 0fed45a..61162c2 100644
--- a/man/alaplaceUC.Rd
+++ b/man/alaplaceUC.Rd
@@ -16,8 +16,10 @@
 \usage{
 dalap(x, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)),
       log = FALSE)
-palap(q, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
-qalap(p, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
+palap(q, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)),
+      lower.tail = TRUE, log.p = FALSE)
+qalap(p, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)),
+      lower.tail = TRUE, log.p = FALSE)
 ralap(n, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
 }
 %- maybe also 'usage' for other objects documented here.
@@ -60,6 +62,12 @@ ralap(n, location = 0, scale = 1, tau = 0.5, kappa = sqrt(tau/(1-tau)))
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \details{
@@ -89,7 +97,7 @@ Boston: Birkhauser.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 %\note{
 %  The \pkg{VGAM} family function \code{\link{alaplace3}}
 %  estimates the three parameters by maximum likelihood estimation.
diff --git a/man/benfUC.Rd b/man/benfUC.Rd
index 7a0c6cb..6da797a 100644
--- a/man/benfUC.Rd
+++ b/man/benfUC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 dbenf(x, ndigits = 1, log = FALSE)
-pbenf(q, ndigits = 1, log.p = FALSE)
-qbenf(p, ndigits = 1)
+pbenf(q, ndigits = 1, lower.tail = TRUE, log.p = FALSE)
+qbenf(p, ndigits = 1, lower.tail = TRUE, log.p = FALSE)
 rbenf(n, ndigits = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -47,7 +47,12 @@ rbenf(n, ndigits = 1)
 
 
   }
+  \item{lower.tail}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
 
+
+  }
 }
 \details{
 
@@ -105,7 +110,7 @@ Note on the Frequency of Use of the Different Digits in Natural Numbers.
 \bold{4}, 39--40.
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 %\note{ 
 %  Currently only the leading digit is handled.
 %  The first two leading digits would be the next simple extension.
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
index 7fe8280..d941b4c 100644
--- a/man/beniniUC.Rd
+++ b/man/beniniUC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 dbenini(x, y0, shape, log = FALSE)
-pbenini(q, y0, shape)
-qbenini(p, y0, shape)
+pbenini(q, y0, shape, lower.tail = TRUE, log.p = FALSE)
+qbenini(p, y0, shape, lower.tail = TRUE, log.p = FALSE)
 rbenini(n, y0, shape)
 }
 \arguments{
@@ -40,6 +40,12 @@ rbenini(n, y0, shape)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -59,7 +65,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{benini1}}, the \pkg{VGAM} family function
   for estimating the parameter \eqn{s} by maximum likelihood estimation,
diff --git a/man/betabinomialff.Rd b/man/betabinomialff.Rd
index 9202876..ef334b8 100644
--- a/man/betabinomialff.Rd
+++ b/man/betabinomialff.Rd
@@ -10,24 +10,25 @@
 
 }
 \usage{
-betabinomialff(lshape12 = "loge", i1 = 1, i2 = NULL,
-               imethod = 1, ishrinkage = 0.95, nsimEIM = NULL, zero = NULL)
+betabinomialff(lshape1 = "loge", lshape2 = "loge", ishape1 = 1,
+               ishape2 = NULL, imethod = 1, ishrinkage = 0.95,
+               nsimEIM = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{lshape12}{ 
-  Link function applied to both (positive) shape parameters
+  \item{lshape1, lshape2}{ 
+  Link functions for the two (positive) shape parameters
   of the beta distribution.
   See \code{\link{Links}} for more choices.
 
 
   }
-  \item{i1, i2}{ 
+  \item{ishape1, ishape2}{ 
   Initial value for the shape parameters.
   The first must be positive, and is recycled to the necessary length.
   The second is optional.
   If a failure to converge occurs, try assigning a different value
-  to \code{i1} and/or using \code{i2}.
+  to \code{ishape1} and/or using \code{ishape2}.
 
 
   }
@@ -165,8 +166,8 @@ betabinomialff(lshape12 = "loge", i1 = 1, i2 = NULL,
   This family function is prone to numerical difficulties
   due to the expected information matrices not being positive-definite
   or ill-conditioned over some regions of the parameter space.
-  If problems occur try setting \code{i1} to be some other
-  positive value, using \code{i2} and/or setting \code{zero = 2}.
+  If problems occur try setting \code{ishape1} to be some other
+  positive value, using \code{ishape2} and/or setting \code{zero = 2}.
 
 
   This family function may be renamed in the future.
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 8355110..aae08c8 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -24,8 +24,8 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
   \item{lmu, lphi}{ 
   Link function for the mean and precision parameters. 
   The values \eqn{A} and \eqn{B} are extracted from the
-  \code{min} and \code{max} arguments of \code{\link{elogit}}.
-  Consequently, only \code{\link{elogit}} is allowed.
+  \code{min} and \code{max} arguments of \code{\link{extlogit}}.
+  Consequently, only \code{\link{extlogit}} is allowed.
 
 
 % See below for more details.
@@ -111,7 +111,7 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
   The response must have values in the interval (\eqn{A}, \eqn{B}).
   The user currently needs to manually choose \code{lmu} to match
   the input of arguments \code{A} and \code{B}, e.g.,
-  with \code{\link{elogit}}; see the example below. 
+  with \code{\link{extlogit}}; see the example below. 
 
 
 }
@@ -127,7 +127,7 @@ betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
   \code{\link{rbetageom}},
   \code{\link{rbetanorm}},
   \code{\link{kumar}},
-  \code{\link{elogit}},
+  \code{\link{extlogit}},
   \code{\link{simulate.vlm}}.
 
 
@@ -148,7 +148,7 @@ bdata <- transform(bdata,
                    y = rbeta(nn, shape1 = shape1, shape2 = shape2))
 bdata <- transform(bdata, Y = 5 + 8 * y)  # From 5 to 13, not 0 to 1
 fit <- vglm(Y ~ x2, data = bdata, trace = TRUE,
-            betaff(A = 5, B = 13, lmu = elogit(min = 5, max = 13)))
+            betaff(A = 5, B = 13, lmu = extlogit(min = 5, max = 13)))
 coef(fit, matrix = TRUE)
 }
 \keyword{models}
diff --git a/man/betanormUC.Rd b/man/betanormUC.Rd
index 2585c44..f32402a 100644
--- a/man/betanormUC.Rd
+++ b/man/betanormUC.Rd
@@ -14,7 +14,8 @@
 dbetanorm(x, shape1, shape2, mean = 0, sd = 1, log = FALSE)
 pbetanorm(q, shape1, shape2, mean = 0, sd = 1,
           lower.tail = TRUE, log.p = FALSE)
-qbetanorm(p, shape1, shape2, mean = 0, sd = 1)
+qbetanorm(p, shape1, shape2, mean = 0, sd = 1,
+          lower.tail = TRUE, log.p = FALSE)
 rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
 }
 \arguments{
@@ -92,14 +93,14 @@ x <- seq(-10, 2, len = 501)
 plot(x, dbetanorm(x, shape1, shape2, m = m), type = "l", ylim = 0:1, las = 1,
      ylab = paste("betanorm(",shape1,", ",shape2,", m=",m, ", sd=1)", sep = ""),
      main = "Blue is density, orange is cumulative distribution function",
-     sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue")
+     sub = "Gray lines are the 10,20,...,90 percentiles", col = "blue")
 lines(x, pbetanorm(x, shape1, shape2, m = m), col = "orange")
-abline(h = 0)
+abline(h = 0, col = "black")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qbetanorm(probs, shape1, shape2, m = m)
-lines(Q, dbetanorm(Q, shape1, shape2, m = m), col = "purple", lty = 3, type = "h")
-lines(Q, pbetanorm(Q, shape1, shape2, m = m), col = "purple", lty = 3, type = "h")
-abline(h = probs, col = "purple", lty = 3)
+lines(Q, dbetanorm(Q, shape1, shape2, m = m), col = "gray50", lty = 2, type = "h")
+lines(Q, pbetanorm(Q, shape1, shape2, m = m), col = "gray50", lty = 2, type = "h")
+abline(h = probs, col = "gray50", lty = 2)
 pbetanorm(Q, shape1, shape2, m = m) - probs  # Should be all 0
 }
 }
diff --git a/man/bilogistic.Rd b/man/bilogistic.Rd
index 85c690e..5277ef8 100644
--- a/man/bilogistic.Rd
+++ b/man/bilogistic.Rd
@@ -20,6 +20,11 @@ bilogistic(llocation = "identitylink", lscale = "loge",
   See \code{\link{Links}} for more choices.
 
 
+
+% 20150227; yettodo: expand/change llocation to lloc1 and lloc2.
+
+
+
   }
   \item{lscale}{
   Parameter link function applied to both
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index f1d2267..198df78 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -9,8 +9,8 @@
 
 }
 \usage{
-binomialff(link = "logit", dispersion = 1, mv = FALSE,
-           onedpar = !mv, parallel = FALSE,
+binomialff(link = "logit", dispersion = 1, multiple.responses = FALSE,
+           onedpar = !multiple.responses, parallel = FALSE,
            zero = NULL, bred = FALSE, earg.link = FALSE)
 
 }
@@ -29,11 +29,11 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
   Dispersion parameter. By default, maximum likelihood is used to
   estimate the model because it is known.  However, the user can specify
   \code{dispersion = 0} to have it estimated, or else specify a known
-  positive value (or values if \code{mv} is \code{TRUE}).
+  positive value (or values if \code{multiple.responses} is \code{TRUE}).
 
 
   }
-  \item{mv}{ 
+  \item{multiple.responses}{ 
   Multivariate response? If \code{TRUE}, then the response is interpreted
   as \eqn{M} independent binary responses, where \eqn{M} is the number
   of columns of the response matrix. In this case, the response matrix
@@ -53,7 +53,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
 
   }
   \item{onedpar}{ 
-  One dispersion parameter? If \code{mv}, then a separate dispersion
+  One dispersion parameter? If \code{multiple.responses} is \code{TRUE}, then a separate dispersion
   parameter will be computed for each response (column), by default.
   Setting \code{onedpar = TRUE} will pool them so that there is only one
   dispersion parameter to be estimated.
@@ -61,7 +61,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
 
   }
   \item{parallel}{ 
-  A logical or formula. Used only if \code{mv} is \code{TRUE}.  This
+  A logical or formula. Used only if \code{multiple.responses} is \code{TRUE}.  This
   argument allows for the parallelism assumption whereby the regression
   coefficients for a variable are constrained to be equal over the \eqn{M}
   linear/additive predictors.
@@ -87,7 +87,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
   \item{bred}{
   Details at \code{\link{CommonVGAMffArguments}}.
   Setting \code{bred = TRUE} should work for
-  multiple responses (\code{mv = TRUE}) and
+  multiple responses (\code{multiple.responses = TRUE}) and
   all \pkg{VGAM} link functions;
   it has been tested for
   \code{\link{logit}} only (and it gives similar
@@ -115,7 +115,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
   is more common in practice.
 
 
-  Setting \code{mv = TRUE} is necessary when fitting a Quadratic RR-VGLM
+  Setting \code{multiple.responses = TRUE} is necessary when fitting a Quadratic RR-VGLM
   (see \code{\link{cqo}}) because the response is a matrix of \eqn{M}
   columns (e.g., one column per species). Then there will be \eqn{M}
   dispersion parameters (one per column of the response matrix).
@@ -156,7 +156,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
 \author{ Thomas W. Yee }
 
 \note{
-  If \code{mv} is \code{FALSE} (default) then the response can be of one
+  If \code{multiple.responses} is \code{FALSE} (default) then the response can be of one
   of two formats:
   a factor (first level taken as failure),
   or a 2-column matrix (first column = successes) of counts.
@@ -172,7 +172,7 @@ binomialff(link = "logit", dispersion = 1, mv = FALSE,
   predictors.
 
 
-  If \code{mv} is \code{TRUE}, then the matrix response can only be of
+  If \code{multiple.responses} is \code{TRUE}, then the matrix response can only be of
   one format: a matrix of 1's and 0's (1 = success).
 
 
@@ -254,7 +254,8 @@ with(shunua, lines(altitude, fitted(fit), col = "orange", lwd = 2)) }
 
 
 # Fit two species simultaneously
-fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude), binomialff(mv = TRUE), shunua)
+fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude),
+             binomialff(multiple.responses = TRUE), data = shunua)
 \dontrun{
 with(shunua, matplot(altitude, fitted(fit2), type = "l",
      main = "Two species response curves", las = 1)) }
diff --git a/man/bisa.Rd b/man/bisa.Rd
index 8a25849..dd61346 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -13,7 +13,11 @@ bisa(lscale = "loge", lshape = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{nowarning}{ Logical. Suppress a warning? }
+  \item{nowarning}{ Logical. Suppress a warning? 
+  Ignored for \pkg{VGAM} 0.9-7 and higher.
+
+
+  }
 
 
   \item{lscale, lshape}{
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index b53dbad..fa42efd 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -12,8 +12,8 @@
 }
 \usage{
 dbisa(x, scale = 1, shape, log = FALSE)
-pbisa(q, scale = 1, shape)
-qbisa(p, scale = 1, shape)
+pbisa(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qbisa(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rbisa(n, scale = 1, shape)
 }
 \arguments{
@@ -35,6 +35,12 @@ rbisa(n, scale = 1, shape)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 }
 \value{
   \code{dbisa} gives the density,
@@ -44,7 +50,7 @@ rbisa(n, scale = 1, shape)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee  and Kai Huang }
 \details{
   The Birnbaum-Saunders distribution
   is a distribution which is used in survival analysis.
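
Not from the patch: a quick sanity check of the identities the new
lower.tail and log.p arguments should satisfy (the shape value is arbitrary).

library(VGAM)
q <- c(0.5, 1, 2)
all.equal(pbisa(q, shape = 0.5, lower.tail = FALSE), 1 - pbisa(q, shape = 0.5))
all.equal(pbisa(q, shape = 0.5, log.p = TRUE), log(pbisa(q, shape = 0.5)))
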
diff --git a/man/cardUC.Rd b/man/cardUC.Rd
index 368847c..ec3eda8 100644
--- a/man/cardUC.Rd
+++ b/man/cardUC.Rd
@@ -13,8 +13,9 @@
 }
 \usage{
 dcard(x, mu, rho, log = FALSE)
-pcard(q, mu, rho)
-qcard(p, mu, rho, tolerance = 1e-07, maxits = 500)
+pcard(q, mu, rho, lower.tail = TRUE, log.p = FALSE)
+qcard(p, mu, rho, tolerance = 1e-07, maxits = 500,
+      lower.tail = TRUE, log.p = FALSE)
 rcard(n, mu, rho, ...)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -47,6 +48,12 @@ rcard(n, mu, rho, ...)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \details{
@@ -65,7 +72,7 @@ rcard(n, mu, rho, ...)
 
 }
 %\references{ }
-\author{ Thomas W. Yee }
+\author{ Thomas W. Yee and Kai Huang }
 \note{ 
   Convergence problems might occur with \code{rcard}.
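
A hedged illustration of why the tolerance and maxits arguments matter:
qcard() solves p = pcard(q) numerically, so a round trip recovers the
probabilities only up to that tolerance (mu and rho below are arbitrary
but valid values).

library(VGAM)
probs <- seq(0.1, 0.9, by = 0.2)
Q <- qcard(probs, mu = pi, rho = 0.3)
max(abs(pcard(Q, mu = pi, rho = 0.3) - probs))  # ~0, up to 'tolerance'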
 
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index 4d5622d..c6af013 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -7,8 +7,8 @@
   cardioid distribution by maximum likelihood estimation.
 }
 \usage{
-cardioid(lmu = elogit(min = 0, max = 2*pi),
-         lrho = elogit(min = -0.5, max = 0.5),
+cardioid(lmu = extlogit(min = 0, max = 2*pi),
+         lrho = extlogit(min = -0.5, max = 0.5),
          imu = NULL, irho = 0.3, nsimEIM = 100, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -88,7 +88,7 @@ Singapore: World Scientific.
 
 \seealso{
   \code{\link{rcard}},
-  \code{\link{elogit}},
+  \code{\link{extlogit}},
   \code{\link{vonmises}}.
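
A sketch of the renamed link (elogit() is now extlogit()): it maps an
interval (min, max) onto the whole real line, and inverse = TRUE maps
back. The interval below matches the lmu default above.

library(VGAM)
mu  <- seq(0.5, 6, by = 0.5)             # Values inside (0, 2*pi)
eta <- extlogit(mu, min = 0, max = 2*pi)
max(abs(extlogit(eta, min = 0, max = 2*pi, inverse = TRUE) - mu))  # Should be ~0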
 
 
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index 223c92c..37de246 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -120,9 +120,11 @@ mydata <- rcqo(n, p, S, eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE,
                family = "binomial", hi.abundance = 5, seed = 123,
                Rank = Rank)
 fitc <- cqo(attr(mydata, "formula"), I.tol = TRUE, data = mydata,
-            fam = binomialff(mv = TRUE, link = "cloglog"), Rank = Rank)
+            fam = binomialff(multiple.responses = TRUE, link = "cloglog"),
+            Rank = Rank)
 fitl <- cqo(attr(mydata, "formula"), I.tol = TRUE, data = mydata,
-            fam = binomialff(mv = TRUE, link = "logit"), Rank = Rank)
+            fam = binomialff(multiple.responses = TRUE, link = "logit"),
+            Rank = Rank)
 
 # Compare the fitted models (cols 1 and 3) with the truth (col 2)
 cbind(concoef(fitc), attr(mydata, "concoefficients"), concoef(fitl))
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 0b459ca..6e15114 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -41,7 +41,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
   \code{\link{gamma2}},
   \code{\link{gaussianff}}.
   Sometimes special arguments are required for \code{cqo()}, e.g.,
-  \code{binomialff(mv = TRUE)}.
+  \code{binomialff(multiple.responses = TRUE)}.
   Also, \code{\link{quasipoissonff}} and \code{\link{quasibinomialff}}
   may or may not work.
 
@@ -460,7 +460,8 @@ original FORTRAN code into C.
   In Example 4 below, constrained binary quadratic ordination (in old
   nomenclature, constrained Gaussian logit ordination) is fitted to some
   simulated data coming from a species packing model.
-  With multivariate binary responses, one must use \code{mv = TRUE} to
+  With multivariate binary responses, one must
+  use \code{multiple.responses = TRUE} to
   indicate that the response (matrix) is multivariate. Otherwise, it is
   interpreted as a single binary response variable.
   In Example 5 below, the deviance residuals are plotted for each species.
@@ -640,7 +641,8 @@ mydata <- rcqo(n, p, S, fam = "binomial", hi.abundance = 4,
                eq.tol = TRUE, es.opt = TRUE, eq.max = TRUE)
 myform <- attr(mydata, "formula")
 set.seed(1234)
-b1et <- cqo(myform, binomialff(mv = TRUE, link = "cloglog"), data = mydata)
+b1et <- cqo(myform, binomialff(multiple.responses = TRUE, link = "cloglog"),
+            data = mydata)
 sort(deviance(b1et, history = TRUE))  # A history of all the iterations
 lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1)
 Coef(b1et)
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index 7af24e8..5ba3b4e 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -10,7 +10,7 @@
 }
 \usage{
 cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
-           mv = FALSE, whitespace = FALSE)
+           multiple.responses = FALSE, whitespace = FALSE)
 }
 %apply.parint = FALSE,
 %scumulative(link = "logit",
@@ -76,9 +76,9 @@ cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
 
 
   }
-  \item{mv}{
+  \item{multiple.responses}{
   Logical.
-  Multivariate response? If \code{TRUE} then the input should be
+  Multiple responses? If \code{TRUE} then the input should be
   a matrix with values \eqn{1,2,\dots,L}, where \eqn{L=J+1} is the
   number of levels.
   Each column of the matrix is a response, i.e., multivariate response.
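
A hedged sketch of this format with simulated data (not from the patch):
each response column holds the levels 1, ..., L coded as integers.

library(VGAM)
set.seed(1)
mdata <- data.frame(x2 = runif(200))
mdata <- transform(mdata,
  y1 = cut(x2 + rnorm(200, sd = 0.2), breaks = 3, labels = FALSE),
  y2 = cut(x2 + rnorm(200, sd = 0.2), breaks = 3, labels = FALSE))
fit <- vglm(cbind(y1, y2) ~ x2,
            cumulative(multiple.responses = TRUE, parallel = TRUE),
            data = mdata)
coef(fit, matrix = TRUE)  # M = 2 * (L - 1) = 4 linear predictors
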
diff --git a/man/dagum.Rd b/man/dagum.Rd
index f1a6c44..35e8ccc 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -7,11 +7,16 @@
   Dagum distribution.
 }
 \usage{
-dagum(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge",
-      ishape1.a = NULL, iscale = NULL, ishape2.p = 1, zero = NULL)
+dagum(lss, lshape1.a = "loge", lscale = "loge", lshape2.p = "loge",
+           ishape1.a = NULL, iscale = NULL, ishape2.p = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
+  
+  
+  }
+
   \item{lshape1.a, lscale, lshape2.p}{
   Parameter link functions applied to the
   (positive) parameters \code{a}, \code{scale}, and \code{p}.
@@ -105,9 +110,10 @@ while estimates for \eqn{a} and \eqn{p} can be considered unbiased for
 }
 
 \examples{
-ddata <- data.frame(y = rdagum(n = 3000, exp(1), exp(2), exp(1)))
-fit <- vglm(y ~ 1, dagum, data = ddata, trace = TRUE)
-fit <- vglm(y ~ 1, dagum(ishape1.a = exp(1)), data = ddata, trace = TRUE)
+ddata <- data.frame(y = rdagum(n = 3000, scale = exp(2), shape1.a = exp(1), shape2.p = exp(1)))
+fit <- vglm(y ~ 1, dagum(lss = FALSE), data = ddata, trace = TRUE)
+fit <- vglm(y ~ 1, dagum(lss = FALSE, ishape1.a = exp(1)),
+            data = ddata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/dagumUC.Rd b/man/dagumUC.Rd
index 17821c0..83850c3 100644
--- a/man/dagumUC.Rd
+++ b/man/dagumUC.Rd
@@ -13,10 +13,10 @@
 
 }
 \usage{
-ddagum(x, shape1.a, scale = 1, shape2.p, log = FALSE)
-pdagum(q, shape1.a, scale = 1, shape2.p)
-qdagum(p, shape1.a, scale = 1, shape2.p)
-rdagum(n, shape1.a, scale = 1, shape2.p)
+ddagum(x, scale = 1, shape1.a, shape2.p, log = FALSE)
+pdagum(q, scale = 1, shape1.a, shape2.p, lower.tail = TRUE, log.p = FALSE)
+qdagum(p, scale = 1, shape1.a, shape2.p, lower.tail = TRUE, log.p = FALSE)
+rdagum(n, scale = 1, shape1.a, shape2.p)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -30,6 +30,12 @@ rdagum(n, shape1.a, scale = 1, shape2.p)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -49,7 +55,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{dagum}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
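
Not part of the patch: a round-trip check consistent with the new argument
order (scale now precedes the shape parameters); the parameter values
mirror the dagum() example.

library(VGAM)
probs <- seq(0.1, 0.9, by = 0.2)
Q <- qdagum(probs, scale = exp(2), shape1.a = exp(1), shape2.p = exp(1))
max(abs(pdagum(Q, scale = exp(2), shape1.a = exp(1),
               shape2.p = exp(1)) - probs))  # Should be ~0
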
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index 91f162b..86d128d 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -7,7 +7,8 @@
 
 }
 \usage{
-dirichlet(link = "loge", parallel = FALSE, zero = NULL)
+dirichlet(link = "loge", parallel = FALSE, zero = NULL,
+          imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -20,7 +21,7 @@ dirichlet(link = "loge", parallel = FALSE, zero = NULL)
 
 
   }
-  \item{parallel, zero}{
+  \item{parallel, zero, imethod}{
   See \code{\link{CommonVGAMffArguments}} for more information.
 
 
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index deb2e10..04a4df5 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -141,9 +141,13 @@ Yu, P. and Shaw, C. A. (2014).
 An Efficient Algorithm for Accurate Computation of
 the Dirichlet-Multinomial Log-Likelihood Function.
 \emph{Bioinformatics},
-\bold{30},
-in press;
-\url{doi:10.1093/bioinformatics/btu079}.
+\bold{30}, 1547--54.
+
+
+
+% url {doi:10.1093/bioinformatics/btu079}.
+% number =      {11},
+
 
 
 % first published online February 11, 2014
diff --git a/man/double.expbinomial.Rd b/man/double.expbinomial.Rd
index 3ebbf76..5336994 100644
--- a/man/double.expbinomial.Rd
+++ b/man/double.expbinomial.Rd
@@ -103,7 +103,7 @@ double.expbinomial(lmean = "logit", ldispersion = "logit",
 \note{
   This function processes the input in the same way
   as \code{\link{binomialff}}; however, multivariate responses are
-  not allowed (\code{binomialff(mv = FALSE)}).
+  not allowed (\code{binomialff(multiple.responses = FALSE)}).
 
 
 }
@@ -140,7 +140,7 @@ cmlist <- list("(Intercept)"    = diag(2),
 fit <- vglm(cbind(phat, 1 - phat) * ssize ~
             I(srainfall) + I(srainfall^2) + I(srainfall^3) +
             I(sN) + I(sN^2),
-            double.expbinomial(ldisp = elogit(min = 0, max = 1.25),
+            double.expbinomial(ldisp = extlogit(min = 0, max = 1.25),
                          idisp = 0.2, zero = NULL),
             toxop, trace = TRUE, constraints = cmlist)
 
@@ -153,7 +153,7 @@ sqrt(diag(vcov(fit)))  # Standard errors
 
 # Effective sample size (not quite the last column of Table 1)
 head(predict(fit))
-Dispersion <- elogit(predict(fit)[,2], min = 0, max = 1.25, inverse = TRUE)
+Dispersion <- extlogit(predict(fit)[,2], min = 0, max = 1.25, inverse = TRUE)
 c(round(weights(fit, type = "prior") * Dispersion, digits = 1))
 
 
@@ -169,7 +169,7 @@ cmlist2 <- list("(Intercept)"                 = diag(2),
                 "poly(sN, degree = 2)"        = rbind(0, 1))
 fit2 <- vglm(cbind(phat, 1 - phat) * ssize ~
              poly(srainfall, degree = 3) + poly(sN, degree = 2),
-             double.expbinomial(ldisp = elogit(min = 0, max = 1.25),
+             double.expbinomial(ldisp = extlogit(min = 0, max = 1.25),
                           idisp = 0.2, zero = NULL),
              toxop, trace = TRUE, constraints = cmlist2)
 \dontrun{ par(mfrow = c(1, 2))
diff --git a/man/ducklings.Rd b/man/ducklings.Rd
new file mode 100644
index 0000000..127a56a
--- /dev/null
+++ b/man/ducklings.Rd
@@ -0,0 +1,70 @@
+\name{ducklings}
+\alias{ducklings}
+\docType{data}
+\title{
+Relative Frequencies of Serum Proteins in White Pekin Ducklings
+
+%%   ~~ data name/kind ... ~~
+}
+\description{
+  Relative frequencies of serum proteins in White Pekin ducklings
+  as determined by electrophoresis.
+
+
+}
+\usage{data(ducklings)}
+\format{
+  A data frame with three columns:
+  \code{p1}, \code{p2} and \code{p3}.
+
+}
+\details{
+  Columns \code{p1}, \code{p2}, \code{p3}
+  stand for pre-albumin, albumin and globulins, respectively.
+  These were collected from 3-week-old White Pekin ducklings.
+  Let \eqn{Y_1}{Y1} be proportional to the total milligrams of
+  pre-albumin in the blood serum of a duckling.
+  Similarly, let \eqn{Y_2}{Y2} and \eqn{Y_3}{Y3} be proportional,
+  with the same constant of proportionality as for \eqn{Y_1}{Y1},
+  to the total milligrams of albumin and globulins,
+  respectively, in its blood serum.
+  The proportion of pre-albumin is given by
+  \eqn{Y_1/(Y_1 + Y_2 + Y_3)}{Y1/(Y1 + Y2 + Y3)},
+  and similarly for the others.
+
+
+
+
+
+%  Each set of 3 measurements is based on from 7 to 12 individual ducklings.
+
+
+
+
+
+
+%%  ~~ If necessary, more details than the __description__ above ~~
+}
+\source{
+  Mosimann, J. E.  (1962)
+  On the compound multinomial distribution,
+  the multivariate \eqn{\beta}{beta}-distribution,
+  and correlations among proportions.
+  \emph{Biometrika},
+  \bold{49}, 65--82.
+}
+
+
+\seealso{
+  \code{\link{dirichlet}}.
+
+
+}
+
+%%\references{
+%%  ~~ possibly secondary sources and usages ~~
+%%}
+\examples{
+print(ducklings)
+}
+\keyword{datasets}
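
Since the three columns are proportions summing to unity, a natural
companion example (an assumption on our part, not in the patch) is a
Dirichlet fit via the dirichlet() family, which this commit also touches.

library(VGAM)
fit <- vglm(cbind(p1, p2, p3) ~ 1, dirichlet, data = ducklings, trace = TRUE)
Coef(fit)  # Estimated Dirichlet shape parameters
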
diff --git a/man/eexpUC.Rd b/man/eexpUC.Rd
index bc512d2..ccbdf64 100644
--- a/man/eexpUC.Rd
+++ b/man/eexpUC.Rd
@@ -15,8 +15,9 @@
 }
 \usage{
 deexp(x, rate = 1, log = FALSE)
-peexp(q, rate = 1, log = FALSE)
-qeexp(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6)
+peexp(q, rate = 1, lower.tail = TRUE, log.p = FALSE)
+qeexp(p, rate = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
+      lower.tail = TRUE, log.p = FALSE)
 reexp(n, rate = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -31,6 +32,12 @@ reexp(n, rate = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Exponential]{pexp}}
+  or \code{\link[stats:Exponential]{qexp}}.
+
+
+  }
   \item{Maxit.nr, Tol.nr}{
   See \code{\link{deunif}}.
 
@@ -80,7 +87,7 @@ very close to 0 or 1.
 %\bold{20}, 149--153.
 %
 %}
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 
 %\note{ 
 %The ``\code{q}'', as the first character of ``\code{qeunif}'',
diff --git a/man/enormUC.Rd b/man/enormUC.Rd
index 58c6387..7f18c7f 100644
--- a/man/enormUC.Rd
+++ b/man/enormUC.Rd
@@ -15,8 +15,9 @@
 }
 \usage{
 denorm(x, mean = 0, sd = 1, log = FALSE)
-penorm(q, mean = 0, sd = 1, log = FALSE)
-qenorm(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6)
+penorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
+qenorm(p, mean = 0, sd = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
+       lower.tail = TRUE, log.p = FALSE)
 renorm(n, mean = 0, sd = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -31,6 +32,12 @@ renorm(n, mean = 0, sd = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
   \item{Maxit.nr, Tol.nr}{
   See \code{\link{deunif}}.
 
@@ -54,12 +61,14 @@ Thus
 \code{renorm} generates random variates from \eqn{g}.
 
 
+
 For \code{qenorm} the Newton-Raphson algorithm is used to solve for
 \eqn{y} satisfying \eqn{p = G(y)}.
 Numerical problems may occur when values of \code{p} are
 very close to 0 or 1.
 
 
+
 }
 \value{
   \code{denorm(x)} gives the density function \eqn{g(x)}.
@@ -79,7 +88,7 @@ very close to 0 or 1.
 %\bold{20}, 149--153.
 %
 %}
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 
 %\note{ 
 %The ``\code{q}'', as the first character of ``\code{qeunif}'',
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index e2b8436..b178eb3 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -14,8 +14,9 @@
 }
 \usage{
 deunif(x, min = 0, max = 1, log = FALSE)
-peunif(q, min = 0, max = 1, log = FALSE)
-qeunif(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6)
+peunif(q, min = 0, max = 1, lower.tail = TRUE, log.p = FALSE)
+qeunif(p, min = 0, max = 1, Maxit.nr = 10, Tol.nr = 1.0e-6,
+       lower.tail = TRUE, log.p = FALSE)
 reunif(n, min = 0, max = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -35,6 +36,12 @@ reunif(n, min = 0, max = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Uniform]{punif}}
+  or \code{\link[stats:Uniform]{qunif}}.
+
+
+  }
   \item{Maxit.nr}{
   Numeric.
   Maximum number of Newton-Raphson iterations allowed.
@@ -128,7 +135,7 @@ quantile and expectile regression.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 
 %\note{ 
 %The ``\code{q}'', as the first character of ``\code{qeunif}'',
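
A hedged round trip for the expectile functions: qeunif() inverts peunif()
by Newton-Raphson, so Maxit.nr and Tol.nr bound the achievable accuracy.

library(VGAM)
probs <- seq(0.1, 0.9, by = 0.2)
Q <- qeunif(probs)                       # Defaults: min = 0, max = 1
max(abs(peunif(Q) - probs))              # ~0, up to Tol.nr
max(abs(peunif(Q, lower.tail = FALSE) - (1 - probs)))  # Also ~0
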
diff --git a/man/exppoissonUC.Rd b/man/exppoissonUC.Rd
index 430359b..eb8a041 100644
--- a/man/exppoissonUC.Rd
+++ b/man/exppoissonUC.Rd
@@ -12,23 +12,31 @@
 }
 \usage{
 dexppois(x, rate = 1, shape, log = FALSE)
-pexppois(q, rate = 1, shape)
-qexppois(p, rate = 1, shape)  
+pexppois(q, rate = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qexppois(p, rate = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rexppois(n, rate = 1, shape)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-    If \code{length(n) > 1} then the length is taken to be the number required. }
-  \item{shape, rate}{ both positive parameters. }
+  If \code{length(n) > 1} then the length is taken to be the number required.
+
+
+}
+  \item{shape, rate}{ positive parameters. }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
 
+  }
 }
 \value{
   \code{dexppois} gives the density,
@@ -38,10 +46,7 @@ rexppois(n, rate = 1, shape)
 
 
 }
-\author{ J. G. Lauder, jamesglauder at gmail.com
-
-
-}
+\author{ Kai Huang and J. G. Lauder }
 \details{
   See \code{\link{exppoisson}}, the \pkg{VGAM} family function
   for estimating the parameters, 
@@ -57,21 +62,20 @@ rexppois(n, rate = 1, shape)
 
 }
 \examples{
-\dontrun{
-shape <- 2; rate <- 2; nn <- 201
+\dontrun{ rate <- 2; shape <- 0.5; nn <- 201
 x <- seq(-0.05, 1.05, len = nn)
-plot(x, dexppois(x, shape, rate = rate), type = "l", las = 1, ylim = c(0, 5),
-     ylab = paste("[dp]exppoisson(shape = ", shape, ", rate = ", rate, ")"),
+plot(x, dexppois(x, rate = rate, shape), type = "l", las = 1, ylim = c(0, 3),
+     ylab = paste("fexppoisson(rate = ", rate, ", shape = ", shape, ")"),
      col = "blue", cex.main = 0.8,
-     main = "Blue is density, orange is cumulative distribution function",
+     main = "Blue is the density, orange the cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
-lines(x, pexppois(x, shape, rate = rate), col = "orange")
+lines(x, pexppois(x, rate = rate, shape), col = "orange")
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qexppois(probs, shape, rate = rate)
-lines(Q, dexppois(Q, shape, rate = rate), col = "purple", lty = 3, type = "h")
-lines(Q, pexppois(Q, shape, rate = rate), col = "purple", lty = 3, type = "h")
-abline(h = probs, col = "purple", lty = 3)
-max(abs(pexppois(Q, shape, rate = rate) - probs))  # Should be 0
+Q <- qexppois(probs, rate = rate, shape)
+lines(Q, dexppois(Q, rate = rate, shape), col = "purple", lty = 3, type = "h")
+lines(Q, pexppois(Q, rate = rate, shape), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3); abline(h = 0, col = "gray50")
+max(abs(pexppois(Q, rate = rate, shape) - probs))  # Should be 0
 }
 }
 \keyword{distribution}
diff --git a/man/felix.Rd b/man/felix.Rd
index a400c9e..db1c44e 100644
--- a/man/felix.Rd
+++ b/man/felix.Rd
@@ -8,7 +8,7 @@
 
 }
 \usage{
-felix(link = elogit(min = 0, max = 0.5), imethod = 1)
+felix(link = extlogit(min = 0, max = 0.5), imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
diff --git a/man/fisk.Rd b/man/fisk.Rd
index 0e07c3f..61e76d3 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -8,11 +8,16 @@
 
 }
 \usage{
-fisk(lshape1.a = "loge", lscale = "loge",
-     ishape1.a = NULL, iscale = NULL, zero = NULL)
+fisk(lss, lshape1.a = "loge", lscale = "loge",
+          ishape1.a = NULL, iscale = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
+  
+  
+  }
+
   \item{lshape1.a, lscale}{
   Parameter link functions applied to the
   (positive) parameters \code{a} and \code{scale}.
@@ -92,9 +97,9 @@ Hoboken, NJ: Wiley-Interscience.
 }
 
 \examples{
-fdata <- data.frame(y = rfisk(n = 200, exp(1), exp(2)))
-fit <- vglm(y ~ 1, fisk, data = fdata, trace = TRUE)
-fit <- vglm(y ~ 1, fisk(ishape1.a = exp(1)), data = fdata, trace = TRUE)
+fdata <- data.frame(y = rfisk(n = 200, shape1.a = exp(1), scale = exp(2)))
+fit <- vglm(y ~ 1, fisk(lss = FALSE), data = fdata, trace = TRUE)
+fit <- vglm(y ~ 1, fisk(lss = FALSE, ishape1.a = exp(1)), data = fdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/fiskUC.Rd b/man/fiskUC.Rd
index f5a44b1..45fa513 100644
--- a/man/fiskUC.Rd
+++ b/man/fiskUC.Rd
@@ -12,10 +12,10 @@
 
 }
 \usage{
-dfisk(x, shape1.a, scale = 1, log = FALSE)
-pfisk(q, shape1.a, scale = 1)
-qfisk(p, shape1.a, scale = 1)
-rfisk(n, shape1.a, scale = 1)
+dfisk(x, scale = 1, shape1.a, log = FALSE)
+pfisk(q, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE)
+qfisk(p, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE)
+rfisk(n, scale = 1, shape1.a)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -29,6 +29,12 @@ rfisk(n, shape1.a, scale = 1)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -47,7 +53,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{fisk}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
@@ -66,8 +72,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 \examples{
-fdata <- data.frame(y = rfisk(n = 1000, 4, 6))
-fit <- vglm(y ~ 1, fisk, data = fdata, trace = TRUE, crit = "coef")
+fdata <- data.frame(y = rfisk(n = 1000, shape1.a = exp(1), scale = exp(2)))
+fit <- vglm(y ~ 1, fisk(lss = FALSE), data = fdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
diff --git a/man/foldnormUC.Rd b/man/foldnormUC.Rd
index 99659a4..1ceaa55 100644
--- a/man/foldnormUC.Rd
+++ b/man/foldnormUC.Rd
@@ -13,8 +13,10 @@
 }
 \usage{
 dfoldnorm(x, mean = 0, sd = 1, a1 = 1, a2 = 1, log = FALSE)
-pfoldnorm(q, mean = 0, sd = 1, a1 = 1, a2 = 1)
-qfoldnorm(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...)
+pfoldnorm(q, mean = 0, sd = 1, a1 = 1, a2 = 1,
+          lower.tail = TRUE, log.p = FALSE)
+qfoldnorm(p, mean = 0, sd = 1, a1 = 1, a2 = 1,
+          lower.tail = TRUE, log.p = FALSE, ...)
 rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
 }
 \arguments{
@@ -33,6 +35,12 @@ rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
   \item{\ldots}{
   Arguments that can be passed into \code{\link[stats]{uniroot}}.
 
@@ -47,7 +55,7 @@ rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{foldnormal}}, the \pkg{VGAM} family function
   for estimating the parameters, 
@@ -56,7 +64,7 @@ rfoldnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
 
 }
 \note{
-  \code{qfoldnorm} runs very slowly because it calls
+  \code{qfoldnorm()} runs very slowly because it calls
   \code{\link[stats]{uniroot}} for each value of the argument \code{p}.
   The solution is consequently not exact; the \code{...} can be used
   to obtain a more accurate solution if necessary.
@@ -77,8 +85,8 @@ plot(x, dfoldnorm(x, m = m, sd = SD), type = "l", ylim = 0:1, las = 1,
      ylab = paste("foldnorm(m = ", m, ", sd = ", round(SD, digits = 3), ")"),
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue")
+abline(h = 0, col = "gray50")
 lines(x, pfoldnorm(x, m = m, sd = SD), col = "orange")
-abline(h = 0)
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qfoldnorm(probs, m = m, sd = SD)
 lines(Q, dfoldnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
diff --git a/man/fsqrt.Rd b/man/foldsqrt.Rd
similarity index 81%
rename from man/fsqrt.Rd
rename to man/foldsqrt.Rd
index 34c3332..65156f9 100644
--- a/man/fsqrt.Rd
+++ b/man/foldsqrt.Rd
@@ -1,5 +1,5 @@
-\name{fsqrt}
-\alias{fsqrt}
+\name{foldsqrt}
+\alias{foldsqrt}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Folded Square Root Link Function }
 \description{
@@ -8,8 +8,8 @@
 
 }
 \usage{
-fsqrt(theta, min = 0, max = 1, mux = sqrt(2),
-      inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+foldsqrt(theta, min = 0, max = 1, mux = sqrt(2),
+         inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -39,7 +39,7 @@ fsqrt(theta, min = 0, max = 1, mux = sqrt(2),
 
 }
 \value{
-  For \code{fsqrt} with \code{deriv = 0}:
+  For \code{foldsqrt} with \code{deriv = 0}:
   \eqn{K (\sqrt{\theta-L} - \sqrt{U-\theta})}{K *
          (sqrt(theta-L) - sqrt(U-theta))}
   or
@@ -81,28 +81,28 @@ fsqrt(theta, min = 0, max = 1, mux = sqrt(2),
 }
 \examples{
 p <- seq(0.01, 0.99, by = 0.01)
-fsqrt(p)
-max(abs(fsqrt(fsqrt(p), inverse = TRUE) - p))  # Should be 0
+foldsqrt(p)
+max(abs(foldsqrt(foldsqrt(p), inverse = TRUE) - p))  # Should be 0
 
 p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-fsqrt(p)  # Has NAs
+foldsqrt(p)  # Has NAs
 
 \dontrun{
 p <- seq(0.01, 0.99, by = 0.01)
 par(mfrow = c(2, 2), lwd = (mylwd <- 2))
 y <- seq(-4, 4, length = 100)
 for (d in 0:1) {
-  matplot(p, cbind(logit(p, deriv = d), fsqrt(p, deriv = d)),
+  matplot(p, cbind(logit(p, deriv = d), foldsqrt(p, deriv = d)),
           type = "n", col = "purple", ylab = "transformation", las = 1,
           main = if (d == 0) "Some probability link functions"
           else "First derivative")
   lines(p, logit(p, deriv = d), col = "limegreen")
   lines(p, probit(p, deriv = d), col = "purple")
   lines(p, cloglog(p, deriv = d), col = "chocolate")
-  lines(p, fsqrt(p, deriv = d), col = "tan")
+  lines(p, foldsqrt(p, deriv = d), col = "tan")
   if (d == 0) {
     abline(v = 0.5, h = 0, lty = "dashed")
-    legend(0, 4.5, c("logit", "probit", "cloglog", "fsqrt"), lwd = 2,
+    legend(0, 4.5, c("logit", "probit", "cloglog", "foldsqrt"), lwd = 2,
            col = c("limegreen","purple","chocolate", "tan"))
   } else
     abline(v = 0.5, lty = "dashed")
@@ -110,7 +110,7 @@ for (d in 0:1) {
 
 for (d in 0) {
   matplot(y, cbind(logit(y, deriv = d, inverse = TRUE),
-                   fsqrt(y, deriv = d, inverse = TRUE)),
+                   foldsqrt(y, deriv = d, inverse = TRUE)),
           type = "n", col = "purple", xlab = "transformation", ylab = "p",
           lwd = 2, las = 1,
           main = if (d == 0) "Some inverse probability link functions"
@@ -118,10 +118,10 @@ for (d in 0) {
   lines(y, logit(y, deriv = d, inverse = TRUE), col = "limegreen")
   lines(y, probit(y, deriv = d, inverse = TRUE), col = "purple")
   lines(y, cloglog(y, deriv = d, inverse = TRUE), col = "chocolate")
-  lines(y, fsqrt(y, deriv = d, inverse = TRUE), col = "tan")
+  lines(y, foldsqrt(y, deriv = d, inverse = TRUE), col = "tan")
   if (d == 0) {
     abline(h = 0.5, v = 0, lty = "dashed")
-    legend(-4, 1, c("logit", "probit", "cloglog", "fsqrt"), lwd = 2,
+    legend(-4, 1, c("logit", "probit", "cloglog", "foldsqrt"), lwd = 2,
            col = c("limegreen","purple","chocolate", "tan"))
   }
 }
@@ -129,7 +129,7 @@ par(lwd = 1)
 }
 
 # This is lucky to converge
-fit.h <- vglm(agaaus ~ sm.bs(altitude), binomialff(link = fsqrt(mux = 5)),
+fit.h <- vglm(agaaus ~ sm.bs(altitude), binomialff(link = foldsqrt(mux = 5)),
               data = hunua, trace = TRUE)
 \dontrun{
 plotvgam(fit.h, se = TRUE, lcol = "orange", scol = "orange",
@@ -141,7 +141,7 @@ head(predict(fit.h, hunua, type = "response"))
 # The following fails.
 pneumo <- transform(pneumo, let = log(exposure.time))
 fit <- vglm(cbind(normal, mild, severe) ~ let,
-            cumulative(link = fsqrt(mux = 10), par = TRUE, rev = TRUE),
+            cumulative(link = foldsqrt(mux = 10), par = TRUE, rev = TRUE),
             data = pneumo, trace = TRUE, maxit = 200) }
 }
 \keyword{math}
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index 99be304..1968805 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -13,8 +13,10 @@
 }
 \usage{
 dfrechet(x, location = 0, scale = 1, shape, log = FALSE)
-pfrechet(q, location = 0, scale = 1, shape)
-qfrechet(p, location = 0, scale = 1, shape)
+pfrechet(q, location = 0, scale = 1, shape,
+         lower.tail = TRUE, log.p = FALSE)
+qfrechet(p, location = 0, scale = 1, shape,
+         lower.tail = TRUE, log.p = FALSE)
 rfrechet(n, location = 0, scale = 1, shape)
 }
 \arguments{
@@ -33,6 +35,12 @@ rfrechet(n, location = 0, scale = 1, shape)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Uniform]{punif}}
+  or \code{\link[stats:Uniform]{qunif}}.
+
+
+  }
 
 }
 \value{
@@ -53,7 +61,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{frechet}}, the \pkg{VGAM}
   family function for estimating the 2 parameters (without location
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index f4268c9..537bb74 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -8,12 +8,17 @@
 
 }
 \usage{
-genbetaII(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
-          ishape1.a = NULL, iscale = NULL, ishape2.p = 1, ishape3.q = 1,
-          zero = NULL)
+genbetaII(lss, lshape1.a = "loge", lscale = "loge", lshape2.p = "loge",
+          lshape3.q = "loge", ishape1.a = NULL, iscale = NULL,
+          ishape2.p = 1, ishape3.q = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
+
+
+  }
+
   \item{lshape1.a, lscale, lshape2.p, lshape3.q}{
   Parameter link functions applied to the
   shape parameter \code{a},
@@ -35,6 +40,13 @@ genbetaII(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge", lshape3.q = "
   Optional initial values for \code{p} and \code{q}.
 
   }
+% \item{gshape1.a, gscale, gshape2.p, gshape3.q}{
+% See \code{\link{CommonVGAMffArguments}} for information.
+
+
+
+
+% }
   \item{zero}{
   An integer-valued vector specifying which
   linear/additive predictors are modelled as intercepts only.
@@ -134,10 +146,12 @@ More improvements could be made here.
 
 \examples{
 \dontrun{
-gdata <- data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1)))  # A special case!
-fit <- vglm(y ~ 1, genbetaII, data = gdata, trace = TRUE)
+gdata <- data.frame(y = rsinmad(3000, shape1.a = exp(2), scale = exp(2),
+                                shape3.q = exp(1)))  # A special case!
+fit <- vglm(y ~ 1, genbetaII(lss = FALSE), data = gdata, trace = TRUE)
 fit <- vglm(y ~ 1, data = gdata, trace = TRUE,
-            genbetaII(ishape1.a = 4, ishape2.p = 2.2, iscale = 7, ishape3.q = 2.3))
+            genbetaII(lss = FALSE, ishape1.a = 4, ishape2.p = 2.2,
+                      iscale = 7, ishape3.q = 2.3))
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
diff --git a/man/gengammaUC.Rd b/man/gengammaUC.Rd
index 67ba760..78fcccb 100644
--- a/man/gengammaUC.Rd
+++ b/man/gengammaUC.Rd
@@ -15,8 +15,10 @@
 }
 \usage{
 dgengamma.stacy(x, scale = 1, d = 1, k = 1, log = FALSE)
-pgengamma.stacy(q, scale = 1, d = 1, k = 1)
-qgengamma.stacy(p, scale = 1, d = 1, k = 1)
+pgengamma.stacy(q, scale = 1, d = 1, k = 1,
+                lower.tail = TRUE, log.p = FALSE)
+qgengamma.stacy(p, scale = 1, d = 1, k = 1,
+                lower.tail = TRUE, log.p = FALSE)
 rgengamma.stacy(n, scale = 1, d = 1, k = 1)
 }
 \arguments{
@@ -35,6 +37,12 @@ rgengamma.stacy(n, scale = 1, d = 1, k = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -53,7 +61,7 @@ Parameter estimation for a generalized gamma distribution.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{gengamma.stacy}}, the \pkg{VGAM} family function
   for estimating the generalized gamma distribution
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index e1bb40d..543d544 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -7,7 +7,7 @@
 
 }
 \usage{
-genpoisson(llambda = elogit(min = -1, max = 1), ltheta = "loge",
+genpoisson(llambda = extlogit(min = -1, max = 1), ltheta = "loge",
            ilambda = NULL, itheta = NULL,
            use.approx = TRUE, imethod = 1, zero = 1)
 }
diff --git a/man/genrayleighUC.Rd b/man/genrayleighUC.Rd
index 49c9714..00033f6 100644
--- a/man/genrayleighUC.Rd
+++ b/man/genrayleighUC.Rd
@@ -13,8 +13,8 @@
 }
 \usage{
 dgenray(x, scale = 1, shape, log = FALSE)
-pgenray(q, scale = 1, shape)
-qgenray(p, scale = 1, shape)
+pgenray(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qgenray(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rgenray(n, scale = 1, shape)
 }
 \arguments{
@@ -30,6 +30,12 @@ rgenray(n, scale = 1, shape)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -38,8 +44,11 @@ rgenray(n, scale = 1, shape)
   \code{qgenray} gives the quantile function, and
   \code{rgenray} generates random deviates.
 
+
 }
-\author{ J. G. Lauder and T. W. Yee }
+
+\author{ Kai Huang and J. G. Lauder and T. W. Yee }
+
 \details{
   See \code{\link{genrayleigh}}, the \pkg{VGAM} family function
   for estimating the parameters, 
diff --git a/man/gev.Rd b/man/gev.Rd
index 1a1634b..b4d82ed 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -92,7 +92,7 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
   }
 %  \item{rshape}{
 %  Numeric, of length 2.
-%  Range of \eqn{\xi}{xi} if \code{lshape = "elogit"} is chosen.
+%  Range of \eqn{\xi}{xi} if \code{lshape = "extlogit"} is chosen.
 %  The rationale for the default values is given below.
 %  }
 
@@ -174,7 +174,7 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
   the maximum likelihood estimators are completely regular.
   To have some control over the estimated \eqn{\xi}{xi} try
   using \code{lshape = logoff(offset = 0.5)}, say,
-  or \code{lshape = elogit(min = -0.5, max = 0.5)}, say.
+  or \code{lshape = extlogit(min = -0.5, max = 0.5)}, say.
 
 
 % and when \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5} they exist but are
@@ -271,7 +271,7 @@ egev(llocation = "identitylink", lscale = "loge", lshape = logoff(offset = 0.5),
   \code{\link{gpd}},
   \code{\link{weibullR}},
   \code{\link{frechet}},
-  \code{\link{elogit}},
+  \code{\link{extlogit}},
   \code{\link{oxtemp}},
   \code{\link{venice}}.
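
A hedged sketch of the advice above, using the venice data shipped with
VGAM; convergence with a range-restricted shape can be delicate, so this
is illustrative only.

library(VGAM)
fit <- vglm(r1 ~ scale(year), data = venice, trace = TRUE,
            egev(lshape = extlogit(min = -0.5, max = 0.5), zero = 2:3))
coef(fit, matrix = TRUE)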
 
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
index f055252..65532b5 100644
--- a/man/gevUC.Rd
+++ b/man/gevUC.Rd
@@ -17,8 +17,8 @@
 \usage{
 dgev(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 =
      sqrt(.Machine$double.eps), oobounds.log = -Inf, giveWarning = FALSE)
-pgev(q, location = 0, scale = 1, shape = 0)
-qgev(p, location = 0, scale = 1, shape = 0)
+pgev(q, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE)
+qgev(p, location = 0, scale = 1, shape = 0, lower.tail = TRUE, log.p = FALSE)
 rgev(n, location = 0, scale = 1, shape = 0)
 }
 \arguments{
@@ -35,6 +35,12 @@ rgev(n, location = 0, scale = 1, shape = 0)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Uniform]{punif}}
+  or \code{\link[stats:Uniform]{qunif}}.
+
+
+  }
   \item{tolshape0}{
   Positive numeric.
   Threshold/tolerance value for resting whether \eqn{\xi}{xi} is zero.
diff --git a/man/golf.Rd b/man/golf.Rd
index 2a57832..c3c3a7d 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -132,7 +132,7 @@ gdata <- transform(gdata, cuty = Cut(y1, breaks = cutpoints))
 \dontrun{ par(mfrow = c(1, 1), las = 1)
 with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
 with(gdata, table(cuty) / sum(table(cuty)))
-fit <- vglm(cuty ~ x2 + x3, cumulative(mv = TRUE,
+fit <- vglm(cuty ~ x2 + x3, cumulative(multiple.responses = TRUE,
            reverse = TRUE, parallel = FALSE ~ -1,
            link = golf(cutpoint = cutpoints[2:3], lambda = lambda)),
            data = gdata, trace = TRUE)
@@ -157,10 +157,10 @@ fit at misc
 % y1 <- rnbinom(nn, mu=mymu, size=shape)
 % cuty <- Cut(y1)
 % fit <- vglm(cuty ~ x2 + x3, fam = cumulative(link = "golf", rev = TRUE,
-%            mv = TRUE, parallel = TRUE, earg = list(lambda=shape)))
+%            multiple.responses = TRUE, parallel = TRUE, earg = list(lambda=shape)))
 % coef(fit)
 % fit <- vglm(cuty ~ x2 + x3, fam = cumulative(link = "probit", rev = TRUE,
-%            mv = TRUE, parallel = TRUE))
+%            multiple.responses = TRUE, parallel = TRUE))
 % coef(fit, matrix = TRUE)
 % coef(fit)
 
diff --git a/man/gompertz.Rd b/man/gompertz.Rd
index 9687123..15628d6 100644
--- a/man/gompertz.Rd
+++ b/man/gompertz.Rd
@@ -14,7 +14,11 @@ gompertz(lscale = "loge", lshape = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{nowarning}{ Logical. Suppress a warning? }
+  \item{nowarning}{ Logical. Suppress a warning? 
+  Ignored for \pkg{VGAM} 0.9-7 and higher.
+
+
+  }
 
 
   \item{lshape, lscale}{
diff --git a/man/gompertzUC.Rd b/man/gompertzUC.Rd
index 72930ff..25cb643 100644
--- a/man/gompertzUC.Rd
+++ b/man/gompertzUC.Rd
@@ -16,8 +16,8 @@
 }
 \usage{
 dgompertz(x, scale = 1, shape, log = FALSE)
-pgompertz(q, scale = 1, shape)
-qgompertz(p, scale = 1, shape)
+pgompertz(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qgompertz(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rgompertz(n, scale = 1, shape)
 }
 \arguments{
@@ -34,6 +34,12 @@ rgompertz(n, scale = 1, shape)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
   \item{scale, shape}{positive scale and shape parameters. }
 
 }
@@ -45,7 +51,7 @@ rgompertz(n, scale = 1, shape)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{gompertz}} for details.
 
diff --git a/man/gpd.Rd b/man/gpd.Rd
index ea27ec4..39cfb98 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -94,7 +94,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
   }
 % \item{rshape}{ 
 % Numeric, of length 2.
-% Range of \eqn{\xi}{xi} if \code{lshape = "elogit"} is chosen.
+% Range of \eqn{\xi}{xi} if \code{lshape = "extlogit"} is chosen.
 % The default values ensures the algorithm works (\eqn{\xi > -0.5}{xi > -0.5})
 % and the variance exists (\eqn{\xi < 0.5}{xi < 0.5}).
 
@@ -180,7 +180,7 @@ gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
 
   The mean of \eqn{Y} does not exist unless \eqn{\xi < 1}{xi < 1}, and
   the variance does not exist unless \eqn{\xi < 0.5}{xi < 0.5}.  So if
-  you want to fit a model with finite variance use \code{lshape = "elogit"}.
+  you want to fit a model with finite variance use \code{lshape = "extlogit"}.
 
 
 }
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index fd5111b..616b049 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -17,8 +17,10 @@
 dgpd(x, location = 0, scale = 1, shape = 0, log = FALSE,
      tolshape0 = sqrt(.Machine$double.eps),
      oobounds.log = -Inf, giveWarning = FALSE)
-pgpd(q, location = 0, scale = 1, shape = 0)
-qgpd(p, location = 0, scale = 1, shape = 0)
+pgpd(q, location = 0, scale = 1, shape = 0,
+     lower.tail = TRUE, log.p = FALSE)
+qgpd(p, location = 0, scale = 1, shape = 0,
+     lower.tail = TRUE, log.p = FALSE)
 rgpd(n, location = 0, scale = 1, shape = 0)
 }
 \arguments{
@@ -35,6 +37,12 @@ rgpd(n, location = 0, scale = 1, shape = 0)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Uniform]{punif}}
+  or \code{\link[stats:Uniform]{qunif}}.
+
+
+  }
   \item{tolshape0}{
   Positive numeric.
   Threshold/tolerance value for testing whether \eqn{\xi}{xi} is zero.
@@ -68,6 +76,8 @@ rgpd(n, location = 0, scale = 1, shape = 0)
   \code{pgpd} gives the distribution function,
   \code{qgpd} gives the quantile function, and
   \code{rgpd} generates random deviates.
+
+
 }
 \references{
 Coles, S. (2001)
@@ -76,7 +86,7 @@ London: Springer-Verlag.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{gpd}}, the \pkg{VGAM} family function
   for estimating the two parameters by maximum likelihood estimation,
diff --git a/man/grc.Rd b/man/grc.Rd
index 5e004da..e41b178 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -272,7 +272,7 @@ Reduced-rank vector generalized linear models.
 Yee, T. W. and Hadi, A. F. (2014)
 Row-column interaction models, with an R implementation.
 \emph{Computational Statistics},
-\bold{29}, in press.
+\bold{29}, 1427--1445.
 
 
 Goodman, L. A. (1981)
diff --git a/man/gumbelIIUC.Rd b/man/gumbelIIUC.Rd
index 514ef83..5ff9039 100644
--- a/man/gumbelIIUC.Rd
+++ b/man/gumbelIIUC.Rd
@@ -15,8 +15,8 @@
 }
 \usage{
 dgumbelII(x, scale = 1, shape, log = FALSE)
-pgumbelII(q, scale = 1, shape)
-qgumbelII(p, scale = 1, shape)
+pgumbelII(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qgumbelII(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rgumbelII(n, scale = 1, shape)
 }
 \arguments{
@@ -32,6 +32,12 @@ rgumbelII(n, scale = 1, shape)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
   \item{shape, scale}{positive shape and scale parameters. }
 
 }
@@ -43,7 +49,9 @@ rgumbelII(n, scale = 1, shape)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang
+}
+
 \details{
   See \code{\link{gumbelII}} for details.
 
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index ef782cb..38066f9 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 dgumbel(x, location = 0, scale = 1, log = FALSE)
-pgumbel(q, location = 0, scale = 1)
-qgumbel(p, location = 0, scale = 1)
+pgumbel(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
+qgumbel(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
 rgumbel(n, location = 0, scale = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -23,19 +23,33 @@ rgumbel(n, location = 0, scale = 1)
   \item{x, q}{vector of quantiles.}
   \item{p}{vector of probabilities.}
   \item{n}{number of observations.
-  If \code{length(n) > 1} then the length is taken to be the number required.}
+  If \code{length(n) > 1} then the length is taken to be the number required.
+
+
+  }
   \item{location}{the location parameter \eqn{\mu}{mu}.
     This is not the mean
-    of the Gumbel distribution (see \bold{Details} below). }
+    of the Gumbel distribution (see \bold{Details} below).
+
+
+  }
   \item{scale}{the scale parameter \eqn{\sigma}{sigma}.
     This is not the standard deviation
-    of the Gumbel distribution (see \bold{Details} below). }
+    of the Gumbel distribution (see \bold{Details} below).
+
+
+  }
   \item{log}{
   Logical.
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Uniform]{punif}}
+  or \code{\link[stats:Uniform]{qunif}}.
 
+
+  }
 }
 \details{
   The Gumbel distribution is a special case of the
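
Not in the patch: the identities the new arguments should satisfy, at a
few arbitrary quantiles.

library(VGAM)
q <- c(-1, 0, 2)
all.equal(pgumbel(q, lower.tail = FALSE), 1 - pgumbel(q))
all.equal(pgumbel(q, log.p = TRUE), log(pgumbel(q)))
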
diff --git a/man/hormone.Rd b/man/hormone.Rd
index d8d2339..b912d0f 100644
--- a/man/hormone.Rd
+++ b/man/hormone.Rd
@@ -74,7 +74,8 @@ Thus calibration might be of interest for the data.
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
 }
diff --git a/man/hspider.Rd b/man/hspider.Rd
index bbe32b9..1eca4aa 100644
--- a/man/hspider.Rd
+++ b/man/hspider.Rd
@@ -70,7 +70,8 @@ hsbin <- hspider  # Binary species data
 hsbin[, -(1:6)] <- as.numeric(hsbin[, -(1:6)] > 0)
 set.seed(123)
 ahsb1 <- cao(cbind(Alopcune, Arctlute, Auloalbi, Zoraspin) ~
-             WaterCon + ReflLux, family = binomialff(mv = TRUE),
+             WaterCon + ReflLux,
+             family = binomialff(multiple.responses = TRUE),
              df1.nl = 2.2, Bestof = 3, data = hsbin)
 par(mfrow = 2:1, las = 1)
 lvplot(ahsb1, type = "predictors", llwd = 2, ylab = "logit p", lcol = 1:9)
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
index be4d593..624c21d 100644
--- a/man/huberUC.Rd
+++ b/man/huberUC.Rd
@@ -11,12 +11,11 @@
 
 }
 \usage{
-  dhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
- edhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
-  rhuber(n, k = 0.862, mu = 0, sigma = 1)
-  qhuber(p, k = 0.862, mu = 0, sigma = 1)
-  phuber(q, k = 0.862, mu = 0, sigma = 1)
-
+ dhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
+edhuber(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
+ rhuber(n, k = 0.862, mu = 0, sigma = 1)
+ qhuber(p, k = 0.862, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE)
+ phuber(q, k = 0.862, mu = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE)
 }
 \arguments{
   \item{x, q}{numeric vector, vector of quantiles.}
@@ -39,6 +38,12 @@
   If \code{log = TRUE} then the logarithm of the result is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 
 }
@@ -79,13 +84,16 @@
    (from \pkg{smoothmest}) and
    slight modifications were made by T. W. Yee to
    replace looping by vectorization and addition of the \code{log} argument.
-   Arash Ardalan wrote \code{[pq]huber()}.
+   Arash Ardalan wrote \code{[pq]huber()}, and
+   the \code{lower.tail} and \code{log.p} arguments were implemented by Kai Huang.
    This helpfile was adapted from \pkg{smoothmest}.
 
+
 }
 \seealso{
     \code{\link{huber2}}.
 
+
 }
 
 \examples{
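
A hedged check of the two new arguments of [pq]huber(), at the default k.

library(VGAM)
probs <- seq(0.1, 0.9, by = 0.2)
Q <- qhuber(probs)                       # k = 0.862, mu = 0, sigma = 1
max(abs(phuber(Q) - probs))              # Should be ~0
max(abs(phuber(Q, lower.tail = FALSE) - (1 - probs)))  # Should be ~0
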
diff --git a/man/hypersecant.Rd b/man/hypersecant.Rd
index 084b901..83a8cd4 100644
--- a/man/hypersecant.Rd
+++ b/man/hypersecant.Rd
@@ -11,8 +11,8 @@
 
 }
 \usage{
-  hypersecant(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
-hypersecant01(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
+  hypersecant(link.theta = extlogit(min = -pi/2, max = pi/2), init.theta = NULL)
+hypersecant01(link.theta = extlogit(min = -pi/2, max = pi/2), init.theta = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -94,7 +94,7 @@ Natural exponential families with quadratic variance functions.
 
 %}
 \seealso{
-  \code{\link{elogit}}.
+  \code{\link{extlogit}}.
 
 
 % \code{\link{nefghs}},
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index 505c7ba..333ad5a 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -13,7 +13,7 @@
 }
 \usage{
 dhzeta(x, alpha, log = FALSE)
-phzeta(q, alpha)
+phzeta(q, alpha, log.p = FALSE)
 qhzeta(p, alpha)
 rhzeta(n, alpha)
 }
@@ -43,6 +43,12 @@ rhzeta(n, alpha)
 
 
   }
+  \item{log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 }
 \details{
    The probability function is
@@ -70,7 +76,7 @@ rhzeta(n, alpha)
 %
 %
 %}
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \note{ 
   Given some response data, the \pkg{VGAM} family function
   \code{\link{hzeta}} estimates the parameter \code{alpha}.
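
Illustrative only (alpha is arbitrary): the new log.p argument of phzeta()
should agree with taking logarithms afterwards.

library(VGAM)
q <- 1:5
all.equal(phzeta(q, alpha = 1.1, log.p = TRUE), log(phzeta(q, alpha = 1.1)))
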
diff --git a/man/inv.binomial.Rd b/man/inv.binomial.Rd
index a6432fd..69d7c19 100644
--- a/man/inv.binomial.Rd
+++ b/man/inv.binomial.Rd
@@ -8,7 +8,7 @@
 
 }
 \usage{
-inv.binomial(lrho = elogit(min = 0.5, max = 1),
+inv.binomial(lrho = extlogit(min = 0.5, max = 1),
              llambda = "loge", irho = NULL, ilambda = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
diff --git a/man/inv.lomaxUC.Rd b/man/inv.lomaxUC.Rd
index 1c0b279..9c2844b 100644
--- a/man/inv.lomaxUC.Rd
+++ b/man/inv.lomaxUC.Rd
@@ -13,8 +13,8 @@
 }
 \usage{
 dinv.lomax(x, scale = 1, shape2.p, log = FALSE)
-pinv.lomax(q, scale = 1, shape2.p)
-qinv.lomax(p, scale = 1, shape2.p)
+pinv.lomax(q, scale = 1, shape2.p, lower.tail = TRUE, log.p = FALSE)
+qinv.lomax(p, scale = 1, shape2.p, lower.tail = TRUE, log.p = FALSE)
 rinv.lomax(n, scale = 1, shape2.p)
 }
 \arguments{
@@ -29,6 +29,12 @@ rinv.lomax(n, scale = 1, shape2.p)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
diff --git a/man/inv.paralogistic.Rd b/man/inv.paralogistic.Rd
index 6649614..bb7bfeb 100644
--- a/man/inv.paralogistic.Rd
+++ b/man/inv.paralogistic.Rd
@@ -7,11 +7,16 @@
   inverse paralogistic distribution.
 }
 \usage{
-inv.paralogistic(lshape1.a = "loge", lscale = "loge",
-                 ishape1.a = 2, iscale = NULL, zero = NULL)
+inv.paralogistic(lss, lshape1.a = "loge", lscale = "loge",
+                      ishape1.a = 2, iscale = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
+  
+  
+  }
+
   \item{lshape1.a, lscale}{ 
   Parameter link functions applied to the
   (positive) shape parameter \code{a} and
@@ -92,9 +97,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \examples{
-idata <- data.frame(y = rinv.paralogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, inv.paralogistic, data = idata, trace = TRUE)
-fit <- vglm(y ~ 1, inv.paralogistic(ishape1.a = 2.7, iscale = 7.3),
+idata <- data.frame(y = rinv.paralogistic(n = 3000, shape1.a = exp(1), scale = exp(2)))
+fit <- vglm(y ~ 1, inv.paralogistic(lss = FALSE), data = idata, trace = TRUE)
+fit <- vglm(y ~ 1, inv.paralogistic(lss = FALSE, ishape1.a = 2.7, iscale = 7.3),
             data = idata, trace = TRUE, epsilon = 1e-8)
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/inv.paralogisticUC.Rd b/man/inv.paralogisticUC.Rd
index bb1cf08..59db44e 100644
--- a/man/inv.paralogisticUC.Rd
+++ b/man/inv.paralogisticUC.Rd
@@ -13,10 +13,10 @@
 
 }
 \usage{
-dinv.paralogistic(x, shape1.a, scale = 1, log = FALSE)
-pinv.paralogistic(q, shape1.a, scale = 1)
-qinv.paralogistic(p, shape1.a, scale = 1)
-rinv.paralogistic(n, shape1.a, scale = 1)
+dinv.paralogistic(x, scale = 1, shape1.a, log = FALSE)
+pinv.paralogistic(q, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE)
+qinv.paralogistic(p, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE)
+rinv.paralogistic(n, scale = 1, shape1.a)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -30,6 +30,12 @@ rinv.paralogistic(n, shape1.a, scale = 1)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -68,8 +74,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 \examples{
-idata <- data.frame(y = rinv.paralogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, inv.paralogistic(ishape1.a = 2.1),
+idata <- data.frame(y = rinv.paralogistic(n = 3000, shape1.a = exp(1), scale = exp(2)))
+fit <- vglm(y ~ 1, inv.paralogistic(lss = FALSE, ishape1.a = 2.1),
             data = idata, trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/kumarUC.Rd b/man/kumarUC.Rd
index 1fe19c3..2db29c4 100644
--- a/man/kumarUC.Rd
+++ b/man/kumarUC.Rd
@@ -12,8 +12,8 @@
 }
 \usage{
 dkumar(x, shape1, shape2, log = FALSE)
-pkumar(q, shape1, shape2)
-qkumar(p, shape1, shape2)
+pkumar(q, shape1, shape2, lower.tail = TRUE, log.p = FALSE)
+qkumar(p, shape1, shape2, lower.tail = TRUE, log.p = FALSE)
 rkumar(n, shape1, shape2)
 }
 \arguments{
@@ -29,7 +29,12 @@ rkumar(n, shape1, shape2)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
 
+
+  }
 }
 \value{
   \code{dkumar} gives the density,
@@ -38,7 +43,7 @@ rkumar(n, shape1, shape2)
   \code{rkumar} generates random deviates.
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{kumar}}, the \pkg{VGAM} family function
   for estimating the parameters, 
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
index c99d76c..e4e62f4 100644
--- a/man/laplaceUC.Rd
+++ b/man/laplaceUC.Rd
@@ -13,8 +13,8 @@
 }
 \usage{
 dlaplace(x, location = 0, scale = 1, log = FALSE)
-plaplace(q, location = 0, scale = 1)
-qlaplace(p, location = 0, scale = 1)
+plaplace(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
+qlaplace(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
 rlaplace(n, location = 0, scale = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -42,6 +42,12 @@ rlaplace(n, location = 0, scale = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \details{
@@ -82,7 +88,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang}
 %\note{
 %  The \pkg{VGAM} family function \code{\link{laplace}}
 %  estimates the two parameters by maximum likelihood estimation.
diff --git a/man/lgammaUC.Rd b/man/lgammaUC.Rd
index 0c9d455..57f0551 100644
--- a/man/lgammaUC.Rd
+++ b/man/lgammaUC.Rd
@@ -16,8 +16,10 @@
 }
 \usage{
 dlgamma(x, location = 0, scale = 1, shape = 1, log = FALSE)
-plgamma(q, location = 0, scale = 1, shape = 1)
-qlgamma(p, location = 0, scale = 1, shape = 1)
+plgamma(q, location = 0, scale = 1, shape = 1,
+        lower.tail = TRUE, log.p = FALSE)
+qlgamma(p, location = 0, scale = 1, shape = 1,
+        lower.tail = TRUE, log.p = FALSE)
 rlgamma(n, location = 0, scale = 1, shape = 1)
 }
 \arguments{
@@ -37,6 +39,12 @@ rlgamma(n, location = 0, scale = 1, shape = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -56,7 +64,7 @@ London: Imperial College Press.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{lgamma1}}, the \pkg{VGAM} family function for
   estimating the one parameter standard log-gamma distribution by maximum
@@ -65,12 +73,14 @@ London: Imperial College Press.
   the appropriate length if necessary.
 
 
+
 }
 \note{
   The \pkg{VGAM} family function \code{\link{lgamma3}} is
   for the three parameter (nonstandard) log-gamma distribution.
 
 
+
 }
 \seealso{
   \code{\link{lgamma1}},
diff --git a/man/lindUC.Rd b/man/lindUC.Rd
index d4ecd8a..dbd2f7e 100644
--- a/man/lindUC.Rd
+++ b/man/lindUC.Rd
@@ -17,7 +17,7 @@
 }
 \usage{
 dlind(x, theta, log = FALSE)
-plind(q, theta)
+plind(q, theta, lower.tail = TRUE, log.p = FALSE)
 rlind(n, theta)
 }
 %qlind(p, theta)
@@ -39,6 +39,12 @@ rlind(n, theta)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -51,7 +57,7 @@ rlind(n, theta)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{lindley}} for details.
 
diff --git a/man/linkfun.Rd b/man/linkfun.Rd
new file mode 100644
index 0000000..8980e94
--- /dev/null
+++ b/man/linkfun.Rd
@@ -0,0 +1,69 @@
+\name{linkfun}
+\alias{linkfun}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Link functions }
+\description{
+  Generic function for returning the link functions of a fitted object.
+
+
+}
+\usage{
+linkfun(object, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{ An object which has parameter link functions.
+
+    
+  }
+  \item{\dots}{ Other arguments fed into the specific
+    methods function of the model.
+
+
+
+  }
+}
+\details{
+  Fitted models in the \pkg{VGAM} package have parameter link functions.
+  This generic function returns these.
+
+
+
+}
+\value{
+  The value returned depends on the specific methods
+  function invoked.
+
+
+}
+%\references{
+%}
+\author{ Thomas W. Yee }
+
+%\note{
+%}
+
+
+\seealso{
+  \code{\link{linkfun.vglm}},
+  \code{\link{multilogit}},
+  \code{\link{vglm}}.
+
+
+}
+
+\examples{
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo)
+coef(fit1, matrix = TRUE)
+linkfun(fit1)
+linkfun(fit1, earg = TRUE)
+
+fit2 <- vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo)
+coef(fit2, matrix = TRUE)
+linkfun(fit2)
+linkfun(fit2, earg = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/linkfun.vglm.Rd b/man/linkfun.vglm.Rd
new file mode 100644
index 0000000..b05150a
--- /dev/null
+++ b/man/linkfun.vglm.Rd
@@ -0,0 +1,83 @@
+\name{linkfun.vglm}
+\alias{linkfun.vglm}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Link Functions for VGLMs }
+\description{
+Returns the link functions, and parameter names,
+for \emph{vector generalized linear models} (VGLMs).
+
+}
+\usage{
+linkfun.vglm(object, earg = FALSE, ...)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{ Object of class \code{"vglm"}, i.e., a VGLM object. }
+  \item{earg}{ Logical.
+    Return the extra arguments associated with each
+    link function? If \code{TRUE}  then a list is returned.
+
+    
+  }
+  \item{\dots}{ Arguments that might be used
+    in the future.
+
+
+  }
+}
+\details{
+  All fitted VGLMs have a link function applied to each parameter.
+  This function returns these, and optionally, the extra
+  arguments associated with them.
+
+
+}
+\value{
+  Usually just a (named) character vector, with the link functions
+  in order.
+  It is named with the parameter names.
+  If \code{earg = TRUE} then a list with the following components.
+  \item{link}{
+    The default output.
+
+
+   }
+   \item{earg}{The extra arguments, in order.
+
+
+  }
+}
+%\references{
+
+
+%}
+
+\author{ Thomas W. Yee }
+
+\note{ 
+  Presently,  the multinomial logit model has only
+  one link function, \code{\link{multilogit}}, so a warning
+  is not issued for that link.
+  For other models, if the number of link functions does
+  not equal \eqn{M} then a warning may be issued.
+
+
+}
+
+\seealso{
+  \code{\link{linkfun}},
+  \code{\link{multilogit}},
+  \code{\link{vglm}}.
+  
+
+}
+
+\examples{
+fit1 <- vgam(cbind(r1, r2) ~ s(year, df = 3), gev(zero = 2:3), data = venice)
+coef(fit1, matrix = TRUE)
+linkfun(fit1)
+linkfun(fit1, earg = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/linoUC.Rd b/man/linoUC.Rd
index 3ef379f..cf778c1 100644
--- a/man/linoUC.Rd
+++ b/man/linoUC.Rd
@@ -13,8 +13,8 @@
 }
 \usage{
 dlino(x, shape1, shape2, lambda = 1, log = FALSE)
-plino(q, shape1, shape2, lambda = 1)
-qlino(p, shape1, shape2, lambda = 1)
+plino(q, shape1, shape2, lambda = 1, lower.tail = TRUE, log.p = FALSE)
+qlino(p, shape1, shape2, lambda = 1, lower.tail = TRUE, log.p = FALSE)
 rlino(n, shape1, shape2, lambda = 1)
 }
 \arguments{
@@ -31,6 +31,12 @@ rlino(n, shape1, shape2, lambda = 1)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 }
 \value{
   \code{dlino} gives the density,
@@ -54,7 +60,7 @@ rlino(n, shape1, shape2, lambda = 1)
 %}
 
 
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{lino}}, the \pkg{VGAM} family function
   for estimating the parameters, 
diff --git a/man/logit.Rd b/man/logit.Rd
index f0a3b97..66e5723 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -1,6 +1,6 @@
 \name{logit}
 \alias{logit}
-\alias{elogit}
+\alias{extlogit}
 %- Also NEED an '\alias' for EACH other topic documented here.
 \title{ Logit Link Function }
 \description{
@@ -11,8 +11,8 @@
 \usage{
 logit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
       short = TRUE, tag = FALSE)
-elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
-       inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+extlogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
+         inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -25,7 +25,7 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
   \item{bvalue, bminvalue, bmaxvalue}{
   See \code{\link{Links}}.
   These are boundary values.
-  For \code{elogit}, values of \code{theta} less than or equal
+  For \code{extlogit}, values of \code{theta} less than or equal
   to \eqn{A} or greater than or equal to \eqn{B} can be replaced
   by \code{bminvalue} and \code{bmaxvalue}.
 
@@ -38,7 +38,7 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 % (respectively, and if given) before computing the logit.
 
   \item{min, max}{
-  For \code{elogit},
+  For \code{extlogit},
   \code{min} gives \eqn{A}, 
   \code{max} gives \eqn{B}, and for out of range values, 
   \code{bminvalue} and \code{bmaxvalue}.
@@ -60,7 +60,7 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
   \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
 
 
-  The \emph{extended} logit link function \code{elogit} should be used
+  The \emph{extended} logit link function \code{extlogit} should be used
   more generally for parameters that lie in the interval \eqn{(A,B)}, say.
   The formula is
   \deqn{\log((\theta-A)/(B-\theta))}{%
@@ -104,7 +104,7 @@ elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
 \note{
   Numerical instability may occur when \code{theta} is
   close to 1 or 0 (for \code{logit}), or close to \eqn{A} or \eqn{B} for
-  \code{elogit}.
+  \code{extlogit}.
   One way of overcoming this is to use, e.g., \code{bvalue}.
 
 
@@ -136,7 +136,7 @@ logit(p)  # Has NAs
 logit(p, bvalue = .Machine$double.eps)  # Has no NAs
 
 p <- seq(0.9, 2.2, by = 0.1)
-elogit(p, min = 1, max = 2,
+extlogit(p, min = 1, max = 2,
           bminvalue = 1 + .Machine$double.eps,
           bmaxvalue = 2 - .Machine$double.eps)  # Has no NAs
 
@@ -178,9 +178,9 @@ for (d in 0) {
 }
 
 p <- seq(0.21, 0.59, by = 0.01)
-plot(p, elogit(p, min = 0.2, max = 0.6),
+plot(p, extlogit(p, min = 0.2, max = 0.6),
      type = "l", col = "black", ylab = "transformation", xlim = c(0, 1),
-     las = 1, main = "elogit(p, min = 0.2, max = 0.6)")
+     las = 1, main = "extlogit(p, min = 0.2, max = 0.6)")
 par(lwd = 1)
 }
 }
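As a quick illustration of the extended logit formula above (a sketch, assuming VGAM 0.9-7 so that extlogit() is available):

library(VGAM)
theta <- seq(1.1, 1.9, by = 0.2)
all.equal(extlogit(theta, min = 1, max = 2),
          log((theta - 1) / (2 - theta)))  # Agrees with log((theta-A)/(B-theta))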
diff --git a/man/loglapUC.Rd b/man/loglapUC.Rd
index 0afc598..42ab683 100644
--- a/man/loglapUC.Rd
+++ b/man/loglapUC.Rd
@@ -18,9 +18,9 @@
 dloglap(x, location.ald = 0, scale.ald = 1,
         tau = 0.5, kappa = sqrt(tau/(1-tau)), log = FALSE)
 ploglap(q, location.ald = 0, scale.ald = 1,
-        tau = 0.5, kappa = sqrt(tau/(1-tau)))
+        tau = 0.5, kappa = sqrt(tau/(1-tau)), lower.tail = TRUE, log.p = FALSE)
 qloglap(p, location.ald = 0, scale.ald = 1,
-        tau = 0.5, kappa = sqrt(tau/(1-tau)))
+        tau = 0.5, kappa = sqrt(tau/(1-tau)), lower.tail = TRUE, log.p = FALSE)
 rloglap(n, location.ald = 0, scale.ald = 1,
         tau = 0.5, kappa = sqrt(tau/(1-tau)))
 }
@@ -51,6 +51,12 @@ rloglap(n, location.ald = 0, scale.ald = 1,
   \item{log}{
   if \code{TRUE}, probabilities \code{p} are given as \code{log(p)}.
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \details{
@@ -79,7 +85,7 @@ Log-Laplace distributions.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 %\note{
 %  The \pkg{VGAM} family function \code{\link{loglaplace3}}
 %  estimates the three parameters by maximum likelihood estimation.
diff --git a/man/lomaxUC.Rd b/man/lomaxUC.Rd
index 53213da..330331a 100644
--- a/man/lomaxUC.Rd
+++ b/man/lomaxUC.Rd
@@ -12,8 +12,8 @@
 }
 \usage{
 dlomax(x, scale = 1, shape3.q, log = FALSE)
-plomax(q, scale = 1, shape3.q)
-qlomax(p, scale = 1, shape3.q)
+plomax(q, scale = 1, shape3.q, lower.tail = TRUE, log.p = FALSE)
+qlomax(p, scale = 1, shape3.q, lower.tail = TRUE, log.p = FALSE)
 rlomax(n, scale = 1, shape3.q)
 }
 \arguments{
@@ -28,6 +28,12 @@ rlomax(n, scale = 1, shape3.q)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -48,7 +54,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{lomax}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
diff --git a/man/makeham.Rd b/man/makeham.Rd
index 4bdc9e9..5f34a07 100644
--- a/man/makeham.Rd
+++ b/man/makeham.Rd
@@ -16,7 +16,11 @@ makeham(lscale = "loge", lshape = "loge", lepsilon = "loge",
 %- maybe also 'usage' for other objects documented here.
 \arguments{
 
-  \item{nowarning}{ Logical. Suppress a warning? }
+  \item{nowarning}{ Logical. Suppress a warning?
+  Ignored for \pkg{VGAM} 0.9-7 and higher.
+
+
+   }
 
 
   \item{lshape, lscale, lepsilon}{
diff --git a/man/makehamUC.Rd b/man/makehamUC.Rd
index 966b2e0..9ec6789 100644
--- a/man/makehamUC.Rd
+++ b/man/makehamUC.Rd
@@ -16,8 +16,8 @@
 }
 \usage{
 dmakeham(x, scale = 1, shape, epsilon = 0, log = FALSE)
-pmakeham(q, scale = 1, shape, epsilon = 0)
-qmakeham(p, scale = 1, shape, epsilon = 0)
+pmakeham(q, scale = 1, shape, epsilon = 0, lower.tail = TRUE, log.p = FALSE)
+qmakeham(p, scale = 1, shape, epsilon = 0, lower.tail = TRUE, log.p = FALSE)
 rmakeham(n, scale = 1, shape, epsilon = 0)
 }
 \arguments{
@@ -33,6 +33,13 @@ rmakeham(n, scale = 1, shape, epsilon = 0)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
+
   \item{scale, shape}{positive scale and shape parameters. }
   \item{epsilon}{another parameter. Must be non-negative. See below. }
 
@@ -45,7 +52,7 @@ rmakeham(n, scale = 1, shape, epsilon = 0)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{makeham}} for details.
   The default value of \code{epsilon = 0} corresponds
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index bc81677..b0e88ff 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 dmaxwell(x, rate, log = FALSE)
-pmaxwell(q, rate)
-qmaxwell(p, rate)
+pmaxwell(q, rate, lower.tail = TRUE, log.p = FALSE)
+qmaxwell(p, rate, lower.tail = TRUE, log.p = FALSE)
 rmaxwell(n, rate)
 }
 \arguments{
@@ -30,6 +30,12 @@ rmaxwell(n, rate)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -48,7 +54,7 @@ rmaxwell(n, rate)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{maxwell}}, the \pkg{VGAM} family function for
   estimating the (rate) parameter \eqn{a} by maximum likelihood
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
index a653bb2..3e4f41e 100644
--- a/man/nakagamiUC.Rd
+++ b/man/nakagamiUC.Rd
@@ -13,7 +13,7 @@
 }
 \usage{
 dnaka(x, scale = 1, shape, log = FALSE)
-pnaka(q, scale = 1, shape)
+pnaka(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 qnaka(p, scale = 1, shape, ...)
 rnaka(n, scale = 1, shape, Smallno = 1.0e-6)
 }
@@ -54,6 +54,12 @@ rnaka(n, scale = 1, shape, Smallno = 1.0e-6)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 }
 \value{
   \code{dnaka} gives the density,
@@ -63,7 +69,7 @@ rnaka(n, scale = 1, shape, Smallno = 1.0e-6)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{nakagami}} for more details.
 
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index bf43065..3eeacb2 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -76,7 +76,8 @@ nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
   Hilbe, J. M. (2011)
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index 9da72a7..3e8e987 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -126,7 +126,7 @@ cuty <- Cut(y1, breaks = cutpoints)
 \dontrun{ plot(x2, x3, col = cuty, pch = as.character(cuty)) }
 table(cuty) / sum(table(cuty))
 fit <- vglm(cuty ~ x2 + x3, trace = TRUE,
-            cumulative(reverse = TRUE, mv = TRUE,
+            cumulative(reverse = TRUE, multiple.responses = TRUE,
                        parallel = TRUE,
                        link = nbolf(cutpoint = cutpoints[2:3], k = k)))
 head(depvar(fit))
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index 20dbd55..6f90c66 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -12,8 +12,9 @@
 \usage{
 negbinomial(lmu = "loge", lsize = "loge",
             imu = NULL, isize = NULL, probs.y = 0.75,
-            nsimEIM = 100, cutoff = 0.995,
-            Maxiter = 5000, deviance.arg = FALSE, imethod = 1,
+            nsimEIM = 250, cutoff.prob = 0.995,
+            max.qnbinom = 1000, max.chunk.Mb = 20,
+            deviance.arg = FALSE, imethod = 1, gsize = exp((-4):4),
             parallel = FALSE, ishrinkage = 0.95, zero = -2)
 polya(lprob = "logit", lsize = "loge",
       iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
@@ -37,7 +38,8 @@ polyaR(lsize = "loge", lprob = "logit",
   \code{\link[stats:NegBinomial]{rnbinom}} respectively.
   Common alternatives for \code{lsize} are
   \code{\link{negloge}} and
-  \code{\link{reciprocal}}.
+  \code{\link{reciprocal}}, and
+  \code{\link{loglog}} (if \eqn{k > 1}).
 
 
   }
@@ -47,55 +49,102 @@ polyaR(lsize = "loge", lprob = "logit",
   (and/or use \code{imethod}).
   For a \eqn{S}-column response, \code{isize} can be of length \eqn{S}.
   A value \code{NULL} means an initial value for each response is
-  computed internally using a range of values.
+  computed internally using a gridsearch based on \code{gsize}.
   The last argument is ignored if used within \code{\link{cqo}}; see
   the \code{iKvector} argument of \code{\link{qrrvglm.control}} instead.
 
 
   }
-  \item{probs.y}{
-  Passed into the \code{probs} argument
-  of \code{\link[stats:quantile]{quantile}}
-  when \code{imethod = 3} to obtain an initial value for the mean.
-
-
-  }
 
   \item{nsimEIM}{
   This argument is used
   for computing the diagonal element of the
-  \emph{expected information matrix} (EIM) corresponding to \eqn{k}.
+  \emph{expected information matrix} (EIM) corresponding to \eqn{k}
+  based on the \emph{simulated Fisher scoring} (SFS) algorithm.
   See \code{\link{CommonVGAMffArguments}} for more information
-  and the note below.
+  and the notes below.
+  SFS is one of two algorithms for computing the EIM elements
+  (both algorithms may be used on different observations
+  of the same data set).
+  SFS is faster than the exact method when \code{Qmax} is large.
+
 
 
   }
-  \item{cutoff}{
-  Used in the finite series approximation.
-  A numeric which is close to 1 but never exactly 1.
+  \item{cutoff.prob}{
+  Fed into the \code{p} argument
+  of \code{\link[stats:NegBinomial]{qnbinom}}
+  in order to obtain an upper limit for the approximate
+  support of the distribution, called \code{Qmax}, say.
+  Hence the approximate support is \code{0:Qmax}.
+  This argument should be 
+  a numeric and close to 1 but never exactly 1.
   Used to specify how many terms of the infinite series
   for computing the second diagonal element of the
   EIM are actually used.
-  The sum of the probabilites are added until they reach
-  this value or more (but no more than \code{Maxiter}
-  terms allowed).
-  It is like specifying \code{p} in an imaginary function
-  \code{qnegbin(p)}.
+  The closer this argument is to 1, the more accurate the
+  standard errors of the regression coefficients will be.
 
 
-  }
-  \item{Maxiter}{
-  Used in the finite series approximation.
-  Integer. The maximum number of terms allowed when computing
-  the second diagonal element of the EIM.
-  In theory, the value involves an infinite series.
-  If this argument is too small then the value may be inaccurate.
 
+% The probabilities are summed until they reach
+% at least this value.
+% (but no more than \code{Maxiter} terms allowed).
+% Used in the finite series approximation.
+% It is like specifying \code{p} in an imaginary function \code{qnegbin(p)}.
+
+
+  }
+  \item{max.chunk.Mb, max.qnbinom}{
+    \code{max.qnbinom} is used to decide whether an
+    individual observation is eligible
+    to have its EIM computed by the \emph{exact method}.
+    Here, we are concerned with
+  computing the EIM with respect to \eqn{k}.
+  The exact method algorithm operates separately on each response
+  variable,
+  and it constructs a large matrix provided that the number of columns
+  is less than \code{max.qnbinom}.
+  If so, then the computations are done in chunks, so
+  that no more than about \code{max.chunk.Mb} megabytes
+  of memory is used at a time (actually, it is proportional to this amount).
+  To be eligible for this algorithm, an observation must have
+  its \code{cutoff.prob} quantile (the upper limit of its
+  approximate support) less than \code{max.qnbinom}.
+  If you have abundant memory then you might try setting
+  \code{max.chunk.Mb = Inf}, but then the computations might take
+  a very long time.
+  Setting \code{max.chunk.Mb = 0} or \code{max.qnbinom = 0}
+  will force the EIM to be computed using the SFS algorithm only
+  (this \emph{used to be} the default method for \emph{all} the observations).
+  When the fitted values of the model are large and \eqn{k} is small,
+  the computation of the EIM will be costly with respect to time
+  and memory if the exact method is used. Hence the argument
+  \code{max.qnbinom} limits the cost in terms of time.
+
+
+% Thus the number of columns of the matrix can be controlled by
+% the argument \code{cutoff.prob}.
 
   }
+
+
+\item{gsize}{
+  Similar to \code{gsigma} in \code{\link{CommonVGAMffArguments}}.
+
+
+}
+
+% \item{Maxiter}{
+% Used in the finite series approximation.
+% Integer. The maximum number of terms allowed when computing
+% the second diagonal element of the EIM.
+% In theory, the value involves an infinite series.
+% If this argument is too small then the value may be inaccurate.
+
+
+% }
   \item{deviance.arg}{
   Logical.
-
   If \code{TRUE}, the deviance is computed \emph{after} convergence.
   It only works in the NB-2 model.
   It is also necessary to set \code{criterion = "coefficients"}
@@ -139,6 +188,13 @@ polyaR(lsize = "loge", lprob = "logit",
 
 
   }
+  \item{probs.y}{
+  Passed into the \code{probs} argument
+  of \code{\link[stats:quantile]{quantile}}
+  when \code{imethod = 3} to obtain an initial value for the mean.
+
+
+  }
   \item{ishrinkage}{
   How much shrinkage is used when initializing \eqn{\mu}{mu}.
   The value must be between 0 and 1 inclusive, and
@@ -175,7 +231,7 @@ polyaR(lsize = "loge", lprob = "logit",
   mean \eqn{\mu}{mu} and an \emph{index} parameter
   \eqn{k}, both which are positive.
   Specifically, the density of a random variable \eqn{Y} is 
-  \deqn{f(y;\mu,k) ~=~ {y + k - 1 \choose y} \,
+  \deqn{f(y;\mu,k) = {y + k - 1 \choose y} \,
     \left( \frac{\mu}{\mu+k} \right)^y\,
     \left( \frac{k}{k+\mu} \right)^k }{%
     f(y;mu,k) = C_{y}^{y + k - 1}
@@ -197,10 +253,10 @@ polyaR(lsize = "loge", lprob = "logit",
 
 
   For \code{polya} the density is
-  \deqn{f(y;p,k) ~=~ {y + k - 1 \choose y} \,
+  \deqn{f(y;p,k) = {y + k - 1 \choose y} \,
     \left( 1 - p \right)^y\,
     p^k }{%
-    f(y;k,p) = C_{y}^{y + k - 1}
+    f(y;p,k) = C_{y}^{y + k - 1}
     [1 - p]^y p^k}
   where \eqn{y=0,1,2,\ldots},
   and \eqn{k > 0} and \eqn{0 < p < 1}{0 < p < 1}. 
@@ -222,11 +278,13 @@ polyaR(lsize = "loge", lprob = "logit",
   classical GLM framework with one of the parameters being
   of interest and the other treated as a nuisance/scale
   parameter (this is implemented in the \pkg{MASS} library). The
-  \pkg{VGAM} family function \code{negbinomial} treats both
+  \pkg{VGAM} family function \code{negbinomial()} treats both
   parameters on the same footing, and estimates them both
-  by full maximum likelihood estimation. Simulated Fisher
-  scoring is employed as the default (see the \code{nsimEIM}
-  argument).
+  by full maximum likelihood estimation.
+
+
+%  SFS is employed as the default (see the \code{nsimEIM}
+%  argument).
 
 
   The parameters \eqn{\mu}{mu} and \eqn{k} are independent
@@ -237,20 +295,23 @@ polyaR(lsize = "loge", lprob = "logit",
 
 
   These \pkg{VGAM} family functions handle
-  \emph{multivariate} responses, so that a matrix can be
-  used as the response. The number of columns is the number
+  \emph{multiple} responses, so that a response matrix can be
+  inputted. The number of columns is the number
   of species, say, and setting \code{zero = -2} means that
   \emph{all} species have a \eqn{k} equalling a (different)
   intercept only.
 
 
+  
 }
 \section{Warning}{
-  The Poisson model corresponds to \eqn{k} equalling
+  Poisson regression corresponds to \eqn{k} equalling
   infinity.  If the data is Poisson or close to Poisson,
   numerical problems will occur. Possibly choosing a
-  log-log link may help in such cases, otherwise use
+  log-log link may help in such cases, otherwise try
   \code{\link{poissonff}} or \code{\link{quasipoissonff}}.
+  It is possible to fit an NBD whose variance function
+  is similar to that of a quasi-Poisson model; see the NB-1 example below.
 
 
   These functions are fragile; the maximum likelihood
@@ -264,6 +325,20 @@ polyaR(lsize = "loge", lprob = "logit",
   over large values when using this argument.
 
 
+  If one wants to force SFS
+  to be used on all observations, then
+  set \code{max.qnbinom = 0} or \code{max.chunk.Mb = 0}.
+  If one wants to force the exact method
+  to be used for all observations, then
+  set \code{max.qnbinom = Inf}.
+  If the computer has \emph{much} memory, then trying
+  \code{max.chunk.Mb = Inf} may provide a small speed increase.
+  If SFS is used at all, then the \code{@weights} slot of the
+  fitted object will be a matrix;
+  otherwise that slot will be a \code{0 x 0} matrix.
+  
+
+
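The forcing described above can be sketched as follows (a sketch only; argument names are as documented in this version, and the data are simulated):

library(VGAM)
set.seed(123)
ndata <- data.frame(x2 = runif(100))
ndata <- transform(ndata, y = rnbinom(100, mu = exp(2 + x2), size = exp(1)))
fit.sfs   <- vglm(y ~ x2, negbinomial(max.chunk.Mb = 0), data = ndata)   # SFS only
fit.exact <- vglm(y ~ x2, negbinomial(max.qnbinom = Inf), data = ndata)  # Exact only
dim(fit.sfs@weights)    # A nonempty matrix when SFS is used
dim(fit.exact@weights)  # A 0 x 0 matrix when SFS is not used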
   Yet to do: write a family function which uses the methods
   of moments estimator for \eqn{k}.
 
@@ -298,7 +373,8 @@ Fitting the negative binomial distribution to biological data.
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
 
@@ -329,15 +405,23 @@ Fitting the negative binomial distribution to biological data.
   For \code{negbinomial()}
   the diagonal element of the \emph{expected information matrix}
   (EIM) for parameter \eqn{k}
-  involves an infinite series; consequently simulated Fisher scoring
-  (see \code{nsimEIM}) is the default. This algorithm should definitely be
-  used if \code{max(ymat)} is large, e.g., \code{max(ymat) > 300} or there
-  are any outliers in \code{ymat}.
-  A second algorithm involving a finite series approximation can be
-  invoked by setting \code{nsimEIM = NULL}.
-  Then the arguments
-  \code{Maxiter} and
-  \code{cutoff} are pertinent.
+  involves an infinite series; consequently SFS
+  (see \code{nsimEIM}) is used as the backup algorithm only.
+  SFS should be better if \code{max(ymat)} is large,
+  e.g., \code{max(ymat) > 1000},
+  or if there are any outliers in \code{ymat}.
+  The default algorithm involves a finite series approximation
+  to the infinite support \code{0:Inf}
+  (truncated to \code{0:Qmax});
+  the arguments
+  \code{max.chunk.Mb},
+  \code{max.qnbinom} and
+  \code{cutoff.prob} are pertinent.
+
+
+% \code{slope.mu},
+% the arguments \code{Maxiter} and
+% can be invoked by setting \code{nsimEIM = NULL}.
+
 
 
   Regardless of the algorithm used,
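To make the role of cutoff.prob concrete, Qmax for a single observation might be obtained along these lines (a sketch; qnbinom() is from the stats package, and mu = 10, size = 2 are illustrative values):

cutoff.prob <- 0.995
Qmax <- qnbinom(cutoff.prob, mu = 10, size = 2)  # Upper limit of approximate support
Qmax  # The exact method then sums over 0:Qmax for this observation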
@@ -345,13 +429,14 @@ Fitting the negative binomial distribution to biological data.
   outliers or is large in magnitude.
   If convergence failure occurs, try using arguments
   (in recommended decreasing order)
+  \code{max.qnbinom},
   \code{nsimEIM},
+  \code{cutoff.prob},
   \code{ishrinkage},
   \code{imethod},
-  \code{Maxiter}, 
-  \code{cutoff},
   \code{isize},
-  \code{zero}.
+  \code{zero},
+  \code{max.chunk.Mb}.
 
 
   The function \code{negbinomial} can be used by the fast algorithm in
@@ -424,7 +509,8 @@ Fitting the negative binomial distribution to biological data.
   \code{\link{cao}},
   \code{\link{cqo}},
   \code{\link{CommonVGAMffArguments}},
-  \code{\link{simulate.vlm}}.
+  \code{\link{simulate.vlm}},
+  \code{\link[stats:NegBinomial]{qnbinom}}.
 
 
 % \code{\link[MASS]{rnegbin}}.
@@ -432,32 +518,33 @@ Fitting the negative binomial distribution to biological data.
 
 }
 \examples{
-# Example 1: apple tree data
+# Example 1: apple tree data (Bliss and Fisher, 1953)
 appletree <- data.frame(y = 0:7, w = c(70, 38, 17, 10, 9, 3, 2, 1))
 fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree,
-            weights = w, crit = "coef")
+            weights = w, crit = "coef")  # Obtain the deviance
 fit <- vglm(y ~ 1, negbinomial(deviance = TRUE), data = appletree,
             weights = w, half.step = FALSE)  # Alternative method
 summary(fit)
 coef(fit, matrix = TRUE)
-Coef(fit)
+Coef(fit)  # For intercept-only models
 deviance(fit)  # NB2 only; needs 'crit = "coef"' & 'deviance = TRUE' above
 
-# Example 2: simulated data with multivariate response
-ndata <- data.frame(x2 = runif(nn <- 500))
+# Example 2: simulated data with multiple responses
+ndata <- data.frame(x2 = runif(nn <- 300))
 ndata <- transform(ndata, y1 = rnbinom(nn, mu = exp(3+x2), size = exp(1)),
                           y2 = rnbinom(nn, mu = exp(2-x2), size = exp(0)))
 fit1 <- vglm(cbind(y1, y2) ~ x2, negbinomial, data = ndata, trace = TRUE)
 coef(fit1, matrix = TRUE)
 
-# Example 3: large counts so definitely use the nsimEIM argument
-ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(12+x2), size = exp(1)))
+# Example 3: large counts imply that SFS is used
+ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(10+x2), size = exp(1)))
 with(ndata, range(y3))  # Large counts
-fit2 <- vglm(y3 ~ x2, negbinomial(nsimEIM = 100), data = ndata, trace = TRUE)
+fit2 <- vglm(y3 ~ x2, negbinomial, data = ndata, trace = TRUE)
 coef(fit2, matrix = TRUE)
+head(fit2@weights)  # Non-empty; SFS was used
 
 # Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu
-nn <- 1000  # Number of observations
+nn <- 500  # Number of observations
 phi0 <- 10  # Specify this; should be greater than unity
 delta0 <- 1 / (phi0 - 1)
 mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
@@ -490,5 +577,4 @@ summary(glm(y3 ~ x2 + x3, quasipoisson, mydata))$disper  # cf. moment estimator
 \keyword{regression}
 
 
-%y1 = MASS:::rnegbin(n, mu=exp(3+x), theta=exp(1))  # k is theta
-%y2 = MASS:::rnegbin(n, mu=exp(2-x), theta=exp(0))
+
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index eecb3d6..afb482c 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,6 +2,11 @@
 \alias{notdocumentedyet}
 %
 %
+%
+%
+%
+% 201412;
+%\alias{linkfun.vglm}
 % 201408;
 \alias{dlevy}
 \alias{plevy}
@@ -28,9 +33,9 @@
 % 201312;
 % \alias{simulate.vlm}
 % 201311;
-\alias{family.name}
-\alias{family.name.vlm}
-\alias{family.name.vglmff}
+\alias{familyname}
+\alias{familyname.vlm}
+\alias{familyname.vglmff}
 % 201309;
 \alias{I.col}
 \alias{BIC}
diff --git a/man/nparamvglm.Rd b/man/nparamvglm.Rd
new file mode 100644
index 0000000..05d8e34
--- /dev/null
+++ b/man/nparamvglm.Rd
@@ -0,0 +1,103 @@
+\name{nparam.vlm}
+\alias{nparam.vlm}
+\alias{nparam}
+%\alias{nparam.vglm}
+\alias{nparam.vgam}
+\alias{nparam.rrvglm}
+\alias{nparam.qrrvglm}
+\alias{nparam.rrvgam}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Number of Parameters }
+\description{
+  Returns the number of parameters in a fitted model object.
+
+
+}
+\usage{
+        nparam(object, \dots)
+    nparam.vlm(object, dpar = TRUE, \dots)
+   nparam.vgam(object, dpar = TRUE, linear.only = FALSE, \dots)
+ nparam.rrvglm(object, dpar = TRUE, \dots)
+nparam.qrrvglm(object, dpar = TRUE, \dots)
+ nparam.rrvgam(object, dpar = TRUE, \dots)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+  \item{object}{
+  Some \pkg{VGAM} object, for example, having
+  class \code{\link{vglmff-class}}.
+
+
+  }
+  \item{\dots}{
+  Other possible arguments fed into the function.
+
+
+  }
+  \item{dpar}{
+  Logical; should any (estimated) dispersion parameters
+  be counted as parameters?
+
+
+  }
+  \item{linear.only}{
+  Logical; should only the linear (parametric) parameters
+  be counted?
+
+
+  }
+}
+\details{
+  The code was copied from the \code{AIC()} methods functions.
+
+
+
+}
+\value{
+  Returns a numeric value with the corresponding number of parameters.
+  For \code{\link{vgam}} objects, this may be real rather than
+  integer, because the nonlinear degrees of freedom are real-valued.
+
+
+
+}
+\author{T. W. Yee. }
+%\note{
+%  This code has not been checked fully.
+%
+%
+%}
+
+%\references{
+%  Sakamoto, Y., Ishiguro, M., and Kitagawa G. (1986).
+%  \emph{Akaike Information Criterion Statistics}.
+%  D. Reidel Publishing Company.
+%}
+
+\section{Warning }{
+  This code has not been double-checked.
+
+
+}
+
+\seealso{
+  VGLMs are described in \code{\link{vglm-class}};
+  VGAMs are described in \code{\link{vgam-class}};
+  RR-VGLMs are described in \code{\link{rrvglm-class}};
+  \code{\link{AICvlm}}.
+
+
+}
+\examples{
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit1 <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pneumo))
+coef(fit1)
+coef(fit1, matrix = TRUE)
+nparam(fit1)
+(fit2 <- vglm(hits ~ 1, quasipoissonff, weights = ofreq, data = V1))
+coef(fit2)
+coef(fit2, matrix = TRUE)
+nparam(fit2)
+nparam(fit2, dpar = FALSE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/olym.Rd b/man/olym.Rd
index 9802abf..9ea9fa8 100644
--- a/man/olym.Rd
+++ b/man/olym.Rd
@@ -36,15 +36,17 @@ data(olym12)
 % This is a simple two-way contingency table of counts.
 
 }
-\source{
-\url{http://www.associatedcontent.com/article/979484/2008_summer_olympic_medal_count_total.html},
-\url{http://www.london2012.com/medals/medal-count/}.
+% \source{
 
 
-}
+% url{http://www.associatedcontent.com/article/979484/2008_summer_olympic_medal_count_total.html},
+% url{http://www.london2012.com/medals/medal-count/}.
+
+
+% }
 \references{
-  The official English website was/is \url{http://en.beijing2008.cn}
-  and \url{http://www.london2012.com}.
+  The official English website was/is \code{http://en.beijing2008.cn}
+  and \code{http://www.london2012.com}.
   Help from Viet Hoang Quoc is gratefully acknowledged.
 
 
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index dd232d4..d3af56f 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -9,11 +9,16 @@
 
 }
 \usage{
-paralogistic(lshape1.a = "loge", lscale = "loge",
-             ishape1.a = 2, iscale = NULL, zero = NULL)
+paralogistic(lss, lshape1.a = "loge", lscale = "loge",
+                  ishape1.a = 2, iscale = NULL, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
+  
+  
+  }
+
   \item{lshape1.a, lscale}{
   Parameter link functions applied to the
   (positive) shape parameter \code{a} and
@@ -91,9 +96,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \examples{
-pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, paralogistic, data = pdata, trace = TRUE)
-fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.3, iscale = 7),
+pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), scale = exp(2)))
+fit <- vglm(y ~ 1, paralogistic(lss = FALSE), data = pdata, trace = TRUE)
+fit <- vglm(y ~ 1, paralogistic(lss = FALSE, ishape1.a = 2.3, iscale = 7),
             data = pdata, trace = TRUE, epsilon = 1e-8)
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/paralogisticUC.Rd b/man/paralogisticUC.Rd
index 22a37d0..a8e9d7c 100644
--- a/man/paralogisticUC.Rd
+++ b/man/paralogisticUC.Rd
@@ -12,10 +12,10 @@
 
 }
 \usage{
-dparalogistic(x, shape1.a, scale = 1, log = FALSE)
-pparalogistic(q, shape1.a, scale = 1)
-qparalogistic(p, shape1.a, scale = 1)
-rparalogistic(n, shape1.a, scale = 1)
+dparalogistic(x, scale = 1, shape1.a, log = FALSE)
+pparalogistic(q, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE)
+qparalogistic(p, scale = 1, shape1.a, lower.tail = TRUE, log.p = FALSE)
+rparalogistic(n, scale = 1, shape1.a)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -29,6 +29,12 @@ rparalogistic(n, shape1.a, scale = 1)
   If \code{log=TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -47,7 +53,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{paralogistic}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
@@ -67,8 +73,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
 
 }
 \examples{
-pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
-fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.1), data = pdata, trace = TRUE)
+pdata <- data.frame(y = rparalogistic(n = 3000, scale = exp(1), exp(2)))
+fit <- vglm(y ~ 1, paralogistic(lss = FALSE, ishape1.a = 4.1),
+            data = pdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 }
diff --git a/man/paretoIVUC.Rd b/man/paretoIVUC.Rd
index 8ae99e1..7ba2b28 100644
--- a/man/paretoIVUC.Rd
+++ b/man/paretoIVUC.Rd
@@ -28,20 +28,28 @@
 }
 \usage{
 dparetoIV(x, location = 0, scale = 1, inequality = 1, shape = 1, log = FALSE)
-pparetoIV(q, location = 0, scale = 1, inequality = 1, shape = 1)
-qparetoIV(p, location = 0, scale = 1, inequality = 1, shape = 1)
+pparetoIV(q, location = 0, scale = 1, inequality = 1, shape = 1,
+          lower.tail = TRUE, log.p = FALSE)
+qparetoIV(p, location = 0, scale = 1, inequality = 1, shape = 1,
+          lower.tail = TRUE, log.p = FALSE)
 rparetoIV(n, location = 0, scale = 1, inequality = 1, shape = 1)
 dparetoIII(x, location = 0, scale = 1, inequality = 1, log = FALSE)
-pparetoIII(q, location = 0, scale = 1, inequality = 1)
-qparetoIII(p, location = 0, scale = 1, inequality = 1)
+pparetoIII(q, location = 0, scale = 1, inequality = 1,
+           lower.tail = TRUE, log.p = FALSE)
+qparetoIII(p, location = 0, scale = 1, inequality = 1,
+           lower.tail = TRUE, log.p = FALSE)
 rparetoIII(n, location = 0, scale = 1, inequality = 1)
 dparetoII(x, location = 0, scale = 1, shape = 1, log = FALSE)
-pparetoII(q, location = 0, scale = 1, shape = 1)
-qparetoII(p, location = 0, scale = 1, shape = 1)
+pparetoII(q, location = 0, scale = 1, shape = 1,
+          lower.tail = TRUE, log.p = FALSE)
+qparetoII(p, location = 0, scale = 1, shape = 1,
+          lower.tail = TRUE, log.p = FALSE)
 rparetoII(n, location = 0, scale = 1, shape = 1)
 dparetoI(x, scale = 1, shape = 1, log = FALSE)
-pparetoI(q, scale = 1, shape = 1)
-qparetoI(p, scale = 1, shape = 1)
+pparetoI(q, scale = 1, shape = 1,
+         lower.tail = TRUE, log.p = FALSE)
+qparetoI(p, scale = 1, shape = 1,
+         lower.tail = TRUE, log.p = FALSE)
 rparetoI(n, scale = 1, shape = 1)
 }
 \arguments{
@@ -63,6 +71,12 @@ rparetoI(n, scale = 1, shape = 1)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -88,7 +102,7 @@ Fairland, Maryland: International Cooperative Publishing House.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   For the formulas and other details 
   see \code{\link{paretoIV}}.
diff --git a/man/perks.Rd b/man/perks.Rd
index f15cd2b..e8b7205 100644
--- a/man/perks.Rd
+++ b/man/perks.Rd
@@ -15,7 +15,11 @@ perks(lscale = "loge", lshape = "loge",
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{nowarning}{ Logical. Suppress a warning? }
+  \item{nowarning}{ Logical. Suppress a warning?
+  Ignored for \pkg{VGAM} 0.9-7 and higher.
+
+
+   }
 
 
   \item{lscale, lshape}{
diff --git a/man/perksUC.Rd b/man/perksUC.Rd
index c8f3bf2..81b3fa8 100644
--- a/man/perksUC.Rd
+++ b/man/perksUC.Rd
@@ -15,8 +15,8 @@
 }
 \usage{
 dperks(x, scale = 1, shape, log = FALSE)
-pperks(q, scale = 1, shape)
-qperks(p, scale = 1, shape)
+pperks(q, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
+qperks(p, scale = 1, shape, lower.tail = TRUE, log.p = FALSE)
 rperks(n, scale = 1, shape)
 }
 \arguments{
@@ -31,6 +31,13 @@ rperks(n, scale = 1, shape)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+  
+
+  }
+
   \item{shape, scale}{positive shape and scale parameters. }
 
 }
@@ -42,7 +49,7 @@ rperks(n, scale = 1, shape)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{perks}} for details.
 
diff --git a/man/polf.Rd b/man/polf.Rd
index cc519db..0d659c8 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -134,7 +134,7 @@ fit <- vglm(cuty ~ x2 + x3, data = pdata, trace = TRUE,
             cumulative(reverse = TRUE,
                        parallel = TRUE,
                        link = polf(cutpoint = cutpoints[2:3]),
-                       mv = TRUE))
+                       multiple.responses = TRUE))
 head(depvar(fit))
 head(fitted(fit))
 head(predict(fit))
diff --git a/man/posbernoulli.t.Rd b/man/posbernoulli.t.Rd
index 2404f52..4c67249 100644
--- a/man/posbernoulli.t.Rd
+++ b/man/posbernoulli.t.Rd
@@ -153,10 +153,13 @@ capture--recapture experiments.
   \bold{62}, 3--135.
 
 
-Yee, T. W. and Stoklosa, J. and Huggins, R. M. (2014)
+Yee, T. W. and Stoklosa, J. and Huggins, R. M. (2015)
 The \pkg{VGAM} package for capture--recapture data using the
 conditional likelihood.
-In preparation.
+  \emph{Journal of Statistical Software},
+  in press.
+
+% \bold{62}, 3--135.
 
 
 %\emph{Journal of Statistical Software},
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 343613e..e00d174 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -142,7 +142,7 @@ pdata <- transform(pdata, y1 = rposbinom(nn, size = sizev1, prob = prob1),
 with(pdata, table(y1))
 with(pdata, table(y2))
 # Multivariate response
-fit2 <- vglm(cbind(y1, y2) ~ x2, posbinomial(mv = TRUE),
+fit2 <- vglm(cbind(y1, y2) ~ x2, posbinomial(multiple.responses = TRUE),
              trace  = TRUE, data = pdata, weight = cbind(sizev1, sizev2))
 coef(fit2, matrix = TRUE)
 }
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index 1a7a9f6..34f98bd 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -7,13 +7,13 @@
 
 }
 \usage{
-posbinomial(link = "logit", mv = FALSE, parallel = FALSE,
+posbinomial(link = "logit", multiple.responses = FALSE, parallel = FALSE,
             omit.constant = FALSE, p.small = 1e-4, no.warning = FALSE,
             zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{link, mv, parallel, zero}{
+  \item{link, multiple.responses, parallel, zero}{
   Details at \code{\link{CommonVGAMffArguments}}.
 
 
@@ -106,11 +106,11 @@ Drapers Company Research Memoirs.
   \code{\link{binomialff}}.
 
 
-  If \code{mv = TRUE} then each column of the matrix response
+  If \code{multiple.responses = TRUE} then each column of the matrix response
   should be a count (the number of successes), and the
   \code{weights} argument should be a matrix of the same dimension
   as the response containing the number of trials.
-  If \code{mv = FALSE} then the response input should be the same
+  If \code{multiple.responses = FALSE} then the response input should be the same
   as \code{\link{binomialff}}.
 
 
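A minimal sketch of this input format (simulated data; rposbinom() is documented in posbinomUC.Rd, and the names N1, N2 are illustrative):

library(VGAM)
set.seed(1)
N1 <- rep(15, 40);  N2 <- rep(20, 40)  # Numbers of trials
pdata <- data.frame(x2 = runif(40))
pdata <- transform(pdata, y1 = rposbinom(40, size = N1, prob = 0.6),
                          y2 = rposbinom(40, size = N2, prob = 0.4))
fit <- vglm(cbind(y1, y2) ~ x2, posbinomial(multiple.responses = TRUE),
            data = pdata, weights = cbind(N1, N2))
coef(fit, matrix = TRUE)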
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
index 02c62d4..e856dba 100644
--- a/man/posnormUC.Rd
+++ b/man/posnormUC.Rd
@@ -12,8 +12,8 @@
 }
 \usage{
 dposnorm(x, mean = 0, sd = 1, log = FALSE)
-pposnorm(q, mean = 0, sd = 1)
-qposnorm(p, mean = 0, sd = 1)
+pposnorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
+qposnorm(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
 rposnorm(n, mean = 0, sd = 1)
 }
 \arguments{
@@ -23,7 +23,9 @@ rposnorm(n, mean = 0, sd = 1)
   If \code{length(n) > 1} then the length is taken to be the number required.
 
   }
-  \item{mean, sd, log}{ see \code{\link[stats:Normal]{rnorm}}.
+  \item{mean, sd, log, lower.tail, log.p}{
+  see \code{\link[stats:Normal]{rnorm}}.
+
 
   }
 
@@ -53,12 +55,12 @@ rposnorm(n, mean = 0, sd = 1)
 }
 \examples{
 \dontrun{ m <-  0.8; x <- seq(-1, 4, len = 501)
-plot(x, dposnorm(x, m = m), type = "l", ylim = 0:1, las = 1,
+plot(x, dposnorm(x, m = m), type = "l", las = 1, ylim = 0:1,
      ylab = paste("posnorm(m = ", m, ", sd = 1)"), col = "blue",
      main = "Blue is density, orange is cumulative distribution function",
      sub = "Purple lines are the 10,20,...,90 percentiles")
-lines(x, pposnorm(x, m = m), col = "orange")
 abline(h = 0, col = "grey")
+lines(x, pposnorm(x, m = m), col = "orange", type = "l")
 probs <- seq(0.1, 0.9, by = 0.1)
 Q <- qposnorm(probs, m = m)
 lines(Q, dposnorm(Q, m = m), col = "purple", lty = 3, type = "h")
@@ -70,3 +72,8 @@ max(abs(pposnorm(Q, m = m) - probs))  # Should be 0
 \keyword{distribution}
 
 
+% 20150207; bug involving ifelse() picked up for qposnorm().
+
+
+
+
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
index 9228513..8e82d6c 100644
--- a/man/quasibinomialff.Rd
+++ b/man/quasibinomialff.Rd
@@ -9,16 +9,16 @@
 
 }
 \usage{
-quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
-                parallel = FALSE, zero = NULL)
+quasibinomialff(link = "logit", multiple.responses = FALSE,
+                onedpar = !multiple.responses, parallel = FALSE, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
   \item{link}{ Link function. See \code{\link{Links}} for more choices.
 
   }
-  \item{mv}{ 
-   Multivariate response? If \code{TRUE}, then the response is interpreted
+  \item{multiple.responses}{ 
+   Multiple responses? If \code{TRUE}, then the response is interpreted
    as \eqn{M} binary responses, where \eqn{M} is the number of columns
    of the response matrix. In this case, the response matrix should have
    zero/one values only.
@@ -31,7 +31,8 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
 
   }
   \item{onedpar}{ 
-  One dispersion parameter? If \code{mv}, then a separate dispersion
+  One dispersion parameter? If \code{multiple.responses}, then
+  a separate dispersion
   parameter will be computed for each response (column), by default.
   Setting \code{onedpar=TRUE} will pool them so that there is only one
   dispersion parameter to be estimated.
@@ -39,7 +40,8 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
 
   }
   \item{parallel}{ 
-  A logical or formula. Used only if \code{mv} is \code{TRUE}.  This
+  A logical or formula. Used only if \code{multiple.responses}
+  is \code{TRUE}.  This
   argument allows for the parallelism assumption whereby the regression
   coefficients for a variable is constrained to be equal over the \eqn{M}
   linear/additive predictors.
@@ -66,10 +68,11 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
   is more common in practice.
 
 
-  Setting \code{mv=TRUE} is necessary when fitting a Quadratic RR-VGLM
-  (see \code{\link{cqo}}) because the response will be a matrix of
-  \eqn{M} columns (e.g., one column per species). Then there will be
-  \eqn{M} dispersion parameters (one per column of the response).
+  Setting \code{multiple.responses=TRUE} is necessary
+  when fitting a Quadratic RR-VGLM (see \code{\link{cqo}})
+  because the response will be a matrix of \eqn{M} columns
+  (e.g., one column per species). Then there will be \eqn{M}
+  dispersion parameters (one per column of the response).
 
 
 }
@@ -92,17 +95,19 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
 }
 \author{ Thomas W. Yee }
 \note{
-  If \code{mv} is \code{FALSE} (the default), then the response can be
-  of one of three formats: a factor (first level taken as success), a
-  vector of proportions of success, or a 2-column matrix (first column =
-  successes) of counts.  The argument \code{weights} in the modelling
-  function can also be specified. In particular, for a general vector
-  of proportions, you will need to specify \code{weights} because the
-  number of trials is needed.
-
-
-  If \code{mv} is \code{TRUE}, then the matrix response can only be of
-  one format: a matrix of 1's and 0's (1=success).
+  If \code{multiple.responses} is \code{FALSE} (the default),
+  then the response can be of one of three formats: a factor
+  (first level taken as success), a vector of proportions of
+  success, or a 2-column matrix (first column = successes)
+  of counts.  The argument \code{weights} in the modelling
+  function can also be specified. In particular, for a
+  general vector of proportions, you will need to specify
+  \code{weights} because the number of trials is needed.
+
+
+  If \code{multiple.responses} is \code{TRUE}, then the
+  matrix response can only be of one format: a matrix of
+  1s and 0s (1=success).
 
 
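Two of the three single-response formats can be sketched as follows (simulated data; both fits should give the same coefficients):

library(VGAM)
set.seed(1)
qdata <- data.frame(x2 = runif(30), ntrials = 20)
qdata <- transform(qdata, succ = rbinom(30, size = ntrials, prob = 0.4))
fit1 <- vglm(cbind(succ, ntrials - succ) ~ x2, quasibinomialff, data = qdata)
fit2 <- vglm(succ / ntrials ~ x2, quasibinomialff, weights = ntrials,
             data = qdata)  # Proportions need 'weights'
all.equal(coef(fit1), coef(fit2))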
   This function is only a front-end to the \pkg{VGAM} family function
diff --git a/man/rayleighUC.Rd b/man/rayleighUC.Rd
index ce47da8..afb925a 100644
--- a/man/rayleighUC.Rd
+++ b/man/rayleighUC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 drayleigh(x, scale = 1, log = FALSE)
-prayleigh(q, scale = 1)
-qrayleigh(p, scale = 1)
+prayleigh(q, scale = 1, lower.tail = TRUE, log.p = FALSE)
+qrayleigh(p, scale = 1, lower.tail = TRUE, log.p = FALSE)
 rrayleigh(n, scale = 1)
 }
 \arguments{
@@ -33,6 +33,12 @@ rrayleigh(n, scale = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -51,7 +57,7 @@ Hoboken, NJ, USA: John Wiley and Sons, Fourth edition.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{rayleigh}}, the \pkg{VGAM} family
   function for estimating the scale parameter \eqn{b} by
diff --git a/man/riceUC.Rd b/man/riceUC.Rd
index a8b8154..47d9341 100644
--- a/man/riceUC.Rd
+++ b/man/riceUC.Rd
@@ -17,8 +17,8 @@
 
 \usage{
 drice(x, sigma, vee, log = FALSE)
-price(q, sigma, vee, lower.tail = TRUE, ...)
-qrice(p, sigma, vee, ...)
+price(q, sigma, vee, lower.tail = TRUE, log.p = FALSE, ...)
+qrice(p, sigma, vee, lower.tail = TRUE, log.p = FALSE, ...)
 rrice(n, sigma, vee)
 }
 
@@ -40,9 +40,9 @@ rrice(n, sigma, vee)
 
 
   }
-  \item{lower.tail}{
-  Logical.
-  If \code{TRUE} then the LHS area, else the RHS area.
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
 
 
   }
@@ -63,7 +63,7 @@ rrice(n, sigma, vee)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{riceff}}, the \pkg{VGAM} family function
   for estimating the two parameters,
diff --git a/man/riceff.Rd b/man/riceff.Rd
index 4d991ef..404fbe3 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -14,7 +14,11 @@ riceff(lsigma = "loge", lvee = "loge", isigma = NULL,
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
-  \item{nowarning}{ Logical. Suppress a warning? }
+  \item{nowarning}{ Logical. Suppress a warning?
+  Ignored for \pkg{VGAM} 0.9-7 and higher.
+
+
+   }
 
 
   \item{lvee, lsigma}{
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index c80763d..2df90b6 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -192,10 +192,11 @@ Regression and ordered categorical variables.
   \code{\link{cqo}} for more details about QRR-VGLMs.
 
 
-  With multivariate binary responses, one must use
-  \code{binomialff(mv = TRUE)} to indicate that the response
-  (matrix) is multivariate. Otherwise, it is interpreted
-  as a single binary response variable.
+  With multiple binary responses, one must use
+  \code{binomialff(multiple.responses = TRUE)} to indicate
+  that the response is a matrix with one response per column.
+  Otherwise, it is interpreted as a single binary response
+  variable.
 
 
 }
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index b78a841..331b56f 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -188,7 +188,9 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
   }
   \item{noWarning, Check.rank, Check.cm.rank}{
     Same as \code{\link{vglm.control}}.
+    Ignored for \pkg{VGAM} 0.9-7 and higher.
 
+  
 
   }
   \item{wzepsilon}{
diff --git a/man/sc.t2UC.Rd b/man/sc.t2UC.Rd
index a087d3a..f186c94 100644
--- a/man/sc.t2UC.Rd
+++ b/man/sc.t2UC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 dsc.t2(x, location = 0, scale = 1, log = FALSE)
-psc.t2(q, location = 0, scale = 1, log = FALSE)
-qsc.t2(p, location = 0, scale = 1)
+psc.t2(q, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
+qsc.t2(p, location = 0, scale = 1, lower.tail = TRUE, log.p = FALSE)
 rsc.t2(n, location = 0, scale = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -40,6 +40,12 @@ rsc.t2(n, location = 0, scale = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:TDist]{pt}}
+  or \code{\link[stats:TDist]{qt}}.
+
+
+  }
 }
 \details{
   A Student-t distribution with 2 degrees of freedom and
@@ -59,7 +65,7 @@ rsc.t2(n, location = 0, scale = 1)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 
 %\note{ 
 %}
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index c3a19e0..1e83ac3 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -7,11 +7,16 @@
   Singh-Maddala distribution.
 }
 \usage{
-sinmad(lshape1.a = "loge", lscale = "loge", lshape3.q = "loge",
-       ishape1.a = NULL, iscale = NULL, ishape3.q = 1, zero = NULL)
+sinmad(lss, lshape1.a = "loge", lscale = "loge", lshape3.q = "loge",
+            ishape1.a = NULL, iscale = NULL, ishape3.q = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
+  \item{lss}{ See \code{\link{CommonVGAMffArguments}} for important information.
+
+
+  }
+
   \item{lshape1.a, lscale, lshape3.q}{
   Parameter link functions applied to the
   (positive) parameters \code{a}, \code{scale}, and \code{q}.
@@ -100,9 +105,11 @@ Hoboken, NJ, USA: Wiley-Interscience.
 }
 
 \examples{
-sdata <- data.frame(y = rsinmad(n = 1000, exp(1), exp(2), exp(0)))
-fit <- vglm(y ~ 1, sinmad, data = sdata, trace = TRUE)
-fit <- vglm(y ~ 1, sinmad(ishape1.a = exp(1)), data = sdata, trace = TRUE)
+sdata <- data.frame(y = rsinmad(n = 1000, shape1 = exp(1),
+                    scale = exp(2), shape3 = exp(0)))
+fit <- vglm(y ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE)
+fit <- vglm(y ~ 1, sinmad(lss = FALSE, ishape1.a = exp(1)),
+            data = sdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 Coef(fit)
 summary(fit)
@@ -114,13 +121,13 @@ sdata <- data.frame(y1 = rbeta(1000, 6, 6))
 # hist(with(sdata, y1))
 if (FALSE) {
 # This fails
-  fit1 <- vglm(y1 ~ 1, sinmad, data = sdata, trace = TRUE)
-  fit1 <- vglm(y1 ~ 1, sinmad, data = sdata, trace = TRUE, maxit = 6,
-               crit = "coef")
+  fit1 <- vglm(y1 ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE)
+  fit1 <- vglm(y1 ~ 1, sinmad(lss = FALSE), data = sdata, trace = TRUE,
+               maxit = 6, crit = "coef")
   Coef(fit1)
 }
 # Try this remedy:
-fit2 <- vglm(y1 ~ 1, sinmad(ishape3.q = 3, lshape3.q = "loglog"),
+fit2 <- vglm(y1 ~ 1, sinmad(lss = FALSE, ishape3.q = 3, lshape3.q = "loglog"),
              data = sdata, trace = TRUE, stepsize = 0.05, maxit = 99)
 coef(fit2, matrix = TRUE)
 Coef(fit2)
diff --git a/man/sinmadUC.Rd b/man/sinmadUC.Rd
index 049475c..35db9b5 100644
--- a/man/sinmadUC.Rd
+++ b/man/sinmadUC.Rd
@@ -13,10 +13,10 @@
 
 }
 \usage{
-dsinmad(x, shape1.a, scale = 1, shape3.q, log = FALSE)
-psinmad(q, shape1.a, scale = 1, shape3.q)
-qsinmad(p, shape1.a, scale = 1, shape3.q)
-rsinmad(n, shape1.a, scale = 1, shape3.q)
+dsinmad(x, scale = 1, shape1.a, shape3.q, log = FALSE)
+psinmad(q, scale = 1, shape1.a, shape3.q, lower.tail = TRUE, log.p = FALSE)
+qsinmad(p, scale = 1, shape1.a, shape3.q, lower.tail = TRUE, log.p = FALSE)
+rsinmad(n, scale = 1, shape1.a, shape3.q)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -30,6 +30,12 @@ rsinmad(n, shape1.a, scale = 1, shape3.q)
   If \code{log = TRUE} then the logarithm of the density is returned.
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -49,7 +55,7 @@ Hoboken, NJ: Wiley-Interscience.
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{sinmad}}, which is the \pkg{VGAM} family function
   for estimating the parameters by maximum likelihood estimation.
@@ -69,8 +75,8 @@ Hoboken, NJ: Wiley-Interscience.
 
 }
 \examples{
-sdata <- data.frame(y = rsinmad(n = 3000, exp(1), exp(2), exp(1)))
-fit <- vglm(y ~ 1, sinmad(ishape1.a = 2.1), data = sdata,
+sdata <- data.frame(y = rsinmad(n = 3000, scale = exp(2), shape1 = exp(1), exp(1)))
+fit <- vglm(y ~ 1, sinmad(lss = FALSE, ishape1.a = 2.1), data = sdata,
             trace = TRUE, crit = "coef")
 coef(fit, matrix = TRUE)
 Coef(fit)
diff --git a/man/slashUC.Rd b/man/slashUC.Rd
index 2efb709..1e8b8b7 100644
--- a/man/slashUC.Rd
+++ b/man/slashUC.Rd
@@ -12,7 +12,8 @@
 }
 \usage{
 dslash(x, mu = 0, sigma = 1, log = FALSE, smallno = .Machine$double.eps*1000)
-pslash(q, mu = 0, sigma = 1, very.negative = -10000)
+pslash(q, mu = 0, sigma = 1, very.negative = -10000,
+       lower.tail = TRUE, log.p = FALSE)
 rslash(n, mu = 0, sigma = 1)
 }
 %- maybe also 'usage' for other objects documented here.
@@ -55,6 +56,12 @@ rslash(n, mu = 0, sigma = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 }
 \details{
   See \code{\link{slash}}, the \pkg{VGAM} family function
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
index 93e69ce..ad1308e 100644
--- a/man/tikuvUC.Rd
+++ b/man/tikuvUC.Rd
@@ -14,8 +14,8 @@
 }
 \usage{
 dtikuv(x, d, mean = 0, sigma = 1, log = FALSE)
-ptikuv(q, d, mean = 0, sigma = 1)
-qtikuv(p, d, mean = 0, sigma = 1, ...)
+ptikuv(q, d, mean = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE)
+qtikuv(p, d, mean = 0, sigma = 1, lower.tail = TRUE, log.p = FALSE, ...)
 rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
 }
 \arguments{
@@ -53,6 +53,12 @@ rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -63,7 +69,7 @@ rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{tikuv}} for more details.
 
diff --git a/man/tobitUC.Rd b/man/tobitUC.Rd
index ba4c3a8..ea2e6a8 100644
--- a/man/tobitUC.Rd
+++ b/man/tobitUC.Rd
@@ -14,7 +14,8 @@
 dtobit(x, mean = 0, sd = 1, Lower = 0, Upper = Inf, log = FALSE)
 ptobit(q, mean = 0, sd = 1, Lower = 0, Upper = Inf,
        lower.tail = TRUE, log.p = FALSE)
-qtobit(p, mean = 0, sd = 1, Lower = 0, Upper = Inf)
+qtobit(p, mean = 0, sd = 1, Lower = 0, Upper = Inf,
+       lower.tail = TRUE, log.p = FALSE)
 rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
 }
 \arguments{
@@ -48,43 +49,62 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
   for estimating the parameters, 
   for details.
   Note that the density at \code{Lower} and \code{Upper} is the
-  value of \code{\link[stats:Normal]{dnorm}} evaluated there plus
-  the area to the left/right of that point too.
-  Thus there are two spikes; see the example below.
+  area to the left of \code{Lower} and to the right of \code{Upper},
+  respectively.
+  Thus there are two spikes (though smaller in value than
+  previously documented); see the example below.
+  Consequently, \code{dtobit(Lower) + dtobit(Upper)} plus the area
+  in between equals unity.
+
+  
+
+
+% 20141223; this is old:
+% Note that the density at \code{Lower} and \code{Upper} is the
+% value of \code{\link[stats:Normal]{dnorm}} evaluated there plus
+% the area to the left/right of that point too.
+
+
 
 }
 %\note{
 
 %}
 \seealso{
-  \code{\link{tobit}}.
+  \code{\link{tobit}},
+  \code{\link[stats:Normal]{rnorm}}.
 
 
 }
 \examples{
-\dontrun{ m <- 0.5; x <- seq(-2, 4, len = 501)
-Lower <- -1; Upper <- 2.5
-plot(x, ptobit(x, m = m, Lower = Lower, Upper = Upper),
+mu <- 0.5; x <- seq(-2, 4, by = 0.01)
+Lower <- -1; Upper <- 2.0
+
+integrate(dtobit, lower = Lower, upper = Upper,
+          mean = mu, Lower = Lower, Upper = Upper)$value +
+dtobit(Lower, mean = mu, Lower = Lower, Upper = Upper) +
+dtobit(Upper, mean = mu, Lower = Lower, Upper = Upper)  # Adds to unity
+
+\dontrun{ 
+plot(x, ptobit(x, m = mu, Lower = Lower, Upper = Upper),
      type = "l", ylim = 0:1, las = 1, col = "orange",
-     ylab = paste("ptobit(m = ", m, ", sd = 1, Lower =", Lower,
+     ylab = paste("ptobit(m = ", mu, ", sd = 1, Lower =", Lower,
                   ", Upper =", Upper, ")"),
      main = "Orange is cumulative distribution function; blue is density",
      sub = "Purple lines are the 10,20,...,90 percentiles")
 abline(h = 0)
-lines(x, dtobit(x, m = m, Lower = Lower, Upper = Upper), col = "blue")
+lines(x, dtobit(x, m = mu, Lower = Lower, Upper = Upper), col = "blue")
 
 probs <- seq(0.1, 0.9, by = 0.1)
-Q <- qtobit(probs, m = m, Lower = Lower, Upper = Upper)
-lines(Q, ptobit(Q, m = m, Lower = Lower, Upper = Upper),
+Q <- qtobit(probs, m = mu, Lower = Lower, Upper = Upper)
+lines(Q, ptobit(Q, m = mu, Lower = Lower, Upper = Upper),
       col = "purple", lty = "dashed", type = "h")
-lines(Q, dtobit(Q, m = m, Lower = Lower, Upper = Upper),
+lines(Q, dtobit(Q, m = mu, Lower = Lower, Upper = Upper),
       col = "darkgreen", lty = "dashed", type = "h")
 abline(h = probs, col = "purple", lty = "dashed")
-max(abs(ptobit(Q, m = m, Lower = Lower, Upper = Upper) - probs))  # Should be 0
+max(abs(ptobit(Q, m = mu, Lower = Lower, Upper = Upper) - probs))  # Should be 0
 
-endpts <- c(Lower, Upper)  # Endpoints have a spike
-lines(endpts, dtobit(endpts, m = m, Lower = Lower, Upper = Upper),
-      col = "blue", lwd = 2, type = "h")
+endpts <- c(Lower, Upper)  # Endpoints hold point masses (drawn as spikes)
+lines(endpts, dtobit(endpts, m = mu, Lower = Lower, Upper = Upper),
+      col = "blue", lwd = 3, type = "h")
 }
 }
 \keyword{distribution}
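
The new qtobit() arguments can be exercised similarly (a minimal
sketch, assuming VGAM 0.9-7; the probabilities are chosen away from
the point masses at Lower and Upper):

library(VGAM)
probs <- seq(0.2, 0.8, by = 0.2)
max(abs(qtobit(probs, Lower = -1, Upper = 2) -
        qtobit(1 - probs, Lower = -1, Upper = 2, lower.tail = FALSE)))  # ~ 0
max(abs(qtobit(probs, Lower = -1, Upper = 2) -
        qtobit(log(probs), Lower = -1, Upper = 2, log.p = TRUE)))       # ~ 0
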
diff --git a/man/triangle.Rd b/man/triangle.Rd
index a9317f1..afdb1cc 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -9,7 +9,7 @@
 }
 \usage{
 triangle(lower = 0, upper = 1,
-         link = elogit(min = 0, max = 1), itheta = NULL)
+         link = extlogit(min = 0, max = 1), itheta = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -92,8 +92,8 @@ World Scientific: Singapore.
   Arguments \code{lower} and \code{upper} and \code{link} must match.
   For example, setting
   \code{lower = 0.2} and \code{upper = 4} and
-  \code{link = elogit(min = 0.2, max = 4.1)} will result in an error.
-  Ideally \code{link = elogit(min = lower, max = upper)}
+  \code{link = extlogit(min = 0.2, max = 4.1)} will result in an error.
+  Ideally \code{link = extlogit(min = lower, max = upper)}
   ought to work but it does not (yet)!
   Minimal error checking is done for this deficiency.
 
@@ -131,5 +131,5 @@ Coef(fit)  # The MLE is the 3rd order statistic, which is 0.3.
 
 
 % 20130603: yettodo: fix up so ideally
-%        link = elogit(min = lower, max = upper), itheta = NULL)
+%        link = extlogit(min = lower, max = upper), itheta = NULL)
 % works.
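
A matching fit with the renamed link might look as follows (a minimal
sketch, assuming VGAM 0.9-7; tdata is illustrative, and min/max are
written out literally because extlogit(min = lower, max = upper) does
not work yet):

library(VGAM)
tdata <- data.frame(y = rtriangle(n = 300, theta = 0.4))
fit <- vglm(y ~ 1, triangle(link = extlogit(min = 0, max = 1)), data = tdata)
Coef(fit)  # Should be near 0.4
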
diff --git a/man/triangleUC.Rd b/man/triangleUC.Rd
index 2cb4e75..65475ca 100644
--- a/man/triangleUC.Rd
+++ b/man/triangleUC.Rd
@@ -13,8 +13,8 @@
 }
 \usage{
 dtriangle(x, theta, lower = 0, upper = 1, log = FALSE)
-ptriangle(q, theta, lower = 0, upper = 1)
-qtriangle(p, theta, lower = 0, upper = 1)
+ptriangle(q, theta, lower = 0, upper = 1, lower.tail = TRUE, log.p = FALSE)
+qtriangle(p, theta, lower = 0, upper = 1, lower.tail = TRUE, log.p = FALSE)
 rtriangle(n, theta, lower = 0, upper = 1)
 }
 \arguments{
@@ -35,6 +35,12 @@ rtriangle(n, theta, lower = 0, upper = 1)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -48,7 +54,7 @@ rtriangle(n, theta, lower = 0, upper = 1)
 %\references{
 %
 %}
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
   See \code{\link{triangle}}, the \pkg{VGAM} family function
   for estimating the parameter \eqn{\theta}{theta} by
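
A round-trip check of the new arguments (a minimal sketch, assuming
VGAM 0.9-7):

library(VGAM)
pp <- seq(0.1, 0.9, by = 0.2)
qq <- qtriangle(pp, theta = 0.4)
max(abs(ptriangle(qq, theta = 0.4) - pp))                            # ~ 0
max(abs(ptriangle(qq, theta = 0.4, lower.tail = FALSE) - (1 - pp)))  # ~ 0
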
diff --git a/man/truncparetoUC.Rd b/man/truncparetoUC.Rd
index fd06ba4..cc94efd 100644
--- a/man/truncparetoUC.Rd
+++ b/man/truncparetoUC.Rd
@@ -14,7 +14,7 @@
 }
 \usage{
 dtruncpareto(x, lower, upper, shape, log = FALSE)
-ptruncpareto(q, lower, upper, shape)
+ptruncpareto(q, lower, upper, shape, lower.tail = TRUE, log.p = FALSE)
 qtruncpareto(p, lower, upper, shape)
 rtruncpareto(n, lower, upper, shape)
 }
@@ -31,6 +31,12 @@ rtruncpareto(n, lower, upper, shape)
 
 
   }
+  \item{lower.tail, log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 
 }
 \value{
@@ -52,7 +58,7 @@ rtruncpareto(n, lower, upper, shape)
 
 
 }
-\author{ T. W. Yee }
+\author{ T. W. Yee and Kai Huang }
 \details{
 
   See \code{\link{truncpareto}}, the \pkg{VGAM} family function
@@ -84,11 +90,11 @@ lines(qq, dtruncpareto(qq, low = lower, upp = upper, shape = kay),
 lines(xx, ptruncpareto(xx, low = lower, upp = upper, shape = kay),
       col = "orange") }
 pp <- seq(0.1, 0.9, by = 0.1)
-qq <- qtruncpareto(pp, low = lower, upp = upper, shape = kay)
+qq <- qtruncpareto(pp, lower = lower, upper = upper, shape = kay)
 
-ptruncpareto(qq, low = lower, upp = upper, shape = kay)
-qtruncpareto(ptruncpareto(qq, low = lower, upp = upper, shape = kay),
-         low = lower, upp = upper, shape = kay) - qq  # Should be all 0
+ptruncpareto(qq, lower = lower, upper = upper, shape = kay)
+qtruncpareto(ptruncpareto(qq, lower = lower, upper = upper, shape = kay),
+         lower = lower, upper = upper, shape = kay) - qq  # Should be all 0
 }
 \keyword{distribution}
 
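The log.p argument obeys the usual identity (a minimal sketch,
assuming VGAM 0.9-7):

library(VGAM)
xx <- seq(1.5, 8, by = 0.5)
max(abs(ptruncpareto(xx, lower = 1, upper = 9, shape = 2, log.p = TRUE) -
        log(ptruncpareto(xx, lower = 1, upper = 9, shape = 2))))  # ~ 0
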
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 2de37f4..16b923d 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -5,6 +5,18 @@
 %
 %
 %
+% 201412
+\alias{nparam,ANY-method}
+\alias{nparam,vlm-method}
+\alias{nparam,qrrvglm-method}
+\alias{nparam,rrvgam-method}
+\alias{nparam,vgam-method}
+\alias{nparam,vglm-method}
+\alias{nparam,rrvglm-method}
+\alias{linkfun,ANY-method}
+\alias{linkfun,vglm-method}
+%
+%
 % 201407
 \alias{concoef,ANY-method}
 \alias{concoef,rrvgam-method}
@@ -23,9 +35,9 @@
 \alias{simulate,vlm-method}
 %
 % 20131104
-\alias{family.name,ANY-method}
-\alias{family.name,vlm-method}
-\alias{family.name,vglmff-method}
+\alias{familyname,ANY-method}
+\alias{familyname,vlm-method}
+\alias{familyname,vglmff-method}
 % 20130903
 \alias{BIC,ANY-method}
 \alias{BIC,vlm-method}
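
The newly registered nparam() and linkfun() methods can be exercised
on any fitted model, e.g. (a minimal sketch, assuming VGAM 0.9-7 and
its pneumo data; pdata is illustrative):

library(VGAM)
pdata <- transform(pneumo, let = log(exposure.time))
fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, data = pdata)
nparam(fit)   # Number of estimated parameters
linkfun(fit)  # The link function(s) of the linear predictors
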
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 3664de0..8ec8b59 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -253,7 +253,7 @@ pfit$sigma
 
 # Fit two species simultaneously 
 fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
-             binomialff(mv = TRUE), data = hunua)
+             binomialff(multiple.responses = TRUE), data = hunua)
 coef(fit2, matrix = TRUE)  # Not really interpretable 
 \dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
 
@@ -270,9 +270,11 @@ zdata <- transform(zdata, y = rbinom(nn, 1, 0.5))
 zdata <- transform(zdata, subS = runif(nn) < 0.7)
 sub.zdata <- subset(zdata, subS)  # Use this instead
 if (FALSE)
-  fit4a <- vgam(cbind(y, y) ~ s(x2, df = 2), binomialff(mv = TRUE), 
+  fit4a <- vgam(cbind(y, y) ~ s(x2, df = 2),
+                binomialff(multiple.responses = TRUE), 
                 data = zdata, subset = subS)  # This fails!!!
-fit4b <- vgam(cbind(y, y) ~ s(x2, df = 2), binomialff(mv = TRUE), 
+fit4b <- vgam(cbind(y, y) ~ s(x2, df = 2),
+              binomialff(multiple.responses = TRUE), 
               data = sub.zdata)  # This succeeds!!!
 }
 \keyword{models}
diff --git a/man/vglm.Rd b/man/vglm.Rd
index 49009c1..4820cd1 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -431,6 +431,7 @@ The \code{VGAM} Package.
   \code{coef.vlm},
   \code{\link{constraints.vlm}},
   \code{\link{hatvaluesvlm}},
+  \code{\link{linkfun.vglm}},
   \code{\link{predictvglm}},
   \code{summary.vglm},
   \code{AIC.vglm},
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index 1ae6c93..f81fe52 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -7,7 +7,7 @@
   von Mises distribution by maximum likelihood estimation.
 }
 \usage{
-vonmises(llocation = elogit(min = 0, max = 2 * pi), lscale = "loge",
+vonmises(llocation = extlogit(min = 0, max = 2 * pi), lscale = "loge",
          ilocation = NULL, iscale = NULL, imethod = 1, zero = NULL)
 }
 %- maybe also 'usage' for other objects documented here.
diff --git a/man/vsmooth.spline.Rd b/man/vsmooth.spline.Rd
index 9380a58..906e3c1 100644
--- a/man/vsmooth.spline.Rd
+++ b/man/vsmooth.spline.Rd
@@ -7,11 +7,12 @@
 }
 \usage{
 vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
-               all.knots = FALSE, iconstraint = diag(M),
-               xconstraint = diag(M), 
-               constraints = list("(Intercepts)" = diag(M), x = diag(M)), 
-               var.arg = FALSE, scale.w = TRUE, nk = NULL,
-               control.spar = list())
+               i.constraint = diag(M),
+               x.constraint = diag(M), 
+               constraints = list("(Intercepts)" = i.constraint,
+                                  x = x.constraint), 
+               all.knots = FALSE, var.arg = FALSE, scale.w = TRUE,
+               nk = NULL, control.spar = list())
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -83,14 +84,14 @@ but if \code{n <= 40} then all the unique values of \code{x} are used.
 
 
 }
-  \item{iconstraint}{ A \code{M}-row constraint matrix for the
+  \item{i.constraint}{ An \code{M}-row constraint matrix for the
 intercepts. It must be of full column rank.
 By default, the constraint matrix for the intercepts is the
 \code{M} by \code{M} identity matrix, meaning no constraints.
 
 
 }
-  \item{xconstraint}{ A \code{M}-row constraint matrix for \code{x}. 
+  \item{x.constraint}{ An \code{M}-row constraint matrix for \code{x}. 
 It must be of full column rank.
By default, the constraint matrix for \code{x} is the
 \code{M} by \code{M} identity matrix, meaning no constraints.
@@ -98,7 +99,7 @@ By default, the constraint matrix for the intercepts is the
 
 }
   \item{constraints}{ 
-An alternative to specifying \code{iconstraint} and \code{xconstraint},
+An alternative to specifying \code{i.constraint} and \code{x.constraint},
 this is a list with two components corresponding to the
 intercept and \code{x} respectively. They must both be a
 \code{M}-row constraint matrix with full column rank.
@@ -212,7 +213,8 @@ plot(fit)  # The 1st and 3rd functions do not differ by a constant
 }
 
 mat <- matrix(c(1,0,1, 0,1,0), 3, 2)
-(fit2 <- vsmooth.spline(x, y, w = ww, df = 5, iconstr = mat, xconstr = mat))
+(fit2 <- vsmooth.spline(x, y, w = ww, df = 5, i.constr = mat,
+                        x.constr = mat))
 # The 1st and 3rd functions do differ by a constant:
 mycols <- c("orange", "blue", "orange")
 \dontrun{ plot(fit2, lcol = mycols, pcol = mycols, las = 1) }
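
The renamed arguments may also be written out in full, reusing the
objects from the example above (a minimal sketch):

fit2c <- vsmooth.spline(x, y, w = ww, df = 5,
                        i.constraint = mat, x.constraint = mat)
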
diff --git a/man/weibullR.Rd b/man/weibullR.Rd
index 88e18f1..053a427 100644
--- a/man/weibullR.Rd
+++ b/man/weibullR.Rd
@@ -222,8 +222,8 @@ Concerns about Maximum Likelihood Estimation for
 \examples{
 wdata <- data.frame(x2 = runif(nn <- 1000))  # Complete data
 wdata <- transform(wdata,
-            y1 = rweibull(nn, shape = exp(1 + x2), scale = exp(-2)),
-            y2 = rweibull(nn, shape = exp(2 - x2), scale = exp( 1)))
+            y1 = rweibull(nn, shape = exp(1), scale = exp(-2 + x2)),
+            y2 = rweibull(nn, shape = exp(2), scale = exp( 1 - x2)))
 fit <- vglm(cbind(y1, y2) ~ x2, weibullR, data = wdata, trace = TRUE)
 coef(fit, matrix = TRUE)
 vcov(fit)
diff --git a/man/yulesimonUC.Rd b/man/yulesimonUC.Rd
index 78be631..755897c 100644
--- a/man/yulesimonUC.Rd
+++ b/man/yulesimonUC.Rd
@@ -12,7 +12,7 @@
 }
 \usage{
 dyules(x, rho, log = FALSE)
-pyules(q, rho)
+pyules(q, rho, log.p = FALSE)
 ryules(n, rho)
 }
 %qyules(p, rho)
@@ -33,6 +33,12 @@ ryules(n, rho)
 
   }
   \item{log}{logical; if TRUE, the logarithm is returned. }
+  \item{log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
+
+
+  }
 }
 \details{
   See \code{\link{yulesimon}}, the \pkg{VGAM} family function
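
A minimal check of the new log.p argument (assumes VGAM 0.9-7):

library(VGAM)
qq <- 1:5
max(abs(pyules(qq, rho = 2, log.p = TRUE) - log(pyules(qq, rho = 2))))  # ~ 0
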
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index bfa64dc..7b1ff86 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -168,7 +168,8 @@ for counts with extra zeros.
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
 }
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index fe0c925..06999c4 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -138,7 +138,8 @@ A Bayesian analysis of zero-inflated generalized Poisson model.
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
 
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index 3ffd17f..307ca9d 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -11,10 +11,11 @@
 \usage{
 zibinomial(lpstr0 = "logit", lprob = "logit",
            type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
-           ipstr0 = NULL, zero = NULL, mv = FALSE, imethod = 1)
+           ipstr0 = NULL, zero = NULL, multiple.responses = FALSE, imethod = 1)
 zibinomialff(lprob = "logit", lonempstr0 = "logit",
              type.fitted = c("mean", "pobs0", "pstr0", "onempstr0"),
-             ionempstr0 = NULL, zero = 2, mv = FALSE, imethod = 1)
+             ionempstr0 = NULL, zero = 2, multiple.responses = FALSE,
+             imethod = 1)
 }
 %- maybe also 'usage' for other objects documented here.
 \arguments{
@@ -62,10 +63,12 @@ zibinomialff(lprob = "logit", lonempstr0 = "logit",
 % See \code{\link{CommonVGAMffArguments}} for more information.
 
 % }
-  \item{mv}{ 
-  Logical. Currently it must be \code{FALSE} to mean the function does
-  not handle multivariate responses. This is to remain compatible with
-  the same argument in \code{\link{binomialff}}.
+  \item{multiple.responses}{ 
+  Logical. Currently it must be \code{FALSE}, meaning the
+  function does not handle multivariate responses. The
+  argument exists only to remain compatible with the same
+  argument in \code{\link{binomialff}}.
+
 
   }
   \item{zero, imethod}{ 
@@ -158,7 +161,7 @@ Fitting and interpreting occupancy models.
 % the \code{misc} slot has a component called \code{pobs0} which is the
 % estimate of the probability of an observed 0, i.e., \eqn{P(Y=0)}.
 % This family function currently cannot handle a multivariate
-% response (only \code{mv = FALSE} can be handled).
+% response (only \code{multiple.responses = FALSE} can be handled).
 
 % 20130316; adding this:
   Estimated probabilities of a structural zero and an 
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
index 2045fae..0b0c45a 100644
--- a/man/zipfUC.Rd
+++ b/man/zipfUC.Rd
@@ -12,7 +12,7 @@
 }
 \usage{
 dzipf(x, N, s, log = FALSE)
-pzipf(q, N, s)
+pzipf(q, N, s, log.p = FALSE)
 }
 \arguments{
   \item{x, q}{vector of quantiles.}
@@ -31,7 +31,12 @@ pzipf(q, N, s)
 
 
   }
+  \item{log.p}{
+  Same meaning as in \code{\link[stats:Normal]{pnorm}}
+  or \code{\link[stats:Normal]{qnorm}}.
 
+
+  }
 }
 \value{
   \code{dzipf} gives the density, and
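
And likewise for pzipf() (a minimal sketch, assuming VGAM 0.9-7):

library(VGAM)
max(abs(pzipf(1:4, N = 10, s = 0.5, log.p = TRUE) -
        log(pzipf(1:4, N = 10, s = 0.5))))  # ~ 0
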
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 821a7dc..25794ae 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -160,7 +160,8 @@ zipoissonff(llambda = "loge", lonempstr0 = "logit",
 
   Yee, T. W. (2014)
   Reduced-rank vector generalized linear models with two linear predictors.
-  \emph{Computational Statistics and Data Analysis}.
+  \emph{Computational Statistics and Data Analysis},
+  \bold{71}, 889--902.
 
 
 }
diff --git a/vignettes/categoricalVGAM.Rnw b/vignettes/categoricalVGAM.Rnw
deleted file mode 100644
index 8394144..0000000
--- a/vignettes/categoricalVGAM.Rnw
+++ /dev/null
@@ -1,2325 +0,0 @@
-\documentclass[article,shortnames,nojss]{jss}
-\usepackage{thumbpdf}
-%% need no \usepackage{Sweave.sty}
-
-\SweaveOpts{engine=R,eps=FALSE}
-%\VignetteIndexEntry{The VGAM Package for Categorical Data Analysis}
-%\VignetteDepends{VGAM}
-%\VignetteKeywords{categorical data analysis, Fisher scoring, iteratively reweighted least squares, multinomial distribution, nominal and ordinal polytomous responses, smoothing, vector generalized linear and additive models, VGAM R package}
-%\VignettePackage{VGAM}
-
-%% new commands
-\newcommand{\sVLM}{\mbox{\scriptsize VLM}}
-\newcommand{\sformtwo}{\mbox{\scriptsize F2}}
-\newcommand{\pr}{\mbox{$P$}}
-\newcommand{\logit}{\mbox{\rm logit}}
-\newcommand{\bzero}{{\bf 0}}
-\newcommand{\bone}{{\bf 1}}
-\newcommand{\bid}{\mbox{\boldmath $d$}}
-\newcommand{\bie}{\mbox{\boldmath $e$}}
-\newcommand{\bif}{\mbox{\boldmath $f$}}
-\newcommand{\bix}{\mbox{\boldmath $x$}}
-\newcommand{\biy}{\mbox{\boldmath $y$}}
-\newcommand{\biz}{\mbox{\boldmath $z$}}
-\newcommand{\biY}{\mbox{\boldmath $Y$}}
-\newcommand{\bA}{\mbox{\rm \bf A}}
-\newcommand{\bB}{\mbox{\rm \bf B}}
-\newcommand{\bC}{\mbox{\rm \bf C}}
-\newcommand{\bH}{\mbox{\rm \bf H}}
-\newcommand{\bI}{\mbox{\rm \bf I}}
-\newcommand{\bX}{\mbox{\rm \bf X}}
-\newcommand{\bW}{\mbox{\rm \bf W}}
-\newcommand{\bY}{\mbox{\rm \bf Y}}
-\newcommand{\bbeta}{\mbox{\boldmath $\beta$}}
-\newcommand{\boldeta}{\mbox{\boldmath $\eta$}}
-\newcommand{\bmu}{\mbox{\boldmath $\mu$}}
-\newcommand{\bnu}{\mbox{\boldmath $\nu$}}
-\newcommand{\diag}{ \mbox{\rm diag} }
-\newcommand{\Var}{ \mbox{\rm Var} }
-\newcommand{\R}{{\textsf{R}}}
-\newcommand{\VGAM}{\pkg{VGAM}}
-
-
-\author{Thomas W. Yee\\University of Auckland}
-\Plainauthor{Thomas W. Yee}
-
-\title{The \pkg{VGAM} Package for Categorical Data Analysis}
-\Plaintitle{The VGAM Package for Categorical Data Analysis}
-
-\Abstract{
-  Classical categorical regression models such as the multinomial logit and
-  proportional odds models are shown to be readily handled by the  vector
-  generalized linear and additive model (VGLM/VGAM) framework. Additionally,
-  there are natural extensions, such as reduced-rank VGLMs for
-  dimension reduction, and allowing covariates that have values
-  specific to each linear/additive predictor,
-  e.g., for consumer choice modeling. This article describes some of the
-  framework behind the \pkg{VGAM} \R{} package, its usage and implementation
-  details.
-}
-\Keywords{categorical data analysis, Fisher scoring,
-  iteratively reweighted least squares,
-  multinomial distribution, nominal and ordinal polytomous responses,
-  smoothing, vector generalized linear and additive models,
-  \VGAM{} \R{} package}
-\Plainkeywords{categorical data analysis, Fisher scoring,
-  iteratively reweighted least squares, multinomial distribution,
-  nominal and ordinal polytomous responses, smoothing,
-  vector generalized linear and additive models, VGAM R package}
-
-\Address{
-  Thomas W. Yee \\
-  Department of Statistics \\
-  University of Auckland, Private Bag 92019 \\
-  Auckland Mail Centre \\
-  Auckland 1142, New Zealand \\
-  E-mail: \email{t.yee at auckland.ac.nz}\\
-  URL: \url{http://www.stat.auckland.ac.nz/~yee/}
-}
-
-
-\begin{document}
-
-
-<<echo=FALSE, results=hide>>=
-library("VGAM")
-library("VGAMdata")
-ps.options(pointsize = 12)
-options(width = 72, digits = 4)
-options(SweaveHooks = list(fig = function() par(las = 1)))
-options(prompt = "R> ", continue = "+")
-@
-
-
-% ----------------------------------------------------------------------
-\section{Introduction}
-\label{sec:jsscat.intoduction}
-
-
-This is a \pkg{VGAM} vignette for categorical data analysis (CDA)
-based on \cite{Yee:2010}.
-Any subsequent features (especially non-backward compatible ones)
-will appear here.
-
-The subject of CDA is concerned with
-analyses where the response is categorical regardless of whether
-the explanatory variables are continuous or categorical. It is a
-very frequent form of data. Over the years several CDA regression
-models for polytomous responses have become popular, e.g., those
-in Table \ref{tab:cat.quantities}. Not surprisingly, the models
-are interrelated: their foundation is the multinomial distribution
-and consequently they share similar and overlapping properties which
-modellers should know and exploit. Unfortunately, software has been
-slow to reflect their commonality and this makes analyses unnecessarily
-difficult for the practitioner on several fronts, e.g., using different
-functions/procedures to fit different models, which does not aid the
-understanding of their connections.
-
-
-This historical misfortune can be seen by considering \R{} functions
-for CDA. From the Comprehensive \proglang{R} Archive Network
-(CRAN, \url{http://CRAN.R-project.org/}) there is \texttt{polr()}
-\citep[in \pkg{MASS};][]{Venables+Ripley:2002} for a proportional odds
-model and \texttt{multinom()}
-\citep[in \pkg{nnet};][]{Venables+Ripley:2002} for the multinomial
-logit model. However, both of these can be considered `one-off'
-modeling functions rather than providing a unified offering for CDA.
-The function \texttt{lrm()} \citep[in \pkg{rms};][]{Harrell:2009}
-has greater functionality: it can fit the proportional odds model
-(and the forward continuation ratio model upon preprocessing). Neither
-\texttt{polr()} nor \texttt{lrm()} appears able to fit the nonproportional
-odds model. There are non-CRAN packages too, such as the modeling
-function \texttt{nordr()} \citep[in \pkg{gnlm};][]{gnlm:2007}, which can fit
-the proportional odds, continuation ratio and adjacent categories models;
-however it calls \texttt{nlm()} and the user must supply starting values.
-In general these \R{} \citep{R} modeling functions are not modular
-and often require preprocessing and sometimes are not self-starting.
-The implementations can be perceived as scattered and piecemeal
-in nature. Consequently, if the practitioner wishes to fit the models
-of Table \ref{tab:cat.quantities} then there is a need to master several
-modeling functions from several packages each having different syntaxes
-etc. This is a hindrance to efficient CDA.
-
-
- 
-\begin{table}[tt]
-\centering
-\begin{tabular}{|c|c|l|}
-\hline
-Quantity & Notation &
-%Range of $j$ &
-\VGAM{} family function \\
-\hline
-%
-$\pr(Y=j+1) / \pr(Y=j)$ &$\zeta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{acat()} \\
-%
-$\pr(Y=j) / \pr(Y=j+1)$ &$\zeta_{j}^{R}$ &
-%$2,\ldots,M+1$ &
-\texttt{acat(reverse = TRUE)} \\
-%
-$\pr(Y>j|Y \geq j)$ &$\delta_{j}^*$ &
-%$1,\ldots,M$ & 
-\texttt{cratio()} \\
-%
-$\pr(Y<j|Y \leq j)$ &$\delta_{j}^{*R}$ &
-%$2,\ldots,M+1$ &
-\texttt{cratio(reverse = TRUE)} \\
-%
-$\pr(Y\leq j)$ &$\gamma_{j}$ &
-%$1,\ldots,M$ &
-\texttt{cumulative()} \\
-%
-$\pr(Y\geq j)$ &$\gamma_{j}^R$&
-%$2,\ldots,M+1$ &
-\texttt{cumulative(reverse = TRUE)} \\
-%
-$\log\{\pr(Y=j)/\pr(Y=M+1)\}$ & &
-%$1,\ldots,M$ &
-\texttt{multinomial()} \\
-%
-$\pr(Y=j|Y \geq j)$ &$\delta_{j}$ &
-%$1,\ldots,M$ &
-\texttt{sratio()} \\
-%
-$\pr(Y=j|Y \leq j)$ &$\delta_{j}^R$ &
-%$2,\ldots,M+1$ &
-\texttt{sratio(reverse = TRUE)} \\
-%
-\hline
-\end{tabular}
-\caption{
-Quantities defined in \VGAM{} for a
-categorical response $Y$ taking values $1,\ldots,M+1$.
-Covariates \bix{} have been omitted for clarity.
-The LHS quantities are $\eta_{j}$
-or $\eta_{j-1}$ for $j=1,\ldots,M$ (not reversed)
-and $j=2,\ldots,M+1$ (if reversed), respectively.
-All models are estimated by minimizing the deviance.
-All except for \texttt{multinomial()} are suited to ordinal $Y$.
-\label{tab:cat.quantities}
-}
-\end{table}
- 
-
-
-
-\proglang{SAS} \citep{SAS} does not fare much better than \R. Indeed,
-it could be considered as having an \textit{excess} of options which
-bewilders the non-expert user; there is little coherent overriding
-structure. Its \code{proc logistic} handles the multinomial logit
-and proportional odds models, as well as exact logistic regression
-\citep[see][which is for Version 8 of \proglang{SAS}]{stok:davi:koch:2000}.
-The fact that the proportional odds model may be fitted by \code{proc
-logistic}, \code{proc genmod} and \code{proc probit} arguably leads
-to possible confusion rather than the making of connections, e.g.,
-\code{genmod} is primarily for GLMs and the proportional odds model is not
-a GLM in the classical \cite{neld:wedd:1972} sense. Also, \code{proc
-phreg} fits the multinomial logit model, and \code{proc catmod} with
-its WLS implementation adds to further potential confusion.
-
-
-This article attempts to show how these deficiencies can be addressed
-by considering the vector generalized linear and additive model
-(VGLM/VGAM) framework, as implemented by the author's \pkg{VGAM}
-package for \R{}. The main purpose of this paper is to demonstrate
-how the framework is very well suited to many `classical' regression
-models for categorical responses, and to describe the implementation and
-usage of \pkg{VGAM} for such. To this end an outline of this article
-is as follows. Section \ref{sec:jsscat.VGLMVGAMoverview} summarizes
-the basic VGLM/VGAM framework. Section \ref{sec:jsscat.vgamff}
-centers on functions for CDA in \VGAM. Given an adequate framework,
-some natural extensions of Section \ref{sec:jsscat.VGLMVGAMoverview} are
-described in Section \ref{sec:jsscat.othermodels}. Users of \pkg{VGAM}
-can benefit from Section \ref{sec:jsscat.userTopics} which shows how
-the software reflects their common theory. Some examples are given in
-Section \ref{sec:jsscat.eg}. Section \ref{sec:jsscat.implementDetails}
-contains selected topics in statistical computing that are
-more relevant to programmers interested in the underlying code.
-Section \ref{sec:jsscat.extnUtil} discusses several utilities and
-extensions needed for advanced CDA modeling, and the article concludes
-with a discussion. This document was run using \pkg{VGAM} 0.7-10
-\citep{yee:VGAM:2010} under \R{} 2.10.0.
-
-
-Some general references for categorical data providing
-background to this article include
-\cite{agre:2010},
-\cite{agre:2013},
-\cite{fahr:tutz:2001},
-\cite{leon:2000},
-\cite{lloy:1999},
-\cite{long:1997},
-\cite{mccu:neld:1989},
-\cite{simo:2003},
-\citet{smit:merk:2013} and
-\cite{tutz:2012}.
-An overview of models for ordinal responses is \cite{liu:agre:2005},
-and a manual for fitting common models found in \cite{agre:2002}
-to polytomous responses with various software is \cite{thom:2009}.
-A package for visualizing categorical data in \R{} is \pkg{vcd}
-\citep{Meyer+Zeileis+Hornik:2006,Meyer+Zeileis+Hornik:2009}.
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{VGLM/VGAM overview}
-\label{sec:jsscat.VGLMVGAMoverview}
-
-
-This section summarizes the VGLM/VGAM framework with a particular emphasis
-toward categorical models since the classes encapsulate many multivariate
-response models in, e.g., survival analysis, extreme value analysis,
-quantile and expectile regression, time series, bioassay data, nonlinear
-least-squares models, and scores of standard and nonstandard univariate
-and continuous distributions. The framework is partially summarized by
-Table \ref{tab:rrvglam.jss.subset}. More general details about VGLMs
-and VGAMs can be found in \cite{yee:hast:2003} and \cite{yee:wild:1996}
-respectively. An informal and practical article connecting the general
-framework with the software is \cite{Rnews:Yee:2008}.
-
-
-
-\subsection{VGLMs}
-\label{sec:wffc.appendixa.vglms}
-
-Suppose the observed response \biy{} is a $q$-dimensional vector.
-VGLMs are defined as a model for which the conditional distribution
-of $\biY$ given explanatory $\bix$ is of the form
-\begin{eqnarray}
-f(\biy | \bix ; \bB, \phi)  =  h(\biy, \eta_1,\ldots, \eta_M, \phi)
-\label{gammod}
-\end{eqnarray}
-for some known function $h(\cdot)$, where $\bB = (\bbeta_1 \,
-\bbeta_2 \, \cdots \, \bbeta_M)$ is a $p \times M$ matrix of
-unknown regression coefficients,
-and the $j$th linear predictor is
-\begin{equation}
-\eta_j  =  \eta_j(\bix)  =  \bbeta_j^{\top} \bix  = 
-\sum_{k=1}^p \beta_{(j)k} \, x_k ,  \qquad j=1,\ldots,M.
-\label{gammod2}
-\end{equation}
-Here $\bix=(x_1,\ldots,x_p)^{\top}$ with $x_1 = 1$ if there is an intercept.
-Note that (\ref{gammod2}) means that \textit{all} the parameters may be
-potentially modelled as functions of \bix. It can be seen that VGLMs are
-like GLMs but allow for multiple linear predictors, and they encompass
-models outside the small confines of the exponential family.
-In (\ref{gammod}) the quantity $\phi$ is an optional scaling parameter
-which is included for backward compatibility with common adjustments
-to overdispersion, e.g., with respect to GLMs.
-
-
-In general there is no relationship between $q$ and $M$: it
-depends specifically on the model or distribution to be fitted.
-However, for the `classical' categorical regression models of
-Table \ref{tab:cat.quantities} we have $M=q-1$ since $q$ is the number
-of levels the multi-category response $Y$ has.
-
-
-
-
-
-The $\eta_j$ of VGLMs may be applied directly to parameters of a
-distribution rather than just to a mean for GLMs. A simple example is
-a univariate distribution with a location parameter $\xi$ and a scale
-parameter $\sigma > 0$, where we may take $\eta_1 = \xi$ and $\eta_2 =
-\log\,\sigma$. In general, $\eta_{j}=g_{j}(\theta_{j})$ for some parameter
-link function $g_{j}$ and parameter $\theta_{j}$.
-For example, the adjacent categories models in
-Table \ref{tab:cat.quantities} are ratios of two probabilities, therefore
-a log link of $\zeta_{j}^{R}$ or $\zeta_{j}$ is the default.
-In \VGAM{}, there are currently over a dozen links to choose from, of
-which any can be assigned to any parameter, ensuring maximum flexibility.
-Table \ref{tab:jsscat.links} lists some of them.
-
-
-
-\begin{table}[tt]
-\centering
-%\  ~~~ \par
-\begin{tabular}{|l|l|l|l|}
-\hline
-\qquad \qquad $\boldeta$ & 
-Model & Modeling & Reference \\
- & & function & \\
-%-------------------------------------------------------------
-\hline
-\hline
-%-------------------------------------------------------------
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bB_2^{\top} \bix_{2}\ ( = \bB^{\top} \bix)$ &
-VGLM & \texttt{vglm()}
-&
-\cite{yee:hast:2003} \\[1.6ex]
-%Yee \& Hastie (2003) \\[1.6ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} +
- \sum\limits_{k=p_1+1}^{p_1+p_2} \bH_k \, \bif_{k}^{*}(x_k)$ &
-%\sum\limits_{k=1}^{p_2} \bH_k \, \bif_k(x_k)$ &
-VGAM & \texttt{vgam()}
-&
-\cite{yee:wild:1996} \\[2.2ex]
-%Yee \& Wild (1996) \\[2.2ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-$\bB_1^{\top} \bix_{1} + \bA \, \bnu$ &
-RR-VGLM & \texttt{rrvglm()}
-&
-\cite{yee:hast:2003} \\[1.8ex]
-%Yee \& Hastie (2003) \\[1.8ex]
-%-------------------------------------------------------------
-\hline
- &&&\\[-1.1ex]
-See \cite{yee:hast:2003} &
-Goodman's RC & \texttt{grc()}
-&
-%\cite{yee:hast:2003} \\[1.8ex]
-\cite{good:1981} \\[1.8ex]
-%-------------------------------------------------------------
-\hline
-\end{tabular}
-\caption{
-Some of 
-the package \VGAM{} and
-its framework.
-The vector of latent variables $\bnu = \bC^{\top} \bix_2$
-where
-$\bix^{\top} = (\bix_1^{\top}, \bix_2^{\top})$.
-\label{tab:rrvglam.jss.subset}
-}
-%\medskip
-\end{table}
-
-
-
-
-
-
-VGLMs are estimated using iteratively reweighted least squares (IRLS) 
-which is particularly suitable for categorical models
-\citep{gree:1984}.
-All models in this article have a log-likelihood
-\begin{equation}
-\ell  =  \sum_{i=1}^n \, w_i \, \ell_i
-\label{eq:log-likelihood.VGAM}
-\end{equation}
-where the $w_i$ are known positive prior weights.
-Let $\bix_i$ denote the explanatory vector for the $i$th observation,
-for $i=1,\dots,n$.
-Then one can write
-\begin{eqnarray}
-\boldeta_i &=& \boldeta(\bix_i)  = 
-\left(
-\begin{array}{c}
-\eta_1(\bix_i) \\
-\vdots \\
-\eta_M(\bix_i)
-\end{array} \right)  = 
-\bB^{\top} \bix_i  =  
-\left(
-\begin{array}{c}
-\bbeta_1^{\top} \bix_i \\
-\vdots \\
-\bbeta_M^{\top} \bix_i
-\end{array} \right)
-\nonumber
-\\
-&=& 
-\left(
-\begin{array}{cccc}
-\beta_{(1)1} & \cdots & \beta_{(1)p} \\
-\vdots \\
-\beta_{(M)1} & \cdots & \beta_{(M)p} \\
-\end{array} \right)
-\bix_i  = 
-\left(
-\bbeta_{(1)} \; \cdots \; \bbeta_{(p)}
-\right)
-\bix_i .
-\label{eq:lin.pred}
-\end{eqnarray}
-In IRLS,
-an adjusted dependent vector $\biz_i = \boldeta_i + \bW_i^{-1} \bid_i$
-is regressed upon a large (VLM) model matrix, with
-$\bid_i = w_i \, \partial \ell_i / \partial \boldeta_i$.
-The working weights $\bW_i$ here are 
-$w_i \Var(\partial \ell_i / \partial \boldeta_i)$
-(which, under regularity conditions, is equal to
-$-w_i \, E[ \partial^2 \ell_i / (\partial \boldeta_i \,
-\partial \boldeta_i^{\top})]$),
-giving rise to the Fisher scoring algorithm.
-
-
-Let $\bX=(\bix_1,\ldots,\bix_n)^{\top}$ be the usual $n \times p$
-(LM) model matrix
-obtained from the \texttt{formula} argument of \texttt{vglm()}.
-Given $\biz_i$, $\bW_i$ and $\bX{}$ at the current IRLS iteration,
-a weighted multivariate regression is performed.
-To do this, a \textit{vector linear model} (VLM) model matrix 
-$\bX_{\sVLM}$ is formed from $\bX{}$ and $\bH_k$
-(see Section \ref{sec:wffc.appendixa.vgams}).
-This has $nM$ rows and, if there are no constraints, $Mp$ columns.
-Then $\left(\biz_1^{\top},\ldots,\biz_n^{\top}\right)^{\top}$ is regressed
-upon $\bX_{\sVLM}$
-with variance-covariance matrix $\diag(\bW_1^{-1},\ldots,\bW_n^{-1})$.
-This system of linear equations is converted to one large
-WLS fit by premultiplication of the output of
-a Cholesky decomposition of the $\bW_i$.
-
-
-Fisher scoring usually has good numerical stability
-because the $\bW_i$ are positive-definite over a larger
-region of parameter space than with Newton-Raphson. 
-For the categorical models in this article the expected
-information matrices are simpler than the observed
-information matrices, and are easily derived,
-therefore all the families in Table \ref{tab:cat.quantities}
-implement Fisher scoring.
-
-
-
-\subsection{VGAMs and constraint matrices}
-\label{sec:wffc.appendixa.vgams}
-
-
-VGAMs provide additive-model extensions to VGLMs, that is,
-(\ref{gammod2}) is generalized to
-\begin{equation}
-\eta_j(\bix)  =  \beta_{(j)1} +
-\sum_{k=2}^p \; f_{(j)k}(x_k), \qquad j = 1,\ldots, M,
-\label{addmod}
-\end{equation}
-a sum of smooth functions of the individual covariates, just as
-with ordinary GAMs \citep{hast:tibs:1990}. The $\bif_k =
-(f_{(1)k}(x_k),\ldots,f_{(M)k}(x_k))^{\top}$ are centered for uniqueness,
-and are estimated simultaneously using \textit{vector smoothers}.
-VGAMs are thus a visual data-driven method that is well suited to
-exploring data, and they retain the simplicity of interpretation that
-GAMs possess.
-
-
-
-An important concept, especially for CDA, is the idea of
-`constraints-on-the-functions'.
-In practice we often wish to constrain the effect of a covariate to
-be the same for some of the $\eta_j$ and to have no effect for others.
-We shall see below that this constraints idea is important
-for several categorical models because of a popular parallelism assumption.
-As a specific example, for VGAMs we may wish to take
-\begin{eqnarray*}
-\eta_1 & = & \beta_{(1)1} + f_{(1)2}(x_2) + f_{(1)3}(x_3), \\
-\eta_2 & = & \beta_{(2)1} + f_{(1)2}(x_2),
-\end{eqnarray*}
-so that $f_{(1)2} \equiv f_{(2)2}$ and $f_{(2)3} \equiv 0$.
-For VGAMs, we can represent these models using
-\begin{eqnarray}
-\boldeta(\bix) & = & \bbeta_{(1)} + \sum_{k=2}^p \, \bif_k(x_k)
-\ =\ \bH_1 \, \bbeta_{(1)}^* + \sum_{k=2}^p \, \bH_k \, \bif_k^*(x_k)
-\label{eqn:constraints.VGAM}
-\end{eqnarray}
-where $\bH_1,\bH_2,\ldots,\bH_p$ are known full-column rank
-\textit{constraint matrices}, $\bif_k^*$ is a vector containing a
-possibly reduced set of component functions and $\bbeta_{(1)}^*$ is a
-vector of unknown intercepts. With no constraints at all, $\bH_1 =
-\bH_2 = \cdots = \bH_p = \bI_M$ and $\bbeta_{(1)}^* = \bbeta_{(1)}$.
-Like the $\bif_k$, the $\bif_k^*$ are centered for uniqueness.
-For VGLMs, the $\bif_k$ are linear so that
-\begin{eqnarray}
-{\bB}^{\top} &=&
-\left(
-\bH_1 \bbeta_{(1)}^*
- \;
-\Bigg|
- \;
-\bH_2 \bbeta_{(2)}^*
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\bH_p \bbeta_{(p)}^*
-\right) 
-\label{eqn:lin.coefs4}
-\end{eqnarray}
-for some vectors
-$\bbeta_{(1)}^*,\ldots,\bbeta_{(p)}^*$.
-
-
-The
-$\bX_{\sVLM}$ matrix is constructed from \bX{} and the $\bH_k$ using
-Kronecker product operations.
-For example, with trivial constraints,
-$\bX_{\sVLM} = \bX \otimes \bI_M$.
-More generally,
-\begin{eqnarray}
-\bX_{\sVLM} &=& 
-\left(
-\left( \bX \, \bie_{1} \right) \otimes \bH_1
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{2} \right) \otimes \bH_2
- \;
-\Bigg|
- \;
-\cdots
- \;
-\Bigg|
- \;
-\left( \bX \, \bie_{p} \right) \otimes \bH_p
-\right)
-\label{eqn:X_vlm_Hk}
-\end{eqnarray}
-($\bie_{k}$ is a vector of zeros except for a one in the $k$th position)
-so that 
-$\bX_{\sVLM}$ is $(nM) \times p^*$ where
-$p^* = \sum_{k=1}^{p} \mbox{\textrm{ncol}}(\bH_k)$ is the total number
-of columns of all the constraint matrices.
-Note that $\bX_{\sVLM}$ and \bX{} can be obtained by
-\texttt{model.matrix(vglmObject, type = "vlm")}
-and
-\texttt{model.matrix(vglmObject, type = "lm")}
-respectively.
-Equation (\ref{eqn:lin.coefs4}) focusses on the rows of \bB{} whereas
-(\ref{eq:lin.pred}) focusses on the columns.
-
-
-VGAMs are estimated by applying a modified vector backfitting algorithm
-\citep[cf.][]{buja:hast:tibs:1989} to the $\biz_i$.
-
-
-
-\subsection{Vector splines and penalized likelihood}
-\label{sec:ex.vspline}
-
-If (\ref{eqn:constraints.VGAM}) is estimated using a vector spline (a
-natural extension of the cubic smoothing spline to vector responses)
-then it can be shown that the resulting solution maximizes a penalized
-likelihood; some details are sketched in \cite{yee:step:2007}. In fact,
-knot selection for vector spline follows the same idea as O-splines
-\citep[see][]{wand:orme:2008} in order to lower the computational cost.
-
-
-The usage of \texttt{vgam()} with smoothing is very similar
-to \texttt{gam()} \citep{gam:pack:2009}, e.g.,
-to fit a nonparametric proportional odds model
-\citep[cf. p.179 of][]{mccu:neld:1989}
-to the pneumoconiosis data one could try
-<<label = pneumocat, eval=T>>=
-pneumo <- transform(pneumo, let = log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
-            cumulative(reverse = TRUE, parallel = TRUE), data = pneumo)
-@
-Here, setting \texttt{df = 1} means a linear fit so that
-\texttt{df = 2} affords a little nonlinearity.
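-
-One might then inspect such a fit with the usual generics, e.g.
-(a minimal sketch):
-<<eval=FALSE>>=
-coef(fit, matrix = TRUE)  # Intercepts and any linear components
-plot(fit, se = TRUE)      # Fitted smooths with +- 2 SE bands
-@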
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section[VGAM family functions]{\pkg{VGAM} family functions}
-\label{sec:jsscat.vgamff}
-
-
-
-This section summarizes and comments on the \VGAM{} family functions
-of Table \ref{tab:cat.quantities} for a categorical response variable
-taking values $Y=1,2,\ldots,M+1$. In its most basic invocation, the usage
-entails a trivial change compared to \texttt{glm()}: use \texttt{vglm()}
-instead and assign the \texttt{family} argument a \VGAM{} family function.
-The use of a \VGAM{} family function to fit a specific model is far
-simpler than having a different modeling function for each model.
-Options specific to that model appear as arguments of that \VGAM{} family
-function.
-
-
-
-
-
-While writing \texttt{cratio()} it was found that various authors defined
-the quantity ``continuation ratio'' differently; therefore it became
-necessary to define a ``stopping ratio''. Table \ref{tab:cat.quantities}
-defines these quantities for \VGAM{}.
-
-
-
-
-The multinomial logit model is usually described by choosing the first or
-last level of the factor to be baseline. \VGAM{} chooses the last level
-(Table \ref{tab:cat.quantities}) by default; however, that can be changed
-to any other level by use of the \texttt{refLevel} argument.
-
-
-
-
-If the proportional odds assumption is inadequate then one strategy is
-to try a different link function (see Section \ref{sec:jsscat.links}
-for a selection). Another alternative is to add extra terms such as
-interaction terms into the linear predictor
-\citep[available in the \proglang{S} language;][]{cham:hast:1993}.
-Another is to fit the so-called \textit{partial}
-proportional odds model \citep{pete:harr:1990}
-which \VGAM{} can fit via constraint matrices.
-
-
-
-In the terminology of \cite{agre:2002},
-\texttt{cumulative()} fits the class of \textit{cumulative link models},
-e.g.,
-\texttt{cumulative(link = probit)} is a cumulative probit model.
-For \texttt{cumulative()}
-it was difficult to decide whether
-\texttt{parallel = TRUE}
-or
-\texttt{parallel = FALSE}
-should be the default.
-In fact, the latter is the default (for now).
-Users need to set
-\texttt{cumulative(parallel = TRUE)} explicitly to
-fit a proportional odds model---hopefully this will alert
-them to the fact that they are making
-the proportional odds assumption and prompt them to
-check its validity (\cite{pete:1990}; e.g., through a deviance or
-likelihood ratio test). However the default means numerical problems
-can occur with far greater likelihood.
-Thus there is tension between the two options.
-As a compromise there is now a \VGAM{} family function
-called \texttt{propodds(reverse = TRUE)} which is equivalent to
-\texttt{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}.
-
-
-
-By the way, note that arguments such as 
-\texttt{parallel}
-can handle a slightly more complex syntax.
-A call such as
-\code{parallel = TRUE ~ x2 + x5 - 1} means the parallelism assumption
-is only applied to $X_2$ and $X_5$.
-This might be equivalent to something like
-\code{parallel = FALSE ~ x3 + x4}, i.e., to the remaining
-explanatory variables.
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Other models}
-\label{sec:jsscat.othermodels}
-
-
-Given the VGLM/VGAM framework of Section \ref{sec:jsscat.VGLMVGAMoverview}
-it is found that natural extensions are readily proposed in several
-directions. This section describes some such extensions.
-
-
-
-
-\subsection{Reduced-rank VGLMs}
-\label{sec:jsscat.RRVGLMs}
-
-
-Consider a multinomial logit model where $p$ and $M$ are both large.
-A (not-too-convincing) example might be the data frame \texttt{vowel.test}
-in the package \pkg{ElemStatLearn} \citep[see][]{hast:tibs:buja:1994}.
-The vowel recognition data set involves $q=11$ symbols produced from
-8 speakers with 6 replications of each. The training data comprises
-$10$ input features (not including the intercept) based on digitized
-utterances. A multinomial logit model fitted to these data would
-have $\widehat{\bB}$ comprising $p \times (q-1) = 110$ regression
-coefficients for $n=8\times 6\times 11 = 528$ observations. The ratio
-of $n$ to the number of parameters is small, and it would be good to
-introduce some parsimony into the model.
-
-
-
-A simple and elegant solution is to represent $\widehat{\bB}$ by
-its reduced-rank approximation. To do this, partition $\bix$ into
-$(\bix_1^{\top}, \bix_2^{\top})^{\top}$ and $\bB = (\bB_1^{\top} \;
-\bB_2^{\top})^{\top}$ so that the reduced-rank regression is applied
-to $\bix_2$. In general, \bB{} is a dense matrix of full rank, i.e., rank
-$=\min(M,p)$, and since there are $M \times p$ regression coefficients
-to estimate this is `too' large for some models and/or data sets.
-If we approximate $\bB_2$ by a reduced-rank regression \begin{equation}
-\label{eq:rrr.BAC} \bB_2  =  \bC{} \, \bA^{\top} \end{equation} and if
-the rank $R$ is kept low then this can cut down the number of regression
-coefficients dramatically. If $R=2$ then the results may be biplotted
-(\texttt{biplot()} in \VGAM{}). Here, \bC{} and \bA{} are $p_2 \times R$
-and $M \times R$ respectively, and usually they are `thin'.
-
-
-More generally, the class of \textit{reduced-rank VGLMs} (RR-VGLMs)
-is simply a VGLM where $\bB_2$ is expressed as a product of two thin
-estimated matrices (Table \ref{tab:rrvglam.jss.subset}). Indeed,
-\cite{yee:hast:2003} show that RR-VGLMs are VGLMs with constraint
-matrices that are unknown and estimated. Computationally, this is
-done using an alternating method: in (\ref{eq:rrr.BAC}) estimate \bA{}
-given the current estimate of \bC{}, and then estimate \bC{} given the
-current estimate of \bA{}. This alternating algorithm is repeated until
-convergence within each IRLS iteration.
-
-
-Incidentally, special cases of RR-VGLMs have appeared in the
-literature. For example, an RR-multinomial logit model is known as the
-\textit{stereotype} model \citep{ande:1984}. Another is \cite{good:1981}'s
-RC model (see Section \ref{sec:jsscat.rrr.goodman}), which is a reduced-rank
-multivariate Poisson model. Note that the parallelism assumption of the
-proportional odds model \citep{mccu:neld:1989} can be thought of as a
-type of reduced-rank regression where the constraint matrices are thin
-($\bone_M$, actually) and known.
-
-
-
-The modeling function \texttt{rrvglm()} should work with any \VGAM{}
-family function compatible with \texttt{vglm()}. Of course, its
-applicability should be restricted to models where a reduced-rank
-regression of $\bB_2$ makes sense.
-
-
-
-
-
-
-
-
-
-\subsection[Goodman's R x C association model]{Goodman's $R \times C$ association model}
-\label{sec:jsscat.rrr.goodman}
-
-
-
-
-
-Let $\bY = [(y_{ij})]$ be an $n \times M$ matrix of counts.
-Section 4.2 of \cite{yee:hast:2003} shows that Goodman's RC$(R)$ association
-model \citep{good:1981} fits within the VGLM framework by setting up
-the appropriate indicator variables, structural zeros and constraint
-matrices. Goodman's model fits a reduced-rank type model to \bY{}
-by firstly assuming that $Y_{ij}$ has a Poisson distribution, and that
-\begin{eqnarray}
-\log \, \mu_{ij} &=& \mu + \alpha_{i} + \gamma_{j} + 
-\sum_{k=1}^R a_{ik} \, c_{jk} , 
-\ \ \ i=1,\ldots,n;\ \ j=1,\ldots,M,
-\label{eqn:goodmanrc}
-\end{eqnarray}
-where $\mu_{ij} = E(Y_{ij})$ is the mean of the $i$-$j$ cell, and the
-rank $R$ satisfies $R < \min(n,M)$.
-
-
-The modeling function \texttt{grc()} should work on any two-way
-table \bY{} of counts generated by (\ref{eqn:goodmanrc}) provided
-the number of 0's is not too large. Its usage is quite simple, e.g.,
-\texttt{grc(Ymatrix, Rank = 2)} fits a rank-2 model to a matrix of counts.
-By default a \texttt{Rank = 1} model is fitted.
-
-
-
-
-\subsection{Bradley-Terry models}
-\label{sec:jsscat.brat}
-
-Consider
-an experiment consisting of $n_{ij}$ judges who compare
-pairs of items $T_i$, $i=1,\ldots,M+1$.
-They express their preferences between $T_i$ and $T_j$. 
-Let $N=\sum \sum_{i<j} n_{ij}$ be the total number of pairwise
-comparisons, and assume independence for ratings of the same pair
-by different judges and for ratings of different pairs by the same judge.
-Let $\pi_i$ be the \textit{worth} of item $T_i$,
-\[
-\pr(T_i > T_j)  =  p_{i/ij}  =  \frac{\pi_i}{\pi_i + \pi_j},
-\  \qquad i \neq {j},
-\]
-where ``$T_i>T_j$'' means $i$ is preferred over $j$.
-Suppose that $\pi_i > 0$.
-Let $Y_{ij}$ be the number of times that $T_i$ is preferred
-over $T_j$ in the $n_{ij}$ comparisons of the pairs.
-Then $Y_{ij} \sim {\rm Bin}(n_{ij},p_{i/ij})$.
-This is a Bradley-Terry model (without ties),
-and the \VGAM{} family function is \texttt{brat()}.
-
-
-Maximum likelihood estimation of the parameters $\pi_1,\ldots,\pi_{M+1}$
-involves maximizing
-\[
-\prod_{i<j}^{M+1}
-\left(
-\begin{array}{c}
-n_{ij} \\
-y_{ij}
-\end{array} \right)
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right)^{y_{ij}}
-\left(
-\frac{\pi_j}{\pi_i + \pi_j}
-\right)^{n_{ij}-y_{ij}} .
-\]
-By default, $\pi_{M+1} \equiv 1$ is used for identifiability,
-however, this can be changed very easily.
-Note that one can define 
-linear predictors $\eta_{ij}$ of the form
-\begin{equation}
-\label{eq:bradter.logit}
-\logit 
-\left(
-\frac{\pi_i}{\pi_i + \pi_j}
-\right)  =  \log 
-\left(
-\frac{\pi_i}{\pi_j}
-\right)  =  \lambda_i - \lambda_j .
-\end{equation}
-The \VGAM{} framework can handle the Bradley-Terry model only for
-intercept-only models; it has
-\begin{equation}
-\label{eq:bradter}
-\lambda_j  =  \eta_j  =  \log\, \pi_j = \beta_{(1)j},
-\ \ \ \ j=1,\ldots,M.
-\end{equation}
-
-
-As well as having many applications in the field of preferences,
-the Bradley-Terry model has many uses in modeling `contests' between
-teams $i$ and $j$, where only one of the teams can win in each
-contest (ties are not allowed under the classical model).
-The {packaging} function \texttt{Brat()} can be used to
-convert a square matrix into one that has more columns, to
-serve as input to \texttt{vglm()}.
-For example,
-for journal citation data where a citation of article B
-by article A is a win for article B and a loss for article A.
-On a specific data set,
-<<>>=
-journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-squaremat <- matrix(c(NA, 33, 320, 284,   730, NA, 813, 276,
-                      498, 68, NA, 325,   221, 17, 142, NA), 4, 4)
-dimnames(squaremat) <- list(winner = journal, loser = journal)
-@
-then \texttt{Brat(squaremat)} returns a $1 \times 12$ matrix.
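-
-The model itself may then be fitted as an intercept-only model
-(a minimal sketch):
-<<eval=FALSE>>=
-fit <- vglm(Brat(squaremat) ~ 1, brat, trace = TRUE)
-Coef(fit)  # Estimated worths; the reference item is fixed at unity
-@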
-
-
-
-
-
-
-
-\subsubsection{Bradley-Terry model with ties}
-\label{sec:cat.bratt}
-
-
-The \VGAM{} family function \texttt{bratt()} implements
-a Bradley-Terry model with ties (no preference), e.g.,
-where both $T_i$ and $T_j$ are equally good or bad.
-Here we assume
-\begin{eqnarray*}
- \pr(T_i > T_j) &=& \frac{\pi_i}{\pi_i + \pi_j + \pi_0},
-\ \qquad
- \pr(T_i = T_j)  =  \frac{\pi_0}{\pi_i + \pi_j + \pi_0},
-\end{eqnarray*}
-with $\pi_0 > 0$ as an extra parameter.
-It has 
-\[
-\boldeta=(\log \pi_1,\ldots, \log \pi_{M-1}, \log \pi_{0})^{\top}
-\]
-by default, where there are $M$ competitors and $\pi_M \equiv 1$.
-Like \texttt{brat()}, one can choose a different reference group
-and reference value.
-
-
-Other \R{} packages for the Bradley-Terry model
-include \pkg{BradleyTerry2}
-by H. Turner and D. Firth
-\citep[with and without ties;][]{firth:2005,firth:2008}
-and \pkg{prefmod} \citep{Hatzinger:2009}.
-
-
-
-
-\begin{table}[tt]
-\centering
-\begin{tabular}[small]{|l|c|}
-\hline
-\pkg{VGAM} family function & Independent parameters \\
-\hline
-\texttt{ABO()} & $p, q$ \\
-\texttt{MNSs()} & $m_S, m_s, n_S$ \\
-\texttt{AB.Ab.aB.ab()} & $p$ \\
-\texttt{AB.Ab.aB.ab2()} & $p$ \\
-\texttt{AA.Aa.aa()} & $p_A$ \\
-\texttt{G1G2G3()} & $p_1, p_2, f$ \\
-\hline
-\end{tabular}
-\caption{Some genetic models currently implemented
-and their unique parameters.
-\label{tab:gen.all}
-}
-\end{table}
-
-
-
-
-
-\subsection{Genetic models}
-\label{sec:jsscat.genetic}
-
-
-There are quite a number of population genetic models based on the
-multinomial distribution,
-e.g., \cite{weir:1996}, \cite{lang:2002}.
-Table \ref{tab:gen.all} lists some \pkg{VGAM} family functions for such.
-
-
-
-
-For example the ABO blood group system
-has two independent parameters $p$ and $q$, say.
-Here,
-the blood groups A, B and O form six possible combinations (genotypes)
-consisting of AA, AO, BB, BO, AB, OO
-(see Table \ref{tab:ABO}). A and B are dominant over
-blood type O. Let $p$, $q$ and $r$ be the probabilities
-for A, B and O respectively (so that
-$p+q+r=1$) for a given population. 
-The log-likelihood function is 
-\[
-\ell(p,q) \;=\; n_A\, \log(p^2 + 2pr) + n_B\, \log(q^2 + 2qr) + n_{AB}\,
-\log(2pq) + 2 n_O\, \log(1-p-q),
-\]
-where $r = 1 - p -q$, $p \in (\,0,1\,)$,
-$q \in (\,0,1\,)$, $p+q<1$.
-We let $\boldeta = (g(p), g(r))^{\top}$ where $g$ is the link function.
-Any $g$ from Table \ref{tab:jsscat.links} appropriate for
-a parameter $\theta \in (0,1)$ will do.
-
-
-A toy example where $p=p_A$ and $q=p_B$ is
-<<>>=
-abodat <- data.frame(A = 725, B = 258, AB = 72, O = 1073)
-fit <- vglm(cbind(A, B, AB, O) ~ 1, ABO, data = abodat)
-coef(fit, matrix = TRUE)
-Coef(fit)  # Estimated pA and pB
-@
-The function \texttt{Coef()}, which applies only to intercept-only models,
-inverts $g_{j}(\theta_{j})=\eta_{j}$: it applies
-the inverse link function $g_{j}^{-1}$ to each $\widehat{\eta}_{j}$
-to give $\widehat{\theta}_{j}$.
-
-
-
-
-
-
-
-\begin{table}[tt]
-% Same as Table 14.1 of E-J, and Table 2.6 of Weir 1996
-\begin{center}
-\begin{tabular}{|l|cc|cc|c|c|}
-\hline
-Genotype   & AA  & AO  & BB  &  BO  & AB  &  OO  \\
-Probability&$p^2$&$2pr$&$q^2$&$ 2qr$&$2pq$& $r^2$\\
-Blood group&  A  &  A  &  B  &  B   &  AB &  O \\
-\hline
-\end{tabular}
-\end{center}
-\caption{Probability table for the ABO blood group system.
-Note that $p$ and $q$ are the parameters and $r=1-p-q$.
-\label{tab:ABO}
-}
-\end{table}
-
-
-
-
-
-\subsection{Three main distributions}
-\label{sec:jsscat.3maindist}
-
-\cite{agre:2002} discusses three main distributions for categorical
-variables: binomial, multinomial, and Poisson
-\citep{thom:2009}.
-All these are well-represented in the \VGAM{} package,
-accompanied by variant forms.
-For example,
-there is a \VGAM{} family function named \texttt{mbinomial()}
-which implements a matched binomial
-(suitable for matched case-control studies);
-other offerings include Poisson ordination
-(useful in ecology for multi-species environmental data),
-negative binomial families,
-positive, zero-altered and zero-inflated variants,
-and the bivariate odds ratio model
-\citep[\texttt{binom2.or()}; see Section 6.5.6 of][]{mccu:neld:1989}.
-The latter has an \texttt{exchangeable} argument to allow for an
-exchangeable error structure:
-\begin{eqnarray}
-\bH_1  = 
-\left( \begin{array}{cc}
-1 & 0 \\
-1 & 0 \\
-0 & 1 \\
-\end{array} \right), \qquad
-\bH_k  = 
-\left( \begin{array}{cc}
-1 \\
-1 \\
-0 \\
-\end{array} \right), \quad k=2,\ldots,p,
-\label{eqn:blom.exchangeable}
-\end{eqnarray}
-since, for data $(Y_1,Y_2,\bix)$,
-$\logit \, P\!\left( Y_{j} = 1 \Big{|} \bix \right) = 
-\eta_{j}$ for ${j}=1,2$, and
-$\log \, \psi = \eta_{3}$
-where $\psi$ is the odds ratio,
-and so $\eta_{1}=\eta_{2}$.
-Here, \texttt{binom2.or()} has \texttt{zero = 3} by default, meaning $\psi$ is
-modelled as intercept-only
-(in general, \texttt{zero} may be assigned an integer vector
-such that the value $j$ means $\eta_{j} = \beta_{(j)1}$,
-i.e., the $j$th linear/additive predictor is an intercept-only).
-See the online help for all of these models.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some user-oriented topics}
-\label{sec:jsscat.userTopics}
-
-
-Making the most of \VGAM{} requires an understanding of the general
-VGLM/VGAM framework described in Section \ref{sec:jsscat.VGLMVGAMoverview}.
-In this section we connect elements of that framework with the software.
-Before doing so it is noted that
-a fitted \VGAM{} categorical model has access to the usual
-generic functions, e.g.,
-\texttt{coef()} for
-$\left(\widehat{\bbeta}_{(1)}^{*T},\ldots,\widehat{\bbeta}_{(p)}^{*T}\right)^{\top}$
-(see Equation \ref{eqn:lin.coefs4}),
-\texttt{constraints()} for $\bH_k$,
-\texttt{deviance()} for $2\left(\ell_{\mathrm{max}} - \ell\right)$,
-\texttt{fitted()} for $\widehat{\bmu}_i$,
-\texttt{logLik()} for $\ell$,
-\texttt{predict()} for $\widehat{\boldeta}_i$,
-\texttt{print()},
-\texttt{residuals(..., type = "response")} for $\biy_i - \widehat{\bmu}_i$ etc.,
-\texttt{summary()},
-\texttt{vcov()} for $\widehat{\Var}(\widehat{\bbeta})$,
-etc.
-The methods function for the extractor function
-\texttt{coef()} has an argument \texttt{matrix}
-which, when set \texttt{TRUE}, returns $\widehat{\bB}$
-(see Equation \ref{gammod}) as a $p \times M$ matrix,
-and this is particularly useful for confirming that a fit
-has made a parallelism assumption.
-
-
-
-
-
-
-
-\subsection{Common arguments}
-\label{sec:jsscat.commonArgs}
-
-
-The structure of the unified framework given in
-Section \ref{sec:jsscat.VGLMVGAMoverview}
-appears clearly through
-the pool of common arguments
-shared by the
-\VGAM{} family functions in Table \ref{tab:cat.quantities}.
-In particular,
-\texttt{reverse} and
-\texttt{parallel}
-are prominent with CDA.
-These are merely convenient shortcuts for the argument \texttt{constraints},
-which accepts a named list of constraint matrices $\bH_k$.
-For example, setting
-\texttt{cumulative(parallel = TRUE)} would constrain the coefficients $\beta_{(j)k}$
-in (\ref{gammod2}) to be equal for all $j=1,\ldots,M$,
-each separately for $k=2,\ldots,p$.
-That is, $\bH_k = \bone_M$.
-The argument \texttt{reverse} determines the `direction' of
-the parameter or quantity.
-
-Another argument, not used so much with CDA, is \texttt{zero};
-this accepts an integer vector specifying which $\eta_j$ are to be
-modelled as intercepts only; assigning \texttt{NULL} means none are.
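-As a minimal sketch of these shortcuts (the response matrix \texttt{ymat}
-and data frame \texttt{mydat} are hypothetical):
-\begin{Code}
-# Proportional odds model with reversed cumulative probabilities
-fit.po <- vglm(ymat ~ x2 + x3,
-               cumulative(parallel = TRUE, reverse = TRUE),
-               data = mydat)
-\end{Code}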
-
-
-
-
-
-
-
-
-\subsection{Link functions}
-\label{sec:jsscat.links}
-
-Almost all \VGAM{} family functions
-(one notable exception is \texttt{multinomial()})
-allow, in theory, any link function to be assigned to each $\eta_j$.
-This provides maximum flexibility.
-When a link involves a known parameter, an extra argument is available
-to pass it in.
-For example, \texttt{link = "logoff", earg = list(offset = 1)}
-signifies a log link with a unit offset:
-$\eta_{j} = \log(\theta_{j} + 1)$ for some parameter $\theta_{j}\ (> -1)$.
-The name \texttt{earg} stands for ``extra argument''.
-Table \ref{tab:jsscat.links} lists some links relevant to categorical data.
-While the default gives a reasonable first choice,
-users are encouraged to try different links.
-For example, fitting a binary regression model
-(\texttt{binomialff()}) to the coal miners data set \texttt{coalminers},
-with wheeze as the response, gives a
-nonsignificant regression coefficient for $\beta_{(1)3}$ with a probit link
-but a significant one with a logit link when
-$\eta = \beta_{(1)1} + \beta_{(1)2} \, \mathrm{age} + \beta_{(1)3} \, \mathrm{age}^2$.
-Developers and serious users are encouraged to write and use
-new link functions compatible with \VGAM.
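-As a minimal sketch of the coal miners comparison mentioned above,
-suppose the wheezer and non-wheezer counts \texttt{wheezeY} and
-\texttt{wheezeN} have already been constructed from \texttt{coalminers}
-(that step is assumed here):
-\begin{Code}
-fit.lgt <- vglm(cbind(wheezeY, wheezeN) ~ age + I(age^2),
-                binomialff(link = "logit"),  data = coalminers)
-fit.pbt <- vglm(cbind(wheezeY, wheezeN) ~ age + I(age^2),
-                binomialff(link = "probit"), data = coalminers)
-summary(fit.pbt)  # Compare the Wald statistic for the age^2 coefficient
-\end{Code}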
-
-
-
-
-
-
-\begin{table*}[tt]
-\centering
-\medskip
-\begin{tabular}{|l|c|c|}
-\hline
-Link function & $g(\theta)$ & Range of $\theta$ \\
-\hline
-\texttt{cauchit()} & $\tan(\pi(\theta-\frac12))$ & $(0,1)$ \\
-\texttt{cloglog()} & $\log_e\{-\log_e(1 - \theta)\}$ & $(0,1)$ \\
-\texttt{fisherz()} & 
-$\frac12\,\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\texttt{identity()} & $\theta$ & $(-\infty,\infty)$ \\
-\texttt{logc()} & $\log_e(1 - \theta)$ & $(-\infty,1)$ \\
-\texttt{loge()} & $\log_e(\theta)$ & $(0,\infty)$ \\
-\texttt{logit()} & $\log_e(\theta/(1 - \theta))$ & $(0,1)$ \\
-\texttt{logoff()} & $\log_e(\theta + A)$ & $(-A,\infty)$ \\
-\texttt{probit()} & $\Phi^{-1}(\theta)$ & $(0,1)$ \\
-\texttt{rhobit()} & $\log_e\{(1 + \theta)/(1 - \theta)\}$ & $(-1,1)$ \\
-\hline
-\end{tabular}
-\caption{
-Some \VGAM{} link functions pertinent to this article.
-\label{tab:jsscat.links}
-}
-\end{table*}
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Examples}
-\label{sec:jsscat.eg}
-
-This section illustrates CDA modeling on two
-data sets in order to give a flavour of what is available in the package.
-
-
-
-
-%20130919
-%Note: 
-%\subsection{2008 World Fly Fishing Championships}
-%\label{sec:jsscat.eg.WFFC}
-%are deleted since there are problems with accessing the \texttt{wffc.nc}
-%data etc. since they are now in \pkg{VGAMdata}.
-
-
-
-
-
-
-
-\subsection{Marital status data}
-\label{sec:jsscat.eg.mstatus}
-
-We fit a nonparametric multinomial logit model to data collected by
-a self-administered questionnaire in a large New Zealand
-workforce observational study conducted during 1992--3.
-The data were augmented by a second study consisting of retirees.
-For homogeneity, this analysis is restricted
-to a subset of 6053 European males with no missing values.
-The ages ranged between 16 and 88 years.
-The data can be considered a reasonable representation of the white
-male New Zealand population in the early 1990s, and
-are detailed in \cite{macm:etal:1995} and \cite{yee:wild:1996}.
-We are interested in exploring how $Y=$ marital status varies as a function
-of $x_2=$ age. The nominal response $Y$ has four levels;
-in sorted order, they are divorced or separated, married or partnered,
-single and widower.
-We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
-and will choose the married/partnered (second level) as the reference group
-because the other levels emanate directly from it.
-
-Suppose the data are in a data frame called \texttt{marital.nz}
-and look like
-<<>>=
-head(marital.nz, 4)
-summary(marital.nz)
-@
-We fit the VGAM
-<<>>=
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
-               data = marital.nz)
-@
-
-Once again, let us first check the input.
-<<>>=
-head(depvar(fit.ms), 4)
-colSums(depvar(fit.ms))
-@
-This seems okay.
-
-
-
-
-Now the estimated component functions $\widehat{f}_{(s)2}(x_2)$
-may be plotted with
-<<fig=F>>=
-# Plot output
-mycol <- c("red", "darkgreen", "blue")
-par(mfrow = c(2, 2))
-plot(fit.ms, se = TRUE, scale = 12,
-         lcol = mycol, scol = mycol)
-
-# Plot output overlayed
-#par(mfrow=c(1,1))
-plot(fit.ms, se = TRUE, scale = 12,
-         overlay = TRUE,
-         llwd = 2,
-         lcol = mycol, scol = mycol)
-@
-to produce Figure \ref{fig:jsscat.eg.mstatus}.
-The \texttt{scale} argument is used here to ensure that the $y$-axes have
-a common scale---this makes comparisons between the component functions
-less susceptible to misinterpretation.
-The first three plots are the (centered) $\widehat{f}_{(s)2}(x_2)$ for
-$\eta_1$,
-$\eta_2$,
-$\eta_3$,
-where
-\begin{eqnarray}
-\label{eq:jsscat.eg.nzms.cf}
-\eta_{s}  = 
-\log(\pr(Y={t}) / \pr(Y={2}))  = 
-\beta_{(s)1} + f_{(s)2}(x_2),
-\end{eqnarray}
-$(s,t) = (1,1), (2,3), (3,4)$,
-and $x_2$ is \texttt{age}.
-The last plot shows the smooths overlaid to aid comparison.
-
-
-It may be seen that the $\pm 2$ standard error bands
-about the \texttt{Widowed} group are particularly wide at
-young ages because of a paucity of data, and
-likewise at old ages for the \texttt{Single} group.
-The $\widehat{f}_{(s)2}(x_2)$ appear as one would expect.
-The log relative risk of
-being single relative to being married/partnered drops sharply from
-ages 16 to 40.
-The fitted function for the \texttt{Widowed} group increases
-with \texttt{age} and looks reasonably linear.
-The $\widehat{f}_{(1)2}(x_2)$
-suggests a possible maximum around 50 years old---this
-could indicate the greatest marital conflict occurs during
-the mid-life crisis years!
-
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-mycol <- c("red", "darkgreen", "blue")
- par(mfrow = c(2, 2))
- par(mar = c(4.2, 4.0, 1.2, 2.2) + 0.1)
-plot(fit.ms, se = TRUE, scale = 12,
-         lcol = mycol, scol = mycol)
-
-# Plot output overlaid
-#par(mfrow = c(1, 1))
-plot(fit.ms, se = TRUE, scale = 12,
-         overlay = TRUE,
-         llwd = 2,
-         lcol = mycol, scol = mycol)
-@
-\caption{
-Fitted (and centered) component functions
-$\widehat{f}_{(s)2}(x_2)$
-from the NZ marital status data
-(see Equation \ref{eq:jsscat.eg.nzms.cf}).
-The bottom RHS plot shows the smooths overlaid.
-\label{fig:jsscat.eg.mstatus}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-The methods function for \texttt{plot()} can also plot the
-derivatives of the smooths.
-The call
-<<fig=F>>=
-plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
-@
-results in Figure \ref{fig:jsscat.eg.mstatus.cf.deriv}.
-Once again the $y$-axis scales are commensurate.
-
-\setkeys{Gin}{width=\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=7.2,height=2.4,echo=FALSE>>=
-# Plot output
- par(mfrow = c(1, 3))
- par(mar = c(4.5, 4.0, 0.2, 2.2) + 0.1)
-plot(fit.ms, deriv = 1, lcol = mycol, scale = 0.3)
-@
-\caption{
-Estimated first derivatives of the component functions,
-$\widehat{f'}_{(s)2}(x_2)$,
-from the NZ marital status data
-(see Equation \ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.cf.deriv}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-The derivative for the \texttt{Divorced/Separated} group appears
-linear so that a quadratic component function could be tried.
-Not surprisingly the \texttt{Single} group shows the greatest change;
-also, $\widehat{f'}_{(2)2}(x_2)$ is approximately linear till 50
-and then flat---this suggests one could fit a piecewise quadratic
-function to model that component function up to 50 years.
-The \texttt{Widowed} group appears largely flat.
-We thus fit the parametric model
-<<>>=
-foo <- function(x, elbow = 50)
-  poly(pmin(x, elbow), 2)
-
-clist <- list("(Intercept)" = diag(3),
-             "poly(age, 2)" = rbind(1, 0, 0),
-             "foo(age)"     = rbind(0, 1, 0),
-             "age"          = rbind(0, 0, 1))
-fit2.ms <-
-    vglm(mstatus ~ poly(age, 2) + foo(age) + age,
-         family = multinomial(refLevel = 2),
-         constraints = clist,
-         data = marital.nz)
-@
-Then
-<<>>=
-coef(fit2.ms, matrix = TRUE)
-@
-confirms that one term was used for each component function.
-The plots from
-<<fig=F>>=
-par(mfrow = c(2, 2))
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-are given in Figure \ref{fig:jsscat.eg.mstatus.vglm}
-and resemble
-Figure \ref{fig:jsscat.eg.mstatus}.
-
-
-\setkeys{Gin}{width=0.9\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.6,echo=FALSE>>=
-# Plot output
-par(mfrow=c(2,2))
- par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[1], scol = mycol[1], which.term = 1)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[2], scol = mycol[2], which.term = 2)
-plotvgam(fit2.ms, se = TRUE, scale = 12,
-         lcol = mycol[3], scol = mycol[3], which.term = 3)
-@
-\caption{
-Parametric version of \texttt{fit.ms}: \texttt{fit2.ms}.
-The component functions are now quadratic, piecewise quadratic/zero,
-or linear.
-\label{fig:jsscat.eg.mstatus.vglm}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-It is possible to perform very crude inference based on the heuristic
-theory of a deviance test:
-<<>>=
-deviance(fit.ms) - deviance(fit2.ms)
-@
-is small, so the parametric model seems quite reasonable
-relative to the original nonparametric model.
-Specifically,
-the difference in the number of `parameters' is approximately
-<<>>=
-(dfdiff <- df.residual(fit2.ms) - df.residual(fit.ms))
-@
-which gives an approximate $p$ value of
-<<>>=
-pchisq(deviance(fit.ms) - deviance(fit2.ms), df = dfdiff, lower.tail = FALSE)
-@
-Thus \texttt{fit2.ms} appears quite reasonable.
-
-
-
-
-
-
-
-
-The estimated probabilities of the original fit can be plotted
-against \texttt{age} using
-<<fig=F>>=
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo, ],
-     type = "l", las = 1, lwd = 2, ylim = 0:1,
-     ylab = "Fitted probabilities",
-     xlab = "Age",  # main="Marital status amongst NZ Male Europeans",
-     col = c(mycol[1], "black", mycol[-1])))
-legend(x = 52.5, y = 0.62,  # x = "topright",
-       col = c(mycol[1], "black", mycol[-1]),
-       lty = 1:4,
-       legend = colnames(fit.ms@y), lwd = 2)
-abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
-@
-which gives Figure \ref{fig:jsscat.eg.mstatus.fitted}.
-This shows that 80--90\% of NZ white males
-aged between their early 30s and mid-70s
-were married/partnered.
-The proportion widowed
-started to rise steeply from 70 years onwards but remained below 0.5
-since males die younger than females on average.
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
- par(mfrow = c(1,1))
- par(mar = c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(marital.nz, order(age))
-with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
-     type = "l", las = 1, lwd = 2, ylim = 0:1,
-     ylab = "Fitted probabilities",
-     xlab = "Age",
-     col = c(mycol[1], "black", mycol[-1])))
-legend(x = 52.5, y = 0.62,
-       col = c(mycol[1], "black", mycol[-1]),
-       lty = 1:4,
-       legend = colnames(fit.ms@y), lwd = 2.1)
-abline(v = seq(10, 90, by = 5), h = seq(0, 1, by = 0.1), col = "gray", lty = "dashed")
-@
-\caption{
-Fitted probabilities for each class for the
-NZ male European
-marital status data
-(from Equation \ref{eq:jsscat.eg.nzms.cf}).
-\label{fig:jsscat.eg.mstatus.fitted}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-\subsection{Stereotype model}
-\label{sec:jsscat.eg.grc.stereotype}
-
-We reproduce some of the analyses of \cite{ande:1984} regarding the
-progress of 101 patients with back pain
-using the data frame \texttt{backPain} from \pkg{gnm}
-\citep{Rnews:Turner+Firth:2007,Turner+Firth:2009}.
-The three prognostic variables are
-length of previous attack ($x_1=1,2$),
-pain change ($x_2=1,2,3$) 
-and lordosis ($x_3=1,2$).
-Following him, we treat these as numerical, and standardize and negate them.
-%
-The output
-<<>>=
-# Scale the variables? Yes; the Anderson (1984) paper did (see his Table 6).
-head(backPain, 4)
-summary(backPain)
-backPain <- transform(backPain, sx1 = -scale(x1), sx2 = -scale(x2), sx3 = -scale(x3))
-@
-displays the six ordered categories.
-Now a rank-1 stereotype model can be fitted with
-<<>>=
-bp.rrmlm1 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain)
-@
-Then
-<<>>=
-Coef(bp.rrmlm1)
-@
-are the fitted \bA, \bC{} and $\bB_1$ (see Equation \ref{eq:rrr.BAC}
-and Table \ref{tab:rrvglam.jss.subset}), which agree with his Table 6.
-Here, what are known as ``corner constraints'' are used
-(the $(1,1)$ element of \bA{} is fixed at $1$),
-and by default only the intercepts are not subject to the
-reduced-rank regression.
-The maximized log-likelihood from \textsl{\texttt{logLik(bp.rrmlm1)}}
-is $\Sexpr{round(logLik(bp.rrmlm1), 2)}$.
-The standard errors of each parameter can be obtained by
-\textsl{\texttt{summary(bp.rrmlm1)}}.
-The negative elements of $\widehat{\bC}$ imply the
-latent variable $\widehat{\nu}$ decreases in value with increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}}.
-The elements of $\widehat{\bA}$ tend to decrease, suggesting that
-patients get worse as $\nu$ increases,
-i.e., get better as \textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} increase.
-
-
-
-
-
-
-<<echo=FALSE>>=
-set.seed(123)
-@
-A rank-2 model fitted \textit{with a different normalization}
-<<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, data = backPain, Rank = 2,
-                   Corner = FALSE, Uncor = TRUE)
-@
-produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
-In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
-so that the latent variables are also scaled to have unit variance.
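-This may be checked directly with
-\begin{Code}
-var(lv(bp.rrmlm2))  # Should be (numerically) the 2 x 2 identity matrix
-\end{Code}
-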
-The fit was biplotted
-(rows of $\widehat{\bC}$ plotted as arrows;
- rows of $\widehat{\bA}$ plotted as labels) using
-<<fig=F>>=
-biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
-#      xlim = c(-1, 6), ylim = c(-1.2, 4),  # Use this if not scaled
-       xlim = c(-4.5, 2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
-       chull = TRUE, clty = 2, ccol = "blue")
-@
-to give Figure \ref{fig:jsscat.eg.rrmlm2.backPain}.
-It is interpreted via inner products due to (\ref{eq:rrr.BAC}).
-The different normalization means that the interpretation of $\nu_1$
-and $\nu_2$ has changed, e.g., increasing
-\textsl{\texttt{sx1}},
-\textsl{\texttt{sx2}} and
-\textsl{\texttt{sx3}} results in increasing $\widehat{\nu}_1$ and
-patients improve more.
-Many of the latent variable points $\widehat{\bnu}_i$ are coincident
-due to the discrete nature of the $\bix_i$. The rows of $\widehat{\bA}$
-are centered on the blue labels (rather cluttered unfortunately) and
-do not seem to vary much as a function of $\nu_2$.
-In fact this is confirmed by \cite{ande:1984} who showed a rank-1
-model is to be preferred.
-
-
-
-This example demonstrates the ability to obtain a low-dimensional view
-of higher-dimensional data. The package's website has additional
-documentation including more detailed Goodman's RC and stereotype
-examples.
-
-
-
-
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-\begin{figure}[tt]
-\begin{center}
-<<fig=TRUE,width=8,height=5.3,echo=FALSE>>=
-# Plot output
- par(mfrow=c(1,1))
- par(mar=c(4.5,4.0,0.2,2.2)+0.1)
-
-biplot(bp.rrmlm2, Acol = "blue", Ccol = "darkgreen", scores = TRUE,
-#      xlim = c(-1,6), ylim = c(-1.2,4),  # Use this if not scaled
-       xlim = c(-4.5,2.2), ylim = c(-2.2, 2.2),  # Use this if scaled
-       chull = TRUE, clty = 2, ccol = "blue")
-@
-\caption{
-Biplot of a rank-2 reduced-rank multinomial logit (stereotype) model
-fitted to the back pain data.
-A convex hull surrounds the latent variable scores
-$\widehat{\bnu}_i$
-(whose observation numbers are obscured because of their discrete nature).
-The position of the $j$th row of $\widehat{\bA}$
-is the center of the label ``\texttt{log(mu[,j]/mu[,6])}''.
-\label{fig:jsscat.eg.rrmlm2.backPain}
-}
-\end{center}
-\end{figure}
-
-\setkeys{Gin}{width=0.8\textwidth} % 0.8 is the current default
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Some implementation details}
-\label{sec:jsscat.implementDetails}
-
-This section describes some implementation details of \VGAM{}
-which will be more of interest to the developer than to the casual user.
-
-
-
-\subsection{Common code}
-\label{sec:jsscat.implementDetails.code}
-
-It is good programming practice to write reusable code where possible.
-All the \VGAM{} family functions in Table \ref{tab:cat.quantities}
-process the response in the same way because the same segment of code
-is executed. This offers a degree of uniformity in how input is
-handled, and simplifies software maintenance
-(\cite{altm:jack:2010} enumerate good programming techniques and references).
-As well, the default initial values are computed in the same manner
-based on sample proportions of each level of $Y$.
-
-
-
-
-
-\subsection[Matrix-band format of wz]{Matrix-band format of \texttt{wz}}
-\label{sec:jsscat.implementDetails.mbformat}
-
-The working weight matrices $\bW_i$ may become large for categorical
-regression models. In general, we have to evaluate the $\bW_i$
-for $i=1,\ldots,n$, and naively, this could be held in an \texttt{array} of
-dimension \texttt{c(M, M, n)}. However, since the $\bW_i$ are symmetric
-and positive-definite, it suffices to store only the upper or lower half
-of each matrix.
-
-
-
-The variable \texttt{wz} in \texttt{vglm.fit()}
-stores the working weight matrices $\bW_i$ in 
-a special format called the \textit{matrix-band} format. This
-format comprises an $n \times M^*$ matrix where
-\[
-M^*  =  \sum_{i=1}^{\footnotesize \textit{hbw}} \;
-\left(M-i+1\right)  =  
-\frac12 \, \textit{hbw}\, \left(2\,M - \textit{hbw} +1\right)
-\]
-is the number of columns. Here, \textit{hbw} refers to the
-\textit{half-bandwidth} of the matrix, which is an integer
-between 1 and $M$ inclusive. A diagonal matrix has
-unit half-bandwidth, a tridiagonal matrix has half-bandwidth 2, etc.
-
-
-Suppose $M=4$. Then \texttt{wz} will have up to $M^*=10$ columns
-enumerating the unique elements of $\bW_i$ as follows:
-\begin{eqnarray}
-\bW_i  =  
-\left( \begin{array}{rrrr}
-1 & 5 & 8 & 10 \\
-  & 2 & 6 & 9 \\
-  &   & 3 & 7 \\
-  &   &   & 4 
-\end{array} \right).
-\label{eqn:hbw.eg}
-\end{eqnarray}
-That is, the order is first the diagonal, then the band above that,
-followed by the second band above the diagonal, etc.
-Why is such a format adopted? 
-For this example, if $\bW_i$ is diagonal then only the first 4 columns
-of \texttt{wz} are needed. If $\bW_i$ is tridiagonal then only the
-first 7 columns of \texttt{wz} are needed. 
-If $\bW_i$ \textit{is} banded then \texttt{wz} need not have
-all $\frac12 M(M+1)$ columns; only $M^*$ columns suffice, and the
-rest of the elements of $\bW_i$ are implicitly zero.
-As well as reducing the size of \texttt{wz} itself in most cases, the
-matrix-band format often makes the computation of \texttt{wz} very
-simple and efficient. Furthermore, a Cholesky decomposition of a
-banded matrix will be banded. A final reason is that sometimes we
-want to input $\bW_i$ into \VGAM: if \texttt{wz} is $M \times M \times
-n$ then \texttt{vglm(\ldots, weights = wz)} will result in an error
-whereas it will work if \texttt{wz} is an $n \times M^*$ matrix.
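-As a minimal sketch of this last point (all objects here are
-hypothetical), known diagonal working weight matrices might be
-supplied as
-\begin{Code}
-wz <- matrix(1, nrow = n, ncol = M)  # Unit half-bandwidth: diagonals only
-fit <- vglm(ymat ~ x2, fam = myfamily, weights = wz, data = mydat)
-\end{Code}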
-
-
-
-To facilitate the use of the matrix-band format,
-a few auxiliary functions have been written.
-In particular, there is \texttt{iam()}, which gives the indices
-for an array-to-matrix mapping.
-In the $4\times 4$ example above,
-<<>>=
-iam(NA, NA, M = 4, both = TRUE, diag = TRUE)
-@
-returns the indices for the respective array coordinates for
-successive columns of matrix-band format
-(see Equation \ref{eqn:hbw.eg}).
-If \texttt{diag = FALSE} then the first 4 elements in each vector
-are omitted. Note that the first two arguments of 
-\texttt{iam()} are not used here and have been assigned
-\texttt{NA}s for simplicity.
-For its use on the multinomial logit model, where
-$(\bW_i)_{jj} = w_i\,\mu_{ij} (1-\mu_{ij}),\ j=1,\ldots,M$, and 
-$(\bW_i)_{jk} = -w_i\,\mu_{ij} \mu_{ik},\ j\neq k$,
-this can be programmed succinctly like
-\begin{Code}
-wz <- mu[, 1:M] * (1 - mu[, 1:M])   # The M diagonal elements
-if (M > 1) {
-  index <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
-  wz <- cbind(wz, -mu[, index$row] * mu[, index$col])  # The off-diagonals
-}
-wz <- w * wz                        # Scale by the prior weights w_i
-\end{Code}
-(the actual code is slightly more complicated).
-In general, \VGAM{} family functions can be remarkably compact,
-e.g.,
-\texttt{acat()},
-\texttt{cratio()}
-and
-\texttt{multinomial()} are all less than 120 lines of code each.
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Extensions and utilities}
-\label{sec:jsscat.extnUtil}
-
-This section describes some useful utilities/extensions of the above.
-
-
-
-\subsection{Marginal effects}
-\label{sec:jsscat.extnUtil.margeff}
-
-
-Models such as the multinomial logit and cumulative link models
-specify the posterior probability $p_{j} = \pr(Y=j|\bix)$ directly.
-In some applications, knowing the derivative of $p_{j}$
-with respect to some of the $x_k$ is useful;
-in fact, often just knowing the sign is important.
-The function \texttt{margeff()} computes the derivatives and
-returns them as a $p \times (M+1) \times n$ array.
-For the multinomial logit model it is easy to show
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix_i)}{\partial \,
-\bix_{i}}
-&=&
-p_{j}(\bix_i)
-\left\{
- \bbeta_{j} -
-\sum_{s=1}^{M+1}
-p_{s}(\bix_i)
-\,
- \bbeta_{s}
-\right\},
-\label{eqn:multinomial.marginalEffects}
-\end{eqnarray}
-while for
-\texttt{cumulative(reverse = FALSE)}
-we have
-$p_{j} = \gamma_{j} - \gamma_{j-1} = h(\eta_{j}) - h(\eta_{j-1})$
-where $h=g^{-1}$ is the inverse of the link function
-(cf. Table \ref{tab:cat.quantities})
-so that
-\begin{eqnarray}
-\frac{\partial \, p_{j}(\bix)}{\partial \,
-\bix}
-&=&
-h'(\eta_{j}) \, \bbeta_{j} -
-h'(\eta_{j-1}) \, \bbeta_{j-1} .
-\label{eqn:cumulative.marginalEffects}
-\end{eqnarray}
-
-
-
-
-The function \texttt{margeff()} returns an array with these
-derivatives and should handle any value of
-\texttt{reverse} and \texttt{parallel}.
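-As a minimal sketch, and assuming \texttt{margeff()} accepts the
-constrained fit \texttt{fit2.ms} of Section \ref{sec:jsscat.eg.mstatus}:
-\begin{Code}
-ME <- margeff(fit2.ms)  # A p x (M+1) x n array of derivatives
-dim(ME)
-ME[, , 1]               # Derivatives evaluated at the first observation
-\end{Code}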
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\subsection[The xij argument]{The \texttt{xij} argument}
-\label{sec:jsscat.extnUtil.xij}
-
-There are many models, including those for categorical data,
-where the value of an explanatory variable $x_k$ differs depending
-on which linear/additive predictor $\eta_{j}$ it applies to.
-Here is a well-known example from consumer choice modeling.
-Suppose an econometrician is interested in peoples'
-choice of transport for travelling to work
-and that there are four choices:
-$Y=1$ for ``bus'',
-$Y=2$ for ``train'',
-$Y=3$ for ``car'' and
-$Y=4$ for ``walking''.
-Assume that people only choose one means to go to work.
-Suppose there are three covariates:
-$X_2=$ cost,
-$X_3=$ journey time, and
-$X_4=$ distance.
-Of the covariates only $X_4$ (and the intercept $X_1$)
-is the same for all transport choices;
-the cost and journey time differ according to the means chosen.
-Suppose a random sample of $n$ people is collected
-from some population, and that each person has
-access to all these transport modes.
-For such data, a natural regression model would be a 
-multinomial logit model with $M=3$:
-for $j=1,\ldots,M$, we have
-\begin{eqnarray}
-\eta_{j}  = 
-\log \frac{\pr(Y=j)}{\pr(Y=M+1)}
-&=&
-\beta_{(j)1}^{*} +
-\beta_{(1)2}^{*} \, (x_{i2j}-x_{i24}) +
-\beta_{(1)3}^{*} \, (x_{i3j}-x_{i34}) +
-\beta_{(1)4}^{*} \, x_{i4},
-\label{eqn:xij.eg.gotowork}
-\end{eqnarray}
-where, for the $i$th person,
-$x_{i2j}$ is the cost for the $j$th transport means, and
-$x_{i3j}$ is the journey time of the $j$th transport means.
-The distance to get to work is $x_{i4}$; it has the same value
-regardless of the transport means.
-
-
-Equation \ref{eqn:xij.eg.gotowork}
-implies $\bH_1=\bI_3$ and $\bH_2=\bH_3=\bH_4=\bone_3$.
-Note
-also that if the last response category is used as the baseline or
-reference group (the default of \texttt{multinomial()}) then $x_{ik,M+1}$
-can be subtracted from $x_{ikj}$ for $j=1,\ldots,M$---this
-is the natural way $x_{ik,M+1}$ enters into the model.
-
-
-
-
-Recall from (\ref{gammod2}) that we had
-\begin{equation}
-\eta_j(\bix_i)  =  \bbeta_j^{\top} \bix_i  = 
-\sum_{k=1}^{p} \, x_{ik} \, \beta_{(j)k} .
-\label{eqn:xij0}
-\end{equation}
-Importantly, this can be generalized to
-\begin{equation}
-\eta_j(\bix_{ij})  =  \bbeta_j^{\top} \bix_{ij}  = 
-\sum_{k=1}^{p} \, x_{ikj} \, \beta_{(j)k} ,
-\label{eqn:xij}
-\end{equation}
-or writing this another way (as a mixture or hybrid),
-\begin{equation}
-\eta_j(\bix_{i}^{*},\bix_{ij}^{*})  =  
-\bbeta_{j}^{*T} \bix_{i}^{*} + \bbeta_{j}^{**T} \bix_{ij}^{*} .
-\label{eqn:xij2}
-\end{equation}
-Often $\bbeta_{j}^{**} = \bbeta^{**}$, say, a common value for all $j$.
-In (\ref{eqn:xij2}) the variables in $\bix_{i}^{*}$ are common to
-all $\eta_{j}$, and the variables in $\bix_{ij}^{*}$ have
-different values for differing $\eta_{j}$.
-This allows for covariate values that are specific to each $\eta_j$,
-a facility which is very important in many applications.
-
-
-The use of the \texttt{xij} argument with the \VGAM{} family function
-\texttt{multinomial()} has very important applications in economics.
-In that field the term ``multinomial logit model'' includes a variety of
-models such as the ``generalized logit model'' where (\ref{eqn:xij0})
-holds, the ``conditional logit model'' where (\ref{eqn:xij}) holds,
-and the ``mixed logit model,'' which is a combination of the two,
-where (\ref{eqn:xij2}) holds.
-The generalized logit model focusses on the individual as the unit of
-analysis, and uses individual characteristics as explanatory variables,
-e.g., age of the person in the transport example.
-The conditional logit model assumes each alternative has its own
-covariate values, and that the impact of a unit change in $x_k$ is
-constant across alternatives, e.g., journey time in the choice of
-transport mode.
-Unfortunately, there is confusion in the literature over the terminology
-of these models. Some authors call \texttt{multinomial()}
-with (\ref{eqn:xij0}) the ``generalized logit model''.
-Others call the mixed
-logit model the ``multinomial logit model'' and view the generalized
-logit and conditional logit models as special cases.
-In \VGAM{} terminology there is no need to give different names to
-all these slightly differing special cases. They are all still called
-multinomial logit models, although it may be added that there are
-some covariate-specific linear/additive predictors.
-The important thing is that the framework accommodates $\bix_{ij}$,
-so one tries to avoid making life unnecessarily complicated.
-And \texttt{xij} can apply in theory to any VGLM and not just to the
-multinomial logit model.
-\cite{imai:king:lau:2008} present another perspective on the
-$\bix_{ij}$ problem with illustrations from \pkg{Zelig}
-\citep{Zelig:2009}.
-
-
-
-
-
-\subsubsection[Using the xij argument]{Using the \texttt{xij} argument}
-\label{sec:xij.sub}
-
-\VGAM{} handles variables whose values depend on $\eta_{j}$,
-as in (\ref{eqn:xij2}), using the \texttt{xij} argument.
-It is assigned an \proglang{S} formula or a list of \proglang{S} formulas.
-Each formula, which must have $M$ \textit{different} terms,
-forms a matrix that premultiplies a constraint matrix.
-In detail, (\ref{eqn:xij0}) can be written in vector form as
-\begin{equation}
-\boldeta(\bix_i)  =  \bB^{\top} \bix_i  = 
-\sum_{k=1}^{p} \, \bH_{k} \, \bbeta_{k}^{*} \, x_{ik},
-\label{eqn:xij0.vector}
-\end{equation}
-where
-$\bbeta_{k}^{*} =
-\left( \beta_{(1)k}^{*},\ldots,\beta_{(r_k)k}^{*} \right)^{\top}$
-is to be estimated.
-This may be written
-\begin{eqnarray}
-\boldeta(\bix_{i})
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik},\ldots,x_{ik}) \,
-\bH_k \, \bbeta_{k}^{*}.
-\label{eqn:xij.d.vector}
-\end{eqnarray}
-To handle (\ref{eqn:xij})--(\ref{eqn:xij2})
-we can generalize (\ref{eqn:xij.d.vector}) to
-\begin{eqnarray}
-\boldeta_i
-&=&
-\sum_{k=1}^{p} \, \diag(x_{ik1},\ldots,x_{ikM}) \;
-\bH_k \, \bbeta_{k}^{*}
-\ \ \ \ \left(=
-\sum_{k=1}^{p} \, \bX_{(ik)}^{*} \,
-\bH_k \, \bbeta_{k}^{*} ,
-\mathrm{\ say} \right).
-\label{eqn:xij.vector}
-\end{eqnarray}
-Each component of the list \texttt{xij} is a formula having $M$ terms
-(ignoring the intercept) which
-specifies the successive diagonal elements of the matrix $\bX_{(ik)}^{*}$.
-Thus each row of the constraint matrix may be multiplied by a different
-vector of values.
-The constraint matrices themselves are not affected by the
-\texttt{xij} argument.
-
-
-
-
-
-How can one fit such models in \VGAM{}?
-Let us fit (\ref{eqn:xij.eg.gotowork}).
-Suppose the journey cost and time variables have had the
-cost and time of walking subtracted from them.
-Then,
-using ``\texttt{.trn}'' to denote train,
-\begin{Code}
-fit2 <- vglm(cbind(bus, train, car, walk) ~ Cost + Time + Distance,
-             fam = multinomial(parallel = TRUE ~ Cost + Time + Distance - 1),
-             xij = list(Cost ~ Cost.bus + Cost.trn + Cost.car,
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~  Cost.bus + Cost.trn + Cost.car +
-                        Time.bus + Time.trn + Time.car +
-                        Cost + Time + Distance,
-             data = gotowork)
-\end{Code}
-should do the job.
-Here, the argument \texttt{form2} is assigned a second \proglang{S} formula which
-is used in some special circumstances or by certain types
-of \VGAM{} family functions.
-The model has $\bH_{1} = \bI_{3}$ and $\bH_{2} = \bH_{3} = \bH_{4} = \bone_{3}$
-because the lack of parallelism applies only to the intercepts.
-However, unless \texttt{Cost} is the same as \texttt{Cost.bus} and
-\texttt{Time} is the same as \texttt{Time.bus},
-this model should not be plotted with \texttt{plotvgam()};
-see the author's homepage for further documentation.
-
-
-By the way,
-suppose 
-$\beta_{(1)4}^{*}$
-in (\ref{eqn:xij.eg.gotowork})
-is replaced by $\beta_{(j)4}^{*}$.
-Then the above code but with
-\begin{Code}
-  fam = multinomial(parallel = FALSE ~ 1 + Distance),
-\end{Code}
-should fit this model.
-Equivalently, one could use
-\begin{Code}
-  fam = multinomial(parallel = TRUE ~ Cost + Time - 1),
-\end{Code}
-which specifies the same constraints.
-
-
-
-
-
-
-\subsubsection{A more complicated example}
-\label{sec:xij.complicated}
-
-The above example is straightforward because the
-variables were entered linearly. However, things
-become more tricky if data-dependent functions are used in
-any \texttt{xij} terms, e.g., \texttt{bs()}, \texttt{ns()} or \texttt{poly()}.
-In particular, regression splines such as \texttt{bs()} and \texttt{ns()}
-can be used to estimate a general smooth function $f(x_{ij})$, which is
-very useful for exploratory data analysis.
-
-
-
-Suppose we wish to fit the variable \texttt{Cost} with a smoother.
-This is possible with regression splines and a trick.
-First note that
-\begin{Code}
-fit3 <- vglm(cbind(bus, train, car, walk) ~ ns(Cost) + Time + Distance,
-             multinomial(parallel = TRUE ~ ns(Cost) + Time + Distance - 1),
-             xij = list(ns(Cost) ~ ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car),
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~  ns(Cost.bus) + ns(Cost.trn) + ns(Cost.car) +
-                        Time.bus + Time.trn + Time.car +
-                        ns(Cost) + Cost + Time + Distance,
-             data = gotowork)
-\end{Code}
-will \textit{not} work because the basis functions for
-\texttt{ns(Cost.bus)}, \texttt{ns(Cost.trn)} and \texttt{ns(Cost.car)}
-are not identical since the knots differ.
-Consequently, they represent different functions despite
-having common regression coefficients.
-
-
-Fortunately, it is possible to force the \texttt{ns()} terms
-to have identical basis functions by using a trick:
-combine the vectors temporarily.
-To do this, one can let
-\begin{Code}
-NS <- function(x, ..., df = 3)
-      sm.ns(c(x, ...), df = df)[1:length(x), , drop = FALSE]
-\end{Code}
-This computes a natural cubic B-spline evaluated at \texttt{x} but it uses the
-other arguments as well to form an overall vector from which to obtain
-the (common) knots.
-Then the usage of \texttt{NS()} can be something like
-\begin{Code}
-fit4 <- vglm(cbind(bus, train, car, walk) ~ NS(Cost.bus, Cost.trn, Cost.car)
-                                          + Time + Distance,
-             multinomial(parallel = TRUE ~  NS(Cost.bus, Cost.trn, Cost.car)
-                                          + Time + Distance - 1),
-             xij = list(NS(Cost.bus, Cost.trn, Cost.car) ~
-                        NS(Cost.bus, Cost.trn, Cost.car) +
-                        NS(Cost.trn, Cost.car, Cost.bus) +
-                        NS(Cost.car, Cost.bus, Cost.trn),
-                        Time ~ Time.bus + Time.trn + Time.car),
-             form2 = ~  NS(Cost.bus, Cost.trn, Cost.car) +
-                        NS(Cost.trn, Cost.car, Cost.bus) +
-                        NS(Cost.car, Cost.bus, Cost.trn) +
-                        Time.bus + Time.trn + Time.car +
-                        Cost.bus + Cost.trn + Cost.car +
-                        Time + Distance,
-             data = gotowork)
-\end{Code}
-So \texttt{NS(Cost.bus, Cost.trn, Cost.car)}
-is the smooth term for
-\texttt{Cost.bus}, etc.
-Furthermore, \texttt{plotvgam()} may be applied to
-\texttt{fit4}, in which case the fitted regression spline is plotted
-against its first inner argument, viz. \texttt{Cost.bus}.
-
-
-One reason why it also predicts correctly
-is ``smart prediction''
-\citep{Rnews:Yee:2008}.
-
-
-
-\subsubsection{Implementation details} 
-\label{sec:jss.xij.implementationDetails} 
-
-The \texttt{xij} argument operates \textit{after} the
-ordinary $\bX_{\sVLM}$ matrix is created. Selected columns
-of $\bX_{\sVLM}$ are then modified using the constraint matrices and the
-\texttt{xij} and \texttt{form2} arguments, that is, using \texttt{form2}'s
-model matrix $\bX_{\sformtwo}$ and the $\bH_k$. This whole operation
-is possible because $\bX_{\sVLM}$ remains structurally the same.
-The crucial equation is (\ref{eqn:xij.vector}).
-
-
-Other \texttt{xij} examples are given in the online help of
-\texttt{fill()} and \texttt{vglm.control()},
-as well as at the package's webpage.
-
-
-
-
-
-
-
-
-
-
-
-% ----------------------------------------------------------------------
-\section{Discussion}
-\label{sec:jsscat.discussion}
-
-
-This article has sought to convey how VGLMs/VGAMs are well suited for
-fitting regression models for categorical data. Their primary strength
-is a simple and unified framework which, when reflected in software,
-makes practical CDA more understandable and efficient. Furthermore,
-there are natural extensions such as a reduced-rank variant and
-covariate-specific $\eta_{j}$. The \VGAM{} package thus offers
-a wide selection of models and utilities.
-
-
-There is much future work to do.
-Some useful additions to the package include:
-\begin{enumerate}
-
-\item
-Bias-reduction \citep{firt:1993} is a method for removing the $O(n^{-1})$
-bias from a maximum likelihood estimate. For a substantial class of
-models including GLMs it can be formulated in terms of a minor adjustment
-of the score vector within an IRLS algorithm \citep{kosm:firt:2009}.
-One by-product, for logistic regression, is that while the maximum
-likelihood estimate (MLE) can be infinite, the adjustment leads to
-estimates that are always finite. At present the \R{} package \pkg{brglm}
-\citep{Kosmidis:2008} implements bias-reduction for a number of models.
-Bias-reduction might be implemented by adding an argument
-\texttt{bred = FALSE}, say, to some existing \VGAM{} family functions.
-
-
-\item
-Nested logit models were developed to overcome a fundamental shortcoming
-related to the multinomial logit model, viz. the independence of
-irrelevant alternatives (IIA) assumption. Roughly, the multinomial logit
-model assumes the ratio of the choice probabilities of two alternatives
-is not dependent on the presence or absence of other alternatives in
-the model. This presents problems that are often illustrated by the
-famed red bus-blue bus problem.
-
-
-
-
-\item
-The generalized estimating equations (GEE) methodology is largely
-amenable to IRLS and this should be added to the package in the future
-\citep{wild:yee:1996}.
-
-
-\item
-For logistic regression, \proglang{SAS}'s \code{proc logistic} gives
-a warning if the data are completely separated or quasi-completely
-separated. The effect of separation is that some regression coefficients
-tend to $\pm\infty$. With such data, all (to my knowledge) \R{}
-implementations give warnings that are vague, if any at all, and this is
-rather unacceptable \citep{alli:2004}. The \pkg{safeBinaryRegression} package
-\citep{Konis:2009} overloads \code{glm()} so that a check for the
-existence of the MLE is made before fitting a binary response GLM.
-
-
-\end{enumerate}
-
-
-In closing, the \pkg{VGAM} package is continually being developed;
-therefore some future changes in the implementation details and usage
-may occur. These may include non-backward-compatible changes (see the
-\code{NEWS} file). Further documentation and updates are available at
-the author's homepage, whose URL is given in the \code{DESCRIPTION} file.
-
-
-
-% ----------------------------------------------------------------------
-\section*{Acknowledgments}
-
-The author thanks Micah Altman, David Firth and Bill Venables for helpful
-conversations, and Ioannis Kosmidis for a reprint.
-Thanks also to The Institute for Quantitative Social Science at Harvard
-University for their hospitality while this document was written during a
-sabbatical visit.
-
-
-
-
-
-\bibliography{categoricalVGAMbib}
-
-\end{document}
-
-
-
-
diff --git a/vignettes/categoricalVGAMbib.bib b/vignettes/categoricalVGAMbib.bib
deleted file mode 100644
index 7367aff..0000000
--- a/vignettes/categoricalVGAMbib.bib
+++ /dev/null
@@ -1,653 +0,0 @@
- at article{yee:wild:1996,
-    Author = {Yee, T. W. and Wild, C. J.},
-    Title = {Vector Generalized Additive Models},
-    Year = 1996,
-   JOURNAL = {Journal of the Royal Statistical Society~B},
-    Volume = 58,
-    Pages = {481--493},
-    Keywords = {Nonparametric regression; Smoothing},
-    Number = 3,
-}
-
- at article{gree:1984,
-    Author = {Green, P. J.},
-    Title = {Iteratively Reweighted Least Squares for Maximum Likelihood
-            Estimation, and Some Robust and Resistant Alternatives},
-    Year = 1984,
-   JOURNAL = {Journal of the Royal Statistical Society~B},
-    Volume = 46,
-    Pages = {149--192},
-    Keywords = {Scoring; Generalized linear model; Regression; Residual},
-    Number = 2,
-}
-
- at book{hast:tibs:1990,
-    Author = {Hastie, T. J. and Tibshirani, R. J.},
-    Title = {Generalized Additive Models},
-    Year = 1990,
-    Publisher = {Chapman \& Hall},
-    Address = {London},
-    Pages = {335},
-    Keywords = {Regression; Nonparametric; Generalized linear model}
-}
-
- at Manual{gam:pack:2009,
-    title = {\pkg{gam}: Generalized Additive Models},
-    author = {Trevor Hastie},
-    year = {2008},
-    note = {\proglang{R}~package version~1.01},
-    url = {http://CRAN.R-project.org/package=gam}
-}
-
- at article{ande:1984,
-    Author = {Anderson, J. A.},
-    Title = {Regression and Ordered Categorical Variables},
-    Year = 1984,
-   JOURNAL = {Journal of the Royal Statistical Society~B},
-    Volume = 46,
-    Pages = {1--30},
-    Keywords = {Assessed variable; Logistic regression; Stereotype
-               regression; Maximum likelihood},
-    Number = 1,
-}
-
- at article{firt:1993,
-author = {Firth, D.},
-title = {Bias Reduction of Maximum Likelihood Estimates},
-journal = {Biometrika},
-volume = {80},
-pages = {27--38},
-year = {1993},
-number = {1},
-abstract = {It is shown how, in regular parametric problems, the
-first-order term is removed from the asymptotic bias of maximum likelihood
-estimates by a suitable modification of the score function. In exponential
-families with canonical parameterization the effect is to penalize the
-likelihood by the Jeffreys invariant prior. In binomial logistic models,
-Poisson log linear models and certain other generalized linear models,
-the Jeffreys prior penalty function can be imposed in standard regression
-software using a scheme of iterative adjustments to the data.},
-}
-
- at InProceedings{alli:2004,
-    Author = {Allison, P.},
-    Title = {Convergence Problems in Logistic Regression},
-    chapter = {10},
-    Year = 2004,
-    Crossref = {altm:gill:mcdo:2004},
-    Pages = {238--252},
- BookTITLE = {Numerical Issues in Statistical Computing for the Social
-              Scientist},
- PUBLISHER = {Wiley-Interscience},
-   ADDRESS = {Hoboken, NJ, USA},
-}
-
- at book {altm:gill:mcdo:2004,
-    AUTHOR = {Altman, Micah and Gill, Jeff and McDonald, Michael P.},
-     TITLE = {Numerical Issues in Statistical Computing for the Social
-              Scientist},
- PUBLISHER = {Wiley-Interscience},
-   ADDRESS = {Hoboken, NJ, USA},
-      YEAR = {2004},
-     PAGES = {xvi+323},
-   MRCLASS = {62-02 (62-04 62P25 65-02 91-02)},
-  MRNUMBER = {MR2020104},
-}
-
- at article{yee:2010v,
-    Author = {Yee, T. W.},
-    Title = {{VGLM}s and {VGAM}s:
-             An Overview for Applications in Fisheries Research},
-    Year = 2010,
-    Journal = {Fisheries Research},
-   FJournal = {Fisheries Research},
-    Volume = {101},
-    Pages = {116--126},
-    Number = {1--2},
-}
-
- at article{imai:king:lau:2008,
-    AUTHOR = {Imai, Kosuke and King, Gary and Lau, Olivia},
-     TITLE = {Toward A Common Framework for Statistical Analysis and
-              Development},
-  JOURNAL = {Journal of Computational and Graphical Statistics},
-  YEAR = 2008,
-  VOLUME = 17,
-  PAGES = {892--913},
-  NUMBER = 4,
-}
-
- at book{stok:davi:koch:2000,
-    Author = {Stokes, W. and Davis, J. and Koch, W.},
-    Title = {Categorical Data Analysis Using The \proglang{SAS} System},
-    Year = 2000,
-    Edition = {2nd},
-    Publisher = {SAS Institute Inc.},
-    Address = {Cary, NC, USA},
-    PAGES = {648},
-}
-
- at article{neld:wedd:1972,
-    Author = {Nelder, J. A. and Wedderburn, R. W. M.},
-    Title = {Generalized Linear Models},
-    Year = 1972,
-   JOURNAL = {Journal of the Royal Statistical Society~A},
-    Volume = 135,
-    Pages = {370--384},
-    Keywords = {Probit analysis; Analysis of variance; Contingency table;
-               Exponential family; Quantal response; Weighted least
-               squares},
-    Number = 3,
-}
-
- at book{agre:2002,
-    Author = {Agresti, Alan},
-    Title = {Categorical Data Analysis},
-    Year = 2002,
-    Publisher = {John Wiley \& Sons},
-    Address = {New York, USA},
-    Edition = {2nd},
-}
-
-
- at book{agre:2013,
-  Author = {Agresti, Alan},
-  Title = {Categorical Data Analysis},
-  Year = 2013,
-  Publisher = {Wiley},
-  Address = {Hoboken, NJ, USA},
-  Edition = {Third},
-}
-
-
-
- at book{agre:2010,
-      Author = {Agresti, Alan},
-      Title = {Analysis of Ordinal Categorical Data},
-      Year = 2010,
-      Publisher = {Wiley},
-      Edition = {Second},
-      Address = {Hoboken, NJ, USA},
-      Pages = {396},
-}
-
-
-
- at book{tutz:2012,
-    AUTHOR = {Tutz, G.},
-     TITLE = {Regression for Categorical Data},
-      YEAR = {2012},
- PUBLISHER = {Cambridge University Press},
-   ADDRESS = {Cambridge},
-}
-
-
- at book{fahr:tutz:2001,
-    Author = {Fahrmeir, L. and Tutz, G.},
-    Title = {Multivariate Statistical Modelling Based on Generalized Linear
-            Models},
-    Year = 2001,
-    Edition = {2nd},
-    Publisher = {Springer-Verlag},
-    ADDRESS = {New York, USA},
-}
-
- at book{leon:2000,
-    Author = {Leonard, Thomas},
-    Title = {A Course in Categorical Data Analysis},
-    Year = 2000,
-    Publisher = {Chapman \& Hall/CRC},
-    Address = {Boca Raton, FL, USA},
-}
-
- at book{lloy:1999,
-    Author = {Lloyd, C. J.},
-    Title = {Statistical Analysis of Categorical Data},
-    Year = 1999,
-    Publisher = {John Wiley \& Sons},
-    Address = {New York, USA}
-}
-
- at book{long:1997,
-    Author = {Long, J. S.},
-    Title = {Regression Models for Categorical and Limited Dependent Variables},
-    Year = 1997,
-    Publisher = {Sage Publications},
-    ADDRESS = {Thousand Oaks, CA, USA},
-}
-
- at book{mccu:neld:1989,
-    Author = {McCullagh, P. and Nelder, J. A.},
-    Title = {Generalized Linear Models},
-    Year = 1989,
-    Edition = {2nd},
-    Publisher = {Chapman \& Hall},
-    Address = {London},
-    Pages = {500}
-}
-
- at book{simo:2003,
-    Author = {Simonoff, J. S.},
-    Title = {Analyzing Categorical Data},
-    Year = 2003,
-    Pages = {496},
-    Publisher = {Springer-Verlag},
-    Address = {New York, USA}
-}
-
- at article{liu:agre:2005,
-    Author = {Liu, I. and Agresti, A.},
-    Title = {The Analysis of Ordered Categorical Data:
-             An Overview and a Survey of Recent Developments},
-    Year = 2005,
-    Journal = {Sociedad Estad{\'i}stica e Investigaci{\'o}n Operativa Test},
-    Volume = 14,
-    Pages = {1--73},
-    Number = 1,
-}
-
- at MANUAL{thom:2009,
-    TITLE = {\proglang{R} (and \proglang{S-PLUS}) Manual to Accompany
-             Agresti's \textit{Categorical Data Analysis}~(2002),
-             2nd edition},
-    AUTHOR = {Thompson, L. A.},
-    YEAR = {2009},
-    URL = {https://home.comcast.net/~lthompson221/Splusdiscrete2.pdf},
-}
-
- at article{yee:2008c,
-    Author = {Yee, T. W.},
-    Title = {The \pkg{VGAM} Package},
-    Year = 2008,
-    Journal = {\proglang{R} {N}ews},
-    Volume = 8,
-    Pages = {28--39},
-    Number = 2,
-}
-
- at article{Rnews:Yee:2008,
-  author = {Thomas W. Yee},
-  title = {The \pkg{VGAM} Package},
-  journal = {\proglang{R}~News},
-  year = 2008,
-  volume = 8,
-  pages = {28--39},
-  month = {October},
-  url = {http://CRAN.R-project.org/doc/Rnews/},
-  number = 2,
-}
-
- at article{yee:hast:2003,
-    AUTHOR = {Yee, T. W. and Hastie, T. J.},
-     TITLE = {Reduced-rank Vector Generalized Linear Models},
-   JOURNAL = {Statistical Modelling},
-    Volume = 3,
-     Pages = {15--41},
-      YEAR = {2003},
-     Number = 1,
-}
-
- at article{good:1981,
-    Author = {Goodman, L. A.},
-    Title = {Association Models and Canonical Correlation in the Analysis
-             of Cross-classifications Having Ordered Categories},
-    Year = 1981,
-    Journal = {Journal of the American Statistical Association},
-    Volume = 76,
-    Pages = {320--334},
-    Number = 374,
-}
-
- at article{buja:hast:tibs:1989,
-    Author = {Buja, Andreas and Hastie, Trevor and Tibshirani, Robert},
-    Title = {Linear Smoothers and Additive Models},
-    Year = 1989,
-   JOURNAL = {The Annals of Statistics},
-    Volume = 17,
-    Pages = {453--510},
-    Keywords = {Nonparametric; Regression; Kernel estimator},
-    Number = 2,
-}
-
- at article{yee:step:2007,
-    AUTHOR = {Yee, Thomas W. and Stephenson, Alec G.},
-     TITLE = {Vector Generalized Linear and Additive Extreme Value Models},
-   JOURNAL = {Extremes},
-  FJOURNAL = {Extremes. Statistical Theory and Applications in Science,
-              Engineering and Economics},
-    VOLUME = {10},
-      YEAR = {2007},
-     PAGES = {1--19},
-   MRCLASS = {Database Expansion Item},
-  MRNUMBER = {MR2407639},
-    NUMBER = {1--2},
-}
-
- at article{wand:orme:2008,
-    Author = {Wand, M. P. and Ormerod, J. T.},
-    Title = {On Semiparametric Regression with {O}'{S}ullivan Penalized Splines},
-    Year = 2008,
-    Journal = {The Australian and New Zealand Journal of Statistics},
-    Volume = 50,
-    Issue = 2,
-    Pages = {179--198},
-    Number = 2,
-}
-
- at book{cham:hast:1993,
-    Editor = {Chambers, John M. and Hastie, Trevor J.},
-    Title = {Statistical Models in \proglang{S}},
-    Publisher = {Chapman \& Hall},
-    Year = 1993,
-    Pages = {608},
-    Address = {New York, USA},
-    Keywords = {Computing},
-}
-
- at Article{pete:harr:1990,
-    Author = {Peterson, B. and Harrell, Frank E.},
-    Title = {Partial Proportional Odds Models for Ordinal Response Variables},
-    Year = 1990,
-    Journal = {Applied Statistics},
-    Volume = 39,
-    Pages = {205--217},
-    Number = 2,
-}
-
- at article{pete:1990,
-    Author = {Peterson, B.},
-    Title = {Letter to the Editor: Ordinal Regression Models for
-             Epidemiologic Data},
-    Year = 1990,
-    Journal = {American Journal of Epidemiology},
-    Volume = 131,
-    Pages = {745--746}
-}
-
- at article{hast:tibs:buja:1994,
-    AUTHOR = {Hastie, Trevor and Tibshirani, Robert and Buja, Andreas},
-     TITLE = {Flexible Discriminant Analysis by Optimal Scoring},
-   JOURNAL = {Journal of the American Statistical Association},
-    VOLUME = {89},
-      YEAR = {1994},
-     PAGES = {1255--1270},
-     CODEN = {JSTNAL},
-   MRCLASS = {62H30},
-  MRNUMBER = {95h:62099},
-    NUMBER = {428},
-}
-
- at article{firth:2005,
-    Author = {Firth, David},
-    Title = {{B}radley-{T}erry Models in \proglang{R}},
-    Year = 2005,
-    Journal = {Journal of Statistical Software},
-    Volume = 12,
-    Number = 1,
-    Pages = {1--12},
-    URL = "http://www.jstatsoft.org/v12/i01/",
-}
-
- at book{weir:1996,
-    Author = {Weir, Bruce S.},
-    Title = {Genetic Data Analysis II: Methods for Discrete Population
-             Genetic Data},
-    Year = 1996,
-    Publisher = {Sinauer Associates, Inc.},
-    Address = {Sunderland, MA, USA}
-}
-
- at book{lang:2002,
-    Author = {Lange, Kenneth},
-    Title = {Mathematical and Statistical Methods for Genetic Analysis},
-    Year = 2002,
-    Edition = {2nd},
-    Publisher = {Springer-Verlag},
-    Address = {New York, USA},
-}
-
- at article{macm:etal:1995,
-    Author = {MacMahon, S. and Norton, R. and Jackson, R. and Mackie, M. J. and
-              Cheng, A. and
-              Vander Hoorn, S. and Milne, A. and McCulloch, A.},
-    Title = {Fletcher {C}hallenge-{U}niversity of {A}uckland {H}eart \&
-             {H}ealth {S}tudy: Design and Baseline Findings},
-    Year = 1995,
-    Journal = {New Zealand Medical Journal},
-    Volume = 108,
-    Pages = {499--502},
-}
-
- at article{altm:jack:2010,
-  author =      {Altman, M. and Jackman, S.},
-  title =       "Nineteen Ways of Looking at Statistical Software",
-  journal =     "Journal of Statistical Software",
-  year =        "2010",
-  note =        "Forthcoming"
-}
-
- at article{fox:hong:2009,
-  author =    "John Fox and Jangman Hong",
-  title =    {Effect Displays in \proglang{R} for Multinomial and
-                 Proportional-Odds Logit Models:
-                 Extensions to the \pkg{effects} Package},
-  journal =    "Journal of Statistical Software",
-  volume =    "32",
-  number =    "1",
-  pages =    "1--24",
-  year =     "2009",
-  URL =      "http://www.jstatsoft.org/v32/i01/",
-}
-
- at article{wild:yee:1996,
-    Author = {Wild, C. J. and Yee, T. W.},
-    Title = {Additive Extensions to Generalized Estimating Equation
-            Methods},
-    Year = 1996,
-   JOURNAL = {Journal of the Royal Statistical Society~B},
-    Volume = 58,
-    Pages = {711--725},
-    Keywords = {Longitudinal data; Nonparametric; Regression; Smoothing},
-    NUMBER = {4},
-}
-
- at Article{Yee:2010,
-   author        = {Thomas W. Yee},
-   title         = {The \pkg{VGAM} Package for Categorical Data Analysis},
-   journal       = {Journal of Statistical Software},
-   year          = {2010},
-   volume        = {32},
-   number        = {10},
-   pages	 = {1--34},
-   url           = {http://www.jstatsoft.org/v32/i10/}
-}
-
- at Manual{R,
-   title         = {\proglang{R}: {A} Language and Environment
-                    for Statistical Computing},
-   author        = {{\proglang{R} Development Core Team}},
-   organization  = {\proglang{R} Foundation for Statistical Computing},
-   address       = {Vienna, Austria},
-   year          = {2009},
-   note          = {{ISBN} 3-900051-07-0},
-   url           = {http://www.R-project.org/}
-}
-
- at Book{Venables+Ripley:2002,
-   author        = {William N. Venables and Brian D. Ripley},
-   title         = {Modern Applied Statistics with \proglang{S}},
-   edition       = {4th},
-   year          = {2002},
-   pages         = {495},
-   publisher     = {Springer-Verlag},
-   address       = {New York},
-    url = {http://www.stats.ox.ac.uk/pub/MASS4/},
-}
-
- at Manual{SAS,
-   author        = {{\proglang{SAS} Institute Inc.}},
-   title         = {The \proglang{SAS} System, Version 9.1},
-   year          = {2003},
-   address       = {Cary, NC},
-   url           = {http://www.sas.com/}
-}
-
- at Manual{yee:VGAM:2010,
-   title        = {\pkg{VGAM}: Vector Generalized Linear and Additive Models},
-   author       = {Yee, T. W.},
-   year         = {2010},
-   note         = {\proglang{R}~package version~0.7-10},
-   url          = {http://CRAN.R-project.org/package=VGAM}
-}
-
- at Manual{Harrell:2009,
-   title        = {\pkg{rms}: Regression Modeling Strategies},
-   author       = {Frank E. {Harrell, Jr.}},
-   year         = {2009},
-   note         = {\proglang{R}~package version~2.1-0},
-   url          = {http://CRAN.R-project.org/package=rms}
-}
-
- at Manual{Meyer+Zeileis+Hornik:2009,
-   title        = {\pkg{vcd}: Visualizing Categorical Data},
-   author       = {David Meyer and Achim Zeileis and Kurt Hornik},
-   year         = {2009},
-   note         = {\proglang{R}~package version~1.2-7},
-   url          = {http://CRAN.R-project.org/package=vcd}
-}
-
- at Article{Meyer+Zeileis+Hornik:2006,
-   author       = {David Meyer and Achim Zeileis and Kurt Hornik},
-   title        = {The Strucplot Framework: Visualizing Multi-Way
-                   Contingency Tables with \pkg{vcd}},
-   journal      = {Journal of Statistical Software},
-   year         = {2006},
-   volume       = {17},
-   number       = {3},
-   pages        = {1--48},
-   url          = {http://www.jstatsoft.org/v17/i03/}
-}
-
- at Manual{Turner+Firth:2009,
-   title        = {Generalized Nonlinear Models in \proglang{R}:
-                   An Overview of the \pkg{gnm} Package},
-   author       = {Heather Turner and David Firth},
-   year         = {2009},
-   note         = {\proglang{R}~package version~0.10-0},
-   url          = {http://CRAN.R-project.org/package=gnm},
-}
-
- at Article{Rnews:Turner+Firth:2007,
-  author       = {Heather Turner and David Firth},
-  title        = {\pkg{gnm}: A Package for Generalized Nonlinear Models},
-  journal      = {\proglang{R}~News},
-  year         = 2007,
-  volume       = 7,
-  number       = 2,
-  pages        = {8--12},
-  month        = {October},
-  url = {http://CRAN.R-project.org/doc/Rnews/},
-}
-
-
- at Manual{ElemStatLearn:2009,
-    title = {\pkg{ElemStatLearn}: Data Sets, Functions and
-             Examples from the Book `The Elements
-             of Statistical Learning, Data Mining, Inference, and
-             Prediction' by Trevor Hastie, Robert Tibshirani and Jerome
-             Friedman},
-    author = {Kjetil Halvorsen},
-    year = {2009},
-    note = {\proglang{R}~package version~0.1-7},
-    url = {http://CRAN.R-project.org/package=ElemStatLearn},
-  }
-
- at Manual{Zelig:2009,
-    title = {\pkg{Zelig}: Everyone's Statistical Software},
-    author = {Kosuke Imai and Gary King and Olivia Lau},
-    year = {2009},
-    note = {\proglang{R}~package version~3.4-5},
-    url = {http://CRAN.R-project.org/package=Zelig},
-}
-
- at article{kosm:firt:2009,
-    author = {Kosmidis, I. and Firth, D.},
-     title = {Bias Reduction in Exponential Family Nonlinear Models},
-      year = {2009},
-   JOURNAL = {Biometrika},
-  FJOURNAL = {Biometrika},
-    volume = {96},
-     PAGES = {793--804},
-    NUMBER = {4},
-}
-
- at techreport{kosm:firt:2008,
-    author = {Kosmidis, I. and Firth, D.},
-    title = {Bias Reduction in Exponential Family Nonlinear Models},
-    Journal = {CRiSM Paper No.~08-05v2},
-    year = {2008},
-    URL =  "http://www.warwick.ac.uk/go/crism",
-    Institution = {Department of Statistics, Warwick University},
-}
-
- at Manual{Kosmidis:2008,
-   title        = {\pkg{brglm}: Bias Reduction in Binary-Response {GLMs}},
-   author       = {Ioannis Kosmidis},
-   year         = {2008},
-   note         = {\proglang{R}~package version~0.5-4},
-   url          = {http://CRAN.R-project.org/package=brglm},
-}
-
- at Manual{Hatzinger:2009,
-    title = {\pkg{prefmod}: Utilities to Fit Paired Comparison
-             Models for Preferences},
-    author = {Reinhold Hatzinger},
-    year = {2009},
-    note = {\proglang{R}~package version~0.8-16},
-    url = {http://CRAN.R-project.org/package=prefmod},
-}
-
- at Manual{firth:2008,
-    title = {\pkg{BradleyTerry}: Bradley-Terry Models},
-    author = {David Firth},
-    year = {2008},
-    note = {\proglang{R}~package version~0.8-7},
-    url = {http://CRAN.R-project.org/package=BradleyTerry},
-  }
-
- at Manual{gnlm:2007,
-    title = {\pkg{gnlm}: Generalized Nonlinear Regression Models},
-    author = {Jim Lindsey},
-    year = {2007},
-    note = {\proglang{R}~package version~1.0},
-    url = {http://popgen.unimaas.nl/~jlindsey/rcode.html},
-}
-
- at Manual{Konis:2009,
-    title = {\pkg{safeBinaryRegression}: Safe Binary Regression},
-    author = {Kjell Konis},
-    year = {2009},
-    note = {\proglang{R}~package version~0.1-2},
-    url = {http://CRAN.R-project.org/package=safeBinaryRegression},
-}
-
- at book{smit:merk:2013,
-     TITLE = {Generalized Linear Models for Categorical and
-              Continuous Limited Dependent Variables},
-    AUTHOR = {Smithson, M. and Merkle, E. C.},
-      YEAR = {2013},
- Publisher = {Chapman \& Hall/CRC},
-   Address = {London},
-}
-
